diff --git a/samples/snippets/create_table_schema_from_json.py b/samples/snippets/create_table_schema_from_json.py
new file mode 100644
index 000000000..b866e2ebe
--- /dev/null
+++ b/samples/snippets/create_table_schema_from_json.py
@@ -0,0 +1,42 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pathlib
+
+
+def create_table(table_id: str) -> None:
+    orig_table_id = table_id
+    current_directory = pathlib.Path(__file__).parent
+    orig_schema_path = str(current_directory / "schema.json")
+    # [START bigquery_schema_file_create]
+    from google.cloud import bigquery
+
+    client = bigquery.Client()
+
+    # TODO(dev): Change table_id to the full name of the table you want to create.
+    table_id = "your-project.your_dataset.your_table_name"
+    # TODO(dev): Change schema_path variable to the path of your schema file.
+    schema_path = "path/to/schema.json"
+    # [END bigquery_schema_file_create]
+    table_id = orig_table_id
+    schema_path = orig_schema_path
+
+    # [START bigquery_schema_file_create]
+    # To load a schema file use the schema_from_json method.
+    schema = client.schema_from_json(schema_path)
+
+    table = bigquery.Table(table_id, schema=schema)
+    table = client.create_table(table)  # API request
+    print(f"Created table {table_id}.")
+    # [END bigquery_schema_file_create]
diff --git a/samples/snippets/create_table_schema_from_json_test.py b/samples/snippets/create_table_schema_from_json_test.py
new file mode 100644
index 000000000..e99b92672
--- /dev/null
+++ b/samples/snippets/create_table_schema_from_json_test.py
@@ -0,0 +1,32 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import create_table_schema_from_json
+
+if typing.TYPE_CHECKING:
+    import pytest
+
+
+def test_create_table(
+    capsys: "pytest.CaptureFixture[str]",
+    random_table_id: str,
+) -> None:
+
+    create_table_schema_from_json.create_table(random_table_id)
+
+    out, _ = capsys.readouterr()
+    assert "Created" in out
+    assert random_table_id in out
diff --git a/samples/snippets/dataset_access_test.py b/samples/snippets/dataset_access_test.py
index 4d1a70eb1..cc6a9af61 100644
--- a/samples/snippets/dataset_access_test.py
+++ b/samples/snippets/dataset_access_test.py
@@ -18,8 +18,8 @@
 import update_dataset_access
 
 if typing.TYPE_CHECKING:
-    import pytest
     from google.cloud import bigquery
+    import pytest
 
 
 def test_dataset_access_permissions(
diff --git a/samples/snippets/delete_job.py b/samples/snippets/delete_job.py
index 7c8640baf..2aeb53849 100644
--- a/samples/snippets/delete_job.py
+++ b/samples/snippets/delete_job.py
@@ -17,8 +17,8 @@ def delete_job_metadata(job_id: str, location: str) -> None:
     orig_job_id = job_id
     orig_location = location
     # [START bigquery_delete_job]
-    from google.cloud import bigquery
     from google.api_core import exceptions
+    from google.cloud import bigquery
 
     # TODO(developer): Set the job ID to the ID of the job whose metadata you
     # wish to delete.
diff --git a/samples/snippets/load_table_schema_from_json.py b/samples/snippets/load_table_schema_from_json.py
new file mode 100644
index 000000000..3f1f85430
--- /dev/null
+++ b/samples/snippets/load_table_schema_from_json.py
@@ -0,0 +1,60 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pathlib
+
+
+def load_table(table_id: str) -> None:
+    orig_uri = "gs://cloud-samples-data/bigquery/us-states/us-states.csv"
+    orig_table_id = table_id
+    current_directory = pathlib.Path(__file__).parent
+    orig_schema_path = str(current_directory / "schema_us_states.json")
+    # [START bigquery_schema_file_load]
+    from google.cloud import bigquery
+
+    client = bigquery.Client()
+
+    # TODO(dev): Change uri variable to the path of your data file.
+    uri = "gs://your-bucket/path/to/your-file.csv"
+    # TODO(dev): Change table_id to the full name of the table you want to create.
+    table_id = "your-project.your_dataset.your_table"
+    # TODO(dev): Change schema_path variable to the path of your schema file.
+    schema_path = "path/to/schema.json"
+    # [END bigquery_schema_file_load]
+    uri = orig_uri
+    table_id = orig_table_id
+    schema_path = orig_schema_path
+    # [START bigquery_schema_file_load]
+    # To load a schema file use the schema_from_json method.
+    schema = client.schema_from_json(schema_path)
+
+    job_config = bigquery.LoadJobConfig(
+        # To use the schema you loaded pass it into the
+        # LoadJobConfig constructor.
+        schema=schema,
+        skip_leading_rows=1,
+    )
+
+    # Pass the job_config object to the load_table_from_file,
+    # load_table_from_json, or load_table_from_uri method
+    # to use the schema on a new table.
+    load_job = client.load_table_from_uri(
+        uri, table_id, job_config=job_config
+    )  # Make an API request.
+
+    load_job.result()  # Waits for the job to complete.
+
+    destination_table = client.get_table(table_id)  # Make an API request.
+    print(f"Loaded {destination_table.num_rows} rows to {table_id}.")
+    # [END bigquery_schema_file_load]
diff --git a/samples/snippets/load_table_schema_from_json_test.py b/samples/snippets/load_table_schema_from_json_test.py
new file mode 100644
index 000000000..267a6786c
--- /dev/null
+++ b/samples/snippets/load_table_schema_from_json_test.py
@@ -0,0 +1,32 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import load_table_schema_from_json
+
+if typing.TYPE_CHECKING:
+    import pytest
+
+
+def test_load_table(
+    capsys: "pytest.CaptureFixture[str]",
+    random_table_id: str,
+) -> None:
+
+    load_table_schema_from_json.load_table(random_table_id)
+
+    out, _ = capsys.readouterr()
+    assert "Loaded" in out
+    assert random_table_id in out
diff --git a/samples/snippets/materialized_view.py b/samples/snippets/materialized_view.py
index adb3688a4..a47ee5b81 100644
--- a/samples/snippets/materialized_view.py
+++ b/samples/snippets/materialized_view.py
@@ -60,6 +60,7 @@ def update_materialized_view(
 
     # [START bigquery_update_materialized_view]
     import datetime
+
     from google.cloud import bigquery
 
     bigquery_client = bigquery.Client()
diff --git a/samples/snippets/quickstart_test.py b/samples/snippets/quickstart_test.py
index b0bad5ee5..610c63c3b 100644
--- a/samples/snippets/quickstart_test.py
+++ b/samples/snippets/quickstart_test.py
@@ -20,7 +20,6 @@
 
 import quickstart
 
-
 # Must match the dataset listed in quickstart.py (there's no easy way to
 # extract this).
 DATASET_ID = "my_new_dataset"
diff --git a/samples/snippets/schema.json b/samples/snippets/schema.json
new file mode 100644
index 000000000..bd2164dad
--- /dev/null
+++ b/samples/snippets/schema.json
@@ -0,0 +1,20 @@
+[
+  {
+    "name": "qtr",
+    "type": "STRING",
+    "mode": "REQUIRED",
+    "description": "quarter"
+  },
+  {
+    "name": "rep",
+    "type": "STRING",
+    "mode": "NULLABLE",
+    "description": "sales representative"
+  },
+  {
+    "name": "sales",
+    "type": "FLOAT",
+    "mode": "NULLABLE",
+    "defaultValueExpression": "2.55"
+  }
+]
diff --git a/samples/snippets/schema_us_states.json b/samples/snippets/schema_us_states.json
new file mode 100644
index 000000000..7f2ccc277
--- /dev/null
+++ b/samples/snippets/schema_us_states.json
@@ -0,0 +1,12 @@
+[
+  {
+    "name": "name",
+    "type": "STRING",
+    "mode": "NULLABLE"
+  },
+  {
+    "name": "post_abbr",
+    "type": "STRING",
+    "mode": "NULLABLE"
+  }
+]
diff --git a/samples/snippets/user_credentials_test.py b/samples/snippets/user_credentials_test.py
index e2794e83b..df8a6354d 100644
--- a/samples/snippets/user_credentials_test.py
+++ b/samples/snippets/user_credentials_test.py
@@ -21,7 +21,6 @@
 
 from user_credentials import main
 
-
 PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
 
 MockType = Union[mock.mock.MagicMock, mock.mock.AsyncMock]