diff --git a/bigquery/docs/snippets.py b/bigquery/docs/snippets.py
index 6093abe95c1c..594f7391c918 100644
--- a/bigquery/docs/snippets.py
+++ b/bigquery/docs/snippets.py
@@ -1512,85 +1512,6 @@ def test_load_table_from_uri_autodetect(client, to_delete, capsys):
     assert 'Loaded 50 rows.' in out
 
 
-def test_load_table_from_uri_append(client, to_delete, capsys):
-    """Appends data to a table from a GCS URI using various formats
-
-    Each file format has its own tested load from URI sample. Because most of
-    the code is common for autodetect, append, and truncate, this sample
-    includes snippets for all supported formats but only calls a single load
-    job.
-
-    This code snippet is made up of shared code, then format-specific code,
-    followed by more shared code. Note that only the last format in the
-    format-specific code section will be tested in this test.
-    """
-    dataset_id = 'load_table_dataset_{}'.format(_millis())
-    dataset = bigquery.Dataset(client.dataset(dataset_id))
-    client.create_dataset(dataset)
-    to_delete.append(dataset)
-
-    job_config = bigquery.LoadJobConfig()
-    job_config.schema = [
-        bigquery.SchemaField('name', 'STRING'),
-        bigquery.SchemaField('post_abbr', 'STRING')
-    ]
-    table_ref = dataset.table('us_states')
-    body = six.BytesIO(b'Washington,WA')
-    client.load_table_from_file(
-        body, table_ref, job_config=job_config).result()
-
-    # Shared code
-    # [START bigquery_load_table_gcs_csv_append]
-    # [START bigquery_load_table_gcs_json_append]
-    # from google.cloud import bigquery
-    # client = bigquery.Client()
-    # table_ref = client.dataset('my_dataset').table('existing_table')
-
-    previous_rows = client.get_table(table_ref).num_rows
-    assert previous_rows > 0
-
-    job_config = bigquery.LoadJobConfig()
-    job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
-    # [END bigquery_load_table_gcs_csv_append]
-    # [END bigquery_load_table_gcs_json_append]
-
-    # Format-specific code
-    # [START bigquery_load_table_gcs_csv_append]
-    job_config.skip_leading_rows = 1
-    # The source format defaults to CSV, so the line below is optional.
-    job_config.source_format = bigquery.SourceFormat.CSV
-    uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
-    # [END bigquery_load_table_gcs_csv_append]
-    # unset csv-specific attribute
-    del job_config._properties['load']['skipLeadingRows']
-
-    # [START bigquery_load_table_gcs_json_append]
-    job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
-    uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
-    # [END bigquery_load_table_gcs_json_append]
-
-    # Shared code
-    # [START bigquery_load_table_gcs_csv_append]
-    # [START bigquery_load_table_gcs_json_append]
-    load_job = client.load_table_from_uri(
-        uri,
-        table_ref,
-        job_config=job_config)  # API request
-    print('Starting job {}'.format(load_job.job_id))
-
-    load_job.result()  # Waits for table load to complete.
-    print('Job finished.')
-
-    destination_table = client.get_table(table_ref)
-    print('Loaded {} rows.'.format(destination_table.num_rows - previous_rows))
-    # [END bigquery_load_table_gcs_csv_append]
-    # [END bigquery_load_table_gcs_json_append]
-
-    out, _ = capsys.readouterr()
-    assert previous_rows == 1
-    assert 'Loaded 50 rows.' in out
-
-
 def test_load_table_from_uri_truncate(client, to_delete, capsys):
     """Replaces table data with data from a GCS URI using various formats