If you are using the BigQuery API with Python, you can run a copy job:
https://cloud.google.com/bigquery/docs/tables#copyingtable
Copying the Python example from the docs:
import pprint
import time

from googleapiclient.errors import HttpError


def copyTable(service):
    try:
        sourceProjectId = raw_input("What is your source project? ")
        sourceDatasetId = raw_input("What is your source dataset? ")
        sourceTableId = raw_input("What is your source table? ")
        targetProjectId = raw_input("What is your target project? ")
        targetDatasetId = raw_input("What is your target dataset? ")
        targetTableId = raw_input("What is your target table? ")

        jobCollection = service.jobs()
        jobData = {
            "projectId": sourceProjectId,
            "configuration": {
                "copy": {
                    "sourceTable": {
                        "projectId": sourceProjectId,
                        "datasetId": sourceDatasetId,
                        "tableId": sourceTableId,
                    },
                    "destinationTable": {
                        "projectId": targetProjectId,
                        "datasetId": targetDatasetId,
                        "tableId": targetTableId,
                    },
                    # Create the destination table if needed and overwrite
                    # whatever it currently contains.
                    "createDisposition": "CREATE_IF_NEEDED",
                    "writeDisposition": "WRITE_TRUNCATE"
                }
            }
        }

        insertResponse = jobCollection.insert(
            projectId=targetProjectId, body=jobData).execute()

        # Poll for status until the job is done, with a short pause
        # between calls.
        while True:
            status = jobCollection.get(
                projectId=targetProjectId,
                jobId=insertResponse['jobReference']['jobId']).execute()
            if status['status']['state'] == 'DONE':
                break
            print 'Waiting for the copy to complete...'
            time.sleep(10)

        if 'errors' in status['status']:
            print 'Error copying table:'
            pprint.pprint(status)
            return

        print 'Copied the table:'
        pprint.pprint(status)

        # Now query and print out the copied table (queryTableData is
        # another helper defined on the same docs page).
        queryTableData(service, targetProjectId, targetDatasetId, targetTableId)
    except HttpError as err:
        print 'Error in copyTable:'
        pprint.pprint(err.resp)
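If you are on the newer google-cloud-bigquery client library instead of the discovery-based service object above, the same copy job takes only a few lines. A minimal sketch, assuming that library is installed and using placeholder project/dataset/table names:

from google.cloud import bigquery

client = bigquery.Client(project="target-project")  # placeholder project

# Placeholder table IDs; replace with your source and target tables.
source = "source-project.source_dataset.source_table"
target = "target-project.target_dataset.target_table"

# Same dispositions as in the jobData dict above.
job_config = bigquery.CopyJobConfig(
    create_disposition="CREATE_IF_NEEDED",
    write_disposition="WRITE_TRUNCATE",
)

copy_job = client.copy_table(source, target, job_config=job_config)
copy_job.result()  # Blocks until the copy job finishes.

copy_table inserts the same kind of copy job under the hood, and result() replaces the manual polling loop.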
The bq cp command does basically the same thing internally (you could even call that function from your own code, depending on which bq you are importing).
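For a quick copy from the shell, it is a one-liner. Dataset and table names below are placeholders; a table in another project can be qualified as project:dataset.table:

bq cp source_dataset.source_table target_project:target_dataset.target_table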