Use of com.google.api.services.bigquery.model.TableReference in project beam by apache.
The class CreateTableHelpers, method tryCreateTable:
@SuppressWarnings({"nullness"})
private static void tryCreateTable(
    DoFn<?, ?>.ProcessContext context,
    Supplier<TableSchema> schemaSupplier,
    TableDestination tableDestination,
    CreateDisposition createDisposition,
    String tableSpec,
    String kmsKey,
    BigQueryServices bqServices) {
  TableReference tableReference = tableDestination.getTableReference().clone();
  tableReference.setTableId(
      BigQueryHelpers.stripPartitionDecorator(tableReference.getTableId()));
  try (DatasetService datasetService =
      bqServices.getDatasetService(context.getPipelineOptions().as(BigQueryOptions.class))) {
    if (datasetService.getTable(tableReference) == null) {
      TableSchema tableSchema = schemaSupplier.get();
      checkArgument(
          tableSchema != null,
          "Unless create disposition is %s, a schema must be specified, i.e. "
              + "DynamicDestinations.getSchema() may not return null. "
              + "However, create disposition is %s, and "
              + " %s returned null for destination %s",
          CreateDisposition.CREATE_NEVER,
          createDisposition,
          tableDestination);
      Table table = new Table().setTableReference(tableReference).setSchema(tableSchema);
      if (tableDestination.getTableDescription() != null) {
        table = table.setDescription(tableDestination.getTableDescription());
      }
      if (tableDestination.getTimePartitioning() != null) {
        table.setTimePartitioning(tableDestination.getTimePartitioning());
        if (tableDestination.getClustering() != null) {
          table.setClustering(tableDestination.getClustering());
        }
      }
      if (kmsKey != null) {
        table.setEncryptionConfiguration(new EncryptionConfiguration().setKmsKeyName(kmsKey));
      }
      datasetService.createTable(table);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  createdTables.add(tableSpec);
}
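For reference, a minimal standalone sketch of assembling the same kind of Table object with the com.google.api.services.bigquery.model classes used above; the project, dataset, table, field names, and KMS key are hypothetical placeholders:

import com.google.api.services.bigquery.model.Clustering;
import com.google.api.services.bigquery.model.EncryptionConfiguration;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableSchema;
import com.google.api.services.bigquery.model.TimePartitioning;
import java.util.Arrays;

public class TableModelSketch {
  // Builds a Table in the same shape tryCreateTable does: reference + schema,
  // plus optional description, partitioning, clustering, and a customer-managed key.
  public static Table buildTable() {
    TableReference ref = new TableReference()
        .setProjectId("my-project") // hypothetical values
        .setDatasetId("my_dataset")
        .setTableId("events");
    TableSchema schema = new TableSchema().setFields(Arrays.asList(
        new TableFieldSchema().setName("event_time").setType("TIMESTAMP"),
        new TableFieldSchema().setName("user_id").setType("STRING")));
    return new Table()
        .setTableReference(ref)
        .setSchema(schema)
        .setDescription("Example destination table")
        .setTimePartitioning(new TimePartitioning().setType("DAY").setField("event_time"))
        .setClustering(new Clustering().setFields(Arrays.asList("user_id")))
        .setEncryptionConfiguration(new EncryptionConfiguration()
            .setKmsKeyName("projects/my-project/locations/us/keyRings/kr/cryptoKeys/k")); // hypothetical key
  }
}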
Use of com.google.api.services.bigquery.model.TableReference in project beam by apache.
The class CreateTableHelpers, method possiblyCreateTable:
static TableDestination possiblyCreateTable(
    DoFn<?, ?>.ProcessContext context,
    TableDestination tableDestination,
    Supplier<TableSchema> schemaSupplier,
    CreateDisposition createDisposition,
    Coder<?> tableDestinationCoder,
    String kmsKey,
    BigQueryServices bqServices) {
  checkArgument(
      tableDestination.getTableSpec() != null,
      "DynamicDestinations.getTable() must return a TableDestination "
          + "with a non-null table spec, but %s returned %s for destination %s,"
          + "which has a null table spec",
      tableDestination);
  boolean destinationCoderSupportsClustering =
      !(tableDestinationCoder instanceof TableDestinationCoderV2);
  checkArgument(
      tableDestination.getClustering() == null || destinationCoderSupportsClustering,
      "DynamicDestinations.getTable() may only return destinations with clustering configured"
          + " if a destination coder is supplied that supports clustering, but %s is configured"
          + " to use TableDestinationCoderV2. Set withClustering() on BigQueryIO.write() and, "
          + " if you provided a custom DynamicDestinations instance, override"
          + " getDestinationCoder() to return TableDestinationCoderV3.");
  TableReference tableReference = tableDestination.getTableReference().clone();
  if (Strings.isNullOrEmpty(tableReference.getProjectId())) {
    tableReference.setProjectId(
        context.getPipelineOptions().as(BigQueryOptions.class).getProject());
    tableDestination = tableDestination.withTableReference(tableReference);
  }
  if (createDisposition == CreateDisposition.CREATE_NEVER) {
    return tableDestination;
  }
  String tableSpec = BigQueryHelpers.stripPartitionDecorator(tableDestination.getTableSpec());
  if (!createdTables.contains(tableSpec)) {
    // Re-check inside the synchronized block; this double-checked guard prevents
    // every thread from attempting a create and overwhelming our BigQuery quota.
    synchronized (createdTables) {
      if (!createdTables.contains(tableSpec)) {
        tryCreateTable(
            context, schemaSupplier, tableDestination, createDisposition, tableSpec, kmsKey, bqServices);
      }
    }
  }
  return tableDestination;
}
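The createdTables check above is a check-then-lock-then-recheck guard. Below is a minimal self-contained sketch of the same pattern; the CreateOnceGuard class and ensureCreated helper are hypothetical stand-ins for the surrounding Beam code:

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class CreateOnceGuard {
  // Process-wide record of table specs already created, like createdTables above.
  private static final Set<String> createdTables =
      Collections.synchronizedSet(new HashSet<>());

  static void ensureCreated(String tableSpec, Runnable createTable) {
    if (!createdTables.contains(tableSpec)) { // cheap unsynchronized fast path
      synchronized (createdTables) {
        // Re-check under the lock: another thread may have created the table already.
        if (!createdTables.contains(tableSpec)) {
          createTable.run(); // stands in for tryCreateTable(...)
          createdTables.add(tableSpec); // in the Beam code, tryCreateTable records the spec itself
        }
      }
    }
  }
}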
Use of com.google.api.services.bigquery.model.TableReference in project beam by apache.
The class BigQueryHelpers, method parseTableSpec:
/**
* Parse a table specification in the form {@code "[project_id]:[dataset_id].[table_id]"} or
* {@code "[project_id].[dataset_id].[table_id]"} or {@code "[dataset_id].[table_id]"}.
*
* <p>If the project id is omitted, the default project id is used.
*/
public static TableReference parseTableSpec(String tableSpec) {
  Matcher match = BigQueryIO.TABLE_SPEC.matcher(tableSpec);
  if (!match.matches()) {
    throw new IllegalArgumentException(
        String.format(
            "Table specification [%s] is not in one of the expected formats ("
                + " [project_id]:[dataset_id].[table_id],"
                + " [project_id].[dataset_id].[table_id],"
                + " [dataset_id].[table_id])",
            tableSpec));
  }
  TableReference ref = new TableReference();
  ref.setProjectId(match.group("PROJECT"));
  return ref.setDatasetId(match.group("DATASET")).setTableId(match.group("TABLE"));
}
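A small usage sketch of parseTableSpec with the three accepted formats; the project, dataset, and table names are placeholders:

import com.google.api.services.bigquery.model.TableReference;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryHelpers;

public class ParseTableSpecExample {
  public static void main(String[] args) {
    TableReference a = BigQueryHelpers.parseTableSpec("my-project:my_dataset.my_table");
    TableReference b = BigQueryHelpers.parseTableSpec("my-project.my_dataset.my_table");
    TableReference c = BigQueryHelpers.parseTableSpec("my_dataset.my_table");
    System.out.println(a.getProjectId() + "." + a.getDatasetId() + "." + a.getTableId());
    System.out.println(b.getProjectId()); // "my-project"
    // With the project omitted, getProjectId() is null here; callers such as
    // possiblyCreateTable above fill in the default project from BigQueryOptions.
    System.out.println(c.getProjectId());
  }
}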
Use of com.google.api.services.bigquery.model.TableReference in project beam by apache.
The class BigQueryInsertErrorCoder, method decode:
@Override
public BigQueryInsertError decode(InputStream inStream) throws IOException {
  TableDataInsertAllResponse.InsertErrors err =
      MAPPER.readValue(
          StringUtf8Coder.of().decode(inStream), TableDataInsertAllResponse.InsertErrors.class);
  TableRow row = TableRowJsonCoder.of().decode(inStream);
  TableReference ref = BigQueryHelpers.parseTableSpec(StringUtf8Coder.of().decode(inStream));
  return new BigQueryInsertError(row, err, ref);
}
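For contrast, a method-level sketch of what the matching encode plausibly looks like, assuming it mirrors decode's field order (the InsertErrors as a JSON string, then the TableRow, then the table spec string) and that MAPPER is the coder's Jackson ObjectMapper:

@Override
public void encode(BigQueryInsertError value, OutputStream outStream) throws IOException {
  // Mirror of decode above: error JSON, then row, then "project:dataset.table" spec.
  StringUtf8Coder.of().encode(MAPPER.writeValueAsString(value.getError()), outStream);
  TableRowJsonCoder.of().encode(value.getRow(), outStream);
  StringUtf8Coder.of().encode(BigQueryHelpers.toTableSpec(value.getTable()), outStream);
}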
Use of com.google.api.services.bigquery.model.TableReference in project beam by apache.
The class BigQueryQuerySourceDef, method cleanupTempResource:
void cleanupTempResource(BigQueryOptions bqOptions, String stepUuid) throws Exception {
  Optional<String> queryTempDatasetOpt = Optional.ofNullable(tempDatasetId);
  TableReference tableToRemove =
      createTempTableReference(
          bqOptions.getBigQueryProject() == null
              ? bqOptions.getProject()
              : bqOptions.getBigQueryProject(),
          BigQueryResourceNaming.createJobIdPrefix(bqOptions.getJobName(), stepUuid, JobType.QUERY),
          queryTempDatasetOpt);
  BigQueryServices.DatasetService tableService = bqServices.getDatasetService(bqOptions);
  LOG.info("Deleting temporary table with query results {}", tableToRemove);
  tableService.deleteTable(tableToRemove);
  boolean datasetCreatedByBeam = !queryTempDatasetOpt.isPresent();
  if (datasetCreatedByBeam) {
    // Remove temporary dataset only if it was created by Beam
    LOG.info("Deleting temporary dataset with query results {}", tableToRemove.getDatasetId());
    tableService.deleteDataset(tableToRemove.getProjectId(), tableToRemove.getDatasetId());
  }
}
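A condensed sketch of the same cleanup decision, assuming the temporary table reference has already been resolved and using only the DatasetService calls shown above; the cleanupTempTable helper name is hypothetical:

static void cleanupTempTable(
    BigQueryServices.DatasetService datasetService,
    TableReference tempTable,
    boolean datasetCreatedByBeam) throws Exception {
  // The temporary query-results table is always removed.
  datasetService.deleteTable(tempTable);
  // The enclosing dataset is removed only when Beam created it, i.e. when the
  // user did not supply their own temp dataset (queryTempDatasetOpt was empty).
  if (datasetCreatedByBeam) {
    datasetService.deleteDataset(tempTable.getProjectId(), tempTable.getDatasetId());
  }
}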