Example usage of com.google.cloud.teleport.v2.templates.spanner.ddl.Table in the DataflowTemplates project by GoogleCloudPlatform.
From the class DataplexBigQueryToGcsFilterTest, method test_whenBeforeDateSet_filterExcludesTablesAndPartitions:
@Test
public void test_whenBeforeDateSet_filterExcludesTablesAndPartitions() {
  long cutoffMicros = TS_MICROS_2021_01_01_15_00_00_UTC;

  // One table/partition modified just before the cutoff, one just after.
  BigQueryTable.Builder tableBeforeCutoff = table().setLastModificationTime(cutoffMicros - 1000L);
  BigQueryTable.Builder tableAfterCutoff = table().setLastModificationTime(cutoffMicros + 1000L);
  BigQueryTablePartition partitionBeforeCutoff =
      partition().setPartitionName("p1").setLastModificationTime(cutoffMicros - 1000L).build();
  BigQueryTablePartition partitionAfterCutoff =
      partition().setPartitionName("p2").setLastModificationTime(cutoffMicros + 1000L).build();
  List<BigQueryTablePartition> allPartitions =
      Arrays.asList(partitionBeforeCutoff, partitionAfterCutoff);

  options.setTables(null);
  options.setExportDataModifiedBeforeDateTime("2021-01-01T15:00:00Z");
  Filter filter = new DataplexBigQueryToGcsFilter(options, new ArrayList<String>());

  assertThat(filter.shouldSkipUnpartitionedTable(tableAfterCutoff)).isTrue();
  assertThat(filter.shouldSkipUnpartitionedTable(tableBeforeCutoff)).isFalse();
  // If a table is partitioned, we should filter individual partitions by modification time,
  // so the table itself should NOT be skipped no matter what the table modification time is.
  // Expecting shouldSkip = false for both newer and older tables:
  assertThat(filter.shouldSkipPartitionedTable(tableAfterCutoff, allPartitions)).isFalse();
  assertThat(filter.shouldSkipPartitionedTable(tableBeforeCutoff, allPartitions)).isFalse();
  assertThat(filter.shouldSkipPartition(tableBeforeCutoff, partitionAfterCutoff)).isTrue();
  assertThat(filter.shouldSkipPartition(tableBeforeCutoff, partitionBeforeCutoff)).isFalse();
}
Example usage of com.google.cloud.teleport.v2.templates.spanner.ddl.Table in the DataflowTemplates project by GoogleCloudPlatform.
From the class DataplexBigQueryToGcsFilterTest, method test_whenTargetFileExistsWithWriteDispositionSKIP_filterExcludesTables:
@Test
public void test_whenTargetFileExistsWithWriteDispositionSKIP_filterExcludesTables() {
  BigQueryTable.Builder table1 = table().setTableName("table1").setPartitioningColumn("p2");
  BigQueryTablePartition partition1 = partition().setPartitionName("partition1").build();

  options.setTables(null);
  options.setExportDataModifiedBeforeDateTime(null);
  options.setFileFormat(FileFormatOptions.AVRO);
  options.setWriteDisposition(WriteDispositionOptions.SKIP);

  // Both the table-level and the partition-level target files already exist.
  List<String> existingTargetFiles =
      Arrays.asList(
          "table1/output-table1.avro", "table1/p2=partition1/output-table1-partition1.avro");
  Filter filter = new DataplexBigQueryToGcsFilter(options, existingTargetFiles);

  // With SKIP, anything whose output file already exists must be filtered out.
  assertThat(filter.shouldSkipUnpartitionedTable(table1)).isTrue();
  assertThat(filter.shouldSkipPartition(table1, partition1)).isTrue();
}
Example usage of com.google.cloud.teleport.v2.templates.spanner.ddl.Table in the DataflowTemplates project by GoogleCloudPlatform.
From the class DataplexBigQueryToGcsFilterTest, method test_whenBeforeDateIs1DayDuration_dateParsedCorrectly:
@Test
public void test_whenBeforeDateIs1DayDuration_dateParsedCorrectly() {
  // "-P1D" should be interpreted as the current time in the DEFAULT time zone minus one day.
  long oneDayAgoMicros = Instant.now().minus(Duration.standardDays(1)).getMillis() * 1000L;
  BigQueryTable.Builder modifiedBeforeCutoff =
      table().setLastModificationTime(oneDayAgoMicros - 100000L);
  BigQueryTable.Builder modifiedAfterCutoff =
      table().setLastModificationTime(oneDayAgoMicros + 100000L);

  options.setTables(null);
  options.setExportDataModifiedBeforeDateTime("-P1D");
  Filter filter = new DataplexBigQueryToGcsFilter(options, new ArrayList<String>());

  assertThat(filter.shouldSkipUnpartitionedTable(modifiedAfterCutoff)).isTrue();
  assertThat(filter.shouldSkipUnpartitionedTable(modifiedBeforeCutoff)).isFalse();
}
Example usage of com.google.cloud.teleport.v2.templates.spanner.ddl.Table in the DataflowTemplates project by GoogleCloudPlatform.
From the class BigQueryMetadataLoader, method loadTablePartitions:
/**
 * Queries INFORMATION_SCHEMA.PARTITIONS for all partitions of {@code table}, returning only
 * those not rejected by {@code filter} (a {@code null} filter accepts everything).
 */
private List<BigQueryTablePartition> loadTablePartitions(BigQueryTable.Builder table, Filter filter) throws InterruptedException {
  String partitionSql =
      String.format(
          "select partition_id, last_modified_time\n"
              + "from `%s.%s.INFORMATION_SCHEMA.PARTITIONS`\n"
              + "where table_name = @table_name",
          table.getProject(), table.getDataset());
  TableResult partitionRows =
      bqClient.query(
          QueryJobConfiguration.newBuilder(partitionSql)
              .addNamedParameter("table_name", QueryParameterValue.string(table.getTableName()))
              .build());
  List<BigQueryTablePartition> partitions = new ArrayList<>();
  // TODO(an2x): Check we didn't get duplicate partition names.
  partitionRows
      .iterateAll()
      .forEach(
          row -> {
            BigQueryTablePartition partition =
                BigQueryTablePartition.builder()
                    .setPartitionName(row.get(0).getStringValue())
                    .setLastModificationTime(row.get(1).getTimestampValue())
                    .build();
            boolean accepted = filter == null || !filter.shouldSkipPartition(table, partition);
            if (accepted) {
              partitions.add(partition);
            }
          });
  return partitions;
}
Example usage of com.google.cloud.teleport.v2.templates.spanner.ddl.Table in the DataflowTemplates project by GoogleCloudPlatform.
From the class BigQueryMetadataLoader, method loadDatasetMetadata:
/**
 * Loads metadata for all tables in the dataset {@code datasetId} returning only those accepted by
 * the {@code filter}.
 *
 * <p>Per-table metadata is loaded in parallel on a fixed pool of {@code maxParallelRequests}
 * threads; the pool is always shut down before this method returns.
 *
 * @param filter if {@code null}, will include all tables and partitions
 * @throws InterruptedException if interrupted while the parallel metadata queries are running
 * @throws ExecutionException if any per-table metadata query fails
 */
public List<BigQueryTable> loadDatasetMetadata(DatasetId datasetId, @Nullable Filter filter) throws InterruptedException, ExecutionException {
  String tableSql =
      String.format(
          "select\n"
              + " table_id,\n"
              + " timestamp_millis(last_modified_time) as last_modified_time,\n"
              + " (select column_name from `%s.%s.INFORMATION_SCHEMA.COLUMNS` c\n"
              + " where c.table_catalog = t.project_id\n"
              + " and c.table_schema = t.dataset_id\n"
              + " and c.table_name = t.table_id\n"
              + " and c.is_partitioning_column = 'YES') as partitioning_column,\n"
              + " from `%s.%s.__TABLES__` t\n"
              // Tables only (1), not views (2), or external tables (3).
              + " where type = 1",
          datasetId.getProject(),
          datasetId.getDataset(),
          datasetId.getProject(),
          datasetId.getDataset());
  TableResult tableRows = bqClient.query(QueryJobConfiguration.newBuilder(tableSql).build());

  // One metadata-loading task per table; a task returns null if the filter rejects its table.
  List<Callable<BigQueryTable>> tableQueries = new ArrayList<>();
  tableRows
      .iterateAll()
      .forEach(
          row ->
              tableQueries.add(
                  () -> {
                    BigQueryTable.Builder table =
                        BigQueryTable.builder()
                            .setProject(datasetId.getProject())
                            .setDataset(datasetId.getDataset())
                            .setTableName(row.get(0).getStringValue())
                            .setLastModificationTime(row.get(1).getTimestampValue())
                            .setPartitioningColumn(
                                !row.get(2).isNull() ? row.get(2).getStringValue() : null);
                    try {
                      if (!loadTableMetadata(table, filter)) {
                        return null;
                      }
                    } catch (RuntimeException e) {
                      throw new RuntimeException(
                          "Error loading table " + table.getTableName() + " metadata.", e);
                    }
                    return table.build();
                  }));

  ExecutorService executor = Executors.newFixedThreadPool(maxParallelRequests);
  List<Future<BigQueryTable>> tableFutures;
  try {
    tableFutures = executor.invokeAll(tableQueries);
  } finally {
    // Always release the pool's threads — the original only shut down on the happy path,
    // leaking the pool if invokeAll threw InterruptedException.
    executor.shutdown();
  }

  List<BigQueryTable> tables = new ArrayList<>(tableFutures.size());
  for (Future<BigQueryTable> ft : tableFutures) {
    BigQueryTable t = ft.get();
    if (t != null) {
      tables.add(t);
    }
  }
  return tables;
}
Aggregations