Use of com.google.api.services.bigquery.model.JobStatistics4 in project beam by apache.
In class BigQueryIOTest, method testBigQueryQuerySourceInitSplit:
@Test
public void testBigQueryQuerySourceInitSplit() throws Exception {
  TableReference dryRunTable = new TableReference();

  Job queryJob = new Job();
  JobStatistics queryJobStats = new JobStatistics();
  JobStatistics2 queryStats = new JobStatistics2();
  queryStats.setReferencedTables(ImmutableList.of(dryRunTable));
  queryJobStats.setQuery(queryStats);
  queryJob.setStatus(new JobStatus()).setStatistics(queryJobStats);

  Job extractJob = new Job();
  JobStatistics extractJobStats = new JobStatistics();
  JobStatistics4 extractStats = new JobStatistics4();
  extractStats.setDestinationUriFileCounts(ImmutableList.of(1L));
  extractJobStats.setExtract(extractStats);
  extractJob.setStatus(new JobStatus()).setStatistics(extractJobStats);

  FakeJobService fakeJobService = new FakeJobService();
  FakeDatasetService fakeDatasetService = new FakeDatasetService();
  FakeBigQueryServices fakeBqServices =
      new FakeBigQueryServices()
          .withJobService(fakeJobService)
          .withDatasetService(fakeDatasetService);

  List<TableRow> expected =
      ImmutableList.of(
          new TableRow().set("name", "a").set("number", 1L),
          new TableRow().set("name", "b").set("number", 2L),
          new TableRow().set("name", "c").set("number", 3L),
          new TableRow().set("name", "d").set("number", 4L),
          new TableRow().set("name", "e").set("number", 5L),
          new TableRow().set("name", "f").set("number", 6L));

  PipelineOptions options = PipelineOptionsFactory.create();
  BigQueryOptions bqOptions = options.as(BigQueryOptions.class);
  bqOptions.setProject("project");
  String stepUuid = "testStepUuid";

  TableReference tempTableReference =
      createTempTableReference(
          bqOptions.getProject(), createJobIdToken(bqOptions.getJobName(), stepUuid));
  fakeDatasetService.createDataset(
      bqOptions.getProject(), tempTableReference.getDatasetId(), "", "");
  fakeDatasetService.createTable(
      new Table()
          .setTableReference(tempTableReference)
          .setSchema(
              new TableSchema()
                  .setFields(
                      ImmutableList.of(
                          new TableFieldSchema().setName("name").setType("STRING"),
                          new TableFieldSchema().setName("number").setType("INTEGER")))));

  Path baseDir = Files.createTempDirectory(tempFolder, "testBigQueryQuerySourceInitSplit");
  String query = FakeBigQueryServices.encodeQuery(expected);
  BoundedSource<TableRow> bqSource =
      BigQueryQuerySource.create(
          stepUuid,
          StaticValueProvider.of(query),
          true /* flattenResults */,
          true /* useLegacySql */,
          fakeBqServices);
  options.setTempLocation(baseDir.toString());

  TableReference queryTable =
      new TableReference()
          .setProjectId(bqOptions.getProject())
          .setDatasetId(tempTableReference.getDatasetId())
          .setTableId(tempTableReference.getTableId());
  fakeJobService.expectDryRunQuery(
      bqOptions.getProject(),
      query,
      new JobStatistics()
          .setQuery(
              new JobStatistics2()
                  .setTotalBytesProcessed(100L)
                  .setReferencedTables(ImmutableList.of(queryTable))));

  List<TableRow> read = SourceTestUtils.readFromSource(bqSource, options);
  assertThat(read, containsInAnyOrder(Iterables.toArray(expected, TableRow.class)));

  SourceTestUtils.assertSplitAtFractionBehavior(
      bqSource, 2, 0.3, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS, options);
  List<? extends BoundedSource<TableRow>> sources = bqSource.split(100, options);
  assertEquals(2, sources.size());
  BoundedSource<TableRow> actual = sources.get(0);
  assertThat(actual, CoreMatchers.instanceOf(TransformingSource.class));
}
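The test builds Job objects whose JobStatistics4 advertises one output file for a single destination URI; the same chain of fluent setters and getters applies when reading statistics off any completed extract job. A minimal standalone sketch of that wiring, not Beam code (all classes are the generated BigQuery model types from com.google.api.services.bigquery.model):

  // Build a completed extract Job whose statistics report one output file,
  // then read the counts back through the same getter chain.
  Job fakeExtractJob = new Job()
      .setStatus(new JobStatus().setState("DONE"))
      .setStatistics(new JobStatistics()
          .setExtract(new JobStatistics4()
              .setDestinationUriFileCounts(ImmutableList.of(1L))));

  List<Long> fileCounts =
      fakeExtractJob.getStatistics().getExtract().getDestinationUriFileCounts(); // [1]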
Use of com.google.api.services.bigquery.model.JobStatistics4 in project beam by apache.
In class BigQueryIOTest, method testBigQueryNoTableQuerySourceInitSplit:
@Test
public void testBigQueryNoTableQuerySourceInitSplit() throws Exception {
  TableReference dryRunTable = new TableReference();

  Job queryJob = new Job();
  JobStatistics queryJobStats = new JobStatistics();
  JobStatistics2 queryStats = new JobStatistics2();
  queryStats.setReferencedTables(ImmutableList.of(dryRunTable));
  queryJobStats.setQuery(queryStats);
  queryJob.setStatus(new JobStatus()).setStatistics(queryJobStats);

  Job extractJob = new Job();
  JobStatistics extractJobStats = new JobStatistics();
  JobStatistics4 extractStats = new JobStatistics4();
  extractStats.setDestinationUriFileCounts(ImmutableList.of(1L));
  extractJobStats.setExtract(extractStats);
  extractJob.setStatus(new JobStatus()).setStatistics(extractJobStats);

  FakeDatasetService datasetService = new FakeDatasetService();
  FakeJobService jobService = new FakeJobService();
  FakeBigQueryServices fakeBqServices =
      new FakeBigQueryServices()
          .withJobService(jobService)
          .withDatasetService(datasetService);

  PipelineOptions options = PipelineOptionsFactory.create();
  BigQueryOptions bqOptions = options.as(BigQueryOptions.class);
  bqOptions.setProject("project");
  String stepUuid = "testStepUuid";

  TableReference tempTableReference =
      createTempTableReference(
          bqOptions.getProject(), createJobIdToken(bqOptions.getJobName(), stepUuid));

  List<TableRow> expected =
      ImmutableList.of(
          new TableRow().set("name", "a").set("number", 1L),
          new TableRow().set("name", "b").set("number", 2L),
          new TableRow().set("name", "c").set("number", 3L),
          new TableRow().set("name", "d").set("number", 4L),
          new TableRow().set("name", "e").set("number", 5L),
          new TableRow().set("name", "f").set("number", 6L));

  datasetService.createDataset(
      tempTableReference.getProjectId(), tempTableReference.getDatasetId(), "", "");
  Table table =
      new Table()
          .setTableReference(tempTableReference)
          .setSchema(
              new TableSchema()
                  .setFields(
                      ImmutableList.of(
                          new TableFieldSchema().setName("name").setType("STRING"),
                          new TableFieldSchema().setName("number").setType("INTEGER"))));
  datasetService.createTable(table);

  String query = FakeBigQueryServices.encodeQuery(expected);
  jobService.expectDryRunQuery(
      "project",
      query,
      new JobStatistics()
          .setQuery(
              new JobStatistics2()
                  .setTotalBytesProcessed(100L)
                  .setReferencedTables(ImmutableList.of(table.getTableReference()))));

  Path baseDir = Files.createTempDirectory(tempFolder, "testBigQueryNoTableQuerySourceInitSplit");
  BoundedSource<TableRow> bqSource =
      BigQueryQuerySource.create(
          stepUuid,
          StaticValueProvider.of(query),
          true /* flattenResults */,
          true /* useLegacySql */,
          fakeBqServices);
  options.setTempLocation(baseDir.toString());

  List<TableRow> read =
      convertBigDecimaslToLong(SourceTestUtils.readFromSource(bqSource, options));
  assertThat(read, containsInAnyOrder(Iterables.toArray(expected, TableRow.class)));

  SourceTestUtils.assertSplitAtFractionBehavior(
      bqSource, 2, 0.3, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS, options);
  List<? extends BoundedSource<TableRow>> sources = bqSource.split(100, options);
  assertEquals(2, sources.size());
  BoundedSource<TableRow> actual = sources.get(0);
  assertThat(actual, CoreMatchers.instanceOf(TransformingSource.class));
}
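Both query tests prime the fake job service's dry run with a JobStatistics2 carrying totalBytesProcessed and referencedTables, which is why the subsequent split(100, options) call has a size estimate to work from. A hedged sketch of reading those fields back from a dry-run job (the inspectDryRun helper is hypothetical; the getters are the generated model API):

  // Hypothetical helper, shown only to illustrate the getter chain.
  static void inspectDryRun(Job dryRunJob) {
    JobStatistics2 queryStats = dryRunJob.getStatistics().getQuery();
    Long totalBytes = queryStats.getTotalBytesProcessed();           // 100L in these tests
    List<TableReference> tables = queryStats.getReferencedTables();  // the temp result table
  }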
Use of com.google.api.services.bigquery.model.JobStatistics4 in project beam by apache.
In class FakeJobService, method runExtractJob:
private JobStatus runExtractJob(Job job, JobConfigurationExtract extract)
    throws InterruptedException, IOException {
  TableReference sourceTable = extract.getSourceTable();
  List<TableRow> rows =
      datasetService.getAllRows(
          sourceTable.getProjectId(), sourceTable.getDatasetId(), sourceTable.getTableId());
  TableSchema schema = datasetService.getTable(sourceTable).getSchema();

  // Write the table's rows to each destination URI, recording how many files
  // were produced per destination.
  List<Long> destinationFileCounts = Lists.newArrayList();
  for (String destination : extract.getDestinationUris()) {
    destinationFileCounts.add(writeRows(sourceTable.getTableId(), rows, schema, destination));
  }

  // Surface the per-destination file counts on the job's extract statistics,
  // mirroring what the real BigQuery service reports for a finished extract.
  job.setStatistics(
      new JobStatistics()
          .setExtract(new JobStatistics4().setDestinationUriFileCounts(destinationFileCounts)));
  return new JobStatus().setState("DONE");
}
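runExtractJob records one count per destination URI pattern, and a downstream reader can expand each pattern back into concrete file names. The expandShardedUris helper below is hypothetical (not part of FakeJobService), but it follows BigQuery's extract convention of substituting a zero-padded shard index for the "*" wildcard in a destination URI:

  // Hypothetical sketch: turn a completed extract Job's per-URI file counts
  // back into concrete paths, assuming each URI contains a "*" shard wildcard.
  static List<String> expandShardedUris(Job completedJob, List<String> destinationUris) {
    List<Long> counts =
        completedJob.getStatistics().getExtract().getDestinationUriFileCounts();
    List<String> paths = new ArrayList<>();
    for (int i = 0; i < destinationUris.size(); i++) {
      for (long shard = 0; shard < counts.get(i); shard++) {
        // BigQuery names extract shards with a zero-padded 12-digit index.
        paths.add(destinationUris.get(i).replace("*", String.format("%012d", shard)));
      }
    }
    return paths;
  }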
Use of com.google.api.services.bigquery.model.JobStatistics4 in project beam by apache.
In class BigQueryIOTest, method testReadFromTable:
@Test
public void testReadFromTable() throws IOException, InterruptedException {
  BigQueryOptions bqOptions = TestPipeline.testingPipelineOptions().as(BigQueryOptions.class);
  bqOptions.setProject("defaultproject");
  bqOptions.setTempLocation(testFolder.newFolder("BigQueryIOTest").getAbsolutePath());

  Job job = new Job();
  JobStatus status = new JobStatus();
  job.setStatus(status);
  JobStatistics jobStats = new JobStatistics();
  job.setStatistics(jobStats);
  JobStatistics4 extract = new JobStatistics4();
  jobStats.setExtract(extract);
  extract.setDestinationUriFileCounts(ImmutableList.of(1L));

  Table sometable = new Table();
  sometable.setSchema(
      new TableSchema()
          .setFields(
              ImmutableList.of(
                  new TableFieldSchema().setName("name").setType("STRING"),
                  new TableFieldSchema().setName("number").setType("INTEGER"))));
  sometable.setTableReference(
      new TableReference()
          .setProjectId("non-executing-project")
          .setDatasetId("somedataset")
          .setTableId("sometable"));
  sometable.setNumBytes(1024L * 1024L);

  FakeDatasetService fakeDatasetService = new FakeDatasetService();
  fakeDatasetService.createDataset("non-executing-project", "somedataset", "", "");
  fakeDatasetService.createTable(sometable);

  List<TableRow> records =
      Lists.newArrayList(
          new TableRow().set("name", "a").set("number", 1L),
          new TableRow().set("name", "b").set("number", 2L),
          new TableRow().set("name", "c").set("number", 3L));
  fakeDatasetService.insertAll(sometable.getTableReference(), records, null);

  FakeBigQueryServices fakeBqServices =
      new FakeBigQueryServices()
          .withJobService(new FakeJobService())
          .withDatasetService(fakeDatasetService);

  Pipeline p = TestPipeline.create(bqOptions);
  PCollection<KV<String, Long>> output =
      p.apply(
              BigQueryIO.read()
                  .from("non-executing-project:somedataset.sometable")
                  .withTestServices(fakeBqServices)
                  .withoutValidation())
          .apply(
              ParDo.of(
                  new DoFn<TableRow, KV<String, Long>>() {
                    @ProcessElement
                    public void processElement(ProcessContext c) throws Exception {
                      c.output(
                          KV.of(
                              (String) c.element().get("name"),
                              Long.valueOf((String) c.element().get("number"))));
                    }
                  }));

  PAssert.that(output)
      .containsInAnyOrder(ImmutableList.of(KV.of("a", 1L), KV.of("b", 2L), KV.of("c", 3L)));
  p.run();
}
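One subtlety in the DoFn above: "number" is read as a String and parsed with Long.valueOf even though the records were inserted with Long values, because TableRow contents round-trip through BigQuery's JSON wire format, in which INTEGER fields arrive as strings. A minimal illustration of the same parse in isolation:

  // TableRow as it comes back over the JSON API: INTEGER values are strings.
  TableRow row = new TableRow().set("name", "a").set("number", "1");
  long number = Long.valueOf((String) row.get("number")); // 1L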