Use of org.apache.beam.sdk.io.gcp.testing.FakeBigQueryServices in project beam by apache.
Class BigQueryIOStorageReadTest, method testStreamSourceSplitAtFractionSucceeds.
@Test
public void testStreamSourceSplitAtFractionSucceeds() throws Exception {
  List<ReadRowsResponse> parentResponses =
      Lists.newArrayList(
          createResponse(
              AVRO_SCHEMA,
              Lists.newArrayList(
                  createRecord("A", 1, AVRO_SCHEMA), createRecord("B", 2, AVRO_SCHEMA)),
              0.0,
              0.25),
          createResponse(
              AVRO_SCHEMA, Lists.newArrayList(createRecord("C", 3, AVRO_SCHEMA)), 0.25, 0.50),
          createResponse(
              AVRO_SCHEMA,
              Lists.newArrayList(
                  createRecord("D", 4, AVRO_SCHEMA), createRecord("E", 5, AVRO_SCHEMA)),
              0.50,
              0.75));
  StorageClient fakeStorageClient = mock(StorageClient.class);
  when(fakeStorageClient.readRows(
          ReadRowsRequest.newBuilder().setReadStream("parentStream").build(), ""))
      .thenReturn(new FakeBigQueryServerStream<>(parentResponses));
  // Mocks the split call.
  when(fakeStorageClient.splitReadStream(
          SplitReadStreamRequest.newBuilder().setName("parentStream").setFraction(0.5f).build()))
      .thenReturn(
          SplitReadStreamResponse.newBuilder()
              .setPrimaryStream(ReadStream.newBuilder().setName("primaryStream"))
              .setRemainderStream(ReadStream.newBuilder().setName("remainderStream"))
              .build());
  // Mocks the ReadRows calls expected on the primary and residual streams.
  when(fakeStorageClient.readRows(
          ReadRowsRequest.newBuilder().setReadStream("primaryStream").setOffset(2).build(), ""))
      .thenReturn(new FakeBigQueryServerStream<>(parentResponses.subList(1, 2)));
  when(fakeStorageClient.readRows(
          ReadRowsRequest.newBuilder().setReadStream("remainderStream").build(), ""))
      .thenReturn(
          new FakeBigQueryServerStream<>(parentResponses.subList(2, parentResponses.size())));
  BigQueryStorageStreamSource<TableRow> streamSource =
      BigQueryStorageStreamSource.create(
          ReadSession.newBuilder()
              .setName("readSession")
              .setAvroSchema(AvroSchema.newBuilder().setSchema(AVRO_SCHEMA_STRING))
              .build(),
          ReadStream.newBuilder().setName("parentStream").build(),
          TABLE_SCHEMA,
          new TableRowParser(),
          TableRowJsonCoder.of(),
          new FakeBigQueryServices().withStorageClient(fakeStorageClient));
  // Read a few records from the parent stream and ensure that records are returned in the
  // prescribed order.
  BoundedReader<TableRow> parent = streamSource.createReader(options);
  assertTrue(parent.start());
  assertEquals("A", parent.getCurrent().get("name"));
  assertTrue(parent.advance());
  assertEquals("B", parent.getCurrent().get("name"));
  // Now split the stream, and ensure that the "parent" reader has been replaced with the
  // primary stream and that the returned source points to the residual stream.
  BoundedReader<TableRow> primary = parent;
  BoundedSource<TableRow> residualSource = parent.splitAtFraction(0.5);
  assertNotNull(residualSource);
  BoundedReader<TableRow> residual = residualSource.createReader(options);
  assertTrue(primary.advance());
  assertEquals("C", primary.getCurrent().get("name"));
  assertFalse(primary.advance());
  assertTrue(residual.start());
  assertEquals("D", residual.getCurrent().get("name"));
  assertTrue(residual.advance());
  assertEquals("E", residual.getCurrent().get("name"));
  assertFalse(residual.advance());
}
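The createResponse helper referenced above is not shown on this page. Below is a minimal sketch of what such a helper might look like, assuming the BigQuery Storage v1 protos (AvroRows, StreamStats.Progress from com.google.cloud.bigquery.storage.v1) and Avro's GenericDatumWriter/EncoderFactory; the progress interval it attaches is what splitAtFraction consults when deciding where to split:

private static ReadRowsResponse createResponse(
    Schema schema, List<GenericRecord> records, double progressAtStart, double progressAtEnd)
    throws IOException {
  // Serialize the records into the Avro binary form the Storage API uses on the wire.
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
  GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<>(schema);
  for (GenericRecord record : records) {
    writer.write(record, encoder);
  }
  encoder.flush();
  return ReadRowsResponse.newBuilder()
      .setAvroRows(
          AvroRows.newBuilder()
              .setSerializedBinaryRows(ByteString.copyFrom(out.toByteArray())))
      .setRowCount(records.size())
      // The progress interval drives getFractionConsumed() and splitAtFraction().
      .setStats(
          StreamStats.newBuilder()
              .setProgress(
                  StreamStats.Progress.newBuilder()
                      .setAtResponseStart(progressAtStart)
                      .setAtResponseEnd(progressAtEnd)))
      .build();
}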
Use of org.apache.beam.sdk.io.gcp.testing.FakeBigQueryServices in project beam by apache.
Class BigQueryIOStorageReadTest, method doTableSourceInitialSplitTest.
private void doTableSourceInitialSplitTest(long bundleSize, int streamCount) throws Exception {
  fakeDatasetService.createDataset("foo.com:project", "dataset", "", "", null);
  TableReference tableRef = BigQueryHelpers.parseTableSpec("foo.com:project:dataset.table");
  // A 1 MiB fake table; bundleSize controls how many streams the split should request.
  Table table =
      new Table().setTableReference(tableRef).setNumBytes(1024L * 1024L).setSchema(TABLE_SCHEMA);
  fakeDatasetService.createTable(table);
  CreateReadSessionRequest expectedRequest =
      CreateReadSessionRequest.newBuilder()
          .setParent("projects/project-id")
          .setReadSession(
              ReadSession.newBuilder()
                  .setTable("projects/foo.com:project/datasets/dataset/tables/table"))
          .setMaxStreamCount(streamCount)
          .build();
  // The fake server responds with exactly streamCount streams.
  ReadSession.Builder builder =
      ReadSession.newBuilder()
          .setAvroSchema(AvroSchema.newBuilder().setSchema(AVRO_SCHEMA_STRING))
          .setDataFormat(DataFormat.AVRO);
  for (int i = 0; i < streamCount; i++) {
    builder.addStreams(ReadStream.newBuilder().setName("stream-" + i));
  }
  StorageClient fakeStorageClient = mock(StorageClient.class);
  when(fakeStorageClient.createReadSession(expectedRequest)).thenReturn(builder.build());
  BigQueryStorageTableSource<TableRow> tableSource =
      BigQueryStorageTableSource.create(
          ValueProvider.StaticValueProvider.of(tableRef),
          null,
          null,
          new TableRowParser(),
          TableRowJsonCoder.of(),
          new FakeBigQueryServices()
              .withDatasetService(fakeDatasetService)
              .withStorageClient(fakeStorageClient));
  // The initial split should produce one source per stream in the read session.
  List<? extends BoundedSource<TableRow>> sources = tableSource.split(bundleSize, options);
  assertEquals(streamCount, sources.size());
}
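The helper is parameterized so several tests can share it. Hypothetical callers might look like the following; the method names and argument pairs are illustrative, chosen so the 1 MiB fake table divides evenly into the requested stream count:

@Test
public void testTableSourceInitialSplit() throws Exception {
  // 1 MiB table / 1 KiB bundles => ask the server for 1024 streams.
  doTableSourceInitialSplitTest(1024L, 1024);
}

@Test
public void testTableSourceInitialSplit_MaxSplitCount() throws Exception {
  // Very small bundles; illustrates capping the requested stream count.
  doTableSourceInitialSplitTest(10L, 10_000);
}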
Use of org.apache.beam.sdk.io.gcp.testing.FakeBigQueryServices in project beam by apache.
Class BigQueryIOStorageReadTest, method testTableSourceInitialSplit_WithSelectedFieldsAndRowRestriction.
@Test
public void testTableSourceInitialSplit_WithSelectedFieldsAndRowRestriction() throws Exception {
  fakeDatasetService.createDataset("foo.com:project", "dataset", "", "", null);
  TableReference tableRef = BigQueryHelpers.parseTableSpec("foo.com:project:dataset.table");
  Table table = new Table().setTableReference(tableRef).setNumBytes(100L).setSchema(TABLE_SCHEMA);
  fakeDatasetService.createTable(table);
  // The selected fields and row restriction must be propagated into the read options on
  // the CreateReadSession request.
  CreateReadSessionRequest expectedRequest =
      CreateReadSessionRequest.newBuilder()
          .setParent("projects/project-id")
          .setReadSession(
              ReadSession.newBuilder()
                  .setTable("projects/foo.com:project/datasets/dataset/tables/table")
                  .setReadOptions(
                      ReadSession.TableReadOptions.newBuilder()
                          .addSelectedFields("name")
                          .setRowRestriction("number > 5")))
          .setMaxStreamCount(10)
          .build();
  ReadSession.Builder builder =
      ReadSession.newBuilder()
          .setAvroSchema(AvroSchema.newBuilder().setSchema(TRIMMED_AVRO_SCHEMA_STRING))
          .setDataFormat(DataFormat.AVRO);
  for (int i = 0; i < 10; i++) {
    builder.addStreams(ReadStream.newBuilder().setName("stream-" + i));
  }
  StorageClient fakeStorageClient = mock(StorageClient.class);
  when(fakeStorageClient.createReadSession(expectedRequest)).thenReturn(builder.build());
  BigQueryStorageTableSource<TableRow> tableSource =
      BigQueryStorageTableSource.create(
          ValueProvider.StaticValueProvider.of(tableRef),
          StaticValueProvider.of(Lists.newArrayList("name")),
          StaticValueProvider.of("number > 5"),
          new TableRowParser(),
          TableRowJsonCoder.of(),
          new FakeBigQueryServices()
              .withDatasetService(fakeDatasetService)
              .withStorageClient(fakeStorageClient));
  List<? extends BoundedSource<TableRow>> sources = tableSource.split(10L, options);
  assertEquals(10L, sources.size());
}
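TRIMMED_AVRO_SCHEMA_STRING is assumed to be a constant in the test class. A plausible definition keeps only the selected "name" field, mirroring what the server would return once the read options above are applied:

private static final String TRIMMED_AVRO_SCHEMA_STRING =
    "{\"namespace\": \"example.avro\","
        + " \"type\": \"record\","
        + " \"name\": \"RowRecord\","
        + " \"fields\": ["
        + "     {\"name\": \"name\", \"type\": \"string\"}"
        + " ]"
        + "}";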
Use of org.apache.beam.sdk.io.gcp.testing.FakeBigQueryServices in project beam by apache.
Class BigQueryIOStorageQueryTest, method testQuerySourceInitialSplit_NoReferencedTables.
/**
 * This test simulates the scenario where the SQL text which is executed by the query job doesn't
 * by itself refer to any tables (e.g. "SELECT 17 AS value"), and thus there are no referenced
 * tables when the dry run of the query is performed.
 */
@Test
public void testQuerySourceInitialSplit_NoReferencedTables() throws Exception {
  Table queryResultTable =
      new Table()
          .setSchema(
              new TableSchema()
                  .setFields(
                      ImmutableList.of(
                          new TableFieldSchema().setName("name").setType("STRING"),
                          new TableFieldSchema().setName("number").setType("INTEGER"))))
          .setNumBytes(1024L * 1024L);
  String encodedQuery = FakeBigQueryServices.encodeQueryResult(queryResultTable);
  fakeJobService.expectDryRunQuery(
      options.getProject(),
      encodedQuery,
      new JobStatistics()
          .setQuery(
              new JobStatistics2()
                  .setTotalBytesProcessed(1024L * 1024L)
                  .setReferencedTables(ImmutableList.of())));
  String stepUuid = "testStepUuid";
  TableReference tempTableReference =
      createTempTableReference(
          options.getProject(),
          BigQueryResourceNaming.createJobIdPrefix(options.getJobName(), stepUuid, JobType.QUERY),
          Optional.empty());
  CreateReadSessionRequest expectedRequest =
      CreateReadSessionRequest.newBuilder()
          .setParent("projects/" + options.getProject())
          .setReadSession(
              ReadSession.newBuilder()
                  .setTable(BigQueryHelpers.toTableResourceName(tempTableReference)))
          .setMaxStreamCount(1024)
          .build();
  Schema sessionSchema =
      SchemaBuilder.record("__root__")
          .fields()
          .name("name").type().nullable().stringType().noDefault()
          .name("number").type().nullable().longType().noDefault()
          .endRecord();
  ReadSession.Builder builder =
      ReadSession.newBuilder()
          .setAvroSchema(AvroSchema.newBuilder().setSchema(sessionSchema.toString()))
          .setDataFormat(DataFormat.AVRO);
  for (int i = 0; i < 1024; i++) {
    builder.addStreams(ReadStream.newBuilder().setName("stream-" + i));
  }
  StorageClient fakeStorageClient = mock(StorageClient.class);
  when(fakeStorageClient.createReadSession(expectedRequest)).thenReturn(builder.build());
  BigQueryStorageQuerySource<TableRow> querySource =
      BigQueryStorageQuerySource.create(
          stepUuid,
          ValueProvider.StaticValueProvider.of(encodedQuery),
          /* flattenResults = */ true,
          /* useLegacySql = */ true,
          /* priority = */ QueryPriority.BATCH,
          /* location = */ null,
          /* queryTempDataset = */ null,
          /* kmsKey = */ null,
          null,
          new TableRowParser(),
          TableRowJsonCoder.of(),
          new FakeBigQueryServices()
              .withDatasetService(fakeDatasetService)
              .withJobService(fakeJobService)
              .withStorageClient(fakeStorageClient));
  List<? extends BoundedSource<TableRow>> sources = querySource.split(1024, options);
  assertEquals(1024, sources.size());
}
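BigQueryHelpers.toTableResourceName, used when building the expected request above, renders a TableReference in the "projects/{project}/datasets/{dataset}/tables/{table}" form the Storage API expects. An illustrative check (the ids below are made up, not the ones the pipeline generates):

TableReference ref =
    new TableReference()
        .setProjectId("project-id")
        .setDatasetId("temp_dataset")
        .setTableId("temp_table");
assertEquals(
    "projects/project-id/datasets/temp_dataset/tables/temp_table",
    BigQueryHelpers.toTableResourceName(ref));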
Use of org.apache.beam.sdk.io.gcp.testing.FakeBigQueryServices in project beam by apache.
Class BigQueryIOStorageQueryTest, method doReadFromBigQueryIO.
private void doReadFromBigQueryIO(boolean templateCompatibility) throws Exception {
  TableReference sourceTableRef = BigQueryHelpers.parseTableSpec("project:dataset.table");
  fakeDatasetService.createDataset(
      sourceTableRef.getProjectId(),
      sourceTableRef.getDatasetId(),
      "asia-northeast1",
      "Fake plastic tree^H^H^H^Htables",
      null);
  fakeDatasetService.createTable(
      new Table().setTableReference(sourceTableRef).setLocation("asia-northeast1"));
  Table queryResultTable = new Table().setSchema(TABLE_SCHEMA).setNumBytes(0L);
  String encodedQuery = FakeBigQueryServices.encodeQueryResult(queryResultTable);
  fakeJobService.expectDryRunQuery(
      options.getProject(),
      encodedQuery,
      new JobStatistics()
          .setQuery(
              new JobStatistics2()
                  .setTotalBytesProcessed(1024L * 1024L)
                  .setReferencedTables(ImmutableList.of(sourceTableRef))));
  ReadSession readSession =
      ReadSession.newBuilder()
          .setName("readSessionName")
          .setAvroSchema(AvroSchema.newBuilder().setSchema(AVRO_SCHEMA_STRING))
          .addStreams(ReadStream.newBuilder().setName("streamName"))
          .setDataFormat(DataFormat.AVRO)
          .build();
  ReadRowsRequest expectedReadRowsRequest =
      ReadRowsRequest.newBuilder().setReadStream("streamName").build();
  List<GenericRecord> records =
      Lists.newArrayList(
          createRecord("A", 1, AVRO_SCHEMA),
          createRecord("B", 2, AVRO_SCHEMA),
          createRecord("C", 3, AVRO_SCHEMA),
          createRecord("D", 4, AVRO_SCHEMA));
  List<ReadRowsResponse> readRowsResponses =
      Lists.newArrayList(
          createResponse(AVRO_SCHEMA, records.subList(0, 2), 0.0, 0.500),
          createResponse(AVRO_SCHEMA, records.subList(2, 4), 0.5, 0.875));
  //
  // Note that since the temporary table name is generated by the pipeline, we can't match the
  // expected create read session request exactly. For now, match against any appropriately typed
  // proto object.
  //
  StorageClient fakeStorageClient = mock(StorageClient.class, withSettings().serializable());
  when(fakeStorageClient.createReadSession(any())).thenReturn(readSession);
  when(fakeStorageClient.readRows(expectedReadRowsRequest, ""))
      .thenReturn(new FakeBigQueryServerStream<>(readRowsResponses));
  BigQueryIO.TypedRead<KV<String, Long>> typedRead =
      BigQueryIO.read(new ParseKeyValue())
          .fromQuery(encodedQuery)
          .withMethod(Method.DIRECT_READ)
          .withTestServices(
              new FakeBigQueryServices()
                  .withDatasetService(fakeDatasetService)
                  .withJobService(fakeJobService)
                  .withStorageClient(fakeStorageClient));
  if (templateCompatibility) {
    typedRead = typedRead.withTemplateCompatibility();
  }
  PCollection<KV<String, Long>> output = p.apply(typedRead);
  PAssert.that(output)
      .containsInAnyOrder(
          ImmutableList.of(KV.of("A", 1L), KV.of("B", 2L), KV.of("C", 3L), KV.of("D", 4L)));
  p.run();
}
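ParseKeyValue is not shown on this page. A minimal sketch consistent with the assertions above, assuming Beam's SchemaAndRecord input type used by BigQueryIO.read(SerializableFunction):

private static class ParseKeyValue
    implements SerializableFunction<SchemaAndRecord, KV<String, Long>> {
  @Override
  public KV<String, Long> apply(SchemaAndRecord input) {
    // Avro strings arrive as Utf8, so convert explicitly; "number" is a long in the schema.
    return KV.of(
        input.getRecord().get("name").toString(),
        (Long) input.getRecord().get("number"));
  }
}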