Use of org.apache.beam.sdk.io.gcp.bigquery.BigQueryServices.StorageClient in project beam by apache.
From class BigQueryStorageSourceBase, method split.
@Override
public List<BigQueryStorageStreamSource<T>> split(
    long desiredBundleSizeBytes, PipelineOptions options) throws Exception {
  BigQueryOptions bqOptions = options.as(BigQueryOptions.class);
  Table targetTable = getTargetTable(bqOptions);
  ReadSession.Builder readSessionBuilder = ReadSession.newBuilder();
  if (targetTable != null) {
    readSessionBuilder.setTable(
        BigQueryHelpers.toTableResourceName(targetTable.getTableReference()));
  } else {
    // If the table does not exist, targetTable will be null.
    // Construct the table id if we can generate it, for error recording/logging.
    @Nullable String tableReferenceId = getTargetTableId(bqOptions);
    if (tableReferenceId != null) {
      readSessionBuilder.setTable(tableReferenceId);
    }
  }
  if (selectedFieldsProvider != null || rowRestrictionProvider != null) {
    ReadSession.TableReadOptions.Builder tableReadOptionsBuilder =
        ReadSession.TableReadOptions.newBuilder();
    if (selectedFieldsProvider != null) {
      tableReadOptionsBuilder.addAllSelectedFields(selectedFieldsProvider.get());
    }
    if (rowRestrictionProvider != null) {
      tableReadOptionsBuilder.setRowRestriction(rowRestrictionProvider.get());
    }
    readSessionBuilder.setReadOptions(tableReadOptionsBuilder);
  }
  if (format != null) {
    readSessionBuilder.setDataFormat(format);
  }
  int streamCount = 0;
  if (desiredBundleSizeBytes > 0) {
    long tableSizeBytes = (targetTable != null) ? targetTable.getNumBytes() : 0;
    streamCount = (int) Math.min(tableSizeBytes / desiredBundleSizeBytes, MAX_SPLIT_COUNT);
  }
  streamCount = Math.max(streamCount, MIN_SPLIT_COUNT);
  CreateReadSessionRequest createReadSessionRequest =
      CreateReadSessionRequest.newBuilder()
          .setParent(
              BigQueryHelpers.toProjectResourceName(
                  bqOptions.getBigQueryProject() == null
                      ? bqOptions.getProject()
                      : bqOptions.getBigQueryProject()))
          .setReadSession(readSessionBuilder)
          .setMaxStreamCount(streamCount)
          .build();
  ReadSession readSession;
  try (StorageClient client = bqServices.getStorageClient(bqOptions)) {
    readSession = client.createReadSession(createReadSessionRequest);
    LOG.info(
        "Sent BigQuery Storage API CreateReadSession request '{}'; received response '{}'.",
        createReadSessionRequest,
        readSession);
  }
  if (readSession.getStreamsList().isEmpty()) {
    // The underlying table is empty, or all of its rows have been pruned.
    return ImmutableList.of();
  }
  Schema sessionSchema;
  if (readSession.getDataFormat() == DataFormat.ARROW) {
    org.apache.arrow.vector.types.pojo.Schema schema =
        ArrowConversion.arrowSchemaFromInput(
            readSession.getArrowSchema().getSerializedSchema().newInput());
    org.apache.beam.sdk.schemas.Schema beamSchema =
        ArrowConversion.ArrowSchemaTranslator.toBeamSchema(schema);
    sessionSchema = AvroUtils.toAvroSchema(beamSchema);
  } else if (readSession.getDataFormat() == DataFormat.AVRO) {
    sessionSchema = new Schema.Parser().parse(readSession.getAvroSchema().getSchema());
  } else {
    throw new IllegalArgumentException(
        "data is not in a supported dataFormat: " + readSession.getDataFormat());
  }
  TableSchema trimmedSchema =
      BigQueryAvroUtils.trimBigQueryTableSchema(targetTable.getSchema(), sessionSchema);
  List<BigQueryStorageStreamSource<T>> sources = Lists.newArrayList();
  for (ReadStream readStream : readSession.getStreamsList()) {
    sources.add(
        BigQueryStorageStreamSource.create(
            readSession, readStream, trimmedSchema, parseFn, outputCoder, bqServices));
  }
  return ImmutableList.copyOf(sources);
}
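The requested stream count above is derived from the table size and the desired bundle size alone, clamped to the [MIN_SPLIT_COUNT, MAX_SPLIT_COUNT] range. A minimal sketch of that arithmetic, with illustrative constant values (the real constants are defined in BigQueryStorageSourceBase and may differ):

// Illustrative constants; the actual values live in BigQueryStorageSourceBase.
static final int MIN_SPLIT_COUNT = 10;
static final int MAX_SPLIT_COUNT = 10_000;

static int computeStreamCount(long tableSizeBytes, long desiredBundleSizeBytes) {
  int streamCount = 0;
  if (desiredBundleSizeBytes > 0) {
    // Integer division: a 1 GiB table with 100 MiB bundles requests 10 streams.
    streamCount = (int) Math.min(tableSizeBytes / desiredBundleSizeBytes, MAX_SPLIT_COUNT);
  }
  // Never request fewer than MIN_SPLIT_COUNT streams; the service may still
  // return fewer streams than requested (see the last test below).
  return Math.max(streamCount, MIN_SPLIT_COUNT);
}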
Use of org.apache.beam.sdk.io.gcp.bigquery.BigQueryServices.StorageClient in project beam by apache.
From class BigQueryIOStorageReadTest, method testReadFromBigQueryIO.
@Test
public void testReadFromBigQueryIO() throws Exception {
  fakeDatasetService.createDataset("foo.com:project", "dataset", "", "", null);
  TableReference tableRef = BigQueryHelpers.parseTableSpec("foo.com:project:dataset.table");
  Table table = new Table().setTableReference(tableRef).setNumBytes(10L).setSchema(TABLE_SCHEMA);
  fakeDatasetService.createTable(table);
  CreateReadSessionRequest expectedCreateReadSessionRequest =
      CreateReadSessionRequest.newBuilder()
          .setParent("projects/project-id")
          .setReadSession(
              ReadSession.newBuilder()
                  .setTable("projects/foo.com:project/datasets/dataset/tables/table")
                  .setDataFormat(DataFormat.AVRO))
          .setMaxStreamCount(10)
          .build();
  ReadSession readSession =
      ReadSession.newBuilder()
          .setName("readSessionName")
          .setAvroSchema(AvroSchema.newBuilder().setSchema(AVRO_SCHEMA_STRING))
          .addStreams(ReadStream.newBuilder().setName("streamName"))
          .setDataFormat(DataFormat.AVRO)
          .build();
  ReadRowsRequest expectedReadRowsRequest =
      ReadRowsRequest.newBuilder().setReadStream("streamName").build();
  List<GenericRecord> records =
      Lists.newArrayList(
          createRecord("A", 1, AVRO_SCHEMA),
          createRecord("B", 2, AVRO_SCHEMA),
          createRecord("C", 3, AVRO_SCHEMA),
          createRecord("D", 4, AVRO_SCHEMA));
  List<ReadRowsResponse> readRowsResponses =
      Lists.newArrayList(
          createResponse(AVRO_SCHEMA, records.subList(0, 2), 0.0, 0.50),
          createResponse(AVRO_SCHEMA, records.subList(2, 4), 0.5, 0.75));
  StorageClient fakeStorageClient = mock(StorageClient.class, withSettings().serializable());
  when(fakeStorageClient.createReadSession(expectedCreateReadSessionRequest))
      .thenReturn(readSession);
  when(fakeStorageClient.readRows(expectedReadRowsRequest, ""))
      .thenReturn(new FakeBigQueryServerStream<>(readRowsResponses));
  PCollection<KV<String, Long>> output =
      p.apply(
          BigQueryIO.read(new ParseKeyValue())
              .from("foo.com:project:dataset.table")
              .withMethod(Method.DIRECT_READ)
              .withFormat(DataFormat.AVRO)
              .withTestServices(
                  new FakeBigQueryServices()
                      .withDatasetService(fakeDatasetService)
                      .withStorageClient(fakeStorageClient)));
  PAssert.that(output)
      .containsInAnyOrder(
          ImmutableList.of(KV.of("A", 1L), KV.of("B", 2L), KV.of("C", 3L), KV.of("D", 4L)));
  p.run();
}
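ParseKeyValue is defined elsewhere in the test class. Judging from the records and assertions above, it is a SerializableFunction from SchemaAndRecord to KV<String, Long>, roughly along these lines (the field names "name" and "number" are assumptions inferred from the other tests in this file, not the verbatim definition):

// Sketch of the parse function assumed by this test.
static class ParseKeyValue
    implements SerializableFunction<SchemaAndRecord, KV<String, Long>> {
  @Override
  public KV<String, Long> apply(SchemaAndRecord input) {
    GenericRecord record = input.getRecord();
    // Avro string fields come back as Utf8, so convert explicitly.
    return KV.of(record.get("name").toString(), (Long) record.get("number"));
  }
}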
Use of org.apache.beam.sdk.io.gcp.bigquery.BigQueryServices.StorageClient in project beam by apache.
From class BigQueryIOStorageReadTest, method testFractionConsumedArrow.
@Test
public void testFractionConsumedArrow() throws Exception {
  ReadSession readSession =
      ReadSession.newBuilder()
          .setName("readSession")
          .setArrowSchema(
              ArrowSchema.newBuilder()
                  .setSerializedSchema(serializeArrowSchema(ARROW_SCHEMA))
                  .build())
          .setDataFormat(DataFormat.ARROW)
          .build();
  ReadRowsRequest expectedRequest =
      ReadRowsRequest.newBuilder().setReadStream("readStream").build();
  List<String> names = Arrays.asList("A", "B", "C", "D", "E", "F", "G");
  List<Long> values = Arrays.asList(1L, 2L, 3L, 4L, 5L, 6L, 7L);
  List<ReadRowsResponse> responses =
      Lists.newArrayList(
          createResponseArrow(
              ARROW_SCHEMA, names.subList(0, 2), values.subList(0, 2), 0.0, 0.25),
          createResponseArrow(
              ARROW_SCHEMA, Lists.newArrayList(), Lists.newArrayList(), 0.25, 0.25),
          createResponseArrow(
              ARROW_SCHEMA, names.subList(2, 4), values.subList(2, 4), 0.3, 0.5),
          createResponseArrow(
              ARROW_SCHEMA, names.subList(4, 7), values.subList(4, 7), 0.7, 1.0));
  StorageClient fakeStorageClient = mock(StorageClient.class);
  when(fakeStorageClient.readRows(expectedRequest, ""))
      .thenReturn(new FakeBigQueryServerStream<>(responses));
  BigQueryStorageStreamSource<TableRow> streamSource =
      BigQueryStorageStreamSource.create(
          readSession,
          ReadStream.newBuilder().setName("readStream").build(),
          TABLE_SCHEMA,
          new TableRowParser(),
          TableRowJsonCoder.of(),
          new FakeBigQueryServices().withStorageClient(fakeStorageClient));
  BoundedReader<TableRow> reader = streamSource.createReader(options);
  // Before the call to BoundedReader#start, fraction consumed must be zero.
  assertEquals(0.0, reader.getFractionConsumed(), DELTA);
  // Reads A.
  assertTrue(reader.start());
  assertEquals(0.125, reader.getFractionConsumed(), DELTA);
  // Reads B.
  assertTrue(reader.advance());
  assertEquals(0.25, reader.getFractionConsumed(), DELTA);
  // Reads C.
  assertTrue(reader.advance());
  assertEquals(0.4, reader.getFractionConsumed(), DELTA);
  // Reads D.
  assertTrue(reader.advance());
  assertEquals(0.5, reader.getFractionConsumed(), DELTA);
  // Reads E.
  assertTrue(reader.advance());
  assertEquals(0.8, reader.getFractionConsumed(), DELTA);
  // Reads F.
  assertTrue(reader.advance());
  assertEquals(0.9, reader.getFractionConsumed(), DELTA);
  // Reads G.
  assertTrue(reader.advance());
  assertEquals(1.0, reader.getFractionConsumed(), DELTA);
  // Reaches the end.
  assertFalse(reader.advance());
  // We are done with the stream, so we should report 100% consumption.
  assertEquals(Double.valueOf(1.0), reader.getFractionConsumed());
}
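The expected fractions follow from linear interpolation within each response: every ReadRowsResponse carries a start and end fraction of the stream, and the reader interpolates by the number of rows consumed so far in that response. A worked check of the assertions above, assuming that interpolation rule:

// fraction = start + (end - start) * rowsRead / totalRowsInResponse
// Response 1 covers [0.0, 0.25] with rows A, B:
//   after A: 0.0 + 0.25 * 1/2 = 0.125;  after B: 0.25
// Response 3 covers [0.3, 0.5] with rows C, D:
//   after C: 0.3 + 0.2 * 1/2 = 0.4;     after D: 0.5
// Response 4 covers [0.7, 1.0] with rows E, F, G:
//   after E: 0.8;  after F: 0.9;  after G: 1.0
static double fractionConsumed(double start, double end, long rowsRead, long totalRows) {
  return start + (end - start) * rowsRead / totalRows;
}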
Use of org.apache.beam.sdk.io.gcp.bigquery.BigQueryServices.StorageClient in project beam by apache.
From class BigQueryIOStorageReadTest, method testStreamSourceSplitAtFractionFailsWhenSplitIsNotPossible.
@Test
public void testStreamSourceSplitAtFractionFailsWhenSplitIsNotPossible() throws Exception {
  List<ReadRowsResponse> parentResponses =
      Lists.newArrayList(
          createResponse(
              AVRO_SCHEMA,
              Lists.newArrayList(
                  createRecord("A", 1, AVRO_SCHEMA), createRecord("B", 2, AVRO_SCHEMA)),
              0.0,
              0.25),
          createResponse(
              AVRO_SCHEMA, Lists.newArrayList(createRecord("C", 3, AVRO_SCHEMA)), 0.25, 0.50),
          createResponse(
              AVRO_SCHEMA,
              Lists.newArrayList(
                  createRecord("D", 4, AVRO_SCHEMA), createRecord("E", 5, AVRO_SCHEMA)),
              0.5,
              0.75));
  StorageClient fakeStorageClient = mock(StorageClient.class);
  when(fakeStorageClient.readRows(
          ReadRowsRequest.newBuilder().setReadStream("parentStream").build(), ""))
      .thenReturn(new FakeBigQueryServerStream<>(parentResponses));
  // Mocks the split call. A response without a primary_stream and remainder_stream means
  // that the split is not possible.
  when(fakeStorageClient.splitReadStream(
          SplitReadStreamRequest.newBuilder()
              .setName("parentStream")
              .setFraction(0.5f)
              .build()))
      .thenReturn(SplitReadStreamResponse.getDefaultInstance());
  BigQueryStorageStreamSource<TableRow> streamSource =
      BigQueryStorageStreamSource.create(
          ReadSession.newBuilder()
              .setName("readSession")
              .setAvroSchema(AvroSchema.newBuilder().setSchema(AVRO_SCHEMA_STRING))
              .build(),
          ReadStream.newBuilder().setName("parentStream").build(),
          TABLE_SCHEMA,
          new TableRowParser(),
          TableRowJsonCoder.of(),
          new FakeBigQueryServices().withStorageClient(fakeStorageClient));
  // Read a few records from the parent stream and ensure that records are returned in the
  // prescribed order.
  BoundedReader<TableRow> parent = streamSource.createReader(options);
  assertTrue(parent.start());
  assertEquals("A", parent.getCurrent().get("name"));
  assertTrue(parent.advance());
  assertEquals("B", parent.getCurrent().get("name"));
  assertNull(parent.splitAtFraction(0.5));
  verify(fakeStorageClient, times(1)).splitReadStream(ArgumentMatchers.any());
  // Verify that subsequent splitAtFraction() calls after a failed splitAtFraction() attempt
  // do NOT invoke SplitReadStream.
  assertNull(parent.splitAtFraction(0.5));
  verify(fakeStorageClient, times(1)).splitReadStream(ArgumentMatchers.any());
  // Verify that the parent source still works correctly even after an unsuccessful
  // split attempt.
  assertTrue(parent.advance());
  assertEquals("C", parent.getCurrent().get("name"));
  assertTrue(parent.advance());
  assertEquals("D", parent.getCurrent().get("name"));
  assertTrue(parent.advance());
  assertEquals("E", parent.getCurrent().get("name"));
  assertFalse(parent.advance());
}
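SplitReadStreamResponse.getDefaultInstance() has neither a primary nor a remainder stream, which the reader treats as "split not possible". A plausible sketch of that guard, including the caching that the second verify() above relies on (names here are illustrative, not the exact Beam internals):

// Illustrative sketch; the real logic lives inside BigQueryStorageStreamSource's reader.
SplitReadStreamResponse response = storageClient.splitReadStream(splitRequest);
if (!response.hasPrimaryStream() || !response.hasRemainderStream()) {
  // The server could not split. Remember this so that later splitAtFraction()
  // calls return null immediately instead of issuing another SplitReadStream RPC.
  splitNotPossible = true;
  return null;
}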
Use of org.apache.beam.sdk.io.gcp.bigquery.BigQueryServices.StorageClient in project beam by apache.
From class BigQueryIOStorageReadTest, method testTableSourceInitialSplit_WithDefaultProject.
@Test
public void testTableSourceInitialSplit_WithDefaultProject() throws Exception {
  fakeDatasetService.createDataset("project-id", "dataset", "", "", null);
  TableReference tableRef = BigQueryHelpers.parseTableSpec("project-id:dataset.table");
  Table table =
      new Table()
          .setTableReference(tableRef)
          .setNumBytes(1024L * 1024L)
          .setSchema(TABLE_SCHEMA);
  fakeDatasetService.createTable(table);
  CreateReadSessionRequest expectedRequest =
      CreateReadSessionRequest.newBuilder()
          .setParent("projects/project-id")
          .setReadSession(
              ReadSession.newBuilder()
                  .setTable("projects/project-id/datasets/dataset/tables/table"))
          .setMaxStreamCount(1024)
          .build();
  ReadSession.Builder builder =
      ReadSession.newBuilder()
          .setAvroSchema(AvroSchema.newBuilder().setSchema(AVRO_SCHEMA_STRING))
          .setDataFormat(DataFormat.AVRO);
  for (int i = 0; i < 50; i++) {
    builder.addStreams(ReadStream.newBuilder().setName("stream-" + i));
  }
  StorageClient fakeStorageClient = mock(StorageClient.class);
  when(fakeStorageClient.createReadSession(expectedRequest)).thenReturn(builder.build());
  BigQueryStorageTableSource<TableRow> tableSource =
      BigQueryStorageTableSource.create(
          ValueProvider.StaticValueProvider.of(BigQueryHelpers.parseTableSpec("dataset.table")),
          null,
          null,
          new TableRowParser(),
          TableRowJsonCoder.of(),
          new FakeBigQueryServices()
              .withDatasetService(fakeDatasetService)
              .withStorageClient(fakeStorageClient));
  List<? extends BoundedSource<TableRow>> sources = tableSource.split(1024L, options);
  assertEquals(50L, sources.size());
}
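Note the asymmetry this test exercises: a 1 MiB table split into 1 KiB bundles caps the request at maxStreamCount = 1024, but the fake service returns only 50 streams, and split() produces exactly one source per returned stream. The server is always free to return fewer streams than requested; a quick check of the arithmetic under those assumptions:

// Requested stream count for this test's inputs:
long tableSizeBytes = 1024L * 1024L;  // 1 MiB, as set on the fake table
long desiredBundleSizeBytes = 1024L;  // the argument passed to split()
long requested = tableSizeBytes / desiredBundleSizeBytes;  // = 1024, below any cap
// The fake service returns 50 streams regardless, so split() yields 50 sources.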