use of com.google.cloud.bigquery.storage.v1beta2.ReadSession in project spark-bigquery-connector by GoogleCloudDataproc.
the class ReadSessionCreator method create.
/**
 * Creates a new ReadSession for parallel reads.
 *
 * <p>Some attributes are governed by the {@link ReadSessionCreatorConfig} that this object was
 * constructed with.
 *
 * @param table The table to create the session for.
 * @param selectedFields The table columns to include in the session.
 * @param filter An optional row restriction to push down to BigQuery.
 * @return A {@link ReadSessionResponse} holding the created session and the table actually read
 *     (the materialized table when the input is a view).
 */
public ReadSessionResponse create(
    TableId table, ImmutableList<String> selectedFields, Optional<String> filter) {
  TableInfo tableDetails = bigQueryClient.getTable(table);
  TableInfo actualTable = getActualTable(tableDetails, selectedFields, filter);
  StandardTableDefinition tableDefinition = actualTable.getDefinition();
  BigQueryReadClient bigQueryReadClient = bigQueryReadClientFactory.getBigQueryReadClient();
  String tablePath = toTablePath(actualTable.getTableId());
  // An optional base request can be supplied as a Base64-encoded serialized proto.
  CreateReadSessionRequest request =
      config.getRequestEncodedBase()
          .map(value -> {
            try {
              return com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.parseFrom(
                  java.util.Base64.getDecoder().decode(value));
            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
              throw new RuntimeException("Couldn't decode: " + value, e);
            }
          })
          .orElse(CreateReadSessionRequest.newBuilder().build());
  ReadSession.Builder requestedSession = request.getReadSession().toBuilder();
  config.getTraceId().ifPresent(requestedSession::setTraceId);
  TableReadOptions.Builder readOptions = requestedSession.getReadOptionsBuilder();
  if (!isInputTableAView(tableDetails)) {
    // Views are materialized by getActualTable with the filter already applied.
    filter.ifPresent(readOptions::setRowRestriction);
  }
  readOptions.addAllSelectedFields(selectedFields);
  readOptions.setArrowSerializationOptions(
      ArrowSerializationOptions.newBuilder()
          .setBufferCompression(config.getArrowCompressionCodec())
          .build());
  // toBuilder() (not the static newBuilder() called through the instance, which would
  // discard the decoded base request's remaining fields) preserves them.
  ReadSession readSession =
      bigQueryReadClient.createReadSession(
          request.toBuilder()
              .setParent("projects/" + bigQueryClient.getProjectId())
              .setReadSession(
                  requestedSession
                      .setDataFormat(config.getReadDataFormat())
                      .setReadOptions(readOptions)
                      .setTable(tablePath)
                      .build())
              .setMaxStreamCount(
                  getMaxNumPartitionsRequested(config.getMaxParallelism(), tableDefinition))
              .build());
  return new ReadSessionResponse(readSession, actualTable);
}
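A minimal consumption sketch, not taken from the connector: it assumes a ReadSessionCreator instance named readSessionCreator and that ReadSessionResponse exposes a getReadSession() accessor matching the constructor above. Each stream name seeds one ReadRowsRequest, which is how the parallel reads promised by the Javadoc are realized.
// Hypothetical sketch: fan the session's streams out to workers.
ReadSessionResponse response =
    readSessionCreator.create(tableId, ImmutableList.of("word", "word_count"), Optional.empty());
ReadSession readSession = response.getReadSession();
for (ReadStream stream : readSession.getStreamsList()) {
  // Each stream can be consumed independently, e.g. one per Spark partition.
  ReadRowsRequest readRowsRequest =
      ReadRowsRequest.newBuilder().setReadStream(stream.getName()).build();
}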
use of com.google.cloud.bigquery.storage.v1beta2.ReadSession in project java-bigquerystorage by googleapis.
the class ITBigQueryStorageTest method ProcessRowsAtSnapshot.
/**
 * Reads all the rows from the specified table.
 *
 * <p>For every row, the consumer is called for processing.
 *
 * @param table Fully qualified table resource name to read from.
 * @param snapshotInMillis Optional. If specified, all rows up to this timestamp will be returned.
 * @param filter Optional. If specified, it will be used to restrict the returned data.
 * @param consumer The consumer that receives all Avro rows.
 * @throws IOException if the Avro rows cannot be decoded.
 */
private void ProcessRowsAtSnapshot(
    String table, Long snapshotInMillis, String filter, AvroRowConsumer consumer)
    throws IOException {
  Preconditions.checkNotNull(table);
  Preconditions.checkNotNull(consumer);
  CreateReadSessionRequest.Builder createSessionRequestBuilder =
      CreateReadSessionRequest.newBuilder()
          .setParent(parentProjectId)
          .setMaxStreamCount(1)
          .setReadSession(
              ReadSession.newBuilder().setTable(table).setDataFormat(DataFormat.AVRO).build());
  if (snapshotInMillis != null) {
    Timestamp snapshotTimestamp =
        Timestamp.newBuilder()
            .setSeconds(snapshotInMillis / 1_000)
            .setNanos((int) ((snapshotInMillis % 1_000) * 1_000_000))
            .build();
    createSessionRequestBuilder
        .getReadSessionBuilder()
        .setTableModifiers(TableModifiers.newBuilder().setSnapshotTime(snapshotTimestamp).build());
  }
  if (filter != null && !filter.isEmpty()) {
    createSessionRequestBuilder
        .getReadSessionBuilder()
        .setReadOptions(TableReadOptions.newBuilder().setRowRestriction(filter).build());
  }
  ReadSession session = client.createReadSession(createSessionRequestBuilder.build());
  assertEquals(
      String.format(
          "Did not receive expected number of streams for table '%s' CreateReadSession response:%n%s",
          table, session.toString()),
      1, session.getStreamsCount());
  ReadRowsRequest readRowsRequest =
      ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
  SimpleRowReader reader =
      new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
  ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
  for (ReadRowsResponse response : stream) {
    reader.processRows(response.getAvroRows(), consumer);
  }
}
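A hypothetical call site for the helper above, counting the rows visible at a snapshot time; the AtomicLong accumulator and the snapshot value are illustrative, not part of the test class.
// Hypothetical usage: count all rows of `table` as of "now".
final java.util.concurrent.atomic.AtomicLong rowsSeen =
    new java.util.concurrent.atomic.AtomicLong();
ProcessRowsAtSnapshot(
    /* table = */ table,
    /* snapshotInMillis = */ System.currentTimeMillis(),
    /* filter = */ null,
    /* consumer = */ new AvroRowConsumer() {
      @Override
      public void accept(GenericData.Record record) {
        rowsSeen.incrementAndGet();
      }
    });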
use of com.google.cloud.bigquery.storage.v1beta2.ReadSession in project java-bigquerystorage by googleapis.
the class ITBigQueryStorageTest method testColumnSelection.
@Test
public void testColumnSelection() throws IOException {
  String table =
      BigQueryResource.FormatTableResource(
          /* projectId = */ "bigquery-public-data",
          /* datasetId = */ "samples",
          /* tableId = */ "shakespeare");
  TableReadOptions options =
      TableReadOptions.newBuilder()
          .addSelectedFields("word")
          .addSelectedFields("word_count")
          .setRowRestriction("word_count > 100")
          .build();
  CreateReadSessionRequest request =
      CreateReadSessionRequest.newBuilder()
          .setParent(parentProjectId)
          .setMaxStreamCount(1)
          .setReadSession(
              ReadSession.newBuilder()
                  .setTable(table)
                  .setReadOptions(options)
                  .setDataFormat(DataFormat.AVRO)
                  .build())
          .build();
  ReadSession session = client.createReadSession(request);
  assertEquals(
      String.format(
          "Did not receive expected number of streams for table '%s' CreateReadSession response:%n%s",
          table, session.toString()),
      1, session.getStreamsCount());
  ReadRowsRequest readRowsRequest =
      ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
  Schema avroSchema = new Schema.Parser().parse(session.getAvroSchema().getSchema());
  String actualSchemaMessage =
      String.format(
          "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty = */ true));
  assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
  assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
  assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size());
  assertEquals(
      actualSchemaMessage, Schema.Type.STRING, avroSchema.getField("word").schema().getType());
  assertEquals(
      actualSchemaMessage, Schema.Type.LONG, avroSchema.getField("word_count").schema().getType());
  SimpleRowReader reader = new SimpleRowReader(avroSchema);
  long rowCount = 0;
  ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
  for (ReadRowsResponse response : stream) {
    rowCount += response.getRowCount();
    reader.processRows(
        response.getAvroRows(),
        new AvroRowConsumer() {
          @Override
          public void accept(GenericData.Record record) {
            String rowAssertMessage =
                String.format("Row not matching expectations: %s", record.toString());
            Long wordCount = (Long) record.get("word_count");
            assertWithMessage(rowAssertMessage).that(wordCount).isGreaterThan(100L);
            Utf8 word = (Utf8) record.get("word");
            assertWithMessage(rowAssertMessage).that(word.length()).isGreaterThan(0);
          }
        });
  }
  assertEquals(1_333, rowCount);
}
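The test pins setMaxStreamCount(1) so its stream-count assertion is deterministic. With a higher cap the server may return several streams, each independently readable; a sketch under that assumption, reusing the test's request and client:
// Hypothetical: ask for up to 4 streams and drain each one in turn.
CreateReadSessionRequest parallelRequest = request.toBuilder().setMaxStreamCount(4).build();
ReadSession parallelSession = client.createReadSession(parallelRequest);
for (ReadStream stream : parallelSession.getStreamsList()) {
  ReadRowsRequest rows = ReadRowsRequest.newBuilder().setReadStream(stream.getName()).build();
  for (ReadRowsResponse response : client.readRowsCallable().call(rows)) {
    // process response.getAvroRows() with a SimpleRowReader, as above
  }
}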
use of com.google.cloud.bigquery.storage.v1beta2.ReadSession in project java-bigquerystorage by googleapis.
the class ITBigQueryStorageTest method testFilter.
@Test
public void testFilter() throws IOException {
  String table =
      BigQueryResource.FormatTableResource(
          /* projectId = */ "bigquery-public-data",
          /* datasetId = */ "samples",
          /* tableId = */ "shakespeare");
  TableReadOptions options =
      TableReadOptions.newBuilder().setRowRestriction("word_count > 100").build();
  CreateReadSessionRequest request =
      CreateReadSessionRequest.newBuilder()
          .setParent(parentProjectId)
          .setMaxStreamCount(1)
          .setReadSession(
              ReadSession.newBuilder()
                  .setTable(table)
                  .setReadOptions(options)
                  .setDataFormat(DataFormat.AVRO)
                  .build())
          .build();
  ReadSession session = client.createReadSession(request);
  assertEquals(
      String.format(
          "Did not receive expected number of streams for table '%s' CreateReadSession response:%n%s",
          table, session.toString()),
      1, session.getStreamsCount());
  ReadRowsRequest readRowsRequest =
      ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
  SimpleRowReader reader =
      new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
  long rowCount = 0;
  ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
  for (ReadRowsResponse response : stream) {
    rowCount += response.getRowCount();
    reader.processRows(
        response.getAvroRows(),
        new AvroRowConsumer() {
          @Override
          public void accept(GenericData.Record record) {
            Long wordCount = (Long) record.get("word_count");
            assertWithMessage("Row not matching expectations: %s", record.toString())
                .that(wordCount)
                .isGreaterThan(100L);
          }
        });
  }
  assertEquals(1_333, rowCount);
}