Use of com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest in project java-bigquerystorage by googleapis:
the method testSimpleReadAndResume in class ITBigQueryStorageTest.
@Test
public void testSimpleReadAndResume() {
  // Public Shakespeare sample table, used as a stable read fixture.
  TableReference table =
      TableReference.newBuilder()
          .setProjectId("bigquery-public-data")
          .setDatasetId("samples")
          .setTableId("shakespeare")
          .build();

  ReadSession session =
      client.createReadSession(
          /* tableReference = */ table,
          /* parent = */ parentProjectId,
          /* requestedStreams = */ 1);
  assertEquals(
      String.format(
          "Did not receive expected number of streams for table reference '%s' CreateReadSession response:%n%s",
          TextFormat.shortDebugString(table), session.toString()),
      1,
      session.getStreamsCount());

  // We have to read some number of rows in order to be able to resume. More details:
  // https://cloud.google.com/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1beta1#google.cloud.bigquery.storage.v1beta1.ReadRowsRequest
  long totalRows = ReadStreamToOffset(session.getStreams(0), /* rowOffset = */ 34_846);

  // Resume reading the same stream from the offset we stopped at.
  StreamPosition resumePosition =
      StreamPosition.newBuilder().setStream(session.getStreams(0)).setOffset(totalRows).build();
  ServerStream<ReadRowsResponse> responses =
      client
          .readRowsCallable()
          .call(ReadRowsRequest.newBuilder().setReadPosition(resumePosition).build());
  for (ReadRowsResponse response : responses) {
    totalRows += response.getRowCount();
  }

  // The rows skipped plus the rows read after resuming must equal the table's total row count.
  assertEquals(164_656, totalRows);
}
Use of com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest in project java-bigquerystorage by googleapis:
the method ReadStreamToOffset in class ITBigQueryStorageTest.
/**
 * Reads to the specified row offset within the stream. If the stream does not have the desired
 * rows to read, it will read all of them.
 *
 * @param stream the stream to consume rows from
 * @param rowOffset the target number of rows to read past
 * @return the number of requested rows to skip or the total rows read if stream had less rows.
 */
private long ReadStreamToOffset(Stream stream, long rowOffset) {
  ReadRowsRequest request =
      ReadRowsRequest.newBuilder()
          .setReadPosition(StreamPosition.newBuilder().setStream(stream).build())
          .build();

  long rowsSeen = 0;
  // Consume responses until at least rowOffset rows have been covered or the stream ends.
  for (ReadRowsResponse response : client.readRowsCallable().call(request)) {
    rowsSeen += response.getRowCount();
    if (rowsSeen >= rowOffset) {
      return rowOffset;
    }
  }
  return rowsSeen;
}
Use of com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest in project java-bigquerystorage by googleapis:
the method readRowsTest in class ResourceHeaderTest.
@Test
public void readRowsTest() {
  StreamPosition readPosition =
      StreamPosition.newBuilder().setStream(TEST_STREAM).setOffset(125).build();
  ReadRowsRequest request =
      ReadRowsRequest.newBuilder().setReadPosition(readPosition).build();
  try {
    client.readRowsCallable().call(request);
  } catch (UnimplementedException ignored) {
    // Ignore the error: none of the methods are actually implemented.
  }
  verifyHeaderSent(READ_POSITION_PATTERN);
}
Use of com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest in project hadoop-connectors by GoogleCloudDataproc:
the method initialize in class DirectBigQueryRecordReaderTest.
private void initialize() throws Exception {
  // The reader under test is expected to issue exactly this request for the configured stream.
  ReadRowsRequest expectedRequest =
      ReadRowsRequest.newBuilder()
          .setReadPosition(StreamPosition.newBuilder().setStream(STREAM).build())
          .build();
  reader.initialize(split, taskContext);
  verify(bqClient).readRowsCallable();
  verify(readRows).call(eq(expectedRequest));
}
Use of com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest in project hadoop-connectors by GoogleCloudDataproc:
the method initialize in class DirectBigQueryRecordReader.
@Override
public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
  DirectBigQueryInputSplit bqSplit = (DirectBigQueryInputSplit) genericSplit;

  schema = parser.parse(checkNotNull(bqSplit.getSchema(), "schema"));
  stream = Stream.newBuilder().setName(checkNotNull(bqSplit.getName(), "name")).build();

  // Read the split's stream from its start (no offset set on the position).
  StreamPosition startPosition = StreamPosition.newBuilder().setStream(stream).build();
  ReadRowsRequest request =
      ReadRowsRequest.newBuilder().setReadPosition(startPosition).build();

  client = getClient(context.getConfiguration());
  responseIterator = client.readRowsCallable().call(request).iterator();
  recordIterator = Collections.emptyIterator();

  limit = bqSplit.getLimit();
  idx = 0;
  finalized = false;
}
Aggregations