use of com.google.cloud.spanner.Partition in project beam by apache.
the class SpannerIOReadTest method runReadTest.
private void runReadTest(SpannerConfig spannerConfig) throws Exception {
  Timestamp timestamp = Timestamp.ofTimeMicroseconds(12345);
  TimestampBound timestampBound = TimestampBound.ofReadTimestamp(timestamp);
  PCollection<Struct> one =
      pipeline.apply(
          "read q",
          SpannerIO.read()
              .withSpannerConfig(spannerConfig)
              .withTable("users")
              .withColumns("id", "name")
              .withTimestampBound(timestampBound));
  FakeBatchTransactionId id = new FakeBatchTransactionId("runReadTest");
  when(mockBatchTx.getBatchTransactionId()).thenReturn(id);
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(timestampBound))
      .thenReturn(mockBatchTx);
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(any(BatchTransactionId.class)))
      .thenReturn(mockBatchTx);
  Partition fakePartition =
      FakePartitionFactory.createFakeReadPartition(ByteString.copyFromUtf8("one"));
  when(mockBatchTx.partitionRead(
          any(PartitionOptions.class),
          eq("users"),
          eq(KeySet.all()),
          eq(Arrays.asList("id", "name")),
          any(ReadQueryUpdateTransactionOption.class)))
      .thenReturn(Arrays.asList(fakePartition, fakePartition, fakePartition));
  when(mockBatchTx.execute(any(Partition.class)))
      .thenReturn(
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(0, 2)),
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(2, 4)),
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(4, 6)));
  PAssert.that(one).containsInAnyOrder(FAKE_ROWS);
  pipeline.run();
}
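For context, here is a minimal sketch of the partitioned-read flow these mocks simulate, written against the real com.google.cloud.spanner batch API; the spanner client and db DatabaseId are placeholders, not part of the test above.
// Sketch only: open a batch read-only transaction, split the read into
// partitions, then execute each partition (in Beam, on separate workers).
BatchClient client = spanner.getBatchClient(db); // placeholder client and database
BatchReadOnlyTransaction tx =
    client.batchReadOnlyTransaction(TimestampBound.ofReadTimestamp(timestamp));
List<Partition> partitions =
    tx.partitionRead(
        PartitionOptions.getDefaultInstance(),
        "users",
        KeySet.all(),
        Arrays.asList("id", "name"));
for (Partition p : partitions) {
  try (ResultSet rs = tx.execute(p)) {
    while (rs.next()) {
      // consume rows
    }
  }
}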
use of com.google.cloud.spanner.Partition in project beam by apache.
the class SpannerIOReadTest method testQueryMetricsFail.
@Test
public void testQueryMetricsFail() throws Exception {
  Timestamp timestamp = Timestamp.ofTimeMicroseconds(12345);
  TimestampBound timestampBound = TimestampBound.ofReadTimestamp(timestamp);
  SpannerConfig spannerConfig = getSpannerConfig();
  pipeline.apply(
      "read q",
      SpannerIO.read()
          .withSpannerConfig(spannerConfig)
          .withQuery("SELECT * FROM users")
          .withQueryName("queryName")
          .withTimestampBound(timestampBound));
  FakeBatchTransactionId id = new FakeBatchTransactionId("runQueryTest");
  when(mockBatchTx.getBatchTransactionId()).thenReturn(id);
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(timestampBound))
      .thenReturn(mockBatchTx);
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(any(BatchTransactionId.class)))
      .thenReturn(mockBatchTx);
  Partition fakePartition =
      FakePartitionFactory.createFakeQueryPartition(ByteString.copyFromUtf8("one"));
  when(mockBatchTx.partitionQuery(
          any(PartitionOptions.class),
          eq(Statement.of("SELECT * FROM users")),
          any(ReadQueryUpdateTransactionOption.class)))
      .thenReturn(Arrays.asList(fakePartition));
  // Every execute() call fails, so the read should surface DEADLINE_EXCEEDED.
  when(mockBatchTx.execute(any(Partition.class)))
      .thenThrow(
          SpannerExceptionFactory.newSpannerException(
              ErrorCode.DEADLINE_EXCEEDED, "Simulated Timeout 1"));
  try {
    pipeline.run();
  } catch (PipelineExecutionException e) {
    if (e.getCause() instanceof SpannerException
        && ((SpannerException) e.getCause()).getErrorCode().getGrpcStatusCode()
            == Code.DEADLINE_EXCEEDED) {
      // expected
    } else {
      throw e;
    }
  }
  verifyMetricWasSet("test", "aaa", "123", "deadline_exceeded", null, 1);
  verifyMetricWasSet("test", "aaa", "123", "ok", null, 0);
}
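Assuming JUnit 4.13+ were available, the try/catch above could be expressed more directly with assertThrows; a hedged alternative, not how the original test is written:
PipelineExecutionException e =
    assertThrows(PipelineExecutionException.class, () -> pipeline.run());
SpannerException cause = (SpannerException) e.getCause();
assertEquals(ErrorCode.DEADLINE_EXCEEDED, cause.getErrorCode());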
use of com.google.cloud.spanner.Partition in project beam by apache.
the class SpannerIOReadTest method testQueryMetricsSucceed.
@Test
public void testQueryMetricsSucceed() throws Exception {
  Timestamp timestamp = Timestamp.ofTimeMicroseconds(12345);
  TimestampBound timestampBound = TimestampBound.ofReadTimestamp(timestamp);
  SpannerConfig spannerConfig = getSpannerConfig();
  pipeline.apply(
      "read q",
      SpannerIO.read()
          .withSpannerConfig(spannerConfig)
          .withQuery("SELECT * FROM users")
          .withQueryName("queryName")
          .withTimestampBound(timestampBound));
  FakeBatchTransactionId id = new FakeBatchTransactionId("runQueryTest");
  when(mockBatchTx.getBatchTransactionId()).thenReturn(id);
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(timestampBound))
      .thenReturn(mockBatchTx);
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(any(BatchTransactionId.class)))
      .thenReturn(mockBatchTx);
  Partition fakePartition =
      FakePartitionFactory.createFakeQueryPartition(ByteString.copyFromUtf8("one"));
  // Two fake partitions, so execute() is called twice and each success
  // increments the "ok" counter once.
  when(mockBatchTx.partitionQuery(
          any(PartitionOptions.class),
          eq(Statement.of("SELECT * FROM users")),
          any(ReadQueryUpdateTransactionOption.class)))
      .thenReturn(Arrays.asList(fakePartition, fakePartition));
  when(mockBatchTx.execute(any(Partition.class)))
      .thenReturn(
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(0, 2)),
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(2, 4)),
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(4, 6)))
      .thenReturn(
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(0, 2)),
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(2, 4)),
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(4, 6)));
  pipeline.run();
  verifyMetricWasSet("test", "aaa", "123", "deadline_exceeded", null, 0);
  verifyMetricWasSet("test", "aaa", "123", "ok", null, 2);
}
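Note the stubbing on execute(): Mockito's thenReturn(T, T...) hands out the listed result sets on consecutive calls. With only two fake partitions, execute() runs exactly twice, which is why the expected "ok" count is 2; the remaining stubbed answers are simply never consumed.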
use of com.google.cloud.spanner.Partition in project beam by apache.
the class SpannerIOReadTest method runReadUsingIndex.
@Test
public void runReadUsingIndex() throws Exception {
  Timestamp timestamp = Timestamp.ofTimeMicroseconds(12345);
  TimestampBound timestampBound = TimestampBound.ofReadTimestamp(timestamp);
  SpannerConfig spannerConfig = getSpannerConfig();
  // The later withTimestampBound(...) takes precedence over
  // withTimestamp(Timestamp.now()), as the mock keyed on timestampBound confirms.
  PCollection<Struct> one =
      pipeline.apply(
          "read q",
          SpannerIO.read()
              .withTimestamp(Timestamp.now())
              .withSpannerConfig(spannerConfig)
              .withTable("users")
              .withColumns("id", "name")
              .withIndex("theindex")
              .withTimestampBound(timestampBound));
  FakeBatchTransactionId id = new FakeBatchTransactionId("runReadUsingIndexTest");
  when(mockBatchTx.getBatchTransactionId()).thenReturn(id);
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(timestampBound))
      .thenReturn(mockBatchTx);
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(any(BatchTransactionId.class)))
      .thenReturn(mockBatchTx);
  Partition fakePartition =
      FakePartitionFactory.createFakeReadPartition(ByteString.copyFromUtf8("one"));
  when(serviceFactory.mockBatchClient().batchReadOnlyTransaction(id)).thenReturn(mockBatchTx);
  when(mockBatchTx.partitionReadUsingIndex(
          any(PartitionOptions.class),
          eq("users"),
          eq("theindex"),
          eq(KeySet.all()),
          eq(Arrays.asList("id", "name")),
          any(ReadQueryUpdateTransactionOption.class)))
      .thenReturn(Arrays.asList(fakePartition, fakePartition, fakePartition));
  when(mockBatchTx.execute(any(Partition.class)))
      .thenReturn(
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(0, 2)),
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(2, 4)),
          ResultSets.forRows(FAKE_TYPE, FAKE_ROWS.subList(4, 6)));
  PAssert.that(one).containsInAnyOrder(FAKE_ROWS);
  pipeline.run();
}
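The index-backed call mocked here differs from plain partitionRead only by the extra index argument. A minimal sketch against the real batch API, reusing the tx transaction from the earlier sketch:
List<Partition> partitions =
    tx.partitionReadUsingIndex(
        PartitionOptions.getDefaultInstance(),
        "users",
        "theindex",
        KeySet.all(),
        Arrays.asList("id", "name"));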
use of com.google.cloud.spanner.Partition in project spanner-jdbc by olavloite.
the class BatchReadOnlyTest method testExecuteBatchReadOnly.
@Test
public void testExecuteBatchReadOnly() throws SQLException, NoSuchFieldException,
    SecurityException, IllegalArgumentException, IllegalAccessException {
  for (int testRun = 0; testRun < 2; testRun++) {
    final int numberOfPartitions = 6;
    BatchClient batchClient = mock(BatchClient.class);
    BatchReadOnlyTransaction tx = mock(BatchReadOnlyTransaction.class);
    List<Partition> partitions = new ArrayList<>(numberOfPartitions);
    for (int i = 0; i < numberOfPartitions; i++) {
      partitions.add(mock(Partition.class));
    }
    when(tx.partitionQuery(any(), any())).then(new Returns(partitions));
    when(batchClient.batchReadOnlyTransaction(TimestampBound.strong())).then(new Returns(tx));
    // Inject the mocked BatchClient into the connection's transaction via reflection.
    Field field = CloudSpannerTransaction.class.getDeclaredField("batchClient");
    field.setAccessible(true);
    field.set(connection.getTransaction(), batchClient);
    connection.setBatchReadOnly(true);
    // First run uses a plain Statement, second run a PreparedStatement.
    Statement statement;
    if (testRun % 2 == 0) {
      statement = connection.createStatement();
      assertTrue(statement.execute(SELECT_ALL_FROM_FOO));
    } else {
      PreparedStatement ps = connection.prepareStatement(SELECT_ALL_FROM_FOO);
      assertTrue(ps.execute());
      statement = ps;
    }
    // Each partition surfaces as a separate JDBC result set.
    List<ResultSet> resultSets = new ArrayList<>();
    do {
      resultSets.add(statement.getResultSet());
    } while (statement.getMoreResults());
    assertEquals(numberOfPartitions, resultSets.size());
  }
}
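Under the hood the driver presumably runs something close to the plain batch API below, surfacing each partition as one JDBC result set. A sketch only, assuming SELECT_ALL_FROM_FOO stands for "SELECT * FROM FOO":
BatchReadOnlyTransaction tx = batchClient.batchReadOnlyTransaction(TimestampBound.strong());
List<Partition> partitions =
    tx.partitionQuery(
        PartitionOptions.getDefaultInstance(),
        com.google.cloud.spanner.Statement.of("SELECT * FROM FOO"));
for (Partition p : partitions) {
  try (com.google.cloud.spanner.ResultSet rs = tx.execute(p)) {
    while (rs.next()) {
      // rows of this partition
    }
  }
}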