Example usage of com.google.cloud.spanner.Mutation in the Apache Beam project:
class MutationUtilsTest, method testCreateReplaceMutationFromRowWithNulls.
@Test
public void testCreateReplaceMutationFromRowWithNulls() {
  // A Beam Row whose nullable fields are null must convert to the
  // equivalent REPLACE mutation with those columns null.
  Mutation expected = createMutationNulls(Mutation.Op.REPLACE);
  Mutation actual = beamRowToMutationFn(Mutation.Op.REPLACE, TABLE).apply(WRITE_ROW_NULLS);
  assertEquals(expected, actual);
}
Example usage of com.google.cloud.spanner.Mutation in the Apache Beam project:
class MutationUtilsTest, method testCreateReplaceMutationFromRow.
@Test
public void testCreateReplaceMutationFromRow() {
  // Converting a fully populated Beam Row must yield the same REPLACE
  // mutation as building it directly via the Mutation API.
  Mutation expected = createMutation(Mutation.Op.REPLACE);
  Mutation actual = beamRowToMutationFn(Mutation.Op.REPLACE, TABLE).apply(WRITE_ROW);
  assertEquals(expected, actual);
}
Example usage of com.google.cloud.spanner.Mutation in the Apache Beam project:
class SpannerIOWriteTest, method testBatchableMutationFilterFn_cells.
@Test
public void testBatchableMutationFilterFn_cells() {
  // Deletes that are not single-key point deletes can never be batched.
  Mutation deleteAll = Mutation.delete("test", KeySet.all());
  Mutation deletePrefix = Mutation.delete("test", KeySet.prefixRange(Key.of(1L)));
  Mutation deleteRange =
      Mutation.delete(
          "test", KeySet.range(KeyRange.openOpen(Key.of(1L), Key.newBuilder().build())));

  MutationGroup[] groups =
      new MutationGroup[] {
        g(m(1L)),
        g(m(2L), m(3L)),
        // not batchable - too big.
        g(m(2L), m(3L), m(4L), m(5L)),
        g(del(1L)),
        // not point delete.
        g(del(5L, 6L)),
        g(deleteAll),
        g(deletePrefix),
        g(deleteRange)
      };

  // Cell limit of 3 keys' worth; byte and group-size limits are generous.
  BatchableMutationFilterFn filterFn =
      new BatchableMutationFilterFn(null, null, 10000000, 3 * CELLS_PER_KEY, 1000);
  BatchableMutationFilterFn.ProcessContext ctx = Mockito.mock(ProcessContext.class);
  when(ctx.sideInput(any())).thenReturn(getSchema());
  // Capture the outputs.
  doNothing().when(ctx).output(mutationGroupCaptor.capture());
  doNothing().when(ctx).output(any(), mutationGroupListCaptor.capture());

  // Feed every group through the filter.
  for (MutationGroup group : groups) {
    when(ctx.element()).thenReturn(group);
    filterFn.processElement(ctx);
  }

  // Batchable groups arrive on the main output.
  assertThat(
      mutationGroupCaptor.getAllValues(),
      containsInAnyOrder(g(m(1L)), g(m(2L), m(3L)), g(del(1L))));

  // Everything else is emitted as unbatchable.
  Iterable<MutationGroup> unbatchable = Iterables.concat(mutationGroupListCaptor.getAllValues());
  assertThat(
      unbatchable,
      containsInAnyOrder(
          // not batchable - too big.
          g(m(2L), m(3L), m(4L), m(5L)),
          // not point delete.
          g(del(5L, 6L)),
          g(deleteAll),
          g(deletePrefix),
          g(deleteRange)));
}
Example usage of com.google.cloud.spanner.Mutation in the Apache Beam project:
class SpannerIOWriteTest, method deadlineExceededRetries.
@Test
public void deadlineExceededRetries() throws InterruptedException {
// A single mutation; the write path should retry it transparently.
List<Mutation> mutationList = Arrays.asList(m((long) 1));
// Mock the sleeper so that the retry back-off does not actually sleep.
WriteToSpannerFn.sleeper = Mockito.mock(Sleeper.class);
// Respond with 2 timeouts followed by a success.
when(serviceFactory.mockDatabaseClient().writeAtLeastOnceWithOptions(any(), any(ReadQueryUpdateTransactionOption.class))).thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, "simulated Timeout 1")).thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, "simulated Timeout 2")).thenReturn(new CommitResponse(Timestamp.now()));
SpannerWriteResult result = pipeline.apply(Create.of(mutationList)).apply(SpannerIO.write().withProjectId("test-project").withInstanceId("test-instance").withDatabaseId("test-database").withServiceFactory(serviceFactory).withBatchSizeBytes(0).withFailureMode(SpannerIO.FailureMode.REPORT_FAILURES));
// The write eventually succeeded, so verify there are no reported failures.
PAssert.that(result.getFailedMutations()).satisfies(m -> {
assertEquals(0, Iterables.size(m));
return null;
});
pipeline.run().waitUntilFinish();
// 2 calls to sleeper, one per retried DEADLINE_EXCEEDED.
verify(WriteToSpannerFn.sleeper, times(2)).sleep(anyLong());
// 3 write attempts for the single mutationGroup (2 failures + 1 success).
verify(serviceFactory.mockDatabaseClient(), times(3)).writeAtLeastOnceWithOptions(any(), any(ReadQueryUpdateTransactionOption.class));
}
Example usage of com.google.cloud.spanner.Mutation in the Apache Beam project:
class SpannerChangeStreamOrderedWithinKeyIT, method testOrderedWithinKey.
@Test
public void testOrderedWithinKey() {
final SpannerConfig spannerConfig = SpannerConfig.create().withProjectId(projectId).withInstanceId(instanceId).withDatabaseId(databaseId);
// Commit an initial transaction to get the timestamp to start reading from.
List<Mutation> mutations = new ArrayList<>();
mutations.add(insertRecordMutation(0));
final com.google.cloud.Timestamp startTimestamp = databaseClient.write(mutations);
// Get the timestamp of the last committed transaction to use as the end timestamp.
final com.google.cloud.Timestamp endTimestamp = writeTransactionsToDatabase();
// Read the change stream over [startTimestamp, endTimestamp], key each change
// record by its primary key, then group per key within a fixed window so each
// output string lists that key's changes in commit-timestamp order.
final PCollection<String> tokens = pipeline.apply(SpannerIO.readChangeStream().withSpannerConfig(spannerConfig).withChangeStreamName(changeStreamName).withMetadataDatabase(databaseId).withInclusiveStartAt(startTimestamp).withInclusiveEndAt(endTimestamp)).apply(ParDo.of(new BreakRecordByModFn())).apply(ParDo.of(new KeyByIdFn())).apply(ParDo.of(new KeyValueByCommitTimestampAndRecordSequenceFn<>())).apply(Window.into(FixedWindows.of(Duration.standardMinutes(2)))).apply(GroupByKey.create()).apply(ParDo.of(new ToStringFn()));
// Assert that the returned PCollection contains one entry per key for the committed
// transactions, and that each entry contains the mutations in commit timestamp order.
// Note that if inserts and updates to the same key are in the same transaction, the change
// record for that transaction will only contain a record for the last update for that key.
// Note that if an insert then a delete for a key happens in the same transaction, there will be
// change records for that key.
PAssert.that(tokens).containsInAnyOrder("{\"SingerId\":\"0\"}\n" + "{\"FirstName\":\"Inserting mutation 0\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"1\"}\n" + "{\"FirstName\":\"Inserting mutation 1\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 1\"};" + "Deleted record;" + "{\"FirstName\":\"Inserting mutation 1\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"2\"}\n" + "{\"FirstName\":\"Inserting mutation 2\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 2\"};" + "Deleted record;", "{\"SingerId\":\"3\"}\n" + "{\"FirstName\":\"Inserting mutation 3\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 3\"};" + "Deleted record;", "{\"SingerId\":\"4\"}\n" + "{\"FirstName\":\"Inserting mutation 4\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"5\"}\n" + "{\"FirstName\":\"Updating mutation 5\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 5\"};" + "Deleted record;");
pipeline.run().waitUntilFinish();
}
Aggregations