use of com.google.datastore.v1.Mutation in project beam by apache.
the class DatastoreV1Test method testDeleteKeys.
/**
* Test that valid keys are transformed to delete mutations.
*/
@Test
public void testDeleteKeys() {
  Key key = makeKey("bird", "finch").build();
  DeleteKeyFn deleteKeyFn = new DeleteKeyFn();
  Mutation expectedMutation = makeDelete(key).build();
  assertEquals(expectedMutation, deleteKeyFn.apply(key));
}
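For orientation, DeleteKeyFn is exercised here purely as a Key-to-Mutation function. A minimal sketch of such a function, assuming it follows Beam's SimpleFunction pattern (an illustration, not the verbatim Beam source):

static class DeleteKeyFn extends SimpleFunction<Key, Mutation> {
  @Override
  public Mutation apply(Key key) {
    // makeDelete (from DatastoreHelper) wraps the key in a delete mutation.
    return makeDelete(key).build();
  }
}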
use of com.google.datastore.v1.Mutation in project beam by apache.
the class DatastoreV1Test method testDatatoreWriterFnRetriesErrors.
/**
* Tests {@link DatastoreWriterFn} with a failed request which is retried.
*/
@Test
public void testDatatoreWriterFnRetriesErrors() throws Exception {
  List<Mutation> mutations = new ArrayList<>();
  int numRpcs = 2;
  for (int i = 0; i < DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * numRpcs; ++i) {
    mutations.add(
        makeUpsert(Entity.newBuilder().setKey(makeKey("key" + i, i + 1)).build()).build());
  }
  CommitResponse successfulCommit = CommitResponse.getDefaultInstance();
  // The first commit succeeds, the second fails with a retryable DEADLINE_EXCEEDED,
  // and the retry of the second commit succeeds.
  when(mockDatastore.commit(any(CommitRequest.class)))
      .thenReturn(successfulCommit)
      .thenThrow(new DatastoreException("commit", Code.DEADLINE_EXCEEDED, "", null))
      .thenReturn(successfulCommit);
  DatastoreWriterFn datastoreWriter =
      new DatastoreWriterFn(
          StaticValueProvider.of(PROJECT_ID), null, mockDatastoreFactory, new FakeWriteBatcher());
  DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
  doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
  doFnTester.processBundle(mutations);
  // Two successful batches and one failed attempt should be recorded.
  verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 2);
  verifyMetricWasSet("BatchDatastoreWrite", "unknown", "", 1);
}
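The FakeWriteBatcher passed to DatastoreWriterFn keeps the batch size deterministic, so the test can predict exactly how many RPCs are issued. A plausible sketch, assuming DatastoreV1.WriteBatcher exposes start, latency-feedback, and batch-size hooks (the interface shape is an assumption, not confirmed by this snippet):

/** A WriteBatcher that ignores latency feedback and always returns a fixed batch size. */
static class FakeWriteBatcher implements DatastoreV1.WriteBatcher {
  @Override
  public void start() {}

  @Override
  public void addRequestLatency(long timeSinceEpochMillis, long latencyMillis, int numMutations) {
    // Deliberately ignored so unit-test results do not depend on timing.
  }

  @Override
  public int nextBatchSize(long timeSinceEpochMillis) {
    // Every full batch holds exactly DATASTORE_BATCH_UPDATE_ENTITIES_START mutations.
    return DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START;
  }
}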
use of com.google.datastore.v1.Mutation in project beam by apache.
the class DatastoreV1Test method datastoreWriterFnTest.
// A helper method to test DatastoreWriterFn for various batch sizes.
private void datastoreWriterFnTest(int numMutations) throws Exception {
  // Create the requested number of mutations.
  List<Mutation> mutations = new ArrayList<>(numMutations);
  for (int i = 0; i < numMutations; ++i) {
    mutations.add(
        makeUpsert(Entity.newBuilder().setKey(makeKey("key" + i, i + 1)).build()).build());
  }
  DatastoreWriterFn datastoreWriter =
      new DatastoreWriterFn(
          StaticValueProvider.of(PROJECT_ID), null, mockDatastoreFactory, new FakeWriteBatcher());
  DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
  doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
  doFnTester.processBundle(mutations);
  // Verify that all the batch requests were made with the expected mutations.
  int start = 0;
  while (start < numMutations) {
    int end = Math.min(numMutations, start + DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START);
    CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
    commitRequest.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
    commitRequest.addAllMutations(mutations.subList(start, end));
    verify(mockDatastore, times(1)).commit(commitRequest.build());
    start = end;
  }
}
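Callers can then sweep the interesting batch-size boundaries. Two hypothetical invocations (the test names and sizes below are illustrative, not taken from the Beam source):

@Test
public void testWriterFnWithOneBatch() throws Exception {
  // Fewer mutations than one full batch: exactly one commit RPC is expected.
  datastoreWriterFnTest(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START - 1);
}

@Test
public void testWriterFnWithMultipleBatches() throws Exception {
  // A non-exact multiple: several full batches plus a shorter final one.
  datastoreWriterFnTest(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * 3 + 100);
}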
use of com.google.datastore.v1.Mutation in project beam by apache.
the class DatastoreV1Test method testAddEntities.
/**
 * Test that entities with valid keys are transformed to upsert mutations.
 */
@Test
public void testAddEntities() throws Exception {
  Key key = makeKey("bird", "finch").build();
  Entity entity = Entity.newBuilder().setKey(key).build();
  UpsertFn upsertFn = new UpsertFn();
  Mutation expectedMutation = makeUpsert(entity).build();
  assertEquals(expectedMutation, upsertFn.apply(entity));
}
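UpsertFn is the mirror image of DeleteKeyFn above; a minimal sketch under the same SimpleFunction assumption (again an illustration, not the verbatim Beam source):

static class UpsertFn extends SimpleFunction<Entity, Mutation> {
  @Override
  public Mutation apply(Entity entity) {
    // makeUpsert (from DatastoreHelper) wraps the entity in an upsert mutation.
    return makeUpsert(entity).build();
  }
}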
use of com.google.datastore.v1.Mutation in project beam by apache.
the class V1WriteIT method testDatastoreWriterFnWithDuplicatedEntities.
/**
 * Tests {@link DatastoreV1.DatastoreWriterFn} with duplicated entities. Once a duplicated
 * entity is found, the current batch gets flushed.
 */
@Test
public void testDatastoreWriterFnWithDuplicatedEntities() throws Exception {
  List<Mutation> mutations = new ArrayList<>(200);
  V1TestOptions options = TestPipeline.testingPipelineOptions().as(V1TestOptions.class);
  Pipeline pipeline = TestPipeline.create(options);
  for (int i = 1; i <= 200; i++) {
    Key key = makeKey("key" + i, i + 1).build();
    mutations.add(makeUpsert(Entity.newBuilder().setKey(key).build()).build());
    // Duplicate every 30th entity.
    if (i % 30 == 0) {
      mutations.add(makeUpsert(Entity.newBuilder().setKey(key).build()).build());
    }
  }
  DatastoreV1.DatastoreWriterFn datastoreWriter =
      new DatastoreV1.DatastoreWriterFn(
          TestPipeline.testingPipelineOptions().as(GcpOptions.class).getProject(), null);
  PTransform<PCollection<? extends Mutation>, PCollection<Void>> datastoreWriterTransform =
      ParDo.of(datastoreWriter);
  // The following three lines turn the original ArrayList into a single element of the
  // first PCollection.
  List<Mutation> newArrayList = new ArrayList<>(mutations);
  Create.Values<Iterable<Mutation>> mutationIterable =
      Create.of(Collections.singleton(newArrayList));
  PCollection<Iterable<Mutation>> input = pipeline.apply(mutationIterable);
  // Flatten expands the Iterable into individual elements of the same bundle, which forces
  // the mutations to be processed in the same order they were added to the original list.
  input.apply(Flatten.<Mutation>iterables()).apply(datastoreWriterTransform);
  PipelineResult pResult = pipeline.run();
  MetricQueryResults metricResults =
      pResult
          .metrics()
          .queryMetrics(
              MetricsFilter.builder()
                  .addNameFilter(
                      MetricNameFilter.named(DatastoreV1.DatastoreWriterFn.class, "batchSize"))
                  .build());
  AtomicLong timesCommitted = new AtomicLong();
  metricResults
      .getDistributions()
      .forEach(
          distribution -> {
            if (distribution.getName().getName().equals("batchSize")) {
              timesCommitted.set(distribution.getCommitted().getCount());
            }
          });
  assertEquals(7, timesCommitted.get());
}
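The expected count of 7 follows from the input layout, assuming (as the Javadoc above states) that each duplicated entity forces a flush of the batch accumulated so far:

// Duplicates are appended at i = 30, 60, 90, 120, 150, and 180, so six batches are
// flushed early; the mutations remaining after i = 180 go out in one final commit.
int duplicateFlushes = 200 / 30;            // = 6
int expectedCommits = duplicateFlushes + 1; // = 6 + 1 = 7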