use of com.google.bigtable.v2.Mutation in project beam by apache.
In the class BigtableClientWrapper, the method writeRow:
void writeRow(
    String key,
    String table,
    String familyColumn,
    String columnQualifier,
    byte[] value,
    long timestampMicros) {
  Mutation.SetCell setCell =
      Mutation.SetCell.newBuilder()
          .setFamilyName(familyColumn)
          .setColumnQualifier(byteStringUtf8(columnQualifier))
          .setValue(byteString(value))
          .setTimestampMicros(timestampMicros)
          .build();
  Mutation mutation = Mutation.newBuilder().setSetCell(setCell).build();
  MutateRowRequest mutateRowRequest =
      MutateRowRequest.newBuilder()
          .setRowKey(byteStringUtf8(key))
          .setTableName(bigtableOptions.getInstanceName().toTableNameStr(table))
          .addMutations(mutation)
          .build();
  dataClient.mutateRow(mutateRowRequest);
}
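For context, a call to this helper might look like the following sketch; the wrapper instance and the row, table, family, and qualifier names are hypothetical placeholders, and the timestamp is simply the current time in microseconds.

// Hypothetical usage of writeRow; `wrapper` is an already-constructed
// BigtableClientWrapper and all names below are placeholders.
long nowMicros = System.currentTimeMillis() * 1000L;
wrapper.writeRow(
    "row-1", "my-table", "cf", "name",
    "value".getBytes(StandardCharsets.UTF_8), nowMicros);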
use of com.google.bigtable.v2.Mutation in project beam by apache.
In the class BigtableIOTest, the method testWritingAndWaitingOnResults:
/**
* Tests that the outputs of the Bigtable writer are correctly windowed, and can be used in a
* Wait.on transform as the trigger.
*/
@Test
public void testWritingAndWaitingOnResults() throws Exception {
  final String table = "table";
  final String key = "key";
  final String value = "value";
  service.createTable(table);
  Instant elementTimestamp = Instant.parse("2019-06-10T00:00:00");
  Duration windowDuration = Duration.standardMinutes(1);
  TestStream<KV<ByteString, Iterable<Mutation>>> writeInputs =
      TestStream.create(bigtableCoder)
          .advanceWatermarkTo(elementTimestamp)
          .addElements(makeWrite(key, value))
          .advanceWatermarkToInfinity();
  TestStream<String> testInputs =
      TestStream.create(StringUtf8Coder.of())
          .advanceWatermarkTo(elementTimestamp)
          .addElements("done")
          .advanceWatermarkToInfinity();
  PCollection<BigtableWriteResult> writes =
      p.apply("rows", writeInputs)
          .apply(
              "window rows",
              Window.<KV<ByteString, Iterable<Mutation>>>into(FixedWindows.of(windowDuration))
                  .withAllowedLateness(Duration.ZERO))
          .apply("write", defaultWrite.withTableId(table).withWriteResults());
  PCollection<String> inputs =
      p.apply("inputs", testInputs)
          .apply("window inputs", Window.into(FixedWindows.of(windowDuration)))
          .apply("wait", Wait.on(writes));
  BoundedWindow expectedWindow = new IntervalWindow(elementTimestamp, windowDuration);
  PAssert.that(inputs).inWindow(expectedWindow).containsInAnyOrder("done");
  p.run();
}
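Outside the test harness, the pattern this test exercises boils down to: produce per-window BigtableWriteResult elements with withWriteResults(), then gate a downstream stage on them with Wait.on. A minimal sketch, assuming `rows` and `followUp` are pre-existing PCollections and the project/instance/table IDs are placeholders:

PCollection<BigtableWriteResult> writeResults =
    rows.apply(
        "write",
        BigtableIO.write()
            .withProjectId("project")   // placeholder IDs (assumption)
            .withInstanceId("instance")
            .withTableId("table")
            .withWriteResults());
// Elements of `followUp` are emitted only after the Bigtable writes for the
// corresponding window have completed.
PCollection<String> gated = followUp.apply("wait", Wait.on(writeResults));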
use of com.google.bigtable.v2.Mutation in project beam by apache.
In the class BigtableIOTest, the method makeWrite:
/**
* Helper function to make a single row mutation to be written.
*/
private static KV<ByteString, Iterable<Mutation>> makeWrite(String key, String value) {
  ByteString rowKey = ByteString.copyFromUtf8(key);
  Iterable<Mutation> mutations =
      ImmutableList.of(
          Mutation.newBuilder()
              .setSetCell(SetCell.newBuilder().setValue(ByteString.copyFromUtf8(value)))
              .build());
  return KV.of(rowKey, mutations);
}
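The helper above sets only the cell value, which is evidently enough for this test's in-memory service; a real write would normally also specify the column family, qualifier, and timestamp. A fully specified mutation might look like this (family and qualifier names are placeholders):

Mutation fullMutation =
    Mutation.newBuilder()
        .setSetCell(
            Mutation.SetCell.newBuilder()
                .setFamilyName("cf")                                // placeholder family
                .setColumnQualifier(ByteString.copyFromUtf8("col")) // placeholder qualifier
                .setValue(ByteString.copyFromUtf8("value"))
                .setTimestampMicros(System.currentTimeMillis() * 1000L))
        .build();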
use of com.google.datastore.v1.Mutation in project beam by apache.
In the class V1WriteIT, the method testDatastoreWriterFnWithDuplicatedEntities:
/**
 * Tests {@link DatastoreV1.DatastoreWriterFn} with duplicated entities. Once a duplicate
 * entity is found, the current batch gets flushed.
 */
@Test
public void testDatastoreWriterFnWithDuplicatedEntities() throws Exception {
  List<Mutation> mutations = new ArrayList<>(200);
  V1TestOptions options = TestPipeline.testingPipelineOptions().as(V1TestOptions.class);
  Pipeline pipeline = TestPipeline.create(options);
  for (int i = 1; i <= 200; i++) {
    Key key = makeKey("key" + i, i + 1).build();
    mutations.add(makeUpsert(Entity.newBuilder().setKey(key).build()).build());
    if (i % 30 == 0) {
      mutations.add(makeUpsert(Entity.newBuilder().setKey(key).build()).build());
    }
  }
  DatastoreV1.DatastoreWriterFn datastoreWriter =
      new DatastoreV1.DatastoreWriterFn(
          TestPipeline.testingPipelineOptions().as(GcpOptions.class).getProject(), null);
  PTransform<PCollection<? extends Mutation>, PCollection<Void>> datastoreWriterTransform =
      ParDo.of(datastoreWriter);
  /*
   * The following three lines turn the original ArrayList into the single element of the
   * input PCollection.
   */
  List<Mutation> newArrayList = new ArrayList<>(mutations);
  Create.Values<Iterable<Mutation>> mutationIterable =
      Create.of(Collections.singleton(newArrayList));
  PCollection<Iterable<Mutation>> input = pipeline.apply(mutationIterable);
  /*
   * Flatten divides the PCollection into several elements of the same bundle. This forces
   * the list of mutations to be processed in the same order in which they were added to
   * the original list.
   */
  input.apply(Flatten.<Mutation>iterables()).apply(datastoreWriterTransform);
  PipelineResult pResult = pipeline.run();
  MetricQueryResults metricResults =
      pResult.metrics().queryMetrics(
          MetricsFilter.builder()
              .addNameFilter(
                  MetricNameFilter.named(DatastoreV1.DatastoreWriterFn.class, "batchSize"))
              .build());
  AtomicLong timesCommitted = new AtomicLong();
  metricResults.getDistributions().forEach(
      distribution -> {
        if (distribution.getName().getName().equals("batchSize")) {
          timesCommitted.set(distribution.getCommitted().getCount());
        }
      });
  assertEquals(7, timesCommitted.get());
}
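The expected value of 7 follows from the flush rule stated in the javadoc: a single Datastore commit cannot contain two mutations for the same key, so the writer flushes as soon as it sees a key that is already in the current batch. Duplicates are added at i = 30, 60, 90, 120, 150, and 180, giving six mid-bundle flushes, and the leftover mutations are committed when the bundle finishes, for seven commits in total. A minimal sketch of that bookkeeping (a simplification, not Beam's actual implementation; commit() is a hypothetical stand-in for the RPC):

Set<Key> keysInBatch = new HashSet<>();
List<Mutation> batch = new ArrayList<>();
for (Mutation m : mutations) {
  Key key = m.getUpsert().getKey();
  if (!keysInBatch.add(key)) {
    // Key already present in this batch: flush before adding the duplicate.
    commit(batch);
    batch.clear();
    keysInBatch.clear();
    keysInBatch.add(key);
  }
  batch.add(m);
}
if (!batch.isEmpty()) {
  commit(batch); // final flush, analogous to finishBundle
}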
use of com.google.datastore.v1.Mutation in project beam by apache.
In the class DatastoreV1Test, the method testDatatoreWriterFnRetriesErrors:
/**
* Tests {@link DatastoreWriterFn} with a failed request which is retried.
*/
@Test
public void testDatatoreWriterFnRetriesErrors() throws Exception {
  List<Mutation> mutations = new ArrayList<>();
  int numRpcs = 2;
  for (int i = 0; i < DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * numRpcs; ++i) {
    mutations.add(
        makeUpsert(Entity.newBuilder().setKey(makeKey("key" + i, i + 1)).build()).build());
  }
  CommitResponse successfulCommit = CommitResponse.getDefaultInstance();
  when(mockDatastore.commit(any(CommitRequest.class)))
      .thenReturn(successfulCommit)
      .thenThrow(new DatastoreException("commit", Code.DEADLINE_EXCEEDED, "", null))
      .thenReturn(successfulCommit);
  DatastoreWriterFn datastoreWriter =
      new DatastoreWriterFn(
          StaticValueProvider.of(PROJECT_ID), null, mockDatastoreFactory, new FakeWriteBatcher());
  DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
  doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
  doFnTester.processBundle(mutations);
  verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 2);
  verifyMetricWasSet("BatchDatastoreWrite", "unknown", "", 1);
}
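The mocked answer sequence (success, DEADLINE_EXCEEDED, success) means the second commit RPC fails once and succeeds on retry, which is why the "ok" counter ends at 2 and "unknown" at 1. The retry itself follows the usual bounded-backoff pattern; a generic sketch of such a loop (not DatastoreWriterFn's exact code; FluentBackoff is Beam's org.apache.beam.sdk.util.FluentBackoff, and Sleeper/BackOffUtils come from com.google.api.client.util):

BackOff backoff = FluentBackoff.DEFAULT.withMaxRetries(5).backoff();
while (true) {
  try {
    datastore.commit(request); // the RPC under retry
    break;
  } catch (DatastoreException e) {
    // In real code, only retryable status codes would be retried.
    if (!BackOffUtils.next(Sleeper.DEFAULT, backoff)) {
      throw e; // backoff exhausted
    }
  }
}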