Example 1 with Mutation

use of com.google.spanner.v1.Mutation in project google-cloud-java by GoogleCloudPlatform.

the class SessionImplTest method writeAtLeastOnce.

@Test
public void writeAtLeastOnce() throws ParseException {
    String timestampString = "2015-10-01T10:54:20.021Z";
    ArgumentCaptor<CommitRequest> commit = ArgumentCaptor.forClass(CommitRequest.class);
    CommitResponse response = CommitResponse.newBuilder().setCommitTimestamp(Timestamps.parse(timestampString)).build();
    Mockito.when(rpc.commit(commit.capture(), Mockito.eq(options))).thenReturn(response);
    Timestamp timestamp = session.writeAtLeastOnce(Arrays.asList(Mutation.newInsertBuilder("T").set("C").to("x").build()));
    assertThat(timestamp.getSeconds()).isEqualTo(utcTimeSeconds(2015, Calendar.OCTOBER, 1, 10, 54, 20));
    assertThat(timestamp.getNanos()).isEqualTo(TimeUnit.MILLISECONDS.toNanos(21));
    CommitRequest request = commit.getValue();
    assertThat(request.getSingleUseTransaction()).isNotNull();
    assertThat(request.getSingleUseTransaction().getReadWrite()).isNotNull();
    com.google.spanner.v1.Mutation mutation =
        com.google.spanner.v1.Mutation.newBuilder()
            .setInsert(
                Write.newBuilder()
                    .setTable("T")
                    .addColumns("C")
                    .addValues(
                        ListValue.newBuilder()
                            .addValues(com.google.protobuf.Value.newBuilder().setStringValue("x"))))
            .build();
    assertThat(request.getMutationsList()).containsExactly(mutation);
}
Also used : CommitRequest(com.google.spanner.v1.CommitRequest) CommitResponse(com.google.spanner.v1.CommitResponse) ByteString(com.google.protobuf.ByteString) Timestamp(com.google.cloud.Timestamp) Test(org.junit.Test)
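
The request that the test captures can also be assembled directly with the com.google.spanner.v1 proto builders. Below is a minimal sketch of that construction, assuming the standard generated builders; the class name and the session argument are illustrative, not taken from the test.

import com.google.protobuf.ListValue;
import com.google.protobuf.Value;
import com.google.spanner.v1.CommitRequest;
import com.google.spanner.v1.Mutation;
import com.google.spanner.v1.TransactionOptions;

public class CommitRequestSketch {

    // Builds the proto the test expects SessionImpl to send: a single-use read-write
    // transaction carrying one insert of value "x" into column C of table T.
    static CommitRequest singleUseInsert(String sessionName) {
        Mutation insert =
            Mutation.newBuilder()
                .setInsert(
                    Mutation.Write.newBuilder()
                        .setTable("T")
                        .addColumns("C")
                        .addValues(
                            ListValue.newBuilder()
                                .addValues(Value.newBuilder().setStringValue("x"))))
                .build();
        return CommitRequest.newBuilder()
            .setSession(sessionName) // placeholder session resource name
            .setSingleUseTransaction(
                TransactionOptions.newBuilder()
                    .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance()))
            .addMutations(insert)
            .build();
    }
}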

Example 2 with Mutation

use of com.google.bigtable.v2.Mutation in project beam by apache.

the class BigtableClientWrapper method writeRow.

void writeRow(String key, String table, String familyColumn, String columnQualifier, byte[] value, long timestampMicros) {
    Mutation.SetCell setCell =
        Mutation.SetCell.newBuilder()
            .setFamilyName(familyColumn)
            .setColumnQualifier(byteStringUtf8(columnQualifier))
            .setValue(byteString(value))
            .setTimestampMicros(timestampMicros)
            .build();
    Mutation mutation = Mutation.newBuilder().setSetCell(setCell).build();
    MutateRowRequest mutateRowRequest =
        MutateRowRequest.newBuilder()
            .setRowKey(byteStringUtf8(key))
            .setTableName(bigtableOptions.getInstanceName().toTableNameStr(table))
            .addMutations(mutation)
            .build();
    dataClient.mutateRow(mutateRowRequest);
}
Also used : MutateRowRequest(com.google.bigtable.v2.MutateRowRequest) Mutation(com.google.bigtable.v2.Mutation)
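
writeRow sends a single SetCell per request, but a MutateRowRequest can carry several mutations for the same row, which Bigtable applies atomically and in order. A hedged sketch of that variant follows; the column family, qualifiers, and class name are illustrative.

import com.google.bigtable.v2.MutateRowRequest;
import com.google.bigtable.v2.Mutation;
import com.google.protobuf.ByteString;

public class MutateRowSketch {

    // Sets one cell and deletes another cell of the same row in a single atomic request.
    static MutateRowRequest setAndDelete(String tableName, String rowKey) {
        Mutation setCell =
            Mutation.newBuilder()
                .setSetCell(
                    Mutation.SetCell.newBuilder()
                        .setFamilyName("cf") // illustrative column family
                        .setColumnQualifier(ByteString.copyFromUtf8("col"))
                        .setValue(ByteString.copyFromUtf8("value"))
                        .setTimestampMicros(-1)) // -1 asks the server to assign the timestamp
                .build();
        Mutation deleteCell =
            Mutation.newBuilder()
                .setDeleteFromColumn(
                    Mutation.DeleteFromColumn.newBuilder()
                        .setFamilyName("cf")
                        .setColumnQualifier(ByteString.copyFromUtf8("stale")))
                .build();
        return MutateRowRequest.newBuilder()
            .setTableName(tableName) // fully qualified table name, as in writeRow above
            .setRowKey(ByteString.copyFromUtf8(rowKey))
            .addMutations(setCell)
            .addMutations(deleteCell)
            .build();
    }
}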

Example 3 with Mutation

use of com.google.bigtable.v2.Mutation in project beam by apache.

the class BigtableIOTest method testWritingAndWaitingOnResults.

/**
 * Tests that the outputs of the Bigtable writer are correctly windowed, and can be used in a
 * Wait.on transform as the trigger.
 */
@Test
public void testWritingAndWaitingOnResults() throws Exception {
    final String table = "table";
    final String key = "key";
    final String value = "value";
    service.createTable(table);
    Instant elementTimestamp = Instant.parse("2019-06-10T00:00:00");
    Duration windowDuration = Duration.standardMinutes(1);
    TestStream<KV<ByteString, Iterable<Mutation>>> writeInputs =
        TestStream.create(bigtableCoder)
            .advanceWatermarkTo(elementTimestamp)
            .addElements(makeWrite(key, value))
            .advanceWatermarkToInfinity();
    TestStream<String> testInputs =
        TestStream.create(StringUtf8Coder.of())
            .advanceWatermarkTo(elementTimestamp)
            .addElements("done")
            .advanceWatermarkToInfinity();
    PCollection<BigtableWriteResult> writes =
        p.apply("rows", writeInputs)
            .apply("window rows",
                Window.<KV<ByteString, Iterable<Mutation>>>into(FixedWindows.of(windowDuration))
                    .withAllowedLateness(Duration.ZERO))
            .apply("write", defaultWrite.withTableId(table).withWriteResults());
    PCollection<String> inputs =
        p.apply("inputs", testInputs)
            .apply("window inputs", Window.into(FixedWindows.of(windowDuration)))
            .apply("wait", Wait.on(writes));
    BoundedWindow expectedWindow = new IntervalWindow(elementTimestamp, windowDuration);
    PAssert.that(inputs).inWindow(expectedWindow).containsInAnyOrder("done");
    p.run();
}
Also used : ByteString(com.google.protobuf.ByteString) Instant(org.joda.time.Instant) Duration(org.joda.time.Duration) KV(org.apache.beam.sdk.values.KV) BoundedWindow(org.apache.beam.sdk.transforms.windowing.BoundedWindow) Mutation(com.google.bigtable.v2.Mutation) IntervalWindow(org.apache.beam.sdk.transforms.windowing.IntervalWindow) Test(org.junit.Test)
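
Outside the TestStream harness the same pattern gates a downstream stage on the write results: apply the write with withWriteResults() and use the resulting PCollection as the Wait.on signal. A minimal sketch, assuming Beam's BigtableIO; the project, instance, and table ids and the class and method names are placeholders, and both inputs are assumed to be windowed as in the test.

import com.google.bigtable.v2.Mutation;
import com.google.protobuf.ByteString;
import org.apache.beam.sdk.io.gcp.bigtable.BigtableIO;
import org.apache.beam.sdk.io.gcp.bigtable.BigtableWriteResult;
import org.apache.beam.sdk.transforms.Wait;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;

public class WaitOnWritesSketch {

    // Holds back every element of mainInput until the Bigtable writes for its window complete.
    static PCollection<String> gateOnWrites(
            PCollection<KV<ByteString, Iterable<Mutation>>> rows, PCollection<String> mainInput) {
        PCollection<BigtableWriteResult> writes =
            rows.apply("write",
                BigtableIO.write()
                    .withProjectId("project") // placeholder ids
                    .withInstanceId("instance")
                    .withTableId("table")
                    .withWriteResults());
        return mainInput.apply("wait", Wait.on(writes));
    }
}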

Example 4 with Mutation

use of com.google.bigtable.v2.Mutation in project beam by apache.

the class BigtableIOTest method makeWrite.

/**
 * Helper function to make a single row mutation to be written.
 */
private static KV<ByteString, Iterable<Mutation>> makeWrite(String key, String value) {
    ByteString rowKey = ByteString.copyFromUtf8(key);
    Iterable<Mutation> mutations =
        ImmutableList.of(
            Mutation.newBuilder()
                .setSetCell(SetCell.newBuilder().setValue(ByteString.copyFromUtf8(value)))
                .build());
    return KV.of(rowKey, mutations);
}
Also used : ByteString(com.google.protobuf.ByteString) Mutation(com.google.bigtable.v2.Mutation)
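
The test's fake service accepts a SetCell that only carries a value; a write against a real table would at least need the column family, and usually a qualifier and timestamp as well. A hedged variant of the helper with those fields filled in (the parameters and class name are illustrative):

import com.google.bigtable.v2.Mutation;
import com.google.bigtable.v2.Mutation.SetCell;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;
import org.apache.beam.sdk.values.KV;

public class MakeWriteSketch {

    // Like makeWrite above, but also sets the family, qualifier, and timestamp of the cell.
    static KV<ByteString, Iterable<Mutation>> makeWrite(
            String key, String family, String qualifier, String value) {
        Mutation mutation =
            Mutation.newBuilder()
                .setSetCell(
                    SetCell.newBuilder()
                        .setFamilyName(family)
                        .setColumnQualifier(ByteString.copyFromUtf8(qualifier))
                        .setValue(ByteString.copyFromUtf8(value))
                        .setTimestampMicros(-1)) // -1 asks the server to assign the timestamp
                .build();
        Iterable<Mutation> mutations = ImmutableList.of(mutation);
        return KV.of(ByteString.copyFromUtf8(key), mutations);
    }
}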

Example 5 with Mutation

use of com.google.datastore.v1.Mutation in project beam by apache.

the class V1WriteIT method testDatastoreWriterFnWithDuplicatedEntities.

/**
 * Tests {@link DatastoreV1.DatastoreWriterFn} with duplicated entries. Once a duplicated entry is
 * found, the batch is flushed.
 */
@Test
public void testDatastoreWriterFnWithDuplicatedEntities() throws Exception {
    List<Mutation> mutations = new ArrayList<>(200);
    V1TestOptions options = TestPipeline.testingPipelineOptions().as(V1TestOptions.class);
    Pipeline pipeline = TestPipeline.create(options);
    for (int i = 1; i <= 200; i++) {
        Key key = makeKey("key" + i, i + 1).build();
        mutations.add(makeUpsert(Entity.newBuilder().setKey(key).build()).build());
        if (i % 30 == 0) {
            mutations.add(makeUpsert(Entity.newBuilder().setKey(key).build()).build());
        }
    }
    DatastoreV1.DatastoreWriterFn datastoreWriter =
        new DatastoreV1.DatastoreWriterFn(
            TestPipeline.testingPipelineOptions().as(GcpOptions.class).getProject(), null);
    PTransform<PCollection<? extends Mutation>, PCollection<Void>> datastoreWriterTransform =
        ParDo.of(datastoreWriter);
    // The next three lines turn the original ArrayList into the single element of the first
    // PCollection.
    List<Mutation> newArrayList = new ArrayList<>(mutations);
    Create.Values<Iterable<Mutation>> mutationIterable =
        Create.of(Collections.singleton(newArrayList));
    PCollection<Iterable<Mutation>> input = pipeline.apply(mutationIterable);
    // Flatten expands that single Iterable into individual Mutation elements within the same
    // bundle, so the mutations are processed in the same order in which they were added to the
    // original list.
    input.apply(Flatten.<Mutation>iterables()).apply(datastoreWriterTransform);
    PipelineResult pResult = pipeline.run();
    MetricQueryResults metricResults =
        pResult
            .metrics()
            .queryMetrics(
                MetricsFilter.builder()
                    .addNameFilter(
                        MetricNameFilter.named(DatastoreV1.DatastoreWriterFn.class, "batchSize"))
                    .build());
    AtomicLong timesCommitted = new AtomicLong();
    metricResults.getDistributions().forEach(distribution -> {
        if (distribution.getName().getName().equals("batchSize")) {
            timesCommitted.set(distribution.getCommitted().getCount());
        }
    });
    assertEquals(7, timesCommitted.get());
}
Also used : ArrayList(java.util.ArrayList) MetricQueryResults(org.apache.beam.sdk.metrics.MetricQueryResults) PipelineResult(org.apache.beam.sdk.PipelineResult) TestPipeline(org.apache.beam.sdk.testing.TestPipeline) Pipeline(org.apache.beam.sdk.Pipeline) PCollection(org.apache.beam.sdk.values.PCollection) AtomicLong(java.util.concurrent.atomic.AtomicLong) Create(org.apache.beam.sdk.transforms.Create) Mutation(com.google.datastore.v1.Mutation) Key(com.google.datastore.v1.Key) DatastoreHelper.makeKey(com.google.datastore.v1.client.DatastoreHelper.makeKey) Test(org.junit.Test)
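
The expected value of 7 follows from where the duplicates land: the loop adds a duplicate at i = 30, 60, 90, 120, 150, and 180, each of which triggers a flush per the Javadoc above, and the writer flushes the remaining batch once the bundle finishes. A hedged back-of-the-envelope check, assuming no size-based flush happens in between:

    // Duplicates occur at every 30th key out of 200, each forcing one flush; the final partial
    // batch is flushed when the bundle finishes.
    int duplicateFlushes = 200 / 30;            // i = 30, 60, 90, 120, 150, 180 -> 6 flushes
    int expectedCommits = duplicateFlushes + 1; // plus the flush at bundle completion -> 7
    assertEquals(7, expectedCommits);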

Aggregations

Test (org.junit.Test): 51
Mutation (com.google.bigtable.v2.Mutation): 35
ArrayList (java.util.ArrayList): 33
ByteString (com.google.protobuf.ByteString): 31
KV (org.apache.beam.sdk.values.KV): 15
CommitRequest (com.google.spanner.v1.CommitRequest): 13
SetCell (com.google.bigtable.v2.Mutation.SetCell): 9
CommitResponse (com.google.spanner.v1.CommitResponse): 9
Mutation (com.google.spanner.v1.Mutation): 9
Mutation (com.google.datastore.v1.Mutation): 8
Row (org.apache.beam.sdk.values.Row): 8
Schema (org.apache.beam.sdk.schemas.Schema): 7
MutateRowRequest (com.google.bigtable.v2.MutateRowRequest): 6
List (java.util.List): 6
Timestamp (com.google.cloud.Timestamp): 5
RowMutation (com.google.cloud.bigtable.data.v2.models.RowMutation): 5
Arrays (java.util.Arrays): 5
Map (java.util.Map): 5
Put (org.apache.hadoop.hbase.client.Put): 5
Mutation (com.airbnb.jitney.event.spinaltap.v1.Mutation): 4