Use of com.google.cloud.spanner.Mutation in the project spring-cloud-gcp by spring-cloud:
class SpannerTemplateTests, method insertTest.
@Test
public void insertTest() {
    // Arrange: stub the mutation factory to produce a fixed insert mutation
    // for the entity under test.
    TestEntity entity = new TestEntity();
    Mutation mutation = Mutation.newInsertBuilder("custom_test_table").build();
    when(this.mutationFactory.insert(entity)).thenReturn(mutation);

    // Act.
    this.spannerTemplate.insert(entity);

    // Assert: the template forwards exactly that mutation, as a one-element
    // list, to the database client.
    verify(this.databaseClient, times(1)).write(eq(Arrays.asList(mutation)));
}
Use of com.google.cloud.spanner.Mutation in the project spring-cloud-gcp by spring-cloud:
class SpannerMutationFactoryImpl, method saveObject.
/**
 * Builds a single write mutation for the given object.
 *
 * <p>Resolves the mapped table for the object's type, then lets the converter
 * populate a write builder restricted to the requested columns.
 */
private Mutation saveObject(Op op, Object object, Set<String> includeColumns) {
    SpannerPersistentEntity<?> entity =
            this.spannerMappingContext.getPersistentEntity(object.getClass());
    Mutation.WriteBuilder builder = writeBuilder(op, entity.tableName());
    this.spannerConverter.write(object, builder, includeColumns);
    return builder.build();
}
Use of com.google.cloud.spanner.Mutation in the project google-cloud-java by GoogleCloudPlatform:
class DatabaseClientSnippets, method write.
/**
 * Example of a blind write.
 */
// [TARGET write(Iterable)]
// [VARIABLE my_singer_id]
public void write(long singerId) {
  // [START write]
  // Table name is "Singers", matching the schema used by the other snippets in
  // this class (the original said "Singer", which is inconsistent with them).
  Mutation mutation =
      Mutation.newInsertBuilder("Singers")
          .set("SingerId")
          .to(singerId)
          .set("FirstName")
          .to("Billy")
          .set("LastName")
          .to("Joel")
          .build();
  dbClient.write(Collections.singletonList(mutation));
  // [END write]
}
Use of com.google.cloud.spanner.Mutation in the project google-cloud-java by GoogleCloudPlatform:
class DatabaseClientSnippets, method writeAtLeastOnce.
/**
 * Example of an unprotected (at-least-once) blind write.
 */
// [TARGET writeAtLeastOnce(Iterable)]
// [VARIABLE my_singer_id]
public void writeAtLeastOnce(long singerId) {
  // [START writeAtLeastOnce]
  Mutation mutation =
      Mutation.newInsertBuilder("Singers")
          .set("SingerId")
          .to(singerId)
          .set("FirstName")
          .to("Billy")
          .set("LastName")
          .to("Joel")
          .build();
  dbClient.writeAtLeastOnce(Collections.singletonList(mutation));
  // [END writeAtLeastOnce]
}
Use of com.google.cloud.spanner.Mutation in the project beam by apache:
class SpannerChangeStreamOrderedWithinKeyGloballyIT, method testOrderedWithinKey.
@Test
public void testOrderedWithinKey() {
    final SpannerConfig spannerConfig =
        SpannerConfig.create()
            .withProjectId(projectId)
            .withInstanceId(instanceId)
            .withDatabaseId(databaseId);
    // The time increment interval at which to flush data changes ordered by key.
    final long timeIncrementInSeconds = 70;
    // Commit an initial transaction to get the timestamp to start reading from.
    List<Mutation> mutations = new ArrayList<>();
    mutations.add(insertRecordMutation(0));
    final com.google.cloud.Timestamp startTimestamp = databaseClient.write(mutations);
    // This will be the first batch of transactions that will have strict timestamp ordering
    // per key.
    writeTransactionsToDatabase();
    // Sleep the time increment interval so the next batch gets later commit timestamps.
    sleepSeconds(timeIncrementInSeconds);
    // This will be the second batch of transactions that will have strict timestamp ordering
    // per key.
    writeTransactionsToDatabase();
    // Sleep the time increment interval.
    sleepSeconds(timeIncrementInSeconds);
    // This will be the final batch of transactions that will have strict timestamp ordering
    // per key.
    com.google.cloud.Timestamp endTimestamp = writeTransactionsToDatabase();
    LOG.debug("Reading change streams from {} to {}", startTimestamp.toString(), endTimestamp.toString());
    final PCollection<String> tokens =
        pipeline
            .apply(
                SpannerIO.readChangeStream()
                    .withSpannerConfig(spannerConfig)
                    .withChangeStreamName(changeStreamName)
                    .withMetadataDatabase(databaseId)
                    .withInclusiveStartAt(startTimestamp)
                    .withInclusiveEndAt(endTimestamp))
            .apply(ParDo.of(new BreakRecordByModFn()))
            .apply(ParDo.of(new KeyByIdFn()))
            .apply(ParDo.of(new KeyValueByCommitTimestampAndTransactionIdFn<>()))
            .apply(ParDo.of(new BufferKeyUntilOutputTimestamp(endTimestamp, timeIncrementInSeconds)))
            .apply(ParDo.of(new ToStringFn()));
    // Assert that the returned PCollection contains one entry per key for the committed
    // transactions, and that each entry contains the mutations in commit timestamp order.
    // Note that if inserts and updates to the same key are in the same transaction, the change
    // record for that transaction will only contain a record for the last update for that key.
    // Note that if an insert then a delete for a key happens in the same transaction, there will be
    // change records for that key.
    PAssert.that(tokens).containsInAnyOrder(// First batch of records ordered within key.
    "{\"SingerId\":\"0\"}\n" + "{\"FirstName\":\"Inserting mutation 0\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"1\"}\n" + "{\"FirstName\":\"Inserting mutation 1\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 1\"};" + "Deleted record;" + "{\"FirstName\":\"Inserting mutation 1\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"2\"}\n" + "{\"FirstName\":\"Inserting mutation 2\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 2\"};" + "Deleted record;", "{\"SingerId\":\"3\"}\n" + "{\"FirstName\":\"Inserting mutation 3\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 3\"};" + "Deleted record;", "{\"SingerId\":\"4\"}\n" + "{\"FirstName\":\"Inserting mutation 4\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"5\"}\n" + "{\"FirstName\":\"Updating mutation 5\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 5\"};" + "Deleted record;", // Second batch of records ordered within key.
    "{\"SingerId\":\"1\"}\n" + "{\"FirstName\":\"Inserting mutation 1\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 1\"};" + "Deleted record;" + "{\"FirstName\":\"Inserting mutation 1\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"2\"}\n" + "{\"FirstName\":\"Inserting mutation 2\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 2\"};" + "Deleted record;", "{\"SingerId\":\"3\"}\n" + "{\"FirstName\":\"Inserting mutation 3\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 3\"};" + "Deleted record;", "{\"SingerId\":\"4\"}\n" + "{\"FirstName\":\"Inserting mutation 4\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"5\"}\n" + "{\"FirstName\":\"Updating mutation 5\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 5\"};" + "Deleted record;", // Third batch of records ordered within key.
    "{\"SingerId\":\"1\"}\n" + "{\"FirstName\":\"Inserting mutation 1\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 1\"};" + "Deleted record;" + "{\"FirstName\":\"Inserting mutation 1\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"2\"}\n" + "{\"FirstName\":\"Inserting mutation 2\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 2\"};" + "Deleted record;", "{\"SingerId\":\"3\"}\n" + "{\"FirstName\":\"Inserting mutation 3\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 3\"};" + "Deleted record;", "{\"SingerId\":\"4\"}\n" + "{\"FirstName\":\"Inserting mutation 4\",\"LastName\":null,\"SingerInfo\":null};" + "Deleted record;", "{\"SingerId\":\"5\"}\n" + "{\"FirstName\":\"Updating mutation 5\",\"LastName\":null,\"SingerInfo\":null};" + "{\"FirstName\":\"Updating mutation 5\"};" + "Deleted record;");
    pipeline.run().waitUntilFinish();
}

/**
 * Sleeps for the given number of seconds. Unlike the previous inlined
 * try/catch, an {@link InterruptedException} re-asserts the thread's
 * interrupt status instead of being silently swallowed.
 */
private static void sleepSeconds(long seconds) {
    try {
        Thread.sleep(seconds * 1000);
    } catch (InterruptedException e) {
        // Preserve the interrupt so the test framework / callers can observe it.
        Thread.currentThread().interrupt();
        System.out.println(e);
    }
}
Aggregations