Use of com.google.spanner.v1.Mutation in project pgadapter by GoogleCloudPlatform:
class CopyInMockServerTest, method testCopyInExceedsCommitSizeLimit_BatchesInNonAtomicMode.
/**
 * Verifies that a COPY which exceeds the {@code copy_in_commit_limit} is split into multiple
 * commits when the connection is in PARTITIONED_NON_ATOMIC autocommit DML mode: three rows with a
 * 10-byte commit limit must produce three separate single-mutation commits.
 */
@Test
public void testCopyInExceedsCommitSizeLimit_BatchesInNonAtomicMode() throws SQLException, IOException {
setupCopyInformationSchemaResults();
try (Connection connection = DriverManager.getConnection(createUrl())) {
// Force a tiny commit limit so the 3-row copy cannot fit in one commit.
System.setProperty("copy_in_commit_limit", "10");
// Close the Statement instead of leaking it (fully qualified to avoid requiring an import).
try (java.sql.Statement statement = connection.createStatement()) {
statement.execute("set spanner.autocommit_dml_mode='partitioned_non_atomic'");
}
CopyManager copyManager = new CopyManager(connection.unwrap(BaseConnection.class));
copyManager.copyIn("COPY users FROM STDIN;", new StringReader("5\t5\t5\n6\t6\t6\n7\t7\t7\n"));
} finally {
// Always clear the system property so it cannot bleed into other tests.
System.getProperties().remove("copy_in_commit_limit");
}
List<CommitRequest> commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class);
// Each row should have been committed on its own.
assertEquals(3, commitRequests.size());
for (CommitRequest request : commitRequests) {
assertEquals(1, request.getMutationsCount());
Mutation mutation = request.getMutations(0);
assertEquals(OperationCase.INSERT, mutation.getOperationCase());
assertEquals(1, mutation.getInsert().getValuesCount());
}
}
Use of com.google.spanner.v1.Mutation in project pgadapter by GoogleCloudPlatform:
class MutationWriter, method buildMutation.
/**
 * Converts one CSV record into a Spanner {@link Mutation} for {@code this.tableName}, converting
 * each column's string value to the Cloud Spanner type declared in {@code this.tableColumns}.
 *
 * <p>Conversion failures are reported via {@code handleError} and then rethrown as
 * INVALID_ARGUMENT SpannerExceptions with a message describing the offending value or column.
 */
private Mutation buildMutation(CSVRecord record) {
// NOTE(review): constructor args to TimestampUtils are not documented here — presumably
// "no binary transfer" and "no time-zone provider"; confirm against the pgjdbc API.
TimestampUtils timestampUtils = new TimestampUtils(false, () -> null);
WriteBuilder builder;
// Use insert-or-update when requested so re-copied rows overwrite
// existing records instead of failing on a UniqueKeyConstraint violation.
if (this.insertOrUpdate) {
builder = Mutation.newInsertOrUpdateBuilder(this.tableName);
} else {
builder = Mutation.newInsertBuilder(this.tableName);
}
// Iterate through all table column to copy into
for (String columnName : this.tableColumns.keySet()) {
TypeCode columnType = this.tableColumns.get(columnName);
// Initialized outside the try so the error message below can include the raw value.
String recordValue = "";
try {
recordValue = record.get(columnName).trim();
switch(columnType) {
case STRING:
builder.set(columnName).to(recordValue);
break;
case JSON:
builder.set(columnName).to(Value.json(recordValue));
break;
case BOOL:
// Boolean.parseBoolean never throws: anything that is not "true" becomes false.
builder.set(columnName).to(Boolean.parseBoolean(recordValue));
break;
case INT64:
builder.set(columnName).to(Long.parseLong(recordValue));
break;
case FLOAT64:
builder.set(columnName).to(Double.parseDouble(recordValue));
break;
case NUMERIC:
builder.set(columnName).to(Value.pgNumeric(recordValue));
break;
case BYTES:
// Only the PostgreSQL hex format ("\x...") is decoded.
// NOTE(review): a BYTES value that does not start with "\x" is silently
// skipped (the column is simply not set) — confirm this is intentional.
if (recordValue.startsWith("\\x")) {
builder.set(columnName).to(ByteArray.copyFrom(Hex.decodeHex(recordValue.substring(2))));
}
break;
case DATE:
builder.set(columnName).to(Date.parseDate(recordValue));
break;
case TIMESTAMP:
// Parse with pgjdbc's TimestampUtils, then convert to the Cloud client type.
Timestamp timestamp = timestampUtils.toTimestamp(null, recordValue);
builder.set(columnName).to(com.google.cloud.Timestamp.of(timestamp));
break;
}
} catch (NumberFormatException | DateTimeParseException e) {
// Numeric/date parse failures: report the raw value in PostgreSQL-style wording.
handleError(e);
throw SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "Invalid input syntax for type " + columnType.toString() + ":" + "\"" + recordValue + "\"", e);
} catch (IllegalArgumentException e) {
// Other conversion failures (e.g. invalid hex, invalid JSON): report the column name.
handleError(e);
throw SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "Invalid input syntax for column \"" + columnName + "\"", e);
} catch (Exception e) {
// Anything unexpected is wrapped as a SpannerException without changing its message.
handleError(e);
throw SpannerExceptionFactory.asSpannerException(e);
}
}
return builder.build();
}
Use of com.google.spanner.v1.Mutation in project beam by apache:
class BigtableIOTest, method testWritingFailsTableDoesNotExist.
/**
 * Verifies that applying a write to a table that does not exist fails validation with an
 * {@link IllegalArgumentException} naming the missing table.
 */
@Test
public void testWritingFailsTableDoesNotExist() throws Exception {
final String missingTable = "TEST-TABLE";
// An empty input collection of (row key, mutations) pairs; the coder mirrors BigtableIO's
// expected element type.
KvCoder<ByteString, Iterable<Mutation>> coder =
KvCoder.of(ByteStringCoder.of(), IterableCoder.of(ProtoCoder.of(Mutation.class)));
PCollection<KV<ByteString, Iterable<Mutation>>> input = p.apply(Create.empty(coder));
// write.validate() is expected to reject the missing table as soon as the transform is applied.
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage(String.format("Table %s does not exist", missingTable));
input.apply("write", defaultWrite.withTableId(missingTable));
p.run();
}
Use of com.google.spanner.v1.Mutation in project beam by apache:
class DatastoreV1Test, method testDeleteEntities.
/**
 * Verifies that an entity with a valid key is transformed into the corresponding delete mutation.
 */
@Test
public void testDeleteEntities() throws Exception {
// Build an entity whose key is fully specified (kind "bird", name "finch").
Entity bird = Entity.newBuilder().setKey(makeKey("bird", "finch").build()).build();
// The function should produce a delete mutation keyed by the entity's own key.
Mutation expected = makeDelete(bird.getKey()).build();
assertEquals(expected, new DeleteEntityFn().apply(bird));
}
Use of com.google.spanner.v1.Mutation in project beam by apache:
class DatastoreV1Test, method testDatatoreWriterFnWithLargeEntities.
/**
 * Tests {@link DatastoreWriterFn} with large entities that need to be split into more batches.
 */
@Test
public void testDatatoreWriterFnWithLargeEntities() throws Exception {
List<Mutation> upserts = new ArrayList<>();
int serializedEntitySize = 0;
for (int i = 0; i < 12; ++i) {
// Each entity carries a ~900 KB unindexed string property, forcing the writer to batch.
Entity entity =
Entity.newBuilder()
.setKey(makeKey("key" + i, i + 1))
.putProperties("long", makeValue(new String(new char[900_000])).setExcludeFromIndexes(true).build())
.build();
// Take the size of any one entity.
serializedEntitySize = entity.getSerializedSize();
upserts.add(makeUpsert(entity).build());
}
DatastoreWriterFn datastoreWriter = new DatastoreWriterFn(StaticValueProvider.of(PROJECT_ID), null, mockDatastoreFactory, new FakeWriteBatcher());
DoFnTester<Mutation, Void> tester = DoFnTester.of(datastoreWriter);
tester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
tester.processBundle(upserts);
// This test is over-specific currently; it requires that we split the 12 entity writes into 3
// requests, but we only need each CommitRequest to be less than 10MB in size.
int batchSize = DATASTORE_BATCH_UPDATE_BYTES_LIMIT / serializedEntitySize;
for (int from = 0; from < upserts.size(); from += batchSize) {
int to = Math.min(upserts.size(), from + batchSize);
// Verify every batch request was made with exactly the expected slice of mutations.
CommitRequest expected =
CommitRequest.newBuilder()
.setMode(CommitRequest.Mode.NON_TRANSACTIONAL)
.addAllMutations(upserts.subList(from, to))
.build();
verify(mockDatastore).commit(expected);
}
}
Aggregations