Use of com.google.cloud.spanner.connection.Connection in project pgadapter by GoogleCloudPlatform.
From the class MutationWriterTest, the method testWriteMutations_NonAtomic_SucceedsForLargeCommit:
@Test
public void testWriteMutations_NonAtomic_SucceedsForLargeCommit() throws Exception {
  System.setProperty("copy_in_commit_limit", "80");
  try {
    Map<String, TypeCode> tableColumns =
        ImmutableMap.of("number", TypeCode.INT64, "name", TypeCode.STRING);
    CSVFormat format =
        CSVFormat.POSTGRESQL_TEXT.builder()
            .setHeader(tableColumns.keySet().toArray(new String[0]))
            .build();
    Connection connection = mock(Connection.class);
    DatabaseClient databaseClient = mock(DatabaseClient.class);
    MutationWriter mutationWriter =
        new MutationWriter(
            CopyTransactionMode.ImplicitNonAtomic, connection, databaseClient, "numbers",
            tableColumns, /* indexedColumnsCount= */ 1, format, false);
    mutationWriter.addCopyData(
        "1\t\"One\"\n2\t\"Two\"\n3\t\"Three\"\n4\t\"Four\"\n5\t\"Five\"\n"
            .getBytes(StandardCharsets.UTF_8));
    mutationWriter.close();
    long updateCount = mutationWriter.call();
    assertEquals(5L, updateCount);
    // We expect four batches, because:
    // 1. The commit limit is 80 bytes. That is for safety halved down to 40 bytes.
    // 2. The first two records are 20 bytes each.
    // 3. The first batch therefore contains 20 + 20 bytes.
    // 4. The second batch contains 28 bytes. (3 - 'Three')
    // 5. The third batch contains 24 bytes. (4 - 'Four')
    // 6. The fourth batch contains 24 bytes. (5 - 'Five')
    verify(databaseClient, times(4)).write(anyIterable());
  } finally {
    System.getProperties().remove("copy_in_commit_limit");
  }
}
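The batching arithmetic in the comments can be reproduced standalone. Below is a minimal sketch, assuming the size estimates stated in the test's own comments (8 bytes per INT64 value, 4 bytes per STRING character) and a greedy fill-until-the-halved-limit strategy; the real MutationWriter's sizing logic is not shown here.

import java.util.ArrayList;
import java.util.List;

public class CopyBatchingSketch {
  // Assumed size estimate, taken from the comments in the test above:
  // 8 bytes per INT64 value plus 4 bytes per character of the STRING value.
  static long recordSize(String name) {
    return 8L + 4L * name.length();
  }

  public static void main(String[] args) {
    long commitLimit = 80L / 2; // The 80-byte limit is halved to 40 for safety.
    List<List<String>> batches = new ArrayList<>();
    List<String> current = new ArrayList<>();
    long currentSize = 0L;
    for (String name : new String[] {"One", "Two", "Three", "Four", "Five"}) {
      long size = recordSize(name);
      // Greedy strategy: start a new batch when the next record would overflow.
      if (!current.isEmpty() && currentSize + size > commitLimit) {
        batches.add(current);
        current = new ArrayList<>();
        currentSize = 0L;
      }
      current.add(name);
      currentSize += size;
    }
    batches.add(current);
    // Prints: 4 batches: [[One, Two], [Three], [Four], [Five]]
    System.out.println(batches.size() + " batches: " + batches);
  }
}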
Use of com.google.cloud.spanner.connection.Connection in project pgadapter by GoogleCloudPlatform.
From the class MutationWriterTest, the method testWriteMutations_FailsForLargeCommit:
@Test
public void testWriteMutations_FailsForLargeCommit() throws Exception {
  System.setProperty("copy_in_commit_limit", "30");
  try {
    Map<String, TypeCode> tableColumns =
        ImmutableMap.of("number", TypeCode.INT64, "name", TypeCode.STRING);
    CSVFormat format =
        CSVFormat.POSTGRESQL_TEXT.builder()
            .setHeader(tableColumns.keySet().toArray(new String[0]))
            .build();
    Connection connection = mock(Connection.class);
    MutationWriter mutationWriter =
        new MutationWriter(
            CopyTransactionMode.ImplicitAtomic, connection, "numbers", tableColumns,
            /* indexedColumnsCount= */ 0, format, false);
    // The calculated size of each of these mutations is:
    // 1. 8 bytes for the INT64 value.
    // 2. 3 characters * 4 bytes per character = 12 bytes for the STRING value.
    // 3. Total: 20 bytes per record, 40 bytes for the entire batch.
    mutationWriter.addCopyData("1\t\"One\"\n2\t\"Two\"\n".getBytes(StandardCharsets.UTF_8));
    mutationWriter.close();
    SpannerException exception = assertThrows(SpannerException.class, mutationWriter::call);
    assertTrue(exception.getMessage().contains("Commit size: 40 has exceeded the limit: 30"));
    verify(connection, never()).write(anyIterable());
  } finally {
    System.getProperties().remove("copy_in_commit_limit");
  }
}
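In ImplicitAtomic mode there is no batching fallback, so the whole COPY payload must fit within the limit. Below is a minimal sketch of the size check under the same assumed estimates as above; only the arithmetic and the message mirror the test, the rest of the wiring is illustrative.

public class AtomicCommitLimitSketch {
  public static void main(String[] args) {
    long commitLimit = 30L;
    // Same assumed estimates as above: 8 bytes per INT64, 4 bytes per character.
    long commitSize = (8L + 4L * "One".length()) + (8L + 4L * "Two".length()); // 20 + 20 = 40
    if (commitSize > commitLimit) {
      // Mirrors the message asserted by the test; in ImplicitAtomic mode
      // the writer fails instead of splitting the payload into batches.
      System.out.println("Commit size: " + commitSize + " has exceeded the limit: " + commitLimit);
    }
  }
}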
Use of com.google.cloud.spanner.connection.Connection in project java-spanner-jdbc by googleapis.
From the class JdbcPreparedStatementTest, the method testGetResultSetMetadata:
@Test
public void testGetResultSetMetadata() throws SQLException {
  final String sql = "SELECT * FROM FOO";
  Connection connection = mock(Connection.class);
  ResultSet rs =
      ResultSets.forRows(
          Type.struct(
              StructField.of("ID", Type.int64()),
              StructField.of("NAME", Type.string()),
              StructField.of("AMOUNT", Type.float64()),
              dialect == Dialect.POSTGRESQL
                  ? StructField.of("PERCENTAGE", Type.pgNumeric())
                  : StructField.of("PERCENTAGE", Type.numeric())),
          Collections.singletonList(
              Struct.newBuilder()
                  .set("ID").to(1L)
                  .set("NAME").to("foo")
                  .set("AMOUNT").to(Math.PI)
                  .set("PERCENTAGE").to(dialect == Dialect.POSTGRESQL
                      ? Value.pgNumeric("1.23") : Value.numeric(new BigDecimal("1.23")))
                  .build()));
  when(connection.analyzeQuery(Statement.of(sql), QueryAnalyzeMode.PLAN)).thenReturn(rs);
  try (JdbcPreparedStatement ps = new JdbcPreparedStatement(createMockConnection(connection), sql)) {
    ResultSetMetaData metadata = ps.getMetaData();
    assertEquals(4, metadata.getColumnCount());
    assertEquals("ID", metadata.getColumnLabel(1));
    assertEquals("NAME", metadata.getColumnLabel(2));
    assertEquals("AMOUNT", metadata.getColumnLabel(3));
    assertEquals("PERCENTAGE", metadata.getColumnLabel(4));
    assertEquals(Types.BIGINT, metadata.getColumnType(1));
    assertEquals(Types.NVARCHAR, metadata.getColumnType(2));
    assertEquals(Types.DOUBLE, metadata.getColumnType(3));
    assertEquals(Types.NUMERIC, metadata.getColumnType(4));
  }
}
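The assertions encode a mapping from Cloud Spanner column types to java.sql.Types codes: INT64 to BIGINT, STRING to NVARCHAR, FLOAT64 to DOUBLE, and NUMERIC or PG_NUMERIC (depending on dialect) to NUMERIC. Below is a hypothetical helper that makes the mapping explicit; the real driver performs this conversion internally in its ResultSetMetaData implementation, and this switch is only an illustration.

import java.sql.Types;
import com.google.cloud.spanner.Type;

public class TypeMappingSketch {
  // Hypothetical helper, for illustration only: mirrors the type mapping
  // that the assertions in the test above encode.
  static int toSqlType(Type.Code code) {
    switch (code) {
      case INT64:      return Types.BIGINT;   // ID
      case STRING:     return Types.NVARCHAR; // NAME
      case FLOAT64:    return Types.DOUBLE;   // AMOUNT
      case NUMERIC:                           // PERCENTAGE (GoogleSQL dialect)
      case PG_NUMERIC: return Types.NUMERIC;  // PERCENTAGE (PostgreSQL dialect)
      default:
        throw new IllegalArgumentException("unhandled type: " + code);
    }
  }
}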
Use of com.google.cloud.spanner.connection.Connection in project java-spanner-jdbc by googleapis.
From the class JdbcPreparedStatementTest, the method testGetResultSetMetaDataForDml:
@Test
public void testGetResultSetMetaDataForDml() throws SQLException {
  Connection connection = mock(Connection.class);
  try (JdbcPreparedStatement ps =
      new JdbcPreparedStatement(
          createMockConnection(connection), "UPDATE FOO SET BAR=1 WHERE TRUE")) {
    ResultSetMetaData metadata = ps.getMetaData();
    assertEquals(0, metadata.getColumnCount());
  }
}
Use of com.google.cloud.spanner.connection.Connection in project java-spanner-jdbc by googleapis.
From the class JdbcStatementTest, the method testInternalExecuteLargeUpdate:
@Test
public void testInternalExecuteLargeUpdate() throws SQLException {
  JdbcConnection connection = mock(JdbcConnection.class);
  when(connection.getDialect()).thenReturn(dialect);
  Connection spannerConnection = mock(Connection.class);
  when(connection.getSpannerConnection()).thenReturn(spannerConnection);
  com.google.cloud.spanner.Statement updateStatement =
      com.google.cloud.spanner.Statement.of(UPDATE);
  com.google.cloud.spanner.Statement largeUpdateStatement =
      com.google.cloud.spanner.Statement.of(LARGE_UPDATE);
  when(spannerConnection.executeUpdate(updateStatement)).thenReturn(1L);
  when(spannerConnection.executeUpdate(largeUpdateStatement)).thenReturn(Integer.MAX_VALUE + 1L);
  try (JdbcStatement statement = new JdbcStatement(connection)) {
    assertThat(statement.executeLargeUpdate(updateStatement)).isEqualTo(1);
    assertThat(statement.executeLargeUpdate(largeUpdateStatement))
        .isEqualTo(Integer.MAX_VALUE + 1L);
  }
}
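The point of the large-update path is that JDBC's plain executeUpdate returns an int, so an update count of Integer.MAX_VALUE + 1 cannot be represented there; executeLargeUpdate returns a long and passes the count through unchanged. Below is a minimal sketch of the overflow problem; the helper name and exception are hypothetical and do not show how the real driver handles such an overflow.

public class LargeUpdateCountSketch {
  // Hypothetical helper, for illustration only: converting a long update
  // count to the int that executeUpdate must return loses information once
  // the count exceeds Integer.MAX_VALUE.
  static int toIntUpdateCount(long updateCount) {
    if (updateCount > Integer.MAX_VALUE) {
      throw new ArithmeticException(
          "update count " + updateCount + " overflows int; use executeLargeUpdate");
    }
    return (int) updateCount;
  }

  public static void main(String[] args) {
    System.out.println(toIntUpdateCount(1L));                     // prints 1
    System.out.println(toIntUpdateCount(Integer.MAX_VALUE + 1L)); // throws
  }
}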