Use of com.google.cloud.spanner.BatchClient in project java-docs-samples by GoogleCloudPlatform:
the class BatchSample, method main.
/**
* This example showcases how to create a batch client, partition a query, and concurrently read
* from multiple partitions.
*/
public static void main(String[] args) throws InterruptedException {
  if (args.length != 2) {
    System.err.println("Usage: BatchSample <instance_id> <database_id>");
    return;
  }
  /*
   * CREATE TABLE Singers (
   *   SingerId INT64 NOT NULL,
   *   FirstName STRING(1024),
   *   LastName STRING(1024),
   *   SingerInfo BYTES(MAX),
   * ) PRIMARY KEY (SingerId);
   */
  String instanceId = args[0];
  String databaseId = args[1];
  SpannerOptions options = SpannerOptions.newBuilder().build();
  Spanner spanner = options.getService();
  // [START spanner_batch_client]
  int numThreads = Runtime.getRuntime().availableProcessors();
  ExecutorService executor = Executors.newFixedThreadPool(numThreads);
  // Statistics
  int totalPartitions;
  AtomicInteger totalRecords = new AtomicInteger(0);
  try {
    BatchClient batchClient =
        spanner.getBatchClient(DatabaseId.of(options.getProjectId(), instanceId, databaseId));
    final BatchReadOnlyTransaction txn =
        batchClient.batchReadOnlyTransaction(TimestampBound.strong());
    // A Partition object is serializable and can be used from a different process.
    List<Partition> partitions =
        txn.partitionQuery(
            PartitionOptions.getDefaultInstance(),
            Statement.of("SELECT SingerId, FirstName, LastName FROM Singers"));
    totalPartitions = partitions.size();
    for (final Partition p : partitions) {
      executor.execute(
          () -> {
            try (ResultSet results = txn.execute(p)) {
              while (results.next()) {
                long singerId = results.getLong(0);
                String firstName = results.getString(1);
                String lastName = results.getString(2);
                System.out.println("[" + singerId + "] " + firstName + " " + lastName);
                totalRecords.getAndIncrement();
              }
            }
          });
    }
  } finally {
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.HOURS);
    spanner.close();
  }
  double avgRecordsPerPartition = 0.0;
  if (totalPartitions != 0) {
    avgRecordsPerPartition = (double) totalRecords.get() / totalPartitions;
  }
  System.out.println("totalPartitions=" + totalPartitions);
  System.out.println("totalRecords=" + totalRecords);
  System.out.println("avgRecordsPerPartition=" + avgRecordsPerPartition);
  // [END spanner_batch_client]
}
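The comment in the sample points out that a Partition is serializable and can be executed from a different process. The following is a minimal sketch of that hand-off, assuming the coordinator reuses the sample's txn, spanner, options, instanceId, and databaseId variables, and that the worker receives the serialized BatchTransactionId and Partition over a transport of its own (not part of the Spanner API); workerClient, workerTxn, and receivedPartition are placeholder names.

// Coordinator side: both BatchTransactionId and Partition implement java.io.Serializable,
// so they can be shipped to another process alongside the partitions created above.
BatchTransactionId txnId = txn.getBatchTransactionId();
// ... serialize txnId and one Partition per worker, then send them out ...

// Worker side: rebuild the same read-only transaction and execute the received partition.
BatchClient workerClient =
    spanner.getBatchClient(DatabaseId.of(options.getProjectId(), instanceId, databaseId));
BatchReadOnlyTransaction workerTxn = workerClient.batchReadOnlyTransaction(txnId);
// receivedPartition: the deserialized Partition sent by the coordinator (placeholder name).
try (ResultSet results = workerTxn.execute(receivedPartition)) {
  while (results.next()) {
    System.out.println("[" + results.getLong(0) + "] " + results.getString(1));
  }
}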
Use of com.google.cloud.spanner.BatchClient in project google-cloud-java by GoogleCloudPlatform:
the class SpannerSnippets, method getBatchClient.
BatchClient getBatchClient() {
  // [START get_batch_client]
  SpannerOptions options = SpannerOptions.newBuilder().build();
  Spanner spanner = options.getService();
  final String project = "test-project";
  final String instance = "test-instance";
  final String database = "example-db";
  DatabaseId db = DatabaseId.of(project, instance, database);
  BatchClient batchClient = spanner.getBatchClient(db);
  return batchClient;
}
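One possible use of the returned client, sketched here under the assumption that example-db contains the Singers table from the first sample: partition a plain table read (rather than a SQL query) and scan each partition, sequentially for brevity.

BatchClient batchClient = getBatchClient();
BatchReadOnlyTransaction txn =
    batchClient.batchReadOnlyTransaction(TimestampBound.strong());
// Partition a table read; each Partition could also be handed to its own worker thread.
List<Partition> partitions =
    txn.partitionRead(
        PartitionOptions.getDefaultInstance(),
        "Singers",
        KeySet.all(),
        Arrays.asList("SingerId", "FirstName", "LastName"));
for (Partition p : partitions) {
  try (ResultSet results = txn.execute(p)) {
    while (results.next()) {
      System.out.println(results.getLong(0) + " " + results.getString(1));
    }
  }
}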
Use of com.google.cloud.spanner.BatchClient in project beam by apache:
the class SpannerAccessor, method createAndConnect.
private static SpannerAccessor createAndConnect(SpannerConfig spannerConfig) {
  SpannerOptions.Builder builder = SpannerOptions.newBuilder();
  // Set retryable codes for all API methods
  if (spannerConfig.getRetryableCodes() != null) {
    builder
        .getSpannerStubSettingsBuilder()
        .applyToAllUnaryMethods(
            input -> {
              input.setRetryableCodes(spannerConfig.getRetryableCodes());
              return null;
            });
    builder
        .getSpannerStubSettingsBuilder()
        .executeStreamingSqlSettings()
        .setRetryableCodes(spannerConfig.getRetryableCodes());
  }
  // Set commit retry settings
  UnaryCallSettings.Builder<CommitRequest, CommitResponse> commitSettings =
      builder.getSpannerStubSettingsBuilder().commitSettings();
  ValueProvider<Duration> commitDeadline = spannerConfig.getCommitDeadline();
  if (spannerConfig.getCommitRetrySettings() != null) {
    commitSettings.setRetrySettings(spannerConfig.getCommitRetrySettings());
  } else if (commitDeadline != null && commitDeadline.get().getMillis() > 0) {
    // Set the GRPC deadline on the Commit API call.
    RetrySettings.Builder commitRetrySettingsBuilder =
        commitSettings.getRetrySettings().toBuilder();
    commitSettings.setRetrySettings(
        commitRetrySettingsBuilder
            .setTotalTimeout(org.threeten.bp.Duration.ofMillis(commitDeadline.get().getMillis()))
            .setMaxRpcTimeout(org.threeten.bp.Duration.ofMillis(commitDeadline.get().getMillis()))
            .setInitialRpcTimeout(
                org.threeten.bp.Duration.ofMillis(commitDeadline.get().getMillis()))
            .build());
  }
  // Set execute streaming sql retry settings
  ServerStreamingCallSettings.Builder<ExecuteSqlRequest, PartialResultSet>
      executeStreamingSqlSettings =
          builder.getSpannerStubSettingsBuilder().executeStreamingSqlSettings();
  if (spannerConfig.getExecuteStreamingSqlRetrySettings() != null) {
    executeStreamingSqlSettings.setRetrySettings(
        spannerConfig.getExecuteStreamingSqlRetrySettings());
  } else {
    // Setting the timeout for streaming read to 2 hours. This is 1 hour by default
    // after BEAM 2.20.
    RetrySettings.Builder executeSqlStreamingRetrySettings =
        executeStreamingSqlSettings.getRetrySettings().toBuilder();
    executeStreamingSqlSettings.setRetrySettings(
        executeSqlStreamingRetrySettings
            .setInitialRpcTimeout(org.threeten.bp.Duration.ofMinutes(120))
            .setMaxRpcTimeout(org.threeten.bp.Duration.ofMinutes(120))
            .setTotalTimeout(org.threeten.bp.Duration.ofMinutes(120))
            .build());
  }
  ValueProvider<String> projectId = spannerConfig.getProjectId();
  if (projectId != null) {
    builder.setProjectId(projectId.get());
  }
  ServiceFactory<Spanner, SpannerOptions> serviceFactory = spannerConfig.getServiceFactory();
  if (serviceFactory != null) {
    builder.setServiceFactory(serviceFactory);
  }
  ValueProvider<String> host = spannerConfig.getHost();
  if (host != null) {
    builder.setHost(host.get());
  }
  ValueProvider<String> emulatorHost = spannerConfig.getEmulatorHost();
  if (emulatorHost != null) {
    builder.setEmulatorHost(emulatorHost.get());
    if (spannerConfig.getIsLocalChannelProvider() != null
        && spannerConfig.getIsLocalChannelProvider().get()) {
      builder.setChannelProvider(LocalChannelProvider.create(emulatorHost.get()));
    }
    builder.setCredentials(NoCredentials.getInstance());
  }
  String userAgentString = USER_AGENT_PREFIX + "/" + ReleaseInfo.getReleaseInfo().getVersion();
  builder.setHeaderProvider(FixedHeaderProvider.create("user-agent", userAgentString));
  SpannerOptions options = builder.build();
  Spanner spanner = options.getService();
  String instanceId = spannerConfig.getInstanceId().get();
  String databaseId = spannerConfig.getDatabaseId().get();
  DatabaseClient databaseClient =
      spanner.getDatabaseClient(DatabaseId.of(options.getProjectId(), instanceId, databaseId));
  BatchClient batchClient =
      spanner.getBatchClient(DatabaseId.of(options.getProjectId(), instanceId, databaseId));
  DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient();
  return new SpannerAccessor(
      spanner, databaseClient, databaseAdminClient, batchClient, spannerConfig);
}
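A minimal sketch of how caller code might reach this factory, assuming the Beam version in use exposes SpannerAccessor.getOrCreate, SpannerAccessor.getBatchClient, SpannerAccessor.close, and the SpannerConfig.with* builders shown below; the project, instance, and database IDs are placeholders.

SpannerConfig config =
    SpannerConfig.create()
        .withProjectId("my-project")     // placeholder IDs
        .withInstanceId("my-instance")
        .withDatabaseId("my-database");
SpannerAccessor accessor = SpannerAccessor.getOrCreate(config);
try {
  BatchClient batchClient = accessor.getBatchClient();
  // ... partition a query or read as in the BatchSample above ...
} finally {
  // close() releases this accessor's hold on the underlying Spanner resources.
  accessor.close();
}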
Use of com.google.cloud.spanner.BatchClient in project spanner-jdbc by olavloite:
the class BatchReadOnlyTest, method testExecuteBatchReadOnly.
@Test
public void testExecuteBatchReadOnly()
    throws SQLException, NoSuchFieldException, SecurityException, IllegalArgumentException,
        IllegalAccessException {
  for (int testRun = 0; testRun < 2; testRun++) {
    final int numberOfPartitions = 6;
    BatchClient batchClient = mock(BatchClient.class);
    BatchReadOnlyTransaction tx = mock(BatchReadOnlyTransaction.class);
    List<Partition> partitions = new ArrayList<>(numberOfPartitions);
    for (int i = 0; i < numberOfPartitions; i++) {
      partitions.add(mock(Partition.class));
    }
    when(tx.partitionQuery(any(), any())).then(new Returns(partitions));
    when(batchClient.batchReadOnlyTransaction(TimestampBound.strong())).then(new Returns(tx));
    Field field = CloudSpannerTransaction.class.getDeclaredField("batchClient");
    field.setAccessible(true);
    field.set(connection.getTransaction(), batchClient);
    connection.setBatchReadOnly(true);
    Statement statement;
    if (testRun % 2 == 0) {
      statement = connection.createStatement();
      assertTrue(statement.execute(SELECT_ALL_FROM_FOO));
    } else {
      PreparedStatement ps = connection.prepareStatement(SELECT_ALL_FROM_FOO);
      assertTrue(ps.execute());
      statement = ps;
    }
    List<ResultSet> resultSets = new ArrayList<>();
    do {
      resultSets.add(statement.getResultSet());
    } while (statement.getMoreResults());
    assertEquals(numberOfPartitions, resultSets.size());
  }
}
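The test verifies that, with the connection in batch read-only mode, a single executed query surfaces one JDBC ResultSet per Spanner partition, retrieved through getMoreResults(). A short application-level sketch of that same pattern follows, assuming `connection` is an open connection from this driver exposing setBatchReadOnly (as in the test) and that the query text and printed column are placeholders.

connection.setBatchReadOnly(true);
try (Statement statement = connection.createStatement()) {
  // In batch read-only mode the driver partitions the query;
  // each partition is exposed as a separate JDBC ResultSet.
  if (statement.execute("SELECT * FROM FOO")) {
    do {
      try (ResultSet rs = statement.getResultSet()) {
        while (rs.next()) {
          System.out.println(rs.getObject(1));
        }
      }
    } while (statement.getMoreResults());
  }
}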