Usage example of com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient from the java-docs-samples project by GoogleCloudPlatform.
Class CreateBackupTestIT, method testCreateBackup:
@Test
public void testCreateBackup() throws Throwable {
  String functionUrl = BASE_URL + "/createBackup";

  // Build the JSON payload the function expects, then base64-encode it and wrap it
  // in a {"data": {"data": "<base64>"}} envelope, mimicking a Pub/Sub push message.
  String msg =
      String.format(
          "{\"projectId\":\"%s\", \"instanceId\":\"%s\", \"tableId\":\"%s\", \"clusterId\":\"%s\","
              + "\"expireHours\":%d}",
          projectId, INSTANCE_ID, TABLE_ID, CLUSTER_ID, 8);
  String msgBase64 = Base64.getEncoder().encodeToString(msg.getBytes(StandardCharsets.UTF_8));
  Map<String, String> msgMap = new HashMap<>();
  msgMap.put("data", msgBase64);
  Map<String, Map<String, String>> dataMap = new HashMap<>();
  dataMap.put("data", msgMap);
  String jsonStr = gson.toJson(dataMap);

  HttpPost postRequest = new HttpPost(URI.create(functionUrl));
  postRequest.setEntity(new StringEntity(jsonStr));

  // The Functions Framework Maven plugin process takes time to start up.
  // Use resilience4j to retry the test HTTP request until the plugin responds.
  RetryRegistry registry =
      RetryRegistry.of(
          RetryConfig.custom()
              .maxAttempts(8)
              .retryExceptions(HttpHostConnectException.class)
              .intervalFunction(IntervalFunction.ofExponentialBackoff(200, 2))
              .build());
  Retry retry = registry.retry("my");

  // Perform the request-retry process.
  CheckedRunnable retriableFunc = Retry.decorateCheckedRunnable(retry, () -> client.execute(postRequest));
  retriableFunc.run();

  // Wait 2 mins for the backup to be created.
  TimeUnit.MINUTES.sleep(2);

  // Check that exactly one backup exists for the cluster.
  List<String> backups = new ArrayList<>(); // diamond operator instead of explicit type argument
  try (BigtableTableAdminClient tableAdmin = BigtableTableAdminClient.create(projectId, INSTANCE_ID)) {
    backups = tableAdmin.listBackups(CLUSTER_ID);
  } catch (IOException e) {
    System.out.println("Unable to list backups: \n" + e.toString());
    throw e;
  }
  assertThat(backups.size()).isEqualTo(1);
  String expectedBackupPrefix = TABLE_ID + "-backup-";
  // BUG FIX: the original wrote assertThat(backups.get(0).contains(prefix)) — a boolean
  // subject with no .isTrue() check, which asserts nothing. Assert on the string subject.
  assertThat(backups.get(0)).contains(expectedBackupPrefix);
}
Usage example of com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient from the java-docs-samples project by GoogleCloudPlatform.
Class FiltersTest, method beforeClass:
@BeforeClass
public static void beforeClass() throws IOException {
// Resolve required configuration from the environment; requireEnv fails fast if unset.
projectId = requireEnv("GOOGLE_CLOUD_PROJECT");
instanceId = requireEnv(INSTANCE_ENV);
// Create the test table with the two column families the filter samples read,
// then seed it with a fixed set of "phone#..." rows in a single bulk mutation.
try (BigtableTableAdminClient adminClient = BigtableTableAdminClient.create(projectId, instanceId)) {
CreateTableRequest createTableRequest = CreateTableRequest.of(TABLE_ID).addFamily(COLUMN_FAMILY_NAME_STATS).addFamily(COLUMN_FAMILY_NAME_DATA);
adminClient.createTable(createTableRequest);
try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) {
// Each row mixes long-valued cells (connected_cell / connected_wifi, qualifiers built
// via ByteString.copyFrom) with string cells (os_build, data_plan_*). Note the first
// row writes data_plan_01gb twice — at TIMESTAMP_MINUS_HR_NANO and TIMESTAMP_NANO —
// so timestamp/version filters have multiple cell versions to select between.
BulkMutation bulkMutation = BulkMutation.create(TABLE_ID).add("phone#4c410523#20190501", Mutation.create().setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_cell".getBytes()), TIMESTAMP_NANO, 1).setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_wifi".getBytes()), TIMESTAMP_NANO, 1).setCell(COLUMN_FAMILY_NAME_STATS, "os_build", TIMESTAMP_NANO, "PQ2A.190405.003").setCell(COLUMN_FAMILY_NAME_DATA, "data_plan_01gb", TIMESTAMP_MINUS_HR_NANO, "true").setCell(COLUMN_FAMILY_NAME_DATA, "data_plan_01gb", TIMESTAMP_NANO, "false").setCell(COLUMN_FAMILY_NAME_DATA, "data_plan_05gb", TIMESTAMP_NANO, "true")).add("phone#4c410523#20190502", Mutation.create().setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_cell".getBytes()), TIMESTAMP_NANO, 1).setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_wifi".getBytes()), TIMESTAMP_NANO, 1).setCell(COLUMN_FAMILY_NAME_STATS, "os_build", TIMESTAMP_NANO, "PQ2A.190405.004").setCell(COLUMN_FAMILY_NAME_DATA, "data_plan_05gb", TIMESTAMP_NANO, "true")).add("phone#4c410523#20190505", Mutation.create().setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_cell".getBytes()), TIMESTAMP_NANO, 0).setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_wifi".getBytes()), TIMESTAMP_NANO, 1).setCell(COLUMN_FAMILY_NAME_STATS, "os_build", TIMESTAMP_NANO, "PQ2A.190406.000").setCell(COLUMN_FAMILY_NAME_DATA, "data_plan_05gb", TIMESTAMP_NANO, "true")).add("phone#5c10102#20190501", Mutation.create().setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_cell".getBytes()), TIMESTAMP_NANO, 1).setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_wifi".getBytes()), TIMESTAMP_NANO, 1).setCell(COLUMN_FAMILY_NAME_STATS, "os_build", TIMESTAMP_NANO, "PQ2A.190401.002").setCell(COLUMN_FAMILY_NAME_DATA, "data_plan_10gb", TIMESTAMP_NANO, "true")).add("phone#5c10102#20190502", Mutation.create().setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_cell".getBytes()), 
TIMESTAMP_NANO, 1).setCell(COLUMN_FAMILY_NAME_STATS, ByteString.copyFrom("connected_wifi".getBytes()), TIMESTAMP_NANO, 0).setCell(COLUMN_FAMILY_NAME_STATS, "os_build", TIMESTAMP_NANO, "PQ2A.190406.000").setCell(COLUMN_FAMILY_NAME_DATA, "data_plan_10gb", TIMESTAMP_NANO, "true"));
dataClient.bulkMutateRows(bulkMutation);
}
} catch (Exception e) {
// Log, then rethrow so the whole test class aborts visibly when setup fails.
System.out.println("Error during beforeClass: \n" + e.toString());
throw (e);
}
}
Usage example of com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient from the java-docs-samples project by GoogleCloudPlatform.
Class MemcachedTest, method afterClass:
@AfterClass
public static void afterClass() {
  // Teardown: delete the test table, then stop the memcached docker container.
  // Best-effort — failures are logged but never rethrown, so other suites still run.
  try (BigtableTableAdminClient adminClient = BigtableTableAdminClient.create(projectId, instanceId)) {
    adminClient.deleteTable(TABLE_ID);
    String[] dockerCommand = (String.format("docker stop %s", MEMCACHED_CONTAINER_NAME)).split(" ");
    Process process = new ProcessBuilder(dockerCommand).start();
    process.waitFor();
  } catch (InterruptedException e) {
    // FIX: the original swallowed InterruptedException inside catch (Exception).
    // Restore the interrupt flag so callers/frameworks can observe the interruption.
    Thread.currentThread().interrupt();
    System.out.println("Error during afterClass: \n" + e.toString());
  } catch (Exception e) {
    System.out.println("Error during afterClass: \n" + e.toString());
  }
}
Usage example of com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient from the java-docs-samples project by GoogleCloudPlatform.
Class CreateBackup, method accept:
@Override
public void accept(PubSubMessage message, Context context) {
  // Guard clause: nothing to do when there is no message payload.
  if (message == null || message.getData() == null) {
    return;
  }
  logger.info("Trigger event:" + message.getData());
  try {
    // The Pub/Sub payload arrives base64-encoded; decode it back to UTF-8 JSON.
    byte[] encoded = message.getData().getBytes(StandardCharsets.UTF_8);
    String payload = new String(Base64.getDecoder().decode(encoded), StandardCharsets.UTF_8);
    logger.info("Decoded payload:" + payload);
    CreateBackupMessage cbMessage = mapper.readValue(payload, CreateBackupMessage.class);
    logger.info("CreateBackup message:" + cbMessage.toString());
    logger.info("Submitting the create backup request");
    // Build an admin client scoped to the project/instance named in the message.
    BigtableTableAdminSettings adminSettings =
        BigtableTableAdminSettings.newBuilder()
            .setProjectId(cbMessage.getProjectId())
            .setInstanceId(cbMessage.getInstanceId())
            .build();
    try (BigtableTableAdminClient adminClient = BigtableTableAdminClient.create(adminSettings)) {
      // Backup id is derived from the table id; expiry comes from the message.
      CreateBackupRequest request =
          CreateBackupRequest.of(cbMessage.getClusterId(), buildBackupId(cbMessage.getTableId()))
              .setSourceTableId(cbMessage.getTableId())
              .setExpireTime(buildExpireTime(cbMessage.getExpireHours()));
      Backup backupDetails = adminClient.createBackup(request);
      logger.info("Submitted backup request :" + backupDetails.getId() + ": that will expire at:" + backupDetails.getExpireTime());
    } catch (IOException e) {
      logger.log(Level.SEVERE, "Caught Exception creating backup:" + e.toString(), e);
    }
  } catch (Exception e) {
    logger.log(Level.SEVERE, "Caught Exception running the create backup function:" + e.toString(), e);
  }
}
Usage example of com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient from the java-docs-samples project by GoogleCloudPlatform.
Class BulkWrite, method bulkWrite:
static void bulkWrite(BigtableOptions options) throws IOException, GeneralSecurityException {
  BigtableTableAdminSettings adminSettings =
      BigtableTableAdminSettings.newBuilder()
          .setProjectId(options.getProject())
          .setInstanceId(options.getBigtableInstanceId())
          .build();
  int clusterNodeCount = getClusterNodeCount(options.getProject(), options.getBigtableInstanceId());
  List<String> newTableIds;
  // FIX: the original never closed the admin client (it holds gRPC channels) and
  // leaked it on the early return below. try-with-resources guarantees cleanup,
  // matching the other samples in this project.
  try (BigtableTableAdminClient adminClient = BigtableTableAdminClient.create(adminSettings)) {
    newTableIds = getNewTableIds(adminClient, options.getBigtableSize());
  }
  // If the specified size of Bigtable is already met, don't run the pipeline.
  if (newTableIds.isEmpty()) {
    return;
  }
  // Rows needed to reach TB_PER_TABLE per table, and the per-table write rate that
  // shares the cluster's throughput (MB_PER_SEC per node) across the new tables.
  long numRows = (long) ((TB_PER_TABLE * ONE_TB) / (MB_PER_ROW * ONE_MB));
  long rate = clusterNodeCount * MB_PER_SEC / newTableIds.size();
  String generateLabel = String.format("Generate %d rows at %dMB per second for %d tables", numRows, rate, newTableIds.size());
  String mutationLabel = String.format("Create mutations that write %d MB to each row", MB_PER_ROW);
  System.out.println(generateLabel);
  System.out.println(mutationLabel);
  // One rate-limited sequence feeds a shared mutation PCollection, fanned out to
  // a separate Bigtable write transform per destination table.
  Pipeline p = Pipeline.create(options);
  PCollection<Mutation> mutations =
      p.apply(generateLabel, GenerateSequence.from(0).to(numRows).withRate(rate, Duration.standardSeconds(1)))
          .apply(mutationLabel, ParDo.of(new CreateMutationFn()));
  for (String tableId : newTableIds) {
    mutations.apply(
        String.format("Write data to table %s", tableId),
        CloudBigtableIO.writeToTable(
            new CloudBigtableTableConfiguration.Builder()
                .withProjectId(options.getProject())
                .withInstanceId(options.getBigtableInstanceId())
                .withTableId(tableId)
                .build()));
  }
  p.run();
}
Aggregations