Use of com.google.cloud.bigtable.data.v2.models.RowMutationEntry in project java-bigtable-hbase by googleapis.
From the class TestDataClientVeneerApi, method testCreateBulkMutation:
@Test
public void testCreateBulkMutation() throws Exception {
  RowMutationEntry entry = RowMutationEntry.create(ROW_KEY);
  // Stub the veneer data client to hand back the mocked batcher and a completed future.
  when(mockDataClient.newBulkMutationBatcher(TABLE_ID)).thenReturn(mockMutationBatcher);
  when(mockMutationBatcher.add(entry)).thenReturn(ApiFutures.<Void>immediateFuture(null));
  BulkMutationWrapper mutationWrapper = dataClientWrapper.createBulkMutation(TABLE_ID);
  mutationWrapper.add(entry).get();
  // The wrapper should delegate both batcher creation and the add() call.
  verify(mockDataClient).newBulkMutationBatcher(TABLE_ID);
  verify(mockMutationBatcher).add(entry);
}
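For comparison, here is a minimal, hedged sketch of the same flow against the public java-bigtable client rather than the mocked HBase wrapper. The project, instance, table, family, and row-key names are hypothetical placeholders, not values from the test above.

import com.google.api.gax.batching.Batcher;
import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;

public class BulkMutationSketch {
  public static void main(String[] args) throws Exception {
    // "my-project", "my-instance", "my-table" and "cf" are placeholder names.
    try (BigtableDataClient client = BigtableDataClient.create("my-project", "my-instance");
        Batcher<RowMutationEntry, Void> batcher = client.newBulkMutationBatcher("my-table")) {
      // Each RowMutationEntry targets one row; the batcher groups entries into MutateRows RPCs.
      RowMutationEntry entry =
          RowMutationEntry.create("row-key-1").setCell("cf", "qualifier", "value");
      // add() returns an ApiFuture<Void> that resolves once this entry has been applied.
      batcher.add(entry).get();
    } // closing the batcher flushes any entries still buffered
  }
}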
Use of com.google.cloud.bigtable.data.v2.models.RowMutationEntry in project java-bigtable by googleapis.
From the class EnhancedBigtableStubTest, method testCallContextPropagatedInMutationBatcher:
@Test
public void testCallContextPropagatedInMutationBatcher()
    throws IOException, InterruptedException, ExecutionException {
  EnhancedBigtableStubSettings settings =
      defaultSettings.toBuilder()
          .setRefreshingChannel(true)
          .setPrimedTableIds("table1", "table2")
          .build();
  try (EnhancedBigtableStub stub = EnhancedBigtableStub.create(settings)) {
    // Clear the previous contexts
    contextInterceptor.contexts.clear();
    // Override the timeout
    GrpcCallContext clientCtx =
        GrpcCallContext.createDefault().withTimeout(Duration.ofMinutes(10));
    // Send a batch
    try (Batcher<RowMutationEntry, Void> batcher =
        stub.newMutateRowsBatcher("table1", clientCtx)) {
      batcher.add(RowMutationEntry.create("key").deleteRow()).get();
    }
    // Ensure that the server got the overridden deadline
    Context serverCtx = contextInterceptor.contexts.poll();
    assertThat(serverCtx).isNotNull();
    assertThat(serverCtx.getDeadline()).isAtLeast(Deadline.after(8, TimeUnit.MINUTES));
  }
}
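The test above exercises the internal stub directly. A hedged application-level sketch of the same idea follows; it assumes a BigtableDataClient version that exposes a newBulkMutationBatcher(tableId, GrpcCallContext) overload, and the table and row-key names are placeholders.

import com.google.api.gax.batching.Batcher;
import com.google.api.gax.grpc.GrpcCallContext;
import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
import org.threeten.bp.Duration;

public class TimeoutOverrideSketch {
  static void deleteWithLongTimeout(BigtableDataClient client) throws Exception {
    // Carry a 10 minute timeout on every RPC issued by this batcher.
    GrpcCallContext ctx = GrpcCallContext.createDefault().withTimeout(Duration.ofMinutes(10));
    // "my-table" and "stale-key" are placeholder names.
    try (Batcher<RowMutationEntry, Void> batcher =
        client.newBulkMutationBatcher("my-table", ctx)) {
      batcher.add(RowMutationEntry.create("stale-key").deleteRow()).get();
    }
  }
}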
Use of com.google.cloud.bigtable.data.v2.models.RowMutationEntry in project java-bigtable by googleapis.
From the class MutateRowsBatchingDescriptorTest, method requestBuilderTest:
@Test
public void requestBuilderTest() {
  MutateRowsBatchingDescriptor underTest = new MutateRowsBatchingDescriptor();
  long timestamp = 10_000L;
  BulkMutation bulkMutation = BulkMutation.create("fake-table");
  // The descriptor's request builder accumulates RowMutationEntry elements into one BulkMutation.
  BatchingRequestBuilder<RowMutationEntry, BulkMutation> requestBuilder =
      underTest.newRequestBuilder(bulkMutation);
  requestBuilder.add(RowMutationEntry.create(ROW_KEY).setCell(FAMILY, QUALIFIER, timestamp, VALUE));
  requestBuilder.add(RowMutationEntry.create("rowKey-2").setCell("family-2", "q", 20_000L, "some-value"));
  BulkMutation actualBulkMutation = requestBuilder.build();
  // The built request should match a BulkMutation assembled by hand from the same cells.
  assertThat(actualBulkMutation.toProto(requestContext))
      .isEqualTo(
          BulkMutation.create("fake-table")
              .add(ROW_KEY, Mutation.create().setCell(FAMILY, QUALIFIER, timestamp, VALUE))
              .add("rowKey-2", Mutation.create().setCell("family-2", "q", 20_000L, "some-value"))
              .toProto(requestContext));
}
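The descriptor folds individual RowMutationEntry objects into a single BulkMutation, mirroring what an application can do by hand. The sketch below builds an equivalent batch and sends it synchronously; the table, family, qualifier, and row keys are hypothetical.

import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.models.BulkMutation;
import com.google.cloud.bigtable.data.v2.models.Mutation;

public class BulkMutationBuildSketch {
  static void writeBatch(BigtableDataClient client) {
    BulkMutation bulkMutation =
        BulkMutation.create("my-table") // placeholder table id
            // Each add() pairs a row key with the mutations to apply to that row.
            .add("row-1", Mutation.create().setCell("cf", "q", 10_000L, "value-1"))
            .add("row-2", Mutation.create().setCell("cf", "q", 20_000L, "value-2"));
    // bulkMutateRows() blocks until every entry in the batch has been applied.
    client.bulkMutateRows(bulkMutation);
  }
}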
Use of com.google.cloud.bigtable.data.v2.models.RowMutationEntry in project java-bigtable by googleapis.
From the class BulkMutateIT, method test:
@Test(timeout = 60 * 1000)
public void test() throws IOException, InterruptedException {
  BigtableDataSettings settings = testEnvRule.env().getDataClientSettings();
  String rowPrefix = UUID.randomUUID().toString();
  // Set target latency really low so it'll trigger adjusting thresholds
  BigtableDataSettings.Builder builder =
      settings.toBuilder().enableBatchMutationLatencyBasedThrottling(2L);
  try (BigtableDataClient client = BigtableDataClient.create(builder.build());
      BatcherImpl<RowMutationEntry, Void, BulkMutation, Void> batcher =
          (BatcherImpl<RowMutationEntry, Void, BulkMutation, Void>)
              client.newBulkMutationBatcher(testEnvRule.env().getTableId())) {
    FlowControlEventStats events = batcher.getFlowController().getFlowControlEventStats();
    long initialThreshold =
        Objects.requireNonNull(batcher.getFlowController().getCurrentElementCountLimit());
    assertThat(batcher.getFlowController().getCurrentElementCountLimit())
        .isNotEqualTo(batcher.getFlowController().getMinElementCountLimit());
    assertThat(batcher.getFlowController().getCurrentElementCountLimit())
        .isNotEqualTo(batcher.getFlowController().getMaxElementCountLimit());
    String familyId = testEnvRule.env().getFamilyId();
    long initial = batcher.getFlowController().getCurrentElementCountLimit();
    for (long i = 0; i < initial * 3; i++) {
      String key = rowPrefix + "test-key" + i;
      batcher.add(RowMutationEntry.create(key).setCell(familyId, "qualifier", i));
    }
    batcher.flush();
    assertThat(events.getLastFlowControlEvent()).isNotNull();
    // Verify that the threshold is adjusted
    assertThat(batcher.getFlowController().getCurrentElementCountLimit())
        .isNotEqualTo(initialThreshold);
    // Query a key to make sure the write succeeded
    Row row =
        testEnvRule.env()
            .getDataClient()
            .readRowsCallable()
            .first()
            .call(Query.create(testEnvRule.env().getTableId()).rowKey(rowPrefix + "test-key" + initial));
    assertThat(row.getCells()).hasSize(1);
  }
}
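Latency-based throttling is an opt-in setting. Here is a hedged sketch of enabling it outside the test harness, with placeholder project and instance ids and an illustrative 20 ms target latency:

import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;

public class ThrottlingSettingsSketch {
  static BigtableDataClient createThrottledClient() throws Exception {
    BigtableDataSettings settings =
        BigtableDataSettings.newBuilder()
            .setProjectId("my-project")   // placeholder
            .setInstanceId("my-instance") // placeholder
            // Let the batcher adjust its element-count limits so that batch RPC
            // latency stays near the 20 ms target.
            .enableBatchMutationLatencyBasedThrottling(20L)
            .build();
    return BigtableDataClient.create(settings);
  }
}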
Use of com.google.cloud.bigtable.data.v2.models.RowMutationEntry in project java-bigtable by googleapis.
From the class BigtableDataClientTest, method proxyNewBulkMutationBatcherTest:
@Test
public void proxyNewBulkMutationBatcherTest() {
  // The client should forward batcher creation to the stub's newMutateRowsBatcher().
  Mockito.when(mockStub.newMutateRowsBatcher(Mockito.any(String.class), Mockito.any()))
      .thenReturn(mockBulkMutationBatcher);
  ApiFuture<Void> expectedResponse = ApiFutures.immediateFuture(null);
  Batcher<RowMutationEntry, Void> batcher = bigtableDataClient.newBulkMutationBatcher("fake-table");
  RowMutationEntry request =
      RowMutationEntry.create("some-key").setCell("some-family", "fake-qualifier", "fake-value");
  Mockito.when(mockBulkMutationBatcher.add(request)).thenReturn(expectedResponse);
  // add() should return the exact future produced by the underlying mock batcher.
  ApiFuture<Void> actualRes = batcher.add(request);
  assertThat(actualRes).isSameInstanceAs(expectedResponse);
  Mockito.verify(mockStub).newMutateRowsBatcher(Mockito.any(String.class), Mockito.any());
}
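Since batcher.add() simply proxies the stub's ApiFuture, application code can attach a callback instead of blocking on get(). A hedged sketch, with placeholder family, qualifier, and key names:

import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutureCallback;
import com.google.api.core.ApiFutures;
import com.google.api.gax.batching.Batcher;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
import com.google.common.util.concurrent.MoreExecutors;

public class BatcherCallbackSketch {
  static void addWithCallback(Batcher<RowMutationEntry, Void> batcher) {
    RowMutationEntry entry =
        RowMutationEntry.create("some-key").setCell("some-family", "q", "v"); // placeholders
    ApiFuture<Void> result = batcher.add(entry);
    ApiFutures.addCallback(
        result,
        new ApiFutureCallback<Void>() {
          @Override
          public void onSuccess(Void ignored) {
            // The entry was committed as part of a MutateRows batch.
          }

          @Override
          public void onFailure(Throwable t) {
            // Per-entry failures surface here once retries are exhausted.
            t.printStackTrace();
          }
        },
        MoreExecutors.directExecutor());
  }
}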