Use of org.apache.hadoop.hbase.client.AsyncTable in project hbase by apache.
The class TestSyncReplicationMoreLogsInLocalCopyToRemote, method testSplitLog.
@Test
public void testSplitLog() throws Exception {
  UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.STANDBY);
  UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.ACTIVE);
  HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
  DualAsyncFSWALForTest wal =
    (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
  wal.setRemoteBroken();
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
    AsyncTable<?> table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1).build();
    try {
      table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0))).get();
      fail("Should fail since the rs will crash and we will not retry");
    } catch (ExecutionException e) {
      // expected
      LOG.info("Expected error:", e);
    }
  }
  UTIL1.waitFor(60000, new ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      try (Table table = UTIL1.getConnection().getTable(TABLE_NAME)) {
        return table.exists(new Get(Bytes.toBytes(0)));
      }
    }

    @Override
    public String explainFailure() throws Exception {
      return "The row is still not available";
    }
  });
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.DOWNGRADE_ACTIVE);
  // We should have copied the local log to remote, so we should be able to get the value
  try (Table table = UTIL2.getConnection().getTable(TABLE_NAME)) {
    assertEquals(0, Bytes.toInt(table.get(new Get(Bytes.toBytes(0))).getValue(CF, CQ)));
  }
}
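For reference, a minimal sketch of the fail-fast AsyncTable write pattern this test relies on: with setMaxAttempts(1) the client does not retry, so a server-side failure surfaces immediately as an ExecutionException on get(). The table name, family, and qualifier below are hypothetical, and a running cluster with that table is assumed.

import java.util.concurrent.ExecutionException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class FailFastPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // setMaxAttempts(1) disables retries for this table instance only.
      AsyncTable<?> table =
        conn.getTableBuilder(TableName.valueOf("demo")).setMaxAttempts(1).build();
      Put put = new Put(Bytes.toBytes("row")).addColumn(Bytes.toBytes("cf"),
        Bytes.toBytes("cq"), Bytes.toBytes("value"));
      try {
        // Block on the CompletableFuture returned by the async put.
        table.put(put).get();
      } catch (ExecutionException e) {
        // With retries disabled, the first failure is fatal to this call.
        System.err.println("Put failed without retry: " + e.getCause());
      }
    }
  }
}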
Use of org.apache.hadoop.hbase.client.AsyncTable in project hbase by apache.
The class RSGroupInfoManagerImpl, method multiMutate.
private void multiMutate(List<Mutation> mutations) throws IOException {
  MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
  for (Mutation mutation : mutations) {
    if (mutation instanceof Put) {
      builder
        .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT, mutation));
    } else if (mutation instanceof Delete) {
      builder.addMutationRequest(
        ProtobufUtil.toMutation(MutationProto.MutationType.DELETE, mutation));
    } else {
      throw new DoNotRetryIOException(
        "multiMutate doesn't support " + mutation.getClass().getName());
    }
  }
  MutateRowsRequest request = builder.build();
  AsyncTable<?> table = conn.getTable(RSGROUP_TABLE_NAME);
  LOG.debug("Multimutating {} with {} mutations", RSGROUP_TABLE_NAME, mutations.size());
  FutureUtils.get(table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
    MultiRowMutationService::newStub,
    (stub, controller, done) -> stub.mutateRows(controller, request, done), ROW_KEY));
  LOG.info("Multimutating {} with {} mutations done", RSGROUP_TABLE_NAME, mutations.size());
}
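For context, a hedged sketch of the same single-region coprocessor call from ordinary client code, assuming the same protobuf and utility imports as the method above, an open AsyncConnection named conn, and a target table with the MultiRowMutationEndpoint coprocessor loaded. The table name and row keys are hypothetical; all mutated rows must live in the region that hosts the row passed to coprocessorService, which applies them atomically.

// A sketch, not production code: atomically apply a put and a delete
// to two rows in the same region via the MultiRowMutation endpoint.
MutateRowsRequest request = MutateRowsRequest.newBuilder()
  .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT,
    new Put(Bytes.toBytes("row1")).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"),
      Bytes.toBytes("v"))))
  .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.DELETE,
    new Delete(Bytes.toBytes("row2"))))
  .build();
AsyncTable<?> table = conn.getTable(TableName.valueOf("demo"));
// The row argument only selects which region executes the endpoint.
FutureUtils.get(table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
  MultiRowMutationService::newStub,
  (stub, controller, done) -> stub.mutateRows(controller, request, done),
  Bytes.toBytes("row1")));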
Use of org.apache.hadoop.hbase.client.AsyncTable in project hbase by apache.
The class TestSyncReplicationMoreLogsInLocalGiveUpSplitting, method testSplitLog.
@Test
public void testSplitLog() throws Exception {
  UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
  UTIL2.getAdmin().disableReplicationPeer(PEER_ID);
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.STANDBY);
  UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.ACTIVE);
  try (Table table = UTIL1.getConnection().getTable(TABLE_NAME)) {
    table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0)));
  }
  HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
  DualAsyncFSWALForTest wal =
    (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
  wal.setRemoteBroken();
  wal.suspendLogRoll();
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
    AsyncTable<?> table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1)
      .setWriteRpcTimeout(5, TimeUnit.SECONDS).build();
    try {
      table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(1))).get();
      fail("Should fail since the rs will hang and we will get a rpc timeout");
    } catch (ExecutionException e) {
      // expected
      LOG.info("Expected error:", e);
    }
  }
  wal.waitUntilArrive();
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.DOWNGRADE_ACTIVE);
  wal.resumeLogRoll();
  try (Table table = UTIL2.getConnection().getTable(TABLE_NAME)) {
    assertEquals(0, Bytes.toInt(table.get(new Get(Bytes.toBytes(0))).getValue(CF, CQ)));
    // we failed to write this entry to remote so it should not exist
    assertFalse(table.exists(new Get(Bytes.toBytes(1))));
  }
  UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.STANDBY);
  // a table in STANDBY state can not be read from the client side
  try (Table table = UTIL1.getConnection().getTable(TABLE_NAME)) {
    try {
      table.exists(new Get(Bytes.toBytes(0)));
    } catch (DoNotRetryIOException | RetriesExhaustedException e) {
      // expected
      assertThat(e.getMessage(), containsString("STANDBY"));
    }
  }
  HRegion region = UTIL1.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0);
  // we give up splitting the whole wal file so this record will also be gone
  assertTrue(region.get(new Get(Bytes.toBytes(0))).isEmpty());
  UTIL2.getAdmin().enableReplicationPeer(PEER_ID);
  // finally it should be replicated back
  waitUntilReplicationDone(UTIL1, 1);
}
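A minimal sketch of the per-table timeout and retry tuning used above. The builder settings override the connection-wide defaults for the returned table instance only; the table name is hypothetical and a live cluster is assumed.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TunedAsyncTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // One attempt and a 5-second write RPC timeout: a hanging region
      // server fails the call quickly instead of blocking the client.
      AsyncTable<?> table = conn.getTableBuilder(TableName.valueOf("demo"))
        .setMaxAttempts(1)
        .setWriteRpcTimeout(5, TimeUnit.SECONDS)
        .build();
      // ... use the table as usual; failed writes complete exceptionally.
    }
  }
}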
Use of org.apache.hadoop.hbase.client.AsyncTable in project hbase by apache.
The class AsyncAggregationClient, method std.
public static <R, S, P extends Message, Q extends Message, T extends Message>
  CompletableFuture<Double> std(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci,
    Scan scan) {
  CompletableFuture<Double> future = new CompletableFuture<>();
  AggregateRequest req;
  try {
    req = validateArgAndGetPB(scan, ci, false);
  } catch (IOException e) {
    future.completeExceptionally(e);
    return future;
  }
  AbstractAggregationCallback<Double> callback =
    new AbstractAggregationCallback<Double>(future) {

      private S sum;

      private S sumSq;

      private long count;

      @Override
      protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
        if (resp.getFirstPartCount() > 0) {
          sum = ci.add(sum, getPromotedValueFromProto(ci, resp, 0));
          sumSq = ci.add(sumSq, getPromotedValueFromProto(ci, resp, 1));
          count += resp.getSecondPart().asReadOnlyByteBuffer().getLong();
        }
      }

      @Override
      protected Double getFinalResult() {
        double avg = ci.divideForAvg(sum, count);
        double avgSq = ci.divideForAvg(sumSq, count);
        return Math.sqrt(avgSq - avg * avg);
      }
    };
  table
    .<AggregateService, AggregateResponse> coprocessorService(AggregateService::newStub,
      (stub, controller, rpcCallback) -> stub.getStd(controller, req, rpcCallback), callback)
    .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow())
    .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute();
  return future;
}
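Each region returns a partial sum, sum of squares, and count; getFinalResult then applies the identity Var(X) = E[X^2] - E[X]^2 and takes the square root. A hedged usage sketch follows, computing the standard deviation of a long-typed column; the table and column names are hypothetical, and the aggregate coprocessor endpoint is assumed to be loaded on the table.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AsyncAggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class StdExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      Scan scan = new Scan().addColumn(Bytes.toBytes("cf"), Bytes.toBytes("value"));
      // Fan getStd out to every region and combine the partials client-side.
      CompletableFuture<Double> std = AsyncAggregationClient
        .std(conn.getTable(TableName.valueOf("demo")), new LongColumnInterpreter(), scan);
      System.out.println("std = " + std.get());
    }
  }
}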
Use of org.apache.hadoop.hbase.client.AsyncTable in project hbase by apache.
The class AsyncAggregationClient, method rowCount.
public static <R, S, P extends Message, Q extends Message, T extends Message>
  CompletableFuture<Long> rowCount(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci,
    Scan scan) {
  CompletableFuture<Long> future = new CompletableFuture<>();
  AggregateRequest req;
  try {
    req = validateArgAndGetPB(scan, ci, true);
  } catch (IOException e) {
    future.completeExceptionally(e);
    return future;
  }
  AbstractAggregationCallback<Long> callback = new AbstractAggregationCallback<Long>(future) {

    private long count;

    @Override
    protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
      count += resp.getFirstPart(0).asReadOnlyByteBuffer().getLong();
    }

    @Override
    protected Long getFinalResult() {
      return count;
    }
  };
  table
    .<AggregateService, AggregateResponse> coprocessorService(AggregateService::newStub,
      (stub, controller, rpcCallback) -> stub.getRowNum(controller, req, rpcCallback), callback)
    .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow())
    .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute();
  return future;
}
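A matching hedged sketch for rowCount, reusing the setup and hypothetical names from the StdExample above; here the returned CompletableFuture is consumed asynchronously instead of blocking on get().

// Assuming conn, scan, and imports as in StdExample.
AsyncAggregationClient
  .rowCount(conn.getTable(TableName.valueOf("demo")), new LongColumnInterpreter(), scan)
  .whenComplete((count, err) -> {
    if (err != null) {
      err.printStackTrace();
    } else {
      System.out.println("rows = " + count);
    }
  });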