Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From class TestClientClusterMetrics, method testAsyncClient.
@Test
public void testAsyncClient() throws Exception {
  try (AsyncConnection asyncConnect =
    ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
    AsyncAdmin asyncAdmin = asyncConnect.getAdmin();
    CompletableFuture<ClusterMetrics> originFuture = asyncAdmin.getClusterMetrics();
    CompletableFuture<ClusterMetrics> defaultsFuture =
      asyncAdmin.getClusterMetrics(EnumSet.allOf(Option.class));
    ClusterMetrics origin = originFuture.get();
    ClusterMetrics defaults = defaultsFuture.get();
    Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
    Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
    Assert.assertEquals(origin.getAverageLoad(), defaults.getAverageLoad(), 0);
    Assert.assertEquals(origin.getBackupMasterNames().size(), defaults.getBackupMasterNames().size());
    Assert.assertEquals(origin.getDeadServerNames().size(), defaults.getDeadServerNames().size());
    Assert.assertEquals(origin.getRegionCount(), defaults.getRegionCount());
    Assert.assertEquals(origin.getLiveServerMetrics().size(), defaults.getLiveServerMetrics().size());
    Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
    Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
    origin.getTableRegionStatesCount().forEach((tableName, regionStatesCount) -> {
      RegionStatesCount defaultRegionStatesCount = defaults.getTableRegionStatesCount().get(tableName);
      Assert.assertEquals(defaultRegionStatesCount, regionStatesCount);
    });
  }
}
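Outside of the test harness, the same pattern reduces to the minimal, self-contained sketch below: open an AsyncConnection, get an AsyncAdmin, and fetch ClusterMetrics. It assumes an hbase-site.xml pointing at a reachable cluster is on the classpath; the class name and the chosen Option values are illustrative, not part of the test above.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterMetricsExample {
  public static void main(String[] args) throws Exception {
    // Loads hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() blocks until connected.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // The no-arg overload fetches everything; the EnumSet overload narrows the request.
      ClusterMetrics metrics =
        admin.getClusterMetrics(EnumSet.of(Option.HBASE_VERSION, Option.LIVE_SERVERS)).get();
      System.out.println("HBase version: " + metrics.getHBaseVersion());
      System.out.println("Live servers: " + metrics.getLiveServerMetrics().size());
    }
  }
}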
Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From class TestClearRegionBlockCache, method testClearBlockCacheFromAsyncAdmin.
@Test
public void testClearBlockCacheFromAsyncAdmin() throws Exception {
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get()) {
    AsyncAdmin admin = conn.getAdmin();
    BlockCache blockCache1 = rs1.getBlockCache().get();
    BlockCache blockCache2 = rs2.getBlockCache().get();
    long initialBlockCount1 = blockCache1.getBlockCount();
    long initialBlockCount2 = blockCache2.getBlockCount();
    // scan will cause blocks to be added in BlockCache
    scanAllRegionsForRS(rs1);
    assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
      HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
    scanAllRegionsForRS(rs2);
    assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
      HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get();
    assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
      + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    assertEquals(initialBlockCount1, blockCache1.getBlockCount());
    assertEquals(initialBlockCount2, blockCache2.getBlockCount());
  }
}
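As a standalone sketch, clearing a table's block cache on every region server via AsyncAdmin looks like the code below. It assumes an hbase-site.xml on the classpath and an existing table; the table name "my_table" is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClearBlockCacheExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // Evicts all cached blocks for the table's regions and reports the totals.
      CacheEvictionStats stats =
        conn.getAdmin().clearBlockCache(TableName.valueOf("my_table")).get();
      System.out.println("Evicted blocks: " + stats.getEvictedBlocks());
    }
  }
}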
Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From class TestSyncReplicationMoreLogsInLocalGiveUpSplitting, method testSplitLog.
@Test
public void testSplitLog() throws Exception {
  UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
  UTIL2.getAdmin().disableReplicationPeer(PEER_ID);
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.STANDBY);
  UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.ACTIVE);
  try (Table table = UTIL1.getConnection().getTable(TABLE_NAME)) {
    table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0)));
  }
  HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
  DualAsyncFSWALForTest wal =
    (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
  wal.setRemoteBroken();
  wal.suspendLogRoll();
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
    AsyncTable<?> table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1)
      .setWriteRpcTimeout(5, TimeUnit.SECONDS).build();
    try {
      table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(1))).get();
      fail("Should fail since the rs will hang and we will get an rpc timeout");
    } catch (ExecutionException e) {
      // expected
      LOG.info("Expected error:", e);
    }
  }
  wal.waitUntilArrive();
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.DOWNGRADE_ACTIVE);
  wal.resumeLogRoll();
  try (Table table = UTIL2.getConnection().getTable(TABLE_NAME)) {
    assertEquals(0, Bytes.toInt(table.get(new Get(Bytes.toBytes(0))).getValue(CF, CQ)));
    // we failed to write this entry to the remote cluster, so it should not exist there
    assertFalse(table.exists(new Get(Bytes.toBytes(1))));
  }
  UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.STANDBY);
  // a peer in STANDBY state cannot be read from the client side
  try (Table table = UTIL1.getConnection().getTable(TABLE_NAME)) {
    try {
      table.exists(new Get(Bytes.toBytes(0)));
    } catch (DoNotRetryIOException | RetriesExhaustedException e) {
      // expected
      assertThat(e.getMessage(), containsString("STANDBY"));
    }
  }
  HRegion region = UTIL1.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0);
  // we give up splitting the whole wal file, so this record will also be gone
  assertTrue(region.get(new Get(Bytes.toBytes(0))).isEmpty());
  UTIL2.getAdmin().enableReplicationPeer(PEER_ID);
  // finally it should be replicated back
  waitUntilReplicationDone(UTIL1, 1);
}
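The key client-side pattern in this test is building an AsyncTable with a single attempt and a short write RPC timeout, so that a hung region server surfaces as a failed future instead of retrying indefinitely. A minimal sketch of just that pattern, assuming a classpath hbase-site.xml; the table name, column coordinates, and timeout are illustrative:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BoundedRetryPutExample {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
      ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      // One attempt, 5s write RPC timeout: a stuck server fails fast instead of retrying.
      AsyncTable<?> table = conn.getTableBuilder(TableName.valueOf("my_table"))
        .setMaxAttempts(1).setWriteRpcTimeout(5, TimeUnit.SECONDS).build();
      CompletableFuture<Void> future = table.put(new Put(Bytes.toBytes("row"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      try {
        future.get();
      } catch (ExecutionException e) {
        // The put's failure (e.g. an rpc timeout) arrives as the cause of the future.
        System.err.println("Put failed: " + e.getCause());
      }
    }
  }
}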
Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From class TestShellExecEndpointCoprocessor, method testShellExecBackground.
@Test
public void testShellExecBackground() throws IOException {
  final AsyncConnection conn = connectionRule.getConnection();
  final AsyncAdmin admin = conn.getAdmin();
  final File testDataDir = ensureTestDataDirExists(miniClusterRule.getTestingUtility());
  final File testFile = new File(testDataDir, "shell_exec_background.txt");
  assertTrue(testFile.createNewFile());
  assertEquals(0, testFile.length());
  final String command = "echo \"hello world\" >> " + testFile.getAbsolutePath();
  final ShellExecRequest req =
    ShellExecRequest.newBuilder().setCommand(command).setAwaitResponse(false).build();
  final ShellExecResponse resp = admin
    .<ShellExecService.Stub, ShellExecResponse> coprocessorService(ShellExecService::newStub,
      (stub, controller, callback) -> stub.shellExec(controller, req, callback))
    .join();
  assertFalse("the response from a background task should have no exit code", resp.hasExitCode());
  assertFalse("the response from a background task should have no stdout", resp.hasStdout());
  assertFalse("the response from a background task should have no stderr", resp.hasStderr());
  Waiter.waitFor(conn.getConfiguration(), 5_000, () -> testFile.length() > 0);
  final String content = new String(Files.readAllBytes(testFile.toPath())).trim();
  assertEquals("hello world", content);
}
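The general shape of a master coprocessor endpoint call through AsyncAdmin is worth isolating from the test specifics. In the sketch below, MyService, MyRequest, and MyResponse stand in for your own protobuf-generated service types and are hypothetical; only the coprocessorService call pattern itself is taken from the test above.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class CoprocessorCallSketch {
  // MyService/MyRequest/MyResponse are hypothetical protobuf-generated types.
  static CompletableFuture<MyResponse> callEndpoint(AsyncAdmin admin, MyRequest req) {
    // The first argument builds a client stub over an RpcChannel; the second adapts
    // the protobuf async call style (stub, controller, callback) into a future.
    return admin.<MyService.Stub, MyResponse> coprocessorService(MyService::newStub,
      (stub, controller, callback) -> stub.doWork(controller, req, callback));
  }
}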
Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From class TestSyncReplicationActive, method testActive.
@Test
public void testActive() throws Exception {
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.STANDBY);
  UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.ACTIVE);
  // confirm that a peer in state A (ACTIVE) will reject replication requests
  verifyReplicationRequestRejection(UTIL1, true);
  verifyReplicationRequestRejection(UTIL2, false);
  UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
  write(UTIL1, 0, 100);
  Thread.sleep(2000);
  // the peer is disabled, so no data has been replicated
  verifyNotReplicatedThroughRegion(UTIL2, 0, 100);
  // ensure that there is no cluster id in the remote log entries
  verifyNoClusterIdInRemoteLog(UTIL2, REMOTE_WAL_DIR2, PEER_ID);
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.DOWNGRADE_ACTIVE);
  // confirm that a peer in state DA (DOWNGRADE_ACTIVE) will reject replication requests
  verifyReplicationRequestRejection(UTIL2, true);
  // confirm that the data is there after we convert the peer to DA
  verify(UTIL2, 0, 100);
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
    AsyncTable<?> table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1).build();
    CompletableFuture<Void> future =
      table.put(new Put(Bytes.toBytes(1000)).addColumn(CF, CQ, Bytes.toBytes(1000)));
    Thread.sleep(2000);
    // the put should hang on log rolling
    assertFalse(future.isDone());
    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
      SyncReplicationState.STANDBY);
    try {
      future.get();
      fail("should fail because the wal is closing");
    } catch (ExecutionException e) {
      // expected
      assertThat(e.getCause().getMessage(), containsString("only marker edit is allowed"));
    }
  }
  // confirm that the data has not been persisted
  HRegion region = UTIL1.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0);
  assertTrue(region.get(new Get(Bytes.toBytes(1000))).isEmpty());
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.ACTIVE);
  writeAndVerifyReplication(UTIL2, UTIL1, 100, 200);
  // shut down the first cluster completely
  UTIL1.shutdownMiniCluster();
  // confirm that we can convert to DA even if the remote slave cluster is down
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.DOWNGRADE_ACTIVE);
  // confirm that a peer in state DA will reject replication requests
  verifyReplicationRequestRejection(UTIL2, true);
  write(UTIL2, 200, 300);
}
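The admin calls driving these sync-replication tests follow a small, repeatable sequence. A minimal sketch of the state transitions from an ordinary client, assuming an HBase release that ships synchronous replication (HBASE-19064) and a classpath hbase-site.xml; the peer id is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.SyncReplicationState;

public class SyncReplicationTransitionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Admin admin = conn.getAdmin()) {
      String peerId = "1"; // illustrative peer id
      // Demote this cluster's peer so it can serve reads and writes locally
      // while the other side is promoted to ACTIVE via its own Admin.
      admin.transitReplicationPeerSyncReplicationState(peerId,
        SyncReplicationState.DOWNGRADE_ACTIVE);
      // Replication itself is toggled independently of the sync state.
      admin.disableReplicationPeer(peerId);
      admin.enableReplicationPeer(peerId);
    }
  }
}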