Use of org.apache.hadoop.hbase.client.AsyncAdmin in project hbase by apache: class CoprocClusterManager, method exec.
@Override
protected Pair<Integer, String> exec(String hostname, ServiceType service, String... cmd)
    throws IOException {
  if (!supportedServices.contains(service)) {
    throw unsupportedServiceType(service);
  }
  // We only support actions vs. Master or Region Server processes. We're issuing those actions
  // via the coprocessor that's running within those processes. Thus, there's no support for
  // honoring the configured service user.
  final String command = StringUtils.join(cmd, " ");
  LOG.info("Executing remote command: {}, hostname:{}", command, hostname);

  try (final AsyncConnection conn = ConnectionFactory.createAsyncConnection(getConf()).join()) {
    final AsyncAdmin admin = conn.getAdmin();
    final ShellExecRequest req =
      ShellExecRequest.newBuilder().setCommand(command).setAwaitResponse(false).build();

    final ShellExecResponse resp;
    switch (service) {
      case HBASE_MASTER:
        // What happens if the intended action was killing a backup master? Right now we have
        // no `RestartBackupMasterAction` so it's probably fine.
        resp = masterExec(admin, req);
        break;
      case HBASE_REGIONSERVER:
        final ServerName targetHost = resolveRegionServerName(admin, hostname);
        resp = regionServerExec(admin, req, targetHost);
        break;
      default:
        throw new RuntimeException("should not happen");
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Executed remote command: {}, exit code:{}, output:{}",
        command, resp.getExitCode(), resp.getStdout());
    } else {
      LOG.info("Executed remote command: {}, exit code:{}", command, resp.getExitCode());
    }
    return new Pair<>(resp.getExitCode(), resp.getStdout());
  }
}
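
The masterExec, regionServerExec, and resolveRegionServerName helpers are not shown in this snippet. Below is a minimal sketch of how such helpers could be written against the public AsyncAdmin API, assuming the same ShellExecService endpoint used in the next example; the actual CoprocClusterManager implementations may differ.

// Sketch only: targets the ShellExec coprocessor endpoint through AsyncAdmin.
// The two-argument coprocessorService overload runs on the active Master, the
// three-argument overload runs on the given RegionServer.
private ShellExecResponse masterExec(AsyncAdmin admin, ShellExecRequest req) {
  return admin.<ShellExecService.Stub, ShellExecResponse> coprocessorService(
    ShellExecService::newStub,
    (stub, controller, callback) -> stub.shellExec(controller, req, callback)).join();
}

private ShellExecResponse regionServerExec(AsyncAdmin admin, ShellExecRequest req,
  ServerName target) {
  return admin.<ShellExecService.Stub, ShellExecResponse> coprocessorService(
    ShellExecService::newStub,
    (stub, controller, callback) -> stub.shellExec(controller, req, callback), target).join();
}

private ServerName resolveRegionServerName(AsyncAdmin admin, String hostname) {
  // Assumption: the hostname passed to exec() matches a registered RegionServer.
  return admin.getRegionServers().join().stream()
    .filter(sn -> sn.getHostname().equals(hostname)).findFirst()
    .orElseThrow(() -> new RuntimeException("No RegionServer found on host " + hostname));
}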
Use of org.apache.hadoop.hbase.client.AsyncAdmin in project hbase by apache: class TestShellExecEndpointCoprocessor, method testShellExecForeground.
private void testShellExecForeground(final Consumer<ShellExecRequest.Builder> consumer) {
  final AsyncConnection conn = connectionRule.getConnection();
  final AsyncAdmin admin = conn.getAdmin();
  final String command = "echo -n \"hello world\"";
  final ShellExecRequest.Builder builder = ShellExecRequest.newBuilder().setCommand(command);
  consumer.accept(builder);
  final ShellExecResponse resp = admin
    .<ShellExecService.Stub, ShellExecResponse> coprocessorService(ShellExecService::newStub,
      (stub, controller, callback) -> stub.shellExec(controller, builder.build(), callback))
    .join();
  assertEquals(0, resp.getExitCode());
  assertEquals("hello world", resp.getStdout());
}
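
The call above goes to the Master's ShellExec endpoint. The same request can be sent to a specific RegionServer through the three-argument coprocessorService overload that takes a ServerName; a short sketch, assuming the endpoint is also loaded on the RegionServers and picking an arbitrary live server:

// Sketch: invoke the same endpoint on one RegionServer instead of the Master.
final ServerName target = admin.getRegionServers().join().iterator().next();
final ShellExecResponse rsResp = admin
  .<ShellExecService.Stub, ShellExecResponse> coprocessorService(ShellExecService::newStub,
    (stub, controller, callback) -> stub.shellExec(controller, builder.build(), callback),
    target)
  .join();
assertEquals(0, rsResp.getExitCode());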
Use of org.apache.hadoop.hbase.client.AsyncAdmin in project hbase by apache: class TestServerCrashProcedureStuck, method test.
@Test
public void test() throws Exception {
  RegionServerThread rsThread = null;
  for (RegionServerThread t : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
    if (!t.getRegionServer().getRegions(TABLE_NAME).isEmpty()) {
      rsThread = t;
      break;
    }
  }
  HRegionServer rs = rsThread.getRegionServer();
  RegionInfo hri = rs.getRegions(TABLE_NAME).get(0).getRegionInfo();
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  ProcedureExecutor<MasterProcedureEnv> executor = master.getMasterProcedureExecutor();
  DummyRegionProcedure proc = new DummyRegionProcedure(executor.getEnvironment(), hri);
  long procId = master.getMasterProcedureExecutor().submitProcedure(proc);
  proc.waitUntilArrive();
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
    AsyncAdmin admin = conn.getAdmin();
    CompletableFuture<Void> future = admin.move(hri.getRegionName());
    rs.abort("For testing!");
    UTIL.waitFor(30000,
      () -> executor.getProcedures().stream()
        .filter(p -> p instanceof TransitRegionStateProcedure)
        .map(p -> (TransitRegionStateProcedure) p)
        .anyMatch(p -> Bytes.equals(hri.getRegionName(), p.getRegion().getRegionName())));
    proc.resume();
    UTIL.waitFor(30000, () -> executor.isFinished(procId));
    // see whether the move region procedure can finish properly
    future.get(30, TimeUnit.SECONDS);
  }
}
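
The single-argument admin.move(regionName) used above lets the Master pick the destination server. If the test had to force the region onto a particular server instead, AsyncAdmin also provides move(regionName, destServerName); a brief sketch, reusing admin, rs, and hri from the example:

// Pick any live RegionServer other than the one being aborted (illustrative only).
ServerName destination = admin.getRegionServers().get().stream()
  .filter(sn -> !sn.equals(rs.getServerName())).findFirst()
  .orElseThrow(IllegalStateException::new);
admin.move(hri.getRegionName(), destination).get(30, TimeUnit.SECONDS);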
Use of org.apache.hadoop.hbase.client.AsyncAdmin in project hbase by apache: class TestClientClusterMetrics, method testAsyncClient.
@Test
public void testAsyncClient() throws Exception {
  try (AsyncConnection asyncConnect =
    ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
    AsyncAdmin asyncAdmin = asyncConnect.getAdmin();
    CompletableFuture<ClusterMetrics> originFuture = asyncAdmin.getClusterMetrics();
    CompletableFuture<ClusterMetrics> defaultsFuture =
      asyncAdmin.getClusterMetrics(EnumSet.allOf(Option.class));
    ClusterMetrics origin = originFuture.get();
    ClusterMetrics defaults = defaultsFuture.get();
    Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
    Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
    Assert.assertEquals(origin.getAverageLoad(), defaults.getAverageLoad(), 0);
    Assert.assertEquals(origin.getBackupMasterNames().size(),
      defaults.getBackupMasterNames().size());
    Assert.assertEquals(origin.getDeadServerNames().size(), defaults.getDeadServerNames().size());
    Assert.assertEquals(origin.getRegionCount(), defaults.getRegionCount());
    Assert.assertEquals(origin.getLiveServerMetrics().size(),
      defaults.getLiveServerMetrics().size());
    Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
    Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
    origin.getTableRegionStatesCount().forEach(((tableName, regionStatesCount) -> {
      RegionStatesCount defaultRegionStatesCount =
        defaults.getTableRegionStatesCount().get(tableName);
      Assert.assertEquals(defaultRegionStatesCount, regionStatesCount);
    }));
  }
}
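
getClusterMetrics() with no arguments returns the default field set, while the EnumSet overload limits the request to the fields a caller actually needs. A minimal sketch of a narrower request, assuming standard ClusterMetrics.Option values and the asyncAdmin from above:

// Fetch only the live and dead server lists; fields outside the requested
// set are not guaranteed to be populated.
ClusterMetrics serverLists = asyncAdmin
  .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)).get();
serverLists.getLiveServerMetrics().forEach((serverName, serverMetrics) ->
  System.out.println(serverName + " hosts " + serverMetrics.getRegionMetrics().size() + " regions"));
System.out.println("dead servers: " + serverLists.getDeadServerNames().size());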
Use of org.apache.hadoop.hbase.client.AsyncAdmin in project hbase by apache: class TestClearRegionBlockCache, method testClearBlockCacheFromAsyncAdmin.
@Test
public void testClearBlockCacheFromAsyncAdmin() throws Exception {
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get()) {
    AsyncAdmin admin = conn.getAdmin();
    BlockCache blockCache1 = rs1.getBlockCache().get();
    BlockCache blockCache2 = rs2.getBlockCache().get();
    long initialBlockCount1 = blockCache1.getBlockCount();
    long initialBlockCount2 = blockCache2.getBlockCount();

    // scan will cause blocks to be added in BlockCache
    scanAllRegionsForRS(rs1);
    assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
      HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
    scanAllRegionsForRS(rs2);
    assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
      HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));

    CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get();
    assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
      + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    assertEquals(initialBlockCount1, blockCache1.getBlockCount());
    assertEquals(initialBlockCount2, blockCache2.getBlockCount());
  }
}
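
Since clearBlockCache returns a CompletableFuture, the eviction can also be chained without blocking on get(). A small sketch, using only the CacheEvictionStats accessor already shown above:

admin.clearBlockCache(TABLE_NAME)
  .thenAccept(evictionStats ->
    System.out.println("evicted " + evictionStats.getEvictedBlocks() + " blocks"))
  .exceptionally(error -> {
    // Failures surface here instead of being thrown from a blocking get().
    error.printStackTrace();
    return null;
  });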