Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From the class TestShellExecEndpointCoprocessor, method testShellExecForeground:
private void testShellExecForeground(final Consumer<ShellExecRequest.Builder> consumer) {
  final AsyncConnection conn = connectionRule.getConnection();
  final AsyncAdmin admin = conn.getAdmin();
  final String command = "echo -n \"hello world\"";
  final ShellExecRequest.Builder builder = ShellExecRequest.newBuilder().setCommand(command);
  consumer.accept(builder);
  // Invoke the ShellExec coprocessor endpoint through the AsyncAdmin and block on the result.
  final ShellExecResponse resp = admin
    .<ShellExecService.Stub, ShellExecResponse> coprocessorService(ShellExecService::newStub,
      (stub, controller, callback) -> stub.shellExec(controller, builder.build(), callback))
    .join();
  assertEquals(0, resp.getExitCode());
  assertEquals("hello world", resp.getStdout());
}
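The AsyncConnection in this test comes from a shared JUnit connection rule; outside of a test harness one is typically obtained from ConnectionFactory. A minimal sketch, assuming a standard hbase-site.xml is on the classpath (the table listing at the end is only illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() blocks until the connection is ready.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // join() blocks on the async listTableNames call and prints the user table names.
      admin.listTableNames().join().forEach(name -> System.out.println(name.getNameAsString()));
    }
  }
}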
Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From the class TestSplitMerge, method test:
@Test
public void test() throws Exception {
  TableName tableName = TableName.valueOf("SplitMerge");
  byte[] family = Bytes.toBytes("CF");
  TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
  // The table starts with one split point at 1, so splitting at 2 should yield three regions.
  UTIL.getAdmin().createTable(td, new byte[][] { Bytes.toBytes(1) });
  UTIL.waitTableAvailable(tableName);
  UTIL.getAdmin().split(tableName, Bytes.toBytes(2));
  UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      return UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3;
    }

    @Override
    public String explainFailure() throws Exception {
      return "Split has not finished yet";
    }
  });
  UTIL.waitUntilNoRegionsInTransition();
  RegionInfo regionA = null;
  RegionInfo regionB = null;
  for (RegionInfo region : UTIL.getAdmin().getRegions(tableName)) {
    if (region.getStartKey().length == 0) {
      regionA = region;
    } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) {
      regionB = region;
    }
  }
  assertNotNull(regionA);
  assertNotNull(regionB);
  UTIL.getAdmin().mergeRegionsAsync(regionA.getRegionName(), regionB.getRegionName(), false)
    .get(30, TimeUnit.SECONDS);
  assertEquals(2, UTIL.getAdmin().getRegions(tableName).size());
  ServerName expected = UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName();
  assertEquals(expected, UTIL.getConnection().getRegionLocator(tableName)
    .getRegionLocation(Bytes.toBytes(1), true).getServerName());
  // The async connection's region locator should report the same location as the blocking one.
  try (AsyncConnection asyncConn =
    ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
    assertEquals(expected, asyncConn.getRegionLocator(tableName)
      .getRegionLocation(Bytes.toBytes(1), true).get().getServerName());
  }
}
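The location checks above block on get(); because the async locator returns a CompletableFuture, the same lookup can also be chained without blocking. A minimal sketch, assuming an already-open AsyncConnection and the same "SplitMerge" table and row key (the helper name is hypothetical):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.util.Bytes;

final class RegionLocationSketch {
  // Hypothetical helper: resolve the server hosting row 1 without blocking the caller.
  static CompletableFuture<Void> printRowLocation(AsyncConnection conn) {
    return conn.getRegionLocator(TableName.valueOf("SplitMerge"))
      .getRegionLocation(Bytes.toBytes(1), true)
      .thenAccept(loc -> System.out.println("row 1 is served by " + loc.getServerName()));
  }
}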
Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From the class TestServerCrashProcedureStuck, method test:
@Test
public void test() throws Exception {
  RegionServerThread rsThread = null;
  for (RegionServerThread t : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
    if (!t.getRegionServer().getRegions(TABLE_NAME).isEmpty()) {
      rsThread = t;
      break;
    }
  }
  HRegionServer rs = rsThread.getRegionServer();
  RegionInfo hri = rs.getRegions(TABLE_NAME).get(0).getRegionInfo();
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  ProcedureExecutor<MasterProcedureEnv> executor = master.getMasterProcedureExecutor();
  // Hold the region with a dummy procedure so the move requested below cannot make progress yet.
  DummyRegionProcedure proc = new DummyRegionProcedure(executor.getEnvironment(), hri);
  long procId = master.getMasterProcedureExecutor().submitProcedure(proc);
  proc.waitUntilArrive();
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
    AsyncAdmin admin = conn.getAdmin();
    CompletableFuture<Void> future = admin.move(hri.getRegionName());
    rs.abort("For testing!");
    UTIL.waitFor(30000,
      () -> executor.getProcedures().stream()
        .filter(p -> p instanceof TransitRegionStateProcedure)
        .map(p -> (TransitRegionStateProcedure) p)
        .anyMatch(p -> Bytes.equals(hri.getRegionName(), p.getRegion().getRegionName())));
    proc.resume();
    UTIL.waitFor(30000, () -> executor.isFinished(procId));
    // see whether the move region procedure can finish properly
    future.get(30, TimeUnit.SECONDS);
  }
}
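The move above uses AsyncAdmin.move(byte[]), which lets the master pick a destination server; an overload taking a ServerName targets a specific server instead. A minimal sketch of that variant, assuming an open AsyncAdmin, a known region name, and a hypothetical helper name:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

final class MoveRegionSketch {
  // Hypothetical helper: move a region to a specific server; the future completes when the move finishes.
  static CompletableFuture<Void> moveTo(AsyncAdmin admin, byte[] regionName, ServerName dest) {
    return admin.move(regionName, dest);
  }
}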
Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From the class TestSyncReplicationMoreLogsInLocalCopyToRemote, method testSplitLog:
@Test
public void testSplitLog() throws Exception {
  UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.STANDBY);
  UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.ACTIVE);
  HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
  DualAsyncFSWALForTest wal =
    (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
  wal.setRemoteBroken();
  try (AsyncConnection conn =
    ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
    AsyncTable<?> table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1).build();
    try {
      table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0))).get();
      fail("Should fail since the rs will crash and we will not retry");
    } catch (ExecutionException e) {
      // expected
      LOG.info("Expected error:", e);
    }
  }
  UTIL1.waitFor(60000, new ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      try (Table table = UTIL1.getConnection().getTable(TABLE_NAME)) {
        return table.exists(new Get(Bytes.toBytes(0)));
      }
    }

    @Override
    public String explainFailure() throws Exception {
      return "The row is still not available";
    }
  });
  UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
    SyncReplicationState.DOWNGRADE_ACTIVE);
  // We should have copied the local log to remote, so we should be able to get the value
  try (Table table = UTIL2.getConnection().getTable(TABLE_NAME)) {
    assertEquals(0, Bytes.toInt(table.get(new Get(Bytes.toBytes(0))).getValue(CF, CQ)));
  }
}
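The final verification goes through the blocking Table API; the same read could just as well be issued through the AsyncConnection. A minimal sketch, assuming an open AsyncConnection named conn plus the test's TABLE_NAME, CF and CQ constants:

// Async equivalent of the final check: read row 0 and decode the stored int.
int value = conn.getTable(TABLE_NAME)
  .get(new Get(Bytes.toBytes(0)))
  .thenApply(result -> Bytes.toInt(result.getValue(CF, CQ)))
  .join();
assertEquals(0, value);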
Use of org.apache.hadoop.hbase.client.AsyncConnection in project hbase by apache.
From the class ClearUserNamespacesAndTablesRule, method before:
@Override
protected void before() throws Throwable {
  final AsyncConnection connection = Objects.requireNonNull(connectionSupplier.get());
  admin = connection.getAdmin();
  clearTablesAndNamespaces().join();
}
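clearTablesAndNamespaces() itself is not shown in this excerpt. A rough sketch of the table-dropping half of such a cleanup, written against AsyncAdmin (hypothetical, not the actual rule implementation):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncAdmin;

final class ClearTablesSketch {
  // Hypothetical sketch: disable and delete every user table, returning a future that
  // completes once all deletions have finished.
  static CompletableFuture<Void> clearUserTables(AsyncAdmin admin) {
    return admin.listTableNames().thenCompose(tableNames -> CompletableFuture.allOf(
      tableNames.stream()
        .filter(name -> !name.isSystemTable())
        .map(name -> admin.disableTable(name).thenCompose(v -> admin.deleteTable(name)))
        .toArray(CompletableFuture[]::new)));
  }
}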