Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class AsyncHBaseAdmin, method getTableDescriptor.
@Override
public CompletableFuture<HTableDescriptor> getTableDescriptor(TableName tableName) {
  CompletableFuture<HTableDescriptor> future = new CompletableFuture<>();
  this.<List<TableSchema>> newMasterCaller()
      .action((controller, stub) ->
        this.<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>> call(
          controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName),
          (s, c, req, done) -> s.getTableDescriptors(c, req, done),
          (resp) -> resp.getTableSchemaList()))
      .call()
      .whenComplete((tableSchemas, error) -> {
        if (error != null) {
          future.completeExceptionally(error);
          return;
        }
        if (!tableSchemas.isEmpty()) {
          future.complete(ProtobufUtil.convertToHTableDesc(tableSchemas.get(0)));
        } else {
          future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
        }
      });
  return future;
}
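The future resolves on the RPC callback rather than blocking the caller. A minimal consumption sketch; the admin handle, table name, and printouts are illustrative assumptions, not part of the HBase source:

// Hypothetical caller: react when the descriptor future resolves.
// "admin" is assumed to be an AsyncHBaseAdmin obtained elsewhere.
TableName users = TableName.valueOf("users");
admin.getTableDescriptor(users).whenComplete((desc, err) -> {
  if (err != null) {
    // A TableNotFoundException lands here when the schema list comes back empty.
    System.err.println("No descriptor for " + users + ": " + err);
  } else {
    System.out.println(users + " has " + desc.getFamilies().size() + " column families");
  }
});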
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class TestClientNoCluster, method cycle.
/**
 * Code for each 'client' to run.
 *
 * @param id client id; seeds this client's Random so reruns walk the same key sequence
 * @param c configuration supplying the hbase.test.* knobs read below
 * @param sharedConnection the Connection shared by all clients
 * @throws IOException if a get or a buffered mutation fails
 */
static void cycle(int id, final Configuration c, final Connection sharedConnection) throws IOException {
  long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000);
  long startTime = System.currentTimeMillis();
  final int printInterval = 100000;
  Random rd = new Random(id);
  boolean get = c.getBoolean("hbase.test.do.gets", false);
  TableName tableName = TableName.valueOf(BIG_USER_TABLE);
  if (get) {
    try (Table table = sharedConnection.getTable(tableName)) {
      Stopwatch stopWatch = new Stopwatch();
      stopWatch.start();
      for (int i = 0; i < namespaceSpan; i++) {
        byte[] b = format(rd.nextLong());
        Get g = new Get(b);
        table.get(g);
        if (i % printInterval == 0) {
          LOG.info("Get " + printInterval + "/" + stopWatch.elapsedMillis());
          stopWatch.reset();
          stopWatch.start();
        }
      }
      LOG.info("Finished a cycle getting " + namespaceSpan + " in "
          + (System.currentTimeMillis() - startTime) + "ms");
    }
  } else {
    try (BufferedMutator mutator = sharedConnection.getBufferedMutator(tableName)) {
      Stopwatch stopWatch = new Stopwatch();
      stopWatch.start();
      for (int i = 0; i < namespaceSpan; i++) {
        byte[] b = format(rd.nextLong());
        Put p = new Put(b);
        p.addColumn(HConstants.CATALOG_FAMILY, b, b);
        mutator.mutate(p);
        if (i % printInterval == 0) {
          LOG.info("Put " + printInterval + "/" + stopWatch.elapsedMillis());
          stopWatch.reset();
          stopWatch.start();
        }
      }
      LOG.info("Finished a cycle putting " + namespaceSpan + " in "
          + (System.currentTimeMillis() - startTime) + "ms");
    }
  }
}
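The two hbase.test.* properties above fully parameterize a run. A minimal driver sketch with illustrative values; the enclosing test normally supplies a simulated no-cluster connection, so the plain ConnectionFactory call here is only for shape:

// Illustrative driver: property names are taken from cycle() above,
// the values and single-client invocation are made up for the example.
Configuration c = HBaseConfiguration.create();
c.setLong("hbase.test.namespace.span", 50000); // rows per cycle
c.setBoolean("hbase.test.do.gets", true);      // exercise the read path instead of puts
try (Connection conn = ConnectionFactory.createConnection(c)) {
  cycle(0, c, conn); // id 0 seeds the Random, so reruns walk the same keys
}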
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class TestSnapshotFromAdmin, method testBackoffLogic.
/**
 * Test that the logic for doing 'correct' back-off, based on exponential increase and the
 * max time passed from the server, ensures the correct overall wait for the snapshot to
 * finish.
 * @throws Exception if the mocked snapshot fails in an unexpected way
 */
@Test(timeout = 60000)
public void testBackoffLogic() throws Exception {
  final int pauseTime = 100;
  final int maxWaitTime = HConstants.RETRY_BACKOFF[HConstants.RETRY_BACKOFF.length - 1] * pauseTime;
  final int numRetries = HConstants.RETRY_BACKOFF.length;
  // calculate the wait time, if we just do straight backoff (ignoring the expected time from
  // master)
  long ignoreExpectedTime = 0;
  for (int i = 0; i < HConstants.RETRY_BACKOFF.length; i++) {
    ignoreExpectedTime += HConstants.RETRY_BACKOFF[i] * pauseTime;
  }
  // the correct wait time, capping at the maxTime/tries + fudge room
  final long time = pauseTime * 3 + ((maxWaitTime / numRetries) * 3) + 300;
assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time " + "- further testing won't prove anything.", time < ignoreExpectedTime);
  // setup the mocks
  ConnectionImplementation mockConnection = Mockito.mock(ConnectionImplementation.class);
  Configuration conf = HBaseConfiguration.create();
  // setup the conf to match the expected properties
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries);
  conf.setLong("hbase.client.pause", pauseTime);
  // mock the master admin to our mock
  MasterKeepAliveConnection mockMaster = Mockito.mock(MasterKeepAliveConnection.class);
  Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
  Mockito.when(mockConnection.getKeepAliveMasterService()).thenReturn(mockMaster);
  // we need a real retrying caller
  RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf);
  RpcControllerFactory controllerFactory = Mockito.mock(RpcControllerFactory.class);
  Mockito.when(controllerFactory.newController()).thenReturn(Mockito.mock(HBaseRpcController.class));
  Mockito.when(mockConnection.getRpcRetryingCallerFactory()).thenReturn(callerFactory);
  Mockito.when(mockConnection.getRpcControllerFactory()).thenReturn(controllerFactory);
  // set the max wait time for the snapshot to complete
  SnapshotResponse response = SnapshotResponse.newBuilder().setExpectedTimeout(maxWaitTime).build();
  Mockito.when(mockMaster.snapshot((RpcController) Mockito.any(), Mockito.any(SnapshotRequest.class)))
      .thenReturn(response);
  // setup the response
  IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
  builder.setDone(false);
  // report 'not done' for the first five polls, then success on the sixth
  Mockito.when(mockMaster.isSnapshotDone((RpcController) Mockito.any(), Mockito.any(IsSnapshotDoneRequest.class)))
      .thenReturn(builder.build(), builder.build(), builder.build(), builder.build(), builder.build(),
        builder.setDone(true).build());
  // setup the admin and run the test
  Admin admin = new HBaseAdmin(mockConnection);
  String snapshot = "snapshot";
  final TableName table = TableName.valueOf(name.getMethodName());
  // get start time
  long start = System.currentTimeMillis();
  admin.snapshot(snapshot, table);
  long finish = System.currentTimeMillis();
  long elapsed = (finish - start);
  assertTrue("Elapsed time:" + elapsed + " is more than expected max:" + time, elapsed <= time);
  admin.close();
}
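For concreteness, here is the arithmetic the assertion relies on, assuming HConstants.RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200} (the value in recent HBase releases; the numbers below are derived here, not quoted from the test):

// Worked numbers for the capped-vs-uncapped bound above.
int pauseTime = 100;
int[] backoff = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};
int maxWaitTime = backoff[backoff.length - 1] * pauseTime;  // 200 * 100 = 20000 ms
long uncapped = 0;
for (int step : backoff) {
  uncapped += step * pauseTime;                             // totals 88100 ms
}
long capped = pauseTime * 3 + ((maxWaitTime / backoff.length) * 3) + 300; // 5214 ms
// 5214 ms < 88100 ms, so the capped wait really is tighter than straight backoff.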
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class MultiThreadedClientExample, method run.
@Override
public int run(String[] args) throws Exception {
  if (args.length < 1 || args.length > 2) {
    System.out.println("Usage: " + this.getClass().getName() + " tableName [num_operations]");
    return -1;
  }
  final TableName tableName = TableName.valueOf(args[0]);
  int numOperations = DEFAULT_NUM_OPERATIONS;
  // the second arg is the number of operations to send.
  if (args.length == 2) {
    numOperations = Integer.parseInt(args[1]);
  }
  // Threads for the client only.
  //
  // We don't want to mix hbase and business logic.
  //
  ExecutorService service = new ForkJoinPool(threads * 2);
  // Create two different connections showing how it's possible to
  // separate different types of requests onto different connections
  final Connection writeConnection = ConnectionFactory.createConnection(getConf(), service);
  final Connection readConnection = ConnectionFactory.createConnection(getConf(), service);
  // At this point the entire cache for the region locations is full.
  // Only do this if the number of regions in a table is easy to fit into memory.
  //
  // If you are interacting with more than 25k regions on a client then it's probably not good
  // to do this at all.
  warmUpConnectionCache(readConnection, tableName);
  warmUpConnectionCache(writeConnection, tableName);
  List<Future<Boolean>> futures = new ArrayList<>(numOperations);
  for (int i = 0; i < numOperations; i++) {
    double r = ThreadLocalRandom.current().nextDouble();
    Future<Boolean> f;
    // These callables are meant to represent real work done by your application.
    if (r < .30) {
      f = internalPool.submit(new WriteExampleCallable(writeConnection, tableName));
    } else if (r < .50) {
      f = internalPool.submit(new SingleWriteExampleCallable(writeConnection, tableName));
    } else {
      // Reads go to the dedicated read connection.
      f = internalPool.submit(new ReadExampleCallable(readConnection, tableName));
    }
    futures.add(f);
  }
  // Wait a long time for all the reads/writes to complete
  for (Future<Boolean> f : futures) {
    f.get(10, TimeUnit.MINUTES);
  }
  // Clean up after ourselves
  internalPool.shutdownNow();
  service.shutdownNow();
  return 0;
}
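A sketch of launching this Tool via the standard Hadoop ToolRunner pattern; the main method, table name, and operation count below are illustrative, not quoted from the example:

// Illustrative launcher: ToolRunner parses generic Hadoop options and then
// invokes run(String[]) above. The table name and count are made-up values.
public static void main(String[] args) throws Exception {
  int exitCode = ToolRunner.run(HBaseConfiguration.create(),
      new MultiThreadedClientExample(), new String[] { "example_table", "500000" });
  System.exit(exitCode);
}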
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class RSGroupInfoManagerImpl, method flushConfigTable.
private synchronized Map<TableName, String> flushConfigTable(Map<String, RSGroupInfo> groupMap) throws IOException {
  Map<TableName, String> newTableMap = Maps.newHashMap();
  List<Mutation> mutations = Lists.newArrayList();
  // populate deletes: groups present at the last flush but gone from groupMap now
  for (String groupName : prevRSGroups) {
    if (!groupMap.containsKey(groupName)) {
      Delete d = new Delete(Bytes.toBytes(groupName));
      mutations.add(d);
    }
  }
  // populate puts: serialize every current group into its own row
  for (RSGroupInfo rsGroupInfo : groupMap.values()) {
    RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(rsGroupInfo);
    Put p = new Put(Bytes.toBytes(rsGroupInfo.getName()));
    p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
    mutations.add(p);
    for (TableName entry : rsGroupInfo.getTables()) {
      newTableMap.put(entry, rsGroupInfo.getName());
    }
  }
  if (mutations.size() > 0) {
    multiMutate(mutations);
  }
  return newTableMap;
}
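A hedged walk-through with made-up group and table names; the RSGroupInfo constructor and addTable call are assumed from its use above and may differ by HBase version:

// Illustrative scenario for flushConfigTable(). Suppose the previous flush
// recorded prevRSGroups = {"old_group", "web"} and groupMap now holds only "web":
Map<String, RSGroupInfo> groupMap = new HashMap<>();
RSGroupInfo web = new RSGroupInfo("web");
web.addTable(TableName.valueOf("t1"));
groupMap.put("web", web);
// flushConfigTable(groupMap) then queues:
//   Delete(Bytes.toBytes("old_group"))  -- the group vanished from groupMap
//   Put(Bytes.toBytes("web"))           -- proto bytes under META_FAMILY_BYTES:META_QUALIFIER_BYTES
// and returns {t1 -> "web"}, letting callers rebuild the table-to-group index.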