Use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
From the class MetaTableAccessor, method getRegionLocation.
/**
 * Looks up the {@link HRegionLocation} for the given region in hbase:meta.
 * <p>
 * Fetches the catalog family from the region's meta row and resolves the
 * location that corresponds to the region's replica id.
 * @param connection connection we're using
 * @param regionInfo region information
 * @return HRegionLocation for the given region
 * @throws IOException if reading the meta table fails
 */
public static HRegionLocation getRegionLocation(Connection connection, HRegionInfo regionInfo) throws IOException {
  byte[] metaRow = getMetaKeyForRegion(regionInfo);
  Get metaGet = new Get(metaRow);
  metaGet.addFamily(HConstants.CATALOG_FAMILY);
  Result result = get(getMetaHTable(connection), metaGet);
  return getRegionLocation(result, regionInfo, regionInfo.getReplicaId());
}
Use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
From the class AsyncMetaTableAccessor, method getRegion.
/**
 * Asynchronously resolves the {@link HRegionInfo} and hosting {@link ServerName}
 * for the region identified by {@code regionName}.
 * <p>
 * If the name parses as a full region name, the corresponding meta row key is
 * used; otherwise the raw bytes are treated as the row (e.g. an encoded name).
 * The returned future completes with {@code null} when no location is found.
 * @param metaTable the meta table to query
 * @param regionName full or encoded region name
 * @return future holding the region info / server name pair, or null if absent
 */
public static CompletableFuture<Pair<HRegionInfo, ServerName>> getRegion(RawAsyncTable metaTable, byte[] regionName) {
  CompletableFuture<Pair<HRegionInfo, ServerName>> future = new CompletableFuture<>();
  // Default to the raw bytes; replaced by the meta row key when the name parses.
  byte[] metaRow = regionName;
  HRegionInfo parsed = null;
  try {
    parsed = MetaTableAccessor.parseRegionInfoFromRegionName(regionName);
    metaRow = MetaTableAccessor.getMetaKeyForRegion(parsed);
  } catch (Exception ignored) {
    // Ignore if regionName is an encoded region name.
  }
  final HRegionInfo hri = parsed;
  Get metaGet = new Get(metaRow).addFamily(HConstants.CATALOG_FAMILY);
  metaTable.get(metaGet).whenComplete((result, error) -> {
    if (error != null) {
      future.completeExceptionally(error);
      return;
    }
    RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
    // Without a parsed region, fall back to the default (primary) replica.
    int replicaId = hri == null ? 0 : hri.getReplicaId();
    HRegionLocation location = locations == null ? null : locations.getRegionLocation(replicaId);
    if (location == null) {
      future.complete(null);
    } else {
      future.complete(new Pair<>(location.getRegionInfo(), location.getServerName()));
    }
  });
  return future;
}
Use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
From the class AsyncMetaTableAccessor, method getTableState.
/**
 * Asynchronously fetches the {@link TableState} for the given table from meta.
 * <p>
 * Restricts the read to cells written no later than the current time, then
 * decodes the state column from the returned result.
 * @param metaTable the meta table to query
 * @param tableName table whose state is wanted
 * @return future holding the (possibly absent) table state
 */
public static CompletableFuture<Optional<TableState>> getTableState(RawAsyncTable metaTable, TableName tableName) {
  CompletableFuture<Optional<TableState>> future = new CompletableFuture<>();
  Get stateGet = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn());
  long now = EnvironmentEdgeManager.currentTime();
  try {
    // Only consider cells written up to "now".
    stateGet.setTimeRange(0, now);
  } catch (IOException ioe) {
    future.completeExceptionally(ioe);
    return future;
  }
  metaTable.get(stateGet).whenComplete((result, error) -> {
    if (error != null) {
      future.completeExceptionally(error);
      return;
    }
    try {
      future.complete(getTableState(result));
    } catch (IOException e) {
      future.completeExceptionally(e);
    }
  });
  return future;
}
Use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
From the class TestRowProcessorEndpoint, method testMultipleRows.
/**
 * Runs many concurrent row-swap processors and verifies that, regardless of
 * whether the final state is swapped, the two rows still hold the original
 * cell counts (just possibly exchanged between the row keys).
 */
@Test
public void testMultipleRows() throws Throwable {
  prepareTestData();
  failures.set(0);
  final int threadCount = 100;
  concurrentExec(new SwapRowsRunner(), threadCount);
  LOG.debug("row keyvalues:" + stringifyKvs(table.get(new Get(ROW)).listCells()));
  LOG.debug("row2 keyvalues:" + stringifyKvs(table.get(new Get(ROW2)).listCells()));
  int failureCount = failures.get();
  if (failureCount > 0) {
    LOG.debug("We failed " + failureCount + " times during test");
  }
  // An even number of swaps leaves the rows in place; an odd number exchanges them.
  boolean rowsSwapped = swapped;
  byte[] rowHoldingRowData = rowsSwapped ? ROW2 : ROW;
  byte[] rowHoldingRow2Data = rowsSwapped ? ROW : ROW2;
  assertEquals(rowSize, table.get(new Get(rowHoldingRowData)).listCells().size());
  assertEquals(row2Size, table.get(new Get(rowHoldingRow2Data)).listCells().size());
}
Use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
From the class TestRowProcessorEndpoint, method testReadModifyWrite.
/**
 * Runs many concurrent increment processors and checks the final counter
 * equals the number of successful increments plus the one extra increment
 * performed by incrementCounter itself.
 */
@Test
public void testReadModifyWrite() throws Throwable {
  prepareTestData();
  failures.set(0);
  final int threadCount = 100;
  concurrentExec(new IncrementRunner(), threadCount);
  LOG.debug("row keyvalues:" + stringifyKvs(table.get(new Get(ROW)).listCells()));
  int finalCounter = incrementCounter(table);
  int failureCount = failures.get();
  if (failureCount > 0) {
    LOG.debug("We failed " + failureCount + " times during test");
  }
  // threadCount increments were attempted, failureCount of them failed,
  // and incrementCounter contributes one more successful increment.
  assertEquals(threadCount + 1 - failureCount, finalCounter);
}
Aggregations