Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
From the class TestCoprocessorEndpoint, method testCoprocessorServiceNullResponse.
@Test
public void testCoprocessorServiceNullResponse() throws Throwable {
  Table table = util.getConnection().getTable(TEST_TABLE);
  List<HRegionLocation> regions;
  try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) {
    regions = rl.getAllRegionLocations();
  }
  final TestProtos.EchoRequestProto request =
    TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build();
  try {
    // scan: for all regions
    final RpcController controller = new ServerRpcController();
    // test that null results are supported
    Map<byte[], String> results = table.coprocessorService(
      TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], ROWS[ROWS.length - 1],
      new Batch.Call<TestRpcServiceProtos.TestProtobufRpcProto, String>() {
        public String call(TestRpcServiceProtos.TestProtobufRpcProto instance)
            throws IOException {
          CoprocessorRpcUtils.BlockingRpcCallback<TestProtos.EchoResponseProto> callback =
            new CoprocessorRpcUtils.BlockingRpcCallback<>();
          instance.echo(controller, request, callback);
          TestProtos.EchoResponseProto response = callback.get();
          LOG.debug("Batch.Call got result " + response);
          return null;
        }
      });
    for (Map.Entry<byte[], String> e : results.entrySet()) {
      LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey()));
    }
    assertEquals(3, results.size());
    for (HRegionLocation region : regions) {
      RegionInfo info = region.getRegion();
      LOG.info("Region info is " + info.getRegionNameAsString());
      assertTrue(results.containsKey(info.getRegionName()));
      assertNull(results.get(info.getRegionName()));
    }
  } finally {
    table.close();
  }
}
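The containsKey/get lookups above use raw byte[] region names as map keys, which only works because the map returned by Table.coprocessorService compares keys by content (a TreeMap ordered by Bytes.BYTES_COMPARATOR) rather than by array reference. A minimal self-contained sketch of that distinction; the region name string is illustrative:

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.util.Bytes;

public class ByteKeyedMaps {
  public static void main(String[] args) {
    byte[] regionName = Bytes.toBytes("table,,1.abcdef");
    byte[] sameBytes = Bytes.toBytes("table,,1.abcdef");

    // HashMap uses Object.equals/hashCode, so two distinct arrays never match.
    Map<byte[], String> hashed = new HashMap<>();
    hashed.put(regionName, "v");
    System.out.println(hashed.containsKey(sameBytes)); // false

    // A TreeMap ordered by Bytes.BYTES_COMPARATOR compares array contents,
    // so a freshly built region name matches an existing key.
    Map<byte[], String> ordered = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    ordered.put(regionName, "v");
    System.out.println(ordered.containsKey(sameBytes)); // true
  }
}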
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
From the class TestZstdDictionarySplitMerge, method test.
@Test
public void test() throws Exception {
  // Create the table
  final TableName tableName = TableName.valueOf("TestZstdDictionarySplitMerge");
  final byte[] cfName = Bytes.toBytes("info");
  final String dictionaryPath = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict";
  final TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName)
      .setCompressionType(Compression.Algorithm.ZSTD)
      .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath).build())
    .build();
  final Admin admin = TEST_UTIL.getAdmin();
  admin.createTable(td, new byte[][] { Bytes.toBytes(1) });
  TEST_UTIL.waitTableAvailable(tableName);
  // Load some data
  Table t = ConnectionFactory.createConnection(conf).getTable(tableName);
  TEST_UTIL.loadNumericRows(t, cfName, 0, 100_000);
  admin.flush(tableName);
  assertTrue("Dictionary was not loaded", DictionaryCache.contains(dictionaryPath));
  TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
  // Test split procedure
  admin.split(tableName, Bytes.toBytes(50_000));
  TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3;
    }

    @Override
    public String explainFailure() throws Exception {
      return "Split has not finished yet";
    }
  });
  TEST_UTIL.waitUntilNoRegionsInTransition();
  TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
  // Test merge procedure
  RegionInfo regionA = null;
  RegionInfo regionB = null;
  for (RegionInfo region : admin.getRegions(tableName)) {
    if (region.getStartKey().length == 0) {
      regionA = region;
    } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) {
      regionB = region;
    }
  }
  assertNotNull(regionA);
  assertNotNull(regionB);
  admin.mergeRegionsAsync(new byte[][] { regionA.getRegionName(), regionB.getRegionName() },
    false).get(30, TimeUnit.SECONDS);
  assertEquals(2, admin.getRegions(tableName).size());
  ServerName expected = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName();
  assertEquals(expected, TEST_UTIL.getConnection().getRegionLocator(tableName)
    .getRegionLocation(Bytes.toBytes(1), true).getServerName());
  try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) {
    assertEquals(expected, asyncConn.getRegionLocator(tableName)
      .getRegionLocation(Bytes.toBytes(1), true).get().getServerName());
  }
  TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
}
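The merge setup above picks its two regions by boundary keys: the first region of a table always has an empty start key, and two regions are adjacent when one's start key equals the other's end key. A minimal sketch of that adjacency check, assuming an existing Admin handle; the class and method names are illustrative:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public final class AdjacentRegions {
  /**
   * Returns the region that immediately follows {@code first} in the table's
   * key space, or null if {@code first} is the last region.
   */
  static RegionInfo findNeighbor(Admin admin, TableName table, RegionInfo first)
      throws IOException {
    List<RegionInfo> regions = admin.getRegions(table);
    for (RegionInfo candidate : regions) {
      // Adjacent means: candidate starts exactly where 'first' ends. The last
      // region's end key is empty, so it has no following neighbor.
      if (first.getEndKey().length > 0
          && Bytes.equals(candidate.getStartKey(), first.getEndKey())) {
        return candidate;
      }
    }
    return null;
  }
}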
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
From the class RegionsResource, method get.
@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
  MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("GET " + uriInfo.getAbsolutePath());
  }
  servlet.getMetrics().incrementRequests(1);
  try {
    TableName tableName = TableName.valueOf(tableResource.getName());
    if (!tableResource.exists()) {
      throw new TableNotFoundException(tableName);
    }
    TableInfoModel model = new TableInfoModel(tableName.getNameAsString());
    List<HRegionLocation> locs;
    try (Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration());
        RegionLocator locator = connection.getRegionLocator(tableName)) {
      locs = locator.getAllRegionLocations();
    }
    for (HRegionLocation loc : locs) {
      RegionInfo hri = loc.getRegion();
      ServerName addr = loc.getServerName();
      model.add(new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(),
        hri.getStartKey(), hri.getEndKey(), addr.getAddress().toString()));
    }
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    servlet.getMetrics().incrementSucessfulGetRequests(1);
    return response.build();
  } catch (TableNotFoundException e) {
    servlet.getMetrics().incrementFailedGetRequests(1);
    return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT)
      .entity("Not found" + CRLF).build();
  } catch (IOException e) {
    servlet.getMetrics().incrementFailedGetRequests(1);
    return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT)
      .entity("Unavailable" + CRLF).build();
  }
}
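Each TableRegionModel above is populated from a handful of RegionInfo accessors. A minimal sketch that prints the same fields for every region of a table, assuming a running cluster and an already-open Connection; the class name is illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public final class DumpRegions {
  static void dump(Connection conn, TableName table) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        RegionInfo hri = loc.getRegion();
        // Start/end keys are raw bytes; toStringBinary keeps them printable.
        System.out.printf("%s id=%d start=%s end=%s server=%s%n",
          hri.getRegionNameAsString(), hri.getRegionId(),
          Bytes.toStringBinary(hri.getStartKey()),
          Bytes.toStringBinary(hri.getEndKey()),
          loc.getServerName());
      }
    }
  }
}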
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
From the class QuotaObserverChore, method _chore.
void _chore() throws IOException {
  // Get the total set of tables that have quotas defined. Includes table quotas
  // and tables included by namespace quotas.
  TablesWithQuotas tablesWithQuotas = fetchAllTablesWithQuotasDefined();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Found following tables with quotas: " + tablesWithQuotas);
  }
  if (metrics != null) {
    // Set the number of namespaces and tables with quotas defined
    metrics.setNumSpaceQuotas(tablesWithQuotas.getTableQuotaTables().size()
      + tablesWithQuotas.getNamespacesWithQuotas().size());
  }
  // The current "view" of region space use. Used henceforth.
  final Map<RegionInfo, Long> reportedRegionSpaceUse = quotaManager.snapshotRegionSizes();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Using " + reportedRegionSpaceUse.size() + " region space use reports: "
      + reportedRegionSpaceUse);
  }
  // Remove the "old" region reports
  pruneOldRegionReports();
  // Create the stores to track table and namespace snapshots
  initializeSnapshotStores(reportedRegionSpaceUse);
  // Report the number of (non-expired) region size reports
  if (metrics != null) {
    metrics.setNumRegionSizeReports(reportedRegionSpaceUse.size());
  }
  // Filter out tables for which we don't have adequate region space reports yet.
  // It is important that we do this after we instantiate the stores above.
  // This gives us a set of tables which may or may not be violating their quota;
  // to be safe, we want to make sure that these are not in violation.
  Set<TableName> tablesInLimbo =
    tablesWithQuotas.filterInsufficientlyReportedTables(tableSnapshotStore);
  if (LOG.isTraceEnabled()) {
    LOG.trace("Filtered insufficiently reported tables, left with "
      + reportedRegionSpaceUse.size() + " regions reported");
  }
  for (TableName tableInLimbo : tablesInLimbo) {
    final SpaceQuotaSnapshot currentSnapshot = tableSnapshotStore.getCurrentState(tableInLimbo);
    SpaceQuotaStatus currentStatus = currentSnapshot.getQuotaStatus();
    if (currentStatus.isInViolation()) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Moving " + tableInLimbo + " out of violation because fewer region sizes were"
          + " reported than required.");
      }
      SpaceQuotaSnapshot targetSnapshot = new SpaceQuotaSnapshot(
        SpaceQuotaStatus.notInViolation(), currentSnapshot.getUsage(), currentSnapshot.getLimit());
      this.snapshotNotifier.transitionTable(tableInLimbo, targetSnapshot);
      // Update it in the table QuotaStore so that memory is consistent with no violation.
      tableSnapshotStore.setCurrentState(tableInLimbo, targetSnapshot);
      // In case of the DISABLE space violation policy, we need to re-enable the table as it
      // moves out of violation.
      if (SpaceViolationPolicy.DISABLE == currentStatus.getPolicy().orElse(null)) {
        QuotaUtil.enableTableIfNotEnabled(conn, tableInLimbo);
      }
    }
  }
  // Transition each table to/from quota violation based on the current and target state.
  // Only table quotas are enacted.
  final Set<TableName> tablesWithTableQuotas = tablesWithQuotas.getTableQuotaTables();
  processTablesWithQuotas(tablesWithTableQuotas);
  // For each namespace quota, transition each table in the namespace in or out of violation,
  // but only if a table quota violation policy has not already been applied.
  final Set<String> namespacesWithQuotas = tablesWithQuotas.getNamespacesWithQuotas();
  final Multimap<String, TableName> tablesByNamespace = tablesWithQuotas.getTablesByNamespace();
  processNamespacesWithQuotas(namespacesWithQuotas, tablesByNamespace);
}
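The chore's input is a per-region size map keyed by RegionInfo, and table-level usage is simply the sum of the sizes of that table's regions. A minimal sketch of that aggregation step, independent of the chore's internals; the class and method names are illustrative:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;

public final class RegionSizeRollup {
  /** Sums per-region size reports into per-table totals. */
  static Map<TableName, Long> rollUp(Map<RegionInfo, Long> regionSizes) {
    Map<TableName, Long> perTable = new HashMap<>();
    for (Map.Entry<RegionInfo, Long> entry : regionSizes.entrySet()) {
      // RegionInfo knows which table it belongs to.
      perTable.merge(entry.getKey().getTable(), entry.getValue(), Long::sum);
    }
    return perTable;
  }
}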
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
From the class RegionSizeReportingChore, method removeNonOnlineRegions.
void removeNonOnlineRegions(RegionSizeStore store, Set<RegionInfo> onlineRegions) {
  // Prune size entries for regions this server no longer hosts; otherwise stale reports would
  // continue to be sent to the Master which will prevent size report expiration.
  if (onlineRegions.isEmpty()) {
    // Easy case: no online regions means no size reports.
    store.clear();
    return;
  }
  Iterator<Entry<RegionInfo, RegionSize>> iter = store.iterator();
  int numEntriesRemoved = 0;
  while (iter.hasNext()) {
    Entry<RegionInfo, RegionSize> entry = iter.next();
    RegionInfo regionInfo = entry.getKey();
    if (!onlineRegions.contains(regionInfo)) {
      numEntriesRemoved++;
      iter.remove();
    }
  }
  if (LOG.isTraceEnabled()) {
    LOG.trace("Removed " + numEntriesRemoved + " region sizes before reporting to Master "
      + "because they are for non-online regions.");
  }
}
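The pruning above depends on Set.contains matching the store's RegionInfo keys and on Iterator.remove for safe deletion during iteration. The same pattern on a plain Map, as a minimal self-contained sketch (RegionSizeStore is replaced with Map<RegionInfo, Long> purely for illustration):

import java.util.Iterator;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.client.RegionInfo;

public final class PruneExample {
  /** Drops size entries whose region is not in the online set; returns the removal count. */
  static int pruneOffline(Map<RegionInfo, Long> sizes, Set<RegionInfo> onlineRegions) {
    int removed = 0;
    Iterator<Map.Entry<RegionInfo, Long>> iter = sizes.entrySet().iterator();
    while (iter.hasNext()) {
      if (!onlineRegions.contains(iter.next().getKey())) {
        // Iterator.remove is the only safe way to delete while iterating.
        iter.remove();
        removed++;
      }
    }
    return removed;
  }
}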