Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class AsyncHBaseAdmin, method getTableDescriptor.
@Override
public CompletableFuture<HTableDescriptor> getTableDescriptor(TableName tableName) {
  CompletableFuture<HTableDescriptor> future = new CompletableFuture<>();
  this.<List<TableSchema>> newMasterCaller()
      .action((controller, stub) -> this
          .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>> call(
            controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName),
            (s, c, req, done) -> s.getTableDescriptors(c, req, done),
            (resp) -> resp.getTableSchemaList()))
      .call()
      .whenComplete((tableSchemas, error) -> {
        if (error != null) {
          future.completeExceptionally(error);
          return;
        }
        if (!tableSchemas.isEmpty()) {
          future.complete(ProtobufUtil.convertToHTableDesc(tableSchemas.get(0)));
        } else {
          // An empty schema list means the master has no such table.
          future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
        }
      });
  return future;
}
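
Because the admin API is asynchronous, the TableNotFoundException surfaces through the returned CompletableFuture rather than being thrown directly. A minimal caller-side sketch, assuming the AsyncAdmin interface of the same vintage as this snippet; the helper method and table name are illustrative, not part of the code above:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class DescribeTableExample {
  // Hypothetical helper: reacts to how getTableDescriptor completes its future.
  static void describe(AsyncAdmin admin) {
    CompletableFuture<HTableDescriptor> future =
      admin.getTableDescriptor(TableName.valueOf("my_table"));
    future.whenComplete((desc, err) -> {
      if (err instanceof TableNotFoundException) {
        System.out.println("No such table");   // master returned an empty schema list
      } else if (err != null) {
        err.printStackTrace();                 // some other RPC failure
      } else {
        System.out.println("Found " + desc.getTableName());
      }
    });
  }
}
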
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class ConnectionImplementation, method locateRegionInMeta.
/*
* Search the hbase:meta table for the HRegionLocation
* info that contains the table and row we're seeking.
*/
private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, boolean useCache,
    boolean retry, int replicaId) throws IOException {
  // Fast path: we may already have the region location cached.
  if (useCache) {
    RegionLocations locations = getCachedLocation(tableName, row);
    if (locations != null && locations.getRegionLocation(replicaId) != null) {
      return locations;
    }
  }
  // Build the key of the meta region we should be looking for.
  // The extra 9's on the end are necessary to allow "exact" matches
  // without knowing the precise region names.
  byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
  Scan s = new Scan();
  s.setReversed(true);
  s.withStartRow(metaKey);
  s.addFamily(HConstants.CATALOG_FAMILY);
  s.setOneRowLimit();
  if (this.useMetaReplicas) {
    s.setConsistency(Consistency.TIMELINE);
  }
  int maxAttempts = (retry ? numTries : 1);
  for (int tries = 0; true; tries++) {
    if (tries >= maxAttempts) {
      throw new NoServerForRegionException("Unable to find region for "
          + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries.");
    }
    if (useCache) {
      RegionLocations locations = getCachedLocation(tableName, row);
      if (locations != null && locations.getRegionLocation(replicaId) != null) {
        return locations;
      }
    } else {
      // If we are not supposed to be using the cache, delete any existing cached location
      // so it won't interfere.
      metaCache.clearCache(tableName, row);
    }
    // Query the meta region
    long pauseBase = this.pause;
    try {
      Result regionInfoRow = null;
      s.resetMvccReadPoint();
      try (ReversedClientScanner rcs = new ReversedClientScanner(conf, s,
          TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory,
          getMetaLookupPool(), 0)) {
        regionInfoRow = rcs.next();
      }
      if (regionInfoRow == null) {
        throw new TableNotFoundException(tableName);
      }
      // convert the row result into the HRegionLocation we need!
      RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
      if (locations == null || locations.getRegionLocation(replicaId) == null) {
        throw new IOException("HRegionInfo was null in " + tableName + ", row=" + regionInfoRow);
      }
      HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
      if (regionInfo == null) {
        throw new IOException("HRegionInfo was null or empty in " + TableName.META_TABLE_NAME
            + ", row=" + regionInfoRow);
      }
      // It is possible we got a region of a different table...
      if (!regionInfo.getTable().equals(tableName)) {
        throw new TableNotFoundException("Table '" + tableName + "' was not found, got: "
            + regionInfo.getTable() + ".");
      }
      if (regionInfo.isSplit()) {
        throw new RegionOfflineException("the only available region for"
            + " the required row is a split parent,"
            + " the daughters should be online soon: " + regionInfo.getRegionNameAsString());
      }
      if (regionInfo.isOffline()) {
        throw new RegionOfflineException("the region is offline, could"
            + " be caused by a disable table call: " + regionInfo.getRegionNameAsString());
      }
      ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
      if (serverName == null) {
        throw new NoServerForRegionException("No server address listed in "
            + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString()
            + " containing row " + Bytes.toStringBinary(row));
      }
      if (isDeadServer(serverName)) {
        throw new RegionServerStoppedException("hbase:meta says the region "
            + regionInfo.getRegionNameAsString() + " is managed by the server " + serverName
            + ", but it is dead.");
      }
      // Cache the location and return it
      cacheLocation(tableName, locations);
      return locations;
    } catch (TableNotFoundException e) {
      // The table does not exist; rethrow immediately rather than retrying.
      // Historically this came from the HTable constructor.
      throw e;
    } catch (IOException e) {
      ExceptionUtil.rethrowIfInterrupt(e);
      if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
      }
      if (e instanceof CallQueueTooBigException) {
        // Give a special check on CallQueueTooBigException, see HBASE-17114
        pauseBase = this.pauseForCQTBE;
      }
      if (tries < maxAttempts - 1) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME
              + ", attempt=" + tries + " of " + maxAttempts
              + " failed; retrying after sleep of "
              + ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage());
        }
      } else {
        throw e;
      }
      // Only relocate the parent region if necessary
      if (!(e instanceof RegionOfflineException || e instanceof NoServerForRegionException)) {
        relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId);
      }
    }
    try {
      Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries));
    } catch (InterruptedException e) {
      throw new InterruptedIOException(
          "Giving up trying to locate region in meta: thread is interrupted.");
    }
  }
}
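
The "extra 9's" comment is worth unpacking: hbase:meta rows are keyed by region name, which ends in a region-creation id, and HConstants.NINES ("99999999999999") sorts at or after any real id. The reversed one-row scan started at the probe key therefore lands on the meta row of the region containing the sought row. An illustrative sketch of the key shapes involved (all literal values are made up):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaKeyExample {
  public static void main(String[] args) {
    // Made-up meta row key for table "t" with a region starting at "bbb":
    //   t,bbb,1487971089431.5e4b7a6c...
    // Probe key built by locateRegionInMeta for the row we seek:
    //   t,row-we-seek,99999999999999
    // Since "99999999999999" >= any real region id, a reversed one-row scan
    // from the probe key steps back onto the enclosing region's meta row.
    byte[] probe = HRegionInfo.createRegionName(
      TableName.valueOf("t"), Bytes.toBytes("row-we-seek"), HConstants.NINES, false);
    System.out.println(Bytes.toStringBinary(probe));  // t,row-we-seek,99999999999999
  }
}
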
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class ConnectionImplementation, method isTableAvailable.
@Override
public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys)
    throws IOException {
  if (this.closed) {
    throw new IOException(toString() + " closed");
  }
  try {
    if (!isTableEnabled(tableName)) {
      LOG.debug("Table " + tableName + " not enabled");
      return false;
    }
    List<Pair<HRegionInfo, ServerName>> locations =
        MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true);
    int notDeployed = 0;
    int regionCount = 0;
    for (Pair<HRegionInfo, ServerName> pair : locations) {
      HRegionInfo info = pair.getFirst();
      if (pair.getSecond() == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Table " + tableName + " has not deployed region "
              + pair.getFirst().getEncodedName());
        }
        notDeployed++;
      } else if (splitKeys != null
          && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
        for (byte[] splitKey : splitKeys) {
          // Just check if the split key is available
          if (Bytes.equals(info.getStartKey(), splitKey)) {
            regionCount++;
            break;
          }
        }
      } else {
        // The region with an empty start row is always counted
        regionCount++;
      }
    }
    if (notDeployed > 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Table " + tableName + " has " + notDeployed + " regions not deployed");
      }
      return false;
    } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1)
            + " regions, but only " + regionCount + " available");
      }
      return false;
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Table " + tableName + " should be available");
      }
      return true;
    }
  } catch (TableNotFoundException tnfe) {
    LOG.warn("Table " + tableName + " does not exist");
    return false;
  }
}
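
Since the method returns false for disabled tables, missing tables, and partially deployed tables alike, a common pattern is to poll it after creating a pre-split table. A minimal sketch, assuming a ClusterConnection handle; the table name, split keys, and backoff interval are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.util.Bytes;

public class WaitForTableExample {
  // Hypothetical polling helper: blocks until every expected region is deployed.
  static void waitUntilAvailable(ClusterConnection conn) throws Exception {
    TableName tn = TableName.valueOf("my_table");
    byte[][] splitKeys = { Bytes.toBytes("m") };  // one split key => two regions expected
    // false covers disabled tables, missing tables, and undeployed regions alike
    while (!conn.isTableAvailable(tn, splitKeys)) {
      Thread.sleep(500);  // simple fixed backoff between checks
    }
  }
}
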
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class HBaseInterClusterReplicationEndpoint, method replicate.
/**
* Do the shipping logic
*/
@Override
public boolean replicate(ReplicateContext replicateContext) {
  CompletionService<Integer> pool = new ExecutorCompletionService<>(this.exec);
  List<Entry> entries = replicateContext.getEntries();
  String walGroupId = replicateContext.getWalGroupId();
  int sleepMultiplier = 1;
  int numReplicated = 0;
  if (!peersSelected && this.isRunning()) {
    connectToPeers();
    peersSelected = true;
  }
  int numSinks = replicationSinkMgr.getNumSinks();
  if (numSinks == 0) {
    LOG.warn("No replication sinks found, returning without replicating."
        + " The source should retry with the same set of edits.");
    return false;
  }
  // minimum of: configured threads, number of 100-waledit batches,
  // and number of current sinks
  int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1), numSinks);
  List<List<Entry>> entryLists = new ArrayList<>(n);
  if (n == 1) {
    entryLists.add(entries);
  } else {
    for (int i = 0; i < n; i++) {
      entryLists.add(new ArrayList<>(entries.size() / n + 1));
    }
    // now group by region
    for (Entry e : entries) {
      entryLists.get(Math.abs(Bytes.hashCode(e.getKey().getEncodedRegionName()) % n)).add(e);
    }
  }
  while (this.isRunning() && !exec.isShutdown()) {
    if (!isPeerEnabled()) {
      if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
        sleepMultiplier++;
      }
      continue;
    }
    try {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Replicating " + entries.size() + " entries of total size "
            + replicateContext.getSize());
      }
      int futures = 0;
      for (int i = 0; i < entryLists.size(); i++) {
        if (!entryLists.get(i).isEmpty()) {
          if (LOG.isTraceEnabled()) {
            LOG.trace("Submitting " + entryLists.get(i).size() + " entries of total size "
                + replicateContext.getSize());
          }
          // RuntimeExceptions encountered here bubble up and are handled in ReplicationSource
          pool.submit(createReplicator(entryLists.get(i), i));
          futures++;
        }
      }
      IOException iox = null;
      for (int i = 0; i < futures; i++) {
        try {
          // wait for all futures, remove successful parts
          // (only the remaining parts will be retried)
          Future<Integer> f = pool.take();
          int index = f.get().intValue();
          int batchSize = entryLists.get(index).size();
          entryLists.set(index, Collections.<Entry> emptyList());
          // Now that the batch is done replicating, record its size
          numReplicated += batchSize;
        } catch (InterruptedException ie) {
          iox = new IOException(ie);
        } catch (ExecutionException ee) {
          // cause must be an IOException
          iox = (IOException) ee.getCause();
        }
      }
      if (iox != null) {
        // if we had any exceptions, try again
        throw iox;
      }
      if (numReplicated != entries.size()) {
        // Something went wrong here and we don't know what, let's just fail and retry.
        LOG.warn("The number of edits replicated is different from the number received,"
            + " failing for now.");
        return false;
      }
      // update metrics
      this.metrics.setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(),
        walGroupId);
      return true;
    } catch (IOException ioe) {
      // Didn't ship anything, but must still age the last time we did
      this.metrics.refreshAgeOfLastShippedOp(walGroupId);
      if (ioe instanceof RemoteException) {
        ioe = ((RemoteException) ioe).unwrapRemoteException();
        LOG.warn("Can't replicate because of an error on the remote cluster: ", ioe);
        if (ioe instanceof TableNotFoundException) {
          if (sleepForRetries("A table is missing in the peer cluster."
              + " Replication cannot proceed without losing data.", sleepMultiplier)) {
            sleepMultiplier++;
          }
        } else if (ioe instanceof SaslException) {
          LOG.warn("Peer encountered SaslException, rechecking all sinks: ", ioe);
          replicationSinkMgr.chooseSinks();
        }
      } else {
        if (ioe instanceof SocketTimeoutException) {
          // This exception means we waited for more than 60s and nothing
          // happened, the cluster is alive and calling it right away
          // even for a test just makes things worse.
          sleepForRetries("Encountered a SocketTimeoutException. The call to the remote"
              + " cluster timed out, which is usually caused by a machine failure or a"
              + " massive slowdown", this.socketTimeoutMultiplier);
        } else if (ioe instanceof ConnectException) {
          LOG.warn("Peer is unavailable, rechecking all sinks: ", ioe);
          replicationSinkMgr.chooseSinks();
        } else {
          LOG.warn("Can't replicate because of a local or network error: ", ioe);
        }
      }
      if (sleepForRetries("Since we are unable to replicate", sleepMultiplier)) {
        sleepMultiplier++;
      }
    }
  }
  // in case we exited the loop before replicating anything
  return false;
}
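
The retry pacing above, including the TableNotFoundException branch, is driven by the sleepForRetries helper, which is not shown in this snippet. The following is a rough sketch of its contract, consistent with how it is called here; the field names and default values are assumptions, not the actual HBase implementation:

public class RetrySleeperSketch {
  private final long sleepForRetries = 1000;     // assumed base sleep in ms
  private final int maxRetriesMultiplier = 300;  // assumed cap on the escalation

  // Sleep an escalating amount; report whether the caller should keep
  // growing the multiplier (i.e. whether the cap has not been reached yet).
  protected boolean sleepForRetries(String msg, int sleepMultiplier) {
    try {
      Thread.sleep(this.sleepForRetries * sleepMultiplier);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();  // preserve the interrupt for the caller
    }
    return sleepMultiplier < maxRetriesMultiplier;
  }
}
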
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class ScannerInstanceResource, method get.
@GET
@Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo, @QueryParam("n") int maxRows,
    @QueryParam("c") final int maxValues) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("GET " + uriInfo.getAbsolutePath());
  }
  servlet.getMetrics().incrementRequests(1);
  if (generator == null) {
    servlet.getMetrics().incrementFailedGetRequests(1);
    return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT)
        .entity("Not found" + CRLF).build();
  }
  CellSetModel model = new CellSetModel();
  RowModel rowModel = null;
  byte[] rowKey = null;
  int limit = batch;
  if (maxValues > 0) {
    limit = maxValues;
  }
  int count = limit;
  do {
    Cell value = null;
    try {
      value = generator.next();
    } catch (IllegalStateException e) {
      if (ScannerResource.delete(id)) {
        servlet.getMetrics().incrementSucessfulDeleteRequests(1);
      } else {
        servlet.getMetrics().incrementFailedDeleteRequests(1);
      }
      servlet.getMetrics().incrementFailedGetRequests(1);
      return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT)
          .entity("Gone" + CRLF).build();
    } catch (IllegalArgumentException e) {
      Throwable t = e.getCause();
      if (t instanceof TableNotFoundException) {
        return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT)
            .entity("Not found" + CRLF).build();
      }
      throw e;
    }
    if (value == null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("generator exhausted");
      }
      // respond with 204 (No Content) if an empty cell set would otherwise be returned
      if (count == limit) {
        return Response.noContent().build();
      }
      break;
    }
    if (rowKey == null) {
      rowKey = CellUtil.cloneRow(value);
      rowModel = new RowModel(rowKey);
    }
    if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
      // if maxRows was given as a query parameter, stop if we would exceed the
      // specified number of rows
      if (maxRows > 0) {
        if (--maxRows == 0) {
          generator.putBack(value);
          break;
        }
      }
      model.addRow(rowModel);
      rowKey = CellUtil.cloneRow(value);
      rowModel = new RowModel(rowKey);
    }
    rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
      value.getTimestamp(), CellUtil.cloneValue(value)));
  } while (--count > 0);
  model.addRow(rowModel);
  ResponseBuilder response = Response.ok(model);
  response.cacheControl(cacheControl);
  servlet.getMetrics().incrementSucessfulGetRequests(1);
  return response.build();
}
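
From a client's point of view, the TableNotFoundException above degrades to a plain 404, alongside 204 for an exhausted scanner and 410 for an expired one, as the code shows. A minimal client-side sketch using java.net.HttpURLConnection; the host, port, table, and scanner id are placeholders for values obtained when the scanner was created:

import java.net.HttpURLConnection;
import java.net.URL;

public class ScannerGetExample {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint: a REST server on localhost:8080 and a scanner id
    // returned in the Location header of an earlier scanner-creation request.
    URL url = new URL("http://localhost:8080/my_table/scanner/scanner-id");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    switch (conn.getResponseCode()) {
      case 200: System.out.println("got a batch of cells"); break;
      case 204: System.out.println("scanner exhausted (No Content)"); break; // generator done
      case 404: System.out.println("scanner or table not found"); break;     // incl. TableNotFoundException
      case 410: System.out.println("scanner lease expired (Gone)"); break;
      default:  System.out.println("unexpected: " + conn.getResponseCode());
    }
    conn.disconnect();
  }
}
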