
Example 1 with TableNotFoundException

use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.

the class AsyncHBaseAdmin method getTableDescriptor.

@Override
public CompletableFuture<HTableDescriptor> getTableDescriptor(TableName tableName) {
    CompletableFuture<HTableDescriptor> future = new CompletableFuture<>();
    this.<List<TableSchema>>newMasterCaller()
        .action((controller, stub) -> this
            .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>>call(
                controller, stub,
                RequestConverter.buildGetTableDescriptorsRequest(tableName),
                (s, c, req, done) -> s.getTableDescriptors(c, req, done),
                (resp) -> resp.getTableSchemaList()))
        .call()
        .whenComplete((tableSchemas, error) -> {
        if (error != null) {
            future.completeExceptionally(error);
            return;
        }
        if (!tableSchemas.isEmpty()) {
            future.complete(ProtobufUtil.convertToHTableDesc(tableSchemas.get(0)));
        } else {
            future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
        }
    });
    return future;
}
Also used : TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) CompletableFuture(java.util.concurrent.CompletableFuture) TableSchema(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
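
For context, here is how a caller might consume the returned future. This is a minimal sketch rather than HBase code: it assumes an AsyncAdmin instance (the public interface that AsyncHBaseAdmin implements), obtained elsewhere, e.g. from AsyncConnection.getAdmin(), and a hypothetical table name.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class DescribeTable {
    static void describe(AsyncAdmin admin) {
        admin.getTableDescriptor(TableName.valueOf("my_table"))
            .whenComplete((desc, err) -> {
                // getTableDescriptor completes the future exceptionally with
                // TableNotFoundException when hbase:meta has no schema for it
                if (err instanceof TableNotFoundException) {
                    System.err.println("No such table: " + err.getMessage());
                } else if (err != null) {
                    err.printStackTrace();
                } else {
                    System.out.println("Column families: " + desc.getFamilies());
                }
            });
    }
}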

Example 2 with TableNotFoundException

use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.

the class ConnectionImplementation method locateRegionInMeta.

/*
    * Search the hbase:meta table for the HRegionLocation
    * info that contains the table and row we're seeking.
    */
private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException {
    // if the cache may be used, check it first; we may already have the region.
    if (useCache) {
        RegionLocations locations = getCachedLocation(tableName, row);
        if (locations != null && locations.getRegionLocation(replicaId) != null) {
            return locations;
        }
    }
    // build the key of the meta region we should be looking for.
    // the extra 9's on the end are necessary to allow "exact" matches
    // without knowing the precise region names.
    byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
    Scan s = new Scan();
    s.setReversed(true);
    s.withStartRow(metaKey);
    s.addFamily(HConstants.CATALOG_FAMILY);
    s.setOneRowLimit();
    if (this.useMetaReplicas) {
        s.setConsistency(Consistency.TIMELINE);
    }
    int maxAttempts = (retry ? numTries : 1);
    for (int tries = 0; true; tries++) {
        if (tries >= maxAttempts) {
            throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries.");
        }
        if (useCache) {
            RegionLocations locations = getCachedLocation(tableName, row);
            if (locations != null && locations.getRegionLocation(replicaId) != null) {
                return locations;
            }
        } else {
            // If we are not supposed to be using the cache, delete any existing cached location
            // so it won't interfere.
            metaCache.clearCache(tableName, row);
        }
        // Query the meta region
        long pauseBase = this.pause;
        try {
            Result regionInfoRow = null;
            s.resetMvccReadPoint();
            try (ReversedClientScanner rcs = new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory, getMetaLookupPool(), 0)) {
                regionInfoRow = rcs.next();
            }
            if (regionInfoRow == null) {
                throw new TableNotFoundException(tableName);
            }
            // convert the row result into the HRegionLocation we need!
            RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
            if (locations == null || locations.getRegionLocation(replicaId) == null) {
                throw new IOException("HRegionInfo was null in " + tableName + ", row=" + regionInfoRow);
            }
            HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
            if (regionInfo == null) {
                throw new IOException("HRegionInfo was null or empty in " + TableName.META_TABLE_NAME + ", row=" + regionInfoRow);
            }
            // possible we got a region of a different table...
            if (!regionInfo.getTable().equals(tableName)) {
                throw new TableNotFoundException("Table '" + tableName + "' was not found, got: " + regionInfo.getTable() + ".");
            }
            if (regionInfo.isSplit()) {
                throw new RegionOfflineException("the only available region for" + " the required row is a split parent," + " the daughters should be online soon: " + regionInfo.getRegionNameAsString());
            }
            if (regionInfo.isOffline()) {
                throw new RegionOfflineException("the region is offline, could" + " be caused by a disable table call: " + regionInfo.getRegionNameAsString());
            }
            ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
            if (serverName == null) {
                throw new NoServerForRegionException("No server address listed " + "in " + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() + " containing row " + Bytes.toStringBinary(row));
            }
            if (isDeadServer(serverName)) {
                throw new RegionServerStoppedException("hbase:meta says the region " + regionInfo.getRegionNameAsString() + " is managed by the server " + serverName + ", but it is dead.");
            }
            // Instantiate the location
            cacheLocation(tableName, locations);
            return locations;
        } catch (TableNotFoundException e) {
            // the table apparently does not exist; rethrow immediately rather than retrying.
            throw e;
        } catch (IOException e) {
            ExceptionUtil.rethrowIfInterrupt(e);
            if (e instanceof RemoteException) {
                e = ((RemoteException) e).unwrapRemoteException();
            }
            if (e instanceof CallQueueTooBigException) {
                // special-case CallQueueTooBigException, see HBASE-17114
                pauseBase = this.pauseForCQTBE;
            }
            if (tries < maxAttempts - 1) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME + ", metaLocation=" + ", attempt=" + tries + " of " + maxAttempts + " failed; retrying after sleep of " + ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage());
                }
            } else {
                throw e;
            }
            // Only relocate the parent region if necessary
            if (!(e instanceof RegionOfflineException || e instanceof NoServerForRegionException)) {
                relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId);
            }
        }
        try {
            Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries));
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Giving up trying to location region in " + "meta: thread is interrupted.");
        }
    }
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) InterruptedIOException(java.io.InterruptedIOException) CallQueueTooBigException(org.apache.hadoop.hbase.CallQueueTooBigException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) RegionServerStoppedException(org.apache.hadoop.hbase.regionserver.RegionServerStoppedException) ServerName(org.apache.hadoop.hbase.ServerName) RemoteException(org.apache.hadoop.ipc.RemoteException)
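
The metaKey construction near the top of this method is easiest to see in isolation. Below is a minimal sketch that runs without a cluster; the table name and row are invented for illustration.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaKeyDemo {
    public static void main(String[] args) {
        TableName table = TableName.valueOf("my_table");
        byte[] row = Bytes.toBytes("user#42");
        // HConstants.NINES ("99999999999999") sorts after any real region id,
        // so a reversed scan of hbase:meta starting at this key lands on the
        // region containing "row" without knowing the exact region name
        byte[] metaKey = HRegionInfo.createRegionName(table, row, HConstants.NINES, false);
        // prints: my_table,user#42,99999999999999
        System.out.println(Bytes.toStringBinary(metaKey));
    }
}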

Example 3 with TableNotFoundException

use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.

the class ConnectionImplementation method isTableAvailable.

@Override
public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) throws IOException {
    if (this.closed) {
        throw new IOException(toString() + " closed");
    }
    try {
        if (!isTableEnabled(tableName)) {
            LOG.debug("Table " + tableName + " not enabled");
            return false;
        }
        List<Pair<HRegionInfo, ServerName>> locations = MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true);
        int notDeployed = 0;
        int regionCount = 0;
        for (Pair<HRegionInfo, ServerName> pair : locations) {
            HRegionInfo info = pair.getFirst();
            if (pair.getSecond() == null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst().getEncodedName());
                }
                notDeployed++;
            } else if (splitKeys != null && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
                for (byte[] splitKey : splitKeys) {
                    // just check whether this split key matches a region start key
                    if (Bytes.equals(info.getStartKey(), splitKey)) {
                        regionCount++;
                        break;
                    }
                }
            } else {
                // a region with an empty start row is always counted
                regionCount++;
            }
        }
        if (notDeployed > 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
            }
            return false;
        } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1) + " regions, but only " + regionCount + " available");
            }
            return false;
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Table " + tableName + " should be available");
            }
            return true;
        }
    } catch (TableNotFoundException tnfe) {
        LOG.warn("Table " + tableName + " not enabled, it is not exists");
        return false;
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) ServerName(org.apache.hadoop.hbase.ServerName) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Pair(org.apache.hadoop.hbase.util.Pair)
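
Callers normally reach this method through the Admin interface. A minimal sketch of checking a pre-split table, assuming a reachable cluster and a hypothetical table name and split keys:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class AvailabilityCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[][] splitKeys = { Bytes.toBytes("m"), Bytes.toBytes("t") };
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // returns false, rather than throwing, when the table is missing,
            // disabled, or some expected region is not yet deployed
            boolean ready = admin.isTableAvailable(TableName.valueOf("my_table"), splitKeys);
            System.out.println("available: " + ready);
        }
    }
}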

Example 4 with TableNotFoundException

use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.

the class HBaseInterClusterReplicationEndpoint method replicate.

/**
   * Do the shipping logic
   */
@Override
public boolean replicate(ReplicateContext replicateContext) {
    CompletionService<Integer> pool = new ExecutorCompletionService<>(this.exec);
    List<Entry> entries = replicateContext.getEntries();
    String walGroupId = replicateContext.getWalGroupId();
    int sleepMultiplier = 1;
    int numReplicated = 0;
    if (!peersSelected && this.isRunning()) {
        connectToPeers();
        peersSelected = true;
    }
    int numSinks = replicationSinkMgr.getNumSinks();
    if (numSinks == 0) {
        LOG.warn("No replication sinks found, returning without replicating. The source should retry" + " with the same set of edits.");
        return false;
    }
    // minimum of: configured threads, number of 100-waledit batches,
    //  and number of current sinks
    int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1), numSinks);
    List<List<Entry>> entryLists = new ArrayList<>(n);
    if (n == 1) {
        entryLists.add(entries);
    } else {
        for (int i = 0; i < n; i++) {
            entryLists.add(new ArrayList<>(entries.size() / n + 1));
        }
        // now group by region
        for (Entry e : entries) {
            entryLists.get(Math.abs(Bytes.hashCode(e.getKey().getEncodedRegionName()) % n)).add(e);
        }
    }
    while (this.isRunning() && !exec.isShutdown()) {
        if (!isPeerEnabled()) {
            if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        try {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Replicating " + entries.size() + " entries of total size " + replicateContext.getSize());
            }
            int futures = 0;
            for (int i = 0; i < entryLists.size(); i++) {
                if (!entryLists.get(i).isEmpty()) {
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Submitting " + entryLists.get(i).size() + " entries of total size " + replicateContext.getSize());
                    }
                    // RuntimeExceptions encountered here bubble up and are handled in ReplicationSource
                    pool.submit(createReplicator(entryLists.get(i), i));
                    futures++;
                }
            }
            IOException iox = null;
            for (int i = 0; i < futures; i++) {
                try {
                    // wait for all futures, remove successful parts
                    // (only the remaining parts will be retried)
                    Future<Integer> f = pool.take();
                    int index = f.get().intValue();
                    int batchSize = entryLists.get(index).size();
                    entryLists.set(index, Collections.<Entry>emptyList());
                    // Now, we have marked the batch as done replicating, record its size
                    numReplicated += batchSize;
                } catch (InterruptedException ie) {
                    iox = new IOException(ie);
                } catch (ExecutionException ee) {
                    // cause must be an IOException
                    iox = (IOException) ee.getCause();
                }
            }
            if (iox != null) {
                // if we had any exceptions, try again
                throw iox;
            }
            if (numReplicated != entries.size()) {
                // Something went wrong here and we don't know what, let's just fail and retry.
                LOG.warn("The number of edits replicated is different from the number received," + " failing for now.");
                return false;
            }
            // update metrics
            this.metrics.setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId);
            return true;
        } catch (IOException ioe) {
            // Didn't ship anything, but must still age the last time we did
            this.metrics.refreshAgeOfLastShippedOp(walGroupId);
            if (ioe instanceof RemoteException) {
                ioe = ((RemoteException) ioe).unwrapRemoteException();
                LOG.warn("Can't replicate because of an error on the remote cluster: ", ioe);
                if (ioe instanceof TableNotFoundException) {
                    if (sleepForRetries("A table is missing in the peer cluster. " + "Replication cannot proceed without losing data.", sleepMultiplier)) {
                        sleepMultiplier++;
                    }
                } else if (ioe instanceof SaslException) {
                    LOG.warn("Peer encountered SaslException, rechecking all sinks: ", ioe);
                    replicationSinkMgr.chooseSinks();
                }
            } else {
                if (ioe instanceof SocketTimeoutException) {
                    // This exception means we waited for more than 60s and nothing
                    // happened, the cluster is alive and calling it right away
                    // even for a test just makes things worse.
                    sleepForRetries("Encountered a SocketTimeoutException. Since the " + "call to the remote cluster timed out, which is usually " + "caused by a machine failure or a massive slowdown", this.socketTimeoutMultiplier);
                } else if (ioe instanceof ConnectException) {
                    LOG.warn("Peer is unavailable, rechecking all sinks: ", ioe);
                    replicationSinkMgr.chooseSinks();
                } else {
                    LOG.warn("Can't replicate because of a local or network error: ", ioe);
                }
            }
            if (sleepForRetries("Since we are unable to replicate", sleepMultiplier)) {
                sleepMultiplier++;
            }
        }
    }
    // in case we exited before replicating
    return false;
}
Also used : ArrayList(java.util.ArrayList) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) IOException(java.io.IOException) SaslException(javax.security.sasl.SaslException) HBaseReplicationEndpoint(org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) Entry(org.apache.hadoop.hbase.wal.WAL.Entry) SocketTimeoutException(java.net.SocketTimeoutException) List(java.util.List) ExecutionException(java.util.concurrent.ExecutionException) RemoteException(org.apache.hadoop.ipc.RemoteException) ConnectException(java.net.ConnectException)
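
The grouping step above (hashing each entry's encoded region name into one of n lists) keeps all edits for a region in the same batch, preserving per-region ordering across the parallel shippers. A standalone sketch of the same idea, using String keys in place of Bytes.hashCode over encoded region names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RegionPartitionDemo {
    static List<List<String>> partition(List<String> regionNames, int n) {
        List<List<String>> buckets = new ArrayList<>(n);
        for (int i = 0; i < n; i++) {
            buckets.add(new ArrayList<>());
        }
        for (String region : regionNames) {
            // Math.abs is safe here because (hash % n) already lies in (-n, n)
            buckets.get(Math.abs(region.hashCode() % n)).add(region);
        }
        return buckets;
    }

    public static void main(String[] args) {
        // entries with the same key always land in the same bucket
        System.out.println(partition(Arrays.asList("r1", "r2", "r1", "r3", "r2"), 2));
    }
}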

Example 5 with TableNotFoundException

use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.

the class ScannerInstanceResource method get.

@GET
@Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo, @QueryParam("n") int maxRows, @QueryParam("c") final int maxValues) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    if (generator == null) {
        servlet.getMetrics().incrementFailedGetRequests(1);
        return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT).entity("Not found" + CRLF).build();
    }
    CellSetModel model = new CellSetModel();
    RowModel rowModel = null;
    byte[] rowKey = null;
    int limit = batch;
    if (maxValues > 0) {
        limit = maxValues;
    }
    int count = limit;
    do {
        Cell value = null;
        try {
            value = generator.next();
        } catch (IllegalStateException e) {
            if (ScannerResource.delete(id)) {
                servlet.getMetrics().incrementSucessfulDeleteRequests(1);
            } else {
                servlet.getMetrics().incrementFailedDeleteRequests(1);
            }
            servlet.getMetrics().incrementFailedGetRequests(1);
            return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF).build();
        } catch (IllegalArgumentException e) {
            Throwable t = e.getCause();
            if (t instanceof TableNotFoundException) {
                return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT).entity("Not found" + CRLF).build();
            }
            throw e;
        }
        if (value == null) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("generator exhausted");
            }
            // if nothing at all was returned, respond with 204 (No Content)
            if (count == limit) {
                return Response.noContent().build();
            }
            break;
        }
        if (rowKey == null) {
            rowKey = CellUtil.cloneRow(value);
            rowModel = new RowModel(rowKey);
        }
        if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
            // row boundary: stop once the requested number of rows has been emitted
            if (maxRows > 0) {
                if (--maxRows == 0) {
                    generator.putBack(value);
                    break;
                }
            }
            model.addRow(rowModel);
            rowKey = CellUtil.cloneRow(value);
            rowModel = new RowModel(rowKey);
        }
        rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value)));
    } while (--count > 0);
    model.addRow(rowModel);
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    servlet.getMetrics().incrementSucessfulGetRequests(1);
    return response.build();
}
Also used : TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) CellSetModel(org.apache.hadoop.hbase.rest.model.CellSetModel) RowModel(org.apache.hadoop.hbase.rest.model.RowModel) CellModel(org.apache.hadoop.hbase.rest.model.CellModel) ResponseBuilder(javax.ws.rs.core.Response.ResponseBuilder) Cell(org.apache.hadoop.hbase.Cell) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)
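
On the wire, those outcomes surface as HTTP status codes: 200 with a cell set, 204 when the scanner is exhausted, 404 when the underlying table is gone, 410 when the scanner itself has expired or been deleted. A client-side sketch, assuming an HBase REST server on localhost:8080 and a placeholder scanner id (both invented for illustration):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ScannerPoll {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest req = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:8080/my_table/scanner/scanner-id"))
            .header("Accept", "application/json")
            .build();
        HttpResponse<String> resp = client.send(req, HttpResponse.BodyHandlers.ofString());
        switch (resp.statusCode()) {
            case 200: System.out.println(resp.body()); break;         // cells returned
            case 204: System.out.println("scanner exhausted"); break; // no content
            case 404: System.out.println("table or scanner not found"); break;
            case 410: System.out.println("scanner expired or deleted"); break;
            default:  System.out.println("unexpected: " + resp.statusCode());
        }
    }
}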

Aggregations

TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException): 41 uses
IOException (java.io.IOException): 19 uses
TableName (org.apache.hadoop.hbase.TableName): 14 uses
TableNotEnabledException (org.apache.hadoop.hbase.TableNotEnabledException): 8 uses
Test (org.junit.Test): 8 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 7 uses
ServerName (org.apache.hadoop.hbase.ServerName): 6 uses
ArrayList (java.util.ArrayList): 5 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 5 uses
Connection (org.apache.hadoop.hbase.client.Connection): 5 uses
Table (org.apache.hadoop.hbase.client.Table): 5 uses
Path (org.apache.hadoop.fs.Path): 4 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4 uses
TableNotDisabledException (org.apache.hadoop.hbase.TableNotDisabledException): 4 uses
RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 4 uses
InterruptedIOException (java.io.InterruptedIOException): 3 uses
LinkedList (java.util.LinkedList): 3 uses
List (java.util.List): 3 uses
Map (java.util.Map): 3 uses