Example 11 with InterruptedIOException

Use of java.io.InterruptedIOException in project hadoop by apache.

The class S3AUtils, method translateException.

/**
   * Translate an exception raised in an operation into an IOException.
   * The specific type of IOException depends on the class of
   * {@link AmazonClientException} passed in, and any status codes included
   * in the operation. That is: HTTP error codes are examined and can be
   * used to build a more specific response.
   * @param operation operation
   * @param path path operated on (may be null)
   * @param exception amazon exception raised
   * @return an IOE which wraps the caught exception.
   */
@SuppressWarnings("ThrowableInstanceNeverThrown")
public static IOException translateException(String operation, String path, AmazonClientException exception) {
    String message = String.format("%s%s: %s", operation, path != null ? (" on " + path) : "", exception);
    if (!(exception instanceof AmazonServiceException)) {
        if (containsInterruptedException(exception)) {
            return (IOException) new InterruptedIOException(message).initCause(exception);
        }
        return new AWSClientIOException(message, exception);
    } else {
        IOException ioe;
        AmazonServiceException ase = (AmazonServiceException) exception;
        // this exception is non-null if the service exception is an s3 one
        AmazonS3Exception s3Exception = ase instanceof AmazonS3Exception ? (AmazonS3Exception) ase : null;
        int status = ase.getStatusCode();
        switch(status) {
            case 301:
                if (s3Exception != null) {
                    if (s3Exception.getAdditionalDetails() != null && s3Exception.getAdditionalDetails().containsKey(ENDPOINT_KEY)) {
                        message = String.format("Received permanent redirect response to " + "endpoint %s.  This likely indicates that the S3 endpoint " + "configured in %s does not match the AWS region containing " + "the bucket.", s3Exception.getAdditionalDetails().get(ENDPOINT_KEY), ENDPOINT);
                    }
                    ioe = new AWSS3IOException(message, s3Exception);
                } else {
                    ioe = new AWSServiceIOException(message, ase);
                }
                break;
            // permissions
            case 401:
            case 403:
                ioe = new AccessDeniedException(path, null, message);
                ioe.initCause(ase);
                break;
            // the object isn't there
            case 404:
            case 410:
                ioe = new FileNotFoundException(message);
                ioe.initCause(ase);
                break;
            // out of range: this can surface when the object is overwritten with
            // a shorter one while it is being read.
            case 416:
                ioe = new EOFException(message);
                break;
            default:
                // no specific exit code. Choose an IOE subclass based on the class
                // of the caught exception
                ioe = s3Exception != null ? new AWSS3IOException(message, s3Exception) : new AWSServiceIOException(message, ase);
                break;
        }
        return ioe;
    }
}
Also used: InterruptedIOException (java.io.InterruptedIOException) AccessDeniedException (java.nio.file.AccessDeniedException) AmazonServiceException (com.amazonaws.AmazonServiceException) FileNotFoundException (java.io.FileNotFoundException) EOFException (java.io.EOFException) IOException (java.io.IOException) AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception)
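
A minimal caller sketch, not taken from the Hadoop source: the helper class, method, bucket, and key names are hypothetical, but it shows the intended call pattern, where every AWS SDK exception is routed through translateException so callers only ever see IOException subclasses.

import java.io.IOException;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;

import org.apache.hadoop.fs.s3a.S3AUtils;

// Hypothetical helper, not part of S3AUtils.
public class S3ACallSketch {

    static long headObjectLength(AmazonS3 s3, String bucket, String key) throws IOException {
        try {
            return s3.getObjectMetadata(bucket, key).getContentLength();
        } catch (AmazonClientException e) {
            // translateException picks the IOException subclass from the failure:
            // 404/410 -> FileNotFoundException, 401/403 -> AccessDeniedException,
            // a wrapped interrupt -> InterruptedIOException, and so on.
            throw S3AUtils.translateException("getObjectMetadata", key, e);
        }
    }
}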

Example 12 with InterruptedIOException

Use of java.io.InterruptedIOException in project hbase by apache.

The class ConnectionImplementation, method locateRegionInMeta.

/*
    * Search the hbase:meta table for the HRegionLocation
    * info that contains the table and row we're seeking.
    */
private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException {
    // If we are supposed to be using the cache, check it first to see if
    // we already have the region.
    if (useCache) {
        RegionLocations locations = getCachedLocation(tableName, row);
        if (locations != null && locations.getRegionLocation(replicaId) != null) {
            return locations;
        }
    }
    // build the key of the meta region we should be looking for.
    // the extra 9's on the end are necessary to allow "exact" matches
    // without knowing the precise region names.
    byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
    Scan s = new Scan();
    s.setReversed(true);
    s.withStartRow(metaKey);
    s.addFamily(HConstants.CATALOG_FAMILY);
    s.setOneRowLimit();
    if (this.useMetaReplicas) {
        s.setConsistency(Consistency.TIMELINE);
    }
    int maxAttempts = (retry ? numTries : 1);
    for (int tries = 0; true; tries++) {
        if (tries >= maxAttempts) {
            throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries.");
        }
        if (useCache) {
            RegionLocations locations = getCachedLocation(tableName, row);
            if (locations != null && locations.getRegionLocation(replicaId) != null) {
                return locations;
            }
        } else {
            // If we are not supposed to be using the cache, delete any existing cached location
            // so it won't interfere.
            metaCache.clearCache(tableName, row);
        }
        // Query the meta region
        long pauseBase = this.pause;
        try {
            Result regionInfoRow = null;
            s.resetMvccReadPoint();
            try (ReversedClientScanner rcs = new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory, getMetaLookupPool(), 0)) {
                regionInfoRow = rcs.next();
            }
            if (regionInfoRow == null) {
                throw new TableNotFoundException(tableName);
            }
            // convert the row result into the HRegionLocation we need!
            RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
            if (locations == null || locations.getRegionLocation(replicaId) == null) {
                throw new IOException("HRegionInfo was null in " + tableName + ", row=" + regionInfoRow);
            }
            HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
            if (regionInfo == null) {
                throw new IOException("HRegionInfo was null or empty in " + TableName.META_TABLE_NAME + ", row=" + regionInfoRow);
            }
            // possible we got a region of a different table...
            if (!regionInfo.getTable().equals(tableName)) {
                throw new TableNotFoundException("Table '" + tableName + "' was not found, got: " + regionInfo.getTable() + ".");
            }
            if (regionInfo.isSplit()) {
                throw new RegionOfflineException("the only available region for" + " the required row is a split parent," + " the daughters should be online soon: " + regionInfo.getRegionNameAsString());
            }
            if (regionInfo.isOffline()) {
                throw new RegionOfflineException("the region is offline, could" + " be caused by a disable table call: " + regionInfo.getRegionNameAsString());
            }
            ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
            if (serverName == null) {
                throw new NoServerForRegionException("No server address listed " + "in " + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() + " containing row " + Bytes.toStringBinary(row));
            }
            if (isDeadServer(serverName)) {
                throw new RegionServerStoppedException("hbase:meta says the region " + regionInfo.getRegionNameAsString() + " is managed by the server " + serverName + ", but it is dead.");
            }
            // Instantiate the location
            cacheLocation(tableName, locations);
            return locations;
        } catch (TableNotFoundException e) {
            // the table genuinely does not exist; rethrow immediately rather than
            // retrying. This should always be coming from the HTable constructor.
            throw e;
        } catch (IOException e) {
            ExceptionUtil.rethrowIfInterrupt(e);
            if (e instanceof RemoteException) {
                e = ((RemoteException) e).unwrapRemoteException();
            }
            if (e instanceof CallQueueTooBigException) {
                // Give a special check on CallQueueTooBigException, see #HBASE-17114
                pauseBase = this.pauseForCQTBE;
            }
            if (tries < maxAttempts - 1) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME + ", metaLocation=" + ", attempt=" + tries + " of " + maxAttempts + " failed; retrying after sleep of " + ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage());
                }
            } else {
                throw e;
            }
            // Only relocate the parent region if necessary
            if (!(e instanceof RegionOfflineException || e instanceof NoServerForRegionException)) {
                relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId);
            }
        }
        try {
            Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries));
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Giving up trying to location region in " + "meta: thread is interrupted.");
        }
    }
}
Also used: RegionLocations (org.apache.hadoop.hbase.RegionLocations) InterruptedIOException (java.io.InterruptedIOException) CallQueueTooBigException (org.apache.hadoop.hbase.CallQueueTooBigException) IOException (java.io.IOException) DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException) RegionServerStoppedException (org.apache.hadoop.hbase.regionserver.RegionServerStoppedException) ServerName (org.apache.hadoop.hbase.ServerName) RemoteException (org.apache.hadoop.ipc.RemoteException)
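
Two details above are easy to miss: ExceptionUtil.rethrowIfInterrupt ensures an interrupt discovered inside the scan is never retried away, and the final catch turns an interrupted sleep into an InterruptedIOException. A reduced sketch of that retry shape follows; the Attempt interface, the helper name, and the backoff arithmetic are illustrative, not HBase code (the sketch also restores the interrupt flag, which the original sleep handler does not):

import java.io.IOException;
import java.io.InterruptedIOException;

// Illustrative retry helper, not HBase code.
public class RetrySketch {

    interface Attempt<T> {
        T call() throws IOException;
    }

    static <T> T withRetries(Attempt<T> attempt, int maxAttempts, long basePauseMs)
            throws IOException {
        IOException last = null;
        for (int tries = 0; tries < maxAttempts; tries++) {
            try {
                return attempt.call();
            } catch (IOException e) {
                last = e; // remember the failure, then back off and retry
            }
            try {
                // Linear backoff keeps the sketch simple; HBase scales the pause
                // per attempt via ConnectionUtils.getPauseTime.
                Thread.sleep(basePauseMs * (tries + 1));
            } catch (InterruptedException e) {
                // Surface the interrupt as an IOException, as the sleep handler
                // in locateRegionInMeta does, and restore the interrupt flag.
                Thread.currentThread().interrupt();
                InterruptedIOException iioe =
                        new InterruptedIOException("interrupted between retries");
                iioe.initCause(e);
                throw iioe;
            }
        }
        throw last != null ? last : new IOException("no attempts were made");
    }
}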

Example 13 with InterruptedIOException

Use of java.io.InterruptedIOException in project hbase by apache.

The class AsyncTableResultScanner, method next.

@Override
public synchronized Result next() throws IOException {
    while (queue.isEmpty()) {
        if (closed) {
            return null;
        }
        if (error != null) {
            Throwables.propagateIfPossible(error, IOException.class);
            throw new IOException(error);
        }
        try {
            wait();
        } catch (InterruptedException e) {
            throw new InterruptedIOException();
        }
    }
    Result result = queue.poll();
    cacheSize -= calcEstimatedSize(result);
    if (resumer != null && cacheSize <= maxCacheSize / 2) {
        resumePrefetch();
    }
    return result;
}
Also used: InterruptedIOException (java.io.InterruptedIOException) IOException (java.io.IOException)
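
This is a classic wait()/notifyAll() hand-off: the consumer parks until a result arrives, the scanner closes, or an error is recorded, and an interrupt while parked surfaces as an InterruptedIOException. A stripped-down, self-contained version of the same pattern, with hypothetical names:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayDeque;
import java.util.Queue;

// Hypothetical stand-alone hand-off; names are illustrative, not HBase API.
public class BlockingHandOff<T> {

    private final Queue<T> queue = new ArrayDeque<>();
    private boolean closed;

    public synchronized void offer(T value) {
        queue.add(value);
        notifyAll(); // wake any consumer parked in take()
    }

    public synchronized void close() {
        closed = true;
        notifyAll();
    }

    public synchronized T take() throws IOException {
        while (queue.isEmpty()) {
            if (closed) {
                return null; // end of stream, like the scanner returning null
            }
            try {
                wait();
            } catch (InterruptedException e) {
                // Restore the interrupt flag before converting to an IOException.
                Thread.currentThread().interrupt();
                throw new InterruptedIOException("interrupted while waiting for a result");
            }
        }
        return queue.poll();
    }
}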

Example 14 with InterruptedIOException

Use of java.io.InterruptedIOException in project hbase by apache.

The class HFileReplicator, method copyHFilesToStagingDir.

private Map<String, Path> copyHFilesToStagingDir() throws IOException {
    Map<String, Path> mapOfCopiedHFiles = new HashMap<>();
    Pair<byte[], List<String>> familyHFilePathsPair;
    List<String> hfilePaths;
    byte[] family;
    Path familyStagingDir;
    int familyHFilePathsPairsListSize;
    int totalNoOfHFiles;
    List<Pair<byte[], List<String>>> familyHFilePathsPairsList;
    FileSystem sourceFs = null;
    try {
        Path sourceClusterPath = new Path(sourceBaseNamespaceDirPath);
        /*
       * Path#getFileSystem will by default get the FS from cache. If both source and sink cluster
       * has same FS name service then it will return peer cluster FS. To avoid this we explicitly
       * disable the loading of FS from cache, so that a new FS is created with source cluster
       * configuration.
       */
        String sourceScheme = sourceClusterPath.toUri().getScheme();
        String disableCacheName = String.format("fs.%s.impl.disable.cache", sourceScheme);
        sourceClusterConf.setBoolean(disableCacheName, true);
        sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf);
        User user = userProvider.getCurrent();
        // For each table name in the map
        for (Entry<String, List<Pair<byte[], List<String>>>> tableEntry : bulkLoadHFileMap.entrySet()) {
            String tableName = tableEntry.getKey();
            // Create staging directory for each table
            Path stagingDir = createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName));
            familyHFilePathsPairsList = tableEntry.getValue();
            familyHFilePathsPairsListSize = familyHFilePathsPairsList.size();
            // For each list of family hfile paths pair in the table
            for (int i = 0; i < familyHFilePathsPairsListSize; i++) {
                familyHFilePathsPair = familyHFilePathsPairsList.get(i);
                family = familyHFilePathsPair.getFirst();
                hfilePaths = familyHFilePathsPair.getSecond();
                familyStagingDir = new Path(stagingDir, Bytes.toString(family));
                totalNoOfHFiles = hfilePaths.size();
                // For each list of hfile paths for the family
                List<Future<Void>> futures = new ArrayList<>();
                Callable<Void> c;
                Future<Void> future;
                int currentCopied = 0;
                // Copy the hfiles in parallel
                while (totalNoOfHFiles > currentCopied + this.copiesPerThread) {
                    c = new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread));
                    future = exec.submit(c);
                    futures.add(future);
                    currentCopied += this.copiesPerThread;
                }
                int remaining = totalNoOfHFiles - currentCopied;
                if (remaining > 0) {
                    c = new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, currentCopied + remaining));
                    future = exec.submit(c);
                    futures.add(future);
                }
                for (Future<Void> f : futures) {
                    try {
                        f.get();
                    } catch (InterruptedException e) {
                        InterruptedIOException iioe = new InterruptedIOException("Failed to copy HFiles to local file system. This will be retried again " + "by the source cluster.");
                        iioe.initCause(e);
                        throw iioe;
                    } catch (ExecutionException e) {
                        throw new IOException("Failed to copy HFiles to local file system. This will " + "be retried again by the source cluster.", e);
                    }
                }
            }
            // Add the staging directory to this table. Staging directory contains all the hfiles
            // belonging to this table
            mapOfCopiedHFiles.put(tableName, stagingDir);
        }
        return mapOfCopiedHFiles;
    } finally {
        if (sourceFs != null) {
            sourceFs.close();
        }
        if (exec != null) {
            exec.shutdown();
        }
    }
}
Also used: InterruptedIOException (java.io.InterruptedIOException) User (org.apache.hadoop.hbase.security.User) HashMap (java.util.HashMap) ArrayList (java.util.ArrayList) FileSystem (org.apache.hadoop.fs.FileSystem) LinkedList (java.util.LinkedList) List (java.util.List) ExecutionException (java.util.concurrent.ExecutionException) Pair (org.apache.hadoop.hbase.util.Pair) Path (org.apache.hadoop.fs.Path) IOException (java.io.IOException) Future (java.util.concurrent.Future)
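
The Future-draining idiom at the core of this method generalizes well: InterruptedException becomes an InterruptedIOException with the original attached via initCause, while ExecutionException is unwrapped into a plain IOException. A sketch with a hypothetical helper name (it additionally restores the interrupt flag, which the original does not):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

// Hypothetical helper extracted from the pattern above; not HBase API.
public final class AwaitSketch {

    static void awaitAll(List<Future<Void>> futures, String what) throws IOException {
        for (Future<Void> f : futures) {
            try {
                f.get();
            } catch (InterruptedException e) {
                // Restore the interrupt flag, then report the interrupt as an
                // InterruptedIOException with the original cause attached.
                Thread.currentThread().interrupt();
                InterruptedIOException iioe =
                        new InterruptedIOException("interrupted while " + what);
                iioe.initCause(e);
                throw iioe;
            } catch (ExecutionException e) {
                // The task itself failed: unwrap its cause into a plain IOException.
                throw new IOException("failed while " + what, e.getCause());
            }
        }
    }
}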

Example 15 with InterruptedIOException

Use of java.io.InterruptedIOException in project hbase by apache.

The class WALSplitter, method getReader.

/**
   * Create a new {@link Reader} for reading logs to split.
   *
   * @param file the WAL file to open
   * @param skipErrors if true, an unreadable file is reported as corrupt
   *          instead of aborting the split
   * @param reporter progress reporter used while recovering the file lease
   * @return A new Reader instance; the caller should close it
   * @throws IOException
   * @throws CorruptedLogFileException
   */
protected Reader getReader(FileStatus file, boolean skipErrors, CancelableProgressable reporter) throws IOException, CorruptedLogFileException {
    Path path = file.getPath();
    long length = file.getLen();
    Reader in;
    // Check for a possibly empty file: with appends, a zero length can be
    // reported even for a file that has been sync'd. Revisit once HDFS-878
    // is committed.
    if (length <= 0) {
        LOG.warn("File " + path + " might be still open, length is 0");
    }
    try {
        FSUtils.getInstance(fs, conf).recoverFileLease(fs, path, conf, reporter);
        try {
            in = getReader(path, reporter);
        } catch (EOFException e) {
            if (length <= 0) {
                // TODO should we ignore an empty, not-last log file if skip.errors
                // is false? Either way, the caller should decide what to do. E.g.
                // ignore if this is the last log in sequence.
                // TODO is this scenario still possible if the log has been
                // recovered (i.e. closed)
                LOG.warn("Could not open " + path + " for reading. File is empty", e);
                return null;
            } else {
                // EOFException being ignored
                return null;
            }
        }
    } catch (IOException e) {
        if (e instanceof FileNotFoundException) {
            // A wal file may not exist anymore. Nothing can be recovered so move on
            LOG.warn("File " + path + " doesn't exist anymore.", e);
            return null;
        }
        if (!skipErrors || e instanceof InterruptedIOException) {
            // Don't mark the file corrupted if interrupted, or not skipErrors
            throw e;
        }
        CorruptedLogFileException t = new CorruptedLogFileException("skipErrors=true Could not open wal " + path + " ignoring");
        t.initCause(e);
        throw t;
    }
    return in;
}
Also used: Path (org.apache.hadoop.fs.Path) InterruptedIOException (java.io.InterruptedIOException) EOFException (java.io.EOFException) FileNotFoundException (java.io.FileNotFoundException) Reader (org.apache.hadoop.hbase.wal.WAL.Reader) IOException (java.io.IOException) MultipleIOException (org.apache.hadoop.io.MultipleIOException)
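
The essential rule here is that skipErrors never applies to an InterruptedIOException: treating an interrupt as just another skippable read error would silently swallow a cancellation request. A minimal sketch of that rule, with illustrative names:

import java.io.IOException;
import java.io.InterruptedIOException;

// Illustrative names; not the WALSplitter API.
public class SkipErrorsSketch {

    interface Opener<T> {
        T open() throws IOException;
    }

    static <T> T openOrNull(Opener<T> opener, boolean skipErrors) throws IOException {
        try {
            return opener.open();
        } catch (IOException e) {
            if (!skipErrors || e instanceof InterruptedIOException) {
                // An interrupt is a cancellation request, never a "corrupt file":
                // rethrow it even when the caller asked to tolerate errors.
                throw e;
            }
            return null; // tolerated failure: the caller gets no reader
        }
    }
}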

Aggregations

InterruptedIOException (java.io.InterruptedIOException): 274 usages
IOException (java.io.IOException): 186 usages
Test (org.junit.Test): 39 usages
ArrayList (java.util.ArrayList): 27 usages
Socket (java.net.Socket): 26 usages
ConnectException (java.net.ConnectException): 22 usages
ExecutionException (java.util.concurrent.ExecutionException): 22 usages
InputStream (java.io.InputStream): 21 usages
InetSocketAddress (java.net.InetSocketAddress): 21 usages
ByteBuffer (java.nio.ByteBuffer): 21 usages
Path (org.apache.hadoop.fs.Path): 20 usages
NoRouteToHostException (java.net.NoRouteToHostException): 19 usages
ServletException (javax.servlet.ServletException): 17 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 16 usages
SocketTimeoutException (java.net.SocketTimeoutException): 15 usages
HttpServletRequest (javax.servlet.http.HttpServletRequest): 15 usages
HttpServletResponse (javax.servlet.http.HttpServletResponse): 15 usages
EOFException (java.io.EOFException): 14 usages
SocketException (java.net.SocketException): 14 usages
OutputStream (java.io.OutputStream): 13 usages