Example 1 with ReaderRetryPolicy

Use of org.apache.hadoop.hdfs.StripeReader.ReaderRetryPolicy in project hadoop by apache.

From class DFSStripedInputStream, method createBlockReader:

boolean createBlockReader(LocatedBlock block, long offsetInBlock, LocatedBlock[] targetBlocks, BlockReaderInfo[] readerInfos, int chunkIndex) throws IOException {
    BlockReader reader = null;
    final ReaderRetryPolicy retry = new ReaderRetryPolicy();
    DFSInputStream.DNAddrPair dnInfo = new DFSInputStream.DNAddrPair(null, null, null);
    while (true) {
        try {
            // the cached block location might have been re-fetched, so always
            // get it from cache.
            block = refreshLocatedBlock(block);
            targetBlocks[chunkIndex] = block;
            // internal block has one location, just rule out the deadNodes
            dnInfo = getBestNodeDNAddrPair(block, null);
            if (dnInfo == null) {
                break;
            }
            reader = getBlockReader(block, offsetInBlock, block.getBlockSize() - offsetInBlock, dnInfo.addr, dnInfo.storageType, dnInfo.info);
        } catch (IOException e) {
            if (e instanceof InvalidEncryptionKeyException && retry.shouldRefetchEncryptionKey()) {
                DFSClient.LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + dnInfo.addr + " : " + e);
                dfsClient.clearDataEncryptionKey();
                retry.refetchEncryptionKey();
            } else if (retry.shouldRefetchToken() && tokenRefetchNeeded(e, dnInfo.addr)) {
                // the block access token may have expired: re-fetch the
                // block from the NameNode, which also yields a fresh token
                fetchBlockAt(block.getStartOffset());
                retry.refetchToken();
            } else {
                //TODO: handle connection issues
                DFSClient.LOG.warn("Failed to connect to " + dnInfo.addr + " for block " + block.getBlock(), e);
                // re-fetch the block in case the block has been moved
                fetchBlockAt(block.getStartOffset());
                addToDeadNodes(dnInfo.info);
            }
        }
        if (reader != null) {
            readerInfos[chunkIndex] = new BlockReaderInfo(reader, dnInfo.info, offsetInBlock);
            return true;
        }
    }
    return false;
}
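
ReaderRetryPolicy itself is not shown on this page, but the loop above tells us its shape: it is a small per-call budget object whose should* methods gate each recovery action and whose refetch* methods consume the budget. A minimal sketch of that pattern, assuming a one-shot budget per action (the class name and initial counter values here are illustrative, not taken from the HDFS source):

class OneShotReaderRetryPolicy {
    // Each recovery action gets a fixed number of attempts per call;
    // assumed here to be one, matching how the loop consumes the budget.
    private int encryptionKeyRetries = 1;
    private int tokenRetries = 1;

    boolean shouldRefetchEncryptionKey() {
        return encryptionKeyRetries > 0;
    }

    void refetchEncryptionKey() {
        encryptionKeyRetries--;
    }

    boolean shouldRefetchToken() {
        return tokenRetries > 0;
    }

    void refetchToken() {
        tokenRetries--;
    }
}

With a one-shot budget, createBlockReader can recover from a stale encryption key and a stale block token at most once each; once a budget is spent, the same exception falls through to the final else branch, which marks the datanode dead, so getBestNodeDNAddrPair eventually returns null and the method returns false.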
Also used: InvalidEncryptionKeyException (org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException), ReaderRetryPolicy (org.apache.hadoop.hdfs.StripeReader.ReaderRetryPolicy), IOException (java.io.IOException), BlockReaderInfo (org.apache.hadoop.hdfs.StripeReader.BlockReaderInfo)

Aggregations

IOException (java.io.IOException) 1
BlockReaderInfo (org.apache.hadoop.hdfs.StripeReader.BlockReaderInfo) 1
ReaderRetryPolicy (org.apache.hadoop.hdfs.StripeReader.ReaderRetryPolicy) 1
InvalidEncryptionKeyException (org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException) 1