
Example 1 with BlockLocalPathInfo

use of org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo in project hadoop by apache.

the class FsDatasetImpl method getBlockLocalPathInfo.

@Override // FsDatasetSpi
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
        if (replica == null) {
            throw new ReplicaNotFoundException(block);
        }
        if (replica.getGenerationStamp() < block.getGenerationStamp()) {
            throw new IOException("Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica);
        } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
            block.setGenerationStamp(replica.getGenerationStamp());
        }
    }
    ReplicaInfo r = getBlockReplica(block);
    File blockFile = new File(r.getBlockURI());
    File metaFile = new File(r.getMetadataURI());
    BlockLocalPathInfo info = new BlockLocalPathInfo(block, blockFile.getAbsolutePath(), metaFile.toString());
    return info;
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException), RamDiskReplica(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica), Replica(org.apache.hadoop.hdfs.server.datanode.Replica), File(java.io.File)
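
For orientation, BlockLocalPathInfo is a small value object that ties an ExtendedBlock to the absolute paths of its block file and checksum (meta) file on the datanode's local disks; the datanode fills it in as above and client-side readers consume the two paths. A minimal sketch of that shape, assuming the two-argument ExtendedBlock(poolId, blockId) constructor and using only the constructor and the getBlockPath()/getMetaPath() accessors already exercised on this page; the pool id, block id, and paths are made-up illustration values.

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class BlockLocalPathInfoSketch {
    public static void main(String[] args) {
        // Hypothetical block in a hypothetical block pool.
        ExtendedBlock blk = new ExtendedBlock("BP-1234-10.0.0.1-1400000000000", 1073741825L);
        // Pair the block with hypothetical local block and meta file paths,
        // much as getBlockLocalPathInfo() does from the replica's URIs.
        BlockLocalPathInfo info = new BlockLocalPathInfo(blk,
                "/data/1/dfs/dn/current/BP-1234/current/finalized/blk_1073741825",
                "/data/1/dfs/dn/current/BP-1234/current/finalized/blk_1073741825_1001.meta");
        // Readers such as BlockReaderLocalLegacy (Example 3) open these paths directly.
        System.out.println(info.getBlockPath() + " , " + info.getMetaPath());
    }
}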

Example 2 with BlockLocalPathInfo

use of org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo in project hadoop by apache.

the class ClientDatanodeProtocolTranslatorPB method getBlockLocalPathInfo.

@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, Token<BlockTokenIdentifier> token) throws IOException {
    GetBlockLocalPathInfoRequestProto req = GetBlockLocalPathInfoRequestProto.newBuilder()
            .setBlock(PBHelperClient.convert(block))
            .setToken(PBHelperClient.convert(token))
            .build();
    GetBlockLocalPathInfoResponseProto resp;
    try {
        resp = rpcProxy.getBlockLocalPathInfo(NULL_CONTROLLER, req);
    } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
    }
    return new BlockLocalPathInfo(PBHelperClient.convert(resp.getBlock()), resp.getLocalPath(), resp.getLocalMetaPath());
}
Also used : ServiceException(com.google.protobuf.ServiceException), GetBlockLocalPathInfoRequestProto(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto), BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), GetBlockLocalPathInfoResponseProto(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)
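
The translator above is what a client ends up invoking over the datanode's IPC port. A minimal client-side sketch of using it, assuming a ClientDatanodeProtocol proxy to the co-located datanode has already been obtained (for example via LocalDatanodeInfo.getDatanodeProxy as in Example 4); the proxy, block, and token parameters are placeholders supplied by the caller.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class LocalBlockOpenSketch {
    // Ask the local datanode for the block's on-disk paths, then open the block
    // file directly, bypassing the datanode's streaming data-transfer path.
    static FileInputStream openLocalBlock(ClientDatanodeProtocol proxy, ExtendedBlock block,
            Token<BlockTokenIdentifier> token) throws IOException {
        BlockLocalPathInfo pathinfo = proxy.getBlockLocalPathInfo(block, token);
        // getBlockPath() is the absolute path of the block file on this host.
        return new FileInputStream(new File(pathinfo.getBlockPath()));
    }
}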

Example 3 with BlockLocalPathInfo

use of org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo in project hadoop by apache.

the class BlockReaderLocalLegacy method newBlockReader.

/**
   * The only way this object can be instantiated.
   */
static BlockReaderLocalLegacy newBlockReader(DfsClientConf conf,
        UserGroupInformation userGroupInformation, Configuration configuration,
        String file, ExtendedBlock blk, Token<BlockTokenIdentifier> token,
        DatanodeInfo node, long startOffset, long length, StorageType storageType,
        Tracer tracer) throws IOException {
    final ShortCircuitConf scConf = conf.getShortCircuitConf();
    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
    // check the cache first
    BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
    if (pathinfo == null) {
        if (userGroupInformation == null) {
            userGroupInformation = UserGroupInformation.getCurrentUser();
        }
        pathinfo = getBlockPathInfo(userGroupInformation, blk, node, configuration, conf.getSocketTimeout(), token, conf.isConnectToDnViaHostname(), storageType);
    }
    // check to see if the file exists. It may so happen that the
    // HDFS file has been deleted and this block-lookup is occurring
    // on behalf of a new HDFS file. This time, the block file could
    // be residing in a different portion of the fs.data.dir directory.
    // In this case, we remove this entry from the cache. The next
    // call to this method will re-populate the cache.
    FileInputStream dataIn = null;
    FileInputStream checksumIn = null;
    BlockReaderLocalLegacy localBlockReader = null;
    final boolean skipChecksumCheck = scConf.isSkipShortCircuitChecksums() || storageType.isTransient();
    try {
        // get a local file system
        File blkfile = new File(pathinfo.getBlockPath());
        dataIn = new FileInputStream(blkfile);
        LOG.debug("New BlockReaderLocalLegacy for file {} of size {} startOffset " + "{} length {} short circuit checksum {}", blkfile, blkfile.length(), startOffset, length, !skipChecksumCheck);
        if (!skipChecksumCheck) {
            // get the metadata file
            File metafile = new File(pathinfo.getMetaPath());
            checksumIn = new FileInputStream(metafile);
            final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(new DataInputStream(checksumIn), blk);
            long firstChunkOffset = startOffset - (startOffset % checksum.getBytesPerChecksum());
            localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, startOffset, checksum, true, dataIn, firstChunkOffset, checksumIn, tracer);
        } else {
            localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, startOffset, dataIn, tracer);
        }
    } catch (IOException e) {
        // remove from cache
        localDatanodeInfo.removeBlockLocalPathInfo(blk);
        LOG.warn("BlockReaderLocalLegacy: Removing " + blk + " from cache because local file " + pathinfo.getBlockPath() + " could not be opened.");
        throw e;
    } finally {
        if (localBlockReader == null) {
            if (dataIn != null) {
                dataIn.close();
            }
            if (checksumIn != null) {
                checksumIn.close();
            }
        }
    }
    return localBlockReader;
}
Also used : ShortCircuitConf(org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf), BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), IOException(java.io.IOException), DataInputStream(java.io.DataInputStream), File(java.io.File), FileInputStream(java.io.FileInputStream), DataChecksum(org.apache.hadoop.util.DataChecksum)
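
One detail worth noting in the checksum branch above: verification works on whole checksum chunks, so the requested startOffset is rounded down to a multiple of bytesPerChecksum before the reader is constructed. A tiny worked sketch of that arithmetic (the offset is arbitrary; 512 is the default dfs.bytes-per-checksum value).

public class ChunkAlignmentSketch {
    public static void main(String[] args) {
        long startOffset = 1_000_000L; // requested read position within the block
        int bytesPerChecksum = 512;    // checksum.getBytesPerChecksum() in newBlockReader
        // Same computation as in newBlockReader: round down to a chunk boundary so the
        // first verified chunk fully covers the requested offset.
        long firstChunkOffset = startOffset - (startOffset % bytesPerChecksum);
        System.out.println(firstChunkOffset); // 999936, i.e. 1953 * 512
    }
}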

Example 4 with BlockLocalPathInfo

use of org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo in project hadoop by apache.

the class BlockReaderLocalLegacy method getBlockPathInfo.

private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi, ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout, Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname, StorageType storageType) throws IOException {
    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
    BlockLocalPathInfo pathinfo;
    ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node, conf, timeout, connectToDnViaHostname);
    try {
        // make RPC to local datanode to find local pathnames of blocks
        pathinfo = proxy.getBlockLocalPathInfo(blk, token);
        // A cached path for a replica on transient storage could later be
        // invalidated (the replica may be evicted and its path change), so the
        // only safe option is to skip caching in that case.
        if (pathinfo != null && !storageType.isTransient()) {
            LOG.debug("Cached location of block {} as {}", blk, pathinfo);
            localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
        }
    } catch (IOException e) {
        // Reset proxy on error
        localDatanodeInfo.resetDatanodeProxy();
        throw e;
    }
    return pathinfo;
}
Also used : BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), IOException(java.io.IOException), ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)

Example 5 with BlockLocalPathInfo

use of org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo in project hadoop by apache.

the class DataNode method getBlockLocalPathInfo.

@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, Token<BlockTokenIdentifier> token) throws IOException {
    checkBlockLocalPathAccess();
    checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ);
    Preconditions.checkNotNull(data, "Storage not yet initialized");
    BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
    if (LOG.isDebugEnabled()) {
        if (info != null) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("getBlockLocalPathInfo successful block=" + block + " blockfile " + info.getBlockPath() + " metafile " + info.getMetaPath());
            }
        } else {
            if (LOG.isTraceEnabled()) {
                LOG.trace("getBlockLocalPathInfo for block=" + block + " returning null");
            }
        }
    }
    metrics.incrBlocksGetLocalPathInfo();
    return info;
}
Also used : BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo)
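
The datanode only serves this RPC after checkBlockLocalPathAccess() and a READ block-token check, so the legacy short-circuit path works only for explicitly whitelisted local users. A hedged configuration sketch follows; the property names below are the usual legacy short-circuit settings, but treat them as assumptions and verify them against your Hadoop version.

import org.apache.hadoop.hdfs.HdfsConfiguration;

public class LegacyShortCircuitConfSketch {
    public static void main(String[] args) {
        HdfsConfiguration conf = new HdfsConfiguration();
        // Client side: opt in to the legacy BlockReaderLocalLegacy code path (assumed key).
        conf.setBoolean("dfs.client.use.legacy.blockreader.local", true);
        // Datanode side: users allowed past checkBlockLocalPathAccess()
        // (assumed key, example whitelist).
        conf.set("dfs.block.local-path-access.user", "hbase");
        // Optionally skip checksum verification for short-circuit reads (assumed key).
        conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
    }
}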

Aggregations

BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 7
File (java.io.File): 3
IOException (java.io.IOException): 3
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 2
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2
ServiceException (com.google.protobuf.ServiceException): 1
DataInputStream (java.io.DataInputStream): 1
FileInputStream (java.io.FileInputStream): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
Path (org.apache.hadoop.fs.Path): 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1
ShortCircuitConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf): 1
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 1
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 1
GetBlockLocalPathInfoRequestProto (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto): 1
GetBlockLocalPathInfoResponseProto (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto): 1
BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 1
Replica (org.apache.hadoop.hdfs.server.datanode.Replica): 1