Example 1 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

Class DFSInputStream, method readBlockLength.

/** Read the block length from one of the datanodes. */
private long readBlockLength(LocatedBlock locatedblock) throws IOException {
    assert locatedblock != null : "LocatedBlock cannot be null";
    int replicaNotFoundCount = locatedblock.getLocations().length;
    final DfsClientConf conf = dfsClient.getConf();
    final int timeout = conf.getSocketTimeout();
    LinkedList<DatanodeInfo> nodeList = new LinkedList<DatanodeInfo>(Arrays.asList(locatedblock.getLocations()));
    LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
    boolean isRetry = false;
    StopWatch sw = new StopWatch();
    while (nodeList.size() > 0) {
        DatanodeInfo datanode = nodeList.pop();
        ClientDatanodeProtocol cdp = null;
        try {
            cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode, dfsClient.getConfiguration(), timeout, conf.isConnectToDnViaHostname(), locatedblock);
            final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
            if (n >= 0) {
                return n;
            }
        } catch (IOException ioe) {
            checkInterrupted(ioe);
            if (ioe instanceof RemoteException) {
                if (((RemoteException) ioe).unwrapRemoteException() instanceof ReplicaNotFoundException) {
                    // replica is not on the DN. We will treat it as 0 length
                    // if no one actually has a replica.
                    replicaNotFoundCount--;
                } else if (((RemoteException) ioe).unwrapRemoteException() instanceof RetriableException) {
                    // add to the list to be retried if necessary.
                    retryList.add(datanode);
                }
            }
            DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode {}" + " for block {}", datanode, locatedblock.getBlock(), ioe);
        } finally {
            if (cdp != null) {
                RPC.stopProxy(cdp);
            }
        }
        // Ran out of nodes, but there are retriable nodes.
        if (nodeList.size() == 0 && retryList.size() > 0) {
            nodeList.addAll(retryList);
            retryList.clear();
            isRetry = true;
        }
        if (isRetry) {
            // start the stop watch if not already running.
            if (!sw.isRunning()) {
                sw.start();
            }
            try {
                // delay between retries.
                Thread.sleep(500);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new InterruptedIOException("Interrupted while getting the length.");
            }
        }
        // see if we ran out of retry time
        if (sw.isRunning() && sw.now(TimeUnit.MILLISECONDS) > timeout) {
            break;
        }
    }
    // If every located datanode reported ReplicaNotFoundException, no replica
    // exists anywhere yet; treat the block as zero-length rather than failing.
    if (replicaNotFoundCount == 0) {
        return 0;
    }
    throw new IOException("Cannot obtain block length for " + locatedblock);
}
Also used: InterruptedIOException(java.io.InterruptedIOException), DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), IOException(java.io.IOException), ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), LinkedList(java.util.LinkedList), StopWatch(org.apache.hadoop.util.StopWatch), DfsClientConf(org.apache.hadoop.hdfs.client.impl.DfsClientConf), RemoteException(org.apache.hadoop.ipc.RemoteException), RetriableException(org.apache.hadoop.ipc.RetriableException)
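
The key move in Example 1 is unwrapping the RPC-layer RemoteException to see whether the datanode actually reported ReplicaNotFoundException. The sketch below isolates that pattern; the helper class and method names are hypothetical, and the proxy and block arguments are assumed to be set up as in the example above.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.ipc.RemoteException;

// Hypothetical helper illustrating the RemoteException unwrapping used in Example 1.
class ReplicaLengthProbe {
    /** Returns the visible length, or -1 if this datanode has no such replica. */
    static long visibleLengthOrMinusOne(ClientDatanodeProtocol cdp, ExtendedBlock block)
            throws IOException {
        try {
            return cdp.getReplicaVisibleLength(block);
        } catch (RemoteException re) {
            // The datanode-side exception arrives wrapped in a RemoteException;
            // unwrapping recovers the original ReplicaNotFoundException type.
            IOException unwrapped = re.unwrapRemoteException(ReplicaNotFoundException.class);
            if (unwrapped instanceof ReplicaNotFoundException) {
                return -1; // this datanode does not hold the replica
            }
            throw unwrapped;
        }
    }
}

Returning -1 here mirrors Example 1's bookkeeping, where each ReplicaNotFoundException decrements replicaNotFoundCount instead of failing the read outright.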

Example 2 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

Class FsDatasetImpl, method moveBlockAcrossStorage.

/**
   * Move block files from one storage to another storage.
   * @return Returns the Old replicaInfo
   * @throws IOException
   */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block, StorageType targetStorageType) throws IOException {
    ReplicaInfo replicaInfo = getReplicaInfo(block);
    if (replicaInfo.getState() != ReplicaState.FINALIZED) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
    }
    if (replicaInfo.getNumBytes() != block.getNumBytes()) {
        throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaInfo.getNumBytes() + " expected length is " + block.getNumBytes());
    }
    if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
        throw new ReplicaAlreadyExistsException("Replica " + replicaInfo + " already exists on storage " + targetStorageType);
    }
    if (replicaInfo.isOnTransientStorage()) {
        // Block movement from RAM_DISK will be done by LazyPersist mechanism
        throw new IOException("Replica " + replicaInfo + " cannot be moved from storageType : " + replicaInfo.getVolume().getStorageType());
    }
    FsVolumeReference volumeRef = null;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        volumeRef = volumes.getNextVolume(targetStorageType, block.getNumBytes());
    }
    try {
        moveBlock(block, replicaInfo, volumeRef);
    } finally {
        if (volumeRef != null) {
            volumeRef.close();
        }
    }
    // Replace the old block if any to reschedule the scanning.
    return replicaInfo;
}
Also used: FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock), ReplicaAlreadyExistsException(org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException)
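
A datanode-internal caller reaches this method through the FsDatasetSpi interface (the @Override above implies the two-argument signature exists there). Below is a minimal hedged sketch, assuming a dataset instance and an arbitrary SSD target; it is illustrative, not the mover's actual code path, and the class name is made up.

import java.io.IOException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

// Hypothetical caller-side sketch; `dataset` would be the datanode's FsDatasetSpi instance.
class BlockMoveSketch {
    static boolean tryMoveToSsd(FsDatasetSpi<?> dataset, ExtendedBlock block) throws IOException {
        try {
            dataset.moveBlockAcrossStorage(block, StorageType.SSD);
            return true;
        } catch (ReplicaNotFoundException e) {
            // The replica is missing or not yet FINALIZED on this datanode.
            return false;
        }
    }
}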

Example 3 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

Class FsDatasetImpl, method getBlockLocalPathInfo.

// FsDatasetSpi
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
        if (replica == null) {
            throw new ReplicaNotFoundException(block);
        }
        if (replica.getGenerationStamp() < block.getGenerationStamp()) {
            throw new IOException("Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica);
        } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
            block.setGenerationStamp(replica.getGenerationStamp());
        }
    }
    ReplicaInfo r = getBlockReplica(block);
    File blockFile = new File(r.getBlockURI());
    File metaFile = new File(r.getMetadataURI());
    BlockLocalPathInfo info = new BlockLocalPathInfo(block, blockFile.getAbsolutePath(), metaFile.toString());
    return info;
}
Also used: ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException), RamDiskReplica(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica), Replica(org.apache.hadoop.hdfs.server.datanode.Replica), File(java.io.File)
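
getBlockLocalPathInfo supports short-circuit local reads: the client asks the datanode for the local block and meta file paths and then reads the data file directly. Here is a hedged sketch of consuming the returned BlockLocalPathInfo; the class name is hypothetical and the info value is assumed to come from the call above.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;

// Hypothetical consumer of the BlockLocalPathInfo returned above.
class LocalBlockReaderSketch {
    static long blockFileLength(BlockLocalPathInfo info) throws IOException {
        // getBlockPath() names the data file; getMetaPath() names the checksum file.
        File blockFile = new File(info.getBlockPath());
        try (RandomAccessFile raf = new RandomAccessFile(blockFile, "r")) {
            return raf.length();
        }
    }
}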

Example 4 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

Class FsDatasetImpl, method checkBlock.

/**
   * Check if a block is valid.
   *
   * @param b           The block to check.
   * @param minLength   The minimum length that the block must have.  May be 0.
   * @param state       If this is null, it is ignored.  If it is non-null, we
   *                        will check that the replica has this state.
   *
   * @throws ReplicaNotFoundException          If the replica is not found 
   *
   * @throws UnexpectedReplicaStateException   If the replica is not in the 
   *                                             expected state.
   * @throws FileNotFoundException             If the block file is not found or there
   *                                              was an error locating it.
   * @throws EOFException                      If the replica length is too short.
   * 
   * @throws IOException                       May be thrown from the methods called. 
   */
// FsDatasetSpi
@Override
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state) throws ReplicaNotFoundException, UnexpectedReplicaStateException, FileNotFoundException, EOFException, IOException {
    final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
    if (replicaInfo == null) {
        throw new ReplicaNotFoundException(b);
    }
    if (replicaInfo.getState() != state) {
        throw new UnexpectedReplicaStateException(b, state);
    }
    if (!replicaInfo.blockDataExists()) {
        throw new FileNotFoundException(replicaInfo.getBlockURI().toString());
    }
    long onDiskLength = getLength(b);
    if (onDiskLength < minLength) {
        throw new EOFException(b + "'s on-disk length " + onDiskLength + " is shorter than minLength " + minLength);
    }
}
Also used: ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), FileNotFoundException(java.io.FileNotFoundException), EOFException(java.io.EOFException), UnexpectedReplicaStateException(org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException)
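
Because checkBlock signals each failure mode with a distinct exception, a caller can collapse them into a simple usable/not-usable decision. A minimal sketch under that assumption follows; the helper class name and the dataset variable are illustrative.

import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

// Hypothetical helper: reduce checkBlock's exception variants to a boolean.
class BlockValiditySketch {
    static boolean isFinalizedAndLongEnough(FsDatasetSpi<?> dataset, ExtendedBlock b,
            long minLength) throws IOException {
        try {
            dataset.checkBlock(b, minLength, ReplicaState.FINALIZED);
            return true;
        } catch (ReplicaNotFoundException | UnexpectedReplicaStateException
                | FileNotFoundException | EOFException e) {
            // Missing replica, wrong state, missing block file, or too short.
            return false;
        }
    }
}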

Example 5 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

Class FsDatasetImpl, method append.

// FsDatasetSpi
@Override
public ReplicaHandler append(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // check the validity of the parameter
        if (newGS < b.getGenerationStamp()) {
            throw new IOException("The new generation stamp " + newGS + " should be greater than the replica " + b + "'s generation stamp");
        }
        ReplicaInfo replicaInfo = getReplicaInfo(b);
        LOG.info("Appending to " + replicaInfo);
        if (replicaInfo.getState() != ReplicaState.FINALIZED) {
            throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
        }
        if (replicaInfo.getNumBytes() != expectedBlockLen) {
            throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaInfo.getNumBytes() + " expected length is " + expectedBlockLen);
        }
        FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
        ReplicaInPipeline replica = null;
        try {
            replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        return new ReplicaHandler(replica, ref);
    }
}
Also used: FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
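
The ReplicaHandler returned here bundles the writable replica with the FsVolumeReference obtained above, so the caller must close it to release the volume. A hedged usage sketch, assuming ReplicaHandler exposes getReplica() and is Closeable as in current Hadoop sources; the class and method names are made up.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

// Hypothetical caller-side sketch of append(); names are illustrative.
class AppendSketch {
    static void appendAndRelease(FsDatasetSpi<?> dataset, ExtendedBlock b,
            long newGS, long expectedLen) throws IOException {
        // Closing the handler releases the FsVolumeReference taken inside append().
        try (ReplicaHandler handler = dataset.append(b, newGS, expectedLen)) {
            ReplicaInPipeline replica = handler.getReplica();
            // ... stream data to the replica via the write pipeline ...
        }
    }
}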

Aggregations

ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 14
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 10
IOException (java.io.IOException): 9
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 8
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 7
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 4
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 4
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 3
File (java.io.File): 2
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 2
EOFException (java.io.EOFException): 1
FileNotFoundException (java.io.FileNotFoundException): 1
InterruptedIOException (java.io.InterruptedIOException): 1
RandomAccessFile (java.io.RandomAccessFile): 1
LinkedList (java.util.LinkedList): 1
DfsClientConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf): 1
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 1
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 1
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1