use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
the class DFSInputStream method readBlockLength.
/** Read the block length from one of the datanodes. */
private long readBlockLength(LocatedBlock locatedblock) throws IOException {
  assert locatedblock != null : "LocatedBlock cannot be null";
  int replicaNotFoundCount = locatedblock.getLocations().length;
  final DfsClientConf conf = dfsClient.getConf();
  final int timeout = conf.getSocketTimeout();
  LinkedList<DatanodeInfo> nodeList =
      new LinkedList<DatanodeInfo>(Arrays.asList(locatedblock.getLocations()));
  LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
  boolean isRetry = false;
  StopWatch sw = new StopWatch();
  while (nodeList.size() > 0) {
    DatanodeInfo datanode = nodeList.pop();
    ClientDatanodeProtocol cdp = null;
    try {
      cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode,
          dfsClient.getConfiguration(), timeout,
          conf.isConnectToDnViaHostname(), locatedblock);
      final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
      if (n >= 0) {
        return n;
      }
    } catch (IOException ioe) {
      checkInterrupted(ioe);
      if (ioe instanceof RemoteException) {
        if (((RemoteException) ioe).unwrapRemoteException()
            instanceof ReplicaNotFoundException) {
          // replica is not on the DN. We will treat it as 0 length
          // if no one actually has a replica.
          replicaNotFoundCount--;
        } else if (((RemoteException) ioe).unwrapRemoteException()
            instanceof RetriableException) {
          // add to the list to be retried if necessary.
          retryList.add(datanode);
        }
      }
      DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode {}"
          + " for block {}", datanode, locatedblock.getBlock(), ioe);
    } finally {
      if (cdp != null) {
        RPC.stopProxy(cdp);
      }
    }
    // Ran out of nodes, but there are retriable nodes.
    if (nodeList.size() == 0 && retryList.size() > 0) {
      nodeList.addAll(retryList);
      retryList.clear();
      isRetry = true;
    }
    if (isRetry) {
      // start the stop watch if not already running.
      if (!sw.isRunning()) {
        sw.start();
      }
      try {
        // delay between retries.
        Thread.sleep(500);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Interrupted while getting the length.");
      }
    }
    // see if we ran out of retry time
    if (sw.isRunning() && sw.now(TimeUnit.MILLISECONDS) > timeout) {
      break;
    }
  }
  // If every datanode reported ReplicaNotFoundException, no one actually has
  // the replica, so treat the block as zero length. Otherwise some other error
  // occurred on a DN that has it, and we want to report that error.
  if (replicaNotFoundCount == 0) {
    return 0;
  }
  throw new IOException("Cannot obtain block length for " + locatedblock);
}
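The retry loop above keys off the exception unwrapped from the RPC's RemoteException. Below is a minimal sketch of that classification, kept outside DFSInputStream; the helper class and enum names are invented for illustration, while the exception types and unwrapRemoteException() come straight from the snippet.

// A minimal sketch of the error classification readBlockLength relies on.
import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;

final class ReplicaErrorClassifier {
  enum Kind { REPLICA_NOT_FOUND, RETRIABLE, OTHER }

  // Mirrors the checks in readBlockLength: unwrap the RemoteException and see
  // which datanode-side exception it carries.
  static Kind classify(IOException ioe) {
    if (ioe instanceof RemoteException) {
      IOException unwrapped = ((RemoteException) ioe).unwrapRemoteException();
      if (unwrapped instanceof ReplicaNotFoundException) {
        return Kind.REPLICA_NOT_FOUND; // the datanode does not store the replica
      }
      if (unwrapped instanceof RetriableException) {
        return Kind.RETRIABLE;         // the datanode asks the client to try again
      }
    }
    return Kind.OTHER;                 // anything else is a real failure
  }
}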
use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
the class FsDatasetImpl method moveBlockAcrossStorage.
/**
 * Move block files from one storage to another.
 * @return the old ReplicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }
  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }
  FsVolumeReference volumeRef = null;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    volumeRef = volumes.getNextVolume(targetStorageType, block.getNumBytes());
  }
  try {
    moveBlock(block, replicaInfo, volumeRef);
  } finally {
    if (volumeRef != null) {
      volumeRef.close();
    }
  }
  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}
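For context, here is a hedged caller sketch, not taken from the Hadoop source, showing how the exceptions this method can raise might be handled when moving a replica to ARCHIVE storage; the wrapper class, method, and parameter names are assumptions.

// Hedged caller sketch: "dataset" and "block" are assumed to be a live
// FsDatasetSpi implementation and a block this datanode stores.
import java.io.IOException;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

final class MoveToArchiveExample {
  static void moveToArchive(FsDatasetSpi<?> dataset, ExtendedBlock block)
      throws IOException {
    try {
      // Copy the block and meta files onto a volume of the target storage type.
      dataset.moveBlockAcrossStorage(block, StorageType.ARCHIVE);
    } catch (ReplicaNotFoundException e) {
      // The replica is missing or not yet FINALIZED; nothing can be moved.
    } catch (ReplicaAlreadyExistsException e) {
      // The replica already lives on ARCHIVE storage; treat the move as a no-op.
    }
  }
}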
use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
the class FsDatasetImpl method getBlockLocalPathInfo.
// FsDatasetSpi
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
    throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    final Replica replica = volumeMap.get(block.getBlockPoolId(),
        block.getBlockId());
    if (replica == null) {
      throw new ReplicaNotFoundException(block);
    }
    if (replica.getGenerationStamp() < block.getGenerationStamp()) {
      throw new IOException("Replica generation stamp < block generation stamp, block="
          + block + ", replica=" + replica);
    } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
      block.setGenerationStamp(replica.getGenerationStamp());
    }
  }
  ReplicaInfo r = getBlockReplica(block);
  File blockFile = new File(r.getBlockURI());
  File metaFile = new File(r.getMetadataURI());
  BlockLocalPathInfo info = new BlockLocalPathInfo(block,
      blockFile.getAbsolutePath(), metaFile.toString());
  return info;
}
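As a usage illustration, the hedged sketch below resolves the block's local data file from the returned BlockLocalPathInfo and treats a missing replica as "no local path"; the wrapper class and method are invented for illustration.

// Hedged sketch: resolve the local data file for a block via getBlockLocalPathInfo.
import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

final class LocalPathExample {
  static File blockFileOrNull(FsDatasetSpi<?> dataset, ExtendedBlock block)
      throws IOException {
    try {
      BlockLocalPathInfo info = dataset.getBlockLocalPathInfo(block);
      return new File(info.getBlockPath()); // local path of the block data file
    } catch (ReplicaNotFoundException e) {
      return null;                          // this datanode has no such replica
    }
  }
}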
use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
the class FsDatasetImpl method checkBlock.
/**
 * Check if a block is valid.
 *
 * @param b The block to check.
 * @param minLength The minimum length that the block must have. May be 0.
 * @param state If this is null, it is ignored. If it is non-null, we
 *        will check that the replica has this state.
 *
 * @throws ReplicaNotFoundException If the replica is not found
 * @throws UnexpectedReplicaStateException If the replica is not in the
 *         expected state.
 * @throws FileNotFoundException If the block file is not found or there
 *         was an error locating it.
 * @throws EOFException If the replica length is too short.
 * @throws IOException May be thrown from the methods called.
 */
// FsDatasetSpi
@Override
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException,
    FileNotFoundException, EOFException, IOException {
  final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
      b.getLocalBlock());
  if (replicaInfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  if (replicaInfo.getState() != state) {
    throw new UnexpectedReplicaStateException(b, state);
  }
  if (!replicaInfo.blockDataExists()) {
    throw new FileNotFoundException(replicaInfo.getBlockURI().toString());
  }
  long onDiskLength = getLength(b);
  if (onDiskLength < minLength) {
    throw new EOFException(b + "'s on-disk length " + onDiskLength
        + " is shorter than minLength " + minLength);
  }
}
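Because each failure mode surfaces as a distinct exception type, a caller can collapse the check into a boolean. The sketch below is an invented wrapper in that spirit, not Hadoop code; only the exception and enum types come from the snippet.

// Hedged sketch: map checkBlock's distinct exceptions to a single boolean.
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

final class BlockValidity {
  // True only if the block exists, is FINALIZED, its file is present on disk,
  // and its on-disk length is at least minLength.
  static boolean isUsable(FsDatasetSpi<?> dataset, ExtendedBlock b, long minLength)
      throws IOException {
    try {
      dataset.checkBlock(b, minLength, ReplicaState.FINALIZED);
      return true;
    } catch (ReplicaNotFoundException | UnexpectedReplicaStateException
        | FileNotFoundException | EOFException e) {
      return false; // each type pinpoints a different reason the block is unusable
    }
  }
}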
use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
the class FsDatasetImpl method append.
// FsDatasetSpi
@Override
public ReplicaHandler append(ExtendedBlock b, long newGS, long expectedBlockLen)
    throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    // check the validity of the parameter
    if (newGS < b.getGenerationStamp()) {
      throw new IOException("The new generation stamp " + newGS
          + " should be greater than the replica " + b + "'s generation stamp");
    }
    ReplicaInfo replicaInfo = getReplicaInfo(b);
    LOG.info("Appending to " + replicaInfo);
    if (replicaInfo.getState() != ReplicaState.FINALIZED) {
      throw new ReplicaNotFoundException(
          ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
    }
    if (replicaInfo.getNumBytes() != expectedBlockLen) {
      throw new IOException("Corrupted replica " + replicaInfo
          + " with a length of " + replicaInfo.getNumBytes()
          + " expected length is " + expectedBlockLen);
    }
    FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
    ReplicaInPipeline replica = null;
    try {
      replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
    } catch (IOException e) {
      IOUtils.cleanup(null, ref);
      throw e;
    }
    return new ReplicaHandler(replica, ref);
  }
}
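Finally, a hedged sketch of the caller side: the invented wrapper below requests the append, handles the unfinalized-replica case, and always closes the returned ReplicaHandler so the volume reference obtained inside append() is released.

// Hedged caller sketch: the class, method, and parameter names are assumptions.
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

final class AppendExample {
  static void appendOnce(FsDatasetSpi<?> dataset, ExtendedBlock b, long newGS,
      long expectedLen) throws IOException {
    ReplicaHandler handler = null;
    try {
      handler = dataset.append(b, newGS, expectedLen);
      // handler wraps the in-pipeline replica the writer streams bytes into.
    } catch (ReplicaNotFoundException e) {
      // The replica is missing or not FINALIZED, so it cannot be appended to.
    } finally {
      if (handler != null) {
        handler.close(); // releases the volume reference obtained by append()
      }
    }
  }
}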