Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method createTemporary.
// FsDatasetSpi
@Override
public ReplicaHandler createTemporary(StorageType storageType,
    ExtendedBlock b) throws IOException {
  long startTimeMs = Time.monotonicNow();
  long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
  ReplicaInfo lastFoundReplicaInfo = null;
  do {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      ReplicaInfo currentReplicaInfo =
          volumeMap.get(b.getBlockPoolId(), b.getBlockId());
      if (currentReplicaInfo == lastFoundReplicaInfo) {
        if (lastFoundReplicaInfo != null) {
          invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
        }
        FsVolumeReference ref =
            volumes.getNextVolume(storageType, b.getNumBytes());
        FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
        ReplicaInPipeline newReplicaInfo;
        try {
          newReplicaInfo = v.createTemporary(b);
        } catch (IOException e) {
          IOUtils.cleanup(null, ref);
          throw e;
        }
        volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
        return new ReplicaHandler(newReplicaInfo, ref);
      } else {
        if (!(currentReplicaInfo.getGenerationStamp() < b.getGenerationStamp()
            && (currentReplicaInfo.getState() == ReplicaState.TEMPORARY
                || currentReplicaInfo.getState() == ReplicaState.RBW))) {
          throw new ReplicaAlreadyExistsException("Block " + b
              + " already exists in state " + currentReplicaInfo.getState()
              + " and thus cannot be created.");
        }
        lastFoundReplicaInfo = currentReplicaInfo;
      }
    }
    // The writer hung too long; just bail out. This is not supposed to happen.
    long writerStopMs = Time.monotonicNow() - startTimeMs;
    if (writerStopMs > writerStopTimeoutMs) {
      LOG.warn("Unable to stop existing writer for block " + b + " after "
          + writerStopMs + " milliseconds.");
      throw new IOException("Unable to stop existing writer for block " + b
          + " after " + writerStopMs + " milliseconds.");
    }
    // Stop the previous writer
    ((ReplicaInPipeline) lastFoundReplicaInfo).stopWriter(writerStopTimeoutMs);
  } while (true);
}
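For orientation, here is a minimal caller sketch. It assumes only what the snippet above shows: ReplicaHandler pairs the new ReplicaInPipeline with the FsVolumeReference taken from getNextVolume(), exposes the replica via getReplica(), and releases the volume reference when closed. The writeTemporaryReplica helper itself is hypothetical.

// Hypothetical caller sketch; not part of FsDatasetImpl.
void writeTemporaryReplica(FsDatasetSpi<?> dataset, ExtendedBlock b)
    throws IOException {
  try (ReplicaHandler handler =
      dataset.createTemporary(StorageType.DEFAULT, b)) {
    ReplicaInPipeline replica = handler.getReplica();
    // ... stream the incoming bytes into the temporary replica here ...
  } // closing the handler releases the held FsVolumeReference
}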
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsVolumeImpl, method moveBlockToTmpLocation.
public ReplicaInfo moveBlockToTmpLocation(ExtendedBlock block,
    ReplicaInfo replicaInfo, int smallBufferSize,
    Configuration conf) throws IOException {
  // Copy the meta and block files into this volume's tmp directory;
  // copyBlockFiles returns { metaFile, blockFile }.
  File[] blockFiles = FsDatasetImpl.copyBlockFiles(block.getBlockId(),
      block.getGenerationStamp(), replicaInfo,
      getTmpDir(block.getBlockPoolId()),
      replicaInfo.isOnTransientStorage(), smallBufferSize, conf);
  ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
      .setBlockId(replicaInfo.getBlockId())
      .setGenerationStamp(replicaInfo.getGenerationStamp())
      .setFsVolume(this)
      .setDirectoryToUse(blockFiles[0].getParentFile())
      .setBytesToReserve(0)
      .build();
  // blockFiles[1] is the data file; its length is the replica's byte count.
  newReplicaInfo.setNumBytes(blockFiles[1].length());
  return newReplicaInfo;
}
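This method only stages the copy: it returns a TEMPORARY replica pointing at files in the tmp directory, and the dataset still has to swap that replica in. A hedged sketch of that flow follows; moveOntoVolume is a hypothetical wrapper, and the finalize/volume-map bookkeeping it alludes to lives in FsDatasetImpl's block-move path, not here.

// Hypothetical caller sketch: move a replica onto a target volume.
ReplicaInfo moveOntoVolume(FsVolumeImpl target, ExtendedBlock block,
    ReplicaInfo source, int smallBufferSize, Configuration conf)
    throws IOException {
  // Copies block + meta into the target volume's tmp dir and returns a
  // TEMPORARY replica pointing at the copies.
  ReplicaInfo tmpReplica =
      target.moveBlockToTmpLocation(block, source, smallBufferSize, conf);
  // The dataset would now finalize tmpReplica and replace `source`
  // in its volume map (omitted here).
  return tmpReplica;
}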
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method getTmpInputStreams.
/**
* Returns handles to the block file and its metadata file
*/
// FsDatasetSpi
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkOffset, long metaOffset) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    ReplicaInfo info = getReplicaInfo(b);
    FsVolumeReference ref = info.getVolume().obtainReference();
    try {
      InputStream blockInStream = info.getDataInputStream(blkOffset);
      try {
        InputStream metaInStream = info.getMetadataInputStream(metaOffset);
        return new ReplicaInputStreams(blockInStream, metaInStream, ref,
            datanode.getFileIoProvider());
      } catch (IOException e) {
        IOUtils.cleanup(null, blockInStream);
        throw e;
      }
    } catch (IOException e) {
      IOUtils.cleanup(null, ref);
      throw e;
    }
  }
}
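Since ReplicaInputStreams is Closeable, a caller can lean on try-with-resources to release both wrapped streams and the volume reference obtained above, even on error paths. A minimal sketch, with readTmpReplica as a hypothetical helper:

// Hypothetical caller sketch; relies only on ReplicaInputStreams being
// Closeable, as the cleanup chain in getTmpInputStreams implies.
void readTmpReplica(FsDatasetSpi<?> dataset, ExtendedBlock b)
    throws IOException {
  try (ReplicaInputStreams streams = dataset.getTmpInputStreams(b, 0, 0)) {
    // ... read block data and checksum data from the wrapped streams ...
  } // both streams and the FsVolumeReference are released here
}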
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method recoverCheck.
private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException, MustStopExistingWriter {
  ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  // check state
  if (replicaInfo.getState() != ReplicaState.FINALIZED
      && replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
  }
  // check generation stamp
  long replicaGenerationStamp = replicaInfo.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp()
      || replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp
        + ". Expected GS range is [" + b.getGenerationStamp()
        + ", " + newGS + "].");
  }
  // stop the previous writer before checking the replica's length
  long replicaLen = replicaInfo.getNumBytes();
  if (replicaInfo.getState() == ReplicaState.RBW) {
    ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
    if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
      throw new MustStopExistingWriter(rbw);
    }
    // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
    if (replicaLen != rbw.getBytesOnDisk() || replicaLen != rbw.getBytesAcked()) {
      throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo
          + ": bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk("
          + rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked()
          + ") are not the same.");
    }
  }
  // check block length
  if (replicaLen != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaLen + "; expected length is "
        + expectedBlockLen);
  }
  return replicaInfo;
}
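Note that when the RBW replica still has a live writer, recoverCheck throws MustStopExistingWriter rather than blocking while the dataset lock is held. A hedged sketch of the retry loop a caller would run around it, assuming the exception exposes the replica via a getReplicaInPipeline() accessor as in FsDatasetImpl's recovery methods:

// Hedged sketch of the caller-side retry loop: stop the old writer
// outside the dataset lock, then re-run the checks.
while (true) {
  try {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
      // ... perform the append/close recovery on replicaInfo ...
      break;
    }
  } catch (MustStopExistingWriter e) {
    // Stop the writer without holding the dataset lock, then retry.
    e.getReplicaInPipeline()
        .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
  }
}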
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method getBlockInputStream.
// FsDatasetSpi
@Override
public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
    throws IOException {
  ReplicaInfo info;
  synchronized (this) {
    info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
  }
  if (info != null && info.getVolume().isTransientStorage()) {
    ramDiskReplicaTracker.touch(b.getBlockPoolId(), b.getBlockId());
    datanode.getMetrics().incrRamDiskBlocksReadHits();
  }
  if (info != null && info.blockDataExists()) {
    return info.getDataInputStream(seekOffset);
  } else {
    throw new IOException("No data exists for block " + b);
  }
}
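A minimal usage sketch: the returned stream is already positioned at seekOffset, and the caller owns its lifetime. The copyBlock helper is hypothetical and uses only plain java.io.

// Hypothetical helper: copy one block's data, starting at seekOffset,
// into an OutputStream; returns the number of bytes copied.
long copyBlock(FsDatasetSpi<?> dataset, ExtendedBlock b, long seekOffset,
    OutputStream out) throws IOException {
  long copied = 0;
  try (InputStream in = dataset.getBlockInputStream(b, seekOffset)) {
    byte[] buf = new byte[4096];
    int n;
    while ((n = in.read(buf)) > 0) {
      out.write(buf, 0, n);
      copied += n;
    }
  } // always close the stream returned by getBlockInputStream
  return copied;
}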