Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method getBlockLocalPathInfo.
// FsDatasetSpi
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
    throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    final Replica replica = volumeMap.get(block.getBlockPoolId(),
        block.getBlockId());
    if (replica == null) {
      throw new ReplicaNotFoundException(block);
    }
    if (replica.getGenerationStamp() < block.getGenerationStamp()) {
      throw new IOException("Replica generation stamp < block generation stamp, block="
          + block + ", replica=" + replica);
    } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
      block.setGenerationStamp(replica.getGenerationStamp());
    }
  }
  ReplicaInfo r = getBlockReplica(block);
  File blockFile = new File(r.getBlockURI());
  File metaFile = new File(r.getMetadataURI());
  BlockLocalPathInfo info = new BlockLocalPathInfo(block,
      blockFile.getAbsolutePath(), metaFile.toString());
  return info;
}
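To put the method in context, here is a minimal, hypothetical caller sketch (not part of the Hadoop sources): it resolves the local block and metadata paths for a replica through the FsDatasetSpi interface, as a DataNode does when answering a short-circuit read request. The class and helper method names are illustrative.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class LocalPathLookupSketch {
  // Hypothetical helper: look up the on-disk paths backing a replica.
  static void printLocalPaths(FsDatasetSpi<?> dataset, ExtendedBlock block)
      throws IOException {
    BlockLocalPathInfo info = dataset.getBlockLocalPathInfo(block);
    System.out.println("block file: " + info.getBlockPath());
    System.out.println("meta file:  " + info.getMetaPath());
  }
}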
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method createRbw.
// FsDatasetSpi
@Override
public ReplicaHandler createRbw(StorageType storageType, ExtendedBlock b,
    boolean allowLazyPersist) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
        b.getBlockId());
    if (replicaInfo != null) {
      throw new ReplicaAlreadyExistsException("Block " + b
          + " already exists in state " + replicaInfo.getState()
          + " and thus cannot be created.");
    }
    // create a new block
    FsVolumeReference ref = null;
    // Use ramdisk only if the block size is a multiple of the OS page size;
    // this simplifies space reservation for partially used replicas
    // significantly.
    if (allowLazyPersist && lazyWriter != null
        && b.getNumBytes() % cacheManager.getOsPageSize() == 0
        && reserveLockedMemory(b.getNumBytes())) {
      try {
        // First try to place the block on a transient volume.
        ref = volumes.getNextTransientVolume(b.getNumBytes());
        datanode.getMetrics().incrRamDiskBlocksWrite();
      } catch (DiskOutOfSpaceException de) {
        // Ignore the exception since we just fall back to persistent storage.
      } finally {
        if (ref == null) {
          cacheManager.release(b.getNumBytes());
        }
      }
    }
    if (ref == null) {
      ref = volumes.getNextVolume(storageType, b.getNumBytes());
    }
    FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
    if (allowLazyPersist && !v.isTransientStorage()) {
      datanode.getMetrics().incrRamDiskBlocksWriteFallback();
    }
    ReplicaInPipeline newReplicaInfo;
    try {
      newReplicaInfo = v.createRbw(b);
      if (newReplicaInfo.getReplicaInfo().getState() != ReplicaState.RBW) {
        throw new IOException("CreateRBW returned a replica of state "
            + newReplicaInfo.getReplicaInfo().getState() + " for block "
            + b.getBlockId());
      }
    } catch (IOException e) {
      IOUtils.cleanup(null, ref);
      throw e;
    }
    volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
    return new ReplicaHandler(newReplicaInfo, ref);
  }
}
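As a usage sketch (not from the Hadoop sources), the following hypothetical snippet shows how a write path might ask the dataset for a new RBW replica through the three-argument createRbw shown above. StorageType.DEFAULT and the helper name are assumptions; the returned ReplicaHandler holds a volume reference the caller must eventually close.

import java.io.IOException;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class CreateRbwSketch {
  // Hypothetical helper: create an RBW replica for a block that is about to
  // be written, allowing lazy-persist (RAM disk) placement when possible.
  static ReplicaHandler startWrite(FsDatasetSpi<?> dataset, ExtendedBlock block)
      throws IOException {
    ReplicaHandler handler =
        dataset.createRbw(StorageType.DEFAULT, block, true);
    // The caller must close the handler later to release the volume reference.
    return handler;
  }
}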
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method invalidate.
/**
* We're informed that a block is no longer valid. Delete it.
*/
// FsDatasetSpi
@Override
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
  final List<String> errors = new ArrayList<String>();
  for (int i = 0; i < invalidBlks.length; i++) {
    final ReplicaInfo removing;
    final FsVolumeImpl v;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
      if (info == null) {
        ReplicaInfo infoByBlockId = volumeMap.get(bpid,
            invalidBlks[i].getBlockId());
        if (infoByBlockId == null) {
          // It is okay if the block is not found -- it may have been
          // deleted earlier.
          LOG.info("Failed to delete replica " + invalidBlks[i]
              + ": ReplicaInfo not found.");
        } else {
          errors.add("Failed to delete replica " + invalidBlks[i]
              + ": GenerationStamp not matched, existing replica is "
              + Block.toString(infoByBlockId));
        }
        continue;
      }
      v = (FsVolumeImpl) info.getVolume();
      if (v == null) {
        errors.add("Failed to delete replica " + invalidBlks[i]
            + ". No volume for replica " + info);
        continue;
      }
      try {
        File blockFile = new File(info.getBlockURI());
        if (blockFile != null && blockFile.getParentFile() == null) {
          errors.add("Failed to delete replica " + invalidBlks[i]
              + ". Parent not found for block file: " + blockFile);
          continue;
        }
      } catch (IllegalArgumentException e) {
        LOG.warn("Parent directory check failed; replica " + info
            + " is not backed by a local file");
      }
      removing = volumeMap.remove(bpid, invalidBlks[i]);
      addDeletingBlock(bpid, removing.getBlockId());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Block file " + removing.getBlockURI()
            + " is to be deleted");
      }
      if (removing instanceof ReplicaInPipeline) {
        ((ReplicaInPipeline) removing).releaseAllBytesReserved();
      }
    }
    if (v.isTransientStorage()) {
      RamDiskReplica replicaInfo = ramDiskReplicaTracker.getReplica(bpid,
          invalidBlks[i].getBlockId());
      if (replicaInfo != null) {
        if (!replicaInfo.getIsPersisted()) {
          datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
        }
        ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(),
            replicaInfo.getBlockId(), true);
      }
    }
    // If a DFSClient has the replica in its cache of short-circuit file
    // descriptors (and the client is using ShortCircuitShm), invalidate it.
    datanode.getShortCircuitRegistry().processBlockInvalidation(
        new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
    // If the block is cached, start uncaching it.
    cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
    // Delete the block asynchronously; it is safe to unlink the block file
    // before the uncache operation finishes.
    try {
      asyncDiskService.deleteAsync(v.obtainReference(), removing,
          new ExtendedBlock(bpid, invalidBlks[i]),
          dataStorage.getTrashDirectoryForReplica(bpid, removing));
    } catch (ClosedChannelException e) {
      LOG.warn("Volume " + v + " is closed, ignore the deletion task for "
          + "block " + invalidBlks[i]);
    }
  }
  if (!errors.isEmpty()) {
    StringBuilder b = new StringBuilder("Failed to delete ")
        .append(errors.size()).append(" (out of ").append(invalidBlks.length)
        .append(") replica(s):");
    for (int i = 0; i < errors.size(); i++) {
      b.append("\n").append(i).append(") ").append(errors.get(i));
    }
    throw new IOException(b.toString());
  }
}
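For orientation, a hypothetical driver for this method might look like the sketch below: it builds a Block[] from identifiers received in a NameNode invalidation command and lets the dataset delete them asynchronously. The block IDs, lengths, and generation stamps are made up.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class InvalidateSketch {
  // Hypothetical helper: ask the dataset to delete replicas that the
  // NameNode has declared invalid for the given block pool.
  static void invalidateBlocks(FsDatasetSpi<?> dataset, String bpid)
      throws IOException {
    Block[] invalid = new Block[] {
        // blockId, numBytes, generationStamp -- illustrative values only
        new Block(1073741825L, 0L, 1001L),
        new Block(1073741826L, 0L, 1002L)
    };
    dataset.invalidate(bpid, invalid);
  }
}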
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method recoverAppend.
// FsDatasetSpi
@Override
public ReplicaHandler recoverAppend(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);
  while (true) {
    try {
      try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
        FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
        ReplicaInPipeline replica;
        try {
          // change the replica's state/gs etc.
          if (replicaInfo.getState() == ReplicaState.FINALIZED) {
            replica = append(b.getBlockPoolId(), replicaInfo, newGS,
                b.getNumBytes());
          } else {
            // RBW
            replicaInfo.bumpReplicaGS(newGS);
            replica = (ReplicaInPipeline) replicaInfo;
          }
        } catch (IOException e) {
          IOUtils.cleanup(null, ref);
          throw e;
        }
        return new ReplicaHandler(replica, ref);
      }
    } catch (MustStopExistingWriter e) {
      e.getReplicaInPipeline().stopWriter(
          datanode.getDnConf().getXceiverStopTimeout());
    }
  }
}
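A hedged usage sketch follows: during pipeline recovery for a failed append, a caller hands recoverAppend the new generation stamp and the block length it expects on disk, and receives a ReplicaHandler for the resumed write. The helper name and parameters are illustrative, not Hadoop API beyond the recoverAppend call itself.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class RecoverAppendSketch {
  // Hypothetical helper: resume a failed append by bumping the generation
  // stamp and obtaining a fresh ReplicaHandler for the pipeline.
  static ReplicaHandler resumeAppend(FsDatasetSpi<?> dataset, ExtendedBlock block,
      long newGenerationStamp, long expectedLengthOnDisk) throws IOException {
    return dataset.recoverAppend(block, newGenerationStamp, expectedLengthOnDisk);
  }
}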
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method checkBlock.
/**
* Check if a block is valid.
*
* @param b The block to check.
* @param minLength The minimum length that the block must have. May be 0.
* @param state If this is null, it is ignored. If it is non-null, we
* will check that the replica has this state.
*
* @throws ReplicaNotFoundException If the replica is not found
*
* @throws UnexpectedReplicaStateException If the replica is not in the
* expected state.
* @throws FileNotFoundException If the block file is not found or there
* was an error locating it.
* @throws EOFException If the replica length is too short.
*
* @throws IOException May be thrown from the methods called.
*/
// FsDatasetSpi
@Override
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException,
    FileNotFoundException, EOFException, IOException {
  final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
      b.getLocalBlock());
  if (replicaInfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  if (replicaInfo.getState() != state) {
    throw new UnexpectedReplicaStateException(b, state);
  }
  if (!replicaInfo.blockDataExists()) {
    throw new FileNotFoundException(replicaInfo.getBlockURI().toString());
  }
  long onDiskLength = getLength(b);
  if (onDiskLength < minLength) {
    throw new EOFException(b + "'s on-disk length " + onDiskLength
        + " is shorter than minLength " + minLength);
  }
}
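Finally, a hypothetical caller sketch for checkBlock: before serving a replica, a caller can verify that it exists, is FINALIZED, has a block file, and is at least the required length, translating the specific exceptions into a simple boolean. The class and helper names are illustrative.

import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class CheckBlockSketch {
  // Hypothetical helper: return true only if the replica is present,
  // FINALIZED, backed by a block file, and at least minLength bytes long.
  static boolean isBlockServable(FsDatasetSpi<?> dataset, ExtendedBlock block,
      long minLength) throws IOException {
    try {
      dataset.checkBlock(block, minLength, ReplicaState.FINALIZED);
      return true;
    } catch (ReplicaNotFoundException | UnexpectedReplicaStateException
        | FileNotFoundException | EOFException e) {
      return false;
    }
  }
}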