Use of org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException in project hadoop by apache.
The class FsVolumeImpl, method append.
public ReplicaInPipeline append(String bpid, ReplicaInfo replicaInfo, long newGS, long estimateBlockLen) throws IOException {
    long bytesReserved = estimateBlockLen - replicaInfo.getNumBytes();
    if (getAvailable() < bytesReserved) {
        throw new DiskOutOfSpaceException("Insufficient space for appending to " + replicaInfo);
    }
    assert replicaInfo.getVolume() == this : "The volume of the replica should be the same as this volume";
    // construct an RBW replica with the new GS
    File newBlkFile = new File(getRbwDir(bpid), replicaInfo.getBlockName());
    LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
        .setBlockId(replicaInfo.getBlockId())
        .setLength(replicaInfo.getNumBytes())
        .setGenerationStamp(newGS)
        .setFsVolume(this)
        .setDirectoryToUse(newBlkFile.getParentFile())
        .setWriterThread(Thread.currentThread())
        .setBytesToReserve(bytesReserved)
        .buildLocalReplicaInPipeline();
    // load the last checksum and data length
    LocalReplica localReplica = (LocalReplica) replicaInfo;
    byte[] lastChunkChecksum = loadLastPartialChunkChecksum(localReplica.getBlockFile(), localReplica.getMetaFile());
    newReplicaInfo.setLastChecksumAndDataLen(replicaInfo.getNumBytes(), lastChunkChecksum);
    // rename meta file to rbw directory
    // rename block file to rbw directory
    newReplicaInfo.moveReplicaFrom(replicaInfo, newBlkFile);
    reserveSpaceForReplica(bytesReserved);
    return newReplicaInfo;
}
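This volume-level append is reached through the dataset-level append (FsDatasetSpi.append, exercised by the test further down). Below is a minimal caller-side sketch of where the exception surfaces, using hypothetical names dataSet, block and expectedLen; since DiskOutOfSpaceException extends IOException, it has to be caught before the generic handler.
// Hypothetical caller; dataSet, block and expectedLen are not from the snippet above.
try {
    dataSet.append(block, block.getGenerationStamp() + 1, expectedLen);
} catch (DiskOutOfSpaceException e) {
    // The chosen volume cannot reserve the extra bytes needed for the append;
    // let the client retry the pipeline on another datanode.
    throw e;
} catch (IOException e) {
    // Any other append failure (unknown replica, wrong state, ...).
    throw e;
}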
Use of org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException in project hadoop by apache.
The class FsDatasetImpl, method createRbw.
// FsDatasetSpi
@Override
public ReplicaHandler createRbw(StorageType storageType, ExtendedBlock b, boolean allowLazyPersist) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
        if (replicaInfo != null) {
            throw new ReplicaAlreadyExistsException("Block " + b + " already exists in state " + replicaInfo.getState() + " and thus cannot be created.");
        }
        // create a new block
        FsVolumeReference ref = null;
        // Use the RAM disk only if the block size is a multiple of the OS page size;
        // this simplifies reservation for partially used replicas significantly.
        if (allowLazyPersist && lazyWriter != null && b.getNumBytes() % cacheManager.getOsPageSize() == 0 && reserveLockedMemory(b.getNumBytes())) {
            try {
                // First try to place the block on a transient volume.
                ref = volumes.getNextTransientVolume(b.getNumBytes());
                datanode.getMetrics().incrRamDiskBlocksWrite();
            } catch (DiskOutOfSpaceException de) {
                // Ignore the exception since we just fall back to persistent storage.
            } finally {
                if (ref == null) {
                    cacheManager.release(b.getNumBytes());
                }
            }
        }
        if (ref == null) {
            ref = volumes.getNextVolume(storageType, b.getNumBytes());
        }
        FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
        if (allowLazyPersist && !v.isTransientStorage()) {
            datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        }
        ReplicaInPipeline newReplicaInfo;
        try {
            newReplicaInfo = v.createRbw(b);
            if (newReplicaInfo.getReplicaInfo().getState() != ReplicaState.RBW) {
                throw new IOException("CreateRBW returned a replica of state " + newReplicaInfo.getReplicaInfo().getState() + " for block " + b.getBlockId());
            }
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
        return new ReplicaHandler(newReplicaInfo, ref);
    }
}
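The try/catch/finally around getNextTransientVolume is the DiskOutOfSpaceException pattern of interest here: the exception is deliberately swallowed so the write falls back to persistent storage, and the finally clause returns the reserved locked memory if no transient volume was obtained. A stripped-down sketch of the same pattern; reserve(), tryTransient(), release() and pickPersistent() are hypothetical stand-ins, not Hadoop APIs.
// Hypothetical fall-back sketch; none of these helper methods exist in Hadoop.
FsVolumeReference ref = null;
if (reserve(len)) {                    // e.g. reserveLockedMemory(len)
    try {
        ref = tryTransient(len);       // may throw DiskOutOfSpaceException
    } catch (DiskOutOfSpaceException e) {
        // Swallow it: a full RAM disk just means "use persistent storage".
    } finally {
        if (ref == null) {
            release(len);              // give back the reservation on any failure path
        }
    }
}
if (ref == null) {
    ref = pickPersistent(len);         // e.g. volumes.getNextVolume(...)
}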
Use of org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException in project hadoop by apache.
The class AvailableSpaceVolumeChoosingPolicy, method chooseVolume.
@Override
public V chooseVolume(List<V> volumes, long replicaSize) throws IOException {
    if (volumes.size() < 1) {
        throw new DiskOutOfSpaceException("No more available volumes");
    }
    // All volumes in the list share the same storage type, so the storage type
    // of the first item determines which lock stripe to synchronize on.
    StorageType storageType = volumes.get(0).getStorageType();
    int index = storageType != null ? storageType.ordinal() : StorageType.DEFAULT.ordinal();
    synchronized (syncLocks[index]) {
        return doChooseVolume(volumes, replicaSize);
    }
}
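A usage sketch, assuming direct construction of the policy and illustrative local names candidateVolumes and replicaSize (on a datanode the policy class is normally selected through the dfs.datanode.fsdataset.volume.choosing.policy setting and instantiated reflectively); a DiskOutOfSpaceException here simply means no candidate volume can hold the replica.
// Hypothetical usage; candidateVolumes and replicaSize are illustrative names.
AvailableSpaceVolumeChoosingPolicy<FsVolumeImpl> policy =
    new AvailableSpaceVolumeChoosingPolicy<>();
try {
    FsVolumeImpl target = policy.chooseVolume(candidateVolumes, replicaSize);
    // ... place the new replica on 'target'
} catch (DiskOutOfSpaceException e) {
    // The list was empty, or no volume had enough free space for replicaSize bytes.
}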
Use of org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException in project hadoop by apache.
The class RoundRobinVolumeChoosingPolicy, method chooseVolume.
@Override
public V chooseVolume(final List<V> volumes, long blockSize) throws IOException {
    if (volumes.size() < 1) {
        throw new DiskOutOfSpaceException("No more available volumes");
    }
    // All volumes in the list share the same storage type, so the storage type
    // of the first item determines which lock stripe to synchronize on.
    StorageType storageType = volumes.get(0).getStorageType();
    int index = storageType != null ? storageType.ordinal() : StorageType.DEFAULT.ordinal();
    synchronized (syncLocks[index]) {
        return chooseVolume(index, volumes, blockSize);
    }
}
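The private chooseVolume(index, volumes, blockSize) that does the actual round-robin scan is not shown above. Below is an illustrative sketch of such a scan, not the verbatim Hadoop body; the curVolumes cursor array and the exact message text are assumptions. It returns the first volume (starting from the saved cursor) with enough space and throws DiskOutOfSpaceException after a full wrap-around.
// Illustrative sketch; curVolumes is assumed to be an int[] cursor, one slot per storage type.
private V chooseVolume(final int curVolumeIndex, final List<V> volumes, final long blockSize) throws IOException {
    int startVolume = curVolumes[curVolumeIndex] < volumes.size() ? curVolumes[curVolumeIndex] : 0;
    int curVolume = startVolume;
    long maxAvailable = 0;
    while (true) {
        final V volume = volumes.get(curVolume);
        curVolume = (curVolume + 1) % volumes.size();
        long availableVolumeSize = volume.getAvailable();
        if (availableVolumeSize > blockSize) {
            curVolumes[curVolumeIndex] = curVolume;  // resume here on the next call
            return volume;
        }
        if (availableVolumeSize > maxAvailable) {
            maxAvailable = availableVolumeSize;
        }
        if (curVolume == startVolume) {
            // Scanned every volume without finding room for the block.
            throw new DiskOutOfSpaceException("Out of space: the volume with the most available space (=" + maxAvailable + " B) is less than the block size (=" + blockSize + " B).");
        }
    }
}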
Use of org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException in project hadoop by apache.
The class TestWriteToReplica, method testAppend.
private void testAppend(String bpid, FsDatasetSpi<?> dataSet, ExtendedBlock[] blocks) throws IOException {
    long newGS = blocks[FINALIZED].getGenerationStamp() + 1;
    final FsVolumeSpi v = dataSet.getVolume(blocks[FINALIZED]);
    if (v instanceof FsVolumeImpl) {
        FsVolumeImpl fvi = (FsVolumeImpl) v;
        long available = fvi.getCapacity() - fvi.getDfsUsed();
        long expectedLen = blocks[FINALIZED].getNumBytes();
        try {
            fvi.onBlockFileDeletion(bpid, -available);
            blocks[FINALIZED].setNumBytes(expectedLen + 100);
            dataSet.append(blocks[FINALIZED], newGS, expectedLen);
            Assert.fail("Should not have space to append to a FINALIZED replica " + blocks[FINALIZED]);
        } catch (DiskOutOfSpaceException e) {
            Assert.assertTrue(e.getMessage().startsWith("Insufficient space for appending to "));
        }
        fvi.onBlockFileDeletion(bpid, available);
        blocks[FINALIZED].setNumBytes(expectedLen);
    }
    newGS = blocks[RBW].getGenerationStamp() + 1;
    // successful append to the finalized replica
    dataSet.append(blocks[FINALIZED], newGS, blocks[FINALIZED].getNumBytes());
    blocks[FINALIZED].setGenerationStamp(newGS);
    try {
        dataSet.append(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp() + 1, blocks[TEMPORARY].getNumBytes());
        Assert.fail("Should not have appended to a temporary replica " + blocks[TEMPORARY]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[TEMPORARY], e.getMessage());
    }
    try {
        dataSet.append(blocks[RBW], blocks[RBW].getGenerationStamp() + 1, blocks[RBW].getNumBytes());
        Assert.fail("Should not have appended to an RBW replica " + blocks[RBW]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RBW], e.getMessage());
    }
    try {
        dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp() + 1, blocks[RWR].getNumBytes());
        Assert.fail("Should not have appended to an RWR replica " + blocks[RWR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RWR], e.getMessage());
    }
    try {
        dataSet.append(blocks[RUR], blocks[RUR].getGenerationStamp() + 1, blocks[RUR].getNumBytes());
        Assert.fail("Should not have appended to an RUR replica " + blocks[RUR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RUR], e.getMessage());
    }
    try {
        dataSet.append(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes());
        Assert.fail("Should not have appended to a non-existent replica " + blocks[NON_EXISTENT]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.NON_EXISTENT_REPLICA + blocks[NON_EXISTENT], e.getMessage());
    }
    newGS = blocks[FINALIZED].getGenerationStamp() + 1;
    // successful recoverAppend on the finalized replica
    dataSet.recoverAppend(blocks[FINALIZED], newGS, blocks[FINALIZED].getNumBytes());
    blocks[FINALIZED].setGenerationStamp(newGS);
    try {
        dataSet.recoverAppend(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp() + 1, blocks[TEMPORARY].getNumBytes());
        Assert.fail("Should not have appended to a temporary replica " + blocks[TEMPORARY]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    newGS = blocks[RBW].getGenerationStamp() + 1;
    dataSet.recoverAppend(blocks[RBW], newGS, blocks[RBW].getNumBytes());
    blocks[RBW].setGenerationStamp(newGS);
    try {
        dataSet.recoverAppend(blocks[RWR], blocks[RWR].getGenerationStamp() + 1, blocks[RWR].getNumBytes());
        Assert.fail("Should not have appended to an RWR replica " + blocks[RWR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    try {
        dataSet.recoverAppend(blocks[RUR], blocks[RUR].getGenerationStamp() + 1, blocks[RUR].getNumBytes());
        Assert.fail("Should not have appended to an RUR replica " + blocks[RUR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    try {
        dataSet.recoverAppend(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes());
        Assert.fail("Should not have appended to a non-existent replica " + blocks[NON_EXISTENT]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
    }
}
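The repeated try/Assert.fail/catch blocks above could be folded into a small helper. A sketch, assuming Java 8 lambdas and java.util.concurrent.Callable; expectReplicaNotFound is hypothetical and not part of TestWriteToReplica.
// Hypothetical helper; not part of the original test class.
private static void expectReplicaNotFound(String messagePrefix, java.util.concurrent.Callable<?> call) throws Exception {
    try {
        call.call();
        Assert.fail("Expected a ReplicaNotFoundException starting with: " + messagePrefix);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(messagePrefix));
    }
}
// Example for the RWR append case:
// expectReplicaNotFound(ReplicaNotFoundException.UNFINALIZED_REPLICA,
//     () -> dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp() + 1, blocks[RWR].getNumBytes()));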