Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
Class FsDatasetImpl, method recoverRbw.
@Override // FsDatasetSpi
public ReplicaHandler recoverRbw(ExtendedBlock b, long newGS,
    long minBytesRcvd, long maxBytesRcvd) throws IOException {
  LOG.info("Recover RBW replica " + b);
  while (true) {
    try {
      try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo replicaInfo =
            getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
        // check the replica's state
        if (replicaInfo.getState() != ReplicaState.RBW) {
          throw new ReplicaNotFoundException(
              ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
        }
        ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
        if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
          throw new MustStopExistingWriter(rbw);
        }
        LOG.info("At " + datanode.getDisplayName() + ", Recovering " + rbw);
        return recoverRbwImpl(rbw, b, newGS, minBytesRcvd, maxBytesRcvd);
      }
    } catch (MustStopExistingWriter e) {
      e.getReplicaInPipeline().stopWriter(
          datanode.getDnConf().getXceiverStopTimeout());
    }
  }
}
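The while (true) loop together with MustStopExistingWriter is the DataNode's stop-writer retry idiom: the writer slot is claimed under the dataset lock, but a competing writer is stopped (interrupted and joined with a bounded timeout) outside that lock, after which the whole attempt is retried. Below is a minimal, self-contained sketch of the same idiom; WriterSlot and RecoverWithRetry are illustrative stand-ins, not Hadoop APIs.

import java.util.concurrent.locks.ReentrantLock;

class WriterSlot {
  private Thread writer;

  synchronized boolean attemptToSetWriter(Thread expected, Thread next) {
    if (writer != expected) {
      return false;            // another thread is still registered
    }
    writer = next;
    return true;
  }

  void stopWriter(long timeoutMs) throws InterruptedException {
    Thread t;
    synchronized (this) { t = writer; }
    if (t != null && t != Thread.currentThread()) {
      t.interrupt();           // ask the old writer to stop
      t.join(timeoutMs);       // bounded wait, like the xceiver stop timeout
    }
    synchronized (this) {
      if (writer != null && !writer.isAlive()) {
        writer = null;         // free the slot once the old writer has exited
      }
    }
  }
}

class RecoverWithRetry {
  private final ReentrantLock datasetLock = new ReentrantLock();

  void recover(WriterSlot slot, long stopTimeoutMs)
      throws InterruptedException {
    while (true) {
      datasetLock.lock();
      try {
        if (slot.attemptToSetWriter(null, Thread.currentThread())) {
          return;              // slot claimed; recovery can proceed
        }
      } finally {
        datasetLock.unlock();  // never join a writer while holding the lock
      }
      slot.stopWriter(stopTimeoutMs);
    }
  }
}

Stopping the old writer outside the lock matters: joining a thread that may itself need the dataset lock to finish would otherwise deadlock.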
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
Class TestWriteToReplica, method testAppend.
private void testAppend(String bpid, FsDatasetSpi<?> dataSet,
    ExtendedBlock[] blocks) throws IOException {
  long newGS = blocks[FINALIZED].getGenerationStamp() + 1;
  final FsVolumeSpi v = dataSet.getVolume(blocks[FINALIZED]);
  if (v instanceof FsVolumeImpl) {
    FsVolumeImpl fvi = (FsVolumeImpl) v;
    long available = fvi.getCapacity() - fvi.getDfsUsed();
    long expectedLen = blocks[FINALIZED].getNumBytes();
    try {
      // make the volume believe it has no free space
      fvi.onBlockFileDeletion(bpid, -available);
      blocks[FINALIZED].setNumBytes(expectedLen + 100);
      dataSet.append(blocks[FINALIZED], newGS, expectedLen);
      Assert.fail("Should not have space to append to a finalized replica "
          + blocks[FINALIZED]);
    } catch (DiskOutOfSpaceException e) {
      Assert.assertTrue(e.getMessage().startsWith(
          "Insufficient space for appending to "));
    }
    fvi.onBlockFileDeletion(bpid, available);
    blocks[FINALIZED].setNumBytes(expectedLen);
  }
  newGS = blocks[RBW].getGenerationStamp() + 1;
  dataSet.append(blocks[FINALIZED], newGS,
      blocks[FINALIZED].getNumBytes()); // successful
  blocks[FINALIZED].setGenerationStamp(newGS);
  try {
    dataSet.append(blocks[TEMPORARY],
        blocks[TEMPORARY].getGenerationStamp() + 1,
        blocks[TEMPORARY].getNumBytes());
    Assert.fail("Should not have appended to a temporary replica "
        + blocks[TEMPORARY]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA
        + blocks[TEMPORARY], e.getMessage());
  }
  try {
    dataSet.append(blocks[RBW], blocks[RBW].getGenerationStamp() + 1,
        blocks[RBW].getNumBytes());
    Assert.fail("Should not have appended to an RBW replica " + blocks[RBW]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA
        + blocks[RBW], e.getMessage());
  }
  try {
    dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp() + 1,
        blocks[RWR].getNumBytes());
    Assert.fail("Should not have appended to an RWR replica " + blocks[RWR]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA
        + blocks[RWR], e.getMessage());
  }
  try {
    dataSet.append(blocks[RUR], blocks[RUR].getGenerationStamp() + 1,
        blocks[RUR].getNumBytes());
    Assert.fail("Should not have appended to an RUR replica " + blocks[RUR]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA
        + blocks[RUR], e.getMessage());
  }
  try {
    dataSet.append(blocks[NON_EXISTENT],
        blocks[NON_EXISTENT].getGenerationStamp(),
        blocks[NON_EXISTENT].getNumBytes());
    Assert.fail("Should not have appended to a non-existent replica "
        + blocks[NON_EXISTENT]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.NON_EXISTENT_REPLICA
        + blocks[NON_EXISTENT], e.getMessage());
  }
  newGS = blocks[FINALIZED].getGenerationStamp() + 1;
  dataSet.recoverAppend(blocks[FINALIZED], newGS,
      blocks[FINALIZED].getNumBytes()); // successful
  blocks[FINALIZED].setGenerationStamp(newGS);
  try {
    dataSet.recoverAppend(blocks[TEMPORARY],
        blocks[TEMPORARY].getGenerationStamp() + 1,
        blocks[TEMPORARY].getNumBytes());
    Assert.fail("Should not have appended to a temporary replica "
        + blocks[TEMPORARY]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
  }
  newGS = blocks[RBW].getGenerationStamp() + 1;
  dataSet.recoverAppend(blocks[RBW], newGS, blocks[RBW].getNumBytes());
  blocks[RBW].setGenerationStamp(newGS);
  try {
    dataSet.recoverAppend(blocks[RWR], blocks[RWR].getGenerationStamp() + 1,
        blocks[RWR].getNumBytes());
    Assert.fail("Should not have appended to an RWR replica " + blocks[RWR]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
  }
  try {
    dataSet.recoverAppend(blocks[RUR], blocks[RUR].getGenerationStamp() + 1,
        blocks[RUR].getNumBytes());
    Assert.fail("Should not have appended to an RUR replica " + blocks[RUR]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
  }
  try {
    dataSet.recoverAppend(blocks[NON_EXISTENT],
        blocks[NON_EXISTENT].getGenerationStamp(),
        blocks[NON_EXISTENT].getNumBytes());
    Assert.fail("Should not have appended to a non-existent replica "
        + blocks[NON_EXISTENT]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        ReplicaNotFoundException.NON_EXISTENT_REPLICA));
  }
}
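Taken together, the assertions above pin down the exception contract: append() succeeds only on a FINALIZED replica and otherwise throws with the UNFINALIZED_REPLICA prefix, recoverAppend() also accepts RBW and otherwise uses UNFINALIZED_AND_NONRBW_REPLICA, and a missing replica always yields NON_EXISTENT_REPLICA. Since these prefixes are public constants on ReplicaNotFoundException, a caller can branch on them; a minimal sketch, assuming dataSet and block come from surrounding code:

static void tryAppend(FsDatasetSpi<?> dataSet, ExtendedBlock block)
    throws IOException {
  try {
    dataSet.append(block, block.getGenerationStamp() + 1,
        block.getNumBytes());
  } catch (ReplicaNotFoundException e) {
    String msg = e.getMessage();
    if (msg.startsWith(ReplicaNotFoundException.NON_EXISTENT_REPLICA)) {
      // no replica with this block ID in the volume map
    } else if (msg.startsWith(ReplicaNotFoundException.UNFINALIZED_REPLICA)) {
      // a replica exists, but it is TEMPORARY/RBW/RWR/RUR
    }
    throw e;
  }
}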
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
Class FsDatasetImplTestUtils, method getMaterializedReplica.
/**
 * Return a materialized replica from the FsDatasetImpl.
 */
@Override
public MaterializedReplica getMaterializedReplica(ExtendedBlock block)
    throws ReplicaNotFoundException {
  File blockFile;
  try {
    ReplicaInfo r = dataset.getReplicaInfo(block);
    blockFile = new File(r.getBlockURI());
  } catch (IOException e) {
    LOG.error("Block file for " + block + " does not exist:", e);
    throw new ReplicaNotFoundException(block);
  }
  File metaFile = FsDatasetUtil.getMetaFile(
      blockFile, block.getGenerationStamp());
  return new FsDatasetImplMaterializedReplica(blockFile, metaFile);
}
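A typical consumer of this helper is a corruption test: fetch the materialized replica and damage its on-disk files, treating ReplicaNotFoundException as "the DataNode never wrote this block". A hedged sketch, assuming the test holds a running MiniDFSCluster (getFsDatasetTestUtils and MaterializedReplica.corruptData are part of the HDFS test utilities; exact availability depends on the Hadoop version):

static void corruptBlockOnDisk(MiniDFSCluster cluster, ExtendedBlock block)
    throws IOException {
  FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0); // first DN
  try {
    utils.getMaterializedReplica(block).corruptData(); // damage block file
  } catch (ReplicaNotFoundException e) {
    // the DataNode never materialized this block on disk
  }
}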
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
Class FsDatasetImpl, method recoverCheck.
private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException, MustStopExistingWriter {
  ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  // check state
  if (replicaInfo.getState() != ReplicaState.FINALIZED
      && replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
  }
  // check generation stamp
  long replicaGenerationStamp = replicaInfo.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp()
      || replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp
        + ". Expected GS range is [" + b.getGenerationStamp()
        + ", " + newGS + "].");
  }
  // stop the previous writer before checking the replica's length
  long replicaLen = replicaInfo.getNumBytes();
  if (replicaInfo.getState() == ReplicaState.RBW) {
    ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
    if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
      throw new MustStopExistingWriter(rbw);
    }
    // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
    if (replicaLen != rbw.getBytesOnDisk()
        || replicaLen != rbw.getBytesAcked()) {
      throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo
          + ": bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk("
          + rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked()
          + ") are not the same.");
    }
  }
  // check block length
  if (replicaLen != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaLen
        + "; expected length is " + expectedBlockLen);
  }
  return replicaInfo;
}
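The generation-stamp check accepts the replica only when its GS lies in the closed interval [b.getGenerationStamp(), newGS]: anything older than the client's last-known stamp, or newer than the stamp about to be assigned, belongs to a different recovery. Restated as a standalone predicate (names are illustrative):

/** True iff a replica's GS lies in the closed recovery window. */
static boolean gsInRecoveryWindow(long replicaGS, long lastKnownGS,
    long newGS) {
  return replicaGS >= lastKnownGS && replicaGS <= newGS;
}
// e.g. gsInRecoveryWindow(5, 3, 7) == true   (accepted)
//      gsInRecoveryWindow(2, 3, 7) == false  (older than the client's GS)
//      gsInRecoveryWindow(9, 3, 7) == false  (newer than the stamp assigned)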
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
Class FsDatasetImpl, method convertTemporaryToRbw.
@Override // FsDatasetSpi
public ReplicaInPipeline convertTemporaryToRbw(final ExtendedBlock b)
    throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    final long blockId = b.getBlockId();
    final long expectedGs = b.getGenerationStamp();
    final long visible = b.getNumBytes();
    LOG.info("Convert " + b + " from Temporary to RBW, visible length="
        + visible);
    final ReplicaInfo temp;
    {
      // get replica
      final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
      if (r == null) {
        throw new ReplicaNotFoundException(
            ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
      }
      // check the replica's state
      if (r.getState() != ReplicaState.TEMPORARY) {
        throw new ReplicaAlreadyExistsException(
            "r.getState() != ReplicaState.TEMPORARY, r=" + r);
      }
      temp = r;
    }
    // check generation stamp
    if (temp.getGenerationStamp() != expectedGs) {
      throw new ReplicaAlreadyExistsException(
          "temp.getGenerationStamp() != expectedGs = " + expectedGs
          + ", temp=" + temp);
    }
    // TODO: check writer?
    // set writer to the current thread
    // temp.setWriter(Thread.currentThread());
    // check length
    final long numBytes = temp.getNumBytes();
    if (numBytes < visible) {
      throw new IOException(numBytes + " = numBytes < visible = " + visible
          + ", temp=" + temp);
    }
    // check volume
    final FsVolumeImpl v = (FsVolumeImpl) temp.getVolume();
    if (v == null) {
      throw new IOException("r.getVolume() = null, temp=" + temp);
    }
    final ReplicaInPipeline rbw = v.convertTemporaryToRbw(b, temp);
    if (rbw.getState() != ReplicaState.RBW) {
      throw new IOException("Expected replica state: " + ReplicaState.RBW
          + " obtained " + rbw.getState() + " for converting block "
          + b.getBlockId());
    }
    // overwrite the RBW in the volume map
    volumeMap.add(b.getBlockPoolId(), rbw.getReplicaInfo());
    return rbw;
  }
}
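From a caller's point of view the conversion has three outcomes: an RBW replica whose length already covers the visible bytes, ReplicaNotFoundException when no replica exists for the block ID, or ReplicaAlreadyExistsException when a replica exists but is not TEMPORARY or carries the wrong generation stamp. A hedged caller-side sketch (the method name is illustrative, not a real Hadoop caller):

static ReplicaInPipeline toRbwOrExplain(FsDatasetSpi<?> dataset,
    ExtendedBlock block) throws IOException {
  try {
    // On success the replica is RBW and its length covers the visible bytes.
    return dataset.convertTemporaryToRbw(block);
  } catch (ReplicaNotFoundException e) {
    throw e;  // no replica for this block ID at all
  } catch (ReplicaAlreadyExistsException e) {
    throw e;  // replica exists but is not TEMPORARY, or its GS differs
  }
}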