Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
From class TestDatanodeRestart, method testRbwReplicas:
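The test writes an under-construction file so that rbw (replica being written) block files exist on disk, optionally truncates one of them to simulate corruption, restarts the DataNodes, and then verifies that the replica is recovered in the RWR (replica waiting to be recovered) state with the expected length before invalidating it.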
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt)
    throws IOException {
  FSDataOutputStream out = null;
  FileSystem fs = cluster.getFileSystem();
  final Path src = new Path("/test.txt");
  try {
    final int fileLen = 515;
    // Create some rbw replicas on disk.
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    out = fs.create(src);
    out.write(writeBuf);
    out.hflush();
    DataNode dn = cluster.getDataNodes().get(0);
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dataset(dn).getFsVolumeReferences()) {
      for (FsVolumeSpi vol : volumes) {
        final FsVolumeImpl volume = (FsVolumeImpl) vol;
        File currentDir =
            volume.getCurrentDir().getParentFile().getParentFile();
        File rbwDir = new File(currentDir, "rbw");
        for (File file : rbwDir.listFiles()) {
          if (isCorrupt && Block.isBlockFilename(file)) {
            // Corrupt the replica by truncating its block file by one byte.
            try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
              raf.setLength(fileLen - 1);
            }
          }
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);
    // Check the volumeMap: it should hold exactly one RWR replica.
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ReplicaMap replicas = dataset(dn).volumeMap;
    Assert.assertEquals(1, replicas.size(bpid));
    ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
    Assert.assertEquals(ReplicaState.RWR, replica.getState());
    if (isCorrupt) {
      // A truncated replica is rounded down to the last full checksum chunk.
      Assert.assertEquals((fileLen - 1) / 512 * 512, replica.getNumBytes());
    } else {
      Assert.assertEquals(fileLen, replica.getNumBytes());
    }
    dataset(dn).invalidate(bpid, new Block[] { replica });
  } finally {
    IOUtils.closeStream(out);
    if (fs.exists(src)) {
      fs.delete(src, false);
    }
    fs.close();
  }
}
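The snippet calls a dataset(dn) helper that is defined elsewhere in the test class and not shown above. A minimal sketch of such a helper, assuming the DataNodeTestUtils test utility; the exact helper in TestDatanodeRestart may differ:

private static FsDatasetImpl dataset(DataNode dn) {
  // Unwrap the DataNode's dataset and cast it to the concrete
  // implementation so the test can reach fields such as volumeMap.
  return (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
}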
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
From class TestDatasetVolumeCheckerTimeout, method testDiskCheckTimeout:
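The test holds a lock that a mocked "slow" volume blocks on inside its disk check, requests a volume check, and verifies that the DatasetVolumeChecker times the check out and reports the volume as failed, with the callback and the check each invoked exactly once.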
@Test(timeout = 1000)
public void testDiskCheckTimeout() throws Exception {
  LOG.info("Executing {}", testName.getMethodName());
  final FsVolumeSpi volume = makeSlowVolume();
  final DatasetVolumeChecker checker =
      new DatasetVolumeChecker(conf, new FakeTimer());
  final AtomicLong numCallbackInvocations = new AtomicLong(0);
  lock.lock();
  // Request a check and ensure it triggered FsVolumeSpi#check.
  boolean result = checker.checkVolume(
      volume, new DatasetVolumeChecker.Callback() {
        @Override
        public void call(Set<FsVolumeSpi> healthyVolumes,
            Set<FsVolumeSpi> failedVolumes) {
          numCallbackInvocations.incrementAndGet();
          // The disk check should register a failed volume due to the
          // timeout.
          assertThat(healthyVolumes.size(), is(0));
          assertThat(failedVolumes.size(), is(1));
        }
      });
  // Wait for the callback.
  Thread.sleep(DISK_CHECK_TIME);
  // Release the lock so the blocked check can finish.
  lock.unlock();
  // Ensure that the check was invoked only once.
  verify(volume, times(1)).check(anyObject());
  assertThat(numCallbackInvocations.get(), is(1L));
}
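makeSlowVolume() is also defined outside the snippet. One plausible sketch with Mockito, assuming the shared ReentrantLock named lock used above; the mocked check() blocks on that lock until the test releases it, so the check outlasts the checker's timeout. The real factory in TestDatasetVolumeCheckerTimeout may differ:

private FsVolumeSpi makeSlowVolume() throws Exception {
  final FsVolumeSpi volume = mock(FsVolumeSpi.class);
  final FsVolumeReference reference = mock(FsVolumeReference.class);
  when(reference.getVolume()).thenReturn(volume);
  when(volume.obtainReference()).thenReturn(reference);
  when(volume.check(anyObject())).thenAnswer(invocation -> {
    // Block until the test thread releases the lock, simulating a
    // disk check that exceeds the configured timeout.
    lock.lock();
    try {
      return VolumeCheckResult.HEALTHY;
    } finally {
      lock.unlock();
    }
  });
  return volume;
}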
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
From class TestWriteToReplica, method testAppend:
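The test exercises append() and recoverAppend() against replicas in every state: append succeeds only on a FINALIZED replica, and fails with DiskOutOfSpaceException when the volume has no room; recoverAppend additionally accepts an RBW replica; every other state raises ReplicaNotFoundException with a state-specific message.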
private void testAppend(String bpid, FsDatasetSpi<?> dataSet,
    ExtendedBlock[] blocks) throws IOException {
  long newGS = blocks[FINALIZED].getGenerationStamp() + 1;
  final FsVolumeSpi v = dataSet.getVolume(blocks[FINALIZED]);
  if (v instanceof FsVolumeImpl) {
    FsVolumeImpl fvi = (FsVolumeImpl) v;
    long available = fvi.getCapacity() - fvi.getDfsUsed();
    long expectedLen = blocks[FINALIZED].getNumBytes();
    try {
      // Consume all remaining space on the volume so the append must fail.
      fvi.onBlockFileDeletion(bpid, -available);
      blocks[FINALIZED].setNumBytes(expectedLen + 100);
      dataSet.append(blocks[FINALIZED], newGS, expectedLen);
      Assert.fail("Should not have space to append to a finalized replica "
          + blocks[FINALIZED]);
    } catch (DiskOutOfSpaceException e) {
      Assert.assertTrue(e.getMessage().startsWith(
          "Insufficient space for appending to "));
    }
    // Restore the space consumed above.
    fvi.onBlockFileDeletion(bpid, available);
    blocks[FINALIZED].setNumBytes(expectedLen);
  }
  newGS = blocks[FINALIZED].getGenerationStamp() + 1;
  // Appending to a finalized replica succeeds.
  dataSet.append(blocks[FINALIZED], newGS, blocks[FINALIZED].getNumBytes());
  blocks[FINALIZED].setGenerationStamp(newGS);
  try {
    dataSet.append(blocks[TEMPORARY],
        blocks[TEMPORARY].getGenerationStamp() + 1,
        blocks[TEMPORARY].getNumBytes());
    Assert.fail("Should not have appended to a temporary replica "
        + blocks[TEMPORARY]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA
        + blocks[TEMPORARY], e.getMessage());
  }
  try {
    dataSet.append(blocks[RBW], blocks[RBW].getGenerationStamp() + 1,
        blocks[RBW].getNumBytes());
    Assert.fail("Should not have appended to an RBW replica " + blocks[RBW]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA
        + blocks[RBW], e.getMessage());
  }
  try {
    dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp() + 1,
        blocks[RWR].getNumBytes());
    Assert.fail("Should not have appended to an RWR replica " + blocks[RWR]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA
        + blocks[RWR], e.getMessage());
  }
  try {
    dataSet.append(blocks[RUR], blocks[RUR].getGenerationStamp() + 1,
        blocks[RUR].getNumBytes());
    Assert.fail("Should not have appended to an RUR replica " + blocks[RUR]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA
        + blocks[RUR], e.getMessage());
  }
  try {
    dataSet.append(blocks[NON_EXISTENT],
        blocks[NON_EXISTENT].getGenerationStamp(),
        blocks[NON_EXISTENT].getNumBytes());
    Assert.fail("Should not have appended to a non-existent replica "
        + blocks[NON_EXISTENT]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertEquals(ReplicaNotFoundException.NON_EXISTENT_REPLICA
        + blocks[NON_EXISTENT], e.getMessage());
  }
  newGS = blocks[FINALIZED].getGenerationStamp() + 1;
  // Recovering an append to a finalized replica succeeds.
  dataSet.recoverAppend(blocks[FINALIZED], newGS,
      blocks[FINALIZED].getNumBytes());
  blocks[FINALIZED].setGenerationStamp(newGS);
  try {
    dataSet.recoverAppend(blocks[TEMPORARY],
        blocks[TEMPORARY].getGenerationStamp() + 1,
        blocks[TEMPORARY].getNumBytes());
    Assert.fail("Should not have appended to a temporary replica "
        + blocks[TEMPORARY]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
  }
  newGS = blocks[RBW].getGenerationStamp() + 1;
  // Recovering an append to an RBW replica also succeeds.
  dataSet.recoverAppend(blocks[RBW], newGS, blocks[RBW].getNumBytes());
  blocks[RBW].setGenerationStamp(newGS);
  try {
    dataSet.recoverAppend(blocks[RWR], blocks[RWR].getGenerationStamp() + 1,
        blocks[RWR].getNumBytes());
    Assert.fail("Should not have appended to an RWR replica " + blocks[RWR]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
  }
  try {
    dataSet.recoverAppend(blocks[RUR], blocks[RUR].getGenerationStamp() + 1,
        blocks[RUR].getNumBytes());
    Assert.fail("Should not have appended to an RUR replica " + blocks[RUR]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
  }
  try {
    dataSet.recoverAppend(blocks[NON_EXISTENT],
        blocks[NON_EXISTENT].getGenerationStamp(),
        blocks[NON_EXISTENT].getNumBytes());
    Assert.fail("Should not have appended to a non-existent replica "
        + blocks[NON_EXISTENT]);
  } catch (ReplicaNotFoundException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        ReplicaNotFoundException.NON_EXISTENT_REPLICA));
  }
}
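The blocks array is indexed by replica-state constants defined elsewhere in TestWriteToReplica. A sketch of the assumed layout; the exact values are illustrative and only the distinct indices matter to the snippet above:

// One index per replica state exercised by the test (values assumed).
private static final int FINALIZED = 0;
private static final int TEMPORARY = 1;
private static final int RBW = 2;
private static final int RWR = 3;
private static final int RUR = 4;
private static final int NON_EXISTENT = 5;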