Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the Apache Hadoop project.
From class TestDataNodeHotSwapVolumes, method testReplicatingAfterRemoveVolume.
@Test(timeout = 60000)
public void testReplicatingAfterRemoveVolume()
    throws InterruptedException, TimeoutException, IOException,
    ReconfigurationException {
  startDFSCluster(1, 2);

  final FileSystem fs = cluster.getFileSystem();
  final short replFactor = 2;
  Path testFile = new Path("/test");
  createFile(testFile, 4, replFactor);

  DataNode dn = cluster.getDataNodes().get(0);
  Collection<String> oldDirs = getDataDirs(dn);

  // Find out the storage directory that holds the block, so it can be removed.
  ExtendedBlock block =
      DFSTestUtil.getAllBlocks(fs, testFile).get(1).getBlock();
  FsVolumeSpi volumeWithBlock = dn.getFSDataset().getVolume(block);
  String dirWithBlock = "[" + volumeWithBlock.getStorageType() + "]"
      + volumeWithBlock.getStorageLocation().getUri();

  // Keep a single data directory that does not hold the block.
  String newDirs = dirWithBlock;
  for (String dir : oldDirs) {
    if (dirWithBlock.startsWith(dir)) {
      continue;
    }
    newDirs = dir;
    break;
  }
  assertThat("DN did not update its own config",
      dn.reconfigurePropertyImpl(
          DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
      is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
  oldDirs.remove(newDirs);
  assertFileLocksReleased(oldDirs);

  triggerDeleteReport(dn);

  waitReplication(fs, testFile, 1, 1);
  DFSTestUtil.waitReplication(fs, testFile, replFactor);
}
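For reference, the volume lookup that drives this test can be factored into a small helper. This is a minimal sketch, not part of the Hadoop test; the helper name is ours, and it only relies on the FsDatasetSpi#getVolume and FsVolumeSpi accessors already used above.

// Hypothetical helper (not in the Hadoop test): returns the
// "[STORAGE_TYPE]uri" form of the data directory holding a block,
// matching the dfs.datanode.data.dir syntax used when reconfiguring.
private static String dirHoldingBlock(DataNode dn, ExtendedBlock block) {
  FsVolumeSpi volume = dn.getFSDataset().getVolume(block);
  return "[" + volume.getStorageType() + "]"
      + volume.getStorageLocation().getUri();
}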
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the Apache Hadoop project.
From class TestBlockScanner, method testVolumeIteratorImpl.
/**
 * Test iterating through a bunch of blocks in a volume using a volume
 * iterator.<p/>
 *
 * The iterator is saved about a third of the way through the blocks,
 * rewound about halfway through, and later reloaded from the saved
 * position.
 *
 * @param numFiles     The number of files to create.
 * @param maxStaleness The maximum staleness to allow with the iterator.
 * @throws Exception
 */
private void testVolumeIteratorImpl(int numFiles, long maxStaleness)
    throws Exception {
  Configuration conf = new Configuration();
  disableBlockScanner(conf);
  TestContext ctx = new TestContext(conf, 1);
  ctx.createFiles(0, numFiles, 1);
  assertEquals(1, ctx.volumes.size());
  FsVolumeSpi volume = ctx.volumes.get(0);
  ExtendedBlock savedBlock = null, loadedBlock = null;
  boolean testedRewind = false, testedSave = false, testedLoad = false;
  int blocksProcessed = 0, savedBlocksProcessed = 0;
  try {
    List<BPOfferService> bpos = ctx.datanode.getAllBpOs();
    assertEquals(1, bpos.size());
    BlockIterator iter = volume.newBlockIterator(ctx.bpids[0], "test");
    assertEquals(ctx.bpids[0], iter.getBlockPoolId());
    iter.setMaxStalenessMs(maxStaleness);
    while (true) {
      HashSet<ExtendedBlock> blocks = new HashSet<ExtendedBlock>();
      for (int blockIdx = 0; blockIdx < numFiles; blockIdx++) {
        blocks.add(ctx.getFileBlock(0, blockIdx));
      }
      while (true) {
        ExtendedBlock block = iter.nextBlock();
        if (block == null) {
          break;
        }
        blocksProcessed++;
        LOG.info("BlockIterator for {} found block {}, blocksProcessed = {}",
            volume, block, blocksProcessed);
        if (testedSave && (savedBlock == null)) {
          savedBlock = block;
        }
        if (testedLoad && (loadedBlock == null)) {
          loadedBlock = block;
          // The block that we get back right after loading the iterator
          // should be the same block we got back right after saving
          // the iterator.
          assertEquals(savedBlock, loadedBlock);
        }
        boolean blockRemoved = blocks.remove(block);
        assertTrue("Found unknown block " + block, blockRemoved);
        if (blocksProcessed > (numFiles / 3)) {
          if (!testedSave) {
            LOG.info("Processed {} blocks out of {}. Saving iterator.",
                blocksProcessed, numFiles);
            iter.save();
            testedSave = true;
            savedBlocksProcessed = blocksProcessed;
          }
        }
        if (blocksProcessed > (numFiles / 2)) {
          if (!testedRewind) {
            LOG.info("Processed {} blocks out of {}. Rewinding iterator.",
                blocksProcessed, numFiles);
            iter.rewind();
            break;
          }
        }
        if (blocksProcessed > ((2 * numFiles) / 3)) {
          if (!testedLoad) {
            LOG.info("Processed {} blocks out of {}. Loading iterator.",
                blocksProcessed, numFiles);
            iter = volume.loadBlockIterator(ctx.bpids[0], "test");
            iter.setMaxStalenessMs(maxStaleness);
            break;
          }
        }
      }
      if (!testedRewind) {
        testedRewind = true;
        blocksProcessed = 0;
        LOG.info("Starting again at the beginning...");
        continue;
      }
      if (!testedLoad) {
        testedLoad = true;
        blocksProcessed = savedBlocksProcessed;
        LOG.info("Starting again at the load point...");
        continue;
      }
      assertEquals(numFiles, blocksProcessed);
      break;
    }
  } finally {
    ctx.close();
  }
}
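Stripped of the save/rewind/load bookkeeping, the basic iteration pattern this test exercises looks roughly like the sketch below. It assumes a volume, a block pool id, and an SLF4J LOG are in scope as in the test; the iterator name "sketch" is arbitrary, and the try-with-resources relies on BlockIterator being Closeable.

// Minimal sketch: walk every block of one block pool on a volume once.
try (FsVolumeSpi.BlockIterator it =
    volume.newBlockIterator(bpid, "sketch")) {
  ExtendedBlock block;
  while ((block = it.nextBlock()) != null) {
    LOG.info("visited {}", block);
  }
}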
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the Apache Hadoop project.
From class TestDirectoryScanner, method duplicateBlock.
/**
 * Duplicate the given block onto every volume that does not already hold a copy.
 * @param blockId the ID of the block to duplicate
 * @throws IOException
 */
private void duplicateBlock(long blockId) throws IOException {
  try (AutoCloseableLock lock = fds.acquireDatasetLock()) {
    ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
    try (FsDatasetSpi.FsVolumeReferences volumes =
        fds.getFsVolumeReferences()) {
      for (FsVolumeSpi v : volumes) {
        if (v.getStorageID().equals(b.getVolume().getStorageID())) {
          continue;
        }

        // Volume without a copy of the block. Make a copy now.
        File sourceBlock = new File(b.getBlockURI());
        File sourceMeta = new File(b.getMetadataURI());
        URI sourceRoot = b.getVolume().getStorageLocation().getUri();
        URI destRoot = v.getStorageLocation().getUri();

        String relativeBlockPath =
            sourceRoot.relativize(sourceBlock.toURI()).getPath();
        String relativeMetaPath =
            sourceRoot.relativize(sourceMeta.toURI()).getPath();

        File destBlock = new File(new File(destRoot).toString(),
            relativeBlockPath);
        File destMeta = new File(new File(destRoot).toString(),
            relativeMetaPath);

        destBlock.getParentFile().mkdirs();
        FileUtils.copyFile(sourceBlock, destBlock);
        FileUtils.copyFile(sourceMeta, destMeta);

        if (destBlock.exists() && destMeta.exists()) {
          LOG.info("Copied " + sourceBlock + " ==> " + destBlock);
          LOG.info("Copied " + sourceMeta + " ==> " + destMeta);
        }
      }
    }
  }
}
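The path mapping in the middle of that loop (resolve the replica's files relative to their own volume root, then re-anchor them under another volume's root) can be read as a standalone step. A rough sketch, with a helper name of our own choosing, assuming the storage locations are local file: URIs as in these tests:

// Hypothetical helper: where would this source file live if it kept its
// volume-relative path but sat under destVolume's root instead?
private static File mapToVolume(File source, URI sourceRoot,
    FsVolumeSpi destVolume) {
  String relativePath = sourceRoot.relativize(source.toURI()).getPath();
  File destRoot = new File(destVolume.getStorageLocation().getUri());
  return new File(destRoot, relativePath);
}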
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the Apache Hadoop project.
From class TestDirectoryScanner, method testExceptionHandlingWhileDirectoryScan.
/**
 * Test the behavior of exception handling during a directory scan.
 * The directory scanner should not abort the entire scan just because
 * one directory reported an error.
 */
@Test(timeout = 60000)
public void testExceptionHandlingWhileDirectoryScan() throws Exception {
  cluster = new MiniDFSCluster.Builder(CONF).build();
  try {
    cluster.waitActive();
    bpid = cluster.getNamesystem().getBlockPoolId();
    fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
    client = cluster.getFileSystem().getClient();
    CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
    DataNode dataNode = cluster.getDataNodes().get(0);

    // Add a file with 2 blocks.
    createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 2, false);

    // Inject an error on #getFinalizedDir() so that ReportCompiler#call()
    // hits an exception while preparing the block info report list.
    List<FsVolumeSpi> volumes = new ArrayList<>();
    Iterator<FsVolumeSpi> iterator = fds.getFsVolumeReferences().iterator();
    while (iterator.hasNext()) {
      FsVolumeImpl volume = (FsVolumeImpl) iterator.next();
      FsVolumeImpl spy = Mockito.spy(volume);
      Mockito.doThrow(new IOException("Error while getFinalizedDir"))
          .when(spy).getFinalizedDir(volume.getBlockPoolList()[0]);
      volumes.add(spy);
    }
    FsVolumeReferences volReferences = new FsVolumeReferences(volumes);
    FsDatasetSpi<? extends FsVolumeSpi> spyFds = Mockito.spy(fds);
    Mockito.doReturn(volReferences).when(spyFds).getFsVolumeReferences();

    scanner = new DirectoryScanner(dataNode, spyFds, CONF);
    scanner.setRetainDiffs(true);
    scanner.reconcile();
  } finally {
    if (scanner != null) {
      scanner.shutdown();
      scanner = null;
    }
    cluster.shutdown();
  }
}
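The fault-injection step above boils down to spying each real volume and making one of its accessors throw. A compact sketch of that wrapping, assuming Mockito and the FsVolumeImpl#getFinalizedDir(String) signature used by the test; the helper name is ours:

// Hypothetical helper: wrap a real volume in a Mockito spy whose
// getFinalizedDir(bpid) always fails, to exercise the scanner's error path.
private static FsVolumeImpl failingVolume(FsVolumeImpl realVolume, String bpid)
    throws IOException {
  FsVolumeImpl spy = Mockito.spy(realVolume);
  Mockito.doThrow(new IOException("injected getFinalizedDir failure"))
      .when(spy).getFinalizedDir(bpid);
  return spy;
}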
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the Apache Hadoop project.
From class TestIncrementalBrVariations, method verifyIncrementalBlockReports.
public void verifyIncrementalBlockReports(boolean splitReports)
    throws IOException {
  // Get the block list for the file with the block locations.
  LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());

  try (FsDatasetSpi.FsVolumeReferences volumes =
      dn0.getFSDataset().getFsVolumeReferences()) {
    // We will send 'fake' incremental block reports to the NN that look
    // like they originated from DN 0.
    StorageReceivedDeletedBlocks[] reports =
        new StorageReceivedDeletedBlocks[volumes.size()];

    // Lie to the NN that one block on each storage has been deleted.
    for (int i = 0; i < reports.length; ++i) {
      FsVolumeSpi volume = volumes.get(i);

      boolean foundBlockOnStorage = false;
      ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];

      // Find the first block on this storage and mark it as deleted for
      // the report.
      for (LocatedBlock block : blocks.getLocatedBlocks()) {
        if (block.getStorageIDs()[0].equals(volume.getStorageID())) {
          rdbi[0] = new ReceivedDeletedBlockInfo(
              block.getBlock().getLocalBlock(),
              ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
          foundBlockOnStorage = true;
          break;
        }
      }
      assertTrue(foundBlockOnStorage);
      reports[i] = new StorageReceivedDeletedBlocks(
          new DatanodeStorage(volume.getStorageID()), rdbi);

      if (splitReports) {
        // If we are splitting reports, send the report for this storage now.
        StorageReceivedDeletedBlocks[] singletonReport = { reports[i] };
        cluster.getNameNodeRpc().blockReceivedAndDeleted(
            dn0Reg, poolId, singletonReport);
      }
    }

    if (!splitReports) {
      // Send a combined report.
      cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
    }

    // Make sure that the deleted block from each storage was picked up
    // by the NameNode. IBRs are async, so make sure the NN processes
    // all of them.
    cluster.getNamesystem().getBlockManager().flushBlockOps();
    assertThat(cluster.getNamesystem().getMissingBlocksCount(),
        is((long) reports.length));
  }
}
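The report construction in that loop reduces to one pattern: a single ReceivedDeletedBlockInfo with status DELETED_BLOCK, wrapped in a StorageReceivedDeletedBlocks for the storage it supposedly came from. A minimal sketch using only the constructors already exercised above; the helper name is ours:

// Hypothetical helper: fake a "this block was deleted" report for one storage,
// matching the shape of the reports built in the loop above.
private static StorageReceivedDeletedBlocks fakeDeletionReport(
    String storageId, Block deletedBlock) {
  ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(
      deletedBlock, ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
  return new StorageReceivedDeletedBlocks(
      new DatanodeStorage(storageId),
      new ReceivedDeletedBlockInfo[] { info });
}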