use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in project hadoop by apache.
the class TestDataNodeHotSwapVolumes method testDirectlyReloadAfterCheckDiskError.
/**
* Verify that {@link DataNode#checkDiskError()} removes all metadata in
* DataNode upon a volume failure. Thus we can run reconfig on the same
* configuration to reload the new volume on the same directory as the failed one.
*/
@Test(timeout = 60000)
public void testDirectlyReloadAfterCheckDiskError() throws Exception {
  // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
  // volume failures, which is currently not supported on Windows.
  assumeNotWindows();
  startDFSCluster(1, 2);
  createFile(new Path("/test"), 32, (short) 2);
  DataNode dn = cluster.getDataNodes().get(0);
  final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
  File dirToFail = new File(cluster.getDataDirectory(), "data1");
  FsVolumeImpl failedVolume = DataNodeTestUtils.getVolume(dn, dirToFail);
  assertTrue("No FsVolume was found for " + dirToFail, failedVolume != null);
  long used = failedVolume.getDfsUsed();
  DataNodeTestUtils.injectDataDirFailure(dirToFail);
  // Trigger the check and wait for the DataNode to detect the disk failure.
  DataNodeTestUtils.waitForDiskError(dn, failedVolume);
  createFile(new Path("/test1"), 32, (short) 2);
  assertEquals(used, failedVolume.getDfsUsed());
  DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
  LOG.info("reconfiguring DN ");
  assertThat("DN did not update its own config",
      dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir),
      is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
  createFile(new Path("/test2"), 32, (short) 2);
  FsVolumeImpl restoredVolume = DataNodeTestUtils.getVolume(dn, dirToFail);
  assertTrue(restoredVolume != null);
  assertTrue(restoredVolume != failedVolume);
  // More data has been written to this volume.
  assertTrue(restoredVolume.getDfsUsed() > used);
}
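As a side note, the per-volume usage counter checked above (getDfsUsed()) can be read for every volume of a DataNode through the FsVolumeReferences pattern that the later snippets on this page use. The following is a minimal sketch, not part of the Hadoop tests, assuming a DataNode dn obtained from a MiniDFSCluster as in the test above:

private static void logVolumeUsage(DataNode dn) throws IOException {
  // Sketch only: enumerate the DataNode's volumes and log their usage,
  // using the same FsVolumeReferences try-with-resources pattern as the
  // other snippets on this page.
  try (FsDatasetSpi.FsVolumeReferences refs =
      dn.getFSDataset().getFsVolumeReferences()) {
    for (int i = 0; i < refs.size(); i++) {
      FsVolumeImpl volume = (FsVolumeImpl) refs.get(i);
      // getDfsUsed() is the same per-volume counter the test compares
      // before and after the injected failure.
      LOG.info("Volume " + i + " dfsUsed=" + volume.getDfsUsed());
    }
  }
}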
use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in project hadoop by apache.
the class TestDirectoryScanner method createBlockMetaFile.
/** Create a block file and the corresponding metafile in a random volume. */
private long createBlockMetaFile() throws IOException {
  long id = getFreeBlockId();
  try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) {
    int numVolumes = refs.size();
    int index = rand.nextInt(numVolumes - 1);
    File finalizedDir = ((FsVolumeImpl) refs.get(index)).getFinalizedDir(bpid);
    File file = new File(finalizedDir, getBlockFile(id));
    if (file.createNewFile()) {
      LOG.info("Created block file " + file.getName());
      // Create files with the same prefix as the block file but with extension
      // names chosen so that, after sorting, they appear around the meta file.
      // This tests how DirectoryScanner handles extraneous files.
      String name1 = file.getAbsolutePath() + ".l";
      String name2 = file.getAbsolutePath() + ".n";
      file = new File(name1);
      if (file.createNewFile()) {
        LOG.info("Created extraneous file " + name1);
      }
      file = new File(name2);
      if (file.createNewFile()) {
        LOG.info("Created extraneous file " + name2);
      }
      file = new File(finalizedDir, getMetaFile(id));
      if (file.createNewFile()) {
        LOG.info("Created metafile " + file.getName());
      }
    }
  }
  return id;
}
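The helpers getFreeBlockId(), getBlockFile(long) and getMetaFile(long) are not reproduced on this page. As a rough, hypothetical sketch of what the two name helpers could return, HDFS stores finalized replicas as blk_<blockId> with a paired checksum file blk_<blockId>_<generationStamp>.meta; the GEN_STAMP constant below is an assumption, not the test's actual field:

// Hypothetical stand-ins for the name helpers used above; the real test's
// helpers may differ in detail.
private static final long GEN_STAMP = 1000;  // assumed generation stamp

private static String getBlockFile(long blockId) {
  // Finalized block replicas are stored as "blk_<blockId>".
  return "blk_" + blockId;
}

private static String getMetaFile(long blockId) {
  // Checksum files pair the block id with a generation stamp.
  return "blk_" + blockId + "_" + GEN_STAMP + ".meta";
}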
use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in project hadoop by apache.
the class TestDirectoryScanner method createMetaFile.
/** Create a metafile in a random volume. */
private long createMetaFile() throws IOException {
  long id = getFreeBlockId();
  try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) {
    int numVolumes = refs.size();
    int index = rand.nextInt(numVolumes - 1);
    File finalizedDir = ((FsVolumeImpl) refs.get(index)).getFinalizedDir(bpid);
    File file = new File(finalizedDir, getMetaFile(id));
    if (file.createNewFile()) {
      LOG.info("Created metafile " + file.getName());
    }
  }
  return id;
}
use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in project hadoop by apache.
the class DiskBalancerTestUtil method newImbalancedCluster.
public static MiniDFSCluster newImbalancedCluster(final Configuration conf,
    final int numDatanodes, final long[] storageCapacities,
    final int defaultBlockSize, final int fileLen,
    final StartupOption dnOption)
    throws IOException, InterruptedException, TimeoutException {
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  final String fileName = "/" + UUID.randomUUID().toString();
  final Path filePath = new Path(fileName);
  Preconditions.checkNotNull(storageCapacities);
  Preconditions.checkArgument(storageCapacities.length == 2,
      "need to specify capacities for two storages.");
  // Write a file and restart the cluster.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .storageCapacities(storageCapacities)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK })
      .storagesPerDatanode(2)
      .dnStartupOption(dnOption)
      .build();
  FsVolumeImpl source = null;
  FsVolumeImpl dest = null;
  cluster.waitActive();
  Random r = new Random();
  FileSystem fs = cluster.getFileSystem(0);
  TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
  DFSTestUtil.waitReplication(fs, filePath, (short) 1);
  cluster.restartDataNodes();
  cluster.waitActive();
  // Get the data node and move all data to one disk.
  for (int i = 0; i < numDatanodes; i++) {
    DataNode dnNode = cluster.getDataNodes().get(i);
    try (FsDatasetSpi.FsVolumeReferences refs =
        dnNode.getFSDataset().getFsVolumeReferences()) {
      source = (FsVolumeImpl) refs.get(0);
      dest = (FsVolumeImpl) refs.get(1);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
      DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
          source, dest);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
    }
  }
  cluster.restartDataNodes();
  cluster.waitActive();
  return cluster;
}
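As a usage illustration, not taken from the Hadoop sources, a caller could build an imbalanced two-disk cluster with this helper and tear it down afterwards. The capacities, block size, and file length below are arbitrary assumptions:

// Sketch only: an example caller of newImbalancedCluster(); the numbers are
// arbitrary assumptions, not values used by the Hadoop tests.
@Test
public void sketchImbalancedClusterUsage() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final long capacity = 300 * 1024L;  // assumed per-disk capacity in bytes
  final long[] capacities = new long[] { capacity, capacity };
  MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
      conf, 1, capacities, 1024, 10 * 1024, StartupOption.REGULAR);
  try {
    // After the helper returns, each DataNode holds all of its blocks on a
    // single volume, which is the imbalanced starting state a DiskBalancer
    // test needs.
    assertEquals(1, cluster.getDataNodes().size());
  } finally {
    cluster.shutdown();
  }
}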
use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in project hadoop by apache.
the class TestDiskBalancerRPC method testMoveBlockAcrossVolume.
@Test
public void testMoveBlockAcrossVolume() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int defaultBlockSize = 100;
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
  String fileName = "/tmp.txt";
  Path filePath = new Path(fileName);
  final int numDatanodes = 1;
  final int dnIndex = 0;
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .build();
  FsVolumeImpl source = null;
  FsVolumeImpl dest = null;
  try {
    cluster.waitActive();
    Random r = new Random();
    FileSystem fs = cluster.getFileSystem(dnIndex);
    DFSTestUtil.createFile(fs, filePath, 10 * 1024, (short) 1, r.nextLong());
    DataNode dnNode = cluster.getDataNodes().get(dnIndex);
    FsDatasetSpi.FsVolumeReferences refs =
        dnNode.getFSDataset().getFsVolumeReferences();
    try {
      source = (FsVolumeImpl) refs.get(0);
      dest = (FsVolumeImpl) refs.get(1);
      DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
          source, dest);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
    } finally {
      refs.close();
    }
  } finally {
    cluster.shutdown();
  }
}
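If a test also wants to confirm where the replicas ended up, a natural follow-up is to assert on the destination volume as well. The extra check below is an illustrative addition, not part of the original test, and would sit next to the existing source-volume assertion inside the inner try block:

// Additional check (illustrative, not in the original test): after
// moveAllDataToDestVolume(), the destination volume should now hold the
// replicas the source volume gave up.
assertTrue(DiskBalancerTestUtil.getBlockCount(dest) > 0);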