Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestWriteToReplica, method testClose.
// test close
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));
    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
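Note that setup(...) and testClose(dataSet, blocks) are private helpers of TestWriteToReplica and are not shown here. As a side note, the later snippets on this page reach the same dataset directly from the DataNode; the line below is a hedged equivalent of the DataNodeTestUtils call (assuming, as in current Hadoop versions, that the utility simply delegates to the DataNode accessor):

// Hedged alternative: access the dataset straight from the DataNode.
FsDatasetSpi<?> sameDataSet = dn.getFSDataset();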
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestDiskBalancerCommand, method runAndVerifyPlan.
private String runAndVerifyPlan(final MiniDFSCluster miniCluster,
    final Configuration hdfsConf) throws Exception {
  String cmdLine = "";
  List<String> outputs = null;
  final DataNode dn = miniCluster.getDataNodes().get(0);
  /* run plan command */
  cmdLine = String.format("hdfs diskbalancer -%s %s", PLAN, dn.getDatanodeUuid());
  outputs = runCommand(cmdLine, hdfsConf, miniCluster);
  /* get path of plan file */
  final String planFileName = dn.getDatanodeUuid();
  /* verify plan command */
  assertEquals("There must be two lines: the 1st is writing plan to...,"
      + " the 2nd is actual full path of plan file.", 2, outputs.size());
  assertThat(outputs.get(1), containsString(planFileName));
  /* get full path of plan file */
  final String planFileFullName = outputs.get(1);
  return planFileFullName;
}
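For context, a hedged sketch of how a helper like this is typically consumed in the same test class: the returned plan path is handed to a follow-up execute invocation. EXECUTE and runCommand(...) are assumed to come from DiskBalancerCLI and the test class respectively; they are not part of the snippet above.

// Hypothetical follow-up step, for illustration only.
final String planFile = runAndVerifyPlan(miniCluster, hdfsConf);
final String executeCmd =
    String.format("hdfs diskbalancer -%s %s", EXECUTE, planFile);
runCommand(executeCmd, hdfsConf, miniCluster);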
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestSpaceReservation, method testReservedSpaceForPipelineRecovery.
@Test(timeout = 30000)
public void testReservedSpaceForPipelineRecovery() throws Exception {
  final short replication = 3;
  startCluster(BLOCK_SIZE, replication, -1);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");
  old = DataNodeFaultInjector.get();
  // Fault injector to fail connection to mirror first time.
  DataNodeFaultInjector.set(new DataNodeFaultInjector() {
    private int tries = 0;

    @Override
    public void failMirrorConnection() throws IOException {
      if (tries++ == 0) {
        throw new IOException("Failing Mirror for space reservation");
      }
    }
  });
  // Write 1 byte to the file; the injected failure forces a pipeline recovery.
  FSDataOutputStream os = fs.create(file, replication);
  os.write(new byte[1]);
  os.close();
  // Ensure all space reserved for the replica was released on each
  // DataNode.
  cluster.triggerBlockReports();
  for (final DataNode dn : cluster.getDataNodes()) {
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dn.getFSDataset().getFsVolumeReferences()) {
      final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          LOG.info("dn " + dn.getDisplayName() + " space : "
              + volume.getReservedForReplicas());
          return (volume.getReservedForReplicas() == 0);
        }
      }, 100, Integer.MAX_VALUE); // Wait until the test times out.
    }
  }
}
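On Hadoop builds targeting Java 8 or later, the anonymous Supplier passed to GenericTestUtils.waitFor can be collapsed to a lambda; a hedged, behaviorally equivalent form of the wait above:

// Equivalent wait written as a lambda (illustrative only).
GenericTestUtils.waitFor(
    () -> volume.getReservedForReplicas() == 0, 100, Integer.MAX_VALUE);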
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestSpaceReservation, method testSpaceReleasedOnUnexpectedEof.
/**
* Ensure that reserved space is released when the client goes away
* unexpectedly.
*
* The verification is done for each replica in the write pipeline.
*
* @throws IOException
*/
@Test(timeout = 300000)
public void testSpaceReleasedOnUnexpectedEof()
    throws IOException, InterruptedException, TimeoutException {
  final short replication = 3;
  startCluster(BLOCK_SIZE, replication, -1);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");
  // Write 1 byte to the file and kill the writer.
  FSDataOutputStream os = fs.create(file, replication);
  os.write(new byte[1]);
  os.hsync();
  DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());
  // Ensure all space reserved for the replica was released on each
  // DataNode.
  for (DataNode dn : cluster.getDataNodes()) {
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dn.getFSDataset().getFsVolumeReferences()) {
      final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          return (volume.getReservedForReplicas() == 0);
        }
      }, 500, Integer.MAX_VALUE); // Wait until the test times out.
    }
  }
}
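The check above inspects only the first volume of each DataNode. A hedged variant that sums the reservation across all of a node's volumes (relying on FsVolumeReferences being iterable over FsVolumeSpi) could look like the sketch below; it is illustrative, not part of the original test.

// Illustrative only: total reserved bytes across every volume of one DataNode.
long reserved = 0;
try (FsDatasetSpi.FsVolumeReferences volumes =
    dn.getFSDataset().getFsVolumeReferences()) {
  for (FsVolumeSpi v : volumes) {
    reserved += ((FsVolumeImpl) v).getReservedForReplicas();
  }
}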
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class DiskBalancerTestUtil, method newImbalancedCluster.
public static MiniDFSCluster newImbalancedCluster(final Configuration conf,
    final int numDatanodes, final long[] storageCapacities,
    final int defaultBlockSize, final int fileLen,
    final StartupOption dnOption)
    throws IOException, InterruptedException, TimeoutException {
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  final String fileName = "/" + UUID.randomUUID().toString();
  final Path filePath = new Path(fileName);
  Preconditions.checkNotNull(storageCapacities);
  Preconditions.checkArgument(storageCapacities.length == 2,
      "need to specify capacities for two storages.");
  // Write a file and restart the cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .storageCapacities(storageCapacities)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK })
      .storagesPerDatanode(2)
      .dnStartupOption(dnOption)
      .build();
  FsVolumeImpl source = null;
  FsVolumeImpl dest = null;
  cluster.waitActive();
  Random r = new Random();
  FileSystem fs = cluster.getFileSystem(0);
  TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
  DFSTestUtil.waitReplication(fs, filePath, (short) 1);
  cluster.restartDataNodes();
  cluster.waitActive();
  // Get the data node and move all data to one disk.
  for (int i = 0; i < numDatanodes; i++) {
    DataNode dnNode = cluster.getDataNodes().get(i);
    try (FsDatasetSpi.FsVolumeReferences refs =
        dnNode.getFSDataset().getFsVolumeReferences()) {
      source = (FsVolumeImpl) refs.get(0);
      dest = (FsVolumeImpl) refs.get(1);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
      DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
          source, dest);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
    }
  }
  cluster.restartDataNodes();
  cluster.waitActive();
  return cluster;
}
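A hedged usage sketch follows; the capacities, block size, and file length are illustrative values rather than ones taken from the source, and passing null for the StartupOption is an assumption that the default datanode startup path is acceptable.

// Illustrative call: one datanode with two equally sized DISK volumes.
long[] capacities = new long[] { 300L * 1024 * 1024, 300L * 1024 * 1024 };
MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
    new HdfsConfiguration(), 1, capacities, 1024, 50 * 1024, null);
try {
  // Run diskbalancer plan/execute commands against the imbalanced node here.
} finally {
  cluster.shutdown();
}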