Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestMover, method testMoverFailedRetryWithPinnedBlocks.
/**
 * Test to verify that the mover works well with pinned blocks as well as
 * failed blocks. The mover should keep retrying only the failed blocks.
 */
@Test(timeout = 90000)
public void testMoverFailedRetryWithPinnedBlocks() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .storageTypes(new StorageType[][] {
          { StorageType.DISK, StorageType.ARCHIVE },
          { StorageType.DISK, StorageType.ARCHIVE } })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String parenDir = "/parent";
    dfs.mkdirs(new Path(parenDir));
    final String file1 = "/parent/testMoverFailedRetryWithPinnedBlocks1";
    // Write to DISK.
    final FSDataOutputStream out = dfs.create(new Path(file1), (short) 2);
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
    out.write(fileData);
    out.close();
    // Add pinned blocks.
    createFileWithFavoredDatanodes(conf, cluster, dfs);
    // Delete a block file so that the block move fails with
    // FileNotFoundException.
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
    Assert.assertEquals("Wrong block count", 2,
        locatedBlocks.locatedBlockCount());
    LocatedBlock lb = locatedBlocks.get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // Move to ARCHIVE.
    dfs.setStoragePolicy(new Path(parenDir), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", parenDir.toString() });
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
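The helper createFileWithFavoredDatanodes invoked above is not included in this excerpt. A minimal sketch of what such a helper could look like, assuming it pins a replica by writing through the DistributedFileSystem.create overload that accepts favored nodes; the file name, node choice, and permissions below are illustrative, not the actual Hadoop implementation:

private void createFileWithFavoredDatanodes(final Configuration conf,
    final MiniDFSCluster cluster, final DistributedFileSystem dfs)
    throws IOException {
  // Favor the first DataNode so the replica is placed on it; block pinning
  // must be enabled (dfs.datanode.block-pinning.enabled) for the replica to
  // stay pinned there.
  final DataNode dn = cluster.getDataNodes().get(0);
  final InetSocketAddress favoredNode =
      new InetSocketAddress("localhost", dn.getXferPort());
  final Path pinnedFile = new Path("/parent/pinnedFile");
  try (FSDataOutputStream out = dfs.create(pinnedFile,
      FsPermission.getDefault(), true, 4096, (short) 1, DEFAULT_BLOCK_SIZE,
      null, new InetSocketAddress[] { favoredNode })) {
    out.write(StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE));
  }
}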
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestSpaceReservation, method testRBWFileCreationError.
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testRBWFileCreationError() throws Exception {
  final short replication = 1;
  startCluster(BLOCK_SIZE, replication, -1);
  final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
      .get(0).getFSDataset().getFsVolumeReferences().get(0);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");
  // Mock BlockPoolSlice so that RBW file creation throws an IOException.
  BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
  Mockito.when(blockPoolSlice.createRbwFile((Block) Mockito.any()))
      .thenThrow(new IOException("Synthetic IO Exception through MOCK"));
  Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
  field.setAccessible(true);
  Map<String, BlockPoolSlice> bpSlices =
      (Map<String, BlockPoolSlice>) field.get(fsVolumeImpl);
  bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);
  try {
    // Write 1 byte to the file.
    FSDataOutputStream os = fs.create(file, replication);
    os.write(new byte[1]);
    os.hsync();
    os.close();
    fail("Expecting IOException file creation failure");
  } catch (IOException e) {
    // Expected; ignore.
  }
  // Ensure the reserved RBW space is released.
  assertTrue("Expected ZERO but got " + fsVolumeImpl.getReservedForReplicas(),
      fsVolumeImpl.getReservedForReplicas() == 0);
  // Reserve some bytes to verify that the space is not cleared twice.
  fsVolumeImpl.reserveSpaceForReplica(1000);
  try {
    // Write 1 byte to the file.
    FSDataOutputStream os = fs.create(new Path("/" + methodName + ".02.dat"),
        replication);
    os.write(new byte[1]);
    os.hsync();
    os.close();
    fail("Expecting IOException file creation failure");
  } catch (IOException e) {
    // Expected; ignore.
  }
  // Ensure the reserved RBW space is released only once.
  assertTrue(fsVolumeImpl.getReservedForReplicas() == 1000);
}
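The test above swaps the real BlockPoolSlice for a Mockito mock by reading FsVolumeImpl's private bpSlices map via reflection and inserting the mock into it. A generic, hypothetical helper for that reflection step, not part of the Hadoop tests, might look like:

// Fetch the value of a private field via reflection, mirroring the
// "bpSlices" access in the test above.
private static Object readPrivateField(Class<?> declaringClass, Object target,
    String fieldName) throws ReflectiveOperationException {
  Field field = declaringClass.getDeclaredField(fieldName);
  field.setAccessible(true);
  return field.get(target);
}

With such a helper, the field lookup in the test collapses to a single cast: Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) readPrivateField(FsVolumeImpl.class, fsVolumeImpl, "bpSlices");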
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestSpaceReservation, method createFileAndTestSpaceReservation.
private void createFileAndTestSpaceReservation(
    final String fileNamePrefix, final int fileBlockSize)
    throws IOException, InterruptedException {
  // Enough for 1 block + meta files + some delta.
  final long configuredCapacity = fileBlockSize * 2 - 1;
  startCluster(BLOCK_SIZE, 1, configuredCapacity);
  FSDataOutputStream out = null;
  Path path = new Path("/" + fileNamePrefix + ".dat");
  try {
    out = fs.create(path, false, 4096, (short) 1, fileBlockSize);
    byte[] buffer = new byte[rand.nextInt(fileBlockSize / 4)];
    out.write(buffer);
    out.hsync();
    int bytesWritten = buffer.length;
    // Check that space was reserved for a full block minus the bytes written.
    assertThat(singletonVolume.getReservedForReplicas(),
        is((long) fileBlockSize - bytesWritten));
    out.close();
    out = null;
    // Check that the reserved space has been released now that the file is
    // closed.
    assertThat(singletonVolume.getReservedForReplicas(), is(0L));
    // Reopen the file for append and write the buffer again.
    out = fs.append(path);
    out.write(buffer);
    out.hsync();
    bytesWritten += buffer.length;
    // Check that space was again reserved for a full block minus the bytes
    // written so far.
    assertThat(singletonVolume.getReservedForReplicas(),
        is((long) fileBlockSize - bytesWritten));
    // Write once more and verify again. This ensures that the reserved space
    // is progressively adjusted to account for bytes written to disk.
    out.write(buffer);
    out.hsync();
    bytesWritten += buffer.length;
    assertThat(singletonVolume.getReservedForReplicas(),
        is((long) fileBlockSize - bytesWritten));
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
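createFileAndTestSpaceReservation is a private helper parameterized on the block size, so multiple tests can exercise the reservation accounting with different block sizes. The tests that call it are not part of this excerpt; hypothetical callers, with illustrative method names, could look like:

@Test(timeout = 300000)
public void testSpaceReservedWithDefaultBlockSize()
    throws IOException, InterruptedException {
  createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(),
      BLOCK_SIZE);
}

@Test(timeout = 300000)
public void testSpaceReservedWithNonDefaultBlockSize()
    throws IOException, InterruptedException {
  // A block size smaller than the cluster default.
  createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(),
      BLOCK_SIZE / 2);
}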
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestSpaceReservation, method testReservedSpaceForPipelineRecovery.
@Test(timeout = 30000)
public void testReservedSpaceForPipelineRecovery() throws Exception {
  final short replication = 3;
  startCluster(BLOCK_SIZE, replication, -1);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");
  old = DataNodeFaultInjector.get();
  // Fault injector that fails the connection to the mirror on the first
  // attempt only.
  DataNodeFaultInjector.set(new DataNodeFaultInjector() {
    private int tries = 0;

    @Override
    public void failMirrorConnection() throws IOException {
      if (tries++ == 0) {
        throw new IOException("Failing Mirror for space reservation");
      }
    }
  });
  // Write 1 byte to the file and close the writer.
  FSDataOutputStream os = fs.create(file, replication);
  os.write(new byte[1]);
  os.close();
  // Ensure all space reserved for the replica was released on each DataNode.
  cluster.triggerBlockReports();
  for (final DataNode dn : cluster.getDataNodes()) {
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dn.getFSDataset().getFsVolumeReferences()) {
      final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          LOG.info("dn " + dn.getDisplayName() + " space : "
              + volume.getReservedForReplicas());
          return (volume.getReservedForReplicas() == 0);
        }
      }, 100, Integer.MAX_VALUE); // Poll until the test times out.
    }
  }
}
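The field old captures the previous DataNodeFaultInjector before the test installs its own; the restoration is not shown in this excerpt. A minimal teardown sketch, with an illustrative method name:

@After
public void restoreFaultInjector() {
  if (old != null) {
    DataNodeFaultInjector.set(old);
  }
}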
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestSpaceReservation, method testWithLimitedSpace.
@Test(timeout = 300000)
public void testWithLimitedSpace() throws IOException {
  // Cluster with just enough space for a full block + meta.
  startCluster(BLOCK_SIZE, 1, 2 * BLOCK_SIZE - 1);
  final String methodName = GenericTestUtils.getMethodName();
  Path file1 = new Path("/" + methodName + ".01.dat");
  Path file2 = new Path("/" + methodName + ".02.dat");
  // Create two files.
  FSDataOutputStream os1 = null, os2 = null;
  try {
    os1 = fs.create(file1);
    os2 = fs.create(file2);
    // Write one byte to the first file.
    byte[] data = new byte[1];
    os1.write(data);
    os1.hsync();
    // Try to write one byte to the second file.
    // The block allocation must fail.
    thrown.expect(RemoteException.class);
    os2.write(data);
    os2.hsync();
  } finally {
    if (os1 != null) {
      os1.close();
    }
    // os2.close() will fail as no block was allocated.
  }
}
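The finally block intentionally closes only os1, because os2.close() would itself throw once the block allocation for file2 has failed. An alternative cleanup, not taken from the original test, is org.apache.hadoop.io.IOUtils.closeStream, which swallows the IOException on close:

// Sketch of an alternative finally block: closeStream ignores the exception
// that os2.close() throws because no block was ever allocated for it.
IOUtils.closeStream(os1);
IOUtils.closeStream(os2);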