Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project: class TestSpaceReservation, method testTmpSpaceReserve.
/**
 * Verify temp-space accounting during block re-replication.
 *
 * Scenario 1 (success): the bytes reserved for the incoming replica equal
 * the replica size, and the reservation is fully released afterwards.
 * Scenario 2 (tmp-file creation fails via a mocked BlockPoolSlice): the
 * reservation made for the failed replica is released exactly once, so a
 * pre-existing 1000-byte reservation survives untouched.
 *
 * @throws Exception on any unexpected test failure
 */
@Test(timeout = 300000)
public void testTmpSpaceReserve() throws Exception {
  final short replication = 2;
  startCluster(BLOCK_SIZE, replication, -1);
  final int byteCount1 = 100;
  final int byteCount2 = 200;
  final String methodName = GenericTestUtils.getMethodName();
  // Test positive scenario
  {
    final Path file = new Path("/" + methodName + ".01.dat");
    try (FSDataOutputStream os = fs.create(file, (short) 1)) {
      // Write test data to the file
      os.write(new byte[byteCount1]);
      os.hsync();
    }
    BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, 10);
    String firstReplicaNode = blockLocations[0].getNames()[0];
    // Re-replication targets whichever DN does NOT already hold the replica.
    int newReplicaDNIndex = 0;
    if (firstReplicaNode.equals(cluster.getDataNodes().get(0).getDisplayName())) {
      newReplicaDNIndex = 1;
    }
    // Hold the volume references open (and close them afterwards) while the
    // volume is in use, matching the try-with-resources pattern used by the
    // other tests in this class; the bare get(0) previously leaked the
    // reference count.
    try (FsDatasetSpi.FsVolumeReferences refs =
        cluster.getDataNodes().get(newReplicaDNIndex).getFSDataset().getFsVolumeReferences()) {
      FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) refs.get(0);
      performReReplication(file, true);
      assertEquals("Wrong reserve space for Tmp ", byteCount1, fsVolumeImpl.getRecentReserved());
      assertEquals("Reserved Tmp space is not released", 0, fsVolumeImpl.getReservedForReplicas());
    }
  }
  // Test when file creation fails
  {
    final Path file = new Path("/" + methodName + ".01.dat");
    try (FSDataOutputStream os = fs.create(file, (short) 1)) {
      // Write test data to the file
      os.write(new byte[byteCount2]);
      os.hsync();
    }
    BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, 10);
    String firstReplicaNode = blockLocations[0].getNames()[0];
    int newReplicaDNIndex = 0;
    if (firstReplicaNode.equals(cluster.getDataNodes().get(0).getDisplayName())) {
      newReplicaDNIndex = 1;
    }
    // Mock a BlockPoolSlice whose tmp-file creation always fails, so the
    // re-replication attempt on the target DN aborts mid-way.
    BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
    Mockito.when(blockPoolSlice.createTmpFile((Block) Mockito.any())).thenThrow(new IOException("Synthetic IO Exception Throgh MOCK"));
    try (FsDatasetSpi.FsVolumeReferences refs =
        cluster.getDataNodes().get(newReplicaDNIndex).getFSDataset().getFsVolumeReferences()) {
      final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) refs.get(0);
      // Reserve some bytes to verify double clearing space shouldn't happen
      fsVolumeImpl.reserveSpaceForReplica(1000);
      // Splice the failing mock into the volume's private bpSlices map via
      // reflection, since there is no public setter for it.
      Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
      field.setAccessible(true);
      @SuppressWarnings("unchecked") Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field.get(fsVolumeImpl);
      bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);
      performReReplication(file, false);
      assertEquals("Wrong reserve space for Tmp ", byteCount2, fsVolumeImpl.getRecentReserved());
      assertEquals("Tmp space is not released OR released twice", 1000, fsVolumeImpl.getReservedForReplicas());
    }
  }
}
Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project: class TestSpaceReservation, method testSpaceReleasedOnUnexpectedEof.
/**
 * Ensure that space reserved for an in-flight replica is given back when
 * the writing client disappears without closing the stream.
 *
 * Every replica in the write pipeline is checked.
 *
 * @throws IOException
 */
@Test(timeout = 300000)
public void testSpaceReleasedOnUnexpectedEof() throws IOException, InterruptedException, TimeoutException {
  final short replication = 3;
  startCluster(BLOCK_SIZE, replication, -1);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");
  // Put a single byte into the file, then abort the writer abruptly so the
  // client never completes the block.
  FSDataOutputStream out = fs.create(file, replication);
  out.write(new byte[1]);
  out.hsync();
  DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
  // Verify each DataNode eventually drops its replica reservation.
  for (DataNode dataNode : cluster.getDataNodes()) {
    try (FsDatasetSpi.FsVolumeReferences volumeRefs = dataNode.getFSDataset().getFsVolumeReferences()) {
      final FsVolumeImpl firstVolume = (FsVolumeImpl) volumeRefs.get(0);
      // Poll until the reservation hits zero; let the @Test timeout bound
      // the wait rather than a shorter local limit.
      GenericTestUtils.waitFor(() -> firstVolume.getReservedForReplicas() == 0, 500, Integer.MAX_VALUE);
    }
  }
}
Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project: class TestFsck, method testFsckOpenFiles.
/**
 * Verify fsck reporting of files that are open for write:
 * a default fsck stays HEALTHY and hides open files, the -openforwrite
 * option lists them (with their under-construction blocks), and once the
 * file is closed neither marker appears any more.
 *
 * @throws Exception on any unexpected test failure
 */
@Test
public void testFsckOpenFiles() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(4).build();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  String topDir = "/srcdat";
  String randomString = "HADOOP ";
  FileSystem fs = cluster.getFileSystem();
  cluster.waitActive();
  util.createFiles(fs, topDir);
  util.waitReplication(fs, topDir, (short) 3);
  String outStr = runFsck(conf, 0, true, "/");
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  // Open a file for writing and do not close it for now.
  Path openFile = new Path(topDir + "/openFile");
  FSDataOutputStream out = fs.create(openFile);
  // Encode the payload once instead of on every loop iteration.
  final byte[] content = randomString.getBytes();
  for (int i = 0; i < 100; i++) {
    out.write(content);
  }
  ((DFSOutputStream) out.getWrappedStream()).hflush();
  // We expect the filesystem to be HEALTHY and the open file hidden by
  // default.
  outStr = runFsck(conf, 0, true, topDir);
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  assertFalse(outStr.contains("OPENFORWRITE"));
  // Use -openforwrite option to list open files
  outStr = runFsck(conf, 0, true, topDir, "-files", "-blocks", "-locations", "-openforwrite");
  System.out.println(outStr);
  assertTrue(outStr.contains("OPENFORWRITE"));
  assertTrue(outStr.contains("Under Construction Block:"));
  assertTrue(outStr.contains("openFile"));
  // Close the file
  out.close();
  // Now, fsck should show HEALTHY fs and should not show any open files
  outStr = runFsck(conf, 0, true, topDir);
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  assertFalse(outStr.contains("OPENFORWRITE"));
  assertFalse(outStr.contains("Under Construction Block:"));
  util.cleanup(fs, topDir);
}
Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project: class TestFsck, method writeFile.
/**
 * Creates {@code fileName} under {@code dir} containing the characters of
 * "teststring". The output stream is closed even if the write fails
 * (try-with-resources), so no stream is leaked on error.
 *
 * @param dfs the filesystem to write to
 * @param dir parent directory of the new file
 * @param fileName name of the file to create
 * @throws IOException if creation or writing fails
 */
private void writeFile(final DistributedFileSystem dfs, Path dir, String fileName) throws IOException {
  // Path(parent, child) joins with the separator; no manual concatenation.
  final Path filePath = new Path(dir, fileName);
  try (FSDataOutputStream out = dfs.create(filePath)) {
    out.writeChars("teststring");
  }
}
Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project: class TestINodeFile, method testWriteToDeletedFile.
/**
 * A write that triggers block allocation must fail once the target file has
 * been deleted from the namespace.
 */
@Test(timeout = 120000)
public void testWriteToDeletedFile() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  miniCluster.waitActive();
  FileSystem dfs = miniCluster.getFileSystem();
  Path dir = new Path("/test1");
  assertTrue(dfs.mkdirs(dir));
  int chunkSize = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  byte[] payload = new byte[chunkSize];
  // Open a file for writing, then remove it before anything is flushed.
  Path testFile = new Path("/test1/file");
  FSDataOutputStream out = dfs.create(testFile);
  dfs.delete(testFile, false);
  // The pending write should now be rejected, since adding a block to a
  // deleted file is impossible.
  try {
    out.write(payload, 0, payload.length);
    // hflush() forces the addBlock() request to reach the NameNode at once.
    out.hflush();
    fail("Write should fail after delete");
  } catch (Exception ignored) {
    /* expected */
  } finally {
    miniCluster.shutdown();
  }
}
Aggregations