Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestClientProtocolForPipelineRecovery, method testPipelineRecoveryForLastBlock.
/** Test whether corrupt replicas are detected correctly during pipeline
* recoveries.
*/
@Test
public void testPipelineRecoveryForLastBlock() throws IOException {
  DFSClientFaultInjector faultInjector = Mockito.mock(DFSClientFaultInjector.class);
  DFSClientFaultInjector oldInjector = DFSClientFaultInjector.get();
  DFSClientFaultInjector.set(faultInjector);
  Configuration conf = new HdfsConfiguration();
  conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 3);
  MiniDFSCluster cluster = null;
  try {
    int numDataNodes = 3;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    Path file = new Path("dataprotocol1.dat");
    Mockito.when(faultInjector.failPacket()).thenReturn(true);
    DFSTestUtil.createFile(fileSys, file, 68000000L, (short) numDataNodes, 0L);
    // At this point, NN should have accepted only valid replicas.
    // Read should succeed.
    FSDataInputStream in = fileSys.open(file);
    try {
      in.read();
      // Test will fail with BlockMissingException if NN does not update the
      // replica state based on the latest report.
    } catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
      Assert.fail("Block is missing because the file was closed with corrupt replicas.");
    }
  } finally {
    DFSClientFaultInjector.set(oldInjector);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestClose, method testWriteAfterClose.
@Test
public void testWriteAfterClose() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    final byte[] data = "foo".getBytes();
    FileSystem fs = FileSystem.get(conf);
    OutputStream out = fs.create(new Path("/test"));
    out.write(data);
    out.close();
    try {
      // Should fail.
      out.write(data);
      fail("Should not have been able to write more data after file is closed.");
    } catch (ClosedChannelException cce) {
      // We got the correct exception. Ignoring.
    }
    // Should succeed. Double closes are OK.
    out.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestCrcCorruption, method doTestEntirelyCorruptFile.
private void doTestEntirelyCorruptFile(int numDataNodes) throws Exception {
  long fileSize = 4096;
  Path file = new Path("/testFile");
  short replFactor = (short) numDataNodes;
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, file, fileSize, replFactor, 12345L);
    DFSTestUtil.waitReplication(fs, file, replFactor);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);
    try {
      IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf, true);
      fail("Didn't get exception");
    } catch (IOException ioe) {
      DFSClient.LOG.info("Got expected exception", ioe);
    }
  } finally {
    cluster.shutdown();
  }
}
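doTestEntirelyCorruptFile is a private helper, so it only runs when wrapped in @Test methods that supply the replication factor. A minimal sketch of such wrappers, assuming one-node and three-node cases (the wrapper names and timeouts here are illustrative, not taken from the snippet):

  @Test(timeout = 5 * 60 * 1000)
  public void testEntirelyCorruptFileOneNode() throws Exception {
    // One replica: corrupting it leaves no good copy to read from.
    doTestEntirelyCorruptFile(1);
  }

  @Test(timeout = 5 * 60 * 1000)
  public void testEntirelyCorruptFileThreeNodes() throws Exception {
    // All three replicas corrupted; the read must still fail.
    doTestEntirelyCorruptFile(3);
  }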
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestCrcCorruption, method testCorruptionDuringWrt.
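The method below references a faultInjector field that is not part of this snippet. A minimal sketch of the setup it assumes, mirroring the mock-and-set pattern from the first example above (the exact injection hook in the Hadoop source may differ):

  private DFSClientFaultInjector faultInjector;

  @Before
  public void setUp() throws IOException {
    // Assumed setup, not shown in the snippet: install a Mockito mock as the
    // client-side fault injector so the test can corrupt packets on demand.
    faultInjector = Mockito.mock(DFSClientFaultInjector.class);
    DFSClientFaultInjector.set(faultInjector);
  }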
/**
* Test case for data corruption during data transmission for
* create/write. To recover from corruption while writing, at
* least two replicas are needed.
*/
@Test(timeout = 50000)
public void testCorruptionDuringWrt() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path file = new Path("/test_corruption_file");
    FSDataOutputStream out = fs.create(file, true, 8192, (short) 3, (long) (128 * 1024 * 1024));
    byte[] data = new byte[65536];
    for (int i = 0; i < 65536; i++) {
      data[i] = (byte) (i % 256);
    }
    for (int i = 0; i < 5; i++) {
      out.write(data, 0, 65535);
    }
    out.hflush();
    // corrupt the packet once
    Mockito.when(faultInjector.corruptPacket()).thenReturn(true, false);
    Mockito.when(faultInjector.uncorruptPacket()).thenReturn(true, false);
    for (int i = 0; i < 5; i++) {
      out.write(data, 0, 65535);
    }
    out.close();
    // read should succeed
    FSDataInputStream in = fs.open(file);
    for (int c; (c = in.read()) != -1; ) {
      // drain the stream; a checksum mismatch would surface here
    }
    in.close();
    // test the retry limit
    out = fs.create(file, true, 8192, (short) 3, (long) (128 * 1024 * 1024));
    // corrupt the packet once and never fix it.
    Mockito.when(faultInjector.corruptPacket()).thenReturn(true, false);
    Mockito.when(faultInjector.uncorruptPacket()).thenReturn(false);
    // the client should give up pipeline reconstruction after retries.
    try {
      for (int i = 0; i < 5; i++) {
        out.write(data, 0, 65535);
      }
      out.close();
      fail("Write did not fail");
    } catch (IOException ioe) {
      // we should get an ioe
      DFSClient.LOG.info("Got expected exception", ioe);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    Mockito.when(faultInjector.corruptPacket()).thenReturn(false);
    Mockito.when(faultInjector.uncorruptPacket()).thenReturn(false);
  }
}
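A note on the thenReturn(true, false) stubs above: Mockito treats multiple arguments to thenReturn as consecutive stubbing, returning the values in order on successive calls and repeating the last one thereafter, which is what limits the corruption to a single packet. A standalone illustration (the mocked List is purely for demonstration):

  @SuppressWarnings("unchecked")
  List<String> stubbed = Mockito.mock(List.class);
  Mockito.when(stubbed.get(0)).thenReturn("first", "second");
  stubbed.get(0); // returns "first"
  stubbed.get(0); // returns "second"
  stubbed.get(0); // returns "second" again: the last stubbed value repeats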
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestDFSClientExcludedNodes, method testExcludedNodes.
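This test writes to cluster and conf fields that the snippet does not declare. A plausible per-test setup and teardown, assuming JUnit lifecycle methods (the field names come from the snippet; the @Before/@After bodies are an assumption):

  private MiniDFSCluster cluster;
  private Configuration conf;

  @Before
  public void setUp() {
    // Assumed initialization: a fresh config per test, cluster built lazily.
    cluster = null;
    conf = new HdfsConfiguration();
  }

  @After
  public void tearDown() {
    // Shut down the cluster here, since the test method itself does not.
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }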
@Test(timeout = 60000)
public void testExcludedNodes() throws IOException {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  Path filePath = new Path("/testExcludedNodes");
  // kill a datanode
  cluster.stopDataNode(AppendTestUtil.nextInt(3));
  OutputStream out = fs.create(filePath, true, 4096, (short) 3, fs.getDefaultBlockSize(filePath));
  out.write(20);
  try {
    out.close();
  } catch (Exception e) {
    fail("Single DN failure should not result in a block abort: \n" + e.getMessage());
  }
}