Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
From the class TestBlockReaderFactory, method testFallbackFromShortCircuitToUnixDomainTraffic.
/**
* If we have a UNIX domain socket configured,
* and we have dfs.client.domain.socket.data.traffic set to true,
* and short-circuit access fails, we should still be able to pass
* data traffic over the UNIX domain socket. Test this.
*/
@Test(timeout = 60000)
public void testFallbackFromShortCircuitToUnixDomainTraffic() throws Exception {
  DFSInputStream.tcpReadsDisabledForTesting = true;
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  // The server is NOT configured with short-circuit local reads;
  // the client is. Both support UNIX domain reads.
  Configuration clientConf = createShortCircuitConf(
      "testFallbackFromShortCircuitToUnixDomainTraffic", sockDir);
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
  clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  Configuration serverConf = new Configuration(clientConf);
  serverConf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  FileSystem dfs = FileSystem.get(cluster.getURI(0), clientConf);
  String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 8193;
  final int SEED = 0xFADED;
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
  byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
  byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  cluster.shutdown();
  sockDir.close();
}
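Outside a MiniDFSCluster, the fallback behavior above comes down to three client-side settings. Below is a minimal standalone sketch using the literal configuration key names; the NameNode URI, socket path, and file path are illustrative assumptions rather than values taken from the test.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DomainSocketFallbackExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Ask for short-circuit reads first...
    conf.setBoolean("dfs.client.read.shortcircuit", true);
    // ...and allow ordinary data traffic over the same UNIX domain socket
    // if short-circuit access fails.
    conf.setBoolean("dfs.client.domain.socket.data.traffic", true);
    // Assumed socket path; it must match the DataNode's configuration.
    conf.set("dfs.domain.socket.path", "/var/lib/hadoop-hdfs/dn_socket");
    try (FileSystem fs = FileSystem.get(
        URI.create("hdfs://localhost:8020"), conf)) { // assumed NameNode URI
      fs.open(new Path("/test_file")).close();
    }
  }
}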
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
From the class TestReplication, method testReplicationWhenBlockCorruption.
/**
 * Test that blocks get re-replicated when some replicas are corrupted,
 * as long as the number of good replicas is at least the minimum
 * replication.
 *
 * Simulate corrupted rbw blocks by injecting dummy replica copies, then
 * restart the DataNodes so the corrupted blocks are detected as soon as
 * possible.
 */
@Test(timeout = 30000)
public void testReplicationWhenBlockCorruption() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3).storagesPerDatanode(1).build();
    FileSystem fs = cluster.getFileSystem();
    Path filePath = new Path("/test");
    FSDataOutputStream create = fs.create(filePath);
    fs.setReplication(filePath, (short) 1);
    create.write(new byte[1024]);
    create.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
    int numReplicaCreated = 0;
    for (final DataNode dn : cluster.getDataNodes()) {
      if (!dn.getFSDataset().contains(block)) {
        cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
        numReplicaCreated++;
      }
    }
    assertEquals(2, numReplicaCreated);
    fs.setReplication(filePath, (short) 3);
    // Restart the DataNodes so they detect the injected dummy replicas.
    cluster.restartDataNodes();
    cluster.waitActive();
    cluster.triggerBlockReports();
    DFSTestUtil.waitReplication(fs, filePath, (short) 3);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
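DFSTestUtil.waitReplication is a test-only helper; a rough equivalent against the public FileSystem API could look like the sketch below. The one-second poll interval is an arbitrary choice, and real code would also want a timeout on the loop.

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ReplicationWait {
  /** Poll until every block of the file reports at least `replication` hosts. */
  static void waitForReplication(FileSystem fs, Path path, short replication)
      throws Exception {
    while (true) {
      FileStatus status = fs.getFileStatus(path);
      BlockLocation[] blocks =
          fs.getFileBlockLocations(status, 0, status.getLen());
      boolean done = true;
      for (BlockLocation block : blocks) {
        if (block.getHosts().length < replication) {
          done = false;
          break;
        }
      }
      if (done) {
        return;
      }
      Thread.sleep(1000); // re-check once per second
    }
  }
}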
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
From the class TestBlockReaderLocal, method testStatistics.
/**
 * Verify per-stream read statistics for a local read, with short-circuit
 * reads either enabled or disabled.
 */
private void testStatistics(boolean isShortCircuit) throws Exception {
  Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
  HdfsConfiguration conf = new HdfsConfiguration();
  TemporarySocketDirectory sockDir = null;
  if (isShortCircuit) {
    DFSInputStream.tcpReadsDisabledForTesting = true;
    sockDir = new TemporarySocketDirectory();
    conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
        new File(sockDir.getDir(), "TestStatisticsForLocalRead.%d.sock")
            .getAbsolutePath());
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
    DomainSocket.disableBindPathValidation();
  } else {
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
  }
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final long RANDOM_SEED = 4567L;
  FSDataInputStream fsIn = null;
  byte[] original = new byte[BlockReaderLocalTest.TEST_LENGTH];
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .hosts(new String[] { NetUtils.getLocalHostname() }).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH,
        (short) 1, RANDOM_SEED);
    try {
      DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
    } catch (InterruptedException e) {
      Assert.fail("unexpected InterruptedException during waitReplication: " + e);
    } catch (TimeoutException e) {
      Assert.fail("unexpected TimeoutException during waitReplication: " + e);
    }
    fsIn = fs.open(TEST_PATH);
    IOUtils.readFully(fsIn, original, 0, BlockReaderLocalTest.TEST_LENGTH);
    HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
    Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
        dfsIn.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
        dfsIn.getReadStatistics().getTotalLocalBytesRead());
    if (isShortCircuit) {
      Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
          dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
    } else {
      Assert.assertEquals(0,
          dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
    }
    fsIn.close();
    fsIn = null;
  } finally {
    DFSInputStream.tcpReadsDisabledForTesting = false;
    if (fsIn != null)
      fsIn.close();
    if (fs != null)
      fs.close();
    if (cluster != null)
      cluster.shutdown();
    if (sockDir != null)
      sockDir.close();
  }
}
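The same read-statistics counters are available to any HDFS client, not just tests. Here is a minimal sketch, assuming the stream comes from a DistributedFileSystem (otherwise the cast fails) and that the path /a exists and is non-empty.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class ReadStatsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf);
         FSDataInputStream in = fs.open(new Path("/a"))) { // assumed path
      byte[] buf = new byte[8192];
      int n = in.read(buf); // a single read is enough to populate the counters
      HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
      System.out.println("read " + n + " bytes; total="
          + dfsIn.getReadStatistics().getTotalBytesRead()
          + " local=" + dfsIn.getReadStatistics().getTotalLocalBytesRead()
          + " shortCircuit="
          + dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
    }
  }
}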
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
From the class TestSeekBug, method testSeekBugDFS.
/**
* Test if the seek bug exists in FSDataInputStream in DFS.
*/
@Test
public void testSeekBugDFS() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("seektest.dat");
    DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
        fileSys.getDefaultBlockSize(file1),
        fileSys.getDefaultReplication(file1), seed);
    seekReadFile(fileSys, file1);
    smallReadSeek(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
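seekReadFile and smallReadSeek are private helpers of TestSeekBug, so they are not shown here. The pattern they exercise is ordinary seek-then-read on FSDataInputStream; a hedged sketch, with an assumed path and arbitrary offsets:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeekReadExample {
  public static void main(String[] args) throws Exception {
    try (FileSystem fs = FileSystem.get(new Configuration());
         FSDataInputStream in = fs.open(new Path("seektest.dat"))) {
      byte[] buf = new byte[4096];
      in.seek(128 * 1024); // jump forward into the file
      int n = in.read(buf, 0, buf.length);
      in.seek(1); // then seek backwards near the start
      // After the backward seek, getPos() must reflect the new offset and
      // the next read must return data from that offset, not stale bytes.
      System.out.println("pos=" + in.getPos() + ", previous read=" + n);
    }
  }
}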
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
From the class TestSetTimes, method testAtimeUpdate.
/**
* Test whether atime can be set explicitly even when the atime support is
* disabled.
*/
@Test
public void testAtimeUpdate() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    fs = cluster.getFileSystem();
    // Create an empty file
    Path p = new Path("/testAtimeUpdate");
    DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short) 1, 0L);
    fs.setTimes(p, -1L, 123456L);
    Assert.assertEquals(123456L, fs.getFileStatus(p).getAccessTime());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
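The -1L passed as the modification time is the sentinel FileSystem#setTimes uses for "leave unchanged", which is what lets the test set only the access time. A minimal sketch of the same call outside a test, with an assumed existing path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetTimesExample {
  public static void main(String[] args) throws Exception {
    try (FileSystem fs = FileSystem.get(new Configuration())) {
      Path p = new Path("/testAtimeUpdate"); // assumed existing file
      fs.setTimes(p, -1L, 123456L); // keep mtime, set atime explicitly
      System.out.println("atime=" + fs.getFileStatus(p).getAccessTime());
    }
  }
}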