
Example 91 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestBlockReaderFactory, method testFallbackFromShortCircuitToUnixDomainTraffic.

/**
   * If we have a UNIX domain socket configured,
   * and we have dfs.client.domain.socket.data.traffic set to true,
   * and short-circuit access fails, we should still be able to pass
   * data traffic over the UNIX domain socket.  Test this.
   */
@Test(timeout = 60000)
public void testFallbackFromShortCircuitToUnixDomainTraffic() throws Exception {
    DFSInputStream.tcpReadsDisabledForTesting = true;
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    // The server is NOT configured with short-circuit local reads;
    // the client is.  Both support UNIX domain reads.
    Configuration clientConf = createShortCircuitConf("testFallbackFromShortCircuitToUnixDomainTraffic", sockDir);
    clientConf.set(DFS_CLIENT_CONTEXT, "testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
    clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
    Configuration serverConf = new Configuration(clientConf);
    serverConf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem dfs = FileSystem.get(cluster.getURI(0), clientConf);
    String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 8193;
    final int SEED = 0xFADED;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    cluster.shutdown();
    sockDir.close();
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), Test (org.junit.Test)
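
As a standalone illustration of the client-side setup this test exercises, here is a minimal sketch, assuming a reachable cluster at a placeholder NameNode URI. The key strings match the constants used above (HdfsClientConfigKeys.Read.ShortCircuit.KEY is "dfs.client.read.shortcircuit", DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC is "dfs.client.domain.socket.data.traffic"); the socket path, host, and class name are hypothetical.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DomainSocketFallbackSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Request short-circuit reads, but allow ordinary data traffic to
        // fall back onto the UNIX domain socket if short-circuit fails.
        conf.setBoolean("dfs.client.read.shortcircuit", true);
        conf.setBoolean("dfs.client.domain.socket.data.traffic", true);
        conf.set("dfs.domain.socket.path", "/var/run/hdfs/dn_socket"); // hypothetical path
        try (FileSystem fs = FileSystem.get(new URI("hdfs://namenode:8020"), conf);
             FSDataInputStream in = fs.open(new Path("/test_file"))) {
            byte[] buf = new byte[4096];
            int n = in.read(buf); // may be served over the domain socket
            System.out.println("read " + n + " bytes");
        }
    }
}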

Example 92 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestReplication, method testReplicationWhenBlockCorruption.

/**
   * Test that blocks get replicated when there are corrupted blocks and the
   * number of good replicas is at least the minimum replication.
   *
   * Simulate RBW blocks by creating dummy copies, then restart a DataNode so
   * the corrupted blocks are detected as soon as possible.
   */
@Test(timeout = 30000)
public void testReplicationWhenBlockCorruption() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storagesPerDatanode(1).build();
        FileSystem fs = cluster.getFileSystem();
        Path filePath = new Path("/test");
        FSDataOutputStream create = fs.create(filePath);
        fs.setReplication(filePath, (short) 1);
        create.write(new byte[1024]);
        create.close();
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
        // Inject a corrupt replica on every DataNode that does not already
        // hold the block; with replication 1, that is two of the three nodes.
        int numCorruptReplicas = 0;
        for (final DataNode dn : cluster.getDataNodes()) {
            if (!dn.getFSDataset().contains(block)) {
                cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
                numCorruptReplicas++;
            }
        }
        assertEquals(2, numCorruptReplicas);
        fs.setReplication(filePath, (short) 3);
        // Restart the DataNodes so they detect the dummy (corrupt) copies
        // and report them to the NameNode.
        cluster.restartDataNodes();
        cluster.waitActive();
        cluster.triggerBlockReports();
        DFSTestUtil.waitReplication(fs, filePath, (short) 3);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), FileSystem (org.apache.hadoop.fs.FileSystem), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
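
For context, a minimal sketch of the replication APIs this test drives, assuming an existing file on a placeholder cluster. Note that FileStatus.getReplication() reports the target factor; the live replica placement can be observed through getFileBlockLocations while the NameNode converges.

import java.net.URI;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(new URI("hdfs://namenode:8020"), conf)) {
            Path p = new Path("/test");
            fs.setReplication(p, (short) 3); // request three replicas
            FileStatus st = fs.getFileStatus(p);
            // getReplication() is the requested factor; the actual replica
            // count converges as the NameNode schedules re-replication.
            System.out.println("target replication: " + st.getReplication());
            for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
                System.out.println("block hosts: " + Arrays.toString(loc.getHosts()));
            }
        }
    }
}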

Example 93 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestBlockReaderLocal, method testStatistics.

private void testStatistics(boolean isShortCircuit) throws Exception {
    Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
    HdfsConfiguration conf = new HdfsConfiguration();
    TemporarySocketDirectory sockDir = null;
    if (isShortCircuit) {
        DFSInputStream.tcpReadsDisabledForTesting = true;
        sockDir = new TemporarySocketDirectory();
        conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(), "TestStatisticsForLocalRead.%d.sock").getAbsolutePath());
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        DomainSocket.disableBindPathValidation();
    } else {
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
    }
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final long RANDOM_SEED = 4567L;
    FSDataInputStream fsIn = null;
    byte[] original = new byte[BlockReaderLocalTest.TEST_LENGTH];
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).hosts(new String[] { NetUtils.getLocalHostname() }).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH, (short) 1, RANDOM_SEED);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        IOUtils.readFully(fsIn, original, 0, BlockReaderLocalTest.TEST_LENGTH);
        HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalBytesRead());
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalLocalBytesRead());
        if (isShortCircuit) {
            Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
        } else {
            Assert.assertEquals(0, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
        }
        fsIn.close();
        fsIn = null;
    } finally {
        DFSInputStream.tcpReadsDisabledForTesting = false;
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
        if (sockDir != null)
            sockDir.close();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream), TimeoutException (java.util.concurrent.TimeoutException)
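
Outside the test harness, the same read statistics can be inspected on any client read. A minimal sketch follows, assuming a placeholder NameNode URI and an existing file /a; it also assumes ReadStatistics is importable from org.apache.hadoop.hdfs, as in recent Hadoop branches.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.ReadStatistics;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.io.IOUtils;

public class ReadStatisticsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(new URI("hdfs://namenode:8020"), conf);
             FSDataInputStream in = fs.open(new Path("/a"))) {
            byte[] buf = new byte[8192];
            IOUtils.readFully(in, buf, 0, buf.length);
            // Against HDFS the stream is an HdfsDataInputStream, which breaks
            // the byte count down by transport (remote, local, short-circuit).
            if (in instanceof HdfsDataInputStream) {
                ReadStatistics stats = ((HdfsDataInputStream) in).getReadStatistics();
                System.out.println("total=" + stats.getTotalBytesRead()
                        + " local=" + stats.getTotalLocalBytesRead()
                        + " shortCircuit=" + stats.getTotalShortCircuitBytesRead());
            }
        }
    }
}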

Example 94 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestSeekBug, method testSeekBugDFS.

/**
   * Test if the seek bug exists in FSDataInputStream in DFS.
   */
@Test
public void testSeekBugDFS() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fileSys = cluster.getFileSystem();
    try {
        Path file1 = new Path("seektest.dat");
        DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB, fileSys.getDefaultBlockSize(file1), fileSys.getDefaultReplication(file1), seed);
        seekReadFile(fileSys, file1);
        smallReadSeek(fileSys, file1);
        cleanupFile(fileSys, file1);
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), ChecksumFileSystem (org.apache.hadoop.fs.ChecksumFileSystem), Test (org.junit.Test)
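
The pattern under test boils down to the Seekable contract of FSDataInputStream: seek() repositions the stream absolutely, and the following read() must return the byte at the new offset regardless of internal buffering. A minimal sketch, assuming a placeholder cluster and an existing file:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeekSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(new URI("hdfs://namenode:8020"), conf);
             FSDataInputStream in = fs.open(new Path("/seektest.dat"))) {
            in.seek(0);                   // absolute positioning
            int first = in.read();
            in.seek(1024 * 1024 - 1);     // jump near an internal buffer boundary
            int last = in.read();         // must be the byte at the new offset
            System.out.println("first=" + first + " last=" + last
                    + " pos=" + in.getPos());
        }
    }
}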

Example 95 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestSetTimes, method testAtimeUpdate.

/**
   * Test whether atime can be set explicitly even when atime support is
   * disabled (access time precision set to 0).
   */
@Test
public void testAtimeUpdate() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        fs = cluster.getFileSystem();
        // Create an empty file
        Path p = new Path("/testAtimeUpdate");
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 0L);
        fs.setTimes(p, -1L, 123456L);
        Assert.assertEquals(123456L, fs.getFileStatus(p).getAccessTime());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
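
The contract being verified is that FileSystem.setTimes() treats -1 as "leave unchanged", so atime can be set while mtime is preserved, even with dfs.namenode.accesstime.precision set to 0. A minimal sketch against a placeholder cluster, assuming the file already exists:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetTimesSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(new URI("hdfs://namenode:8020"), conf)) {
            Path p = new Path("/testAtimeUpdate");
            FileStatus before = fs.getFileStatus(p);
            fs.setTimes(p, -1L, 123456L); // -1 keeps mtime; atime set explicitly
            FileStatus after = fs.getFileStatus(p);
            System.out.println("mtime unchanged: "
                    + (after.getModificationTime() == before.getModificationTime()));
            System.out.println("atime: " + after.getAccessTime());
        }
    }
}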

Aggregations

FileSystem (org.apache.hadoop.fs.FileSystem): 2611 usages
Path (org.apache.hadoop.fs.Path): 2199 usages
Test (org.junit.Test): 1034 usages
Configuration (org.apache.hadoop.conf.Configuration): 890 usages
IOException (java.io.IOException): 757 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 419 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 264 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 227 usages
ArrayList (java.util.ArrayList): 208 usages
File (java.io.File): 181 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 165 usages
JobConf (org.apache.hadoop.mapred.JobConf): 163 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 151 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 145 usages
URI (java.net.URI): 135 usages
SequenceFile (org.apache.hadoop.io.SequenceFile): 118 usages
Text (org.apache.hadoop.io.Text): 112 usages
FileNotFoundException (java.io.FileNotFoundException): 102 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 94 usages
Job (org.apache.hadoop.mapreduce.Job): 81 usages