Example 71 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by Apache.

The class TestFileSystemAccessService, method fileSystemExecutorNoNameNode.

@Test
@TestException(exception = FileSystemAccessException.class, msgRegExp = "H06.*")
@TestDir
@TestHdfs
public void fileSystemExecutorNoNameNode() throws Exception {
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), SchedulerService.class.getName(), FileSystemAccessService.class.getName()));
    Configuration hadoopConf = new Configuration(false);
    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
    createHadoopConf(hadoopConf);
    Configuration conf = new Configuration(false);
    conf.set("server.services", services);
    Server server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    FileSystemAccess fsAccess = server.get(FileSystemAccess.class);
    Configuration hdfsConf = fsAccess.getFileSystemConfiguration();
    // Blank out the NameNode URI so the executor call fails with error H06.
    hdfsConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "");
    fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor<Void>() {

        @Override
        public Void execute(FileSystem fs) throws IOException {
            return null;
        }
    });
}
Also used: FileSystemAccess (org.apache.hadoop.lib.service.FileSystemAccess), Configuration (org.apache.hadoop.conf.Configuration), Server (org.apache.hadoop.lib.server.Server), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), TestHdfs (org.apache.hadoop.test.TestHdfs), TestException (org.apache.hadoop.test.TestException), TestDir (org.apache.hadoop.test.TestDir), Test (org.junit.Test)
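For contrast, a minimal sketch of a successful executor call under the same service setup is shown below; the directory path and the mkdirs call are illustrative additions, not part of the original test:

// Hypothetical success case: leave the NameNode URI from the test HDFS
// configuration intact, and the executor runs instead of failing with H06.
Configuration okConf = fsAccess.getFileSystemConfiguration();
fsAccess.execute("u", okConf, new FileSystemAccess.FileSystemExecutor<Void>() {

    @Override
    public Void execute(FileSystem fs) throws IOException {
        // Any FileSystem call works here; mkdirs is just an example.
        fs.mkdirs(new Path("/tmp/fsaccess-demo"));
        return null;
    }
});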

Example 72 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by Apache.

The class MiniDFSCluster, method copyNameDirs.

public static void copyNameDirs(Collection<URI> srcDirs, Collection<URI> dstDirs, Configuration dstConf) throws IOException {
    // Only the first source directory is used as the copy source.
    URI srcDir = Lists.newArrayList(srcDirs).get(0);
    FileSystem dstFS = FileSystem.getLocal(dstConf).getRaw();
    for (URI dstDir : dstDirs) {
        Preconditions.checkArgument(!dstDir.equals(srcDir), "src and dst are the same: " + dstDir);
        File dstDirF = new File(dstDir);
        if (dstDirF.exists()) {
            if (!FileUtil.fullyDelete(dstDirF)) {
                throw new IOException("Unable to delete: " + dstDirF);
            }
        }
        LOG.info("Copying namedir from primary node dir " + srcDir + " to " + dstDir);
        FileUtil.copy(new File(srcDir), dstFS, new Path(dstDir), false, dstConf);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), URI (java.net.URI), Util.fileAsURI (org.apache.hadoop.hdfs.server.common.Util.fileAsURI), File (java.io.File)
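A minimal sketch of how copyNameDirs might be invoked follows; the directory paths are hypothetical, and Util.fileAsURI is the helper from the imports listed above:

// Hypothetical caller: mirror a NameNode metadata directory into a fresh
// location, e.g. before bringing up a second NameNode (paths illustrative).
Configuration conf = new Configuration();
Collection<URI> src = Collections.singletonList(
    Util.fileAsURI(new File("/data/dfs/name-primary")));
Collection<URI> dst = Collections.singletonList(
    Util.fileAsURI(new File("/data/dfs/name-copy")));
MiniDFSCluster.copyNameDirs(src, dst, conf);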

Example 73 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by Apache.

The class TestClientProtocolForPipelineRecovery, method testEvictWriter.

/**
   * Test that the writer is kicked out of a node.
   */
@Test
public void testEvictWriter() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        Path file = new Path("testEvictWriter.dat");
        FSDataOutputStream out = fs.create(file, (short) 2);
        out.write(0x31);
        out.hflush();
        // get nodes in the pipeline
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        DatanodeInfo[] nodes = dfsOut.getPipeline();
        Assert.assertEquals(2, nodes.length);
        String dnAddr = nodes[1].getIpcAddr(false);
        // evict the writer from the second datanode and wait until
        // the pipeline is rebuilt.
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        final String[] args1 = { "-evictWriters", dnAddr };
        Assert.assertEquals(0, dfsadmin.run(args1));
        out.write(0x31);
        out.hflush();
        // get the new pipeline and check the evicted node is not in it.
        nodes = dfsOut.getPipeline();
        try {
            Assert.assertTrue(nodes.length > 0);
            for (int i = 0; i < nodes.length; i++) {
                Assert.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
            }
        } finally {
            out.close();
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
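Note that the DFSAdmin.run call with { "-evictWriters", dnAddr } is the programmatic form of the hdfs dfsadmin -evictWriters <datanode_host:ipc_port> shell command: it asks the datanode to close its open writers, and the client's pipeline recovery then rebuilds the pipeline, which is why the test asserts that dnAddr no longer appears in it.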

Example 74 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by Apache.

The class TestClientProtocolForPipelineRecovery, method testPacketTransmissionDelay.

@Test
public void testPacketTransmissionDelay() throws Exception {
    // Make the first datanode drop heartbeat packets instead of relaying them.
    DataNodeFaultInjector dnFaultInjector = new DataNodeFaultInjector() {

        @Override
        public boolean dropHeartbeatPacket() {
            return true;
        }
    };
    DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
    DataNodeFaultInjector.set(dnFaultInjector);
    // Set the client socket timeout to 3 seconds. Normally a heartbeat packet
    // would be sent every 1.5 seconds if there is no data traffic.
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, "3000");
    MiniDFSCluster cluster = null;
    try {
        int numDataNodes = 2;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(new Path("noheartbeat.dat"), (short) 2);
        out.write(0x31);
        out.hflush();
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        // original pipeline
        DatanodeInfo[] orgNodes = dfsOut.getPipeline();
        // Cause the second datanode to time out while reading a packet.
        Thread.sleep(3500);
        out.write(0x32);
        out.hflush();
        // new pipeline
        DatanodeInfo[] newNodes = dfsOut.getPipeline();
        out.close();
        boolean contains = false;
        for (int i = 0; i < newNodes.length; i++) {
            if (orgNodes[0].getXferAddr().equals(newNodes[i].getXferAddr())) {
                throw new IOException("The first datanode should have been replaced.");
            }
            if (orgNodes[1].getXferAddr().equals(newNodes[i].getXferAddr())) {
                contains = true;
            }
        }
        Assert.assertTrue(contains);
    } finally {
        DataNodeFaultInjector.set(oldDnInjector);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), DataNodeFaultInjector (org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector), IOException (java.io.IOException), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
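The timing in this test is deliberate: with dfs.client.socket-timeout at 3000 ms, an idle client sends heartbeat packets roughly every 1500 ms, and the fault injector drops them at the first datanode so they never reach the second. Sleeping 3500 ms therefore guarantees the downstream read timeout fires, and the next hflush rebuilds the pipeline; the assertions confirm the first datanode was replaced while the second was kept.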

Example 75 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by Apache.

The class TestClientProtocolForPipelineRecovery, method testZeroByteBlockRecovery.

/**
   * Test to make sure the checksum is set correctly after pipeline
   * recovery transfers a 0-byte partial block. If it fails, the test
   * will report "java.io.IOException: Failed to replace a bad datanode
   * on the existing pipeline due to no more good datanodes being
   * available to try." This indicates there was a real failure
   * after the staged failure.
   */
@Test
public void testZeroByteBlockRecovery() throws Exception {
    // Make the first datanode fail once. With 3 nodes and a block being
    // created with 2 replicas, anything more than this planned failure
    // will cause a test failure.
    DataNodeFaultInjector dnFaultInjector = new DataNodeFaultInjector() {

        int tries = 1;

        @Override
        public void stopSendingPacketDownstream(final String mirrAddr) throws IOException {
            if (tries > 0) {
                tries--;
                try {
                    Thread.sleep(60000);
                } catch (InterruptedException ie) {
                    throw new IOException("Interrupted while sleeping. Bailing out.");
                }
            }
        }
    };
    DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
    DataNodeFaultInjector.set(dnFaultInjector);
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, "1000");
    conf.set(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY, "ALWAYS");
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(new Path("noheartbeat.dat"), (short) 2);
        out.write(0x31);
        out.hflush();
        out.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        DataNodeFaultInjector.set(oldDnInjector);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), DataNodeFaultInjector (org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
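Two settings make the staged failure deterministic here: the 1000 ms socket timeout ensures the injected 60-second stall is detected quickly, and the ALWAYS replace-datanode-on-failure policy forces recovery to transfer the still-empty partial block to a replacement datanode, exercising the 0-byte checksum path described in the javadoc. If that path were broken, the close() would fail with the "Failed to replace a bad datanode" error quoted above.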

Aggregations

FileSystem (org.apache.hadoop.fs.FileSystem): 2611
Path (org.apache.hadoop.fs.Path): 2199
Test (org.junit.Test): 1034
Configuration (org.apache.hadoop.conf.Configuration): 890
IOException (java.io.IOException): 757
FileStatus (org.apache.hadoop.fs.FileStatus): 419
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 264
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 227
ArrayList (java.util.ArrayList): 208
File (java.io.File): 181
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 165
JobConf (org.apache.hadoop.mapred.JobConf): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 151
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 145
URI (java.net.URI): 135
SequenceFile (org.apache.hadoop.io.SequenceFile): 118
Text (org.apache.hadoop.io.Text): 112
FileNotFoundException (java.io.FileNotFoundException): 102
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 94
Job (org.apache.hadoop.mapreduce.Job): 81