Example 81 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

In class TestAppendDifferentChecksum, method testAlgoSwitchRandomized.

/**
   * Test which randomly alternates between appending with
   * CRC32 and with CRC32C, crossing several block boundaries.
   * Then, checks that all of the data can be read back correctly.
   */
@Test(timeout = RANDOM_TEST_RUNTIME * 2)
public void testAlgoSwitchRandomized() throws IOException {
    FileSystem fsWithCrc32 = createFsWithChecksum("CRC32", 512);
    FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);
    Path p = new Path("/testAlgoSwitchRandomized");
    long seed = Time.now();
    System.out.println("seed: " + seed);
    Random r = new Random(seed);
    // Create an empty file to start
    IOUtils.closeStream(fsWithCrc32.create(p));
    long st = Time.now();
    int len = 0;
    while (Time.now() - st < RANDOM_TEST_RUNTIME) {
        int thisLen = r.nextInt(500);
        FileSystem fs = (r.nextBoolean() ? fsWithCrc32 : fsWithCrc32C);
        FSDataOutputStream stm = fs.append(p);
        try {
            AppendTestUtil.write(stm, len, thisLen);
        } finally {
            stm.close();
        }
        len += thisLen;
    }
    AppendTestUtil.check(fsWithCrc32, p, len);
    AppendTestUtil.check(fsWithCrc32C, p, len);
}
Also used: Path(org.apache.hadoop.fs.Path) Random(java.util.Random) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
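
The helper createFsWithChecksum is defined elsewhere in TestAppendDifferentChecksum. A minimal sketch of what it plausibly does, assuming it clones the base FileSystem's configuration and overrides the checksum type and chunk size (the fs field and the exact body are assumptions, not the verbatim helper):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;

private FileSystem createFsWithChecksum(String type, int bytesPerChecksum) throws IOException {
    // Clone the shared test configuration so each client gets its own copy.
    Configuration conf = new Configuration(fs.getConf());
    // dfs.checksum.type selects the algorithm (CRC32 or CRC32C)...
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, type);
    // ...and dfs.bytes-per-checksum sets how many bytes each checksum covers.
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesPerChecksum);
    // A distinct FileSystem instance per algorithm is needed because the
    // checksum settings are fixed in the client's configuration.
    return FileSystem.get(fs.getUri(), conf);
}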

Example 82 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

In class TestDFSClientFailover, method testFileContextDoesntDnsResolveLogicalURI.

/**
   * Same as the corresponding FileSystem test (not shown in this excerpt),
   * but for FileContext: the logical HA hostname must never be DNS-resolved.
   */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    NameService spyNS = spyOnNameService();
    String logicalHost = fs.getUri().getHost();
    Configuration haClientConf = fs.getConf();
    FileContext fc = FileContext.getFileContext(haClientConf);
    Path root = new Path("/");
    fc.listStatus(root);
    fc.listStatus(fc.makeQualified(root));
    fc.getDefaultFileSystem().getCanonicalServiceName();
    // Ensure that the logical hostname was never resolved.
    Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Also used: Path(org.apache.hadoop.fs.Path) NameService(sun.net.spi.nameservice.NameService) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FileContext(org.apache.hadoop.fs.FileContext) Test(org.junit.Test)
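
HATestUtil.configureFailoverFs builds a client configuration around a logical HA nameservice. A hedged sketch of the kind of configuration involved; the method name logicalHaFs, the nameservice "minidfs-ns", and the addresses below are illustrative assumptions:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

static FileSystem logicalHaFs() throws IOException {
    Configuration conf = new Configuration();
    // Declare a logical nameservice backed by two NameNodes.
    conf.set("dfs.nameservices", "minidfs-ns");
    conf.set("dfs.ha.namenodes.minidfs-ns", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.minidfs-ns.nn1", "127.0.0.1:8020");
    conf.set("dfs.namenode.rpc-address.minidfs-ns.nn2", "127.0.0.1:8021");
    // The failover proxy provider resolves the logical name to a real
    // NameNode, which is why the logical host must never reach DNS.
    conf.set("dfs.client.failover.proxy.provider.minidfs-ns",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
    conf.set("fs.defaultFS", "hdfs://minidfs-ns");
    return FileSystem.get(conf);
}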

Example 83 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

In class TestDFSClientFailover, method testFailoverOnConnectTimeout.

/**
   * Test that even a non-idempotent method will properly fail over if the
   * first IPC attempt times out trying to connect. Regression test for
   * HDFS-4404. 
   */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
    conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, InjectingSocketFactory.class, SocketFactory.class);
    // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
    // when connecting to the first NN.
    InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    // Make the second NN the active one.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    // Call a non-idempotent method, and ensure the failover of the call proceeds
    // successfully.
    IOUtils.closeStream(fs.create(TEST_FILE));
}
Also used: FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
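
InjectingSocketFactory is a private helper inside TestDFSClientFailover. A minimal sketch of how such a factory could inject the timeout, assuming it extends Hadoop's StandardSocketFactory and intercepts connect() for the targeted port (the real helper's details may differ):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.StandardSocketFactory;

public class InjectingSocketFactory extends StandardSocketFactory {

    // Port of the NameNode whose connections should appear to time out.
    static volatile int portToInjectOn = -1;

    @Override
    public Socket createSocket() throws IOException {
        // Return a Socket whose connect() fails fast for the chosen port.
        // ConnectTimeoutException is what the RPC layer treats as a
        // connect-phase failure, which is safe to fail over even for
        // non-idempotent calls.
        return new Socket() {
            @Override
            public void connect(SocketAddress endpoint, int timeout) throws IOException {
                if (endpoint instanceof InetSocketAddress
                        && ((InetSocketAddress) endpoint).getPort() == portToInjectOn) {
                    throw new ConnectTimeoutException("injected timeout connecting to " + endpoint);
                }
                super.connect(endpoint, timeout);
            }
        };
    }
}

Registering the factory under HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, as the test does, routes every RPC client socket through it.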

Example 84 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

In class TestDFSClientRetries, method testFailuresArePerOperation.

/**
   * This tests that DFSInputStream failures are counted for a given read
   * operation, and not over the lifetime of the stream. It is a regression
   * test for HDFS-127.
   */
@Test
public void testFailuresArePerOperation() throws Exception {
    long fileSize = 4096;
    Path file = new Path("/testFile");
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        int maxBlockAcquires = client.getConf().getMaxBlockAcquireFailures();
        assertTrue(maxBlockAcquires > 0);
        DFSTestUtil.createFile(fs, file, fileSize, (short) 1, 12345L);
        // If the client will retry maxBlockAcquires times, then if we fail
        // any more than that number of times, the operation should entirely
        // fail.
        doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires + 1)).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
        try {
            IOUtils.copyBytes(client.open(file.toString()), new IOUtils.NullOutputStream(), conf, true);
            fail("Didn't get exception");
        } catch (IOException ioe) {
            DFSClient.LOG.info("Got expected exception", ioe);
        }
        // If we fail exactly that many times, then it should succeed.
        doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
        IOUtils.copyBytes(client.open(file.toString()), new IOUtils.NullOutputStream(), conf, true);
        DFSClient.LOG.info("Starting test case for failure reset");
        // Now the tricky case: if we fail a few times on one read, then succeed,
        // then fail some more on another read, the overall operation shouldn't
        // fail, because the failure count resets for each new user-level read.
        doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
        DFSInputStream is = client.open(file.toString());
        byte[] buf = new byte[10];
        IOUtils.readFully(is, buf, 0, buf.length);
        DFSClient.LOG.info("First read successful after some failures.");
        // Further reads at this point will succeed since it has the good block locations.
        // So, force the block locations on this stream to be refreshed from bad info.
        // When reading again, it should start from a fresh failure count, since
        // we're starting a new operation on the user level.
        doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
        is.openInfo(true);
        // Seek to beginning forces a reopen of the BlockReader - otherwise it'll
        // just keep reading on the existing stream and the fact that we've poisoned
        // the block info won't do anything.
        is.seek(0);
        IOUtils.readFully(is, buf, 0, buf.length);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) IOException(java.io.IOException) IOUtils(org.apache.hadoop.io.IOUtils) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
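
FailNTimesAnswer is a private helper in TestDFSClientRetries. A hedged sketch of its likely shape: a Mockito Answer that always fetches the genuine block locations from the real NameNode, but poisons the first N responses so each read attempt burns one block-acquire failure. The poisoning step is stubbed out below; it is an assumption about the helper, not the verbatim code:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

private static class FailNTimesAnswer implements Answer<LocatedBlocks> {
    private final NamenodeProtocols realNN;
    private int failuresLeft;

    FailNTimesAnswer(NamenodeProtocols realNN, int timesToFail) {
        this.realNN = realNN;
        this.failuresLeft = timesToFail;
    }

    @Override
    public LocatedBlocks answer(InvocationOnMock invocation) throws IOException {
        // Always ask the real NameNode so a genuine answer stays available.
        Object[] args = invocation.getArguments();
        LocatedBlocks realAnswer = realNN.getBlockLocations(
            (String) args[0], (Long) args[1], (Long) args[2]);
        if (failuresLeft-- > 0) {
            // Poison the first N answers: the idea is to return locations
            // pointing at an unreachable DataNode so the read attempt fails.
            return makeBadBlockList(realAnswer);
        }
        return realAnswer;
    }

    // Stub only: the real test presumably rewrites the DataNode list to
    // reference a non-existent node. Construction details are elided here.
    private static LocatedBlocks makeBadBlockList(LocatedBlocks good) {
        throw new UnsupportedOperationException("sketch only");
    }
}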

Example 85 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

In class TestSafeMode, method testOperationsWhileInSafeMode.

/**
   * Run various fs operations while the NN is in safe mode,
   * assert that they are either allowed or fail as expected.
   */
@Test
public void testOperationsWhileInSafeMode() throws IOException, InterruptedException {
    final Path file1 = new Path("/file1");
    assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
    DFSTestUtil.createFile(fs, file1, 1024, (short) 1, 0);
    assertTrue("Could not enter SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
    runFsFun("Set quota while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            ((DistributedFileSystem) fs).setQuota(file1, 1, 1);
        }
    });
    runFsFun("Set perm while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setPermission(file1, FsPermission.getDefault());
        }
    });
    runFsFun("Set owner while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setOwner(file1, "user", "group");
        }
    });
    runFsFun("Set repl while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setReplication(file1, (short) 1);
        }
    });
    runFsFun("Append file while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            DFSTestUtil.appendFile(fs, file1, "new bytes");
        }
    });
    runFsFun("Truncate file while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.truncate(file1, 0);
        }
    });
    runFsFun("Delete file while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.delete(file1, false);
        }
    });
    runFsFun("Rename file while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.rename(file1, new Path("file2"));
        }
    });
    runFsFun("Set time while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setTimes(file1, 0, 0);
        }
    });
    runFsFun("modifyAclEntries while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
        }
    });
    runFsFun("removeAclEntries while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
        }
    });
    runFsFun("removeDefaultAcl while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.removeDefaultAcl(file1);
        }
    });
    runFsFun("removeAcl while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.removeAcl(file1);
        }
    });
    runFsFun("setAcl while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setAcl(file1, Lists.<AclEntry>newArrayList());
        }
    });
    runFsFun("setXAttr while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setXAttr(file1, "user.a1", null);
        }
    });
    runFsFun("removeXAttr while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.removeXAttr(file1, "user.a1");
        }
    });
    try {
        DFSTestUtil.readFile(fs, file1);
    } catch (IOException ioe) {
        fail("Set times failed while in SM");
    }
    try {
        fs.getAclStatus(file1);
    } catch (IOException ioe) {
        fail("getAclStatus failed while in SM");
    }
    // Test access
    UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
    FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {

        @Override
        public FileSystem run() throws IOException {
            return FileSystem.get(conf);
        }
    });
    myfs.access(file1, FsAction.READ);
    try {
        myfs.access(file1, FsAction.WRITE);
        fail("The access call should have failed.");
    } catch (AccessControlException e) {
    // expected
    }
    assertFalse("Could not leave SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
Also used: Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) AclEntry(org.apache.hadoop.fs.permission.AclEntry) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
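
runFsFun and FSRun are helpers defined elsewhere in TestSafeMode. A hedged sketch of their likely shape, assuming each mutating operation is expected to be rejected with a safe-mode error; the shared fs field and the exact assertions are assumptions:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.ipc.RemoteException;

interface FSRun {
    void run(FileSystem fs) throws IOException;
}

void runFsFun(String msg, FSRun f) {
    try {
        // "fs" is the test class's shared FileSystem field (assumed).
        f.run(fs);
        // Reaching this line means the operation succeeded despite safe mode.
        fail(msg);
    } catch (RemoteException re) {
        // Safe-mode rejections arrive client-side as RemoteExceptions
        // wrapping a SafeModeException.
        assertEquals(SafeModeException.class.getName(), re.getClassName());
    } catch (IOException ioe) {
        fail(msg + ": unexpected exception " + ioe);
    }
}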

Aggregations

FileSystem (org.apache.hadoop.fs.FileSystem): 2611 usages
Path (org.apache.hadoop.fs.Path): 2199 usages
Test (org.junit.Test): 1034 usages
Configuration (org.apache.hadoop.conf.Configuration): 890 usages
IOException (java.io.IOException): 757 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 419 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 264 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 227 usages
ArrayList (java.util.ArrayList): 208 usages
File (java.io.File): 181 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 165 usages
JobConf (org.apache.hadoop.mapred.JobConf): 163 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 151 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 145 usages
URI (java.net.URI): 135 usages
SequenceFile (org.apache.hadoop.io.SequenceFile): 118 usages
Text (org.apache.hadoop.io.Text): 112 usages
FileNotFoundException (java.io.FileNotFoundException): 102 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 94 usages
Job (org.apache.hadoop.mapreduce.Job): 81 usages