
Example 1 with QuotaExceededException

Use of org.apache.hadoop.hdfs.protocol.QuotaExceededException in project hadoop by apache.

The class TestAbandonBlock, method testQuotaUpdatedWhenBlockAbandoned.

/** Make sure that the quota is decremented correctly when a block is abandoned. */
@Test
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
    // Setting diskspace quota to 3MB
    fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
    // Start writing a file with 2 replicas to ensure each datanode has one.
    // Block Size is 1MB.
    String src = FILE_NAME_PREFIX + "test_quota1";
    FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 2, 1024 * 1024);
    for (int i = 0; i < 1024; i++) {
        fout.writeByte(123);
    }
    // Shut down one datanode, causing block abandonment.
    cluster.getDataNodes().get(0).shutdown();
    // Close the file; a new block will be allocated with 2MB pending size.
    try {
        fout.close();
    } catch (QuotaExceededException e) {
        fail("Unexpected quota exception when closing fout");
    }
}
Also used: Path (org.apache.hadoop.fs.Path), QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
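
For readers who want the quota call outside the MiniDFSCluster harness: the example's first line uses DistributedFileSystem#setQuota(Path, long, long), where HdfsConstants.QUOTA_DONT_SET leaves one of the two limits untouched. Below is a minimal standalone sketch of the same call; the NameNode address and the /data path are illustrative assumptions, not taken from the test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SetSpaceQuotaSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020"); // illustrative address
        try (FileSystem fs = FileSystem.get(conf)) {
            // The cast assumes fs.defaultFS points at an HDFS NameNode.
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // Set only the space quota (3MB); QUOTA_DONT_SET leaves the
            // namespace quota unchanged.
            dfs.setQuota(new Path("/data"), HdfsConstants.QUOTA_DONT_SET,
                    3L * 1024 * 1024);
        }
    }
}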

Example 2 with QuotaExceededException

Use of org.apache.hadoop.hdfs.protocol.QuotaExceededException in project hadoop by apache.

The class TestHDFSConcat, method testConcatWithQuotaIncrease.

@Test
public void testConcatWithQuotaIncrease() throws IOException {
    final short repl = 3;
    final int srcNum = 10;
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path[] srcs = new Path[srcNum];
    final Path target = new Path(bar, "target");
    DFSTestUtil.createFile(dfs, target, blockSize, repl, 0L);
    final long dsQuota = blockSize * repl + blockSize * srcNum * REPL_FACTOR;
    dfs.setQuota(foo, Long.MAX_VALUE - 1, dsQuota);
    for (int i = 0; i < srcNum; i++) {
        srcs[i] = new Path(bar, "src" + i);
        DFSTestUtil.createFile(dfs, srcs[i], blockSize, REPL_FACTOR, 0L);
    }
    ContentSummary summary = dfs.getContentSummary(bar);
    Assert.assertEquals(11, summary.getFileCount());
    Assert.assertEquals(dsQuota, summary.getSpaceConsumed());
    try {
        dfs.concat(target, srcs);
        fail("QuotaExceededException expected");
    } catch (RemoteException e) {
        Assert.assertTrue(e.unwrapRemoteException() instanceof QuotaExceededException);
    }
    dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    dfs.concat(target, srcs);
    summary = dfs.getContentSummary(bar);
    Assert.assertEquals(1, summary.getFileCount());
    Assert.assertEquals(blockSize * repl * (srcNum + 1), summary.getSpaceConsumed());
}
Also used: Path (org.apache.hadoop.fs.Path), QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException), ContentSummary (org.apache.hadoop.fs.ContentSummary), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
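
A detail worth noting in this example: over RPC a quota violation arrives as an org.apache.hadoop.ipc.RemoteException wrapping the real cause, which is why the test calls unwrapRemoteException() before the instanceof check. Here is a small hedged helper for that pattern; the class and method names (QuotaErrors, isQuotaViolation) are my own illustration, not part of the Hadoop test.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.ipc.RemoteException;

final class QuotaErrors {
    private QuotaErrors() {
    }

    /** True if the given exception is a quota violation, wrapped or not. */
    static boolean isQuotaViolation(IOException e) {
        if (e instanceof QuotaExceededException) {
            return true; // also covers the NS/DS/storage-type subclasses
        }
        if (e instanceof RemoteException) {
            // unwrapRemoteException re-materializes the server-side exception
            // when its class matches one of the listed lookup types.
            IOException unwrapped = ((RemoteException) e)
                    .unwrapRemoteException(QuotaExceededException.class);
            return unwrapped instanceof QuotaExceededException;
        }
        return false;
    }
}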

Example 3 with QuotaExceededException

Use of org.apache.hadoop.hdfs.protocol.QuotaExceededException in project hadoop by apache.

The class TestQuota, method testBlockAllocationAdjustsUsageConservatively.

/**
   * Violate a space quota using files of size < 1 block. Test that block
   * allocation conservatively assumes that for quota checking the entire
   * space of the block is used.
   */
@Test
public void testBlockAllocationAdjustsUsageConservatively() throws Exception {
    final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(parent));
    DFSAdmin admin = new DFSAdmin(conf);
    Path dir = new Path(parent, "test");
    Path file1 = new Path(parent, "test/test1");
    Path file2 = new Path(parent, "test/test2");
    boolean exceededQuota = false;
    // total space usage including repl.
    final int QUOTA_SIZE = 3 * DEFAULT_BLOCK_SIZE;
    final int FILE_SIZE = DEFAULT_BLOCK_SIZE / 2;
    ContentSummary c;
    // Create the directory and set the quota
    assertTrue(dfs.mkdirs(dir));
    runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE), dir.toString());
    // Creating a file should use half the quota
    DFSTestUtil.createFile(dfs, file1, FILE_SIZE, (short) 3, 1L);
    DFSTestUtil.waitReplication(dfs, file1, (short) 3);
    c = dfs.getContentSummary(dir);
    compareQuotaUsage(c, dfs, dir);
    checkContentSummary(c, webhdfs.getContentSummary(dir));
    assertEquals("Quota is half consumed", QUOTA_SIZE / 2, c.getSpaceConsumed());
    // Creating a second file should fail: block allocation conservatively
    // charges a full block per replica, and the first file already
    // used half the quota.
    try {
        DFSTestUtil.createFile(dfs, file2, FILE_SIZE, (short) 3, 1L);
    } catch (QuotaExceededException e) {
        exceededQuota = true;
    }
    assertTrue("Quota not exceeded", exceededQuota);
}
Also used: Path (org.apache.hadoop.fs.Path), NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException), DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException), QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), ContentSummary (org.apache.hadoop.fs.ContentSummary), Test (org.junit.Test)
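
The arithmetic behind this test is worth spelling out. With block size B, the quota is 3B and each file writes B/2 bytes at replication 3, so the finished first file consumes 1.5B; while the second file's block is being allocated, the NameNode conservatively charges a full block per replica (3B), and 1.5B + 3B exceeds the 3B quota. Below is a plain-Java sketch of that bookkeeping, a toy model rather than NameNode internals; the 512-byte block size is a stand-in, since the test never shows DEFAULT_BLOCK_SIZE's value.

/** Toy model of conservative quota accounting; constants mirror the test. */
public class ConservativeQuotaMath {
    public static void main(String[] args) {
        final long blockSize = 512;           // stand-in for DEFAULT_BLOCK_SIZE
        final long quota = 3 * blockSize;     // QUOTA_SIZE
        final long fileSize = blockSize / 2;  // FILE_SIZE
        final short repl = 3;

        // A completed file is charged for the bytes actually written.
        long used = fileSize * repl;          // 1.5 * blockSize
        System.out.println("after file1: " + used + " of " + quota);

        // While a new block is allocated, a full block per replica is assumed.
        long pending = blockSize * repl;      // 3 * blockSize
        System.out.println("second create violates quota: "
                + (used + pending > quota));  // prints true
    }
}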

Example 4 with QuotaExceededException

Use of org.apache.hadoop.hdfs.protocol.QuotaExceededException in project hadoop by apache.

The class TestQuota, method testQuotaCommands.

/**
 * Test quota-related commands:
 * setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count.
 */
@Test
public void testQuotaCommands() throws Exception {
    DFSAdmin admin = new DFSAdmin(conf);
    final Path dir = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(dir));
    final int fileLen = 1024;
    final short replication = 5;
    final long spaceQuota = fileLen * replication * 15 / 8;
    // 1: create a directory test and set its quota to be 3
    final Path parent = new Path(dir, "test");
    assertTrue(dfs.mkdirs(parent));
    String[] args = new String[] { "-setQuota", "3", parent.toString() };
    runCommand(admin, args, false);
    // try setting the space quota with a 'binary prefix'
    runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
    assertEquals(2L << 40, dfs.getContentSummary(parent).getSpaceQuota());
    // set the disk space quota to spaceQuota (fileLen * replication * 15 / 8 = 9600 bytes)
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota), parent.toString());
    // 2: create directory /test/data0
    final Path childDir0 = new Path(parent, "data0");
    assertTrue(dfs.mkdirs(childDir0));
    // 3: create a file /test/datafile0
    final Path childFile0 = new Path(parent, "datafile0");
    DFSTestUtil.createFile(dfs, childFile0, fileLen, replication, 0);
    // 4: count -q /test
    ContentSummary c = dfs.getContentSummary(parent);
    compareQuotaUsage(c, dfs, parent);
    assertEquals(c.getFileCount() + c.getDirectoryCount(), 3);
    assertEquals(c.getQuota(), 3);
    assertEquals(c.getSpaceConsumed(), fileLen * replication);
    assertEquals(c.getSpaceQuota(), spaceQuota);
    // 5: count -q /test/data0
    c = dfs.getContentSummary(childDir0);
    compareQuotaUsage(c, dfs, childDir0);
    assertEquals(c.getFileCount() + c.getDirectoryCount(), 1);
    assertEquals(c.getQuota(), -1);
    // check disk space consumed
    c = dfs.getContentSummary(parent);
    compareQuotaUsage(c, dfs, parent);
    assertEquals(c.getSpaceConsumed(), fileLen * replication);
    // 6: create a directory /test/data1
    final Path childDir1 = new Path(parent, "data1");
    boolean hasException = false;
    try {
        assertFalse(dfs.mkdirs(childDir1));
    } catch (QuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    OutputStream fout;
    // 7: create a file /test/datafile1
    final Path childFile1 = new Path(parent, "datafile1");
    hasException = false;
    try {
        fout = dfs.create(childFile1);
    } catch (QuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // 8: clear quota /test
    runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
    c = dfs.getContentSummary(parent);
    compareQuotaUsage(c, dfs, parent);
    assertEquals(c.getQuota(), -1);
    assertEquals(c.getSpaceQuota(), spaceQuota);
    // 9: clear quota /test/data0
    runCommand(admin, new String[] { "-clrQuota", childDir0.toString() }, false);
    c = dfs.getContentSummary(childDir0);
    compareQuotaUsage(c, dfs, childDir0);
    assertEquals(c.getQuota(), -1);
    // 10: create a file /test/datafile1
    fout = dfs.create(childFile1, replication);
    // 10.s: but writing fileLen bytes should result in a quota exception
    try {
        fout.write(new byte[fileLen]);
        fout.close();
        Assert.fail();
    } catch (QuotaExceededException e) {
        IOUtils.closeStream(fout);
    }
    //delete the file
    dfs.delete(childFile1, false);
    // 9.s: clear diskspace quota
    runCommand(admin, false, "-clrSpaceQuota", parent.toString());
    c = dfs.getContentSummary(parent);
    compareQuotaUsage(c, dfs, parent);
    assertEquals(c.getQuota(), -1);
    assertEquals(c.getSpaceQuota(), -1);
    // now creating childFile1 should succeed
    DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
    // 11: set the quota of /test to be 1
    // HADOOP-5872 - we can set quota even if it is immediately violated
    args = new String[] { "-setQuota", "1", parent.toString() };
    runCommand(admin, args, false);
    // for space quota
    runCommand(admin, false, "-setSpaceQuota", Integer.toString(fileLen), args[2]);
    // 12: set the quota of /test/data0 to be 1
    args = new String[] { "-setQuota", "1", childDir0.toString() };
    runCommand(admin, args, false);
    // 13: not able create a directory under data0
    hasException = false;
    try {
        assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
    } catch (QuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    c = dfs.getContentSummary(childDir0);
    compareQuotaUsage(c, dfs, childDir0);
    assertEquals(c.getDirectoryCount() + c.getFileCount(), 1);
    assertEquals(c.getQuota(), 1);
    // 14a: set quota on a non-existent directory
    Path nonExistentPath = new Path(dir, "test1");
    assertFalse(dfs.exists(nonExistentPath));
    args = new String[] { "-setQuota", "1", nonExistentPath.toString() };
    runCommand(admin, args, true);
    // for space quota
    runCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.toString());
    // 14b: set quota on a file
    assertTrue(dfs.isFile(childFile0));
    args[1] = childFile0.toString();
    runCommand(admin, args, true);
    // same for space quota
    runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
    // 15a: clear quota on a file
    args[0] = "-clrQuota";
    runCommand(admin, args, true);
    runCommand(admin, true, "-clrSpaceQuota", args[1]);
    // 15b: clear quota on a non-existent directory
    args[1] = nonExistentPath.toString();
    runCommand(admin, args, true);
    runCommand(admin, true, "-clrSpaceQuota", args[1]);
    // 16a: set the quota of /test to be 0
    args = new String[] { "-setQuota", "0", parent.toString() };
    runCommand(admin, args, true);
    runCommand(admin, false, "-setSpaceQuota", "0", args[2]);
    // 16b: set the quota of /test to be -1
    args[1] = "-1";
    runCommand(admin, args, true);
    runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
    // 16c: set the quota of /test to be Long.MAX_VALUE+1
    args[1] = String.valueOf(Long.MAX_VALUE + 1L);
    runCommand(admin, args, true);
    runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
    // 16d: set the quota of /test to be a non integer
    args[1] = "33aa1.5";
    runCommand(admin, args, true);
    runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
    // 16e: set space quota with a value larger than Long.MAX_VALUE
    runCommand(admin, true, "-setSpaceQuota", (Long.MAX_VALUE / 1024 / 1024 + 1024) + "m", args[2]);
    // 17:  setQuota by a non-administrator
    final String username = "userxx";
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, new String[] { "groupyy" });
    // need final ref for doAs block
    final String[] args2 = args.clone();
    ugi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            assertEquals("Not running as new user", username, UserGroupInformation.getCurrentUser().getShortUserName());
            DFSAdmin userAdmin = new DFSAdmin(conf);
            args2[1] = "100";
            runCommand(userAdmin, args2, true);
            runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
            // 18: clrQuota by a non-administrator
            String[] args3 = new String[] { "-clrQuota", parent.toString() };
            runCommand(userAdmin, args3, true);
            runCommand(userAdmin, true, "-clrSpaceQuota", args3[1]);
            return null;
        }
    });
    // 19: clrQuota on the root directory ("/") should fail
    runCommand(admin, true, "-clrQuota", "/");
    // 20: setQuota on the root directory ("/") should succeed
    runCommand(admin, false, "-setQuota", "1000000", "/");
    runCommand(admin, true, "-clrQuota", "/");
    runCommand(admin, false, "-clrSpaceQuota", "/");
    runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
    runCommand(admin, false, "-clrSpaceQuota", parent.toString());
    // 2: create directory /test/data2
    final Path childDir2 = new Path(parent, "data2");
    assertTrue(dfs.mkdirs(childDir2));
    final Path childFile2 = new Path(childDir2, "datafile2");
    final Path childFile3 = new Path(childDir2, "datafile3");
    final long spaceQuota2 = DEFAULT_BLOCK_SIZE * replication;
    final long fileLen2 = DEFAULT_BLOCK_SIZE;
    // set the space quota to a very low value
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
    // clear space quota
    runCommand(admin, false, "-clrSpaceQuota", childDir2.toString());
    // create a file that is greater than the size of space quota
    DFSTestUtil.createFile(dfs, childFile2, fileLen2, replication, 0);
    // now set space quota again. This should succeed
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
    hasException = false;
    try {
        DFSTestUtil.createFile(dfs, childFile3, fileLen2, replication, 0);
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // now test the same for root
    final Path childFile4 = new Path(dir, "datafile2");
    final Path childFile5 = new Path(dir, "datafile3");
    runCommand(admin, true, "-clrQuota", "/");
    runCommand(admin, false, "-clrSpaceQuota", "/");
    // set the space quota to a very low value
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
    runCommand(admin, false, "-clrSpaceQuota", "/");
    DFSTestUtil.createFile(dfs, childFile4, fileLen2, replication, 0);
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
    hasException = false;
    try {
        DFSTestUtil.createFile(dfs, childFile5, fileLen2, replication, 0);
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    assertEquals(5, cluster.getNamesystem().getFSDirectory().getYieldCount());
    /*
     * Clear the space quota for root, otherwise other tests may fail due to
     * insufficient space quota.
     */
    runCommand(admin, false, "-clrSpaceQuota", "/");
}
Also used: Path (org.apache.hadoop.fs.Path), NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException), DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException), QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException), QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException), ByteArrayOutputStream (java.io.ByteArrayOutputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), OutputStream (java.io.OutputStream), IOException (java.io.IOException), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), ContentSummary (org.apache.hadoop.fs.ContentSummary), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
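
One side note from step 1 above: DFSAdmin accepts binary prefixes such as "2t" for quota values, which the test checks against 2L << 40. The sketch below parses such a value with Hadoop's StringUtils.TraditionalBinaryPrefix; that this is the exact parser DFSAdmin uses internally is an assumption on my part.

import org.apache.hadoop.util.StringUtils;

public class BinaryPrefixDemo {
    public static void main(String[] args) {
        // "2t" means 2 tebibytes under traditional (binary) prefixes.
        long bytes = StringUtils.TraditionalBinaryPrefix.string2long("2t");
        System.out.println(bytes);             // 2199023255552
        System.out.println(bytes == 2L << 40); // true
    }
}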

Example 5 with QuotaExceededException

Use of org.apache.hadoop.hdfs.protocol.QuotaExceededException in project hadoop by apache.

The class TestQuota, method testMultipleFilesSmallerThanOneBlock.

/**
 * Like the previous test, but creates many files. This covers bugs where
 * the quota adjustment is incorrect but it takes many files to accrue a
 * big enough accounting error to violate the quota.
 */
@Test
public void testMultipleFilesSmallerThanOneBlock() throws Exception {
    final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(parent));
    Configuration dfsConf = new HdfsConfiguration();
    final int BLOCK_SIZE = 6 * 1024;
    dfsConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    // Make it relinquish locks. When run serially, the result should
    // be identical.
    dfsConf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(3).build();
    dfsCluster.waitActive();
    FileSystem fs = dfsCluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(dfsConf);
    final String nnAddr = dfsConf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
    final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
    System.out.println("webhdfsuri=" + webhdfsuri);
    final FileSystem webHDFS = new Path(webhdfsuri).getFileSystem(dfsConf);
    try {
        // Test for the default namespace quota
        long nsQuota = FSImageTestUtil.getNSQuota(dfsCluster.getNameNode().getNamesystem());
        assertTrue("Default namespace quota expected as long max. But the value is :" + nsQuota, nsQuota == Long.MAX_VALUE);
        Path dir = new Path(parent, "test");
        boolean exceededQuota = false;
        ContentSummary c;
        final int FILE_SIZE = 1024; // 1kb file
        final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize(dir); // 32 * 6kb block = 192kb quota
        assertEquals(6 * 1024, fs.getDefaultBlockSize(dir));
        assertEquals(192 * 1024, QUOTA_SIZE);
        // Create the dir and set the quota. We need to enable the quota before
        // writing the files as setting the quota afterwards will over-write
        // the cached disk space used for quota verification with the actual
        // amount used as calculated by INode#spaceConsumedInTree.
        assertTrue(fs.mkdirs(dir));
        runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE), dir.toString());
        // We can create at most 59 files: block allocation is conservative and
        // initially charges a full block per replica, so we must leave room for
        // the last block: (58 * 3 * 1024) + (3 * 6 * 1024) = 192kb
        for (int i = 0; i < 59; i++) {
            Path file = new Path(parent, "test/test" + i);
            DFSTestUtil.createFile(fs, file, FILE_SIZE, (short) 3, 1L);
            DFSTestUtil.waitReplication(fs, file, (short) 3);
        }
        // Should account for all 59 files (almost QUOTA_SIZE)
        c = fs.getContentSummary(dir);
        compareQuotaUsage(c, fs, dir);
        checkContentSummary(c, webHDFS.getContentSummary(dir));
        assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3, c.getSpaceConsumed());
        assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3), 3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
        // Now check that trying to create another file violates the quota
        try {
            Path file = new Path(parent, "test/test59");
            DFSTestUtil.createFile(fs, file, FILE_SIZE, (short) 3, 1L);
            DFSTestUtil.waitReplication(fs, file, (short) 3);
        } catch (QuotaExceededException e) {
            exceededQuota = true;
        }
        assertTrue("Quota not exceeded", exceededQuota);
        assertEquals(2, dfsCluster.getNamesystem().getFSDirectory().getYieldCount());
    } finally {
        dfsCluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException), DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException), QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException), Configuration (org.apache.hadoop.conf.Configuration), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), FileSystem (org.apache.hadoop.fs.FileSystem), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), ContentSummary (org.apache.hadoop.fs.ContentSummary), Test (org.junit.Test)
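
Checking the numbers in this example: 59 files x 1024 bytes x 3 replicas = 181,248 bytes written against a 196,608-byte (192kb) quota, and allocating a 60th file's block conservatively adds 3 x 6kb = 18,432 bytes, pushing the total over the limit. Here is a plain-Java sketch of that loop, mirroring the test's constants but not its code.

/** Checks the small-file quota arithmetic from the test above. */
public class SmallFileQuotaMath {
    public static void main(String[] args) {
        final long blockSize = 6 * 1024;   // BLOCK_SIZE
        final long quota = 32 * blockSize; // QUOTA_SIZE = 192kb
        final long fileSize = 1024;        // FILE_SIZE
        final short repl = 3;

        // Allocating file i's block conservatively charges a full block per
        // replica on top of the bytes already written by files 0..i-1.
        for (int i = 0; i < 60; i++) {
            long written = (long) i * fileSize * repl;
            long pending = blockSize * repl;
            if (written + pending > quota) {
                // Prints "file 59 ...": files 0..58 (59 creates) succeed,
                // matching the test's loop bound.
                System.out.println("file " + i + " would violate the quota");
                break;
            }
        }
    }
}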

Aggregations

QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException): 7
Path (org.apache.hadoop.fs.Path): 6
Test (org.junit.Test): 6
ContentSummary (org.apache.hadoop.fs.ContentSummary): 5
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 4
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 4
DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin): 3
IOException (java.io.IOException): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1
InterruptedIOException (java.io.InterruptedIOException): 1
OutputStream (java.io.OutputStream): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
FileSystem (org.apache.hadoop.fs.FileSystem): 1
QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException): 1
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 1
RemoteException (org.apache.hadoop.ipc.RemoteException): 1
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 1
SpanId (org.apache.htrace.core.SpanId): 1