Example 46 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

The class TestQuota, method testNamespaceCommands.

/** Test commands that change the size of the name space:
   *  mkdirs, rename, and delete */
@Test
public void testNamespaceCommands() throws Exception {
    final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(parent));
    // 1: create directory nqdir0/qdir1/qdir20/nqdir30
    assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/qdir1/qdir20/nqdir30")));
    // 2: set the quota of nqdir0/qdir1 to be 6
    final Path quotaDir1 = new Path(parent, "nqdir0/qdir1");
    dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
    ContentSummary c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getDirectoryCount(), 3);
    assertEquals(c.getQuota(), 6);
    // 3: set the quota of nqdir0/qdir1/qdir20 to be 7
    final Path quotaDir2 = new Path(parent, "nqdir0/qdir1/qdir20");
    dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(c.getDirectoryCount(), 2);
    assertEquals(c.getQuota(), 7);
    // 4: Create directory nqdir0/qdir1/qdir21 and set its quota to 2
    final Path quotaDir3 = new Path(parent, "nqdir0/qdir1/qdir21");
    assertTrue(dfs.mkdirs(quotaDir3));
    dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
    c = dfs.getContentSummary(quotaDir3);
    compareQuotaUsage(c, dfs, quotaDir3);
    assertEquals(c.getDirectoryCount(), 1);
    assertEquals(c.getQuota(), 2);
    // 5: Create directory nqdir0/qdir1/qdir21/nqdir32
    Path tempPath = new Path(quotaDir3, "nqdir32");
    assertTrue(dfs.mkdirs(tempPath));
    c = dfs.getContentSummary(quotaDir3);
    compareQuotaUsage(c, dfs, quotaDir3);
    assertEquals(c.getDirectoryCount(), 2);
    assertEquals(c.getQuota(), 2);
    // 6: Create directory nqdir0/qdir1/qdir21/nqdir33
    tempPath = new Path(quotaDir3, "nqdir33");
    boolean hasException = false;
    try {
        assertFalse(dfs.mkdirs(tempPath));
    } catch (NSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    c = dfs.getContentSummary(quotaDir3);
    compareQuotaUsage(c, dfs, quotaDir3);
    assertEquals(c.getDirectoryCount(), 2);
    assertEquals(c.getQuota(), 2);
    // 7: Create directory nqdir0/qdir1/qdir20/nqdir31
    tempPath = new Path(quotaDir2, "nqdir31");
    assertTrue(dfs.mkdirs(tempPath));
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(c.getDirectoryCount(), 3);
    assertEquals(c.getQuota(), 7);
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getDirectoryCount(), 6);
    assertEquals(c.getQuota(), 6);
    // 8: Create directory nqdir0/qdir1/qdir20/nqdir33
    tempPath = new Path(quotaDir2, "nqdir33");
    hasException = false;
    try {
        assertFalse(dfs.mkdirs(tempPath));
    } catch (NSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // 9: Move nqdir0/qdir1/qdir21/nqdir32 to nqdir0/qdir1/qdir20/nqdir30
    tempPath = new Path(quotaDir2, "nqdir30");
    dfs.rename(new Path(quotaDir3, "nqdir32"), tempPath);
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(c.getDirectoryCount(), 4);
    assertEquals(c.getQuota(), 7);
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getDirectoryCount(), 6);
    assertEquals(c.getQuota(), 6);
    // 10: Move nqdir0/qdir1/qdir20/nqdir30 to nqdir0/qdir1/qdir21
    hasException = false;
    try {
        assertFalse(dfs.rename(tempPath, quotaDir3));
    } catch (NSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    assertTrue(dfs.exists(tempPath));
    assertFalse(dfs.exists(new Path(quotaDir3, "nqdir30")));
    // 10.a: Rename nqdir0/qdir1/qdir20/nqdir30 to nqdir0/qdir1/qdir21/nqdir32
    hasException = false;
    try {
        assertFalse(dfs.rename(tempPath, new Path(quotaDir3, "nqdir32")));
    } catch (QuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    assertTrue(dfs.exists(tempPath));
    assertFalse(dfs.exists(new Path(quotaDir3, "nqdir32")));
    // 11: Move nqdir0/qdir1/qdir20/nqdir30 to nqdir0
    assertTrue(dfs.rename(tempPath, new Path(parent, "nqdir0")));
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(c.getDirectoryCount(), 2);
    assertEquals(c.getQuota(), 7);
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getDirectoryCount(), 4);
    assertEquals(c.getQuota(), 6);
    // 12: Create directory nqdir0/nqdir30/nqdir33
    assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/nqdir30/nqdir33")));
    // 13: Move nqdir0/nqdir30 to nqdir0/qdir1/qdir20/nqdir30
    hasException = false;
    try {
        assertFalse(dfs.rename(new Path(parent, "nqdir0/nqdir30"), tempPath));
    } catch (NSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // 14: Move nqdir0/qdir1/qdir21 to nqdir0/qdir1/qdir20
    assertTrue(dfs.rename(quotaDir3, quotaDir2));
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getDirectoryCount(), 4);
    assertEquals(c.getQuota(), 6);
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(c.getDirectoryCount(), 3);
    assertEquals(c.getQuota(), 7);
    tempPath = new Path(quotaDir2, "qdir21");
    c = dfs.getContentSummary(tempPath);
    compareQuotaUsage(c, dfs, tempPath);
    assertEquals(c.getDirectoryCount(), 1);
    assertEquals(c.getQuota(), 2);
    // 15: Delete nqdir0/qdir1/qdir20/qdir21
    dfs.delete(tempPath, true);
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(c.getDirectoryCount(), 2);
    assertEquals(c.getQuota(), 7);
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getDirectoryCount(), 3);
    assertEquals(c.getQuota(), 6);
    // 16: Move nqdir0/nqdir30 to nqdir0/qdir1/qdir20
    assertTrue(dfs.rename(new Path(parent, "nqdir0/nqdir30"), quotaDir2));
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(c.getDirectoryCount(), 5);
    assertEquals(c.getQuota(), 7);
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getDirectoryCount(), 6);
    assertEquals(c.getQuota(), 6);
}
Also used: Path(org.apache.hadoop.fs.Path) NSQuotaExceededException(org.apache.hadoop.hdfs.protocol.NSQuotaExceededException) DSQuotaExceededException(org.apache.hadoop.hdfs.protocol.DSQuotaExceededException) QuotaExceededException(org.apache.hadoop.hdfs.protocol.QuotaExceededException) ContentSummary(org.apache.hadoop.fs.ContentSummary) Test(org.junit.Test)
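
The test above reduces to a small recipe: set a namespace quota with setQuota, then read it back through getContentSummary. Below is a minimal standalone sketch of that recipe, assuming an already-initialized DistributedFileSystem handle (for example, from a MiniDFSCluster); the path name is illustrative.

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

// Sketch: cap a directory at 6 namespace objects (the directory itself
// counts toward its own quota) while leaving the space quota untouched.
static void namespaceQuotaSketch(DistributedFileSystem dfs) throws Exception {
    Path dir = new Path("/quota-demo"); // hypothetical path
    dfs.mkdirs(dir);
    dfs.setQuota(dir, 6, HdfsConstants.QUOTA_DONT_SET);
    ContentSummary c = dfs.getContentSummary(dir);
    // getQuota() is the namespace quota; directory + file counts are the usage.
    System.out.println("quota=" + c.getQuota()
        + " used=" + (c.getDirectoryCount() + c.getFileCount()));
}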

Example 47 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

The class TestFileTruncate, method testSnapshotWithTruncates.

void testSnapshotWithTruncates(int... deleteOrder) throws IOException {
    fs.mkdirs(parent);
    fs.setQuota(parent, 100, 1000);
    fs.allowSnapshot(parent);
    String truncateFile = "testSnapshotWithTruncates";
    final Path src = new Path(parent, truncateFile);
    int[] length = new int[3];
    length[0] = 3 * BLOCK_SIZE;
    DFSTestUtil.createFile(fs, src, 64, length[0], BLOCK_SIZE, REPLICATION, 0L);
    Block firstBlk = getLocatedBlocks(src).get(0).getBlock().getLocalBlock();
    Block lastBlk = getLocatedBlocks(src).getLastLocatedBlock().getBlock().getLocalBlock();
    Path[] snapshotFiles = new Path[3];
    // Diskspace consumed should be 12 bytes * 3. [blk 1,2,3]
    ContentSummary contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(36L));
    // Add file to snapshot and append
    String[] ss = new String[] { "ss0", "ss1", "ss2" };
    Path snapshotDir = fs.createSnapshot(parent, ss[0]);
    snapshotFiles[0] = new Path(snapshotDir, truncateFile);
    length[1] = 2 * BLOCK_SIZE;
    boolean isReady = fs.truncate(src, 2 * BLOCK_SIZE);
    assertTrue("Recovery is not expected.", isReady);
    // Diskspace consumed should be 12 bytes * 3. [blk 1,2 SS:3]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(36L));
    snapshotDir = fs.createSnapshot(parent, ss[1]);
    snapshotFiles[1] = new Path(snapshotDir, truncateFile);
    // Create another snapshot with truncate
    length[2] = BLOCK_SIZE + BLOCK_SIZE / 2;
    isReady = fs.truncate(src, BLOCK_SIZE + BLOCK_SIZE / 2);
    assertFalse("Recovery is expected.", isReady);
    checkBlockRecovery(src);
    snapshotDir = fs.createSnapshot(parent, ss[2]);
    snapshotFiles[2] = new Path(snapshotDir, truncateFile);
    assertFileLength(snapshotFiles[0], length[0]);
    assertBlockExists(lastBlk);
    // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(42L));
    fs.deleteSnapshot(parent, ss[deleteOrder[0]]);
    assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]);
    assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
    assertFileLength(src, length[2]);
    assertBlockExists(firstBlk);
    contentSummary = fs.getContentSummary(parent);
    if (fs.exists(snapshotFiles[0])) {
        // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
        assertThat(contentSummary.getSpaceConsumed(), is(42L));
        assertBlockExists(lastBlk);
    } else {
        // Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2]
        assertThat(contentSummary.getSpaceConsumed(), is(30L));
        assertBlockNotPresent(lastBlk);
    }
    fs.deleteSnapshot(parent, ss[deleteOrder[1]]);
    assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
    assertFileLength(src, length[2]);
    assertBlockExists(firstBlk);
    contentSummary = fs.getContentSummary(parent);
    if (fs.exists(snapshotFiles[0])) {
        // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
        assertThat(contentSummary.getSpaceConsumed(), is(42L));
        assertBlockExists(lastBlk);
    } else if (fs.exists(snapshotFiles[1])) {
        // Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2]
        assertThat(contentSummary.getSpaceConsumed(), is(30L));
        assertBlockNotPresent(lastBlk);
    } else {
        // Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:]
        assertThat(contentSummary.getSpaceConsumed(), is(18L));
        assertBlockNotPresent(lastBlk);
    }
    fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
    assertFileLength(src, length[2]);
    assertBlockExists(firstBlk);
    // Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(18L));
    assertThat(contentSummary.getLength(), is(6L));
    fs.delete(src, false);
    assertBlockNotPresent(firstBlk);
    // Diskspace consumed should be 0 bytes * 3. []
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(0L));
}
Also used: Path(org.apache.hadoop.fs.Path) ContentSummary(org.apache.hadoop.fs.ContentSummary) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
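
The "N bytes * 3" comments encode the accounting rule behind getSpaceConsumed(): logical bytes, summed over live and snapshot-retained blocks, multiplied by the replication factor. The fixture's constants are inferable from the assertions (REPLICATION is 3, and BLOCK_SIZE is evidently 4 bytes, since three blocks consume 36 bytes). A hedged sketch of the relationship, valid once no snapshot retains extra blocks:

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: with no snapshot-only blocks, spaceConsumed == length * replication.
// In the final state of the test above: length 6 -> 6 * 3 = 18 bytes consumed.
static void checkReplicationArithmetic(FileSystem fs, Path dir)
        throws java.io.IOException {
    ContentSummary cs = fs.getContentSummary(dir);
    long replication = 3; // the fixture's REPLICATION (assumption)
    assert cs.getSpaceConsumed() == cs.getLength() * replication;
}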

Example 48 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

The class TestFileTruncate, method testSnapshotWithAppendTruncate.

/**
   * Create three snapshots with appended and truncated file.
   * Delete snapshots in the specified order and verify that
   * remaining snapshots are still readable.
   */
void testSnapshotWithAppendTruncate(int... deleteOrder) throws IOException {
    FSDirectory fsDir = cluster.getNamesystem().getFSDirectory();
    fs.mkdirs(parent);
    fs.setQuota(parent, 100, 1000);
    fs.allowSnapshot(parent);
    String truncateFile = "testSnapshotWithAppendTruncate";
    final Path src = new Path(parent, truncateFile);
    int[] length = new int[4];
    length[0] = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
    DFSTestUtil.createFile(fs, src, 64, length[0], BLOCK_SIZE, REPLICATION, 0L);
    Block firstBlk = getLocatedBlocks(src).get(0).getBlock().getLocalBlock();
    Path[] snapshotFiles = new Path[4];
    // Diskspace consumed should be 10 bytes * 3. [blk 1,2,3]
    ContentSummary contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(30L));
    // Add file to snapshot and append
    String[] ss = new String[] { "ss0", "ss1", "ss2", "ss3" };
    Path snapshotDir = fs.createSnapshot(parent, ss[0]);
    snapshotFiles[0] = new Path(snapshotDir, truncateFile);
    length[1] = length[2] = length[0] + BLOCK_SIZE + 1;
    DFSTestUtil.appendFile(fs, src, BLOCK_SIZE + 1);
    Block lastBlk = getLocatedBlocks(src).getLastLocatedBlock().getBlock().getLocalBlock();
    // Diskspace consumed should be 15 bytes * 3. [blk 1,2,3,4]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(45L));
    // Create another snapshot without changes
    snapshotDir = fs.createSnapshot(parent, ss[1]);
    snapshotFiles[1] = new Path(snapshotDir, truncateFile);
    // Create another snapshot and append
    snapshotDir = fs.createSnapshot(parent, ss[2]);
    snapshotFiles[2] = new Path(snapshotDir, truncateFile);
    DFSTestUtil.appendFile(fs, src, BLOCK_SIZE - 1 + BLOCK_SIZE / 2);
    Block appendedBlk = getLocatedBlocks(src).getLastLocatedBlock().getBlock().getLocalBlock();
    // Diskspace consumed should be 20 bytes * 3. [blk 1,2,3,4,5]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(60L));
    // Truncate to block boundary
    int newLength = length[0] + BLOCK_SIZE / 2;
    boolean isReady = fs.truncate(src, newLength);
    assertTrue("Recovery is not expected.", isReady);
    assertFileLength(snapshotFiles[2], length[2]);
    assertFileLength(snapshotFiles[1], length[1]);
    assertFileLength(snapshotFiles[0], length[0]);
    assertBlockNotPresent(appendedBlk);
    // Diskspace consumed should be 16 bytes * 3. [blk 1,2,3 SS:4]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(48L));
    // Truncate full block again
    newLength = length[0] - BLOCK_SIZE / 2;
    isReady = fs.truncate(src, newLength);
    assertTrue("Recovery is not expected.", isReady);
    assertFileLength(snapshotFiles[2], length[2]);
    assertFileLength(snapshotFiles[1], length[1]);
    assertFileLength(snapshotFiles[0], length[0]);
    // Diskspace consumed should be 16 bytes * 3. [blk 1,2 SS:3,4]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(48L));
    // Truncate half of the last block
    newLength -= BLOCK_SIZE / 2;
    isReady = fs.truncate(src, newLength);
    assertFalse("Recovery is expected.", isReady);
    checkBlockRecovery(src);
    assertFileLength(snapshotFiles[2], length[2]);
    assertFileLength(snapshotFiles[1], length[1]);
    assertFileLength(snapshotFiles[0], length[0]);
    Block replacedBlk = getLocatedBlocks(src).getLastLocatedBlock().getBlock().getLocalBlock();
    // Diskspace consumed should be 18 bytes * 3. [blk 1,6 SS:2,3,4]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(54L));
    snapshotDir = fs.createSnapshot(parent, ss[3]);
    snapshotFiles[3] = new Path(snapshotDir, truncateFile);
    length[3] = newLength;
    // Delete file. Should still be able to read snapshots
    int numINodes = fsDir.getInodeMapSize();
    isReady = fs.delete(src, false);
    assertTrue("Delete failed.", isReady);
    assertFileLength(snapshotFiles[3], length[3]);
    assertFileLength(snapshotFiles[2], length[2]);
    assertFileLength(snapshotFiles[1], length[1]);
    assertFileLength(snapshotFiles[0], length[0]);
    assertEquals("Number of INodes should not change", numINodes, fsDir.getInodeMapSize());
    fs.deleteSnapshot(parent, ss[3]);
    assertBlockExists(firstBlk);
    assertBlockExists(lastBlk);
    assertBlockNotPresent(replacedBlk);
    // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(48L));
    // delete snapshots in the specified order
    fs.deleteSnapshot(parent, ss[deleteOrder[0]]);
    assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]);
    assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
    assertBlockExists(firstBlk);
    assertBlockExists(lastBlk);
    assertEquals("Number of INodes should not change", numINodes, fsDir.getInodeMapSize());
    // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(48L));
    fs.deleteSnapshot(parent, ss[deleteOrder[1]]);
    assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
    assertBlockExists(firstBlk);
    contentSummary = fs.getContentSummary(parent);
    if (fs.exists(snapshotFiles[0])) {
        // Diskspace consumed should be 12 bytes * 3. [SS:1,2,3]
        assertBlockNotPresent(lastBlk);
        assertThat(contentSummary.getSpaceConsumed(), is(36L));
    } else {
        // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
        assertThat(contentSummary.getSpaceConsumed(), is(48L));
    }
    assertEquals("Number of INodes should not change", numINodes, fsDir.getInodeMapSize());
    fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
    assertBlockNotPresent(firstBlk);
    assertBlockNotPresent(lastBlk);
    // Diskspace consumed should be 0 bytes * 3. []
    contentSummary = fs.getContentSummary(parent);
    assertThat(contentSummary.getSpaceConsumed(), is(0L));
    assertNotEquals("Number of INodes should change", numINodes, fsDir.getInodeMapSize());
}
Also used: Path(org.apache.hadoop.fs.Path) ContentSummary(org.apache.hadoop.fs.ContentSummary) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
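
Both truncate tests read snapshot copies through the paths returned by createSnapshot, i.e. the <dir>/.snapshot/<name>/ view. A minimal sketch of that access pattern; the helper below is illustrative and not part of the test:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: a snapshotted file stays readable at its .snapshot path even after
// the live copy has been truncated or deleted.
static long snapshotLength(FileSystem fs, Path dir, String snapshot, String file)
        throws java.io.IOException {
    Path snapFile = new Path(dir, ".snapshot/" + snapshot + "/" + file);
    return fs.getFileStatus(snapFile).getLen();
}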

Example 49 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

The class TestHDFSConcat, method testConcat.

/**
   * Concatenates 10 files into one.
   * Verifies the final size, the removal of the source files,
   * and the number of blocks.
   * @throws IOException
   */
@Test
public void testConcat() throws IOException, InterruptedException {
    final int numFiles = 10;
    long fileLen = blockSize * 3;
    HdfsFileStatus fStatus;
    FSDataInputStream stm;
    String trg = "/trg";
    Path trgPath = new Path(trg);
    DFSTestUtil.createFile(dfs, trgPath, fileLen, REPL_FACTOR, 1);
    fStatus = nn.getFileInfo(trg);
    long trgLen = fStatus.getLen();
    long trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
    Path[] files = new Path[numFiles];
    byte[][] bytes = new byte[numFiles + 1][(int) fileLen];
    LocatedBlocks[] lblocks = new LocatedBlocks[numFiles];
    long[] lens = new long[numFiles];
    stm = dfs.open(trgPath);
    stm.readFully(0, bytes[0]);
    stm.close();
    int i;
    for (i = 0; i < files.length; i++) {
        files[i] = new Path("/file" + i);
        Path path = files[i];
        System.out.println("Creating file " + path);
        // make files with different content
        DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, i);
        fStatus = nn.getFileInfo(path.toUri().getPath());
        lens[i] = fStatus.getLen();
        // each file should be the same length as the target.
        assertEquals(trgLen, lens[i]);
        lblocks[i] = nn.getBlockLocations(path.toUri().getPath(), 0, lens[i]);
        //read the file
        stm = dfs.open(path);
        stm.readFully(0, bytes[i + 1]);
        //bytes[i][10] = 10;
        stm.close();
    }
    // check permissions - try the operation with the "wrong" user
    final UserGroupInformation user1 = UserGroupInformation.createUserForTesting("theDoctor", new String[] { "tardis" });
    DistributedFileSystem hdfs = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        hdfs.concat(trgPath, files);
        fail("Permission exception expected");
    } catch (IOException ie) {
        System.out.println("Got expected exception for permissions:" + ie.getLocalizedMessage());
    // expected
    }
    // check count update
    ContentSummary cBefore = dfs.getContentSummary(trgPath.getParent());
    // reorder the file array so the INode ids are not sorted.
    for (int j = 0; j < files.length / 2; j++) {
        Path tempPath = files[j];
        files[j] = files[files.length - 1 - j];
        files[files.length - 1 - j] = tempPath;
        byte[] tempBytes = bytes[1 + j];
        bytes[1 + j] = bytes[files.length - 1 - j + 1];
        bytes[files.length - 1 - j + 1] = tempBytes;
    }
    // now concatenate
    dfs.concat(trgPath, files);
    // verify file count
    ContentSummary cAfter = dfs.getContentSummary(trgPath.getParent());
    assertEquals(cBefore.getFileCount(), cAfter.getFileCount() + files.length);
    // verify other stuff
    long totalLen = trgLen;
    long totalBlocks = trgBlocks;
    for (i = 0; i < files.length; i++) {
        totalLen += lens[i];
        totalBlocks += lblocks[i].locatedBlockCount();
    }
    System.out.println("total len=" + totalLen + "; totalBlocks=" + totalBlocks);
    fStatus = nn.getFileInfo(trg);
    // new length
    trgLen = fStatus.getLen();
    // read the resulting file
    stm = dfs.open(trgPath);
    byte[] byteFileConcat = new byte[(int) trgLen];
    stm.readFully(0, byteFileConcat);
    stm.close();
    trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
    //verifications
    // 1. number of blocks
    assertEquals(trgBlocks, totalBlocks);
    // 2. file lengths
    assertEquals(trgLen, totalLen);
    // 3. removal of the src file
    for (Path p : files) {
        fStatus = nn.getFileInfo(p.toUri().getPath());
        // file shouldn't exist
        assertNull("File " + p + " still exists", fStatus);
        // try to create a file with the same name
        DFSTestUtil.createFile(dfs, p, fileLen, REPL_FACTOR, 1);
    }
    // 4. content
    checkFileContent(byteFileConcat, bytes);
    // add a small file (less than a block)
    Path smallFile = new Path("/sfile");
    int sFileLen = 10;
    DFSTestUtil.createFile(dfs, smallFile, sFileLen, REPL_FACTOR, 1);
    dfs.concat(trgPath, new Path[] { smallFile });
    fStatus = nn.getFileInfo(trg);
    // new length
    trgLen = fStatus.getLen();
    // check number of blocks
    trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
    assertEquals(trgBlocks, totalBlocks + 1);
    // and length
    assertEquals(trgLen, totalLen + sFileLen);
}
Also used: Path(org.apache.hadoop.fs.Path) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ContentSummary(org.apache.hadoop.fs.ContentSummary) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
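
Stripped of the permission and reordering checks, the concat flow is short: merge the sources into the target, then confirm they were consumed by comparing file counts. A hedged sketch under the same conditions the test sets up (uniform source files compatible with the target, as HDFS concat requires); names are illustrative:

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: concat atomically appends srcs to target and removes them.
static void concatAndVerify(DistributedFileSystem dfs, Path target, Path[] srcs)
        throws java.io.IOException {
    ContentSummary before = dfs.getContentSummary(target.getParent());
    dfs.concat(target, srcs);
    ContentSummary after = dfs.getContentSummary(target.getParent());
    if (before.getFileCount() != after.getFileCount() + srcs.length) {
        throw new IllegalStateException("sources were not consumed by concat");
    }
}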

Example 50 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

The class TestQuotaByStorageType, method testContentSummaryWithoutStoragePolicy.

@Test(timeout = 60000)
public void testContentSummaryWithoutStoragePolicy() throws Exception {
    final Path foo = new Path(dir, "foo");
    Path createdFile1 = new Path(foo, "created_file1.data");
    dfs.mkdirs(foo);
    INode fnode = fsdir.getINode4Write(foo.toString());
    assertTrue(fnode.isDirectory());
    assertTrue(!fnode.isQuotaSet());
    // Create file of size 2 * BLOCKSIZE under directory "foo"
    long file1Len = BLOCKSIZE * 2;
    int bufLen = BLOCKSIZE / 16;
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
    // Verify getContentSummary without any quota set
    // Expect no type quota and usage information available
    ContentSummary cs = dfs.getContentSummary(foo);
    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
    for (StorageType t : StorageType.values()) {
        assertEquals(cs.getTypeConsumed(t), 0);
        assertEquals(cs.getTypeQuota(t), -1);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) StorageType(org.apache.hadoop.fs.StorageType) ContentSummary(org.apache.hadoop.fs.ContentSummary) Test(org.junit.Test)
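
The -1 returned by getTypeQuota signals that no quota of that storage type is set. For contrast, a hedged sketch of the opposite case using DistributedFileSystem.setQuotaByStorageType; the path and limit are illustrative:

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: once a per-storage-type quota is set, getTypeQuota reports it
// instead of -1, and getTypeConsumed tracks usage against it.
static void setAndReadTypeQuota(DistributedFileSystem dfs, Path dir)
        throws java.io.IOException {
    dfs.setQuotaByStorageType(dir, StorageType.DISK, 10L * 1024 * 1024);
    ContentSummary cs = dfs.getContentSummary(dir);
    System.out.println("DISK quota=" + cs.getTypeQuota(StorageType.DISK)
        + " consumed=" + cs.getTypeConsumed(StorageType.DISK));
}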

Aggregations

ContentSummary (org.apache.hadoop.fs.ContentSummary) 61
Path (org.apache.hadoop.fs.Path) 42
Test (org.junit.Test) 38
FileSystem (org.apache.hadoop.fs.FileSystem) 10
IOException (java.io.IOException) 9
Configuration (org.apache.hadoop.conf.Configuration) 8
ArrayList (java.util.ArrayList) 6
OutputStream (java.io.OutputStream) 5
URI (java.net.URI) 5
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException) 5
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException) 5
WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem) 5
JobConf (org.apache.hadoop.mapred.JobConf) 5
HttpURLConnection (java.net.HttpURLConnection) 4
HashMap (java.util.HashMap) 4
Properties (java.util.Properties) 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 4
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException) 4
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) 4
FileNotFoundException (java.io.FileNotFoundException) 3