Example 16 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

From the class TestReplicationPolicy, method testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication.

@Test(timeout = 60000)
public void testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication() throws IOException {
    Namesystem mockNS = mock(Namesystem.class);
    when(mockNS.hasWriteLock()).thenReturn(true);
    BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
    LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
    long blkID1 = ThreadLocalRandom.current().nextLong();
    if (blkID1 < 0) {
        blkID1 *= -1;
    }
    long blkID2 = ThreadLocalRandom.current().nextLong();
    if (blkID2 < 0) {
        blkID2 *= -1;
    }
    BlockInfo block1 = genBlockInfo(blkID1);
    BlockInfo block2 = genBlockInfo(blkID2);
    // Adding QUEUE_LOW_REDUNDANCY block
    lowRedundancyBlocks.add(block1, 0, 0, 1, 1);
    // Adding QUEUE_LOW_REDUNDANCY block
    lowRedundancyBlocks.add(block2, 0, 0, 1, 1);
    List<List<BlockInfo>> chosenBlocks;
    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
    // from QUEUE_VERY_LOW_REDUNDANCY.
    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
    final BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
    final BlockCollection mbc = mock(BlockCollection.class);
    when(mbc.getId()).thenReturn(1000L);
    when(mbc.getLastBlock()).thenReturn(info);
    when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
    when(mbc.isUnderConstruction()).thenReturn(true);
    ContentSummary cs = mock(ContentSummary.class);
    when(cs.getLength()).thenReturn((long) 1);
    when(mbc.computeContentSummary(bm.getStoragePolicySuite())).thenReturn(cs);
    info.setBlockCollectionId(1000);
    bm.addBlockCollection(info, mbc);
    DatanodeStorageInfo[] storageAry = { new DatanodeStorageInfo(dataNodes[0], new DatanodeStorage("s1")) };
    info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, storageAry);
    DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class);
    DatanodeDescriptor dn = mock(DatanodeDescriptor.class);
    when(dn.isDecommissioned()).thenReturn(true);
    when(storage.getState()).thenReturn(DatanodeStorage.State.NORMAL);
    when(storage.getDatanodeDescriptor()).thenReturn(dn);
    when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true);
    when(storage.addBlock(any(BlockInfo.class))).thenReturn(DatanodeStorageInfo.AddBlockResult.ADDED);
    info.addStorage(storage, info);
    BlockInfo lastBlk = mbc.getLastBlock();
    when(mbc.getLastBlock()).thenReturn(lastBlk, info);
    bm.convertLastBlockToUnderConstruction(mbc, 0L);
    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
    // from QUEUE_VERY_LOW_REDUNDANCY.
    // This block remains and should not be skipped over.
    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), StatefulBlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo), ContentSummary(org.apache.hadoop.fs.ContentSummary), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Namesystem(org.apache.hadoop.hdfs.server.namenode.Namesystem), List(java.util.List), ArrayList(java.util.ArrayList), Test(org.junit.Test)
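
For reference, the ContentSummary handling in the test above reduces to a small Mockito pattern: mock the summary, stub getLength(), and return it from the block collection's computeContentSummary(). The sketch below is illustrative and not part of the Hadoop sources; the class and method names are made up for the example.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

class ContentSummaryStubSketch {
    // Stub a ContentSummary and wire it to a mocked BlockCollection, as the
    // test above does before calling convertLastBlockToUnderConstruction.
    static ContentSummary stubSummary(BlockCollection mbc, BlockStoragePolicySuite sps) {
        ContentSummary cs = mock(ContentSummary.class);
        // Report a small fixed length, matching the stub in the test above.
        when(cs.getLength()).thenReturn(1L);
        when(mbc.computeContentSummary(sps)).thenReturn(cs);
        return cs;
    }
}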

Example 17 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

From the class TestFileTruncate, method testTruncate4Symlink.

@Test
public void testTruncate4Symlink() throws IOException {
    final int fileLength = 3 * BLOCK_SIZE;
    fs.mkdirs(parent);
    final byte[] contents = AppendTestUtil.initBuffer(fileLength);
    final Path file = new Path(parent, "testTruncate4Symlink");
    writeContents(contents, fileLength, file);
    final Path link = new Path(parent, "link");
    fs.createSymlink(file, link, false);
    final int newLength = fileLength / 3;
    boolean isReady = fs.truncate(link, newLength);
    assertTrue("Recovery is not expected.", isReady);
    FileStatus fileStatus = fs.getFileStatus(file);
    assertThat(fileStatus.getLen(), is((long) newLength));
    ContentSummary cs = fs.getContentSummary(parent);
    assertEquals("Bad disk space usage", cs.getSpaceConsumed(), newLength * REPLICATION);
    // validate the file content
    checkFullFile(file, newLength, contents);
    fs.delete(parent, true);
}
Also used : Path(org.apache.hadoop.fs.Path), FileStatus(org.apache.hadoop.fs.FileStatus), ContentSummary(org.apache.hadoop.fs.ContentSummary), Test(org.junit.Test)
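
The disk-space assertion above relies on the fact that, for a replicated (non-erasure-coded) file, ContentSummary.getSpaceConsumed() equals the file length times its replication factor. Below is a minimal sketch of that check as a standalone helper; it is illustrative, not part of the Hadoop sources, and assumes an already-initialized FileSystem.

import java.io.IOException;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class SpaceUsageCheck {
    // Verify that space consumed matches length * replication for one file.
    static void assertSpaceMatchesLength(FileSystem fs, Path file) throws IOException {
        FileStatus status = fs.getFileStatus(file);
        ContentSummary cs = fs.getContentSummary(file);
        long expected = status.getLen() * status.getReplication();
        if (cs.getSpaceConsumed() != expected) {
            throw new AssertionError("Bad disk space usage: expected " + expected
                + ", got " + cs.getSpaceConsumed());
        }
    }
}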

Example 18 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

From the class TestFileTruncate, method testBasicTruncate.

/**
   * Truncate files of different sizes byte by byte.
   */
@Test
public void testBasicTruncate() throws IOException {
    int startingFileSize = 3 * BLOCK_SIZE;
    fs.mkdirs(parent);
    fs.setQuota(parent, 100, 1000);
    byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
    for (int fileLength = startingFileSize; fileLength > 0; fileLength -= BLOCK_SIZE - 1) {
        for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
            final Path p = new Path(parent, "testBasicTruncate" + fileLength);
            writeContents(contents, fileLength, p);
            int newLength = fileLength - toTruncate;
            boolean isReady = fs.truncate(p, newLength);
            LOG.info("fileLength=" + fileLength + ", newLength=" + newLength + ", toTruncate=" + toTruncate + ", isReady=" + isReady);
            assertEquals("File must be closed for zero truncate" + " or truncating at the block boundary", isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
            if (!isReady) {
                checkBlockRecovery(p);
            }
            ContentSummary cs = fs.getContentSummary(parent);
            assertEquals("Bad disk space usage", cs.getSpaceConsumed(), newLength * REPLICATION);
            // validate the file content
            checkFullFile(p, newLength, contents);
        }
    }
    fs.delete(parent, true);
}
Also used : Path(org.apache.hadoop.fs.Path), ContentSummary(org.apache.hadoop.fs.ContentSummary), Test(org.junit.Test)
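
testBasicTruncate also places a namespace and disk-space quota on the parent directory with setQuota(). Those quotas are reported through the same ContentSummary object that the assertion reads. A small sketch, assuming a DistributedFileSystem handle; the class and method names are illustrative and not from the Hadoop sources.

import java.io.IOException;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class QuotaReport {
    // Set a quota on a directory and read usage and quota back from its summary.
    static void setAndReportQuota(DistributedFileSystem dfs, Path dir) throws IOException {
        // namespace quota of 100 names, space quota of 1000 bytes (as in the test)
        dfs.setQuota(dir, 100, 1000);
        ContentSummary cs = dfs.getContentSummary(dir);
        System.out.println("names used/quota: "
            + (cs.getFileCount() + cs.getDirectoryCount()) + "/" + cs.getQuota());
        System.out.println("space used/quota: "
            + cs.getSpaceConsumed() + "/" + cs.getSpaceQuota());
    }
}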

Example 19 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

From the class TestRenameWithSnapshots, method checkSpaceConsumed.

private void checkSpaceConsumed(String message, Path directory, long expectedSpace) throws Exception {
    ContentSummary summary = hdfs.getContentSummary(directory);
    assertEquals(message, expectedSpace, summary.getSpaceConsumed());
}
Also used : ContentSummary(org.apache.hadoop.fs.ContentSummary)
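
A hypothetical call site for the helper above (the message, path, and expected value are illustrative; BLOCKSIZE and REPLICATION stand in for the test class's block-size and replication constants):

checkSpaceConsumed("space consumed after rename across snapshottable directories",
    new Path("/dir2/foo"), 3 * BLOCKSIZE * REPLICATION);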

Example 20 with ContentSummary

Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

From the class TestGetContentSummaryWithSnapshot, method testGetContentSummary.

/**
   * Calculate against a snapshot path.
   * 1. create dirs /foo/bar
   * 2. take snapshot s1 on /foo
   * 3. create a 10 byte file /foo/bar/baz
   * Make sure "/foo/bar" and "/foo/.snapshot/s1/bar" report correct results:
   * the 10-byte file is not included in snapshot s1.
   */
@Test
public void testGetContentSummary() throws IOException {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path baz = new Path(bar, "baz");
    dfs.mkdirs(bar);
    dfs.allowSnapshot(foo);
    dfs.createSnapshot(foo, "s1");
    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
    ContentSummary summary = cluster.getNameNodeRpc().getContentSummary(bar.toString());
    Assert.assertEquals(1, summary.getDirectoryCount());
    Assert.assertEquals(1, summary.getFileCount());
    Assert.assertEquals(10, summary.getLength());
    final Path barS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar");
    summary = cluster.getNameNodeRpc().getContentSummary(barS1.toString());
    Assert.assertEquals(1, summary.getDirectoryCount());
    Assert.assertEquals(0, summary.getFileCount());
    Assert.assertEquals(0, summary.getLength());
    // also check /foo and /foo/.snapshot/s1
    summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
    Assert.assertEquals(2, summary.getDirectoryCount());
    Assert.assertEquals(1, summary.getFileCount());
    Assert.assertEquals(10, summary.getLength());
    final Path fooS1 = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    summary = cluster.getNameNodeRpc().getContentSummary(fooS1.toString());
    Assert.assertEquals(2, summary.getDirectoryCount());
    Assert.assertEquals(0, summary.getFileCount());
    Assert.assertEquals(0, summary.getLength());
    final Path bazS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar/baz");
    try {
        cluster.getNameNodeRpc().getContentSummary(bazS1.toString());
        Assert.fail("should get FileNotFoundException");
    } catch (FileNotFoundException ignored) {
    }
}
Also used : Path(org.apache.hadoop.fs.Path), ContentSummary(org.apache.hadoop.fs.ContentSummary), FileNotFoundException(java.io.FileNotFoundException), Test(org.junit.Test)
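
The test above queries the NameNode RPC interface directly; a client sees the same counts through the ordinary FileSystem API. A minimal client-side sketch, illustrative and not from the Hadoop sources, assuming a FileSystem handle connected to the same cluster:

import java.io.IOException;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class SnapshotSummarySketch {
    // Compare the live directory with its snapshot copy taken before the file existed.
    static void printSummaries(FileSystem fs) throws IOException {
        ContentSummary live = fs.getContentSummary(new Path("/foo/bar"));
        ContentSummary snap = fs.getContentSummary(new Path("/foo/.snapshot/s1/bar"));
        // The live directory includes the 10-byte file; the snapshot, taken
        // before the file was created, does not.
        System.out.println("live:     files=" + live.getFileCount()
            + " length=" + live.getLength());
        System.out.println("snapshot: files=" + snap.getFileCount()
            + " length=" + snap.getLength());
    }
}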

Aggregations

ContentSummary (org.apache.hadoop.fs.ContentSummary): 61 usages
Path (org.apache.hadoop.fs.Path): 42 usages
Test (org.junit.Test): 38 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 10 usages
IOException (java.io.IOException): 9 usages
Configuration (org.apache.hadoop.conf.Configuration): 8 usages
ArrayList (java.util.ArrayList): 6 usages
OutputStream (java.io.OutputStream): 5 usages
URI (java.net.URI): 5 usages
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 5 usages
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException): 5 usages
WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem): 5 usages
JobConf (org.apache.hadoop.mapred.JobConf): 5 usages
HttpURLConnection (java.net.HttpURLConnection): 4 usages
HashMap (java.util.HashMap): 4 usages
Properties (java.util.Properties): 4 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 usages
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 4 usages
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 4 usages
FileNotFoundException (java.io.FileNotFoundException): 3 usages