
Example 11 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class TestFSImage method testSaveAndLoadStripedINodeFile.

private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf, boolean isUC) throws IOException {
    // Construct an INode with StripedBlock for saving and loading
    fsn.setErasureCodingPolicy("/", testECPolicy.getName(), false);
    long id = 123456789;
    byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
    PermissionStatus permissionStatus = new PermissionStatus("testuser_a", "testuser_groups", new FsPermission((short) 0x755));
    long mtime = 1426222916 - 3600;
    long atime = 1426222916;
    BlockInfoContiguous[] blocks = new BlockInfoContiguous[0];
    byte erasureCodingPolicyID = testECPolicy.getId();
    long preferredBlockSize = 128 * 1024 * 1024;
    INodeFile file = new INodeFile(id, name, permissionStatus, mtime, atime, blocks, null, erasureCodingPolicyID, preferredBlockSize, (byte) 0, BlockType.STRIPED);
    ByteArrayOutputStream bs = new ByteArrayOutputStream();
    // Construct StripedBlocks for the INode
    BlockInfoStriped[] stripedBlocks = new BlockInfoStriped[3];
    long stripedBlkId = 10000001;
    long timestamp = mtime + 3600;
    for (int i = 0; i < stripedBlocks.length; i++) {
        stripedBlocks[i] = new BlockInfoStriped(new Block(stripedBlkId + i, preferredBlockSize, timestamp), testECPolicy);
        file.addBlock(stripedBlocks[i]);
    }
    final String client = "testClient";
    final String clientMachine = "testClientMachine";
    final String path = "testUnderConstructionPath";
    // Save the INode to byte array
    DataOutputStream out = new DataOutputStream(bs);
    if (isUC) {
        file.toUnderConstruction(client, clientMachine);
        FSImageSerialization.writeINodeUnderConstruction(out, file, path);
    } else {
        FSImageSerialization.writeINodeFile(file, out, false);
    }
    DataInput in = new DataInputStream(new ByteArrayInputStream(bs.toByteArray()));
    // Load the INode from the byte array
    INodeFile fileByLoaded;
    if (isUC) {
        fileByLoaded = FSImageSerialization.readINodeUnderConstruction(in, fsn, fsn.getFSImage().getLayoutVersion());
    } else {
        fileByLoaded = (INodeFile) new FSImageFormat.Loader(conf, fsn).loadINodeWithLocalName(false, in, false);
    }
    assertEquals(id, fileByLoaded.getId());
    assertArrayEquals(isUC ? path.getBytes() : name, fileByLoaded.getLocalName().getBytes());
    assertEquals(permissionStatus.getUserName(), fileByLoaded.getPermissionStatus().getUserName());
    assertEquals(permissionStatus.getGroupName(), fileByLoaded.getPermissionStatus().getGroupName());
    assertEquals(permissionStatus.getPermission(), fileByLoaded.getPermissionStatus().getPermission());
    assertEquals(mtime, fileByLoaded.getModificationTime());
    assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
    // TODO for striped blocks, we currently save and load them as contiguous
    // blocks to/from legacy fsimage
    assertEquals(3, fileByLoaded.getBlocks().length);
    assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());
    assertEquals(file.getFileReplication(), fileByLoaded.getFileReplication());
    if (isUC) {
        assertEquals(client, fileByLoaded.getFileUnderConstructionFeature().getClientName());
        assertEquals(clientMachine, fileByLoaded.getFileUnderConstructionFeature().getClientMachine());
    }
}
Also used: BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) DataOutput(java.io.DataOutput) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) DataInputStream(java.io.DataInputStream) DataInput(java.io.DataInput) ByteArrayInputStream(java.io.ByteArrayInputStream) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
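
A side note on the two block-info constructors this example exercises: the file is created with an empty BlockInfoContiguous array and BlockType.STRIPED, and the striped blocks are attached afterwards via addBlock. A minimal sketch, reusing only constructors that appear on this page (testECPolicy is the test-class field used above; the numeric values are arbitrary):

// Sketch: the two BlockInfo flavors, built from the same raw Block.
Block raw = new Block(10000001L, 128 * 1024 * 1024, 1426222916L);
// Contiguous layout: the replication factor is an explicit short.
BlockInfoContiguous contiguous = new BlockInfoContiguous(raw, (short) 3);
// Striped layout: data/parity geometry comes from the erasure coding policy.
BlockInfoStriped striped = new BlockInfoStriped(raw, testECPolicy);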

Example 12 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class TestCommitBlockSynchronization method testCommitBlockSynchronizationWithClose.

@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    // Repeat the call to make sure the second close is idempotent and does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
    completedBlockInfo.setBlockCollectionId(file.getId());
    completedBlockInfo.setGenerationStamp(genStamp);
    doReturn(completedBlockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(completedBlockInfo).when(file).getLastBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
}
Also used: BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
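
One detail that is easy to miss in this test: because namesystemSpy is a Mockito spy wrapping a real FSNamesystem, the stubbing uses the doReturn(...).when(spy) form. The more common when(spy.getStoredBlock(...)) syntax would invoke the real getStoredBlock while arranging the stub. A minimal sketch of the pattern (fsn here stands for a hypothetical real FSNamesystem instance; completedBlockInfo as built above):

FSNamesystem spy = Mockito.spy(fsn);
// doReturn(..).when(spy) installs the stub without calling the real method.
Mockito.doReturn(completedBlockInfo)
       .when(spy)
       .getStoredBlock(Mockito.any(Block.class));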

Example 13 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class TestCommitBlockSynchronization method testCommitBlockSynchronization.

@Test
public void testCommitBlockSynchronization() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, false, newTargets, null);
    // Repeat the call to make sure it does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, false, newTargets, null);
    // Simulate 'completing' the block.
    BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
    completedBlockInfo.setBlockCollectionId(file.getId());
    completedBlockInfo.setGenerationStamp(genStamp);
    doReturn(completedBlockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(completedBlockInfo).when(file).getLastBlock();
    // Repeat the call to make sure it does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, false, newTargets, null);
}
Also used: BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
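
The only functional difference from Example 12 is the closeFile flag: there the file is closed after synchronization, here it stays under construction. Annotating the positional arguments of the call above (the parameter roles are my reading of the FSNamesystem signature; treat the comments as descriptive, not authoritative):

namesystemSpy.commitBlockSynchronization(
    lastBlock,   // ExtendedBlock reported by the recovery coordinator
    genStamp,    // new generation stamp after recovery
    length,      // new block length after recovery
    false,       // closeFile: leave the file under construction
    false,       // deleteblock: keep the block
    newTargets,  // DatanodeID[] of the datanodes holding recovered replicas
    null);       // new target storage IDs, unused in this test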

Example 14 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class TestFileTruncate method testTruncateRecovery.

/**
   * Check truncate recovery.
   */
@Test
public void testTruncateRecovery() throws IOException {
    FSNamesystem fsn = cluster.getNamesystem();
    String client = "client";
    String clientMachine = "clientMachine";
    String src = "/test/testTruncateRecovery";
    Path srcPath = new Path(src);
    byte[] contents = AppendTestUtil.initBuffer(BLOCK_SIZE);
    writeContents(contents, BLOCK_SIZE, srcPath);
    INodesInPath iip = fsn.getFSDirectory().getINodesInPath(src, DirOp.WRITE);
    INodeFile file = iip.getLastINode().asFile();
    long initialGenStamp = file.getLastBlock().getGenerationStamp();
    // Test that prepareFileForTruncate sets up in-place truncate.
    fsn.writeLock();
    try {
        Block oldBlock = file.getLastBlock();
        Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip, client, clientMachine, 1, null);
        // In-place truncate uses old block id with new genStamp.
        assertThat(truncateBlock.getBlockId(), is(equalTo(oldBlock.getBlockId())));
        assertThat(truncateBlock.getNumBytes(), is(oldBlock.getNumBytes()));
        assertThat(truncateBlock.getGenerationStamp(), is(fsn.getBlockManager().getBlockIdManager().getGenerationStamp()));
        assertThat(file.getLastBlock().getBlockUCState(), is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
        long blockRecoveryId = file.getLastBlock().getUnderConstructionFeature().getBlockRecoveryId();
        assertThat(blockRecoveryId, is(initialGenStamp + 1));
        fsn.getEditLog().logTruncate(src, client, clientMachine, BLOCK_SIZE - 1, Time.now(), truncateBlock);
    } finally {
        fsn.writeUnlock();
    }
    // Re-create file and ensure we are ready to copy on truncate
    writeContents(contents, BLOCK_SIZE, srcPath);
    fs.allowSnapshot(parent);
    fs.createSnapshot(parent, "ss0");
    iip = fsn.getFSDirectory().getINodesInPath(src, DirOp.WRITE);
    file = iip.getLastINode().asFile();
    file.recordModification(iip.getLatestSnapshotId(), true);
    assertThat(file.isBlockInLatestSnapshot((BlockInfoContiguous) file.getLastBlock()), is(true));
    initialGenStamp = file.getLastBlock().getGenerationStamp();
    // Test that prepareFileForTruncate sets up copy-on-write truncate
    fsn.writeLock();
    try {
        Block oldBlock = file.getLastBlock();
        Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip, client, clientMachine, 1, null);
        // Copy-on-write truncate allocates a new block with a new id and genStamp
        assertThat(truncateBlock.getBlockId(), is(not(equalTo(oldBlock.getBlockId()))));
        assertThat(truncateBlock.getNumBytes() < oldBlock.getNumBytes(), is(true));
        assertThat(truncateBlock.getGenerationStamp(), is(fsn.getBlockManager().getBlockIdManager().getGenerationStamp()));
        assertThat(file.getLastBlock().getBlockUCState(), is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
        long blockRecoveryId = file.getLastBlock().getUnderConstructionFeature().getBlockRecoveryId();
        assertThat(blockRecoveryId, is(initialGenStamp + 1));
        fsn.getEditLog().logTruncate(src, client, clientMachine, BLOCK_SIZE - 1, Time.now(), truncateBlock);
    } finally {
        fsn.writeUnlock();
    }
    checkBlockRecovery(srcPath);
    fs.deleteSnapshot(parent, "ss0");
    fs.delete(parent, true);
}
Also used: Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
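
The two halves of this test pin down the truncate contract: a last block that is not captured by any snapshot is recovered in place (same block id, bumped genStamp), while a block referenced by a snapshot forces copy-on-write with a fresh block id so the snapshot keeps its bytes. A hedged illustration of that rule (TruncateMode and expectedMode are invented here for clarity; they are not HDFS code):

enum TruncateMode { IN_PLACE, COPY_ON_WRITE }

static TruncateMode expectedMode(boolean lastBlockInLatestSnapshot) {
    // A snapshot must preserve the pre-truncate bytes, so the block is
    // copied; otherwise the NameNode reuses the id and only bumps genStamp.
    return lastBlockInLatestSnapshot
        ? TruncateMode.COPY_ON_WRITE : TruncateMode.IN_PLACE;
}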

Example 15 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class TestINodeFile method createINodeFiles.

/** 
   * Creates the required number of files with one block each.
   * @param nCount Number of INode files to create
   * @param fileNamePrefix Prefix used for each file's local name
   * @return Array of INode files
   */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
    // Non-positive counts yield a single-element (null-filled) array.
    if (nCount <= 0)
        return new INodeFile[1];
    replication = 3;
    preferredBlockSize = 128 * 1024 * 1024;
    INodeFile[] iNodes = new INodeFile[nCount];
    for (int i = 0; i < nCount; i++) {
        iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication, preferredBlockSize);
        iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
        BlockInfo newblock = new BlockInfoContiguous(replication);
        iNodes[i].addBlock(newblock);
    }
    return iNodes;
}
Also used: BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
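
A hypothetical call site, just to make the helper's contract concrete (names and counts are illustrative):

// Builds five single-block INodeFiles named file0 .. file4.
INodeFile[] files = createINodeFiles(5, "file");
assertEquals(5, files.length);
assertEquals("file0", files[0].getLocalName());
assertEquals(1, files[0].getBlocks().length);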

Aggregations

BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 19 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 17 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 11 uses
Test (org.junit.Test): 9 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses
Path (org.apache.hadoop.fs.Path): 6 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5 uses
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 4 uses
PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus): 3 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3 uses
IOException (java.io.IOException): 2 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 2 uses
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1 use
ByteArrayInputStream (java.io.ByteArrayInputStream): 1 use
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1 use