Example 41 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From the class TestStorageMover, method testMigrateOpenFileToArchival:

/**
   * Move an open file into archival storage
   */
@Test
public void testMigrateOpenFileToArchival() throws Exception {
    LOG.info("testMigrateOpenFileToArchival");
    final Path fooDir = new Path("/foo");
    Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
    policyMap.put(fooDir, COLD);
    NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null, BLOCK_SIZE, null, policyMap);
    ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF, NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
    MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
    test.setupCluster();
    // create an open file
    banner("writing to file /foo/bar");
    final Path barFile = new Path(fooDir, "bar");
    DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
    FSDataOutputStream out = test.dfs.append(barFile);
    out.writeBytes("hello, ");
    ((DFSOutputStream) out.getWrappedStream()).hsync();
    try {
        banner("start data migration");
        // set /foo to COLD
        test.setStoragePolicy();
        test.migrate(ExitStatus.SUCCESS);
        // make sure the under construction block has not been migrated
        LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
        LOG.info("Locations: " + lbs);
        List<LocatedBlock> blks = lbs.getLocatedBlocks();
        Assert.assertEquals(1, blks.size());
        Assert.assertEquals(1, blks.get(0).getLocations().length);
        banner("finish the migration, continue writing");
        // make sure the writing can continue
        out.writeBytes("world!");
        ((DFSOutputStream) out.getWrappedStream()).hsync();
        IOUtils.cleanup(LOG, out);
        lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
        LOG.info("Locations: " + lbs);
        blks = lbs.getLocatedBlocks();
        Assert.assertEquals(1, blks.size());
        Assert.assertEquals(1, blks.get(0).getLocations().length);
        banner("finish writing, starting reading");
        // check the content of /foo/bar
        FSDataInputStream in = test.dfs.open(barFile);
        byte[] buf = new byte[13];
        // read back the 13 appended bytes, starting at offset BLOCK_SIZE
        in.readFully(BLOCK_SIZE, buf, 0, buf.length);
        IOUtils.cleanup(LOG, in);
        Assert.assertEquals("hello, world!", new String(buf));
    } finally {
        test.shutdownCluster();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
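
For comparison, here is a minimal client-side sketch of the same workflow outside the test harness. It assumes a running HDFS cluster reachable through the default configuration and a hypothetical /foo directory; it uses only the public setStoragePolicy API, and leaves the block relocation itself to the Mover tool (which is what test.migrate(...) drives above).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetColdPolicySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at a running HDFS cluster.
        try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
            // Hypothetical directory; any HDFS path works.
            Path dir = new Path("/foo");
            // "COLD" is the built-in all-ARCHIVE policy.
            dfs.setStoragePolicy(dir, "COLD");
            // Setting the policy only records metadata; existing blocks are
            // relocated separately by the Mover tool, e.g. `hdfs mover -p /foo`.
        }
    }
}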

Example 42 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From the class FSDirectory, method getStorageTypeDeltas:

public EnumCounters<StorageType> getStorageTypeDeltas(byte storagePolicyID, long dsDelta, short oldRep, short newRep) {
    EnumCounters<StorageType> typeSpaceDeltas = new EnumCounters<StorageType>(StorageType.class);
    // empty file
    if (dsDelta == 0) {
        return typeSpaceDeltas;
    }
    // Storage type and its quota are only available when storage policy is set
    if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
        BlockStoragePolicy storagePolicy = getBlockManager().getStoragePolicy(storagePolicyID);
        if (oldRep != newRep) {
            List<StorageType> oldChosenStorageTypes = storagePolicy.chooseStorageTypes(oldRep);
            for (StorageType t : oldChosenStorageTypes) {
                if (!t.supportTypeQuota()) {
                    continue;
                }
                Preconditions.checkArgument(dsDelta > 0);
                typeSpaceDeltas.add(t, -dsDelta);
            }
        }
        List<StorageType> newChosenStorageTypes = storagePolicy.chooseStorageTypes(newRep);
        for (StorageType t : newChosenStorageTypes) {
            if (!t.supportTypeQuota()) {
                continue;
            }
            typeSpaceDeltas.add(t, dsDelta);
        }
    }
    return typeSpaceDeltas;
}
Also used: EnumCounters (org.apache.hadoop.hdfs.util.EnumCounters), StorageType (org.apache.hadoop.fs.StorageType), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
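
To make the delta arithmetic concrete, here is a standalone sketch that mimics the method with a plain EnumMap in place of the internal EnumCounters class. The two lists stand in for what chooseStorageTypes(oldRep) and chooseStorageTypes(newRep) would return; the class and method names are hypothetical.

import java.util.Arrays;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.StorageType;

public class TypeDeltaSketch {
    // Mirrors the loops above: charge -dsDelta for each storage type chosen
    // under the old replication and +dsDelta for each chosen under the new
    // one, skipping types (e.g. RAM_DISK) that do not support type quotas.
    static Map<StorageType, Long> deltas(List<StorageType> oldChosen,
                                         List<StorageType> newChosen,
                                         long dsDelta) {
        Map<StorageType, Long> out = new EnumMap<>(StorageType.class);
        for (StorageType t : oldChosen) {
            if (t.supportTypeQuota()) {
                out.merge(t, -dsDelta, Long::sum);
            }
        }
        for (StorageType t : newChosen) {
            if (t.supportTypeQuota()) {
                out.merge(t, dsDelta, Long::sum);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // COLD chooses ARCHIVE for every replica; dropping replication from 3
        // to 2 for 1024 bytes should release 1024 bytes of ARCHIVE quota.
        System.out.println(deltas(
            Arrays.asList(StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE),
            Arrays.asList(StorageType.ARCHIVE, StorageType.ARCHIVE),
            1024L));  // prints {ARCHIVE=-1024}
    }
}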

Example 43 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From the class FSDirTruncateOp, method truncate:

/**
   * Truncate a file to a given size.
   *
   * @param fsn namespace
   * @param srcArg path name
   * @param newLength the target file size
   * @param clientName client name
   * @param clientMachine client machine info
   * @param mtime modified time
   * @param toRemoveBlocks to be removed blocks
   * @param pc permission checker to check fs permission
   * @return truncate result
   * @throws IOException
   */
static TruncateResult truncate(final FSNamesystem fsn, final String srcArg, final long newLength, final String clientName, final String clientMachine, final long mtime, final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc) throws IOException, UnresolvedLinkException {
    assert fsn.hasWriteLock();
    FSDirectory fsd = fsn.getFSDirectory();
    final String src;
    final INodesInPath iip;
    final boolean onBlockBoundary;
    Block truncateBlock = null;
    fsd.writeLock();
    try {
        iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
        src = iip.getPath();
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.WRITE);
        }
        INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
        // truncating a file with striped blocks is not supported
        if (file.isStriped()) {
            throw new UnsupportedOperationException("Cannot truncate file with striped block " + src);
        }
        final BlockStoragePolicy lpPolicy = fsd.getBlockManager().getStoragePolicy("LAZY_PERSIST");
        if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
            throw new UnsupportedOperationException("Cannot truncate lazy persist file " + src);
        }
        // Check if the file is already being truncated with the same length
        final BlockInfo last = file.getLastBlock();
        if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
            final Block truncatedBlock = last.getUnderConstructionFeature().getTruncateBlock();
            if (truncatedBlock != null) {
                final long truncateLength = file.computeFileSize(false, false) + truncatedBlock.getNumBytes();
                if (newLength == truncateLength) {
                    return new TruncateResult(false, fsd.getAuditFileInfo(iip));
                }
            }
        }
        // Opening an existing file for truncate. May need lease recovery.
        fsn.recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE, iip, src, clientName, clientMachine, false);
        // Truncate length check.
        long oldLength = file.computeFileSize();
        if (oldLength == newLength) {
            return new TruncateResult(true, fsd.getAuditFileInfo(iip));
        }
        if (oldLength < newLength) {
            throw new HadoopIllegalArgumentException("Cannot truncate to a larger file size. Current size: " + oldLength + ", truncate size: " + newLength + ".");
        }
        // Perform INodeFile truncation.
        final QuotaCounts delta = new QuotaCounts.Builder().build();
        onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, toRemoveBlocks, mtime, delta);
        if (!onBlockBoundary) {
            // Open file for write, but don't log into edits
            long lastBlockDelta = file.computeFileSize() - newLength;
            assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
            truncateBlock = prepareFileForTruncate(fsn, iip, clientName, clientMachine, lastBlockDelta, null);
        }
        // update the quota: use the preferred block size for UC block
        fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
    } finally {
        fsd.writeUnlock();
    }
    fsn.getEditLog().logTruncate(src, clientName, clientMachine, newLength, mtime, truncateBlock);
    return new TruncateResult(onBlockBoundary, fsd.getAuditFileInfo(iip));
}
Also used: HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
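
The client-facing entry point for this NameNode code path is FileSystem#truncate. A minimal sketch, assuming a running cluster and an existing file at a hypothetical path that is at least 1024 bytes long:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateSketch {
    public static void main(String[] args) throws Exception {
        try (FileSystem fs = FileSystem.get(new Configuration())) {
            boolean onBoundary = fs.truncate(new Path("/foo/bar"), 1024L);
            // true  -> newLength fell on a block boundary and truncation is done;
            // false -> the last block entered recovery (the !onBlockBoundary
            //          branch above) and truncation completes asynchronously.
            System.out.println("completed immediately: " + onBoundary);
        }
    }
}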

Example 44 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From the class INodeFile, method computeContentSummary:

@Override
public final ContentSummaryComputationContext computeContentSummary(int snapshotId, final ContentSummaryComputationContext summary) {
    summary.nodeIncluded(this);
    final ContentCounts counts = summary.getCounts();
    counts.addContent(Content.FILE, 1);
    final long fileLen = computeFileSize(snapshotId);
    counts.addContent(Content.LENGTH, fileLen);
    counts.addContent(Content.DISKSPACE, storagespaceConsumed(null).getStorageSpace());
    if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
        BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().getPolicy(getStoragePolicyID());
        List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
        for (StorageType t : storageTypes) {
            if (!t.supportTypeQuota()) {
                continue;
            }
            counts.addTypeSpace(t, fileLen);
        }
    }
    return summary;
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
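
On the client side, the per-type counts accumulated above surface through getContentSummary. A short sketch, assuming a running cluster, a hypothetical /foo path, and a Hadoop release where ContentSummary exposes getTypeConsumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;

public class TypeSpaceReportSketch {
    public static void main(String[] args) throws Exception {
        try (FileSystem fs = FileSystem.get(new Configuration())) {
            ContentSummary cs = fs.getContentSummary(new Path("/foo"));
            System.out.println("length: " + cs.getLength());
            System.out.println("disk space: " + cs.getSpaceConsumed());
            for (StorageType t : StorageType.values()) {
                // Per-type figures correspond to the addTypeSpace counts above.
                System.out.println(t + ": " + cs.getTypeConsumed(t));
            }
        }
    }
}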

Example 45 with BlockStoragePolicy

Use of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy in project hadoop by apache.

From the class FileWithSnapshotFeature, method updateQuotaAndCollectBlocks:

public void updateQuotaAndCollectBlocks(INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
    byte storagePolicyID = file.getStoragePolicyID();
    BlockStoragePolicy bsp = null;
    if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
        bsp = reclaimContext.storagePolicySuite().getPolicy(file.getStoragePolicyID());
    }
    QuotaCounts oldCounts;
    if (removed.snapshotINode != null) {
        oldCounts = new QuotaCounts.Builder().build();
        BlockInfo[] blocks = file.getBlocks() == null ? new BlockInfo[0] : file.getBlocks();
        for (BlockInfo b : blocks) {
            short replication = b.getReplication();
            long blockSize = b.isComplete() ? b.getNumBytes() : file.getPreferredBlockSize();
            oldCounts.addStorageSpace(blockSize * replication);
            if (bsp != null) {
                List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
                for (StorageType t : oldTypeChosen) {
                    if (t.supportTypeQuota()) {
                        oldCounts.addTypeSpace(t, blockSize);
                    }
                }
            }
        }
        AclFeature aclFeature = removed.getSnapshotINode().getAclFeature();
        if (aclFeature != null) {
            AclStorage.removeAclFeature(aclFeature);
        }
    } else {
        oldCounts = file.storagespaceConsumed(null);
    }
    getDiffs().combineAndCollectSnapshotBlocks(reclaimContext, file, removed);
    if (file.getBlocks() != null) {
        short replInDiff = getMaxBlockRepInDiffs(removed);
        short repl = (short) Math.max(file.getPreferredBlockReplication(), replInDiff);
        for (BlockInfo b : file.getBlocks()) {
            if (repl != b.getReplication()) {
                reclaimContext.collectedBlocks().addUpdateReplicationFactor(b, repl);
            }
        }
    }
    QuotaCounts current = file.storagespaceConsumed(bsp);
    reclaimContext.quotaDelta().add(oldCounts.subtract(current));
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts), AclFeature (org.apache.hadoop.hdfs.server.namenode.AclFeature)
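
This method runs inside the NameNode when a snapshot diff is discarded. A hedged client-side sketch of the operation that triggers it, assuming a running cluster and a snapshottable /foo directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotLifecycleSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) FileSystem.get(conf)) {
            // Hypothetical directory; it must already be snapshottable,
            // e.g. via `hdfs dfsadmin -allowSnapshot /foo`.
            Path dir = new Path("/foo");
            Path snapshotRoot = dfs.createSnapshot(dir, "s1");
            System.out.println("snapshot at " + snapshotRoot);
            // Deleting the snapshot removes its FileDiffs; on the NameNode this
            // is what leads into updateQuotaAndCollectBlocks to reconcile quota
            // and collect blocks that only the snapshot kept alive.
            dfs.deleteSnapshot(dir, "s1");
        }
    }
}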

Aggregations

BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 48
Test (org.junit.Test): 19
BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite): 15
Path (org.apache.hadoop.fs.Path): 12
StorageType (org.apache.hadoop.fs.StorageType): 12
IOException (java.io.IOException): 8
FileNotFoundException (java.io.FileNotFoundException): 6
ArrayList (java.util.ArrayList): 6
HashMap (java.util.HashMap): 5
LinkedHashMap (java.util.LinkedHashMap): 5
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 5
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 4
HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException): 3
BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi): 3
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 3
Field (java.lang.reflect.Field): 2
List (java.util.List): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2