Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.
From the class TestStoragePolicySummary, method testSortInDescendingOrder.
@Test
public void testSortInDescendingOrder() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  BlockStoragePolicy warm = bsps.getPolicy("WARM");
  BlockStoragePolicy cold = bsps.getPolicy("COLD");
  // DISK:3
  sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, hot);
  sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, hot);
  // DISK:1,ARCHIVE:2
  sts.add(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE }, warm);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.DISK, StorageType.ARCHIVE }, warm);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.DISK }, warm);
  // ARCHIVE:3
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(3, actualOutput.size());
  Map<String, Long> expectedOutput = new LinkedHashMap<>();
  expectedOutput.put("COLD|ARCHIVE:3(COLD)", 4L);
  expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3L);
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  Assert.assertEquals(expectedOutput.toString(), actualOutput.toString());
}
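For reference, the DISK:3, DISK:1,ARCHIVE:2 and ARCHIVE:3 layouts added above are what the default HOT, WARM and COLD policies choose for three replicas. A minimal standalone sketch, separate from the test (the printed format is illustrative only):

BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
for (String name : new String[] { "HOT", "WARM", "COLD" }) {
  BlockStoragePolicy policy = suite.getPolicy(name);
  // chooseStorageTypes(replication) returns the preferred storage type for each
  // requested replica, e.g. [DISK, ARCHIVE, ARCHIVE] for WARM with replication 3.
  System.out.println(name + " -> " + policy.chooseStorageTypes((short) 3));
}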
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.
From the class TestStoragePolicySummary, method testDifferentSpecifiedPolicies.
@Test
public void testDifferentSpecifiedPolicies() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  BlockStoragePolicy warm = bsps.getPolicy("WARM");
  BlockStoragePolicy cold = bsps.getPolicy("COLD");
  // DISK:3
  sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, hot);
  sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, hot);
  sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, warm);
  sts.add(new StorageType[] { StorageType.DISK, StorageType.DISK, StorageType.DISK }, cold);
  // DISK:1,ARCHIVE:2
  sts.add(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE }, hot);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.DISK, StorageType.ARCHIVE }, warm);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.DISK }, cold);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.DISK }, cold);
  // ARCHIVE:3
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, hot);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, hot);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, warm);
  sts.add(new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE }, cold);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(9, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  expectedOutput.put("COLD|DISK:1,ARCHIVE:2(WARM)", 2L);
  expectedOutput.put("HOT|ARCHIVE:3(COLD)", 2L);
  expectedOutput.put("WARM|DISK:3(HOT)", 1L);
  expectedOutput.put("COLD|DISK:3(HOT)", 1L);
  expectedOutput.put("WARM|ARCHIVE:3(COLD)", 1L);
  expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 1L);
  expectedOutput.put("COLD|ARCHIVE:3(COLD)", 1L);
  expectedOutput.put("HOT|DISK:1,ARCHIVE:2(WARM)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
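Both tests build their expected keys as <policy set on the file>|<actual block layout>(<policy that layout satisfies>), and both rely on a private convertToStringMap helper of the test class that is not shown in this snippet. Purely to document the key format, a hypothetical helper (not part of Hadoop) could look like:

// Hypothetical helper, shown only to illustrate the key format in expectedOutput.
static String summaryKey(BlockStoragePolicy filePolicy, String actualLayout,
    BlockStoragePolicy satisfiedPolicy) {
  // e.g. summaryKey(cold, "ARCHIVE:3", cold) returns "COLD|ARCHIVE:3(COLD)"
  return filePolicy.getName() + "|" + actualLayout
      + "(" + satisfiedPolicy.getName() + ")";
}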
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.
From the class TestFileWithSnapshotFeature, method testUpdateQuotaAndCollectBlocks.
@Test
public void testUpdateQuotaAndCollectBlocks() {
  FileDiffList diffs = new FileDiffList();
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
  FileDiff diff = mock(FileDiff.class);
  BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
  BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
  BlockInfo[] blocks = new BlockInfo[] {
      new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1) };
  BlockManager bm = mock(BlockManager.class);
  // No snapshot
  INodeFile file = mock(INodeFile.class);
  when(file.getFileWithSnapshotFeature()).thenReturn(sf);
  when(file.getBlocks()).thenReturn(blocks);
  when(file.getStoragePolicyID()).thenReturn((byte) 1);
  Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
  when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
  when(bsps.getPolicy(anyByte())).thenReturn(bsp);
  INode.BlocksMapUpdateInfo collectedBlocks = mock(INode.BlocksMapUpdateInfo.class);
  ArrayList<INode> removedINodes = new ArrayList<>();
  INode.ReclaimContext ctx = new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals(0, counts.getStorageSpace());
  Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
  // INode only exists in the snapshot
  INodeFile snapshotINode = mock(INodeFile.class);
  Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
  Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
  when(diff.getSnapshotINode()).thenReturn(snapshotINode);
  when(bsp.chooseStorageTypes(REPL_1)).thenReturn(Lists.newArrayList(SSD));
  when(bsp.chooseStorageTypes(REPL_3)).thenReturn(Lists.newArrayList(DISK));
  blocks[0].setReplication(REPL_3);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE, counts.getStorageSpace());
  Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
  Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
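The final assertions follow from the mocked policies: the snapshot copy is charged at the old replication on SSD, while the current file is charged at the new replication on DISK. REPL_1, REPL_3 and BLOCK_SIZE are constants of the test class that do not appear in the snippet; assuming the values 1, 3 and 1024, the delta works out as:

// Illustrative arithmetic only; the constant values below are assumptions.
short REPL_1 = 1;
short REPL_3 = 3;
long BLOCK_SIZE = 1024;
long storageSpaceDelta = (long) (REPL_3 - REPL_1) * BLOCK_SIZE; // 2 * BLOCK_SIZE
long diskDelta = BLOCK_SIZE;   // the block is now accounted on DISK (policy for REPL_3)
long ssdDelta = -BLOCK_SIZE;   // the SSD charge from the REPL_1 policy is released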
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.
From the class FSDirRenameOp, method unprotectedRenameTo.
/**
 * Rename src to dst.
 * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)}
 * for details related to rename semantics and exceptions.
 *
 * @param fsd FSDirectory
 * @param srcIIP resolved source path
 * @param dstIIP resolved destination path
 * @param timestamp modification time
 * @param collectedBlocks blocks to be removed
 * @param options Rename options
 * @return a RenameResult indicating whether a file/directory was overwritten
 *         at the destination path
 */
static RenameResult unprotectedRenameTo(FSDirectory fsd,
    final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp,
    BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
    throws IOException {
  assert fsd.hasWriteLock();
  boolean overwrite = options != null
      && Arrays.asList(options).contains(Options.Rename.OVERWRITE);
  final String src = srcIIP.getPath();
  final String dst = dstIIP.getPath();
  final String error;
  final INode srcInode = srcIIP.getLastINode();
  validateRenameSource(fsd, srcIIP);
  // validate the destination
  if (dst.equals(src)) {
    throw new FileAlreadyExistsException("The source " + src
        + " and destination " + dst + " are the same");
  }
  validateDestination(src, dst, srcInode);
  if (dstIIP.length() == 1) {
    error = "rename destination cannot be the root";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
    throw new IOException(error);
  }
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  fsd.ezManager.checkMoveValidity(srcIIP, dstIIP);
  final INode dstInode = dstIIP.getLastINode();
  List<INodeDirectory> snapshottableDirs = new ArrayList<>();
  if (dstInode != null) {
    // Destination exists
    validateOverwrite(src, dst, overwrite, srcInode, dstInode);
    FSDirSnapshotOp.checkSnapshot(fsd, dstIIP, snapshottableDirs);
  }
  INode dstParent = dstIIP.getINode(-2);
  if (dstParent == null) {
    error = "rename destination parent " + dst + " not found.";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
    throw new FileNotFoundException(error);
  }
  if (!dstParent.isDirectory()) {
    error = "rename destination parent " + dst + " is a file.";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
    throw new ParentNotDirectoryException(error);
  }
  // Ensure dst has quota to accommodate rename
  verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
  verifyQuotaForRename(fsd, srcIIP, dstIIP);
  RenameOperation tx = new RenameOperation(fsd, srcIIP, dstIIP);
  boolean undoRemoveSrc = true;
  tx.removeSrc();
  boolean undoRemoveDst = false;
  long removedNum = 0;
  try {
    if (dstInode != null) {
      // dst exists, remove it
      removedNum = tx.removeDst();
      if (removedNum != -1) {
        undoRemoveDst = true;
      }
    }
    // add src as dst to complete rename
    INodesInPath renamedIIP = tx.addSourceToDestination();
    if (renamedIIP != null) {
      undoRemoveSrc = false;
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
            + src + " is renamed to " + dst);
      }
      tx.updateMtimeAndLease(timestamp);
      // Collect the blocks and remove the lease for previous dst
      boolean filesDeleted = false;
      if (undoRemoveDst) {
        undoRemoveDst = false;
        if (removedNum > 0) {
          filesDeleted = tx.cleanDst(bsps, collectedBlocks);
        }
      }
      if (snapshottableDirs.size() > 0) {
        // There are snapshottable directories (without snapshots) to be
        // deleted. Need to update the SnapshotManager.
        fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
      }
      tx.updateQuotasInSourceTree(bsps);
      return createRenameResult(fsd, renamedIIP, filesDeleted, collectedBlocks);
    }
  } finally {
    if (undoRemoveSrc) {
      tx.restoreSource();
    }
    if (undoRemoveDst) {
      // Rename failed - restore dst
      tx.restoreDst(bsps);
    }
  }
  NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
      + "failed to rename " + src + " to " + dst);
  throw new IOException("rename from " + src + " to " + dst + " failed.");
}
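For context, this NameNode-side method implements the semantics of the client rename call that takes Options.Rename flags. A minimal client-side sketch, assuming a default Configuration and placeholder paths:

// Illustrative client call whose semantics unprotectedRenameTo implements on the
// NameNode; the configuration and paths below are placeholders.
FileContext fc = FileContext.getFileContext(new Configuration());
fc.rename(new Path("/user/example/src"), new Path("/user/example/dst"),
    Options.Rename.OVERWRITE);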
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hadoop by apache.
From the class TestApplyingStoragePolicy, method testSetAndGetStoragePolicy.
@Test
public void testSetAndGetStoragePolicy() throws IOException {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path fooz = new Path("/fooz");
  DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
  final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
  final BlockStoragePolicy warm = suite.getPolicy("WARM");
  final BlockStoragePolicy cold = suite.getPolicy("COLD");
  final BlockStoragePolicy hot = suite.getPolicy("HOT");
  assertEquals(fs.getStoragePolicy(foo), hot);
  assertEquals(fs.getStoragePolicy(bar), hot);
  try {
    fs.getStoragePolicy(fooz);
  } catch (Exception e) {
    assertTrue(e instanceof FileNotFoundException);
  }
  /*
   * test: set storage policy
   */
  fs.setStoragePolicy(foo, warm.getName());
  fs.setStoragePolicy(bar, cold.getName());
  try {
    fs.setStoragePolicy(fooz, warm.getName());
  } catch (Exception e) {
    assertTrue(e instanceof FileNotFoundException);
  }
  /*
   * test: get storage policy after set
   */
  assertEquals(fs.getStoragePolicy(foo), warm);
  assertEquals(fs.getStoragePolicy(bar), cold);
  try {
    fs.getStoragePolicy(fooz);
  } catch (Exception e) {
    assertTrue(e instanceof FileNotFoundException);
  }
}
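The available policies can also be listed from the client side without building a BlockStoragePolicySuite. A minimal sketch, assuming fs is the DistributedFileSystem used by the test and that the cluster runs a Hadoop release with FileSystem#getAllStoragePolicies:

// List the storage policies the cluster advertises; output format is illustrative.
for (BlockStoragePolicySpi policy : fs.getAllStoragePolicies()) {
  System.out.println(policy.getName() + " -> "
      + Arrays.toString(policy.getStorageTypes()));
}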