Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class TestMover, method testMoverWithPinnedBlocks.
/**
 * Test to verify that the mover can't move pinned blocks.
 */
@Test(timeout = 90000)
public void testMoverWithPinnedBlocks() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  // Set a larger retry-max-attempts value so that the test times out if
  // block pinning errors are not handled properly during block movement.
  conf.setInt(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, 10000);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverWithPinnedBlocks/file";
    Path dir = new Path("/testMoverWithPinnedBlocks");
    dfs.mkdirs(dir);
    // Write to DISK.
    dfs.setStoragePolicy(dir, "HOT");
    final FSDataOutputStream out = dfs.create(new Path(file));
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 3);
    out.write(fileData);
    out.close();
    // Verify the storage types before movement.
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    for (StorageType storageType : storageTypes) {
      Assert.assertTrue(StorageType.DISK == storageType);
    }
    // Add one SSD-based datanode to the cluster.
    StorageType[][] newtypes = new StorageType[][] { { StorageType.SSD } };
    startAdditionalDNs(conf, 1, newtypes, cluster);
    // Mock FsDatasetSpi#getPinning to report that the block is pinned.
    for (int i = 0; i < cluster.getDataNodes().size(); i++) {
      DataNode dn = cluster.getDataNodes().get(i);
      LOG.info("Simulate block pinning in datanode {}", dn);
      DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
    }
    // Move file blocks to the ONE_SSD policy.
    dfs.setStoragePolicy(dir, "ONE_SSD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", dir.toString() });
    int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
    Assert.assertEquals("Movement should fail", exitcode, rc);
  } finally {
    cluster.shutdown();
  }
}
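The per-replica type check in the loop above is a recurring pattern in these tests. A minimal sketch of pulling it into a reusable helper; the helper name assertAllReplicasOnType is hypothetical and not part of TestMover:

// Hypothetical helper: assert that every replica of a block sits on the
// expected storage type, using LocatedBlock#getStorageTypes().
private static void assertAllReplicasOnType(LocatedBlock lb,
    StorageType expected) {
  for (StorageType actual : lb.getStorageTypes()) {
    Assert.assertEquals(expected, actual);
  }
}

With such a helper, the pre-movement check above collapses to assertAllReplicasOnType(lb, StorageType.DISK).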
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class TestMover, method testScheduleSameBlock.
@Test
public void testScheduleSameBlock() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(4).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleSameBlock/file";
    {
      final FSDataOutputStream out = dfs.create(new Path(file));
      out.writeChars("testScheduleSameBlock");
      out.close();
    }
    final Mover mover = newMover(conf);
    mover.init();
    final Mover.Processor processor = mover.new Processor();
    final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    final List<MLocation> locations = MLocation.toLocations(lb);
    final MLocation ml = locations.get(0);
    final DBlock db = mover.newDBlock(lb, locations, null);
    final List<StorageType> storageTypes = new ArrayList<StorageType>(
        Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
    Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
    // Scheduling the same replica a second time should be rejected.
    Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
  } finally {
    cluster.shutdown();
  }
}
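The hard-coded list of two StorageType.DEFAULT entries stands in for the target types a storage policy would normally dictate. A sketch of deriving that list from a named policy instead; the suite/policy lookup shown here is an assumption for illustration and is not part of this test:

// Hypothetical sketch: derive the expected storage types from a policy
// rather than hard-coding StorageType.DEFAULT entries.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy hot = suite.getPolicy("HOT");
// One expected storage type per replica (replication factor 2 here).
List<StorageType> storageTypes = hot.chooseStorageTypes((short) 2);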
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class JsonUtilClient, method toContentSummary.
/** Convert a JSON map to a ContentSummary. */
static ContentSummary toContentSummary(final Map<?, ?> json) {
  if (json == null) {
    return null;
  }
  final Map<?, ?> m =
      (Map<?, ?>) json.get(ContentSummary.class.getSimpleName());
  final long length = ((Number) m.get("length")).longValue();
  final long fileCount = ((Number) m.get("fileCount")).longValue();
  final long directoryCount = ((Number) m.get("directoryCount")).longValue();
  final long quota = ((Number) m.get("quota")).longValue();
  final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
  final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
  final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
  Builder contentSummaryBuilder = new ContentSummary.Builder()
      .length(length)
      .fileCount(fileCount)
      .directoryCount(directoryCount)
      .quota(quota)
      .spaceConsumed(spaceConsumed)
      .spaceQuota(spaceQuota);
  if (typem != null) {
    // Per-storage-type quota and consumption, keyed by the StorageType name.
    for (StorageType t : StorageType.getTypesSupportingQuota()) {
      Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
      if (type != null) {
        contentSummaryBuilder = contentSummaryBuilder
            .typeQuota(t, ((Number) type.get("quota")).longValue())
            .typeConsumed(t, ((Number) type.get("consumed")).longValue());
      }
    }
  }
  return contentSummaryBuilder.build();
}
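For orientation, the JSON this method walks has roughly the following shape; the values are illustrative, only the keys read above matter, and the exact set of types under "typeQuota" depends on which types support quotas:

{
  "ContentSummary": {
    "directoryCount": 2,
    "fileCount": 1,
    "length": 24930,
    "quota": -1,
    "spaceConsumed": 24930,
    "spaceQuota": -1,
    "typeQuota": {
      "DISK":    { "quota": 10000, "consumed": 24930 },
      "SSD":     { "quota": -1,    "consumed": 0 },
      "ARCHIVE": { "quota": -1,    "consumed": 0 }
    }
  }
}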
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class JsonUtilClient, method toLocatedBlock.
/** Convert a JSON map to a LocatedBlock. */
static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
  if (m == null) {
    return null;
  }
  final ExtendedBlock b = toExtendedBlock((Map<?, ?>) m.get("block"));
  final DatanodeInfo[] locations =
      toDatanodeInfoArray(getList(m, "locations"));
  final long startOffset = ((Number) m.get("startOffset")).longValue();
  final boolean isCorrupt = (Boolean) m.get("isCorrupt");
  final DatanodeInfo[] cachedLocations =
      toDatanodeInfoArray(getList(m, "cachedLocations"));
  final StorageType[] storageTypes =
      toStorageTypeArray(getList(m, "storageTypes"));
  final LocatedBlock locatedblock = new LocatedBlock(b, locations, null,
      storageTypes, startOffset, isCorrupt, cachedLocations);
  locatedblock.setBlockToken(toBlockToken((Map<?, ?>) m.get("blockToken")));
  return locatedblock;
}
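toStorageTypeArray is not shown in this excerpt. A minimal sketch of what such a string-to-enum conversion could look like, assuming the JSON list carries the enum constant names (e.g. "DISK", "SSD"); the helper name parseStorageTypes is hypothetical:

// Hypothetical sketch: map JSON storage-type names back to the enum.
// StorageType.parseStorageType resolves names such as "DISK" or "SSD".
static StorageType[] parseStorageTypes(List<?> names) {
  if (names == null) {
    return null;
  }
  StorageType[] types = new StorageType[names.size()];
  for (int i = 0; i < names.size(); i++) {
    types[i] = StorageType.parseStorageType((String) names.get(i));
  }
  return types;
}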
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class PBHelperClient, method convertLocatedBlockProto.
public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
  if (proto == null) {
    return null;
  }
  List<DatanodeInfoProto> locs = proto.getLocsList();
  DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
  for (int i = 0; i < locs.size(); i++) {
    targets[i] = convert(locs.get(i));
  }
  final StorageType[] storageTypes =
      convertStorageTypes(proto.getStorageTypesList(), locs.size());
  final int storageIDsCount = proto.getStorageIDsCount();
  final String[] storageIDs;
  if (storageIDsCount == 0) {
    storageIDs = null;
  } else {
    Preconditions.checkState(storageIDsCount == locs.size());
    storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
  }
  byte[] indices = null;
  if (proto.hasBlockIndices()) {
    indices = proto.getBlockIndices().toByteArray();
  }
  // Set values from the isCached list, re-using references from targets.
  List<DatanodeInfo> cachedLocs = new ArrayList<>(locs.size());
  List<Boolean> isCachedList = proto.getIsCachedList();
  for (int i = 0; i < isCachedList.size(); i++) {
    if (isCachedList.get(i)) {
      cachedLocs.add(targets[i]);
    }
  }
  final LocatedBlock lb;
  if (indices == null) {
    lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets,
        storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
  } else {
    // Block indices are only present for striped (erasure-coded) blocks.
    lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets,
        storageIDs, storageTypes, indices, proto.getOffset(),
        proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    List<TokenProto> tokenProtos = proto.getBlockTokensList();
    Token<BlockTokenIdentifier>[] blockTokens = convertTokens(tokenProtos);
    ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
  }
  lb.setBlockToken(convert(proto.getBlockToken()));
  return lb;
}
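convertStorageTypes is called with locs.size() so that the result always has one StorageType per replica, even when the proto list is shorter. A minimal sketch of that padding behaviour, assuming missing trailing entries fall back to StorageType.DEFAULT; the helper name toStorageTypes is hypothetical and the real PBHelperClient implementation may differ in detail:

// Hypothetical sketch: pad a possibly-short proto list to one StorageType
// per replica; entries beyond the proto list default to StorageType.DEFAULT.
static StorageType[] toStorageTypes(List<StorageTypeProto> protos, int count) {
  StorageType[] types = new StorageType[count];
  for (int i = 0; i < count; i++) {
    types[i] = i < protos.size()
        ? PBHelperClient.convertStorageType(protos.get(i))
        : StorageType.DEFAULT;
  }
  return types;
}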