Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
The class DiskBalancerTestUtil, method newImbalancedCluster:
public static MiniDFSCluster newImbalancedCluster(final Configuration conf,
    final int numDatanodes, final long[] storageCapacities, final int defaultBlockSize,
    final int fileLen, final StartupOption dnOption)
    throws IOException, InterruptedException, TimeoutException {
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  final String fileName = "/" + UUID.randomUUID().toString();
  final Path filePath = new Path(fileName);
  Preconditions.checkNotNull(storageCapacities);
  Preconditions.checkArgument(storageCapacities.length == 2,
      "need to specify capacities for two storages.");
  // Write a file and restart the cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .storageCapacities(storageCapacities)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK })
      .storagesPerDatanode(2)
      .dnStartupOption(dnOption)
      .build();
  FsVolumeImpl source = null;
  FsVolumeImpl dest = null;
  cluster.waitActive();
  Random r = new Random();
  FileSystem fs = cluster.getFileSystem(0);
  TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
  DFSTestUtil.waitReplication(fs, filePath, (short) 1);
  cluster.restartDataNodes();
  cluster.waitActive();
  // Get the data node and move all data to one disk.
  for (int i = 0; i < numDatanodes; i++) {
    DataNode dnNode = cluster.getDataNodes().get(i);
    try (FsDatasetSpi.FsVolumeReferences refs =
        dnNode.getFSDataset().getFsVolumeReferences()) {
      source = (FsVolumeImpl) refs.get(0);
      dest = (FsVolumeImpl) refs.get(1);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
      DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
    }
  }
  cluster.restartDataNodes();
  cluster.waitActive();
  return cluster;
}
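A minimal usage sketch (not taken from the original sources) of how a disk-balancer test might consume this helper; the capacity, block-size, and file-length values are illustrative assumptions.

// Hypothetical caller: build a deliberately imbalanced two-volume cluster
// so that a DiskBalancer plan has data to move.
Configuration conf = new HdfsConfiguration();
long[] capacities = new long[] { 300 * 1024 * 1024L, 300 * 1024 * 1024L };  // assumed sizes
MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
    conf, 1, capacities, 1024, 50 * 1024, StartupOption.REGULAR);
try {
  // Every DataNode now has all of its blocks on the first volume and an
  // empty second volume, so the cluster is imbalanced by construction.
} finally {
  cluster.shutdown();
}

Note that the helper restarts the DataNodes and waits for the cluster to become active again after the move, so the NameNode's view of block locations is refreshed before the cluster is handed back to the caller.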
Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.
The class MoverProcessor, method chooseTarget:
boolean chooseTarget(Dispatcher.DBlock db, Dispatcher.Source source,
    List<StorageType> targetTypes, Matcher matcher) {
  final NetworkTopology cluster = dispatcher.getCluster();
  for (StorageType t : targetTypes) {
    final List<Dispatcher.DDatanode.StorageGroup> targets = storages.getTargetStorages(t);
    Collections.shuffle(targets);
    for (Dispatcher.DDatanode.StorageGroup target : targets) {
      if (matcher.match(cluster, source.getDatanodeInfo(), target.getDatanodeInfo())) {
        final Dispatcher.PendingMove pm = source.addPendingMove(db, target);
        if (pm != null) {
          dispatcher.executePendingMove(pm);
          return true;
        }
      }
    }
  }
  return false;
}
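For context, Hadoop's Mover (which MoverProcessor closely mirrors) drives a method like this with progressively looser placement constraints. The sketch below is an illustration under that assumption, not the exact SSM code; the chooseTargetInSameNode helper is assumed, and the Matcher constants come from org.apache.hadoop.hdfs.server.balancer.

// Sketch of a scheduleMoveReplica-style caller: prefer a target on the same
// node, then the same rack, and only then any other node in the cluster.
boolean scheduleMoveReplica(Dispatcher.DBlock db, Dispatcher.Source source,
    List<StorageType> targetTypes) {
  if (chooseTargetInSameNode(db, source, targetTypes)) {  // assumed helper
    return true;
  }
  if (chooseTarget(db, source, targetTypes, Matcher.SAME_RACK)) {
    return true;
  }
  return chooseTarget(db, source, targetTypes, Matcher.ANY_OTHER);
}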
Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.
The class TestAllSsdFileAction, method testAllSsd:
@Test
public void testAllSsd() throws Exception {
  final String file = "/testAllSsd/file";
  Path dir = new Path("/testAllSsd");
  dfs.mkdirs(dir);
  // write to DISK
  dfs.setStoragePolicy(dir, "HOT");
  final FSDataOutputStream out = dfs.create(new Path(file));
  out.writeChars("testAllSSD");
  out.close();
  // schedule move to SSD
  AllSsdFileAction action = new AllSsdFileAction();
  action.setDfsClient(dfsClient);
  action.setContext(smartContext);
  action.init(file);
  ActionStatus status = action.getActionStatus();
  action.run();
  while (!status.isFinished()) {
    System.out.println("Mover running time : " + StringUtils.formatTime(status.getRunningTime()));
    Thread.sleep(1000);
  }
  // verify after movement
  Assert.assertTrue(status.isSuccessful());
  LocatedBlock lb = dfsClient.getLocatedBlocks(file, 0).get(0);
  StorageType[] storageTypes = lb.getStorageTypes();
  for (StorageType storageType : storageTypes) {
    Assert.assertTrue(StorageType.SSD == storageType);
  }
}
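Because this per-replica assertion pattern recurs in these tests, a small helper could factor it out; the method name below is hypothetical and it assumes the same dfsClient field used in the test above.

// Hypothetical helper: assert that every replica of every block of a file
// is stored on the expected StorageType.
private void assertAllReplicasOn(String file, StorageType expected) throws IOException {
  for (LocatedBlock block : dfsClient.getLocatedBlocks(file, 0).getLocatedBlocks()) {
    for (StorageType actual : block.getStorageTypes()) {
      Assert.assertEquals(expected, actual);
    }
  }
}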
Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.
The class TestOneSsdFileAction, method testAllSsd:
@Test
public void testAllSsd() throws Exception {
  final String file = "/testArchive/file";
  Path dir = new Path("/testArchive");
  dfs.mkdirs(dir);
  // write to DISK
  dfs.setStoragePolicy(dir, "HOT");
  final FSDataOutputStream out = dfs.create(new Path(file));
  out.writeChars("testArchive");
  out.close();
  // schedule move to ONE_SSD (one replica on SSD, the rest on DISK)
  OneSsdFileAction action = new OneSsdFileAction();
  action.setDfsClient(dfsClient);
  action.setContext(smartContext);
  action.init(file);
  ActionStatus status = action.getActionStatus();
  action.run();
  while (!status.isFinished()) {
    System.out.println("Mover running time : " + StringUtils.formatTime(status.getRunningTime()));
    Thread.sleep(1000);
  }
  // verify after movement
  Assert.assertTrue(status.isSuccessful());
  LocatedBlock lb = dfsClient.getLocatedBlocks(file, 0).get(0);
  StorageType[] storageTypes = lb.getStorageTypes();
  int ssdCount = 0;
  int hddCount = 0;
  for (StorageType storageType : storageTypes) {
    if (storageType == StorageType.SSD) {
      ssdCount++;
    } else if (storageType == StorageType.DISK) {
      hddCount++;
    }
  }
  Assert.assertEquals(1, ssdCount);
  Assert.assertEquals(2, hddCount);
}
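The same replica counting can be expressed more compactly; the sketch below is a hypothetical rewrite of the loop above, not the original test code, and it additionally needs java.util.Map and java.util.EnumMap imports.

// Hypothetical rewrite of the counting loop: one tally per StorageType.
Map<StorageType, Integer> counts = new EnumMap<>(StorageType.class);
for (StorageType t : lb.getStorageTypes()) {
  counts.merge(t, 1, Integer::sum);
}
Assert.assertEquals(Integer.valueOf(1), counts.get(StorageType.SSD));   // ONE_SSD: one SSD replica
Assert.assertEquals(Integer.valueOf(2), counts.get(StorageType.DISK));  // remaining replicas on DISK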
Use of org.apache.hadoop.fs.StorageType in project SSM by Intel-bigdata.
The class MoverProcessor, method scheduleMoveBlock:
boolean scheduleMoveBlock(StorageTypeDiff diff, LocatedBlock lb) {
  final List<MLocation> locations = MLocation.toLocations(lb);
  Collections.shuffle(locations);
  final Dispatcher.DBlock db = newDBlock(lb, locations);
  for (final StorageType t : diff.existing) {
    for (final MLocation ml : locations) {
      final Dispatcher.Source source = storages.getSource(ml);
      if (ml.storageType == t && source != null) {
        // try to schedule one replica move.
        if (scheduleMoveReplica(db, source, diff.expected)) {
          return true;
        }
      }
    }
  }
  return false;
}
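A sketch of the caller side, modeled on Hadoop's Mover rather than taken from MoverProcessor itself; the StorageTypeDiff constructor and removeOverlap call are assumptions based on the Hadoop class of the same name, and locatedBlocks and expectedTypes are assumed inputs (the file's blocks and the storage policy's expected types).

// For each block of a file, compute which replica storage types differ from
// the policy's expected types and try to schedule one replica move.
for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
  StorageTypeDiff diff = new StorageTypeDiff(expectedTypes, lb.getStorageTypes());
  if (!diff.removeOverlap(true)) {  // assumed signature, as in Hadoop's Mover
    if (scheduleMoveBlock(diff, lb)) {
      // one replica of this block is now queued on the dispatcher
    }
  }
}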