Use of org.smartdata.model.action.FileMovePlan in the SSM project by Intel-bigdata: class TestMoveFileAction, method testParallelMove.
/**
 * Writes two files into a directory stored on DISK (policy "HOT"), then runs
 * two independent AllSsdFileAction instances to move each file to SSD and
 * asserts that both actions report the expected post-run state.
 */
@Test(timeout = 300000)
public void testParallelMove() throws Exception {
  String dir = "/test";
  String file1 = "/test/file1";
  String file2 = "/test/file2";
  dfs.mkdirs(new Path(dir));
  // write to DISK
  dfs.setStoragePolicy(new Path(dir), "HOT");
  // try-with-resources guarantees the streams are closed (and data flushed)
  // even if a write fails, so the mover sees fully written files.
  try (FSDataOutputStream out1 = dfs.create(new Path(file1))) {
    out1.writeChars("testtesttest1");
  }
  try (FSDataOutputStream out2 = dfs.create(new Path(file2))) {
    out2.writeChars("testtesttest2");
  }
  // move to SSD
  AllSsdFileAction moveFileAction1 = new AllSsdFileAction();
  moveFileAction1.setDfsClient(dfsClient);
  moveFileAction1.setContext(smartContext);
  // Diamond operator: the original used a raw HashMap, which defeats generics.
  Map<String, String> args1 = new HashMap<>();
  args1.put(MoveFileAction.FILE_PATH, dir);
  FileMovePlan plan1 = createPlan(file1, "SSD");
  args1.put(MoveFileAction.MOVE_PLAN, plan1.toString());
  AllSsdFileAction moveFileAction2 = new AllSsdFileAction();
  moveFileAction2.setDfsClient(dfsClient);
  moveFileAction2.setContext(smartContext);
  Map<String, String> args2 = new HashMap<>();
  args2.put(MoveFileAction.FILE_PATH, dir);
  FileMovePlan plan2 = createPlan(file2, "SSD");
  args2.put(MoveFileAction.MOVE_PLAN, plan2.toString());
  // init and run both actions, checking each result immediately.
  moveFileAction1.init(args1);
  moveFileAction2.init(args2);
  moveFileAction1.run();
  Assert.assertTrue(moveFileAction1.getExpectedAfterRun());
  moveFileAction2.run();
  Assert.assertTrue(moveFileAction2.getExpectedAfterRun());
}
Use of org.smartdata.model.action.FileMovePlan in the SSM project by Intel-bigdata: class TestMoveFileAction, method testMove.
/**
 * Writes a file into a DISK-backed ("HOT") directory, then runs a
 * MoveFileAction with a "ONE_SSD" policy and a precomputed move plan,
 * asserting the action reports the expected post-run state.
 */
@Test(timeout = 300000)
public void testMove() throws Exception {
  String dir = "/test";
  String file = "/test/file";
  dfs.mkdirs(new Path(dir));
  // write to DISK
  dfs.setStoragePolicy(new Path(dir), "HOT");
  // BUG FIX: the original never closed this stream, leaking the resource and
  // risking unflushed data when the move runs. try-with-resources closes it.
  try (FSDataOutputStream out = dfs.create(new Path(file))) {
    out.writeChars("testtesttest");
  }
  // move to SSD
  MoveFileAction moveFileAction = new MoveFileAction();
  moveFileAction.setDfsClient(dfsClient);
  moveFileAction.setContext(smartContext);
  // Diamond operator: the original used a raw HashMap, which defeats generics.
  Map<String, String> args = new HashMap<>();
  args.put(MoveFileAction.FILE_PATH, dir);
  String storageType = "ONE_SSD";
  args.put(MoveFileAction.STORAGE_POLICY, storageType);
  FileMovePlan plan = createPlan(file, storageType);
  args.put(MoveFileAction.MOVE_PLAN, plan.toString());
  // init and run
  moveFileAction.init(args);
  moveFileAction.run();
  Assert.assertTrue(moveFileAction.getExpectedAfterRun());
}
Use of org.smartdata.model.action.FileMovePlan in the SSM project by Intel-bigdata: class MoverScheduler, method onSchedule.
/**
 * Decides whether a mover cmdlet action may run now. For recognized mover
 * action types it maps the type to an HDFS storage policy, builds a
 * FileMovePlan for the target file, applies optional rate limiting, attaches
 * the serialized plan to the action's arguments, and locks the file.
 *
 * Returns SUCCESS for non-mover actions (pass-through) and for successfully
 * planned moves; RETRY when throttled; FAIL on missing path or any error.
 */
@Override
public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo, LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
// Actions this scheduler does not manage pass through unmodified.
if (!actions.contains(action.getActionType())) {
return ScheduleResult.SUCCESS;
}
String file = action.getArgs().get(HdfsAction.FILE_PATH);
if (file == null) {
actionInfo.appendLog("File path not specified!\n");
return ScheduleResult.FAIL;
}
// Map the mover action type to its HDFS storage policy name.
// NOTE(review): if `actions` ever contains a type not listed below, `policy`
// stays null and is passed to processNamespace — presumably the `actions`
// set exactly matches these cases; confirm when modifying either.
String policy = null;
switch(action.getActionType()) {
case "allssd":
policy = "ALL_SSD";
break;
case "onessd":
policy = "ONE_SSD";
break;
case "archive":
policy = "COLD";
break;
case "alldisk":
policy = "HOT";
break;
case "onedisk":
policy = "WARM";
break;
case "ramdisk":
policy = "LAZY_PERSIST";
break;
}
try {
// Build the concrete block-move plan for this file under the chosen policy.
FileMovePlan plan = planMaker.processNamespace(new Path(file), policy);
if (rateLimiter != null) {
// Two possible understandings here: file level and replica level
// Throttle by whole megabytes of data to move; sub-MB moves are exempt.
int len = (int) (plan.getFileLengthToMove() >> 20);
if (len > 0) {
if (!rateLimiter.tryAcquire(len)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Cancel Scheduling action {} due to throttling. {}", actionInfo, plan);
}
// Tokens unavailable right now — ask the dispatcher to retry later.
return ScheduleResult.RETRY;
}
}
}
plan.setNamenode(nnUri);
// Ship the plan to the executor via the action's argument map.
action.getArgs().put(MoveFileAction.MOVE_PLAN, plan.toString());
// Lock the file only after planning succeeded; released elsewhere
// (presumably when the action completes — confirm against onActionFinished).
fileLock.add(action.getArgs().get(HdfsAction.FILE_PATH));
return ScheduleResult.SUCCESS;
} catch (IOException e) {
actionInfo.appendLogLine(e.getMessage());
LOG.error("Exception while processing " + action, e);
return ScheduleResult.FAIL;
} catch (Throwable t) {
// Catch-all so one bad action cannot take down the scheduler loop.
actionInfo.appendLogLine(t.getMessage());
LOG.error("Unexpected exception when scheduling move " + policy + " '" + file + "'.", t);
return ScheduleResult.FAIL;
}
}
Use of org.smartdata.model.action.FileMovePlan in the SSM project by Intel-bigdata: class TestSchedulePlan, method testJsonConvertion.
/**
 * Round-trips a FileMovePlan through Gson JSON serialization and verifies
 * that the file name and block id list survive the conversion intact.
 */
@Test
public void testJsonConvertion() throws Exception {
  URI namenode = new URI("hdfs://localhost:8888");
  String filePath = "/test/foofile";
  // Populate a plan with two block moves to random datanode UUIDs.
  FileMovePlan original = new FileMovePlan(namenode, filePath);
  original.addPlan(1L, UUID.randomUUID().toString(), "ARCHIVE", "127.0.0.1", 10001, "SSD");
  original.addPlan(2L, UUID.randomUUID().toString(), "ARCHIVE", "127.0.0.1", 10002, "SSD");
  // Serialize and immediately deserialize in one expression.
  Gson gson = new Gson();
  FileMovePlan roundTripped = gson.fromJson(gson.toJson(original), FileMovePlan.class);
  Assert.assertEquals(original.getFileName(), roundTripped.getFileName());
  Assert.assertEquals(original.getBlockIds(), roundTripped.getBlockIds());
}
Aggregations