Search in sources:

Example 6 with FileMovePlan

use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

The class TestMoveFileAction, method testParallelMove.

@Test(timeout = 300000)
public void testParallelMove() throws Exception {
    String dir = "/test";
    String file1 = "/test/file1";
    String file2 = "/test/file2";
    dfs.mkdirs(new Path(dir));
    // Write both files with the HOT policy so they land on DISK and the
    // SSD moves below actually have replicas to relocate.
    dfs.setStoragePolicy(new Path(dir), "HOT");
    FSDataOutputStream out1 = dfs.create(new Path(file1));
    final String str1 = "testtesttest1";
    out1.writeChars(str1);
    out1.close();
    FSDataOutputStream out2 = dfs.create(new Path(file2));
    final String str2 = "testtesttest2";
    out2.writeChars(str2);
    out2.close();
    // Build two independent AllSsdFileAction instances, one per file,
    // each driven by its own pre-computed move plan.
    AllSsdFileAction moveFileAction1 = new AllSsdFileAction();
    moveFileAction1.setDfsClient(dfsClient);
    moveFileAction1.setContext(smartContext);
    // Diamond operator instead of a raw HashMap — avoids the unchecked warning.
    Map<String, String> args1 = new HashMap<>();
    args1.put(MoveFileAction.FILE_PATH, dir);
    FileMovePlan plan1 = createPlan(file1, "SSD");
    args1.put(MoveFileAction.MOVE_PLAN, plan1.toString());
    AllSsdFileAction moveFileAction2 = new AllSsdFileAction();
    moveFileAction2.setDfsClient(dfsClient);
    moveFileAction2.setContext(smartContext);
    Map<String, String> args2 = new HashMap<>();
    args2.put(MoveFileAction.FILE_PATH, dir);
    FileMovePlan plan2 = createPlan(file2, "SSD");
    args2.put(MoveFileAction.MOVE_PLAN, plan2.toString());
    // Init both actions first, then run them back to back and verify
    // each reports the expected post-run state.
    moveFileAction1.init(args1);
    moveFileAction2.init(args2);
    moveFileAction1.run();
    Assert.assertTrue(moveFileAction1.getExpectedAfterRun());
    moveFileAction2.run();
    Assert.assertTrue(moveFileAction2.getExpectedAfterRun());
}
Also used : HashMap(java.util.HashMap) AllSsdFileAction(org.smartdata.hdfs.action.AllSsdFileAction) FileMovePlan(org.smartdata.model.action.FileMovePlan) Test(org.junit.Test)

Example 7 with FileMovePlan

use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

The class TestMoveFileAction, method testMove.

@Test(timeout = 300000)
public void testMove() throws Exception {
    String dir = "/test";
    String file = "/test/file";
    dfs.mkdirs(new Path(dir));
    // Write the file with the HOT policy so it lands on DISK and the
    // ONE_SSD move below has a replica to relocate.
    dfs.setStoragePolicy(new Path(dir), "HOT");
    FSDataOutputStream out = dfs.create(new Path(file));
    final String str = "testtesttest";
    out.writeChars(str);
    // Close the stream before moving: the original leaked the stream and
    // risked unflushed data / an open lease while the mover ran.
    out.close();
    // Move the file to ONE_SSD via an explicit MoveFileAction + plan.
    MoveFileAction moveFileAction = new MoveFileAction();
    moveFileAction.setDfsClient(dfsClient);
    moveFileAction.setContext(smartContext);
    // Diamond operator instead of a raw HashMap — avoids the unchecked warning.
    Map<String, String> args = new HashMap<>();
    args.put(MoveFileAction.FILE_PATH, dir);
    String storageType = "ONE_SSD";
    args.put(MoveFileAction.STORAGE_POLICY, storageType);
    FileMovePlan plan = createPlan(file, storageType);
    args.put(MoveFileAction.MOVE_PLAN, plan.toString());
    // Init, run, and verify the action reports the expected post-run state.
    moveFileAction.init(args);
    moveFileAction.run();
    Assert.assertTrue(moveFileAction.getExpectedAfterRun());
}
Also used : MoveFileAction(org.smartdata.hdfs.action.MoveFileAction) HashMap(java.util.HashMap) FileMovePlan(org.smartdata.model.action.FileMovePlan) Test(org.junit.Test)

Example 8 with FileMovePlan

use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

The class MoverScheduler, method onSchedule.

@Override
public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo, LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
    // Actions we don't manage are passed through untouched.
    String actionType = action.getActionType();
    if (!actions.contains(actionType)) {
        return ScheduleResult.SUCCESS;
    }
    String filePath = action.getArgs().get(HdfsAction.FILE_PATH);
    if (filePath == null) {
        actionInfo.appendLog("File path not specified!\n");
        return ScheduleResult.FAIL;
    }
    // Translate the SSM action name into the HDFS storage policy it implements.
    String storagePolicy = null;
    if ("allssd".equals(actionType)) {
        storagePolicy = "ALL_SSD";
    } else if ("onessd".equals(actionType)) {
        storagePolicy = "ONE_SSD";
    } else if ("archive".equals(actionType)) {
        storagePolicy = "COLD";
    } else if ("alldisk".equals(actionType)) {
        storagePolicy = "HOT";
    } else if ("onedisk".equals(actionType)) {
        storagePolicy = "WARM";
    } else if ("ramdisk".equals(actionType)) {
        storagePolicy = "LAZY_PERSIST";
    }
    try {
        FileMovePlan movePlan = planMaker.processNamespace(new Path(filePath), storagePolicy);
        if (rateLimiter != null) {
            // Two possible understandings here: file level and replica level
            int mbToMove = (int) (movePlan.getFileLengthToMove() >> 20);
            // Short-circuit keeps the original nested-if semantics intact.
            if (mbToMove > 0 && !rateLimiter.tryAcquire(mbToMove)) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Cancel Scheduling action {} due to throttling. {}", actionInfo, movePlan);
                }
                return ScheduleResult.RETRY;
            }
        }
        movePlan.setNamenode(nnUri);
        action.getArgs().put(MoveFileAction.MOVE_PLAN, movePlan.toString());
        fileLock.add(action.getArgs().get(HdfsAction.FILE_PATH));
        return ScheduleResult.SUCCESS;
    } catch (IOException e) {
        actionInfo.appendLogLine(e.getMessage());
        LOG.error("Exception while processing " + action, e);
        return ScheduleResult.FAIL;
    } catch (Throwable t) {
        actionInfo.appendLogLine(t.getMessage());
        LOG.error("Unexpected exception when scheduling move " + storagePolicy + " '" + filePath + "'.", t);
        return ScheduleResult.FAIL;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) FileMovePlan(org.smartdata.model.action.FileMovePlan)

Example 9 with FileMovePlan

use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

The class TestSchedulePlan, method testJsonConvertion.

@Test
public void testJsonConvertion() throws Exception {
    // Round-trip a FileMovePlan through Gson and verify the identifying
    // fields (file name, block ids) survive serialization.
    URI namenode = new URI("hdfs://localhost:8888");
    String filePath = "/test/foofile";
    FileMovePlan original = new FileMovePlan(namenode, filePath);
    original.addPlan(1L, UUID.randomUUID().toString(), "ARCHIVE", "127.0.0.1", 10001, "SSD");
    original.addPlan(2L, UUID.randomUUID().toString(), "ARCHIVE", "127.0.0.1", 10002, "SSD");
    Gson gson = new Gson();
    FileMovePlan restored = gson.fromJson(gson.toJson(original), FileMovePlan.class);
    Assert.assertEquals(original.getFileName(), restored.getFileName());
    Assert.assertEquals(original.getBlockIds(), restored.getBlockIds());
}
Also used : Gson(com.google.gson.Gson) URI(java.net.URI) FileMovePlan(org.smartdata.model.action.FileMovePlan) Test(org.junit.Test)

Aggregations

FileMovePlan (org.smartdata.model.action.FileMovePlan)9 Test (org.junit.Test)5 URI (java.net.URI)4 IOException (java.io.IOException)3 HashMap (java.util.HashMap)3 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)3 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)3 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)3 Configuration (org.apache.hadoop.conf.Configuration)2 DatanodeInfoWithStorage (org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage)2 Gson (com.google.gson.Gson)1 HashSet (java.util.HashSet)1 Path (org.apache.hadoop.fs.Path)1 DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing)1 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)1 DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport)1 AllSsdFileAction (org.smartdata.hdfs.action.AllSsdFileAction)1 ArchiveFileAction (org.smartdata.hdfs.action.ArchiveFileAction)1 MoveFileAction (org.smartdata.hdfs.action.MoveFileAction)1 StorageGroup (org.smartdata.hdfs.action.move.StorageGroup)1