Search in sources:

Example 1 with FileMovePlan

Use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

From class MovePlanMaker, method processNamespace:

/**
 * Builds a {@link FileMovePlan} describing the block moves needed so the
 * given file's replicas match the destination storage policy.
 *
 * @param targetPath path of the file (or directory) to plan moves for
 * @param destPolicy name of the destination storage policy
 * @return the generated plan; for a directory the plan is only flagged as a
 *         directory and contains no block moves
 * @throws IOException if the file cannot be found or its located status
 *         cannot be retrieved
 */
public synchronized FileMovePlan processNamespace(Path targetPath, String destPolicy) throws IOException {
    schedulePlan = new FileMovePlan();
    String filePath = targetPath.toUri().getPath();
    schedulePlan.setFileName(filePath);
    schedulePlan.setDestStoragePolicy(destPolicy);
    HdfsFileStatus status = dfs.getFileInfo(filePath);
    if (status == null) {
        throw new IOException("File '" + filePath + "' not found!");
    }
    if (status.isDir()) {
        // Directories carry no blocks to move; callers handle them separately.
        schedulePlan.setDir(true);
        return schedulePlan;
    }
    byte currSpId = status.getStoragePolicy();
    String currSpName = mapPolicyIdToName.get(currSpId);
    schedulePlan.setCurrStoragePolicy(currSpName);
    if (currSpName == null || !currSpName.equals(destPolicy)) {
        try {
            dfs.setStoragePolicy(filePath, destPolicy);
        } catch (IOException ignored) {
            // Best effort: even if updating the policy attribute fails, the
            // block moves scheduled below still perform the actual migration.
        }
    }
    // Fetch the located status (with block locations) for this single file.
    DirectoryListing files = dfs.listPaths(filePath, HdfsFileStatus.EMPTY_NAME, true);
    HdfsFileStatus[] statuses = files.getPartialListing();
    if (statuses == null || statuses.length == 0) {
        throw new IOException("File '" + filePath + "' not found!");
    }
    if (statuses.length != 1) {
        throw new IOException("Get '" + filePath + "' file located status error.");
    }
    status = statuses[0];
    if (status.isDir()) {
        throw new IOException("Unexpected '" + filePath + "' directory located status error.");
    }
    schedulePlan.setFileId(status.getFileId());
    schedulePlan.setModificationTime(status.getModificationTime());
    schedulePlan.setDir(false);
    schedulePlan.setFileLength(status.getLen());
    // Reuse the already-computed path instead of re-deriving it from targetPath.
    processFile(filePath, (HdfsLocatedFileStatus) status, destPolicy);
    return schedulePlan;
}
Also used : DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) IOException(java.io.IOException) FileMovePlan(org.smartdata.model.action.FileMovePlan)

Example 2 with FileMovePlan

Use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

From class TestMoverExecutor, method moveCrossNodes:

/**
 * Moves one replica of a single-block file to a datanode that currently
 * holds no replica, then tallies the resulting storage types.
 * DataXceiver.replaceBlock does the actual replica transfer for this case.
 */
@Test
public void moveCrossNodes() throws Exception {
    Configuration conf = smartContext.getConf();
    URI namenode = cluster.getURI();
    if (namenode == null) {
        throw new Exception("Cannot get namenode url.");
    }
    generateFile("One-block file");
    FileMovePlan plan = new FileMovePlan(namenode, fileName);
    // Collect the datanodes currently holding the file's block.
    NameNodeConnector nnc = new NameNodeConnector(namenode, conf);
    HashSet<DatanodeInfo> holders = new HashSet<>();
    ExtendedBlock block = null;
    for (LocatedBlock locatedBlock : getLocatedBlocks(dfsClient, fileName, plan)) {
        block = locatedBlock.getBlock();
        holders.addAll(Arrays.asList(locatedBlock.getLocations()));
    }
    final DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport();
    nnc.close();
    // Target the first live datanode that holds no replica of the file.
    for (DatanodeStorageReport report : reports) {
        DatanodeInfo candidate = report.getDatanodeInfo();
        if (holders.contains(candidate) || block == null) {
            continue;
        }
        StorageGroup source = new StorageGroup(holders.iterator().next(), StorageType.DISK.toString());
        StorageGroup target = new StorageGroup(candidate, StorageType.SSD.toString());
        addPlan(plan, source, target, block.getBlockId());
        break;
    }
    // Do mover executor
    MoverStatus status = new MoverStatus();
    int failedMoves = new MoverExecutor(status, conf, 10, 500).executeMove(plan);
    Assert.assertEquals(0, failedMoves);
    // Check storage after move
    // Thread.sleep(100000);
    int ssdCount = 0;
    int diskCount = 0;
    for (LocatedBlock locatedBlock : getLocatedBlocks(dfsClient, fileName)) {
        for (DatanodeInfo node : locatedBlock.getLocations()) {
            Assert.assertTrue(node instanceof DatanodeInfoWithStorage);
            StorageType type = ((DatanodeInfoWithStorage) node).getStorageType();
            if (StorageType.SSD.equals(type)) {
                ssdCount++;
            } else if (StorageType.DISK.equals(type)) {
                diskCount++;
            }
        }
    }
// Assert.assertEquals(1, ssdCount);
// Assert.assertEquals(2, diskCount);
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) DatanodeStorageReport(org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) URI(java.net.URI) FileMovePlan(org.smartdata.model.action.FileMovePlan) IOException(java.io.IOException) DatanodeInfoWithStorage(org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 3 with FileMovePlan

Use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

From class TestMoverExecutor, method moveInSameNode:

/**
 * Moves every replica of a multi-block file from DISK to SSD on the same
 * datanode, then polls block locations until all replicas report SSD.
 */
@Test
public void moveInSameNode() throws Exception {
    Configuration conf = smartContext.getConf();
    URI namenode = cluster.getURI();
    String blockContent = "This is a block with 50B.";
    StringBuilder builder = new StringBuilder();
    for (int i = 0; i < 50; i++) {
        builder.append(blockContent);
    }
    generateFile(builder.toString());
    FileMovePlan plan = new FileMovePlan(namenode, fileName);
    // Schedule a same-node move (DISK -> SSD) for every replica of each block.
    for (LocatedBlock locatedBlock : getLocatedBlocks(dfsClient, fileName, plan)) {
        long blockId = locatedBlock.getBlock().getBlockId();
        for (DatanodeInfo node : locatedBlock.getLocations()) {
            addPlan(plan,
                    new StorageGroup(node, StorageType.DISK.toString()),
                    new StorageGroup(node, StorageType.SSD.toString()),
                    blockId);
        }
    }
    // Run the mover and expect every scheduled move to succeed.
    MoverStatus status = new MoverStatus();
    int failedMoves = new MoverExecutor(status, conf, 10, 3).executeMove(plan);
    Assert.assertEquals(0, failedMoves);
    cluster.triggerBlockReports();
    // Block reports may take a moment to land; retry the check up to 3 times.
    boolean allOnSsd = true;
    for (int attempt = 0; attempt < 3; attempt++) {
        allOnSsd = true;
        for (LocatedBlock locatedBlock : getLocatedBlocks(dfsClient, fileName)) {
            for (DatanodeInfo node : locatedBlock.getLocations()) {
                if (((DatanodeInfoWithStorage) node).getStorageType() != StorageType.SSD) {
                    allOnSsd = false;
                }
            }
        }
        if (allOnSsd) {
            break;
        }
        Thread.sleep(500);
    }
    if (!allOnSsd) {
        Assert.fail("Not the expected storage type SSD.");
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) URI(java.net.URI) FileMovePlan(org.smartdata.model.action.FileMovePlan) DatanodeInfoWithStorage(org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage) Test(org.junit.Test)

Example 4 with FileMovePlan

Use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

From class TestMoveFileAction, method moveFile:

/**
 * Schedules and runs an archive action (move to ARCHIVE storage) for the
 * given file, then asserts the action reports success.
 *
 * @param file path of the file to move
 * @throws Exception if creating the plan or running the action fails
 */
private void moveFile(String file) throws Exception {
    // Schedule move to ARCHIVE (the original comment said SSD, but the
    // action and plan below target the ARCHIVE storage policy).
    ArchiveFileAction action = new ArchiveFileAction();
    action.setDfsClient(dfsClient);
    action.setContext(smartContext);
    // Diamond operator instead of a raw HashMap keeps the map type-safe.
    Map<String, String> args = new HashMap<>();
    args.put(ArchiveFileAction.FILE_PATH, file);
    FileMovePlan plan = createPlan(file, "ARCHIVE");
    args.put(MoveFileAction.MOVE_PLAN, plan.toString());
    action.init(args);
    action.run();
    Assert.assertTrue(action.getExpectedAfterRun());
}
Also used : HashMap(java.util.HashMap) ArchiveFileAction(org.smartdata.hdfs.action.ArchiveFileAction) FileMovePlan(org.smartdata.model.action.FileMovePlan)

Example 5 with FileMovePlan

Use of org.smartdata.model.action.FileMovePlan in project SSM by Intel-bigdata.

From class TestMoveFileAction, method createPlan:

/**
 * Builds a {@link FileMovePlan} that, for every replica of every block of
 * the given file, schedules a same-node move from DISK to the requested
 * storage type.
 *
 * @param dir         path of the file whose blocks are planned
 * @param storageType target storage type name for every replica
 * @return the populated move plan
 * @throws Exception if block locations cannot be fetched
 */
private FileMovePlan createPlan(String dir, String storageType) throws Exception {
    URI namenode = cluster.getURI();
    FileMovePlan plan = new FileMovePlan(namenode, dir);
    // Each replica stays on its current datanode; only the medium changes.
    for (LocatedBlock locatedBlock : getLocatedBlocks(dfsClient, dir, plan)) {
        long blockId = locatedBlock.getBlock().getBlockId();
        for (DatanodeInfo node : locatedBlock.getLocations()) {
            addPlan(plan,
                    new StorageGroup(node, StorageType.DISK.toString()),
                    new StorageGroup(node, storageType),
                    blockId);
        }
    }
    return plan;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageGroup(org.smartdata.hdfs.action.move.StorageGroup) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) URI(java.net.URI) FileMovePlan(org.smartdata.model.action.FileMovePlan)

Aggregations

FileMovePlan (org.smartdata.model.action.FileMovePlan)9 Test (org.junit.Test)5 URI (java.net.URI)4 IOException (java.io.IOException)3 HashMap (java.util.HashMap)3 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)3 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)3 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)3 Configuration (org.apache.hadoop.conf.Configuration)2 DatanodeInfoWithStorage (org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage)2 Gson (com.google.gson.Gson)1 HashSet (java.util.HashSet)1 Path (org.apache.hadoop.fs.Path)1 DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing)1 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)1 DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport)1 AllSsdFileAction (org.smartdata.hdfs.action.AllSsdFileAction)1 ArchiveFileAction (org.smartdata.hdfs.action.ArchiveFileAction)1 MoveFileAction (org.smartdata.hdfs.action.MoveFileAction)1 StorageGroup (org.smartdata.hdfs.action.move.StorageGroup)1