
Example 6 with CmdletManager

Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

Class TestCompressDecompress, method testListLocatedStatus:

@Test
public void testListLocatedStatus() throws Exception {
    // if (!loadedNative()) {
    // return;
    // }
    waitTillSSMExitSafeMode();
    // initDB();
    SmartFileSystem smartDfs = new SmartFileSystem();
    smartDfs.initialize(dfs.getUri(), ssm.getContext().getConf());
    int arraySize = 1024 * 1024 * 8;
    String fileName = "/ssm/compression/file4";
    byte[] bytes = prepareFile(fileName, arraySize);
    // For an uncompressed file, SmartFileSystem and DistributedFileSystem behave exactly the same
    RemoteIterator<LocatedFileStatus> iter1 = dfs.listLocatedStatus(new Path(fileName));
    LocatedFileStatus stat1 = iter1.next();
    RemoteIterator<LocatedFileStatus> iter2 = smartDfs.listLocatedStatus(new Path(fileName));
    LocatedFileStatus stat2 = iter2.next();
    Assert.assertEquals(stat1.getPath(), stat2.getPath());
    Assert.assertEquals(stat1.getBlockSize(), stat2.getBlockSize());
    Assert.assertEquals(stat1.getLen(), stat2.getLen());
    BlockLocation[] blockLocations1 = stat1.getBlockLocations();
    BlockLocation[] blockLocations2 = stat2.getBlockLocations();
    Assert.assertEquals(blockLocations1.length, blockLocations2.length);
    for (int i = 0; i < blockLocations1.length; i++) {
        Assert.assertEquals(blockLocations1[i].getLength(), blockLocations2[i].getLength());
        Assert.assertEquals(blockLocations1[i].getOffset(), blockLocations2[i].getOffset());
    }
    // Test compressed file
    int bufSize = 1024 * 1024;
    CmdletManager cmdletManager = ssm.getCmdletManager();
    long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName + " -bufSize " + bufSize + " -codec " + codec);
    waitTillActionDone(cmdId);
    RemoteIterator<LocatedFileStatus> iter3 = dfs.listLocatedStatus(new Path(fileName));
    LocatedFileStatus stat3 = iter3.next();
    BlockLocation[] blockLocations3 = stat3.getBlockLocations();
    RemoteIterator<LocatedFileStatus> iter4 = smartDfs.listLocatedStatus(new Path(fileName));
    LocatedFileStatus stat4 = iter4.next();
    BlockLocation[] blockLocations4 = stat4.getBlockLocations();
    Assert.assertEquals(stat1.getPath(), stat4.getPath());
    Assert.assertEquals(stat1.getBlockSize(), stat4.getBlockSize());
    Assert.assertEquals(stat1.getLen(), stat4.getLen());
}
Also used: Path (org.apache.hadoop.fs.Path), SmartFileSystem (org.smartdata.hadoop.filesystem.SmartFileSystem), CmdletManager (org.smartdata.server.engine.CmdletManager), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), BlockLocation (org.apache.hadoop.fs.BlockLocation), Test (org.junit.Test)
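The fixture helpers used above (prepareFile, waitTillActionDone, the dfs and ssm fields, and codec) come from the test's base class and are not shown on this page. A minimal sketch of what such a waitTillActionDone helper could look like, built only from the CmdletManager and CmdletState APIs visible in these examples; the poll interval and retry cap are illustrative, not the project's actual values:

// Hypothetical helper: polls a cmdlet's state until it reaches DONE.
private void waitTillActionDone(long cmdId) throws Exception {
    int retries = 0;
    while (true) {
        CmdletState state = ssm.getCmdletManager().getCmdletInfo(cmdId).getState();
        if (state == CmdletState.DONE) {
            return;
        }
        if (state == CmdletState.FAILED) {
            throw new Exception("Cmdlet " + cmdId + " failed.");
        }
        if (++retries >= 20) { // illustrative cap: ~20 seconds
            throw new Exception("Timed out waiting for cmdlet " + cmdId + " to finish.");
        }
        Thread.sleep(1000);
    }
}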

Example 7 with CmdletManager

Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

Class TestMoverScheduler, method testScheduler:

@Test(timeout = 40000)
public void testScheduler() throws Exception {
    waitTillSSMExitSafeMode();
    String file = "/testfile";
    Path filePath = new Path(file);
    int numBlocks = 2;
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, numBlocks * DEFAULT_BLOCK_SIZE, (short) 3, 100);
    fs.setStoragePolicy(filePath, "ALL_SSD");
    CmdletManager cmdletManager = ssm.getCmdletManager();
    long cmdId = cmdletManager.submitCmdlet("allssd -file /testfile");
    while (true) {
        Thread.sleep(1000);
        CmdletState state = cmdletManager.getCmdletInfo(cmdId).getState();
        if (state == CmdletState.DONE) {
            return;
        } else if (state == CmdletState.FAILED) {
            Assert.fail("Mover failed.");
        } else {
            System.out.println(state);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), CmdletManager (org.smartdata.server.engine.CmdletManager), CmdletState (org.smartdata.model.CmdletState), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
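Across these examples the argument to submitCmdlet is a plain string of the form "<action> -file <path> [options...]". Only the forms that actually appear on this page are confirmed; two of them side by side:

// Cmdlet strings as used in Examples 6 and 7 (codec is a test fixture field).
long moveId = cmdletManager.submitCmdlet("allssd -file /testfile");
long compressId = cmdletManager.submitCmdlet(
        "compress -file /ssm/compression/file4 -bufSize 1048576 -codec " + codec);

Note that the polling loop above relies on @Test(timeout = 40000) to bound the wait, which is why the loop itself carries no retry counter.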

Example 8 with CmdletManager

Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

Class SmartEngine, method init:

@Override
public void init() throws IOException {
    statesMgr = new StatesManager(serverContext);
    services.add(statesMgr);
    cmdletManager = new CmdletManager(serverContext);
    services.add(cmdletManager);
    agentService = new AgentExecutorService(conf, cmdletManager);
    hazelcastService = new HazelcastExecutorService(cmdletManager);
    cmdletManager.registerExecutorService(agentService);
    cmdletManager.registerExecutorService(hazelcastService);
    ruleMgr = new RuleManager(serverContext, statesMgr, cmdletManager);
    services.add(ruleMgr);
    for (AbstractService s : services) {
        s.init();
    }
}
Also used: CmdletManager (org.smartdata.server.engine.CmdletManager), RuleManager (org.smartdata.server.engine.RuleManager), StatesManager (org.smartdata.server.engine.StatesManager), HazelcastExecutorService (org.smartdata.server.engine.cmdlet.HazelcastExecutorService), AbstractService (org.smartdata.AbstractService), AgentExecutorService (org.smartdata.server.engine.cmdlet.agent.AgentExecutorService)
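Note the construction order in init(): CmdletManager is created before the two executor services, since AgentExecutorService and HazelcastExecutorService both take it as a constructor argument and are then registered back into it, and RuleManager comes last because it depends on both StatesManager and CmdletManager. The services list preserves this order, so the final loop initializes each dependency before its dependents.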

Example 9 with CmdletManager

Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

Class TestCacheScheduler, method testCacheUncacheFile:

@Test(timeout = 100000)
public void testCacheUncacheFile() throws Exception {
    waitTillSSMExitSafeMode();
    String filePath = "/testFile";
    FSDataOutputStream out = dfs.create(new Path(filePath));
    out.writeChars("test content");
    out.close();
    CmdletManager cmdletManager = ssm.getCmdletManager();
    long cid = cmdletManager.submitCmdlet("cache -file " + filePath);
    while (true) {
        if (cmdletManager.getCmdletInfo(cid).getState().equals(CmdletState.DONE)) {
            break;
        }
    }
    RemoteIterator<CachePoolEntry> poolEntries = dfsClient.listCachePools();
    while (poolEntries.hasNext()) {
        CachePoolEntry poolEntry = poolEntries.next();
        if (poolEntry.getInfo().getPoolName().equals(CacheScheduler.SSM_POOL)) {
            // The SSM cache pool exists; break (not return) so the scheduler and lock checks below still run.
            break;
        }
        fail("A cache pool should be created by SSM: " + CacheScheduler.SSM_POOL);
    }
    // Currently there is only one scheduler for the cache action
    ActionScheduler actionScheduler = cmdletManager.getSchedulers("cache").get(0);
    assertTrue(actionScheduler instanceof CacheScheduler);
    Set<String> fileLock = ((CacheScheduler) actionScheduler).getFileLock();
    // No file remains locked after the action finishes.
    assertTrue(fileLock.isEmpty());
    long cid1 = cmdletManager.submitCmdlet("uncache -file " + filePath);
    while (true) {
        if (cmdletManager.getCmdletInfo(cid1).getState().equals(CmdletState.DONE)) {
            break;
        }
    }
    // No file remains locked after the action finishes.
    assertTrue(fileLock.isEmpty());
}
Also used: Path (org.apache.hadoop.fs.Path), CmdletManager (org.smartdata.server.engine.CmdletManager), ActionScheduler (org.smartdata.model.action.ActionScheduler), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), CacheScheduler (org.smartdata.hdfs.scheduler.CacheScheduler), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
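The getFileLock() assertions exercise what appears to be the scheduler's concurrency guard: CacheScheduler holds a path in its lock set while a cache or uncache action is in flight, and the set must be empty once the action reaches DONE. Also note that the two unbounded polling loops have no sleep or retry cap and are backstopped only by @Test(timeout = 100000).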

Example 10 with CmdletManager

Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

Class TestCompressDecompress, method testSubmitCompressionAction:

@Test
public void testSubmitCompressionAction() throws Exception {
    // if (!loadedNative()) {
    // return;
    // }
    waitTillSSMExitSafeMode();
    // initDB();
    int arraySize = 1024 * 1024 * 80;
    String fileName = "/ssm/compression/file1";
    byte[] bytes = prepareFile(fileName, arraySize);
    MetaStore metaStore = ssm.getMetaStore();
    int bufSize = 1024 * 1024 * 10;
    CmdletManager cmdletManager = ssm.getCmdletManager();
    long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName + " -bufSize " + bufSize + " -codec " + codec);
    waitTillActionDone(cmdId);
    FileState fileState = null;
    // metastore test
    int n = 0;
    while (true) {
        fileState = metaStore.getFileState(fileName);
        if (FileState.FileType.COMPRESSION.equals(fileState.getFileType())) {
            break;
        }
        Thread.sleep(1000);
        if (n++ >= 20) {
            throw new Exception("Time out in waiting for getting expect file state.");
        }
    }
    Assert.assertEquals(FileState.FileStage.DONE, fileState.getFileStage());
    Assert.assertTrue(fileState instanceof CompressionFileState);
    CompressionFileState compressionFileState = (CompressionFileState) fileState;
    Assert.assertEquals(fileName, compressionFileState.getPath());
    Assert.assertEquals(bufSize, compressionFileState.getBufferSize());
    Assert.assertEquals(codec, compressionFileState.getCompressionImpl());
    Assert.assertEquals(arraySize, compressionFileState.getOriginalLength());
    Assert.assertTrue(compressionFileState.getCompressedLength() > 0);
    Assert.assertTrue(compressionFileState.getCompressedLength() < compressionFileState.getOriginalLength());
    // data accuracy test
    byte[] input = new byte[arraySize];
    DFSInputStream dfsInputStream = smartDFSClient.open(fileName);
    int offset = 0;
    while (true) {
        int len = dfsInputStream.read(input, offset, arraySize - offset);
        if (len <= 0) {
            break;
        }
        offset += len;
    }
    Assert.assertArrayEquals("original array not equals compress/decompressed array", input, bytes);
}
Also used: MetaStore (org.smartdata.metastore.MetaStore), FileState (org.smartdata.model.FileState), CompressionFileState (org.smartdata.model.CompressionFileState), CmdletManager (org.smartdata.server.engine.CmdletManager), DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream), IOException (java.io.IOException), Test (org.junit.Test)
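The manual loop above drains the stream into input one read at a time. An equivalent, more compact read-back using Hadoop's org.apache.hadoop.io.IOUtils is sketched below; this is an alternative, not what the project's test actually uses:

// Sketch: read exactly arraySize bytes back through the SmartDFSClient.
// IOUtils.readFully throws an EOFException if the stream ends early.
byte[] input = new byte[arraySize];
try (DFSInputStream in = smartDFSClient.open(fileName)) {
    IOUtils.readFully(in, input, 0, arraySize);
}
Assert.assertArrayEquals("decompressed data does not match the original", bytes, input);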

Aggregations

CmdletManager (org.smartdata.server.engine.CmdletManager): 13 usages
Test (org.junit.Test): 11 usages
Path (org.apache.hadoop.fs.Path): 5 usages
IOException (java.io.IOException): 3 usages
CmdletState (org.smartdata.model.CmdletState): 3 usages
ActionScheduler (org.smartdata.model.action.ActionScheduler): 3 usages
DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream): 2 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 2 usages
SmartDFSClient (org.smartdata.hdfs.client.SmartDFSClient): 2 usages
CompressionScheduler (org.smartdata.hdfs.scheduler.CompressionScheduler): 2 usages
CompressionFileState (org.smartdata.model.CompressionFileState): 2 usages
FileState (org.smartdata.model.FileState): 2 usages
Random (java.util.Random): 1 usage
BlockLocation (org.apache.hadoop.fs.BlockLocation): 1 usage
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1 usage
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 1 usage
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 usage
CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 1 usage
AbstractService (org.smartdata.AbstractService): 1 usage
SmartFileSystem (org.smartdata.hadoop.filesystem.SmartFileSystem): 1 usage