Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.
The class TestCompressDecompress, method testListLocatedStatus.
@Test
public void testListLocatedStatus() throws Exception {
  // if (!loadedNative()) {
  //   return;
  // }
  waitTillSSMExitSafeMode();
  // initDB();
  SmartFileSystem smartDfs = new SmartFileSystem();
  smartDfs.initialize(dfs.getUri(), ssm.getContext().getConf());
  int arraySize = 1024 * 1024 * 8;
  String fileName = "/ssm/compression/file4";
  byte[] bytes = prepareFile(fileName, arraySize);
  // For an uncompressed file, SmartFileSystem and DistributedFileSystem
  // behave exactly the same.
  RemoteIterator<LocatedFileStatus> iter1 = dfs.listLocatedStatus(new Path(fileName));
  LocatedFileStatus stat1 = iter1.next();
  RemoteIterator<LocatedFileStatus> iter2 = smartDfs.listLocatedStatus(new Path(fileName));
  LocatedFileStatus stat2 = iter2.next();
  Assert.assertEquals(stat1.getPath(), stat2.getPath());
  Assert.assertEquals(stat1.getBlockSize(), stat2.getBlockSize());
  Assert.assertEquals(stat1.getLen(), stat2.getLen());
  BlockLocation[] blockLocations1 = stat1.getBlockLocations();
  BlockLocation[] blockLocations2 = stat2.getBlockLocations();
  Assert.assertEquals(blockLocations1.length, blockLocations2.length);
  for (int i = 0; i < blockLocations1.length; i++) {
    Assert.assertEquals(blockLocations1[i].getLength(), blockLocations2[i].getLength());
    Assert.assertEquals(blockLocations1[i].getOffset(), blockLocations2[i].getOffset());
  }
  // Test the compressed file: compress it through a cmdlet first.
  int bufSize = 1024 * 1024;
  CmdletManager cmdletManager = ssm.getCmdletManager();
  long cmdId = cmdletManager.submitCmdlet(
      "compress -file " + fileName + " -bufSize " + bufSize + " -codec " + codec);
  waitTillActionDone(cmdId);
  // Raw DFS view of the now-compressed file.
  RemoteIterator<LocatedFileStatus> iter3 = dfs.listLocatedStatus(new Path(fileName));
  LocatedFileStatus stat3 = iter3.next();
  BlockLocation[] blockLocations3 = stat3.getBlockLocations();
  // SmartFileSystem view: it should still report the original (uncompressed)
  // path, block size, and length, so the comparison is against stat1.
  RemoteIterator<LocatedFileStatus> iter4 = smartDfs.listLocatedStatus(new Path(fileName));
  LocatedFileStatus stat4 = iter4.next();
  BlockLocation[] blockLocations4 = stat4.getBlockLocations();
  Assert.assertEquals(stat1.getPath(), stat4.getPath());
  Assert.assertEquals(stat1.getBlockSize(), stat4.getBlockSize());
  Assert.assertEquals(stat1.getLen(), stat4.getLen());
}
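The helper waitTillActionDone is defined elsewhere in the test class and is not shown on this page. A minimal sketch of what such a helper might look like, reusing only the CmdletManager calls that appear in the snippets on this page (the real implementation may differ):

// Hedged sketch of a waitTillActionDone-style helper; the name and details
// are assumptions for illustration, not the actual SSM test code.
private void waitTillActionDone(long cmdId) throws Exception {
  while (true) {
    Thread.sleep(1000);
    CmdletState state = ssm.getCmdletManager().getCmdletInfo(cmdId).getState();
    if (state == CmdletState.DONE) {
      return;
    } else if (state == CmdletState.FAILED) {
      throw new Exception("Cmdlet " + cmdId + " failed.");
    }
  }
}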
Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.
The class TestMoverScheduler, method testScheduler.
@Test(timeout = 40000)
public void testScheduler() throws Exception {
  waitTillSSMExitSafeMode();
  String file = "/testfile";
  Path filePath = new Path(file);
  int numBlocks = 2;
  DistributedFileSystem fs = cluster.getFileSystem();
  DFSTestUtil.createFile(fs, filePath, numBlocks * DEFAULT_BLOCK_SIZE, (short) 3, 100);
  fs.setStoragePolicy(filePath, "ALL_SSD");
  CmdletManager cmdletManager = ssm.getCmdletManager();
  long cmdId = cmdletManager.submitCmdlet("allssd -file /testfile");
  while (true) {
    Thread.sleep(1000);
    CmdletState state = cmdletManager.getCmdletInfo(cmdId).getState();
    if (state == CmdletState.DONE) {
      return;
    } else if (state == CmdletState.FAILED) {
      Assert.fail("Mover failed.");
    } else {
      System.out.println(state);
    }
  }
}
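The test only waits for the cmdlet to reach DONE; it does not inspect where the blocks ended up. If one also wanted to verify that the replicas actually landed on SSD, a hedged follow-up check might look like the sketch below (assuming BlockLocation#getStorageTypes and org.apache.hadoop.fs.StorageType are available in the Hadoop version under test):

// Hypothetical follow-up check, not part of the original test: confirm every
// replica of the moved file now reports the SSD storage type.
BlockLocation[] locs = fs.getFileBlockLocations(filePath, 0, numBlocks * DEFAULT_BLOCK_SIZE);
for (BlockLocation loc : locs) {
  for (StorageType type : loc.getStorageTypes()) {
    Assert.assertEquals(StorageType.SSD, type);
  }
}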
Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.
The class SmartEngine, method init.
@Override
public void init() throws IOException {
  statesMgr = new StatesManager(serverContext);
  services.add(statesMgr);
  cmdletManager = new CmdletManager(serverContext);
  services.add(cmdletManager);
  agentService = new AgentExecutorService(conf, cmdletManager);
  hazelcastService = new HazelcastExecutorService(cmdletManager);
  cmdletManager.registerExecutorService(agentService);
  cmdletManager.registerExecutorService(hazelcastService);
  ruleMgr = new RuleManager(serverContext, statesMgr, cmdletManager);
  services.add(ruleMgr);
  for (AbstractService s : services) {
    s.init();
  }
}
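Here init() only constructs the services and wires CmdletManager to its executor services; starting and stopping them happens elsewhere. A sketch of what a companion lifecycle could look like under the same AbstractService contract (an assumption for illustration, not the actual SmartEngine code):

// Hedged sketch: start services in registration order, stop them in reverse.
// Method names follow the AbstractService pattern used in init() above.
@Override
public void start() throws IOException {
  for (AbstractService s : services) {
    s.start();
  }
}

@Override
public void stop() throws IOException {
  for (int i = services.size() - 1; i >= 0; i--) {
    services.get(i).stop();
  }
}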
Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.
The class TestCacheScheduler, method testCacheUncacheFile.
@Test(timeout = 100000)
public void testCacheUncacheFile() throws Exception {
  waitTillSSMExitSafeMode();
  String filePath = "/testFile";
  FSDataOutputStream out = dfs.create(new Path(filePath));
  out.writeChars("test content");
  out.close();
  CmdletManager cmdletManager = ssm.getCmdletManager();
  long cid = cmdletManager.submitCmdlet("cache -file " + filePath);
  while (!CmdletState.DONE.equals(cmdletManager.getCmdletInfo(cid).getState())) {
    Thread.sleep(100);
  }
  // SSM should have created its own cache pool. Scan all pools rather than
  // bailing out on the first entry.
  boolean ssmPoolFound = false;
  RemoteIterator<CachePoolEntry> poolEntries = dfsClient.listCachePools();
  while (poolEntries.hasNext()) {
    CachePoolEntry poolEntry = poolEntries.next();
    if (poolEntry.getInfo().getPoolName().equals(CacheScheduler.SSM_POOL)) {
      ssmPoolFound = true;
      break;
    }
  }
  assertTrue("A cache pool should be created by SSM: " + CacheScheduler.SSM_POOL,
      ssmPoolFound);
  // Currently, there is only one scheduler for the cache action.
  ActionScheduler actionScheduler = cmdletManager.getSchedulers("cache").get(0);
  assertTrue(actionScheduler instanceof CacheScheduler);
  Set<String> fileLock = ((CacheScheduler) actionScheduler).getFileLock();
  // No file is locked after the action has finished.
  assertTrue(fileLock.isEmpty());
  long cid1 = cmdletManager.submitCmdlet("uncache -file " + filePath);
  while (!CmdletState.DONE.equals(cmdletManager.getCmdletInfo(cid1).getState())) {
    Thread.sleep(100);
  }
  // No file is locked after the action has finished.
  assertTrue(fileLock.isEmpty());
}
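The two polling loops above share the same shape; a bounded helper could factor them out and fail fast instead of relying on the JUnit timeout if a cmdlet never completes. A sketch assuming only the CmdletManager API already used in this test (the helper name is hypothetical):

// Hypothetical helper, not part of the original test: poll until the cmdlet
// reaches DONE or the given timeout expires.
private void waitUntilDone(CmdletManager cmdletManager, long cid, long timeoutMs)
    throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (!CmdletState.DONE.equals(cmdletManager.getCmdletInfo(cid).getState())) {
    if (System.currentTimeMillis() > deadline) {
      fail("Cmdlet " + cid + " did not reach DONE within " + timeoutMs + " ms");
    }
    Thread.sleep(100);
  }
}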
Use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.
The class TestCompressDecompress, method testSubmitCompressionAction.
@Test
public void testSubmitCompressionAction() throws Exception {
  // if (!loadedNative()) {
  //   return;
  // }
  waitTillSSMExitSafeMode();
  // initDB();
  int arraySize = 1024 * 1024 * 80;
  String fileName = "/ssm/compression/file1";
  byte[] bytes = prepareFile(fileName, arraySize);
  MetaStore metaStore = ssm.getMetaStore();
  int bufSize = 1024 * 1024 * 10;
  CmdletManager cmdletManager = ssm.getCmdletManager();
  long cmdId = cmdletManager.submitCmdlet(
      "compress -file " + fileName + " -bufSize " + bufSize + " -codec " + codec);
  waitTillActionDone(cmdId);
  // Metastore test: wait until the file state is updated to COMPRESSION.
  FileState fileState = null;
  int n = 0;
  while (true) {
    fileState = metaStore.getFileState(fileName);
    if (FileState.FileType.COMPRESSION.equals(fileState.getFileType())) {
      break;
    }
    Thread.sleep(1000);
    if (n++ >= 20) {
      throw new Exception("Timed out waiting for the expected file state.");
    }
  }
  Assert.assertEquals(FileState.FileStage.DONE, fileState.getFileStage());
  Assert.assertTrue(fileState instanceof CompressionFileState);
  CompressionFileState compressionFileState = (CompressionFileState) fileState;
  Assert.assertEquals(fileName, compressionFileState.getPath());
  Assert.assertEquals(bufSize, compressionFileState.getBufferSize());
  Assert.assertEquals(codec, compressionFileState.getCompressionImpl());
  Assert.assertEquals(arraySize, compressionFileState.getOriginalLength());
  Assert.assertTrue(compressionFileState.getCompressedLength() > 0);
  Assert.assertTrue(compressionFileState.getCompressedLength()
      < compressionFileState.getOriginalLength());
  // Data accuracy test: reading through SmartDFSClient should transparently
  // decompress and return the original bytes.
  byte[] input = new byte[arraySize];
  DFSInputStream dfsInputStream = smartDFSClient.open(fileName);
  try {
    int offset = 0;
    while (true) {
      int len = dfsInputStream.read(input, offset, arraySize - offset);
      if (len <= 0) {
        break;
      }
      offset += len;
    }
  } finally {
    dfsInputStream.close();
  }
  Assert.assertArrayEquals(
      "Original array does not equal the compressed/decompressed array", input, bytes);
}
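prepareFile is another helper defined outside this page. A plausible sketch, assuming it writes arraySize random bytes to fileName on the test DFS and returns them for the later comparison (the actual test utility may differ in seed handling and write options):

// Hedged sketch of a prepareFile-style helper; an assumption for
// illustration, not the actual SSM test code.
private byte[] prepareFile(String fileName, int arraySize) throws IOException {
  byte[] bytes = new byte[arraySize];
  new java.util.Random().nextBytes(bytes);
  try (FSDataOutputStream out = dfs.create(new Path(fileName))) {
    out.write(bytes);
  }
  return bytes;
}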