Usage example of org.smartdata.hdfs.scheduler.CompressionScheduler from the Intel-bigdata/SSM project:
class TestCompressDecompress, method testDecompress.
/**
 * Verifies decompression behavior end to end:
 * <ol>
 *   <li>a plain (uncompressed) file must be rejected by
 *       {@link CompressionScheduler#supportDecompression},</li>
 *   <li>after a "compress" cmdlet the file's state is
 *       {@code CompressionFileState},</li>
 *   <li>after a "decompress" cmdlet the compressed state is cleared,</li>
 *   <li>the HDFS storage policy is preserved across both operations.</li>
 * </ol>
 *
 * @throws Exception if cmdlet submission or HDFS access fails
 */
@Test
public void testDecompress() throws Exception {
  int arraySize = 1024 * 1024 * 8;
  String filePath = "/ssm/compression/file4";
  prepareFile(filePath, arraySize);
  dfsClient.setStoragePolicy(filePath, "COLD");
  HdfsFileStatus fileStatusBefore = dfsClient.getFileInfo(filePath);

  CmdletManager cmdletManager = ssm.getCmdletManager();
  // Exactly one scheduler should handle the "decompress" action, and it
  // must be the CompressionScheduler. assertEquals reports actual values
  // on failure, unlike assertTrue(size() == 1).
  List<ActionScheduler> schedulers = cmdletManager.getSchedulers("decompress");
  Assert.assertEquals(1, schedulers.size());
  ActionScheduler scheduler = schedulers.get(0);
  Assert.assertTrue(scheduler instanceof CompressionScheduler);
  // A file that was never compressed cannot be decompressed.
  Assert.assertFalse(((CompressionScheduler) scheduler).supportDecompression(filePath));

  // Compress the given file and wait for the action to complete.
  long cmdId = cmdletManager.submitCmdlet("compress -file " + filePath + " -codec " + codec);
  waitTillActionDone(cmdId);
  FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
  Assert.assertTrue(fileState instanceof CompressionFileState);

  // Compression must not change the storage policy. Policy id 0 means
  // "unspecified", in which case there is nothing to compare.
  HdfsFileStatus fileStatusAfterCompress = dfsClient.getFileInfo(filePath);
  if (fileStatusBefore.getStoragePolicy() != 0) {
    Assert.assertEquals(
        fileStatusBefore.getStoragePolicy(), fileStatusAfterCompress.getStoragePolicy());
  }

  // Decompress the now-compressed file; its compressed state must be cleared.
  cmdId = cmdletManager.submitCmdlet("decompress -file " + filePath);
  waitTillActionDone(cmdId);
  fileState = HadoopUtil.getFileState(dfsClient, filePath);
  Assert.assertFalse(fileState instanceof CompressionFileState);

  // Decompression must not change the storage policy either.
  HdfsFileStatus fileStatusAfterDeCompress = dfsClient.getFileInfo(filePath);
  if (fileStatusBefore.getStoragePolicy() != 0) {
    Assert.assertEquals(
        fileStatusBefore.getStoragePolicy(), fileStatusAfterDeCompress.getStoragePolicy());
  }
}
Usage example of org.smartdata.hdfs.scheduler.CompressionScheduler from the Intel-bigdata/SSM project:
class TestCompressDecompress, method testCompressDecompressDir.
/**
 * Verifies that the {@link CompressionScheduler} refuses both compression
 * and decompression for a directory path: only regular files are eligible.
 *
 * @throws Exception if directory creation or scheduler lookup fails
 */
@Test
public void testCompressDecompressDir() throws Exception {
  String dir = "/ssm/compression";
  dfsClient.mkdirs(dir, null, true);

  CmdletManager cmdletManager = ssm.getCmdletManager();
  // Exactly one scheduler should handle the "decompress" action.
  // assertEquals gives a useful failure message, unlike assertTrue(size() == 1).
  List<ActionScheduler> schedulers = cmdletManager.getSchedulers("decompress");
  Assert.assertEquals(1, schedulers.size());
  ActionScheduler scheduler = schedulers.get(0);
  Assert.assertTrue(scheduler instanceof CompressionScheduler);

  // Cast once instead of repeating it at each call site.
  CompressionScheduler compressionScheduler = (CompressionScheduler) scheduler;
  // A directory can be neither compressed nor decompressed.
  Assert.assertFalse(compressionScheduler.supportCompression(dir));
  Assert.assertFalse(compressionScheduler.supportDecompression(dir));
}
(End of aggregated usage examples.)