Example 1 with CmdletManager

use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

the class TestCompressDecompress method testUnsupportedMethod.

@Test
public void testUnsupportedMethod() throws Exception {
    // Concat, truncate and append are not supported
    // Create raw file
    Path path = new Path("/test/compress_files/");
    dfs.mkdirs(path);
    int rawLength = 1024 * 1024 * 8;
    String fileName = "/test/compress_files/file_0";
    DFSTestUtil.createFile(dfs, new Path(fileName), rawLength, (short) 1, 1);
    int bufSize = 1024 * 1024;
    waitTillSSMExitSafeMode();
    CmdletManager cmdletManager = ssm.getCmdletManager();
    // Compress files
    long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName + " -bufSize " + bufSize + " -codec " + codec);
    waitTillActionDone(cmdId);
    SmartDFSClient smartDFSClient = new SmartDFSClient(smartContext.getConf());
    // Test unsupported methods on compressed file
    try {
        smartDFSClient.concat(fileName + "target", new String[] { fileName });
        // Without this, the test would pass silently if concat unexpectedly succeeded.
        Assert.fail("Expected concat on a compressed file to throw an IOException");
    } catch (IOException e) {
        Assert.assertTrue(e.getMessage().contains("Compressed"));
    }
    /*
    try {
        smartDFSClient.truncate(fileName, 100L);
    } catch (IOException e) {
        Assert.assertTrue(e.getMessage().contains("Compressed"));
    }
    */
}
Also used: Path (org.apache.hadoop.fs.Path), CmdletManager (org.smartdata.server.engine.CmdletManager), IOException (java.io.IOException), SmartDFSClient (org.smartdata.hdfs.client.SmartDFSClient), Test (org.junit.Test)
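
The cmdlet submitted above is SSM's plain-text action DSL: "compress -file <path> -bufSize <n> -codec <codec>". A hypothetical helper (not part of the test class) that assembles the string, so the format lives in one place:

private static String compressCmdlet(String file, int bufSize, String codec) {
    // Mirrors the literal cmdlet string built inline in the tests on this page.
    return "compress -file " + file + " -bufSize " + bufSize + " -codec " + codec;
}

With it, the submission above would read: cmdletManager.submitCmdlet(compressCmdlet(fileName, bufSize, codec)).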

Example 2 with CmdletManager

use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

the class TestCompressDecompress method waitTillActionDone.

private void waitTillActionDone(long cmdId) throws Exception {
    int n = 0;
    CmdletManager cmdletManager = ssm.getCmdletManager();
    while (true) {
        Thread.sleep(1000);
        // Give up after 20 one-second polls (~20s). Counting before the null
        // check below ensures the loop cannot spin forever while the cmdlet
        // info is still unavailable.
        if (++n > 20) {
            throw new Exception("Time out in waiting for cmdlet: " + cmdId);
        }
        CmdletInfo info = cmdletManager.getCmdletInfo(cmdId);
        if (info == null) {
            continue;
        }
        CmdletState state = info.getState();
        if (state == CmdletState.DONE) {
            return;
        } else if (state == CmdletState.FAILED) {
            // Reasonably assume that there is only one action wrapped by a given cmdlet.
            long aid = info.getAids().get(0);
            Assert.fail("Action failed. " + cmdletManager.getActionInfo(aid).getLog());
        } else {
            System.out.println(state);
        }
    }
}
Also used: CmdletManager (org.smartdata.server.engine.CmdletManager), CmdletState (org.smartdata.model.CmdletState), CmdletInfo (org.smartdata.model.CmdletInfo), IOException (java.io.IOException)
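
waitTillActionDone polls by counting one-second sleeps. A wall-clock deadline is a common alternative when sleep granularity may vary; a minimal sketch, assuming the same ssm handle and CmdletManager API used above:

private void waitTillActionDone(long cmdId, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        Thread.sleep(1000);
        CmdletInfo info = ssm.getCmdletManager().getCmdletInfo(cmdId);
        if (info == null) {
            continue;
        }
        if (info.getState() == CmdletState.DONE) {
            return;
        }
        if (info.getState() == CmdletState.FAILED) {
            Assert.fail("Cmdlet failed: " + info);
        }
    }
    throw new Exception("Timed out waiting for cmdlet " + cmdId);
}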

Example 3 with CmdletManager

use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

the class TestCompressDecompress method testDecompress.

@Test
public void testDecompress() throws Exception {
    int arraySize = 1024 * 1024 * 8;
    String filePath = "/ssm/compression/file4";
    prepareFile(filePath, arraySize);
    dfsClient.setStoragePolicy(filePath, "COLD");
    HdfsFileStatus fileStatusBefore = dfsClient.getFileInfo(filePath);
    CmdletManager cmdletManager = ssm.getCmdletManager();
    // A regular (uncompressed) file is not eligible for decompression.
    List<ActionScheduler> schedulers = cmdletManager.getSchedulers("decompress");
    Assert.assertEquals(1, schedulers.size());
    ActionScheduler scheduler = schedulers.get(0);
    Assert.assertTrue(scheduler instanceof CompressionScheduler);
    Assert.assertFalse(((CompressionScheduler) scheduler).supportDecompression(filePath));
    // Compress the given file
    long cmdId = cmdletManager.submitCmdlet("compress -file " + filePath + " -codec " + codec);
    waitTillActionDone(cmdId);
    FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
    Assert.assertTrue(fileState instanceof CompressionFileState);
    // The storage policy should not be changed by compression.
    HdfsFileStatus fileStatusAfterCompress = dfsClient.getFileInfo(filePath);
    if (fileStatusBefore.getStoragePolicy() != 0) {
        // Only verify when a concrete (non-default) policy was set beforehand.
        Assert.assertEquals(fileStatusBefore.getStoragePolicy(), fileStatusAfterCompress.getStoragePolicy());
    }
    // Try to decompress a compressed file
    cmdId = cmdletManager.submitCmdlet("decompress -file " + filePath);
    waitTillActionDone(cmdId);
    fileState = HadoopUtil.getFileState(dfsClient, filePath);
    Assert.assertFalse(fileState instanceof CompressionFileState);
    // The storage policy should not be changed by decompression either.
    HdfsFileStatus fileStatusAfterDeCompress = dfsClient.getFileInfo(filePath);
    if (fileStatusBefore.getStoragePolicy() != 0) {
        // Only verify when a concrete (non-default) policy was set beforehand.
        Assert.assertEquals(fileStatusBefore.getStoragePolicy(), fileStatusAfterDeCompress.getStoragePolicy());
    }
}
Also used: FileState (org.smartdata.model.FileState), CompressionFileState (org.smartdata.model.CompressionFileState), CmdletManager (org.smartdata.server.engine.CmdletManager), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), ActionScheduler (org.smartdata.model.action.ActionScheduler), CompressionScheduler (org.smartdata.hdfs.scheduler.CompressionScheduler), Test (org.junit.Test)
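
The compression metadata travels with the file's FileState. A minimal sketch of reading it back, using the CompressionFileState getters exercised in the commented-out testCompressEmptyFile in Example 5 below:

FileState state = HadoopUtil.getFileState(dfsClient, filePath);
if (state instanceof CompressionFileState) {
    CompressionFileState cfs = (CompressionFileState) state;
    // Recorded by the compress action; a decompress action removes this state.
    System.out.println("original length:   " + cfs.getOriginalLength());
    System.out.println("compressed length: " + cfs.getCompressedLength());
    System.out.println("buffer size:       " + cfs.getBufferSize());
}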

Example 4 with CmdletManager

use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

the class TestCompressDecompress method testRename.

@Test
public void testRename() throws Exception {
    // Create raw file
    Path path = new Path("/test/compress_files/");
    dfs.mkdirs(path);
    int rawLength = 1024 * 1024 * 8;
    String fileName = "/test/compress_files/file_0";
    DFSTestUtil.createFile(dfs, new Path(fileName), rawLength, (short) 1, 1);
    int bufSize = 1024 * 1024;
    waitTillSSMExitSafeMode();
    CmdletManager cmdletManager = ssm.getCmdletManager();
    // Compress files
    long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName + " -bufSize " + bufSize + " -codec " + codec);
    waitTillActionDone(cmdId);
    SmartDFSClient smartDFSClient = new SmartDFSClient(smartContext.getConf());
    smartDFSClient.rename("/test/compress_files/file_0", "/test/compress_files/file_4");
    Assert.assertTrue(smartDFSClient.exists("/test/compress_files/file_4"));
    HdfsFileStatus fileStatus = smartDFSClient.getFileInfo("/test/compress_files/file_4");
    Assert.assertEquals(rawLength, fileStatus.getLen());
}
Also used: Path (org.apache.hadoop.fs.Path), CmdletManager (org.smartdata.server.engine.CmdletManager), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), SmartDFSClient (org.smartdata.hdfs.client.SmartDFSClient), Test (org.junit.Test)
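
Rename is transparent to compression: the file keeps its compression state, and the Smart client still reports the original, uncompressed length at the new path. A natural follow-up check (a sketch reusing the objects from the test above) is that the old path is gone:

Assert.assertFalse(smartDFSClient.exists("/test/compress_files/file_0"));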

Example 5 with CmdletManager

use of org.smartdata.server.engine.CmdletManager in project SSM by Intel-bigdata.

the class TestCompressDecompress method testCompressedFileRandomRead.

// @Test(timeout = 90000)
// public void testCompressEmptyFile() throws Exception {
// waitTillSSMExitSafeMode();
// 
// // initDB();
// String fileName = "/ssm/compression/file2";
// prepareFile(fileName, 0);
// MetaStore metaStore = ssm.getMetaStore();
// 
// int bufSize = 1024 * 1024;
// CmdletManager cmdletManager = ssm.getCmdletManager();
// long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName
// + " -bufSize " + bufSize + " -compressImpl " + compressionImpl);
// 
// waitTillActionDone(cmdId);
// FileState fileState = metaStore.getFileState(fileName);
// while (!fileState.getFileType().equals(FileState.FileType.COMPRESSION)) {
// Thread.sleep(200);
// fileState = metaStore.getFileState(fileName);
// }
// 
// // metastore  test
// //    Assert.assertEquals(FileState.FileType.COMPRESSION, fileState.getFileType());
// Assert.assertEquals(FileState.FileStage.DONE, fileState.getFileStage());
// Assert.assertTrue(fileState instanceof CompressionFileState);
// CompressionFileState compressionFileState = (CompressionFileState) fileState;
// Assert.assertEquals(fileName, compressionFileState.getPath());
// Assert.assertEquals(bufSize, compressionFileState.getBufferSize());
// Assert.assertEquals(compressionImpl, compressionFileState.getCompressionImpl());
// Assert.assertEquals(0, compressionFileState.getOriginalLength());
// Assert.assertEquals(0, compressionFileState.getCompressedLength());
// 
// // File length test
// Assert.assertEquals(0, dfsClient.getFileInfo(fileName).getLen());
// }
@Test
public void testCompressedFileRandomRead() throws Exception {
    // if (!loadedNative()) {
    // return;
    // }
    waitTillSSMExitSafeMode();
    // initDB();
    int arraySize = 1024 * 1024 * 8;
    String fileName = "/ssm/compression/file3";
    byte[] bytes = prepareFile(fileName, arraySize);
    int bufSize = 1024 * 1024;
    CmdletManager cmdletManager = ssm.getCmdletManager();
    long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName + " -bufSize " + bufSize + " -codec " + codec);
    waitTillActionDone(cmdId);
    // Test random read
    Random rnd = new Random(System.currentTimeMillis());
    DFSInputStream dfsInputStream = smartDFSClient.open(fileName);
    int randomReadSize = 500;
    byte[] randomReadBuffer = new byte[randomReadSize];
    for (int i = 0; i < 5; i++) {
        int pos = rnd.nextInt(arraySize - 500);
        byte[] subBytes = Arrays.copyOfRange(bytes, pos, pos + 500);
        dfsInputStream.seek(pos);
        Assert.assertEquals(pos, dfsInputStream.getPos());
        int off = 0;
        while (off < randomReadSize) {
            int len = dfsInputStream.read(randomReadBuffer, off, randomReadSize - off);
            // read() may return fewer bytes than requested; -1 signals EOF,
            // which would otherwise send this loop backwards forever.
            if (len < 0) {
                Assert.fail("Unexpected EOF at position " + (pos + off));
            }
            off += len;
        }
        Assert.assertArrayEquals(subBytes, randomReadBuffer);
        Assert.assertEquals(pos + 500, dfsInputStream.getPos());
    }
}
Also used: Random (java.util.Random), CmdletManager (org.smartdata.server.engine.CmdletManager), DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream), Test (org.junit.Test)
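
The inner while loop guards against partial reads: DFSInputStream.read may return fewer bytes than requested. A hypothetical helper (not in the test class) that encapsulates the pattern:

private static void readFully(DFSInputStream in, byte[] buf) throws IOException {
    int off = 0;
    while (off < buf.length) {
        int len = in.read(buf, off, buf.length - off);
        if (len < 0) {
            // EOF before the buffer was filled.
            throw new java.io.EOFException("EOF after " + off + " bytes");
        }
        off += len;
    }
}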

Aggregations

CmdletManager (org.smartdata.server.engine.CmdletManager): 13
Test (org.junit.Test): 11
Path (org.apache.hadoop.fs.Path): 5
IOException (java.io.IOException): 3
CmdletState (org.smartdata.model.CmdletState): 3
ActionScheduler (org.smartdata.model.action.ActionScheduler): 3
DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream): 2
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 2
SmartDFSClient (org.smartdata.hdfs.client.SmartDFSClient): 2
CompressionScheduler (org.smartdata.hdfs.scheduler.CompressionScheduler): 2
CompressionFileState (org.smartdata.model.CompressionFileState): 2
FileState (org.smartdata.model.FileState): 2
Random (java.util.Random): 1
BlockLocation (org.apache.hadoop.fs.BlockLocation): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 1
AbstractService (org.smartdata.AbstractService): 1
SmartFileSystem (org.smartdata.hadoop.filesystem.SmartFileSystem): 1