Search in sources :

Example 6 with FileState

use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

The class MetaStore, method deleteFileState.

/**
 * Removes the FileState record for {@code filePath}, together with any
 * type-specific state (compact, compression or S3) kept alongside it.
 *
 * @param filePath path of the file whose state should be deleted
 * @throws MetaStoreException if the state lookup or any delete fails
 */
public void deleteFileState(String filePath) throws MetaStoreException {
    try {
        FileState fileState = getFileState(filePath);
        fileStateDao.deleteByPath(filePath, false);
        FileState.FileType type = fileState.getFileType();
        if (type == FileState.FileType.COMPACT) {
            // Compact files keep an extra record in the small-file table.
            smallFileDao.deleteByPath(filePath, false);
        } else if (type == FileState.FileType.COMPRESSION) {
            deleteCompressedFile(filePath);
        }
        // S3 and all other file types require no additional cleanup.
    } catch (Exception e) {
        throw new MetaStoreException(e);
    }
}
Also used : S3FileState(org.smartdata.model.S3FileState) NormalFileState(org.smartdata.model.NormalFileState) CompactFileState(org.smartdata.model.CompactFileState) FileState(org.smartdata.model.FileState) CompressionFileState(org.smartdata.model.CompressionFileState) EmptyResultDataAccessException(org.springframework.dao.EmptyResultDataAccessException) SQLException(java.sql.SQLException)

Example 7 with FileState

use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

The class TestCompressDecompress, method testDecompress.

/**
 * Verifies that a plain file cannot be decompressed, that a compress
 * followed by a decompress round-trips the file's state, and that the
 * "COLD" storage policy survives both operations.
 */
@Test
public void testDecompress() throws Exception {
    int arraySize = 1024 * 1024 * 8;
    String filePath = "/ssm/compression/file4";
    prepareFile(filePath, arraySize);
    dfsClient.setStoragePolicy(filePath, "COLD");
    HdfsFileStatus fileStatusBefore = dfsClient.getFileInfo(filePath);
    CmdletManager cmdletManager = ssm.getCmdletManager();
    // Expect that a common file cannot be decompressed.
    List<ActionScheduler> schedulers = cmdletManager.getSchedulers("decompress");
    // assertEquals reports actual vs expected on failure, unlike
    // assertTrue(schedulers.size() == 1) which only says "false".
    Assert.assertEquals(1, schedulers.size());
    ActionScheduler scheduler = schedulers.get(0);
    Assert.assertTrue(scheduler instanceof CompressionScheduler);
    Assert.assertFalse(((CompressionScheduler) scheduler).supportDecompression(filePath));
    // Compress the given file
    long cmdId = cmdletManager.submitCmdlet("compress -file " + filePath + " -codec " + codec);
    waitTillActionDone(cmdId);
    FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
    Assert.assertTrue(fileState instanceof CompressionFileState);
    // The storage policy should not be changed
    HdfsFileStatus fileStatusAfterCompress = dfsClient.getFileInfo(filePath);
    if (fileStatusBefore.getStoragePolicy() != 0) {
        // To make sure the consistency of storage policy
        Assert.assertEquals(fileStatusBefore.getStoragePolicy(), fileStatusAfterCompress.getStoragePolicy());
    }
    // Try to decompress a compressed file
    cmdId = cmdletManager.submitCmdlet("decompress -file " + filePath);
    waitTillActionDone(cmdId);
    fileState = HadoopUtil.getFileState(dfsClient, filePath);
    Assert.assertFalse(fileState instanceof CompressionFileState);
    // The storage policy should not be changed.
    HdfsFileStatus fileStatusAfterDeCompress = dfsClient.getFileInfo(filePath);
    if (fileStatusBefore.getStoragePolicy() != 0) {
        // To make sure the consistency of storage policy
        Assert.assertEquals(fileStatusBefore.getStoragePolicy(), fileStatusAfterDeCompress.getStoragePolicy());
    }
}
Also used : FileState(org.smartdata.model.FileState) CompressionFileState(org.smartdata.model.CompressionFileState) CmdletManager(org.smartdata.server.engine.CmdletManager) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ActionScheduler(org.smartdata.model.action.ActionScheduler) CompressionFileState(org.smartdata.model.CompressionFileState) CompressionScheduler(org.smartdata.hdfs.scheduler.CompressionScheduler) Test(org.junit.Test)

Example 8 with FileState

use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

The class TestFileStateDao, method testDelete.

/**
 * Verifies that deleteByPath removes exactly one entry and that deleteAll
 * empties the table.
 */
@Test
public void testDelete() throws Exception {
    // Seed one entry of each file type.
    FileState compactState = new FileState("/file1", FileState.FileType.COMPACT, FileState.FileStage.PROCESSING);
    FileState compressionState = new FileState("/file2", FileState.FileType.COMPRESSION, FileState.FileStage.DONE);
    FileState s3State = new FileState("/file3", FileState.FileType.S3, FileState.FileStage.DONE);
    for (FileState state : new FileState[] { compactState, compressionState, s3State }) {
        fileStateDao.insertUpdate(state);
    }
    // Deleting one path must leave the other two entries behind.
    fileStateDao.deleteByPath(compactState.getPath(), false);
    Assert.assertEquals(2, fileStateDao.getAll().size());
    try {
        fileStateDao.getByPath(compactState.getPath());
        Assert.fail();
    } catch (EmptyResultDataAccessException e) {
        // Expected: the deleted path must no longer resolve to an entry.
    }
    // deleteAll wipes the table completely.
    fileStateDao.deleteAll();
    Assert.assertEquals(0, fileStateDao.getAll().size());
}
Also used : FileState(org.smartdata.model.FileState) EmptyResultDataAccessException(org.springframework.dao.EmptyResultDataAccessException) Test(org.junit.Test)

Example 9 with FileState

use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

The class TestFileStateDao, method testInsertUpdate.

/**
 * Verifies that insertUpdate inserts new entries and updates an existing
 * path in place rather than adding a duplicate row.
 */
@Test
public void testInsertUpdate() throws Exception {
    FileState compactState = new FileState("/file1", FileState.FileType.COMPACT, FileState.FileStage.PROCESSING);
    FileState compressionState = new FileState("/file2", FileState.FileType.COMPRESSION, FileState.FileStage.DONE);
    fileStateDao.insertUpdate(compactState);
    fileStateDao.insertUpdate(compressionState);
    // Both inserts should be visible and retrievable by path.
    Assert.assertEquals(2, fileStateDao.getAll().size());
    Assert.assertEquals(compactState, fileStateDao.getByPath("/file1"));
    Assert.assertEquals(compressionState, fileStateDao.getByPath("/file2"));
    // Re-inserting an existing path must update it, not add a row.
    compactState = new FileState("/file1", FileState.FileType.COMPACT, FileState.FileStage.DONE);
    fileStateDao.insertUpdate(compactState);
    Assert.assertEquals(2, fileStateDao.getAll().size());
    Assert.assertEquals(compactState, fileStateDao.getByPath("/file1"));
    Assert.assertEquals(compressionState, fileStateDao.getByPath("/file2"));
}
Also used : FileState(org.smartdata.model.FileState) Test(org.junit.Test)

Example 10 with FileState

use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

The class SmartFileSystem, method listCorruptFileBlocks.

/**
 * Lists corrupt file blocks for {@code path}. For SSM compact files the
 * bytes actually live in a container file, so the listing is redirected to
 * the container file's path instead of the logical small-file path.
 *
 * @param path logical path to inspect
 * @return iterator over paths of files with corrupt blocks
 * @throws IOException if the state lookup or the listing fails
 */
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path) throws IOException {
    // Resolve the file state first so only one listing call is issued.
    // The original listed the logical path unconditionally and then, for
    // compact files, discarded that result and listed again — a redundant
    // NameNode round-trip.
    FileState fileState = smartDFSClient.getFileState(getPathName(path));
    Path target = path;
    if (fileState instanceof CompactFileState) {
        target = new Path(((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath());
    }
    return super.listCorruptFileBlocks(target);
}
Also used : Path(org.apache.hadoop.fs.Path) CompactFileState(org.smartdata.model.CompactFileState) FileState(org.smartdata.model.FileState) CompressionFileState(org.smartdata.model.CompressionFileState) CompactFileState(org.smartdata.model.CompactFileState)

Aggregations

FileState (org.smartdata.model.FileState)34 CompressionFileState (org.smartdata.model.CompressionFileState)26 CompactFileState (org.smartdata.model.CompactFileState)19 NormalFileState (org.smartdata.model.NormalFileState)15 IOException (java.io.IOException)9 Test (org.junit.Test)7 ArrayList (java.util.ArrayList)4 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)4 Gson (com.google.gson.Gson)3 HashMap (java.util.HashMap)3 Path (org.apache.hadoop.fs.Path)3 DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream)3 EmptyResultDataAccessException (org.springframework.dao.EmptyResultDataAccessException)3 SQLException (java.sql.SQLException)2 BlockLocation (org.apache.hadoop.fs.BlockLocation)2 FileStatus (org.apache.hadoop.fs.FileStatus)2 LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus)2 HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus)2 HdfsNamedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsNamedFileStatus)2 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)2