Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class MetaStore, method deleteFileState.
/**
 * Delete the FileState of the given file, including its corresponding
 * compression/compact/S3 state.
 *
 * @param filePath path of the file whose state should be deleted
 * @throws MetaStoreException if any underlying DAO operation fails
 */
public void deleteFileState(String filePath) throws MetaStoreException {
  try {
    FileState fileState = getFileState(filePath);
    fileStateDao.deleteByPath(filePath, false);
    switch (fileState.getFileType()) {
      case COMPACT:
        smallFileDao.deleteByPath(filePath, false);
        break;
      case COMPRESSION:
        deleteCompressedFile(filePath);
        break;
      case S3:
        break;
      default:
    }
  } catch (Exception e) {
    throw new MetaStoreException(e);
  }
}
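For reference, a call site might look like the following. This is a minimal sketch, not code from SSM; the metaStore and LOG variables are assumed to exist in the surrounding class.

// Hypothetical caller: drop all metastore state for a file removed from HDFS,
// so the metastore does not keep stale entries.
String removedPath = "/ssm/compression/removed-file";
try {
  metaStore.deleteFileState(removedPath);
} catch (MetaStoreException e) {
  // The exception wraps the underlying DAO failure; log and move on.
  LOG.warn("Failed to delete file state for " + removedPath, e);
}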
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class TestCompressDecompress, method testDecompress.
@Test
public void testDecompress() throws Exception {
  int arraySize = 1024 * 1024 * 8;
  String filePath = "/ssm/compression/file4";
  prepareFile(filePath, arraySize);
  dfsClient.setStoragePolicy(filePath, "COLD");
  HdfsFileStatus fileStatusBefore = dfsClient.getFileInfo(filePath);
  CmdletManager cmdletManager = ssm.getCmdletManager();
  // A plain, uncompressed file should not be decompressible.
  List<ActionScheduler> schedulers = cmdletManager.getSchedulers("decompress");
  Assert.assertEquals(1, schedulers.size());
  ActionScheduler scheduler = schedulers.get(0);
  Assert.assertTrue(scheduler instanceof CompressionScheduler);
  Assert.assertFalse(((CompressionScheduler) scheduler).supportDecompression(filePath));
  // Compress the given file.
  long cmdId = cmdletManager.submitCmdlet("compress -file " + filePath + " -codec " + codec);
  waitTillActionDone(cmdId);
  FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
  Assert.assertTrue(fileState instanceof CompressionFileState);
  // The storage policy should not be changed by compression.
  HdfsFileStatus fileStatusAfterCompress = dfsClient.getFileInfo(filePath);
  if (fileStatusBefore.getStoragePolicy() != 0) {
    Assert.assertEquals(fileStatusBefore.getStoragePolicy(),
        fileStatusAfterCompress.getStoragePolicy());
  }
  // Decompress the compressed file.
  cmdId = cmdletManager.submitCmdlet("decompress -file " + filePath);
  waitTillActionDone(cmdId);
  fileState = HadoopUtil.getFileState(dfsClient, filePath);
  Assert.assertFalse(fileState instanceof CompressionFileState);
  // The storage policy should not be changed by decompression either.
  HdfsFileStatus fileStatusAfterDecompress = dfsClient.getFileInfo(filePath);
  if (fileStatusBefore.getStoragePolicy() != 0) {
    Assert.assertEquals(fileStatusBefore.getStoragePolicy(),
        fileStatusAfterDecompress.getStoragePolicy());
  }
}
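The waitTillActionDone helper is not shown above. A minimal sketch of what such a polling helper could look like is below; the getCmdletInfo and CmdletState names are assumptions about the SSM API rather than verified signatures.

// Sketch of a polling helper; assumes CmdletManager#getCmdletInfo(long)
// returns a CmdletInfo whose getState() yields a CmdletState enum.
private void waitTillActionDone(long cmdId) throws Exception {
  while (true) {
    CmdletInfo info = ssm.getCmdletManager().getCmdletInfo(cmdId);
    if (info != null && info.getState() == CmdletState.DONE) {
      return;
    }
    Thread.sleep(500); // poll every half second
  }
}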
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class TestFileStateDao, method testDelete.
@Test
public void testDelete() throws Exception {
  FileState fileState1 = new FileState("/file1", FileState.FileType.COMPACT, FileState.FileStage.PROCESSING);
  FileState fileState2 = new FileState("/file2", FileState.FileType.COMPRESSION, FileState.FileStage.DONE);
  FileState fileState3 = new FileState("/file3", FileState.FileType.S3, FileState.FileStage.DONE);
  fileStateDao.insertUpdate(fileState1);
  fileStateDao.insertUpdate(fileState2);
  fileStateDao.insertUpdate(fileState3);
  fileStateDao.deleteByPath(fileState1.getPath(), false);
  List<FileState> fileStates = fileStateDao.getAll();
  Assert.assertEquals(2, fileStates.size());
  try {
    fileStateDao.getByPath(fileState1.getPath());
    Assert.fail();
  } catch (EmptyResultDataAccessException e) {
    // Expected: no entry should remain for the deleted path.
  }
  fileStateDao.deleteAll();
  fileStates = fileStateDao.getAll();
  Assert.assertEquals(0, fileStates.size());
}
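The second argument to deleteByPath appears to toggle recursive deletion. A minimal sketch of how that variant might be exercised, assuming the flag means "delete every entry under the given path" (an assumption about the DAO semantics, not verified against SSM):

// Hypothetical: two states under /dir removed by one recursive delete.
fileStateDao.insertUpdate(new FileState("/dir/a", FileState.FileType.COMPACT, FileState.FileStage.DONE));
fileStateDao.insertUpdate(new FileState("/dir/b", FileState.FileType.COMPACT, FileState.FileStage.DONE));
fileStateDao.deleteByPath("/dir", true); // assumed to match by path prefix
Assert.assertEquals(0, fileStateDao.getAll().size());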
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class TestFileStateDao, method testInsertUpdate.
@Test
public void testInsertUpdate() throws Exception {
  FileState fileState1 = new FileState("/file1", FileState.FileType.COMPACT, FileState.FileStage.PROCESSING);
  FileState fileState2 = new FileState("/file2", FileState.FileType.COMPRESSION, FileState.FileStage.DONE);
  fileStateDao.insertUpdate(fileState1);
  fileStateDao.insertUpdate(fileState2);
  List<FileState> fileStates = fileStateDao.getAll();
  Assert.assertEquals(2, fileStates.size());
  Assert.assertEquals(fileState1, fileStateDao.getByPath("/file1"));
  Assert.assertEquals(fileState2, fileStateDao.getByPath("/file2"));
  fileState1 = new FileState("/file1", FileState.FileType.COMPACT, FileState.FileStage.DONE);
  fileStateDao.insertUpdate(fileState1);
  fileStates = fileStateDao.getAll();
  Assert.assertEquals(2, fileStates.size());
  Assert.assertEquals(fileState1, fileStateDao.getByPath("/file1"));
  Assert.assertEquals(fileState2, fileStateDao.getByPath("/file2"));
}
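The test shows that insertUpdate behaves as an upsert: the second call for /file1 replaces the existing row rather than adding a duplicate. A minimal sketch of how such a DAO method is commonly written with Spring's JdbcTemplate follows; the table name, column names, and getValue() accessors are assumptions for illustration, not SSM's actual schema.

// Hypothetical upsert using MySQL-style REPLACE INTO; SSM's real schema
// and SQL dialect may differ.
public void insertUpdate(FileState fileState) {
  jdbcTemplate.update(
      "REPLACE INTO file_state (path, type, stage) VALUES (?, ?, ?)",
      fileState.getPath(),
      fileState.getFileType().getValue(),
      fileState.getFileStage().getValue());
}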
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class SmartFileSystem, method listCorruptFileBlocks.
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path) throws IOException {
  RemoteIterator<Path> corruptFileBlocksIterator = super.listCorruptFileBlocks(path);
  FileState fileState = smartDFSClient.getFileState(getPathName(path));
  if (fileState instanceof CompactFileState) {
    // A compact small file owns no blocks of its own, so report the
    // corrupt blocks of its container file instead.
    corruptFileBlocksIterator = super.listCorruptFileBlocks(
        new Path(((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath()));
  }
  return corruptFileBlocksIterator;
}
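A caller sees this redirection transparently. A minimal sketch of iterating the result, where fs is a hypothetical SmartFileSystem instance and IOException is assumed to propagate to the enclosing method:

// Hypothetical caller: for a compact file, the iterator actually reflects
// the container file's corrupt blocks.
RemoteIterator<Path> corrupt = fs.listCorruptFileBlocks(new Path("/ssm/small/file1"));
while (corrupt.hasNext()) {
  System.out.println("Corrupt blocks in: " + corrupt.next());
}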