Use of org.smartdata.model.CompressionFileState in project SSM by Intel-bigdata.
The class CompressionScheduler, method isSuccessfulBySpeculation.
/**
 * Speculate action status and set result accordingly.
 */
@Override
public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
  String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
  try {
    FileState fileState = HadoopUtil.getFileState(dfsClient, path);
    FileState.FileType fileType = fileState.getFileType();
    if (actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID)) {
      return fileType == FileState.FileType.NORMAL;
    }
    // Recover action result for successful compress action.
    if (fileType == FileState.FileType.COMPRESSION) {
      CompressionFileInfo compressionFileInfo =
          new CompressionFileInfo((CompressionFileState) fileState);
      actionInfo.setResult(new Gson().toJson(compressionFileInfo));
      return true;
    }
    return false;
  } catch (IOException e) {
    LOG.warn("Failed to get file state, suppose this action was not "
        + "successfully executed: {}", actionInfo.toString());
    return false;
  }
}
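For context, the following sketch shows how the CompressionFileInfo that the method above stores in the action's result field could be read back with Gson. The surrounding caller and the scheduler variable are hypothetical; only accessors visible in these snippets, plus plain Gson, are used.

// Sketch only: "scheduler" and the surrounding caller are hypothetical, not SSM code.
if (scheduler.isSuccessfulBySpeculation(actionInfo)) {
  // isSuccessfulBySpeculation stored a serialized CompressionFileInfo
  // in the action's result; parse it back the same way.
  CompressionFileInfo info =
      new Gson().fromJson(actionInfo.getResult(), CompressionFileInfo.class);
  CompressionFileState state = info.getCompressionFileState();
  // state now carries the compression metadata recovered from the HDFS file state.
}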
Use of org.smartdata.model.CompressionFileState in project SSM by Intel-bigdata.
The class CompressionScheduler, method onCompressActionFinished.
private void onCompressActionFinished(ActionInfo actionInfo) throws MetaStoreException {
  if (!actionInfo.getActionName().equals(COMPRESSION_ACTION_ID)) {
    return;
  }
  Gson gson = new Gson();
  String compressionInfoJson = actionInfo.getResult();
  CompressionFileInfo compressionFileInfo = gson.fromJson(compressionInfoJson,
      new TypeToken<CompressionFileInfo>() {
      }.getType());
  if (compressionFileInfo == null) {
    LOG.error("CompressionFileInfo should NOT be null after successful "
        + "execution!");
    return;
  }
  CompressionFileState compressionFileState =
      compressionFileInfo.getCompressionFileState();
  compressionFileState.setFileStage(FileState.FileStage.DONE);
  // Update metastore and then replace file with compressed one
  metaStore.insertUpdateFileState(compressionFileState);
}
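For clarity, here is a minimal sketch of the Gson round trip that links the two methods above: the compress action (or the speculation hook) serializes a CompressionFileInfo into the action result, and onCompressActionFinished deserializes it before persisting. The snippet is illustrative, not SSM code, and uses only the accessors visible in these examples.

Gson gson = new Gson();
// What the compress action (or the speculation hook above) stores as its result:
String resultJson = gson.toJson(new CompressionFileInfo(compressionFileState));
// What onCompressActionFinished reads back before updating the metastore:
CompressionFileInfo parsed =
    gson.fromJson(resultJson, new TypeToken<CompressionFileInfo>() {}.getType());
CompressionFileState recovered = parsed.getCompressionFileState();
recovered.setFileStage(FileState.FileStage.DONE);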
Use of org.smartdata.model.CompressionFileState in project SSM by Intel-bigdata.
The class ProtoBufferHelper, method convert.
public static FileStateProto convert(FileState fileState) {
  FileStateProto.Builder builder = FileStateProto.newBuilder();
  builder.setPath(fileState.getPath())
      .setType(fileState.getFileType().getValue())
      .setStage(fileState.getFileStage().getValue());
  if (fileState instanceof CompactFileState) {
    FileContainerInfo fileContainerInfo =
        ((CompactFileState) fileState).getFileContainerInfo();
    builder.setCompactFileState(CompactFileStateProto.newBuilder()
        .setContainerFilePath(fileContainerInfo.getContainerFilePath())
        .setOffset(fileContainerInfo.getOffset())
        .setLength(fileContainerInfo.getLength()));
  } else if (fileState instanceof CompressionFileState) {
    builder.setCompressionFileState(convert((CompressionFileState) fileState));
  }
  /* else if (fileState instanceof S3FileState) {
    builder.setS3FileState();
  } else if (fileState instanceof ) {
  }
  */
  return builder.build();
}
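As a usage sketch (assumed, not taken from SSM), the helper can round-trip a file state through its protobuf form. The FileContainerInfo(containerFilePath, offset, length) constructor used here is an assumption inferred from the getters above.

// Illustrative round trip; the FileContainerInfo constructor shape is assumed.
FileState original = new CompactFileState("/container/small_file_1",
    new FileContainerInfo("/container/container_file", 0L, 1024L));
FileStateProto proto = ProtoBufferHelper.convert(original);
FileState restored = ProtoBufferHelper.convert(proto);
// restored should again be a CompactFileState pointing at the same container range.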
Use of org.smartdata.model.CompressionFileState in project SSM by Intel-bigdata.
The class ProtoBufferHelper, method convert.
public static FileState convert(FileStateProto proto) {
  FileState fileState = null;
  String path = proto.getPath();
  FileState.FileType type = FileState.FileType.fromValue(proto.getType());
  FileState.FileStage stage = FileState.FileStage.fromValue(proto.getStage());
  // FileState.FileStage stage = FileState.FileStage.fromValue(proto.getStage());
  if (type == null) {
    return new NormalFileState(path);
  }
  switch (type) {
    case NORMAL:
      fileState = new NormalFileState(path);
      break;
    case COMPACT:
      CompactFileStateProto compactProto = proto.getCompactFileState();
      fileState = new CompactFileState(path, convert(compactProto));
      break;
    case COMPRESSION:
      CompressionFileStateProto compressionProto = proto.getCompressionFileState();
      // Convert to CompressionFileState
      fileState = convert(path, stage, compressionProto);
      break;
    case S3:
      S3FileStateProto s3Proto = proto.getS3FileState();
      // fileState = convert(path, type, stage, s3Proto);
      break;
    default:
  }
  return fileState;
}
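Note that this direction of convert can return null for the S3 and default branches, so a caller may want a fallback. A hedged caller-side sketch (not SSM code):

FileState state = ProtoBufferHelper.convert(proto);
if (state == null) {
  // The S3 branch above does not build a FileState yet; fall back to a
  // plain NormalFileState for the same path.
  state = new NormalFileState(proto.getPath());
}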
Use of org.smartdata.model.CompressionFileState in project SSM by Intel-bigdata.
The class TestCompressDecompress, method testDecompress.
@Test
public void testDecompress() throws Exception {
  int arraySize = 1024 * 1024 * 8;
  String filePath = "/ssm/compression/file4";
  prepareFile(filePath, arraySize);
  dfsClient.setStoragePolicy(filePath, "COLD");
  HdfsFileStatus fileStatusBefore = dfsClient.getFileInfo(filePath);
  CmdletManager cmdletManager = ssm.getCmdletManager();
  // Expect that a normal (uncompressed) file cannot be decompressed.
  List<ActionScheduler> schedulers = cmdletManager.getSchedulers("decompress");
  Assert.assertTrue(schedulers.size() == 1);
  ActionScheduler scheduler = schedulers.get(0);
  Assert.assertTrue(scheduler instanceof CompressionScheduler);
  Assert.assertFalse(((CompressionScheduler) scheduler).supportDecompression(filePath));
  // Compress the given file.
  long cmdId = cmdletManager.submitCmdlet(
      "compress -file " + filePath + " -codec " + codec);
  waitTillActionDone(cmdId);
  FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
  Assert.assertTrue(fileState instanceof CompressionFileState);
  // The storage policy should not be changed by compression.
  HdfsFileStatus fileStatusAfterCompress = dfsClient.getFileInfo(filePath);
  if (fileStatusBefore.getStoragePolicy() != 0) {
    // Make sure the storage policy stays consistent.
    Assert.assertEquals(fileStatusBefore.getStoragePolicy(),
        fileStatusAfterCompress.getStoragePolicy());
  }
  // Try to decompress a compressed file.
  cmdId = cmdletManager.submitCmdlet("decompress -file " + filePath);
  waitTillActionDone(cmdId);
  fileState = HadoopUtil.getFileState(dfsClient, filePath);
  Assert.assertFalse(fileState instanceof CompressionFileState);
  // The storage policy should not be changed by decompression.
  HdfsFileStatus fileStatusAfterDeCompress = dfsClient.getFileInfo(filePath);
  if (fileStatusBefore.getStoragePolicy() != 0) {
    // Make sure the storage policy stays consistent.
    Assert.assertEquals(fileStatusBefore.getStoragePolicy(),
        fileStatusAfterDeCompress.getStoragePolicy());
  }
}
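The test depends on a waitTillActionDone(cmdId) helper that is not shown in this excerpt. A minimal polling sketch follows; it assumes CmdletManager exposes getCmdletInfo(long) and a CmdletState enum with DONE and FAILED values, which are assumptions rather than details confirmed by the snippet.

// Assumed helper sketch: poll the cmdlet until it finishes or fails.
private void waitTillActionDone(long cmdId) throws Exception {
  while (true) {
    CmdletInfo info = ssm.getCmdletManager().getCmdletInfo(cmdId);  // assumed API
    if (info != null) {
      CmdletState state = info.getState();                          // assumed API
      if (state == CmdletState.DONE) {
        return;
      }
      Assert.assertNotEquals("Cmdlet failed unexpectedly", CmdletState.FAILED, state);
    }
    Thread.sleep(500);
  }
}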