Use of org.smartdata.model.CompressionFileInfo in project SSM by Intel-bigdata.
The class CompressionScheduler, method isSuccessfulBySpeculation.
/**
 * Speculate the action status and set the result accordingly.
 */
@Override
public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
  String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
  try {
    FileState fileState = HadoopUtil.getFileState(dfsClient, path);
    FileState.FileType fileType = fileState.getFileType();
    if (actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID)) {
      return fileType == FileState.FileType.NORMAL;
    }
    // Recover the action result for a successful compress action.
    if (fileType == FileState.FileType.COMPRESSION) {
      CompressionFileInfo compressionFileInfo =
          new CompressionFileInfo((CompressionFileState) fileState);
      actionInfo.setResult(new Gson().toJson(compressionFileInfo));
      return true;
    }
    return false;
  } catch (IOException e) {
    LOG.warn("Failed to get file state, assume this action was not successfully executed: {}",
        actionInfo);
    return false;
  }
}
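The JSON string written to the action result here is the same payload that onCompressActionFinished below parses back. A minimal sketch of that round trip (not from the SSM sources), assuming the model classes shown on this page and Gson are on the classpath; the path, buffer size and codec below are placeholder values:

// Hypothetical helper illustrating the result round trip.
private String buildAndParseResult() {
  Gson gson = new Gson();
  // Placeholder path, buffer size and codec.
  CompressionFileState state = new CompressionFileState("/tmp/data.txt", 131072, "Zlib");
  CompressionFileInfo info = new CompressionFileInfo(state);
  // What setResult(...) stores on the ActionInfo.
  String json = gson.toJson(info);
  // What onCompressActionFinished(...) later reads back via actionInfo.getResult().
  CompressionFileInfo parsed = gson.fromJson(json, new TypeToken<CompressionFileInfo>() {
  }.getType());
  return gson.toJson(parsed);
}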
Use of org.smartdata.model.CompressionFileInfo in project SSM by Intel-bigdata.
The class CompressionScheduler, method onCompressActionFinished.
private void onCompressActionFinished(ActionInfo actionInfo) throws MetaStoreException {
  if (!actionInfo.getActionName().equals(COMPRESSION_ACTION_ID)) {
    return;
  }
  Gson gson = new Gson();
  String compressionInfoJson = actionInfo.getResult();
  CompressionFileInfo compressionFileInfo = gson.fromJson(compressionInfoJson,
      new TypeToken<CompressionFileInfo>() {
      }.getType());
  if (compressionFileInfo == null) {
    LOG.error("CompressionFileInfo should NOT be null after a successful execution!");
    return;
  }
  CompressionFileState compressionFileState = compressionFileInfo.getCompressionFileState();
  compressionFileState.setFileStage(FileState.FileStage.DONE);
  // Update the metastore and then replace the file with the compressed one.
  metaStore.insertUpdateFileState(compressionFileState);
}
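Once the DONE state has been persisted, the recovery path in isSuccessfulBySpeculation above can rebuild a lost action result purely from the stored file state. A hedged sketch of that check, reusing the HadoopUtil helper shown earlier and assuming a connected dfsClient; the path is a placeholder:

FileState fileState = HadoopUtil.getFileState(dfsClient, "/tmp/data.txt");
if (fileState.getFileType() == FileState.FileType.COMPRESSION) {
  // The persisted state carries everything needed to reconstruct the action result.
  CompressionFileInfo recovered = new CompressionFileInfo((CompressionFileState) fileState);
  LOG.info("Recovered compression info: {}", new Gson().toJson(recovered));
}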
Use of org.smartdata.model.CompressionFileInfo in project SSM by Intel-bigdata.
The class CompressionAction, method execute.
@Override
protected void execute() throws Exception {
  if (filePath == null) {
    throw new IllegalArgumentException("File path is missing.");
  }
  if (compressTmpPath == null) {
    throw new IllegalArgumentException("Compression tmp path is not specified!");
  }
  if (!compressionCodecList.contains(compressCodec)) {
    throw new ActionException("Compression Action failed due to unsupported codec: " + compressCodec);
  }
  appendLog(String.format("Compression Action started at %s for %s",
      Utils.getFormatedCurrentTime(), filePath));
  if (!dfsClient.exists(filePath)) {
    throw new ActionException("Failed to execute Compression Action: the given file doesn't exist!");
  }
  HdfsFileStatus srcFileStatus = dfsClient.getFileInfo(filePath);
  // Consider the directory case.
  if (srcFileStatus.isDir()) {
    appendLog("Compression is not applicable to a directory.");
    return;
  }
  // Generate the compressed file.
  compressionFileState = new CompressionFileState(filePath, bufferSize, compressCodec);
  compressionFileState.setOriginalLength(srcFileStatus.getLen());
  OutputStream appendOut = null;
  DFSInputStream in = null;
  OutputStream out = null;
  try {
    if (srcFileStatus.getLen() == 0) {
      compressionFileInfo = new CompressionFileInfo(false, compressionFileState);
    } else {
      short replication = srcFileStatus.getReplication();
      long blockSize = srcFileStatus.getBlockSize();
      long fileSize = srcFileStatus.getLen();
      appendLog("File length: " + fileSize);
      bufferSize = getActualBuffSize(fileSize);
      // SmartDFSClient will fail to open a file being compressed, because the compression
      // scheduler sets its FileStage to PROCESSING. But since a plain DfsClient may also be
      // used, open an append stream to lock the file and avoid any modification.
      appendOut = CompatibilityHelperLoader.getHelper()
          .getDFSClientAppend(dfsClient, filePath, bufferSize);
      in = dfsClient.open(filePath);
      out = dfsClient.create(compressTmpPath, true, replication, blockSize);
      // Keep the storage policy consistent.
      // The statement below is not supported on Hadoop-2.7.3 or CDH-5.10.1:
      // String storagePolicyName = dfsClient.getStoragePolicy(filePath).getName();
      byte storagePolicyId = srcFileStatus.getStoragePolicy();
      String storagePolicyName = SmartConstants.STORAGE_POLICY_MAP.get(storagePolicyId);
      if (!storagePolicyName.equals("UNDEF")) {
        dfsClient.setStoragePolicy(compressTmpPath, storagePolicyName);
      }
      compress(in, out);
      HdfsFileStatus destFileStatus = dfsClient.getFileInfo(compressTmpPath);
      dfsClient.setOwner(compressTmpPath, srcFileStatus.getOwner(), srcFileStatus.getGroup());
      dfsClient.setPermission(compressTmpPath, srcFileStatus.getPermission());
      compressionFileState.setCompressedLength(destFileStatus.getLen());
      appendLog("Compressed file length: " + destFileStatus.getLen());
      compressionFileInfo = new CompressionFileInfo(true, compressTmpPath, compressionFileState);
    }
    compressionFileState.setBufferSize(bufferSize);
    appendLog("Compression buffer size: " + bufferSize);
    appendLog("Compression codec: " + compressCodec);
    String compressionInfoJson = new Gson().toJson(compressionFileInfo);
    appendResult(compressionInfoJson);
    LOG.warn(compressionInfoJson);
    if (compressionFileInfo.needReplace()) {
      // Add the xattr to the temp path.
      // Make sure the content written to the xattr is less than 64K.
      dfsClient.setXAttr(compressionFileInfo.getTempPath(), XATTR_NAME,
          SerializationUtils.serialize(compressionFileState),
          EnumSet.of(XAttrSetFlag.CREATE));
      // The rename operation is moved here from CompressionScheduler,
      // so modification of the original file is avoided.
      dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
    } else {
      // Add the xattr to the raw path.
      dfsClient.setXAttr(filePath, XATTR_NAME,
          SerializationUtils.serialize(compressionFileState),
          EnumSet.of(XAttrSetFlag.CREATE));
    }
  } catch (IOException e) {
    throw new IOException(e);
  } finally {
    if (appendOut != null) {
      try {
        appendOut.close();
      } catch (IOException e) {
        // Hide the expected exception that the original file is missing.
      }
    }
    if (in != null) {
      in.close();
    }
    if (out != null) {
      out.close();
    }
  }
}
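The CompressionFileState serialized into the xattr above can later be read back through the standard DFSClient xattr API. A minimal sketch, assuming the same XATTR_NAME key, a connected dfsClient, and commons-lang SerializationUtils on the classpath; since the temp path is renamed onto filePath when a replacement is needed, reading from filePath covers both branches:

byte[] raw = dfsClient.getXAttr(filePath, XATTR_NAME);
if (raw != null) {
  // Restore the state that CompressionAction stored alongside the compressed data.
  CompressionFileState restored = (CompressionFileState) SerializationUtils.deserialize(raw);
  appendLog("Restored compression state from xattr: " + restored);
}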