Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.
The class CompressionAction, method execute:
@Override
protected void execute() throws Exception {
  if (filePath == null) {
    throw new IllegalArgumentException("File path is missing.");
  }
  if (compressTmpPath == null) {
    throw new IllegalArgumentException("Compression tmp path is not specified!");
  }
  if (!compressionCodecList.contains(compressCodec)) {
    throw new ActionException(
        "Compression Action failed due to unsupported codec: " + compressCodec);
  }
  appendLog(String.format("Compression Action started at %s for %s",
      Utils.getFormatedCurrentTime(), filePath));
  if (!dfsClient.exists(filePath)) {
    throw new ActionException(
        "Failed to execute Compression Action: the given file doesn't exist!");
  }
  HdfsFileStatus srcFileStatus = dfsClient.getFileInfo(filePath);
  // Consider the directory case.
  if (srcFileStatus.isDir()) {
    appendLog("Compression is not applicable to a directory.");
    return;
  }
  // Generate the compressed file.
  compressionFileState = new CompressionFileState(filePath, bufferSize, compressCodec);
  compressionFileState.setOriginalLength(srcFileStatus.getLen());
  OutputStream appendOut = null;
  DFSInputStream in = null;
  OutputStream out = null;
  try {
    if (srcFileStatus.getLen() == 0) {
      compressionFileInfo = new CompressionFileInfo(false, compressionFileState);
    } else {
      short replication = srcFileStatus.getReplication();
      long blockSize = srcFileStatus.getBlockSize();
      long fileSize = srcFileStatus.getLen();
      appendLog("File length: " + fileSize);
      bufferSize = getActualBuffSize(fileSize);
      // SmartDFSClient will fail to open a file whose FileState was set to
      // PROCESSING by the compression scheduler. But since a plain DFSClient
      // may also be used, an append is opened to lock the file and avoid
      // concurrent modification.
      appendOut = CompatibilityHelperLoader.getHelper()
          .getDFSClientAppend(dfsClient, filePath, bufferSize);
      in = dfsClient.open(filePath);
      out = dfsClient.create(compressTmpPath, true, replication, blockSize);
      // Keep the storage policy consistent.
      // The statement below is not supported on Hadoop-2.7.3 or CDH-5.10.1:
      // String storagePolicyName = dfsClient.getStoragePolicy(filePath).getName();
      byte storagePolicyId = srcFileStatus.getStoragePolicy();
      String storagePolicyName = SmartConstants.STORAGE_POLICY_MAP.get(storagePolicyId);
      if (!storagePolicyName.equals("UNDEF")) {
        dfsClient.setStoragePolicy(compressTmpPath, storagePolicyName);
      }
      compress(in, out);
      HdfsFileStatus destFileStatus = dfsClient.getFileInfo(compressTmpPath);
      dfsClient.setOwner(compressTmpPath, srcFileStatus.getOwner(), srcFileStatus.getGroup());
      dfsClient.setPermission(compressTmpPath, srcFileStatus.getPermission());
      compressionFileState.setCompressedLength(destFileStatus.getLen());
      appendLog("Compressed file length: " + destFileStatus.getLen());
      compressionFileInfo =
          new CompressionFileInfo(true, compressTmpPath, compressionFileState);
    }
    compressionFileState.setBufferSize(bufferSize);
    appendLog("Compression buffer size: " + bufferSize);
    appendLog("Compression codec: " + compressCodec);
    String compressionInfoJson = new Gson().toJson(compressionFileInfo);
    appendResult(compressionInfoJson);
    LOG.warn(compressionInfoJson);
    if (compressionFileInfo.needReplace()) {
      // Attach the state to the temp path.
      // Note: the content written to the XAttr must be less than 64 KB.
      dfsClient.setXAttr(compressionFileInfo.getTempPath(), XATTR_NAME,
          SerializationUtils.serialize(compressionFileState),
          EnumSet.of(XAttrSetFlag.CREATE));
      // The rename operation was moved here from CompressionScheduler, so
      // modification of the original file is avoided.
      dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
    } else {
      // Attach the state to the raw path.
      dfsClient.setXAttr(filePath, XATTR_NAME,
          SerializationUtils.serialize(compressionFileState),
          EnumSet.of(XAttrSetFlag.CREATE));
    }
  } catch (IOException e) {
    throw new IOException(e);
  } finally {
    if (appendOut != null) {
      try {
        appendOut.close();
      } catch (IOException e) {
        // Swallow the expected exception: the original file may already be
        // gone after the rename.
      }
    }
    if (in != null) {
      in.close();
    }
    if (out != null) {
      out.close();
    }
  }
}
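The compress(in, out) helper and the per-chunk bookkeeping inside CompressionFileState are not shown above. Below is a minimal, self-contained sketch of what such a chunk-based compression loop could look like; it is not the SSM implementation. It uses java.util.zip.Deflater purely for illustration (SSM supports several codecs), and recordChunk() is a hypothetical stand-in for the real offset bookkeeping.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.Deflater;

public class ChunkCompressSketch {
  private final int bufferSize;

  public ChunkCompressSketch(int bufferSize) {
    this.bufferSize = bufferSize;
  }

  // Reads the source in bufferSize chunks, compresses each chunk independently,
  // and records the original/compressed offset of every chunk so that a random
  // read can later be served by decompressing a single chunk.
  public void compress(InputStream in, OutputStream out) throws IOException {
    byte[] chunk = new byte[bufferSize];
    byte[] buf = new byte[4096];
    long origOffset = 0;
    long compOffset = 0;
    int read;
    while ((read = in.read(chunk, 0, bufferSize)) > 0) {
      Deflater deflater = new Deflater();
      deflater.setInput(chunk, 0, read);
      deflater.finish();
      ByteArrayOutputStream chunkOut = new ByteArrayOutputStream();
      while (!deflater.finished()) {
        int n = deflater.deflate(buf);
        chunkOut.write(buf, 0, n);
      }
      deflater.end();
      byte[] compressed = chunkOut.toByteArray();
      out.write(compressed);
      recordChunk(origOffset, compOffset, read, compressed.length);
      origOffset += read;
      compOffset += compressed.length;
    }
  }

  // Hypothetical stand-in for the CompressionFileState bookkeeping.
  private void recordChunk(long origOff, long compOff, int origLen, int compLen) {
    System.out.printf("chunk: orig@%d (%d B) -> comp@%d (%d B)%n",
        origOff, origLen, compOff, compLen);
  }
}

Per-chunk compression trades some ratio for random-access reads: a seek only requires decompressing the chunk that covers the requested offset, which is why the chunk offsets are persisted alongside the file.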
Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.
The class CheckStorageAction, method execute:
@Override
protected void execute() throws Exception {
  if (fileName == null) {
    throw new IllegalArgumentException("File parameter is missing!");
  }
  HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
  if (fileStatus == null) {
    throw new ActionException("File does not exist.");
  }
  if (fileStatus.isDir()) {
    appendResult("This is a directory which has no storage result!");
    // Also append to the log for the convenience of the UI implementation.
    appendLog("This is a directory which has no storage result!");
    return;
  }
  long length = fileStatus.getLen();
  List<LocatedBlock> locatedBlocks =
      dfsClient.getLocatedBlocks(fileName, 0, length).getLocatedBlocks();
  if (locatedBlocks.isEmpty()) {
    appendResult("File '" + fileName + "' has no blocks.");
    appendLog("File '" + fileName + "' has no blocks.");
    return;
  }
  for (LocatedBlock locatedBlock : locatedBlocks) {
    StringBuilder blockInfo = new StringBuilder();
    blockInfo.append("File offset = ").append(locatedBlock.getStartOffset()).append(", ");
    blockInfo.append("Block locations = {");
    for (DatanodeInfo datanodeInfo : locatedBlock.getLocations()) {
      blockInfo.append(datanodeInfo.getName());
      if (datanodeInfo instanceof DatanodeInfoWithStorage) {
        blockInfo.append("[")
            .append(((DatanodeInfoWithStorage) datanodeInfo).getStorageType())
            .append("]");
      }
      blockInfo.append(" ");
    }
    blockInfo.append("}");
    appendResult(blockInfo.toString());
    appendLog(blockInfo.toString());
  }
}
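Casting DatanodeInfo down to DatanodeInfoWithStorage works, but it ties the code to a class that is effectively internal. When only an aggregate view is needed, LocatedBlock#getStorageTypes() (public since Hadoop 2.6) exposes the same per-replica information as an array. A minimal sketch, assuming the same dfsClient handle; the class and method names here are made up for illustration:

import java.util.EnumMap;
import java.util.Map;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class StorageTypeTally {
  // Counts how many block replicas of the file live on each StorageType.
  public static Map<StorageType, Integer> tally(DFSClient dfsClient, String fileName)
      throws Exception {
    HdfsFileStatus status = dfsClient.getFileInfo(fileName);
    if (status == null) {
      throw new IllegalArgumentException("File does not exist: " + fileName);
    }
    Map<StorageType, Integer> counts = new EnumMap<>(StorageType.class);
    for (LocatedBlock block :
        dfsClient.getLocatedBlocks(fileName, 0, status.getLen()).getLocatedBlocks()) {
      // One entry per replica: a 2-block file with replication 3 yields 6 entries.
      for (StorageType type : block.getStorageTypes()) {
        counts.merge(type, 1, Integer::sum);
      }
    }
    return counts;
  }
}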
Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.
The class AppendFileAction, method execute:
@Override
protected void execute() throws Exception {
  if (srcPath == null || srcPath.isEmpty()) {
    throw new ActionException("File parameter is missing.");
  }
  Path path = new Path(srcPath);
  FileSystem fileSystem = path.getFileSystem(conf);
  appendLog(String.format("Action starts at %s : Read %s",
      Utils.getFormatedCurrentTime(), srcPath));
  if (!fileSystem.exists(path)) {
    throw new ActionException("Append Action failed: the file doesn't exist!");
  }
  appendLog(String.format("Append to %s", srcPath));
  Random random = new Random();
  FSDataOutputStream os = null;
  try {
    os = fileSystem.append(path, bufferSize);
    long remaining = length;
    // Append `length` random bytes in chunks of at most bufferSize.
    while (remaining > 0) {
      int toAppend = (int) Math.min(remaining, bufferSize);
      byte[] bytes = new byte[toAppend];
      random.nextBytes(bytes);
      os.write(bytes);
      remaining -= toAppend;
    }
  } finally {
    if (os != null) {
      os.close();
    }
  }
}
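The same random-append loop can also be written with try-with-resources, which guarantees the stream is closed even if write() throws, and with a single reused buffer instead of one allocation per iteration. A minimal sketch, assuming an already-configured FileSystem; the helper name appendRandom is made up for illustration:

import java.util.Random;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RandomAppendSketch {
  // Appends `length` random bytes to `path` in chunks of at most bufferSize.
  public static void appendRandom(FileSystem fs, Path path, long length, int bufferSize)
      throws Exception {
    Random random = new Random();
    byte[] bytes = new byte[bufferSize];
    try (FSDataOutputStream os = fs.append(path, bufferSize)) {
      long remaining = length;
      while (remaining > 0) {
        int toAppend = (int) Math.min(remaining, bufferSize);
        random.nextBytes(bytes); // fills the whole buffer; only toAppend is written
        os.write(bytes, 0, toAppend);
        remaining -= toAppend;
      }
    }
  }
}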
Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.
The class AddErasureCodingPolicy, method execute:
@Override
public void execute() throws Exception {
  this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
  if (codecName == null || numDataUnits <= 0 || numParityUnits <= 0
      || cellSize <= 0 || cellSize % 1024 != 0) {
    throw new ActionException("Illegal EC policy schema! "
        + "A valid codec name should be given, "
        + "the dataNum, parityNum and cellSize should be positive, and "
        + "the cellSize should be divisible by 1024.");
  }
  ECSchema ecSchema = new ECSchema(codecName, numDataUnits, numParityUnits);
  ErasureCodingPolicy ecPolicy = new ErasureCodingPolicy(ecSchema, cellSize);
  AddErasureCodingPolicyResponse addEcResponse =
      dfsClient.addErasureCodingPolicies(new ErasureCodingPolicy[] { ecPolicy })[0];
  if (addEcResponse.isSucceed()) {
    appendLog(String.format("EC policy named %s is added successfully!",
        addEcResponse.getPolicy().getName()));
  } else {
    appendLog("Failed to add the given EC policy!");
    throw new ActionException(addEcResponse.getErrorMsg());
  }
}
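On Hadoop 3.x, a policy added via addErasureCodingPolicies() is only registered, not yet usable: it still has to be enabled, and then assigned to a directory. A minimal follow-up sketch, assuming the same dfsClient handle; note that the policy name is derived from the schema (for example, a 6+3 Reed-Solomon policy with a 1 MB cell is named "RS-6-3-1024k"), so it is safest to take it from the response object as the method above does:

import org.apache.hadoop.hdfs.DFSClient;

public class EnableEcPolicySketch {
  public static void enableAndApply(DFSClient dfsClient, String policyName, String dirPath)
      throws Exception {
    // Move the policy from the registered set to the enabled set.
    dfsClient.enableErasureCodingPolicy(policyName);
    // Files written under dirPath from now on will use this policy;
    // existing files are not rewritten.
    dfsClient.setErasureCodingPolicy(dirPath, policyName);
  }
}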