Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata: class CmdletFactory, method createAction.
/**
 * Builds a ready-to-run {@link SmartAction} for the given launch request.
 * The action is looked up in the {@link ActionRegistry} by type, wired with
 * the shared context, cmdlet id and arguments, and — for HDFS actions — given
 * a {@link SmartDFSClient} pointing at the configured NameNode.
 *
 * @param cmdletId     id of the cmdlet this action belongs to
 * @param isLastAction whether this is the final action of the cmdlet
 * @param launchAction descriptor carrying action type, id and arguments
 * @return the initialized action instance
 * @throws ActionException if the HDFS client cannot be created
 */
public SmartAction createAction(long cmdletId, boolean isLastAction, LaunchAction launchAction) throws ActionException {
  SmartAction action = ActionRegistry.createAction(launchAction.getActionType());
  action.setContext(smartContext);
  action.setCmdletId(cmdletId);
  action.setLastAction(isLastAction);
  action.setActionId(launchAction.getActionId());
  action.init(launchAction.getArgs());
  if (action instanceof HdfsAction) {
    HdfsAction hdfsAction = (HdfsAction) action;
    try {
      SmartDFSClient client = new SmartDFSClient(
          HadoopUtil.getNameNodeUri(smartContext.getConf()),
          smartContext.getConf(),
          getRpcServerAddress());
      hdfsAction.setDfsClient(client);
    } catch (IOException e) {
      LOG.error("smartAction aid={} setDfsClient error", launchAction.getActionId(), e);
      throw new ActionException(e);
    }
  }
  /*
  else if (action instanceof AlluxioAction) {
    FileSystem fs;
    try {
      fs = AlluxioUtil.getAlluxioFs(smartContext);
    } catch (Exception e) {
      LOG.error("smartAction aid={} alluxio filesystem error", launchAction.getActionId(), e);
      throw new ActionException(e);
    }
    ((AlluxioAction) action).setFileSystem(fs);
  }
  */
  return action;
}
Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata: class DeleteFileAction, method deleteFile.
/**
 * Deletes a file, recursively, either on a remote HDFS cluster (path given as a
 * full {@code hdfs://...} URI) or through the local cluster's DFS client.
 *
 * @param filePath path of the file to delete
 * @return true if the deletion succeeded
 * @throws IOException     on filesystem errors
 * @throws ActionException if the file does not exist
 */
private boolean deleteFile(String filePath) throws IOException, ActionException {
  if (filePath.startsWith("hdfs")) {
    // delete in remote cluster
    // TODO read conf from file
    Configuration conf = new Configuration();
    // get FileSystem object
    FileSystem fs = FileSystem.get(URI.create(filePath), conf);
    Path path = new Path(filePath);
    if (!fs.exists(path)) {
      throw new ActionException("DeleteFile Action fails, file doesn't exist!");
    }
    appendLog(String.format("Delete %s", filePath));
    // FIX: propagate the real outcome of the delete instead of always
    // returning true regardless of whether fs.delete() succeeded.
    return fs.delete(path, true);
  } else {
    // delete in local cluster
    if (!dfsClient.exists(filePath)) {
      throw new ActionException("DeleteFile Action fails, file doesn't exist!");
    }
    appendLog(String.format("Delete %s", filePath));
    return dfsClient.delete(filePath, true);
  }
}
Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata: class DecompressionAction, method execute.
/**
 * Decompresses an SSM-compressed file in place: streams the decompressed data
 * to a temporary path, copies over ownership, permission and storage policy,
 * then atomically renames the temporary file onto the original path.
 *
 * No-ops (with a log message) if the path is a directory or the file is not
 * in a compressed state.
 *
 * @throws IllegalArgumentException if filePath or compressTmpPath is missing
 * @throws ActionException          if the file does not exist
 * @throws Exception                on I/O failures
 */
protected void execute() throws Exception {
  if (filePath == null) {
    throw new IllegalArgumentException("File path is missing.");
  }
  if (compressTmpPath == null) {
    throw new IllegalArgumentException("Compression tmp path is not specified!");
  }
  if (!dfsClient.exists(filePath)) {
    // FIX: message previously said "Compression Action" in the decompression action.
    throw new ActionException("Failed to execute Decompression Action: the given file doesn't exist!");
  }
  // Consider directory case.
  if (dfsClient.getFileInfo(filePath).isDir()) {
    appendLog("Decompression is not applicable to a directory.");
    return;
  }
  FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
  if (!(fileState instanceof CompressionFileState)) {
    appendLog("The file is already decompressed!");
    return;
  }
  OutputStream out = null;
  InputStream in = null;
  try {
    // No need to lock the file by append operation,
    // since compressed file cannot be modified.
    out = dfsClient.create(compressTmpPath, true);
    // Keep storage policy consistent.
    // The below statement is not supported on Hadoop-2.7.3 or CDH-5.10.1
    // String storagePolicyName = dfsClient.getStoragePolicy(filePath).getName();
    byte storagePolicyId = dfsClient.getFileInfo(filePath).getStoragePolicy();
    String storagePolicyName = SmartConstants.STORAGE_POLICY_MAP.get(storagePolicyId);
    // FIX: null-check — the map may have no entry for this policy id, which
    // previously caused an NPE on storagePolicyName.equals(...).
    if (storagePolicyName != null && !storagePolicyName.equals("UNDEF")) {
      dfsClient.setStoragePolicy(compressTmpPath, storagePolicyName);
    }
    in = dfsClient.open(filePath);
    long length = dfsClient.getFileInfo(filePath).getLen();
    outputDecompressedData(in, out, length);
    // Overwrite the original file with decompressed data
    dfsClient.setOwner(compressTmpPath, dfsClient.getFileInfo(filePath).getOwner(), dfsClient.getFileInfo(filePath).getGroup());
    dfsClient.setPermission(compressTmpPath, dfsClient.getFileInfo(filePath).getPermission());
    dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
    appendLog("The given file is successfully decompressed by codec: " + ((CompressionFileState) fileState).getCompressionImpl());
  } finally {
    // FIX: removed the pointless `catch (IOException e) { throw new IOException(e); }`
    // which buried the original exception under an extra wrapper; IOExceptions now
    // propagate unchanged. Close both streams even if the first close() throws.
    try {
      if (out != null) {
        out.close();
      }
    } finally {
      if (in != null) {
        in.close();
      }
    }
  }
}
Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata: class ReadFileAction, method execute.
/**
 * Reads the whole target file from HDFS in bufferSize chunks, discarding the
 * data (used e.g. to warm caches or exercise the read path).
 *
 * @throws IllegalArgumentException if filePath is missing
 * @throws ActionException          if the file does not exist
 * @throws Exception                on read failures
 */
@Override
protected void execute() throws Exception {
  if (filePath == null) {
    throw new IllegalArgumentException("File parameter is missing.");
  }
  appendLog(String.format("Action starts at %s : Read %s", Utils.getFormatedCurrentTime(), filePath));
  if (!dfsClient.exists(filePath)) {
    throw new ActionException("ReadFile Action fails, file " + filePath + " doesn't exist!");
  }
  // FIX: try-with-resources — previously the stream leaked if read() threw.
  try (DFSInputStream dfsInputStream = dfsClient.open(filePath)) {
    byte[] buffer = new byte[bufferSize];
    // Drain the file; the data itself is intentionally discarded.
    while (dfsInputStream.read(buffer, 0, bufferSize) != -1) {
    }
  }
}
Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata: class RenameFileAction, method renameSingleFile.
/**
 * Renames a single file. Both paths must live in the same cluster: either both
 * are full {@code hdfs://...} URIs on the same host, or both are plain paths
 * on the local cluster.
 *
 * @param src  source path
 * @param dest destination path
 * @return true if the rename succeeded
 * @throws IOException     on filesystem errors
 * @throws ActionException if the paths are in different clusters or src is missing
 */
private boolean renameSingleFile(String src, String dest) throws IOException, ActionException {
  boolean srcIsUri = src.startsWith("hdfs");
  boolean destIsUri = dest.startsWith("hdfs");
  if (srcIsUri && destIsUri) {
    // Both paths are full URIs: they must point at the same cluster host.
    // FIX: URI.getHost() may return null (URI without an authority part);
    // the old code called .equals() on it directly and could NPE.
    String srcHost = URI.create(src).getHost();
    String destHost = URI.create(dest).getHost();
    boolean sameHost = (srcHost == null) ? (destHost == null) : srcHost.equals(destHost);
    if (!sameHost) {
      throw new ActionException("the file names are not in the same cluster");
    }
    Configuration conf = new Configuration();
    // get FileSystem object
    FileSystem fs = FileSystem.get(URI.create(dest), conf);
    return fs.rename(new Path(src), new Path(dest));
  } else if (!srcIsUri && !destIsUri) {
    // rename file in local cluster and overwrite
    if (!dfsClient.exists(src)) {
      throw new ActionException("the source file is not exist");
    }
    dfsClient.rename(src, dest, Options.Rename.NONE);
    return true;
  } else {
    // Mixed forms (one URI, one local path) cannot be proven to be the same cluster.
    throw new ActionException("the file names are not in the same cluster");
  }
}
Aggregations