Example usage of org.smartdata.action.ActionException in the SSM project by Intel-bigdata: the convert method of the ErasureCodingBase class.
/**
 * Copies the source file to {@code ecTmpPath} so that the temporary file is
 * written under the erasure-coding policy named by {@code ecPolicyName}.
 * The temporary file inherits the source's permission and (unless UNDEF) its
 * storage policy. {@code this.progress} is updated as bytes are copied.
 *
 * @param srcFileStatus status of the source file; supplies length and permission
 * @throws ActionException if any step of the copy fails
 */
protected void convert(HdfsFileStatus srcFileStatus) throws ActionException {
  DFSInputStream in = null;
  DFSOutputStream out = null;
  try {
    long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
    in = dfsClient.open(srcPath, bufferSize, true);
    short replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
    // Use the same FsPermission as srcPath.
    FsPermission permission = srcFileStatus.getPermission();
    out = dfsClient.create(ecTmpPath, permission, EnumSet.of(CreateFlag.CREATE),
        true, replication, blockSize, null, bufferSize, null, null, ecPolicyName);
    // Keep the storage policy consistent with the original file, except for
    // the UNDEF storage policy.
    String storagePolicyName = dfsClient.getStoragePolicy(srcPath).getName();
    if (!storagePolicyName.equals("UNDEF")) {
      dfsClient.setStoragePolicy(ecTmpPath, storagePolicyName);
    }
    long bytesRemaining = srcFileStatus.getLen();
    byte[] buf = new byte[bufferSize];
    while (bytesRemaining > 0L) {
      int bytesToRead =
          (int) (bytesRemaining < (long) buf.length ? bytesRemaining : (long) buf.length);
      int bytesRead = in.read(buf, 0, bytesToRead);
      if (bytesRead == -1) {
        // Unexpected EOF: the source turned out shorter than its reported length.
        break;
      }
      out.write(buf, 0, bytesRead);
      bytesRemaining -= (long) bytesRead;
      this.progress = (float) (srcFileStatus.getLen() - bytesRemaining) / srcFileStatus.getLen();
    }
  } catch (Exception ex) {
    throw new ActionException(ex);
  } finally {
    // Close each stream in its own try block: in the original code both
    // closes shared one try, so a failure in in.close() leaked the output
    // stream. Also log the cause, not just a fixed message.
    if (in != null) {
      try {
        in.close();
      } catch (IOException ex) {
        LOG.error("IOException occurred when closing DFSInputStream!", ex);
      }
    }
    if (out != null) {
      try {
        out.close();
      } catch (IOException ex) {
        LOG.error("IOException occurred when closing DFSOutputStream!", ex);
      }
    }
  }
}
Example usage of org.smartdata.action.ActionException in the SSM project by Intel-bigdata: the execute method of the UnErasureCodingAction class.
@Override
protected void execute() throws Exception {
final String MATCH_RESULT = "The current EC policy is replication already.";
final String DIR_RESULT = "The replication EC policy is set successfully for the given directory.";
final String CONVERT_RESULT = "The file is converted successfully with replication EC policy.";
this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
if (fileStatus == null) {
throw new ActionException("File doesn't exist!");
}
ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy();
// if ecPolicy is null, it means replication.
if (srcEcPolicy == null) {
this.progress = 1.0F;
appendLog(MATCH_RESULT);
return;
}
if (fileStatus.isDir()) {
dfsClient.setErasureCodingPolicy(srcPath, ecPolicyName);
progress = 1.0F;
appendLog(DIR_RESULT);
return;
}
try {
convert(fileStatus);
setAttributes(srcPath, fileStatus, ecTmpPath);
dfsClient.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE);
appendLog(CONVERT_RESULT);
appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName()));
appendLog(String.format("The current EC policy is %s.", REPLICATION_POLICY_NAME));
} catch (ActionException ex) {
try {
if (dfsClient.getFileInfo(ecTmpPath) != null) {
dfsClient.delete(ecTmpPath, false);
}
} catch (IOException e) {
LOG.error("Failed to delete tmp file created during the conversion!");
}
throw new ActionException(ex);
}
}
Example usage of org.smartdata.action.ActionException in the SSM project by Intel-bigdata: the execute method of the ErasureCodingAction class.
@Override
protected void execute() throws Exception {
final String MATCH_RESULT = "The current EC policy is already matched with the target one.";
final String DIR_RESULT = "The EC policy is set successfully for the given directory.";
final String CONVERT_RESULT = "The file is converted successfully with the given or default EC policy.";
// Make sure DFSClient is used instead of SmartDFSClient.
this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
// keep attribute consistent
HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
if (fileStatus == null) {
throw new ActionException("File doesn't exist!");
}
validateEcPolicy(ecPolicyName);
ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy();
// if the current ecPolicy is already the target one, no need to convert
if (srcEcPolicy != null) {
if (srcEcPolicy.getName().equals(ecPolicyName)) {
appendLog(MATCH_RESULT);
this.progress = 1.0F;
return;
}
} else {
// if ecPolicy is null, it means replication.
if (ecPolicyName.equals(REPLICATION_POLICY_NAME)) {
appendLog(MATCH_RESULT);
this.progress = 1.0F;
return;
}
}
if (fileStatus.isDir()) {
dfsClient.setErasureCodingPolicy(srcPath, ecPolicyName);
this.progress = 1.0F;
appendLog(DIR_RESULT);
return;
}
HdfsDataOutputStream outputStream = null;
try {
// a file only with replication policy can be appended.
if (srcEcPolicy == null) {
// append the file to acquire the lock to avoid modifying, real appending wouldn't occur.
outputStream = dfsClient.append(srcPath, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
}
convert(fileStatus);
/**
* The append operation will change the modification time accordingly,
* so we use the FileStatus obtained before append to set ecTmp file's most attributes
*/
setAttributes(srcPath, fileStatus, ecTmpPath);
dfsClient.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE);
appendLog(CONVERT_RESULT);
if (srcEcPolicy == null) {
appendLog("The previous EC policy is replication.");
} else {
appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName()));
}
appendLog(String.format("The current EC policy is %s.", ecPolicyName));
} catch (ActionException ex) {
try {
if (dfsClient.getFileInfo(ecTmpPath) != null) {
dfsClient.delete(ecTmpPath, false);
}
} catch (IOException e) {
LOG.error("Failed to delete tmp file created during the conversion!");
}
throw new ActionException(ex);
} finally {
if (outputStream != null) {
try {
outputStream.close();
} catch (IOException ex) {
// Hide the expected exception that the original file is missing.
}
}
}
}
Example usage of org.smartdata.action.ActionException in the SSM project by Intel-bigdata: the updateStatus method of the CmdletManager class.
/**
 * Dispatches an incoming status message to the matching handler.
 * Failures are logged and swallowed so a bad report cannot crash the caller.
 *
 * @param status the status message; CmdletStatusUpdate and StatusReport are
 *     handled, other subtypes are ignored
 */
public void updateStatus(StatusMessage status) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got status update: " + status);
  }
  try {
    if (status instanceof CmdletStatusUpdate) {
      CmdletStatusUpdate statusUpdate = (CmdletStatusUpdate) status;
      onCmdletStatusUpdate(statusUpdate.getCmdletStatus());
    } else if (status instanceof StatusReport) {
      onStatusReport((StatusReport) status);
    }
  } catch (IOException e) {
    // Pass the exception as the last argument (not through String.format)
    // so SLF4J records the full stack trace.
    LOG.error("Update status {} failed", status, e);
  } catch (ActionException e) {
    // The original "Action Status error {}" consumed the Throwable as a
    // format parameter, losing the stack trace; pass it as the throwable.
    LOG.error("Action status error", e);
  }
}
Example usage of org.smartdata.action.ActionException in the SSM project by Intel-bigdata: the updateCmdActionStatus method of the CmdletDispatcher class.
/**
 * Records that a cmdlet has been dispatched to {@code host}: updates the
 * exec host, emits an initial ActionStatus for each launch action, and marks
 * the cmdlet DISPATCHED. Failures are logged and swallowed.
 *
 * @param cmdlet the dispatched cmdlet
 * @param host the executor host the cmdlet was sent to
 */
private void updateCmdActionStatus(LaunchCmdlet cmdlet, String host) {
  if (cmdletManager != null) {
    try {
      cmdletManager.updateCmdletExecHost(cmdlet.getCmdletId(), host);
    } catch (IOException e) {
      // Best-effort bookkeeping; a failed host update is not fatal.
    }
  }
  try {
    // Hoist the action count instead of re-querying the list on every
    // iteration, and keep loop variables scoped inside the loop.
    final int actionCount = cmdlet.getLaunchActions().size();
    for (int i = 0; i < actionCount; i++) {
      LaunchAction action = cmdlet.getLaunchActions().get(i);
      boolean isLastAction = i == actionCount - 1;
      ActionStatus actionStatus = new ActionStatus(
          cmdlet.getCmdletId(), isLastAction, action.getActionId(),
          System.currentTimeMillis());
      cmdletManager.onActionStatusUpdate(actionStatus);
    }
    CmdletStatus cmdletStatus = new CmdletStatus(
        cmdlet.getCmdletId(), System.currentTimeMillis(), CmdletState.DISPATCHED);
    cmdletManager.onCmdletStatusUpdate(cmdletStatus);
  } catch (IOException e) {
    LOG.info("update status failed.", e);
  } catch (ActionException e) {
    LOG.info("update action status failed.", e);
  }
}
Aggregations