Search in sources :

Example 1 with ActionException

use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class ErasureCodingBase, method convert.

/**
 * Copies the contents of {@code srcPath} into a new temporary file at
 * {@code ecTmpPath} so that the data is rewritten under the target erasure
 * coding policy ({@code ecPolicyName}). Block size and replication come from
 * the configuration; the permission and storage policy of the source file
 * are carried over so attributes stay consistent.
 *
 * @param srcFileStatus file status of the source file being converted
 * @throws ActionException if opening, creating, reading or writing fails
 */
protected void convert(HdfsFileStatus srcFileStatus) throws ActionException {
    DFSInputStream in = null;
    DFSOutputStream out = null;
    try {
        long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
        in = dfsClient.open(srcPath, bufferSize, true);
        short replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT);
        // Use the same FsPermission as srcPath.
        FsPermission permission = srcFileStatus.getPermission();
        out = dfsClient.create(ecTmpPath, permission, EnumSet.of(CreateFlag.CREATE), true, replication, blockSize, null, bufferSize, null, null, ecPolicyName);
        // Keep the storage policy consistent with the original file, except for the UNDEF policy.
        String storagePolicyName = dfsClient.getStoragePolicy(srcPath).getName();
        if (!storagePolicyName.equals("UNDEF")) {
            dfsClient.setStoragePolicy(ecTmpPath, storagePolicyName);
        }
        long bytesRemaining = srcFileStatus.getLen();
        byte[] buf = new byte[bufferSize];
        while (bytesRemaining > 0L) {
            int bytesToRead = (int) Math.min(bytesRemaining, (long) buf.length);
            int bytesRead = in.read(buf, 0, bytesToRead);
            if (bytesRead == -1) {
                // Unexpected EOF before the reported length was reached.
                break;
            }
            out.write(buf, 0, bytesRead);
            bytesRemaining -= (long) bytesRead;
            // Report progress as the fraction of bytes copied so far.
            this.progress = (float) (srcFileStatus.getLen() - bytesRemaining) / srcFileStatus.getLen();
        }
    } catch (Exception ex) {
        throw new ActionException(ex);
    } finally {
        // Close each stream independently: a failure closing the input stream
        // must not prevent the output stream from being closed (the previous
        // single try block leaked `out` whenever in.close() threw).
        if (in != null) {
            try {
                in.close();
            } catch (IOException ex) {
                LOG.error("IOException occurred when closing DFSInputStream!", ex);
            }
        }
        if (out != null) {
            try {
                out.close();
            } catch (IOException ex) {
                LOG.error("IOException occurred when closing DFSOutputStream!", ex);
            }
        }
    }
}
Also used : ActionException(org.smartdata.action.ActionException) DFSInputStream(org.apache.hadoop.hdfs.DFSInputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) IOException(java.io.IOException) ActionException(org.smartdata.action.ActionException)

Example 2 with ActionException

use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class UnErasureCodingAction, method execute.

/**
 * Converts an erasure-coded file (or directory) back to plain replication.
 * For a directory, only the EC policy attribute is set. For a file, the data
 * is rewritten to a temporary path with the replication policy, attributes
 * are copied, and the temporary file atomically replaces the original. On a
 * conversion failure, the temporary file is cleaned up (best effort) before
 * the failure is rethrown.
 *
 * @throws ActionException if the source does not exist or the conversion fails
 */
@Override
protected void execute() throws Exception {
    final String MATCH_RESULT = "The current EC policy is replication already.";
    final String DIR_RESULT = "The replication EC policy is set successfully for the given directory.";
    final String CONVERT_RESULT = "The file is converted successfully with replication EC policy.";
    this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
    HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
    if (fileStatus == null) {
        throw new ActionException("File doesn't exist!");
    }
    ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy();
    // A null EC policy means the file already uses plain replication.
    if (srcEcPolicy == null) {
        this.progress = 1.0F;
        appendLog(MATCH_RESULT);
        return;
    }
    if (fileStatus.isDir()) {
        // Directories only carry the policy attribute; no data to rewrite.
        dfsClient.setErasureCodingPolicy(srcPath, ecPolicyName);
        progress = 1.0F;
        appendLog(DIR_RESULT);
        return;
    }
    try {
        convert(fileStatus);
        setAttributes(srcPath, fileStatus, ecTmpPath);
        dfsClient.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE);
        appendLog(CONVERT_RESULT);
        appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName()));
        appendLog(String.format("The current EC policy is %s.", REPLICATION_POLICY_NAME));
    } catch (ActionException ex) {
        try {
            // Best-effort cleanup of the partially written temporary file.
            if (dfsClient.getFileInfo(ecTmpPath) != null) {
                dfsClient.delete(ecTmpPath, false);
            }
        } catch (IOException e) {
            // Log with the cause; the original message dropped the exception.
            LOG.error("Failed to delete tmp file created during the conversion!", e);
        }
        throw new ActionException(ex);
    }
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) ActionException(org.smartdata.action.ActionException) IOException(java.io.IOException)

Example 3 with ActionException

use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class ErasureCodingAction, method execute.

/**
 * Converts a file (or directory) to the given erasure coding policy. If the
 * current policy already matches the target, the action is a no-op. For a
 * directory, only the policy attribute is set. For a replicated file, the
 * source is opened for append first to hold a lease (preventing concurrent
 * modification) while the data is rewritten to a temporary path, after which
 * the temporary file atomically replaces the original. On a conversion
 * failure, the temporary file is cleaned up (best effort) before rethrowing.
 *
 * @throws ActionException if the source does not exist, the policy is
 *                         invalid, or the conversion fails
 */
@Override
protected void execute() throws Exception {
    final String MATCH_RESULT = "The current EC policy is already matched with the target one.";
    final String DIR_RESULT = "The EC policy is set successfully for the given directory.";
    final String CONVERT_RESULT = "The file is converted successfully with the given or default EC policy.";
    // Make sure DFSClient is used instead of SmartDFSClient.
    this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
    // Keep attributes consistent with the original file.
    HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
    if (fileStatus == null) {
        throw new ActionException("File doesn't exist!");
    }
    validateEcPolicy(ecPolicyName);
    ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy();
    // If the current policy is already the target one, no need to convert.
    if (srcEcPolicy != null) {
        if (srcEcPolicy.getName().equals(ecPolicyName)) {
            appendLog(MATCH_RESULT);
            this.progress = 1.0F;
            return;
        }
    } else {
        // A null EC policy means the file uses plain replication.
        if (ecPolicyName.equals(REPLICATION_POLICY_NAME)) {
            appendLog(MATCH_RESULT);
            this.progress = 1.0F;
            return;
        }
    }
    if (fileStatus.isDir()) {
        dfsClient.setErasureCodingPolicy(srcPath, ecPolicyName);
        this.progress = 1.0F;
        appendLog(DIR_RESULT);
        return;
    }
    HdfsDataOutputStream outputStream = null;
    try {
        // Only a file with replication policy can be appended.
        if (srcEcPolicy == null) {
            // Append the file to acquire the lease and block concurrent
            // modification; no data is actually appended.
            outputStream = dfsClient.append(srcPath, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
        }
        convert(fileStatus);
        /**
         * The append operation will change the modification time accordingly,
         * so we use the FileStatus obtained before append to set ecTmp file's most attributes
         */
        setAttributes(srcPath, fileStatus, ecTmpPath);
        dfsClient.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE);
        appendLog(CONVERT_RESULT);
        if (srcEcPolicy == null) {
            appendLog("The previous EC policy is replication.");
        } else {
            appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName()));
        }
        appendLog(String.format("The current EC policy is %s.", ecPolicyName));
    } catch (ActionException ex) {
        try {
            // Best-effort cleanup of the partially written temporary file.
            if (dfsClient.getFileInfo(ecTmpPath) != null) {
                dfsClient.delete(ecTmpPath, false);
            }
        } catch (IOException e) {
            // Log with the cause; the original message dropped the exception.
            LOG.error("Failed to delete tmp file created during the conversion!", e);
        }
        throw new ActionException(ex);
    } finally {
        if (outputStream != null) {
            try {
                outputStream.close();
            } catch (IOException ex) {
            // Hide the expected exception that the original file is missing
            // (it was replaced by the rename above).
            }
        }
    }
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) ActionException(org.smartdata.action.ActionException) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) IOException(java.io.IOException)

Example 4 with ActionException

use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class CmdletManager, method updateStatus.

/**
 * Dispatches an incoming status message to the matching handler: cmdlet
 * status updates go to {@code onCmdletStatusUpdate}, batched status reports
 * to {@code onStatusReport}. Failures are logged rather than propagated so a
 * bad report cannot break the reporting loop.
 *
 * @param status the status message received from an executor
 */
public void updateStatus(StatusMessage status) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Got status update: {}", status);
    }
    try {
        if (status instanceof CmdletStatusUpdate) {
            CmdletStatusUpdate statusUpdate = (CmdletStatusUpdate) status;
            onCmdletStatusUpdate(statusUpdate.getCmdletStatus());
        } else if (status instanceof StatusReport) {
            onStatusReport((StatusReport) status);
        }
    } catch (IOException e) {
        // Pass the exception as the trailing argument so SLF4J records the
        // stack trace; String.format(..., e) only captured e.toString().
        LOG.error("Update status {} failed", status, e);
    } catch (ActionException e) {
        // Do not consume the Throwable in a {} placeholder, or the stack
        // trace is lost; keep it as the dedicated last argument.
        LOG.error("Action status update for {} failed", status, e);
    }
}
Also used : StatusReport(org.smartdata.protocol.message.StatusReport) ActionException(org.smartdata.action.ActionException) CmdletStatusUpdate(org.smartdata.protocol.message.CmdletStatusUpdate) IOException(java.io.IOException)

Example 5 with ActionException

use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class CmdletDispatcher, method updateCmdActionStatus.

/**
 * Records that the given cmdlet has been dispatched to {@code host}: updates
 * the exec host, emits an initial ActionStatus for every launch action, and
 * marks the cmdlet as DISPATCHED. All failures are logged and swallowed so
 * dispatching itself is never interrupted.
 *
 * @param cmdlet the cmdlet that was dispatched
 * @param host   the executor host it was dispatched to
 */
private void updateCmdActionStatus(LaunchCmdlet cmdlet, String host) {
    // The original code null-checked cmdletManager for the first call but
    // dereferenced it unconditionally afterwards, risking an NPE; bail out
    // early instead.
    if (cmdletManager == null) {
        return;
    }
    try {
        cmdletManager.updateCmdletExecHost(cmdlet.getCmdletId(), host);
    } catch (IOException e) {
        // Ignore this: the exec host is informational only.
    }
    try {
        List<LaunchAction> launchActions = cmdlet.getLaunchActions();
        int actionCount = launchActions.size();
        for (int i = 0; i < actionCount; i++) {
            LaunchAction action = launchActions.get(i);
            // The last action in the list is flagged so completion of the
            // whole cmdlet can be detected.
            ActionStatus actionStatus = new ActionStatus(cmdlet.getCmdletId(), i == actionCount - 1, action.getActionId(), System.currentTimeMillis());
            cmdletManager.onActionStatusUpdate(actionStatus);
        }
        CmdletStatus cmdletStatus = new CmdletStatus(cmdlet.getCmdletId(), System.currentTimeMillis(), CmdletState.DISPATCHED);
        cmdletManager.onCmdletStatusUpdate(cmdletStatus);
    } catch (IOException e) {
        LOG.info("update status failed.", e);
    } catch (ActionException e) {
        LOG.info("update action status failed.", e);
    }
}
Also used : LaunchAction(org.smartdata.model.LaunchAction) ActionException(org.smartdata.action.ActionException) CmdletStatus(org.smartdata.protocol.message.CmdletStatus) IOException(java.io.IOException) ActionStatus(org.smartdata.protocol.message.ActionStatus)

Aggregations

ActionException (org.smartdata.action.ActionException)14 IOException (java.io.IOException)8 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)4 FileSystem (org.apache.hadoop.fs.FileSystem)3 Path (org.apache.hadoop.fs.Path)3 DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream)3 ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)3 OutputStream (java.io.OutputStream)2 Configuration (org.apache.hadoop.conf.Configuration)2 CompressionFileState (org.smartdata.model.CompressionFileState)2 Gson (com.google.gson.Gson)1 InputStream (java.io.InputStream)1 Random (java.util.Random)1 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)1 FsPermission (org.apache.hadoop.fs.permission.FsPermission)1 DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream)1 HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream)1 AddErasureCodingPolicyResponse (org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse)1 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)1 DatanodeInfoWithStorage (org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage)1