Example 11 with ActionException

Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class CompressionAction, method execute:

@Override
protected void execute() throws Exception {
    if (filePath == null) {
        throw new IllegalArgumentException("File path is missing.");
    }
    if (compressTmpPath == null) {
        throw new IllegalArgumentException("Compression tmp path is not specified!");
    }
    if (!compressionCodecList.contains(compressCodec)) {
        throw new ActionException("Compression Action failed due to unsupported codec: " + compressCodec);
    }
    appendLog(String.format("Compression Action started at %s for %s", Utils.getFormatedCurrentTime(), filePath));
    if (!dfsClient.exists(filePath)) {
        throw new ActionException("Failed to execute Compression Action: the given file doesn't exist!");
    }
    HdfsFileStatus srcFileStatus = dfsClient.getFileInfo(filePath);
    // Consider directory case.
    if (srcFileStatus.isDir()) {
        appendLog("Compression is not applicable to a directory.");
        return;
    }
    // Generate compressed file
    compressionFileState = new CompressionFileState(filePath, bufferSize, compressCodec);
    compressionFileState.setOriginalLength(srcFileStatus.getLen());
    OutputStream appendOut = null;
    DFSInputStream in = null;
    OutputStream out = null;
    try {
        if (srcFileStatus.getLen() == 0) {
            compressionFileInfo = new CompressionFileInfo(false, compressionFileState);
        } else {
            short replication = srcFileStatus.getReplication();
            long blockSize = srcFileStatus.getBlockSize();
            long fileSize = srcFileStatus.getLen();
            appendLog("File length: " + fileSize);
            bufferSize = getActualBuffSize(fileSize);
            // SmartDFSClient will fail to open a file whose FileState was set to
            // PROCESSING by the compression scheduler. But since a plain DFSClient
            // may still be used, we open an append stream to lock the file and
            // avoid any concurrent modification.
            appendOut = CompatibilityHelperLoader.getHelper().getDFSClientAppend(dfsClient, filePath, bufferSize);
            in = dfsClient.open(filePath);
            out = dfsClient.create(compressTmpPath, true, replication, blockSize);
            // Keep the storage policy consistent.
            // The statement below is not supported on Hadoop 2.7.3 or CDH 5.10.1:
            // String storagePolicyName = dfsClient.getStoragePolicy(filePath).getName();
            byte storagePolicyId = srcFileStatus.getStoragePolicy();
            String storagePolicyName = SmartConstants.STORAGE_POLICY_MAP.get(storagePolicyId);
            if (!storagePolicyName.equals("UNDEF")) {
                dfsClient.setStoragePolicy(compressTmpPath, storagePolicyName);
            }
            compress(in, out);
            HdfsFileStatus destFileStatus = dfsClient.getFileInfo(compressTmpPath);
            dfsClient.setOwner(compressTmpPath, srcFileStatus.getOwner(), srcFileStatus.getGroup());
            dfsClient.setPermission(compressTmpPath, srcFileStatus.getPermission());
            compressionFileState.setCompressedLength(destFileStatus.getLen());
            appendLog("Compressed file length: " + destFileStatus.getLen());
            compressionFileInfo = new CompressionFileInfo(true, compressTmpPath, compressionFileState);
        }
        compressionFileState.setBufferSize(bufferSize);
        appendLog("Compression buffer size: " + bufferSize);
        appendLog("Compression codec: " + compressCodec);
        String compressionInfoJson = new Gson().toJson(compressionFileInfo);
        appendResult(compressionInfoJson);
        LOG.warn(compressionInfoJson);
        if (compressionFileInfo.needReplace()) {
            // Add the xattr to the temp path.
            // Note: the content written to an XAttr must be less than 64 KB.
            dfsClient.setXAttr(compressionFileInfo.getTempPath(), XATTR_NAME, SerializationUtils.serialize(compressionFileState), EnumSet.of(XAttrSetFlag.CREATE));
            // The rename operation was moved here from CompressionScheduler,
            // so modification of the original file is avoided.
            dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
        } else {
            // Add to raw path
            dfsClient.setXAttr(filePath, XATTR_NAME, SerializationUtils.serialize(compressionFileState), EnumSet.of(XAttrSetFlag.CREATE));
        }
    } catch (IOException e) {
        throw new IOException(e);
    } finally {
        if (appendOut != null) {
            try {
                appendOut.close();
            } catch (IOException e) {
            // Swallow the expected exception: the original file no longer exists after the rename.
            }
        }
        if (in != null) {
            in.close();
        }
        if (out != null) {
            out.close();
        }
    }
}
Also used: OutputStream (java.io.OutputStream), ActionException (org.smartdata.action.ActionException), Gson (com.google.gson.Gson), IOException (java.io.IOException), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), CompressionFileInfo (org.smartdata.model.CompressionFileInfo), CompressionFileState (org.smartdata.model.CompressionFileState), DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream)
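
The compressed file's state is persisted in an XAttr so that it can be recovered later. Below is a minimal sketch (not part of the SSM source) of reading it back; it assumes DFSClient#getXAttrs and a getOriginalLength() getter mirroring the setter used above:

// Hedged sketch: recover the CompressionFileState written above.
Map<String, byte[]> xAttrs = dfsClient.getXAttrs(filePath);
byte[] serialized = xAttrs.get(XATTR_NAME);
if (serialized != null) {
    CompressionFileState state =
            (CompressionFileState) SerializationUtils.deserialize(serialized);
    // getOriginalLength() is assumed to mirror setOriginalLength() above.
    appendLog("Original length: " + state.getOriginalLength());
}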

Example 12 with ActionException

Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class CheckStorageAction, method execute:

@Override
protected void execute() throws Exception {
    if (fileName == null) {
        throw new IllegalArgumentException("File parameter is missing! ");
    }
    HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
    if (fileStatus == null) {
        throw new ActionException("File does not exist.");
    }
    if (fileStatus.isDir()) {
        appendResult("This is a directory which has no storage result!");
        // Also append to the log for the convenience of the UI implementation
        appendLog("This is a directory which has no storage result!");
        return;
    }
    long length = fileStatus.getLen();
    List<LocatedBlock> locatedBlocks = dfsClient.getLocatedBlocks(fileName, 0, length).getLocatedBlocks();
    if (locatedBlocks.size() == 0) {
        appendResult("File '" + fileName + "' has no blocks.");
        appendLog("File '" + fileName + "' has no blocks.");
        return;
    }
    for (LocatedBlock locatedBlock : locatedBlocks) {
        StringBuilder blockInfo = new StringBuilder();
        blockInfo.append("File offset = ").append(locatedBlock.getStartOffset()).append(", ");
        blockInfo.append("Block locations = {");
        for (DatanodeInfo datanodeInfo : locatedBlock.getLocations()) {
            blockInfo.append(datanodeInfo.getName());
            if (datanodeInfo instanceof DatanodeInfoWithStorage) {
                blockInfo.append("[").append(((DatanodeInfoWithStorage) datanodeInfo).getStorageType()).append("]");
            }
            blockInfo.append(" ");
        }
        blockInfo.append("}");
        appendResult(blockInfo.toString());
        appendLog(blockInfo.toString());
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DatanodeInfoWithStorage (org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), ActionException (org.smartdata.action.ActionException), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)
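
For reference, each line emitted by the loop above has roughly the following shape; the hostnames, port, and storage types here are hypothetical:

File offset = 0, Block locations = {datanode-1:50010[DISK] datanode-2:50010[SSD] }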

Example 13 with ActionException

Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class AppendFileAction, method execute:

@Override
protected void execute() throws Exception {
    if (srcPath != null && !srcPath.isEmpty()) {
        Path path = new Path(srcPath);
        FileSystem fileSystem = path.getFileSystem(conf);
        appendLog(String.format("Action starts at %s : Read %s", Utils.getFormatedCurrentTime(), srcPath));
        if (!fileSystem.exists(path)) {
            throw new ActionException("Append Action fails, file doesn't exist!");
        }
        appendLog(String.format("Append to %s", srcPath));
        Random random = new Random();
        FSDataOutputStream os = null;
        try {
            os = fileSystem.append(path, bufferSize);
            long remaining = length;
            while (remaining > 0) {
                int toAppend = (int) Math.min(remaining, bufferSize);
                byte[] bytes = new byte[toAppend];
                random.nextBytes(bytes);
                os.write(bytes);
                remaining -= toAppend;
            }
        } finally {
            if (os != null) {
                os.close();
            }
        }
    } else {
        throw new ActionException("File parameter is missing.");
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Random (java.util.Random), FileSystem (org.apache.hadoop.fs.FileSystem), ActionException (org.smartdata.action.ActionException), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
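
Since FSDataOutputStream is Closeable, the try/finally above can equally be written with try-with-resources; a behavior-equivalent sketch:

// Behavior-equivalent sketch of the append loop using try-with-resources.
try (FSDataOutputStream os = fileSystem.append(path, bufferSize)) {
    long remaining = length;
    while (remaining > 0) {
        int toAppend = (int) Math.min(remaining, bufferSize);
        byte[] bytes = new byte[toAppend];
        random.nextBytes(bytes);
        os.write(bytes);
        remaining -= toAppend;
    }
}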

Example 14 with ActionException

Use of org.smartdata.action.ActionException in project SSM by Intel-bigdata.

From the class AddErasureCodingPolicy, method execute:

@Override
public void execute() throws Exception {
    this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
    if (codecName == null || numDataUnits <= 0 || numParityUnits <= 0 || cellSize <= 0 || cellSize % 1024 != 0) {
        throw new ActionException("Illegal EC policy Schema! " + "A valid codec name should be given, " + "the dataNum, parityNum and cellSize should be positive and " + "the cellSize should be divisible by 1024.");
    }
    ECSchema ecSchema = new ECSchema(codecName, numDataUnits, numParityUnits);
    ErasureCodingPolicy ecPolicy = new ErasureCodingPolicy(ecSchema, cellSize);
    AddErasureCodingPolicyResponse addEcResponse = dfsClient.addErasureCodingPolicies(new ErasureCodingPolicy[] { ecPolicy })[0];
    if (addEcResponse.isSucceed()) {
        appendLog(String.format("EC policy named %s is added successfully!", addEcResponse.getPolicy().getName()));
    } else {
        appendLog(String.format("Failed to add the given EC policy!"));
        throw new ActionException(addEcResponse.getErrorMsg());
    }
}
Also used: ECSchema (org.apache.hadoop.io.erasurecode.ECSchema), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), AddErasureCodingPolicyResponse (org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse), ActionException (org.smartdata.action.ActionException)
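
On Hadoop 3, a policy added this way starts in the disabled state. A hedged sketch (not part of the SSM source) of enabling and applying it, assuming Hadoop 3's DFSClient#enableErasureCodingPolicy and DFSClient#setErasureCodingPolicy; the path /ec-data is hypothetical:

// Hedged sketch: enable the newly added policy and bind a directory to it.
String policyName = addEcResponse.getPolicy().getName();
dfsClient.enableErasureCodingPolicy(policyName);
// The target directory "/ec-data" is hypothetical.
dfsClient.setErasureCodingPolicy("/ec-data", policyName);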

Aggregations

ActionException (org.smartdata.action.ActionException): 14
IOException (java.io.IOException): 8
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 4
FileSystem (org.apache.hadoop.fs.FileSystem): 3
Path (org.apache.hadoop.fs.Path): 3
DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream): 3
ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy): 3
OutputStream (java.io.OutputStream): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
CompressionFileState (org.smartdata.model.CompressionFileState): 2
Gson (com.google.gson.Gson): 1
InputStream (java.io.InputStream): 1
Random (java.util.Random): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 1
DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream): 1
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 1
AddErasureCodingPolicyResponse (org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse): 1
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1
DatanodeInfoWithStorage (org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage): 1