
Example 16 with FileState

Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

From the class SmartDFSClient, method getBlockSize:

@Override
public long getBlockSize(String f) throws IOException {
    long blockSize = super.getBlockSize(f);
    FileState fileState = getFileState(f);
    if (fileState instanceof CompactFileState) {
        // For a small file compacted by SSM, report the block size of its container file.
        blockSize = super.getBlockSize(
                ((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath());
    }
    return blockSize;
}
Also used: NormalFileState (org.smartdata.model.NormalFileState), CompactFileState (org.smartdata.model.CompactFileState), FileState (org.smartdata.model.FileState), CompressionFileState (org.smartdata.model.CompressionFileState)
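
For context, a minimal usage sketch of the override above. The wrapper class, the helper name, and the import package assumed for SmartDFSClient are illustrative and not taken from the SSM sources.

import java.io.IOException;
import org.smartdata.hdfs.client.SmartDFSClient; // package assumed; adjust to the actual SSM layout

public class BlockSizeProbe {
    // For a small file that SSM has packed into a container file, the override above
    // resolves its CompactFileState and returns the container file's block size, so a
    // caller sizing reads or input splits sees the block size that actually applies.
    static long effectiveBlockSize(SmartDFSClient client, String path) throws IOException {
        return client.getBlockSize(path);
    }
}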

Example 17 with FileState

Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

From the class CheckCompressAction, method execute:

@Override
protected void execute() throws Exception {
    if (srcPath == null) {
        throw new IOException("File path is not given!");
    }
    // Consider directory case.
    if (dfsClient.getFileInfo(srcPath).isDir()) {
        appendLog("The given path is a directory, " + "not applicable to checking compression status.");
        return;
    }
    FileState fileState = HadoopUtil.getFileState(dfsClient, srcPath);
    if (fileState instanceof CompressionFileState) {
        appendLog("The given file has already been compressed by SSM.");
        appendLog("The compression codec is " + ((CompressionFileState) fileState).getCompressionImpl());
        appendLog("The original file length is " + ((CompressionFileState) fileState).getOriginalLength());
        appendLog("The current file length is " + ((CompressionFileState) fileState).getCompressedLength());
        return;
    }
    appendLog("The given file is not compressed.");
}
Also used: FileState (org.smartdata.model.FileState), CompressionFileState (org.smartdata.model.CompressionFileState), IOException (java.io.IOException)
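
A small sketch that reuses the same checks as the action above to answer "is this path SSM-compressed?" as a boolean (it assumes the path exists). The wrapper class and the package assumed for HadoopUtil are illustrative; the individual calls appear in the snippet.

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.smartdata.hdfs.HadoopUtil; // package assumed; the snippet references HadoopUtil unqualified
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.FileState;

public class CompressionCheck {
    // Returns true only for regular files whose FileState shows SSM has compressed them.
    static boolean isSsmCompressed(DFSClient dfsClient, String path) throws IOException {
        if (dfsClient.getFileInfo(path).isDir()) {
            return false; // directories are never compressed by SSM
        }
        FileState fileState = HadoopUtil.getFileState(dfsClient, path);
        return fileState instanceof CompressionFileState;
    }
}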

Example 18 with FileState

Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

From the class DecompressionAction, method execute:

protected void execute() throws Exception {
    if (filePath == null) {
        throw new IllegalArgumentException("File path is missing.");
    }
    if (compressTmpPath == null) {
        throw new IllegalArgumentException("Compression tmp path is not specified!");
    }
    if (!dfsClient.exists(filePath)) {
        throw new ActionException("Failed to execute Decompression Action: the given file doesn't exist!");
    }
    // Consider directory case.
    if (dfsClient.getFileInfo(filePath).isDir()) {
        appendLog("Decompression is not applicable to a directory.");
        return;
    }
    FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
    if (!(fileState instanceof CompressionFileState)) {
        appendLog("The file is already decompressed!");
        return;
    }
    OutputStream out = null;
    InputStream in = null;
    try {
        // No need to lock the file by append operation,
        // since compressed file cannot be modified.
        out = dfsClient.create(compressTmpPath, true);
        // Keep storage policy consistent.
        // The below statement is not supported on Hadoop-2.7.3 or CDH-5.10.1
        // String storagePolicyName = dfsClient.getStoragePolicy(filePath).getName();
        byte storagePolicyId = dfsClient.getFileInfo(filePath).getStoragePolicy();
        String storagePolicyName = SmartConstants.STORAGE_POLICY_MAP.get(storagePolicyId);
        // Skip when the policy id is unmapped or the policy is undefined.
        if (storagePolicyName != null && !storagePolicyName.equals("UNDEF")) {
            dfsClient.setStoragePolicy(compressTmpPath, storagePolicyName);
        }
        in = dfsClient.open(filePath);
        long length = dfsClient.getFileInfo(filePath).getLen();
        outputDecompressedData(in, out, length);
        // Copy ownership and permissions to the tmp file, then overwrite the original
        // file with the decompressed data.
        dfsClient.setOwner(compressTmpPath,
                dfsClient.getFileInfo(filePath).getOwner(),
                dfsClient.getFileInfo(filePath).getGroup());
        dfsClient.setPermission(compressTmpPath, dfsClient.getFileInfo(filePath).getPermission());
        dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
        appendLog("The given file is successfully decompressed by codec: " + ((CompressionFileState) fileState).getCompressionImpl());
    } catch (IOException e) {
        throw new IOException(e);
    } finally {
        if (out != null) {
            out.close();
        }
        if (in != null) {
            in.close();
        }
    }
}
Also used: FileState (org.smartdata.model.FileState), CompressionFileState (org.smartdata.model.CompressionFileState), InputStream (java.io.InputStream), OutputStream (java.io.OutputStream), ActionException (org.smartdata.action.ActionException), IOException (java.io.IOException)
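
The action writes the decompressed bytes to a temporary path, copies owner, group, and permissions from the original, then renames over it. Below is a generic sketch of that write-tmp-then-rename pattern using the stock Hadoop FileContext API; it illustrates the design choice rather than the SSM implementation, and the class and method names are made up.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class TmpThenRename {
    // Write new content to a temporary path, copy owner/group/permission from the
    // original, then atomically replace the original with a rename, mirroring what
    // the action above does through DFSClient.
    static void replaceWith(Configuration conf, Path original, Path tmp, byte[] newContent)
            throws IOException {
        FileContext fc = FileContext.getFileContext(conf);
        try (FSDataOutputStream out =
                fc.create(tmp, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE))) {
            out.write(newContent);
        }
        FileStatus status = fc.getFileStatus(original);
        fc.setOwner(tmp, status.getOwner(), status.getGroup());
        fc.setPermission(tmp, status.getPermission());
        fc.rename(tmp, original, Options.Rename.OVERWRITE);
    }
}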

Example 19 with FileState

Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

From the class SmartDFSClient, method getLocatedBlocks:

@Override
public LocatedBlocks getLocatedBlocks(String src, long start) throws IOException {
    LocatedBlocks locatedBlocks = super.getLocatedBlocks(src, start);
    // Redirect only when the call does not come from CALLER_CLASS and the reported
    // length is 0, i.e. the source file is a placeholder left after SSM compaction.
    if (!CALLER_CLASS.equals(Thread.currentThread().getStackTrace()[2].getClassName())
            && locatedBlocks.getFileLength() == 0) {
        FileState fileState = getFileState(src);
        if (fileState instanceof CompactFileState) {
            // Resolve locations against the container file, shifted by the small file's offset.
            String containerFile =
                    ((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath();
            long offset = ((CompactFileState) fileState).getFileContainerInfo().getOffset();
            return super.getLocatedBlocks(containerFile, offset + start);
        }
    }
    }
    return locatedBlocks;
}
Also used: NormalFileState (org.smartdata.model.NormalFileState), CompactFileState (org.smartdata.model.CompactFileState), FileState (org.smartdata.model.FileState), CompressionFileState (org.smartdata.model.CompressionFileState), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)
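
A worked illustration of the offset translation performed above, with made-up numbers: block locations for a compacted small file are resolved against the container file at getOffset() + start.

public class OffsetTranslation {
    public static void main(String[] args) {
        // Hypothetical layout: the small file's bytes live at offset 4096 of a container file.
        long containerOffset = 4096L; // CompactFileState.getFileContainerInfo().getOffset()
        long requestedStart = 512L;   // the 'start' argument passed to getLocatedBlocks(src, start)
        long translatedStart = containerOffset + requestedStart; // 4608
        // The override forwards the lookup to the container file at this translated position.
        System.out.println("super.getLocatedBlocks(containerFile, " + translatedStart + ")");
    }
}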

Example 20 with FileState

Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.

From the class SmartDFSClient, method open:

@Override
public DFSInputStream open(HdfsPathHandle fd, int buffersize, boolean verifyChecksum) throws IOException {
    String src = fd.getPath();
    DFSInputStream is = super.open(fd, buffersize, verifyChecksum);
    if (is.getFileLength() == 0) {
        // A zero-length file may be a placeholder for data SSM has moved elsewhere
        // (compacted or compressed), so re-open it through the SSM-aware stream.
        is.close();
        FileState fileState = getFileState(src);
        if (fileState.getFileStage().equals(FileState.FileStage.PROCESSING)) {
            throw new IOException("Cannot open " + src + " when it is under PROCESSING to "
                    + fileState.getFileType());
        }
        is = SmartInputStreamFactory.create(this, src, verifyChecksum, fileState);
    }
    reportFileAccessEvent(src);
    return is;
}
Also used: NormalFileState (org.smartdata.model.NormalFileState), CompactFileState (org.smartdata.model.CompactFileState), FileState (org.smartdata.model.FileState), CompressionFileState (org.smartdata.model.CompressionFileState), DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream), IOException (java.io.IOException)
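
Because the override throws while a file is still in FileState.FileStage.PROCESSING, a caller may want to back off and retry before giving up. A hedged sketch of such a wrapper is shown below; the retry policy, the class name, and the import package assumed for SmartDFSClient are illustrative, and open(String) is the variant inherited from DFSClient.

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.smartdata.hdfs.client.SmartDFSClient; // package assumed; adjust to the actual SSM layout

public class OpenWithRetry {
    // While SSM still has the file in PROCESSING, open() throws IOException,
    // so retry a few times with a short pause before propagating the failure.
    static DFSInputStream open(SmartDFSClient client, String path)
            throws IOException, InterruptedException {
        IOException last = null;
        for (int attempt = 0; attempt < 3; attempt++) {
            try {
                return client.open(path);
            } catch (IOException e) {
                last = e;
                Thread.sleep(1_000L);
            }
        }
        throw last;
    }
}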

Aggregations

FileState (org.smartdata.model.FileState): 34
CompressionFileState (org.smartdata.model.CompressionFileState): 26
CompactFileState (org.smartdata.model.CompactFileState): 19
NormalFileState (org.smartdata.model.NormalFileState): 15
IOException (java.io.IOException): 9
Test (org.junit.Test): 7
ArrayList (java.util.ArrayList): 4
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 4
Gson (com.google.gson.Gson): 3
HashMap (java.util.HashMap): 3
Path (org.apache.hadoop.fs.Path): 3
DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream): 3
EmptyResultDataAccessException (org.springframework.dao.EmptyResultDataAccessException): 3
SQLException (java.sql.SQLException): 2
BlockLocation (org.apache.hadoop.fs.BlockLocation): 2
FileStatus (org.apache.hadoop.fs.FileStatus): 2
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 2
HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus): 2
HdfsNamedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsNamedFileStatus): 2
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 2