Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class SmartDFSClient, method getBlockSize.
@Override
public long getBlockSize(String f) throws IOException {
  long blockSize = super.getBlockSize(f);
  FileState fileState = getFileState(f);
  if (fileState instanceof CompactFileState) {
    // For a compacted small file, report the block size of its container file.
    blockSize = super.getBlockSize(
        ((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath());
  }
  return blockSize;
}
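For context, both SmartDFSClient overrides in this section assume that a CompactFileState carries a FileContainerInfo describing where the small file's bytes live inside its container file. A minimal sketch of that shape, reconstructed only from the accessors used in these snippets (getFileContainerInfo(), getContainerFilePath(), getOffset()); the length field and the constructor are assumptions:

// Illustrative sketch only, not the project's actual source.
public class FileContainerInfo {
  private final String containerFilePath; // container file that holds the small file's data
  private final long offset;              // where the small file's bytes start in the container
  private final long length;              // assumed: how many bytes belong to the small file

  public FileContainerInfo(String containerFilePath, long offset, long length) {
    this.containerFilePath = containerFilePath;
    this.offset = offset;
    this.length = length;
  }

  public String getContainerFilePath() { return containerFilePath; }
  public long getOffset() { return offset; }
  public long getLength() { return length; }
}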
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class CheckCompressAction, method execute.
@Override
protected void execute() throws Exception {
  if (srcPath == null) {
    throw new IOException("File path is not given!");
  }
  // Consider directory case.
  if (dfsClient.getFileInfo(srcPath).isDir()) {
    appendLog("The given path is a directory, not applicable to checking compression status.");
    return;
  }
  FileState fileState = HadoopUtil.getFileState(dfsClient, srcPath);
  if (fileState instanceof CompressionFileState) {
    appendLog("The given file has already been compressed by SSM.");
    appendLog("The compression codec is " + ((CompressionFileState) fileState).getCompressionImpl());
    appendLog("The original file length is " + ((CompressionFileState) fileState).getOriginalLength());
    appendLog("The current file length is " + ((CompressionFileState) fileState).getCompressedLength());
    return;
  }
  appendLog("The given file is not compressed.");
}
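All of the snippets in this section follow the same pattern: resolve the FileState for a path and branch on its concrete subtype. A hedged sketch of that dispatch, for illustration only (the describe helper is hypothetical; the instanceof checks and accessors mirror the code above and below):

// Hypothetical helper, not part of SSM: summarizes the subtype dispatch
// used by the surrounding snippets.
static String describe(FileState fileState) {
  if (fileState instanceof CompactFileState) {
    // Small file packed into a container file; its data lives at
    // (containerFilePath, offset), as used by getBlockSize and getLocatedBlocks.
    return "compact, container: "
        + ((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath();
  }
  if (fileState instanceof CompressionFileState) {
    // Compressed by SSM; codec and original/compressed lengths are tracked.
    return "compressed with " + ((CompressionFileState) fileState).getCompressionImpl();
  }
  // Otherwise treat it as a regular file; its stage is still meaningful (see open below).
  return "regular file, stage: " + fileState.getFileStage();
}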
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class DecompressionAction, method execute.
protected void execute() throws Exception {
  if (filePath == null) {
    throw new IllegalArgumentException("File path is missing.");
  }
  if (compressTmpPath == null) {
    throw new IllegalArgumentException("Compression tmp path is not specified!");
  }
  if (!dfsClient.exists(filePath)) {
    throw new ActionException("Failed to execute Decompression Action: the given file doesn't exist!");
  }
  // Consider directory case.
  if (dfsClient.getFileInfo(filePath).isDir()) {
    appendLog("Decompression is not applicable to a directory.");
    return;
  }
  FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
  if (!(fileState instanceof CompressionFileState)) {
    appendLog("The file is already decompressed!");
    return;
  }
  OutputStream out = null;
  InputStream in = null;
  try {
    // No need to lock the file with an append operation,
    // since a compressed file cannot be modified.
    out = dfsClient.create(compressTmpPath, true);
    // Keep the storage policy consistent.
    // The statement below is not supported on Hadoop-2.7.3 or CDH-5.10.1:
    // String storagePolicyName = dfsClient.getStoragePolicy(filePath).getName();
    byte storagePolicyId = dfsClient.getFileInfo(filePath).getStoragePolicy();
    String storagePolicyName = SmartConstants.STORAGE_POLICY_MAP.get(storagePolicyId);
    if (!storagePolicyName.equals("UNDEF")) {
      dfsClient.setStoragePolicy(compressTmpPath, storagePolicyName);
    }
    in = dfsClient.open(filePath);
    long length = dfsClient.getFileInfo(filePath).getLen();
    outputDecompressedData(in, out, length);
    // Overwrite the original file with the decompressed data,
    // preserving its owner, group and permission.
    dfsClient.setOwner(compressTmpPath, dfsClient.getFileInfo(filePath).getOwner(),
        dfsClient.getFileInfo(filePath).getGroup());
    dfsClient.setPermission(compressTmpPath, dfsClient.getFileInfo(filePath).getPermission());
    dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
    appendLog("The given file is successfully decompressed by codec: "
        + ((CompressionFileState) fileState).getCompressionImpl());
  } catch (IOException e) {
    throw new IOException(e);
  } finally {
    if (out != null) {
      out.close();
    }
    if (in != null) {
      in.close();
    }
  }
}
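The action decompresses into a temporary path, copies the original file's owner, group and permission onto it, and then renames it over the original with Options.Rename.OVERWRITE, so readers never see a partially written file. A minimal sketch of that metadata copy, assuming getFileInfo returns an HdfsFileStatus (it caches the status instead of re-fetching it; the effect should be the same):

// Sketch of the metadata copy above, with the file status cached once.
HdfsFileStatus status = dfsClient.getFileInfo(filePath);
dfsClient.setOwner(compressTmpPath, status.getOwner(), status.getGroup());
dfsClient.setPermission(compressTmpPath, status.getPermission());
dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);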
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class SmartDFSClient, method getLocatedBlocks.
@Override
public LocatedBlocks getLocatedBlocks(String src, long start) throws IOException {
  LocatedBlocks locatedBlocks = super.getLocatedBlocks(src, start);
  // A zero-length result may indicate a compacted small file whose data
  // actually lives in a container file (skipped when the caller is SSM itself).
  if (!CALLER_CLASS.equals(Thread.currentThread().getStackTrace()[2].getClassName())
      && locatedBlocks.getFileLength() == 0) {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      String containerFile =
          ((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath();
      long offset = ((CompactFileState) fileState).getFileContainerInfo().getOffset();
      return super.getLocatedBlocks(containerFile, offset + start);
    }
  }
  return locatedBlocks;
}
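The essential step is the offset translation: a request for blocks starting at position start within the small file becomes a request at offset + start within the container file. A worked example with illustrative numbers:

// Illustrative numbers only.
long offset = 1048576L;                    // small file's data starts 1 MiB into the container file
long start = 4096L;                        // caller asks for blocks from position 4 KiB of the small file
long containerPosition = offset + start;   // 1052672: position actually queried on the container file
// super.getLocatedBlocks(containerFile, containerPosition) then returns the
// container file's blocks covering that position.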
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
The class SmartDFSClient, method open.
@Override
public DFSInputStream open(HdfsPathHandle fd, int buffersize, boolean verifyChecksum)
    throws IOException {
  String src = fd.getPath();
  DFSInputStream is = super.open(fd, buffersize, verifyChecksum);
  if (is.getFileLength() == 0) {
    // A zero-length stream may be a compacted or compressed file; reopen it
    // through SSM according to its FileState.
    is.close();
    FileState fileState = getFileState(src);
    if (fileState.getFileStage().equals(FileState.FileStage.PROCESSING)) {
      throw new IOException("Cannot open " + src + " when it is under PROCESSING to "
          + fileState.getFileType());
    }
    is = SmartInputStreamFactory.create(this, src, verifyChecksum, fileState);
  }
  reportFileAccessEvent(src);
  return is;
}
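Because open throws an IOException while the file is still in the PROCESSING stage (SSM is converting it to a compact or compressed form), a caller may want to retry. A hypothetical retry wrapper, shown only as a sketch:

// Hypothetical helper, not part of SmartDFSClient.
static DFSInputStream openWithRetry(SmartDFSClient client, HdfsPathHandle fd,
    int buffersize, boolean verifyChecksum, int attempts)
    throws IOException, InterruptedException {
  IOException last = null;
  for (int i = 0; i < attempts; i++) {
    try {
      return client.open(fd, buffersize, verifyChecksum);
    } catch (IOException e) {
      last = e;              // may be the PROCESSING case above or a genuine failure
      Thread.sleep(1000L);   // simple fixed back-off before trying again
    }
  }
  throw last != null ? last : new IOException("openWithRetry called with attempts <= 0");
}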