Use of org.smartdata.model.FileContainerInfo in project SSM by Intel-bigdata.
In class ProtoBufferHelper, method convert:
public static FileStateProto convert(FileState fileState) {
  FileStateProto.Builder builder = FileStateProto.newBuilder();
  builder.setPath(fileState.getPath())
      .setType(fileState.getFileType().getValue())
      .setStage(fileState.getFileStage().getValue());
  if (fileState instanceof CompactFileState) {
    FileContainerInfo fileContainerInfo =
        ((CompactFileState) fileState).getFileContainerInfo();
    builder.setCompactFileState(CompactFileStateProto.newBuilder()
        .setContainerFilePath(fileContainerInfo.getContainerFilePath())
        .setOffset(fileContainerInfo.getOffset())
        .setLength(fileContainerInfo.getLength()));
  } else if (fileState instanceof CompressionFileState) {
    builder.setCompressionFileState(convert((CompressionFileState) fileState));
  }
  /*else if (fileState instanceof S3FileState) {
    builder.setS3FileState();
  } else if (fileState instanceof ) {
  }
  */
  return builder.build();
}
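For orientation, the opposite direction can be recovered through the standard protobuf-generated getters that mirror the setters above (getPath, hasCompactFileState, getContainerFilePath, and so on). The following is a minimal sketch under that assumption, not the actual SSM deserializer:

public static FileState convert(FileStateProto proto) {
  if (proto.hasCompactFileState()) {
    // Rebuild the model object from the proto fields set in convert() above
    CompactFileStateProto compact = proto.getCompactFileState();
    return new CompactFileState(proto.getPath(),
        new FileContainerInfo(compact.getContainerFilePath(),
            compact.getOffset(), compact.getLength()));
  }
  // Other file state types (e.g. compression) would be handled similarly
  throw new IllegalArgumentException("Unsupported file state type");
}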
Use of org.smartdata.model.FileContainerInfo in project SSM by Intel-bigdata.
In class SmartFileSystem, method getFileBlockLocations:
@Override
public BlockLocation[] getFileBlockLocations(Path p, final long start, final long len)
    throws IOException {
  BlockLocation[] blockLocations = super.getFileBlockLocations(p, start, len);
  if (blockLocations.length == 0) {
    // An empty result may mean the file was compacted into a container file
    FileState fileState = smartDFSClient.getFileState(getPathName(p));
    if (fileState instanceof CompactFileState) {
      FileContainerInfo fileContainerInfo =
          ((CompactFileState) fileState).getFileContainerInfo();
      String containerFile = fileContainerInfo.getContainerFilePath();
      long offset = fileContainerInfo.getOffset();
      // Query the container file instead, shifting the range by the file's offset
      blockLocations = super.getFileBlockLocations(
          new Path(containerFile), offset + start, len);
      // Report locations relative to the logical small file, not the container
      for (BlockLocation blockLocation : blockLocations) {
        blockLocation.setOffset(blockLocation.getOffset() - offset);
      }
    }
  }
  return blockLocations;
}
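The offset arithmetic is easiest to see with concrete numbers: if a small file sits at byte 1024 of its container, a container block reported at byte 1536 corresponds to byte 512 of the logical small file. A standalone illustration with hypothetical values, using Hadoop's BlockLocation directly:

// Hypothetical values: the small file is stored at byte 1024 of the container
long containerOffset = 1024L;
BlockLocation loc = new BlockLocation(
    new String[]{"dn1:50010"}, new String[]{"dn1"}, 1536L, 512L);
// Shift back so the caller sees offsets relative to the small file
loc.setOffset(loc.getOffset() - containerOffset);
// loc.getOffset() is now 512, i.e. byte 512 within the logical small file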
Use of org.smartdata.model.FileContainerInfo in project SSM by Intel-bigdata.
In class SmallFileCompactAction, method execute:
@Override
protected void execute() throws Exception {
  // Set the HDFS client via DFSClient rather than SmartDFSClient
  this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
  // Get the small file list
  if (smallFiles == null || smallFiles.isEmpty()) {
    throw new IllegalArgumentException(
        String.format("Invalid small files: %s.", smallFiles));
  }
  ArrayList<String> smallFileList = new Gson().fromJson(
      smallFiles, new TypeToken<ArrayList<String>>() {}.getType());
  if (smallFileList == null || smallFileList.isEmpty()) {
    throw new IllegalArgumentException(
        String.format("Invalid small files: %s.", smallFiles));
  }
  // Get the container file path
  if (containerFile == null || containerFile.isEmpty()) {
    throw new IllegalArgumentException(
        String.format("Invalid container file: %s.", containerFile));
  }
  // Get the container file permission
  SmartFilePermission filePermission = null;
  if (containerFilePermission != null && !containerFilePermission.isEmpty()) {
    filePermission = new Gson().fromJson(
        containerFilePermission, new TypeToken<SmartFilePermission>() {}.getType());
  }
  appendLog(String.format("Action starts at %s : compact small files to %s.",
      Utils.getFormatedCurrentTime(), containerFile));
  // Get the initial offset and output stream;
  // create the container file and set its permission if it does not exist
  long offset;
  OutputStream out;
  boolean isContainerFileExist = dfsClient.exists(containerFile);
  if (isContainerFileExist) {
    offset = dfsClient.getFileInfo(containerFile).getLen();
    out = CompatibilityHelperLoader.getHelper().getDFSClientAppend(
        dfsClient, containerFile, 64 * 1024, offset);
  } else {
    out = dfsClient.create(containerFile, true);
    if (filePermission != null) {
      dfsClient.setOwner(containerFile, filePermission.getOwner(), filePermission.getGroup());
      dfsClient.setPermission(containerFile, new FsPermission(filePermission.getPermission()));
    }
    offset = 0L;
  }
  List<CompactFileState> compactFileStates = new ArrayList<>();
  for (String smallFile : smallFileList) {
    if ((smallFile != null) && !smallFile.isEmpty() && dfsClient.exists(smallFile)) {
      // Open an append stream on the small file; it is closed before the truncate below
      HdfsDataOutputStream append = (HdfsDataOutputStream)
          CompatibilityHelperLoader.getHelper().getDFSClientAppend(dfsClient, smallFile, 1024);
      long fileLen = dfsClient.getFileInfo(smallFile).getLen();
      if (fileLen > 0) {
        try (InputStream in = dfsClient.open(smallFile)) {
          // Copy the bytes of the small file into the container file
          IOUtils.copyBytes(in, out, 4096);
          // Truncate the small file and record its container info in an XAttr
          CompactFileState compactFileState = new CompactFileState(
              smallFile, new FileContainerInfo(containerFile, offset, fileLen));
          append.close();
          truncateAndSetXAttr(smallFile, compactFileState);
          // Update the compact file state list, offset, status, and log
          compactFileStates.add(compactFileState);
          offset += fileLen;
          this.status = (smallFileList.indexOf(smallFile) + 1.0f) / smallFileList.size();
          appendLog(String.format("Compact %s to %s successfully.", smallFile, containerFile));
        } catch (IOException e) {
          // Close the append and output streams, and put the compact file
          // states gathered so far into the action result
          if (append != null) {
            append.close();
          }
          if (out != null) {
            out.close();
            appendResult(new Gson().toJson(compactFileStates));
          }
          // Remove a newly created container file that received no data
          if (!isContainerFileExist && compactFileStates.isEmpty()) {
            dfsClient.delete(containerFile, false);
          }
          throw e;
        }
      }
    }
  }
  appendResult(new Gson().toJson(compactFileStates));
  if (out != null) {
    out.close();
  }
  if (!isContainerFileExist && compactFileStates.isEmpty()) {
    dfsClient.delete(containerFile, false);
  }
  appendLog(String.format("Compact all the small files to %s successfully.", containerFile));
}
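Once a file has been compacted, the FileContainerInfo recorded in its CompactFileState is all that is needed to read the file back out of the container. A rough sketch with a hypothetical helper (readCompacted is not part of SSM) against a plain Hadoop FileSystem:

// Hypothetical read-back helper, assuming a plain FileSystem handle
static byte[] readCompacted(FileSystem fs, CompactFileState state) throws IOException {
  FileContainerInfo info = state.getFileContainerInfo();
  byte[] buf = new byte[(int) info.getLength()];
  try (FSDataInputStream in = fs.open(new Path(info.getContainerFilePath()))) {
    // Positioned read at the recorded offset for the recorded length
    in.readFully(info.getOffset(), buf);
  }
  return buf;
}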