Use of org.smartdata.SmartFilePermission in project SSM by Intel-bigdata.
The example below is the execute method of the SmallFileCompactAction class.
@Override
protected void execute() throws Exception {
  // Use a plain DFSClient rather than SmartDFSClient, so reads and writes
  // bypass SSM's small-file redirection while compacting
  this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));

  // Parse and validate the small file list
  if (smallFiles == null || smallFiles.isEmpty()) {
    throw new IllegalArgumentException(
        String.format("Invalid small files: %s.", smallFiles));
  }
  ArrayList<String> smallFileList = new Gson().fromJson(
      smallFiles, new TypeToken<ArrayList<String>>() {}.getType());
  if (smallFileList == null || smallFileList.isEmpty()) {
    throw new IllegalArgumentException(
        String.format("Invalid small files: %s.", smallFiles));
  }

  // Validate the container file path
  if (containerFile == null || containerFile.isEmpty()) {
    throw new IllegalArgumentException(
        String.format("Invalid container file: %s.", containerFile));
  }

  // Parse the container file permission, if one was supplied
  SmartFilePermission filePermission = null;
  if (containerFilePermission != null && !containerFilePermission.isEmpty()) {
    filePermission = new Gson().fromJson(
        containerFilePermission, new TypeToken<SmartFilePermission>() {}.getType());
  }
  appendLog(String.format("Action starts at %s : compact small files to %s.",
      Utils.getFormatedCurrentTime(), containerFile));

  // Get the initial offset and output stream: append to an existing
  // container file, or create it (applying the permission) if absent
  long offset;
  OutputStream out;
  boolean isContainerFileExist = dfsClient.exists(containerFile);
  if (isContainerFileExist) {
    offset = dfsClient.getFileInfo(containerFile).getLen();
    out = CompatibilityHelperLoader.getHelper()
        .getDFSClientAppend(dfsClient, containerFile, 64 * 1024, offset);
  } else {
    out = dfsClient.create(containerFile, true);
    if (filePermission != null) {
      dfsClient.setOwner(containerFile, filePermission.getOwner(), filePermission.getGroup());
      dfsClient.setPermission(containerFile, new FsPermission(filePermission.getPermission()));
    }
    offset = 0L;
  }

  List<CompactFileState> compactFileStates = new ArrayList<>();
  for (String smallFile : smallFileList) {
    if ((smallFile != null) && !smallFile.isEmpty() && dfsClient.exists(smallFile)) {
      // Open an append stream on the small file to hold its lease and
      // block concurrent writers while it is being compacted
      HdfsDataOutputStream append = (HdfsDataOutputStream) CompatibilityHelperLoader
          .getHelper().getDFSClientAppend(dfsClient, smallFile, 1024);
      long fileLen = dfsClient.getFileInfo(smallFile).getLen();
      if (fileLen > 0) {
        try (InputStream in = dfsClient.open(smallFile)) {
          // Copy the bytes of the small file into the container file
          IOUtils.copyBytes(in, out, 4096);
          // Record where the small file now lives inside the container
          CompactFileState compactFileState = new CompactFileState(
              smallFile, new FileContainerInfo(containerFile, offset, fileLen));
          append.close();
          // Truncate the small file and store the container info in an XAttr
          truncateAndSetXAttr(smallFile, compactFileState);
          // Update the compact file state list, offset, status, and log
          compactFileStates.add(compactFileState);
          offset += fileLen;
          this.status = (smallFileList.indexOf(smallFile) + 1.0f) / smallFileList.size();
          appendLog(String.format("Compact %s to %s successfully.", smallFile, containerFile));
        } catch (IOException e) {
          // On failure: close the append and output streams, record the files
          // compacted so far, and delete a newly created container file that
          // stayed empty
          append.close();
          if (out != null) {
            out.close();
            appendResult(new Gson().toJson(compactFileStates));
          }
          if (!isContainerFileExist && compactFileStates.isEmpty()) {
            dfsClient.delete(containerFile, false);
          }
          throw e;
        }
      } else {
        // Nothing to copy; release the lease taken above
        append.close();
      }
    }
  }
  appendResult(new Gson().toJson(compactFileStates));
  if (out != null) {
    out.close();
  }
  if (!isContainerFileExist && compactFileStates.isEmpty()) {
    dfsClient.delete(containerFile, false);
  }
  appendLog(String.format("Compact all the small files to %s successfully.", containerFile));
}
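For reference, the two JSON arguments parsed above are plain Gson payloads. The following is a minimal, self-contained sketch of their expected shapes: a JSON array of paths for smallFiles, and an owner/group/permission object for containerFilePermission. The FilePermissionStub class here is a hypothetical stand-in that mirrors only the three getters the action calls; the real class is org.smartdata.SmartFilePermission from the SSM project, and the exact field names are an assumption.

import java.util.ArrayList;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;

public class CompactArgsSketch {
  // Hypothetical stand-in for org.smartdata.SmartFilePermission; it only
  // mirrors the getters used in execute(): getOwner, getGroup, getPermission
  static class FilePermissionStub {
    private String owner;
    private String group;
    private short permission;
    String getOwner() { return owner; }
    String getGroup() { return group; }
    short getPermission() { return permission; }
  }

  public static void main(String[] args) {
    Gson gson = new Gson();

    // smallFiles: a JSON array of HDFS paths
    String smallFiles = "[\"/dir/small-1\", \"/dir/small-2\"]";
    ArrayList<String> smallFileList = gson.fromJson(
        smallFiles, new TypeToken<ArrayList<String>>() {}.getType());
    System.out.println(smallFileList);  // [/dir/small-1, /dir/small-2]

    // containerFilePermission: owner/group/permission as a JSON object;
    // 493 decimal == 0755 octal (field names are assumed, see note above)
    String containerFilePermission =
        "{\"owner\":\"hdfs\",\"group\":\"supergroup\",\"permission\":493}";
    FilePermissionStub perm = gson.fromJson(
        containerFilePermission, new TypeToken<FilePermissionStub>() {}.getType());
    System.out.println(perm.getOwner() + ":" + perm.getGroup() + " " + perm.getPermission());
  }
}

With these shapes, an empty or null small-file list fails fast in the validation at the top of execute(), and an omitted containerFilePermission simply leaves the container file with the default ownership and mode it got at creation.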