Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
From the class SmallFileScheduler, method checkIfValidSmallFiles.
/**
 * Check if the small file list is valid.
 */
private boolean checkIfValidSmallFiles(List<String> smallFileList) {
  for (String smallFile : smallFileList) {
    if (smallFile == null || smallFile.isEmpty()) {
      LOG.error("Illegal small file path: {}", smallFile);
      return false;
    } else if (compactSmallFileLock.contains(smallFile)) {
      LOG.error(String.format("%s is locked.", smallFile));
      return false;
    } else if (handlingSmallFileCache.contains(smallFile)) {
      LOG.error(String.format("%s is being handled.", smallFile));
      return false;
    } else if (containerFileCache.contains(smallFile)
        || containerFileLock.contains(smallFile)) {
      LOG.error(String.format("%s is a container file.", smallFile));
      return false;
    }
  }

  // Get the small file info list and file state map from the meta store.
  List<FileInfo> fileInfos;
  Map<String, FileState> fileStateMap;
  try {
    fileInfos = metaStore.getFilesByPaths(smallFileList);
    fileStateMap = metaStore.getFileStates(smallFileList);
  } catch (MetaStoreException e) {
    LOG.error("Failed to get file states of small files.", e);
    return false;
  }

  // Build the small file info map.
  Map<String, FileInfo> fileInfoMap = new HashMap<>();
  for (FileInfo fileInfo : fileInfos) {
    fileInfoMap.put(fileInfo.getPath(), fileInfo);
  }

  // Check that all small files exist and share the same permission.
  FileInfo firstFileInfo = null;
  for (String smallFile : smallFileList) {
    FileInfo fileInfo = fileInfoMap.get(smallFile);
    if (fileInfo != null) {
      if (firstFileInfo == null) {
        firstFileInfo = fileInfo;
      } else if (!(new SmartFilePermission(firstFileInfo)).equals(
          new SmartFilePermission(fileInfo))) {
        LOG.debug(String.format("%s has a different file permission than %s.",
            firstFileInfo.getPath(), fileInfo.getPath()));
        return false;
      }
    } else {
      LOG.debug("{} does not exist.", smallFile);
      return false;
    }
  }

  // Check that the state of every small file is NORMAL.
  for (Map.Entry<String, FileState> entry : fileStateMap.entrySet()) {
    String smallFile = entry.getKey();
    FileState.FileType smallFileType = entry.getValue().getFileType();
    if (smallFileType != FileState.FileType.NORMAL) {
      LOG.debug(String.format("%s has invalid file state %s for small file compact.",
          smallFile, smallFileType.toString()));
      return false;
    }
  }
  return true;
}
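For context, a minimal call-site sketch is shown below. Only checkIfValidSmallFiles itself appears on this page, so the surrounding scheduling method, the ScheduleResult return value, and the addAll call on compactSmallFileLock are assumptions for illustration, not the project's actual code.

// Hypothetical call-site sketch: the compact action is rejected before any
// file is locked, so a failed validation needs no cleanup.
List<String> smallFiles = getSmallFileList(actionInfo);
if (!checkIfValidSmallFiles(smallFiles)) {
  return ScheduleResult.FAIL;  // assumed result type; nothing was locked
}
// Only after validation are the small files locked for this compact action.
compactSmallFileLock.addAll(smallFiles);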
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
From the class SmallFileScheduler, method isSuccessfulBySpeculation.
/**
 * Speculate the action status and set the result accordingly.
 */
@Override
public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
  try {
    boolean isSuccessful = true;
    List<FileState> fileStateList = new ArrayList<>();
    // If any small file is not in the expected state, the action is considered failed.
    for (String path : getSmallFileList(actionInfo)) {
      FileState fileState = HadoopUtil.getFileState(dfsClient, path);
      FileState.FileType fileType = fileState.getFileType();
      if (!isExpectedFileState(fileType, actionInfo.getActionName())) {
        isSuccessful = false;
        break;
      }
      // Only collect file states for the compact action.
      if (actionInfo.getActionName().equals(COMPACT_ACTION_NAME)) {
        fileStateList.add(fileState);
      }
    }
    if (!isSuccessful) {
      return false;
    }
    if (actionInfo.getActionName().equals(UNCOMPACT_ACTION_NAME)) {
      return true;
    }
    // Recover the action result for a successful compact action.
    if (actionInfo.getActionName().equals(COMPACT_ACTION_NAME)) {
      List<CompactFileState> compactFileStates = new ArrayList<>();
      assert fileStateList.size() == getSmallFileList(actionInfo).size();
      for (FileState fileState : fileStateList) {
        compactFileStates.add((CompactFileState) fileState);
      }
      actionInfo.setResult(new Gson().toJson(compactFileStates));
    }
    return true;
  } catch (IOException e) {
    LOG.warn("Failed to get file state; assuming this action was not successfully executed: {}",
        actionInfo.toString());
    return false;
  }
}
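The helper isExpectedFileState is referenced above but not shown on this page. A plausible sketch, assuming a compact action expects files to end up in COMPACT state and an uncompact action expects them back in NORMAL state, could look like the following; it is an illustration, not the project's actual implementation.

// Hypothetical sketch of the isExpectedFileState helper used above; the
// expected FileType per action name is an assumption for illustration.
private boolean isExpectedFileState(FileState.FileType fileType, String actionName) {
  if (COMPACT_ACTION_NAME.equals(actionName)) {
    return fileType == FileState.FileType.COMPACT;
  }
  // For the uncompact action, files should be back to NORMAL.
  return fileType == FileState.FileType.NORMAL;
}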
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
From the class CompressionScheduler, method isSuccessfulBySpeculation.
/**
 * Speculate the action status and set the result accordingly.
 */
@Override
public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
  String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
  try {
    FileState fileState = HadoopUtil.getFileState(dfsClient, path);
    FileState.FileType fileType = fileState.getFileType();
    if (actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID)) {
      return fileType == FileState.FileType.NORMAL;
    }
    // Recover the action result for a successful compress action.
    if (fileType == FileState.FileType.COMPRESSION) {
      CompressionFileInfo compressionFileInfo =
          new CompressionFileInfo((CompressionFileState) fileState);
      actionInfo.setResult(new Gson().toJson(compressionFileInfo));
      return true;
    }
    return false;
  } catch (IOException e) {
    LOG.warn("Failed to get file state; assuming this action was not successfully executed: {}",
        actionInfo.toString());
    return false;
  }
}
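A short usage sketch follows, showing how a recovered compress action might be re-validated after an SSM restart. The scheduler and recoveredAction variables, the loadPendingCompressAction helper, and the getResult accessor on ActionInfo are assumptions for illustration only.

// Hypothetical recovery flow (illustrative only): if speculation succeeds,
// the serialized CompressionFileInfo is available on the ActionInfo.
ActionInfo recoveredAction = loadPendingCompressAction();  // assumed helper
if (scheduler.isSuccessfulBySpeculation(recoveredAction)) {
  String compressionResultJson = recoveredAction.getResult();  // assumed accessor
  // mark the action as finished and persist the recovered result
}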
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
From the class ProtoBufferHelper, method convert.
public static FileState convert(FileStateProto proto) {
  FileState fileState = null;
  String path = proto.getPath();
  FileState.FileType type = FileState.FileType.fromValue(proto.getType());
  FileState.FileStage stage = FileState.FileStage.fromValue(proto.getStage());
  if (type == null) {
    return new NormalFileState(path);
  }
  switch (type) {
    case NORMAL:
      fileState = new NormalFileState(path);
      break;
    case COMPACT:
      CompactFileStateProto compactProto = proto.getCompactFileState();
      fileState = new CompactFileState(path, convert(compactProto));
      break;
    case COMPRESSION:
      CompressionFileStateProto compressionProto = proto.getCompressionFileState();
      // Convert to a CompressionFileState.
      fileState = convert(path, stage, compressionProto);
      break;
    case S3:
      S3FileStateProto s3Proto = proto.getS3FileState();
      // S3 conversion is not implemented yet, so fileState stays null here.
      // fileState = convert(path, type, stage, s3Proto);
      break;
    default:
      // Unknown type: fileState remains null.
  }
  return fileState;
}
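The RPC translator below uses the reverse direction, ProtoBufferHelper.convert(FileState), so a round trip between the model and the proto representation is possible. A minimal sketch, assuming that reverse overload exists alongside the proto-to-model direction shown above:

// Illustrative round trip between model and proto; assumes the
// FileState -> FileStateProto overload used by the RPC translator below.
FileState original = new NormalFileState("/tmp/demo/file");
FileStateProto proto = ProtoBufferHelper.convert(original);
FileState restored = ProtoBufferHelper.convert(proto);
// restored.getFileType() should again be FileState.FileType.NORMAL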
Use of org.smartdata.model.FileState in project SSM by Intel-bigdata.
From the class ServerProtocolsServerSideTranslator, method getFileState.
@Override
public GetFileStateResponseProto getFileState(RpcController controller,
    GetFileStateRequestProto req) throws ServiceException {
  try {
    String path = req.getFilePath();
    FileState fileState = server.getFileState(path);
    return GetFileStateResponseProto.newBuilder()
        .setFileState(ProtoBufferHelper.convert(fileState))
        .build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
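For completeness, a hypothetical client-side counterpart of this server-side translation might look like the sketch below. The rpcProxy handle and the request and response builder methods are assumed to mirror the getters used above; they are not shown on this page.

// Hypothetical client-side sketch (assumed proxy and builder names): build
// the request, invoke the RPC, and convert the returned proto back into a
// model FileState.
GetFileStateRequestProto req = GetFileStateRequestProto.newBuilder()
    .setFilePath(path)
    .build();
GetFileStateResponseProto resp = rpcProxy.getFileState(null, req);
FileState fileState = ProtoBufferHelper.convert(resp.getFileState());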