Usage of com.baidu.hugegraph.entity.load.FileUploadResult in the project incubator-hugegraph-toolchain by Apache.
The upload method of the FileUploadController class.
// Receives one part of a chunked file upload for job {@code jobId} under
// connection {@code connId}. The client splits the file into {@code total}
// parts; this endpoint stores part {@code index}, and whichever request
// delivers the final part merges them and completes the file mapping.
// NOTE(review): `token` appears to identify one logical upload session; its
// presence in uploadingTokenLocks() doubles as a "not deleted" marker —
// confirm against the token-issuing/delete endpoints.
@PostMapping
public FileUploadResult upload(@PathVariable("connId") int connId, @PathVariable("jobId") int jobId, @RequestParam("file") MultipartFile file, @RequestParam("name") String fileName, @RequestParam("token") String token, @RequestParam("total") int total, @RequestParam("index") int index) {
// Reject out-of-range part indexes and token/filename mismatches up front
this.checkTotalAndIndexValid(total, index);
this.checkFileNameMatchToken(fileName, token);
JobManager jobEntity = this.jobService.get(jobId);
this.checkFileValid(connId, jobId, jobEntity, file, fileName);
// First arriving part moves the job from DEFAULT into UPLOADING state
if (jobEntity.getJobStatus() == JobStatus.DEFAULT) {
jobEntity.setJobStatus(JobStatus.UPLOADING);
this.jobService.update(jobEntity);
}
// Ensure location exist and generate file path
String filePath = this.generateFilePath(connId, jobId, fileName);
// Check this file deleted before
// A missing lock entry means the upload token was removed (file deleted
// concurrently), so this part is rejected with a FAILURE result.
ReadWriteLock lock = this.uploadingTokenLocks().get(token);
FileUploadResult result;
if (lock == null) {
result = new FileUploadResult();
result.setName(file.getOriginalFilename());
result.setSize(file.getSize());
result.setStatus(FileUploadResult.Status.FAILURE);
result.setCause("File has been deleted");
return result;
}
// Read lock allows parts of the same token to upload concurrently;
// presumably the delete path takes the write lock — TODO confirm.
lock.readLock().lock();
try {
result = this.service.uploadFile(file, index, filePath);
if (result.getStatus() == FileUploadResult.Status.FAILURE) {
return result;
}
// Serialize mapping creation and part-merging across concurrent
// part uploads of the same service instance.
synchronized (this.service) {
// Verify the existence of fragmented files
FileMapping mapping = this.service.get(connId, jobId, fileName);
if (mapping == null) {
// First part for this file: create the mapping record
mapping = new FileMapping(connId, fileName, filePath);
mapping.setJobId(jobId);
mapping.setFileStatus(FileMappingStatus.UPLOADING);
this.service.save(mapping);
} else {
if (mapping.getFileStatus() == FileMappingStatus.COMPLETED) {
// Another request already merged all parts; reuse its id
result.setId(mapping.getId());
// Remove uploading file token
this.uploadingTokenLocks().remove(token);
return result;
} else {
mapping.setUpdateTime(HubbleUtil.nowDate());
}
}
// Determine whether all the parts have been uploaded, then merge them
boolean merged = this.service.tryMergePartFiles(filePath, total);
if (!merged) {
// Not all parts present yet: persist progress and return
this.service.update(mapping);
return result;
}
// Read column names and values then fill it
this.service.extractColumns(mapping);
mapping.setFileStatus(FileMappingStatus.COMPLETED);
mapping.setTotalLines(FileUtil.countLines(mapping.getPath()));
mapping.setTotalSize(FileUtils.sizeOf(new File(mapping.getPath())));
// Move to the directory corresponding to the file mapping Id
String newPath = this.service.moveToNextLevelDir(mapping);
// Update file mapping stored path
mapping.setPath(newPath);
this.service.update(mapping);
// Update Job Manager size
long jobSize = jobEntity.getJobSize() + mapping.getTotalSize();
jobEntity.setJobSize(jobSize);
this.jobService.update(jobEntity);
result.setId(mapping.getId());
// Remove uploading file token
this.uploadingTokenLocks().remove(token);
}
return result;
} finally {
// Always release the read lock, including on the early returns above
lock.readLock().unlock();
}
}
Usage of com.baidu.hugegraph.entity.load.FileUploadResult in the project incubator-hugegraph-toolchain by Apache.
The uploadFile method of the FileMappingService class.
/*
 * Saves one part of a chunked upload as "<originalName>-<index>" under
 * dirPath and reports the outcome in a FileUploadResult (never null).
 *
 * Fixes over the previous version:
 * - The client-supplied original filename is validated: it may be null,
 *   and it may contain path separators or ".." (path traversal) since
 *   multipart input is untrusted; such names now fail fast instead of
 *   being concatenated into the destination path.
 * - mkdirs() and delete() return values are checked; failures surface
 *   as FAILURE results rather than a confusing transferTo error later.
 */
public FileUploadResult uploadFile(MultipartFile srcFile, int index, String dirPath) {
FileUploadResult result = new FileUploadResult();
// Current part saved path
String partName = srcFile.getOriginalFilename();
result.setName(partName);
result.setSize(srcFile.getSize());
// Reject missing or path-traversal file names from untrusted input
if (partName == null || partName.isEmpty() ||
    partName.contains("/") || partName.contains("\\") ||
    partName.contains("..")) {
result.setStatus(FileUploadResult.Status.FAILURE);
result.setCause("Invalid upload file name: " + partName);
return result;
}
// File all parts saved path
File dir = new File(dirPath);
if (!dir.exists() && !dir.mkdirs()) {
result.setStatus(FileUploadResult.Status.FAILURE);
result.setCause("Failed to create directory: " + dirPath);
return result;
}
File destFile = new File(dir, partName + "-" + index);
// A stale part from a previous attempt must be removed before rewriting
if (destFile.exists() && !destFile.delete()) {
result.setStatus(FileUploadResult.Status.FAILURE);
result.setCause("Failed to delete stale part file: " + destFile);
return result;
}
log.debug("Uploading file {} length {}", partName, srcFile.getSize());
try {
// transferTo should accept absolute path
srcFile.transferTo(destFile.getAbsoluteFile());
result.setStatus(FileUploadResult.Status.SUCCESS);
log.debug("Uploaded file part {}-{}", partName, index);
} catch (Exception e) {
log.error("Failed to save upload file and insert " + "file mapping record", e);
result.setStatus(FileUploadResult.Status.FAILURE);
result.setCause(e.getMessage());
}
return result;
}
Aggregations