Search in sources:

Example 1 with File

use of com.albedo.java.modules.file.domain.File in project albedo by somowhere.

The `merge` method of the class `AliFileChunkStrategyImpl`.

/**
 * Merges the already-uploaded local chunk files into a single object on Alibaba Cloud OSS
 * using the multipart-upload API, one part per chunk.
 *
 * @param files    local chunk files, ordered by chunk index
 * @param path     logical path stored on the resulting {@code File} record
 * @param fileName target object file name
 * @param info     merge metadata (original file name, content type, ...)
 * @return a successful {@link Result} wrapping the persisted-style {@code File} value
 * @throws IOException if a chunk file cannot be opened or read
 */
@Override
protected Result<File> merge(List<java.io.File> files, String path, String fileName, FileChunksMergeDto info) throws IOException {
    FileServerProperties.Ali ali = fileProperties.getAli();
    String bucketName = ali.getBucket();
    OSS ossClient = new OSSClientBuilder().build(ali.getEndpoint(), ali.getAccessKeyId(), ali.getAccessKeySecret());
    try {
        // Date-based folder (yyyy/MM) used as the object-key prefix.
        String relativePath = LocalDate.now().format(DateTimeFormatter.ofPattern("yyyy/MM"));
        // Full object key on the OSS side.
        String relativeFileName = relativePath + StrPool.SLASH + fileName;
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentDisposition("attachment;fileName=" + info.getSubmittedFileName());
        metadata.setContentType(info.getContextType());
        // Step 1: initiate the multipart upload; the returned uploadId uniquely
        // identifies this upload for all subsequent part/complete/abort calls.
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, relativeFileName, metadata);
        InitiateMultipartUploadResult result = ossClient.initiateMultipartUpload(request);
        String uploadId = result.getUploadId();
        // Step 2: upload each chunk as one part, collecting the PartETag (ETag + part
        // number) that OSS returns for every part.
        List<PartETag> partTags = new ArrayList<>(files.size());
        for (int i = 0; i < files.size(); i++) {
            java.io.File file = files.get(i);
            // try-with-resources: the original leaked this stream on every iteration.
            try (FileInputStream in = FileUtils.openInputStream(file)) {
                UploadPartRequest uploadPartRequest = new UploadPartRequest();
                uploadPartRequest.setBucketName(bucketName);
                uploadPartRequest.setKey(relativeFileName);
                uploadPartRequest.setUploadId(uploadId);
                uploadPartRequest.setInputStream(in);
                // Part size: every part except the last must be at least 100 KB.
                uploadPartRequest.setPartSize(file.length());
                // Part numbers must be in 1..10000; out-of-range values are rejected
                // by OSS with an InvalidArgument error.
                uploadPartRequest.setPartNumber(i + 1);
                // Parts may be uploaded in any order (even from different clients);
                // OSS reassembles the object by part number.
                UploadPartResult uploadPartResult = ossClient.uploadPart(uploadPartRequest);
                partTags.add(uploadPartResult.getPartETag());
            }
        }
        // Step 3: complete the upload. The PartETag list must be sorted ascending by
        // part number; OSS validates every part before assembling the final object.
        partTags.sort(Comparator.comparingInt(PartETag::getPartNumber));
        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(bucketName, relativeFileName, uploadId, partTags);
        CompleteMultipartUploadResult uploadResult = ossClient.completeMultipartUpload(completeMultipartUploadRequest);
        String url = ali.getUrlPrefix() + relativePath + StrPool.SLASH + fileName;
        File filePo = File.builder().bucket(uploadResult.getBucketName()).path(path).url(StrUtil.replace(url, "\\\\", StrPool.SLASH)).build();
        return Result.buildOkData(filePo);
    } finally {
        // Release the client even when an upload step throws; the original only
        // shut it down on the success path.
        ossClient.shutdown();
    }
}
Also used : ArrayList(java.util.ArrayList) OSS(com.aliyun.oss.OSS) FileInputStream(java.io.FileInputStream) FileServerProperties(com.albedo.java.modules.file.properties.FileServerProperties) File(com.albedo.java.modules.file.domain.File) OSSClientBuilder(com.aliyun.oss.OSSClientBuilder)

Example 2 with File

use of com.albedo.java.modules.file.domain.File in project albedo by somowhere.

The `merge` method of the class `FastDfsFileChunkStrategyImpl`.

/**
 * Merges local chunk files into one file on FastDFS: the first chunk is uploaded as an
 * appender file, and every subsequent chunk is appended to it.
 *
 * @param files    local chunk files, ordered by chunk index
 * @param path     logical path (unused here; FastDFS assigns its own store path)
 * @param fileName target file name (unused here; FastDFS assigns its own name)
 * @param info     merge metadata (file extension, ...)
 * @return the stored {@code File} on success, or a failure {@link Result} when nothing
 *         was uploaded (empty chunk list)
 * @throws IOException if a chunk file cannot be opened or read
 */
@Override
protected Result<File> merge(List<java.io.File> files, String path, String fileName, FileChunksMergeDto info) throws IOException {
    StorePath storePath = null;
    long start = System.currentTimeMillis();
    for (int i = 0; i < files.size(); i++) {
        java.io.File file = files.get(i);
        // try-with-resources: the original leaked this stream on every iteration.
        try (FileInputStream in = FileUtils.openInputStream(file)) {
            if (i == 0) {
                // First chunk creates the appender file and yields the store path.
                storePath = storageClient.uploadAppenderFile(null, in, file.length(), info.getExt());
            } else {
                storageClient.appendFile(storePath.getGroup(), storePath.getPath(), in, file.length());
            }
        }
    }
    // storePath stays null only when the chunk list was empty.
    if (storePath == null) {
        return Result.buildFail("上传失败");
    }
    long end = System.currentTimeMillis();
    log.info("上传耗时={}", (end - start));
    String url = fileProperties.getFastDfs().getUrlPrefix() + storePath.getFullPath();
    File filePo = File.builder().url(url).bucket(storePath.getGroup()).path(storePath.getPath()).build();
    return Result.buildOkData(filePo);
}
Also used : File(com.albedo.java.modules.file.domain.File) FileInputStream(java.io.FileInputStream) StorePath(com.github.tobato.fastdfs.domain.fdfs.StorePath)

Example 3 with File

use of com.albedo.java.modules.file.domain.File in project albedo by somowhere.

The `merge` method of the class `LocalFileChunkStrategyImpl`.

/**
 * Merges local chunk files into a single file on the local filesystem using NIO channel
 * transfers, deleting each chunk after it has been copied.
 *
 * @param files    local chunk files, ordered by chunk index
 * @param path     directory in which the merged file is created
 * @param fileName name of the merged file
 * @param info     merge metadata (display name, ...)
 * @return the merged {@code File} (URL only) on success, or a failure {@link Result}
 * @throws IOException if file creation or channel I/O fails
 */
@Override
protected Result<File> merge(List<java.io.File> files, String path, String fileName, FileChunksMergeDto info) throws IOException {
    // Create the merged output file.
    log.info("path={},fileName={}", path, fileName);
    java.io.File outputFile = new java.io.File(Paths.get(path, fileName).toString());
    if (!outputFile.exists()) {
        boolean newFile = outputFile.createNewFile();
        if (!newFile) {
            return Result.buildFail("创建文件失败");
        }
        try (FileChannel outChannel = new FileOutputStream(outputFile).getChannel()) {
            // Synchronous NIO channel transfer keeps memory usage flat regardless of
            // total file size.
            for (java.io.File file : files) {
                try (FileChannel inChannel = new FileInputStream(file).getChannel()) {
                    // transferTo may move fewer bytes than requested in one call
                    // (per its contract), so loop until the whole chunk is copied —
                    // the original single call could silently truncate a chunk.
                    long position = 0;
                    long size = inChannel.size();
                    while (position < size) {
                        position += inChannel.transferTo(position, size - position, outChannel);
                    }
                } catch (FileNotFoundException ex) {
                    log.error("文件转换失败", ex);
                    return Result.buildFail("文件转换失败");
                }
                // Delete the chunk now that it has been merged.
                if (!file.delete()) {
                    log.error("分片[" + info.getName() + "=>" + file.getName() + "]删除失败");
                }
            }
        } catch (FileNotFoundException e) {
            log.error("文件输出失败", e);
            return Result.buildFail("文件输出失败");
        }
    } else {
        // NOTE(review): when the target already exists the merge is skipped and the
        // chunks are NOT deleted — presumably an earlier merge completed; confirm.
        log.warn("文件[{}], fileName={}已经存在", info.getName(), fileName);
    }
    String relativePath = FileTypeUtil.getRelativePath(Paths.get(fileProperties.getLocal().getStoragePath()).toString(), outputFile.getAbsolutePath());
    log.info("relativePath={}, getStoragePath={}, getAbsolutePath={}", relativePath, fileProperties.getLocal().getStoragePath(), outputFile.getAbsolutePath());
    String url = fileProperties.getLocal().getUrlPrefix() + relativePath + StrPool.SLASH + fileName;
    File filePo = File.builder().url(StrUtil.replace(url, "\\\\", StrPool.SLASH)).build();
    return Result.buildOkData(filePo);
}
Also used : FileChannel(java.nio.channels.FileChannel) FileOutputStream(java.io.FileOutputStream) FileNotFoundException(java.io.FileNotFoundException) File(com.albedo.java.modules.file.domain.File) FileInputStream(java.io.FileInputStream)

Example 4 with File

use of com.albedo.java.modules.file.domain.File in project albedo by somowhere.

The `findUrlById` method of the class `FileContext`.

/**
 * Resolves the access URL for each of the given file ids, delegating URL construction
 * to the storage-specific {@link FileStrategy} of each record.
 *
 * @param ids file ids to resolve; may be empty
 * @return insertion-ordered map of id to URL; ids with no matching record are omitted
 */
public Map<Long, String> findUrlById(List<Long> ids) {
    // Guard: an empty id list would otherwise be passed straight into the IN clause.
    if (CollUtil.isEmpty(ids)) {
        return new LinkedHashMap<>();
    }
    List<File> pathFiles = fileMapper.selectList(Wraps.<File>lbQ().in(File::getId, ids));
    // Keep only the first record per id (same choice as the original groupingBy +
    // get(0), but without building intermediate per-id lists), preserving order.
    Map<Long, File> firstById = new LinkedHashMap<>();
    for (File file : pathFiles) {
        firstById.putIfAbsent(file.getId(), file);
    }
    Map<Long, String> map = new LinkedHashMap<>(MapHelper.initialCapacity(firstById.size()));
    firstById.forEach((id, fileFile) -> {
        FileStrategy fileStrategy = getFileStrategy(fileFile.getStorageType());
        map.put(id, fileStrategy.getUrl(FileGetUrlBo.builder().bucket(fileFile.getBucket()).path(fileFile.getPath()).originalFileName(fileFile.getOriginalFileName()).build()));
    });
    return map;
}
Also used : List(java.util.List) Collectors.toList(java.util.stream.Collectors.toList) File(com.albedo.java.modules.file.domain.File) MultipartFile(org.springframework.web.multipart.MultipartFile) LinkedHashMap(java.util.LinkedHashMap)

Example 5 with File

use of com.albedo.java.modules.file.domain.File in project albedo by somowhere.

The `download` method of the class `FileContext`.

/**
 * Resolves a fresh access URL for every file via its storage-specific strategy, then
 * delegates the actual response streaming to {@code down}.
 *
 * @param request  incoming HTTP request
 * @param response HTTP response the files are streamed into
 * @param list     file records to download; each entry's URL is (re)set in place
 * @throws Exception propagated from the underlying download implementation
 */
public void download(HttpServletRequest request, HttpServletResponse response, List<File> list) throws Exception {
    for (File item : list) {
        // NOTE(review): unlike findUrlById, no originalFileName is passed here — confirm intended.
        FileGetUrlBo bo = FileGetUrlBo.builder().bucket(item.getBucket()).path(item.getPath()).build();
        item.setUrl(getFileStrategy(item.getStorageType()).getUrl(bo));
    }
    down(request, response, list);
}
Also used : File(com.albedo.java.modules.file.domain.File) MultipartFile(org.springframework.web.multipart.MultipartFile)

Aggregations

File (com.albedo.java.modules.file.domain.File)11 MultipartFile (org.springframework.web.multipart.MultipartFile)5 FileInputStream (java.io.FileInputStream)3 List (java.util.List)3 FileServerProperties (com.albedo.java.modules.file.properties.FileServerProperties)2 ArrayList (java.util.ArrayList)2 LinkedHashMap (java.util.LinkedHashMap)2 Collectors.toList (java.util.stream.Collectors.toList)2 Convert (cn.hutool.core.convert.Convert)1 CommonConstants (com.albedo.java.common.core.constant.CommonConstants)1 BizException (com.albedo.java.common.core.exception.BizException)1 Result (com.albedo.java.common.core.util.Result)1 StrPool (com.albedo.java.common.core.util.StrPool)1 FileChunksMergeDto (com.albedo.java.modules.file.domain.dto.FileChunksMergeDto)1 FileRepository (com.albedo.java.modules.file.repository.FileRepository)1 FileChunkStrategy (com.albedo.java.modules.file.strategy.FileChunkStrategy)1 FileLock (com.albedo.java.modules.file.strategy.FileLock)1 FileTypeUtil (com.albedo.java.modules.file.utils.FileTypeUtil)1 OSS (com.aliyun.oss.OSS)1 OSSClientBuilder (com.aliyun.oss.OSSClientBuilder)1