Example 1 with ThreadPool

Use of ch.cyberduck.core.threading.ThreadPool in the cyberduck project by iterate-ch.

The method compute in the class CryptoChecksumCompute:

protected Checksum compute(final InputStream in, final StreamCancelation cancel, final long offset, final long length, final ByteBuffer header, final NonceGenerator nonces) throws ChecksumException {
    if (log.isDebugEnabled()) {
        log.debug(String.format("Calculate checksum with header %s", header));
    }
    try {
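        // Encrypt into a pipe on a background thread; the delegate below
        // reads the resulting ciphertext from sink to compute the checksum.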
        final PipedOutputStream source = new PipedOutputStream();
        final CryptoOutputStream out = new CryptoOutputStream(source, cryptomator.getFileContentCryptor(), cryptomator.getFileHeaderCryptor().decryptHeader(header), nonces, cryptomator.numberOfChunks(offset));
        final PipedInputStream sink = new PipedInputStream(source, PreferencesFactory.get().getInteger("connection.chunksize"));
        final ThreadPool pool = ThreadPoolFactory.get("checksum", 1);
        try {
            final Future<Void> execute = pool.execute(new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    if (offset == 0) {
                        source.write(header.array());
                    }
                    new StreamCopier(cancel, StreamProgress.noop).transfer(in, out);
                    return null;
                }
            });
            try {
                return delegate.compute(sink, new TransferStatus().withLength(cryptomator.toCiphertextSize(offset, length)));
            } finally {
                try {
                    execute.get();
                } catch (InterruptedException e) {
                    throw new ChecksumException(LocaleFactory.localizedString("Checksum failure", "Error"), e.getMessage(), e);
                } catch (ExecutionException e) {
                    if (e.getCause() instanceof BackgroundException) {
                        throw (BackgroundException) e.getCause();
                    }
                    throw new DefaultExceptionMappingService().map(e.getCause());
                }
            }
        } finally {
            pool.shutdown(true);
        }
    } catch (ChecksumException e) {
        throw e;
    } catch (IOException | BackgroundException e) {
        throw new ChecksumException(LocaleFactory.localizedString("Checksum failure", "Error"), e.getMessage(), e);
    }
}
Also used : ChecksumException(ch.cyberduck.core.exception.ChecksumException) ThreadPool(ch.cyberduck.core.threading.ThreadPool) PipedOutputStream(java.io.PipedOutputStream) PipedInputStream(java.io.PipedInputStream) IOException(java.io.IOException) BackgroundException(ch.cyberduck.core.exception.BackgroundException) ExecutionException(java.util.concurrent.ExecutionException) TransferStatus(ch.cyberduck.core.transfer.TransferStatus) CryptoOutputStream(ch.cyberduck.core.cryptomator.CryptoOutputStream) DefaultExceptionMappingService(ch.cyberduck.core.worker.DefaultExceptionMappingService) StreamCopier(ch.cyberduck.core.io.StreamCopier)
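
The example streams ciphertext through a pipe so the checksum can be computed without buffering the whole file. Here is a minimal standalone sketch of the same pipe-plus-worker pattern using only the JDK; SHA-256 stands in for the delegate checksum, and PipedChecksum is an illustrative name, not part of Cyberduck.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.security.MessageDigest;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class PipedChecksum {

    // A worker thread writes the header and payload into one end of a pipe
    // while the current thread digests the other end, mirroring the
    // single-thread pool and PipedInputStream buffer above.
    static byte[] compute(final InputStream in, final byte[] header) throws Exception {
        final ExecutorService pool = Executors.newSingleThreadExecutor();
        try (PipedOutputStream source = new PipedOutputStream();
             PipedInputStream sink = new PipedInputStream(source, 32768)) {
            final Future<Void> writer = pool.submit(() -> {
                try (OutputStream out = source) {
                    // Prepend the header, as the original does at offset 0
                    out.write(header);
                    in.transferTo(out);
                }
                return null;
            });
            final MessageDigest digest = MessageDigest.getInstance("SHA-256");
            final byte[] buffer = new byte[8192];
            for (int read; (read = sink.read(buffer)) != -1; ) {
                digest.update(buffer, 0, read);
            }
            // Surface any failure from the writer thread
            writer.get();
            return digest.digest();
        }
        finally {
            pool.shutdownNow();
        }
    }

    public static void main(final String[] args) throws Exception {
        final byte[] hash = compute(new ByteArrayInputStream("payload".getBytes()), "header".getBytes());
        final StringBuilder hex = new StringBuilder();
        for (final byte b : hash) {
            hex.append(String.format("%02x", b));
        }
        System.out.println(hex);
    }
}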

Example 2 with ThreadPool

Use of ch.cyberduck.core.threading.ThreadPool in the cyberduck project by iterate-ch.

The method list in the class GoogleStorageObjectListService:

protected AttributedList<Path> list(final Path directory, final ListProgressListener listener, final String delimiter, final int chunksize) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
    try {
        final Path bucket = containerService.getContainer(directory);
        final VersioningConfiguration versioning = null != session.getFeature(Versioning.class) ? session.getFeature(Versioning.class).getConfiguration(containerService.getContainer(directory)) : VersioningConfiguration.empty();
        final AttributedList<Path> objects = new AttributedList<>();
        final List<Future<Path>> folders = new ArrayList<>();
        Objects response;
        long revision = 0L;
        String lastKey = null;
        String page = null;
        boolean hasDirectoryPlaceholder = containerService.isContainer(directory);
        do {
            response = session.getClient().objects().list(bucket.getName()).setPageToken(page).setVersions(versioning.isEnabled()).setMaxResults((long) chunksize).setDelimiter(delimiter).setPrefix(this.createPrefix(directory)).execute();
            if (response.getItems() != null) {
                for (StorageObject object : response.getItems()) {
                    final String key = PathNormalizer.normalize(object.getName());
                    if (String.valueOf(Path.DELIMITER).equals(key)) {
                        log.warn(String.format("Skipping prefix %s", key));
                        continue;
                    }
                    if (new SimplePathPredicate(new Path(bucket, key, EnumSet.of(Path.Type.directory))).test(directory)) {
                        // Placeholder object, skip
                        hasDirectoryPlaceholder = true;
                        continue;
                    }
                    if (!StringUtils.equals(lastKey, key)) {
                        // Reset revision for next file
                        revision = 0L;
                    }
                    final EnumSet<Path.Type> types = object.getName().endsWith(String.valueOf(Path.DELIMITER)) ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file);
                    final Path file;
                    final PathAttributes attr = attributes.toAttributes(object);
                    attr.setRevision(++revision);
                    // Copy bucket location
                    attr.setRegion(bucket.attributes().getRegion());
                    if (null == delimiter) {
                        // When searching for files recursively
                        file = new Path(String.format("%s%s", bucket.getAbsolute(), key), types, attr);
                    } else {
                        file = new Path(directory.isDirectory() ? directory : directory.getParent(), PathNormalizer.name(key), types, attr);
                    }
                    objects.add(file);
                    lastKey = key;
                }
                if (versioning.isEnabled()) {
                    if (references) {
                        for (Path f : objects) {
                            if (f.attributes().isDuplicate()) {
                                final Path latest = objects.find(new LatestVersionPathPredicate(f));
                                if (latest != null) {
                                    // Reference version
                                    final AttributedList<Path> versions = new AttributedList<>(latest.attributes().getVersions());
                                    versions.add(f);
                                    latest.attributes().setVersions(versions);
                                } else {
                                    log.warn(String.format("No current version found for %s", f));
                                }
                            }
                        }
                    }
                }
            }
            if (response.getPrefixes() != null) {
                for (String prefix : response.getPrefixes()) {
                    if (String.valueOf(Path.DELIMITER).equals(prefix)) {
                        log.warn(String.format("Skipping prefix %s", prefix));
                        continue;
                    }
                    final String key = PathNormalizer.normalize(prefix);
                    if (new SimplePathPredicate(new Path(bucket, key, EnumSet.of(Path.Type.directory))).test(directory)) {
                        continue;
                    }
                    final Path file;
                    final PathAttributes attributes = new PathAttributes();
                    attributes.setRegion(bucket.attributes().getRegion());
                    if (null == delimiter) {
                        // When searching for files recursively
                        file = new Path(String.format("%s%s", bucket.getAbsolute(), key), EnumSet.of(Path.Type.directory, Path.Type.placeholder), attributes);
                    } else {
                        file = new Path(directory, PathNormalizer.name(key), EnumSet.of(Path.Type.directory, Path.Type.placeholder), attributes);
                    }
                    if (versioning.isEnabled()) {
                        folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(prefix)));
                    } else {
                        folders.add(ConcurrentUtils.constantFuture(file));
                    }
                }
            }
            page = response.getNextPageToken();
            listener.chunk(directory, objects);
        } while (page != null);
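        // Resolve the deferred folder lookups only after the final page has been listed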
        for (Future<Path> future : folders) {
            try {
                objects.add(future.get());
            } catch (InterruptedException e) {
                log.error("Listing versioned objects failed with interrupt failure");
                throw new ConnectionCanceledException(e);
            } catch (ExecutionException e) {
                log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
                if (e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        listener.chunk(directory, objects);
        if (!hasDirectoryPlaceholder && objects.isEmpty()) {
            throw new NotfoundException(directory.getAbsolute());
        }
        return objects;
    } catch (IOException e) {
        throw new GoogleStorageExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
}
Also used : ThreadPool(ch.cyberduck.core.threading.ThreadPool) ArrayList(java.util.ArrayList) ExecutionException(java.util.concurrent.ExecutionException) Path(ch.cyberduck.core.Path) NotfoundException(ch.cyberduck.core.exception.NotfoundException) StorageObject(com.google.api.services.storage.model.StorageObject) ConnectionCanceledException(ch.cyberduck.core.exception.ConnectionCanceledException) PathAttributes(ch.cyberduck.core.PathAttributes) VersioningConfiguration(ch.cyberduck.core.VersioningConfiguration) IOException(java.io.IOException) AttributedList(ch.cyberduck.core.AttributedList) Objects(com.google.api.services.storage.model.Objects) Future(java.util.concurrent.Future) SimplePathPredicate(ch.cyberduck.core.SimplePathPredicate) BackgroundException(ch.cyberduck.core.exception.BackgroundException)
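
The method defers per-prefix lookups to the pool while paging and drains the futures only after the last page. A condensed sketch of that control flow; Page, fetchPage and resolvePrefix are hypothetical stand-ins for the Google Storage client calls:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

public class PagedListing {

    // Hypothetical page of results with a continuation token
    record Page(List<String> prefixes, String nextPageToken) {
    }

    static List<String> list(final ExecutorService pool) throws Exception {
        final List<Future<String>> folders = new ArrayList<>();
        String page = null;
        do {
            final Page response = fetchPage(page);
            for (final String prefix : response.prefixes()) {
                // Submit now, resolve later: paging continues while workers run
                folders.add(pool.submit(() -> resolvePrefix(prefix)));
            }
            page = response.nextPageToken();
        }
        while (page != null);
        final List<String> objects = new ArrayList<>();
        for (final Future<String> future : folders) {
            try {
                objects.add(future.get());
            }
            catch (ExecutionException e) {
                // Unwrap the worker failure, as the original does for BackgroundException
                throw new IllegalStateException(e.getCause());
            }
        }
        return objects;
    }

    static Page fetchPage(final String token) {
        // Placeholder: a real implementation would call the storage API here
        return new Page(List.of(), null);
    }

    static String resolvePrefix(final String prefix) {
        return prefix;
    }
}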

Example 3 with ThreadPool

Use of ch.cyberduck.core.threading.ThreadPool in the cyberduck project by iterate-ch.

The method upload in the class SwiftLargeObjectUploadFeature:

@Override
public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    final List<Path> existingSegments = new ArrayList<>();
    if (status.isAppend()) {
        // Get a lexicographically ordered list of the existing file segments
        try {
            existingSegments.addAll(listService.list(segmentService.getSegmentsDirectory(file), new DisabledListProgressListener()).toList());
        } catch (NotfoundException e) {
            // Ignore
        }
    }
    // Get the results of the uploads in the order they were submitted
    // this is important for building the manifest, and is not a problem in terms of performance
    // because we should only continue when all segments have uploaded successfully
    final List<StorageObject> completed = new ArrayList<>();
    // Submit file segments for concurrent upload
    final List<Future<StorageObject>> segments = new ArrayList<>();
    long remaining = status.getLength();
    long offset = 0;
    for (int segmentNumber = 1; remaining > 0; segmentNumber++) {
        final long length = Math.min(segmentSize, remaining);
        // Segment name with left padded segment number
        final Path segment = segmentService.getSegment(file, segmentNumber);
        if (existingSegments.contains(segment)) {
            final Path existingSegment = existingSegments.get(existingSegments.indexOf(segment));
            if (log.isDebugEnabled()) {
                log.debug(String.format("Skip segment %s", existingSegment));
            }
            final StorageObject stored = new StorageObject(containerService.getKey(segment));
            if (HashAlgorithm.md5.equals(existingSegment.attributes().getChecksum().algorithm)) {
                stored.setMd5sum(existingSegment.attributes().getChecksum().hash);
            }
            stored.setSize(existingSegment.attributes().getSize());
            offset += existingSegment.attributes().getSize();
            completed.add(stored);
        } else {
            // Submit to queue
            segments.add(this.submit(pool, segment, local, throttle, listener, status, offset, length, callback));
            if (log.isDebugEnabled()) {
                log.debug(String.format("Segment %s submitted with size %d and offset %d", segment, length, offset));
            }
            remaining -= length;
            offset += length;
        }
    }
    try {
        for (Future<StorageObject> futureSegment : segments) {
            completed.add(futureSegment.get());
        }
    } catch (InterruptedException e) {
        log.error("Part upload failed with interrupt failure");
        status.setCanceled();
        throw new ConnectionCanceledException(e);
    } catch (ExecutionException e) {
        log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
        if (e.getCause() instanceof BackgroundException) {
            throw (BackgroundException) e.getCause();
        }
        throw new DefaultExceptionMappingService().map(e.getCause());
    } finally {
        pool.shutdown(false);
    }
    if (log.isInfoEnabled()) {
        log.info(String.format("Finished large file upload %s with %d parts", file, completed.size()));
    }
    // All segments have been uploaded; now create or update the manifest.
    try {
        // Static Large Object.
        final String manifest = segmentService.manifest(containerService.getContainer(file).getName(), completed);
        if (log.isDebugEnabled()) {
            log.debug(String.format("Creating SLO manifest %s for %s", manifest, file));
        }
        final StorageObject stored = new StorageObject(containerService.getKey(file));
        stored.setSize(status.getLength());
        final String checksum = session.getClient().createSLOManifestObject(regionService.lookup(containerService.getContainer(file)), containerService.getContainer(file).getName(), status.getMime(), containerService.getKey(file), manifest, Collections.emptyMap());
        // The value of the Content-Length header is the total size of all segment objects, and the value of the ETag header is calculated by taking
        // the ETag value of each segment, concatenating them together, and then returning the MD5 checksum of the result.
        stored.setMd5sum(checksum);
        // Mark parent status as complete
        status.withResponse(new SwiftAttributesFinderFeature(session).toAttributes(stored)).setComplete();
        return stored;
    } catch (GenericException e) {
        throw new SwiftExceptionMappingService().map("Upload {0} failed", e, file);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
Also used : Path(ch.cyberduck.core.Path) NotfoundException(ch.cyberduck.core.exception.NotfoundException) StorageObject(ch.iterate.openstack.swift.model.StorageObject) DisabledListProgressListener(ch.cyberduck.core.DisabledListProgressListener) ConnectionCanceledException(ch.cyberduck.core.exception.ConnectionCanceledException) ThreadPool(ch.cyberduck.core.threading.ThreadPool) ArrayList(java.util.ArrayList) IOException(java.io.IOException) GenericException(ch.iterate.openstack.swift.exception.GenericException) Future(java.util.concurrent.Future) DefaultExceptionMappingService(ch.cyberduck.core.worker.DefaultExceptionMappingService) DefaultIOExceptionMappingService(ch.cyberduck.core.DefaultIOExceptionMappingService) ExecutionException(java.util.concurrent.ExecutionException) BackgroundException(ch.cyberduck.core.exception.BackgroundException)
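
The comment above the createSLOManifestObject call describes how Swift derives the manifest ETag for a Static Large Object. A small sketch of that verification rule, assuming (as the comment states) that the segment ETag strings are concatenated as text and the result is MD5-hashed:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.List;

public class SloEtag {

    // Expected manifest ETag per the comment above: MD5 over the
    // concatenation of the segment ETag strings.
    static String expectedEtag(final List<String> segmentEtags) throws Exception {
        final StringBuilder concat = new StringBuilder();
        for (final String etag : segmentEtags) {
            concat.append(etag);
        }
        final MessageDigest md5 = MessageDigest.getInstance("MD5");
        final byte[] digest = md5.digest(concat.toString().getBytes(StandardCharsets.UTF_8));
        final StringBuilder hex = new StringBuilder();
        for (final byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    public static void main(final String[] args) throws Exception {
        System.out.println(expectedEtag(List.of(
            "d41d8cd98f00b204e9800998ecf8427e",
            "0cc175b9c0f1b6a831c399e269772661")));
    }
}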

Example 4 with ThreadPool

Use of ch.cyberduck.core.threading.ThreadPool in the cyberduck project by iterate-ch.

The method upload in the class S3MultipartUploadService:

@Override
public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        MultipartUpload multipart = null;
        try {
            if (status.isAppend()) {
                final List<MultipartUpload> list = multipartService.find(file);
                if (!list.isEmpty()) {
                    multipart = list.iterator().next();
                }
            }
        } catch (AccessDeniedException | InteroperabilityException e) {
            log.warn(String.format("Ignore failure listing incomplete multipart uploads. %s", e));
        }
        final List<MultipartPart> completed = new ArrayList<>();
        // Not found or new upload
        if (null == multipart) {
            if (log.isInfoEnabled()) {
                log.info("No pending multipart upload found");
            }
            final S3Object object = new S3WriteFeature(session).getDetails(file, status);
            // ID for the initiated multipart upload.
            final Path bucket = containerService.getContainer(file);
            multipart = session.getClient().multipartStartUpload(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object);
            if (log.isDebugEnabled()) {
                log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId()));
            }
        } else {
            if (status.isAppend()) {
                // Add already completed parts
                completed.addAll(multipartService.list(multipart));
            }
        }
        // Full size of file
        final long size = status.getLength() + status.getOffset();
        final List<Future<MultipartPart>> parts = new ArrayList<>();
        long remaining = status.getLength();
        long offset = 0;
        for (int partNumber = 1; remaining > 0; partNumber++) {
            boolean skip = false;
            if (status.isAppend()) {
                if (log.isInfoEnabled()) {
                    log.info(String.format("Determine if part number %d can be skipped", partNumber));
                }
                for (MultipartPart c : completed) {
                    if (c.getPartNumber().equals(partNumber)) {
                        if (log.isInfoEnabled()) {
                            log.info(String.format("Skip completed part number %d", partNumber));
                        }
                        skip = true;
                        offset += c.getSize();
                        break;
                    }
                }
            }
            if (!skip) {
                // Last part can be less than 5 MB. Adjust part size.
                final long length = Math.min(Math.max((size / (S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
                // Submit to queue
                parts.add(this.submit(pool, file, local, throttle, listener, status, multipart, partNumber, offset, length, callback));
                remaining -= length;
                offset += length;
            }
        }
        for (Future<MultipartPart> future : parts) {
            try {
                completed.add(future.get());
            } catch (InterruptedException e) {
                log.error("Part upload failed with interrupt failure");
                status.setCanceled();
                throw new ConnectionCanceledException(e);
            } catch (ExecutionException e) {
                log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
                if (e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        // Combining all the given parts into the final object. Processing of a Complete Multipart Upload request
        // could take several minutes to complete. Because a request could fail after the initial 200 OK response
        // has been sent, it is important that you check the response body to determine whether the request succeeded.
        final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed);
        if (log.isInfoEnabled()) {
            log.info(String.format("Completed multipart upload for %s with %d parts and checksum %s", complete.getObjectKey(), completed.size(), complete.getEtag()));
        }
        if (file.getType().contains(Path.Type.encrypted)) {
            log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
        } else {
            if (S3Session.isAwsHostname(session.getHost().getHostname())) {
                completed.sort(new MultipartPart.PartNumberComparator());
                final StringBuilder concat = new StringBuilder();
                for (MultipartPart part : completed) {
                    concat.append(part.getEtag());
                }
                final String expected = String.format("%s-%d", ChecksumComputeFactory.get(HashAlgorithm.md5).compute(concat.toString(), new TransferStatus()), completed.size());
                final String reference = StringUtils.remove(complete.getEtag(), "\"");
                if (!StringUtils.equalsIgnoreCase(expected, reference)) {
                    throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()), MessageFormat.format("Mismatch between MD5 hash {0} of uploaded data and ETag {1} returned by the server", expected, reference));
                }
            }
        }
        final StorageObject object = new StorageObject(containerService.getKey(file));
        object.setETag(complete.getEtag());
        if (status.getTimestamp() != null) {
            object.addMetadata(S3TimestampFeature.METADATA_MODIFICATION_DATE, String.valueOf(status.getTimestamp()));
        }
        // Mark parent status as complete
        status.withResponse(new S3AttributesAdapter().toAttributes(object)).setComplete();
        return object;
    } catch (ServiceException e) {
        throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
    } finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
Also used : AccessDeniedException(ch.cyberduck.core.exception.AccessDeniedException) ChecksumException(ch.cyberduck.core.exception.ChecksumException) ThreadPool(ch.cyberduck.core.threading.ThreadPool) ArrayList(java.util.ArrayList) TransferStatus(ch.cyberduck.core.transfer.TransferStatus) MultipartCompleted(org.jets3t.service.model.MultipartCompleted) S3Object(org.jets3t.service.model.S3Object) ExecutionException(java.util.concurrent.ExecutionException) Path(ch.cyberduck.core.Path) MultipartPart(org.jets3t.service.model.MultipartPart) InteroperabilityException(ch.cyberduck.core.exception.InteroperabilityException) StorageObject(org.jets3t.service.model.StorageObject) ConnectionCanceledException(ch.cyberduck.core.exception.ConnectionCanceledException) MultipartUpload(org.jets3t.service.model.MultipartUpload) ServiceException(org.jets3t.service.ServiceException) Future(java.util.concurrent.Future) BackgroundException(ch.cyberduck.core.exception.BackgroundException)
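
The part-length expression behind the comment "Last part can be less than 5 MB" balances three constraints: honor the preferred part size, never exceed the remaining bytes, and grow parts so the upload stays within the service's part limit. A worked sketch of that rule, assuming MAXIMUM_UPLOAD_PARTS matches the usual S3 limit of 10,000:

public class PartSizing {

    // Assumed to match S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS
    static final long MAXIMUM_UPLOAD_PARTS = 10000;

    // min(max(size / (limit - 1), preferred), remaining): the part grows
    // beyond the preferred size only when the file would otherwise need
    // more than the maximum number of parts.
    static long partLength(final long size, final long partsize, final long remaining) {
        return Math.min(Math.max(size / (MAXIMUM_UPLOAD_PARTS - 1), partsize), remaining);
    }

    public static void main(final String[] args) {
        final long preferred = 8L * 1024 * 1024; // 8 MiB preferred part size
        // 5 GiB fits in well under 10,000 parts, so the preferred size wins
        System.out.println(partLength(5L << 30, preferred, 5L << 30)); // 8388608
        // 100 TiB would need ~13 million 8 MiB parts, so the part size grows
        System.out.println(partLength(100L << 40, preferred, 100L << 40)); // 10996215899, about 10.2 GiB
    }
}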

Example 5 with ThreadPool

Use of ch.cyberduck.core.threading.ThreadPool in the cyberduck project by iterate-ch.

The method upload in the class B2LargeUploadService:

@Override
public BaseB2Response upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("largeupload", concurrency);
    try {
        // Get the results of the uploads in the order they were submitted
        // this is important for building the manifest, and is not a problem in terms of performance
        // because we should only continue when all segments have uploaded successfully
        final List<B2UploadPartResponse> completed = new ArrayList<>();
        final Map<String, String> fileinfo = new HashMap<>(status.getMetadata());
        final Checksum checksum = status.getChecksum();
        if (Checksum.NONE != checksum) {
            switch(checksum.algorithm) {
                case sha1:
                    fileinfo.put(X_BZ_INFO_LARGE_FILE_SHA1, status.getChecksum().hash);
                    break;
            }
        }
        if (null != status.getTimestamp()) {
            fileinfo.put(X_BZ_INFO_SRC_LAST_MODIFIED_MILLIS, String.valueOf(status.getTimestamp()));
        }
        final String fileId;
        if (status.isAppend()) {
            // Add already completed parts
            final B2LargeUploadPartService partService = new B2LargeUploadPartService(session, fileid);
            final List<B2FileInfoResponse> uploads = partService.find(file);
            if (uploads.isEmpty()) {
                fileId = session.getClient().startLargeFileUpload(fileid.getVersionId(containerService.getContainer(file), new DisabledListProgressListener()), containerService.getKey(file), status.getMime(), fileinfo).getFileId();
            } else {
                fileId = uploads.iterator().next().getFileId();
                completed.addAll(partService.list(fileId));
            }
        } else {
            fileId = session.getClient().startLargeFileUpload(fileid.getVersionId(containerService.getContainer(file), new DisabledListProgressListener()), containerService.getKey(file), status.getMime(), fileinfo).getFileId();
        }
        // Full size of file
        final long size = status.getLength() + status.getOffset();
        // Submit file segments for concurrent upload
        final List<Future<B2UploadPartResponse>> parts = new ArrayList<>();
        long remaining = status.getLength();
        long offset = 0;
        for (int partNumber = 1; remaining > 0; partNumber++) {
            boolean skip = false;
            if (status.isAppend()) {
                if (log.isInfoEnabled()) {
                    log.info(String.format("Determine if part number %d can be skipped", partNumber));
                }
                for (B2UploadPartResponse c : completed) {
                    if (c.getPartNumber().equals(partNumber)) {
                        if (log.isInfoEnabled()) {
                            log.info(String.format("Skip completed part number %d", partNumber));
                        }
                        skip = true;
                        offset += c.getContentLength();
                        break;
                    }
                }
            }
            if (!skip) {
                final long length = Math.min(Math.max((size / B2LargeUploadService.MAXIMUM_UPLOAD_PARTS), partSize), remaining);
                // Submit to queue
                parts.add(this.submit(pool, file, local, throttle, listener, status, fileId, partNumber, offset, length, callback));
                if (log.isDebugEnabled()) {
                    log.debug(String.format("Part %s submitted with size %d and offset %d", partNumber, length, offset));
                }
                remaining -= length;
                offset += length;
            }
        }
        try {
            for (Future<B2UploadPartResponse> f : parts) {
                completed.add(f.get());
            }
        } catch (InterruptedException e) {
            log.error("Part upload failed with interrupt failure");
            status.setCanceled();
            throw new ConnectionCanceledException(e);
        } catch (ExecutionException e) {
            log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
            if (e.getCause() instanceof BackgroundException) {
                throw (BackgroundException) e.getCause();
            }
            throw new DefaultExceptionMappingService().map(e.getCause());
        }
        completed.sort(new Comparator<B2UploadPartResponse>() {

            @Override
            public int compare(final B2UploadPartResponse o1, final B2UploadPartResponse o2) {
                return o1.getPartNumber().compareTo(o2.getPartNumber());
            }
        });
        final List<String> checksums = new ArrayList<>();
        for (B2UploadPartResponse part : completed) {
            checksums.add(part.getContentSha1());
        }
        final B2FinishLargeFileResponse response = session.getClient().finishLargeFileUpload(fileId, checksums.toArray(new String[checksums.size()]));
        if (log.isInfoEnabled()) {
            log.info(String.format("Finished large file upload %s with %d parts", file, completed.size()));
        }
        fileid.cache(file, response.getFileId());
        // Mark parent status as complete
        status.withResponse(new B2AttributesFinderFeature(session, fileid).toAttributes(response)).setComplete();
        return response;
    } catch (B2ApiException e) {
        throw new B2ExceptionMappingService(fileid).map("Upload {0} failed", e, file);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    } finally {
        pool.shutdown(false);
    }
}
Also used : B2FinishLargeFileResponse(synapticloop.b2.response.B2FinishLargeFileResponse) DisabledListProgressListener(ch.cyberduck.core.DisabledListProgressListener) HashMap(java.util.HashMap) ThreadPool(ch.cyberduck.core.threading.ThreadPool) ArrayList(java.util.ArrayList) Checksum(ch.cyberduck.core.io.Checksum) B2UploadPartResponse(synapticloop.b2.response.B2UploadPartResponse) ExecutionException(java.util.concurrent.ExecutionException) B2FileInfoResponse(synapticloop.b2.response.B2FileInfoResponse) ConnectionCanceledException(ch.cyberduck.core.exception.ConnectionCanceledException) B2ApiException(synapticloop.b2.exception.B2ApiException) IOException(java.io.IOException) Future(java.util.concurrent.Future) DefaultExceptionMappingService(ch.cyberduck.core.worker.DefaultExceptionMappingService) DefaultIOExceptionMappingService(ch.cyberduck.core.DefaultIOExceptionMappingService) BackgroundException(ch.cyberduck.core.exception.BackgroundException)
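
Examples 3, 4 and 5 share one resume idiom: iterate over part numbers, skip parts the server already holds while advancing the offset, and submit only the missing ranges to the pool. A condensed sketch of that planning loop; CompletedPart and uploadRange are hypothetical stand-ins for the service-specific types above, and here length covers the whole object, so skipped parts also reduce the remaining count:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

public class ResumablePartPlanner {

    // Hypothetical record of a part already present on the server
    record CompletedPart(int partNumber, long size) {
    }

    static List<Future<CompletedPart>> plan(final ExecutorService pool, final long length,
                                            final long partsize, final Map<Integer, CompletedPart> completed) {
        final List<Future<CompletedPart>> parts = new ArrayList<>();
        long remaining = length;
        long offset = 0;
        for (int partNumber = 1; remaining > 0; partNumber++) {
            final CompletedPart existing = completed.get(partNumber);
            if (existing != null) {
                // Already uploaded: move past it without submitting work
                offset += existing.size();
                remaining -= existing.size();
                continue;
            }
            final int number = partNumber;
            final long off = offset;
            final long len = Math.min(partsize, remaining);
            parts.add(pool.submit(() -> uploadRange(number, off, len)));
            offset += len;
            remaining -= len;
        }
        return parts;
    }

    static CompletedPart uploadRange(final int partNumber, final long offset, final long length) {
        // Placeholder for the service-specific part upload
        return new CompletedPart(partNumber, length);
    }
}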

Aggregations

Types used together with ThreadPool in the examples above (usage counts):

BackgroundException (ch.cyberduck.core.exception.BackgroundException): 11
ThreadPool (ch.cyberduck.core.threading.ThreadPool): 11
ExecutionException (java.util.concurrent.ExecutionException): 11
ConnectionCanceledException (ch.cyberduck.core.exception.ConnectionCanceledException): 10
ArrayList (java.util.ArrayList): 10
Future (java.util.concurrent.Future): 10
IOException (java.io.IOException): 6
DisabledListProgressListener (ch.cyberduck.core.DisabledListProgressListener): 5
DefaultIOExceptionMappingService (ch.cyberduck.core.DefaultIOExceptionMappingService): 4
Path (ch.cyberduck.core.Path): 4
NotfoundException (ch.cyberduck.core.exception.NotfoundException): 4
TransferStatus (ch.cyberduck.core.transfer.TransferStatus): 4
PathAttributes (ch.cyberduck.core.PathAttributes): 3
ChecksumException (ch.cyberduck.core.exception.ChecksumException): 3
DefaultExceptionMappingService (ch.cyberduck.core.worker.DefaultExceptionMappingService): 3
HashMap (java.util.HashMap): 3
AttributedList (ch.cyberduck.core.AttributedList): 2
SimplePathPredicate (ch.cyberduck.core.SimplePathPredicate): 2
InteroperabilityException (ch.cyberduck.core.exception.InteroperabilityException): 2
Checksum (ch.cyberduck.core.io.Checksum): 2