Use of ch.cyberduck.core.io.MemorySegementingOutputStream in project cyberduck by iterate-ch: class BoxMultipartWriteFeature, method write.
@Override
public HttpResponseOutputStream<File> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final UploadSession uploadSession = new BoxUploadHelper(session, fileid).createUploadSession(status, file);
    if (log.isDebugEnabled()) {
        log.debug(String.format("Obtained session %s for file %s", uploadSession, file));
    }
    final BoxOutputStream proxy = new BoxOutputStream(file, uploadSession, status);
    return new HttpResponseOutputStream<File>(new MemorySegementingOutputStream(proxy, uploadSession.getPartSize().intValue()),
            new BoxAttributesFinderFeature(session, fileid), status) {
        @Override
        public File getStatus() {
            return proxy.getResult();
        }
    };
}
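Across these call sites the pattern is the same: the protocol-specific proxy stream (here BoxOutputStream) uploads one part per write it receives, while MemorySegementingOutputStream in front of it buffers arbitrary caller writes in memory and only forwards them once a full segment of the configured part size is available, with the remainder flushed when the stream is closed. The following is a minimal standalone sketch of that buffering behaviour, assuming only the two-argument constructor (target stream, segment size) visible in these snippets; the 8 MiB segment size is an arbitrary example value, not a Cyberduck default.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import ch.cyberduck.core.io.MemorySegementingOutputStream;

public class SegmentingExample {
    public static void main(String[] args) throws IOException {
        // Target stream standing in for the protocol-specific proxy (BoxOutputStream, etc.).
        final ByteArrayOutputStream target = new ByteArrayOutputStream();
        // Buffer caller writes into 8 MiB segments before forwarding them to the target.
        try (MemorySegementingOutputStream out = new MemorySegementingOutputStream(target, 8 * 1024 * 1024)) {
            out.write(new byte[1024]); // smaller than a segment: retained in the in-memory buffer
            out.write(new byte[1024]); // still buffered
        }
        // After close the remaining buffered bytes are expected to have been forwarded to the target,
        // so this should print 2048.
        System.out.println(target.size());
    }
}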
Use of ch.cyberduck.core.io.MemorySegementingOutputStream in project cyberduck by iterate-ch: class SwiftLargeUploadWriteFeature, method write.
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) {
    final LargeUploadOutputStream proxy = new LargeUploadOutputStream(file, status);
    return new HttpResponseOutputStream<StorageObject>(new MemorySegementingOutputStream(proxy,
            new HostPreferences(session.getHost()).getInteger("openstack.upload.largeobject.size.minimum")),
            new SwiftAttributesFinderFeature(session, regionService), status) {
        @Override
        public StorageObject getStatus() {
            final StorageObject stored = new StorageObject(containerService.getKey(file));
            stored.setSize(status.getLength());
            stored.setMd5sum(proxy.getResponse());
            return stored;
        }
    };
}
Use of ch.cyberduck.core.io.MemorySegementingOutputStream in project cyberduck by iterate-ch: class StoregateMultipartWriteFeature, method write.
@Override
public HttpResponseOutputStream<FileMetadata> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final String location = new StoregateWriteFeature(session, fileid).start(file, status);
    final MultipartOutputStream proxy = new MultipartOutputStream(location, file, status);
    return new HttpResponseOutputStream<FileMetadata>(new MemorySegementingOutputStream(proxy,
            new HostPreferences(session.getHost()).getInteger("storegate.upload.multipart.chunksize")),
            new StoregateAttributesFinderFeature(session, fileid), status) {
        @Override
        public FileMetadata getStatus() {
            return proxy.getResult();
        }
    };
}
Use of ch.cyberduck.core.io.MemorySegementingOutputStream in project cyberduck by iterate-ch: class GraphWriteFeature, method write.
@Override
public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        if (status.getLength() == TransferStatus.UNKNOWN_LENGTH) {
            throw new UnsupportedException("Content-Range with unknown file size is not supported");
        }
        final DriveItem folder = session.getItem(file.getParent());
        final DriveItem item = new DriveItem(folder, URIEncoder.encode(file.getName()));
        final UploadSession upload = Files.createUploadSession(item);
        final ChunkedOutputStream proxy = new ChunkedOutputStream(upload, file, status);
        final int partsize = new HostPreferences(session.getHost()).getInteger("onedrive.upload.multipart.partsize.minimum")
                * new HostPreferences(session.getHost()).getInteger("onedrive.upload.multipart.partsize.factor");
        return new VoidStatusOutputStream(new MemorySegementingOutputStream(proxy, partsize));
    } catch (OneDriveAPIException e) {
        throw new GraphExceptionMappingService(fileid).map("Upload {0} failed", e, file);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
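Unlike the other call sites, the Graph variant computes the segment size as the product of a minimum chunk size and an integer factor rather than reading a single preference. Microsoft Graph documents that upload-session byte ranges should be sent in multiples of 320 KiB, which is presumably why the minimum is scaled by a whole-number factor here. The values below are illustrative assumptions, not Cyberduck's shipped defaults; only the two preference keys are taken from the snippet above.

// Hypothetical values for illustration; the real defaults come from HostPreferences.
final int minimum = 320 * 1024;        // onedrive.upload.multipart.partsize.minimum (320 KiB)
final int factor = 32;                 // onedrive.upload.multipart.partsize.factor (assumed)
final int partsize = minimum * factor; // 10 MiB, still a multiple of 320 KiB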
Use of ch.cyberduck.core.io.MemorySegementingOutputStream in project cyberduck by iterate-ch: class S3MultipartWriteFeature, method write.
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final S3Object object = new S3WriteFeature(session).getDetails(file, status);
    // ID for the initiated multipart upload.
    final MultipartUpload multipart;
    try {
        final Path bucket = containerService.getContainer(file);
        multipart = session.getClient().multipartStartUpload(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object);
        if (log.isDebugEnabled()) {
            log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId()));
        }
    } catch (ServiceException e) {
        throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
    }
    final MultipartOutputStream proxy = new MultipartOutputStream(multipart, file, status);
    return new HttpResponseOutputStream<StorageObject>(new MemorySegementingOutputStream(proxy,
            new HostPreferences(session.getHost()).getInteger("s3.upload.multipart.size")),
            new S3AttributesAdapter(), status) {
        @Override
        public StorageObject getStatus() {
            if (proxy.getResponse() != null) {
                if (log.isDebugEnabled()) {
                    log.debug(String.format("Received response %s", proxy.getResponse()));
                }
                object.setETag(proxy.getResponse().getEtag());
                if (proxy.getResponse().getVersionId() != null) {
                    object.addMetadata(S3Object.S3_VERSION_ID, proxy.getResponse().getVersionId());
                }
            }
            return object;
        }
    };
}
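From the caller's perspective each of these write features behaves the same way: the returned stream is written to and closed like any OutputStream, and only after close does getStatus() yield the stored object assembled from the backend's responses. The following is a hedged caller-side sketch against the S3 variant; feature construction, the file path, and error handling are placeholders for whatever the real transfer pipeline supplies, and the DisabledConnectionCallback stands in for an interactive prompt.

// Illustrative usage only; names and values are example assumptions.
void upload(final S3MultipartWriteFeature feature, final Path file) throws Exception {
    final byte[] content = new byte[1024];
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final HttpResponseOutputStream<StorageObject> out = feature.write(file, status, new DisabledConnectionCallback());
    try {
        out.write(content); // buffered by MemorySegementingOutputStream until a full segment is reached
    }
    finally {
        out.close(); // flushes the remaining bytes and lets the proxy complete the multipart upload
    }
    final StorageObject stored = out.getStatus(); // ETag and version id populated from the final response
}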