Use of ch.cyberduck.core.sds.io.swagger.client.model.S3FileUploadStatus in project cyberduck by iterate-ch.
From the class SDSDirectS3UploadFeature, method upload:
@Override
public Node upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
                   final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
                .directS3Upload(true)
                .timestampModification(status.getTimestamp() != null ? new DateTime(status.getTimestamp()) : null)
                .size(TransferStatus.UNKNOWN_LENGTH == status.getLength() ? null : status.getLength())
                .parentId(Long.parseLong(nodeid.getVersionId(file.getParent(), new DisabledListProgressListener())))
                .name(file.getName());
        final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
                .createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
        if (log.isDebugEnabled()) {
            log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
        }
        final Map<Integer, TransferStatus> etags = new HashMap<>();
        final List<PresignedUrl> presignedUrls = this.retrievePresignedUrls(createFileUploadResponse, status);
        final List<Future<TransferStatus>> parts = new ArrayList<>();
        final InputStream in;
        final String random = new UUIDRandomStringService().random();
        if (SDSNodeIdProvider.isEncrypted(file)) {
            in = new SDSTripleCryptEncryptorFeature(session, nodeid).encrypt(file, local.getInputStream(), status);
        } else {
            in = local.getInputStream();
        }
        try {
            // Full size of file
            final long size = status.getLength() + status.getOffset();
            long offset = 0;
            long remaining = status.getLength();
            for (int partNumber = 1; remaining >= 0; partNumber++) {
                final long length = Math.min(Math.max((size / (MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
                final PresignedUrl presignedUrl = presignedUrls.get(partNumber - 1);
                if (SDSNodeIdProvider.isEncrypted(file)) {
                    final Local temporary = temp.create(String.format("%s-%d", random, partNumber));
                    if (log.isDebugEnabled()) {
                        log.debug(String.format("Encrypted contents for part %d to %s", partNumber, temporary));
                    }
                    new StreamCopier(status, StreamProgress.noop).withAutoclose(false).withLimit(length)
                            .transfer(in, new BufferOutputStream(new FileBuffer(temporary)));
                    parts.add(this.submit(pool, file, temporary, throttle, listener, status,
                            presignedUrl.getUrl(), presignedUrl.getPartNumber(), 0L, length, callback));
                } else {
                    parts.add(this.submit(pool, file, local, throttle, listener, status,
                            presignedUrl.getUrl(), presignedUrl.getPartNumber(), offset, length, callback));
                }
                remaining -= length;
                offset += length;
                if (0L == remaining) {
                    break;
                }
            }
        } finally {
            in.close();
        }
        for (Future<TransferStatus> future : parts) {
            try {
                final TransferStatus part = future.get();
                etags.put(part.getPart(), part);
            } catch (InterruptedException e) {
                log.error("Part upload failed with interrupt failure");
                status.setCanceled();
                throw new ConnectionCanceledException(e);
            } catch (ExecutionException e) {
                log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
                if (e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        final CompleteS3FileUploadRequest completeS3FileUploadRequest = new CompleteS3FileUploadRequest()
                .keepShareLinks(status.isExists() ? new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep") : false)
                .resolutionStrategy(status.isExists()
                        ? CompleteS3FileUploadRequest.ResolutionStrategyEnum.OVERWRITE
                        : CompleteS3FileUploadRequest.ResolutionStrategyEnum.FAIL);
        if (status.getFilekey() != null) {
            final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
            final FileKey fileKey = reader.readValue(status.getFilekey().array());
            final EncryptedFileKey encryptFileKey = Crypto.encryptFileKey(
                    TripleCryptConverter.toCryptoPlainFileKey(fileKey),
                    TripleCryptConverter.toCryptoUserPublicKey(session.keyPair().getPublicKeyContainer()));
            completeS3FileUploadRequest.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptFileKey));
        }
        etags.forEach((key, value) -> completeS3FileUploadRequest.addPartsItem(
                new S3FileUploadPart().partEtag(value.getChecksum().hash).partNumber(key)));
        if (log.isDebugEnabled()) {
            log.debug(String.format("Complete file upload with %s for %s", completeS3FileUploadRequest, file));
        }
        new NodesApi(session.getClient()).completeS3FileUpload(completeS3FileUploadRequest, createFileUploadResponse.getUploadId(), StringUtils.EMPTY);
        // Polling
        final ScheduledThreadPool polling = new ScheduledThreadPool();
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicReference<BackgroundException> failure = new AtomicReference<>();
        final ScheduledFuture f = polling.repeat(new Runnable() {
            @Override
            public void run() {
                try {
                    if (log.isDebugEnabled()) {
                        log.debug(String.format("Query upload status for %s", createFileUploadResponse));
                    }
                    final S3FileUploadStatus uploadStatus = new NodesApi(session.getClient())
                            .requestUploadStatusFiles(createFileUploadResponse.getUploadId(), StringUtils.EMPTY, null);
                    switch (uploadStatus.getStatus()) {
                        case "finishing":
                            // Expected
                            break;
                        case "transfer":
                            failure.set(new InteroperabilityException(uploadStatus.getStatus()));
                            done.countDown();
                            break;
                        case "error":
                            failure.set(new InteroperabilityException(uploadStatus.getErrorDetails().getMessage()));
                            done.countDown();
                            break;
                        case "done":
                            // Set node id in transfer status
                            nodeid.cache(file, String.valueOf(uploadStatus.getNode().getId()));
                            // Mark parent status as complete
                            status.withResponse(new SDSAttributesAdapter(session).toAttributes(uploadStatus.getNode())).setComplete();
                            done.countDown();
                            break;
                    }
                } catch (ApiException e) {
                    failure.set(new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file));
                    done.countDown();
                }
            }
        }, new HostPreferences(session.getHost()).getLong("sds.upload.s3.status.period"), TimeUnit.MILLISECONDS);
        Uninterruptibles.awaitUninterruptibly(done);
        polling.shutdown();
        if (null != failure.get()) {
            throw failure.get();
        }
        return null;
    } catch (CryptoSystemException | InvalidFileKeyException | InvalidKeyPairException | UnknownVersionException e) {
        throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
    } catch (ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    } finally {
        temp.shutdown();
        // Cancel future tasks
        pool.shutdown(false);
    }
}
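The end of this method illustrates a general poll-until-terminal pattern: schedule a periodic status request, record any failure in an AtomicReference, and release a CountDownLatch once a terminal state ("done" or "error") is reported. The standalone sketch below shows only that idea using plain JDK concurrency primitives; UploadStatusPoller, UploadState and StatusSupplier are hypothetical names introduced here for illustration and are not part of Cyberduck or the generated Swagger client.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class UploadStatusPoller {

    /** Hypothetical states mirroring the "finishing", "error" and "done" status strings above. */
    enum UploadState { FINISHING, ERROR, DONE }

    interface StatusSupplier {
        UploadState fetch() throws Exception; // stand-in for the real status request
    }

    /**
     * Blocks until the supplier reports DONE, throwing if ERROR is reported or the request fails.
     * Polls on a single-threaded scheduler at a fixed period, as the upload feature does above.
     */
    public static void awaitCompletion(final StatusSupplier supplier, final long periodMillis) throws Exception {
        final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicReference<Exception> failure = new AtomicReference<>();
        scheduler.scheduleAtFixedRate(() -> {
            try {
                switch (supplier.fetch()) {
                    case FINISHING:
                        // Not yet terminal; keep polling
                        break;
                    case ERROR:
                        failure.set(new IllegalStateException("Upload failed on server side"));
                        done.countDown();
                        break;
                    case DONE:
                        done.countDown();
                        break;
                }
            } catch (Exception e) {
                // Record the failure and stop waiting
                failure.set(e);
                done.countDown();
            }
        }, periodMillis, periodMillis, TimeUnit.MILLISECONDS);
        try {
            done.await();
        } finally {
            scheduler.shutdownNow();
        }
        if (failure.get() != null) {
            throw failure.get();
        }
    }
}

A caller would supply a lambda that issues the actual status request and maps the returned status string onto these states.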
Use of ch.cyberduck.core.sds.io.swagger.client.model.S3FileUploadStatus in project cyberduck by iterate-ch.
From the class PresignedMultipartOutputStream, method close:
@Override
public void close() throws IOException {
    try {
        if (close.get()) {
            log.warn(String.format("Skip double close of stream %s", this));
            return;
        }
        if (null != canceled.get()) {
            return;
        }
        if (etags.isEmpty()) {
            new SDSTouchFeature(session, nodeid).touch(file, new TransferStatus());
        } else {
            try {
                final CompleteS3FileUploadRequest completeS3FileUploadRequest = new CompleteS3FileUploadRequest()
                        .keepShareLinks(overall.isExists() ? new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep") : false)
                        .resolutionStrategy(overall.isExists()
                                ? CompleteS3FileUploadRequest.ResolutionStrategyEnum.OVERWRITE
                                : CompleteS3FileUploadRequest.ResolutionStrategyEnum.FAIL);
                if (overall.getFilekey() != null) {
                    final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
                    final FileKey fileKey = reader.readValue(overall.getFilekey().array());
                    final EncryptedFileKey encryptFileKey = Crypto.encryptFileKey(
                            TripleCryptConverter.toCryptoPlainFileKey(fileKey),
                            TripleCryptConverter.toCryptoUserPublicKey(session.keyPair().getPublicKeyContainer()));
                    completeS3FileUploadRequest.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptFileKey));
                }
                etags.forEach((key, value) -> completeS3FileUploadRequest.addPartsItem(
                        new S3FileUploadPart().partEtag(StringUtils.remove(value, '"')).partNumber(key)));
                new NodesApi(session.getClient()).completeS3FileUpload(completeS3FileUploadRequest, createFileUploadResponse.getUploadId(), StringUtils.EMPTY);
                // Polling
                final ScheduledThreadPool polling = new ScheduledThreadPool();
                final CountDownLatch done = new CountDownLatch(1);
                final AtomicReference<BackgroundException> failure = new AtomicReference<>();
                final ScheduledFuture f = polling.repeat(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            final S3FileUploadStatus uploadStatus = new NodesApi(session.getClient())
                                    .requestUploadStatusFiles(createFileUploadResponse.getUploadId(), StringUtils.EMPTY, null);
                            switch (uploadStatus.getStatus()) {
                                case "finishing":
                                    // Expected
                                    break;
                                case "transfer":
                                    failure.set(new InteroperabilityException(uploadStatus.getStatus()));
                                    done.countDown();
                                    break;
                                case "error":
                                    failure.set(new InteroperabilityException(uploadStatus.getErrorDetails().getMessage()));
                                    done.countDown();
                                    break;
                                case "done":
                                    nodeid.cache(file, String.valueOf(uploadStatus.getNode().getId()));
                                    done.countDown();
                                    break;
                            }
                        } catch (ApiException e) {
                            failure.set(new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file));
                            done.countDown();
                        }
                    }
                }, new HostPreferences(session.getHost()).getLong("sds.upload.s3.status.period"), TimeUnit.MILLISECONDS);
                Uninterruptibles.awaitUninterruptibly(done);
                polling.shutdown();
                if (null != failure.get()) {
                    throw failure.get();
                }
            } catch (CryptoSystemException | InvalidFileKeyException | InvalidKeyPairException | UnknownVersionException e) {
                throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
            } catch (ApiException e) {
                throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
            }
        }
    } catch (BackgroundException e) {
        throw new IOException(e);
    } finally {
        close.set(true);
    }
}
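Unlike the upload feature above, close() here must also be safe against repeated invocation, which the close flag checked at the top of the method provides. A minimal sketch of that idempotent-close idea, assuming a hypothetical FinalizingOutputStream that is not a Cyberduck class:

import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicBoolean;

/** Hypothetical stream that runs a one-time completion step on the first close() only. */
public abstract class FinalizingOutputStream extends OutputStream {

    private final AtomicBoolean closed = new AtomicBoolean();

    /** Completion work, e.g. sending a "complete upload" request built from collected part etags. */
    protected abstract void finish() throws IOException;

    @Override
    public void close() throws IOException {
        // compareAndSet makes repeated close() calls a no-op after the first one
        if (!closed.compareAndSet(false, true)) {
            return;
        }
        finish();
    }
}

The compare-and-set guard ensures the completion request is attempted at most once, even if close() is reached from several code paths.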