Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class DAVSession, method login.
@Override
public void login(final Proxy proxy, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    final CredentialsProvider provider = new BasicCredentialsProvider();
    if (preferences.getBoolean("webdav.ntlm.windows.authentication.enable") && WinHttpClients.isWinAuthAvailable()) {
        provider.setCredentials(new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.NTLM),
            new WindowsCredentialsProvider(new BasicCredentialsProvider()).getCredentials(
                new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.NTLM)));
        provider.setCredentials(new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.SPNEGO),
            new WindowsCredentialsProvider(new SystemDefaultCredentialsProvider()).getCredentials(
                new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.SPNEGO)));
    } else {
        provider.setCredentials(new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.NTLM),
            new NTCredentials(host.getCredentials().getUsername(), host.getCredentials().getPassword(),
                preferences.getProperty("webdav.ntlm.workstation"), preferences.getProperty("webdav.ntlm.domain")));
        provider.setCredentials(new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.SPNEGO),
            new NTCredentials(host.getCredentials().getUsername(), host.getCredentials().getPassword(),
                preferences.getProperty("webdav.ntlm.workstation"), preferences.getProperty("webdav.ntlm.domain")));
    }
    provider.setCredentials(new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.BASIC),
        new UsernamePasswordCredentials(host.getCredentials().getUsername(), host.getCredentials().getPassword()));
    provider.setCredentials(new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.DIGEST),
        new UsernamePasswordCredentials(host.getCredentials().getUsername(), host.getCredentials().getPassword()));
    provider.setCredentials(new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, AuthSchemes.KERBEROS),
        new UsernamePasswordCredentials(host.getCredentials().getUsername(), host.getCredentials().getPassword()));
    client.setCredentials(provider);
    if (preferences.getBoolean("webdav.basic.preemptive")) {
        switch (proxy.getType()) {
            case DIRECT:
            case SOCKS:
                // Enable preemptive authentication. See HttpState#setAuthenticationPreemptive
                client.enablePreemptiveAuthentication(host.getHostname(), host.getPort(), host.getPort(),
                    Charset.forName(preferences.getProperty("http.credentials.charset")));
                break;
            default:
                client.disablePreemptiveAuthentication();
        }
    } else {
        client.disablePreemptiveAuthentication();
    }
    if (host.getCredentials().isPassed()) {
        log.warn(String.format("Skip verifying credentials with previous successful authentication event for %s", this));
        return;
    }
    try {
        final Path home = new DelegatingHomeFeature(new WorkdirHomeFeature(host), new DefaultPathHomeFeature(host)).find();
        final HttpHead head = new HttpHead(new DAVPathEncoder().encode(home));
        try {
            client.execute(head, new MicrosoftIISFeaturesResponseHandler());
        } catch (SardineException e) {
            switch (e.getStatusCode()) {
                case HttpStatus.SC_NOT_FOUND:
                    log.warn(String.format("Ignore failure %s", e));
                    break;
                case HttpStatus.SC_NOT_IMPLEMENTED:
                case HttpStatus.SC_FORBIDDEN:
                case HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE:
                case HttpStatus.SC_METHOD_NOT_ALLOWED:
                    log.warn(String.format("Failed HEAD request to %s with %s. Retry with PROPFIND.", host, e.getResponsePhrase()));
                    cancel.verify();
                    // Possibly only HEAD requests are not allowed
                    list.list(home, new DisabledListProgressListener() {
                        @Override
                        public void chunk(final Path parent, final AttributedList<Path> list) throws ListCanceledException {
                            try {
                                cancel.verify();
                            } catch (ConnectionCanceledException e) {
                                throw new ListCanceledException(list, e);
                            }
                        }
                    });
                    break;
                case HttpStatus.SC_BAD_REQUEST:
                    if (preferences.getBoolean("webdav.basic.preemptive")) {
                        log.warn(String.format("Disable preemptive authentication for %s due to failure %s", host, e.getResponsePhrase()));
                        cancel.verify();
                        client.disablePreemptiveAuthentication();
                        client.execute(head, new MicrosoftIISFeaturesResponseHandler());
                    } else {
                        throw new DAVExceptionMappingService().map(e);
                    }
                    break;
                default:
                    throw new DAVExceptionMappingService().map(e);
            }
        }
    } catch (SardineException e) {
        throw new DAVExceptionMappingService().map(e);
    } catch (IOException e) {
        throw new HttpExceptionMappingService().map(e);
    }
}
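The login flow calls cancel.verify() before each fallback request and translates a ConnectionCanceledException raised during the PROPFIND listing into a ListCanceledException. Below is a minimal sketch, not from the Cyberduck sources, of a callback that cooperates with this protocol; it assumes ch.cyberduck.core.threading.CancelCallback declares a single verify() method throwing ConnectionCanceledException, as the calls above suggest.

// Hypothetical callback: verify() throws once a user-controlled flag is set,
// aborting the authentication handshake before the next round trip.
import ch.cyberduck.core.exception.ConnectionCanceledException;
import ch.cyberduck.core.threading.CancelCallback;

import java.util.concurrent.atomic.AtomicBoolean;

public final class FlagCancelCallback implements CancelCallback {
    private final AtomicBoolean canceled = new AtomicBoolean(false);

    // Called from the UI or controlling thread to request cancellation
    public void cancel() {
        canceled.set(true);
    }

    @Override
    public void verify() throws ConnectionCanceledException {
        if (canceled.get()) {
            throw new ConnectionCanceledException();
        }
    }
}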
Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class MoveWorker, method compile.
protected Map<Path, Path> compile(final Move move, final ListService list, final Path source, final Path target) throws BackgroundException {
    // Compile recursive list
    final Map<Path, Path> recursive = new LinkedHashMap<>();
    recursive.put(source, target);
    if (source.isDirectory()) {
        if (!move.isRecursive(source, target)) {
            // Sort ascending by timestamp to move older versions first
            final AttributedList<Path> children = list.list(source,
                new WorkerListProgressListener(this, listener)).filter(new VersionsComparator(true));
            for (Path child : children) {
                if (this.isCanceled()) {
                    throw new ConnectionCanceledException();
                }
                recursive.putAll(this.compile(move, list, child, new Path(target, child.getName(), child.getType())));
            }
        }
    }
    return recursive;
}
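compile builds a LinkedHashMap pairing every source path with its target path, so a non-recursive Move feature can replay the plan entry by entry; the per-child isCanceled() check turns a user cancel into a ConnectionCanceledException before the next listing. Here is a generic sketch of the same pattern over java.nio.file, with an AtomicBoolean standing in for the worker's cancel state; all names are illustrative, and Path below is java.nio.file.Path, not ch.cyberduck.core.Path.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

final class MovePlan {
    // Depth-first source-to-target plan; insertion order puts parents before children
    static Map<Path, Path> compile(final Path source, final Path target,
                                   final AtomicBoolean canceled) throws IOException {
        final Map<Path, Path> plan = new LinkedHashMap<>();
        plan.put(source, target);
        if (Files.isDirectory(source)) {
            try (DirectoryStream<Path> children = Files.newDirectoryStream(source)) {
                for (Path child : children) {
                    if (canceled.get()) {
                        // Stands in for throwing ConnectionCanceledException
                        throw new IOException("Canceled");
                    }
                    plan.putAll(compile(child, target.resolve(child.getFileName()), canceled));
                }
            }
        }
        return plan;
    }
}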
Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class ReadDistributionWorker, method run.
@Override
public Distribution run(final Session<?> session) throws BackgroundException {
    final DistributionConfiguration cdn = session.getFeature(DistributionConfiguration.class);
    final PathContainerService container = session.getFeature(PathContainerService.class);
    for (Path c : this.getContainers(container, files)) {
        if (this.isCanceled()) {
            throw new ConnectionCanceledException();
        }
        final Distribution distribution = cdn.read(c, method, prompt);
        if (cdn.getFeature(Index.class, distribution.getMethod()) != null) {
            // Make sure container items are cached for default root object.
            distribution.setRootDocuments(session.getFeature(ListService.class)
                .list(container.getContainer(c), new DisabledListProgressListener()).toList());
        }
        return distribution;
    }
    return this.initialize();
}
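Note the unconditional return inside the loop: only the first container derived from the selected files is consulted, and initialize() supplies the default Distribution when the selection yields no containers. A stripped-down sketch of that first-or-default shape follows; the names are hypothetical, and IllegalStateException stands in for ConnectionCanceledException.

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;

final class FirstOrDefault {
    static <T, R> R read(final List<T> containers, final Function<T, R> reader,
                         final R fallback, final AtomicBoolean canceled) {
        for (T container : containers) {
            if (canceled.get()) {
                // Stands in for throwing ConnectionCanceledException
                throw new IllegalStateException("Canceled");
            }
            // Unconditional return: only the first container is consulted
            return reader.apply(container);
        }
        return fallback;
    }
}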
Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class TransferPromptFilterWorker, method run.
@Override
public Map<TransferItem, TransferStatus> run(final Session<?> session) throws BackgroundException {
    final Map<TransferItem, TransferStatus> status = new HashMap<>();
    final TransferPathFilter filter = transfer.filter(session, session, action, listener);
    if (log.isDebugEnabled()) {
        log.debug(String.format("Filter cache %s with filter %s", cache, filter));
    }
    // Unordered list
    for (Map.Entry<TransferItem, AttributedList<TransferItem>> entry : cache.asMap().entrySet()) {
        if (this.isCanceled()) {
            throw new ConnectionCanceledException();
        }
        final AttributedList<TransferItem> list = entry.getValue();
        for (TransferItem file : list) {
            if (this.isCanceled()) {
                throw new ConnectionCanceledException();
            }
            final boolean accept = filter.accept(file.remote, file.local, new TransferStatus().exists(true));
            status.put(file, filter.prepare(file.remote, file.local, new TransferStatus().exists(true), listener).reject(!accept));
        }
    }
    return status;
}
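Rejected items are not dropped: reject(!accept) marks the prepared TransferStatus so the prompt can still display the entry. Below is a self-contained sketch of this accept-then-prepare shape with placeholder types; Filter, Status, and run are illustrative stand-ins, not the Cyberduck API.

import java.util.HashMap;
import java.util.Map;

final class PromptFilterSketch {
    // Placeholder for Cyberduck's TransferPathFilter
    interface Filter<T> {
        boolean accept(T item);
        Status prepare(T item);
    }

    // Placeholder for Cyberduck's TransferStatus
    static final class Status {
        private boolean rejected;

        Status reject(final boolean rejected) {
            this.rejected = rejected;
            return this;
        }
    }

    static <T> Map<T, Status> run(final Iterable<T> items, final Filter<T> filter) {
        final Map<T, Status> status = new HashMap<>();
        for (T item : items) {
            final boolean accept = filter.accept(item);
            // Rejected items stay in the map so a prompt can still display them
            status.put(item, filter.prepare(item).reject(!accept));
        }
        return status;
    }
}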
Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class SDSDirectS3UploadFeature, method upload.
@Override
public Node upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
                   final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
            .directS3Upload(true)
            .timestampModification(status.getTimestamp() != null ? new DateTime(status.getTimestamp()) : null)
            .size(TransferStatus.UNKNOWN_LENGTH == status.getLength() ? null : status.getLength())
            .parentId(Long.parseLong(nodeid.getVersionId(file.getParent(), new DisabledListProgressListener())))
            .name(file.getName());
        final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
            .createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
        if (log.isDebugEnabled()) {
            log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
        }
        final Map<Integer, TransferStatus> etags = new HashMap<>();
        final List<PresignedUrl> presignedUrls = this.retrievePresignedUrls(createFileUploadResponse, status);
        final List<Future<TransferStatus>> parts = new ArrayList<>();
        final InputStream in;
        final String random = new UUIDRandomStringService().random();
        if (SDSNodeIdProvider.isEncrypted(file)) {
            in = new SDSTripleCryptEncryptorFeature(session, nodeid).encrypt(file, local.getInputStream(), status);
        } else {
            in = local.getInputStream();
        }
        try {
            // Full size of file
            final long size = status.getLength() + status.getOffset();
            long offset = 0;
            long remaining = status.getLength();
            for (int partNumber = 1; remaining >= 0; partNumber++) {
                final long length = Math.min(Math.max((size / (MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
                final PresignedUrl presignedUrl = presignedUrls.get(partNumber - 1);
                if (SDSNodeIdProvider.isEncrypted(file)) {
                    final Local temporary = temp.create(String.format("%s-%d", random, partNumber));
                    if (log.isDebugEnabled()) {
                        log.debug(String.format("Encrypted contents for part %d to %s", partNumber, temporary));
                    }
                    new StreamCopier(status, StreamProgress.noop).withAutoclose(false).withLimit(length)
                        .transfer(in, new BufferOutputStream(new FileBuffer(temporary)));
                    parts.add(this.submit(pool, file, temporary, throttle, listener, status,
                        presignedUrl.getUrl(), presignedUrl.getPartNumber(), 0L, length, callback));
                } else {
                    parts.add(this.submit(pool, file, local, throttle, listener, status,
                        presignedUrl.getUrl(), presignedUrl.getPartNumber(), offset, length, callback));
                }
                remaining -= length;
                offset += length;
                if (0L == remaining) {
                    break;
                }
            }
        } finally {
            in.close();
        }
        for (Future<TransferStatus> future : parts) {
            try {
                final TransferStatus part = future.get();
                etags.put(part.getPart(), part);
            } catch (InterruptedException e) {
                log.error("Part upload failed with interrupt failure");
                status.setCanceled();
                throw new ConnectionCanceledException(e);
            } catch (ExecutionException e) {
                log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
                if (e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        final CompleteS3FileUploadRequest completeS3FileUploadRequest = new CompleteS3FileUploadRequest()
            .keepShareLinks(status.isExists() ? new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep") : false)
            .resolutionStrategy(status.isExists() ? CompleteS3FileUploadRequest.ResolutionStrategyEnum.OVERWRITE
                : CompleteS3FileUploadRequest.ResolutionStrategyEnum.FAIL);
        if (status.getFilekey() != null) {
            final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
            final FileKey fileKey = reader.readValue(status.getFilekey().array());
            final EncryptedFileKey encryptFileKey = Crypto.encryptFileKey(
                TripleCryptConverter.toCryptoPlainFileKey(fileKey),
                TripleCryptConverter.toCryptoUserPublicKey(session.keyPair().getPublicKeyContainer()));
            completeS3FileUploadRequest.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptFileKey));
        }
        etags.forEach((key, value) -> completeS3FileUploadRequest.addPartsItem(
            new S3FileUploadPart().partEtag(value.getChecksum().hash).partNumber(key)));
        if (log.isDebugEnabled()) {
            log.debug(String.format("Complete file upload with %s for %s", completeS3FileUploadRequest, file));
        }
        new NodesApi(session.getClient()).completeS3FileUpload(completeS3FileUploadRequest, createFileUploadResponse.getUploadId(), StringUtils.EMPTY);
        // Polling
        final ScheduledThreadPool polling = new ScheduledThreadPool();
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicReference<BackgroundException> failure = new AtomicReference<>();
        final ScheduledFuture f = polling.repeat(new Runnable() {
            @Override
            public void run() {
                try {
                    if (log.isDebugEnabled()) {
                        log.debug(String.format("Query upload status for %s", createFileUploadResponse));
                    }
                    final S3FileUploadStatus uploadStatus = new NodesApi(session.getClient())
                        .requestUploadStatusFiles(createFileUploadResponse.getUploadId(), StringUtils.EMPTY, null);
                    switch (uploadStatus.getStatus()) {
                        case "finishing":
                            // Expected
                            break;
                        case "transfer":
                            failure.set(new InteroperabilityException(uploadStatus.getStatus()));
                            done.countDown();
                            break;
                        case "error":
                            failure.set(new InteroperabilityException(uploadStatus.getErrorDetails().getMessage()));
                            done.countDown();
                            break;
                        case "done":
                            // Set node id in transfer status
                            nodeid.cache(file, String.valueOf(uploadStatus.getNode().getId()));
                            // Mark parent status as complete
                            status.withResponse(new SDSAttributesAdapter(session).toAttributes(uploadStatus.getNode())).setComplete();
                            done.countDown();
                            break;
                    }
                } catch (ApiException e) {
                    failure.set(new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file));
                    done.countDown();
                }
            }
        }, new HostPreferences(session.getHost()).getLong("sds.upload.s3.status.period"), TimeUnit.MILLISECONDS);
        Uninterruptibles.awaitUninterruptibly(done);
        polling.shutdown();
        if (null != failure.get()) {
            throw failure.get();
        }
        return null;
    } catch (CryptoSystemException | InvalidFileKeyException | InvalidKeyPairException | UnknownVersionException e) {
        throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
    } catch (ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    } finally {
        temp.shutdown();
        // Cancel future tasks
        pool.shutdown(false);
    }
}
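After completeS3FileUpload, the server assembles the file asynchronously, so the feature polls requestUploadStatusFiles until a terminal state, handing the outcome back through a CountDownLatch and an AtomicReference. Here is a generic sketch of that poll-until-terminal pattern using plain java.util.concurrent types instead of Cyberduck's ScheduledThreadPool; the method name and status strings are illustrative.

import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

final class PollUntilDone {
    static void await(final Callable<String> pollStatus, final long periodMillis) throws Exception {
        final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicReference<Exception> failure = new AtomicReference<>();
        scheduler.scheduleAtFixedRate(() -> {
            try {
                switch (pollStatus.call()) {
                    case "finishing":
                        // Still in progress; poll again on the next period
                        break;
                    case "done":
                        done.countDown();
                        break;
                    default:
                        failure.set(new IllegalStateException("Upload failed"));
                        done.countDown();
                }
            } catch (Exception e) {
                failure.set(e);
                done.countDown();
            }
        }, periodMillis, periodMillis, TimeUnit.MILLISECONDS);
        done.await();
        scheduler.shutdownNow();
        if (null != failure.get()) {
            throw failure.get();
        }
    }
}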