Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class SDSExceptionMappingService, method map.
@Override
public BackgroundException map(final ApiException failure) {
    for (Throwable cause : ExceptionUtils.getThrowableList(failure)) {
        if (cause instanceof SocketException) {
            // Map Connection has been shutdown: javax.net.ssl.SSLException: java.net.SocketException: Broken pipe
            return new DefaultSocketExceptionMappingService().map((SocketException) cause);
        }
        if (cause instanceof HttpResponseException) {
            return new DefaultHttpResponseExceptionMappingService().map((HttpResponseException) cause);
        }
        if (cause instanceof IOException) {
            return new DefaultIOExceptionMappingService().map((IOException) cause);
        }
        if (cause instanceof IllegalStateException) {
            // Caused by: ch.cyberduck.core.sds.io.swagger.client.ApiException: javax.ws.rs.ProcessingException: java.lang.IllegalStateException: Connection pool shut down
            return new ConnectionCanceledException(cause);
        }
    }
    final StringBuilder buffer = new StringBuilder();
    if (null != failure.getResponseBody()) {
        try {
            final JsonObject json = JsonParser.parseReader(new StringReader(failure.getResponseBody())).getAsJsonObject();
            if (json.has("errorCode")) {
                if (json.get("errorCode").isJsonPrimitive()) {
                    final int errorCode = json.getAsJsonPrimitive("errorCode").getAsInt();
                    if (log.isDebugEnabled()) {
                        log.debug(String.format("Failure with errorCode %s", errorCode));
                    }
                    final String key = String.format("Error %d", errorCode);
                    final String localized = LocaleFactory.get().localize(key, "SDS");
                    this.append(buffer, localized);
                    if (StringUtils.equals(localized, key)) {
                        log.warn(String.format("Missing user message for error code %d", errorCode));
                        if (json.has("debugInfo")) {
                            if (json.get("debugInfo").isJsonPrimitive()) {
                                this.append(buffer, json.getAsJsonPrimitive("debugInfo").getAsString());
                            }
                        }
                    }
                    switch (failure.getCode()) {
                        case HttpStatus.SC_BAD_REQUEST:
                            switch (errorCode) {
                                case -10100:
                                    // [-10100] Invalid authentication method
                                    return new AccessDeniedException(buffer.toString(), failure);
                            }
                            break;
                        case HttpStatus.SC_NOT_FOUND:
                            switch (errorCode) {
                                case -70020:
                                    // [-70020] User does not have a keypair
                                case -70501:
                                    // [-70501] User not found
                                case -40761:
                                    // [-40761] Filekey not found for encrypted file
                                    return new AccessDeniedException(buffer.toString(), failure);
                            }
                            break;
                        case HttpStatus.SC_PRECONDITION_FAILED:
                            switch (errorCode) {
                                case -10108:
                                    // [-10108] RADIUS Access-Challenge required
                                    if (json.has("replyMessage")) {
                                        if (json.get("replyMessage").isJsonPrimitive()) {
                                            final JsonPrimitive replyMessage = json.getAsJsonPrimitive("replyMessage");
                                            if (log.isDebugEnabled()) {
                                                log.debug(String.format("Failure with replyMessage %s", replyMessage));
                                            }
                                            buffer.append(replyMessage.getAsString());
                                        }
                                    }
                                    return new PartialLoginFailureException(buffer.toString(), failure);
                            }
                            break;
                        case HttpStatus.SC_UNAUTHORIZED:
                            switch (errorCode) {
                                case -10012:
                                    // [-10012] Wrong token
                                    return new ExpiredTokenException(buffer.toString(), failure);
                            }
                            break;
                    }
                }
            } else {
                switch (failure.getCode()) {
                    case HttpStatus.SC_INTERNAL_SERVER_ERROR:
                        break;
                    default:
                        if (json.has("debugInfo")) {
                            log.warn(String.format("Missing error code for failure %s", json));
                            if (json.get("debugInfo").isJsonPrimitive()) {
                                this.append(buffer, json.getAsJsonPrimitive("debugInfo").getAsString());
                            }
                        }
                }
            }
        } catch (JsonParseException e) {
            // Ignore
        }
    }
    switch (failure.getCode()) {
        case HttpStatus.SC_FORBIDDEN:
            if (failure.getResponseHeaders().containsKey("X-Forbidden")) {
                return new AccessDeniedException(LocaleFactory.localizedString("The AV scanner detected that the file could be malicious", "SDS"));
            }
            break;
        case 901:
            // Servers with AV scanners block transfer attempts of infected files (upload or download) and answer the request with status 901
            return new AccessDeniedException(LocaleFactory.localizedString("The AV scanner detected that the file could be malicious", "SDS"));
        case HttpStatus.SC_PRECONDITION_FAILED:
            // [-10106] Username must be changed
            return new LoginFailureException(buffer.toString(), failure);
    }
    return new DefaultHttpResponseExceptionMappingService().map(failure, buffer, failure.getCode());
}
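For illustration, here is a minimal, self-contained sketch of the error-body parsing step the mapper performs, using the same Gson calls; the response body, the class name, and the printed output are fabricated for the example:

import java.io.StringReader;

import com.google.gson.JsonObject;
import com.google.gson.JsonParseException;
import com.google.gson.JsonParser;

public class SdsErrorBodyDemo {
    public static void main(String[] args) {
        // Fabricated response body in the shape the mapper expects
        final String body = "{\"code\": 401, \"errorCode\": -10012, \"debugInfo\": \"Wrong token.\"}";
        try {
            final JsonObject json = JsonParser.parseReader(new StringReader(body)).getAsJsonObject();
            if (json.has("errorCode") && json.get("errorCode").isJsonPrimitive()) {
                final int errorCode = json.getAsJsonPrimitive("errorCode").getAsInt();
                // The mapper above would now look up the key "Error -10012" in the SDS localization table
                System.out.println(String.format("Failure with errorCode %d", errorCode));
            }
        } catch (JsonParseException e) {
            // Malformed bodies are ignored, as in the mapper above
        }
    }
}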
Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class GoogleStorageObjectListService, method list.
protected AttributedList<Path> list(final Path directory, final ListProgressListener listener, final String delimiter, final int chunksize) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
    try {
        final Path bucket = containerService.getContainer(directory);
        final VersioningConfiguration versioning = null != session.getFeature(Versioning.class)
                ? session.getFeature(Versioning.class).getConfiguration(containerService.getContainer(directory))
                : VersioningConfiguration.empty();
        final AttributedList<Path> objects = new AttributedList<>();
        final List<Future<Path>> folders = new ArrayList<>();
        Objects response;
        long revision = 0L;
        String lastKey = null;
        String page = null;
        boolean hasDirectoryPlaceholder = containerService.isContainer(directory);
        do {
            response = session.getClient().objects().list(bucket.getName())
                    .setPageToken(page)
                    .setVersions(versioning.isEnabled())
                    .setMaxResults((long) chunksize)
                    .setDelimiter(delimiter)
                    .setPrefix(this.createPrefix(directory))
                    .execute();
            if (response.getItems() != null) {
                for (StorageObject object : response.getItems()) {
                    final String key = PathNormalizer.normalize(object.getName());
                    if (String.valueOf(Path.DELIMITER).equals(key)) {
                        log.warn(String.format("Skipping prefix %s", key));
                        continue;
                    }
                    if (new SimplePathPredicate(new Path(bucket, key, EnumSet.of(Path.Type.directory))).test(directory)) {
                        // Placeholder object, skip
                        hasDirectoryPlaceholder = true;
                        continue;
                    }
                    if (!StringUtils.equals(lastKey, key)) {
                        // Reset revision for next file
                        revision = 0L;
                    }
                    final EnumSet<Path.Type> types = object.getName().endsWith(String.valueOf(Path.DELIMITER))
                            ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file);
                    final Path file;
                    final PathAttributes attr = attributes.toAttributes(object);
                    attr.setRevision(++revision);
                    // Copy bucket location
                    attr.setRegion(bucket.attributes().getRegion());
                    if (null == delimiter) {
                        // When searching for files recursively
                        file = new Path(String.format("%s%s", bucket.getAbsolute(), key), types, attr);
                    } else {
                        file = new Path(directory.isDirectory() ? directory : directory.getParent(), PathNormalizer.name(key), types, attr);
                    }
                    objects.add(file);
                    lastKey = key;
                }
                if (versioning.isEnabled()) {
                    if (references) {
                        for (Path f : objects) {
                            if (f.attributes().isDuplicate()) {
                                final Path latest = objects.find(new LatestVersionPathPredicate(f));
                                if (latest != null) {
                                    // Reference version
                                    final AttributedList<Path> versions = new AttributedList<>(latest.attributes().getVersions());
                                    versions.add(f);
                                    latest.attributes().setVersions(versions);
                                } else {
                                    log.warn(String.format("No current version found for %s", f));
                                }
                            }
                        }
                    }
                }
            }
            if (response.getPrefixes() != null) {
                for (String prefix : response.getPrefixes()) {
                    if (String.valueOf(Path.DELIMITER).equals(prefix)) {
                        log.warn(String.format("Skipping prefix %s", prefix));
                        continue;
                    }
                    final String key = PathNormalizer.normalize(prefix);
                    if (new SimplePathPredicate(new Path(bucket, key, EnumSet.of(Path.Type.directory))).test(directory)) {
                        continue;
                    }
                    final Path file;
                    final PathAttributes attributes = new PathAttributes();
                    attributes.setRegion(bucket.attributes().getRegion());
                    if (null == delimiter) {
                        // When searching for files recursively
                        file = new Path(String.format("%s%s", bucket.getAbsolute(), key), EnumSet.of(Path.Type.directory, Path.Type.placeholder), attributes);
                    } else {
                        file = new Path(directory, PathNormalizer.name(key), EnumSet.of(Path.Type.directory, Path.Type.placeholder), attributes);
                    }
                    if (versioning.isEnabled()) {
                        folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(prefix)));
                    } else {
                        folders.add(ConcurrentUtils.constantFuture(file));
                    }
                }
            }
            page = response.getNextPageToken();
            listener.chunk(directory, objects);
        } while (page != null);
        for (Future<Path> future : folders) {
            try {
                objects.add(future.get());
            } catch (InterruptedException e) {
                log.error("Listing versioned objects failed with interrupt failure");
                throw new ConnectionCanceledException(e);
            } catch (ExecutionException e) {
                log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
                if (e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        listener.chunk(directory, objects);
        if (!hasDirectoryPlaceholder && objects.isEmpty()) {
            throw new NotfoundException(directory.getAbsolute());
        }
        return objects;
    } catch (IOException e) {
        throw new GoogleStorageExceptionMappingService().map("Listing directory {0} failed", e, directory);
    } finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
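The do/while loop above drives pagination with page tokens: each response carries a nextPageToken that seeds the next request, and a null token ends the listing. A minimal sketch of that pattern, with a fabricated Page type standing in for the Google Storage Objects response:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

public class PageTokenLoopDemo {
    // Fabricated stand-in for the Objects response type
    record Page(List<String> items, String nextPageToken) {}

    static List<String> listAll(final Function<String, Page> fetch) {
        final List<String> results = new ArrayList<>();
        String token = null;
        Page page;
        do {
            page = fetch.apply(token);    // corresponds to setPageToken(page).execute()
            results.addAll(page.items()); // accumulate this chunk
            token = page.nextPageToken(); // null signals the last page
        } while (token != null);
        return results;
    }

    public static void main(String[] args) {
        // Two fabricated pages: the first returns a continuation token, the second ends the loop
        final List<String> all = listAll(token -> token == null
                ? new Page(List.of("a", "b"), "page-2")
                : new Page(List.of("c"), null));
        System.out.println(all); // [a, b, c]
    }
}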
Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class SwiftLargeObjectUploadFeature, method upload.
@Override
public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    final List<Path> existingSegments = new ArrayList<>();
    if (status.isAppend()) {
        // Get a lexicographically ordered list of the existing file segments
        try {
            existingSegments.addAll(listService.list(segmentService.getSegmentsDirectory(file), new DisabledListProgressListener()).toList());
        } catch (NotfoundException e) {
            // Ignore
        }
    }
    // Get the results of the uploads in the order they were submitted. This is important for
    // building the manifest and is not a problem in terms of performance, because we should
    // only continue when all segments have been uploaded successfully.
    final List<StorageObject> completed = new ArrayList<>();
    // Submit file segments for concurrent upload
    final List<Future<StorageObject>> segments = new ArrayList<>();
    long remaining = status.getLength();
    long offset = 0;
    for (int segmentNumber = 1; remaining > 0; segmentNumber++) {
        final long length = Math.min(segmentSize, remaining);
        // Segment name with left padded segment number
        final Path segment = segmentService.getSegment(file, segmentNumber);
        if (existingSegments.contains(segment)) {
            final Path existingSegment = existingSegments.get(existingSegments.indexOf(segment));
            if (log.isDebugEnabled()) {
                log.debug(String.format("Skip segment %s", existingSegment));
            }
            final StorageObject stored = new StorageObject(containerService.getKey(segment));
            if (HashAlgorithm.md5.equals(existingSegment.attributes().getChecksum().algorithm)) {
                stored.setMd5sum(existingSegment.attributes().getChecksum().hash);
            }
            stored.setSize(existingSegment.attributes().getSize());
            offset += existingSegment.attributes().getSize();
            completed.add(stored);
        } else {
            // Submit to queue
            segments.add(this.submit(pool, segment, local, throttle, listener, status, offset, length, callback));
            if (log.isDebugEnabled()) {
                log.debug(String.format("Segment %s submitted with size %d and offset %d", segment, length, offset));
            }
            remaining -= length;
            offset += length;
        }
    }
    try {
        for (Future<StorageObject> futureSegment : segments) {
            completed.add(futureSegment.get());
        }
    } catch (InterruptedException e) {
        log.error("Part upload failed with interrupt failure");
        status.setCanceled();
        throw new ConnectionCanceledException(e);
    } catch (ExecutionException e) {
        log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
        if (e.getCause() instanceof BackgroundException) {
            throw (BackgroundException) e.getCause();
        }
        throw new DefaultExceptionMappingService().map(e.getCause());
    } finally {
        pool.shutdown(false);
    }
    if (log.isInfoEnabled()) {
        log.info(String.format("Finished large file upload %s with %d parts", file, completed.size()));
    }
    // Create or update the manifest
    try {
        // Static Large Object
        final String manifest = segmentService.manifest(containerService.getContainer(file).getName(), completed);
        if (log.isDebugEnabled()) {
            log.debug(String.format("Creating SLO manifest %s for %s", manifest, file));
        }
        final StorageObject stored = new StorageObject(containerService.getKey(file));
        stored.setSize(status.getLength());
        final String checksum = session.getClient().createSLOManifestObject(regionService.lookup(containerService.getContainer(file)),
                containerService.getContainer(file).getName(), status.getMime(), containerService.getKey(file), manifest, Collections.emptyMap());
        // The value of the Content-Length header is the total size of all segment objects, and the value of the ETag header is calculated by taking
        // the ETag value of each segment, concatenating them together, and then returning the MD5 checksum of the result.
        stored.setMd5sum(checksum);
        // Mark parent status as complete
        status.withResponse(new SwiftAttributesFinderFeature(session).toAttributes(stored)).setComplete();
        return stored;
    } catch (GenericException e) {
        throw new SwiftExceptionMappingService().map("Upload {0} failed", e, file);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
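The comment above quotes the Static Large Object rule: the manifest's ETag is the MD5 checksum of all segment ETags concatenated together. A sketch of that computation under the stated rule; the segment ETags and the hex rendering are assumptions for the example:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;

public class SloEtagDemo {
    static String manifestEtag(final List<String> segmentEtags) throws NoSuchAlgorithmException {
        // Concatenate the segment ETags in manifest order
        final StringBuilder concat = new StringBuilder();
        for (String etag : segmentEtags) {
            concat.append(etag);
        }
        final byte[] digest = MessageDigest.getInstance("MD5")
                .digest(concat.toString().getBytes(StandardCharsets.UTF_8));
        // Render the digest as the usual lowercase hex ETag
        final StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // Fabricated segment ETags
        System.out.println(manifestEtag(List.of(
                "79b281060d337b9b2b84ccf390edcf1a",
                "c8fdb181845a4ca6b8fec737b3581d76")));
    }
}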
Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class S3MultipartUploadService, method upload.
@Override
public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        MultipartUpload multipart = null;
        try {
            if (status.isAppend()) {
                final List<MultipartUpload> list = multipartService.find(file);
                if (!list.isEmpty()) {
                    multipart = list.iterator().next();
                }
            }
        } catch (AccessDeniedException | InteroperabilityException e) {
            log.warn(String.format("Ignore failure listing incomplete multipart uploads. %s", e));
        }
        final List<MultipartPart> completed = new ArrayList<>();
        // Not found or new upload
        if (null == multipart) {
            if (log.isInfoEnabled()) {
                log.info("No pending multipart upload found");
            }
            final S3Object object = new S3WriteFeature(session).getDetails(file, status);
            // ID for the initiated multipart upload
            final Path bucket = containerService.getContainer(file);
            multipart = session.getClient().multipartStartUpload(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object);
            if (log.isDebugEnabled()) {
                log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId()));
            }
        } else {
            if (status.isAppend()) {
                // Add already completed parts
                completed.addAll(multipartService.list(multipart));
            }
        }
        // Full size of file
        final long size = status.getLength() + status.getOffset();
        final List<Future<MultipartPart>> parts = new ArrayList<>();
        long remaining = status.getLength();
        long offset = 0;
        for (int partNumber = 1; remaining > 0; partNumber++) {
            boolean skip = false;
            if (status.isAppend()) {
                if (log.isInfoEnabled()) {
                    log.info(String.format("Determine if part number %d can be skipped", partNumber));
                }
                for (MultipartPart c : completed) {
                    if (c.getPartNumber().equals(partNumber)) {
                        if (log.isInfoEnabled()) {
                            log.info(String.format("Skip completed part number %d", partNumber));
                        }
                        skip = true;
                        offset += c.getSize();
                        break;
                    }
                }
            }
            if (!skip) {
                // Last part can be less than 5 MB. Adjust part size.
                final long length = Math.min(Math.max((size / (S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
                // Submit to queue
                parts.add(this.submit(pool, file, local, throttle, listener, status, multipart, partNumber, offset, length, callback));
                remaining -= length;
                offset += length;
            }
        }
        for (Future<MultipartPart> future : parts) {
            try {
                completed.add(future.get());
            } catch (InterruptedException e) {
                log.error("Part upload failed with interrupt failure");
                status.setCanceled();
                throw new ConnectionCanceledException(e);
            } catch (ExecutionException e) {
                log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
                if (e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        // Combining all the given parts into the final object. Processing of a Complete Multipart Upload request
        // could take several minutes to complete. Because a request could fail after the initial 200 OK response
        // has been sent, it is important that you check the response body to determine whether the request succeeded.
        final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed);
        if (log.isInfoEnabled()) {
            log.info(String.format("Completed multipart upload for %s with %d parts and checksum %s", complete.getObjectKey(), completed.size(), complete.getEtag()));
        }
        if (file.getType().contains(Path.Type.encrypted)) {
            log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
        } else {
            if (S3Session.isAwsHostname(session.getHost().getHostname())) {
                completed.sort(new MultipartPart.PartNumberComparator());
                final StringBuilder concat = new StringBuilder();
                for (MultipartPart part : completed) {
                    concat.append(part.getEtag());
                }
                final String expected = String.format("%s-%d", ChecksumComputeFactory.get(HashAlgorithm.md5).compute(concat.toString(), new TransferStatus()), completed.size());
                final String reference = StringUtils.remove(complete.getEtag(), "\"");
                if (!StringUtils.equalsIgnoreCase(expected, reference)) {
                    throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()),
                            MessageFormat.format("Mismatch between MD5 hash {0} of uploaded data and ETag {1} returned by the server", expected, reference));
                }
            }
        }
        final StorageObject object = new StorageObject(containerService.getKey(file));
        object.setETag(complete.getEtag());
        if (status.getTimestamp() != null) {
            object.addMetadata(S3TimestampFeature.METADATA_MODIFICATION_DATE, String.valueOf(status.getTimestamp()));
        }
        // Mark parent status as complete
        status.withResponse(new S3AttributesAdapter().toAttributes(object)).setComplete();
        return object;
    } catch (ServiceException e) {
        throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
    } finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
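The part-length formula above, Math.min(Math.max(size / (MAXIMUM_UPLOAD_PARTS - 1), partsize), remaining), grows parts beyond the configured size once that size would need more parts than S3 allows, and shrinks the last part to what remains. A small sketch, assuming the documented S3 limit of 10,000 parts behind S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS; class and variable names are fabricated:

public class PartSizeDemo {
    static final long MAXIMUM_UPLOAD_PARTS = 10_000;

    static long nextLength(final long size, final long partsize, final long remaining) {
        // Use the configured part size unless size/(parts-1) forces larger parts,
        // and never exceed what is left of the file
        return Math.min(Math.max(size / (MAXIMUM_UPLOAD_PARTS - 1), partsize), remaining);
    }

    public static void main(String[] args) {
        final long partsize = 10L * 1024 * 1024; // configured 10 MiB parts
        final long small = 100L * 1024 * 1024;   // 100 MiB file: the configured size wins
        final long huge = 2_000_000_000_000L;    // ~2 TB file: size/(parts-1) wins
        System.out.println(nextLength(small, partsize, small)); // 10485760 (10 MiB)
        System.out.println(nextLength(huge, partsize, huge));   // 200020002 (~200 MB), keeping the upload under 10,000 parts
    }
}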
Use of ch.cyberduck.core.exception.ConnectionCanceledException in project cyberduck by iterate-ch.
The class DAVSessionTest, method testMutualTlsUnknownCA.
@Test(expected = SSLNegotiateException.class)
@Ignore
public void testMutualTlsUnknownCA() throws Exception {
    final Host host = new Host(new DAVSSLProtocol(), "auth.startssl.com");
    final DAVSession session = new DAVSession(host, new DefaultX509TrustManager(),
            new KeychainX509KeyManager(new DisabledCertificateIdentityCallback(), host, new DisabledCertificateStore() {
                @Override
                public X509Certificate choose(final CertificateIdentityCallback prompt, final String[] keyTypes, final Principal[] issuers, final Host bookmark) throws ConnectionCanceledException {
                    assertEquals("auth.startssl.com", bookmark.getHostname());
                    assertTrue(Arrays.asList(issuers).contains(new X500Principal("CN=StartCom Certification Authority, OU=Secure Digital Certificate Signing, O=StartCom Ltd., C=IL")));
                    assertTrue(Arrays.asList(issuers).contains(new X500Principal("CN=StartCom Class 1 Primary Intermediate Client CA, OU=Secure Digital Certificate Signing, O=StartCom Ltd., C=IL")));
                    assertTrue(Arrays.asList(issuers).contains(new X500Principal("CN=StartCom Class 2 Primary Intermediate Client CA, OU=Secure Digital Certificate Signing, O=StartCom Ltd., C=IL")));
                    assertTrue(Arrays.asList(issuers).contains(new X500Principal("CN=StartCom Class 3 Primary Intermediate Client CA, OU=Secure Digital Certificate Signing, O=StartCom Ltd., C=IL")));
                    throw new ConnectionCanceledException();
                }
            }));
    final LoginConnectionService c = new LoginConnectionService(new DisabledLoginCallback() {
        @Override
        public Credentials prompt(final Host bookmark, String username, String title, String reason, LoginOptions options) {
            return new Credentials();
        }
    }, new DisabledHostKeyCallback(), new DisabledPasswordStore(), new DisabledProgressListener());
    c.connect(session, new DisabledCancelCallback());
}
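The issuer assertions above rely on X500Principal equality, which compares the canonicalized form of the distinguished name rather than the raw string. A brief sketch of that behavior; both DN strings are fabricated variants of the same name:

import javax.security.auth.x500.X500Principal;

public class X500PrincipalDemo {
    public static void main(String[] args) {
        final X500Principal a = new X500Principal("CN=StartCom Certification Authority, O=StartCom Ltd., C=IL");
        final X500Principal b = new X500Principal("cn=startcom certification authority,o=startcom ltd.,c=il");
        System.out.println(a.equals(b)); // true: case and whitespace differences are normalized in the canonical DN
    }
}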