Use of ch.cyberduck.core.exception.ConnectionCanceledException in the cyberduck project by iterate-ch.
Example from class EueWriteFeatureTest, method testWriteCancel.
@Test(expected = TransferCanceledException.class)
public void testWriteCancel() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final EueWriteFeature writer = new EueWriteFeature(session, fileid);
    // One byte more than a full 32 KiB chunk so the copier reports >= 32768 bytes sent
    final byte[] random = RandomUtils.nextBytes(32769);
    final Path file = new Path(String.format("{%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file));
    {
        final BytecountStreamListener bytecount = new BytecountStreamListener();
        // Status that cancels the transfer once a full chunk has been sent
        final TransferStatus canceling = new TransferStatus() {
            @Override
            public void validate() throws ConnectionCanceledException {
                if(bytecount.getSent() >= 32768) {
                    throw new TransferCanceledException();
                }
                super.validate();
            }
        };
        canceling.setLength(random.length);
        final StatusOutputStream<EueWriteFeature.Chunk> stream = writer.write(file, canceling, new DisabledConnectionCallback());
        assertNotNull(stream);
        new StreamCopier(canceling, canceling).withListener(bytecount).transfer(new ByteArrayInputStream(random), stream);
        // Canceled upload must not leave a file behind
        assertFalse(new DefaultFindFeature(session).find(file));
        try {
            stream.getStatus();
            fail();
        }
        catch(TransferCanceledException e) {
            //
        }
    }
    // Rewrite
    final TransferStatus retry = new TransferStatus();
    retry.setLength(random.length);
    final StatusOutputStream<EueWriteFeature.Chunk> stream = writer.write(file, retry, new DisabledConnectionCallback());
    assertNotNull(stream);
    new StreamCopier(retry, retry).transfer(new ByteArrayInputStream(random), stream);
    assertFalse(new DefaultFindFeature(session).find(file));
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
Use of ch.cyberduck.core.exception.ConnectionCanceledException in the cyberduck project by iterate-ch.
Example from class IRODSUploadFeatureTest, method testInterruptStatus.
@Test
public void testInterruptStatus() throws Exception {
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
    final Profile profile = new ProfilePlistReader(factory).read(new Local("../profiles/iRODS (iPlant Collaborative).cyberduckprofile"));
    final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(System.getProperties().getProperty("irods.key"), System.getProperties().getProperty("irods.secret")));
    final IRODSSession session = new IRODSSession(host);
    session.open(Proxy.DIRECT, new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(Proxy.DIRECT, new DisabledLoginCallback(), new DisabledCancelCallback());
    // Write random content to a temporary local file used as the upload source
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    final int length = 32770;
    final byte[] content = RandomUtils.nextBytes(length);
    // try-with-resources so the stream is closed even if the write fails
    try (final OutputStream out = local.getOutputStream(false)) {
        IOUtils.write(content, out);
    }
    final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final TransferStatus status = new TransferStatus().withLength(content.length);
    // Cancel the transfer from the stream listener as soon as the first bytes are sent
    new IRODSUploadFeature(session).upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener() {
        @Override
        public void sent(final long bytes) {
            super.sent(bytes);
            status.setCanceled();
        }
    }, status, new DisabledConnectionCallback());
    // A canceled status must fail validation with ConnectionCanceledException
    try {
        status.validate();
        fail();
    }
    catch (ConnectionCanceledException e) {
        //
    }
    assertFalse(status.isComplete());
    session.close();
}
Use of ch.cyberduck.core.exception.ConnectionCanceledException in the cyberduck project by iterate-ch.
Example from class OpenSSHHostKeyVerifier, method verify.
/**
 * Verify the server host key against the OpenSSH known_hosts database.
 *
 * @param host Connection target whose key is being checked
 * @param key  Public key presented by the server
 * @return True if the key matches a known entry or the user accepts an unknown/changed key
 * @throws ConnectionCanceledException When the user rejects the key prompt
 * @throws ChecksumException           When key fingerprinting fails
 */
@Override
public boolean verify(final Host host, final PublicKey key) throws ConnectionCanceledException, ChecksumException {
    if(null == database) {
        // No known_hosts database could be loaded; fall back to prompt-based default verification
        log.warn(String.format("Missing database to verify host key for %s", host));
        return super.verify(host, key);
    }
    final KeyType type = KeyType.fromKey(key);
    if(type == KeyType.UNKNOWN) {
        // Unsupported key algorithm cannot be matched against any entry
        return false;
    }
    // Track whether any entry applies to this host so we can distinguish
    // a changed key (entry present, key mismatch) from an unknown host
    boolean foundApplicableHostEntry = false;
    for(OpenSSHKnownHosts.KnownHostEntry entry : database.entries()) {
        try {
            if(entry.appliesTo(type, format(host))) {
                foundApplicableHostEntry = true;
                if(entry.verify(key)) {
                    // Exact match with a known host entry
                    return true;
                }
            }
        }
        catch(IOException e) {
            log.error(String.format("Failure verifying host key entry %s. %s", entry, e.getMessage()));
            return false;
        }
    }
    if(foundApplicableHostEntry) {
        // Host is known but presented a different key; ask the user whether to accept the change
        try {
            return this.isChangedKeyAccepted(host, key);
        }
        catch(ConnectionCanceledException | ChecksumException e) {
            return false;
        }
    }
    // Host not found in the database; ask the user whether to trust the new key
    try {
        return this.isUnknownKeyAccepted(host, key);
    }
    catch(ConnectionCanceledException | ChecksumException e) {
        return false;
    }
}
Use of ch.cyberduck.core.exception.ConnectionCanceledException in the cyberduck project by iterate-ch.
Example from class S3VersionedObjectListService, method list.
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
// List all object versions and delete markers below the given directory prefix,
// paginating with key/version-id markers and resolving common prefixes (subfolders)
// concurrently on a thread pool.
final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
try {
final String prefix = this.createPrefix(directory);
final Path bucket = containerService.getContainer(directory);
final AttributedList<Path> children = new AttributedList<>();
// Futures for common-prefix (folder) lookups submitted to the pool
final List<Future<Path>> folders = new ArrayList<>();
// Pagination markers for the next listVersionedObjectsChunked call
String priorLastKey = null;
String priorLastVersionId = null;
// Per-key version counter; incremented for each successive version of the same key
long revision = 0L;
String lastKey = null;
// Root and bucket listings never need an explicit placeholder object
boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory);
do {
final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER), new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"), priorLastKey, priorLastVersionId, false);
// Amazon S3 returns object versions in the order in which they were stored, with the most recently stored returned first.
for(BaseVersionOrDeleteMarker marker : chunk.getItems()) {
final String key = URIEncoder.decode(marker.getKey());
if(String.valueOf(Path.DELIMITER).equals(PathNormalizer.normalize(key))) {
log.warn(String.format("Skipping prefix %s", key));
continue;
}
if(new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) {
// Placeholder object, skip
hasDirectoryPlaceholder = true;
continue;
}
final PathAttributes attr = new PathAttributes();
// "null" version id means the bucket was unversioned when the object was stored
attr.setVersionId("null".equals(marker.getVersionId()) ? null : marker.getVersionId());
if(!StringUtils.equals(lastKey, key)) {
// Reset revision for next file
revision = 0L;
}
attr.setRevision(++revision);
// Non-latest versions and latest delete markers are flagged as duplicates
attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest());
if(marker.isDeleteMarker()) {
attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true)));
}
attr.setModificationDate(marker.getLastModified().getTime());
attr.setRegion(bucket.attributes().getRegion());
if(marker instanceof S3Version) {
// Only full versions (not delete markers) carry size, ETag and storage class
final S3Version object = (S3Version) marker;
attr.setSize(object.getSize());
if(StringUtils.isNotBlank(object.getEtag())) {
attr.setETag(StringUtils.remove(object.getEtag(), "\""));
// The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted
// using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is
// not the MD5 of the object data.
attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\"")));
}
if(StringUtils.isNotBlank(object.getStorageClass())) {
attr.setStorageClass(object.getStorageClass());
}
}
final Path f = new Path(directory.isDirectory() ? directory : directory.getParent(), PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr);
if(metadata) {
// Optionally enrich with object metadata via an extra HEAD per file
f.withAttributes(attributes.find(f));
}
children.add(f);
lastKey = key;
}
if(references) {
// Attach every non-latest version to its current version as a reference
for(Path f : children) {
if(f.attributes().isDuplicate()) {
final Path latest = children.find(new LatestVersionPathPredicate(f));
if(latest != null) {
// Reference version
final AttributedList<Path> versions = new AttributedList<>(latest.attributes().getVersions());
versions.add(f);
latest.attributes().setVersions(versions);
}
else {
log.warn(String.format("No current version found for %s", f));
}
}
}
}
final String[] prefixes = chunk.getCommonPrefixes();
for(String common : prefixes) {
if(String.valueOf(Path.DELIMITER).equals(common)) {
log.warn(String.format("Skipping prefix %s", common));
continue;
}
final String key = PathNormalizer.normalize(URIEncoder.decode(common));
if(new SimplePathPredicate(new Path(bucket, key, EnumSet.of(Path.Type.directory))).test(directory)) {
continue;
}
// Resolve folder attributes asynchronously
folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common)));
}
// Advance pagination markers; a null key marker terminates the loop
priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null;
priorLastVersionId = chunk.getNextVersionIdMarker();
// Notify listener incrementally after each chunk
listener.chunk(directory, children);
} while(priorLastKey != null);
// Collect the asynchronously resolved folder placeholders
for(Future<Path> future : folders) {
try {
children.add(future.get());
}
catch(InterruptedException e) {
log.error("Listing versioned objects failed with interrupt failure");
throw new ConnectionCanceledException(e);
}
catch(ExecutionException e) {
log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
if(e.getCause() instanceof BackgroundException) {
throw (BackgroundException) e.getCause();
}
throw new BackgroundException(e.getCause());
}
}
listener.chunk(directory, children);
if(!hasDirectoryPlaceholder && children.isEmpty()) {
// Empty listing without a placeholder: decide between not-found and an empty folder
// Only for AWS
if(S3Session.isAwsHostname(session.getHost().getHostname())) {
if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) {
throw new NotfoundException(directory.getAbsolute());
}
}
else {
// Handle missing prefix for directory placeholders in Minio
final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()), String.valueOf(Path.DELIMITER), 1, null, null, false);
if(Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) {
throw new NotfoundException(directory.getAbsolute());
}
}
}
return children;
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory);
}
finally {
// Cancel future tasks
pool.shutdown(false);
}
}
Use of ch.cyberduck.core.exception.ConnectionCanceledException in the cyberduck project by iterate-ch.
Example from class S3MultipartCopyFeature, method copy.
/**
 * Copy the source object server-side using the S3 multipart upload API, splitting the
 * copy into parts submitted concurrently to the thread pool.
 *
 * @param source      Object to copy
 * @param destination Target object with bucket name and key
 * @param status      Transfer status with the total length to copy
 * @param listener    Notified with the size of each completed part
 * @return Version id of the completed object, if the target bucket is versioned
 * @throws BackgroundException Failure starting, uploading or completing the multipart copy
 */
@Override
protected String copy(final Path source, final S3Object destination, final TransferStatus status, final StreamListener listener) throws BackgroundException {
    try {
        final List<MultipartPart> completed = new ArrayList<>();
        // ID for the initiated multipart upload.
        final MultipartUpload multipart = session.getClient().multipartStartUpload(destination.getBucketName(), destination);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId()));
        }
        final long size = status.getLength();
        long remaining = size;
        long offset = 0;
        final List<Future<MultipartPart>> parts = new ArrayList<>();
        for(int partNumber = 1; remaining > 0; partNumber++) {
            // Last part can be less than 5 MB. Adjust part size.
            // Primitive long avoids needless boxing; grows the part size when the object
            // would otherwise exceed the maximum number of upload parts.
            final long length = Math.min(Math.max((size / S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS), partsize), remaining);
            // Submit to queue
            parts.add(this.submit(source, multipart, partNumber, offset, length));
            remaining -= length;
            offset += length;
        }
        for(Future<MultipartPart> future : parts) {
            try {
                final MultipartPart part = future.get();
                completed.add(part);
                listener.sent(part.getSize());
            }
            catch(InterruptedException e) {
                log.error("Part upload failed with interrupt failure");
                // Restore the interrupt flag so callers up the stack still see the interruption
                Thread.currentThread().interrupt();
                throw new ConnectionCanceledException(e);
            }
            catch(ExecutionException e) {
                log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
                if(e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        // Combining all the given parts into the final object. Processing of a Complete Multipart Upload request
        // could take several minutes to complete. Because a request could fail after the initial 200 OK response
        // has been sent, it is important that you check the response body to determine whether the request succeeded.
        final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Completed multipart upload for %s with checksum %s", complete.getObjectKey(), complete.getEtag()));
        }
        return complete.getVersionId();
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Cannot copy {0}", e, source);
    }
    finally {
        // Cancel any outstanding part uploads
        pool.shutdown(false);
    }
}
Aggregations