Use of ch.cyberduck.core.preferences.HostPreferences in project cyberduck by iterate-ch.
The class AzureWriteFeature, method write.
@Override
public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final CloudBlob blob;
        if (status.isExists()) {
            if (new HostPreferences(session.getHost()).getBoolean("azure.upload.snapshot")) {
                session.getClient().getContainerReference(containerService.getContainer(file).getName()).getBlobReferenceFromServer(containerService.getKey(file)).createSnapshot();
            }
            if (status.isAppend()) {
                // Existing append blob type
                blob = session.getClient().getContainerReference(containerService.getContainer(file).getName()).getAppendBlobReference(containerService.getKey(file));
            }
            else {
                // Existing block blob type
                final PathAttributes attr = new AzureAttributesFinderFeature(session, context).find(file);
                if (BlobType.APPEND_BLOB == BlobType.valueOf(attr.getCustom().get(AzureAttributesFinderFeature.KEY_BLOB_TYPE))) {
                    blob = session.getClient().getContainerReference(containerService.getContainer(file).getName()).getAppendBlobReference(containerService.getKey(file));
                }
                else {
                    blob = session.getClient().getContainerReference(containerService.getContainer(file).getName()).getBlockBlobReference(containerService.getKey(file));
                }
            }
        }
        else {
            // Create new blob with default type set in defaults
            switch(blobType) {
                case APPEND_BLOB:
                    blob = session.getClient().getContainerReference(containerService.getContainer(file).getName()).getAppendBlobReference(containerService.getKey(file));
                    break;
                default:
                    blob = session.getClient().getContainerReference(containerService.getContainer(file).getName()).getBlockBlobReference(containerService.getKey(file));
            }
        }
        if (StringUtils.isNotBlank(status.getMime())) {
            blob.getProperties().setContentType(status.getMime());
        }
        // Add previous metadata when overwriting file
        final HashMap<String, String> headers = new HashMap<>(status.getMetadata());
        blob.setMetadata(headers);
        // Remove additional headers not allowed in metadata and move to properties
        if (headers.containsKey(HttpHeaders.CACHE_CONTROL)) {
            blob.getProperties().setCacheControl(headers.get(HttpHeaders.CACHE_CONTROL));
            headers.remove(HttpHeaders.CACHE_CONTROL);
        }
        if (headers.containsKey(HttpHeaders.CONTENT_TYPE)) {
            blob.getProperties().setContentType(headers.get(HttpHeaders.CONTENT_TYPE));
            headers.remove(HttpHeaders.CONTENT_TYPE);
        }
        final Checksum checksum = status.getChecksum();
        if (Checksum.NONE != checksum) {
            switch(checksum.algorithm) {
                case md5:
                    try {
                        blob.getProperties().setContentMD5(Base64.toBase64String(Hex.decodeHex(status.getChecksum().hash.toCharArray())));
                        headers.remove(HttpHeaders.CONTENT_MD5);
                    }
                    catch (DecoderException e) {
                        // Ignore
                    }
                    break;
            }
        }
        final BlobRequestOptions options = new BlobRequestOptions();
        options.setConcurrentRequestCount(1);
        options.setStoreBlobContentMD5(new HostPreferences(session.getHost()).getBoolean("azure.upload.md5"));
        final BlobOutputStream out;
        if (status.isAppend()) {
            options.setStoreBlobContentMD5(false);
            if (blob instanceof CloudAppendBlob) {
                out = ((CloudAppendBlob) blob).openWriteExisting(AccessCondition.generateEmptyCondition(), options, context);
            }
            else {
                throw new NotfoundException(String.format("Unexpected blob type for %s", blob.getName()));
            }
        }
        else {
            if (blob instanceof CloudAppendBlob) {
                out = ((CloudAppendBlob) blob).openWriteNew(AccessCondition.generateEmptyCondition(), options, context);
            }
            else {
                out = ((CloudBlockBlob) blob).openOutputStream(AccessCondition.generateEmptyCondition(), options, context);
            }
        }
        return new VoidStatusOutputStream(out) {
            @Override
            protected void handleIOException(final IOException e) throws IOException {
                if (StringUtils.equals(SR.STREAM_CLOSED, e.getMessage())) {
                    log.warn(String.format("Ignore failure %s", e));
                    return;
                }
                final Throwable cause = ExceptionUtils.getRootCause(e);
                if (cause instanceof StorageException) {
                    throw new IOException(e.getMessage(), new AzureExceptionMappingService().map((StorageException) cause));
                }
                throw e;
            }
        };
    }
    catch (StorageException e) {
        throw new AzureExceptionMappingService().map("Upload {0} failed", e, file);
    }
    catch (URISyntaxException e) {
        throw new NotfoundException(e.getMessage(), e);
    }
}
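As a minimal sketch of the host-scoped settings this method depends on, the following hypothetical helper reads the two azure.upload.* flags with the same HostPreferences calls used above; the wrapper class and method name are illustrative only, not part of Cyberduck.

import ch.cyberduck.core.Host;
import ch.cyberduck.core.preferences.HostPreferences;

class AzureUploadPreferencesExample {
    // Reads the two host-scoped flags queried by AzureWriteFeature.write above.
    static boolean[] uploadFlags(final Host host) {
        final HostPreferences preferences = new HostPreferences(host);
        final boolean snapshot = preferences.getBoolean("azure.upload.snapshot"); // snapshot the existing blob before overwriting
        final boolean storeMd5 = preferences.getBoolean("azure.upload.md5");      // have the client store a Content-MD5 for the upload
        return new boolean[]{snapshot, storeMd5};
    }
}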
Use of ch.cyberduck.core.preferences.HostPreferences in project cyberduck by iterate-ch.
The class SwiftObjectListService, method list.
protected AttributedList<Path> list(final Path directory, final ListProgressListener listener, final String prefix) throws BackgroundException {
    try {
        final AttributedList<Path> children = new AttributedList<>();
        final int limit = new HostPreferences(session.getHost()).getInteger("openstack.list.object.limit");
        String marker = null;
        List<StorageObject> list;
        final Path container = containerService.getContainer(directory);
        do {
            list = session.getClient().listObjectsStartingWith(regionService.lookup(container), container.getName(), prefix, null, limit, marker, Path.DELIMITER);
            for (StorageObject object : list) {
                final PathAttributes attr = attributes.toAttributes(object);
                String name = StringUtils.removeStart(object.getName(), prefix);
                if (StringUtils.endsWith(name, String.valueOf(Path.DELIMITER))) {
                    // Must remove trailing delimiter
                    name = StringUtils.removeEnd(name, String.valueOf(Path.DELIMITER));
                    if (children.contains(new Path(directory, name, EnumSet.of(Path.Type.directory), attr))) {
                        // There is already a real placeholder file with application/directory MIME type. Only add virtual directory if the placeholder object is missing
                        continue;
                    }
                }
                final EnumSet<Path.Type> types = "application/directory".equals(object.getMimeType()) ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file);
                attr.setOwner(container.attributes().getOwner());
                attr.setRegion(container.attributes().getRegion());
                children.add(new Path(directory, name, types, attr));
                marker = object.getName();
            }
            listener.chunk(directory, children);
        }
        while (list.size() == limit);
        if (!containerService.isContainer(directory)) {
            if (children.isEmpty()) {
                try {
                    if (0 == session.getClient().listObjectsStartingWith(regionService.lookup(container), container.getName(), containerService.getKey(directory), null, 1, null, Path.DELIMITER).size()) {
                        throw new NotfoundException(directory.getAbsolute());
                    }
                }
                catch (GenericException e) {
                    throw new SwiftExceptionMappingService().map("Listing directory {0} failed", e, directory);
                }
                catch (IOException e) {
                    throw new DefaultIOExceptionMappingService().map(e, directory);
                }
            }
        }
        return children;
    }
    catch (GenericException e) {
        throw new SwiftExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map(e, directory);
    }
}
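The do/while loop above pages through the container using a name marker and the openstack.list.object.limit chunk size: each request resumes after the last name seen, and a page shorter than the limit ends the listing. A minimal, generic sketch of that paging pattern follows, with a hypothetical fetchPage function standing in for listObjectsStartingWith.

import java.util.List;
import java.util.function.BiFunction;

class MarkerPagingExample {
    // fetchPage(marker, limit) returns at most `limit` object names sorting after `marker`.
    static void pageThrough(final BiFunction<String, Integer, List<String>> fetchPage, final int limit) {
        String marker = null;
        List<String> page;
        do {
            page = fetchPage.apply(marker, limit);
            for (final String name : page) {
                marker = name; // the next request resumes after the last name seen
            }
        }
        while (page.size() == limit); // a short page means the listing is exhausted
    }
}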
Use of ch.cyberduck.core.preferences.HostPreferences in project cyberduck by iterate-ch.
The class S3UrlProvider, method toUrl.
@Override
public DescriptiveUrlBag toUrl(final Path file) {
    final DescriptiveUrlBag list = new DescriptiveUrlBag();
    if (session.getClient().getConfiguration().getBoolProperty("s3service.disable-dns-buckets", false)) {
        list.addAll(new DefaultUrlProvider(session.getHost()).toUrl(file));
    }
    else {
        list.add(this.toUrl(file, session.getHost().getProtocol().getScheme(), session.getHost().getPort()));
        list.add(this.toUrl(file, Scheme.http, 80));
        if (StringUtils.isNotBlank(session.getHost().getWebURL())) {
            // Only include when custom domain is configured
            list.addAll(new HostWebUrlProvider(session.getHost()).toUrl(file));
        }
    }
    if (file.isFile()) {
        if (!session.getHost().getCredentials().isAnonymousLogin()) {
            // X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less
            // than 604800 seconds
            // In one hour
            list.add(this.toSignedUrl(file, (int) TimeUnit.HOURS.toSeconds(1)));
            // Default signed URL expiring in 24 hours.
            list.add(this.toSignedUrl(file, (int) TimeUnit.SECONDS.toSeconds(new HostPreferences(session.getHost()).getInteger("s3.url.expire.seconds"))));
            // 1 Week
            list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(7)));
            switch(session.getSignatureVersion()) {
                case AWS2:
                    // 1 Month
                    list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(30)));
                    // 1 Year
                    list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(365)));
                    break;
                case AWS4HMACSHA256:
                    break;
            }
        }
    }
    // AWS services require specifying an Amazon S3 bucket using S3://bucket
    list.add(new DescriptiveUrl(URI.create(String.format("s3://%s%s", containerService.getContainer(file).getName(), file.isRoot() ? Path.DELIMITER : containerService.isContainer(file) ? Path.DELIMITER : String.format("/%s", URIEncoder.encode(containerService.getKey(file))))), DescriptiveUrl.Type.provider, MessageFormat.format(LocaleFactory.localizedString("{0} URL"), "S3")));
    // Filter by matching container name
    final Optional<Set<Distribution>> filtered = distributions.entrySet().stream().filter(entry -> new SimplePathPredicate(containerService.getContainer(file)).test(entry.getKey())).map(Map.Entry::getValue).findFirst();
    if (filtered.isPresent()) {
        // Add CloudFront distributions
        for (Distribution distribution : filtered.get()) {
            list.addAll(new DistributionUrlProvider(distribution).toUrl(file));
        }
    }
    return list;
}
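The expiry arguments passed to toSignedUrl above come from fixed TimeUnit conversions plus the host-scoped s3.url.expire.seconds preference (24 hours by default, per the comment in the method). A minimal sketch of those three values; the wrapper class and method name are illustrative only.

import java.util.concurrent.TimeUnit;
import ch.cyberduck.core.Host;
import ch.cyberduck.core.preferences.HostPreferences;

class SignedUrlExpiryExample {
    // The three expiry values handed to toSignedUrl for every file.
    static int[] expiries(final Host host) {
        final int oneHour = (int) TimeUnit.HOURS.toSeconds(1);                                // 3600 seconds
        final int configured = new HostPreferences(host).getInteger("s3.url.expire.seconds"); // configurable default, 24 hours
        final int oneWeek = (int) TimeUnit.DAYS.toSeconds(7);                                 // 604800 seconds, the presigned URL limit
        return new int[]{oneHour, configured, oneWeek};
    }
}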
Use of ch.cyberduck.core.preferences.HostPreferences in project cyberduck by iterate-ch.
The class S3VersionedObjectListService, method list.
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
    try {
        final String prefix = this.createPrefix(directory);
        final Path bucket = containerService.getContainer(directory);
        final AttributedList<Path> children = new AttributedList<>();
        final List<Future<Path>> folders = new ArrayList<>();
        String priorLastKey = null;
        String priorLastVersionId = null;
        long revision = 0L;
        String lastKey = null;
        boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory);
        do {
            final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER), new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"), priorLastKey, priorLastVersionId, false);
            // Amazon S3 returns object versions in the order in which they were stored, with the most recently stored returned first.
            for (BaseVersionOrDeleteMarker marker : chunk.getItems()) {
                final String key = URIEncoder.decode(marker.getKey());
                if (String.valueOf(Path.DELIMITER).equals(PathNormalizer.normalize(key))) {
                    log.warn(String.format("Skipping prefix %s", key));
                    continue;
                }
                if (new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) {
                    // Placeholder object, skip
                    hasDirectoryPlaceholder = true;
                    continue;
                }
                final PathAttributes attr = new PathAttributes();
                attr.setVersionId("null".equals(marker.getVersionId()) ? null : marker.getVersionId());
                if (!StringUtils.equals(lastKey, key)) {
                    // Reset revision for next file
                    revision = 0L;
                }
                attr.setRevision(++revision);
                attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest());
                if (marker.isDeleteMarker()) {
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true)));
                }
                attr.setModificationDate(marker.getLastModified().getTime());
                attr.setRegion(bucket.attributes().getRegion());
                if (marker instanceof S3Version) {
                    final S3Version object = (S3Version) marker;
                    attr.setSize(object.getSize());
                    if (StringUtils.isNotBlank(object.getEtag())) {
                        attr.setETag(StringUtils.remove(object.getEtag(), "\""));
                        // The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted
                        // using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is
                        // not the MD5 of the object data.
                        attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\"")));
                    }
                    if (StringUtils.isNotBlank(object.getStorageClass())) {
                        attr.setStorageClass(object.getStorageClass());
                    }
                }
                final Path f = new Path(directory.isDirectory() ? directory : directory.getParent(), PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr);
                if (metadata) {
                    f.withAttributes(attributes.find(f));
                }
                children.add(f);
                lastKey = key;
            }
            if (references) {
                for (Path f : children) {
                    if (f.attributes().isDuplicate()) {
                        final Path latest = children.find(new LatestVersionPathPredicate(f));
                        if (latest != null) {
                            // Reference version
                            final AttributedList<Path> versions = new AttributedList<>(latest.attributes().getVersions());
                            versions.add(f);
                            latest.attributes().setVersions(versions);
                        }
                        else {
                            log.warn(String.format("No current version found for %s", f));
                        }
                    }
                }
            }
            final String[] prefixes = chunk.getCommonPrefixes();
            for (String common : prefixes) {
                if (String.valueOf(Path.DELIMITER).equals(common)) {
                    log.warn(String.format("Skipping prefix %s", common));
                    continue;
                }
                final String key = PathNormalizer.normalize(URIEncoder.decode(common));
                if (new SimplePathPredicate(new Path(bucket, key, EnumSet.of(Path.Type.directory))).test(directory)) {
                    continue;
                }
                folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common)));
            }
            priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null;
            priorLastVersionId = chunk.getNextVersionIdMarker();
            listener.chunk(directory, children);
        }
        while (priorLastKey != null);
        for (Future<Path> future : folders) {
            try {
                children.add(future.get());
            }
            catch (InterruptedException e) {
                log.error("Listing versioned objects failed with interrupt failure");
                throw new ConnectionCanceledException(e);
            }
            catch (ExecutionException e) {
                log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
                if (e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        listener.chunk(directory, children);
        if (!hasDirectoryPlaceholder && children.isEmpty()) {
            // Only for AWS
            if (S3Session.isAwsHostname(session.getHost().getHostname())) {
                if (StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) {
                    throw new NotfoundException(directory.getAbsolute());
                }
            }
            else {
                // Handle missing prefix for directory placeholders in Minio
                final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()), String.valueOf(Path.DELIMITER), 1, null, null, false);
                if (Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) {
                    throw new NotfoundException(directory.getAbsolute());
                }
            }
        }
        return children;
    }
    catch (ServiceException e) {
        throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
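The revision counter above relies on S3 returning all versions of one key adjacent to each other, newest first, so the most recent version of each object is assigned revision 1. A minimal stand-alone sketch of that counting logic; the class and method names are illustrative only.

import java.util.List;

class RevisionCounterExample {
    // keysNewestFirst lists version keys as returned by S3: versions of the same
    // object are adjacent, most recent first.
    static void assignRevisions(final List<String> keysNewestFirst) {
        long revision = 0L;
        String lastKey = null;
        for (final String key : keysNewestFirst) {
            if (!key.equals(lastKey)) {
                revision = 0L; // a new object starts its own revision sequence
            }
            System.out.printf("%s revision %d%n", key, ++revision); // 1 = latest, 2 = previous, ...
            lastKey = key;
        }
    }
}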
Use of ch.cyberduck.core.preferences.HostPreferences in project cyberduck by iterate-ch.
The class S3LoggingFeature, method setConfiguration.
@Override
public void setConfiguration(final Path file, final LoggingConfiguration configuration) throws BackgroundException {
    // Logging target bucket
    final Path bucket = containerService.getContainer(file);
    try {
        final S3BucketLoggingStatus status = new S3BucketLoggingStatus(StringUtils.isNotBlank(configuration.getLoggingTarget()) ? configuration.getLoggingTarget() : bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), null);
        if (configuration.isEnabled()) {
            status.setLogfilePrefix(new HostPreferences(session.getHost()).getProperty("s3.logging.prefix"));
        }
        session.getClient().setBucketLoggingStatus(bucket.getName(), status, true);
    }
    catch (ServiceException e) {
        throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e, file);
    }
}
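A minimal sketch of how the logging status above is assembled, assuming the JetS3t S3BucketLoggingStatus class used by Cyberduck's S3 module; the targetBucket parameter and wrapper class are hypothetical stand-ins for configuration.getLoggingTarget() and the surrounding feature.

import ch.cyberduck.core.Host;
import ch.cyberduck.core.preferences.HostPreferences;
import org.jets3t.service.model.S3BucketLoggingStatus;

class LoggingStatusExample {
    // Builds a logging status whose log-file prefix comes from the host-scoped
    // s3.logging.prefix property, mirroring setConfiguration above.
    static S3BucketLoggingStatus status(final Host host, final String targetBucket) {
        final S3BucketLoggingStatus status = new S3BucketLoggingStatus(targetBucket, null);
        status.setLogfilePrefix(new HostPreferences(host).getProperty("s3.logging.prefix"));
        return status;
    }
}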