Use of org.jets3t.service.ServiceException in project hadoop by apache.
From the class Jets3tNativeFileSystemStore, method retrieveMetadata:
@Override
public FileMetadata retrieveMetadata(String key) throws IOException {
  StorageObject object = null;
  try {
    LOG.debug("Getting metadata for key: {} from bucket: {}", key, bucket.getName());
    object = s3Service.getObjectDetails(bucket.getName(), key);
    return new FileMetadata(key, object.getContentLength(), object.getLastModifiedDate().getTime());
  } catch (ServiceException e) {
    try {
      // translate the ServiceException into an IOException
      handleException(e, key);
      return null;
    } catch (FileNotFoundException fnfe) {
      // and downgrade missing files to a null result
      return null;
    }
  } finally {
    if (object != null) {
      object.closeDataInputStream();
    }
  }
}
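The handleException helper called above is not shown in this excerpt; it translates the jets3t ServiceException into an IOException subtype. A minimal sketch of such a translator, keyed only on the HTTP response code (the name handleServiceException and the cases covered are illustrative, not Hadoop's actual implementation):

// Hypothetical helper: map a jets3t ServiceException onto java.io exceptions.
private void handleServiceException(ServiceException e, String key) throws IOException {
  // A 404 becomes FileNotFoundException, which the caller above catches and
  // downgrades to a null FileMetadata.
  if (e.getResponseCode() == 404) {
    throw new FileNotFoundException("Key '" + key + "' does not exist in bucket: " + bucket.getName());
  }
  // Surface an underlying network IOException directly.
  if (e.getCause() instanceof IOException) {
    throw (IOException) e.getCause();
  }
  throw new IOException("S3 error on key '" + key + "': " + e.getErrorCode(), e);
}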
Use of org.jets3t.service.ServiceException in project hadoop by apache.
From the class Jets3tNativeFileSystemStore, method copy:
@Override
public void copy(String srcKey, String dstKey) throws IOException {
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Copying srcKey: " + srcKey + " to dstKey: " + dstKey + " in bucket: " + bucket.getName());
    }
    if (multipartEnabled) {
      S3Object object = s3Service.getObjectDetails(bucket, srcKey, null, null, null, null);
      if (multipartCopyBlockSize > 0 && object.getContentLength() > multipartCopyBlockSize) {
        copyLargeFile(object, dstKey);
        return;
      }
    }
    S3Object dstObject = new S3Object(dstKey);
    dstObject.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
    s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(), dstObject, false);
  } catch (ServiceException e) {
    handleException(e, srcKey);
  }
}
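copyLargeFile is not shown in this excerpt; the branch above only checks whether the source is larger than multipartCopyBlockSize. As a rough illustration of what a multipart copy has to compute first (the helper below is illustrative, not Hadoop's code), the object is split into byte ranges of at most blockSize bytes:

// Illustrative only: compute inclusive byte ranges of at most blockSize bytes,
// as a multipart copy would before issuing its per-part copy requests.
static long[][] partRanges(long totalSize, long blockSize) {
  int parts = (int) ((totalSize + blockSize - 1) / blockSize); // ceiling division
  long[][] ranges = new long[parts][2];
  for (int p = 0; p < parts; p++) {
    long start = p * blockSize;
    ranges[p][0] = start;
    ranges[p][1] = Math.min(start + blockSize, totalSize) - 1;
  }
  return ranges;
}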
Use of org.jets3t.service.ServiceException in project mucommander by mucommander.
From the class S3File, method listObjects:
protected AbstractFile[] listObjects(String bucketName, String prefix, S3File parent) throws IOException {
  try {
    StorageObjectsChunk chunk = service.listObjectsChunked(bucketName, prefix, "/", Constants.DEFAULT_OBJECT_LIST_CHUNK_SIZE, null, true);
    StorageObject[] objects = chunk.getObjects();
    String[] commonPrefixes = chunk.getCommonPrefixes();
    if (objects.length == 0 && !prefix.equals("")) {
      // This happens only when the directory does not exist
      throw new IOException("Directory does not exist: " + prefix);
    }
    AbstractFile[] children = new AbstractFile[objects.length + commonPrefixes.length];
    FileURL childURL;
    int i = 0;
    String objectKey;
    for (StorageObject object : objects) {
      // Discard the object corresponding to the prefix itself
      objectKey = object.getKey();
      if (objectKey.equals(prefix))
        continue;
      childURL = (FileURL) fileURL.clone();
      childURL.setPath(bucketName + "/" + objectKey);
      Map<String, Object> parameters = new HashMap<>();
      parameters.put("service", service);
      parameters.put("object", object);
      children[i] = FileFactory.getFile(childURL, parent, parameters);
      i++;
    }
    org.jets3t.service.model.S3Object directoryObject;
    for (String commonPrefix : commonPrefixes) {
      childURL = (FileURL) fileURL.clone();
      childURL.setPath(bucketName + "/" + commonPrefix);
      directoryObject = new org.jets3t.service.model.S3Object(commonPrefix);
      // Common prefixes are not objects per se, and therefore do not have a date, content-length or owner.
      directoryObject.setLastModifiedDate(new Date(System.currentTimeMillis()));
      directoryObject.setContentLength(0);
      Map<String, Object> parameters = new HashMap<>();
      parameters.put("service", service);
      parameters.put("object", directoryObject);
      children[i] = FileFactory.getFile(childURL, parent, parameters);
      i++;
    }
    // Trim the array if the object corresponding to the prefix was discarded, as there is no way
    // to know in advance whether the prefix will appear in the results or not.
    if (i < children.length) {
      AbstractFile[] childrenTrimmed = new AbstractFile[i];
      System.arraycopy(children, 0, childrenTrimmed, 0, i);
      return childrenTrimmed;
    }
    return children;
  } catch (ServiceException e) {
    throw getIOException(e);
  }
}
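The call above passes completeListing=true, so jets3t keeps requesting chunks internally until the listing is complete and returns everything at once. If the chunks were to be walked manually instead, a sketch could look like the following, assuming the standard StorageObjectsChunk accessors (getPriorLastKey(), isListingComplete()) and the same service field as above:

// Sketch: page through a listing one chunk at a time (completeListing=false)
// instead of letting jets3t aggregate every chunk in memory at once.
private List<StorageObject> listAllChunked(String bucketName, String prefix) throws ServiceException {
  List<StorageObject> all = new ArrayList<>();
  String priorLastKey = null;
  StorageObjectsChunk chunk;
  do {
    chunk = service.listObjectsChunked(bucketName, prefix, "/",
        Constants.DEFAULT_OBJECT_LIST_CHUNK_SIZE, priorLastKey, false);
    all.addAll(Arrays.asList(chunk.getObjects()));
    priorLastKey = chunk.getPriorLastKey();
  } while (!chunk.isListingComplete());
  return all;
}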
Use of org.jets3t.service.ServiceException in project druid by druid-io.
From the class S3DataSegmentPusher, method push:
@Override
public DataSegment push(final File indexFilesDir, final DataSegment inSegment) throws IOException {
  final String s3Path = S3Utils.constructSegmentPath(config.getBaseKey(), inSegment);
  log.info("Copying segment[%s] to S3 at location[%s]", inSegment.getIdentifier(), s3Path);
  final File zipOutFile = File.createTempFile("druid", "index.zip");
  final long indexSize = CompressionUtils.zip(indexFilesDir, zipOutFile);
  try {
    return S3Utils.retryS3Operation(new Callable<DataSegment>() {

      @Override
      public DataSegment call() throws Exception {
        S3Object toPush = new S3Object(zipOutFile);
        final String outputBucket = config.getBucket();
        final String s3DescriptorPath = S3Utils.descriptorPathForSegmentPath(s3Path);
        toPush.setBucketName(outputBucket);
        toPush.setKey(s3Path);
        if (!config.getDisableAcl()) {
          toPush.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL);
        }
        log.info("Pushing %s.", toPush);
        s3Client.putObject(outputBucket, toPush);
        final DataSegment outSegment = inSegment.withSize(indexSize)
            .withLoadSpec(ImmutableMap.<String, Object>of("type", "s3_zip", "bucket", outputBucket, "key", toPush.getKey()))
            .withBinaryVersion(SegmentUtils.getVersionFromDir(indexFilesDir));
        File descriptorFile = File.createTempFile("druid", "descriptor.json");
        Files.copy(ByteStreams.newInputStreamSupplier(jsonMapper.writeValueAsBytes(inSegment)), descriptorFile);
        S3Object descriptorObject = new S3Object(descriptorFile);
        descriptorObject.setBucketName(outputBucket);
        descriptorObject.setKey(s3DescriptorPath);
        if (!config.getDisableAcl()) {
          descriptorObject.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL);
        }
        log.info("Pushing %s", descriptorObject);
        s3Client.putObject(outputBucket, descriptorObject);
        log.info("Deleting zipped index File[%s]", zipOutFile);
        zipOutFile.delete();
        log.info("Deleting descriptor file[%s]", descriptorFile);
        descriptorFile.delete();
        return outSegment;
      }
    });
  } catch (ServiceException e) {
    throw new IOException(e);
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
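S3Utils.retryS3Operation wraps the Callable so that transient S3 failures are retried before an exception reaches the catch blocks above. Its actual implementation is not reproduced here; a minimal sketch of that pattern, with an illustrative attempt limit and backoff:

// Illustrative retry wrapper: retry a Callable on ServiceException/IOException
// a fixed number of times, with simple exponential backoff.
static <T> T retryOnS3Errors(Callable<T> operation) throws Exception {
  final int maxAttempts = 3; // illustrative value
  long backoffMillis = 1000;
  for (int attempt = 1; ; attempt++) {
    try {
      return operation.call();
    } catch (ServiceException | IOException e) {
      if (attempt >= maxAttempts) {
        throw e;
      }
      Thread.sleep(backoffMillis);
      backoffMillis *= 2;
    }
  }
}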
Use of org.jets3t.service.ServiceException in project alluxio by Alluxio.
From the class GCSInputStream, method openStream:
/**
 * Opens a new stream at mPos if the wrapped stream mInputStream is null.
 */
private void openStream() throws IOException {
  ServiceException lastException = null;
  String errorMessage = String.format("Failed to open key: %s bucket: %s", mKey, mBucketName);
  while (mRetryPolicy.attempt()) {
    try {
      GSObject object;
      if (mPos > 0) {
        object = mClient.getObject(mBucketName, mKey, null, null, null, null, mPos, null);
      } else {
        object = mClient.getObject(mBucketName, mKey);
      }
      mInputStream = new BufferedInputStream(object.getDataInputStream());
      return;
    } catch (ServiceException e) {
      errorMessage = String.format("Failed to open key: %s bucket: %s attempts: %d error: %s", mKey, mBucketName, mRetryPolicy.getAttemptCount(), e.getMessage());
      if (e.getResponseCode() != HttpStatus.SC_NOT_FOUND) {
        throw new IOException(errorMessage, e);
      }
      // Key does not exist yet, so retry
      lastException = e;
    }
  }
  // Failed even after retrying: the key does not exist
  throw new IOException(errorMessage, lastException);
}
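The loop above is driven entirely by mRetryPolicy, of which only attempt() and getAttemptCount() are used. A minimal counting policy with that shape (the class name and the fixed attempt limit are illustrative, not Alluxio's actual policy):

// Sketch of a counting retry policy exposing the two calls used above.
final class CountingRetryPolicy {
  private final int mMaxAttempts;
  private int mAttemptCount = 0;

  CountingRetryPolicy(int maxAttempts) {
    mMaxAttempts = maxAttempts;
  }

  /** Returns true and records the attempt if another try is allowed. */
  boolean attempt() {
    if (mAttemptCount >= mMaxAttempts) {
      return false;
    }
    mAttemptCount++;
    return true;
  }

  int getAttemptCount() {
    return mAttemptCount;
  }
}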