Use of org.jets3t.service.ServiceException in project alluxio by Alluxio.
The class GCSOutputStream, method close().
@Override
public void close() throws IOException {
  if (mClosed.getAndSet(true)) {
    return;
  }
  mLocalOutputStream.close();
  try {
    GSObject obj = new GSObject(mKey);
    obj.setBucketName(mBucketName);
    obj.setDataInputFile(mFile);
    obj.setContentLength(mFile.length());
    obj.setContentType(Mimetypes.MIMETYPE_BINARY_OCTET_STREAM);
    if (mHash != null) {
      obj.setMd5Hash(mHash.digest());
    } else {
      LOG.warn("MD5 was not computed for: {}", mKey);
    }
    mClient.putObject(mBucketName, obj);
  } catch (ServiceException e) {
    LOG.error("Failed to upload {}.", mKey);
    throw new IOException(e);
  } finally {
    // Delete the temporary file on the local machine whether the upload
    // succeeded or failed.
    if (!mFile.delete()) {
      LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
    }
  }
}
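close() above buffers writes in a local file, hashes them as they are written (mHash), uploads the file, and always deletes the local buffer. A minimal standalone sketch of that buffer-hash-upload-delete pattern, independent of jets3t; the Uploader interface is a hypothetical stand-in for the putObject call:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Hypothetical sketch: buffer data in a temp file, hash it while writing,
// hand the file and its MD5 digest to an uploader, then delete the file.
public class BufferedUploadSketch {

  /** Hypothetical callback standing in for the jets3t putObject call. */
  public interface Uploader {
    void upload(File file, byte[] md5) throws IOException;
  }

  public static void bufferHashAndUpload(byte[] data, Uploader uploader) throws IOException {
    File tmp = File.createTempFile("upload", ".tmp");
    MessageDigest md5 = null;
    try {
      md5 = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
      // Mirror GCSOutputStream: continue without a digest rather than fail.
    }
    try (OutputStream out = md5 == null
        ? new FileOutputStream(tmp)
        : new DigestOutputStream(new FileOutputStream(tmp), md5)) {
      out.write(data);
    }
    try {
      uploader.upload(tmp, md5 == null ? null : md5.digest());
    } finally {
      // Delete the local buffer whether or not the upload succeeded.
      if (!tmp.delete()) {
        System.err.println("Failed to delete temporary file @ " + tmp.getPath());
      }
    }
  }
}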
Use of org.jets3t.service.ServiceException in project alluxio by Alluxio.
The class GCSUnderFileSystem, method copyObject().
@Override
protected boolean copyObject(String src, String dst) {
  LOG.debug("Copying {} to {}", src, dst);
  GSObject obj = new GSObject(dst);
  // Retry the copy a few times, in case transient jets3t or GCS internal
  // errors occur during the copy.
  int retries = 3;
  for (int i = 0; i < retries; i++) {
    try {
      mClient.copyObject(mBucketName, src, mBucketName, obj, false);
      return true;
    } catch (ServiceException e) {
      LOG.error("Failed to copy file {} to {}", src, dst, e);
      if (i != retries - 1) {
        LOG.error("Retrying copying file {} to {}", src, dst);
      }
    }
  }
  LOG.error("Failed to copy file {} to {}, after {} retries", src, dst, retries);
  return false;
}
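copyObject above inlines a bounded-retry loop with retries = 3. A minimal, generalized sketch of the same pattern (the Retry helper is hypothetical, not part of Alluxio or jets3t):

import java.util.concurrent.Callable;

// Hypothetical helper: run an action up to maxAttempts times, returning
// true on the first success and false once every attempt has failed.
public final class Retry {
  private Retry() {}

  public static boolean withRetries(Callable<Void> action, int maxAttempts) {
    for (int i = 0; i < maxAttempts; i++) {
      try {
        action.call();
        return true;
      } catch (Exception e) {
        if (i != maxAttempts - 1) {
          System.err.println("Attempt " + (i + 1) + " failed, retrying: " + e);
        }
      }
    }
    return false;
  }
}

A caller would wrap the copy in the callable, e.g. Retry.withRetries(() -> { doCopy(); return null; }, 3); a shared helper like this avoids repeating the loop in every operation.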
Use of org.jets3t.service.ServiceException in project hadoop by apache.
The class Jets3tNativeFileSystemStore, method list().
/**
 * List objects under a prefix.
 * @param prefix prefix
 * @param delimiter delimiter
 * @param maxListingLength max no. of entries
 * @param priorLastKey last key in any previous search
 * @return a list of matches
 * @throws IOException on any reported failure
 */
private PartialListing list(String prefix, String delimiter,
    int maxListingLength, String priorLastKey) throws IOException {
  try {
    if (!prefix.isEmpty() && !prefix.endsWith(PATH_DELIMITER)) {
      prefix += PATH_DELIMITER;
    }
    StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
        prefix, delimiter, maxListingLength, priorLastKey);
    FileMetadata[] fileMetadata = new FileMetadata[chunk.getObjects().length];
    for (int i = 0; i < fileMetadata.length; i++) {
      StorageObject object = chunk.getObjects()[i];
      fileMetadata[i] = new FileMetadata(object.getKey(),
          object.getContentLength(), object.getLastModifiedDate().getTime());
    }
    return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
        chunk.getCommonPrefixes());
  } catch (ServiceException e) {
    handleException(e, prefix);
    // never returned - keep compiler happy
    return null;
  }
}
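A caller pages through list(...) by feeding each result's prior-last-key back into the next call. A hedged sketch of that loop; Page and ObjectStore are hypothetical stand-ins for PartialListing and the store, and the convention that a null marker ends the listing is an assumption of this sketch:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical paging loop over a chunked listing API: feed each page's
// prior-last-key back into the next request until it comes back null.
public class ListAllKeys {

  /** Stand-in for PartialListing: the keys in one page plus the resume marker. */
  public static class Page {
    final List<String> keys;
    final String priorLastKey; // assumed null when there are no more pages
    Page(List<String> keys, String priorLastKey) {
      this.keys = keys;
      this.priorLastKey = priorLastKey;
    }
  }

  /** Stand-in for the store's chunked list call. */
  public interface ObjectStore {
    Page list(String prefix, int maxListingLength, String priorLastKey) throws IOException;
  }

  public static List<String> listAll(ObjectStore store, String prefix) throws IOException {
    List<String> keys = new ArrayList<String>();
    String marker = null;
    do {
      Page page = store.list(prefix, 1000, marker);
      keys.addAll(page.keys);
      marker = page.priorLastKey;
    } while (marker != null);
    return keys;
  }
}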
Use of org.jets3t.service.ServiceException in project hadoop by apache.
The class Jets3tNativeFileSystemStore, method storeFile().
@Override
public void storeFile(String key, File file, byte[] md5Hash) throws IOException {
  if (multipartEnabled && file.length() >= multipartBlockSize) {
    storeLargeFile(key, file, md5Hash);
    return;
  }
  BufferedInputStream in = null;
  try {
    in = new BufferedInputStream(new FileInputStream(file));
    S3Object object = new S3Object(key);
    object.setDataInputStream(in);
    object.setContentType("binary/octet-stream");
    object.setContentLength(file.length());
    object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
    if (md5Hash != null) {
      object.setMd5Hash(md5Hash);
    }
    s3Service.putObject(bucket, object);
  } catch (ServiceException e) {
    handleException(e, key);
  } finally {
    IOUtils.closeStream(in);
  }
}
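The md5Hash parameter is the raw digest of the file contents (it is passed straight to S3Object.setMd5Hash, which takes raw bytes, not hex or base64). A minimal sketch of computing that digest before calling storeFile, using only java.security; the streaming loop is ours, not Hadoop's:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Compute the raw MD5 digest of a file's contents, suitable for the
// md5Hash parameter of storeFile above.
public class Md5OfFile {
  public static byte[] md5Of(File file) throws IOException {
    try {
      MessageDigest md5 = MessageDigest.getInstance("MD5");
      byte[] buf = new byte[8192];
      InputStream in = new FileInputStream(file);
      try {
        int n;
        while ((n = in.read(buf)) != -1) {
          md5.update(buf, 0, n);
        }
      } finally {
        in.close();
      }
      return md5.digest();
    } catch (NoSuchAlgorithmException e) {
      throw new IOException("MD5 algorithm unavailable", e);
    }
  }
}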
Use of org.jets3t.service.ServiceException in project hadoop by apache.
The class Jets3tNativeFileSystemStore, method copyLargeFile().
public void copyLargeFile(S3Object srcObject, String dstKey) throws IOException {
  try {
    // Number of parts, rounding up when the length is not an exact
    // multiple of the block size.
    long partCount = srcObject.getContentLength() / multipartCopyBlockSize
        + (srcObject.getContentLength() % multipartCopyBlockSize > 0 ? 1 : 0);
    MultipartUpload multipartUpload = s3Service.multipartStartUpload(
        bucket.getName(), dstKey, srcObject.getMetadataMap());
    List<MultipartPart> listedParts = new ArrayList<MultipartPart>();
    for (int i = 0; i < partCount; i++) {
      long byteRangeStart = i * multipartCopyBlockSize;
      long byteLength;
      if (i < partCount - 1) {
        byteLength = multipartCopyBlockSize;
      } else {
        // The final part carries the remainder, or a full block if the
        // length divides evenly.
        byteLength = srcObject.getContentLength() % multipartCopyBlockSize;
        if (byteLength == 0) {
          byteLength = multipartCopyBlockSize;
        }
      }
      MultipartPart copiedPart = s3Service.multipartUploadPartCopy(
          multipartUpload, i + 1, bucket.getName(), srcObject.getKey(),
          null, null, null, null, byteRangeStart,
          byteRangeStart + byteLength - 1, null);
      listedParts.add(copiedPart);
    }
    Collections.reverse(listedParts);
    s3Service.multipartCompleteUpload(multipartUpload, listedParts);
  } catch (ServiceException e) {
    handleException(e, srcObject.getKey());
  }
}
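The part arithmetic above is the subtle piece: ceil-divide the object length into blocks and give any remainder to the last part. A small self-contained check of those byte ranges (the sizes are made-up examples, not Hadoop defaults):

// Illustration of the byte-range arithmetic used in copyLargeFile:
// part i of a length-L object copied in B-byte blocks covers
// [i*B, min((i+1)*B, L) - 1].
public class PartRanges {
  public static void main(String[] args) {
    long length = 10_500_000L;   // example object size
    long blockSize = 4_000_000L; // example multipartCopyBlockSize
    long partCount = length / blockSize + (length % blockSize > 0 ? 1 : 0);
    for (int i = 0; i < partCount; i++) {
      long start = i * blockSize;
      long len = (i < partCount - 1)
          ? blockSize
          : (length % blockSize == 0 ? blockSize : length % blockSize);
      System.out.printf("part %d: bytes %d..%d%n", i + 1, start, start + len - 1);
    }
    // Prints: part 1: bytes 0..3999999, part 2: bytes 4000000..7999999,
    // part 3: bytes 8000000..10499999 -- the remainder goes to the last part.
  }
}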