Use of org.jets3t.service.model.StorageObject in the Apache Hadoop project:
the storeLargeFile method of the Jets3tNativeFileSystemStore class.
/**
 * Uploads a large local file to S3 using a multipart upload.
 *
 * @param key the object key under which the file contents are stored
 * @param file the local file to upload
 * @param md5Hash MD5 digest of the file contents, or null to skip integrity metadata
 * @throws IOException if the upload fails (translated by handleException)
 */
public void storeLargeFile(String key, File file, byte[] md5Hash) throws IOException {
  S3Object s3Object = new S3Object(key);
  s3Object.setDataInputFile(file);
  s3Object.setContentType("binary/octet-stream");
  s3Object.setContentLength(file.length());
  s3Object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
  if (md5Hash != null) {
    s3Object.setMd5Hash(md5Hash);
  }
  // MultipartUtils expects a list of objects; we upload exactly one.
  List<StorageObject> uploadList = new ArrayList<StorageObject>();
  uploadList.add(s3Object);
  MultipartUtils multipartUtils = new MultipartUtils(multipartBlockSize);
  try {
    multipartUtils.uploadObjects(bucket.getName(), s3Service, uploadList, null);
  } catch (Exception e) {
    handleException(e, key);
  }
}
Use of org.jets3t.service.model.StorageObject in the Alluxio project:
the close method of the S3OutputStream class.
/**
 * Closes this stream and uploads the buffered local file to S3.
 *
 * <p>Idempotent: only the first call performs the upload; subsequent calls return
 * immediately. Files larger than the maximum part size are uploaded via multipart
 * upload in parallel; smaller files use a single PUT to avoid the multipart overhead.
 * The temporary buffer file is deleted after a successful upload and intentionally
 * kept on disk when the upload fails, so it can be inspected (its path is logged).
 *
 * @throws IOException if the upload fails; the underlying exception is the cause
 */
@Override
public void close() throws IOException {
  if (mClosed.getAndSet(true)) {
    return; // already closed
  }
  mLocalOutputStream.close();
  try {
    S3Object obj = new S3Object(mKey);
    obj.setBucketName(mBucketName);
    obj.setDataInputFile(mFile);
    obj.setContentLength(mFile.length());
    obj.setContentEncoding(Mimetypes.MIMETYPE_BINARY_OCTET_STREAM);
    if (mHash != null) {
      obj.setMd5Hash(mHash.digest());
    } else {
      LOG.warn("MD5 was not computed for: {}", mKey);
    }
    if (MULTIPART_UTIL.isFileLargerThanMaxPartSize(mFile)) {
      // Big object will be split into parts and uploaded to S3 in parallel.
      List<StorageObject> objectsToUploadAsMultipart = new ArrayList<>();
      objectsToUploadAsMultipart.add(obj);
      MULTIPART_UTIL.uploadObjects(mBucketName, mClient, objectsToUploadAsMultipart, null);
    } else {
      // Avoid uploading file with Multipart if it's not necessary to save the
      // extra overhead.
      mClient.putObject(mBucketName, obj);
    }
    if (!mFile.delete()) {
      LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
    }
  } catch (Exception e) {
    // Fix: pass the exception as the final argument so SLF4J records the failure
    // cause and stack trace (the original call silently dropped it).
    LOG.error("Failed to upload {}. Temporary file @ {}", mKey, mFile.getPath(), e);
    throw new IOException(e);
  }
}
Use of org.jets3t.service.model.StorageObject in the Apache Hadoop project:
the list method of the Jets3tNativeFileSystemStore class.
/**
 * Performs one chunked object listing against the bucket.
 *
 * @param prefix key prefix to match; a trailing path delimiter is appended if absent
 * @param delimiter delimiter used to roll keys up into common prefixes
 * @param maxListingLength maximum number of entries to return in this chunk
 * @param priorLastKey last key returned by a previous listing, or null for the first call
 * @return the matched objects together with the continuation key and common prefixes
 * @throws IOException on any reported failure
 */
private PartialListing list(String prefix, String delimiter, int maxListingLength, String priorLastKey) throws IOException {
  try {
    if (!prefix.isEmpty() && !prefix.endsWith(PATH_DELIMITER)) {
      prefix += PATH_DELIMITER;
    }
    StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(), prefix, delimiter, maxListingLength, priorLastKey);
    StorageObject[] objects = chunk.getObjects();
    FileMetadata[] metadata = new FileMetadata[objects.length];
    for (int i = 0; i < objects.length; i++) {
      StorageObject object = objects[i];
      metadata[i] = new FileMetadata(object.getKey(), object.getContentLength(), object.getLastModifiedDate().getTime());
    }
    return new PartialListing(chunk.getPriorLastKey(), metadata, chunk.getCommonPrefixes());
  } catch (ServiceException e) {
    handleException(e, prefix);
    // never returned - keep compiler happy
    return null;
  }
}
Aggregations