use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1 — note: not the v2 software.amazon.awssdk class; the snippet uses v1-only APIs such as setKey) in project iaf by ibissource.
the class AmazonS3FileSystem method toFile.
@Override
public S3Object toFile(String filename) throws FileSystemException {
    // Build a lightweight S3Object stub carrying only the key;
    // no remote call is made here.
    final S3Object stub = new S3Object();
    stub.setKey(filename);
    return stub;
}
use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1 — the snippet relies on the v1-only getObjectContent() accessor) in project iaf by ibissource.
the class AmazonS3FileSystemTestHelper method _readFile.
@Override
public InputStream _readFile(String folder, String filename) throws FileNotFoundException {
    // Fetch the object and return its content stream, wrapped so that closing
    // the stream also closes the S3Object (releasing its HTTP connection).
    final S3Object s3File = s3Client.getObject(bucketName, filename);
    return new FilterInputStream(s3File.getObjectContent()) {
        @Override
        public void close() throws IOException {
            super.close();
            // Release the connection held by the S3Object itself.
            s3File.close();
        }
    };
}
use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1 — GetObjectRequest(bucket, key).withRange(...) is the v1 request style) in project Singularity by HubSpot.
the class S3ArtifactChunkDownloader method createDownloader.
/**
 * Builds a task that downloads this chunk's byte range of the artifact to disk.
 * The first chunk writes directly to the final path; later chunks (and retries)
 * go to suffixed temp files that are cleaned up on JVM exit.
 */
private Callable<Path> createDownloader(final int retryNum) {
    return () -> {
        final Path chunkPath = chunk == 0 ? downloadTo : Paths.get(downloadTo + "_" + chunk + "_" + retryNum);
        chunkPath.toFile().deleteOnExit();
        final long start = System.currentTimeMillis();
        // Inclusive byte range for this chunk, capped at the artifact length.
        final long rangeStart = chunk * chunkSize;
        final long rangeEnd = Math.min((chunk + 1) * chunkSize - 1, length);
        log.info("Downloading {} - chunk {} (retry {}) ({}-{}) to {}", s3Artifact.getFilename(), chunk, retryNum, rangeStart, rangeEnd, chunkPath);
        final GetObjectRequest request = new GetObjectRequest(s3Artifact.getS3Bucket(), s3Artifact.getS3ObjectKey()).withRange(rangeStart, rangeEnd);
        final S3Object fetched = s3.getObject(request);
        // Closing the content stream also releases the underlying connection.
        try (InputStream content = fetched.getObjectContent()) {
            Files.copy(content, chunkPath, StandardCopyOption.REPLACE_EXISTING);
        }
        log.info("Finished downloading chunk {} (retry {}) of {} ({} bytes) in {}", chunk, retryNum, s3Artifact.getFilename(), rangeEnd - rangeStart, JavaUtils.duration(start));
        return chunkPath;
    };
}
use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1 — ObjectMetadata/PutObjectRequest are v1 model classes) in project YCSB by brianfrankcooper.
the class S3Client method writeToStorage.
/**
 * Upload a new object to S3 or update an object on S3.
 *
 * @param bucket the name of the bucket
 * @param key the file key of the object to upload/update
 * @param values the data to be written on the object; all fields are assumed
 *        to have payloads of equal size
 * @param updateMarker if {@code true} a new object is uploaded to S3;
 *        if {@code false} an existing object is re-uploaded, preserving its
 *        current size on S3
 * @param sseLocal "true" to apply AES-256 server-side encryption (may be null)
 * @param ssecLocal customer-provided encryption key, or null
 * @return {@code Status.OK} on success, {@code Status.ERROR} on any failure
 */
protected Status writeToStorage(String bucket, String key, HashMap<String, ByteIterator> values, Boolean updateMarker, String sseLocal, SSECustomerKey ssecLocal) {
    // Null-safe SSE flag test: the original sseLocal.equals("true") would NPE
    // when sseLocal is null, even though ssecLocal IS explicitly null-checked.
    final boolean useSse = "true".equals(sseLocal);
    // Guard against an empty field map (was an unchecked toArray()[0] crash).
    if (values.isEmpty()) {
        System.err.println("No values provided for key :" + key);
        return Status.ERROR;
    }
    // Number of fields to concatenate into the object body.
    int fieldCount = values.size();
    // Sample the first field's payload; every field is assumed the same size.
    byte[] sourceArray = values.values().iterator().next().toArray();
    int sizeArray = sourceArray.length;
    int totalSize;
    if (updateMarker) {
        totalSize = sizeArray * fieldCount;
    } else {
        // Re-upload path: match the size of the object already stored on S3.
        try {
            Map.Entry<S3Object, ObjectMetadata> objectAndMetadata = getS3ObjectAndMetadata(bucket, key, ssecLocal);
            int sizeOfFile = (int) objectAndMetadata.getValue().getContentLength();
            fieldCount = sizeOfFile / sizeArray;
            totalSize = sizeOfFile;
            // Release the HTTP connection held by the fetched object.
            objectAndMetadata.getKey().close();
        } catch (Exception e) {
            System.err.println("Not possible to get the object :" + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    }
    // Build the payload by repeating the sampled field fieldCount times.
    byte[] destinationArray = new byte[totalSize];
    int offset = 0;
    for (int i = 0; i < fieldCount; i++) {
        System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray);
        offset += sizeArray;
    }
    try (InputStream input = new ByteArrayInputStream(destinationArray)) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(totalSize);
        PutObjectRequest putObjectRequest;
        if (useSse) {
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        } else if (ssecLocal != null) {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata).withSSECustomerKey(ssecLocal);
        } else {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        }
        try {
            PutObjectResult res = s3Client.putObject(putObjectRequest);
            if (res.getETag() == null) {
                return Status.ERROR;
            }
            // Both encryption modes reported the same line; the original had two
            // duplicated branches printing identical output.
            if (useSse || ssecLocal != null) {
                System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
            }
        } catch (Exception e) {
            System.err.println("Not possible to write object :" + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    } catch (Exception e) {
        System.err.println("Error in the creation of the stream :" + e.toString());
        e.printStackTrace();
        return Status.ERROR;
    }
    return Status.OK;
}
use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1 — getObjectMetadata() is the v1 accessor) in project gradle by gradle.
the class S3ResourceConnector method getMetaData.
/**
 * Looks up S3 metadata for the given location and maps it onto the external
 * resource metadata contract. Returns null when the object does not exist.
 */
public ExternalResourceMetaData getMetaData(URI location, boolean revalidate) {
    LOGGER.debug("Attempting to get resource metadata: {}", location);
    final S3Object object = s3Client.getMetaData(location);
    if (object == null) {
        return null;
    }
    try {
        final ObjectMetadata meta = object.getObjectMetadata();
        // Passing null for sha1 - TODO - consider using the etag which is an MD5 hash of the file (when less than 5Gb)
        return new DefaultExternalResourceMetaData(
            location,
            meta.getLastModified().getTime(),
            meta.getContentLength(),
            meta.getContentType(),
            meta.getETag(),
            null);
    } finally {
        // Always release the object's connection, even if metadata access throws.
        IoActions.closeQuietly(object);
    }
}
Aggregations