Use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1) in project aws-doc-sdk-examples by awsdocs.
The class GetObject2, method main.
/**
 * Demonstrates three ways of downloading an S3 object with the AWS SDK for Java v1:
 * a full GET, a ranged GET (the first 10 bytes), and a GET with response-header
 * overrides. Each object's content is printed to stdout via displayTextInputStream.
 *
 * @param args unused
 * @throws IOException if closing one of the object streams fails after the
 *         downloads complete
 */
public static void main(String[] args) throws IOException {
    Regions clientRegion = Regions.DEFAULT_REGION;
    String bucketName = "*** Bucket name ***";
    String key = "*** Object key ***";
    S3Object fullObject = null, objectPortion = null, headerOverrideObject = null;
    try {
        AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                .withRegion(clientRegion)
                .withCredentials(new ProfileCredentialsProvider())
                .build();
        // Get an object and print its contents.
        System.out.println("Downloading an object");
        fullObject = s3Client.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + fullObject.getObjectMetadata().getContentType());
        System.out.println("Content: ");
        displayTextInputStream(fullObject.getObjectContent());
        // Get a range of bytes from an object and print the bytes.
        GetObjectRequest rangeObjectRequest = new GetObjectRequest(bucketName, key).withRange(0, 9);
        objectPortion = s3Client.getObject(rangeObjectRequest);
        System.out.println("Printing bytes retrieved.");
        displayTextInputStream(objectPortion.getObjectContent());
        // Get an entire object, overriding the specified response headers, and print the object's content.
        ResponseHeaderOverrides headerOverrides = new ResponseHeaderOverrides()
                .withCacheControl("No-cache")
                .withContentDisposition("attachment; filename=example.txt");
        GetObjectRequest getObjectRequestHeaderOverride = new GetObjectRequest(bucketName, key).withResponseHeaders(headerOverrides);
        headerOverrideObject = s3Client.getObject(getObjectRequestHeaderOverride);
        displayTextInputStream(headerOverrideObject.getObjectContent());
    } catch (AmazonServiceException e) {
        // The call was transmitted successfully, but Amazon S3 couldn't process
        // it, so it returned an error response.
        e.printStackTrace();
    } catch (SdkClientException e) {
        // Amazon S3 couldn't be contacted for a response, or the client
        // couldn't parse the response from Amazon S3.
        e.printStackTrace();
    } finally {
        // To ensure that the network connection doesn't remain open, close every
        // open input stream. Close each one independently: with sequential close()
        // calls, a failure closing the first stream would leak the remaining
        // connections. The first failure is rethrown; later ones are suppressed.
        IOException closeFailure = null;
        for (S3Object open : new S3Object[] { fullObject, objectPortion, headerOverrideObject }) {
            if (open != null) {
                try {
                    open.close();
                } catch (IOException e) {
                    if (closeFailure == null) {
                        closeFailure = e;
                    } else {
                        closeFailure.addSuppressed(e);
                    }
                }
            }
        }
        if (closeFailure != null) {
            throw closeFailure;
        }
    }
}
Use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1) in project XRTB by benmfaul.
The class Configuration, method processDirectory.
/**
 * Processes one page of an S3 listing: for each object, fetches it, validates
 * its "type" and "name" tags, and hands the object to readData for loading.
 *
 * @param s3      the S3 client used to fetch objects and their tags
 * @param listing the listing page whose object summaries are processed
 * @param bucket  the name of the bucket the listing came from
 * @throws Exception if an object is missing a name/type tag, has a name
 *         containing a space, or if any S3 call fails
 */
public void processDirectory(AmazonS3Client s3, ObjectListing listing, String bucket) throws Exception {
    for (S3ObjectSummary objectSummary : listing.getObjectSummaries()) {
        long size = objectSummary.getSize();
        logger.info("*** Processing S3 {}, size: {}", objectSummary.getKey(), size);
        S3Object object = s3.getObject(new GetObjectRequest(bucket, objectSummary.getKey()));
        // Close the object on every path that does not hand it to readData;
        // the original no-tags and bad-tag paths leaked the underlying HTTP
        // connection. readData is assumed to consume/close the stream itself
        // for the success path — TODO confirm against readData's contract.
        boolean handedOff = false;
        try {
            String bucketName = object.getBucketName();
            String keyName = object.getKey();
            GetObjectTaggingRequest request = new GetObjectTaggingRequest(bucketName, keyName);
            GetObjectTaggingResult result = s3.getObjectTagging(request);
            List<Tag> tags = result.getTagSet();
            String type = null;
            String name = null;
            if (tags.isEmpty()) {
                System.err.println("Error: " + keyName + " has no tags");
            } else {
                for (Tag tag : tags) {
                    String key = tag.getKey();
                    String value = tag.getValue();
                    if (key.equals("type")) {
                        type = value;
                    }
                    if (key.equals("name")) {
                        name = value;
                    }
                }
                if (name == null)
                    throw new Exception("Error: " + keyName + " is missing a name tag");
                if (name.contains(" "))
                    throw new Exception("Error: " + keyName + " has a name attribute with a space in it");
                if (type == null)
                    throw new Exception("Error: " + keyName + " has no type tag");
                // Symbol names are conventionally $-prefixed; normalize if missing.
                if (!name.startsWith("$"))
                    name = "$" + name;
                readData(type, name, object, size);
                handedOff = true;
            }
        } finally {
            if (!handedOff) {
                object.close();
            }
        }
    }
}
Use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1) in project YCSB by brianfrankcooper.
The class S3Client, method writeToStorage.
/**
 * Upload a new object to S3 or update an object on S3.
 *
 * @param bucket
 *            The name of the bucket
 * @param key
 *            The file key of the object to upload/update.
 * @param values
 *            The data to be written on the object
 * @param updateMarker
 *            A boolean value. If true a new object will be uploaded
 *            to S3. If false an existing object will be re-uploaded
 *            (the existing object's size is reused for the payload).
 * @param sseLocal
 *            "true" to request SSE-S3 (AES-256) server-side encryption;
 *            any other value (including null) disables it
 * @param ssecLocal
 *            customer-provided encryption key (SSE-C), or null
 * @return Status.OK on success, Status.ERROR on any failure
 */
protected Status writeToStorage(String bucket, String key, Map<String, ByteIterator> values, Boolean updateMarker, String sseLocal, SSECustomerKey ssecLocal) {
    int totalSize = 0;
    // number of fields to concatenate
    int fieldCount = values.size();
    // getting the first field in the values
    Object keyToSearch = values.keySet().toArray()[0];
    // getting the content of just one field
    byte[] sourceArray = values.get(keyToSearch).toArray();
    // size of each array
    int sizeArray = sourceArray.length;
    if (updateMarker) {
        totalSize = sizeArray * fieldCount;
    } else {
        // Re-upload: size the payload to match the existing object.
        S3Object object = null;
        try {
            object = getS3ObjectAndMetadata(bucket, key, ssecLocal);
            int sizeOfFile = (int) object.getObjectMetadata().getContentLength();
            // NOTE: integer division — a trailing partial field is ignored.
            fieldCount = sizeOfFile / sizeArray;
            totalSize = sizeOfFile;
        } catch (Exception e) {
            System.err.println("Not possible to get the object :" + key);
            e.printStackTrace();
            return Status.ERROR;
        } finally {
            // Always release the connection, even when getObjectMetadata() throws;
            // the original only closed on the success path.
            if (object != null) {
                try {
                    object.close();
                } catch (Exception ignored) {
                    // best-effort close; the metadata has already been read
                }
            }
        }
    }
    // Build the payload by repeating the single field's bytes fieldCount times.
    byte[] destinationArray = new byte[totalSize];
    int offset = 0;
    for (int i = 0; i < fieldCount; i++) {
        System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray);
        offset += sizeArray;
    }
    try (InputStream input = new ByteArrayInputStream(destinationArray)) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(totalSize);
        PutObjectRequest putObjectRequest = null;
        // "true".equals(...) is null-safe; sseLocal.equals("true") would NPE
        // when no SSE setting is configured.
        if ("true".equals(sseLocal)) {
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        } else if (ssecLocal != null) {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata).withSSECustomerKey(ssecLocal);
        } else {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        }
        try {
            PutObjectResult res = s3Client.putObject(putObjectRequest);
            if (res.getETag() == null) {
                // No ETag means the upload did not complete successfully.
                return Status.ERROR;
            } else {
                if ("true".equals(sseLocal)) {
                    System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
                } else if (ssecLocal != null) {
                    System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
                }
            }
        } catch (Exception e) {
            System.err.println("Not possible to write object :" + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    } catch (Exception e) {
        System.err.println("Error in the creation of the stream :" + e.toString());
        e.printStackTrace();
        return Status.ERROR;
    }
    return Status.OK;
}
Use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1) in project YCSB by brianfrankcooper.
The class S3Client, method readFromStorage.
/**
 * Download an object from S3.
 *
 * @param bucket
 *            The name of the bucket
 * @param key
 *            The file key of the object to download.
 * @param result
 *            The Hash map where data from the object are written
 * @param ssecLocal
 *            customer-provided encryption key (SSE-C), or null
 * @return Status.OK on success, Status.ERROR on any failure
 */
protected Status readFromStorage(String bucket, String key, Map<String, ByteIterator> result, SSECustomerKey ssecLocal) {
    // try-with-resources closes both the content stream and the S3Object on
    // every path; the original leaked them if IOUtils.toByteArray threw.
    try (S3Object object = getS3ObjectAndMetadata(bucket, key, ssecLocal);
         InputStream objectData = object.getObjectContent()) {
        // Consume the stream fully and store the bytes under the object's key.
        result.put(key, new ByteArrayByteIterator(IOUtils.toByteArray(objectData)));
    } catch (Exception e) {
        System.err.println("Not possible to get the object " + key);
        e.printStackTrace();
        return Status.ERROR;
    }
    return Status.OK;
}
Use of com.amazonaws.services.s3.model.S3Object (AWS SDK for Java v1) in project crate by crate.
The class S3FileReadingCollectorTest, method createBatchIterator.
// Builds a FileReadingIterator over the given URIs with S3 access fully mocked:
// the mock AmazonS3 client always lists exactly one object, "foo" in
// "fakebucket", and serves the caller-supplied s3InputStream as its content.
// When collectSourceUriFailure is true, an extra input is wired up so tests can
// observe per-URI failure messages via the sourceUriFailureInput field.
private BatchIterator<Row> createBatchIterator(Collection<String> fileUris, String compression, final S3ObjectInputStream s3InputStream, boolean collectSourceUriFailure) {
InputFactory.Context<LineCollectorExpression<?>> ctx = inputFactory.ctxForRefs(txnCtx, FileLineReferenceResolver::getImplementation);
List<Input<?>> inputs = new ArrayList<>(2);
// Always collect the raw source line (_raw) as the first input.
Reference raw = createReference(SourceLineExpression.COLUMN_NAME, DataTypes.STRING);
inputs.add(ctx.add(raw));
if (collectSourceUriFailure) {
Reference sourceUriFailure = createReference(SourceUriFailureExpression.COLUMN_NAME, DataTypes.STRING);
// noinspection unchecked
// Stored in a field so the test body can assert on collected failures.
sourceUriFailureInput = (Input<String>) ctx.add(sourceUriFailure);
inputs.add(sourceUriFailureInput);
}
return FileReadingIterator.newInstance(fileUris, inputs, ctx.expressions(), compression, Map.of(S3FileInputFactory.NAME, new FileInputFactory() {
@Override
public FileInput create(URI uri, Settings withClauseOptions) throws IOException {
return new S3FileInput(new S3ClientHelper() {
@Override
// Returns a mock client instead of a real connection; credentials,
// endpoint, and protocol are ignored.
protected AmazonS3 initClient(String accessKey, String secretKey, String endpoint, String protocol) throws IOException {
AmazonS3 client = mock(AmazonS3Client.class);
ObjectListing objectListing = mock(ObjectListing.class);
S3ObjectSummary summary = mock(S3ObjectSummary.class);
S3Object s3Object = mock(S3Object.class);
when(client.listObjects(anyString(), anyString())).thenReturn(objectListing);
when(objectListing.getObjectSummaries()).thenReturn(Collections.singletonList(summary));
when(summary.getKey()).thenReturn("foo");
when(client.getObject("fakebucket", "foo")).thenReturn(s3Object);
when(s3Object.getObjectContent()).thenReturn(s3InputStream);
// A single, non-truncated listing page: iteration stops after "foo".
when(client.listNextBatchOfObjects(any(ObjectListing.class))).thenReturn(objectListing);
when(objectListing.isTruncated()).thenReturn(false);
return client;
}
}, uri, "https");
}
}), false, 1, 0, CopyFromParserProperties.DEFAULT, FileUriCollectPhase.InputFormat.JSON, Settings.EMPTY);
}
Aggregations