Use of com.amazonaws.services.s3.AmazonS3ClientBuilder.defaultClient in project aws-doc-sdk-examples by awsdocs, from the class ExportEndpoints, method exportEndpointsToS3.
/**
 * Runs an Amazon Pinpoint export job that writes all endpoints of the given
 * application to the given S3 bucket, then lists the objects the job created.
 *
 * @param s3BucketName     name of the destination S3 bucket
 * @param iamExportRoleArn ARN of the IAM role Pinpoint assumes to write to S3
 * @param applicationId    ID of the Pinpoint application whose endpoints are exported
 * @return the S3 object keys produced by the export job
 */
public static List<String> exportEndpointsToS3(String s3BucketName, String iamExportRoleArn, String applicationId) {
// The S3 path that Amazon Pinpoint exports the endpoints to.
// NOTE(review): SimpleDateFormat is legacy and not thread-safe; a
// java.time.format.DateTimeFormatter would be preferable here.
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd-HH_mm:ss.SSS_z");
// Timestamped key prefix keeps successive export runs from overwriting each other.
String endpointsKeyPrefix = "exports/" + applicationId + "_" + dateFormat.format(new Date());
String s3UrlPrefix = "s3://" + s3BucketName + "/" + endpointsKeyPrefix + "/";
// Defines the export job that Amazon Pinpoint runs.
ExportJobRequest exportJobRequest = new ExportJobRequest().withS3UrlPrefix(s3UrlPrefix).withRoleArn(iamExportRoleArn);
CreateExportJobRequest createExportJobRequest = new CreateExportJobRequest().withApplicationId(applicationId).withExportJobRequest(exportJobRequest);
// Initializes the Amazon Pinpoint client.
AmazonPinpoint pinpointClient = AmazonPinpointClientBuilder.standard().withRegion(Regions.US_EAST_1).build();
System.out.format("Exporting endpoints from Amazon Pinpoint application %s to Amazon S3 " + "bucket %s . . .\n", applicationId, s3BucketName);
List<String> objectKeys = null;
try {
// Runs the export job with Amazon Pinpoint.
CreateExportJobResult exportResult = pinpointClient.createExportJob(createExportJobRequest);
// Prints the export job status to the console while the job runs.
// printExportJobStatus presumably blocks until the job finishes — confirm
// before relying on the listing below being complete.
String jobId = exportResult.getExportJobResponse().getId();
printExportJobStatus(pinpointClient, applicationId, jobId);
// Initializes the Amazon S3 client.
AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
// Lists the objects created by Amazon Pinpoint.
objectKeys = s3Client.listObjectsV2(s3BucketName, endpointsKeyPrefix).getObjectSummaries().stream().map(S3ObjectSummary::getKey).collect(Collectors.toList());
} catch (AmazonServiceException e) {
System.err.println(e.getMessage());
// Terminates the whole JVM on any service error (sample-code style); the
// null return below is therefore unreachable on failure.
System.exit(1);
}
return objectKeys;
}
Use of com.amazonaws.services.s3.AmazonS3ClientBuilder.defaultClient in project nosqlbench by nosqlbench, from the class S3UrlStreamHandlerTest, method sanityCheckS3UrlHandler.
/**
 * This test requires that you have credentials already configured on your local system
 * for S3. It creates an object using the s3 client directly, then uses a generic
 * URL method to access and verify the contents.
 */
@Disabled
@Test
public void sanityCheckS3UrlHandler() {
AmazonS3 client = AmazonS3ClientBuilder.defaultClient();
// Create the target bucket on first use; later runs reuse it.
if (!client.doesBucketExistV2(bucketName)) {
client.createBucket(bucketName);
}
PutObjectResult putObjectResult = client.putObject(bucketName, keyName, testValue);
assertThat(putObjectResult).isNotNull();
// Read the object back through the custom s3:// URL stream handler and check
// that the round-tripped content matches what was written. try-with-resources
// closes the stream/reader, which the original version leaked.
try (InputStream is = new URL("s3://" + bucketName + "/" + keyName).openStream();
BufferedReader br = new BufferedReader(new InputStreamReader(is))) {
String line = br.readLine();
assertThat(line).isEqualTo(testValue);
System.out.println(line);
} catch (Exception e) {
// Propagate instead of printStackTrace(): swallowing the exception let the
// test pass even when the URL handler was broken.
throw new RuntimeException(e);
}
}
Use of com.amazonaws.services.s3.AmazonS3ClientBuilder.defaultClient in project javamelody by javamelody, from the class S3, method upload.
/**
 * Uploads a file to the given S3 bucket under the file's own name, switching to a
 * multipart upload when the file exceeds {@code MINIMUM_SIZE_FOR_MULTIPART}.
 *
 * @param file       the local file to upload
 * @param bucketName the destination S3 bucket
 * @throws IOException if the upload fails for any reason (the cause is wrapped)
 */
static void upload(File file, String bucketName) throws IOException {
final AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
if (file.length() > MINIMUM_SIZE_FOR_MULTIPART) {
// multipart upload
final List<PartETag> partETags = new ArrayList<>();
final InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, file.getName()));
final String uploadId = initResponse.getUploadId();
final UploadPartRequestFactory requestFactory = new UploadPartRequestFactory(new PutObjectRequest(bucketName, file.getName(), file), uploadId, PART_SIZE);
try {
// Upload each part in order, collecting the ETags needed to complete the upload.
while (requestFactory.hasMoreRequests()) {
partETags.add(s3Client.uploadPart(requestFactory.getNextUploadPartRequest()).getPartETag());
}
final CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, file.getName(), uploadId, partETags);
s3Client.completeMultipartUpload(compRequest);
} catch (final Exception e) {
// Abort so the already-uploaded parts do not linger in the bucket. If the
// abort itself fails, attach it as suppressed instead of letting it mask
// the original upload failure (the previous version could lose `e`).
try {
s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, file.getName(), uploadId));
} catch (final Exception abortException) {
e.addSuppressed(abortException);
}
throw new IOException(e);
}
} else {
// Small file: single PUT is simpler and cheaper than multipart.
try {
s3Client.putObject(bucketName, file.getName(), file);
} catch (final Exception e) {
throw new IOException(e);
}
}
LOG.info("File " + file.getName() + " uploaded successfully to S3");
}
Use of com.amazonaws.services.s3.AmazonS3ClientBuilder.defaultClient in project amazon-neptune-tools by awslabs, from the class JsonResource, method getFromS3.
/**
 * Fetches the JSON resource addressed by {@code resourcePath} from S3 and parses
 * it into a Jackson tree model.
 *
 * @return the parsed JSON document
 * @throws IOException if the object cannot be read or is not valid JSON
 */
private JsonNode getFromS3() throws IOException {
final S3ObjectInfo objectInfo = new S3ObjectInfo(resourcePath.toString());
final AmazonS3 client = AmazonS3ClientBuilder.defaultClient();
final ObjectMapper mapper = new ObjectMapper();
// Parse straight from the object stream; try-with-resources closes the
// S3 content stream once the tree has been read.
try (InputStream content = client.getObject(objectInfo.bucket(), objectInfo.key()).getObjectContent()) {
return mapper.readTree(content);
}
}
Use of com.amazonaws.services.s3.AmazonS3ClientBuilder.defaultClient in project amazon-qldb-dmv-sample-java by aws-samples, from the class ExportJournal, method createJournalExportAndAwaitCompletion.
/**
 * Create a new journal export on a S3 bucket and wait for its completion.
 *
 * @param ledgerName
 *            The name of the ledger whose journal is exported.
 * @param s3BucketName
 *            The name of the S3 bucket to create journal export on.
 * @param s3Prefix
 *            The optional prefix name for the output objects of the export.
 * @param roleArn
 *            The IAM role ARN to be used when exporting the journal.
 * @param encryptionConfiguration
 *            The encryption settings to be used by the export job to write data in the given S3 bucket.
 * @param awaitTimeoutMs
 *            Milliseconds to wait for export to complete.
 * @return {@link ExportJournalToS3Result} from QLDB.
 * @throws InterruptedException if thread is being interrupted while waiting for the export to complete.
 */
public static ExportJournalToS3Result createJournalExportAndAwaitCompletion(String ledgerName, String s3BucketName, String s3Prefix, String roleArn, S3EncryptionConfiguration encryptionConfiguration, long awaitTimeoutMs) throws InterruptedException {
AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
createS3BucketIfNotExists(s3BucketName, s3Client);
// When no role was supplied, create one that allows QLDB to write to the bucket.
if (roleArn == null) {
roleArn = createExportRole(EXPORT_ROLE_NAME, AmazonIdentityManagementClientBuilder.defaultClient(), s3BucketName, encryptionConfiguration.getKmsKeyArn(), EXPORT_ROLE_POLICY_NAME);
}
try {
// Capture "now" once so the export window is exactly
// JOURNAL_EXPORT_TIME_WINDOW_MINUTES long; two separate Instant.now()
// calls (as before) made the window slightly longer than intended.
Instant now = Instant.now();
Date startTime = Date.from(now.minus(JOURNAL_EXPORT_TIME_WINDOW_MINUTES, ChronoUnit.MINUTES));
Date endTime = Date.from(now);
ExportJournalToS3Result exportJournalToS3Result = createExport(ledgerName, startTime, endTime, s3BucketName, s3Prefix, encryptionConfiguration, roleArn);
// Wait for export to complete. Poll the ledger passed by the caller, not
// Constants.LEDGER_NAME, so exports of other ledgers are tracked correctly.
waitForExportToComplete(ledgerName, exportJournalToS3Result.getExportId(), awaitTimeoutMs);
log.info("JournalS3Export for exportId " + exportJournalToS3Result.getExportId() + " is completed.");
return exportJournalToS3Result;
} catch (Exception e) {
log.error("Unable to create an export!", e);
throw e;
}
}
Aggregations