Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project bender by Nextdoor.
From class S3TransporterTest, method testCompressedPartitoned:
@Test
public void testCompressedPartitoned() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport = new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");
  partitions.put("day", "01");
  partitions.put("hour", "23");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("bucket", argument.getValue().getBucketName());
  assertEquals("basepath/day=01/hour=23/a_filename.bz2", argument.getValue().getKey());
  assertEquals(1, argument.getValue().getPartNumber());
  assertEquals(3, argument.getValue().getPartSize());
  assertEquals("123", argument.getValue().getUploadId());
}
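The getMockClient() helper is not shown in this snippet. A minimal sketch of what it plausibly does, assuming it stubs the multipart-upload calls with Mockito — the upload id "123" must match the assertion above; the body below is reconstructed, not the actual bender test code:

private AmazonS3Client getMockClient() {
  AmazonS3Client mockClient = mock(AmazonS3Client.class);

  // Stub the multipart-upload handshake so the transport receives a known upload id.
  InitiateMultipartUploadResult initResponse = new InitiateMultipartUploadResult();
  initResponse.setUploadId("123");
  doReturn(initResponse).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  // Stub part uploads with a dummy ETag so a later completeMultipartUpload can succeed.
  UploadPartResult partResponse = new UploadPartResult();
  partResponse.setETag("etag-1");
  doReturn(partResponse).when(mockClient).uploadPart(any(UploadPartRequest.class));

  return mockClient;
}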
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project bender by Nextdoor.
From class S3TransporterTest, method testContextBasedFilename:
@Test
public void testContextBasedFilename() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport = new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  TestContext context = new TestContext();
  context.setAwsRequestId("request_id");
  transport.sendBatch(buffer, partitions, context);
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("basepath/request_id.bz2", argument.getValue().getKey());
}
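Together, the two tests pin down how S3Transport builds object keys: partition key/value pairs become "name=value" path segments, the filename comes from the FILENAME_KEY partition entry when present and falls back to the Lambda request id otherwise, and ".bz2" is appended when compression is on. A hypothetical helper illustrating that logic — buildKey, its signature, and the FILENAME_KEY placeholder value are illustrative, not the actual S3Transport internals:

import java.util.LinkedHashMap;
import java.util.Map;

class KeySketch {
  // Illustrative stand-in for S3Transport.FILENAME_KEY; the real constant is defined in bender.
  static final String FILENAME_KEY = "__filename__";

  // Non-filename partition entries become path segments in insertion order;
  // the filename comes from FILENAME_KEY or falls back to the request id.
  static String buildKey(String basePath, LinkedHashMap<String, String> partitions,
      String awsRequestId, boolean compress) {
    Map<String, String> copy = new LinkedHashMap<>(partitions);
    String filename = copy.containsKey(FILENAME_KEY) ? copy.remove(FILENAME_KEY) : awsRequestId;
    StringBuilder key = new StringBuilder(basePath.endsWith("/") ? basePath : basePath + "/");
    for (Map.Entry<String, String> e : copy.entrySet()) {
      key.append(e.getKey()).append('=').append(e.getValue()).append('/');
    }
    key.append(filename);
    if (compress) {
      key.append(".bz2");
    }
    return key.toString();
  }
}

With this sketch, buildKey("basepath", {FILENAME_KEY: "a_filename", day: "01", hour: "23"}, ..., true) yields "basepath/day=01/hour=23/a_filename.bz2" and buildKey("basepath/", {}, "request_id", true) yields "basepath/request_id.bz2", matching both assertions.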
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project bender by Nextdoor.
From class S3SnsNotifier, method main:
public static void main(String[] args) throws ParseException, InterruptedException, IOException {
  formatter = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").withZoneUTC();

  /*
   * Parse cli arguments
   */
  Options options = new Options();
  options.addOption(Option.builder().longOpt("bucket").hasArg().required()
      .desc("Name of S3 bucket to list s3 objects from").build());
  options.addOption(Option.builder().longOpt("key-file").hasArg().required()
      .desc("Local file of S3 keys to process").build());
  options.addOption(Option.builder().longOpt("sns-arn").hasArg().required()
      .desc("SNS arn to publish to").build());
  options.addOption(Option.builder().longOpt("throttle-ms").hasArg()
      .desc("Amount of ms to wait between publishing to SNS").build());
  options.addOption(Option.builder().longOpt("processed-file").hasArg()
      .desc("Local file to use to store processed S3 object names").build());
  options.addOption(Option.builder().longOpt("skip-processed").hasArg(false)
      .desc("Whether to skip S3 objects that have been processed").build());
  options.addOption(Option.builder().longOpt("dry-run").hasArg(false)
      .desc("If set do not publish to SNS").build());

  CommandLineParser parser = new DefaultParser();
  CommandLine cmd = parser.parse(options, args);

  String bucket = cmd.getOptionValue("bucket");
  String keyFile = cmd.getOptionValue("key-file");
  String snsArn = cmd.getOptionValue("sns-arn");
  String processedFile = cmd.getOptionValue("processed-file", null);
  boolean skipProcessed = cmd.hasOption("skip-processed");
  dryRun = cmd.hasOption("dry-run");
  long throttle = Long.parseLong(cmd.getOptionValue("throttle-ms", "-1"));

  if (processedFile != null) {
    File file = new File(processedFile);
    if (!file.exists()) {
      logger.debug("creating local file to store processed s3 object names: " + processedFile);
      file.createNewFile();
    }
  }

  /*
   * Import S3 keys that have been processed
   */
  if (skipProcessed && processedFile != null) {
    try (BufferedReader br = new BufferedReader(new FileReader(processedFile))) {
      String line;
      while ((line = br.readLine()) != null) {
        alreadyPublished.add(line.trim());
      }
    }
  }

  /*
   * Setup writer for file containing processed S3 keys
   */
  FileWriter fw = null;
  BufferedWriter bw = null;
  if (processedFile != null) {
    fw = new FileWriter(processedFile, true);
    bw = new BufferedWriter(fw);
  }

  /*
   * Create clients
   */
  AmazonS3Client s3Client = new AmazonS3Client();
  AmazonSNSClient snsClient = new AmazonSNSClient();

  /*
   * Get S3 object list
   */
  try (BufferedReader br = new BufferedReader(new FileReader(keyFile))) {
    String line;
    while ((line = br.readLine()) != null) {
      String key = line.trim();

      if (alreadyPublished.contains(key)) {
        logger.info("skipping " + key);
        // Skip keys that were already published; without this continue the key
        // would be published again despite the log message.
        continue;
      }

      ObjectMetadata om = s3Client.getObjectMetadata(bucket, key);
      S3EventNotification s3Notification = getS3Notification(key, bucket, om.getContentLength());
      String json = s3Notification.toJson();

      /*
       * Publish to SNS
       */
      if (publish(snsArn, json, snsClient, key) && processedFile != null) {
        bw.write(key + "\n");
        bw.flush();
      }

      if (throttle != -1) {
        Thread.sleep(throttle);
      }
    }
  }

  if (processedFile != null) {
    bw.close();
    fw.close();
  }
}
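The publish(...) helper and the static fields it relies on (logger, dryRun, alreadyPublished, formatter) are defined elsewhere in S3SnsNotifier. A plausible sketch of publish, assuming it wraps the SDK's AmazonSNSClient.publish call and honors the dry-run flag — the body below is reconstructed, not the actual bender code:

private static boolean publish(String arn, String msg, AmazonSNSClient snsClient, String s3Key) {
  if (dryRun) {
    // Dry-run mode: report success without touching SNS.
    logger.info("dry-run: would publish " + s3Key);
    return true;
  }
  try {
    // PublishRequest(topicArn, message) is the standard AWS SDK v1 constructor.
    snsClient.publish(new PublishRequest(arn, msg));
    return true;
  } catch (Exception e) {
    logger.error("failed to publish " + s3Key, e);
    return false;
  }
}

Returning false on failure keeps the key out of the processed-file, so a later run with --skip-processed retries it.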
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project Synapse-Stack-Builder by Sage-Bionetworks.
From class BuildStackMainTest, method before:
@Before
public void before() throws IOException {
  inputProps = TestHelper.createInputProperties("dev");
  InputConfiguration config = TestHelper.createTestConfig("dev");
  defaultProps = TestHelper.createDefaultProperties();
  clientFactory = new MockAmazonClientFactory();
  AmazonS3Client mockS3Client = clientFactory.createS3Client();
  AmazonEC2Client mockEC2Client = clientFactory.createEC2Client();
  AmazonSNSClient mockSNSnsClient = clientFactory.createSNSClient();
  AmazonRDSClient mockRdsClient = clientFactory.createRDSClient();

  // Write the default properties.
  when(mockS3Client.getObject(any(GetObjectRequest.class), any(File.class)))
      .thenAnswer(new Answer<ObjectMetadata>() {
        public ObjectMetadata answer(InvocationOnMock invocation) throws Throwable {
          // Write the property file
          File file = (File) invocation.getArguments()[1];
          FileWriter writer = new FileWriter(file);
          try {
            defaultProps.store(writer, "test generated");
          } finally {
            writer.close();
          }
          return new ObjectMetadata();
        }
      });

  // Return a valid EC2 security group.
  DescribeSecurityGroupsRequest dsgr = new DescribeSecurityGroupsRequest()
      .withGroupNames(config.getElasticSecurityGroupName());
  when(mockEC2Client.describeSecurityGroups(dsgr)).thenReturn(new DescribeSecurityGroupsResult()
      .withSecurityGroups(new SecurityGroup().withGroupName(config.getElasticSecurityGroupName())));

  // Return a valid topic
  String topicArn = "some:arn";
  when(mockSNSnsClient.createTopic(new CreateTopicRequest(config.getRDSAlertTopicName())))
      .thenReturn(new CreateTopicResult().withTopicArn(topicArn));
  when(mockSNSnsClient.listSubscriptionsByTopic(new ListSubscriptionsByTopicRequest(topicArn)))
      .thenReturn(new ListSubscriptionsByTopicResult().withSubscriptions(new Subscription()));

  // Return a valid group
  when(mockRdsClient.describeDBParameterGroups(new DescribeDBParameterGroupsRequest()
      .withDBParameterGroupName(config.getDatabaseParameterGroupName())))
      .thenReturn(new DescribeDBParameterGroupsResult().withDBParameterGroups(
          new DBParameterGroup().withDBParameterGroupName(config.getDatabaseParameterGroupName())));
  when(mockRdsClient.describeDBParameters(new DescribeDBParametersRequest()
      .withDBParameterGroupName(config.getDatabaseParameterGroupName())))
      .thenReturn(new DescribeDBParametersResult()
          .withParameters(new Parameter().withParameterName(Constants.DB_PARAM_KEY_SLOW_QUERY_LOG))
          .withParameters(new Parameter().withParameterName(Constants.DB_PARAM_KEY_LONG_QUERY_TIME)));
}
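MockAmazonClientFactory is part of the Synapse-Stack-Builder test sources and is not shown here. A minimal sketch of how such a factory could work, assuming it memoizes one Mockito mock per AWS service so the test and the code under test share the same instances — the structure below is an assumption, not the project's actual class:

public class MockAmazonClientFactory {
  // One mock per service, created once so repeated create*() calls return the
  // same instance and stubbing done in the test is visible to the code under test.
  private final AmazonS3Client s3 = Mockito.mock(AmazonS3Client.class);
  private final AmazonEC2Client ec2 = Mockito.mock(AmazonEC2Client.class);
  private final AmazonSNSClient sns = Mockito.mock(AmazonSNSClient.class);
  private final AmazonRDSClient rds = Mockito.mock(AmazonRDSClient.class);

  public AmazonS3Client createS3Client() { return s3; }
  public AmazonEC2Client createEC2Client() { return ec2; }
  public AmazonSNSClient createSNSClient() { return sns; }
  public AmazonRDSClient createRDSClient() { return rds; }
}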
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project opentest by mcdcorp.
From class GetS3Object, method run:
@Override
public void run() {
  super.run();

  String awsCredentialsProfile = this.readStringArgument("awsProfile", "default");
  String bucket = this.readStringArgument("bucket");
  String objectKey = this.readStringArgument("objectKey");
  String targetFilePath = this.readStringArgument("targetFile");
  Boolean overwrite = this.readBooleanArgument("overwrite", false);

  AmazonS3 s3Client = new AmazonS3Client(new ProfileCredentialsProvider(awsCredentialsProfile));
  S3Object object = s3Client.getObject(new GetObjectRequest(bucket, objectKey));
  InputStream objectDataStream = object.getObjectContent();

  if (targetFilePath != null) {
    File targetFile = new File(targetFilePath);
    if (!targetFile.isAbsolute()) {
      targetFile = Paths.get(this.getActor().getTempDir().getAbsolutePath(), targetFilePath).toFile();
    }
    targetFile.getParentFile().mkdirs();

    try {
      if (overwrite) {
        Files.copy(objectDataStream, targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
      } else {
        Files.copy(objectDataStream, targetFile.toPath());
      }
    } catch (Exception ex) {
      throw new RuntimeException(String.format(
          "Failed to transfer data from the input stream into file %s", targetFilePath), ex);
    }

    this.writeArgument("targetFile", targetFile.getAbsolutePath());
  } else {
    // TODO: Make targetFile arg optional so this branch can execute.
    // Read data in memory and write it to an output value
  }
}
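For the empty else branch above, a hypothetical completion matching its TODO, assuming the object body should be drained into a string output argument — the argument name "content" is illustrative and not part of the opentest API (requires java.io.InputStreamReader, java.nio.charset.StandardCharsets, and java.util.stream.Collectors):

// Illustrative only: drain the S3 object stream into memory and expose it as
// an output argument instead of writing it to disk.
String data = new BufferedReader(new InputStreamReader(objectDataStream, StandardCharsets.UTF_8))
    .lines()
    .collect(Collectors.joining("\n"));
this.writeArgument("content", data);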