Usage of com.amazonaws.services.s3.AmazonS3Client in the project acs-aem-commons (Adobe-Consulting-Services): class S3AssetIngestorTest, method setup().
@Before
public void setup() throws PersistenceException {
    // Any ResourceResolver adapts straight to the mocked AssetManager.
    context.registerAdapter(ResourceResolver.class, AssetManager.class,
            (Function<ResourceResolver, AssetManager>) input -> assetManager);

    // Minimal JCR tree the ingestor writes under.
    context.create().resource("/content/dam", JcrConstants.JCR_PRIMARYTYPE, "sling:Folder");
    context.resourceResolver().commit();

    ingestor = new S3AssetIngestor(context.getService(MimeTypeService.class));
    ingestor.jcrBasePath = "/content/dam";
    ingestor.ignoreFileList = Collections.emptyList();
    ingestor.ignoreExtensionList = Collections.emptyList();
    ingestor.ignoreFolderList = Arrays.asList(".ds_store");
    ingestor.existingAssetAction = AssetIngestor.AssetAction.skip;

    // In-memory S3 endpoint on a free local port. Path-style access is needed
    // because the mock endpoint cannot resolve virtual-host bucket names.
    int port = FreePortFinder.findFreeLocalPort();
    s3Mock = new S3Mock.Builder().withPort(port).withInMemoryBackend().build();
    s3Mock.start();
    s3Client = new AmazonS3Client(new AnonymousAWSCredentials());
    s3Client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
    s3Client.setEndpoint("http://localhost:" + port);
    ingestor.s3Client = s3Client;
    ingestor.bucket = TEST_BUCKET;
    s3Client.createBucket(TEST_BUCKET);

    // Make deferred actions run synchronously against the test resolver.
    doAnswer(invocation -> {
        CheckedConsumer<ResourceResolver> method =
                (CheckedConsumer<ResourceResolver>) invocation.getArguments()[0];
        method.accept(context.resourceResolver());
        return null;
    }).when(actionManager).deferredWithResolver(any(CheckedConsumer.class));
}
Usage of com.amazonaws.services.s3.AmazonS3Client in the project acs-aem-commons (Adobe-Consulting-Services): class S3AssetIngestor, method buildProcess().
@Override
public void buildProcess(ProcessInstance instance, ResourceResolver rr) throws LoginException, RepositoryException {
    // Listing under the base path relies on a trailing slash; normalize it once up front.
    if (StringUtils.isNotBlank(s3BasePath) && !s3BasePath.endsWith("/")) {
        s3BasePath += "/";
    }
    instance.getInfo().setDescription(baseItemName + "->" + jcrBasePath);
    instance.defineCriticalAction("Create Folders", rr, this::createFolders);
    instance.defineCriticalAction("Import Assets", rr, this::importAssets);
    // The actions above are only registered here and run later, so the client
    // may safely be constructed after they are defined.
    s3Client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
    if (StringUtils.isNotBlank(endpointUrl)) {
        s3Client.setEndpoint(endpointUrl);
    }
}
Usage of com.amazonaws.services.s3.AmazonS3Client in the project bender (Nextdoor): class S3EventIterator, method updateCursor().
/**
 * Advances to the next S3 object referenced by the notification records when the
 * current line iterator is exhausted (or on first use), opening a streaming
 * reader over the object's contents.
 */
private void updateCursor() {
    if (this.currentIndex == 0 || (this.currentIndex < this.records.size() && !this.lineIterator.hasNext())) {
        /*
         * The previous reader must be closed in order to prevent S3 connection leaking
         */
        closeCurrentReader();
        /*
         * Use the S3 trigger event time for arrival time of records in file. This is less precise but
         * avoids making a call to the S3 api to find file creation time. Note that if the
         * deserializer creates a {@link com.nextdoor.bender.deserializer.DeserializedTimeSeriesEvent}
         * then this arrival time is not used.
         */
        S3EventNotificationRecord event = this.records.get(currentIndex);
        this.arrivalTime = event.getEventTime().toDate().getTime();
        this.currentS3Entity = event.getS3();
        /*
         * The S3 Object key is URL encoded and must be decoded before it can be used by the
         * AmazonS3Client
         */
        String key;
        try {
            key = URLDecoder.decode(this.currentS3Entity.getObject().getKey(), "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is guaranteed by the JVM spec, so this is effectively unreachable.
            throw new RuntimeException(e);
        }
        /*
         * Stream object back from S3 into a reader
         */
        String bucketName = this.currentS3Entity.getBucket().getName();
        logger.debug("opening s3://" + bucketName + "/" + key);
        GetObjectRequest req = new GetObjectRequest(bucketName, key);
        S3Object obj = client.getObject(req);
        if (logger.isTraceEnabled()) {
            // Guarded so the three metadata lookups and the string concatenation
            // only happen when TRACE is actually enabled.
            logger.trace("s3 get request id: " + client.getCachedResponseMetadata(req).getRequestId()
                + " host: " + client.getCachedResponseMetadata(req).getHostId()
                + " cloudfrontid: " + client.getCachedResponseMetadata(req).getCloudFrontId());
        }
        // TODO: support different types of compressions
        if (key.endsWith(".gz")) {
            GZIPInputStream gzip;
            try {
                gzip = new GZIPInputStream(obj.getObjectContent());
            } catch (IOException e) {
                /*
                 * Close the S3 object before propagating, otherwise the underlying
                 * HTTP connection leaks when the gzip header is invalid.
                 */
                try {
                    obj.close();
                } catch (IOException ignored) {
                    // Best effort; the original failure is the one worth reporting.
                }
                throw new RuntimeException(e);
            }
            // NOTE(review): InputStreamReader uses the platform default charset here;
            // if the log files are UTF-8 this should specify StandardCharsets.UTF_8 — confirm.
            reader = new BufferedReader(new InputStreamReader(gzip));
        } else {
            reader = new BufferedReader(new InputStreamReader(obj.getObjectContent()));
        }
        /*
         * Note the BufferedReader is lazy and so is the iterator. The object is directly streamed
         * from S3, fed into an input stream and consumed line by line by the iterator.
         */
        this.lineIterator = reader.lines().iterator();
        currentIndex++;
    }
}
Usage of com.amazonaws.services.s3.AmazonS3Client in the project bender (Nextdoor): class S3TransporterTest, method getMockClient().
/**
 * Builds a spied {@link AmazonS3Client} whose multipart-upload calls return
 * canned results instead of touching S3.
 */
private AmazonS3Client getMockClient() {
    AmazonS3Client client = spy(AmazonS3Client.class);

    // Every uploaded part reports a fixed ETag.
    UploadPartResult partResult = new UploadPartResult();
    partResult.setETag("foo");
    doReturn(partResult).when(client).uploadPart(any(UploadPartRequest.class));

    // Initiating a multipart upload always yields the same upload id.
    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId("123");
    doReturn(initResult).when(client).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

    return client;
}
Usage of com.amazonaws.services.s3.AmazonS3Client in the project bender (Nextdoor): class S3TransporterTest, method testUnpartitioned().
@Test
public void testUnpartitioned() throws TransportException, IllegalStateException, IOException {
    // Mock client with canned multipart-upload replies.
    AmazonS3Client mockClient = getMockClient();

    // Buffer a single serialized event ("foo" plus newline = 4 bytes).
    S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
    InternalEvent event = mock(InternalEvent.class);
    doReturn("foo").when(event).getSerialized();
    buffer.add(event);

    Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
    S3Transport transport = new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);

    // Only the filename partition key is set, so the object key is basepath/a_filename.
    LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
    partitions.put(S3Transport.FILENAME_KEY, "a_filename");

    transport.sendBatch(buffer, partitions, new TestContext());

    ArgumentCaptor<UploadPartRequest> uploaded = ArgumentCaptor.forClass(UploadPartRequest.class);
    verify(mockClient).uploadPart(uploaded.capture());

    UploadPartRequest req = uploaded.getValue();
    assertEquals("bucket", req.getBucketName());
    assertEquals("basepath/a_filename", req.getKey());
    assertEquals(1, req.getPartNumber());
    assertEquals(4, req.getPartSize()); // "foo\n"
    assertEquals("123", req.getUploadId());
}
Aggregations