use of com.amazonaws.services.s3.model.GetObjectRequest (AWS SDK for Java v1) in project verify-hub by alphagov.
the class S3ConfigSourceTest method getRemoteConfigReturnsCachedConfigWhenRepeatedlyCalled.
@Test
public void getRemoteConfigReturnsCachedConfigWhenRepeatedlyCalled() throws IOException {
    // Self-service config with remote config enabled, parsed from the test fixture JSON.
    SelfServiceConfig selfServiceConfig = objectMapper.readValue(selfServiceConfigEnabledJson, SelfServiceConfig.class);
    // Stub S3 so the first (and only) fetch returns the remote test config.
    when(s3Client.getObject(new GetObjectRequest(BUCKET_NAME, OBJECT_KEY))).thenReturn(s3Object);
    when(s3Object.getObjectContent()).thenReturn(getObjectStream("/remote-test-config.json"));
    when(s3Object.getObjectMetadata()).thenReturn(objectMetadata);
    when(objectMetadata.getLastModified()).thenReturn(new Date());
    S3ConfigSource testSource = new S3ConfigSource(selfServiceConfig, s3Client, objectMapper);
    RemoteConfigCollection result1 = testSource.getRemoteConfig();
    RemoteConfigCollection result2 = testSource.getRemoteConfig();
    // S3 content must be read exactly once; the second call is served from cache.
    verify(s3Object, times(1)).getObjectContent();
    // Bug fix: the original `assertThat(result1 == result2);` created an assertion
    // object but never evaluated it (no .isTrue()), so this check could never fail.
    // Assert cache identity explicitly instead.
    assertThat(result1).isSameAs(result2);
}
use of com.amazonaws.services.s3.model.GetObjectRequest (AWS SDK for Java v1) in project stocator by SparkTC.
the class COSInputStream method reopen.
/**
 * Opens up the stream at specified target position and for given length.
 * <p>
 * Any previously open wrapped stream is closed first, the upper bound of the
 * ranged GET is recomputed from the input policy, and a new S3 object stream
 * is opened starting at {@code targetPos}. On success the stream-position
 * fields ({@code contentRangeStart}, {@code pos}) are updated to the new window.
 *
 * @param reason reason for reopen (used only in log/error messages)
 * @param targetPos target position
 * @param length length requested
 * @throws IOException on any failure to open the object
 */
private synchronized void reopen(String reason, long targetPos, long length) throws IOException {
    if (wrappedStream != null) {
        // Release the previous connection before opening a new one.
        closeStream("reopen(" + reason + ")", contentRangeFinish, false);
    }
    // Upper bound of the new range, derived from the read policy and readahead setting.
    contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos, length, contentLength, readahead);
    LOG.debug("reopen({}) for {} range[{}-{}], length={}," + " streamPosition={}, nextReadPosition={}", uri, reason, targetPos, contentRangeFinish, length, pos, nextReadPos);
    try {
        // HTTP byte ranges are inclusive, hence the -1 on the finish offset.
        GetObjectRequest request = new GetObjectRequest(bucket, key).withRange(targetPos, contentRangeFinish - 1);
        wrappedStream = client.getObject(request).getObjectContent();
        contentRangeStart = targetPos;
        if (wrappedStream == null) {
            throw new IOException("Null IO stream from reopen of (" + reason + ") " + uri);
        }
    } catch (AmazonClientException e) {
        // Translate the SDK exception into an IOException with positional context.
        throw COSUtils.translateException("Reopen at position " + targetPos, uri, e);
    }
    // Only commit the new position once the stream was opened successfully.
    pos = targetPos;
}
use of com.amazonaws.services.s3.model.GetObjectRequest (AWS SDK for Java v1) in project Singularity by HubSpot.
the class S3ArtifactChunkDownloader method createDownloader.
/**
 * Builds a {@link Callable} that downloads one byte-range chunk of the artifact
 * to a temporary path and returns that path.
 *
 * @param retryNum retry attempt number, used to keep retry chunk files distinct
 * @return a callable performing the ranged download for {@code chunk}
 */
private Callable<Path> createDownloader(final int retryNum) {
    // Lambda form of the original anonymous Callable; behavior is unchanged.
    return () -> {
        // Chunk 0 writes straight to the final destination; later chunks get
        // their own suffixed temp file so retries never collide.
        final Path target = (chunk == 0) ? downloadTo : Paths.get(downloadTo + "_" + chunk + "_" + retryNum);
        target.toFile().deleteOnExit();
        final long startedAt = System.currentTimeMillis();
        final long rangeStart = chunk * chunkSize;
        final long rangeEnd = Math.min((chunk + 1) * chunkSize - 1, length);
        log.info("Downloading {} - chunk {} (retry {}) ({}-{}) to {}", s3Artifact.getFilename(), chunk, retryNum, rangeStart, rangeEnd, target);
        final GetObjectRequest rangedRequest = new GetObjectRequest(s3Artifact.getS3Bucket(), s3Artifact.getS3ObjectKey()).withRange(rangeStart, rangeEnd);
        final S3Object fetched = s3.getObject(rangedRequest);
        // try-with-resources guarantees the S3 content stream is closed after the copy.
        try (InputStream content = fetched.getObjectContent()) {
            Files.copy(content, target, StandardCopyOption.REPLACE_EXISTING);
        }
        log.info("Finished downloading chunk {} (retry {}) of {} ({} bytes) in {}", chunk, retryNum, s3Artifact.getFilename(), rangeEnd - rangeStart, JavaUtils.duration(startedAt));
        return target;
    };
}
use of com.amazonaws.services.s3.model.GetObjectRequest (AWS SDK for Java v1) in project incubator-gobblin by apache.
the class AWSSdkClient method downloadS3Object.
/**
 * Download a S3 object to local directory.
 *
 * @param s3ObjectSummary S3 object summary for the object to download
 * @param targetDirectory Local target directory to download the object to
 * @throws IOException If any errors were encountered in downloading the object
 */
public void downloadS3Object(S3ObjectSummary s3ObjectSummary, String targetDirectory) throws IOException {
    final AmazonS3 amazonS3 = getS3Client();
    final GetObjectRequest getObjectRequest = new GetObjectRequest(s3ObjectSummary.getBucketName(), s3ObjectSummary.getKey());
    // Bug fix: the S3Object was never closed, leaking the underlying HTTP
    // connection if the copy failed. S3Object is Closeable in the v1 SDK, so
    // try-with-resources releases the connection on every path.
    try (final S3Object s3Object = amazonS3.getObject(getObjectRequest)) {
        // NOTE(review): the key is used verbatim in the local path; assumes keys
        // contain no traversal segments ("..") — confirm against producers.
        final String targetFile = StringUtils.removeEnd(targetDirectory, File.separator) + File.separator + s3Object.getKey();
        // copyInputStreamToFile consumes and closes the object content stream.
        FileUtils.copyInputStreamToFile(s3Object.getObjectContent(), new File(targetFile));
        LOGGER.info("S3 object downloaded to file: " + targetFile);
    }
}
use of com.amazonaws.services.s3.model.GetObjectRequest (AWS SDK for Java v1) in project bender by Nextdoor.
the class S3EventIterator method updateCursor.
/**
 * Advances the cursor to the next S3 object named by the notification records
 * and opens a line iterator over its (optionally gzipped) contents. Runs on
 * the very first call, or whenever the current object's lines are exhausted
 * and more records remain; otherwise it is a no-op.
 */
private void updateCursor() {
    if (this.currentIndex == 0 || (this.currentIndex < this.records.size() && !this.lineIterator.hasNext())) {
        /*
         * The previous reader must be closed in order to prevent S3 connection leaking
         */
        closeCurrentReader();
        /*
         * Use the S3 trigger event time for arrival time of records in file. This is less precise but
         * avoids making a call to the S3 api to find file creation time. Note that if the
         * deserializer creates a {@link com.nextdoor.bender.deserializer.DeserializedTimeSeriesEvent}
         * then this arrival time is not used.
         */
        S3EventNotificationRecord event = this.records.get(currentIndex);
        this.arrivalTime = event.getEventTime().toDate().getTime();
        this.currentS3Entity = event.getS3();
        /*
         * The S3 Object key is URL encoded and must be decoded before it can be used by the
         * AmazonS3Client
         */
        String key;
        try {
            key = URLDecoder.decode(this.currentS3Entity.getObject().getKey(), "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is mandated by the JVM spec, so this branch is effectively unreachable.
            throw new RuntimeException(e);
        }
        /*
         * Stream object back from S3 into a reader
         */
        String bucketName = this.currentS3Entity.getBucket().getName();
        logger.debug("opening s3://" + bucketName + "/" + key);
        GetObjectRequest req = new GetObjectRequest(bucketName, key);
        S3Object obj = client.getObject(req);
        logger.trace("s3 get request id: " + client.getCachedResponseMetadata(req).getRequestId() + " host: " + client.getCachedResponseMetadata(req).getHostId() + " cloudfrontid: " + client.getCachedResponseMetadata(req).getCloudFrontId());
        long notificationDelay = this.arrivalTime - obj.getObjectMetadata().getLastModified().getTime();
        if (notificationDelay > NOTIFICATION_DELAY_GRACE_PERIOD) {
            /*
             * Notification delay is measured from when the object was last modified time in S3 to when
             * the SNS message was actually recieved by Bender. (If the producer only writes objects
             * once, then this is effectively the created time.)
             */
            // Bug fix: "received at" previously ran straight into the timestamp
            // ("...received at2021-..."); a trailing space was added.
            logger.debug("Notification for s3://" + bucketName + "/" + key + " was received at " + event.getEventTime().toDate() + " - " + (notificationDelay / 1000) + " sec after the file" + " landed in S3 (" + obj.getObjectMetadata().getLastModified() + ").");
        }
        // TODO: support different types of compressions
        if (key.endsWith(".gz")) {
            GZIPInputStream gzip;
            try {
                gzip = new GZIPInputStream(obj.getObjectContent());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            reader = new BufferedReader(new InputStreamReader(gzip));
        } else {
            reader = new BufferedReader(new InputStreamReader(obj.getObjectContent()));
        }
        /*
         * Note the BufferedReader is lazy and so is the iterator. The object is directly streamed
         * from S3, fed into an input stream and consumed line by line by the iterator.
         */
        this.lineIterator = reader.lines().iterator();
        currentIndex++;
    }
}
Aggregations