Use of com.amazonaws.services.s3.model.ObjectMetadata in project hadoop by apache: the testFile method of the class TestS3AGetFileStatus.
@Test
public void testFile() throws Exception {
  Path path = new Path("/file");
  String key = path.toUri().getPath().substring(1);

  // Stub the metadata that the mocked S3 client returns for this key.
  ObjectMetadata objectMetadata = new ObjectMetadata();
  objectMetadata.setContentLength(1L);
  objectMetadata.setLastModified(new Date(2L));
  when(s3.getObjectMetadata(argThat(correctGetMetadataRequest(BUCKET, key))))
      .thenReturn(objectMetadata);

  FileStatus status = fs.getFileStatus(path);

  // The status must be a file whose path, length and mtime all come
  // straight from the stubbed object metadata.
  assertNotNull(status);
  assertEquals(fs.makeQualified(path), status.getPath());
  assertTrue(status.isFile());
  assertEquals(objectMetadata.getContentLength(), status.getLen());
  assertEquals(objectMetadata.getLastModified().getTime(), status.getModificationTime());
}
Use of com.amazonaws.services.s3.model.ObjectMetadata in project hadoop by apache: the newObjectMetadata method of the class S3AFileSystem.
/**
 * Builds a fresh {@link ObjectMetadata} instance, pre-populated with
 * the standard metadata headers this filesystem always attaches
 * (for example, encryption settings).
 *
 * @return a newly created, pre-configured metadata instance
 */
public ObjectMetadata newObjectMetadata() {
  ObjectMetadata metadata = new ObjectMetadata();
  setOptionalObjectMetadata(metadata);
  return metadata;
}
Use of com.amazonaws.services.s3.model.ObjectMetadata in project ignite by apache: the write method of the class S3CheckpointSpi.
/**
 * Serializes the given checkpoint data and uploads it to the configured
 * S3 bucket under the checkpoint's key, applying a server-side
 * encryption algorithm when one has been configured.
 *
 * @param data Checkpoint data.
 * @throws IgniteCheckedException Thrown if an error occurs while marshalling.
 * @throws AmazonClientException If an error occurs while querying Amazon S3.
 */
private void write(S3CheckpointData data) throws IgniteCheckedException, AmazonClientException {
    assert data != null;

    if (log.isDebugEnabled())
        log.debug("Writing data to S3 [bucket=" + bucketName + ", key=" + data.getKey() + ']');

    // Marshal the checkpoint into its binary form first; the metadata
    // must carry the exact content length of that payload.
    byte[] serialized = data.toBytes();

    ObjectMetadata objMeta = new ObjectMetadata();
    objMeta.setContentLength(serialized.length);

    // Only request server-side encryption when an algorithm is set.
    if (!F.isEmpty(sseAlg))
        objMeta.setSSEAlgorithm(sseAlg);

    s3.putObject(bucketName, data.getKey(), new ByteArrayInputStream(serialized), objMeta);
}
Use of com.amazonaws.services.s3.model.ObjectMetadata in project stocator by SparkTC: the putObject method of the class COSBlockOutputStream.
/**
 * Upload the current block as a single PUT request; if the buffer is empty a
 * 0-byte PUT will be invoked, as it is needed to create an entry at the far
 * end.
 *
 * The upload itself runs asynchronously on {@code executorService}, but this
 * method blocks until it completes (or fails), so callers observe a
 * synchronous PUT.
 *
 * @throws IOException any problem
 */
private void putObject() throws IOException {
LOG.debug("Executing regular upload for {}", writeOperationHelper);
final COSDataBlocks.DataBlock block = getActiveBlock();
int size = block.dataSize();
// startUpload() hands back either a file or a stream view of the block's
// data; build the matching PUT request for whichever form we got.
final COSDataBlocks.BlockUploadData uploadData = block.startUpload();
final PutObjectRequest putObjectRequest = uploadData.hasFile() ? writeOperationHelper.newPutRequest(uploadData.getFile()) : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);
// Attach user metadata and a content type; fall back to a generic
// binary content type when none was configured.
final ObjectMetadata om = new ObjectMetadata();
om.setUserMetadata(mMetadata);
if (contentType != null && !contentType.isEmpty()) {
om.setContentType(contentType);
} else {
om.setContentType("application/octet-stream");
}
putObjectRequest.setMetadata(om);
ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {
@Override
public PutObjectResult call() throws Exception {
PutObjectResult result;
try {
// the putObject call automatically closes the input
// stream afterwards.
result = writeOperationHelper.putObject(putObjectRequest);
} finally {
// Release the upload data and block even if the PUT failed.
closeAll(LOG, uploadData, block);
}
return result;
}
});
// The block has been handed off to the executor; detach it so a new
// active block can be started.
clearActiveBlock();
// wait for completion
try {
putObjectResult.get();
} catch (InterruptedException ie) {
LOG.warn("Interrupted object upload", ie);
// Preserve the interrupt status for callers further up the stack.
Thread.currentThread().interrupt();
} catch (ExecutionException ee) {
// Unwrap the executor's wrapper and rethrow the underlying cause
// as an IOException tagged with the upload context.
throw extractException("regular upload", key, ee);
}
}
Use of com.amazonaws.services.s3.model.ObjectMetadata in project hippo by NHS-digital-website: the shouldReturnMetadata method of the class S3ObjectMetadataImplTest.
@Test
public void shouldReturnMetadata() {
  final String bucketName = "test.bucket";
  final String objectKey = "A3/C814DE/test.pdf";
  final String contentType = "test/type";
  final long size = 123456;

  // Build the raw S3 metadata that the adapter under test wraps.
  ObjectMetadata s3Metadata = new ObjectMetadata();
  s3Metadata.setContentType(contentType);
  s3Metadata.setContentLength(size);

  S3ObjectMetadata actual = new S3ObjectMetadataImpl(s3Metadata, bucketName, objectKey);

  // Every accessor must reflect the wrapped metadata and key.
  assertThat("file name is correct", actual.getFileName(), equalTo("test.pdf"));
  assertThat("reference is correct", actual.getReference(), equalTo(objectKey));
  assertThat("content type is correct", actual.getMimeType(), equalTo(contentType));
  assertThat("url is correct", actual.getUrl(), equalTo("https://" + bucketName + "/" + objectKey));
  assertThat("size is correct", actual.getSize(), equalTo(size));
}
Aggregations