Example 36 with Upload

Use of com.amazonaws.services.s3.transfer.Upload in project apex-malhar by apache.

In class S3FileMerger, the method verifyAndEmitFileMerge:

/**
 * Sends the CompleteMultipartUploadRequest to S3 once all blocks of a file have been uploaded.
 * @param keyName key of the file being uploaded into S3
 */
private void verifyAndEmitFileMerge(String keyName) {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    S3InitiateFileUploadOperator.UploadFileMetadata uploadFileMetadata = fileMetadatas.get(keyName);
    List<PartETag> partETags = uploadParts.get(keyName);
    if (partETags == null || uploadFileMetadata == null || uploadFileMetadata.getFileMetadata().getNumberOfBlocks() != partETags.size()) {
        return;
    }
    if (partETags.size() <= 1) {
        // A single-block file was uploaded with a plain putObject() rather than
        // the multipart API, so there is no multipart upload to complete.
        uploadedFiles.add(keyName);
        LOG.debug("Uploaded file {} successfully", keyName);
        return;
    }
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, keyName, uploadFileMetadata.getUploadId(), partETags);
    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    if (result.getETag() != null) {
        uploadedFiles.add(keyName);
        LOG.debug("Uploaded file {} successfully", keyName);
    }
}
Also used: CompleteMultipartUploadResult (com.amazonaws.services.s3.model.CompleteMultipartUploadResult), PartETag (com.amazonaws.services.s3.model.PartETag), CompleteMultipartUploadRequest (com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)
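
For context, here is a minimal standalone sketch of the multipart lifecycle that verifyAndEmitFileMerge() finishes: initiate the upload, upload parts while collecting PartETags, then complete. The bucket and key names are hypothetical and the client is built with SDK defaults; the real operator receives the upload ID and part ETags from upstream operators instead of producing them itself.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;

import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;

public class MultipartCompleteSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String bucket = "example-bucket";   // hypothetical
        String key = "merged/output.dat";   // hypothetical

        // Step 1: initiate and remember the upload ID.
        InitiateMultipartUploadResult init =
            s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));

        // Step 2: upload parts, collecting the PartETags.
        // Every part except the last must be at least 5 MB.
        byte[] part = new byte[5 * 1024 * 1024];
        List<PartETag> partETags = new ArrayList<>();
        UploadPartResult partResult = s3.uploadPart(new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(init.getUploadId())
            .withPartNumber(1)
            .withInputStream(new ByteArrayInputStream(part))
            .withPartSize(part.length));
        partETags.add(partResult.getPartETag());

        // Step 3: completion needs every PartETag collected above.
        CompleteMultipartUploadResult done = s3.completeMultipartUpload(
            new CompleteMultipartUploadRequest(bucket, key, init.getUploadId(), partETags));
        System.out.println("Merged ETag: " + done.getETag());
    }
}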

Example 37 with Upload

Use of com.amazonaws.services.s3.transfer.Upload in project stocator by SparkTC.

In class COSBlockOutputStream, the method putObject:

/**
 * Uploads the current block as a single PUT request; if the buffer is empty, a
 * 0-byte PUT is issued, since one is needed to create an entry at the far
 * end.
 *
 * @throws IOException any problem
 */
private void putObject() throws IOException {
    LOG.debug("Executing regular upload for {}", writeOperationHelper);
    final COSDataBlocks.DataBlock block = getActiveBlock();
    int size = block.dataSize();
    final COSDataBlocks.BlockUploadData uploadData = block.startUpload();
    final PutObjectRequest putObjectRequest = uploadData.hasFile() ? writeOperationHelper.newPutRequest(uploadData.getFile()) : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);
    final ObjectMetadata om = new ObjectMetadata();
    om.setUserMetadata(mMetadata);
    if (contentType != null && !contentType.isEmpty()) {
        om.setContentType(contentType);
    } else {
        om.setContentType("application/octet-stream");
    }
    putObjectRequest.setMetadata(om);
    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {

        @Override
        public PutObjectResult call() throws Exception {
            PutObjectResult result;
            try {
                // the putObject call automatically closes the input
                // stream afterwards.
                result = writeOperationHelper.putObject(putObjectRequest);
            } finally {
                closeAll(LOG, uploadData, block);
            }
            return result;
        }
    });
    clearActiveBlock();
    // wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload", ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw extractException("regular upload", key, ee);
    }
}
Also used: PutObjectResult (com.amazonaws.services.s3.model.PutObjectResult), COSUtils.extractException (com.ibm.stocator.fs.cos.COSUtils.extractException), COSUtils.translateException (com.ibm.stocator.fs.cos.COSUtils.translateException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), AmazonClientException (com.amazonaws.AmazonClientException), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest)
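
The pattern above (submit the PUT to an executor, then block on the future so failures surface on the calling thread) reduces to a short sketch. The bucket, key, and payload below are hypothetical, and a plain ExecutorService stands in for the Guava ListenableFuture machinery used in stocator:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class AsyncPutSketch {
    public static void main(String[] args) throws Exception {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] data = "example payload".getBytes(StandardCharsets.UTF_8);

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(data.length);
        om.setContentType("application/octet-stream"); // same fallback as above

        ExecutorService pool = Executors.newSingleThreadExecutor();
        try {
            // Submit the PUT; the future lets the caller decide when to block.
            Future<PutObjectResult> future = pool.submit(() ->
                s3.putObject(new PutObjectRequest("example-bucket", "example-key",
                    new ByteArrayInputStream(data), om)));
            // get() rethrows an upload failure as ExecutionException.
            System.out.println("ETag: " + future.get().getETag());
        } finally {
            pool.shutdown();
        }
    }
}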

Example 38 with Upload

Use of com.amazonaws.services.s3.transfer.Upload in project apex-malhar by apache.

In class S3BlockUploadOperator, the method uploadBlockIntoS3:

/**
 * Uploads the block into the S3 bucket.
 * @param tuple block data
 */
protected void uploadBlockIntoS3(AbstractBlockReader.ReaderRecord<Slice> tuple) {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    // Check whether the block metadata is present for this block
    if (blockIdToFilePath.get(tuple.getBlockId()) == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    String uniqueBlockId = getUniqueBlockIdFromFile(tuple.getBlockId(), blockIdToFilePath.get(tuple.getBlockId()));
    S3BlockMetaData metaData = blockInfo.get(uniqueBlockId);
    // Check whether the file metadata is received
    if (metaData == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    long partSize = tuple.getRecord().length;
    PartETag partETag = null;
    ByteArrayInputStream bis = new ByteArrayInputStream(tuple.getRecord().buffer);
    // Check whether the file consists of a single block
    if (metaData.isLastBlock && metaData.partNo == 1) {
        ObjectMetadata omd = createObjectMetadata();
        omd.setContentLength(partSize);
        PutObjectResult result = s3Client.putObject(new PutObjectRequest(bucketName, metaData.getKeyName(), bis, omd));
        partETag = new PartETag(1, result.getETag());
    } else {
        // Otherwise upload the block as one part of a multipart upload
        try {
            // Create request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName).withKey(metaData.getKeyName()).withUploadId(metaData.getUploadId()).withPartNumber(metaData.getPartNo()).withInputStream(bis).withPartSize(partSize);
            partETag = s3Client.uploadPart(uploadRequest).getPartETag();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    UploadBlockMetadata uploadmetadata = new UploadBlockMetadata(partETag, metaData.getKeyName());
    output.emit(uploadmetadata);
    currentWindowRecoveryState.put(uniqueBlockId, uploadmetadata);
    try {
        bis.close();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Also used: PutObjectResult (com.amazonaws.services.s3.model.PutObjectResult), UploadPartRequest (com.amazonaws.services.s3.model.UploadPartRequest), IOException (java.io.IOException), PartETag (com.amazonaws.services.s3.model.PartETag), ByteArrayInputStream (java.io.ByteArrayInputStream), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest)
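
The two upload branches above condense to the following sketch, where try-with-resources replaces the manual bis.close() and its RuntimeException wrapper. The bucket, key, uploadId, partNo, and singleBlockFile parameters are hypothetical stand-ins for the operator's tuple and block metadata:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.UploadPartRequest;

import java.io.ByteArrayInputStream;
import java.io.IOException;

public class BlockUploadSketch {

    static PartETag uploadBlock(AmazonS3 s3, String bucket, String key,
            String uploadId, int partNo, boolean singleBlockFile, byte[] block)
            throws IOException {
        // try-with-resources closes the stream even if the upload throws.
        try (ByteArrayInputStream bis = new ByteArrayInputStream(block)) {
            if (singleBlockFile) {
                // A file with exactly one block goes up in a single plain PUT.
                ObjectMetadata omd = new ObjectMetadata();
                omd.setContentLength(block.length);
                PutObjectResult result =
                    s3.putObject(new PutObjectRequest(bucket, key, bis, omd));
                return new PartETag(1, result.getETag());
            }
            // Otherwise the block becomes one part of a multipart upload.
            return s3.uploadPart(new UploadPartRequest()
                .withBucketName(bucket)
                .withKey(key)
                .withUploadId(uploadId)
                .withPartNumber(partNo)
                .withInputStream(bis)
                .withPartSize(block.length)).getPartETag();
        }
    }
}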

Example 39 with Upload

Use of com.amazonaws.services.s3.transfer.Upload in project stocator by SparkTC.

In class COSAPIClient, the method createObject:

@Override
public FSDataOutputStream createObject(String objName, String contentType, Map<String, String> metadata, Statistics statistics) throws IOException {
    LOG.debug("Create object {}", objName);
    try {
        String objNameWithoutBuket = objName;
        if (objName.startsWith(mBucket + "/")) {
            objNameWithoutBuket = objName.substring(mBucket.length() + 1);
        }
        if (blockUploadEnabled) {
            return new FSDataOutputStream(new COSBlockOutputStream(this, objNameWithoutBuket, new SemaphoredDelegatingExecutor(threadPoolExecutor, blockOutputActiveBlocks, true), partSize, blockFactory, contentType, new WriteOperationHelper(objNameWithoutBuket), metadata), null);
        }
        if (!contentType.equals(Constants.APPLICATION_DIRECTORY)) {
            return new FSDataOutputStream(new COSOutputStream(mBucket, objName, mClient, contentType, metadata, transfers, this), statistics);
        } else {
            final InputStream im = new InputStream() {

                @Override
                public int read() throws IOException {
                    return -1;
                }
            };
            final ObjectMetadata om = new ObjectMetadata();
            om.setContentLength(0L);
            om.setContentType(contentType);
            om.setUserMetadata(metadata);
            // Remove the bucket name prefix from key path
            if (objName.startsWith(mBucket + "/")) {
                objName = objName.substring(mBucket.length() + 1);
            }
            /*
            if (!objName.endsWith("/")) {
                objName = objName + "/";
            }
            */
            LOG.debug("bucket: {}, key {}", mBucket, objName);
            PutObjectRequest putObjectRequest = new PutObjectRequest(mBucket, objName, im, om);
            Upload upload = transfers.upload(putObjectRequest);
            upload.waitForUploadResult();
            OutputStream fakeStream = new OutputStream() {

                @Override
                public void write(int b) throws IOException {
                }

                @Override
                public void close() throws IOException {
                    super.close();
                }
            };
            return new FSDataOutputStream(fakeStream, statistics);
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted creating " + objName);
    } catch (IOException e) {
        LOG.error(e.getMessage());
        throw e;
    }
}
Also used: InterruptedIOException (java.io.InterruptedIOException), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), COSInputStream (com.ibm.stocator.fs.cos.COSInputStream), InputStream (java.io.InputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), OutputStream (java.io.OutputStream), Upload (com.amazonaws.services.s3.transfer.Upload), IOException (java.io.IOException), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest)
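
Since this page indexes com.amazonaws.services.s3.transfer.Upload, a minimal sketch of the transfers.upload(...) / waitForUploadResult() pair used above may help. The bucket, key, and file path are hypothetical, and the TransferManager is built with SDK defaults rather than stocator's configured client:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

import java.io.File;

public class TransferManagerSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager transfers = TransferManagerBuilder.standard()
            .withS3Client(s3)
            .build();
        try {
            // upload() returns immediately; the transfer runs on the
            // TransferManager's own thread pool.
            Upload upload = transfers.upload("example-bucket", "example-key",
                new File("/tmp/example.dat"));
            // Block until the transfer finishes (or fails).
            upload.waitForUploadResult();
        } finally {
            // false: shut down the pool but keep the S3 client usable.
            transfers.shutdownNow(false);
        }
    }
}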

Aggregations

PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest): 19
ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata): 18
Upload (com.amazonaws.services.s3.transfer.Upload): 18
AmazonClientException (com.amazonaws.AmazonClientException): 11
IOException (java.io.IOException): 11
File (java.io.File): 8
DataStoreException (org.apache.jackrabbit.core.data.DataStoreException): 7
AmazonServiceException (com.amazonaws.AmazonServiceException): 6
PartETag (com.amazonaws.services.s3.model.PartETag): 6
InitiateMultipartUploadRequest (com.amazonaws.services.s3.model.InitiateMultipartUploadRequest): 5
InitiateMultipartUploadResult (com.amazonaws.services.s3.model.InitiateMultipartUploadResult): 5
PutObjectResult (com.amazonaws.services.s3.model.PutObjectResult): 5
InputStream (java.io.InputStream): 5
InterruptedIOException (java.io.InterruptedIOException): 5
CompleteMultipartUploadRequest (com.amazonaws.services.s3.model.CompleteMultipartUploadRequest): 4
S3Object (com.amazonaws.services.s3.model.S3Object): 4
UploadPartRequest (com.amazonaws.services.s3.model.UploadPartRequest): 4
ByteArrayInputStream (java.io.ByteArrayInputStream): 4
ArrayList (java.util.ArrayList): 4
AmazonS3 (com.amazonaws.services.s3.AmazonS3): 3