
Example 21 with AmazonClientException

Use of com.amazonaws.AmazonClientException in project ignite by apache.

From the class S3CheckpointSpiSelfTest, method afterSpiStopped().

/**
 * @throws Exception If an error occurs.
 */
@Override
protected void afterSpiStopped() throws Exception {
    AWSCredentials cred = new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), IgniteS3TestSuite.getSecretKey());
    AmazonS3 s3 = new AmazonS3Client(cred);
    String bucketName = S3CheckpointSpi.BUCKET_NAME_PREFIX + "unit-test-bucket";
    try {
        ObjectListing list = s3.listObjects(bucketName);
        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) s3.deleteObject(bucketName, sum.getKey());
            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    } catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
}
Also used: AmazonS3(com.amazonaws.services.s3.AmazonS3) AmazonS3Client(com.amazonaws.services.s3.AmazonS3Client) AmazonClientException(com.amazonaws.AmazonClientException) ObjectListing(com.amazonaws.services.s3.model.ObjectListing) S3ObjectSummary(com.amazonaws.services.s3.model.S3ObjectSummary) IgniteSpiException(org.apache.ignite.spi.IgniteSpiException) AWSCredentials(com.amazonaws.auth.AWSCredentials) BasicAWSCredentials(com.amazonaws.auth.BasicAWSCredentials)
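
The catch block in this example translates the SDK's unchecked AmazonClientException into the module's own IgniteSpiException, while the isTruncated()/listNextBatchOfObjects() loop drains buckets with more than one listing page. Below is a minimal, self-contained sketch of the same cleanup pattern, assuming only a configured AmazonS3 client; the S3BucketCleaner class is illustrative and not part of Ignite.

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

// Illustrative sketch; not part of the cited project.
public final class S3BucketCleaner {

    private S3BucketCleaner() {
    }

    /** Deletes every object in the bucket, paging through truncated listings. */
    public static void purge(AmazonS3 s3, String bucketName) {
        try {
            ObjectListing listing = s3.listObjects(bucketName);
            while (true) {
                // Each listing page holds a limited number of keys; delete them one by one.
                for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                    s3.deleteObject(bucketName, summary.getKey());
                }
                if (!listing.isTruncated()) {
                    break;
                }
                listing = s3.listNextBatchOfObjects(listing);
            }
        } catch (AmazonClientException e) {
            // Translate the SDK's unchecked exception into the caller's own type,
            // as the Ignite test does with IgniteSpiException.
            throw new IllegalStateException("Failed to clean up bucket: " + bucketName, e);
        }
    }
}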

Example 22 with AmazonClientException

Use of com.amazonaws.AmazonClientException in project nifi by apache.

From the class ITPutS3Object, method testS3MultipartAgeoff().

@Ignore
@Test
public void testS3MultipartAgeoff() throws InterruptedException, IOException {
    final PutS3Object processor = new PutS3Object();
    final TestRunner runner = TestRunners.newTestRunner(processor);
    final ProcessContext context = runner.getProcessContext();
    runner.setProperty(PutS3Object.CREDENTIALS_FILE, CREDENTIALS_FILE);
    runner.setProperty(PutS3Object.REGION, REGION);
    runner.setProperty(PutS3Object.BUCKET, BUCKET_NAME);
    // set check interval and age off to minimum values
    runner.setProperty(PutS3Object.MULTIPART_S3_AGEOFF_INTERVAL, "1 milli");
    runner.setProperty(PutS3Object.MULTIPART_S3_MAX_AGE, "1 milli");
    // create some dummy uploads
    for (Integer i = 0; i < 3; i++) {
        final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(BUCKET_NAME, "file" + i.toString() + ".txt");
        try {
            client.initiateMultipartUpload(initiateRequest);
        } catch (AmazonClientException e) {
            Assert.fail("Failed to initiate upload: " + e.getMessage());
        }
    }
    // Age-off is time dependent, so the test has some timing constraints.  This
    // sleep() delays long enough to satisfy both the check interval and the max age.
    Thread.sleep(2000L);
    // The current system time (in millis) is used for the age-off check, but it is
    // incremented on each call to circumvent what appears to be caching in the AWS
    // library.  The increments are 1000 millis because AWS returns upload initiation
    // times in whole seconds.
    Long now = System.currentTimeMillis();
    MultipartUploadListing uploadList = processor.getS3AgeoffListAndAgeoffLocalState(context, client, now);
    Assert.assertEquals(3, uploadList.getMultipartUploads().size());
    MultipartUpload upload0 = uploadList.getMultipartUploads().get(0);
    processor.abortS3MultipartUpload(client, BUCKET_NAME, upload0);
    uploadList = processor.getS3AgeoffListAndAgeoffLocalState(context, client, now + 1000);
    Assert.assertEquals(2, uploadList.getMultipartUploads().size());
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "test-upload.txt");
    runner.enqueue(getResourcePath(SAMPLE_FILE_RESOURCE_NAME), attrs);
    runner.run();
    uploadList = processor.getS3AgeoffListAndAgeoffLocalState(context, client, now + 2000);
    Assert.assertEquals(0, uploadList.getMultipartUploads().size());
}
Also used: HashMap(java.util.HashMap) TestRunner(org.apache.nifi.util.TestRunner) AmazonClientException(com.amazonaws.AmazonClientException) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) MultipartUploadListing(com.amazonaws.services.s3.model.MultipartUploadListing) MultipartUpload(com.amazonaws.services.s3.model.MultipartUpload) ProcessContext(org.apache.nifi.processor.ProcessContext) Ignore(org.junit.Ignore) Test(org.junit.Test)
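
The test seeds dangling multipart uploads with initiateMultipartUpload() and then verifies that PutS3Object's age-off logic lists and aborts them. The sketch below shows the equivalent cleanup done directly against the SDK, outside NiFi; the MultipartJanitor class and the cutoff parameter are illustrative assumptions.

import java.util.Date;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
import com.amazonaws.services.s3.model.MultipartUpload;
import com.amazonaws.services.s3.model.MultipartUploadListing;

// Illustrative sketch; not part of the cited project.
public final class MultipartJanitor {

    /** Aborts every multipart upload in the bucket that was initiated before the cutoff. */
    public static void abortStaleUploads(AmazonS3 s3, String bucket, Date cutoff) {
        try {
            MultipartUploadListing listing = s3.listMultipartUploads(new ListMultipartUploadsRequest(bucket));
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                // AWS reports initiation times in whole seconds, hence the coarse comparison.
                if (upload.getInitiated().before(cutoff)) {
                    s3.abortMultipartUpload(
                        new AbortMultipartUploadRequest(bucket, upload.getKey(), upload.getUploadId()));
                }
            }
        } catch (AmazonClientException e) {
            // The integration test fails the assertion instead; a library would rethrow or log.
            throw new IllegalStateException("Failed to age off multipart uploads in " + bucket, e);
        }
    }
}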

Example 23 with AmazonClientException

Use of com.amazonaws.AmazonClientException in project nifi by apache.

From the class DeleteDynamoDB, method onTrigger().

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    List<FlowFile> flowFiles = session.get(context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger());
    if (flowFiles == null || flowFiles.size() == 0) {
        return;
    }
    Map<ItemKeys, FlowFile> keysToFlowFileMap = new HashMap<>();
    final String table = context.getProperty(TABLE).evaluateAttributeExpressions().getValue();
    final String hashKeyName = context.getProperty(HASH_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String hashKeyValueType = context.getProperty(HASH_KEY_VALUE_TYPE).getValue();
    final String rangeKeyName = context.getProperty(RANGE_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String rangeKeyValueType = context.getProperty(RANGE_KEY_VALUE_TYPE).getValue();
    TableWriteItems tableWriteItems = new TableWriteItems(table);
    for (FlowFile flowFile : flowFiles) {
        final Object hashKeyValue = getValue(context, HASH_KEY_VALUE_TYPE, HASH_KEY_VALUE, flowFile);
        final Object rangeKeyValue = getValue(context, RANGE_KEY_VALUE_TYPE, RANGE_KEY_VALUE, flowFile);
        if (!isHashKeyValueConsistent(hashKeyName, hashKeyValue, session, flowFile)) {
            continue;
        }
        if (!isRangeKeyValueConsistent(rangeKeyName, rangeKeyValue, session, flowFile)) {
            continue;
        }
        if (rangeKeyValue == null || StringUtils.isBlank(rangeKeyValue.toString())) {
            tableWriteItems.addHashOnlyPrimaryKeysToDelete(hashKeyName, hashKeyValue);
        } else {
            tableWriteItems.addHashAndRangePrimaryKeyToDelete(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue);
        }
        keysToFlowFileMap.put(new ItemKeys(hashKeyValue, rangeKeyValue), flowFile);
    }
    if (keysToFlowFileMap.isEmpty()) {
        return;
    }
    final DynamoDB dynamoDB = getDynamoDB();
    try {
        BatchWriteItemOutcome outcome = dynamoDB.batchWriteItem(tableWriteItems);
        handleUnprocessedItems(session, keysToFlowFileMap, table, hashKeyName, hashKeyValueType, rangeKeyName, rangeKeyValueType, outcome);
        // Everything not reported back as unprocessed was deleted successfully
        for (FlowFile flowFile : keysToFlowFileMap.values()) {
            getLogger().debug("Successfully deleted item from dynamodb : " + table);
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (AmazonServiceException exception) {
        getLogger().error("Could not process flowFiles due to service exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processServiceException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (AmazonClientException exception) {
        getLogger().error("Could not process flowFiles due to client exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processClientException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (Exception exception) {
        getLogger().error("Could not process flowFiles due to exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    }
}
Also used: FlowFile(org.apache.nifi.flowfile.FlowFile) TableWriteItems(com.amazonaws.services.dynamodbv2.document.TableWriteItems) BatchWriteItemOutcome(com.amazonaws.services.dynamodbv2.document.BatchWriteItemOutcome) HashMap(java.util.HashMap) AmazonClientException(com.amazonaws.AmazonClientException) DynamoDB(com.amazonaws.services.dynamodbv2.document.DynamoDB) AmazonServiceException(com.amazonaws.AmazonServiceException) List(java.util.List)
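
Note the order of the catch blocks: AmazonServiceException (the request reached DynamoDB and was rejected) is handled before its superclass AmazonClientException (no usable response was obtained at all), with a plain Exception fallback last. The sketch below distills that layering around the same document-API batch write, assuming a configured DynamoDB client; the BatchDeleteSketch class is illustrative.

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.dynamodbv2.document.BatchWriteItemOutcome;
import com.amazonaws.services.dynamodbv2.document.DynamoDB;
import com.amazonaws.services.dynamodbv2.document.TableWriteItems;

// Illustrative sketch; not part of the cited project.
public final class BatchDeleteSketch {

    /** Deletes the given hash-only keys from the table, resubmitting anything DynamoDB left unprocessed. */
    public static void deleteKeys(DynamoDB dynamoDB, String table, String hashKeyName, Object... hashKeyValues) {
        TableWriteItems writeItems = new TableWriteItems(table);
        writeItems.addHashOnlyPrimaryKeysToDelete(hashKeyName, hashKeyValues);
        try {
            BatchWriteItemOutcome outcome = dynamoDB.batchWriteItem(writeItems);
            // A batch write can partially succeed; resubmit whatever came back unprocessed.
            if (!outcome.getUnprocessedItems().isEmpty()) {
                dynamoDB.batchWriteItemUnprocessed(outcome.getUnprocessedItems());
            }
        } catch (AmazonServiceException e) {
            // DynamoDB received the request and rejected it (throttling, validation, missing table, ...).
            System.err.println("DynamoDB rejected the batch delete: " + e.getErrorMessage());
        } catch (AmazonClientException e) {
            // No usable response at all (network failure, bad credentials, ...).  Because
            // AmazonServiceException extends AmazonClientException, this branch must come second.
            System.err.println("Could not reach DynamoDB: " + e.getMessage());
        }
    }
}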

Example 24 with AmazonClientException

Use of com.amazonaws.AmazonClientException in project nifi by apache.

From the class GetDynamoDB, method onTrigger().

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    List<FlowFile> flowFiles = session.get(context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger());
    if (flowFiles == null || flowFiles.size() == 0) {
        return;
    }
    Map<ItemKeys, FlowFile> keysToFlowFileMap = new HashMap<>();
    final String table = context.getProperty(TABLE).evaluateAttributeExpressions().getValue();
    TableKeysAndAttributes tableKeysAndAttributes = new TableKeysAndAttributes(table);
    final String hashKeyName = context.getProperty(HASH_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String rangeKeyName = context.getProperty(RANGE_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String jsonDocument = context.getProperty(JSON_DOCUMENT).evaluateAttributeExpressions().getValue();
    for (FlowFile flowFile : flowFiles) {
        final Object hashKeyValue = getValue(context, HASH_KEY_VALUE_TYPE, HASH_KEY_VALUE, flowFile);
        final Object rangeKeyValue = getValue(context, RANGE_KEY_VALUE_TYPE, RANGE_KEY_VALUE, flowFile);
        if (!isHashKeyValueConsistent(hashKeyName, hashKeyValue, session, flowFile)) {
            continue;
        }
        if (!isRangeKeyValueConsistent(rangeKeyName, rangeKeyValue, session, flowFile)) {
            continue;
        }
        keysToFlowFileMap.put(new ItemKeys(hashKeyValue, rangeKeyValue), flowFile);
        if (rangeKeyValue == null || StringUtils.isBlank(rangeKeyValue.toString())) {
            tableKeysAndAttributes.addHashOnlyPrimaryKey(hashKeyName, hashKeyValue);
        } else {
            tableKeysAndAttributes.addHashAndRangePrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue);
        }
    }
    if (keysToFlowFileMap.isEmpty()) {
        return;
    }
    final DynamoDB dynamoDB = getDynamoDB();
    try {
        BatchGetItemOutcome result = dynamoDB.batchGetItem(tableKeysAndAttributes);
        // Handle processed items and get the json document
        List<Item> items = result.getTableItems().get(table);
        for (Item item : items) {
            ItemKeys itemKeys = new ItemKeys(item.get(hashKeyName), item.get(rangeKeyName));
            FlowFile flowFile = keysToFlowFileMap.get(itemKeys);
            if (item.get(jsonDocument) != null) {
                ByteArrayInputStream bais = new ByteArrayInputStream(item.getJSON(jsonDocument).getBytes());
                flowFile = session.importFrom(bais, flowFile);
            }
            session.transfer(flowFile, REL_SUCCESS);
            keysToFlowFileMap.remove(itemKeys);
        }
        // Handle unprocessed keys
        Map<String, KeysAndAttributes> unprocessedKeys = result.getUnprocessedKeys();
        if (unprocessedKeys != null && unprocessedKeys.size() > 0) {
            KeysAndAttributes keysAndAttributes = unprocessedKeys.get(table);
            List<Map<String, AttributeValue>> keys = keysAndAttributes.getKeys();
            for (Map<String, AttributeValue> unprocessedKey : keys) {
                Object hashKeyValue = getAttributeValue(context, HASH_KEY_VALUE_TYPE, unprocessedKey.get(hashKeyName));
                Object rangeKeyValue = getAttributeValue(context, RANGE_KEY_VALUE_TYPE, unprocessedKey.get(rangeKeyName));
                sendUnprocessedToUnprocessedRelationship(session, keysToFlowFileMap, hashKeyValue, rangeKeyValue);
            }
        }
        // Handle any remaining items (returned neither as results nor as unprocessed keys).
        // Note: removing entries from keysToFlowFileMap inside this loop would risk a
        // ConcurrentModificationException, and the map is not used after this point,
        // so the entries are simply left in place.
        for (ItemKeys key : keysToFlowFileMap.keySet()) {
            FlowFile flowFile = keysToFlowFileMap.get(key);
            flowFile = session.putAttribute(flowFile, DYNAMODB_KEY_ERROR_NOT_FOUND, DYNAMODB_KEY_ERROR_NOT_FOUND_MESSAGE + key.toString());
            session.transfer(flowFile, REL_NOT_FOUND);
        }
    } catch (AmazonServiceException exception) {
        getLogger().error("Could not process flowFiles due to service exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processServiceException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (AmazonClientException exception) {
        getLogger().error("Could not process flowFiles due to client exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processClientException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (Exception exception) {
        getLogger().error("Could not process flowFiles due to exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    }
}
Also used: FlowFile(org.apache.nifi.flowfile.FlowFile) AttributeValue(com.amazonaws.services.dynamodbv2.model.AttributeValue) HashMap(java.util.HashMap) AmazonClientException(com.amazonaws.AmazonClientException) DynamoDB(com.amazonaws.services.dynamodbv2.document.DynamoDB) KeysAndAttributes(com.amazonaws.services.dynamodbv2.model.KeysAndAttributes) TableKeysAndAttributes(com.amazonaws.services.dynamodbv2.document.TableKeysAndAttributes) BatchGetItemOutcome(com.amazonaws.services.dynamodbv2.document.BatchGetItemOutcome) AmazonServiceException(com.amazonaws.AmazonServiceException) Item(com.amazonaws.services.dynamodbv2.document.Item) ByteArrayInputStream(java.io.ByteArrayInputStream) List(java.util.List) Map(java.util.Map)
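
Beyond the same layered catch blocks, the interesting part here is how the batch get is reconciled: returned items map to REL_SUCCESS, keys reported as unprocessed go to the unprocessed relationship, and anything left over is treated as not found. The sketch below drains unprocessed keys directly with the document API, assuming a configured DynamoDB client; the BatchGetSketch class and the per-item println are illustrative.

import java.util.List;
import java.util.Map;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.dynamodbv2.document.BatchGetItemOutcome;
import com.amazonaws.services.dynamodbv2.document.DynamoDB;
import com.amazonaws.services.dynamodbv2.document.Item;
import com.amazonaws.services.dynamodbv2.document.TableKeysAndAttributes;
import com.amazonaws.services.dynamodbv2.model.KeysAndAttributes;

// Illustrative sketch; not part of the cited project.
public final class BatchGetSketch {

    /** Fetches the requested keys, resubmitting unprocessed keys until DynamoDB has answered them all. */
    public static void fetchAll(DynamoDB dynamoDB, String table, TableKeysAndAttributes keys) {
        try {
            BatchGetItemOutcome outcome = dynamoDB.batchGetItem(keys);
            while (true) {
                List<Item> items = outcome.getTableItems().get(table);
                if (items != null) {
                    for (Item item : items) {
                        System.out.println(item.toJSON()); // replace with real per-item handling
                    }
                }
                Map<String, KeysAndAttributes> unprocessed = outcome.getUnprocessedKeys();
                if (unprocessed == null || unprocessed.isEmpty()) {
                    break;
                }
                // Keys the service could not serve this round are retried as a new batch.
                outcome = dynamoDB.batchGetItemUnprocessed(unprocessed);
            }
        } catch (AmazonClientException e) {
            // Service- and client-side SDK failures both land here; the NiFi processor
            // additionally separates AmazonServiceException in a preceding catch block.
            throw new IllegalStateException("Batch get against table " + table + " failed", e);
        }
    }
}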

Example 25 with AmazonClientException

Use of com.amazonaws.AmazonClientException in project nifi by apache.

From the class FetchS3Object, method onTrigger().

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final long startNanos = System.nanoTime();
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String versionId = context.getProperty(VERSION_ID).evaluateAttributeExpressions(flowFile).getValue();
    final AmazonS3 client = getClient();
    final GetObjectRequest request;
    if (versionId == null) {
        request = new GetObjectRequest(bucket, key);
    } else {
        request = new GetObjectRequest(bucket, key, versionId);
    }
    final Map<String, String> attributes = new HashMap<>();
    try (final S3Object s3Object = client.getObject(request)) {
        flowFile = session.importFrom(s3Object.getObjectContent(), flowFile);
        attributes.put("s3.bucket", s3Object.getBucketName());
        final ObjectMetadata metadata = s3Object.getObjectMetadata();
        if (metadata.getContentDisposition() != null) {
            final String fullyQualified = metadata.getContentDisposition();
            final int lastSlash = fullyQualified.lastIndexOf("/");
            if (lastSlash > -1 && lastSlash < fullyQualified.length() - 1) {
                attributes.put(CoreAttributes.PATH.key(), fullyQualified.substring(0, lastSlash));
                attributes.put(CoreAttributes.ABSOLUTE_PATH.key(), fullyQualified);
                attributes.put(CoreAttributes.FILENAME.key(), fullyQualified.substring(lastSlash + 1));
            } else {
                attributes.put(CoreAttributes.FILENAME.key(), metadata.getContentDisposition());
            }
        }
        if (metadata.getContentMD5() != null) {
            attributes.put("hash.value", metadata.getContentMD5());
            attributes.put("hash.algorithm", "MD5");
        }
        if (metadata.getContentType() != null) {
            attributes.put(CoreAttributes.MIME_TYPE.key(), metadata.getContentType());
        }
        if (metadata.getETag() != null) {
            attributes.put("s3.etag", metadata.getETag());
        }
        if (metadata.getExpirationTime() != null) {
            attributes.put("s3.expirationTime", String.valueOf(metadata.getExpirationTime().getTime()));
        }
        if (metadata.getExpirationTimeRuleId() != null) {
            attributes.put("s3.expirationTimeRuleId", metadata.getExpirationTimeRuleId());
        }
        if (metadata.getUserMetadata() != null) {
            attributes.putAll(metadata.getUserMetadata());
        }
        if (metadata.getSSEAlgorithm() != null) {
            attributes.put("s3.sseAlgorithm", metadata.getSSEAlgorithm());
        }
        if (metadata.getVersionId() != null) {
            attributes.put("s3.version", metadata.getVersionId());
        }
    } catch (final IOException | AmazonClientException ioe) {
        getLogger().error("Failed to retrieve S3 Object for {}; routing to failure", new Object[] { flowFile, ioe });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    if (!attributes.isEmpty()) {
        flowFile = session.putAllAttributes(flowFile, attributes);
    }
    session.transfer(flowFile, REL_SUCCESS);
    final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    getLogger().info("Successfully retrieved S3 Object for {} in {} millis; routing to success", new Object[] { flowFile, transferMillis });
    session.getProvenanceReporter().fetch(flowFile, "http://" + bucket + ".amazonaws.com/" + key, transferMillis);
}
Also used: FlowFile(org.apache.nifi.flowfile.FlowFile) AmazonS3(com.amazonaws.services.s3.AmazonS3) HashMap(java.util.HashMap) AmazonClientException(com.amazonaws.AmazonClientException) IOException(java.io.IOException) S3Object(com.amazonaws.services.s3.model.S3Object) GetObjectRequest(com.amazonaws.services.s3.model.GetObjectRequest) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata)
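
Two details carry this example: S3Object is Closeable, so try-with-resources guarantees the underlying HTTP connection is released even when the copy fails, and stream errors (IOException) and SDK errors (AmazonClientException) are handled by one multi-catch. A minimal sketch of that shape, assuming an already configured AmazonS3 client; the FetchToFileSketch class and the target path are illustrative.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;

// Illustrative sketch; not part of the cited project.
public final class FetchToFileSketch {

    /** Downloads bucket/key to the given local path, returning true on success. */
    public static boolean fetch(AmazonS3 client, String bucket, String key, Path target) {
        // S3Object is Closeable: try-with-resources releases the HTTP connection on every path.
        try (S3Object s3Object = client.getObject(new GetObjectRequest(bucket, key))) {
            Files.copy(s3Object.getObjectContent(), target);
            return true;
        } catch (IOException | AmazonClientException e) {
            // The processor routes both kinds of failure to REL_FAILURE after penalizing the FlowFile.
            return false;
        }
    }
}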

Aggregations

AmazonClientException (com.amazonaws.AmazonClientException): 202
IOException (java.io.IOException): 70
AmazonServiceException (com.amazonaws.AmazonServiceException): 32
ArrayList (java.util.ArrayList): 32
ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata): 23
ObjectListing (com.amazonaws.services.s3.model.ObjectListing): 19
S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary): 17
HashMap (java.util.HashMap): 16
PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest): 14
Test (org.junit.Test): 14
SienaException (siena.SienaException): 12
AWSCredentials (com.amazonaws.auth.AWSCredentials): 11
AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client): 11
GetObjectRequest (com.amazonaws.services.s3.model.GetObjectRequest): 11
ListObjectsRequest (com.amazonaws.services.s3.model.ListObjectsRequest): 11
AmazonDynamoDB (com.amazonaws.services.dynamodbv2.AmazonDynamoDB): 10
AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception): 10
InterruptedIOException (java.io.InterruptedIOException): 10
DeleteObjectsRequest (com.amazonaws.services.s3.model.DeleteObjectsRequest): 9
File (java.io.File): 9