Example 1 with SSEAlgorithm

Use of com.amazonaws.services.s3.model.SSEAlgorithm in project ignite by apache.

From the class S3CheckpointSpi, method spiStart:

/**
 * {@inheritDoc}
 */
@SuppressWarnings({ "BusyWait" })
@Override
public void spiStart(String igniteInstanceName) throws IgniteSpiException {
    // Start SPI start stopwatch.
    startStopwatch();
    assertParameter(cred != null, "awsCredentials != null");
    if (log.isDebugEnabled()) {
        log.debug(configInfo("awsCredentials", cred));
        log.debug(configInfo("clientConfiguration", cfg));
        log.debug(configInfo("bucketNameSuffix", bucketNameSuffix));
        log.debug(configInfo("bucketEndpoint", bucketEndpoint));
        log.debug(configInfo("SSEAlgorithm", sseAlg));
    }
    if (cfg == null)
        U.warn(log, "Amazon client configuration is not set (will use default).");
    if (F.isEmpty(bucketNameSuffix)) {
        U.warn(log, "Bucket name suffix is null or empty (will use default bucket name).");
        bucketName = BUCKET_NAME_PREFIX + DFLT_BUCKET_NAME_SUFFIX;
    } else
        bucketName = BUCKET_NAME_PREFIX + bucketNameSuffix;
    s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred);
    if (!F.isEmpty(bucketEndpoint))
        s3.setEndpoint(bucketEndpoint);
    if (!s3.doesBucketExist(bucketName)) {
        try {
            s3.createBucket(bucketName);
            if (log.isDebugEnabled())
                log.debug("Created S3 bucket: " + bucketName);
            while (!s3.doesBucketExist(bucketName)) {
                try {
                    U.sleep(200);
                } catch (IgniteInterruptedCheckedException e) {
                    throw new IgniteSpiException("Thread has been interrupted.", e);
                }
            }
        } catch (AmazonClientException e) {
            try {
                if (!s3.doesBucketExist(bucketName))
                    throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            } catch (AmazonClientException ignored) {
                throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
        }
    }
    Collection<S3TimeData> s3TimeDataLst = new LinkedList<>();
    try {
        ObjectListing list = s3.listObjects(bucketName);
        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                S3CheckpointData data = read(sum.getKey());
                if (data != null) {
                    s3TimeDataLst.add(new S3TimeData(data.getExpireTime(), data.getKey()));
                    if (log.isDebugEnabled())
                        log.debug("Registered existing checkpoint from key: " + data.getKey());
                }
            }
            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    } catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    } catch (IgniteCheckedException e) {
        throw new IgniteSpiException("Failed to marshal/unmarshal objects in bucket: " + bucketName, e);
    }
    // Track expiration only for checkpoint data created by this node.
    timeoutWrk = new S3TimeoutWorker();
    timeoutWrk.add(s3TimeDataLst);
    timeoutWrk.start();
    registerMBean(igniteInstanceName, new S3CheckpointSpiMBeanImpl(this), S3CheckpointSpiMBean.class);
    // Ack ok start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}
Also used: AmazonClientException (com.amazonaws.AmazonClientException), ObjectListing (com.amazonaws.services.s3.model.ObjectListing), S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary), LinkedList (java.util.LinkedList), IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException), AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteSpiException (org.apache.ignite.spi.IgniteSpiException)
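
spiStart only validates and logs the configured sseAlg; the algorithm takes effect when the SPI later writes checkpoint data to the bucket. The following is a minimal sketch of how such a write could apply it, assuming a hypothetical writeCheckpointEntry helper alongside the same s3, bucketName and sseAlg fields (it also uses ObjectMetadata, PutObjectRequest and java.io.ByteArrayInputStream); it is illustrative, not the SPI's actual write method.

private void writeCheckpointEntry(String key, byte[] state) {
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(state.length);
    // Request server-side encryption only when an algorithm is configured,
    // e.g. SSEAlgorithm.AES256.getAlgorithm() ("AES256") or SSEAlgorithm.KMS.getAlgorithm() ("aws:kms").
    if (!F.isEmpty(sseAlg))
        meta.setSSEAlgorithm(sseAlg);
    s3.putObject(new PutObjectRequest(bucketName, key, new java.io.ByteArrayInputStream(state), meta));
}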

Example 2 with SSEAlgorithm

Use of com.amazonaws.services.s3.model.SSEAlgorithm in project nifi by apache.

From the class FetchS3Object, method onTrigger:

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final long startNanos = System.nanoTime();
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String versionId = context.getProperty(VERSION_ID).evaluateAttributeExpressions(flowFile).getValue();
    final AmazonS3 client = getClient();
    final GetObjectRequest request;
    if (versionId == null) {
        request = new GetObjectRequest(bucket, key);
    } else {
        request = new GetObjectRequest(bucket, key, versionId);
    }
    final Map<String, String> attributes = new HashMap<>();
    try (final S3Object s3Object = client.getObject(request)) {
        flowFile = session.importFrom(s3Object.getObjectContent(), flowFile);
        attributes.put("s3.bucket", s3Object.getBucketName());
        final ObjectMetadata metadata = s3Object.getObjectMetadata();
        if (metadata.getContentDisposition() != null) {
            final String fullyQualified = metadata.getContentDisposition();
            final int lastSlash = fullyQualified.lastIndexOf("/");
            if (lastSlash > -1 && lastSlash < fullyQualified.length() - 1) {
                attributes.put(CoreAttributes.PATH.key(), fullyQualified.substring(0, lastSlash));
                attributes.put(CoreAttributes.ABSOLUTE_PATH.key(), fullyQualified);
                attributes.put(CoreAttributes.FILENAME.key(), fullyQualified.substring(lastSlash + 1));
            } else {
                attributes.put(CoreAttributes.FILENAME.key(), metadata.getContentDisposition());
            }
        }
        if (metadata.getContentMD5() != null) {
            attributes.put("hash.value", metadata.getContentMD5());
            attributes.put("hash.algorithm", "MD5");
        }
        if (metadata.getContentType() != null) {
            attributes.put(CoreAttributes.MIME_TYPE.key(), metadata.getContentType());
        }
        if (metadata.getETag() != null) {
            attributes.put("s3.etag", metadata.getETag());
        }
        if (metadata.getExpirationTime() != null) {
            attributes.put("s3.expirationTime", String.valueOf(metadata.getExpirationTime().getTime()));
        }
        if (metadata.getExpirationTimeRuleId() != null) {
            attributes.put("s3.expirationTimeRuleId", metadata.getExpirationTimeRuleId());
        }
        if (metadata.getUserMetadata() != null) {
            attributes.putAll(metadata.getUserMetadata());
        }
        if (metadata.getSSEAlgorithm() != null) {
            attributes.put("s3.sseAlgorithm", metadata.getSSEAlgorithm());
        }
        if (metadata.getVersionId() != null) {
            attributes.put("s3.version", metadata.getVersionId());
        }
    } catch (final IOException | AmazonClientException ioe) {
        getLogger().error("Failed to retrieve S3 Object for {}; routing to failure", new Object[] { flowFile, ioe });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    if (!attributes.isEmpty()) {
        flowFile = session.putAllAttributes(flowFile, attributes);
    }
    session.transfer(flowFile, REL_SUCCESS);
    final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    getLogger().info("Successfully retrieved S3 Object for {} in {} millis; routing to success", new Object[] { flowFile, transferMillis });
    session.getProvenanceReporter().fetch(flowFile, "http://" + bucket + ".amazonaws.com/" + key, transferMillis);
}
Also used: FlowFile (org.apache.nifi.flowfile.FlowFile), AmazonS3 (com.amazonaws.services.s3.AmazonS3), HashMap (java.util.HashMap), AmazonClientException (com.amazonaws.AmazonClientException), IOException (java.io.IOException), S3Object (com.amazonaws.services.s3.model.S3Object), GetObjectRequest (com.amazonaws.services.s3.model.GetObjectRequest), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata)
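
The s3.sseAlgorithm attribute above is only populated when the fetched object was stored with server-side encryption. A minimal sketch of storing an object that way with the plain AWS SDK, assuming an already-configured AmazonS3 client and illustrative bucket and key names (this is not NiFi's PutS3Object processor):

byte[] body = "Some Content".getBytes(java.nio.charset.StandardCharsets.UTF_8);
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(body.length);
meta.setContentType("text/plain");
// Ask S3 for SSE-S3 (AES256); a later getObject will then report this value
// through ObjectMetadata.getSSEAlgorithm(), which FetchS3Object copies to s3.sseAlgorithm.
meta.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
client.putObject(new PutObjectRequest("example-bucket", "example-key", new java.io.ByteArrayInputStream(body), meta));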

Example 3 with SSEAlgorithm

Use of com.amazonaws.services.s3.model.SSEAlgorithm in project kafka-connect-storage-cloud by confluentinc.

From the class S3SinkConnectorConfig, method newConfigDef:

public static ConfigDef newConfigDef() {
    ConfigDef configDef = StorageSinkConnectorConfig.newConfigDef(FORMAT_CLASS_RECOMMENDER, AVRO_COMPRESSION_RECOMMENDER);
    final String connectorGroup = "Connector";
    final int latestOrderInGroup = configDef.configKeys().values().stream().filter(c -> connectorGroup.equalsIgnoreCase(c.group)).map(c -> c.orderInGroup).max(Integer::compare).orElse(0);
    StorageSinkConnectorConfig.enableParquetConfig(configDef, PARQUET_COMPRESSION_RECOMMENDER, connectorGroup, latestOrderInGroup);
    {
        final String group = "S3";
        int orderInGroup = 0;
        configDef.define(S3_BUCKET_CONFIG, Type.STRING, Importance.HIGH, "The S3 Bucket.", group, ++orderInGroup, Width.LONG, "S3 Bucket");
        configDef.define(S3_OBJECT_TAGGING_CONFIG, Type.BOOLEAN, S3_OBJECT_TAGGING_DEFAULT, Importance.LOW, "Tag S3 objects with start and end offsets, as well as record count.", group, ++orderInGroup, Width.LONG, "S3 Object Tagging");
        configDef.define(REGION_CONFIG, Type.STRING, REGION_DEFAULT, new RegionValidator(), Importance.MEDIUM, "The AWS region to be used by the connector.", group, ++orderInGroup, Width.LONG, "AWS region", new RegionRecommender());
        configDef.define(PART_SIZE_CONFIG, Type.INT, PART_SIZE_DEFAULT, new PartRange(), Importance.HIGH, "The Part Size in S3 Multi-part Uploads.", group, ++orderInGroup, Width.LONG, "S3 Part Size");
        configDef.define(CREDENTIALS_PROVIDER_CLASS_CONFIG, Type.CLASS, CREDENTIALS_PROVIDER_CLASS_DEFAULT, new CredentialsProviderValidator(), Importance.LOW, "Credentials provider or provider chain to use for authentication to AWS. By default " + "the connector uses ``" + DefaultAWSCredentialsProviderChain.class.getSimpleName() + "``.", group, ++orderInGroup, Width.LONG, "AWS Credentials Provider Class");
        configDef.define(AWS_ACCESS_KEY_ID_CONFIG, Type.STRING, AWS_ACCESS_KEY_ID_DEFAULT, Importance.HIGH, "The AWS access key ID used to authenticate personal AWS credentials such as IAM " + "credentials. Use only if you do not wish to authenticate by using a credentials " + "provider class via ``" + CREDENTIALS_PROVIDER_CLASS_CONFIG + "``", group, ++orderInGroup, Width.LONG, "AWS Access Key ID");
        configDef.define(AWS_SECRET_ACCESS_KEY_CONFIG, Type.PASSWORD, AWS_SECRET_ACCESS_KEY_DEFAULT, Importance.HIGH, "The secret access key used to authenticate personal AWS credentials such as IAM " + "credentials. Use only if you do not wish to authenticate by using a credentials " + "provider class via ``" + CREDENTIALS_PROVIDER_CLASS_CONFIG + "``", group, ++orderInGroup, Width.LONG, "AWS Secret Access Key");
        List<String> validSsea = new ArrayList<>(SSEAlgorithm.values().length + 1);
        validSsea.add("");
        for (SSEAlgorithm algo : SSEAlgorithm.values()) {
            validSsea.add(algo.toString());
        }
        configDef.define(SSEA_CONFIG, Type.STRING, SSEA_DEFAULT, ConfigDef.ValidString.in(validSsea.toArray(new String[validSsea.size()])), Importance.LOW, "The S3 Server Side Encryption Algorithm.", group, ++orderInGroup, Width.LONG, "S3 Server Side Encryption Algorithm", new SseAlgorithmRecommender());
        configDef.define(SSE_CUSTOMER_KEY, Type.PASSWORD, SSE_CUSTOMER_KEY_DEFAULT, Importance.LOW, "The S3 Server Side Encryption Customer-Provided Key (SSE-C).", group, ++orderInGroup, Width.LONG, "S3 Server Side Encryption Customer-Provided Key (SSE-C)");
        configDef.define(SSE_KMS_KEY_ID_CONFIG, Type.STRING, SSE_KMS_KEY_ID_DEFAULT, Importance.LOW, "The name of the AWS Key Management Service (AWS-KMS) key to be used for server side " + "encryption of the S3 objects. No encryption is used when no key is provided, but" + " it is enabled when ``" + SSEAlgorithm.KMS + "`` is specified as encryption " + "algorithm with a valid key name.", group, ++orderInGroup, Width.LONG, "S3 Server Side Encryption Key", new SseKmsKeyIdRecommender());
        configDef.define(ACL_CANNED_CONFIG, Type.STRING, ACL_CANNED_DEFAULT, new CannedAclValidator(), Importance.LOW, "An S3 canned ACL header value to apply when writing objects.", group, ++orderInGroup, Width.LONG, "S3 Canned ACL");
        configDef.define(WAN_MODE_CONFIG, Type.BOOLEAN, WAN_MODE_DEFAULT, Importance.MEDIUM, "Use S3 accelerated endpoint.", group, ++orderInGroup, Width.LONG, "S3 accelerated endpoint enabled");
        configDef.define(COMPRESSION_TYPE_CONFIG, Type.STRING, COMPRESSION_TYPE_DEFAULT, new CompressionTypeValidator(), Importance.LOW, "Compression type for files written to S3. " + "Applied when using JsonFormat or ByteArrayFormat. " + "Available values: none, gzip.", group, ++orderInGroup, Width.LONG, "Compression type");
        configDef.define(COMPRESSION_LEVEL_CONFIG, Type.INT, COMPRESSION_LEVEL_DEFAULT, COMPRESSION_LEVEL_VALIDATOR, Importance.LOW, "Compression level for files written to S3. " + "Applied when using JsonFormat or ByteArrayFormat. ", group, ++orderInGroup, Width.LONG, "Compression type", COMPRESSION_LEVEL_VALIDATOR);
        configDef.define(S3_PART_RETRIES_CONFIG, Type.INT, S3_PART_RETRIES_DEFAULT, atLeast(0), Importance.MEDIUM, "Maximum number of retry attempts for failed requests. Zero means no retries. " + "The actual number of attempts is determined by the S3 client based on multiple " + "factors including, but not limited to: the value of this parameter, type of " + "exception occurred, and throttling settings of the underlying S3 client.", group, ++orderInGroup, Width.LONG, "S3 Part Upload Retries");
        configDef.define(S3_RETRY_BACKOFF_CONFIG, Type.LONG, S3_RETRY_BACKOFF_DEFAULT, atLeast(0L), Importance.LOW, "How long to wait in milliseconds before attempting the first retry " + "of a failed S3 request. Upon a failure, this connector may wait up to twice as " + "long as the previous wait, up to the maximum number of retries. " + "This avoids retrying in a tight loop under failure scenarios.", group, ++orderInGroup, Width.SHORT, "Retry Backoff (ms)");
        configDef.define(FORMAT_BYTEARRAY_EXTENSION_CONFIG, Type.STRING, FORMAT_BYTEARRAY_EXTENSION_DEFAULT, Importance.LOW, String.format("Output file extension for ByteArrayFormat. Defaults to ``%s``.", FORMAT_BYTEARRAY_EXTENSION_DEFAULT), group, ++orderInGroup, Width.LONG, "Output file extension for ByteArrayFormat");
        configDef.define(FORMAT_BYTEARRAY_LINE_SEPARATOR_CONFIG, Type.STRING, // the default is applied in getFormatByteArrayLineSeparator().
        null, Importance.LOW, "String inserted between records for ByteArrayFormat. " + "Defaults to ``System.lineSeparator()`` " + "and may contain escape sequences like ``\\n``. " + "An input record that contains the line separator will look like " + "multiple records in the output S3 object.", group, ++orderInGroup, Width.LONG, "Line separator ByteArrayFormat");
        configDef.define(S3_PROXY_URL_CONFIG, Type.STRING, S3_PROXY_URL_DEFAULT, Importance.LOW, "S3 Proxy settings encoded in URL syntax. This property is meant to be used only if you" + " need to access S3 through a proxy.", group, ++orderInGroup, Width.LONG, "S3 Proxy Settings");
        configDef.define(S3_PROXY_USER_CONFIG, Type.STRING, S3_PROXY_USER_DEFAULT, Importance.LOW, "S3 Proxy User. This property is meant to be used only if you" + " need to access S3 through a proxy. Using ``" + S3_PROXY_USER_CONFIG + "`` instead of embedding the username and password in ``" + S3_PROXY_URL_CONFIG + "`` allows the password to be hidden in the logs.", group, ++orderInGroup, Width.LONG, "S3 Proxy User");
        configDef.define(S3_PROXY_PASS_CONFIG, Type.PASSWORD, S3_PROXY_PASS_DEFAULT, Importance.LOW, "S3 Proxy Password. This property is meant to be used only if you" + " need to access S3 through a proxy. Using ``" + S3_PROXY_PASS_CONFIG + "`` instead of embedding the username and password in ``" + S3_PROXY_URL_CONFIG + "`` allows the password to be hidden in the logs.", group, ++orderInGroup, Width.LONG, "S3 Proxy Password");
        configDef.define(HEADERS_USE_EXPECT_CONTINUE_CONFIG, Type.BOOLEAN, HEADERS_USE_EXPECT_CONTINUE_DEFAULT, Importance.LOW, "Enable or disable use of the HTTP/1.1 handshake using EXPECT: 100-CONTINUE during " + "multi-part upload. If true, the client will wait for a 100 (CONTINUE) response " + "before sending the request body. Else, the client uploads the entire request " + "body without checking if the server is willing to accept the request.", group, ++orderInGroup, Width.SHORT, "S3 HTTP Send Uses Expect Continue");
        configDef.define(BEHAVIOR_ON_NULL_VALUES_CONFIG, Type.STRING, BEHAVIOR_ON_NULL_VALUES_DEFAULT, BehaviorOnNullValues.VALIDATOR, Importance.LOW, "How to handle records with a null value (i.e. Kafka tombstone records)." + " Valid options are 'ignore' and 'fail'.", group, ++orderInGroup, Width.SHORT, "Behavior for null-valued records");
    }
    {
        final String group = "Keys and Headers";
        int orderInGroup = 0;
        configDef.define(STORE_KAFKA_KEYS_CONFIG, Type.BOOLEAN, false, Importance.LOW, "Enable or disable writing keys to storage.", group, ++orderInGroup, Width.SHORT, "Store kafka keys", Collections.singletonList(KEYS_FORMAT_CLASS_CONFIG));
        configDef.define(STORE_KAFKA_HEADERS_CONFIG, Type.BOOLEAN, false, Importance.LOW, "Enable or disable writing headers to storage.", group, ++orderInGroup, Width.SHORT, "Store kafka headers", Collections.singletonList(HEADERS_FORMAT_CLASS_CONFIG));
        configDef.define(KEYS_FORMAT_CLASS_CONFIG, Type.CLASS, KEYS_FORMAT_CLASS_DEFAULT, Importance.LOW, "The format class to use when writing keys to the store.", group, ++orderInGroup, Width.NONE, "Keys format class", KEYS_FORMAT_CLASS_RECOMMENDER);
        configDef.define(HEADERS_FORMAT_CLASS_CONFIG, Type.CLASS, HEADERS_FORMAT_CLASS_DEFAULT, Importance.LOW, "The format class to use when writing headers to the store.", group, ++orderInGroup, Width.NONE, "Headers format class", HEADERS_FORMAT_CLASS_RECOMMENDER);
        configDef.define(S3_PATH_STYLE_ACCESS_ENABLED_CONFIG, Type.BOOLEAN, S3_PATH_STYLE_ACCESS_ENABLED_DEFAULT, Importance.LOW, "Specifies whether or not to enable path style access to the bucket used by the " + "connector", group, ++orderInGroup, Width.SHORT, "Enable Path Style Access to S3");
    }
    return configDef;
}
Also used: Arrays (java.util.Arrays), HourlyPartitioner (io.confluent.connect.storage.partitioner.HourlyPartitioner), DefaultAWSCredentialsProviderChain (com.amazonaws.auth.DefaultAWSCredentialsProviderChain), Range.atLeast (org.apache.kafka.common.config.ConfigDef.Range.atLeast), Type (org.apache.kafka.common.config.ConfigDef.Type), ByteArrayFormat (io.confluent.connect.s3.format.bytearray.ByteArrayFormat), JsonFormat (io.confluent.connect.s3.format.json.JsonFormat), Locale (java.util.Locale), Width (org.apache.kafka.common.config.ConfigDef.Width), Map (java.util.Map), CannedAccessControlList (com.amazonaws.services.s3.model.CannedAccessControlList), Importance (org.apache.kafka.common.config.ConfigDef.Importance), ConfigDef (org.apache.kafka.common.config.ConfigDef), ParquetFormat (io.confluent.connect.s3.format.parquet.ParquetFormat), Configurable (org.apache.kafka.common.Configurable), StorageSinkConnectorConfig (io.confluent.connect.storage.StorageSinkConnectorConfig), Collection (java.util.Collection), CompressionType (io.confluent.connect.s3.storage.CompressionType), RegionUtils (com.amazonaws.regions.RegionUtils), Set (java.util.Set), Deflater (java.util.zip.Deflater), Collectors (java.util.stream.Collectors), ComposableConfig (io.confluent.connect.storage.common.ComposableConfig), List (java.util.List), CompressionCodecName (org.apache.parquet.hadoop.metadata.CompressionCodecName), StorageCommonConfig (io.confluent.connect.storage.common.StorageCommonConfig), IntStream (java.util.stream.IntStream), Password (org.apache.kafka.common.config.types.Password), Format (io.confluent.connect.storage.format.Format), FieldPartitioner (io.confluent.connect.storage.partitioner.FieldPartitioner), HashMap (java.util.HashMap), SSEAlgorithm (com.amazonaws.services.s3.model.SSEAlgorithm), Function (java.util.function.Function), Regions (com.amazonaws.regions.Regions), ArrayList (java.util.ArrayList), HashSet (java.util.HashSet), DailyPartitioner (io.confluent.connect.storage.partitioner.DailyPartitioner), AWSCredentialsProvider (com.amazonaws.auth.AWSCredentialsProvider), LinkedList (java.util.LinkedList), S3Storage (io.confluent.connect.s3.storage.S3Storage), Utils (org.apache.kafka.common.utils.Utils), TimeBasedPartitioner (io.confluent.connect.storage.partitioner.TimeBasedPartitioner), ConfigException (org.apache.kafka.common.config.ConfigException), TimeUnit (java.util.concurrent.TimeUnit), DefaultPartitioner (io.confluent.connect.storage.partitioner.DefaultPartitioner), AvroFormat (io.confluent.connect.s3.format.avro.AvroFormat), ParentValueRecommender (io.confluent.connect.storage.common.ParentValueRecommender), ClientConfiguration (com.amazonaws.ClientConfiguration), AbstractConfig (org.apache.kafka.common.config.AbstractConfig), ConnectException (org.apache.kafka.connect.errors.ConnectException), PartitionerConfig (io.confluent.connect.storage.partitioner.PartitionerConfig), GenericRecommender (io.confluent.connect.storage.common.GenericRecommender), Collections (java.util.Collections)
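
The SSEA_CONFIG validator above accepts the empty string plus the string form of every SSEAlgorithm constant. A small illustrative sketch of what that value set contains and how a configured value could be carried onto upload metadata (the variable names here are examples, not the connector's actual storage code):

for (SSEAlgorithm algo : SSEAlgorithm.values()) {
    // Currently prints: AES256 -> AES256 and KMS -> aws:kms,
    // so the accepted config values are "", "AES256" and "aws:kms".
    System.out.println(algo.name() + " -> " + algo.toString());
}
String configuredSsea = "aws:kms"; // e.g. the value read for SSEA_CONFIG
if (!configuredSsea.isEmpty()) {
    ObjectMetadata uploadMetadata = new ObjectMetadata();
    // The algorithm string can be passed straight through to the S3 request metadata.
    uploadMetadata.setSSEAlgorithm(configuredSsea);
}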

Example 4 with SSEAlgorithm

Use of com.amazonaws.services.s3.model.SSEAlgorithm in project beam by apache.

From the class S3WritableByteChannelTest, method write:

private void write(AmazonS3 mockAmazonS3, Supplier channelSupplier, S3ResourceId path, String sseAlgorithm, String sseCustomerKeyMd5, SSEAwsKeyManagementParams sseAwsKeyManagementParams, long s3UploadBufferSizeBytes, boolean bucketKeyEnabled, boolean writeReadOnlyBuffer) throws IOException {
    InitiateMultipartUploadResult initiateMultipartUploadResult = new InitiateMultipartUploadResult();
    initiateMultipartUploadResult.setUploadId("upload-id");
    if (sseAlgorithm != null) {
        initiateMultipartUploadResult.setSSEAlgorithm(sseAlgorithm);
    }
    if (sseCustomerKeyMd5 != null) {
        initiateMultipartUploadResult.setSSECustomerKeyMd5(sseCustomerKeyMd5);
    }
    if (sseAwsKeyManagementParams != null) {
        sseAlgorithm = "aws:kms";
        initiateMultipartUploadResult.setSSEAlgorithm(sseAlgorithm);
    }
    initiateMultipartUploadResult.setBucketKeyEnabled(bucketKeyEnabled);
    doReturn(initiateMultipartUploadResult).when(mockAmazonS3).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
    InitiateMultipartUploadResult mockInitiateMultipartUploadResult = mockAmazonS3.initiateMultipartUpload(new InitiateMultipartUploadRequest(path.getBucket(), path.getKey()));
    assertEquals(sseAlgorithm, mockInitiateMultipartUploadResult.getSSEAlgorithm());
    assertEquals(bucketKeyEnabled, mockInitiateMultipartUploadResult.getBucketKeyEnabled());
    assertEquals(sseCustomerKeyMd5, mockInitiateMultipartUploadResult.getSSECustomerKeyMd5());
    UploadPartResult result = new UploadPartResult();
    result.setETag("etag");
    if (sseCustomerKeyMd5 != null) {
        result.setSSECustomerKeyMd5(sseCustomerKeyMd5);
    }
    doReturn(result).when(mockAmazonS3).uploadPart(any(UploadPartRequest.class));
    UploadPartResult mockUploadPartResult = mockAmazonS3.uploadPart(new UploadPartRequest());
    assertEquals(sseCustomerKeyMd5, mockUploadPartResult.getSSECustomerKeyMd5());
    int contentSize = 34_078_720;
    ByteBuffer uploadContent = ByteBuffer.allocate((int) (contentSize * 2.5));
    for (int i = 0; i < contentSize; i++) {
        uploadContent.put((byte) 0xff);
    }
    uploadContent.flip();
    S3WritableByteChannel channel = channelSupplier.get();
    int uploadedSize = channel.write(writeReadOnlyBuffer ? uploadContent.asReadOnlyBuffer() : uploadContent);
    assertEquals(contentSize, uploadedSize);
    CompleteMultipartUploadResult completeMultipartUploadResult = new CompleteMultipartUploadResult();
    doReturn(completeMultipartUploadResult).when(mockAmazonS3).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
    channel.close();
    verify(mockAmazonS3, times(2)).initiateMultipartUpload(notNull(InitiateMultipartUploadRequest.class));
    int partQuantity = (int) Math.ceil((double) contentSize / s3UploadBufferSizeBytes) + 1;
    verify(mockAmazonS3, times(partQuantity)).uploadPart(notNull(UploadPartRequest.class));
    verify(mockAmazonS3, times(1)).completeMultipartUpload(notNull(CompleteMultipartUploadRequest.class));
    verifyNoMoreInteractions(mockAmazonS3);
}
Also used: UploadPartResult (com.amazonaws.services.s3.model.UploadPartResult), InitiateMultipartUploadResult (com.amazonaws.services.s3.model.InitiateMultipartUploadResult), InitiateMultipartUploadRequest (com.amazonaws.services.s3.model.InitiateMultipartUploadRequest), UploadPartRequest (com.amazonaws.services.s3.model.UploadPartRequest), CompleteMultipartUploadResult (com.amazonaws.services.s3.model.CompleteMultipartUploadResult), ByteBuffer (java.nio.ByteBuffer), CompleteMultipartUploadRequest (com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)
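
The test verifies that the mocked results echo the SSE settings back; on a real upload those settings are requested on the InitiateMultipartUploadRequest itself. A minimal sketch, assuming an already-configured AmazonS3 client and illustrative bucket, key and KMS key id (this is not Beam's S3WritableByteChannel implementation):

InitiateMultipartUploadRequest initRequest =
        new InitiateMultipartUploadRequest("example-bucket", "path/to/object");
// SSE-S3: set the algorithm on the request's object metadata.
ObjectMetadata requestMetadata = new ObjectMetadata();
requestMetadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
initRequest.setObjectMetadata(requestMetadata);
// SSE-KMS instead: supply key management params; S3 then reports "aws:kms" as the
// SSE algorithm on the returned InitiateMultipartUploadResult, as asserted above.
// initRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams("example-kms-key-id"));
InitiateMultipartUploadResult initResult = client.initiateMultipartUpload(initRequest);
String uploadId = initResult.getUploadId();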

Example 5 with SSEAlgorithm

Use of com.amazonaws.services.s3.model.SSEAlgorithm in project nifi by apache.

From the class TestFetchS3Object, method testGetObject:

@Test
public void testGetObject() throws IOException {
    runner.setProperty(FetchS3Object.REGION, "us-east-1");
    runner.setProperty(FetchS3Object.BUCKET, "request-bucket");
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "request-key");
    runner.enqueue(new byte[0], attrs);
    S3Object s3ObjectResponse = new S3Object();
    s3ObjectResponse.setBucketName("response-bucket-name");
    s3ObjectResponse.setKey("response-key");
    s3ObjectResponse.setObjectContent(new StringInputStream("Some Content"));
    ObjectMetadata metadata = Mockito.spy(ObjectMetadata.class);
    metadata.setContentDisposition("key/path/to/file.txt");
    metadata.setContentType("text/plain");
    metadata.setContentMD5("testMD5hash");
    Date expiration = new Date();
    metadata.setExpirationTime(expiration);
    metadata.setExpirationTimeRuleId("testExpirationRuleId");
    Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("userKey1", "userValue1");
    userMetadata.put("userKey2", "userValue2");
    metadata.setUserMetadata(userMetadata);
    metadata.setSSEAlgorithm("testAlgorithm");
    Mockito.when(metadata.getETag()).thenReturn("test-etag");
    s3ObjectResponse.setObjectMetadata(metadata);
    Mockito.when(mockS3Client.getObject(Mockito.any())).thenReturn(s3ObjectResponse);
    runner.run(1);
    ArgumentCaptor<GetObjectRequest> captureRequest = ArgumentCaptor.forClass(GetObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).getObject(captureRequest.capture());
    GetObjectRequest request = captureRequest.getValue();
    assertEquals("request-bucket", request.getBucketName());
    assertEquals("request-key", request.getKey());
    assertNull(request.getVersionId());
    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    final List<MockFlowFile> ffs = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS);
    MockFlowFile ff = ffs.get(0);
    ff.assertAttributeEquals("s3.bucket", "response-bucket-name");
    ff.assertAttributeEquals(CoreAttributes.FILENAME.key(), "file.txt");
    ff.assertAttributeEquals(CoreAttributes.PATH.key(), "key/path/to");
    ff.assertAttributeEquals(CoreAttributes.ABSOLUTE_PATH.key(), "key/path/to/file.txt");
    ff.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain");
    ff.assertAttributeEquals("hash.value", "testMD5hash");
    ff.assertAttributeEquals("hash.algorithm", "MD5");
    ff.assertAttributeEquals("s3.etag", "test-etag");
    ff.assertAttributeEquals("s3.expirationTime", String.valueOf(expiration.getTime()));
    ff.assertAttributeEquals("s3.expirationTimeRuleId", "testExpirationRuleId");
    ff.assertAttributeEquals("userKey1", "userValue1");
    ff.assertAttributeEquals("userKey2", "userValue2");
    ff.assertAttributeEquals("s3.sseAlgorithm", "testAlgorithm");
    ff.assertContentEquals("Some Content");
}
Also used: MockFlowFile (org.apache.nifi.util.MockFlowFile), StringInputStream (com.amazonaws.util.StringInputStream), HashMap (java.util.HashMap), S3Object (com.amazonaws.services.s3.model.S3Object), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), GetObjectRequest (com.amazonaws.services.s3.model.GetObjectRequest), Date (java.util.Date), Test (org.junit.Test)
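
ObjectMetadata is spied rather than constructed directly because values such as the ETag and version id have no public setters and can only be stubbed. The same pattern could cover the s3.version attribute the processor also copies. A sketch of such an additional test, assuming the same runner and mockS3Client fixtures from TestFetchS3Object (illustrative, not part of the original test class):

@Test
public void testGetObjectVersionAttribute() throws IOException {
    runner.setProperty(FetchS3Object.REGION, "us-east-1");
    runner.setProperty(FetchS3Object.BUCKET, "request-bucket");
    runner.enqueue(new byte[0], java.util.Collections.singletonMap("filename", "request-key"));
    S3Object s3ObjectResponse = new S3Object();
    s3ObjectResponse.setBucketName("response-bucket-name");
    s3ObjectResponse.setObjectContent(new StringInputStream("Some Content"));
    ObjectMetadata metadata = Mockito.spy(ObjectMetadata.class);
    // getVersionId has no setter on ObjectMetadata, so stub it on the spy.
    Mockito.when(metadata.getVersionId()).thenReturn("test-version-id");
    s3ObjectResponse.setObjectMetadata(metadata);
    Mockito.when(mockS3Client.getObject(Mockito.any())).thenReturn(s3ObjectResponse);
    runner.run(1);
    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    MockFlowFile ff = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS).get(0);
    ff.assertAttributeEquals("s3.version", "test-version-id");
}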

Aggregations

HashMap (java.util.HashMap): 3
AmazonClientException (com.amazonaws.AmazonClientException): 2
GetObjectRequest (com.amazonaws.services.s3.model.GetObjectRequest): 2
ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata): 2
S3Object (com.amazonaws.services.s3.model.S3Object): 2
LinkedList (java.util.LinkedList): 2
ClientConfiguration (com.amazonaws.ClientConfiguration): 1
AWSCredentialsProvider (com.amazonaws.auth.AWSCredentialsProvider): 1
DefaultAWSCredentialsProviderChain (com.amazonaws.auth.DefaultAWSCredentialsProviderChain): 1
RegionUtils (com.amazonaws.regions.RegionUtils): 1
Regions (com.amazonaws.regions.Regions): 1
AmazonS3 (com.amazonaws.services.s3.AmazonS3): 1
AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client): 1
CannedAccessControlList (com.amazonaws.services.s3.model.CannedAccessControlList): 1
CompleteMultipartUploadRequest (com.amazonaws.services.s3.model.CompleteMultipartUploadRequest): 1
CompleteMultipartUploadResult (com.amazonaws.services.s3.model.CompleteMultipartUploadResult): 1
InitiateMultipartUploadRequest (com.amazonaws.services.s3.model.InitiateMultipartUploadRequest): 1
InitiateMultipartUploadResult (com.amazonaws.services.s3.model.InitiateMultipartUploadResult): 1
ObjectListing (com.amazonaws.services.s3.model.ObjectListing): 1
S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary): 1