
Example 11 with Filter

use of com.amazonaws.services.s3.model.Filter in project aws-athena-query-federation by awslabs.

the class VerticaMetadataHandler method doGetSplits.

/**
 * Used to split-up the reads required to scan the requested batch of partition(s).
 *
 * Here we execute the SQL on Vertica
 *
 * @param allocator Tool for creating and managing Apache Arrow Blocks.
 * @param request Provides details of the catalog, database, table, and partition(s) being queried, as well as
 * any filter predicate.
 * @return A GetSplitsResponse which primarily contains:
 * 1. A Set<Split> which represents the read operations Amazon Athena must perform by calling your read function.
 * 2. (Optional) A continuation token which allows you to paginate the generation of splits for large queries.
 */
@Override
public GetSplitsResponse doGetSplits(BlockAllocator allocator, GetSplitsRequest request) {
    // TODO: implement a continuation token for paginating splits on larger queries
    Connection connection = getConnection(request);
    Set<Split> splits = new HashSet<>();
    String exportBucket = getS3ExportBucket();
    String queryId = request.getQueryId().replace("-", "");
    // testing if the user has access to the requested table
    testAccess(connection, request.getTableName());
    // get the SQL statement which was created in getPartitions
    FieldReader fieldReaderPS = request.getPartitions().getFieldReader("preparedStmt");
    String sqlStatement = fieldReaderPS.readText().toString();
    String catalogName = request.getCatalogName();
    FieldReader fieldReaderQid = request.getPartitions().getFieldReader("queryId");
    String queryID = fieldReaderQid.readText().toString();
    FieldReader fieldReaderAwsRegion = request.getPartitions().getFieldReader("awsRegionSql");
    String awsRegionSql = fieldReaderAwsRegion.readText().toString();
    // execute the queries on Vertica
    executeQueriesOnVertica(connection, sqlStatement, awsRegionSql);
    /*
     * For each generated S3 object, create a split and add data to the split.
     */
    Split split;
    List<S3ObjectSummary> s3ObjectSummaries = getlistExportedObjects(exportBucket, queryId);
    if (!s3ObjectSummaries.isEmpty()) {
        for (S3ObjectSummary objectSummary : s3ObjectSummaries) {
            split = Split.newBuilder(makeSpillLocation(request), makeEncryptionKey()).add("query_id", queryID).add(VERTICA_CONN_STR, getConnStr(request)).add("exportBucket", exportBucket).add("s3ObjectKey", objectSummary.getKey()).build();
            splits.add(split);
        }
        logger.info("doGetSplits: exit - " + splits.size());
        return new GetSplitsResponse(catalogName, splits);
    } else {
        // No records were exported by Vertica for the issued query, so create an "empty" split
        logger.info("No records were exported by Vertica");
        split = Split.newBuilder(makeSpillLocation(request), makeEncryptionKey()).add("query_id", queryID).add(VERTICA_CONN_STR, getConnStr(request)).add("exportBucket", exportBucket).add("s3ObjectKey", EMPTY_STRING).build();
        splits.add(split);
        logger.info("doGetSplits: exit - " + splits.size());
        return new GetSplitsResponse(catalogName, split);
    }
}
Also used : S3ObjectSummary(com.amazonaws.services.s3.model.S3ObjectSummary) Split(com.amazonaws.athena.connector.lambda.domain.Split) FieldReader(org.apache.arrow.vector.complex.reader.FieldReader)
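
The split creation above relies on a helper, getlistExportedObjects(exportBucket, queryId), that is not shown. A minimal sketch of what such a listing could look like with the SDK v1 S3 client, assuming the exported files are written under the cleaned query ID as a key prefix (the class name, the client field, and that prefix convention are assumptions, not the connector's actual implementation):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import java.util.ArrayList;
import java.util.List;

public class ExportedObjectLister {
    private final AmazonS3 amazonS3 = AmazonS3ClientBuilder.standard().build();

    // Lists the objects Vertica exported for this query, assuming they share the query ID as a key prefix.
    public List<S3ObjectSummary> listExportedObjects(String exportBucket, String queryId) {
        List<S3ObjectSummary> summaries = new ArrayList<>();
        // listObjects returns at most 1000 keys per page, so follow the pagination markers
        ObjectListing listing = amazonS3.listObjects(exportBucket, queryId);
        summaries.addAll(listing.getObjectSummaries());
        while (listing.isTruncated()) {
            listing = amazonS3.listNextBatchOfObjects(listing);
            summaries.addAll(listing.getObjectSummaries());
        }
        return summaries;
    }
}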

Example 12 with Filter

use of com.amazonaws.services.s3.model.Filter in project big-data-plugin by pentaho.

the class S3CommonFileSystem method getS3Client.

public AmazonS3 getS3Client() {
    S3CommonFileSystemConfigBuilder s3CommonFileSystemConfigBuilder = new S3CommonFileSystemConfigBuilder(getFileSystemOptions());
    Optional<? extends ConnectionDetails> defaultS3Connection = Optional.empty();
    try {
        defaultS3Connection = connectionManager.get().getConnectionDetailsByScheme("s3").stream().filter(connectionDetails -> connectionDetails.getProperties().get(DEFAULT_S3_CONFIG_PROPERTY) != null && connectionDetails.getProperties().get(DEFAULT_S3_CONFIG_PROPERTY).equalsIgnoreCase("true")).findFirst();
    } catch (Exception ignored) {
    // Ignore the exception, it's OK if we can't find a default S3 connection.
    }
    // Use a specified default PVFS connection if it's available.
    if (s3CommonFileSystemConfigBuilder.getName() == null) {
        // Copy the connection properties
        Map<String, String> newConnectionProperties = new HashMap<>();
        defaultS3Connection.ifPresent(connectionDetails -> newConnectionProperties.putAll(connectionDetails.getProperties()));
        // Have the default connection properties changed?
        if (!newConnectionProperties.equals(currentConnectionProperties)) {
            // Force a new connection if the default PVFS was changed
            client = null;
            // Track the new connection
            currentConnectionProperties = newConnectionProperties;
            // Clear the file system cache as the credentials have changed and the cache is now invalid.
            this.getFileSystemManager().getFilesCache().clear(this);
        }
    }
    if (currentFileSystemOptions != null && !currentFileSystemOptions.equals(getFileSystemOptions())) {
        client = null;
        this.getFileSystemManager().getFilesCache().clear(this);
    }
    if (client == null && getFileSystemOptions() != null) {
        currentFileSystemOptions = getFileSystemOptions();
        String accessKey = null;
        String secretKey = null;
        String sessionToken = null;
        String region = null;
        String credentialsFilePath = null;
        String profileName = null;
        String endpoint = null;
        String signatureVersion = null;
        String pathStyleAccess = null;
        if (s3CommonFileSystemConfigBuilder.getName() == null && defaultS3Connection.isPresent()) {
            accessKey = Encr.decryptPassword(currentConnectionProperties.get("accessKey"));
            secretKey = Encr.decryptPassword(currentConnectionProperties.get("secretKey"));
            sessionToken = Encr.decryptPassword(currentConnectionProperties.get("sessionToken"));
            region = currentConnectionProperties.get("region");
            credentialsFilePath = currentConnectionProperties.get("credentialsFilePath");
            profileName = currentConnectionProperties.get("profileName");
            endpoint = currentConnectionProperties.get("endpoint");
            signatureVersion = currentConnectionProperties.get("signatureVersion");
            pathStyleAccess = currentConnectionProperties.get("pathStyleAccess");
        } else {
            accessKey = s3CommonFileSystemConfigBuilder.getAccessKey();
            secretKey = s3CommonFileSystemConfigBuilder.getSecretKey();
            sessionToken = s3CommonFileSystemConfigBuilder.getSessionToken();
            region = s3CommonFileSystemConfigBuilder.getRegion();
            credentialsFilePath = s3CommonFileSystemConfigBuilder.getCredentialsFile();
            profileName = s3CommonFileSystemConfigBuilder.getProfileName();
            endpoint = s3CommonFileSystemConfigBuilder.getEndpoint();
            signatureVersion = s3CommonFileSystemConfigBuilder.getSignatureVersion();
            pathStyleAccess = s3CommonFileSystemConfigBuilder.getPathStyleAccess();
        }
        boolean access = (pathStyleAccess == null) || Boolean.parseBoolean(pathStyleAccess);
        AWSCredentialsProvider awsCredentialsProvider = null;
        Regions regions = Regions.DEFAULT_REGION;
        S3Util.S3Keys keys = S3Util.getKeysFromURI(getRootURI());
        if (keys != null) {
            accessKey = keys.getAccessKey();
            secretKey = keys.getSecretKey();
        }
        if (!S3Util.isEmpty(accessKey) && !S3Util.isEmpty(secretKey)) {
            AWSCredentials awsCredentials;
            if (S3Util.isEmpty(sessionToken)) {
                awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
            } else {
                awsCredentials = new BasicSessionCredentials(accessKey, secretKey, sessionToken);
            }
            awsCredentialsProvider = new AWSStaticCredentialsProvider(awsCredentials);
            regions = S3Util.isEmpty(region) ? Regions.DEFAULT_REGION : Regions.fromName(region);
        } else if (!S3Util.isEmpty(credentialsFilePath)) {
            ProfilesConfigFile profilesConfigFile = new ProfilesConfigFile(credentialsFilePath);
            awsCredentialsProvider = new ProfileCredentialsProvider(profilesConfigFile, profileName);
        }
        if (!S3Util.isEmpty(endpoint)) {
            ClientConfiguration clientConfiguration = new ClientConfiguration();
            clientConfiguration.setSignerOverride(S3Util.isEmpty(signatureVersion) ? S3Util.SIGNATURE_VERSION_SYSTEM_PROPERTY : signatureVersion);
            client = AmazonS3ClientBuilder.standard()
                    .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, regions.getName()))
                    .withPathStyleAccessEnabled(access)
                    .withClientConfiguration(clientConfiguration)
                    .withCredentials(awsCredentialsProvider)
                    .build();
        } else {
            AmazonS3ClientBuilder clientBuilder = AmazonS3ClientBuilder.standard()
                    .enableForceGlobalBucketAccess()
                    .withCredentials(awsCredentialsProvider);
            if (!isRegionSet()) {
                clientBuilder.withRegion(regions);
            }
            client = clientBuilder.build();
        }
    }
    if (client == null || hasClientChangedCredentials()) {
        try {
            if (isRegionSet()) {
                client = AmazonS3ClientBuilder.standard().enableForceGlobalBucketAccess().build();
            } else {
                client = AmazonS3ClientBuilder.standard().enableForceGlobalBucketAccess().withRegion(Regions.DEFAULT_REGION).build();
            }
            awsAccessKeyCache = System.getProperty(S3Util.ACCESS_KEY_SYSTEM_PROPERTY);
            awsSecretKeyCache = System.getProperty(S3Util.SECRET_KEY_SYSTEM_PROPERTY);
        } catch (Exception ex) {
            logger.error("Could not get an S3Client", ex);
        }
    }
    return client;
}
Also used : BasicSessionCredentials(com.amazonaws.auth.BasicSessionCredentials) HashMap(java.util.HashMap) AmazonS3ClientBuilder(com.amazonaws.services.s3.AmazonS3ClientBuilder) Regions(com.amazonaws.regions.Regions) AWSCredentials(com.amazonaws.auth.AWSCredentials) BasicAWSCredentials(com.amazonaws.auth.BasicAWSCredentials) BasicAWSCredentials(com.amazonaws.auth.BasicAWSCredentials) AWSStaticCredentialsProvider(com.amazonaws.auth.AWSStaticCredentialsProvider) ProfileCredentialsProvider(com.amazonaws.auth.profile.ProfileCredentialsProvider) S3Util(org.pentaho.amazon.s3.S3Util) ProfilesConfigFile(com.amazonaws.auth.profile.ProfilesConfigFile) AWSCredentialsProvider(com.amazonaws.auth.AWSCredentialsProvider) ClientConfiguration(com.amazonaws.ClientConfiguration)
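
The endpoint branch above is what lets the plugin talk to S3-compatible object stores. A standalone sketch of that same pattern, assuming a hypothetical MinIO-style endpoint with static keys; every literal value below (endpoint, region, keys, signer name) is a placeholder, and a real deployment would read them from the PVFS connection properties the way getS3Client() does:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class CustomEndpointClientExample {
    public static AmazonS3 buildClient() {
        // Placeholder connection settings for an S3-compatible store
        String endpoint = "http://localhost:9000";
        String region = "us-east-1";
        BasicAWSCredentials credentials = new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY");

        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setSignerOverride("AWSS3V4SignerType");

        return AmazonS3ClientBuilder.standard()
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, region))
                // path-style access is typical for non-AWS, S3-compatible endpoints
                .withPathStyleAccessEnabled(true)
                .withClientConfiguration(clientConfiguration)
                .withCredentials(new AWSStaticCredentialsProvider(credentials))
                .build();
    }
}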

Example 13 with Filter

use of com.amazonaws.services.ec2.model.Filter in project bazel-buildfarm by bazelbuild.

the class AwsAdmin method getInstanceId.

private Instance getInstanceId(String privateDnsName) {
    DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest().withFilters(new Filter().withName("private-dns-name").withValues(privateDnsName));
    DescribeInstancesResult instancesResult = ec2.describeInstances(describeInstancesRequest);
    for (Reservation r : instancesResult.getReservations()) {
        for (Instance e : r.getInstances()) {
            if (e.getPrivateDnsName() != null && e.getPrivateDnsName().equals(privateDnsName)) {
                return e;
            }
        }
    }
    return null;
}
Also used : DescribeInstancesResult(com.amazonaws.services.ec2.model.DescribeInstancesResult) Reservation(com.amazonaws.services.ec2.model.Reservation) Filter(com.amazonaws.services.ec2.model.Filter) Instance(com.amazonaws.services.ec2.model.Instance) DescribeInstancesRequest(com.amazonaws.services.ec2.model.DescribeInstancesRequest)
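
DescribeInstances responses are paginated, and the lookup above only inspects the first page of reservations. A hedged sketch of the same private-dns-name filter with next-token handling; the class wrapper and the ec2 client construction are assumptions added to make the snippet self-contained:

import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
import com.amazonaws.services.ec2.model.DescribeInstancesResult;
import com.amazonaws.services.ec2.model.Filter;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.Reservation;

public class PaginatedInstanceLookup {
    private final AmazonEC2 ec2 = AmazonEC2ClientBuilder.defaultClient();

    public Instance getInstanceByPrivateDnsName(String privateDnsName) {
        DescribeInstancesRequest request = new DescribeInstancesRequest()
                .withFilters(new Filter().withName("private-dns-name").withValues(privateDnsName));
        DescribeInstancesResult result;
        do {
            result = ec2.describeInstances(request);
            for (Reservation reservation : result.getReservations()) {
                for (Instance instance : reservation.getInstances()) {
                    if (privateDnsName.equals(instance.getPrivateDnsName())) {
                        return instance;
                    }
                }
            }
            // keep requesting pages until the next token is exhausted
            request.setNextToken(result.getNextToken());
        } while (result.getNextToken() != null);
        return null;
    }
}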

Example 14 with Filter

use of com.amazonaws.services.ec2.model.Filter in project bazel-buildfarm by bazelbuild.

the class AwsAdmin method getHosts.

@Override
public GetHostsResult getHosts(String filter, int ageInMinutes, String status) {
    GetHostsResult.Builder resultBuilder = GetHostsResult.newBuilder();
    List<Host> hosts = new ArrayList<>();
    DescribeInstancesResult instancesResult = ec2.describeInstances(new DescribeInstancesRequest().withFilters(new Filter().withName("tag-value").withValues(filter)));
    long hostNum = 1L;
    for (Reservation r : instancesResult.getReservations()) {
        for (Instance e : r.getInstances()) {
            long uptime = getHostUptimeInMinutes(e.getLaunchTime());
            if (e.getPrivateIpAddress() != null && uptime > ageInMinutes && status.equalsIgnoreCase(e.getState().getName())) {
                Host.Builder hostBuilder = Host.newBuilder();
                hostBuilder.setHostNum(hostNum++);
                hostBuilder.setDnsName(e.getPrivateDnsName());
                hostBuilder.setHostId(e.getInstanceId());
                hostBuilder.setIpAddress(e.getPrivateIpAddress());
                hostBuilder.setLaunchTime(Timestamps.fromMillis(e.getLaunchTime().getTime()));
                hostBuilder.setLifecycle(e.getInstanceLifecycle() != null ? e.getInstanceLifecycle() : "on demand");
                hostBuilder.setNumCores(e.getCpuOptions().getCoreCount());
                hostBuilder.setState(e.getState().getName());
                hostBuilder.setType(e.getInstanceType());
                hostBuilder.setUptimeMinutes(uptime);
                hosts.add(hostBuilder.build());
            }
        }
    }
    resultBuilder.addAllHosts(hosts);
    resultBuilder.setNumHosts(hosts.size());
    logger.log(Level.FINE, String.format("Got %d hosts for filter: %s", hosts.size(), filter));
    return resultBuilder.build();
}
Also used : DescribeInstancesResult(com.amazonaws.services.ec2.model.DescribeInstancesResult) GetHostsResult(build.buildfarm.v1test.GetHostsResult) Reservation(com.amazonaws.services.ec2.model.Reservation) Filter(com.amazonaws.services.ec2.model.Filter) Instance(com.amazonaws.services.ec2.model.Instance) ArrayList(java.util.ArrayList) Host(build.buildfarm.v1test.Host) DescribeInstancesRequest(com.amazonaws.services.ec2.model.DescribeInstancesRequest)
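
getHostUptimeInMinutes is referenced above but not shown. A plausible reconstruction under the assumption that uptime is measured against the current wall clock; this is a hypothetical helper, not necessarily how AwsAdmin defines it:

import java.util.Date;
import java.util.concurrent.TimeUnit;

public class HostUptime {
    // Hypothetical reconstruction: minutes elapsed since the instance's launch time.
    static long getHostUptimeInMinutes(Date launchTime) {
        long elapsedMillis = System.currentTimeMillis() - launchTime.getTime();
        return TimeUnit.MILLISECONDS.toMinutes(elapsedMillis);
    }
}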

Example 15 with Filter

use of com.amazonaws.services.ec2.model.Filter in project bazel-buildfarm by bazelbuild.

the class AdminServiceImpl method getInstanceIdByPrivateDnsName.

@Override
public String getInstanceIdByPrivateDnsName(String dnsName) {
    Filter filter = new Filter().withName("private-dns-name").withValues(dnsName);
    DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest().withFilters(filter);
    DescribeInstancesResult instancesResult = ec2.describeInstances(describeInstancesRequest);
    for (Reservation r : instancesResult.getReservations()) {
        for (com.amazonaws.services.ec2.model.Instance e : r.getInstances()) {
            if (e.getPrivateDnsName() != null && e.getPrivateDnsName().equals(dnsName)) {
                return e.getInstanceId();
            }
        }
    }
    return null;
}
Also used : DescribeInstancesResult(com.amazonaws.services.ec2.model.DescribeInstancesResult) Reservation(com.amazonaws.services.ec2.model.Reservation) Filter(com.amazonaws.services.ec2.model.Filter) DescribeInstancesRequest(com.amazonaws.services.ec2.model.DescribeInstancesRequest)
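
The ec2 field used by these DescribeInstances calls is not shown in either buildfarm example. A minimal sketch of constructing it with the SDK v1 builder; the region constant is a placeholder and credentials come from the default provider chain:

import com.amazonaws.regions.Regions;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;

public class Ec2ClientFactory {
    // Placeholder region; swap in the deployment's region or rely on the default chain.
    static AmazonEC2 buildEc2Client() {
        return AmazonEC2ClientBuilder.standard()
                .withRegion(Regions.US_EAST_1)
                .build();
    }
}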

Aggregations

Filter (com.amazonaws.services.ec2.model.Filter): 96 usages
ArrayList (java.util.ArrayList): 70 usages
List (java.util.List): 52 usages
Collectors (java.util.stream.Collectors): 46 usages
IOException (java.io.IOException): 41 usages
HashMap (java.util.HashMap): 38 usages
Map (java.util.Map): 35 usages
AmazonS3 (com.amazonaws.services.s3.AmazonS3): 34 usages
Set (java.util.Set): 31 usages
DescribeInstancesRequest (com.amazonaws.services.ec2.model.DescribeInstancesRequest): 30 usages
S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary): 27 usages
Instance (com.amazonaws.services.ec2.model.Instance): 26 usages
HashSet (java.util.HashSet): 26 usages
Reservation (com.amazonaws.services.ec2.model.Reservation): 24 usages
Collections (java.util.Collections): 23 usages
DescribeInstancesResult (com.amazonaws.services.ec2.model.DescribeInstancesResult): 21 usages
ObjectListing (com.amazonaws.services.s3.model.ObjectListing): 21 usages
DescribeSubnetsRequest (com.amazonaws.services.ec2.model.DescribeSubnetsRequest): 20 usages
Entry (java.util.Map.Entry): 20 usages
Tag (com.amazonaws.services.ec2.model.Tag): 18 usages