
Example 66 with Cluster

use of io.envoyproxy.envoy.config.cluster.v3.Cluster in project grpc-java by grpc.

the class ClientXdsClientDataTest method parseCluster_leastRequestLbPolicy_invalidChoiceCountConfig_tooSmallChoiceCount.

@Test
public void parseCluster_leastRequestLbPolicy_invalidChoiceCountConfig_tooSmallChoiceCount() throws ResourceInvalidException {
    ClientXdsClient.enableLeastRequest = true;
    Cluster cluster = Cluster.newBuilder()
        .setName("cluster-foo.googleapis.com")
        .setType(DiscoveryType.EDS)
        .setEdsClusterConfig(EdsClusterConfig.newBuilder()
            .setEdsConfig(ConfigSource.newBuilder()
                .setAds(AggregatedConfigSource.getDefaultInstance()))
            .setServiceName("service-foo.googleapis.com"))
        .setLbPolicy(LbPolicy.LEAST_REQUEST)
        .setLeastRequestLbConfig(LeastRequestLbConfig.newBuilder()
            .setChoiceCount(UInt32Value.newBuilder().setValue(1)))
        .build();
    thrown.expect(ResourceInvalidException.class);
    thrown.expectMessage("Cluster cluster-foo.googleapis.com: invalid least_request_lb_config");
    ClientXdsClient.processCluster(cluster, new HashSet<String>(), null, LRS_SERVER_INFO);
}
Also used : WeightedCluster(io.envoyproxy.envoy.config.route.v3.WeightedCluster) Cluster(io.envoyproxy.envoy.config.cluster.v3.Cluster) Test(org.junit.Test)
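For contrast, here is a minimal sketch of a least_request_lb_config that this validation should accept. It reuses the builders from the test above and assumes, consistent with the Envoy API (where choice_count defaults to 2 and must be at least 2), that a choiceCount of 2 passes:

    // Hedged sketch: a choice_count >= 2 is assumed to satisfy processCluster.
    Cluster validCluster = Cluster.newBuilder()
        .setName("cluster-foo.googleapis.com")
        .setType(DiscoveryType.EDS)
        .setEdsClusterConfig(EdsClusterConfig.newBuilder()
            .setEdsConfig(ConfigSource.newBuilder()
                .setAds(AggregatedConfigSource.getDefaultInstance()))
            .setServiceName("service-foo.googleapis.com"))
        .setLbPolicy(LbPolicy.LEAST_REQUEST)
        .setLeastRequestLbConfig(LeastRequestLbConfig.newBuilder()
            .setChoiceCount(UInt32Value.newBuilder().setValue(2)))
        .build();
    // Expected not to throw under the assumption above.
    ClientXdsClient.processCluster(validCluster, new HashSet<String>(), null, LRS_SERVER_INFO);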

Example 67 with Cluster

use of io.envoyproxy.envoy.config.cluster.v3.Cluster in project grpc-java by grpc.

the class ClientXdsClientDataTest method parseCluster_ringHashLbPolicy_invalidRingSizeConfig_minGreaterThanMax.

@Test
public void parseCluster_ringHashLbPolicy_invalidRingSizeConfig_minGreaterThanMax() throws ResourceInvalidException {
    Cluster cluster = Cluster.newBuilder()
        .setName("cluster-foo.googleapis.com")
        .setType(DiscoveryType.EDS)
        .setEdsClusterConfig(EdsClusterConfig.newBuilder()
            .setEdsConfig(ConfigSource.newBuilder()
                .setAds(AggregatedConfigSource.getDefaultInstance()))
            .setServiceName("service-foo.googleapis.com"))
        .setLbPolicy(LbPolicy.RING_HASH)
        .setRingHashLbConfig(RingHashLbConfig.newBuilder()
            .setHashFunction(HashFunction.XX_HASH)
            .setMinimumRingSize(UInt64Value.newBuilder().setValue(1000L))
            .setMaximumRingSize(UInt64Value.newBuilder().setValue(100L)))
        .build();
    thrown.expect(ResourceInvalidException.class);
    thrown.expectMessage("Cluster cluster-foo.googleapis.com: invalid ring_hash_lb_config");
    ClientXdsClient.processCluster(cluster, new HashSet<String>(), null, LRS_SERVER_INFO);
}
Also used : WeightedCluster(io.envoyproxy.envoy.config.route.v3.WeightedCluster) Cluster(io.envoyproxy.envoy.config.cluster.v3.Cluster) Test(org.junit.Test)
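The complementary valid case, as a sketch under the same assumptions: a ring_hash_lb_config whose minimum ring size does not exceed its maximum should be accepted.

    // Hedged sketch: swapping the two ring sizes from the test above (min 100, max 1000).
    Cluster validCluster = Cluster.newBuilder()
        .setName("cluster-foo.googleapis.com")
        .setType(DiscoveryType.EDS)
        .setEdsClusterConfig(EdsClusterConfig.newBuilder()
            .setEdsConfig(ConfigSource.newBuilder()
                .setAds(AggregatedConfigSource.getDefaultInstance()))
            .setServiceName("service-foo.googleapis.com"))
        .setLbPolicy(LbPolicy.RING_HASH)
        .setRingHashLbConfig(RingHashLbConfig.newBuilder()
            .setHashFunction(HashFunction.XX_HASH)
            .setMinimumRingSize(UInt64Value.newBuilder().setValue(100L))
            .setMaximumRingSize(UInt64Value.newBuilder().setValue(1000L)))
        .build();
    // Expected not to throw under the assumption above.
    ClientXdsClient.processCluster(validCluster, new HashSet<String>(), null, LRS_SERVER_INFO);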

Example 68 with Cluster

use of com.google.cloud.dataproc.v1.Cluster in project cdap by caskdata.

the class DataprocClient method createCluster.

/**
 * Create a cluster. This will return after the initial request to create the cluster is completed.
 * At this point, the cluster is likely not yet running, but in a provisioning state.
 *
 * @param name         the name of the cluster to create
 * @param imageVersion the image version for the cluster
 * @param labels       labels to set on the cluster
 * @param privateInstance {@code true} to indicate using a private instance
 * @return create operation metadata
 * @throws InterruptedException        if the thread was interrupted while waiting for the initial request to complete
 * @throws AlreadyExistsException      if the cluster already exists
 * @throws IOException                 if there was an I/O error talking to Google Compute APIs
 * @throws RetryableProvisionException if there was a non-4xx error code returned
 */
ClusterOperationMetadata createCluster(String name, String imageVersion, Map<String, String> labels, boolean privateInstance) throws RetryableProvisionException, InterruptedException, IOException {
    if (network == null) {
        // this should not happen, since the network is expected to be resolved before it is used to create a cluster
        throw new IllegalArgumentException("Missing network information");
    }
    try {
        Map<String, String> metadata = new HashMap<>();
        SSHPublicKey publicKey = conf.getPublicKey();
        if (publicKey != null) {
            // Don't fail if there is no public key; in some tooling cases the key may be generated differently.
            metadata.put("ssh-keys", publicKey.getUser() + ":" + publicKey.getKey());
            // override any os-login that may be set on the project-level metadata
            // this metadata is only needed if ssh is being used to launch the jobs - CDAP-15369
            metadata.put("enable-oslogin", "false");
        }
        // Add any cluster metadata provided in the configuration.
        metadata.putAll(conf.getClusterMetaData());
        GceClusterConfig.Builder clusterConfig = GceClusterConfig.newBuilder()
            .addServiceAccountScopes(DataprocConf.CLOUD_PLATFORM_SCOPE)
            .setShieldedInstanceConfig(ShieldedInstanceConfig.newBuilder()
                .setEnableSecureBoot(conf.isSecureBootEnabled())
                .setEnableVtpm(conf.isvTpmEnabled())
                .setEnableIntegrityMonitoring(conf.isIntegrityMonitoringEnabled())
                .build())
            .putAllMetadata(metadata);
        if (conf.getServiceAccount() != null) {
            clusterConfig.setServiceAccount(conf.getServiceAccount());
        }
        if (conf.getZone() != null) {
            clusterConfig.setZoneUri(conf.getZone());
        }
        // subnets are unique within a location, not within a network, which is why these configs are mutually exclusive.
        if (conf.getSubnet() != null) {
            clusterConfig.setSubnetworkUri(conf.getSubnet());
        } else {
            clusterConfig.setNetworkUri(network.getSelfLink());
        }
        // Add any defined Network Tags
        clusterConfig.addAllTags(conf.getNetworkTags());
        boolean internalIPOnly = isInternalIPOnly(network, privateInstance, publicKey != null);
        // if the public key is not null, ssh is used to launch / monitor jobs on dataproc
        if (publicKey != null) {
            int maxTags = Math.max(0, DataprocConf.MAX_NETWORK_TAGS - clusterConfig.getTagsCount());
            List<String> tags = getFirewallTargetTags(network, internalIPOnly);
            if (tags.size() > maxTags) {
                LOG.warn("No more than 64 tags can be added. Firewall tags ignored: {}", tags.subList(maxTags, tags.size()));
            }
            tags.stream().limit(maxTags).forEach(clusterConfig::addTags);
        }
        // if internal IP is preferred, create the dataproc cluster without external IP for better security
        clusterConfig.setInternalIpOnly(internalIPOnly);
        Map<String, String> clusterProperties = new HashMap<>(conf.getClusterProperties());
        // Enable/Disable stackdriver
        clusterProperties.put("dataproc:dataproc.logging.stackdriver.enable", Boolean.toString(conf.isStackdriverLoggingEnabled()));
        clusterProperties.put("dataproc:dataproc.monitoring.stackdriver.enable", Boolean.toString(conf.isStackdriverMonitoringEnabled()));
        DiskConfig workerDiskConfig = DiskConfig.newBuilder()
            .setBootDiskSizeGb(conf.getWorkerDiskGB())
            .setBootDiskType(conf.getWorkerDiskType())
            .setNumLocalSsds(0)
            .build();
        InstanceGroupConfig.Builder primaryWorkerConfig = InstanceGroupConfig.newBuilder()
            .setNumInstances(conf.getWorkerNumNodes())
            .setMachineTypeUri(conf.getWorkerMachineType())
            .setDiskConfig(workerDiskConfig);
        InstanceGroupConfig.Builder secondaryWorkerConfig = InstanceGroupConfig.newBuilder()
            .setNumInstances(conf.getSecondaryWorkerNumNodes())
            .setMachineTypeUri(conf.getWorkerMachineType())
            .setPreemptibility(InstanceGroupConfig.Preemptibility.NON_PREEMPTIBLE)
            .setDiskConfig(workerDiskConfig);
        // Set default concurrency settings for a fixed-size (non-autoscaling) cluster
        if (Strings.isNullOrEmpty(conf.getAutoScalingPolicy()) && !conf.isPredefinedAutoScaleEnabled()) {
            // Set spark.default.parallelism according to cluster size.
            // Spark defaults it to the number of current executors, but when we configure the job
            // the executors may not have started yet, so that value would be artificially low.
            int defaultConcurrency = Math.max(conf.getTotalWorkerCPUs(), MIN_DEFAULT_CONCURRENCY);
            // Set spark.sql.adaptive.coalescePartitions.initialPartitionNum to 32x the default parallelism,
            // but no more than 8192. This value is only used in Spark 3 with adaptive execution;
            // in our tests Spark handled very large values well, so 32x is a reasonable default.
            int initialPartitionNum = Math.min(Math.max(conf.getTotalWorkerCPUs() * PARTITION_NUM_FACTOR, MIN_INITIAL_PARTITIONS_DEFAULT), MAX_INITIAL_PARTITIONS_DEFAULT);
            clusterProperties.putIfAbsent("spark:spark.default.parallelism", Integer.toString(defaultConcurrency));
            clusterProperties.putIfAbsent("spark:spark.sql.adaptive.coalescePartitions.initialPartitionNum", Integer.toString(initialPartitionNum));
        }
        SoftwareConfig.Builder softwareConfigBuilder = SoftwareConfig.newBuilder().putAllProperties(clusterProperties);
        // Use the image version only if a custom image URI is not specified; setting both may cause image version conflicts
        if (conf.getCustomImageUri() == null || conf.getCustomImageUri().isEmpty()) {
            softwareConfigBuilder.setImageVersion(imageVersion);
        } else {
            // If custom Image URI is specified, use that for cluster creation
            primaryWorkerConfig.setImageUri(conf.getCustomImageUri());
            secondaryWorkerConfig.setImageUri(conf.getCustomImageUri());
        }
        ClusterConfig.Builder builder = ClusterConfig.newBuilder()
            .setEndpointConfig(EndpointConfig.newBuilder()
                .setEnableHttpPortAccess(conf.isComponentGatewayEnabled())
                .build())
            .setMasterConfig(InstanceGroupConfig.newBuilder()
                .setNumInstances(conf.getMasterNumNodes())
                .setMachineTypeUri(conf.getMasterMachineType())
                .setDiskConfig(DiskConfig.newBuilder()
                    .setBootDiskType(conf.getMasterDiskType())
                    .setBootDiskSizeGb(conf.getMasterDiskGB())
                    .setNumLocalSsds(0)
                    .build())
                .build())
            .setWorkerConfig(primaryWorkerConfig.build())
            .setSecondaryWorkerConfig(secondaryWorkerConfig.build())
            .setGceClusterConfig(clusterConfig.build())
            .setSoftwareConfig(softwareConfigBuilder);
        // Set the cluster idle TTL if one is configured
        if (conf.getIdleTTLMinutes() > 0) {
            long seconds = TimeUnit.MINUTES.toSeconds(conf.getIdleTTLMinutes());
            builder.setLifecycleConfig(LifecycleConfig.newBuilder().setIdleDeleteTtl(Duration.newBuilder().setSeconds(seconds).build()).build());
        }
        // Add any Node Initialization action scripts
        for (String action : conf.getInitActions()) {
            builder.addInitializationActions(NodeInitializationAction.newBuilder().setExecutableFile(action).build());
        }
        // Set Auto Scaling Policy
        String autoScalingPolicy = conf.getAutoScalingPolicy();
        if (conf.isPredefinedAutoScaleEnabled()) {
            PredefinedAutoScaling predefinedAutoScaling = new PredefinedAutoScaling(conf);
            autoScalingPolicy = predefinedAutoScaling.createPredefinedAutoScalingPolicy();
        }
        if (!Strings.isNullOrEmpty(autoScalingPolicy)) {
            // Check whether the policy is a URI or an ID; if it is an ID, convert it to a URI
            if (!autoScalingPolicy.contains("/")) {
                autoScalingPolicy = "projects/" + conf.getProjectId() + "/regions/" + conf.getRegion() + "/autoscalingPolicies/" + autoScalingPolicy;
            }
            builder.setAutoscalingConfig(AutoscalingConfig.newBuilder().setPolicyUri(autoScalingPolicy).build());
        }
        if (conf.getEncryptionKeyName() != null) {
            builder.setEncryptionConfig(EncryptionConfig.newBuilder().setGcePdKmsKeyName(conf.getEncryptionKeyName()).build());
        }
        if (conf.getGcsBucket() != null) {
            builder.setConfigBucket(conf.getGcsBucket());
        }
        Cluster cluster = com.google.cloud.dataproc.v1.Cluster.newBuilder()
            .setClusterName(name)
            .putAllLabels(labels)
            .setConfig(builder.build())
            .build();
        OperationFuture<Cluster, ClusterOperationMetadata> operationFuture = client.createClusterAsync(conf.getProjectId(), conf.getRegion(), cluster);
        return operationFuture.getMetadata().get();
    } catch (ExecutionException e) {
        cleanUpClusterAfterCreationFailure(name);
        Throwable cause = e.getCause();
        if (cause instanceof ApiException) {
            throw handleApiException((ApiException) cause);
        }
        throw new DataprocRuntimeException(cause);
    }
}
Also used : ClusterOperationMetadata(com.google.cloud.dataproc.v1.ClusterOperationMetadata) HashMap(java.util.HashMap) DiskConfig(com.google.cloud.dataproc.v1.DiskConfig) SoftwareConfig(com.google.cloud.dataproc.v1.SoftwareConfig) ExecutionException(java.util.concurrent.ExecutionException) InstanceGroupConfig(com.google.cloud.dataproc.v1.InstanceGroupConfig) GceClusterConfig(com.google.cloud.dataproc.v1.GceClusterConfig) Cluster(com.google.cloud.dataproc.v1.Cluster) SSHPublicKey(io.cdap.cdap.runtime.spi.ssh.SSHPublicKey) ClusterConfig(com.google.cloud.dataproc.v1.ClusterConfig) ApiException(com.google.api.gax.rpc.ApiException)
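The firewall-tag trimming in createCluster is the subtle part, so here is a self-contained sketch of just that arithmetic. The class name and the constant 64 are assumptions for illustration; 64 mirrors what the warning message in the method implies DataprocConf.MAX_NETWORK_TAGS to be.

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class TagLimitDemo {
        // Assumed to mirror DataprocConf.MAX_NETWORK_TAGS (see the warning above).
        static final int MAX_NETWORK_TAGS = 64;

        public static void main(String[] args) {
            int existingTagCount = 62;  // tags already on the cluster config
            int maxTags = Math.max(0, MAX_NETWORK_TAGS - existingTagCount);
            List<String> firewallTags = Arrays.asList("tag-a", "tag-b", "tag-c");
            if (firewallTags.size() > maxTags) {
                // same reporting shape as the LOG.warn in createCluster
                System.out.println("Ignored: " + firewallTags.subList(maxTags, firewallTags.size()));
            }
            List<String> applied = firewallTags.stream().limit(maxTags).collect(Collectors.toList());
            System.out.println("Applied: " + applied);  // prints [tag-a, tag-b]
        }
    }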

Example 69 with Cluster

use of com.google.cloud.dataproc.v1.Cluster in project cdap by caskdata.

the class DataprocClient method getCluster.

/**
 * Get information about the specified cluster. The returned Optional will be empty if the cluster could not be found.
 *
 * @param name the cluster name
 * @return the cluster information if it exists
 * @throws RetryableProvisionException if there was a non-4xx error code returned
 */
Optional<io.cdap.cdap.runtime.spi.provisioner.Cluster> getCluster(String name) throws RetryableProvisionException, IOException {
    Optional<Cluster> clusterOptional = getDataprocCluster(name);
    if (!clusterOptional.isPresent()) {
        return Optional.empty();
    }
    Cluster cluster = clusterOptional.get();
    return Optional.of(getCluster(cluster));
}
Also used : Cluster(com.google.cloud.dataproc.v1.Cluster)
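A hypothetical call site for getCluster; the client variable and cluster name below are illustrative only:

    // Assumes `dataprocClient` is an initialized DataprocClient for the target project and region.
    Optional<io.cdap.cdap.runtime.spi.provisioner.Cluster> info = dataprocClient.getCluster("my-cluster");
    System.out.println(info.isPresent() ? "Cluster exists" : "Cluster not found");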

Example 70 with Cluster

use of com.google.cloud.dataproc.v1.Cluster in project cdap by caskdata.

the class DataprocClient method updateClusterLabels.

/**
 * Updates labels on the given Dataproc cluster.
 *
 * @param clusterName name of the cluster
 * @param labelsToSet key/value pairs to set on the Dataproc cluster.
 * @param labelsToRemove collection of labels to remove from the Dataproc cluster.
 */
void updateClusterLabels(String clusterName, Map<String, String> labelsToSet, Collection<String> labelsToRemove) throws RetryableProvisionException, InterruptedException {
    if (labelsToSet.isEmpty() && labelsToRemove.isEmpty()) {
        return;
    }
    try {
        Cluster cluster = getDataprocCluster(clusterName)
            .filter(c -> c.getStatus().getState() == ClusterStatus.State.RUNNING)
            .orElseThrow(() -> new DataprocRuntimeException(
                "Dataproc cluster " + clusterName + " does not exist or is not in a running state"));
        Map<String, String> existingLabels = cluster.getLabelsMap();
        // skip the update if all labels to set already match and none of the labels to remove are present
        if (labelsToSet.entrySet().stream().allMatch(e -> Objects.equals(e.getValue(), existingLabels.get(e.getKey()))) && labelsToRemove.stream().noneMatch(existingLabels::containsKey)) {
            return;
        }
        Map<String, String> newLabels = new HashMap<>(existingLabels);
        newLabels.keySet().removeAll(labelsToRemove);
        newLabels.putAll(labelsToSet);
        FieldMask updateMask = FieldMask.newBuilder().addPaths("labels").build();
        OperationFuture<Cluster, ClusterOperationMetadata> operationFuture = client.updateClusterAsync(
            UpdateClusterRequest.newBuilder()
                .setProjectId(conf.getProjectId())
                .setRegion(conf.getRegion())
                .setClusterName(clusterName)
                .setCluster(cluster.toBuilder().clearLabels().putAllLabels(newLabels))
                .setUpdateMask(updateMask)
                .build());
        ClusterOperationMetadata metadata = operationFuture.getMetadata().get();
        int numWarnings = metadata.getWarningsCount();
        if (numWarnings > 0) {
            LOG.warn("Encountered {} warning{} while setting labels on cluster:\n{}", numWarnings, numWarnings > 1 ? "s" : "", String.join("\n", metadata.getWarningsList()));
        }
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof ApiException) {
            throw handleApiException((ApiException) cause);
        }
        throw new DataprocRuntimeException(cause);
    }
}
Also used : HttpURLConnection(java.net.HttpURLConnection) NetworkPeering(com.google.api.services.compute.model.NetworkPeering) Arrays(java.util.Arrays) OperationFuture(com.google.api.gax.longrunning.OperationFuture) NotFoundException(com.google.api.gax.rpc.NotFoundException) LoggerFactory(org.slf4j.LoggerFactory) FixedCredentialsProvider(com.google.api.gax.core.FixedCredentialsProvider) Network(com.google.api.services.compute.model.Network) DeleteClusterRequest(com.google.cloud.dataproc.v1.DeleteClusterRequest) GoogleJsonResponseException(com.google.api.client.googleapis.json.GoogleJsonResponseException) GeneralSecurityException(java.security.GeneralSecurityException) GetClusterRequest(com.google.cloud.dataproc.v1.GetClusterRequest) Cluster(com.google.cloud.dataproc.v1.Cluster) NodeInitializationAction(com.google.cloud.dataproc.v1.NodeInitializationAction) Map(java.util.Map) CredentialsProvider(com.google.api.gax.core.CredentialsProvider) ParseException(java.text.ParseException) EnumSet(java.util.EnumSet) AutoscalingConfig(com.google.cloud.dataproc.v1.AutoscalingConfig) ImmutableSet(com.google.common.collect.ImmutableSet) ClusterStatus(com.google.cloud.dataproc.v1.ClusterStatus) JacksonFactory(com.google.api.client.json.jackson2.JacksonFactory) Predicate(java.util.function.Predicate) Collection(java.util.Collection) HttpTransport(com.google.api.client.http.HttpTransport) Status(com.google.rpc.Status) Set(java.util.Set) GoogleNetHttpTransport(com.google.api.client.googleapis.javanet.GoogleNetHttpTransport) HttpResponseException(com.google.api.client.http.HttpResponseException) Collectors(java.util.stream.Collectors) AlreadyExistsException(com.google.api.gax.rpc.AlreadyExistsException) Node(io.cdap.cdap.runtime.spi.provisioner.Node) InstanceGroupConfig(com.google.cloud.dataproc.v1.InstanceGroupConfig) Objects(java.util.Objects) List(java.util.List) HttpStatusCodes(com.google.api.client.http.HttpStatusCodes) Stream(java.util.stream.Stream) OperationsClient(com.google.longrunning.OperationsClient) HttpCredentialsAdapter(com.google.auth.http.HttpCredentialsAdapter) FirewallList(com.google.api.services.compute.model.FirewallList) FieldMask(com.google.protobuf.FieldMask) IPRange(io.cdap.cdap.runtime.spi.common.IPRange) SSHPublicKey(io.cdap.cdap.runtime.spi.ssh.SSHPublicKey) Optional(java.util.Optional) Compute(com.google.api.services.compute.Compute) SoftwareConfig(com.google.cloud.dataproc.v1.SoftwareConfig) DataprocUtils(io.cdap.cdap.runtime.spi.common.DataprocUtils) Instance(com.google.api.services.compute.model.Instance) ClusterConfig(com.google.cloud.dataproc.v1.ClusterConfig) SimpleDateFormat(java.text.SimpleDateFormat) HashMap(java.util.HashMap) UpdateClusterRequest(com.google.cloud.dataproc.v1.UpdateClusterRequest) Operation(com.google.longrunning.Operation) GceClusterConfig(com.google.cloud.dataproc.v1.GceClusterConfig) ArrayList(java.util.ArrayList) ClusterOperationMetadata(com.google.cloud.dataproc.v1.ClusterOperationMetadata) Strings(com.google.common.base.Strings) NetworkList(com.google.api.services.compute.model.NetworkList) DiskConfig(com.google.cloud.dataproc.v1.DiskConfig) HttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer) AccessConfig(com.google.api.services.compute.model.AccessConfig) Firewall(com.google.api.services.compute.model.Firewall) SocketTimeoutException(java.net.SocketTimeoutException) ShieldedInstanceConfig(com.google.cloud.dataproc.v1.ShieldedInstanceConfig) StreamSupport(java.util.stream.StreamSupport) Nullable(javax.annotation.Nullable) ClusterControllerClient(com.google.cloud.dataproc.v1.ClusterControllerClient) RetryableProvisionException(io.cdap.cdap.runtime.spi.provisioner.RetryableProvisionException) Logger(org.slf4j.Logger) EncryptionConfig(com.google.cloud.dataproc.v1.EncryptionConfig) IOException(java.io.IOException) ApiException(com.google.api.gax.rpc.ApiException) ClusterControllerSettings(com.google.cloud.dataproc.v1.ClusterControllerSettings) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) Duration(com.google.protobuf.Duration) EndpointConfig(com.google.cloud.dataproc.v1.EndpointConfig) LifecycleConfig(com.google.cloud.dataproc.v1.LifecycleConfig) Collections(java.util.Collections)
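A hypothetical call site for updateClusterLabels; names and values are illustrative, and per the check in the method the cluster must already be in the RUNNING state:

    // Assumes `dataprocClient` is an initialized DataprocClient.
    Map<String, String> labelsToSet = Collections.singletonMap("owner", "data-team");
    Collection<String> labelsToRemove = Collections.singletonList("stale-label");
    dataprocClient.updateClusterLabels("my-cluster", labelsToSet, labelsToRemove);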

Aggregations

Test (org.junit.Test) 93
AbstractMessage (com.google.protobuf.AbstractMessage) 38
ByteString (com.google.protobuf.ByteString) 32
Cluster (io.envoyproxy.envoy.config.cluster.v3.Cluster) 25
InvalidArgumentException (com.google.api.gax.rpc.InvalidArgumentException) 24
StatusRuntimeException (io.grpc.StatusRuntimeException) 24
ExecutionException (java.util.concurrent.ExecutionException) 22
Cluster (com.google.cloud.dataproc.v1.Cluster) 18
Operation (com.google.longrunning.Operation) 18
Cluster (com.google.bigtable.admin.v2.Cluster) 16
ClusterName (com.google.bigtable.admin.v2.ClusterName) 16
ArrayList (java.util.ArrayList) 16
ClusterOperationMetadata (com.google.cloud.dataproc.v1.ClusterOperationMetadata) 13
HashMap (java.util.HashMap) 13
Cluster (com.google.container.v1.Cluster) 12
ClusterControllerClient (com.google.cloud.dataproc.v1.ClusterControllerClient) 11
ClusterControllerSettings (com.google.cloud.dataproc.v1.ClusterControllerSettings) 11
SoftwareConfig (com.google.cloud.dataproc.v1.SoftwareConfig) 9
SnapshotName (com.google.bigtable.admin.v2.SnapshotName) 8
ClusterConfig (com.google.cloud.dataproc.v1.ClusterConfig) 8