Use of com.google.cloud.dataproc.v1.Cluster in project java-container by googleapis, in the class ClusterManagerClientTest, method createClusterExceptionTest2.
@Test
public void createClusterExceptionTest2() throws Exception {
  // Arrange: make the mock service fail the next call with INVALID_ARGUMENT.
  mockClusterManager.addException(new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT));
  try {
    String projectId = "projectId-894832108";
    String zone = "zone3744684";
    Cluster cluster = Cluster.newBuilder().build();
    client.createCluster(projectId, zone, cluster);
    Assert.fail("No exception raised");
  } catch (InvalidArgumentException expected) {
    // Expected: the gRPC INVALID_ARGUMENT status surfaces as InvalidArgumentException.
  }
}
Use of com.google.cloud.dataproc.v1.Cluster in project java-container by googleapis, in the class ClusterManagerClientTest, method listClustersTest.
@Test
public void listClustersTest() throws Exception {
  // Arrange: enqueue a canned response containing no clusters and no missing zones.
  ListClustersResponse expectedResponse =
      ListClustersResponse.newBuilder()
          .addAllClusters(new ArrayList<Cluster>())
          .addAllMissingZones(new ArrayList<String>())
          .build();
  mockClusterManager.addResponse(expectedResponse);
  String projectId = "projectId-894832108";
  String zone = "zone3744684";

  // Act.
  ListClustersResponse actualResponse = client.listClusters(projectId, zone);

  // Assert: the stubbed response comes back unchanged.
  Assert.assertEquals(expectedResponse, actualResponse);

  // Assert: exactly one request reached the mock and it carried the expected fields.
  List<AbstractMessage> requests = mockClusterManager.getRequests();
  Assert.assertEquals(1, requests.size());
  ListClustersRequest request = ((ListClustersRequest) requests.get(0));
  Assert.assertEquals(projectId, request.getProjectId());
  Assert.assertEquals(zone, request.getZone());
  Assert.assertTrue(
      channelProvider.isHeaderSent(
          ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
          GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
}
Use of com.google.cloud.dataproc.v1.Cluster in project java-container by googleapis, in the class ClusterManagerClientTest, method createClusterExceptionTest.
@Test
public void createClusterExceptionTest() throws Exception {
  // Arrange: queue an INVALID_ARGUMENT failure on the mock service.
  StatusRuntimeException statusException =
      new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
  mockClusterManager.addException(statusException);
  try {
    // Act: attempt the call that should now fail.
    client.createCluster("projectId-894832108", "zone3744684", Cluster.newBuilder().build());
    Assert.fail("No exception raised");
  } catch (InvalidArgumentException expected) {
    // Expected exception: INVALID_ARGUMENT maps to InvalidArgumentException.
  }
}
Use of com.google.cloud.dataproc.v1.Cluster in project cdap by cdapio, in the class DataprocClient, method updateClusterLabels.
/**
 * Updates labels on the given Dataproc cluster.
 *
 * <p>No-op when both {@code labelsToSet} and {@code labelsToRemove} are empty, or when the
 * cluster's labels already match the requested state. The update is restricted to the
 * {@code labels} field via a field mask, and the method blocks until the update operation's
 * metadata is available.
 *
 * @param clusterName name of the cluster
 * @param labelsToSet key/value pairs to set on the Dataproc cluster
 * @param labelsToRemove collection of label keys to remove from the Dataproc cluster
 * @throws RetryableProvisionException if the API call failed in a retryable way
 * @throws InterruptedException if the wait for the update operation was interrupted
 * @throws DataprocRuntimeException if the cluster does not exist, is not in the RUNNING state,
 *     or the update failed for a non-retryable reason
 */
void updateClusterLabels(String clusterName, Map<String, String> labelsToSet, Collection<String> labelsToRemove) throws RetryableProvisionException, InterruptedException {
  if (labelsToSet.isEmpty() && labelsToRemove.isEmpty()) {
    return;
  }
  try {
    // Only a RUNNING cluster may have its labels updated.
    Cluster cluster = getDataprocCluster(clusterName)
        .filter(c -> c.getStatus().getState() == ClusterStatus.State.RUNNING)
        .orElseThrow(() -> new DataprocRuntimeException(
            "Dataproc cluster " + clusterName + " does not exist or not in running state"));
    Map<String, String> existingLabels = cluster.getLabelsMap();
    // Skip the API call when every label to set already has the desired value and none of the
    // labels to remove are present.
    if (labelsToSet.entrySet().stream()
            .allMatch(e -> Objects.equals(e.getValue(), existingLabels.get(e.getKey())))
        && labelsToRemove.stream().noneMatch(existingLabels::containsKey)) {
      return;
    }
    Map<String, String> newLabels = new HashMap<>(existingLabels);
    newLabels.keySet().removeAll(labelsToRemove);
    newLabels.putAll(labelsToSet);
    // Restrict the update to the "labels" field so nothing else on the cluster is modified.
    FieldMask updateMask = FieldMask.newBuilder().addPaths("labels").build();
    OperationFuture<Cluster, ClusterOperationMetadata> operationFuture =
        client.updateClusterAsync(
            UpdateClusterRequest.newBuilder()
                .setProjectId(conf.getProjectId())
                .setRegion(conf.getRegion())
                .setClusterName(clusterName)
                .setCluster(cluster.toBuilder().clearLabels().putAllLabels(newLabels))
                .setUpdateMask(updateMask)
                .build());
    // Blocks until metadata is available; operation failures surface as ExecutionException.
    ClusterOperationMetadata metadata = operationFuture.getMetadata().get();
    int numWarnings = metadata.getWarningsCount();
    if (numWarnings > 0) {
      // Fixed format string: "warning {}" rendered as "warning s" when pluralized; "warning{}"
      // matches the cluster-creation log message style.
      LOG.warn("Encountered {} warning{} while setting labels on cluster:\n{}",
          numWarnings, numWarnings > 1 ? "s" : "", String.join("\n", metadata.getWarningsList()));
    }
  } catch (ExecutionException e) {
    // Unwrap the async failure; preserve the cause in whichever exception we rethrow.
    Throwable cause = e.getCause();
    if (cause instanceof ApiException) {
      throw handleApiException((ApiException) cause);
    }
    throw new DataprocRuntimeException(cause);
  }
}
Use of com.google.cloud.dataproc.v1.Cluster in project cdap by cdapio, in the class DataprocProvisioner, method createCluster.
/**
 * Creates a Dataproc cluster for the given provisioner context, or returns an existing one.
 *
 * <p>Order of resolution visible in this method: first try to reuse a compatible cluster,
 * then check whether a cluster for this run already exists (a retry), and only then issue a
 * create request. A reuse/response metric is emitted on each outcome; failures also emit the
 * response metric (with the exception) before rethrowing.
 *
 * @param context the provisioner context supplying configuration, SSH context and monitoring type
 * @return the reused, pre-existing, or newly CREATING cluster
 * @throws Exception if cluster creation fails; the original exception is rethrown after the
 *     failure metric is emitted
 */
@Override
public Cluster createCluster(ProvisionerContext context) throws Exception {
DataprocConf conf = DataprocConf.create(createContextProperties(context));
if (!isAutoscalingFieldsValid(conf, createContextProperties(context))) {
LOG.warn("The configs : {}, {}, {} will not be considered when {} is enabled ", DataprocConf.WORKER_NUM_NODES, DataprocConf.SECONDARY_WORKER_NUM_NODES, DataprocConf.AUTOSCALING_POLICY, DataprocConf.PREDEFINED_AUTOSCALE_ENABLED);
}
// SSH keys are only needed for SSH-based monitoring or when the runtime job manager is off.
if (context.getRuntimeMonitorType() == RuntimeMonitorType.SSH || !conf.isRuntimeJobManagerEnabled()) {
// Generates and set the ssh key if it does not have one.
// Since invocation of this method can come from a retry, we don't need to keep regenerating the keys
SSHContext sshContext = context.getSSHContext();
if (sshContext != null) {
SSHKeyPair sshKeyPair = sshContext.getSSHKeyPair().orElse(null);
if (sshKeyPair == null) {
sshKeyPair = sshContext.generate("cdap");
sshContext.setSSHKeyPair(sshKeyPair);
}
// Rebuild the conf so it carries the public key for the cluster.
conf = DataprocConf.create(createContextProperties(context), sshKeyPair.getPublicKey());
}
}
try (DataprocClient client = getClient(conf)) {
// Prefer reusing a compatible idle cluster over creating a new one.
Cluster reused = tryReuseCluster(client, context, conf);
if (reused != null) {
DataprocUtils.emitMetric(context, conf.getRegion(), "provisioner.createCluster.reuse.count");
return reused;
}
String clusterName = getRunKey(context);
// if it already exists, it means this is a retry. We can skip actually making the request
Optional<Cluster> existing = client.getCluster(clusterName);
if (existing.isPresent()) {
return existing.get();
}
String imageVersion = getImageVersion(context, conf);
// For logging, show the custom image URI when one is configured, else the image version.
String imageDescription = conf.getCustomImageUri();
if (imageDescription == null || imageDescription.isEmpty()) {
imageDescription = imageVersion;
}
// Reload system context properties and get system labels
Map<String, String> labels = new HashMap<>();
labels.putAll(getSystemLabels());
labels.putAll(getReuseLabels(context, conf));
labels.putAll(conf.getClusterLabels());
LOG.info("Creating Dataproc cluster {} in project {}, in region {}, with image {}, with labels {}, endpoint {}", clusterName, conf.getProjectId(), conf.getRegion(), imageDescription, labels, getRootUrl(conf));
boolean privateInstance = Boolean.parseBoolean(getSystemContext().getProperties().get(PRIVATE_INSTANCE));
ClusterOperationMetadata createOperationMeta = client.createCluster(clusterName, imageVersion, labels, privateInstance);
// Surface any warnings the create operation reported.
int numWarnings = createOperationMeta.getWarningsCount();
if (numWarnings > 0) {
LOG.warn("Encountered {} warning{} while creating Dataproc cluster:\n{}", numWarnings, numWarnings > 1 ? "s" : "", String.join("\n", createOperationMeta.getWarningsList()));
}
DataprocUtils.emitMetric(context, conf.getRegion(), "provisioner.createCluster.response.count");
// Return immediately with CREATING status; callers poll for the cluster to become available.
return new Cluster(clusterName, ClusterStatus.CREATING, Collections.emptyList(), Collections.emptyMap());
} catch (Exception e) {
// Emit the failure metric (tagged with the exception) before propagating.
DataprocUtils.emitMetric(context, conf.getRegion(), "provisioner.createCluster.response.count", e);
throw e;
}
}
Aggregations