use of com.google.cloud.dataproc.v1.ManagedCluster in project java-dataproc by googleapis.
the class InstantiateInlineWorkflowTemplate method instantiateInlineWorkflowTemplate.
/**
 * Instantiates an inline Dataproc workflow template that runs a teragen job followed by a
 * dependent terasort job on an ephemeral managed cluster, blocking until the workflow finishes.
 *
 * @param projectId the Google Cloud project in which to run the workflow
 * @param region the Dataproc region; also used to build the regional service endpoint
 * @throws IOException if the workflow template service client cannot be created
 * @throws InterruptedException if the wait for workflow completion is interrupted
 */
public static void instantiateInlineWorkflowTemplate(String projectId, String region)
    throws IOException, InterruptedException {
  String myEndpoint = String.format("%s-dataproc.googleapis.com:443", region);

  // Configure the settings for the workflow template service client.
  WorkflowTemplateServiceSettings workflowTemplateServiceSettings =
      WorkflowTemplateServiceSettings.newBuilder().setEndpoint(myEndpoint).build();

  // try-with-resources closes the client, but this can also be done manually with .close().
  try (WorkflowTemplateServiceClient workflowTemplateServiceClient =
      WorkflowTemplateServiceClient.create(workflowTemplateServiceSettings)) {
    // Configure the jobs within the workflow: teragen generates data, terasort sorts it.
    HadoopJob teragenHadoopJob =
        HadoopJob.newBuilder()
            .setMainJarFileUri("file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar")
            .addArgs("teragen")
            .addArgs("1000")
            .addArgs("hdfs:///gen/")
            .build();
    OrderedJob teragen =
        OrderedJob.newBuilder().setHadoopJob(teragenHadoopJob).setStepId("teragen").build();

    HadoopJob terasortHadoopJob =
        HadoopJob.newBuilder()
            .setMainJarFileUri("file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar")
            .addArgs("terasort")
            .addArgs("hdfs:///gen/")
            .addArgs("hdfs:///sort/")
            .build();
    // terasort consumes teragen's output, so it declares teragen as a prerequisite step.
    OrderedJob terasort =
        OrderedJob.newBuilder()
            .setHadoopJob(terasortHadoopJob)
            .addPrerequisiteStepIds("teragen")
            .setStepId("terasort")
            .build();

    // Configure the cluster placement for the workflow.
    // Leave "ZoneUri" empty for "Auto Zone Placement":
    // GceClusterConfig gceClusterConfig = GceClusterConfig.newBuilder().setZoneUri("").build();
    GceClusterConfig gceClusterConfig =
        GceClusterConfig.newBuilder().setZoneUri("us-central1-a").build();
    ClusterConfig clusterConfig =
        ClusterConfig.newBuilder().setGceClusterConfig(gceClusterConfig).build();
    ManagedCluster managedCluster =
        ManagedCluster.newBuilder()
            .setClusterName("my-managed-cluster")
            .setConfig(clusterConfig)
            .build();
    WorkflowTemplatePlacement workflowTemplatePlacement =
        WorkflowTemplatePlacement.newBuilder().setManagedCluster(managedCluster).build();

    // Create the inline workflow template.
    WorkflowTemplate workflowTemplate =
        WorkflowTemplate.newBuilder()
            .addJobs(teragen)
            .addJobs(terasort)
            .setPlacement(workflowTemplatePlacement)
            .build();

    // Submit the instantiated inline workflow template request and block until it completes.
    String parent = RegionName.format(projectId, region);
    OperationFuture<Empty, WorkflowMetadata> instantiateInlineWorkflowTemplateAsync =
        workflowTemplateServiceClient.instantiateInlineWorkflowTemplateAsync(
            parent, workflowTemplate);
    instantiateInlineWorkflowTemplateAsync.get();

    // Print out a success message. (println, not printf: the message has no format specifiers.)
    System.out.println("Workflow ran successfully.");
  } catch (ExecutionException e) {
    System.err.println(String.format("Error running workflow: %s ", e.getMessage()));
  }
}
use of the Open Cluster Management ManagedCluster model class in project kubernetes-client by fabric8io.
the class ManagedClusterTest method deserializationAndSerializationShouldWorkAsExpected.
/**
 * Verifies that a ManagedCluster survives a deserialize -> serialize -> deserialize round trip
 * without losing the fields this test inspects (name, hubAcceptsClient, client configs).
 *
 * @throws IOException if Jackson fails to read or write the JSON
 */
@Test
void deserializationAndSerializationShouldWorkAsExpected() throws IOException {
  // Given: read the whole fixture in one token ("\\A" anchors the delimiter at input start).
  // try-with-resources closes the Scanner (and the underlying resource stream) — the original
  // left both open, leaking the stream.
  String originalJson;
  try (Scanner scanner =
      new Scanner(getClass().getResourceAsStream("/valid-managedcluster.json"))) {
    originalJson = scanner.useDelimiter("\\A").next();
  }
  // When
  final ManagedCluster managedCluster = mapper.readValue(originalJson, ManagedCluster.class);
  final String serializedJson = mapper.writeValueAsString(managedCluster);
  final ManagedCluster managedClusterFromSerializedJson =
      mapper.readValue(serializedJson, ManagedCluster.class);
  // Then
  assertNotNull(managedCluster);
  assertNotNull(serializedJson);
  assertNotNull(managedClusterFromSerializedJson);
  assertEquals(
      managedCluster.getMetadata().getName(),
      managedClusterFromSerializedJson.getMetadata().getName());
  assertTrue(managedCluster.getSpec().getHubAcceptsClient());
  assertEquals(1, managedCluster.getSpec().getManagedClusterClientConfigs().size());
}
use of the Open Cluster Management ManagedCluster model class in project kubernetes-client by fabric8io.
the class ManagedClusterTest method builderShouldCreateObject.
/**
 * Builds a ManagedCluster via its fluent builder and checks that metadata, labels,
 * hub acceptance flag, and client configs all appear on the built object.
 */
@Test
void builderShouldCreateObject() {
  // Given + When: assemble and build in a single fluent expression.
  ManagedCluster builtCluster =
      new ManagedClusterBuilder()
          .withNewMetadata()
          .addToLabels("vendor", "OpenShift")
          .withName("cluster1")
          .endMetadata()
          .withNewSpec()
          .withHubAcceptsClient(true)
          .addNewManagedClusterClientConfig()
          .withCaBundle("test")
          .withUrl("https://test.com")
          .endManagedClusterClientConfig()
          .endSpec()
          .build();
  // Then
  assertNotNull(builtCluster);
  assertEquals("cluster1", builtCluster.getMetadata().getName());
  assertEquals(1, builtCluster.getMetadata().getLabels().size());
  assertTrue(builtCluster.getSpec().getHubAcceptsClient());
  assertEquals(1, builtCluster.getSpec().getManagedClusterClientConfigs().size());
}
use of the Open Cluster Management ManagedCluster model class in project kubernetes-client by fabric8io.
the class ManagedClusterList method main.
/**
 * Lists every ManagedCluster known to the hub and logs each cluster's name.
 */
public static void main(String[] args) {
  // The client is AutoCloseable; try-with-resources releases it on exit.
  try (OpenClusterManagementClient ocmClient = new DefaultOpenClusterManagementClient()) {
    logger.info("Listing all ManagedClusters: ");
    for (ManagedCluster cluster : ocmClient.clusters().managedClusters().list().getItems()) {
      logger.info(cluster.getMetadata().getName());
    }
  }
}
use of the Open Cluster Management ManagedCluster model class in project kubernetes-client by fabric8io.
the class ManagedClusterTest method get.
/**
 * Verifies that a GET against the mock API server returns the expected ManagedCluster.
 */
@Test
void get() {
  // Given: the mock server answers exactly one GET for "test-get" with a canned cluster.
  server.expect()
      .get()
      .withPath("/apis/cluster.open-cluster-management.io/v1/managedclusters/test-get")
      .andReturn(HttpURLConnection.HTTP_OK, createNewManagedCluster("test-get"))
      .once();
  // When
  ManagedCluster fetched = client.clusters().managedClusters().withName("test-get").get();
  // Then
  assertThat(fetched).isNotNull();
  assertThat(fetched.getMetadata().getName()).isEqualTo("test-get");
}
Aggregations