Example use of com.google.cloud.dataproc.v1.JobControllerClient, taken from the googleapis/java-pubsublite-spark project.
Source: the runDataprocJob method of the SampleTestBase class.
/**
 * Submits the given Spark job to the test Dataproc cluster and blocks until it completes.
 *
 * <p>The sample jar and the connector jar staged in the test GCS bucket are appended to the
 * job's jar file URIs before submission.
 *
 * @param sparkJobBuilder builder for the Spark job to run; mutated by adding jar URIs
 * @return the terminal {@link Job} returned once the submit operation finishes
 * @throws Exception if client creation, submission, or waiting on the operation fails
 */
protected Job runDataprocJob(SparkJob.Builder sparkJobBuilder) throws Exception {
  // Dataproc is addressed through a regional endpoint.
  String endpoint = String.format("%s-dataproc.googleapis.com:443", cloudRegion.value());
  JobControllerSettings settings =
      JobControllerSettings.newBuilder().setEndpoint(endpoint).build();
  // try-with-resources ensures the gRPC client is shut down even on failure.
  try (JobControllerClient client = JobControllerClient.create(settings)) {
    // Point the job at both jars previously uploaded to the test bucket.
    String sampleJarUri = String.format("gs://%s/%s", bucketName, sampleJarNameInGCS);
    String connectorJarUri = String.format("gs://%s/%s", bucketName, connectorJarNameInGCS);
    sparkJobBuilder.addJarFileUris(sampleJarUri);
    sparkJobBuilder.addJarFileUris(connectorJarUri);
    JobPlacement placement = JobPlacement.newBuilder().setClusterName(clusterName).build();
    Job jobRequest =
        Job.newBuilder().setPlacement(placement).setSparkJob(sparkJobBuilder.build()).build();
    // Submit asynchronously, then block until the long-running operation resolves.
    OperationFuture<Job, JobMetadata> operation =
        client.submitJobAsOperationAsync(projectId.value(), cloudRegion.value(), jobRequest);
    return operation.get();
  }
}
Aggregations