Use of com.google.cloud.dataproc.v1.PySparkJob in the project java-dataproc by googleapis.
Example: the quickstart method of the class Quickstart.
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.dataproc.v1.Cluster;
import com.google.cloud.dataproc.v1.ClusterConfig;
import com.google.cloud.dataproc.v1.ClusterControllerClient;
import com.google.cloud.dataproc.v1.ClusterControllerSettings;
import com.google.cloud.dataproc.v1.ClusterOperationMetadata;
import com.google.cloud.dataproc.v1.InstanceGroupConfig;
import com.google.cloud.dataproc.v1.Job;
import com.google.cloud.dataproc.v1.JobControllerClient;
import com.google.cloud.dataproc.v1.JobControllerSettings;
import com.google.cloud.dataproc.v1.JobMetadata;
import com.google.cloud.dataproc.v1.JobPlacement;
import com.google.cloud.dataproc.v1.PySparkJob;
import com.google.cloud.storage.Blob;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import com.google.protobuf.Empty;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class Quickstart {

  public static void quickstart(
      String projectId, String region, String clusterName, String jobFilePath)
      throws IOException, InterruptedException {
    String myEndpoint = String.format("%s-dataproc.googleapis.com:443", region);
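    // For example, "us-central1-dataproc.googleapis.com:443" when region is "us-central1".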
    // Configure the settings for the cluster controller client.
    ClusterControllerSettings clusterControllerSettings =
        ClusterControllerSettings.newBuilder().setEndpoint(myEndpoint).build();

    // Configure the settings for the job controller client.
    JobControllerSettings jobControllerSettings =
        JobControllerSettings.newBuilder().setEndpoint(myEndpoint).build();
    // Create both a cluster controller client and job controller client with the configured
    // settings. The clients only need to be created once and can be reused for multiple
    // requests. Using a try-with-resources closes the clients, but this can also be done
    // manually with the .close() method.
    try (ClusterControllerClient clusterControllerClient =
            ClusterControllerClient.create(clusterControllerSettings);
        JobControllerClient jobControllerClient =
            JobControllerClient.create(jobControllerSettings)) {
      // Configure the settings for our cluster.
      InstanceGroupConfig masterConfig =
          InstanceGroupConfig.newBuilder().setMachineTypeUri("n1-standard-2").setNumInstances(1).build();
      InstanceGroupConfig workerConfig =
          InstanceGroupConfig.newBuilder().setMachineTypeUri("n1-standard-2").setNumInstances(2).build();
      ClusterConfig clusterConfig =
          ClusterConfig.newBuilder().setMasterConfig(masterConfig).setWorkerConfig(workerConfig).build();

      // Create the cluster object with the desired cluster config.
      Cluster cluster = Cluster.newBuilder().setClusterName(clusterName).setConfig(clusterConfig).build();
      // Create the Cloud Dataproc cluster.
      OperationFuture<Cluster, ClusterOperationMetadata> createClusterAsyncRequest =
          clusterControllerClient.createClusterAsync(projectId, region, cluster);
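      // get() blocks until the cluster creation operation completes.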
      Cluster clusterResponse = createClusterAsyncRequest.get();
      System.out.println(
          String.format("Cluster created successfully: %s", clusterResponse.getClusterName()));
      // Configure the settings for our job.
      JobPlacement jobPlacement = JobPlacement.newBuilder().setClusterName(clusterName).build();
      PySparkJob pySparkJob = PySparkJob.newBuilder().setMainPythonFileUri(jobFilePath).build();
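      // The builder also accepts optional fields such as addArgs(...) for script arguments
      // and addPythonFileUris(...) for extra .py files (assumed usage, not part of the
      // original sample).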
      Job job = Job.newBuilder().setPlacement(jobPlacement).setPysparkJob(pySparkJob).build();
      // Submit an asynchronous request to execute the job.
      OperationFuture<Job, JobMetadata> submitJobAsOperationAsyncRequest =
          jobControllerClient.submitJobAsOperationAsync(projectId, region, job);
      Job jobResponse = submitJobAsOperationAsyncRequest.get();
      // Print output from Google Cloud Storage.
      Matcher matches =
          Pattern.compile("gs://(.*?)/(.*)").matcher(jobResponse.getDriverOutputResourceUri());
      matches.matches();
      Storage storage = StorageOptions.getDefaultInstance().getService();
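      // Dataproc writes driver output to GCS in numbered chunks; ".000000000" selects the
      // first chunk.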
      Blob blob = storage.get(matches.group(1), String.format("%s.000000000", matches.group(2)));
      System.out.println(
          String.format("Job finished successfully: %s", new String(blob.getContent())));
      // Delete the cluster.
      OperationFuture<Empty, ClusterOperationMetadata> deleteClusterAsyncRequest =
          clusterControllerClient.deleteClusterAsync(projectId, region, clusterName);
      deleteClusterAsyncRequest.get();
      System.out.println(String.format("Cluster \"%s\" successfully deleted.", clusterName));
    } catch (ExecutionException e) {
      System.err.println(String.format("quickstart: %s", e.getMessage()));
    }
  }
}
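A minimal sketch of calling this method, assuming application-default credentials and a project with the Dataproc and Cloud Storage APIs enabled; the project ID, region, cluster name, and GCS path are hypothetical placeholders:

public class QuickstartCaller {

  public static void main(String[] args) throws Exception {
    // All four values below are hypothetical placeholders; substitute real resources.
    String projectId = "my-project-id";
    String region = "us-central1";
    String clusterName = "quickstart-cluster";
    String jobFilePath = "gs://my-bucket/hello_world.py"; // main PySpark script in GCS
    Quickstart.quickstart(projectId, region, clusterName, jobFilePath);
  }
}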