Use of com.thinkbiganalytics.kylo.spark.rest.model.job.SparkJobResponse in the kylo project by Teradata.
From the class SparkLivyRestClient, method createJob:
@Override
public SparkJobResponse createJob(@Nonnull final SparkShellProcess process, @Nonnull final SparkJobRequest request) {
    logger.entry(process, request);

    // Wrap the request in a Livy-compatible Scala script and submit it for execution.
    final JerseyRestClient restClient = sparkLivyProcessManager.getClient(process);
    final String livyScript = scalaScriptService.wrapScriptForLivy(request);
    final Statement submitted = submitCode(restClient, livyScript, process);

    // Register the Livy statement under a freshly generated job id so the result can be polled later.
    final String newJobId = ScalaScriptService.newTableName();
    sparkLivyProcessManager.setStatementId(newJobId, submitted.getId());

    // Describe the submitted job: its id plus the statement's initial state.
    final SparkJobResponse jobResponse = new SparkJobResponse();
    jobResponse.setId(newJobId);
    jobResponse.setStatus(StatementStateTranslator.translate(submitted.getState()));
    return logger.exit(jobResponse);
}
Use of com.thinkbiganalytics.kylo.spark.rest.model.job.SparkJobResponse in the kylo project by Teradata.
From the class SparkLivyRestClient, method getJobResult:
@Nonnull
@Override
public Optional<SparkJobResponse> getJobResult(@Nonnull final SparkShellProcess process, @Nonnull final String id) {
    logger.entry(process, id);

    // This client only supports Livy-backed processes.
    Validate.isInstanceOf(SparkLivyProcess.class, process, "SparkLivyRestClient.getJobResult called on non Livy Process");
    final SparkLivyProcess livyProcess = (SparkLivyProcess) process;

    // Poll Livy for the current state of the statement backing this job id.
    final JerseyRestClient restClient = sparkLivyProcessManager.getClient(process);
    final Integer stmtId = sparkLivyProcessManager.getStatementId(id);
    final Statement stmt = livyClient.getStatement(restClient, livyProcess, stmtId);
    sparkLivyProcessManager.setStatementId(id, stmt.getId());

    // Translate the statement into a job response; an error status is surfaced as an exception.
    final SparkJobResponse response = LivyRestModelTransformer.toJobResponse(id, stmt);
    if (response.getStatus() == TransformResponse.Status.ERROR) {
        throw logger.throwing(new SparkException(String.format("Unexpected error found in transform response:\n%s", response.getMessage())));
    }
    return logger.exit(Optional.of(response));
}
Use of com.thinkbiganalytics.kylo.spark.rest.model.job.SparkJobResponse in the kylo project by Teradata.
From the class TransformService, method submit:
/**
 * Submits the specified Spark job for execution and returns a response describing it.
 *
 * <p>NOTE(review): the previous Javadoc said this method returns "the name of the Hive table
 * containing the results", but the method actually returns a {@link SparkJobResponse}.</p>
 *
 * @param request the Spark job request
 * @return the response describing the submitted job
 * @throws IllegalStateException if this service is not running
 * @throws ScriptException if the script cannot be executed
 */
@Nonnull
public SparkJobResponse submit(@Nonnull final SparkJobRequest request) throws ScriptException {
    log.entry(request);

    // Build the job task from the request, then hand it off for execution.
    final Supplier<SparkJobResult> jobTask = createJobTask(request);
    final SparkJobResponse response = submitSparkJob(jobTask);
    return log.exit(response);
}
Use of com.thinkbiganalytics.kylo.spark.rest.model.job.SparkJobResponse in the kylo project by Teradata.
From the class SparkJobController, method getJobResult:
@GET
@Path("/{job}")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("Fetches the status of a job")
@ApiResponses({ @ApiResponse(code = 200, message = "Returns the status of the job.", response = SparkJobResponse.class), @ApiResponse(code = 404, message = "The job does not exist.", response = RestResponseStatus.class), @ApiResponse(code = 500, message = "There was a problem accessing the data.", response = SparkJobResponse.class) })
public Response getJobResult(@PathParam("job") final String id) {
    try {
        final SparkJob job = transformService.getSparkJob(id);

        // Describe the job's current state; the result is attached only once the job has finished.
        final SparkJobResponse response = new SparkJobResponse();
        response.setId(job.getGroupId());
        if (job.isDone()) {
            final SparkJobResult result = job.get();
            response.setResult(result);
            response.setStatus(SparkJobResponse.Status.SUCCESS);
        } else {
            response.setStatus(SparkJobResponse.Status.PENDING);
        }
        return Response.ok(response).build();
    } catch (final IllegalArgumentException e) {
        // Unknown job id. Preserve the original exception as the cause instead of dropping it,
        // so the 404 can still be diagnosed from server logs.
        throw new NotFoundException(getMessage("job.not-found"), e);
    } catch (final Exception e) {
        throw new InternalServerErrorException(e);
    }
}
Use of com.thinkbiganalytics.kylo.spark.rest.model.job.SparkJobResponse in the kylo project by Teradata.
From the class DefaultSparkJobContext, method create:
/**
 * Creates a {@code DefaultSparkJobContext} using the specified task to execute the Spark job.
 *
 * <p>The task is started immediately on the supplied executor; its outcome is published to the
 * context's processor (a single status followed by completion on success, an error signal on
 * failure).</p>
 */
public static DefaultSparkJobContext create(@Nonnull final ChainableSupplier<SparkJobResponse> responseTask, @Nonnull final SparkJobCacheService cache, @Nonnull final ExecutorService executor) {
// Create context: a random id and a ring-buffer processor that fans status updates out to subscribers.
final String id = UUID.randomUUID().toString();
final Processor<SparkJobStatus, SparkJobStatus> processor = RingBufferProcessor.create(executor, false);
final DefaultSparkJobContext context = new DefaultSparkJobContext(id, processor);
// Start task. NOTE(review): the first andThen stage mutates context.sparkJobId as a side effect
// and must run before JobStatusFunction converts the response — preserve this ordering.
final ChainableSupplier<SparkJobStatus> statusTask = responseTask.andThen(response -> {
context.sparkJobId = response.getId();
return response;
}).andThen(new JobStatusFunction(cache));
// Run the task asynchronously and forward its outcome to the processor. A null result with a
// null error is not expected, but is surfaced as NoSuchElementException rather than completing silently.
final CompletableFuture<SparkJobStatus> future = CompletableFuture.supplyAsync(statusTask, executor).whenComplete((response, error) -> {
if (response != null) {
processor.onNext(response);
processor.onComplete();
} else if (error != null) {
processor.onError(error);
} else {
processor.onError(new NoSuchElementException());
}
});
context.setFuture(future);
return context;
}
Aggregations