use of io.cdap.http.HttpResponder in project cdap by caskdata.
the class ProgramLifecycleHttpHandler method programHistory.
/**
 * Returns the program runs of an app version. Depending on the requested status, it returns
 * either currently running, completed, or failed runs; by default it returns all runs.
 */
@GET
@Path("/apps/{app-name}/versions/{app-version}/{program-type}/{program-name}/runs")
public void programHistory(HttpRequest request, HttpResponder responder,
                           @PathParam("namespace-id") String namespaceId,
                           @PathParam("app-name") String appName,
                           @PathParam("app-version") String appVersion,
                           @PathParam("program-type") String type,
                           @PathParam("program-name") String programName,
                           @QueryParam("status") String status,
                           @QueryParam("start") String startTs,
                           @QueryParam("end") String endTs,
                           @QueryParam("limit") @DefaultValue("100") final int resultLimit) throws Exception {
  ProgramType programType = getProgramType(type);
  long start = (startTs == null || startTs.isEmpty()) ? 0 : Long.parseLong(startTs);
  long end = (endTs == null || endTs.isEmpty()) ? Long.MAX_VALUE : Long.parseLong(endTs);
  ProgramId program = new ApplicationId(namespaceId, appName, appVersion).program(programType, programName);
  ProgramRunStatus runStatus = (status == null) ? ProgramRunStatus.ALL
      : ProgramRunStatus.valueOf(status.toUpperCase());
  List<RunRecord> records = lifecycleService.getRunRecords(program, runStatus, start, end, resultLimit).stream()
      .filter(record -> !isTetheredRunRecord(record))
      .collect(Collectors.toList());
  responder.sendJson(HttpResponseStatus.OK, GSON.toJson(records));
}
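Only the relative path is declared above; the class-level @Path that contributes the namespace prefix is not part of this excerpt. As a minimal client-side sketch, calling the endpoint could look like the following. The /v3/namespaces prefix, host, port, and all application and program names are assumptions or placeholders, not values taken from the source.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ProgramRunsClientSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: the class-level @Path (not shown above) contributes the
    // /v3/namespaces/{namespace-id} prefix; host, port, and names are placeholders.
    String url = "http://localhost:11015/v3/namespaces/default"
        + "/apps/MyApp/versions/1.0.0/services/MyService/runs"
        + "?status=COMPLETED&limit=20";

    HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());

    // The handler replies 200 OK with a JSON array of RunRecord objects.
    System.out.println(response.statusCode());
    System.out.println(response.body());
  }
}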
use of io.cdap.http.HttpResponder in project cdap by caskdata.
the class ProgramLifecycleHttpHandler method stopPrograms.
/**
* Stops all programs that are passed into the data. The data is an array of JSON objects
* where each object must contain the following three elements: appId, programType, and programId
* (flow name, service name, etc.).
* <p>
* Example input:
* <pre><code>
* [{"appId": "App1", "programType": "Service", "programId": "Service1"},
* {"appId": "App1", "programType": "Mapreduce", "programId": "MapReduce2"}]
* </code></pre>
* </p><p>
 * The response will be an array of JsonObjects, each of which contains the three input parameters
 * as well as a "statusCode" field that holds the status code for that entry.
 * </p><p>
 * If an error occurs for some of the entries (for example, an app in the input that does not exist),
 * the JsonObjects whose parameters are valid will carry a successful statusCode, while the JsonObjects
 * whose parameters are not valid will carry an error message and the corresponding statusCode.
 * </p><p>
 * For the example above, if both stops succeed, the response would be 200 OK with data similar to the following:
* </p>
* <pre><code>
* [{"appId": "App1", "programType": "Service", "programId": "Service1", "statusCode": 200},
* {"appId": "App1", "programType": "Mapreduce", "programId": "Mapreduce2", "statusCode": 200}]
* </code></pre>
*/
@POST
@Path("/stop")
@AuditPolicy({ AuditDetail.REQUEST_BODY, AuditDetail.RESPONSE_BODY })
public void stopPrograms(FullHttpRequest request, HttpResponder responder,
                         @PathParam("namespace-id") String namespaceId) throws Exception {
  List<BatchProgram> programs = validateAndGetBatchInput(request, BATCH_PROGRAMS_TYPE);
  List<ListenableFuture<BatchProgramResult>> issuedStops = new ArrayList<>(programs.size());
  for (final BatchProgram program : programs) {
    ProgramId programId = new ProgramId(namespaceId, program.getAppId(), program.getProgramType(),
                                        program.getProgramId());
    try {
      List<ListenableFuture<ProgramRunId>> stops = lifecycleService.issueStop(programId, null);
      for (ListenableFuture<ProgramRunId> stop : stops) {
        ListenableFuture<BatchProgramResult> issuedStop =
            Futures.transform(stop, (Function<ProgramRunId, BatchProgramResult>) input ->
                new BatchProgramResult(program, HttpResponseStatus.OK.code(), null, input.getRun()));
        issuedStops.add(issuedStop);
      }
    } catch (NotFoundException e) {
      issuedStops.add(Futures.immediateFuture(
          new BatchProgramResult(program, HttpResponseStatus.NOT_FOUND.code(), e.getMessage())));
    } catch (BadRequestException e) {
      issuedStops.add(Futures.immediateFuture(
          new BatchProgramResult(program, HttpResponseStatus.BAD_REQUEST.code(), e.getMessage())));
    }
  }

  List<BatchProgramResult> output = new ArrayList<>(programs.size());
  // need to keep this index in case there is an exception getting the future, since we won't have
  // the program information in that scenario
  int i = 0;
  for (ListenableFuture<BatchProgramResult> issuedStop : issuedStops) {
    try {
      output.add(issuedStop.get());
    } catch (Throwable t) {
      LOG.warn(t.getMessage(), t);
      output.add(new BatchProgramResult(programs.get(i), HttpResponseStatus.INTERNAL_SERVER_ERROR.code(),
                                        t.getMessage()));
    }
    i++;
  }
  responder.sendJson(HttpResponseStatus.OK, GSON.toJson(output));
}
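For illustration, the request body this handler expects is the JSON array shown in the Javadoc above. The sketch below builds such a payload with Gson; the app and program names are taken from the Javadoc example, and the target URL prefix (resolved by the class-level @Path, which is not shown here) is an assumption.

import com.google.gson.Gson;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class StopProgramsPayloadSketch {
  public static void main(String[] args) {
    // Each entry mirrors the documented fields: appId, programType, programId.
    Map<String, String> first = new LinkedHashMap<>();
    first.put("appId", "App1");
    first.put("programType", "Service");
    first.put("programId", "Service1");

    Map<String, String> second = new LinkedHashMap<>();
    second.put("appId", "App1");
    second.put("programType", "Mapreduce");
    second.put("programId", "MapReduce2");

    List<Map<String, String>> body = Arrays.asList(first, second);

    // POST this JSON to .../namespaces/{namespace-id}/stop (prefix assumed from the
    // class-level @Path, which is not part of this excerpt).
    System.out.println(new Gson().toJson(body));
  }
}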
use of io.cdap.http.HttpResponder in project cdap by caskdata.
the class DefaultDatasetTypeService method createModuleConsumer.
private AbstractBodyConsumer createModuleConsumer(final DatasetModuleId datasetModuleId,
                                                  final String className,
                                                  final boolean forceUpdate) throws IOException, NotFoundException {
  final NamespaceId namespaceId = datasetModuleId.getParent();
  final Location namespaceHomeLocation;
  try {
    namespaceHomeLocation = impersonator.doAs(namespaceId, new Callable<Location>() {
      @Override
      public Location call() throws Exception {
        return namespacePathLocator.get(namespaceId);
      }
    });
  } catch (Exception e) {
    // The only checked exception that the callable throws is IOException.
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }

  // Verify that the namespace directory exists.
  if (!namespaceHomeLocation.exists()) {
    String msg = String.format("Home directory %s for namespace %s not found", namespaceHomeLocation, namespaceId);
    LOG.debug(msg);
    throw new NotFoundException(msg);
  }

  // Store uploaded content in a local temp file.
  String namespacesDir = cConf.get(Constants.Namespace.NAMESPACES_DIR);
  File localDataDir = new File(cConf.get(Constants.CFG_LOCAL_DATA_DIR));
  File namespaceBase = new File(localDataDir, namespacesDir);
  File tempDir = new File(new File(namespaceBase, datasetModuleId.getNamespace()),
                          cConf.get(Constants.AppFabric.TEMP_DIR)).getAbsoluteFile();
  if (!DirUtils.mkdirs(tempDir)) {
    throw new IOException("Could not create temporary directory at: " + tempDir);
  }

  return new AbstractBodyConsumer(File.createTempFile("dataset-", ".jar", tempDir)) {
    @Override
    protected void onFinish(HttpResponder responder, File uploadedFile) throws Exception {
      if (className == null) {
        // We have to delay this check until the body upload is completed, because not every client
        // sends the "Expect: 100-continue" header, and the client library we have cannot handle a
        // connection close while still reading the response reliably.
        // In the longer term we should fix the client as well as the netty-http server. However,
        // since this handler will be gone in the near future, this workaround is acceptable.
        responder.sendString(HttpResponseStatus.BAD_REQUEST, "Required header 'class-name' is absent.");
        return;
      }
      LOG.debug("Adding module {}, class name: {}", datasetModuleId, className);
      String dataFabricDir = cConf.get(Constants.Dataset.Manager.OUTPUT_DIR);
      String moduleName = datasetModuleId.getModule();
      Location archiveDir = namespaceHomeLocation.append(dataFabricDir).append(moduleName)
          .append(Constants.ARCHIVE_DIR);
      String archiveName = moduleName + ".jar";
      Location archive = archiveDir.append(archiveName);

      // Copy the uploaded content to a temporary location.
      Location tmpLocation = archive.getTempFile(".tmp");
      try {
        Locations.mkdirsIfNotExists(archiveDir);
        LOG.debug("Copy from {} to {}", uploadedFile, tmpLocation);
        Files.copy(uploadedFile, Locations.newOutputSupplier(tmpLocation));

        // Finally, move the archive to its final location.
        LOG.debug("Storing module {} jar at {}", datasetModuleId, archive);
        if (tmpLocation.renameTo(archive) == null) {
          throw new IOException(String.format("Could not move archive from location: %s, to location: %s",
                                              tmpLocation, archive));
        }
        typeManager.addModule(datasetModuleId, className, archive, forceUpdate);
        // TODO: respond with the DatasetModuleMeta of the just-added module (and log this info).
        // Ideally this should have been done before, but we cannot grant privileges on types until
        // they have been added to the type MDS. First revoke any orphaned privileges for types left
        // behind by past failed revokes.
        LOG.info("Added module {}", datasetModuleId);
        responder.sendStatus(HttpResponseStatus.OK);
      } catch (Exception e) {
        // The copy to the temporary file or the rename may have failed; clean up the temporary location.
        try {
          tmpLocation.delete();
        } catch (IOException ex) {
          LOG.warn("Failed to cleanup temporary location {}", tmpLocation);
        }
        if (e instanceof DatasetModuleConflictException) {
          responder.sendString(HttpResponseStatus.CONFLICT, e.getMessage());
        } else {
          throw e;
        }
      }
    }
  };
}
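For context, a client upload that ends up in this consumer might look like the sketch below. The exact URL and the header that carries the class name are resolved by the HTTP handler that delegates to createModuleConsumer and are not part of this excerpt, so both are assumptions; host, port, module, and class names are placeholders.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Path;

public class DatasetModuleUploadSketch {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint and header name; the real values come from the handler that
    // calls createModuleConsumer and may differ in the CDAP version in use.
    String url = "http://localhost:11015/v3/namespaces/default/data/modules/myModule";

    HttpRequest request = HttpRequest.newBuilder(URI.create(url))
        .header("X-Class-Name", "com.example.dataset.MyDatasetModule")
        .PUT(HttpRequest.BodyPublishers.ofFile(Path.of("my-dataset-module.jar")))
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());

    // Per the onFinish logic above: 200 OK on success, 400 when the class name is
    // missing, and 409 CONFLICT on a DatasetModuleConflictException.
    System.out.println(response.statusCode() + " " + response.body());
  }
}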
use of io.cdap.http.HttpResponder in project cdap by caskdata.
the class JsonListResponderTest method testSendForPaginatedListResponder.
@Test
public void testSendForPaginatedListResponder() throws IOException {
  HttpResponder responder = Mockito.mock(HttpResponder.class);
  ChunkResponder chunkResponder = Mockito.mock(ChunkResponder.class);
  Mockito.when(responder.sendChunkStart(HttpResponseStatus.OK)).thenReturn(chunkResponder);

  // Capture every chunk that the responder writes so the full body can be inspected afterwards.
  ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
  Mockito.doAnswer(invocation -> {
    ByteBuffers.writeToStream(invocation.getArgumentAt(0, ByteBuffer.class), byteArrayOutputStream);
    return null;
  }).when(chunkResponder).sendChunk(Mockito.any(ByteBuffer.class));

  JsonPaginatedListResponder.respond(new Gson(), responder, "applications", (jsonListResponder) -> {
    jsonListResponder.send("application");
    return "nextToken";
  });

  JsonParser parser = new JsonParser();
  JsonObject json = (JsonObject) parser.parse(byteArrayOutputStream.toString());
  Assert.assertEquals(json.get("applications").getAsString(), "application");
  Assert.assertEquals(json.get("nextPageToken").getAsString(), "nextToken");
}
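Reading the assertions back, the captured chunked body parses to a single JSON object carrying the streamed "applications" field plus a "nextPageToken" entry. The sketch below is reconstructed from the test assertions only, not from the responder's source, so the exact wrapping of the "applications" value is an assumption.

import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

public class PaginatedBodyShapeSketch {
  public static void main(String[] args) {
    // Sketch only: the "applications" value may well be a single-element array, which
    // JsonElement#getAsString() also resolves, so both shapes satisfy the test above.
    String approximateBody = "{\"applications\":[\"application\"],\"nextPageToken\":\"nextToken\"}";

    JsonObject json = (JsonObject) new JsonParser().parse(approximateBody);
    System.out.println(json.get("applications").getAsString()); // application
    System.out.println(json.get("nextPageToken").getAsString()); // nextToken
  }
}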
use of io.cdap.http.HttpResponder in project cdap by caskdata.
the class AppLifecycleHttpHandler method deployApplication.
private BodyConsumer deployApplication(final HttpResponder responder, final NamespaceId namespace,
                                       final String appId, final String archiveName,
                                       final String configString, @Nullable final String ownerPrincipal,
                                       final boolean updateSchedules) throws IOException {
  Id.Namespace idNamespace = Id.Namespace.fromEntityId(namespace);
  Location namespaceHomeLocation = namespacePathLocator.get(namespace);
  if (!namespaceHomeLocation.exists()) {
    String msg = String.format("Home directory %s for namespace %s not found",
                               namespaceHomeLocation, namespace.getNamespace());
    LOG.error(msg);
    responder.sendString(HttpResponseStatus.NOT_FOUND, msg);
    return null;
  }
  if (archiveName == null || archiveName.isEmpty()) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST,
                         String.format("%s header not present. Please include the header and set its value "
                                         + "to the jar name.", ARCHIVE_NAME_HEADER),
                         new DefaultHttpHeaders().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE));
    return null;
  }

  // TODO: (CDAP-3258) error handling needs to be refactored here, should be able just to throw the
  // exception, but the caller catches all exceptions and responds with a 500
  final Id.Artifact artifactId;
  try {
    artifactId = Id.Artifact.parse(idNamespace, archiveName);
  } catch (IllegalArgumentException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
    return null;
  }

  KerberosPrincipalId ownerPrincipalId = ownerPrincipal == null ? null : new KerberosPrincipalId(ownerPrincipal);

  // Store uploaded content to a local temp file
  String namespacesDir = configuration.get(Constants.Namespace.NAMESPACES_DIR);
  File localDataDir = new File(configuration.get(Constants.CFG_LOCAL_DATA_DIR));
  File namespaceBase = new File(localDataDir, namespacesDir);
  File tempDir = new File(new File(namespaceBase, namespace.getNamespace()),
                          configuration.get(Constants.AppFabric.TEMP_DIR)).getAbsoluteFile();
  if (!DirUtils.mkdirs(tempDir)) {
    throw new IOException("Could not create temporary directory at: " + tempDir);
  }
  final KerberosPrincipalId finalOwnerPrincipalId = ownerPrincipalId;

  return new AbstractBodyConsumer(File.createTempFile("app-", ".jar", tempDir)) {
    @Override
    protected void onFinish(HttpResponder responder, File uploadedFile) {
      try {
        // deploy app
        ApplicationWithPrograms app = applicationLifecycleService.deployAppAndArtifact(
            namespace, appId, artifactId, uploadedFile, configString, finalOwnerPrincipalId,
            createProgramTerminator(), updateSchedules);
        LOG.info("Successfully deployed app {} in namespace {} from artifact {} with configuration {} "
                   + "and principal {}", app.getApplicationId().getApplication(), namespace.getNamespace(),
                 artifactId, configString, finalOwnerPrincipalId);
        responder.sendString(HttpResponseStatus.OK,
                             String.format("Successfully deployed app %s",
                                           app.getApplicationId().getApplication()));
      } catch (InvalidArtifactException e) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
      } catch (ArtifactAlreadyExistsException e) {
        responder.sendString(HttpResponseStatus.CONFLICT,
                             String.format("Artifact '%s' already exists. Please use the API that creates an "
                                             + "application from an existing artifact. If you are trying to "
                                             + "replace the artifact, please delete it and then try again.",
                                           artifactId));
      } catch (WriteConflictException e) {
        // don't really expect this to happen. It means after multiple retries there were still write conflicts.
        LOG.warn("Write conflict while trying to add artifact {}.", artifactId, e);
        responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR,
                             "Write conflict while adding artifact. This can happen if multiple requests to "
                               + "add the same artifact occur simultaneously. Please try again.");
      } catch (UnauthorizedException e) {
        responder.sendString(HttpResponseStatus.FORBIDDEN, e.getMessage());
      } catch (ConflictException e) {
        responder.sendString(HttpResponseStatus.CONFLICT, e.getMessage());
      } catch (Exception e) {
        LOG.error("Deploy failure", e);
        responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
      }
    }
  };
}
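For context, a deployment call that feeds this consumer might look like the sketch below. The /v3/namespaces prefix and the archive-name header (referenced above only as the ARCHIVE_NAME_HEADER constant, conventionally X-Archive-Name in CDAP) are assumptions; host, port, and file names are placeholders.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Path;

public class DeployAppSketch {
  public static void main(String[] args) throws Exception {
    // Assumed path and header name; verify both against the CDAP version in use.
    String url = "http://localhost:11015/v3/namespaces/default/apps";

    HttpRequest request = HttpRequest.newBuilder(URI.create(url))
        .header("X-Archive-Name", "MyApp-1.0.0.jar")
        .POST(HttpRequest.BodyPublishers.ofFile(Path.of("MyApp-1.0.0.jar")))
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());

    // Per the onFinish handling above: 200 OK on success, 400 for an invalid artifact,
    // 409 if the artifact or application conflicts, 403 if unauthorized.
    System.out.println(response.statusCode() + " " + response.body());
  }
}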