Use of io.cdap.cdap.common.http.AbstractBodyConsumer in project cdap by caskdata.
From the class ArtifactHttpHandler, method addArtifact:
@POST
@Path("/namespaces/{namespace-id}/artifacts/{artifact-name}")
@AuditPolicy(AuditDetail.HEADERS)
public BodyConsumer addArtifact(HttpRequest request, HttpResponder responder,
                                @PathParam("namespace-id") final String namespaceId,
                                @PathParam("artifact-name") final String artifactName,
                                @HeaderParam(VERSION_HEADER) final String artifactVersion,
                                @HeaderParam(EXTENDS_HEADER) final String parentArtifactsStr,
                                @HeaderParam(PLUGINS_HEADER) String pluginClasses)
  throws NamespaceNotFoundException, BadRequestException {
  final NamespaceId namespace = validateAndGetNamespace(namespaceId);
  // Perform auth checks outside the BodyConsumer, as only the first http request contains the auth header
  // that populates SecurityRequestContext; subsequent http chunks don't. The BodyConsumer runs in the thread
  // that processes the last http chunk.
  if (artifactVersion != null && !artifactVersion.isEmpty()) {
    ArtifactId artifactId = validateAndGetArtifactId(namespace, artifactName, artifactVersion);
    // If the artifact ID is available, use it to perform an authorization check.
    contextAccessEnforcer.enforce(artifactId, StandardPermission.CREATE);
  } else {
    // If there is no version, perform an enforceOnParent check, for which the entity ID is not needed.
    contextAccessEnforcer.enforceOnParent(EntityType.ARTIFACT, namespace, StandardPermission.CREATE);
  }
  final Set<ArtifactRange> parentArtifacts = parseExtendsHeader(namespace, parentArtifactsStr);
  final Set<PluginClass> additionalPluginClasses;
  if (pluginClasses == null || pluginClasses.isEmpty()) {
    additionalPluginClasses = ImmutableSet.of();
  } else {
    try {
      additionalPluginClasses = GSON.fromJson(pluginClasses, PLUGINS_TYPE);
      additionalPluginClasses.forEach(PluginClass::validate);
    } catch (JsonParseException e) {
      throw new BadRequestException(String.format("%s header '%s' is invalid.", PLUGINS_HEADER, pluginClasses), e);
    } catch (IllegalArgumentException e) {
      throw new BadRequestException(String.format("Invalid PluginClasses '%s'.", pluginClasses), e);
    }
  }
  try {
    // Copy the artifact contents to the local tmp directory.
    Files.createDirectories(tmpDir.toPath());
    File destination = File.createTempFile("artifact-", ".jar", tmpDir);
    return new AbstractBodyConsumer(destination) {
      @Override
      protected void onFinish(HttpResponder responder, File uploadedFile) {
        try {
          String version = (artifactVersion == null || artifactVersion.isEmpty())
            ? getBundleVersion(uploadedFile) : artifactVersion;
          ArtifactId artifactId = validateAndGetArtifactId(namespace, artifactName, version);
          // Add the artifact to the repository.
          artifactRepository.addArtifact(Id.Artifact.fromEntityId(artifactId), uploadedFile,
                                         parentArtifacts, additionalPluginClasses);
          responder.sendString(HttpResponseStatus.OK, "Artifact added successfully");
        } catch (ArtifactRangeNotFoundException e) {
          responder.sendString(HttpResponseStatus.NOT_FOUND, e.getMessage());
        } catch (ArtifactAlreadyExistsException e) {
          responder.sendString(HttpResponseStatus.CONFLICT, e.getMessage());
        } catch (WriteConflictException e) {
          responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR,
                               "Conflict while writing artifact, please try again.");
        } catch (IOException e) {
          LOG.error("Exception while trying to write artifact {}-{}-{}.", namespaceId, artifactName, artifactVersion, e);
          responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Error performing IO while writing artifact.");
        } catch (BadRequestException e) {
          responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
        } catch (UnauthorizedException e) {
          responder.sendString(HttpResponseStatus.FORBIDDEN, e.getMessage());
        } catch (Exception e) {
          LOG.error("Error while writing artifact {}-{}-{}", namespaceId, artifactName, artifactVersion, e);
          responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Error while adding artifact.");
        }
      }

      private String getBundleVersion(File file) throws BadRequestException, IOException {
        try (JarFile jarFile = new JarFile(file)) {
          Manifest manifest = jarFile.getManifest();
          if (manifest == null) {
            throw new BadRequestException("Unable to derive version from artifact because it does not contain a manifest. "
                                            + "Please package the jar with a manifest, or explicitly specify the artifact version.");
          }
          Attributes attributes = manifest.getMainAttributes();
          String version = attributes == null ? null : attributes.getValue(ManifestFields.BUNDLE_VERSION);
          if (version == null) {
            throw new BadRequestException("Unable to derive version from artifact because manifest does not contain Bundle-Version attribute. "
                                            + "Please include Bundle-Version in the manifest, or explicitly specify the artifact version.");
          }
          return version;
        } catch (ZipException e) {
          throw new BadRequestException("Artifact is not in zip format. Please make sure it is a jar file.");
        }
      }
    };
  } catch (IOException e) {
    LOG.error("Exception creating temp file to place artifact {} contents", artifactName, e);
    responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Server error creating temp file for artifact.");
    return null;
  }
}
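Each of the usages on this page follows the same shape: the handler validates the request and performs authorization up front, creates a temporary file, and returns an AbstractBodyConsumer that streams the chunked upload into that file and hands the completed file to onFinish. The following minimal sketch shows that shape; the handler class, route, base class, and temp-directory choice are illustrative assumptions and not part of CDAP, while the AbstractBodyConsumer constructor and the onFinish signature match the usages shown here.

import java.io.File;
import java.io.IOException;
import javax.ws.rs.POST;
import javax.ws.rs.Path;

import io.cdap.cdap.common.http.AbstractBodyConsumer;
import io.cdap.http.AbstractHttpHandler;
import io.cdap.http.BodyConsumer;
import io.cdap.http.HttpResponder;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponseStatus;

// Hypothetical handler that illustrates the upload pattern used by the handlers on this page.
public class UploadHandler extends AbstractHttpHandler {

  private final File tmpDir = new File(System.getProperty("java.io.tmpdir"));

  @POST
  @Path("/upload")
  public BodyConsumer upload(HttpRequest request, HttpResponder responder) throws IOException {
    // Validation and authorization belong here: this code runs when the request line and
    // headers arrive, before any chunk of the body has been read.
    File destination = File.createTempFile("upload-", ".jar", tmpDir);
    // AbstractBodyConsumer writes every incoming chunk to 'destination' and calls
    // onFinish(responder, file) once the whole body has been received.
    return new AbstractBodyConsumer(destination) {
      @Override
      protected void onFinish(HttpResponder responder, File uploadedFile) {
        // Process the fully uploaded file, then respond.
        responder.sendString(HttpResponseStatus.OK, "Received " + uploadedFile.length() + " bytes");
      }
    };
  }
}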
Use of io.cdap.cdap.common.http.AbstractBodyConsumer in project cdap by caskdata.
From the class AppLifecycleHttpHandler, method deployAppFromArtifact:
// Normally we wouldn't want to use a body consumer but would just read the request body directly,
// since it won't be big. But the deploy app API has one path with different behavior based on content type;
// the other behavior requires a BodyConsumer, and only one method per path is allowed,
// so we have to use a BodyConsumer.
private BodyConsumer deployAppFromArtifact(final ApplicationId appId) throws IOException {
  // Perform auth checks outside the BodyConsumer, as only the first http request contains the auth header
  // that populates SecurityRequestContext; subsequent http chunks don't. The BodyConsumer runs in the thread
  // that processes the last http chunk.
  accessEnforcer.enforce(appId, authenticationContext.getPrincipal(), StandardPermission.CREATE);
  // createTempFile() needs a prefix of at least 3 characters.
  return new AbstractBodyConsumer(File.createTempFile("apprequest-" + appId, ".json", tmpDir)) {
    @Override
    protected void onFinish(HttpResponder responder, File uploadedFile) {
      try (FileReader fileReader = new FileReader(uploadedFile)) {
        AppRequest<?> appRequest = DECODE_GSON.fromJson(fileReader, AppRequest.class);
        ArtifactSummary artifactSummary = appRequest.getArtifact();
        KerberosPrincipalId ownerPrincipalId = appRequest.getOwnerPrincipal() == null
          ? null : new KerberosPrincipalId(appRequest.getOwnerPrincipal());
        // If we don't null check, it gets serialized to "null".
        Object config = appRequest.getConfig();
        String configString = config == null ? null : config instanceof String ? (String) config : GSON.toJson(config);
        try {
          applicationLifecycleService.deployApp(appId.getParent(), appId.getApplication(), appId.getVersion(),
                                                artifactSummary, configString, createProgramTerminator(),
                                                ownerPrincipalId, appRequest.canUpdateSchedules(), false,
                                                Collections.emptyMap());
        } catch (DatasetManagementException e) {
          if (e.getCause() instanceof UnauthorizedException) {
            throw (UnauthorizedException) e.getCause();
          } else {
            throw e;
          }
        }
        responder.sendString(HttpResponseStatus.OK, "Deploy Complete");
      } catch (ArtifactNotFoundException e) {
        responder.sendString(HttpResponseStatus.NOT_FOUND, e.getMessage());
      } catch (ConflictException e) {
        responder.sendString(HttpResponseStatus.CONFLICT, e.getMessage());
      } catch (UnauthorizedException e) {
        responder.sendString(HttpResponseStatus.FORBIDDEN, e.getMessage());
      } catch (InvalidArtifactException e) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
      } catch (IOException e) {
        LOG.error("Error reading request body for creating app {}.", appId);
        responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR,
                             String.format("Error while reading json request body for app %s.", appId));
      } catch (Exception e) {
        LOG.error("Deploy failure", e);
        responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
      }
    }
  };
}
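For this path the body is not a jar but a small JSON document, so the consumer buffers it to a temp .json file and deserializes it into an AppRequest, taking the artifact summary, optional config, owner principal, and schedule-update flag from it. A rough client-side sketch follows; the router port, endpoint path, and JSON property names are assumptions based on the getters used above and on public CDAP documentation, so check them against your CDAP version before relying on them.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DeployAppFromArtifactExample {
  public static void main(String[] args) throws Exception {
    // Assumed JSON shape for AppRequest: an artifact summary plus optional config, owner principal,
    // and a schedule-update flag. The property names are assumptions, not confirmed by the code above.
    String appRequestJson =
        "{"
        + "\"artifact\": {\"name\": \"my-app-artifact\", \"version\": \"1.0.0\", \"scope\": \"user\"},"
        + "\"config\": {\"some.key\": \"some value\"},"
        + "\"principal\": \"someuser/somehost.example.com@EXAMPLE.REALM\","
        + "\"app.deploy.update.schedules\": true"
        + "}";

    // Assumed REST path for creating an app from an already-deployed artifact.
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:11015/v3/namespaces/default/apps/MyApp"))
        .header("Content-Type", "application/json")
        .PUT(HttpRequest.BodyPublishers.ofString(appRequestJson))
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode() + " " + response.body());
  }
}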
Use of io.cdap.cdap.common.http.AbstractBodyConsumer in project cdap by caskdata.
From the class DefaultDatasetTypeService, method createModuleConsumer:
private AbstractBodyConsumer createModuleConsumer(final DatasetModuleId datasetModuleId, final String className,
                                                  final boolean forceUpdate) throws IOException, NotFoundException {
  final NamespaceId namespaceId = datasetModuleId.getParent();
  final Location namespaceHomeLocation;
  try {
    namespaceHomeLocation = impersonator.doAs(namespaceId, new Callable<Location>() {
      @Override
      public Location call() throws Exception {
        return namespacePathLocator.get(namespaceId);
      }
    });
  } catch (Exception e) {
    // The only checked exception that the callable throws is IOException.
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
  // Verify that the namespace directory exists.
  if (!namespaceHomeLocation.exists()) {
    String msg = String.format("Home directory %s for namespace %s not found", namespaceHomeLocation, namespaceId);
    LOG.debug(msg);
    throw new NotFoundException(msg);
  }
  // Store uploaded content to a local temp file.
  String namespacesDir = cConf.get(Constants.Namespace.NAMESPACES_DIR);
  File localDataDir = new File(cConf.get(Constants.CFG_LOCAL_DATA_DIR));
  File namespaceBase = new File(localDataDir, namespacesDir);
  File tempDir = new File(new File(namespaceBase, datasetModuleId.getNamespace()),
                          cConf.get(Constants.AppFabric.TEMP_DIR)).getAbsoluteFile();
  if (!DirUtils.mkdirs(tempDir)) {
    throw new IOException("Could not create temporary directory at: " + tempDir);
  }
  return new AbstractBodyConsumer(File.createTempFile("dataset-", ".jar", tempDir)) {
    @Override
    protected void onFinish(HttpResponder responder, File uploadedFile) throws Exception {
      if (className == null) {
        // We have to delay this check until the body upload completes, because not all clients send the
        // "Expect: 100-continue" header, and the client library we have cannot handle a connection close
        // and still read the response reliably.
        // In the longer term we should fix the client, as well as the netty-http server. However, since
        // this handler will be gone in the near future, it's ok to have this workaround.
        responder.sendString(HttpResponseStatus.BAD_REQUEST, "Required header 'class-name' is absent.");
        return;
      }
      LOG.debug("Adding module {}, class name: {}", datasetModuleId, className);
      String dataFabricDir = cConf.get(Constants.Dataset.Manager.OUTPUT_DIR);
      String moduleName = datasetModuleId.getModule();
      Location archiveDir = namespaceHomeLocation.append(dataFabricDir).append(moduleName)
        .append(Constants.ARCHIVE_DIR);
      String archiveName = moduleName + ".jar";
      Location archive = archiveDir.append(archiveName);
      // Copy uploaded content to a temporary location.
      Location tmpLocation = archive.getTempFile(".tmp");
      try {
        Locations.mkdirsIfNotExists(archiveDir);
        LOG.debug("Copy from {} to {}", uploadedFile, tmpLocation);
        Files.copy(uploadedFile, Locations.newOutputSupplier(tmpLocation));
        // Finally, move the archive to its final location.
        LOG.debug("Storing module {} jar at {}", datasetModuleId, archive);
        if (tmpLocation.renameTo(archive) == null) {
          throw new IOException(String.format("Could not move archive from location: %s, to location: %s",
                                              tmpLocation, archive));
        }
        typeManager.addModule(datasetModuleId, className, archive, forceUpdate);
        // todo: respond with the DatasetModuleMeta of the just-added module (and log this info)
        // Ideally this should have been done before, but we cannot grant privileges on types until they've been
        // added to the type MDS. First revoke any orphaned privileges for types left behind by past failed revokes.
        LOG.info("Added module {}", datasetModuleId);
        responder.sendStatus(HttpResponseStatus.OK);
      } catch (Exception e) {
        // In case the copy to the temporary file failed, or the rename failed.
        try {
          tmpLocation.delete();
        } catch (IOException ex) {
          LOG.warn("Failed to cleanup temporary location {}", tmpLocation);
        }
        if (e instanceof DatasetModuleConflictException) {
          responder.sendString(HttpResponseStatus.CONFLICT, e.getMessage());
        } else {
          throw e;
        }
      }
    }
  };
}
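The onFinish above publishes the module jar with a copy-to-temp-then-rename step so a partially written file never appears at the final archive location, and it deletes the temp location if anything fails. The same idea expressed with plain java.nio, as a simplified sketch rather than the Location-based CDAP code:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public final class AtomicPublish {

  // Copy 'uploaded' next to 'archive' under a temporary name, then move it into place.
  // The CDAP code above achieves the same effect with Location.getTempFile() and renameTo().
  // Assumes 'archive' has a parent directory.
  static void publish(Path uploaded, Path archive) throws IOException {
    Files.createDirectories(archive.getParent());
    Path tmp = archive.resolveSibling(archive.getFileName() + ".tmp");
    try {
      Files.copy(uploaded, tmp, StandardCopyOption.REPLACE_EXISTING);
      // ATOMIC_MOVE fails on file systems that don't support it; drop the option to allow a plain move.
      Files.move(tmp, archive, StandardCopyOption.ATOMIC_MOVE);
    } catch (IOException e) {
      // Clean up the partial temp file, mirroring the tmpLocation.delete() above.
      Files.deleteIfExists(tmp);
      throw e;
    }
  }
}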
Use of io.cdap.cdap.common.http.AbstractBodyConsumer in project cdap by caskdata.
From the class AppLifecycleHttpHandler, method deployApplication:
private BodyConsumer deployApplication(final HttpResponder responder, final NamespaceId namespace,
                                       final String appId, final String archiveName, final String configString,
                                       @Nullable final String ownerPrincipal,
                                       final boolean updateSchedules) throws IOException {
  Id.Namespace idNamespace = Id.Namespace.fromEntityId(namespace);
  Location namespaceHomeLocation = namespacePathLocator.get(namespace);
  if (!namespaceHomeLocation.exists()) {
    String msg = String.format("Home directory %s for namespace %s not found",
                               namespaceHomeLocation, namespace.getNamespace());
    LOG.error(msg);
    responder.sendString(HttpResponseStatus.NOT_FOUND, msg);
    return null;
  }
  if (archiveName == null || archiveName.isEmpty()) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST,
                         String.format("%s header not present. Please include the header and set its value to the jar name.",
                                       ARCHIVE_NAME_HEADER),
                         new DefaultHttpHeaders().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE));
    return null;
  }
  // TODO: (CDAP-3258) Error handling needs to be refactored here. We should be able to just throw the exception,
  // but the caller catches all exceptions and responds with a 500.
  final Id.Artifact artifactId;
  try {
    artifactId = Id.Artifact.parse(idNamespace, archiveName);
  } catch (IllegalArgumentException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
    return null;
  }
  KerberosPrincipalId ownerPrincipalId = ownerPrincipal == null ? null : new KerberosPrincipalId(ownerPrincipal);
  // Store uploaded content to a local temp file.
  String namespacesDir = configuration.get(Constants.Namespace.NAMESPACES_DIR);
  File localDataDir = new File(configuration.get(Constants.CFG_LOCAL_DATA_DIR));
  File namespaceBase = new File(localDataDir, namespacesDir);
  File tempDir = new File(new File(namespaceBase, namespace.getNamespace()),
                          configuration.get(Constants.AppFabric.TEMP_DIR)).getAbsoluteFile();
  if (!DirUtils.mkdirs(tempDir)) {
    throw new IOException("Could not create temporary directory at: " + tempDir);
  }
  final KerberosPrincipalId finalOwnerPrincipalId = ownerPrincipalId;
  return new AbstractBodyConsumer(File.createTempFile("app-", ".jar", tempDir)) {
    @Override
    protected void onFinish(HttpResponder responder, File uploadedFile) {
      try {
        // Deploy the app.
        ApplicationWithPrograms app = applicationLifecycleService.deployAppAndArtifact(
          namespace, appId, artifactId, uploadedFile, configString, finalOwnerPrincipalId,
          createProgramTerminator(), updateSchedules);
        LOG.info("Successfully deployed app {} in namespace {} from artifact {} with configuration {} and "
                   + "principal {}", app.getApplicationId().getApplication(), namespace.getNamespace(),
                 artifactId, configString, finalOwnerPrincipalId);
        responder.sendString(HttpResponseStatus.OK,
                             String.format("Successfully deployed app %s", app.getApplicationId().getApplication()));
      } catch (InvalidArtifactException e) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
      } catch (ArtifactAlreadyExistsException e) {
        responder.sendString(HttpResponseStatus.CONFLICT,
                             String.format("Artifact '%s' already exists. Please use the API that creates an application from an existing artifact. "
                                             + "If you are trying to replace the artifact, please delete it and then try again.", artifactId));
      } catch (WriteConflictException e) {
        // We don't really expect this to happen. It means that after multiple retries there were still write conflicts.
        LOG.warn("Write conflict while trying to add artifact {}.", artifactId, e);
        responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR,
                             "Write conflict while adding artifact. This can happen if multiple requests to add "
                               + "the same artifact occur simultaneously. Please try again.");
      } catch (UnauthorizedException e) {
        responder.sendString(HttpResponseStatus.FORBIDDEN, e.getMessage());
      } catch (ConflictException e) {
        responder.sendString(HttpResponseStatus.CONFLICT, e.getMessage());
      } catch (Exception e) {
        LOG.error("Deploy failure", e);
        responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
      }
    }
  };
}
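This is the other branch of the deploy API that the comment in deployAppFromArtifact refers to: instead of a JSON AppRequest, the client streams the application jar itself and names it in a request header, which is why the method must hand the body off to an AbstractBodyConsumer. A rough client-side sketch is below; the port, path, and the X-Archive-Name header name are assumptions drawn from public CDAP documentation (the handler above only shows the ARCHIVE_NAME_HEADER constant), so verify them for your deployment.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Path;

public class DeployAppJarExample {
  public static void main(String[] args) throws Exception {
    Path appJar = Path.of("target/MyApp-1.0.0.jar");

    // Assumed endpoint and header names. The jar is streamed in chunks, which is exactly what the
    // AbstractBodyConsumer returned by deployApplication() consumes on the server side.
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:11015/v3/namespaces/default/apps"))
        .header("Content-Type", "application/octet-stream")
        .header("X-Archive-Name", "MyApp-1.0.0.jar") // assumed value of ARCHIVE_NAME_HEADER
        .POST(HttpRequest.BodyPublishers.ofFile(appJar))
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode() + " " + response.body());
  }
}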