Use of io.hops.hopsworks.exceptions.ElasticException in the hopsworks project by logicalclocks.
The class ElasticClientController, method mngIndexCreate.
/**
 * Creates an elasticsearch index after validating the requested index name.
 *
 * @param request the index-creation request; its index name must be at most 255
 *                characters and all lower case
 * @return the acknowledged creation response
 * @throws ElasticException if the name is invalid, the query fails, or the
 *                          cluster does not acknowledge the creation
 */
public CreateIndexResponse mngIndexCreate(CreateIndexRequest request) throws ElasticException {
  // Elasticsearch rejects overly long index names; fail fast with a clear message.
  if (request.index().length() > 255) {
    String msg = "elastic index name is too long:" + request.index();
    throw new ElasticException(RESTCodes.ElasticErrorCode.ELASTIC_QUERY_ERROR, Level.INFO, msg);
  }
  // Index names must be lower case.
  // NOTE(review): toLowerCase() is locale-sensitive (e.g. Turkish dotless i);
  // consider toLowerCase(Locale.ROOT) — confirm against index naming sources.
  if (!request.index().equals(request.index().toLowerCase())) {
    String msg = "elastic index names can only contain lower case:" + request.index();
    throw new ElasticException(RESTCodes.ElasticErrorCode.ELASTIC_QUERY_ERROR, Level.INFO, msg);
  }
  FailableSupplier<CreateIndexResponse> query =
      () -> client.getClient().indices().create(request, RequestOptions.DEFAULT);
  CreateIndexResponse response = executeElasticQuery(query, "elastic index create", request.toString());
  if (response.isAcknowledged()) {
    return response;
  } else {
    // Fixed: the original message lacked a space between the index name and
    // "creation", producing e.g. "elastic index:foocreation could not ...".
    String msg = "elastic index:" + request.index() + " creation could not be acknowledged";
    throw new ElasticException(RESTCodes.ElasticErrorCode.ELASTIC_QUERY_ERROR, Level.INFO, msg);
  }
}
Use of io.hops.hopsworks.exceptions.ElasticException in the hopsworks project by logicalclocks.
The class ElasticClientController, method clearScrollingContext.
/**
 * Releases the server-side scrolling context identified by the given scroll id.
 *
 * @param scrollId the id of the scroll context to clear
 * @return the clear-scroll response (status 200)
 * @throws ElasticException if the query fails or the response status is not 200
 */
ClearScrollResponse clearScrollingContext(String scrollId) throws ElasticException {
  ClearScrollRequest clearRequest = new ClearScrollRequest();
  clearRequest.addScrollId(scrollId);
  FailableSupplier<ClearScrollResponse> query =
      () -> client.getClient().clearScroll(clearRequest, RequestOptions.DEFAULT);
  ClearScrollResponse response = executeElasticQuery(query, "elastic scrolling search", clearRequest.toString());
  int status = response.status().getStatus();
  if (status != 200) {
    String msg = "scroll context clearing query - bad status response:" + status;
    throw new ElasticException(RESTCodes.ElasticErrorCode.ELASTIC_QUERY_ERROR, Level.INFO, msg);
  }
  return response;
}
Use of io.hops.hopsworks.exceptions.ElasticException in the hopsworks project by logicalclocks.
The class ElasticClient, method getElasticIps.
/**
 * Builds the array of HTTP hosts for the configured elasticsearch endpoints.
 * Each configured address that is not a literal IP is resolved as a hostname
 * up front, so misconfiguration fails fast with an ElasticException.
 *
 * @return one HttpHost per configured elasticsearch address
 * @throws ElasticException if an address is neither a valid IP nor a resolvable hostname
 */
private HttpHost[] getElasticIps() throws ElasticException {
  String scheme = settings.isElasticHTTPSEnabled() ? "https" : "http";
  List<String> addresses = settings.getElasticIps();
  HttpHost[] hosts = new HttpHost[addresses.size()];
  for (int i = 0; i < addresses.size(); i++) {
    String address = addresses.get(i);
    // Validate the address pulled from the variables: accept literal IPs
    // directly; anything else must at least resolve as a hostname.
    if (!Ip.validIp(address)) {
      try {
        InetAddress.getByName(address);
      } catch (UnknownHostException e) {
        throw new ElasticException(RESTCodes.ElasticErrorCode.ELASTIC_CONNECTION_ERROR, Level.INFO,
            "Error while parsing elasticsearch ips", e.getMessage(), e);
      }
    }
    hosts[i] = new HttpHost(address, settings.getElasticRESTPort(), scheme);
  }
  return hosts;
}
Use of io.hops.hopsworks.exceptions.ElasticException in the hopsworks project by logicalclocks.
The class ProjectController, method createProject.
/**
 * Creates a new project: the database row, the project DIR, the requested
 * services, and the master (owner) of the project.
 * <p>
 * This needs to be an atomic operation (all or nothing): REQUIRES_NEW will
 * make sure a new transaction is created even if this method is called from
 * within a transaction. Every failing step triggers cleanup() before
 * rethrowing so no partial project is left behind.
 *
 * @param projectDTO project description: name, optional description, and the services to enable
 * @param owner the user that will own the new project
 * @param sessionId the caller's session id, forwarded to cleanup() when a step fails
 * @return the persisted project with its python environment created
 * @throws ProjectException if the name is already taken or a creation step fails
 * @throws HopsSecurityException if certificate generation or collection fails
 * @throws UserException if the owner's HDFS username cannot be resolved
 */
public Project createProject(ProjectDTO projectDTO, Users owner, String sessionId)
    throws DatasetException, GenericException, KafkaException, ProjectException, UserException,
    HopsSecurityException, ServiceException, FeaturestoreException, ElasticException, SchemaException,
    IOException {
  Long startTime = System.currentTimeMillis();
  // check that the project name is ok
  String projectName = projectDTO.getProjectName();
  FolderNameValidator.isValidProjectName(projectUtils, projectName);
  // Resolve requested services early so an unknown service name fails before
  // any resource has been created (valueOf throws IllegalArgumentException).
  List<ProjectServiceEnum> projectServices = new ArrayList<>();
  if (projectDTO.getServices() != null) {
    for (String s : projectDTO.getServices()) {
      ProjectServiceEnum se = ProjectServiceEnum.valueOf(s.toUpperCase());
      projectServices.add(se);
    }
  }
  LOGGER.log(Level.FINE, () -> "PROJECT CREATION TIME. Step 1: " + (System.currentTimeMillis() - startTime));
  DistributedFileSystemOps dfso = null;
  Project project = null;
  try {
    dfso = dfs.getDfsOps();
    /*
     * create a project in the database
     * if the creation go through it means that there is no other project with
     * the same name.
     * this project creation act like a lock, no other project can be created
     * with the same name
     * until this project is removed from the database
     */
    try {
      project = createProject(projectName, owner, projectDTO.getDescription(), dfso);
    } catch (EJBException ex) {
      LOGGER.log(Level.WARNING, null, ex);
      // Best-effort removal of the scratch dir; failure here is logged only.
      Path dummy = new Path("/tmp/" + projectName);
      try {
        dfso.rm(dummy, true);
      } catch (IOException e) {
        LOGGER.log(Level.SEVERE, null, e);
      }
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_EXISTS, Level.SEVERE,
          "project: " + projectName, ex.getMessage(), ex);
    }
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 2 (hdfs): {0}", System.currentTimeMillis() - startTime);
    verifyProject(project, dfso, sessionId);
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 3 (verify): {0}", System.currentTimeMillis() - startTime);
    // Run the handlers.
    try {
      ProjectHandler.runProjectPreCreateHandlers(projectHandlers, project);
    } catch (ProjectException ex) {
      cleanup(project, sessionId, null, true, owner);
      throw ex;
    }
    List<Future<?>> projectCreationFutures = new ArrayList<>();
    // This is an async call
    try {
      projectCreationFutures.add(certificatesController.generateCertificates(project, owner));
    } catch (Exception ex) {
      cleanup(project, sessionId, projectCreationFutures, true, owner);
      // Fixed: message previously lacked a separator between the project name
      // and "owner:", producing e.g. "project: fooowner: bar".
      throw new HopsSecurityException(RESTCodes.SecurityErrorCode.CERT_CREATION_ERROR, Level.SEVERE,
          "project: " + project.getName() + ", owner: " + owner.getUsername(), ex.getMessage(), ex);
    }
    String username = hdfsUsersController.getHdfsUserName(project, owner);
    if (username == null || username.isEmpty()) {
      cleanup(project, sessionId, projectCreationFutures, true, owner);
      // Fixed: same missing-separator defect as the message above.
      throw new UserException(RESTCodes.UserErrorCode.USER_WAS_NOT_FOUND, Level.SEVERE,
          "project: " + project.getName() + ", owner: " + owner.getUsername());
    }
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 4 (certs): {0}", System.currentTimeMillis() - startTime);
    // all the verifications have passed, we can now create the project
    // create the project folder
    ProvTypeDTO provType = settings.getProvType().dto;
    try {
      mkProjectDIR(projectName, dfso);
      fsProvController.updateProjectProvType(project, provType, dfso);
    } catch (IOException | EJBException | ProvenanceException ex) {
      cleanup(project, sessionId, projectCreationFutures, true, owner);
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_FOLDER_NOT_CREATED, Level.SEVERE,
          "project: " + projectName, ex.getMessage(), ex);
    }
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 5 (folders): {0}", System.currentTimeMillis() - startTime);
    // update the project with the project folder inode
    try {
      setProjectInode(project, dfso);
    } catch (IOException | EJBException ex) {
      cleanup(project, sessionId, projectCreationFutures, true, owner);
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_INODE_CREATION_ERROR, Level.SEVERE,
          "project: " + projectName, ex.getMessage(), ex);
    }
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 6 (inodes): {0}", System.currentTimeMillis() - startTime);
    // set payment and quotas
    try {
      setProjectOwnerAndQuotas(project, dfso, owner);
    } catch (IOException | EJBException ex) {
      cleanup(project, sessionId, projectCreationFutures, true, owner);
      throw new ProjectException(RESTCodes.ProjectErrorCode.QUOTA_ERROR, Level.SEVERE,
          "project: " + project.getName(), ex.getMessage(), ex);
    }
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 7 (quotas): {0}", System.currentTimeMillis() - startTime);
    try {
      hdfsUsersController.addProjectFolderOwner(project, dfso);
      createProjectLogResources(owner, project, dfso);
    } catch (IOException | EJBException ex) {
      cleanup(project, sessionId, projectCreationFutures);
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_SET_PERMISSIONS_ERROR, Level.SEVERE,
          "project: " + projectName, ex.getMessage(), ex);
    }
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 8 (logs): {0}", System.currentTimeMillis() - startTime);
    // Best-effort removal of stale indices/saved objects from a previously
    // deleted project with the same name, to avoid inconsistencies.
    try {
      elasticController.deleteProjectIndices(project);
      elasticController.deleteProjectSavedObjects(projectName);
      LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 9 (elastic cleanup): {0}",
          System.currentTimeMillis() - startTime);
    } catch (ElasticException ex) {
      LOGGER.log(Level.FINE, "Error while cleaning old project indices", ex);
    }
    logProject(project, OperationType.Add);
    // enable services
    for (ProjectServiceEnum service : projectServices) {
      try {
        projectCreationFutures.addAll(addService(project, service, owner, dfso, provType));
      } catch (RESTException | IOException ex) {
        cleanup(project, sessionId, projectCreationFutures);
        throw ex;
      }
    }
    // Wait for all async steps (certificates, services) before declaring success.
    try {
      for (Future f : projectCreationFutures) {
        if (f != null) {
          f.get();
        }
      }
    } catch (InterruptedException | ExecutionException ex) {
      LOGGER.log(Level.SEVERE,
          "Error while waiting for the certificate generation thread to finish. Will try to cleanup...", ex);
      cleanup(project, sessionId, projectCreationFutures);
      throw new HopsSecurityException(RESTCodes.SecurityErrorCode.CERT_CREATION_ERROR, Level.SEVERE);
    }
    // Run the handlers.
    try {
      ProjectHandler.runProjectPostCreateHandlers(projectHandlers, project);
    } catch (ProjectException ex) {
      cleanup(project, sessionId, projectCreationFutures);
      throw ex;
    }
    try {
      project = environmentController.createEnv(project, owner);
    } catch (PythonException | EJBException ex) {
      cleanup(project, sessionId, projectCreationFutures);
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_ANACONDA_ENABLE_ERROR, Level.SEVERE,
          "project: " + projectName, ex.getMessage(), ex);
    }
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 10 (env): {0}", System.currentTimeMillis() - startTime);
    return project;
  } finally {
    // Always release the filesystem handle, success or failure.
    if (dfso != null) {
      dfso.close();
    }
    LOGGER.log(Level.FINE, "PROJECT CREATION TIME. Step 11 (close): {0}", System.currentTimeMillis() - startTime);
  }
}
Use of io.hops.hopsworks.exceptions.ElasticException in the hopsworks project by logicalclocks.
The class ProjectController, method removeProjectInt.
/**
 * Removes a project and its associated resources: certificates, Kafka topics,
 * TensorBoards, Jupyter, quotas, the online featurestore and Hive databases,
 * elasticsearch indices, HDFS groups/users, servings, Airflow artifacts, and
 * finally the project folder itself.
 * <p>
 * NOTE(review): the teardown steps are order-dependent (e.g. ownership is
 * transferred to the superuser before files are touched, and the project
 * folder is removed last) — do not reorder without checking each dependency.
 *
 * @param projectCreationFutures pending creation futures to await first;
 *                               null when called during plain project deletion
 * @param decreaseCreatedProj whether to also decrement the owner's created-projects counter
 */
private void removeProjectInt(Project project, List<HdfsUsers> usersToClean, List<HdfsGroups> groupsToClean, List<Future<?>> projectCreationFutures, boolean decreaseCreatedProj, Users owner) throws IOException, InterruptedException, HopsSecurityException, ServiceException, ProjectException, GenericException, TensorBoardException, FeaturestoreException {
DistributedFileSystemOps dfso = null;
try {
dfso = dfs.getDfsOps();
// Run custom handlers for project deletion
ProjectHandler.runProjectPreDeleteHandlers(projectHandlers, project);
// log removal to notify elastic search
logProject(project, OperationType.Delete);
// change the owner and group of the project folder to hdfs super user
Path location = new Path(Utils.getProjectPath(project.getName()));
changeOwnershipToSuperuser(location, dfso);
Path dumy = new Path("/tmp/" + project.getName());
changeOwnershipToSuperuser(dumy, dfso);
// remove kafka topics
removeKafkaTopics(project);
// projectCreationFutures will be null during project deletion.
// When deleting a half-created project, wait for in-flight creation work
// so cleanup does not race against it; failures are logged, not rethrown.
if (projectCreationFutures != null) {
for (Future f : projectCreationFutures) {
if (f != null) {
try {
f.get();
} catch (ExecutionException ex) {
LOGGER.log(Level.SEVERE, "Error while waiting for ProjectCreationFutures to finish for Project " + project.getName(), ex);
}
}
}
}
// Revoke the project's certificates. CERTIFICATE_NOT_FOUND is tolerated
// (nothing to revoke); any other failure aborts and demands manual cleanup.
try {
certificatesController.revokeProjectCertificates(project, owner);
} catch (HopsSecurityException ex) {
if (ex.getErrorCode() != RESTCodes.SecurityErrorCode.CERTIFICATE_NOT_FOUND) {
LOGGER.log(Level.SEVERE, "Could not delete certificates during cleanup for project " + project.getName() + ". Manual cleanup is needed!!!", ex);
throw ex;
}
} catch (IOException | GenericException ex) {
LOGGER.log(Level.SEVERE, "Could not delete certificates during cleanup for project " + project.getName() + ". Manual cleanup is needed!!!", ex);
throw ex;
}
// remove running tensorboards
removeTensorBoard(project);
// remove jupyter
removeJupyter(project);
removeProjectRelatedFiles(usersToClean, dfso);
// remove quota
removeQuotas(project);
// change owner for files in shared datasets
fixSharedDatasets(project, dfso);
// Delete online featurestore database
onlineFeaturestoreController.removeOnlineFeatureStore(project);
// Delete Hive database - will automatically cleanup all the Hive's metadata
hiveController.dropDatabases(project, dfso, false);
// Elasticsearch cleanup is best-effort: a failure is logged but does not
// block the rest of the teardown.
try {
// Delete elasticsearch template for this project
removeElasticsearch(project);
} catch (ElasticException ex) {
LOGGER.log(Level.WARNING, "Failure while removing elasticsearch indices", ex);
}
// delete project group and users
removeGroupAndUsers(groupsToClean, usersToClean);
// remove dumy Inode
dfso.rm(dumy, true);
// Remove servings
try {
servingController.deleteAll(project);
} catch (ServingException e) {
throw new IOException(e);
}
// Remove Airflow DAGs from local filesystem,
// JWT renewal monitors and materialized X.509
airflowManager.onProjectRemoval(project);
// remove folder
removeProjectFolder(project.getName(), dfso);
if (decreaseCreatedProj) {
usersController.decrementNumProjectsCreated(project.getOwner().getUid());
}
usersController.decrementNumActiveProjects(project.getOwner().getUid());
// Run custom handlers for project deletion
ProjectHandler.runProjectPostDeleteHandlers(projectHandlers, project);
LOGGER.log(Level.INFO, "{0} - project removed.", project.getName());
} finally {
// Always release the filesystem handle, success or failure.
if (dfso != null) {
dfso.close();
}
}
}
Aggregations