Use of com.logicalclocks.servicediscoverclient.exceptions.ServiceDiscoveryException in project hopsworks by logicalclocks.
The class FeaturegroupController, method convertFeaturegrouptoDTO.
/**
 * Convert a featuregroup entity to a DTO representation
 *
 * @param featuregroup the entity to convert
 * @param project the project context for the conversion
 * @param user the user making the request
 * @return a DTO representation of the entity
 */
private FeaturegroupDTO convertFeaturegrouptoDTO(Featuregroup featuregroup, Project project, Users user)
    throws FeaturestoreException, ServiceException {
  String featurestoreName = featurestoreFacade.getHiveDbName(featuregroup.getFeaturestore().getHiveDbId());
  switch (featuregroup.getFeaturegroupType()) {
    case CACHED_FEATURE_GROUP:
      CachedFeaturegroupDTO cachedFeaturegroupDTO =
          cachedFeaturegroupController.convertCachedFeaturegroupToDTO(featuregroup, project, user);
      cachedFeaturegroupDTO.setFeaturestoreName(featurestoreName);
      return cachedFeaturegroupDTO;
    case ON_DEMAND_FEATURE_GROUP:
      FeaturestoreStorageConnectorDTO storageConnectorDTO = connectorController.convertToConnectorDTO(
          user, project, featuregroup.getOnDemandFeaturegroup().getFeaturestoreConnector());
      OnDemandFeaturegroupDTO onDemandFeaturegroupDTO =
          new OnDemandFeaturegroupDTO(featurestoreName, featuregroup, storageConnectorDTO);
      try {
        String path = getFeatureGroupLocation(featuregroup);
        String location = featurestoreUtils.prependNameNode(path);
        onDemandFeaturegroupDTO.setLocation(location);
      } catch (ServiceDiscoveryException e) {
        throw new ServiceException(RESTCodes.ServiceErrorCode.SERVICE_NOT_FOUND, Level.SEVERE);
      }
      return onDemandFeaturegroupDTO;
    default:
      throw new IllegalArgumentException(RESTCodes.FeaturestoreErrorCode.ILLEGAL_FEATUREGROUP_TYPE.getMessage()
          + ", Recognized Feature group types are: " + FeaturegroupType.ON_DEMAND_FEATURE_GROUP
          + ", and: " + FeaturegroupType.CACHED_FEATURE_GROUP
          + ". The provided feature group type was not recognized: " + featuregroup.getFeaturegroupType());
  }
}
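The interesting part is the on-demand branch: featurestoreUtils.prependNameNode(path) resolves the NameNode through service discovery, so building the location string can fail with a ServiceDiscoveryException, which the controller translates into a ServiceException with code SERVICE_NOT_FOUND. The minimal sketch below isolates that catch-and-translate step with a stand-in resolver; LocationResolver, its method body, and the sample path and address are illustrative assumptions, not Hopsworks code.

import com.logicalclocks.servicediscoverclient.exceptions.ServiceDiscoveryException;

// Illustrative stand-in for featurestoreUtils: prependNameNode(path) relies on a
// service-discovery lookup of the NameNode, so it declares the exception.
class LocationResolver {
  String prependNameNode(String path) throws ServiceDiscoveryException {
    // A real implementation would resolve the NameNode address via Consul and
    // return something like "hdfs://<namenode>:<port>" + path.
    return "hdfs://namenode.service.consul:8020" + path; // placeholder address
  }
}

public class FeatureGroupLocationSketch {
  public static void main(String[] args) {
    LocationResolver resolver = new LocationResolver();
    try {
      String location = resolver.prependNameNode("/Projects/demo/demo_featurestore.db/fg_1"); // placeholder path
      System.out.println("location: " + location);
    } catch (ServiceDiscoveryException e) {
      // convertFeaturegrouptoDTO rethrows this as
      // ServiceException(RESTCodes.ServiceErrorCode.SERVICE_NOT_FOUND, Level.SEVERE).
      System.err.println("NameNode lookup failed: " + e.getMessage());
    }
  }
}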
Use of com.logicalclocks.servicediscoverclient.exceptions.ServiceDiscoveryException in project hopsworks by logicalclocks.
The class OfflineFeatureGroupController, method openMetastoreClient.
// Here we can't use the HiveMetaStoreClient.java wrapper, as we would need to export environment variables and so on;
// instead we assemble the Thrift client directly, which is what the HiveMetaStoreClient does behind the scenes.
private ThriftHiveMetastore.Client openMetastoreClient(Project project, Users user)
    throws ServiceException, IOException {
  String hdfsUsername = hdfsUsersController.getHdfsUserName(project, user);
  ThriftHiveMetastore.Client client = null;
  try {
    certificateMaterializer.materializeCertificatesLocal(user.getUsername(), project.getName());
    CertificateMaterializer.CryptoMaterial userMaterial =
        certificateMaterializer.getUserMaterial(user.getUsername(), project.getName());
    // Read the password
    String password = String.copyValueOf(userMaterial.getPassword());
    // Get metastore service information from Consul
    Service metastoreService = serviceDiscoveryController.getAnyAddressOfServiceWithDNS(
        ServiceDiscoveryController.HopsworksService.HIVE_METASTORE);
    TTransport transport;
    if (settings.getHopsRpcTls()) {
      // Set up a secure connection with the Hive metastore.
      TSSLTransportFactory.TSSLTransportParameters params = new TSSLTransportFactory.TSSLTransportParameters();
      params.setTrustStore(certificateMaterializer.getUserTransientTruststorePath(project, user), password);
      params.setKeyStore(certificateMaterializer.getUserTransientKeystorePath(project, user), password);
      transport = TSSLTransportFactory.getClientSocket(metastoreService.getAddress(), metastoreService.getPort(),
          CONNECTION_TIMEOUT, params);
    } else {
      transport = new TSocket(TConfiguration.DEFAULT, metastoreService.getAddress(), metastoreService.getPort(),
          CONNECTION_TIMEOUT);
    }
    TProtocol protocol = new TBinaryProtocol(transport);
    client = new ThriftHiveMetastore.Client(protocol);
    // Open the transport
    if (!transport.isOpen()) {
      transport.open();
    }
    // Set the UGI on the metastore side
    client.set_ugi(hdfsUsername, new ArrayList<>());
    if (settings.getHopsRpcTls()) {
      // Send the certificate to the metastore so it can operate with the fs.
      client.set_crypto(userMaterial.getKeyStore(), password, userMaterial.getTrustStore(), password, false);
    }
  } catch (CryptoPasswordNotFoundException | ServiceDiscoveryException | TException e) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.METASTORE_CONNECTION_ERROR, Level.SEVERE,
        "Hive metastore connection error", e.getMessage(), e);
  }
  return client;
}
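Because the metastore address comes from Consul rather than a static configuration, the whole block is wrapped so that CryptoPasswordNotFoundException, ServiceDiscoveryException, and TException all collapse into a single METASTORE_CONNECTION_ERROR. Below is a minimal, non-TLS sketch of just the transport setup. The host, port, and timeout are placeholders; the real method obtains the address from serviceDiscoveryController.getAnyAddressOfServiceWithDNS(HIVE_METASTORE) and may instead throw ServiceDiscoveryException if the lookup fails.

import org.apache.thrift.TConfiguration;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransportException;

public class MetastoreTransportSketch {
  // Assumed value; the Hopsworks controller uses its own CONNECTION_TIMEOUT constant.
  private static final int CONNECTION_TIMEOUT = 30 * 1000;

  public static void main(String[] args) throws TTransportException {
    // Placeholders for metastoreService.getAddress() / metastoreService.getPort().
    String host = "metastore.service.consul";
    int port = 9083;

    TSocket transport = new TSocket(TConfiguration.DEFAULT, host, port, CONNECTION_TIMEOUT);
    try {
      if (!transport.isOpen()) {
        transport.open();
      }
      TProtocol protocol = new TBinaryProtocol(transport);
      // A ThriftHiveMetastore.Client would be layered on this protocol, followed by
      // client.set_ugi(hdfsUsername, new ArrayList<>()) as in the method above.
      System.out.println("connected via " + protocol.getClass().getSimpleName());
    } finally {
      transport.close();
    }
  }
}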
Use of com.logicalclocks.servicediscoverclient.exceptions.ServiceDiscoveryException in project hopsworks by logicalclocks.
The class EnvironmentController, method updateInstalledDependencies.
public Project updateInstalledDependencies(Project project) throws ServiceException, IOException {
  try {
    String condaListOutput = libraryController.condaList(projectUtils.getFullDockerImageName(project, false));
    Collection<PythonDep> projectDeps = libraryController.parseCondaList(condaListOutput);
    projectDeps = libraryController.persistAndMarkImmutable(projectDeps);
    project = libraryController.syncProjectPythonDepsWithEnv(project, projectDeps);
    project = libraryController.addOngoingOperations(project);
    return project;
  } catch (ServiceDiscoveryException e) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.SERVICE_DISCOVERY_ERROR, Level.SEVERE, null, e.getMessage(), e);
  }
}
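The discovery dependency is less obvious here: the calls inside the try block, most plausibly projectUtils.getFullDockerImageName(project, false), need service discovery under the hood, and a failed lookup is reported as SERVICE_DISCOVERY_ERROR. Since this catch-and-wrap pattern recurs in every snippet on this page, a generic helper along the following lines could express it once; DiscoveryCalls, DiscoveryDependent, and translate are illustrative names, not Hopsworks or service-discovery-client APIs.

import com.logicalclocks.servicediscoverclient.exceptions.ServiceDiscoveryException;
import java.util.function.Function;

// Illustrative helper: run a block that may need service discovery and translate
// the failure into whatever caller-facing exception type the surrounding layer uses.
public final class DiscoveryCalls {

  @FunctionalInterface
  public interface DiscoveryDependent<T> {
    T call() throws ServiceDiscoveryException;
  }

  public static <T, E extends Exception> T translate(
      DiscoveryDependent<T> block,
      Function<ServiceDiscoveryException, E> wrapper) throws E {
    try {
      return block.call();
    } catch (ServiceDiscoveryException e) {
      // Same shape as the catch block above: preserve the original message and cause.
      throw wrapper.apply(e);
    }
  }
}

With such a helper, the call above could read translate(() -> projectUtils.getFullDockerImageName(project, false), e -> new ServiceException(RESTCodes.ServiceErrorCode.SERVICE_DISCOVERY_ERROR, Level.SEVERE, null, e.getMessage(), e)), assuming getFullDockerImageName declares only ServiceDiscoveryException.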
Use of com.logicalclocks.servicediscoverclient.exceptions.ServiceDiscoveryException in project hopsworks by logicalclocks.
The class HttpClient, method execute.
public <T> T execute(HttpRequest request, ResponseHandler<T> handler) throws IOException {
  if (host == null) {
    try {
      Service hopsworksService = serviceDiscoveryController.getAnyAddressOfServiceWithDNS(
          ServiceDiscoveryController.HopsworksService.HOPSWORKS_APP);
      host = new HttpHost(hopsworksService.getName(), hopsworksService.getPort(), "HTTPS");
    } catch (ServiceDiscoveryException ex) {
      throw new IOException(ex);
    }
  }
  return client.execute(host, request, handler);
}
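A hypothetical caller of this method might look like the sketch below. It assumes the Hopsworks HttpClient wrapper shown above is available with exactly the signature shown, and the request path is made up for illustration; the point is that a Consul lookup failure for HOPSWORKS_APP surfaces to the caller as a plain IOException.

import java.io.IOException;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.util.EntityUtils;

public class HttpClientCallerSketch {
  // httpClient is the Hopsworks HttpClient wrapper documented above (assumed injected).
  public static String fetch(HttpClient httpClient) throws IOException {
    HttpGet request = new HttpGet("/hopsworks-api/api/endpoint"); // illustrative path
    // On the first call, execute(...) resolves HOPSWORKS_APP via Consul; if that
    // lookup fails, the ServiceDiscoveryException arrives here wrapped in an IOException.
    return httpClient.execute(request, response -> EntityUtils.toString(response.getEntity()));
  }
}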
Use of com.logicalclocks.servicediscoverclient.exceptions.ServiceDiscoveryException in project hopsworks by logicalclocks.
The class JupyterConfigFilesGenerator, method createSparkMagicConfig.
public void createSparkMagicConfig(Writer out, Project project, JupyterSettings js, String hdfsUser,
    Users hopsworksUser, String confDirPath)
    throws IOException, ServiceDiscoveryException, JobException, ApiKeyException {
  SparkJobConfiguration sparkJobConfiguration = (SparkJobConfiguration) js.getJobConfig();
  // If the user selected Python, we should use the default Spark configuration for the Spark/PySpark kernels
  if (js.isPythonKernel()) {
    sparkJobConfiguration = (SparkJobConfiguration) jobController.getConfiguration(project, JobType.SPARK, true);
  }
  SparkConfigurationUtil sparkConfigurationUtil = new SparkConfigurationUtil();
  Map<String, String> extraJavaOptions = new HashMap<>();
  extraJavaOptions.put(Settings.LOGSTASH_JOB_INFO, project.getName().toLowerCase() + ",jupyter,notebook,?");
  HashMap<String, String> finalSparkConfiguration = new HashMap<>();
  finalSparkConfiguration.put(Settings.SPARK_DRIVER_STAGINGDIR_ENV,
      "hdfs:///Projects/" + project.getName() + "/Resources/.sparkStaging");
  // Set the Hopsworks Consul service domain; use the name, not the address
  String hopsworksRestEndpoint = "https://" + serviceDiscoveryController.constructServiceFQDNWithPort(
      ServiceDiscoveryController.HopsworksService.HOPSWORKS_APP);
  finalSparkConfiguration.putAll(sparkConfigurationUtil.setFrameworkProperties(project, sparkJobConfiguration,
      settings, hdfsUser, hopsworksUser, extraJavaOptions, kafkaBrokers.getKafkaBrokersString(),
      hopsworksRestEndpoint, servingConfig, serviceDiscoveryController));
  StringBuilder sparkConfBuilder = new StringBuilder();
  ArrayList<String> keys = new ArrayList<>(finalSparkConfiguration.keySet());
  Collections.sort(keys);
  for (String configKey : keys) {
    sparkConfBuilder.append("\t\"" + configKey + "\":\"" + finalSparkConfiguration.get(configKey) + "\"," + "\n");
  }
  sparkConfBuilder.deleteCharAt(sparkConfBuilder.lastIndexOf(","));
  try {
    Service livyService = serviceDiscoveryController.getAnyAddressOfServiceWithDNS(
        ServiceDiscoveryController.HopsworksService.LIVY);
    SparkMagicConfigTemplateBuilder templateBuilder = SparkMagicConfigTemplateBuilder.newBuilder()
        .setLivyIp(livyService.getAddress())
        .setJupyterHome(confDirPath)
        .setDriverCores(Integer.parseInt(finalSparkConfiguration.get(Settings.SPARK_DRIVER_CORES_ENV)))
        .setDriverMemory(finalSparkConfiguration.get(Settings.SPARK_DRIVER_MEMORY_ENV))
        .setLivyStartupTimeout(settings.getLivyStartupTimeout());
    if (sparkJobConfiguration.isDynamicAllocationEnabled() || sparkJobConfiguration.getExperimentType() != null) {
      templateBuilder.setNumExecutors(1);
    } else {
      templateBuilder.setNumExecutors(Integer.parseInt(finalSparkConfiguration.get(Settings.SPARK_NUMBER_EXECUTORS_ENV)));
    }
    templateBuilder
        .setExecutorCores(Integer.parseInt(finalSparkConfiguration.get(Settings.SPARK_EXECUTOR_CORES_ENV)))
        .setExecutorMemory(finalSparkConfiguration.get(Settings.SPARK_EXECUTOR_MEMORY_ENV))
        .setHdfsUser(hdfsUser)
        .setYarnQueue(sparkJobConfiguration.getAmQueue())
        .setHadoopHome(settings.getHadoopSymbolicLinkDir())
        .setHadoopVersion(settings.getHadoopVersion())
        .setSparkConfiguration(sparkConfBuilder.toString());
    Map<String, Object> dataModel = new HashMap<>(1);
    dataModel.put("conf", templateBuilder.build());
    templateEngine.template(SparkMagicConfigTemplate.TEMPLATE_NAME, dataModel, out);
  } catch (TemplateException | ServiceDiscoveryException ex) {
    throw new IOException(ex);
  }
}
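Two details are easy to miss here: the Hopsworks REST endpoint is built from the Consul service name via constructServiceFQDNWithPort rather than a resolved address, while Livy is resolved to a concrete address with getAnyAddressOfServiceWithDNS; and the Spark properties are rendered into the sparkmagic template as a sorted, comma-separated block. The self-contained sketch below reproduces just that rendering step; the sample keys and values are placeholders.

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Sketch of the sparkConfBuilder step above: render the final Spark configuration
// as the sorted, tab-indented "key":"value" lines the sparkmagic template expects.
public class SparkConfFragmentSketch {

  public static String render(Map<String, String> finalSparkConfiguration) {
    StringBuilder sparkConfBuilder = new StringBuilder();
    ArrayList<String> keys = new ArrayList<>(finalSparkConfiguration.keySet());
    Collections.sort(keys);
    for (String configKey : keys) {
      sparkConfBuilder.append("\t\"").append(configKey).append("\":\"")
          .append(finalSparkConfiguration.get(configKey)).append("\",\n");
    }
    // Drop the trailing comma, as the original method does.
    sparkConfBuilder.deleteCharAt(sparkConfBuilder.lastIndexOf(","));
    return sparkConfBuilder.toString();
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("spark.executor.memory", "4g");              // placeholder
    conf.put("spark.dynamicAllocation.enabled", "true");  // placeholder
    System.out.println(render(conf));
  }
}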