Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HadoopClusterService, method getHadoopCustomLibraries:
/*
* (non-Javadoc)
*
* @see org.talend.core.hadoop.IHadoopClusterService#getHadoopCustomLibraries()
*/
@Override
public Map<String, String> getHadoopCustomLibraries(String clusterId) {
    Map<String, String> customLibraries = new HashMap<>();
    HadoopClusterConnection hadoopClusterConnection = HCRepositoryUtil.getRelativeHadoopClusterConnection(clusterId);
    if (hadoopClusterConnection != null) {
        EMap<String, String> parameters = hadoopClusterConnection.getParameters();
        for (String key : parameters.keySet()) {
            customLibraries.put(key, parameters.get(key));
        }
    }
    return customLibraries;
}
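The loop above copies the EMF parameter map into a plain HashMap one key at a time, which performs an extra lookup per key. A minimal, purely illustrative alternative iterates the entry set instead; this is not project code and assumes only that EMap exposes entrySet(), as org.eclipse.emf.common.util.EMap does.

import java.util.HashMap;
import java.util.Map;

import org.eclipse.emf.common.util.EMap;

// Illustrative helper, not project code: the same defensive copy as in
// getHadoopCustomLibraries, but reading each parameter through its entry so
// the value is fetched with a single lookup.
final class ParameterCopyExample {

    static Map<String, String> copyParameters(EMap<String, String> parameters) {
        Map<String, String> copy = new HashMap<>();
        for (Map.Entry<String, String> entry : parameters.entrySet()) {
            copy.put(entry.getKey(), entry.getValue());
        }
        return copy;
    }
}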
Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HadoopClusterService, method copyHadoopCluster:
@Override
public void copyHadoopCluster(final Item sourceItem, final IPath path, String newName) throws PersistenceException, BusinessException {
    if (isHadoopClusterItem(sourceItem)) {
        IProxyRepositoryFactory factory = ProxyRepositoryFactory.getInstance();
        HadoopClusterConnectionItem sourceClusterItem = (HadoopClusterConnectionItem) sourceItem;
        HadoopClusterConnectionItem targetClusterItem = null;
        // Copy the cluster item itself, keeping the requested name when one is provided.
        if (StringUtils.isNotBlank(newName)) {
            targetClusterItem = (HadoopClusterConnectionItem) factory.copy(sourceClusterItem, path, newName);
        } else {
            targetClusterItem = (HadoopClusterConnectionItem) factory.copy(sourceClusterItem, path, true);
        }
        HadoopClusterConnection targetClusterConnection = (HadoopClusterConnection) targetClusterItem.getConnection();
        targetClusterConnection.getConnectionList().clear();
        String targetClusterId = targetClusterItem.getProperty().getId();
        // Copy every subitem of the source cluster and relink the copy to the new cluster id.
        Set<Item> sourceSubitems = HCRepositoryUtil.getSubitemsOfHadoopCluster(sourceClusterItem);
        for (Item subitem : sourceSubitems) {
            Item newSubitem = factory.copy(subitem, path, true);
            if (newSubitem instanceof HadoopSubConnectionItem) {
                ((HadoopSubConnection) ((HadoopSubConnectionItem) newSubitem).getConnection()).setRelativeHadoopClusterId(targetClusterId);
                targetClusterConnection.getConnectionList().add(newSubitem.getProperty().getId());
            } else if (subitem instanceof DatabaseConnectionItem) {
                ((DatabaseConnection) ((DatabaseConnectionItem) newSubitem).getConnection()).getParameters().put(ConnParameterKeys.CONN_PARA_KEY_HADOOP_CLUSTER_ID, targetClusterId);
            }
            factory.save(newSubitem);
        }
        factory.save(targetClusterItem);
    }
}
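A minimal sketch of how a caller might invoke this copy, relying only on the signature shown above. The repository folder path, the new name, and the exception package names are illustrative assumptions, and the import of HadoopClusterService itself is omitted because its package is not shown in the snippet.

import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.Path;
import org.talend.commons.exception.BusinessException;
import org.talend.commons.exception.PersistenceException;
import org.talend.core.model.properties.Item;

// Hypothetical caller: duplicate an existing Hadoop cluster item under a new name.
final class CopyHadoopClusterExample {

    static void duplicate(HadoopClusterService service, Item sourceClusterItem)
            throws PersistenceException, BusinessException {
        IPath targetFolder = new Path("metadata/hadoopcluster"); // assumed repository folder
        service.copyHadoopCluster(sourceClusterItem, targetFolder, "my_cluster_copy"); // hypothetical new name
    }
}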
Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class AdaptDeprecatedHadoopVersionsMigrationTask, method execute:
@Override
public ExecutionResult execute(Item item) {
    if (item instanceof HadoopClusterConnectionItem) {
        DistributionBean[] distributions = HadoopDistributionsHelper.HADOOP.getDistributions();
        if (distributions != null && distributions.length > 0) {
            boolean modified = false;
            HadoopClusterConnectionItem hcItem = (HadoopClusterConnectionItem) item;
            HadoopClusterConnection hcConnection = (HadoopClusterConnection) hcItem.getConnection();
            // Fall back to the first available distribution when the stored one is no longer supported.
            DistributionBean distributionBean = HadoopDistributionsHelper.HADOOP.getDistribution(hcConnection.getDistribution(), false);
            if (distributionBean == null) {
                distributionBean = distributions[0];
                hcConnection.setDistribution(distributionBean.getName());
                modified = true;
            }
            // Apply the same fallback to the distribution version.
            String version = hcConnection.getDfVersion();
            DistributionVersion distributionVersion = distributionBean.getVersion(version, false);
            if (distributionVersion == null) {
                DistributionVersion[] versions = distributionBean.getVersions();
                if (versions != null && versions.length > 0) {
                    hcConnection.setDfVersion(versions[0].getVersion());
                    modified = true;
                }
            }
            if (modified) {
                try {
                    ProxyRepositoryFactory.getInstance().save(hcItem, true);
                    return ExecutionResult.SUCCESS_NO_ALERT;
                } catch (PersistenceException e) {
                    ExceptionHandler.process(e);
                    return ExecutionResult.FAILURE;
                }
            }
        }
    }
    return ExecutionResult.NOTHING_TO_DO;
}
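The migration reduces to one rule: keep a stored value while it is still in the supported list, otherwise replace it with the first supported entry. The stand-alone illustration below expresses that rule over plain strings with hypothetical distribution labels; it is not the project's DistributionBean/DistributionVersion API.

// Illustration only: the fallback rule applied by the migration task above,
// expressed over plain strings instead of DistributionBean/DistributionVersion.
final class DeprecatedValueFallbackExample {

    static String resolve(String stored, String[] supported) {
        if (supported == null || supported.length == 0) {
            return stored; // nothing to fall back to, keep the stored value
        }
        for (String candidate : supported) {
            if (candidate.equals(stored)) {
                return stored; // still supported, no migration needed
            }
        }
        return supported[0]; // deprecated value, replaced by the first supported entry
    }

    public static void main(String[] args) {
        String[] supported = { "DISTRO_A", "DISTRO_B" }; // hypothetical labels
        System.out.println(resolve("DISTRO_OLD", supported)); // prints DISTRO_A
        System.out.println(resolve("DISTRO_B", supported)); // prints DISTRO_B
    }
}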
Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HadoopClusterContextHandler, method matchContextForAttribues:
@Override
protected void matchContextForAttribues(Connection conn, IConnParamName paramName, String hadoopVariableName) {
    HadoopClusterConnection hadoopConn = (HadoopClusterConnection) conn;
    EHadoopParamName hadoopParam = (EHadoopParamName) paramName;
    switch (hadoopParam) {
    case NameNodeUri:
        hadoopConn.setNameNodeURI(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case JobTrackerUri:
        hadoopConn.setJobTrackerURI(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case ResourceManager:
        hadoopConn.setJobTrackerURI(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case ResourceManagerScheduler:
        hadoopConn.setRmScheduler(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case JobHistory:
        hadoopConn.setJobHistory(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case StagingDirectory:
        hadoopConn.setStagingDirectory(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case NameNodePrin:
        hadoopConn.setPrincipal(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case JTOrRMPrin:
        hadoopConn.setJtOrRmPrincipal(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case JobHistroyPrin:
        hadoopConn.setJobHistoryPrincipal(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case User:
        hadoopConn.setUserName(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case Group:
        hadoopConn.setGroup(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case Principal:
        hadoopConn.setKeytabPrincipal(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KeyTab:
        hadoopConn.setKeytab(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case WebHostName:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_WEB_HCAT_HOSTNAME, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case WebPort:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_WEB_HCAT_PORT, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case WebUser:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_WEB_HCAT_USERNAME, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case WebJobResFolder:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_WEB_HCAT_JOB_RESULT_FOLDER, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case HDIUser:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_HDI_USERNAME, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case HDIPassword:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_HDI_PASSWORD, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KeyAzureHost:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_AZURE_HOSTNAME, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KeyAzureContainer:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_AZURE_CONTAINER, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KeyAzuresUser:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_AZURE_USERNAME, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KeyAzurePassword:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_AZURE_PASSWORD, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KeyAzureDeployBlob:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_AZURE_DEPLOY_BLOB, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseHostName:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SYNAPSE_HOST, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseAuthToken:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SYNAPSE_AUTH_TOKEN, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseSparkPools:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SYNAPSE_SPARK_POOLS, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseFsHostName:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SYNAPSE_FS_HOSTNAME, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseFsContainer:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SYNAPSE_FS_CONTAINER, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseFsUserName:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SYNAPSE_FS_USERNAME, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseFsPassword:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SYNAPSE_FS_PASSWORD, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseDeployBlob:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SYNAPSE_DEPLOY_BLOB, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseDriverMemory:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_DRIVER_MEMORY, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseDriverCores:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_DRIVER_CORES, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SynapseExecutorMemory:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_EXECUTOR_MEMORY, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case ClouderaNavigatorUsername:
        hadoopConn.setClouderaNaviUserName(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case ClouderaNavigatorPassword:
        hadoopConn.setClouderaNaviPassword(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case ClouderaNavigatorUrl:
        hadoopConn.setClouderaNaviUrl(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case ClouderaNavigatorMetadataUrl:
        hadoopConn.setClouderaNaviMetadataUrl(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case maprTPassword:
        hadoopConn.setMaprTPassword(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case maprTCluster:
        hadoopConn.setMaprTCluster(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case maprTDuration:
        hadoopConn.setMaprTDuration(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case maprTHomeDir:
        hadoopConn.setMaprTHomeDir(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case maprTHadoopLogin:
        hadoopConn.setMaprTHadoopLogin(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case GoogleProjectId:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_GOOGLE_PROJECT_ID, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case GoogleClusterId:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_GOOGLE_CLUSTER_ID, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case GoogleRegion:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_GOOGLE_REGION, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case GoogleJarsBucket:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_GOOGLE_JARS_BUCKET, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case PathToGoogleCredentials:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_PATH_TO_GOOGLE_CREDENTIALS, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case DataBricksEndpoint:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_DATABRICKS_ENDPOINT, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case DataBricksCloudProvider:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_DATABRICKS_CLOUD_PROVIDER, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case DatabricksRunMode:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_DATABRICKS_RUN_MODE, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case DataBricksClusterId:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_DATABRICKS_CLUSTER_ID, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case DataBricksToken:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_DATABRICKS_TOKEN, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case DataBricksDBFSDepFolder:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_DATABRICKS_DBFS_DEP_FOLDER, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case WebHDFSSSLTrustStorePath:
        hadoopConn.setWebHDFSSSLTrustStorePath(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case WebHDFSSSLTrustStorePassword:
        hadoopConn.setWebHDFSSSLTrustStorePassword(ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case setHadoopConf:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SET_HADOOP_CONF, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case hadoopConfSpecificJar:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_HADOOP_CONF_SPECIFIC_JAR, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case UseKnox:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_USE_KNOX, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case SparkMode:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_SPARK_MODE, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KnoxUrl:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_KNOX_URL, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KnoxUsername:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_KNOX_USER, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KnoxPassword:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_KNOX_PASSWORD, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    case KnoxDirectory:
        hadoopConn.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_KNOX_DIRECTORY, ContextParameterUtils.getNewScriptCode(hadoopVariableName, LANGUAGE));
        break;
    default:
    }
}
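Every branch above stores the same kind of value: a context-variable reference produced by ContextParameterUtils.getNewScriptCode for the current generation language, written either through a dedicated setter or into the generic parameter map. The sketch below only illustrates the shape of such a reference; the "context." + name form is an assumption for the Java generation language, not the utility's documented contract.

// Illustration only: the handler stores a context-variable reference rather
// than a literal value. The real script comes from
// ContextParameterUtils.getNewScriptCode; "context." + name is an assumed
// form for the Java generation language.
final class ContextScriptShapeExample {

    static String assumedScriptCode(String contextVariableName) {
        return "context." + contextVariableName;
    }

    public static void main(String[] args) {
        // e.g. the NameNodeUri branch would pass a value like this to setNameNodeURI(...)
        System.out.println(assumedScriptCode("myCluster_NameNodeUri")); // hypothetical variable name
    }
}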
Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HadoopClusterContextHandler, method setPropertiesForExistContextMode:
@Override
public void setPropertiesForExistContextMode(Connection connection, Set<IConnParamName> paramSet, Map<ContextItem, List<ConectionAdaptContextVariableModel>> adaptMap) {
    if (connection == null) {
        return;
    }
    if (connection instanceof HadoopClusterConnection) {
        HadoopClusterConnection hadoopConn = (HadoopClusterConnection) connection;
        ContextItem currentContext = null;
        for (IConnParamName param : paramSet) {
            if (param instanceof EHadoopParamName) {
                String hadoopVariableName = null;
                EHadoopParamName hadoopParam = (EHadoopParamName) param;
                if (adaptMap != null && adaptMap.size() > 0) {
                    for (Map.Entry<ContextItem, List<ConectionAdaptContextVariableModel>> entry : adaptMap.entrySet()) {
                        currentContext = entry.getKey();
                        List<ConectionAdaptContextVariableModel> modelList = entry.getValue();
                        for (ConectionAdaptContextVariableModel model : modelList) {
                            if (model.getValue().equals(hadoopParam.name())) {
                                hadoopVariableName = model.getName();
                                break;
                            }
                        }
                    }
                }
                if (hadoopVariableName != null) {
                    hadoopVariableName = getCorrectVariableName(currentContext, hadoopVariableName, hadoopParam);
                    matchContextForAttribues(hadoopConn, hadoopParam, hadoopVariableName);
                }
            }
        }
        matchAdditionProperties(hadoopConn, adaptMap);
    }
}
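The nested loops above answer one question per parameter: which context variable, if any, was mapped to it in the adaptation models. That lookup can be read in isolation as below; it relies only on the getName()/getValue() accessors visible in the snippet, returns the first match rather than the last, and omits the import of ConectionAdaptContextVariableModel because its package is not shown above.

import java.util.List;
import java.util.Map;

// Illustration only: the variable lookup performed inside
// setPropertiesForExistContextMode, extracted as a helper. The import of
// ConectionAdaptContextVariableModel is intentionally omitted here.
final class VariableLookupExample {

    static String findVariableName(String paramName, Map<?, List<ConectionAdaptContextVariableModel>> adaptMap) {
        if (adaptMap == null) {
            return null;
        }
        for (List<ConectionAdaptContextVariableModel> models : adaptMap.values()) {
            for (ConectionAdaptContextVariableModel model : models) {
                if (paramName.equals(model.getValue())) {
                    return model.getName(); // first matching mapping wins in this sketch
                }
            }
        }
        return null; // no context variable mapped to this parameter
    }
}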