Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HDFSConnectionCreator, method initializeConnectionParameters.
@Override
protected void initializeConnectionParameters(Connection conn) {
    if (!(conn instanceof HDFSConnection)) {
        return;
    }
    HDFSConnection connection = (HDFSConnection) conn;
    if (relativeHadoopClusterItem instanceof HadoopClusterConnectionItem) {
        HadoopClusterConnection hcConnection = (HadoopClusterConnection) ((HadoopClusterConnectionItem) relativeHadoopClusterItem).getConnection();
        String userName = ConnectionContextHelper.getParamValueOffContext(hcConnection, hcConnection.getUserName());
        if (userName != null) {
            connection.setUserName(userName);
        }
    }
    connection.setRowSeparator(IExtractSchemaService.DEFAULT_ROW_SEPARATOR);
    connection.setFieldSeparator(IExtractSchemaService.DEFAULT_FIELD_SEPARATOR);
}
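The pattern here is: inherit the user name from the parent Hadoop cluster only when it resolves to a concrete value, then always apply the extractor's default separators. A minimal, self-contained sketch of that null-guarded inheritance in plain Java; the ClusterDefaults and HdfsSettings types and the separator values are hypothetical stand-ins, not Talend API.

// Hypothetical stand-ins for the Talend connection types; only the
// null-guarded "inherit from the parent cluster, then apply defaults"
// pattern is taken from the method above.
final class ClusterDefaults {
    String userName; // may be null when the context variable does not resolve
}

final class HdfsSettings {
    // Illustrative default values; the real constants live on IExtractSchemaService.
    static final String DEFAULT_ROW_SEPARATOR = "\n";
    static final String DEFAULT_FIELD_SEPARATOR = ";";

    String userName;
    String rowSeparator;
    String fieldSeparator;

    void initializeFrom(ClusterDefaults cluster) {
        // Copy the user name only when the cluster actually provides one.
        if (cluster != null && cluster.userName != null) {
            this.userName = cluster.userName;
        }
        // The separators always fall back to the extractor defaults.
        this.rowSeparator = DEFAULT_ROW_SEPARATOR;
        this.fieldSeparator = DEFAULT_FIELD_SEPARATOR;
    }
}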
Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HCatalogDragAndDropHandler, method isSubValid.
protected boolean isSubValid(Item item, ERepositoryObjectType type, RepositoryNode selectedNode, IComponent component, String repositoryType) {
    boolean isSubValid = true;
    HadoopClusterConnection hcConnection = HCRepositoryUtil.getRelativeHadoopClusterConnection((HCatalogConnectionItem) item);
    if (hcConnection != null) {
        if (EHadoopDistributions.MICROSOFT_HD_INSIGHT.getName().equals(hcConnection.getDistribution())) {
            if (component.getName().toUpperCase().contains(HCATALOG)) {
                isSubValid = false;
            }
        }
    }
    if (type == ERepositoryObjectType.METADATA_CON_TABLE) {
        if (component.getName().toUpperCase().endsWith(MAP)) {
            isSubValid = false;
        }
    }
    return isSubValid;
}
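The filtering reduces to two rules: reject HCatalog components when the cluster distribution is Microsoft HDInsight, and reject *Map components when the drop target is table metadata. A condensed, hypothetical restatement; the literal distribution string and the helper itself are illustrative, not Talend API.

// Hypothetical condensed form of the two drag-and-drop filter rules above;
// the distribution name literal is an assumption for illustration.
static boolean acceptsDrop(String distributionName, String componentName, boolean targetIsTableMetadata) {
    String upper = componentName.toUpperCase();
    // Rule 1: HCatalog components are not valid on HDInsight clusters.
    if ("MICROSOFT_HD_INSIGHT".equals(distributionName) && upper.contains("HCATALOG")) {
        return false;
    }
    // Rule 2: *Map components are not valid on table metadata nodes.
    if (targetIsTableMetadata && upper.endsWith("MAP")) {
        return false;
    }
    return true;
}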
Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HadoopClusterContextUpdateService, method updateContextParameter.
@Override
public boolean updateContextParameter(Connection conn, String oldValue, String newValue) {
    boolean isModified = false;
    if (conn.isContextMode()) {
        if (conn instanceof HadoopClusterConnection) {
            HadoopClusterConnection hadoopConn = (HadoopClusterConnection) conn;
            if (hadoopConn.getNameNodeURI() != null && hadoopConn.getNameNodeURI().equals(oldValue)) {
                hadoopConn.setNameNodeURI(newValue);
                isModified = true;
            } else if (hadoopConn.getUserName() != null && hadoopConn.getUserName().equals(oldValue)) {
                hadoopConn.setUserName(newValue);
                isModified = true;
            } else if (hadoopConn.getJobTrackerURI() != null && hadoopConn.getJobTrackerURI().equals(oldValue)) {
                hadoopConn.setJobTrackerURI(newValue);
                isModified = true;
            } else if (hadoopConn.getPrincipal() != null && hadoopConn.getPrincipal().equals(oldValue)) {
                hadoopConn.setPrincipal(newValue);
                isModified = true;
            } else if (hadoopConn.getJtOrRmPrincipal() != null && hadoopConn.getJtOrRmPrincipal().equals(oldValue)) {
                hadoopConn.setJtOrRmPrincipal(newValue);
                isModified = true;
            } else if (hadoopConn.getGroup() != null && hadoopConn.getGroup().equals(oldValue)) {
                hadoopConn.setGroup(newValue);
                isModified = true;
            } else if (hadoopConn.getKeytabPrincipal() != null && hadoopConn.getKeytabPrincipal().equals(oldValue)) {
                hadoopConn.setKeytabPrincipal(newValue);
                isModified = true;
            } else if (hadoopConn.getKeytab() != null && hadoopConn.getKeytab().equals(oldValue)) {
                hadoopConn.setKeytab(newValue);
                isModified = true;
            } else if (hadoopConn.getMaprTPassword() != null && hadoopConn.getMaprTPassword().equals(oldValue)) {
                hadoopConn.setMaprTPassword(newValue);
                isModified = true;
            } else if (hadoopConn.getMaprTCluster() != null && hadoopConn.getMaprTCluster().equals(oldValue)) {
                hadoopConn.setMaprTCluster(newValue);
                isModified = true;
            } else if (hadoopConn.getMaprTDuration() != null && hadoopConn.getMaprTDuration().equals(oldValue)) {
                hadoopConn.setMaprTDuration(newValue);
                isModified = true;
            } else if (hadoopConn.getMaprTHomeDir() != null && hadoopConn.getMaprTHomeDir().equals(oldValue)) {
                hadoopConn.setMaprTHomeDir(newValue);
                isModified = true;
            } else if (hadoopConn.getMaprTHadoopLogin() != null && hadoopConn.getMaprTHadoopLogin().equals(oldValue)) {
                hadoopConn.setMaprTHadoopLogin(newValue);
                isModified = true;
            } else if (hadoopConn.getWebHDFSSSLTrustStorePath() != null && hadoopConn.getWebHDFSSSLTrustStorePath().equals(oldValue)) {
                hadoopConn.setWebHDFSSSLTrustStorePath(newValue);
                isModified = true;
            } else if (hadoopConn.getWebHDFSSSLTrustStorePassword() != null && hadoopConn.getWebHDFSSSLTrustStorePassword().equals(oldValue)) {
                hadoopConn.setWebHDFSSSLTrustStorePassword(newValue);
                isModified = true;
            } else {
                for (String paramKey : hadoopConn.getParameters().keySet()) {
                    if (hadoopConn.getParameters().get(paramKey).equals(oldValue)) {
                        hadoopConn.getParameters().put(paramKey, newValue);
                        isModified = true;
                    }
                }
                List<Map<String, Object>> hadoopProperties = HadoopRepositoryUtil.getHadoopPropertiesList(hadoopConn.getHadoopProperties());
                String finalProperties = updateHadoopProperties(hadoopProperties, oldValue, newValue);
                if (finalProperties != null) {
                    // Reuse the result computed above rather than calling
                    // updateHadoopProperties a second time.
                    hadoopConn.setHadoopProperties(finalProperties);
                    isModified = true;
                }
            }
        }
    }
    return isModified;
}
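The fifteen-branch if-else chain repeats one pattern: if a field equals the old value, write the new value and stop. A sketch of how it could be collapsed with getter/setter pairs; the accessors are the ones used above, but the helper itself and its insertion-ordered table are hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Supplier;

// Hypothetical condensation of the chain above; only the first matching
// field is updated, mirroring the original's else-if behavior.
static boolean swapFirstMatch(HadoopClusterConnection c, String oldValue, String newValue) {
    Map<Supplier<String>, Consumer<String>> fields = new LinkedHashMap<>();
    fields.put(c::getNameNodeURI, c::setNameNodeURI);
    fields.put(c::getUserName, c::setUserName);
    fields.put(c::getJobTrackerURI, c::setJobTrackerURI);
    fields.put(c::getPrincipal, c::setPrincipal);
    // ... remaining getter/setter pairs elided
    for (Map.Entry<Supplier<String>, Consumer<String>> field : fields.entrySet()) {
        if (oldValue != null && oldValue.equals(field.getKey().get())) {
            field.getValue().accept(newValue);
            return true;
        }
    }
    return false;
}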
Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HadoopConfsUtils, method setConnectionParameters.
public static void setConnectionParameters(HadoopClusterConnectionItem connectionItem, DistributionBean distribution, DistributionVersion distributionVersion, IRetrieveConfsService confsService) throws Exception {
    HadoopClusterConnection connection = (HadoopClusterConnection) connectionItem.getConnection();
    connection.setUseCustomConfs(confsService != null);
    connection.setDistribution(distribution.name);
    connection.setDfVersion(distributionVersion.version);
    boolean supportYARN = distributionVersion.hadoopComponent != null && (distributionVersion.hadoopComponent.isHadoop2() || distributionVersion.hadoopComponent.isHadoop3());
    boolean supportMR1 = distributionVersion.hadoopComponent != null && distributionVersion.hadoopComponent.isHadoop1();
    connection.setUseYarn(supportYARN && !supportMR1);
    HCRepositoryUtil.fillDefaultValuesOfHadoopCluster(connection);
    if (confsService == null) {
        return;
    }
    String namenodeURI = null;
    String ns = confsService.getConfValue(EHadoopConfs.HDFS.getName(), EHadoopConfProperties.DFS_NAMESERVICES.getName());
    if (StringUtils.isNotEmpty(ns)) {
        namenodeURI = "hdfs://" + ns; //$NON-NLS-1$
    } else {
        namenodeURI = confsService.getConfValue(EHadoopConfs.HDFS.getName(), EHadoopConfProperties.FS_DEFAULT_URI_NEW.getName());
        if (StringUtils.isEmpty(namenodeURI)) {
            namenodeURI = confsService.getConfValue(EHadoopConfs.HDFS.getName(), EHadoopConfProperties.FS_DEFAULT_URI.getName());
        }
    }
    if (namenodeURI != null) {
        connection.setNameNodeURI(namenodeURI);
    }
    String yarnHostName = confsService.getConfValue(EHadoopConfs.YARN.getName(), EHadoopConfProperties.YARN_RESOURCEMANAGER_HOSTNAME.getName());
    if (StringUtils.isNotEmpty(yarnHostName)) {
        connection.getParameters().put(ConnParameterKeys.CONN_PARA_KEY_YARN_HOSTNAME, yarnHostName);
    }
    String rmOrJt = null;
    if (supportYARN) {
        String useRmHa = confsService.getConfValue(EHadoopConfs.YARN.getName(), EHadoopConfProperties.YARN_RESOURCEMANAGER_HA_ENABLED.getName());
        boolean isUseRmHa = Boolean.valueOf(useRmHa);
        if (isUseRmHa) {
            String rmIdKey = getRmIdKey(confsService, EHadoopConfProperties.YARN_RESOURCEMANAGER_ADDRESS_RM_ID.getName());
            if (StringUtils.isNotEmpty(rmIdKey)) {
                rmOrJt = confsService.getConfValue(EHadoopConfs.YARN.getName(), rmIdKey);
            }
            if (rmOrJt == null) {
                rmIdKey = getRmIdKey(confsService, EHadoopConfProperties.YARN_RESOURCEMANAGER_HOSTNAME_RM_ID.getName());
                if (StringUtils.isNotEmpty(rmIdKey)) {
                    rmOrJt = confsService.getConfValue(EHadoopConfs.YARN.getName(), rmIdKey);
                }
            }
        } else {
            rmOrJt = confsService.getConfValue(EHadoopConfs.YARN.getName(), EHadoopConfProperties.RESOURCE_MANAGER.getName());
        }
    } else {
        rmOrJt = confsService.getConfValue(EHadoopConfs.MAPREDUCE2.getName(), EHadoopConfProperties.JOB_TRACKER_URI.getName());
    }
    if (rmOrJt == null && yarnHostName != null) {
        rmOrJt = replaceHostName(connection.getJobTrackerURI(), yarnHostName);
    }
    connection.setJobTrackerURI(rmOrJt);
    String rms = confsService.getConfValue(EHadoopConfs.YARN.getName(), EHadoopConfProperties.RESOURCEMANAGER_SCHEDULER.getName());
    if (rms == null) {
        String rmIdKey = getRmIdKey(confsService, EHadoopConfProperties.RESOURCEMANAGER_SCHEDULER_ADDRESS_RM_ID.getName());
        if (StringUtils.isNotEmpty(rmIdKey)) {
            rms = confsService.getConfValue(EHadoopConfs.YARN.getName(), rmIdKey);
        }
        if (yarnHostName != null) {
            rms = replaceHostName(connection.getRmScheduler(), yarnHostName);
        }
    }
    connection.setRmScheduler(rms);
    String jh = confsService.getConfValue(EHadoopConfs.MAPREDUCE2.getName(), EHadoopConfProperties.JOBHISTORY.getName());
    if (StringUtils.isEmpty(jh)) {
        jh = confsService.getConfValue(EHadoopConfs.YARN.getName(), EHadoopConfProperties.JOBHISTORY.getName());
    }
    if (StringUtils.isNotEmpty(jh)) {
        connection.setJobHistory(jh);
    }
    String sd = confsService.getConfValue(EHadoopConfs.MAPREDUCE2.getName(), EHadoopConfProperties.STAGING_DIR.getName());
    if (StringUtils.isEmpty(sd)) {
        sd = confsService.getConfValue(EHadoopConfs.YARN.getName(), EHadoopConfProperties.STAGING_DIR.getName());
    }
    if (StringUtils.isNotEmpty(sd)) {
        connection.setStagingDirectory(sd);
    }
    String at = confsService.getConfValue(EHadoopConfs.HDFS.getName(), EHadoopConfProperties.AUTHENTICATION.getName());
    if (StringUtils.isNotEmpty(at)) {
        connection.setEnableKerberos("kerberos".equals(at)); //$NON-NLS-1$
    }
    if (connection.isEnableKerberos()) {
        String nnp = confsService.getConfValue(EHadoopConfs.HDFS.getName(), EHadoopConfProperties.KERBEROS_PRINCIPAL.getName());
        if (StringUtils.isNotEmpty(nnp)) {
            connection.setPrincipal(nnp);
        }
        String rmOrJtPrincipal = null;
        if (supportYARN) {
            rmOrJtPrincipal = confsService.getConfValue(EHadoopConfs.YARN.getName(), EHadoopConfProperties.RM_PRINCIPAL.getName());
        } else {
            rmOrJtPrincipal = confsService.getConfValue(EHadoopConfs.MAPREDUCE2.getName(), EHadoopConfProperties.JT_PRINCIPAL.getName());
        }
        if (rmOrJtPrincipal != null) {
            connection.setJtOrRmPrincipal(rmOrJtPrincipal);
        }
        String jhp = confsService.getConfValue(EHadoopConfs.MAPREDUCE2.getName(), EHadoopConfProperties.JH_PRINCIPAL.getName());
        if (StringUtils.isEmpty(jhp)) {
            jhp = confsService.getConfValue(EHadoopConfs.YARN.getName(), EHadoopConfProperties.JH_PRINCIPAL.getName());
        }
        if (StringUtils.isNotEmpty(jhp)) {
            connection.setJobHistoryPrincipal(jhp);
        }
    }
}
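getRmIdKey is not shown in this listing; based on how YARN ResourceManager HA is normally configured, it presumably expands a key template with an id taken from yarn.resourcemanager.ha.rm-ids. A speculative sketch of that lookup, using only standard Hadoop property names; the actual Talend helper may differ.

import java.util.Map;

// Speculative reconstruction of a getRmIdKey-style lookup; the property
// name is a standard YARN HA key, but the expansion logic is an assumption.
static String resolveRmIdKey(Map<String, String> yarnConf, String keyPrefix) {
    // e.g. yarn.resourcemanager.ha.rm-ids = "rm1,rm2"
    String rmIds = yarnConf.get("yarn.resourcemanager.ha.rm-ids");
    if (rmIds == null || rmIds.trim().isEmpty()) {
        return null;
    }
    String firstId = rmIds.split(",")[0].trim();
    // e.g. "yarn.resourcemanager.address" + "." + "rm1"
    return keyPrefix + "." + firstId;
}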
Use of org.talend.repository.model.hadoopcluster.HadoopClusterConnection in project tbd-studio-se by Talend.
The class HadoopConfsUtils, method buildAndDeployConfsJar.
public static void buildAndDeployConfsJar(HadoopClusterConnectionItem connectionItem, String contextGroup, String dir, String jarName) {
    try {
        File parentFile = new File(getConfsJarTempFolder());
        File jarFile = new File(parentFile, jarName);
        File rootDir = new File(dir);
        JarBuilder jarBuilder = new JarBuilder(rootDir, jarFile);
        jarBuilder.setIncludeDir(null);
        jarBuilder.setExcludeDir(null);
        jarBuilder.buildJar();
        byte[] confFileByteArray = FileUtils.readFileToByteArray(jarFile);
        HadoopClusterConnection connection = (HadoopClusterConnection) connectionItem.getConnection();
        if (connection.isContextMode()) {
            if (contextGroup == null) {
                throw new Exception("contextGroup cannot be null in context mode!"); //$NON-NLS-1$
            }
            connection.getConfFiles().put(contextGroup, confFileByteArray);
        } else {
            connection.setConfFile(confFileByteArray);
        }
        if (GlobalServiceRegister.getDefault().isServiceRegistered(ILibrariesService.class)) {
            ILibrariesService service = GlobalServiceRegister.getDefault().getService(ILibrariesService.class);
            if (service != null) {
                // Only deploy a new jar, no need to reset all
                service.deployLibrary(jarFile.toURI().toURL());
                addToDeployedCache(connectionItem, jarName);
            }
        }
    } catch (Exception e) {
        ExceptionHandler.process(e);
    }
}
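For readers without the Talend codebase: the JarBuilder step above packs a configuration directory into a jar before it is stored on the connection and deployed. A minimal, JDK-only sketch of that packing step; Talend's include/exclude handling is not reproduced, and the class name is illustrative.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.stream.Stream;

final class ConfJarPacker {

    // Pack every regular file under rootDir into jarFile, with entry names
    // relative to rootDir, the layout a conf jar expects.
    static void pack(Path rootDir, Path jarFile) throws IOException {
        try (JarOutputStream jar = new JarOutputStream(Files.newOutputStream(jarFile));
                Stream<Path> files = Files.walk(rootDir)) {
            files.filter(Files::isRegularFile).forEach(p -> {
                try {
                    // Jar entries always use forward slashes, even on Windows.
                    jar.putNextEntry(new JarEntry(rootDir.relativize(p).toString().replace('\\', '/')));
                    Files.copy(p, jar);
                    jar.closeEntry();
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
    }
}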