Use of org.talend.hadoop.distribution.component.HadoopComponent in project tbd-studio-se by Talend — class DistributionsManager, method removeDistribution.
/**
 * Removes the distribution version backed by the given service reference from the
 * distributions map. When the removed version was the distribution's last one, the
 * whole {@link DistributionBean} entry is dropped from the map.
 *
 * @param bc the bundle context used to resolve the service reference
 * @param distributionsMap map of distribution key to {@link DistributionBean}, mutated in place
 * @param type the component type the version belongs to
 * @param sr service reference of the {@link HadoopComponent} being removed
 */
private void removeDistribution(BundleContext bc, Map<String, DistributionBean> distributionsMap, ComponentType type, ServiceReference<? extends HadoopComponent> sr) {
    HadoopComponent hc = bc.getService(sr);
    if (hc == null) {
        // The service may already be unregistered, in which case getService returns null.
        // Mirror the null guard used in addDistribution instead of throwing an NPE below.
        return;
    }
    final String distribution = hc.getDistribution();
    final String distributionName = hc.getDistributionName();
    String key = getKey(hc);
    DistributionBean distributionBean = distributionsMap.get(key);
    if (distributionBean == null) {
        // does not exist, nothing to remove
        return;
    } else {
        // check the name and displayName; warn (but still proceed) on mismatch
        if (!distribution.equals(distributionBean.name) || !distributionName.equals(distributionBean.displayName)) {
            CommonExceptionHandler.warn(" There are different distribution name for " + distributionBean); //$NON-NLS-1$
        }
    }
    clearCache();
    final String version = hc.getVersion();
    DistributionVersion versionBean = null;
    if (hc instanceof AbstractDynamicDistributionTemplate) {
        AbstractDynamicDistributionTemplate dynamicDistribution = (AbstractDynamicDistributionTemplate) hc;
        versionBean = new DynamicDistributionVersion(dynamicDistribution, distributionBean, type, version, hc.getVersionName(type));
    } else {
        versionBean = new DistributionVersion(hc, distributionBean, version, hc.getVersionName(type));
    }
    // special condition for current version
    versionBean.displayCondition = hc.getDisplayCondition(type);
    distributionBean.removeVersion(versionBean);
    // Reset the default version after removal; presumably it is recomputed lazily elsewhere — TODO confirm.
    distributionBean.setDefaultVersion(null);
    DistributionVersion[] versions = distributionBean.getVersions();
    if (versions == null || versions.length <= 0) {
        // last version removed: drop the distribution entry entirely
        distributionsMap.remove(key);
    }
}
Use of org.talend.hadoop.distribution.component.HadoopComponent in project tbd-studio-se by Talend — class DistributionsManager, method addDistribution.
/**
 * Registers the distribution version backed by the given service reference into the
 * distributions map, creating the {@link DistributionBean} entry on first sight of a
 * distribution key. Inactive or unresolved services are ignored.
 *
 * @param bc the bundle context used to resolve the service reference
 * @param distributionsMap map of distribution key to {@link DistributionBean}, mutated in place
 * @param type the component type the version belongs to
 * @param sr service reference of the {@link HadoopComponent} being added
 */
private void addDistribution(BundleContext bc, Map<String, DistributionBean> distributionsMap, ComponentType type, ServiceReference<? extends HadoopComponent> sr) {
    HadoopComponent hc = bc.getService(sr);
    if (hc != null && hc.isActivated()) {
        final String distribution = hc.getDistribution();
        final String distributionName = hc.getDistributionName();
        String key = getKey(hc);
        DistributionBean distributionBean = distributionsMap.get(key);
        if (distributionBean == null) {
            // NOTE(review): the casts below assume every HadoopComponent service extends
            // AbstractDistribution — TODO confirm; a foreign implementation would throw CCE here.
            distributionBean = new DistributionBean(type, distribution, distributionName, ((AbstractDistribution) hc).isSparkLocal());
            if (ISparkDistribution.DISTRIBUTION_NAME.equals(distribution)) {
                // Spark "universal" distribution also carries the supported storage/service versions.
                distributionBean.setHadoopFSVersions(((AbstractDistribution) hc).getSupportedHadoopFSVersion());
                distributionBean.setHiveVersions(((AbstractDistribution) hc).getSupportedHiveVersion());
                distributionBean.sethBaseVersions(((AbstractDistribution) hc).getSupportedHBaseVersion());
                distributionBean.sethCatalogVersions(((AbstractDistribution) hc).getSupportedHCatalogVersion());
            }
            distributionsMap.put(key, distributionBean);
        } else {
            // check the name and displayName; on mismatch warn and skip this service
            if (!distribution.equals(distributionBean.name) || !distributionName.equals(distributionBean.displayName)) {
                CommonExceptionHandler.warn(" There are different distribution name for " + distributionBean); //$NON-NLS-1$
                return;
            }
        }
        clearCache();
        DistributionVersion versionBean = null;
        final String version = hc.getVersion();
        if (hc instanceof AbstractDynamicDistributionTemplate) {
            AbstractDynamicDistributionTemplate dynamicDistribution = (AbstractDynamicDistributionTemplate) hc;
            versionBean = new DynamicDistributionVersion(dynamicDistribution, distributionBean, type, version, hc.getVersionName(type));
        } else {
            versionBean = new DistributionVersion(hc, distributionBean, version, hc.getVersionName(type));
        }
        versionBean.addModuleGroups(hc.getModuleGroups(type));
        // special condition for current version
        versionBean.displayCondition = hc.getDisplayCondition(type);
        distributionBean.addVersion(versionBean);
        // add all version conditions ?
        distributionBean.addCondition(hc.getDisplayCondition(type));
    }
}
Use of org.talend.hadoop.distribution.component.HadoopComponent in project tbd-studio-se by Talend — class HadoopDistributionsHelper, method buildDistribution.
/**
 * Builds a {@link HadoopComponent} distribution by looking up the registered OSGi
 * services and matching on distribution name (and version, except for the custom
 * distribution which matches on name alone).
 *
 * @param pDistribution The name of the distribution
 * @param pVersion The name of the version
 * @return an implementation of {@link HadoopComponent}.
 * @throws Exception if no service matches the requested distribution/version
 */
public static HadoopComponent buildDistribution(String pDistribution, String pVersion) throws Exception {
    final BundleContext bc = FrameworkUtil.getBundle(DistributionFactory.class).getBundleContext();
    // find hadoop components (lazily cached list of service references)
    if (hadoopDistributions == null) {
        // NOTE(review): double-checked locking is only safe if the hadoopDistributions
        // field is declared volatile — confirm at the field declaration.
        synchronized (HadoopDistributionsHelper.class) {
            if (hadoopDistributions == null) {
                try {
                    hadoopDistributions = bc.getServiceReferences(HadoopComponent.class, null);
                } catch (InvalidSyntaxException e) {
                    CommonExceptionHandler.process(e);
                }
            }
        }
    }
    // Guard against a failed lookup above: without this the for-loop would throw an NPE
    // instead of the descriptive "doesn't exist" exception.
    if (hadoopDistributions != null) {
        for (ServiceReference<HadoopComponent> sr : hadoopDistributions) {
            HadoopComponent np = bc.getService(sr);
            String thatDistribution = np.getDistribution();
            // the custom distribution matches on name only, ignoring the version
            if (Constant.DISTRIBUTION_CUSTOM.equals(thatDistribution) && thatDistribution.equals(pDistribution)) {
                return np;
            }
            if (thatDistribution != null && thatDistribution.equals(pDistribution)) {
                String thatVersion = np.getVersion();
                if (thatVersion != null && thatVersion.equals(pVersion)) {
                    return np;
                }
            }
        }
    }
    throw new Exception("The distribution " + pDistribution + " with the version " + pVersion + " doesn't exist."); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
}
Use of org.talend.hadoop.distribution.component.HadoopComponent in project tbd-studio-se by Talend — class DBR550DistributionTest, method testDatabricksDistribution.
@Test
public void testDatabricksDistribution() throws Exception {
    HadoopComponent distribution = new DBR550Distribution();
    // Hoist the repeated casts once; DBR 5.5 implements both Spark batch and streaming.
    SparkBatchComponent sparkBatch = (SparkBatchComponent) distribution;
    SparkStreamingComponent sparkStreaming = (SparkStreamingComponent) distribution;

    // Basic identity and naming.
    assertNotNull(distribution.getDistributionName());
    assertNotNull(distribution.getVersionName(null));
    assertEquals(DBR550Distribution.DISTRIBUTION_NAME, distribution.getDistribution());
    assertEquals(DBR550Distribution.VERSION, distribution.getVersion());
    assertEquals(EHadoopVersion.HADOOP_2, distribution.getHadoopVersion());

    // Feature support flags.
    assertTrue(distribution.doSupportS3());
    assertFalse(distribution.doSupportKerberos());
    assertTrue(distribution.doSupportUseDatanodeHostname());
    assertFalse(distribution.doSupportGroup());

    // Exactly Spark 2.4.x is supported; every older line is rejected.
    assertTrue(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_2_4_X));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_2_3));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_2_2));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_2_0));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_1_6));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_1_5));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_1_4));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_1_3));

    // Execution modes: standalone only, no YARN, no Spark job server.
    assertTrue(sparkBatch.doSupportDynamicMemoryAllocation());
    assertFalse(sparkBatch.isExecutedThroughSparkJobServer());
    assertTrue(sparkBatch.doSupportSparkStandaloneMode());
    assertFalse(sparkBatch.doSupportSparkYarnClientMode());
    assertFalse(sparkBatch.doSupportSparkYarnClusterMode());

    // Streaming capabilities.
    assertTrue(sparkStreaming.doSupportBackpressure());
    assertTrue(sparkStreaming.doSupportCheckpointing());

    // Services and storage.
    assertTrue(distribution.doSupportCreateServiceConnection());
    assertEquals(0, distribution.getNecessaryServiceName() == null ? 0 : distribution.getNecessaryServiceName().size());
    assertTrue(distribution.doSupportAzureDataLakeStorage());
}
Use of org.talend.hadoop.distribution.component.HadoopComponent in project tbd-studio-se by Talend — class DBR640DistributionTest, method testDatabricksDistribution.
@Test
public void testDatabricksDistribution() throws Exception {
    HadoopComponent distribution = new DBR640Distribution();
    // Hoist the repeated casts once; DBR 6.4 implements both Spark batch and streaming.
    SparkBatchComponent sparkBatch = (SparkBatchComponent) distribution;
    SparkStreamingComponent sparkStreaming = (SparkStreamingComponent) distribution;

    // Basic identity and naming.
    assertNotNull(distribution.getDistributionName());
    assertNotNull(distribution.getVersionName(null));
    assertEquals(DBR640Distribution.DISTRIBUTION_NAME, distribution.getDistribution());
    assertEquals(DBR640Distribution.VERSION, distribution.getVersion());
    assertEquals(EHadoopVersion.HADOOP_2, distribution.getHadoopVersion());

    // Feature support flags.
    assertTrue(distribution.doSupportS3());
    assertFalse(distribution.doSupportKerberos());
    assertTrue(distribution.doSupportUseDatanodeHostname());
    assertFalse(distribution.doSupportGroup());

    // Exactly Spark 2.4.x is supported; every older line is rejected.
    assertTrue(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_2_4_X));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_2_3));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_2_2));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_2_0));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_1_6));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_1_5));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_1_4));
    assertFalse(sparkBatch.getSparkVersions().contains(ESparkVersion.SPARK_1_3));

    // Execution modes: standalone only, no YARN client mode, no Spark job server.
    // NOTE(review): unlike the DBR550 test, there is no doSupportSparkYarnClusterMode
    // assertion here — confirm whether that omission is intentional.
    assertTrue(sparkBatch.doSupportDynamicMemoryAllocation());
    assertFalse(sparkBatch.isExecutedThroughSparkJobServer());
    assertTrue(sparkBatch.doSupportSparkStandaloneMode());
    assertFalse(sparkBatch.doSupportSparkYarnClientMode());

    // Streaming capabilities.
    assertTrue(sparkStreaming.doSupportBackpressure());
    assertTrue(sparkStreaming.doSupportCheckpointing());

    // Services and storage.
    assertTrue(distribution.doSupportCreateServiceConnection());
    assertEquals(0, distribution.getNecessaryServiceName() == null ? 0 : distribution.getNecessaryServiceName().size());
    assertTrue(distribution.doSupportAzureDataLakeStorage());
}
Aggregations