Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend tbd-studio-se project: the getModuleGroups method of the DynamicHDPMapReduceModuleGroup class.
/**
 * Returns the module groups for HDP MapReduce jobs: everything the parent
 * distribution contributes, plus the Atlas (Spark 1) runtime group, which is
 * only pulled in when the USE_ATLAS component option is enabled.
 *
 * @return the resolved module groups; never null
 * @throws Exception if the parent lookup or the plugin adapter fails
 */
@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    Set<DistributionModuleGroup> result = new HashSet<>();
    Set<DistributionModuleGroup> inherited = super.getModuleGroups();
    if (inherited != null) {
        // addAll on an empty set is a no-op, so no emptiness check is needed.
        result.addAll(inherited);
    }
    ComponentCondition whenAtlasEnabled = new SimpleComponentCondition(new BasicExpression(MRConstant.USE_ATLAS));
    String atlasGroupId = getPluginAdapter()
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.ATLAS_SPARK_1_MODULE_GROUP.getModuleName());
    checkRuntimeId(atlasGroupId);
    if (StringUtils.isNotBlank(atlasGroupId)) {
        result.add(new DistributionModuleGroup(atlasGroupId, false, whenAtlasEnabled));
    }
    return result;
}
Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend tbd-studio-se project: the getModuleGroups method of the DynamicHDPSparkBatchModuleGroup class.
/**
 * Aggregates the runtime module groups required by HDP Spark Batch jobs:
 * the groups inherited from the parent distribution plus Spark-MR, HDFS,
 * Tez, MapReduce, Atlas, Sqoop, HBase and Spark-S3 groups, each guarded by
 * the component condition under which it applies.
 *
 * @return the resolved module groups; never null
 * @throws Exception if the parent lookup or the plugin adapter fails
 */
@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    Set<DistributionModuleGroup> moduleGroups = new HashSet<>();
    Set<DistributionModuleGroup> moduleGroupsFromSuper = super.getModuleGroups();
    if (moduleGroupsFromSuper != null && !moduleGroupsFromSuper.isEmpty()) {
        moduleGroups.addAll(moduleGroupsFromSuper);
    }
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    // Resolve every template id first, then validate them all, preserving the
    // original resolve-then-check ordering.
    String spark2RuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK2_MODULE_GROUP.getModuleName());
    String sparkMRRequiredRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK_MRREQUIRED_MODULE_GROUP.getModuleName());
    String hdfsRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP.getModuleName());
    String hdfsNotSpark16RuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_NOT_SPARK_1_6_MODULE_GROUP.getModuleName());
    String tezNotSpark16RuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.TEZ_NOT_SPARK_1_6_MODULE_GROUP.getModuleName());
    String mapReduceRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.MAPREDUCE_MODULE_GROUP.getModuleName());
    String atlasSpark1RuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.ATLAS_SPARK_1_MODULE_GROUP.getModuleName());
    String atlasSpark2RuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.ATLAS_SPARK_2_MODULE_GROUP.getModuleName());
    String sqoopRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SQOOP_MODULE_GROUP.getModuleName());
    String sqoopParquetRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SQOOP_PARQUET_MODULE_GROUP.getModuleName());
    String hBaseRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HBASE_MODULE_GROUP.getModuleName());
    String sparkS3RuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK_S3_MRREQUIRED_MODULE_GROUP.getModuleName());
    checkRuntimeId(spark2RuntimeId);
    checkRuntimeId(sparkMRRequiredRuntimeId);
    checkRuntimeId(hdfsRuntimeId);
    checkRuntimeId(hdfsNotSpark16RuntimeId);
    checkRuntimeId(tezNotSpark16RuntimeId);
    checkRuntimeId(mapReduceRuntimeId);
    checkRuntimeId(atlasSpark1RuntimeId);
    checkRuntimeId(atlasSpark2RuntimeId);
    checkRuntimeId(sqoopRuntimeId);
    checkRuntimeId(sqoopParquetRuntimeId);
    checkRuntimeId(hBaseRuntimeId);
    checkRuntimeId(sparkS3RuntimeId);
    // NOTE(review): spark2RuntimeId is resolved and validated but never added
    // to the result below — confirm this is intentional.
    ComponentCondition useAtlas = new SimpleComponentCondition(new BasicExpression(MRConstant.USE_ATLAS));
    ComponentCondition atlasSpark1x = new MultiComponentCondition(useAtlas, BooleanOperator.AND, conditionSpark1);
    ComponentCondition atlasSpark2x = new MultiComponentCondition(useAtlas, BooleanOperator.AND, conditionSpark2);
    // Most groups are registered for both Spark 1.x and Spark 2.x; the
    // repeated "if not blank, add per condition" stanzas are factored out.
    addWhenPresent(moduleGroups, sparkMRRequiredRuntimeId, true, conditionSpark1, conditionSpark2);
    addWhenPresent(moduleGroups, hdfsRuntimeId, false, conditionSpark1, conditionSpark2);
    addWhenPresent(moduleGroups, hdfsNotSpark16RuntimeId, false, conditionNotSpark16);
    addWhenPresent(moduleGroups, tezNotSpark16RuntimeId, false, conditionNotSpark16);
    addWhenPresent(moduleGroups, mapReduceRuntimeId, false, conditionSpark1, conditionSpark2);
    addWhenPresent(moduleGroups, atlasSpark1RuntimeId, true, atlasSpark1x);
    addWhenPresent(moduleGroups, atlasSpark2RuntimeId, true, atlasSpark2x);
    addWhenPresent(moduleGroups, sqoopRuntimeId, false, conditionSpark1, conditionSpark2);
    addWhenPresent(moduleGroups, sqoopParquetRuntimeId, false, conditionSpark1, conditionSpark2);
    addWhenPresent(moduleGroups, hBaseRuntimeId, true, conditionSpark1, conditionSpark2);
    addWhenPresent(moduleGroups, sparkS3RuntimeId, true);
    return moduleGroups;
}

/**
 * Registers {@code runtimeId} once per supplied condition — or once
 * unconditionally when no condition is given — skipping blank ids exactly as
 * the inline {@code StringUtils.isNotBlank} checks did.
 */
private void addWhenPresent(Set<DistributionModuleGroup> groups, String runtimeId, boolean required,
        ComponentCondition... conditions) {
    if (StringUtils.isBlank(runtimeId)) {
        return;
    }
    if (conditions.length == 0) {
        groups.add(new DistributionModuleGroup(runtimeId, required));
        return;
    }
    for (ComponentCondition condition : conditions) {
        groups.add(new DistributionModuleGroup(runtimeId, required, condition));
    }
}
Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend tbd-studio-se project: the getModuleGroups method of the DynamicHDPWebHDFSModuleGroup class.
/**
 * Adds the WebHDFS runtime module group — guarded by the HDFS-linked-node
 * condition for the given distribution/version — on top of the groups
 * provided by the parent distribution.
 *
 * @param distribution the distribution identifier
 * @param version the distribution version
 * @return the resolved module groups; never null
 * @throws Exception if the parent lookup or the plugin adapter fails
 */
@Override
public Set<DistributionModuleGroup> getModuleGroups(String distribution, String version) throws Exception {
    Set<DistributionModuleGroup> groups = new HashSet<>();
    Set<DistributionModuleGroup> parentGroups = super.getModuleGroups(distribution, version);
    if (parentGroups != null && !parentGroups.isEmpty()) {
        groups.addAll(parentGroups);
    }
    DynamicPluginAdapter adapter = getPluginAdapter();
    String webHdfsId = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.WEBHDFS_MODULE_GROUP.getModuleName());
    checkRuntimeId(webHdfsId);
    if (StringUtils.isNotBlank(webHdfsId)) {
        HDFSLinkedNodeCondition linkedNodeCondition = new HDFSLinkedNodeCondition(distribution, version);
        groups.add(new DistributionModuleGroup(webHdfsId, true, linkedNodeCondition.getWebHDFSCondition()));
    }
    return groups;
}
Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend tbd-studio-se project: the getModuleGroups method of the DynamicSparkBatchModuleGroup class.
/**
 * Returns the Spark Batch module groups: the Spark 1.x group under the
 * Spark 1 condition and the Spark 2.x group under the Spark 2 condition.
 * Blank runtime ids are validated and then skipped.
 *
 * @return the resolved module groups; never null
 * @throws Exception if the plugin adapter lookup fails
 */
@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    DynamicPluginAdapter adapter = getPluginAdapter();
    String spark1Id = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK_MODULE_GROUP.getModuleName());
    String spark2Id = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK2_MODULE_GROUP.getModuleName());
    checkRuntimeId(spark1Id);
    checkRuntimeId(spark2Id);
    Set<DistributionModuleGroup> groups = new HashSet<>();
    if (StringUtils.isNotBlank(spark1Id)) {
        groups.add(new DistributionModuleGroup(spark1Id, false, conditionSpark1));
    }
    if (StringUtils.isNotBlank(spark2Id)) {
        groups.add(new DistributionModuleGroup(spark2Id, false, conditionSpark2));
    }
    return groups;
}
Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend tbd-studio-se project: the getModuleGroups method of the DynamicCDHSparkBatchModuleGroup class.
/**
 * Builds the CDH Spark Batch module groups: the parent distribution's groups
 * plus the Spark MR-required, HDFS (Spark 1.6 / Spark 2.1 / common) and
 * MapReduce groups under their Spark-version conditions, and the Talend
 * Cloudera Navigator group when the Navigator option is enabled.
 *
 * @return the resolved module groups; never null
 * @throws Exception if the parent lookup or the plugin adapter fails
 */
@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    Set<DistributionModuleGroup> groups = new HashSet<>();
    Set<DistributionModuleGroup> parentGroups = super.getModuleGroups();
    if (parentGroups != null && !parentGroups.isEmpty()) {
        groups.addAll(parentGroups);
    }
    DynamicPluginAdapter adapter = getPluginAdapter();
    String sparkMrId = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK_MRREQUIRED_MODULE_GROUP.getModuleName());
    String hdfsSpark16Id = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP_SPARK1_6.getModuleName());
    String hdfsSpark21Id = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP_SPARK2_1.getModuleName());
    String hdfsCommonId = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP_COMMON.getModuleName());
    String mapReduceId = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.MAPREDUCE_MODULE_GROUP.getModuleName());
    String navigatorId = adapter.getRuntimeModuleGroupIdByTemplateId(DynamicCDHModuleGroupConstant.TALEND_CLOUDERA_CDH_NAVIGATOR.getModuleName());
    checkRuntimeId(sparkMrId);
    checkRuntimeId(hdfsSpark16Id);
    checkRuntimeId(hdfsSpark21Id);
    checkRuntimeId(hdfsCommonId);
    checkRuntimeId(mapReduceId);
    checkRuntimeId(navigatorId);
    if (StringUtils.isNotBlank(sparkMrId)) {
        groups.add(new DistributionModuleGroup(sparkMrId, true, conditionSpark1));
        groups.add(new DistributionModuleGroup(sparkMrId, true, conditionSpark2));
    }
    if (StringUtils.isNotBlank(hdfsSpark16Id)) {
        groups.add(new DistributionModuleGroup(hdfsSpark16Id, false, conditionSpark1));
    }
    if (StringUtils.isNotBlank(hdfsSpark21Id)) {
        groups.add(new DistributionModuleGroup(hdfsSpark21Id, false, conditionSpark2));
    }
    if (StringUtils.isNotBlank(hdfsCommonId)) {
        groups.add(new DistributionModuleGroup(hdfsCommonId, false, conditionSpark1));
        groups.add(new DistributionModuleGroup(hdfsCommonId, false, conditionSpark2));
    }
    if (StringUtils.isNotBlank(mapReduceId)) {
        groups.add(new DistributionModuleGroup(mapReduceId, false, conditionSpark1));
        groups.add(new DistributionModuleGroup(mapReduceId, false, conditionSpark2));
    }
    if (StringUtils.isNotBlank(navigatorId)) {
        ComponentCondition useNavigator = new SimpleComponentCondition(new BasicExpression(SparkBatchConstant.USE_CLOUDERA_NAVIGATOR));
        groups.add(new DistributionModuleGroup(navigatorId, true, useNavigator));
    }
    return groups;
}
End of aggregated usage examples.