Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend project tbd-studio-se.
The getModuleGroups method of the class DynamicSparkStreamingModuleGroup.
@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    // Resolve the runtime module-group ids for the Spark 1.x and Spark 2.x templates.
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    String spark1Id = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK_MODULE_GROUP.getModuleName());
    String spark2Id = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK2_MODULE_GROUP.getModuleName());
    checkRuntimeId(spark1Id);
    checkRuntimeId(spark2Id);

    // Register one non-required module group per resolved id, each gated by
    // the matching Spark-version component condition.
    Set<DistributionModuleGroup> groups = new HashSet<>();
    if (StringUtils.isNotBlank(spark1Id)) {
        groups.add(new DistributionModuleGroup(spark1Id, false, spark1Condition));
    }
    if (StringUtils.isNotBlank(spark2Id)) {
        groups.add(new DistributionModuleGroup(spark2Id, false, spark2Condition));
    }
    return groups;
}
Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend project tbd-studio-se.
The getModuleGroups method of the class DynamicWebHDFSModuleGroup.
@Override
public Set<DistributionModuleGroup> getModuleGroups(String distribution, String version) throws Exception {
    Set<DistributionModuleGroup> groups = new HashSet<>();

    // Resolve the ADLS (Azure) runtime module-group id for this plugin.
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    String adlsRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(
            DynamicModuleGroupConstant.SPARK_AZURE_MRREQUIRED_MODULE_GROUP.getModuleName());
    checkRuntimeId(adlsRuntimeId);

    if (StringUtils.isNotBlank(adlsRuntimeId)) {
        // MR-required group, only active when the HDFS-linked node matches the ADLS condition.
        HDFSLinkedNodeCondition hdfsLinkedNodeCondition = new HDFSLinkedNodeCondition(distribution, version);
        groups.add(new DistributionModuleGroup(adlsRuntimeId, true, hdfsLinkedNodeCondition.getAdlsCondition()));
    }
    return groups;
}
Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend project tbd-studio-se.
The getModuleGroups method of the class DynamicCDPHiveOnSparkModuleGroup.
@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    // Resolve the runtime id of the Spark/Hive MR-required module group template.
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    String sparkHiveRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(
            DynamicModuleGroupConstant.SPARK_HIVE_MRREQUIRED_MODULE_GROUP.getModuleName());
    checkRuntimeId(sparkHiveRuntimeId);

    Set<DistributionModuleGroup> groups = new HashSet<>();
    if (StringUtils.isNotBlank(sparkHiveRuntimeId)) {
        // Required (mrRequired = true) and unconditional — no component condition attached.
        groups.add(new DistributionModuleGroup(sparkHiveRuntimeId, true));
    }
    return groups;
}
Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend project tbd-studio-se.
The getModuleGroups method of the class DynamicCDPSparkBatchModuleGroup.
@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    Set<DistributionModuleGroup> groups = new HashSet<>();

    // Start from the module groups contributed by the parent distribution.
    Set<DistributionModuleGroup> inherited = super.getModuleGroups();
    if (inherited != null && !inherited.isEmpty()) {
        groups.addAll(inherited);
    }

    // Resolve every CDP-specific runtime module-group id from its template id.
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    String sparkMrRequiredId = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK_MRREQUIRED_MODULE_GROUP.getModuleName());
    String hdfsSpark21Id = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP_SPARK2_1.getModuleName());
    String hdfsCommonId = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP_COMMON.getModuleName());
    String mapReduceId = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.MAPREDUCE_MODULE_GROUP.getModuleName());
    String clouderaNavigatorId = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicCDPModuleGroupConstant.TALEND_CLOUDERA_CDP_NAVIGATOR.getModuleName());
    checkRuntimeId(sparkMrRequiredId);
    checkRuntimeId(hdfsSpark21Id);
    checkRuntimeId(hdfsCommonId);
    checkRuntimeId(mapReduceId);
    checkRuntimeId(clouderaNavigatorId);

    // Spark 2 module groups; only the Spark MR-required one is marked required.
    if (StringUtils.isNotBlank(sparkMrRequiredId)) {
        groups.add(new DistributionModuleGroup(sparkMrRequiredId, true, conditionSpark2));
    }
    if (StringUtils.isNotBlank(hdfsSpark21Id)) {
        groups.add(new DistributionModuleGroup(hdfsSpark21Id, false, conditionSpark2));
    }
    if (StringUtils.isNotBlank(hdfsCommonId)) {
        groups.add(new DistributionModuleGroup(hdfsCommonId, false, conditionSpark2));
    }
    if (StringUtils.isNotBlank(mapReduceId)) {
        groups.add(new DistributionModuleGroup(mapReduceId, false, conditionSpark2));
    }

    // Cloudera Navigator jars are required only when the component enables Navigator support.
    if (StringUtils.isNotBlank(clouderaNavigatorId)) {
        ComponentCondition useNavigator =
                new SimpleComponentCondition(new BasicExpression(SparkBatchConstant.USE_CLOUDERA_NAVIGATOR));
        groups.add(new DistributionModuleGroup(clouderaNavigatorId, true, useNavigator));
    }
    return groups;
}
Example usage of org.talend.hadoop.distribution.DistributionModuleGroup in the Talend project tbd-studio-se.
The getModuleGroups method of the class DynamicHDPSparkStreamingModuleGroup.
@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    Set<DistributionModuleGroup> groups = new HashSet<>();

    // Inherit whatever the parent distribution already contributes.
    Set<DistributionModuleGroup> inherited = super.getModuleGroups();
    if (inherited != null && !inherited.isEmpty()) {
        groups.addAll(inherited);
    }

    // Resolve the HDFS, MapReduce and Spark-S3 runtime module-group ids.
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    String hdfsId = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP.getModuleName());
    String mapReduceId = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.MAPREDUCE_MODULE_GROUP.getModuleName());
    String sparkS3Id = pluginAdapter
            .getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK_S3_MRREQUIRED_MODULE_GROUP.getModuleName());
    checkRuntimeId(hdfsId);
    checkRuntimeId(mapReduceId);
    checkRuntimeId(sparkS3Id);

    // HDFS and MapReduce groups are registered once per Spark-version condition.
    if (StringUtils.isNotBlank(hdfsId)) {
        groups.add(new DistributionModuleGroup(hdfsId, false, spark1Condition));
        groups.add(new DistributionModuleGroup(hdfsId, false, spark2Condition));
    }
    if (StringUtils.isNotBlank(mapReduceId)) {
        groups.add(new DistributionModuleGroup(mapReduceId, false, spark1Condition));
        groups.add(new DistributionModuleGroup(mapReduceId, false, spark2Condition));
    }

    // The S3 group is required (mrRequired = true) and has no version condition.
    if (StringUtils.isNotBlank(sparkS3Id)) {
        groups.add(new DistributionModuleGroup(sparkS3Id, true));
    }
    return groups;
}
Aggregations