use of org.talend.hadoop.distribution.condition.ComponentCondition in project tbd-studio-se by Talend.
the class ModuleGroupsUtils method getModuleGroups.
/**
* Utility method to create the collection of {@link DistributionModuleGroup} with a condition made of a
* {@link SparkBatchLinkedNodeCondition} and an additional raw condition
*
* @param distribution the distribution key
* @param version the version key
* @param condition a nullable additional condition
* @param moduleName the module name
* @param mrRequired whether the resulting module group is MR-required
* @return a set of {@link DistributionModuleGroup}
*/
public static Set<DistributionModuleGroup> getModuleGroups(String distribution, String version, ComponentCondition condition, String moduleName, boolean mrRequired) {
    Set<DistributionModuleGroup> hs = new HashSet<>();
    ComponentCondition distribCondition = new SparkBatchLinkedNodeCondition(distribution, version, SparkBatchConstant.SPARK_BATCH_SPARKCONFIGURATION_LINKEDPARAMETER).getCondition();
    ComponentCondition cc = null;
    if (condition != null) {
        cc = new MultiComponentCondition(condition, BooleanOperator.AND, distribCondition);
    } else {
        cc = distribCondition;
    }
    DistributionModuleGroup dmg = new DistributionModuleGroup(moduleName, mrRequired, cc);
    hs.add(dmg);
    return hs;
}
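A rough usage sketch (not from the project): the distribution key, version key, raw condition, and module name below are hypothetical placeholders.
ComponentCondition extra = new SimpleComponentCondition(new RawExpression("SOME_RAW_CONDITION")); // hypothetical raw condition
Set<DistributionModuleGroup> groups = ModuleGroupsUtils.getModuleGroups(
        "MY_DISTRIBUTION",  // hypothetical distribution key
        "MY_VERSION",       // hypothetical version key
        extra,              // nullable additional condition
        "my-module-group",  // hypothetical module name
        true);              // mrRequired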
use of org.talend.hadoop.distribution.condition.ComponentCondition in project tbd-studio-se by Talend.
the class AbstractDistribution method buildModuleGroups.
protected Map<ComponentType, Set<DistributionModuleGroup>> buildModuleGroups() {
    Map<ComponentType, Set<DistributionModuleGroup>> result = new HashMap<>();
    // HCatalog
    result.put(ComponentType.HCATALOG, ModuleGroupsUtils.getModuleGroups(null, ModuleGroupName.HDFS.get(this.getVersion()), false));
    // HDFS
    result.put(ComponentType.HDFS, ModuleGroupsUtils.getModuleGroups(null, ModuleGroupName.HDFS.get(this.getVersion()), false));
    // HBase
    result.put(ComponentType.HBASE, ModuleGroupsUtils.getModuleGroups(null, ModuleGroupName.HBASE.get(this.getVersion()), true));
    // Hive
    ComponentCondition hiveOnHbaseCondition = new MultiComponentCondition(
            new SimpleComponentCondition(new BasicExpression(HiveConstant.HIVE_CONFIGURATION_COMPONENT_HBASEPARAMETER)),
            BooleanOperator.AND,
            new SimpleComponentCondition(new ShowExpression(HiveConstant.HIVE_CONFIGURATION_COMPONENT_HBASEPARAMETER)));
    Set<DistributionModuleGroup> hiveModuleGroups = new HashSet<>();
    hiveModuleGroups.addAll(ModuleGroupsUtils.getModuleGroups(null, ModuleGroupName.HIVE.get(this.getVersion()), false));
    hiveModuleGroups.addAll(ModuleGroupsUtils.getModuleGroups(hiveOnHbaseCondition, ModuleGroupName.HBASE.get(this.getVersion()), false));
    result.put(ComponentType.HIVE, hiveModuleGroups);
    // Hive on Spark
    result.put(ComponentType.HIVEONSPARK, ModuleGroupsUtils.getModuleGroups((ComponentCondition) null, ModuleGroupName.HIVE.get(this.getVersion()), true));
    // Sqoop
    result.put(ComponentType.SQOOP, SqoopModuleGroup.getModuleGroups(this.getVersion()));
    result.put(ComponentType.SPARKBATCH, ModuleGroupsUtils.getModuleGroups(null, ModuleGroupName.SPARK_BATCH.get(this.getVersion()), true));
    result.put(ComponentType.SPARKSTREAMING, ModuleGroupsUtils.getModuleGroups(null, ModuleGroupName.SPARK_STREAMING.get(this.getVersion()), true));
    return result;
}
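A minimal sketch of how a concrete distribution might expose the map built here; the field and accessor below are assumptions, not code from the project.
// Hypothetical caching and lookup in a concrete distribution class.
private final Map<ComponentType, Set<DistributionModuleGroup>> moduleGroups = buildModuleGroups();

public Set<DistributionModuleGroup> getModuleGroups(ComponentType componentType) {
    return moduleGroups.get(componentType);
}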
use of org.talend.hadoop.distribution.condition.ComponentCondition in project tbd-studio-se by Talend.
the class DistributionVersionModule method getModuleRequiredIf.
public ComponentCondition getModuleRequiredIf() {
    ComponentCondition condition;
    // The import is needed only if the right distribution and version are selected, and if the
    // distribution parameter is shown. The latter condition takes USE_EXISTING_CONNECTION into account.
    final ComponentType componentType = distributionVersion.distribution.componentType;
    Expression distributionSelected = new BasicExpression(componentType.getDistributionParameter(), EqualityOperator.EQ, distributionVersion.distribution.name);
    Expression distributionVersionSelected = new BasicExpression(componentType.getVersionParameter(), EqualityOperator.EQ, distributionVersion.version);
    Expression distributionShown = new ShowExpression(componentType.getDistributionParameter());
    condition = new MultiComponentCondition(
            new SimpleComponentCondition(distributionSelected),
            BooleanOperator.AND,
            new MultiComponentCondition(
                    new SimpleComponentCondition(distributionVersionSelected),
                    BooleanOperator.AND,
                    new SimpleComponentCondition(distributionShown)));
    if (moduleGroup.getRequiredIf() != null) {
        condition = new MultiComponentCondition(condition, BooleanOperator.AND, new NestedComponentCondition(moduleGroup.getRequiredIf()));
    }
    return condition;
}
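A short sketch of consuming the returned condition, assuming an already-built DistributionVersionModule instance named versionModule; getConditionString() is the same accessor used in the next snippet.
ComponentCondition requiredIf = versionModule.getModuleRequiredIf();
// The serialized expression can then be registered as the module's required-if condition.
String requiredIfExpression = requiredIf.getConditionString();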
use of org.talend.hadoop.distribution.condition.ComponentCondition in project tbd-studio-se by Talend.
the class ComponentConditionUtil method generateSparkVersionShowIfConditions.
/**
* Generates the "SHOW_IF" condition for the "SUPPORTED_SPARK_VERSION" drop-down list. Given a map of Spark versions
* and the corresponding supported Hadoop distribution versions, it builds a {@link ComponentCondition} for each entry in the map.
*
* @param supportedSparkVersions the map of Spark versions
* @return an array of String representations of the generated {@link ComponentCondition}s
*/
public static String[] generateSparkVersionShowIfConditions(Map<ESparkVersion, Set<DistributionVersion>> supportedSparkVersions) {
    String[] results = null;
    if (supportedSparkVersions != null) {
        results = new String[supportedSparkVersions.size()];
        int conditionIndex = 0;
        for (Map.Entry<ESparkVersion, Set<DistributionVersion>> entry : supportedSparkVersions.entrySet()) {
            Set<ComponentCondition> multiComponentConditions = new LinkedHashSet<>();
            for (DistributionVersion distributionVersion : entry.getValue()) {
                SimpleComponentCondition distribution = new SimpleComponentCondition(
                        new BasicExpression("DISTRIBUTION", EqualityOperator.EQ, distributionVersion.distribution.getName())); // $NON-NLS-1$
                SimpleComponentCondition version = new SimpleComponentCondition(
                        new BasicExpression("SPARK_VERSION", EqualityOperator.EQ, distributionVersion.getVersion())); // $NON-NLS-1$
                multiComponentConditions.add(new MultiComponentCondition(distribution, BooleanOperator.AND, version));
            }
            ComponentCondition componentCondition = buildDistributionShowIf(multiComponentConditions);
            results[conditionIndex++] = componentCondition != null ? componentCondition.getConditionString() : null;
        }
    }
    return results;
}
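A minimal calling sketch; the map contents below are placeholders, and hdpVersions stands for some previously built Set<DistributionVersion>.
Map<ESparkVersion, Set<DistributionVersion>> supported = new LinkedHashMap<>();
supported.put(ESparkVersion.SPARK_2_1, hdpVersions); // hypothetical set of DistributionVersion
String[] showIfConditions = ComponentConditionUtil.generateSparkVersionShowIfConditions(supported);
// One SHOW_IF string (or null) per Spark version, in map iteration order.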
use of org.talend.hadoop.distribution.condition.ComponentCondition in project tbd-studio-se by Talend.
the class DynamicHDPGraphFramesNodeModuleGroup method getModuleGroups.
public Set<DistributionModuleGroup> getModuleGroups(String distribution, String version, String condition) throws Exception {
    Set<DistributionModuleGroup> dmg = new HashSet<>();
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    ComponentCondition spark1Condition = getComponentCondition(ESparkVersion.SPARK_1_6.getSparkVersion()); // $NON-NLS-1$
    ComponentCondition spark2Condition = getComponentCondition(ESparkVersion.SPARK_2_1.getSparkVersion()); // $NON-NLS-1$
    if (condition != null) {
        ComponentCondition c = new SimpleComponentCondition(new RawExpression(condition));
        spark1Condition = new MultiComponentCondition(spark1Condition, BooleanOperator.AND, c);
        spark2Condition = new MultiComponentCondition(spark2Condition, BooleanOperator.AND, c);
    }
    String graphFramesMrRequiredRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.GRAPHFRAMES_MRREQUIRED_MODULE_GROUP.getModuleName());
    String graphFramesSpark2MrRequiredRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.GRAPHFRAMES_SPARK2_MRREQUIRED_MODULE_GROUP.getModuleName());
    checkRuntimeId(graphFramesMrRequiredRuntimeId);
    checkRuntimeId(graphFramesSpark2MrRequiredRuntimeId);
    if (StringUtils.isNotBlank(graphFramesMrRequiredRuntimeId)) {
        dmg.addAll(ModuleGroupsUtils.getModuleGroups(distribution, version, spark1Condition, graphFramesMrRequiredRuntimeId, true));
    }
    if (StringUtils.isNotBlank(graphFramesSpark2MrRequiredRuntimeId)) {
        dmg.addAll(ModuleGroupsUtils.getModuleGroups(distribution, version, spark2Condition, graphFramesSpark2MrRequiredRuntimeId, true));
    }
    return dmg;
}
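A rough usage sketch; the instance, distribution key, and version key below are assumptions for illustration, and the method declares throws Exception, so callers must handle or propagate it.
// Assuming an already-constructed DynamicHDPGraphFramesNodeModuleGroup instance.
Set<DistributionModuleGroup> groups = graphFramesGroup.getModuleGroups(
        "HORTONWORKS", // hypothetical distribution key
        "HDP_3_0",     // hypothetical version key
        null);         // optional raw condition string, may be null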