Example 6 with SimpleComponentCondition

Use of org.talend.hadoop.distribution.condition.SimpleComponentCondition in project tbd-studio-se by Talend.

From the class DynamicCDPSparkBatchModuleGroup, the method getModuleGroups:

@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    Set<DistributionModuleGroup> moduleGroups = new HashSet<>();
    Set<DistributionModuleGroup> moduleGroupsFromSuper = super.getModuleGroups();
    if (moduleGroupsFromSuper != null && !moduleGroupsFromSuper.isEmpty()) {
        moduleGroups.addAll(moduleGroupsFromSuper);
    }
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
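    // Resolve each module group's runtime id from its template id.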
    String sparkMrRequiredRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.SPARK_MRREQUIRED_MODULE_GROUP.getModuleName());
    String hdfsSpark2_1RuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP_SPARK2_1.getModuleName());
    String hdfsCommonRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP_COMMON.getModuleName());
    String mrRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.MAPREDUCE_MODULE_GROUP.getModuleName());
    String talendClouderaNaviRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicCDPModuleGroupConstant.TALEND_CLOUDERA_CDP_NAVIGATOR.getModuleName());
    checkRuntimeId(sparkMrRequiredRuntimeId);
    checkRuntimeId(hdfsSpark2_1RuntimeId);
    checkRuntimeId(hdfsCommonRuntimeId);
    checkRuntimeId(mrRuntimeId);
    checkRuntimeId(talendClouderaNaviRuntimeId);
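    // Add a module group for each resolved runtime id, guarded by the appropriate condition.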
    if (StringUtils.isNotBlank(sparkMrRequiredRuntimeId)) {
        moduleGroups.add(new DistributionModuleGroup(sparkMrRequiredRuntimeId, true, conditionSpark2));
    }
    if (StringUtils.isNotBlank(hdfsSpark2_1RuntimeId)) {
        moduleGroups.add(new DistributionModuleGroup(hdfsSpark2_1RuntimeId, false, conditionSpark2));
    }
    if (StringUtils.isNotBlank(hdfsCommonRuntimeId)) {
        moduleGroups.add(new DistributionModuleGroup(hdfsCommonRuntimeId, false, conditionSpark2));
    }
    if (StringUtils.isNotBlank(mrRuntimeId)) {
        moduleGroups.add(new DistributionModuleGroup(mrRuntimeId, false, conditionSpark2));
    }
    if (StringUtils.isNotBlank(talendClouderaNaviRuntimeId)) {
        ComponentCondition conditionUseNavigator = new SimpleComponentCondition(new BasicExpression(SparkBatchConstant.USE_CLOUDERA_NAVIGATOR));
        moduleGroups.add(new DistributionModuleGroup(talendClouderaNaviRuntimeId, true, conditionUseNavigator));
    }
    return moduleGroups;
}
Also used : DynamicPluginAdapter(org.talend.hadoop.distribution.dynamic.adapter.DynamicPluginAdapter) BasicExpression(org.talend.hadoop.distribution.condition.BasicExpression) SimpleComponentCondition(org.talend.hadoop.distribution.condition.SimpleComponentCondition) ComponentCondition(org.talend.hadoop.distribution.condition.ComponentCondition) DistributionModuleGroup(org.talend.hadoop.distribution.DistributionModuleGroup) HashSet(java.util.HashSet)
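
For context, a minimal sketch (an assumption, not code from the project) of how such a condition could be inspected; it relies only on getConditionString(), which Example 10 below exercises:

ComponentCondition useNavigator =
        new SimpleComponentCondition(new BasicExpression(SparkBatchConstant.USE_CLOUDERA_NAVIGATOR));
// Presumably renders as something like (USE_CLOUDERA_NAVIGATOR=='true');
// the exact token depends on the constant's value.
String rendered = useNavigator.getConditionString();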

Example 7 with SimpleComponentCondition

Use of org.talend.hadoop.distribution.condition.SimpleComponentCondition in project tbd-studio-se by Talend.

From the class DynamicSparkStreamingKafkaClientModuleGroup, the method init:

protected void init() {
    spark1Condition = new SimpleComponentCondition(new LinkedNodeExpression(
            SparkStreamingConstant.KAFKA_SPARKCONFIGURATION_LINKEDPARAMETER,
            "SUPPORTED_SPARK_VERSION", //$NON-NLS-1$
            EqualityOperator.EQ,
            ESparkVersion.SPARK_1_6.getSparkVersion()));
    spark2Condition = new MultiComponentCondition(
            new LinkedNodeExpression(
                    SparkStreamingConstant.KAFKA_SPARKCONFIGURATION_LINKEDPARAMETER,
                    "SUPPORTED_SPARK_VERSION", //$NON-NLS-1$
                    EqualityOperator.EQ,
                    ESparkVersion.SPARK_2_2.getSparkVersion()),
            BooleanOperator.OR,
            new LinkedNodeExpression(
                    SparkStreamingConstant.KAFKA_SPARKCONFIGURATION_LINKEDPARAMETER,
                    "SUPPORTED_SPARK_VERSION", //$NON-NLS-1$
                    EqualityOperator.EQ,
                    ESparkVersion.SPARK_2_4_X.getSparkVersion()));
}
Also used : SimpleComponentCondition(org.talend.hadoop.distribution.condition.SimpleComponentCondition) MultiComponentCondition(org.talend.hadoop.distribution.condition.MultiComponentCondition) LinkedNodeExpression(org.talend.hadoop.distribution.condition.LinkedNodeExpression)
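
A minimal usage sketch (assumed, mirroring Example 6 above, not code from the project): once built, these conditions would typically guard a DistributionModuleGroup so the Kafka client jars are only required when the linked Spark configuration matches the version:

// "kafka-client-spark2" is a hypothetical runtime module group id, used purely for illustration.
Set<DistributionModuleGroup> groups = new HashSet<>();
groups.add(new DistributionModuleGroup("kafka-client-spark2", false, spark2Condition));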

Example 8 with SimpleComponentCondition

Use of org.talend.hadoop.distribution.condition.SimpleComponentCondition in project tbd-studio-se by Talend.

From the class DynamicHiveModuleGroup, the method getModuleGroups:

@Override
public Set<DistributionModuleGroup> getModuleGroups() throws Exception {
    Set<DistributionModuleGroup> hs = new HashSet<>();
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
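    // Look up the runtime module group ids registered by the dynamic plugin for each template id.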
    String hiveRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HIVE_MODULE_GROUP.getModuleName());
    String hdfsRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HDFS_MODULE_GROUP.getModuleName());
    String mrRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.MAPREDUCE_MODULE_GROUP.getModuleName());
    String hiveHBaseRuntimeId = pluginAdapter.getRuntimeModuleGroupIdByTemplateId(DynamicModuleGroupConstant.HIVE_HBASE_MODULE_GROUP.getModuleName());
    checkRuntimeId(hiveRuntimeId);
    checkRuntimeId(hdfsRuntimeId);
    checkRuntimeId(mrRuntimeId);
    checkRuntimeId(hiveHBaseRuntimeId);
    if (StringUtils.isNotBlank(hiveRuntimeId)) {
        hs.add(new DistributionModuleGroup(hiveRuntimeId));
    }
    if (StringUtils.isNotBlank(hdfsRuntimeId)) {
        hs.add(new DistributionModuleGroup(hdfsRuntimeId));
    }
    if (StringUtils.isNotBlank(mrRuntimeId)) {
        hs.add(new DistributionModuleGroup(mrRuntimeId));
    }
    if (StringUtils.isNotBlank(hiveHBaseRuntimeId)) {
        // The following condition instance stands for:
        // (isShow[STORE_BY_HBASE] AND STORE_BY_HBASE == 'true')
        ComponentCondition hbaseLoaderCondition = new MultiComponentCondition(
                new SimpleComponentCondition(new BasicExpression(HiveConstant.HIVE_CONFIGURATION_COMPONENT_HBASEPARAMETER)),
                BooleanOperator.AND,
                new SimpleComponentCondition(new ShowExpression(HiveConstant.HIVE_CONFIGURATION_COMPONENT_HBASEPARAMETER)));
        // The Hive components need to import some HBase libraries if "Use HBase storage" is checked.
        hs.add(new DistributionModuleGroup(hiveHBaseRuntimeId, false, hbaseLoaderCondition));
    }
    return hs;
}
Also used : DynamicPluginAdapter(org.talend.hadoop.distribution.dynamic.adapter.DynamicPluginAdapter) ShowExpression(org.talend.hadoop.distribution.condition.ShowExpression) BasicExpression(org.talend.hadoop.distribution.condition.BasicExpression) SimpleComponentCondition(org.talend.hadoop.distribution.condition.SimpleComponentCondition) MultiComponentCondition(org.talend.hadoop.distribution.condition.MultiComponentCondition) ComponentCondition(org.talend.hadoop.distribution.condition.ComponentCondition) DistributionModuleGroup(org.talend.hadoop.distribution.DistributionModuleGroup) HashSet(java.util.HashSet)
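
The shown-and-checked pattern above recurs wherever a module group is tied to a check box; a hypothetical helper (not part of tbd-studio-se) that factors it out, built only from the constructors used above:

// Hypothetical helper: builds the "parameter is shown AND its value is 'true'" condition.
static ComponentCondition checkedAndShown(String parameter) {
    return new MultiComponentCondition(
            new SimpleComponentCondition(new BasicExpression(parameter)),
            BooleanOperator.AND,
            new SimpleComponentCondition(new ShowExpression(parameter)));
}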

Example 9 with SimpleComponentCondition

Use of org.talend.hadoop.distribution.condition.SimpleComponentCondition in project tbd-studio-se by Talend.

From the class AbstractDistribution, the method buildNodeModuleGroups:

protected Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> buildNodeModuleGroups(String distribution, String version) {
    Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> result = new HashMap<>();
    // Azure
    ComponentCondition azureCondition = new SparkStreamingLinkedNodeCondition(distribution, version, SparkBatchConstant.SPARK_BATCH_AZURE_SPARKCONFIGURATION_LINKEDPARAMETER).getCondition();
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.AZURE_CONFIGURATION_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, azureCondition, ModuleGroupName.AZURE.get(this.getVersion()), true));
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.AZURE_CONFIGURATION_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, azureCondition, ModuleGroupName.AZURE.get(this.getVersion()), true));
    // Spark Batch BigQuery
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.BIGQUERY_CONFIG_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null, ModuleGroupName.BIGQUERY.get(this.getVersion()), true));
    // DynamoDB nodes ...
    Set<DistributionModuleGroup> dynamoDBBatchNodeModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, "USE_EXISTING_CONNECTION == 'false'", ModuleGroupName.DYNAMODB_BATCH.get(this.getVersion()), true);
    Set<DistributionModuleGroup> dynamoDBBatchConfigurationModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null, ModuleGroupName.DYNAMODB_BATCH.get(this.getVersion()), true);
    // ... in Spark batch
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DYNAMODB_INPUT_COMPONENT), dynamoDBBatchNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DYNAMODB_OUTPUT_COMPONENT), dynamoDBBatchNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DYNAMODB_CONFIGURATION_COMPONENT), dynamoDBBatchConfigurationModuleGroups);
    Set<DistributionModuleGroup> dynamoDBStreamingNodeModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, "USE_EXISTING_CONNECTION == 'false'", ModuleGroupName.DYNAMODB_STREAMING.get(this.getVersion()), true);
    Set<DistributionModuleGroup> dynamoDBStreamingConfigurationModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null, ModuleGroupName.DYNAMODB_STREAMING.get(this.getVersion()), true);
    // ... in Spark streaming
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.DYNAMODB_INPUT_COMPONENT), dynamoDBStreamingNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.DYNAMODB_OUTPUT_COMPONENT), dynamoDBStreamingNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.DYNAMODB_CONFIGURATION_COMPONENT), dynamoDBStreamingConfigurationModuleGroups);
    // Spark Streaming Flume nodes
    ComponentCondition flumeCondition = new SparkStreamingLinkedNodeCondition(distribution, version, SparkStreamingConstant.FLUME_SPARKCONFIGURATION_LINKEDPARAMETER).getCondition();
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.FLUME_INPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, flumeCondition, ModuleGroupName.FLUME.get(this.getVersion()), true));
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.FLUME_OUTPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, flumeCondition, ModuleGroupName.FLUME.get(this.getVersion()), true));
    // Spark Batch GCS
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.GCS_CONFIG_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null, ModuleGroupName.GCS.get(this.getVersion()), true));
    // GraphFrames - Spark Batch DQ matching
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.MATCH_PREDICT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.GRAPHFRAMES.get(this.getVersion()), true));
    // Spark Streaming Kafka nodes
    ComponentCondition kafkaCondition = new SparkStreamingLinkedNodeCondition(distribution, version, SparkStreamingConstant.KAFKA_SPARKCONFIGURATION_LINKEDPARAMETER).getCondition();
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KAFKA_INPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, kafkaCondition, ModuleGroupName.KAFKA.get(this.getVersion()), true));
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KAFKA_AVRO_INPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, kafkaCondition, ModuleGroupName.KAFKA.get(this.getVersion()), true));
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KAFKA_OUTPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, kafkaCondition, ModuleGroupName.KAFKA.get(this.getVersion()), true));
    // Spark Streaming Kinesis nodes
    Set<DistributionModuleGroup> kinesisModuleGroups = ModuleGroupsUtils.getStreamingModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.KINESIS.get(this.getVersion()), true);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KINESIS_INPUT_COMPONENT), kinesisModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KINESIS_INPUT_AVRO_COMPONENT), kinesisModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KINESIS_OUTPUT_COMPONENT), kinesisModuleGroups);
    // PubSub nodes...
    Set<DistributionModuleGroup> pubSubNodeModuleGroups = ModuleGroupsUtils.getStreamingModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.PUBSUB.get(this.getVersion()), true);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PUBSUB_INPUT_COMPONENT), pubSubNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PUBSUB_INPUT_AVRO_COMPONENT), pubSubNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PUBSUB_OUTPUT_COMPONENT), pubSubNodeModuleGroups);
    // Spark Batch Parquet nodes
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.PARQUET_INPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.PARQUET.get(this.getVersion()), !"SPARK".equals(this.getDistribution())));
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.PARQUET_OUTPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.PARQUET.get(this.getVersion()), !"SPARK".equals(this.getDistribution())));
    // Spark Streaming Parquet nodes
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PARQUET_INPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.PARQUET.get(this.getVersion()), !"SPARK".equals(this.getDistribution())));
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PARQUET_OUTPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.PARQUET.get(this.getVersion()), !"SPARK".equals(this.getDistribution())));
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PARQUET_STREAM_INPUT_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.PARQUET.get(this.getVersion()), !"SPARK".equals(this.getDistribution())));
    // Redshift nodes ...
    Set<DistributionModuleGroup> redshiftBatchNodeModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, "USE_EXISTING_CONNECTION == 'false'", ModuleGroupName.REDSHIFT_BATCH.get(this.getVersion()), true);
    Set<DistributionModuleGroup> redshiftBatchConfigurationModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null, ModuleGroupName.REDSHIFT_BATCH.get(this.getVersion()), true);
    // ... in Spark batch
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.REDSHIFT_INPUT_COMPONENT), redshiftBatchNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.REDSHIFT_OUTPUT_COMPONENT), redshiftBatchNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.REDSHIFT_CONFIGURATION_COMPONENT), redshiftBatchConfigurationModuleGroups);
    Set<DistributionModuleGroup> redshiftStreamingNodeModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, "USE_EXISTING_CONNECTION == 'false'", ModuleGroupName.REDSHIFT_STREAMING.get(this.getVersion()), true);
    Set<DistributionModuleGroup> redshiftStreamingConfigurationModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null, ModuleGroupName.REDSHIFT_STREAMING.get(this.getVersion()), true);
    // ... in Spark streaming
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.REDSHIFT_LOOKUP_INPUT_COMPONENT), redshiftStreamingNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.REDSHIFT_OUTPUT_COMPONENT), redshiftStreamingNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.REDSHIFT_CONFIGURATION_COMPONENT), redshiftStreamingConfigurationModuleGroups);
    // Snowflake nodes ...
    Set<DistributionModuleGroup> snowFlakeBatchNodeModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, "USE_EXISTING_CONNECTION == 'false'", ModuleGroupName.SNOWFLAKE.get(this.getVersion()), true);
    Set<DistributionModuleGroup> snowFlakeBatchConfigurationModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null, ModuleGroupName.SNOWFLAKE.get(this.getVersion()), true);
    // ... in Spark batch
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.SNOWFLAKE_INPUT_COMPONENT), snowFlakeBatchNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.SNOWFLAKE_OUTPUT_COMPONENT), snowFlakeBatchNodeModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.SNOWFLAKE_CONFIGURATION_COMPONENT), snowFlakeBatchConfigurationModuleGroups);
    // Spark S3 condition
    ComponentCondition s3StorageCondition = new SparkBatchLinkedNodeCondition(distribution, version, SparkBatchConstant.SPARK_BATCH_S3_SPARKCONFIGURATION_LINKEDPARAMETER).getCondition();
    // Spark Batch S3 nodes
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.S3_CONFIGURATION_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) s3StorageCondition, ModuleGroupName.S3.get(this.getVersion()), true));
    // Spark Streaming S3 nodes
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.S3_CONFIGURATION_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) s3StorageCondition, ModuleGroupName.S3.get(this.getVersion()), true));
    // WebHDFS
    HDFSLinkedNodeCondition hdfsLinkedNodeCondition = new HDFSLinkedNodeCondition(distribution, version);
    Set<DistributionModuleGroup> webHDFSNodeModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, hdfsLinkedNodeCondition.getWebHDFSCondition(), ModuleGroupName.WEBHDFS.get(this.getVersion()), true);
    for (String hdfsComponent : HDFSConstant.HDFS_COMPONENTS) {
        result.put(new NodeComponentTypeBean(ComponentType.HDFS, hdfsComponent), webHDFSNodeModuleGroups);
    }
    // Sqoop
    for (String sqoopComponent : SqoopConstant.SQOOP_COMPONENTS) {
        result.put(new NodeComponentTypeBean(ComponentType.SQOOP, sqoopComponent), ModuleGroupsUtils.getModuleGroups(distribution, version, (ComponentCondition) null, ModuleGroupName.SQOOP.get(this.getVersion()), true));
    }
    // Spark Batch tSQLRow nodes
    ComponentCondition hiveContextCondition = new SimpleComponentCondition(new BasicExpression("SQL_CONTEXT", EqualityOperator.EQ, "HiveContext"));
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.SPARK_SQL_ROW_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, hiveContextCondition, ModuleGroupName.HIVE.get(this.getVersion()), true));
    // Spark Streaming tSQLRow nodes
    result.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.SPARK_SQL_ROW_COMPONENT), ModuleGroupsUtils.getModuleGroups(distribution, version, hiveContextCondition, ModuleGroupName.HIVE.get(this.getVersion()), true));
    // delta components in Spark batch
    Set<DistributionModuleGroup> deltaBatchConfigurationModuleGroups = ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null, ModuleGroupName.DELTALAKE.get(this.getVersion()), true);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DELTALAKE_INPUT_COMPONENT), deltaBatchConfigurationModuleGroups);
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DELTALAKE_OUTPUT_COMPONENT), deltaBatchConfigurationModuleGroups);
    return result;
}
Also used : HDFSLinkedNodeCondition(org.talend.hadoop.distribution.condition.common.HDFSLinkedNodeCondition) HashSet(java.util.HashSet) Set(java.util.Set) SparkStreamingLinkedNodeCondition(org.talend.hadoop.distribution.condition.common.SparkStreamingLinkedNodeCondition) HashMap(java.util.HashMap) SimpleComponentCondition(org.talend.hadoop.distribution.condition.SimpleComponentCondition) SparkBatchLinkedNodeCondition(org.talend.hadoop.distribution.condition.common.SparkBatchLinkedNodeCondition) BasicExpression(org.talend.hadoop.distribution.condition.BasicExpression) MultiComponentCondition(org.talend.hadoop.distribution.condition.MultiComponentCondition) ComponentCondition(org.talend.hadoop.distribution.condition.ComponentCondition)
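
A usage sketch (hypothetical values, not from the source): callers would look up a component's module groups by its NodeComponentTypeBean key, which is assumed to implement equals/hashCode on the component type and name:

// "CLOUDERA" and "CDH_6_1" are made-up distribution/version strings for illustration.
Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> groups = buildNodeModuleGroups("CLOUDERA", "CDH_6_1");
Set<DistributionModuleGroup> sqlRowGroups = groups.get(
        new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.SPARK_SQL_ROW_COMPONENT));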

Example 10 with SimpleComponentCondition

Use of org.talend.hadoop.distribution.condition.SimpleComponentCondition in project tbd-studio-se by Talend.

From the class ComponentConditionTest, the method testGetConditionString:

@Test
public void testGetConditionString() throws Exception {
    ComponentCondition dc1 = new SimpleComponentCondition(new BasicExpression(PARAM_1, VALUE_1, EqualityOperator.EQ));
    assertEquals(dc1.getConditionString(), LEFT_PAR + PARAM_1 + EQ + SINGLE_QUOTE + VALUE_1 + SINGLE_QUOTE + RIGHT_PAR);
    Expression e1 = new BasicExpression("A", "aaa", EqualityOperator.EQ); //$NON-NLS-1$ //$NON-NLS-2$
    Expression e2 = new BasicExpression("Z", "ccc", EqualityOperator.EQ); //$NON-NLS-1$ //$NON-NLS-2$
    Expression e3 = new BasicExpression("A", "bbb", EqualityOperator.EQ); //$NON-NLS-1$ //$NON-NLS-2$
    Expression e4 = new BasicExpression("B", "ccc", EqualityOperator.NOT_EQ); //$NON-NLS-1$ //$NON-NLS-2$
    dc1 = new NestedComponentCondition(new MultiComponentCondition(
            new SimpleComponentCondition(e1),
            new NestedComponentCondition(new MultiComponentCondition(
                    new SimpleComponentCondition(e4),
                    new NestedComponentCondition(new MultiComponentCondition(
                            new SimpleComponentCondition(e2),
                            new SimpleComponentCondition(e3),
                            BooleanOperator.AND)),
                    BooleanOperator.OR)),
            BooleanOperator.AND));
    assertEquals(dc1.getConditionString(), result1);
    dc1 = new MultiComponentCondition(
            new SimpleComponentCondition(e1),
            new MultiComponentCondition(
                    new SimpleComponentCondition(e2),
                    new NestedComponentCondition(new MultiComponentCondition(
                            new SimpleComponentCondition(e4),
                            new SimpleComponentCondition(e3),
                            BooleanOperator.OR)),
                    BooleanOperator.AND),
            BooleanOperator.AND);
    assertEquals(dc1.getConditionString(), result2);
}
Also used : BasicExpression(org.talend.hadoop.distribution.condition.BasicExpression) Expression(org.talend.hadoop.distribution.condition.Expression) SimpleComponentCondition(org.talend.hadoop.distribution.condition.SimpleComponentCondition) MultiComponentCondition(org.talend.hadoop.distribution.condition.MultiComponentCondition) NestedComponentCondition(org.talend.hadoop.distribution.condition.NestedComponentCondition) ComponentCondition(org.talend.hadoop.distribution.condition.ComponentCondition) Test(org.junit.Test)
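
Reading the nesting in the second assertion, result1 is presumably the fully parenthesized rendering below (a hedged reconstruction; the constant's actual value is not shown in this excerpt, and the tokens for EQ, NOT_EQ, and the boolean operators are assumed to be ==, !=, and AND/OR):

// Assumed shape of result1, derived from the condition tree above:
// ((A=='aaa') AND ((B!='ccc') OR ((Z=='ccc') AND (A=='bbb'))))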

Aggregations

SimpleComponentCondition (org.talend.hadoop.distribution.condition.SimpleComponentCondition): 23 uses
BasicExpression (org.talend.hadoop.distribution.condition.BasicExpression): 20 uses
MultiComponentCondition (org.talend.hadoop.distribution.condition.MultiComponentCondition): 17 uses
ComponentCondition (org.talend.hadoop.distribution.condition.ComponentCondition): 16 uses
HashSet (java.util.HashSet): 12 uses
DistributionModuleGroup (org.talend.hadoop.distribution.DistributionModuleGroup): 10 uses
DynamicPluginAdapter (org.talend.hadoop.distribution.dynamic.adapter.DynamicPluginAdapter): 6 uses
NestedComponentCondition (org.talend.hadoop.distribution.condition.NestedComponentCondition): 4 uses
ShowExpression (org.talend.hadoop.distribution.condition.ShowExpression): 4 uses
Set (java.util.Set): 3 uses
Expression (org.talend.hadoop.distribution.condition.Expression): 3 uses
LinkedNodeExpression (org.talend.hadoop.distribution.condition.LinkedNodeExpression): 3 uses
RawExpression (org.talend.hadoop.distribution.condition.RawExpression): 3 uses
HashMap (java.util.HashMap): 2 uses
LinkedHashSet (java.util.LinkedHashSet): 1 use
Map (java.util.Map): 1 use
Test (org.junit.Test): 1 use
ComponentType (org.talend.hadoop.distribution.ComponentType): 1 use
ESparkVersion (org.talend.hadoop.distribution.ESparkVersion): 1 use
BooleanExpression (org.talend.hadoop.distribution.condition.BooleanExpression): 1 use