Use of org.talend.hadoop.distribution.DistributionModuleGroup in project tbd-studio-se by Talend.
From the class DBR73xSparkStreamingModuleGroupTest, method testModuleGroups:
@Test
public void testModuleGroups() throws Exception {
    Map<String, String> expected = new HashMap<>();
    expected.put(DBR73xConstant.SPARK_STREAMING_LIB_MRREQUIRED_DBR73X.getModuleName(), null);
    expected.put(DBR73xConstant.HIVEONSPARK_LIB_MRREQUIRED_DBR73X.getModuleName(), null);
    expected.put(DBR73xConstant.SPARK_LIB_MRREQUIRED_DBR73X.getModuleName(), null);
    expected.put(DBR73xConstant.BIGDATA_LAUNCHER_LIB_DBR73X.getModuleName(), null);
    expected.put(DBR73xConstant.DYNAMODB_GROUP_DBR73x.getModuleName(), null);
    expected.put(DBR73xConstant.KAFKA_LIB_REQUIRED_DBR73X.getModuleName(), null);
    expected.put(DBR73xConstant.SPARK_STREAMING_LIB_KINESIS_DBR73X.getModuleName(), null);
    Set<DistributionModuleGroup> moduleGroups = DBR73xSparkStreamingModuleGroup.getModuleGroups();
    assertEquals(expected.size(), moduleGroups.size());
    for (DistributionModuleGroup module : moduleGroups) {
        assertTrue("Should contain module " + module.getModuleName(), //$NON-NLS-1$
                expected.containsKey(module.getModuleName()));
        if (expected.get(module.getModuleName()) == null) {
            assertTrue("The condition of the module " + module.getModuleName() + " is not null.", //$NON-NLS-1$ //$NON-NLS-2$
                    expected.get(module.getModuleName()) == null);
        } else {
            assertTrue("The condition of the module " + module.getModuleName() //$NON-NLS-1$
                    + " is null, but it should be " + expected.get(module.getModuleName()) + ".", //$NON-NLS-1$ //$NON-NLS-2$
                    expected.get(module.getModuleName()) != null);
            assertEquals(expected.get(module.getModuleName()), module.getRequiredIf().getConditionString());
        }
    }
}
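For context, here is a minimal sketch of an implementation that would satisfy this test, assuming the three-argument DistributionModuleGroup constructor (module group name, mrRequired flag, condition) shown in the Spark30xNodeModuleGroup snippet further down this page; the real method in tbd-studio-se may be organized differently:

// Hypothetical sketch, not the actual tbd-studio-se source: every Spark Streaming
// module group for Databricks 7.3 is registered unconditionally (null condition),
// matching the null values in the test's expected map.
public static Set<DistributionModuleGroup> getModuleGroups() {
    Set<DistributionModuleGroup> moduleGroups = new HashSet<>();
    moduleGroups.add(new DistributionModuleGroup(
            DBR73xConstant.SPARK_STREAMING_LIB_MRREQUIRED_DBR73X.getModuleName(), true, null));
    moduleGroups.add(new DistributionModuleGroup(
            DBR73xConstant.SPARK_LIB_MRREQUIRED_DBR73X.getModuleName(), true, null));
    // ... the remaining DBR73xConstant groups from the expected map are added the same way.
    return moduleGroups;
}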
Use of org.talend.hadoop.distribution.DistributionModuleGroup in project tbd-studio-se by Talend.
From the class DBR73xKinesisNodeModuleGroupTest, method testModuleGroups:
@Test
public void testModuleGroups() throws Exception {
    Map<String, String> expected = new HashMap<>();
    expected.put(DBR73xConstant.SPARK_STREAMING_LIB_KINESIS_DBR73X.getModuleName(),
            "((#LINK@NODE.STORAGE_CONFIGURATION.DISTRIBUTION=='DATABRICKS') AND (#LINK@NODE.STORAGE_CONFIGURATION.SPARK_VERSION=='DATABRICKS_7_3')) AND (#LINK@NODE.STORAGE_CONFIGURATION.SPARK_LOCAL_MODE=='false')"); //$NON-NLS-1$
    Set<DistributionModuleGroup> moduleGroups = DBR73xKinesisNodeModuleGroup.getModuleGroups(
            DBR73xDistribution.DISTRIBUTION_NAME, DBR73xDistribution.VERSION);
    assertEquals(expected.size(), moduleGroups.size());
    for (DistributionModuleGroup module : moduleGroups) {
        assertTrue("Should contain module " + module.getModuleName(), //$NON-NLS-1$
                expected.containsKey(module.getModuleName()));
        if (expected.get(module.getModuleName()) == null) {
            assertTrue("The condition of the module " + module.getModuleName() + " is not null.", //$NON-NLS-1$ //$NON-NLS-2$
                    expected.get(module.getModuleName()) == null);
        } else {
            assertTrue("The condition of the module " + module.getModuleName() //$NON-NLS-1$
                    + " is null, but it should be " + expected.get(module.getModuleName()) + ".", //$NON-NLS-1$ //$NON-NLS-2$
                    expected.get(module.getModuleName()) != null);
            // assertEquals(expected.get(module.getModuleName()), module.getRequiredIf().getConditionString());
        }
    }
}
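As a usage note, the same accessors the test exercises can dump each group's gating condition; this sketch only uses calls that appear elsewhere on this page:

// Print every Kinesis module group together with its condition string; for the
// DBR73X group above, this should show the Databricks 7.3, non-local-mode condition.
for (DistributionModuleGroup group : DBR73xKinesisNodeModuleGroup.getModuleGroups(
        DBR73xDistribution.DISTRIBUTION_NAME, DBR73xDistribution.VERSION)) {
    String condition = group.getRequiredIf() == null
            ? "unconditional" : group.getRequiredIf().getConditionString();
    System.out.println(group.getModuleName() + " -> " + condition);
}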
Use of org.talend.hadoop.distribution.DistributionModuleGroup in project tbd-studio-se by Talend.
From the class Spark30xNodeModuleGroup, method getModuleGroup:
public static Set<DistributionModuleGroup> getModuleGroup(String moduleGroupName, String sparkConfigLinkedParameter,
        ESparkVersion sparkVersion) {
    Set<DistributionModuleGroup> hs = new HashSet<>();
    DistributionModuleGroup dmg = new DistributionModuleGroup(moduleGroupName, true,
            spark30xCondition(sparkConfigLinkedParameter, sparkVersion));
    hs.add(dmg);
    return hs;
}
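A hedged caller sketch for the method above; the module group name, linked-parameter name, and ESparkVersion constant are placeholder values, not identifiers confirmed to exist in tbd-studio-se:

// Resolve the module group for a Spark 3.0.x job; spark30xCondition() ties the
// returned group to the given Spark configuration parameter and version.
Set<DistributionModuleGroup> groups = Spark30xNodeModuleGroup.getModuleGroup(
        "SPARK_BATCH_MODULE_GROUP",   // placeholder module group name
        "SPARK_CONFIGURATION",        // placeholder linked-parameter name
        ESparkVersion.SPARK_3_0);     // assumed enum constant for Spark 3.0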
Use of org.talend.hadoop.distribution.DistributionModuleGroup in project tbd-studio-se by Talend.
From the class DynamicMapReduceModuleGroupTemplate, method buildNodeModuleGroups4MapReduce:
protected void buildNodeModuleGroups4MapReduce(DynamicPluginAdapter pluginAdapter,
        Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> nodeModuleGroupsMap, String distribution,
        String version) throws Exception {
    Set<DistributionModuleGroup> nodeModuleGroup4MRS3 = buildNodeModuleGroup4MapReduce4MRS3(pluginAdapter, distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.MAPREDUCE, MRConstant.S3_INPUT_COMPONENT), nodeModuleGroup4MRS3);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.MAPREDUCE, MRConstant.S3_OUTPUT_COMPONENT), nodeModuleGroup4MRS3);
}
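Both S3 components deliberately share one Set instance, so consumers resolve identical module groups for input and output. A lookup sketch, assuming NodeComponentTypeBean implements equals/hashCode, which its use as a map key implies:

// Retrieve the shared S3 module groups registered above; the same set backs both
// the input and the output component.
Set<DistributionModuleGroup> s3Groups = nodeModuleGroupsMap.get(
        new NodeComponentTypeBean(ComponentType.MAPREDUCE, MRConstant.S3_INPUT_COMPONENT));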
Use of org.talend.hadoop.distribution.DistributionModuleGroup in project tbd-studio-se by Talend.
From the class DynamicSparkBatchModuleGroupTemplate, method buildNodeModuleGroups4SparkBatch:
protected void buildNodeModuleGroups4SparkBatch(DynamicPluginAdapter pluginAdapter,
        Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> nodeModuleGroupsMap, String distribution,
        String version) throws Exception {
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.PARQUET_INPUT_COMPONENT),
            new DynamicSparkBatchParquetNodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.PARQUET_OUTPUT_COMPONENT),
            new DynamicSparkBatchParquetNodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.S3_CONFIGURATION_COMPONENT),
            new DynamicSparkBatchS3NodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.AZURE_CONFIGURATION_COMPONENT),
            buildNodeModuleGroups4SparkBatchAzure(pluginAdapter, distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.MATCH_PREDICT_COMPONENT),
            buildNodeModuleGroups4SparkBatch4GraphFrames(pluginAdapter, distribution, version));
    // DynamoDB module groups
    Set<DistributionModuleGroup> dynamoDBNodeModuleGroups = buildNodeModuleGroup4Spark4DynamoDB(pluginAdapter,
            distribution, version, "USE_EXISTING_CONNECTION == 'false'");
    Set<DistributionModuleGroup> dynamoDBConfigurationModuleGroups = buildNodeModuleGroup4Spark4DynamoDB(pluginAdapter,
            distribution, version, null);
    // attach the module groups to the corresponding nodes
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DYNAMODB_INPUT_COMPONENT),
            dynamoDBNodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DYNAMODB_OUTPUT_COMPONENT),
            dynamoDBNodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DYNAMODB_CONFIGURATION_COMPONENT),
            dynamoDBConfigurationModuleGroups);
    Set<DistributionModuleGroup> jdbcIONodeModuleGroups = buildNodeModuleGroup4Spark4JDBC(pluginAdapter,
            distribution, version, "USE_EXISTING_CONNECTION == 'false'");
    Set<DistributionModuleGroup> jdbcConfNodeModuleGroups = buildNodeModuleGroup4Spark4JDBC(pluginAdapter,
            distribution, version, null);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.TERADATA_OUTPUT_COMPONENT),
            jdbcIONodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.TERADATA_INPUT_COMPONENT),
            jdbcIONodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.ORACLE_OUTPUT_COMPONENT),
            jdbcIONodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.ORACLE_INPUT_COMPONENT),
            jdbcIONodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.TERADATA_CONFIG_COMPONENT),
            jdbcConfNodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.ORACLE_CONFIG_COMPONENT),
            jdbcConfNodeModuleGroups);
    buildNodeModuleGroups4SparkBatch4Kudu(pluginAdapter, nodeModuleGroupsMap, distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.GCS_CONFIG_COMPONENT),
            buildModuleGroups4SparkBatch4GCS(pluginAdapter, distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.BIGQUERY_CONFIG_COMPONENT),
            buildModuleGroups4SparkBatch4BigQuery(pluginAdapter, distribution, version));
    Set<DistributionModuleGroup> hiveConfigurationModuleGroups = buildNodeModuleGroup4Spark4Hive(pluginAdapter,
            distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.HIVE_INPUT_COMPONENT),
            hiveConfigurationModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.HIVE_OUTPUT_COMPONENT),
            hiveConfigurationModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.HIVE_CONFIGURATION_COMPONENT),
            hiveConfigurationModuleGroups);
    Set<DistributionModuleGroup> hiveWarehouseConfigurationModuleGroups = buildNodeModuleGroup4Spark4HiveWarehouse(pluginAdapter,
            distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.HIVE_WAREHOUSE_INPUT_COMPONENT),
            hiveWarehouseConfigurationModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.HIVE_WAREHOUSE_OUTPUT_COMPONENT),
            hiveWarehouseConfigurationModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.HIVE_WAREHOUSE_CONFIGURATION_COMPONENT),
            hiveWarehouseConfigurationModuleGroups);
}
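The DynamoDB and JDBC sets above follow one pattern: the builder is invoked twice, once with the condition "USE_EXISTING_CONNECTION == 'false'" for input/output nodes and once with null for the configuration component, so configuration components always pull their jars while nodes do so only when they open their own connection. A lookup sketch using only accessors shown elsewhere on this page:

// Inspect the DynamoDB input groups registered above; each condition string is
// expected to gate the jars on USE_EXISTING_CONNECTION == 'false'.
Set<DistributionModuleGroup> dynamoGroups = nodeModuleGroupsMap.get(
        new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.DYNAMODB_INPUT_COMPONENT));
for (DistributionModuleGroup group : dynamoGroups) {
    System.out.println(group.getModuleName() + " -> "
            + (group.getRequiredIf() == null ? "unconditional" : group.getRequiredIf().getConditionString()));
}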