Use of org.talend.hadoop.distribution.NodeComponentTypeBean in project tbd-studio-se by Talend.
The class DynamicCDPSparkBatchModuleGroupTemplate, method getNodeModuleGroups:
@Override
public Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> getNodeModuleGroups() throws Exception {
    Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> nodeModuleGroups = super.getNodeModuleGroups();
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    IDynamicPluginConfiguration configuration = pluginAdapter.getPluginConfiguration();
    String distribution = ICDPDistribution.DISTRIBUTION_NAME;
    String version = configuration.getId();
    buildNodeModuleGroups4SparkBatch(pluginAdapter, nodeModuleGroups, distribution, version);
    buildNodeModuleGroups4SparkStreaming(pluginAdapter, nodeModuleGroups, distribution, version);
    return nodeModuleGroups;
}
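For context, a minimal sketch of how a caller might look up one entry in the map this method returns. Only the NodeComponentTypeBean constructor and the map shape come from the snippet above; the import paths and the lookup helper itself are assumptions, not tbd-studio-se code.
import java.util.Map;
import java.util.Set;
// Import paths assumed from the snippet's context in tbd-studio-se.
import org.talend.hadoop.distribution.ComponentType;
import org.talend.hadoop.distribution.DistributionModuleGroup;
import org.talend.hadoop.distribution.NodeComponentTypeBean;

public class NodeModuleGroupLookup {

    // NodeComponentTypeBean serves as a composite map key (component type plus
    // component name), so fetching the module groups registered for a single
    // component is a plain Map#get with a freshly constructed, equal key.
    public static Set<DistributionModuleGroup> lookup(
            Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> nodeModuleGroups,
            ComponentType type, String componentName) {
        return nodeModuleGroups.get(new NodeComponentTypeBean(type, componentName));
    }
}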
Use of org.talend.hadoop.distribution.NodeComponentTypeBean in project tbd-studio-se by Talend.
The class DynamicSparkBatchModuleGroupTemplate, method getNodeModuleGroups:
@Override
public Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> getNodeModuleGroups() throws Exception {
    Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> nodeModuleGroups = super.getNodeModuleGroups();
    DynamicPluginAdapter pluginAdapter = getPluginAdapter();
    IDynamicPluginConfiguration configuration = pluginAdapter.getPluginConfiguration();
    String distribution = configuration.getDistribution();
    String version = configuration.getId();
    buildNodeModuleGroups4SparkBatch(pluginAdapter, nodeModuleGroups, distribution, version);
    return nodeModuleGroups;
}
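Note the contrast with the CDP override above: the CDP subclass pins the distribution name to ICDPDistribution.DISTRIBUTION_NAME and also registers Spark Streaming groups, while this base version reads the distribution from the plugin configuration and registers only Spark Batch groups. A minimal sketch of a further subclass following the same template-method pattern; the class name is hypothetical and the parent constructor signature is assumed:
// Hypothetical subclass: reuse the map built by the parent template, then
// register extra mappings for the same distribution/version pair, mirroring
// the CDP override shown earlier.
public class DynamicCustomSparkBatchModuleGroupTemplate extends DynamicSparkBatchModuleGroupTemplate {

    public DynamicCustomSparkBatchModuleGroupTemplate(DynamicPluginAdapter pluginAdapter) {
        super(pluginAdapter); // parent constructor signature assumed
    }

    @Override
    public Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> getNodeModuleGroups() throws Exception {
        Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> nodeModuleGroups = super.getNodeModuleGroups();
        DynamicPluginAdapter pluginAdapter = getPluginAdapter();
        IDynamicPluginConfiguration configuration = pluginAdapter.getPluginConfiguration();
        // Additionally register the streaming groups, as the CDP template does.
        buildNodeModuleGroups4SparkStreaming(pluginAdapter, nodeModuleGroups,
                configuration.getDistribution(), configuration.getId());
        return nodeModuleGroups;
    }
}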
Use of org.talend.hadoop.distribution.NodeComponentTypeBean in project tbd-studio-se by Talend.
The class DynamicSparkBatchModuleGroupTemplate, method buildNodeModuleGroups4SparkStreaming:
protected void buildNodeModuleGroups4SparkStreaming(DynamicPluginAdapter pluginAdapter,
        Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> nodeModuleGroupsMap, String distribution,
        String version) throws Exception {
    Set<DistributionModuleGroup> hiveConfigurationModuleGroups = buildNodeModuleGroup4Spark4Hive(pluginAdapter, distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkBatchConstant.HIVE_INPUT_COMPONENT), hiveConfigurationModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkBatchConstant.HIVE_OUTPUT_COMPONENT), hiveConfigurationModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkBatchConstant.HIVE_CONFIGURATION_COMPONENT), hiveConfigurationModuleGroups);
    Set<DistributionModuleGroup> hiveWarehouseConfigurationModuleGroups = buildNodeModuleGroup4Spark4HiveWarehouse(pluginAdapter, distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkBatchConstant.HIVE_WAREHOUSE_INPUT_COMPONENT), hiveWarehouseConfigurationModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkBatchConstant.HIVE_WAREHOUSE_OUTPUT_COMPONENT), hiveWarehouseConfigurationModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkBatchConstant.HIVE_WAREHOUSE_CONFIGURATION_COMPONENT), hiveWarehouseConfigurationModuleGroups);
}
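One detail worth noting above: each set of module groups is built once and then registered under several component keys, so the Hive input, output, and configuration components all share the same DistributionModuleGroup instances. A minimal generic helper capturing that pattern; the helper is illustrative, not part of tbd-studio-se:
// Hypothetical helper: register one shared module-group set under several
// component names of the same component type, as the Hive and Hive Warehouse
// registrations above do by hand.
protected void registerShared(Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> map,
        ComponentType type, Set<DistributionModuleGroup> groups, String... componentNames) {
    for (String name : componentNames) {
        map.put(new NodeComponentTypeBean(type, name), groups);
    }
}

// Usage equivalent to the three Hive put calls above:
// registerShared(nodeModuleGroupsMap, ComponentType.SPARKSTREAMING, hiveConfigurationModuleGroups,
//         SparkBatchConstant.HIVE_INPUT_COMPONENT, SparkBatchConstant.HIVE_OUTPUT_COMPONENT,
//         SparkBatchConstant.HIVE_CONFIGURATION_COMPONENT);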
Use of org.talend.hadoop.distribution.NodeComponentTypeBean in project tbd-studio-se by Talend.
The class DynamicSparkStreamingModuleGroupTemplate, method buildNodeModuleGroups4SparkStreaming:
protected void buildNodeModuleGroups4SparkStreaming(DynamicPluginAdapter pluginAdapter,
        Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> nodeModuleGroupsMap, String distribution,
        String version) throws Exception {
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PARQUET_INPUT_COMPONENT), new DynamicSparkStreamingParquetNodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PARQUET_OUTPUT_COMPONENT), new DynamicSparkStreamingParquetNodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.PARQUET_STREAM_INPUT_COMPONENT), new DynamicSparkStreamingParquetNodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.S3_CONFIGURATION_COMPONENT), new DynamicSparkStreamingS3NodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version));
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkBatchConstant.AZURE_CONFIGURATION_COMPONENT), new DynamicSparkBatchAzureNodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version));
    // Kinesis
    Set<DistributionModuleGroup> kinesisNodeModuleGroups = buildNodeModuleGroups4SparkStreaming4Kinesis(pluginAdapter, distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KINESIS_INPUT_COMPONENT), kinesisNodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KINESIS_INPUT_AVRO_COMPONENT), kinesisNodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KINESIS_OUTPUT_COMPONENT), kinesisNodeModuleGroups);
    // Flume
    Set<DistributionModuleGroup> flumeNodeModuleGroups = new DynamicSparkStreamingFlumeNodeModuleGroup(pluginAdapter).getModuleGroups(distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.FLUME_INPUT_COMPONENT), flumeNodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.FLUME_OUTPUT_COMPONENT), flumeNodeModuleGroups);
    // Kafka
    Set<DistributionModuleGroup> kafkaAssemblyModuleGroups = buildNodeModuleGroups4SparkStreaming4KafkaAssembly(pluginAdapter, distribution, version);
    Set<DistributionModuleGroup> kafkaAvroModuleGroups = buildNodeModuleGroups4SparkStreaming4KafkaAvro(pluginAdapter, distribution, version);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KAFKA_INPUT_COMPONENT), kafkaAssemblyModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KAFKA_AVRO_INPUT_COMPONENT), kafkaAvroModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.KAFKA_OUTPUT_COMPONENT), kafkaAssemblyModuleGroups);
    // DynamoDB ...
    Set<DistributionModuleGroup> dynamoDBNodeModuleGroups = buildNodeModuleGroup4Spark4DynamoDB(pluginAdapter, distribution, version, "USE_EXISTING_CONNECTION == 'false'"); //$NON-NLS-1$
    Set<DistributionModuleGroup> dynamoDBConfigurationModuleGroups = buildNodeModuleGroup4Spark4DynamoDB(pluginAdapter, distribution, version, null);
    // ... in Spark streaming
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.DYNAMODB_INPUT_COMPONENT), dynamoDBNodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.DYNAMODB_OUTPUT_COMPONENT), dynamoDBNodeModuleGroups);
    nodeModuleGroupsMap.put(new NodeComponentTypeBean(ComponentType.SPARKSTREAMING, SparkStreamingConstant.DYNAMODB_CONFIGURATION_COMPONENT), dynamoDBConfigurationModuleGroups);
}
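All of these put calls rely on NodeComponentTypeBean behaving as a value-style key: two beans built from the same ComponentType and component name must compare equal and hash identically, or later lookups with a freshly constructed key would miss the entry. An illustrative re-implementation of that contract (not Talend's actual source):
import java.util.Objects;
// Import path for ComponentType assumed from the snippets above.
import org.talend.hadoop.distribution.ComponentType;

// Illustrative value-style key, not the real NodeComponentTypeBean: equality
// and hashing over both fields is what lets a freshly constructed key
// retrieve entries that were put into the map earlier.
public final class ComponentTypeKey {

    private final ComponentType componentType;
    private final String componentName;

    public ComponentTypeKey(ComponentType componentType, String componentName) {
        this.componentType = componentType;
        this.componentName = componentName;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof ComponentTypeKey)) {
            return false;
        }
        ComponentTypeKey other = (ComponentTypeKey) o;
        return componentType == other.componentType && Objects.equals(componentName, other.componentName);
    }

    @Override
    public int hashCode() {
        return Objects.hash(componentType, componentName);
    }
}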
Use of org.talend.hadoop.distribution.NodeComponentTypeBean in project tbd-studio-se by Talend.
The class DTPDistribution, method buildNodeModuleGroups:
protected Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> buildNodeModuleGroups(String distribution, String version) {
    Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> result = new HashMap<>();
    // BigQuery
    result.put(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.BIGQUERY_CONFIG_COMPONENT),
            ModuleGroupsUtils.getModuleGroups(distribution, version, (String) null,
                    ModuleGroupName.BIGQUERY.get(this.getVersion()), true));
    return result;
}
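A minimal sketch of how this mapping might be exercised; since the method is protected, the call site is assumed to live inside DTPDistribution or a subclass, and the distribution and version literals below are placeholders, not values from the project:
// Hypothetical check from within a DTPDistribution subclass: the BigQuery
// configuration component should resolve to a non-null set of module groups.
Map<NodeComponentTypeBean, Set<DistributionModuleGroup>> groups =
        buildNodeModuleGroups("DTP", "1.0"); // placeholder arguments
Set<DistributionModuleGroup> bigQueryGroups =
        groups.get(new NodeComponentTypeBean(ComponentType.SPARKBATCH, SparkBatchConstant.BIGQUERY_CONFIG_COMPONENT));
assert bigQueryGroups != null;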