Use of com.hortonworks.streamline.streams.catalog.Topology in project streamline by hortonworks.
The class TopologyTestRunner, method runTest:
public TopologyTestRunHistory runTest(TopologyActions topologyActions, Topology topology,
                                      TopologyTestRunCase testCase, Long durationSecs) throws IOException {
    List<StreamlineSource> sources = topology.getTopologyDag().getOutputComponents().stream()
            .filter(c -> c instanceof StreamlineSource)
            .map(c -> (StreamlineSource) c)
            .collect(toList());
    List<StreamlineSink> sinks = topology.getTopologyDag().getInputComponents().stream()
            .filter(c -> c instanceof StreamlineSink)
            .map(c -> (StreamlineSink) c)
            .collect(toList());
    List<StreamlineProcessor> processors = topology.getTopologyDag().getOutputComponents().stream()
            .filter(c -> c instanceof StreamlineProcessor && !(c instanceof RulesProcessor))
            .map(c -> (StreamlineProcessor) c)
            .collect(toList());
    List<RulesProcessor> rulesProcessors = topology.getTopologyDag().getOutputComponents().stream()
            .filter(c -> c instanceof RulesProcessor)
            .map(c -> (RulesProcessor) c)
            .collect(toList());
    // load test case sources for all sources
    List<TopologyTestRunCaseSource> testRunCaseSources = sources.stream()
            .map(s -> catalogService.getTopologyTestRunCaseSourceBySourceId(testCase.getId(), Long.valueOf(s.getId())))
            .collect(toList());
    if (testRunCaseSources.stream().anyMatch(Objects::isNull)) {
        throw new IllegalArgumentException("Not every source registers test records.");
    }
    // load test case sinks for all sinks
    List<TopologyTestRunCaseSink> testRunCaseSinks = sinks.stream()
            .map(s -> catalogService.getTopologyTestRunCaseSinkBySinkId(testCase.getId(), Long.valueOf(s.getId())))
            .collect(toList());
    Map<Long, Map<String, List<Map<String, Object>>>> testRecordsForEachSources = readTestRecordsFromTestCaseSources(testRunCaseSources);
    Map<Long, Integer> occurrenceForEachSources = readOccurrenceFromTestCaseSources(testRunCaseSources);
    Map<Long, Long> sleepMsPerRecordsForEachSources = readSleepMsPerIterationFromTestCaseSources(testRunCaseSources);
    Map<String, List<Map<String, Object>>> expectedOutputRecordsMap = readExpectedRecordsFromTestCaseSinks(sinks, testRunCaseSinks);
    String eventLogFilePath = getTopologyTestRunEventLog(topology);
    // replace each source with a TestRunSource that feeds the registered test records
    Map<String, TestRunSource> testRunSourceMap = sources.stream().collect(toMap(s -> s.getName(), s -> {
        TestRunSource testRunSource = new TestRunSource(s.getOutputStreams(),
                testRecordsForEachSources.get(Long.valueOf(s.getId())),
                occurrenceForEachSources.get(Long.valueOf(s.getId())),
                sleepMsPerRecordsForEachSources.get(Long.valueOf(s.getId())),
                eventLogFilePath);
        testRunSource.setName(s.getName());
        return testRunSource;
    }));
    // replace each sink with a TestRunSink that writes to a per-run result file
    Map<String, TestRunSink> testRunSinkMap = sinks.stream().collect(toMap(s -> s.getName(), s -> {
        String uuid = UUID.randomUUID().toString();
        TestRunSink testRunSink = new TestRunSink(getTopologyTestRunResult(uuid));
        testRunSink.setName(s.getName());
        return testRunSink;
    }));
    Map<String, TestRunProcessor> testRunProcessorMap = processors.stream().collect(toMap(s -> s.getName(), s -> {
        if (s instanceof JoinProcessor) {
            TestRunProcessor testRunProcessor = new TestRunProcessor(s, true, eventLogFilePath);
            testRunProcessor.setName(s.getName());
            return testRunProcessor;
        } else {
            TestRunProcessor testRunProcessor = new TestRunProcessor(s, false, eventLogFilePath);
            testRunProcessor.setName(s.getName());
            return testRunProcessor;
        }
    }));
    Map<String, TestRunRulesProcessor> testRunRulesProcessorMap = rulesProcessors.stream().collect(toMap(s -> s.getName(), s -> {
        TestRunRulesProcessor testRunRulesProcessor = new TestRunRulesProcessor(s, eventLogFilePath);
        testRunRulesProcessor.setName(s.getName());
        return testRunRulesProcessor;
    }));
    // just create the event log file before running the actual test run process
    createEventLogFile(eventLogFilePath);
    TopologyTestRunHistory history = initializeTopologyTestRunHistory(topology, testCase, expectedOutputRecordsMap, eventLogFilePath);
    catalogService.addTopologyTestRunHistory(history);
    Optional<Long> finalDurationSecs = Optional.ofNullable(durationSecs);
    ParallelStreamUtil.runAsync(() -> runTestInBackground(topologyActions, topology, history,
            testRunSourceMap, testRunProcessorMap, testRunRulesProcessorMap, testRunSinkMap,
            expectedOutputRecordsMap, finalDurationSecs), forkJoinPool);
    return history;
}
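A caller-side sketch of how runTest might be invoked is shown below. It is illustrative only: the testRunner, topologyActions, topology, and testCase variables and the 30-second duration are assumptions, not taken from the project.

// Hypothetical caller, assuming the catalog entities have already been loaded.
TopologyTestRunHistory history = testRunner.runTest(topologyActions, topology, testCase, 30L);
// runTest returns right after persisting the history record; the test itself runs
// asynchronously on the fork-join pool, so a caller would typically poll the stored
// history record for completion.
System.out.println("Started test run, history id: " + history.getId());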
Use of com.hortonworks.streamline.streams.catalog.Topology in project streamline by hortonworks.
The class StreamCatalogService, method doImportTopology:
private Topology doImportTopology(Topology newTopology, TopologyData topologyData) throws Exception {
    List<TopologySource> topologySources = topologyData.getSources();
    Map<Long, Long> oldToNewComponentIds = new HashMap<>();
    Map<Long, Long> oldToNewRuleIds = new HashMap<>();
    Map<Long, Long> oldToNewWindowIds = new HashMap<>();
    Map<Long, Long> oldToNewBranchRuleIds = new HashMap<>();
    Map<Long, Long> oldToNewStreamIds = new HashMap<>();
    // import source streams
    for (TopologySource topologySource : topologySources) {
        topologySource.setOutputStreamIds(importOutputStreams(newTopology.getId(), oldToNewStreamIds, topologySource.getOutputStreams()));
        topologySource.setOutputStreams(null);
    }
    // import processor streams
    for (TopologyProcessor topologyProcessor : topologyData.getProcessors()) {
        topologyProcessor.setOutputStreamIds(importOutputStreams(newTopology.getId(), oldToNewStreamIds, topologyProcessor.getOutputStreams()));
        topologyProcessor.setOutputStreams(null);
    }
    // import rules
    for (TopologyRule rule : topologyData.getRules()) {
        Long currentId = rule.getId();
        rule.setId(null);
        TopologyRule addedRule = addRule(newTopology.getId(), rule);
        oldToNewRuleIds.put(currentId, addedRule.getId());
    }
    // import windowed rules
    for (TopologyWindow window : topologyData.getWindows()) {
        Long currentId = window.getId();
        window.setId(null);
        TopologyWindow addedWindow = addWindow(newTopology.getId(), window);
        oldToNewWindowIds.put(currentId, addedWindow.getId());
    }
    // import branch rules
    for (TopologyBranchRule branchRule : topologyData.getBranchRules()) {
        Long currentId = branchRule.getId();
        branchRule.setId(null);
        TopologyBranchRule addedBranchRule = addBranchRule(newTopology.getId(), branchRule);
        oldToNewBranchRuleIds.put(currentId, addedBranchRule.getId());
    }
    // import sources
    for (TopologySource topologySource : topologySources) {
        Long oldComponentId = topologySource.getId();
        topologySource.setId(null);
        topologySource.setTopologyId(newTopology.getId());
        TopologyComponentBundle bundle = getCurrentTopologyComponentBundle(
                TopologyComponentBundle.TopologyComponentType.SOURCE,
                topologyData.getBundleIdToType().get(topologySource.getTopologyComponentBundleId().toString()));
        topologySource.setTopologyComponentBundleId(bundle.getId());
        addTopologySource(newTopology.getId(), topologySource);
        oldToNewComponentIds.put(oldComponentId, topologySource.getId());
    }
    // import processors
    for (TopologyProcessor topologyProcessor : topologyData.getProcessors()) {
        Long oldComponentId = topologyProcessor.getId();
        topologyProcessor.setId(null);
        topologyProcessor.setTopologyId(newTopology.getId());
        TopologyComponentBundle bundle;
        String subType = topologyData.getBundleIdToType().get(topologyProcessor.getTopologyComponentBundleId().toString());
        if (TopologyLayoutConstants.JSON_KEY_CUSTOM_PROCESSOR_SUB_TYPE.equals(subType)) {
            QueryParam queryParam = new QueryParam(CustomProcessorInfo.NAME, topologyProcessor.getConfig().get(CustomProcessorInfo.NAME));
            Collection<TopologyComponentBundle> result = listCustomProcessorBundlesWithFilter(Collections.singletonList(queryParam));
            if (result.size() != 1) {
                throw new IllegalStateException("Not able to find topology component bundle for custom processor :"
                        + topologyProcessor.getConfig().get(CustomProcessorInfo.NAME));
            }
            bundle = result.iterator().next();
        } else {
            bundle = getCurrentTopologyComponentBundle(TopologyComponentBundle.TopologyComponentType.PROCESSOR, subType);
        }
        topologyProcessor.setTopologyComponentBundleId(bundle.getId());
        Optional<Object> ruleListObj = topologyProcessor.getConfig().getAnyOptional(RulesProcessor.CONFIG_KEY_RULES);
        ruleListObj.ifPresent(ruleList -> {
            List<Long> ruleIds = new ObjectMapper().convertValue(ruleList, new TypeReference<List<Long>>() {
            });
            List<Long> updatedRuleIds = new ArrayList<>();
            if (ComponentTypes.RULE.equals(bundle.getSubType()) || ComponentTypes.PROJECTION.equals(bundle.getSubType())) {
                ruleIds.forEach(ruleId -> updatedRuleIds.add(oldToNewRuleIds.get(ruleId)));
            } else if (bundle.getSubType().equals(ComponentTypes.BRANCH)) {
                ruleIds.forEach(ruleId -> updatedRuleIds.add(oldToNewBranchRuleIds.get(ruleId)));
            } else if (bundle.getSubType().equals(ComponentTypes.WINDOW)) {
                ruleIds.forEach(ruleId -> updatedRuleIds.add(oldToNewWindowIds.get(ruleId)));
            }
            topologyProcessor.getConfig().setAny(RulesProcessor.CONFIG_KEY_RULES, updatedRuleIds);
        });
        addTopologyProcessor(newTopology.getId(), topologyProcessor);
        oldToNewComponentIds.put(oldComponentId, topologyProcessor.getId());
    }
    // import sinks
    for (TopologySink topologySink : topologyData.getSinks()) {
        topologySink.setTopologyId(newTopology.getId());
        Long currentId = topologySink.getId();
        topologySink.setId(null);
        TopologyComponentBundle bundle = getCurrentTopologyComponentBundle(
                TopologyComponentBundle.TopologyComponentType.SINK,
                topologyData.getBundleIdToType().get(topologySink.getTopologyComponentBundleId().toString()));
        topologySink.setTopologyComponentBundleId(bundle.getId());
        if (bundle.getSubType().equals(NOTIFICATION)) {
            updateNotifierJarFileName(topologySink);
        }
        addTopologySink(newTopology.getId(), topologySink);
        oldToNewComponentIds.put(currentId, topologySink.getId());
    }
    // import edges
    for (TopologyEdge topologyEdge : topologyData.getEdges()) {
        List<StreamGrouping> streamGroupings = topologyEdge.getStreamGroupings();
        for (StreamGrouping streamGrouping : streamGroupings) {
            Long newStreamId = oldToNewStreamIds.get(streamGrouping.getStreamId());
            streamGrouping.setStreamId(newStreamId);
        }
        topologyEdge.setId(null);
        topologyEdge.setTopologyId(newTopology.getId());
        topologyEdge.setFromId(oldToNewComponentIds.get(topologyEdge.getFromId()));
        topologyEdge.setToId(oldToNewComponentIds.get(topologyEdge.getToId()));
        addTopologyEdge(newTopology.getId(), topologyEdge);
    }
    // import topology editor metadata
    TopologyEditorMetadata topologyEditorMetadata = topologyData.getTopologyEditorMetadata();
    topologyEditorMetadata.setTopologyId(newTopology.getId());
    if (topologyEditorMetadata.getData() != null) {
        TopologyUIData topologyUIData = new ObjectMapper().readValue(topologyEditorMetadata.getData(), TopologyUIData.class);
        topologyUIData.getSources().forEach(c -> c.setId(oldToNewComponentIds.get(c.getId())));
        topologyUIData.getProcessors().forEach(c -> c.setId(oldToNewComponentIds.get(c.getId())));
        topologyUIData.getSinks().forEach(c -> c.setId(oldToNewComponentIds.get(c.getId())));
        topologyEditorMetadata.setData(new ObjectMapper().writeValueAsString(topologyUIData));
    } else {
        topologyEditorMetadata.setData(StringUtils.EMPTY);
    }
    addTopologyEditorMetadata(newTopology.getId(), topologyData.getTopologyEditorMetadata());
    return newTopology;
}
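The method above walks the exported entities in dependency order (streams, rules, windows, branch rules, components, edges) and keeps old-to-new id maps so references can be rewired. The following is a minimal, illustrative sketch of that remapping pattern; ExportedEntity, ExportedEdge, exportedEntities, exportedEdges, and catalogAdd are hypothetical stand-ins, not types or calls from the project.

// Minimal sketch of the old-to-new id remapping used throughout doImportTopology.
Map<Long, Long> oldToNewIds = new HashMap<>();
for (ExportedEntity entity : exportedEntities) {
    Long oldId = entity.getId();
    entity.setId(null);                        // let the catalog assign a fresh id
    ExportedEntity added = catalogAdd(entity); // hypothetical catalog call
    oldToNewIds.put(oldId, added.getId());     // remember the mapping
}
for (ExportedEdge edge : exportedEdges) {
    edge.setFromId(oldToNewIds.get(edge.getFromId()));  // rewire references
    edge.setToId(oldToNewIds.get(edge.getToId()));
}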
Use of com.hortonworks.streamline.streams.catalog.Topology in project streamline by hortonworks.
The class StreamCatalogService, method getTopology:
public Topology getTopology(Long topologyId, Long versionId) {
    Topology topology = new Topology();
    topology.setId(topologyId);
    topology.setVersionId(versionId);
    Topology result = this.dao.get(topology.getStorableKey());
    if (result != null) {
        result.setVersionTimestamp(getVersionTimestamp(versionId));
    }
    return result;
}
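A hedged usage sketch for looking up a specific topology version follows; catalogService, topologyId, and versionId are assumed to be available in the calling context.

// Hypothetical caller: fetch one version of a topology and handle the miss case,
// since getTopology returns null when nothing matches the (id, versionId) key.
Topology topology = catalogService.getTopology(topologyId, versionId);
if (topology == null) {
    throw new IllegalArgumentException("No topology found for id " + topologyId + ", version " + versionId);
}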
Use of com.hortonworks.streamline.streams.catalog.Topology in project streamline by hortonworks.
The class StreamCatalogService, method removeOnlyTopologyEntity:
private Topology removeOnlyTopologyEntity(Long topologyId, Long versionId) {
    Topology topologyForDelete = new Topology();
    topologyForDelete.setId(topologyId);
    topologyForDelete.setVersionId(versionId);
    return dao.remove(topologyForDelete.getStorableKey());
}
Use of com.hortonworks.streamline.streams.catalog.Topology in project streamline by hortonworks.
The class StreamCatalogService, method importTopology:
public Topology importTopology(Long namespaceId, TopologyData topologyData) throws Exception {
    Preconditions.checkNotNull(topologyData);
    Topology newTopology = new Topology();
    try {
        newTopology.setName(topologyData.getTopologyName());
        newTopology.setConfig(topologyData.getConfig());
        newTopology.setNamespaceId(namespaceId);
        addTopology(newTopology);
    } catch (Exception ex) {
        LOG.error("Got exception while importing the topology", ex);
        throw ex;
    }
    try {
        doImportTopology(newTopology, topologyData);
    } catch (Exception ex) {
        LOG.error("Got exception while importing the topology", ex);
        removeTopology(newTopology.getId(), true);
        throw ex;
    }
    return newTopology;
}
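A sketch of importing a previously exported topology is shown below; the file name, the namespaceId variable, and the assumption that the export JSON maps directly onto TopologyData are illustrative, not taken from the project.

// Hypothetical caller of importTopology.
ObjectMapper mapper = new ObjectMapper();
TopologyData topologyData = mapper.readValue(new File("topology-export.json"), TopologyData.class);
Topology imported = catalogService.importTopology(namespaceId, topologyData);
// If doImportTopology fails part-way, importTopology removes the half-imported topology
// and rethrows, so any failure surfaces here as an exception rather than a null result.
System.out.println("Imported topology id: " + imported.getId());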