Use of org.finos.waltz.schema.tables.PhysicalFlow.PHYSICAL_FLOW in project waltz by khartec.
The create method of the PhysicalFlowParticipantGenerator class.
@Override
public Map<String, Integer> create(ApplicationContext ctx) {
    DSLContext dsl = getDsl(ctx);

    log("---creating demo records");

    Map<Criticality, Integer> criticalityCompletionProbabilities = newHashMap(
            Criticality.VERY_HIGH, 80,
            Criticality.HIGH, 70,
            Criticality.MEDIUM, 50,
            Criticality.LOW, 30,
            Criticality.NONE, 10,
            Criticality.UNKNOWN, 10);

    Map<Long, List<Long>> serverIdsByAppId = dsl
            .select(SERVER_USAGE.ENTITY_ID, SERVER_USAGE.SERVER_ID)
            .from(SERVER_USAGE)
            .where(SERVER_USAGE.ENTITY_KIND.eq(EntityKind.APPLICATION.name()))
            .fetch()
            .intoGroups(SERVER_USAGE.ENTITY_ID, SERVER_USAGE.SERVER_ID);

    Collection<Long> allServerIds = SetUtilities.unionAll(serverIdsByAppId.values());

    List<PhysicalFlowParticipantRecord> records = dsl
            .select(PHYSICAL_FLOW.ID,
                    PHYSICAL_FLOW.CRITICALITY,
                    LOGICAL_FLOW.SOURCE_ENTITY_ID,
                    LOGICAL_FLOW.TARGET_ENTITY_ID)
            .from(PHYSICAL_FLOW)
            .innerJoin(LOGICAL_FLOW).on(LOGICAL_FLOW.ID.eq(PHYSICAL_FLOW.LOGICAL_FLOW_ID))
            .where(PHYSICAL_FLOW.IS_REMOVED.isFalse())
            .and(LOGICAL_FLOW.ENTITY_LIFECYCLE_STATUS.ne(EntityLifecycleStatus.REMOVED.name()))
            .fetch()
            .stream()
            .map(r -> tuple(
                    r.get(PHYSICAL_FLOW.ID),
                    Criticality.parse(r.get(PHYSICAL_FLOW.CRITICALITY), x -> Criticality.UNKNOWN),
                    r.get(LOGICAL_FLOW.SOURCE_ENTITY_ID),
                    r.get(LOGICAL_FLOW.TARGET_ENTITY_ID)))
            // filter based on criticality probability
            .filter(t -> criticalityCompletionProbabilities.get(t.v2) > rnd.nextInt(100))
            // flat map to tuples of (flow_id, source_id/target_id, server_ids, p_kind)
            .flatMap(t -> Stream.of(
                    tuple(t.v1, t.v3, serverIdsByAppId.getOrDefault(t.v3, emptyList()), ParticipationKind.SOURCE),
                    tuple(t.v1, t.v4, serverIdsByAppId.getOrDefault(t.v4, emptyList()), ParticipationKind.TARGET)))
            // no servers, therefore filter out
            .filter(t -> !t.v3.isEmpty())
            // even if we have servers, some may not be mapped
            .filter(t -> rnd.nextInt(100) < 80)
            .map(t -> t.map3(associatedServerIds -> randomPick(
                    rnd.nextInt(100) < 90
                            ? associatedServerIds  // most of the time we'll go with associated servers
                            : allServerIds)))      // ...but occasionally we'll go with anything to simulate messy data
            .map(t -> {
                PhysicalFlowParticipantRecord r = new PhysicalFlowParticipantRecord();
                r.setPhysicalFlowId(t.v1);
                r.setParticipantEntityId(t.v3);
                r.setParticipantEntityKind(EntityKind.SERVER.name());
                r.setKind(t.v4.name());
                r.setDescription("Test data");
                r.setLastUpdatedAt(nowUtcTimestamp());
                r.setLastUpdatedBy("admin");
                r.setProvenance(SAMPLE_DATA_PROVENANCE);
                return r;
            })
            .collect(toList());

    log("About to insert %d records", records.size());
    int[] rcs = dsl.batchInsert(records).execute();
    log("Inserted %d records", rcs.length);

    return null;
}
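
The core of the generator is the two-stage random sampling: the flow's criticality maps to a completion probability that decides whether a participant record is produced at all, and a second roll decides whether the picked server comes from the application's associated servers (90% of the time) or from any known server, to simulate messy data. Below is a minimal, self-contained sketch of that pattern under stated assumptions: the Criticality values, the probability map and the 90/10 split mirror the snippet above, while the class name, the randomPick helper and the hard-coded server id lists are purely illustrative.

import java.util.*;

public class CriticalitySamplingSketch {

    enum Criticality { VERY_HIGH, HIGH, MEDIUM, LOW, NONE, UNKNOWN }

    private static final Random rnd = new Random();

    // same probabilities as the generator: higher criticality -> more likely to get a participant record
    private static final Map<Criticality, Integer> COMPLETION_PROBABILITIES = new EnumMap<>(Map.of(
            Criticality.VERY_HIGH, 80,
            Criticality.HIGH, 70,
            Criticality.MEDIUM, 50,
            Criticality.LOW, 30,
            Criticality.NONE, 10,
            Criticality.UNKNOWN, 10));

    // stand-in for the generator's randomPick: choose one element of a collection at random
    private static <T> T randomPick(Collection<T> xs) {
        return new ArrayList<>(xs).get(rnd.nextInt(xs.size()));
    }

    public static void main(String[] args) {
        List<Long> associatedServerIds = List.of(101L, 102L);       // servers actually used by the app (illustrative ids)
        List<Long> allServerIds = List.of(101L, 102L, 201L, 202L);  // every known server (illustrative ids)

        Criticality criticality = Criticality.HIGH;

        // first roll: does this flow get a participant at all?
        if (COMPLETION_PROBABILITIES.get(criticality) > rnd.nextInt(100)) {
            // second roll: 90% of the time pick an associated server, otherwise any server (messy data)
            Collection<Long> candidates = rnd.nextInt(100) < 90 ? associatedServerIds : allServerIds;
            long pickedServerId = randomPick(candidates);
            System.out.println("participant record would reference server " + pickedServerId);
        } else {
            System.out.println("no participant generated for this flow");
        }
    }
}

Run repeatedly, a HIGH-criticality flow produces a participant roughly 70% of the time, which is why the generated sample data is deliberately incomplete rather than fully populated.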