Example usage of org.apache.samza.operators.OutputStream in the Apache Samza project, taken from the testRepartitionedJoinStreamApplication method of the TestJobGraphJsonGenerator class.
/**
 * Verifies that the generated JSON execution plan for an application with two
 * repartition-joins contains the expected numbers of input streams, operators,
 * source/sink streams, and intermediate streams.
 *
 * <p>The graph looks like the following.
 * Number in parentheses () indicates number of stream partitions.
 * Number in parentheses in quotes ("") indicates expected partition count.
 * Number in square brackets [] indicates operator ID.
 *
 * <pre>
 * input3 (32) -> filter [7] -> partitionBy [8] ("64") -> map [10] -> join [14] -> sendTo(output2) [15] (16)
 *                                                                      |
 * input2 (16) -> partitionBy [3] ("64") -> filter [5] ----------------| -> sink [13]
 *                                                                      |
 * input1 (64) -> map [1] -> join [11] -> sendTo(output1) [12] (8)
 * </pre>
 */
@Test
public void testRepartitionedJoinStreamApplication() throws Exception {
  // Job identity and stream -> (system, physical name) mappings for the app config.
  Map<String, String> configMap = new HashMap<>();
  configMap.put(JobConfig.JOB_NAME, "test-app");
  configMap.put(JobConfig.JOB_DEFAULT_SYSTEM, "test-system");
  StreamTestUtils.addStreamConfigs(configMap, "input1", "system1", "input1");
  StreamTestUtils.addStreamConfigs(configMap, "input2", "system2", "input2");
  StreamTestUtils.addStreamConfigs(configMap, "input3", "system2", "input3");
  StreamTestUtils.addStreamConfigs(configMap, "output1", "system1", "output1");
  StreamTestUtils.addStreamConfigs(configMap, "output2", "system2", "output2");
  Config config = new MapConfig(configMap);

  // Set up external partition counts per system; the planner reads these via
  // the mocked SystemAdmins to infer intermediate-stream partition counts.
  Map<String, Integer> system1Map = new HashMap<>();
  system1Map.put("input1", 64);
  system1Map.put("output1", 8);
  Map<String, Integer> system2Map = new HashMap<>();
  system2Map.put("input2", 16);
  system2Map.put("input3", 32);
  system2Map.put("output2", 16);
  SystemAdmin systemAdmin1 = createSystemAdmin(system1Map);
  SystemAdmin systemAdmin2 = createSystemAdmin(system2Map);
  SystemAdmins systemAdmins = mock(SystemAdmins.class);
  when(systemAdmins.getSystemAdmin("system1")).thenReturn(systemAdmin1);
  when(systemAdmins.getSystemAdmin("system2")).thenReturn(systemAdmin2);
  StreamManager streamManager = new StreamManager(systemAdmins);

  // Build the application graph described in the diagram above. NOTE: operator IDs
  // are assigned in statement order, so the order of these calls is significant.
  StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
    KVSerde<Object, Object> kvSerde = new KVSerde<>(new NoOpSerde(), new NoOpSerde());
    String mockSystemFactoryClass = "factory.class.name";
    GenericSystemDescriptor system1 = new GenericSystemDescriptor("system1", mockSystemFactoryClass);
    GenericSystemDescriptor system2 = new GenericSystemDescriptor("system2", mockSystemFactoryClass);
    GenericInputDescriptor<KV<Object, Object>> input1Descriptor = system1.getInputDescriptor("input1", kvSerde);
    GenericInputDescriptor<KV<Object, Object>> input2Descriptor = system2.getInputDescriptor("input2", kvSerde);
    GenericInputDescriptor<KV<Object, Object>> input3Descriptor = system2.getInputDescriptor("input3", kvSerde);
    GenericOutputDescriptor<KV<Object, Object>> output1Descriptor = system1.getOutputDescriptor("output1", kvSerde);
    GenericOutputDescriptor<KV<Object, Object>> output2Descriptor = system2.getOutputDescriptor("output2", kvSerde);
    MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor).map(m -> m);
    MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor).partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1").filter(m -> true);
    MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor).filter(m -> true).partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2").map(m -> m);
    OutputStream<KV<Object, Object>> outputStream1 = appDesc.getOutputStream(output1Descriptor);
    OutputStream<KV<Object, Object>> outputStream2 = appDesc.getOutputStream(output2Descriptor);
    messageStream1.join(messageStream2, (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1").sendTo(outputStream1);
    messageStream2.sink((message, collector, coordinator) -> {
    });
    messageStream3.join(messageStream2, (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2").sendTo(outputStream2);
  }, config);

  ExecutionPlan plan = new ExecutionPlanner(config, streamManager).plan(graphSpec);
  String json = plan.getPlanAsJson();

  // Deserialize the plan JSON and verify the graph shape. Both joins land in a
  // single job, so job 0 sees all 5 streams (3 sources + 2 intermediate).
  ObjectMapper mapper = new ObjectMapper();
  JobGraphJsonGenerator.JobGraphJson nodes = mapper.readValue(json, JobGraphJsonGenerator.JobGraphJson.class);
  assertEquals(5, nodes.jobs.get(0).operatorGraph.inputStreams.size());
  assertEquals(11, nodes.jobs.get(0).operatorGraph.operators.size());
  assertEquals(3, nodes.sourceStreams.size());
  assertEquals(2, nodes.sinkStreams.size());
  assertEquals(2, nodes.intermediateStreams.size());
}
Example usage of org.apache.samza.operators.OutputStream in the Apache Samza project, taken from the testMaxPartitionLimit method of the TestExecutionPlanner class.
/**
 * Verifies that the partition count inferred for an intermediate stream is
 * clamped to {@code IntermediateStreamManager.MAX_INFERRED_PARTITIONS}.
 */
@Test
public void testMaxPartitionLimit() {
  int expectedPartitions = IntermediateStreamManager.MAX_INFERRED_PARTITIONS;
  ExecutionPlanner executionPlanner = new ExecutionPlanner(config, streamManager);

  // Single pipeline: input4 -> partitionBy("p1") -> map -> output1.
  StreamApplicationDescriptorImpl appSpec = new StreamApplicationDescriptorImpl(appDesc -> {
    MessageStream<KV<Object, Object>> source = appDesc.getInputStream(input4Descriptor);
    OutputStream<KV<Object, Object>> sink = appDesc.getOutputStream(output1Descriptor);
    source
        .partitionBy(kv -> kv.key, kv -> kv.value, mock(KVSerde.class), "p1")
        .map(kv -> kv)
        .sendTo(sink);
  }, config);

  JobGraph jobGraph = (JobGraph) executionPlanner.plan(appSpec);

  // Every intermediate edge (here, the "p1" repartition stream) should have
  // its inferred partition count capped at MAX_INFERRED_PARTITIONS.
  jobGraph.getIntermediateStreams()
      .forEach(intermediateEdge -> assertEquals(expectedPartitions, intermediateEdge.getPartitionCount()));
}
Aggregations