Use of org.apache.samza.serializers.LongSerde in project samza by apache.
The class LogicalAggregateTranslator, method translate.
void translate(final LogicalAggregate aggregate, final TranslatorContext context) {
  validateAggregateFunctions(aggregate);
  MessageStream<SamzaSqlRelMessage> inputStream = context.getMessageStream(aggregate.getInput().getId());
  // At this point, the assumption is that only the count function is supported.
  SupplierFunction<Long> initialValue = () -> (long) 0;
  FoldLeftFunction<SamzaSqlRelMessage, Long> foldCountFn = (m, c) -> c + 1;
  final ArrayList<String> aggFieldNames = getAggFieldNames(aggregate);
  // Count messages per key in a tumbling window; LongSerde serializes the
  // running count in the changelog-backed window store.
  MessageStream<SamzaSqlRelMessage> outputStream = inputStream
      .map(new TranslatorInputMetricsMapFunction(logicalOpId))
      .window(Windows.keyedTumblingWindow(m -> m,
          Duration.ofMillis(context.getExecutionContext().getSamzaSqlApplicationConfig().getWindowDurationMs()),
          initialValue, foldCountFn,
          new SamzaSqlRelMessageSerdeFactory.SamzaSqlRelMessageSerde(),
          new LongSerde())
          .setAccumulationMode(AccumulationMode.DISCARDING),
          changeLogStorePrefix + "_tumblingWindow_" + logicalOpId)
      .map(windowPane -> {
        // Append the count (the window pane's message) as the aggregate field.
        List<String> fieldNames = windowPane.getKey().getKey().getSamzaSqlRelRecord().getFieldNames();
        List<Object> fieldValues = windowPane.getKey().getKey().getSamzaSqlRelRecord().getFieldValues();
        fieldNames.add(aggFieldNames.get(0));
        fieldValues.add(windowPane.getMessage());
        return new SamzaSqlRelMessage(fieldNames, fieldValues, new SamzaSqlRelMsgMetadata(0L, 0L));
      });
  context.registerMessageStream(aggregate.getId(), outputStream);
  outputStream.map(new TranslatorOutputMetricsMapFunction(logicalOpId));
}
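To see the same keyedTumblingWindow/LongSerde pairing outside the SQL translator, here is a minimal sketch of a per-key count window, assuming the Samza 1.x high-level API; the class name KeyedCountSketch, the "words" stream, and the factory class name are hypothetical, not from the Samza sources.

import java.time.Duration;
import org.apache.samza.application.StreamApplication;
import org.apache.samza.application.descriptors.StreamApplicationDescriptor;
import org.apache.samza.operators.windows.Windows;
import org.apache.samza.serializers.LongSerde;
import org.apache.samza.serializers.StringSerde;
import org.apache.samza.system.descriptors.GenericInputDescriptor;
import org.apache.samza.system.descriptors.GenericSystemDescriptor;

public class KeyedCountSketch implements StreamApplication {
  @Override
  public void describe(StreamApplicationDescriptor appDesc) {
    // Hypothetical system and stream names; the point is the serde wiring.
    GenericSystemDescriptor sd = new GenericSystemDescriptor("kafka", "mockSystemFactoryClass");
    GenericInputDescriptor<String> words = sd.getInputDescriptor("words", new StringSerde());
    appDesc.getInputStream(words)
        .window(Windows.keyedTumblingWindow(
            w -> w,                     // key each message by itself
            Duration.ofMinutes(1),
            () -> 0L,                   // initial count per key per window
            (msg, count) -> count + 1,  // fold function: increment on each message
            new StringSerde(),          // key serde
            new LongSerde()),           // window-value serde: the running count
            "word-counts")
        // WindowPane<K, Long>: the key is the word, the message is the count.
        .map(pane -> pane.getKey().getKey() + ": " + pane.getMessage());
  }
}

Each emitted WindowPane exposes the key via getKey().getKey() and the folded count via getMessage(), which is why the translator above appends windowPane.getMessage() as the aggregate field value.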
Use of org.apache.samza.serializers.LongSerde in project samza by apache.
The class TestTimeSeriesKeySerde, method testLongTimeSeriesKey.
@Test
public void testLongTimeSeriesKey() {
  TimeSeriesKey<Long> storeKey = new TimeSeriesKey<>(30L, 1, 23);
  TimeSeriesKeySerde<Long> serde = new TimeSeriesKeySerde<>(new LongSerde());
  byte[] serializedBytes = serde.toBytes(storeKey);
  TimeSeriesKey<Long> deserializedTimeSeriesKey = serde.fromBytes(serializedBytes);
  assertEquals(storeKey.getKey(), deserializedTimeSeriesKey.getKey());
  assertEquals(storeKey.getSeqNum(), deserializedTimeSeriesKey.getSeqNum());
  assertEquals(storeKey.getTimestamp(), deserializedTimeSeriesKey.getTimestamp());
  assertEquals(storeKey, deserializedTimeSeriesKey);
}
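The test above exercises LongSerde indirectly, wrapped in a TimeSeriesKeySerde. For reference, a minimal sketch of the direct round trip under the same JUnit 4 conventions; the test class and method names are hypothetical.

import static org.junit.Assert.assertEquals;

import org.apache.samza.serializers.LongSerde;
import org.junit.Test;

public class TestLongSerdeRoundTrip {
  @Test
  public void testRoundTrip() {
    LongSerde serde = new LongSerde();
    // toBytes and fromBytes must be inverses; TimeSeriesKeySerde relies on
    // this for the key portion of its composite encoding.
    byte[] bytes = serde.toBytes(30L);
    assertEquals(Long.valueOf(30L), serde.fromBytes(bytes));
  }
}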
Use of org.apache.samza.serializers.LongSerde in project samza by apache.
The class TestJobGraphJsonGenerator, method testRepartitionedWindowStreamApplication.
@Test
public void testRepartitionedWindowStreamApplication() throws Exception {
  Map<String, String> configMap = new HashMap<>();
  configMap.put(JobConfig.JOB_NAME, "test-app");
  configMap.put(JobConfig.JOB_DEFAULT_SYSTEM, "test-system");
  StreamTestUtils.addStreamConfigs(configMap, "PageView", "hdfs", "hdfs:/user/dummy/PageViewEvent");
  StreamTestUtils.addStreamConfigs(configMap, "PageViewCount", "kafka", "PageViewCount");
  Config config = new MapConfig(configMap);

  // set up external partition counts
  Map<String, Integer> system1Map = new HashMap<>();
  system1Map.put("hdfs:/user/dummy/PageViewEvent", 512);
  Map<String, Integer> system2Map = new HashMap<>();
  system2Map.put("PageViewCount", 16);
  SystemAdmin systemAdmin1 = createSystemAdmin(system1Map);
  SystemAdmin systemAdmin2 = createSystemAdmin(system2Map);
  SystemAdmins systemAdmins = mock(SystemAdmins.class);
  when(systemAdmins.getSystemAdmin("hdfs")).thenReturn(systemAdmin1);
  when(systemAdmins.getSystemAdmin("kafka")).thenReturn(systemAdmin2);
  StreamManager streamManager = new StreamManager(systemAdmins);

  StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
    KVSerde<String, PageViewEvent> pvSerde = KVSerde.of(new StringSerde(), new JsonSerdeV2<>(PageViewEvent.class));
    GenericSystemDescriptor isd = new GenericSystemDescriptor("hdfs", "mockSystemFactoryClass");
    GenericInputDescriptor<KV<String, PageViewEvent>> pageView = isd.getInputDescriptor("PageView", pvSerde);
    KVSerde<String, Long> pvcSerde = KVSerde.of(new StringSerde(), new LongSerde());
    GenericSystemDescriptor osd = new GenericSystemDescriptor("kafka", "mockSystemFactoryClass");
    GenericOutputDescriptor<KV<String, Long>> pageViewCount = osd.getOutputDescriptor("PageViewCount", pvcSerde);
    MessageStream<KV<String, PageViewEvent>> inputStream = appDesc.getInputStream(pageView);
    OutputStream<KV<String, Long>> outputStream = appDesc.getOutputStream(pageViewCount);
    // repartition by country, count per country in 10-second tumbling windows,
    // and emit (country, count) pairs
    inputStream
        .partitionBy(kv -> kv.getValue().getCountry(), kv -> kv.getValue(), pvSerde, "keyed-by-country")
        .window(Windows.keyedTumblingWindow(kv -> kv.getValue().getCountry(), Duration.ofSeconds(10L),
            () -> 0L, (m, c) -> c + 1L, new StringSerde(), new LongSerde()), "count-by-country")
        .map(pane -> new KV<>(pane.getKey().getKey(), pane.getMessage()))
        .sendTo(outputStream);
  }, config);

  ExecutionPlanner planner = new ExecutionPlanner(config, streamManager);
  ExecutionPlan plan = planner.plan(graphSpec);
  String json = plan.getPlanAsJson();
  System.out.println(json);

  // deserialize the JSON plan and verify its shape
  ObjectMapper mapper = new ObjectMapper();
  JobGraphJsonGenerator.JobGraphJson nodes = mapper.readValue(json, JobGraphJsonGenerator.JobGraphJson.class);
  JobGraphJsonGenerator.OperatorGraphJson operatorGraphJson = nodes.jobs.get(0).operatorGraph;
  assertEquals(2, operatorGraphJson.inputStreams.size());
  assertEquals(4, operatorGraphJson.operators.size());
  assertEquals(1, nodes.sourceStreams.size());
  assertEquals(1, nodes.sinkStreams.size());
  assertEquals(1, nodes.intermediateStreams.size());
  // verify the partitionBy op outputs to the intermediate stream of the same id
  assertEquals("test-app-1-partition_by-keyed-by-country",
      operatorGraphJson.operators.get("test-app-1-partition_by-keyed-by-country").get("outputStreamId"));
  assertEquals("PageViewCount",
      operatorGraphJson.operators.get("test-app-1-send_to-5").get("outputStreamId"));
}
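One detail worth noting from the serde wiring above: KVSerde.of(new StringSerde(), new LongSerde()) pairs a key serde with a LongSerde value serde for the PageViewCount stream. Below is a minimal sketch of what that pairing exposes, assuming the KVSerde accessors of the Samza 1.x API; the class name is hypothetical, and the marker-serde behavior noted in the comment is my reading of the API, not something the test above demonstrates.

import org.apache.samza.serializers.KVSerde;
import org.apache.samza.serializers.LongSerde;
import org.apache.samza.serializers.Serde;
import org.apache.samza.serializers.StringSerde;

public class PageViewCountSerdeSketch {
  public static void main(String[] args) {
    // The same pairing as pvcSerde in the test above.
    KVSerde<String, Long> pvcSerde = KVSerde.of(new StringSerde(), new LongSerde());
    // KVSerde acts as a marker that the framework unwraps into its parts, so
    // serialize through the component serdes rather than the KVSerde itself.
    Serde<String> keySerde = pvcSerde.getKeySerde();
    Serde<Long> valueSerde = pvcSerde.getValueSerde();
    byte[] keyBytes = keySerde.toBytes("us");
    byte[] countBytes = valueSerde.toBytes(7L);
    System.out.println(keyBytes.length + " key bytes, " + countBytes.length + " count bytes");
  }
}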