Use of com.hazelcast.function.SupplierEx in project hazelcast by hazelcast.
Class AggregateAbstractPhysicalRule, method aggregateOperation().
protected static AggregateOperation<?, JetSqlRow> aggregateOperation(
        RelDataType inputType,
        ImmutableBitSet groupSet,
        List<AggregateCall> aggregateCalls
) {
    List<QueryDataType> operandTypes = OptUtils.schema(inputType).getTypes();
    List<SupplierEx<SqlAggregation>> aggregationProviders = new ArrayList<>();
    List<FunctionEx<JetSqlRow, Object>> valueProviders = new ArrayList<>();
    for (Integer groupIndex : groupSet.toList()) {
        aggregationProviders.add(ValueSqlAggregation::new);
        // getMaybeSerialized is safe for ValueAggr because it only passes the value on
        valueProviders.add(new RowGetMaybeSerializedFn(groupIndex));
    }
    for (AggregateCall aggregateCall : aggregateCalls) {
        boolean distinct = aggregateCall.isDistinct();
        List<Integer> aggregateCallArguments = aggregateCall.getArgList();
        SqlKind kind = aggregateCall.getAggregation().getKind();
        switch (kind) {
            case COUNT:
                if (distinct) {
                    int countIndex = aggregateCallArguments.get(0);
                    aggregationProviders.add(new AggregateCountSupplier(true, true));
                    // getMaybeSerialized is safe for COUNT because the aggregation only looks whether it is null or not
                    valueProviders.add(new RowGetMaybeSerializedFn(countIndex));
                } else if (aggregateCallArguments.size() == 1) {
                    int countIndex = aggregateCallArguments.get(0);
                    aggregationProviders.add(new AggregateCountSupplier(true, false));
                    valueProviders.add(new RowGetMaybeSerializedFn(countIndex));
                } else {
                    aggregationProviders.add(new AggregateCountSupplier(false, false));
                    valueProviders.add(NullFunction.INSTANCE);
                }
                break;
            case MIN:
                int minIndex = aggregateCallArguments.get(0);
                aggregationProviders.add(MinSqlAggregation::new);
                valueProviders.add(new RowGetFn(minIndex));
                break;
            case MAX:
                int maxIndex = aggregateCallArguments.get(0);
                aggregationProviders.add(MaxSqlAggregation::new);
                valueProviders.add(new RowGetFn(maxIndex));
                break;
            case SUM:
                int sumIndex = aggregateCallArguments.get(0);
                QueryDataType sumOperandType = operandTypes.get(sumIndex);
                aggregationProviders.add(new AggregateSumSupplier(distinct, sumOperandType));
                valueProviders.add(new RowGetFn(sumIndex));
                break;
            case AVG:
                int avgIndex = aggregateCallArguments.get(0);
                QueryDataType avgOperandType = operandTypes.get(avgIndex);
                aggregationProviders.add(new AggregateAvgSupplier(distinct, avgOperandType));
                valueProviders.add(new RowGetFn(avgIndex));
                break;
            default:
                throw QueryException.error("Unsupported aggregation function: " + kind);
        }
    }
    return AggregateOperation
            .withCreate(new AggregateCreateSupplier(aggregationProviders))
            .andAccumulate(new AggregateAccumulateFunction(valueProviders))
            .andCombine(AggregateCombineFunction.INSTANCE)
            .andExportFinish(AggregateExportFinishFunction.INSTANCE);
}
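Note on the pattern: SupplierEx is Hazelcast's Serializable flavor of java.util.function.Supplier, which is why plain constructor references such as ValueSqlAggregation::new and MinSqlAggregation::new can be collected into aggregationProviders and later shipped with the job to the members that run the aggregation. A minimal, self-contained sketch of that idea follows; RunningCount is a hypothetical stand-in for the SqlAggregation implementations, not a Hazelcast class.

import com.hazelcast.function.SupplierEx;

import java.util.ArrayList;
import java.util.List;

public class SupplierExSketch {

    // Hypothetical accumulator type, standing in for the SqlAggregation implementations above.
    static class RunningCount {
        long value;

        void accumulate() {
            value++;
        }
    }

    public static void main(String[] args) {
        List<SupplierEx<RunningCount>> providers = new ArrayList<>();
        // A constructor reference targets SupplierEx<RunningCount>; because SupplierEx
        // extends Serializable, the resulting lambda can travel with a job definition.
        providers.add(RunningCount::new);

        RunningCount state = providers.get(0).get(); // creates a fresh accumulator instance
        state.accumulate();
        System.out.println(state.value); // prints 1
    }
}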
Use of com.hazelcast.function.SupplierEx in project hazelcast by hazelcast.
Class JmsSourceBuilder, method build().
/**
* Creates and returns the JMS {@link StreamSource} with the supplied
* components and the projection function {@code projectionFn}.
* <p>
* The given function must be stateless.
*
* @param projectionFn the function which creates the output object from each
* message
* @param <T> the type of the items the source emits
*/
@Nonnull
public <T> StreamSource<T> build(@Nonnull FunctionEx<? super Message, ? extends T> projectionFn) {
    String usernameLocal = username;
    String passwordLocal = password;
    String destinationLocal = destinationName;
    ProcessingGuarantee maxGuaranteeLocal = maxGuarantee;
    @SuppressWarnings("UnnecessaryLocalVariable")
    boolean isTopicLocal = isTopic;
    if (connectionFn == null) {
        connectionFn = factory -> requireNonNull(
                usernameLocal != null || passwordLocal != null
                        ? factory.createConnection(usernameLocal, passwordLocal)
                        : factory.createConnection());
    }
    if (consumerFn == null) {
        checkNotNull(destinationLocal, "neither consumerFn nor destinationName set");
        consumerFn = session -> session.createConsumer(isTopicLocal
                ? session.createTopic(destinationLocal)
                : session.createQueue(destinationLocal));
        if (isTopic) {
            // the user didn't specify a custom consumerFn and we know we're using a non-durable consumer
            // for a topic - there's no point in using any guarantee, see `maxGuarantee`
            maxGuaranteeLocal = ProcessingGuarantee.NONE;
        }
    }
    ProcessingGuarantee maxGuaranteeFinal = maxGuaranteeLocal;
    FunctionEx<? super ConnectionFactory, ? extends Connection> connectionFnLocal = connectionFn;
    @SuppressWarnings("UnnecessaryLocalVariable")
    SupplierEx<? extends ConnectionFactory> factorySupplierLocal = factorySupplier;
    SupplierEx<? extends Connection> newConnectionFn = () -> connectionFnLocal.apply(factorySupplierLocal.get());
    FunctionEx<? super Session, ? extends MessageConsumer> consumerFnLocal = consumerFn;
    boolean isSharedConsumerLocal = isSharedConsumer;
    FunctionEx<? super Message, ?> messageIdFnLocal = messageIdFn;
    FunctionEx<EventTimePolicy<? super T>, ProcessorMetaSupplier> metaSupplierFactory = policy -> isTopicLocal
            ? streamJmsTopicP(destinationLocal, isSharedConsumerLocal, maxGuaranteeFinal, policy,
                    newConnectionFn, consumerFnLocal, messageIdFnLocal, projectionFn)
            : streamJmsQueueP(destinationLocal, maxGuaranteeFinal, policy,
                    newConnectionFn, consumerFnLocal, messageIdFnLocal, projectionFn);
    return Sources.streamFromProcessorWithWatermarks(sourceName(), true, metaSupplierFactory);
}
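Seen from the caller's side, the factorySupplier handed to the builder is also a SupplierEx, so the ConnectionFactory is created lazily on the members rather than serialized from the client. Below is a hedged usage sketch, assuming Hazelcast Jet's Sources.jmsQueueBuilder, the javax.jms API, and an ActiveMQ client on the classpath; the broker URL and queue name are placeholders.

import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.Sources;
import com.hazelcast.jet.pipeline.StreamSource;

import org.apache.activemq.ActiveMQConnectionFactory;

import javax.jms.TextMessage;

public class JmsSourceSketch {

    public static void main(String[] args) {
        // The factory supplier is a SupplierEx<ConnectionFactory>: it is serialized with
        // the job and only invoked on the members, where the connection is actually opened.
        StreamSource<String> source = Sources
                .jmsQueueBuilder(() -> new ActiveMQConnectionFactory("tcp://localhost:61616"))
                .destinationName("myQueue")
                .build(msg -> ((TextMessage) msg).getText());

        Pipeline p = Pipeline.create();
        p.readFrom(source)
         .withoutTimestamps()
         .writeTo(Sinks.logger());
        // Submit with hz.getJet().newJob(p) on an instance that has the Jet engine enabled.
    }
}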
Use of com.hazelcast.function.SupplierEx in project hazelcast by hazelcast.
Class SlidingWindowP_CoGroupTest, method test().
@Test
@SuppressWarnings("unchecked")
public void test() {
    SupplierEx supplier = Processors.aggregateToSlidingWindowP(
            asList(Functions.<String>entryKey(), entryKey()),
            asList(t -> 1L, t -> 1L),
            TimestampKind.FRAME,
            tumblingWinPolicy(1),
            0L,
            aggregateOperation2(
                    AggregateOperations.<Entry<String, String>>toList(),
                    AggregateOperations.<Entry<String, String>>toList()),
            (start, end, key, result, isEarly) -> result(end, key, result.f0(), result.f1()));
    Entry<String, String> entry1 = entry("k1", "a");
    Entry<String, String> entry2 = entry("k2", "b");
    Entry<String, String> entry3 = entry("k1", "c");
    Entry<String, String> entry4 = entry("k3", "d");
    Entry<String, String> entry5 = entry("k1", "e");
    TestSupport.verifyProcessor(supplier)
               .inputs(asList(
                       asList(entry1, entry2),
                       asList(entry3, entry4, entry5)))
               .expectOutput(asList(
                       result(1, "k1", singletonList(entry1), asList(entry3, entry5)),
                       result(1, "k2", singletonList(entry2), emptyList()),
                       result(1, "k3", emptyList(), singletonList(entry4))));
}
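The supplier created by Processors.aggregateToSlidingWindowP is a plain SupplierEx of processors, which is exactly what TestSupport.verifyProcessor consumes. A smaller sketch of the same harness follows, using a simple mapping processor instead of the sliding-window co-group; the doubling function is made up for illustration.

import com.hazelcast.function.SupplierEx;
import com.hazelcast.jet.core.Processor;
import com.hazelcast.jet.core.processor.Processors;
import com.hazelcast.jet.core.test.TestSupport;

import static java.util.Arrays.asList;

public class ProcessorSupplierSketch {

    public static void main(String[] args) {
        // Processors.mapP returns a SupplierEx<Processor>, the same shape the test above
        // obtains from Processors.aggregateToSlidingWindowP.
        SupplierEx<Processor> supplier = Processors.<Integer, Integer>mapP(i -> i * 2);

        TestSupport.verifyProcessor(supplier)
                   .input(asList(1, 2, 3))
                   .expectOutput(asList(2, 4, 6));
    }
}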
Use of com.hazelcast.function.SupplierEx in project hazelcast by hazelcast.
Class ManagedContextTest, method testSources().
private void testSources(SupplierEx<? extends AnotherSourceContext> sourceSupplier) {
    BatchSource<String> src = SourceBuilder
            .batch("source", c -> sourceSupplier.get())
            .<String>fillBufferFn((c, b) -> {
                b.add(c.injectedValue);
                b.close();
            })
            .build();
    Pipeline pipeline = Pipeline.create();
    pipeline.readFrom(src).writeTo(assertAnyOrder(singletonList(INJECTED_VALUE)));
    hz.getJet().newJob(pipeline).join();
}
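The SupplierEx parameter is what keeps the source context pluggable: the test can inject different context factories while the pipeline shape stays the same. A minimal sketch of that pattern follows; GreetingContext and the source name are hypothetical stand-ins for AnotherSourceContext and the test's fixtures.

import com.hazelcast.function.SupplierEx;
import com.hazelcast.jet.pipeline.BatchSource;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.SourceBuilder;

public class BatchSourceSketch {

    // Hypothetical context type, standing in for AnotherSourceContext in the test.
    static class GreetingContext {
        final String value = "hello";
    }

    static BatchSource<String> greetingSource(SupplierEx<? extends GreetingContext> contextSupplier) {
        return SourceBuilder
                .batch("greeting-source", c -> contextSupplier.get())
                .<String>fillBufferFn((ctx, buf) -> {
                    buf.add(ctx.value);
                    buf.close(); // emit a single item, then mark the batch as done
                })
                .build();
    }

    public static void main(String[] args) {
        Pipeline p = Pipeline.create();
        p.readFrom(greetingSource(GreetingContext::new))
         .writeTo(Sinks.logger());
        // Submit with hz.getJet().newJob(p).join() on an instance that has the Jet engine enabled.
    }
}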
Use of com.hazelcast.function.SupplierEx in project hazelcast by hazelcast.
Class SinkStressTestUtil, method test_withRestarts().
public static void test_withRestarts(
        @Nonnull HazelcastInstance instance,
        @Nonnull ILogger logger,
        @Nonnull Sink<Integer> sink,
        boolean graceful,
        boolean exactlyOnce,
        @Nonnull SupplierEx<List<Integer>> actualItemsSupplier
) {
    int numItems = 1000;
    Pipeline p = Pipeline.create();
    p.readFrom(SourceBuilder
                .stream("src", procCtx -> new int[] { procCtx.globalProcessorIndex() == 0 ? 0 : Integer.MAX_VALUE })
                .<Integer>fillBufferFn((ctx, buf) -> {
                    if (ctx[0] < numItems) {
                        buf.add(ctx[0]++);
                        sleepMillis(5);
                    }
                })
                .distributed(1)
                .createSnapshotFn(ctx -> ctx[0] < Integer.MAX_VALUE ? ctx[0] : null)
                .restoreSnapshotFn((ctx, state) -> ctx[0] = ctx[0] != Integer.MAX_VALUE ? state.get(0) : Integer.MAX_VALUE)
                .build())
     .withoutTimestamps()
     .peek()
     .writeTo(sink);
    JobConfig config = new JobConfig()
            .setProcessingGuarantee(exactlyOnce ? EXACTLY_ONCE : AT_LEAST_ONCE)
            .setSnapshotIntervalMillis(50);
    JobProxy job = (JobProxy) instance.getJet().newJob(p, config);
    long endTime = System.nanoTime() + SECONDS.toNanos(TEST_TIMEOUT_SECONDS);
    int lastCount = 0;
    String expectedRows = IntStream.range(0, numItems)
            .mapToObj(i -> i + (exactlyOnce ? "=1" : ""))
            .collect(joining("\n"));
    // We'll restart once, then restart again after a short sleep (possibly during initialization),
    // and then assert some output so that the test isn't constantly restarting without any progress
    Long lastExecutionId = null;
    for (;;) {
        lastExecutionId = assertJobRunningEventually(instance, job, lastExecutionId);
        job.restart(graceful);
        lastExecutionId = assertJobRunningEventually(instance, job, lastExecutionId);
        sleepMillis(ThreadLocalRandom.current().nextInt(400));
        job.restart(graceful);
        try {
            List<Integer> actualItems;
            Set<Integer> distinctActualItems;
            do {
                actualItems = actualItemsSupplier.get();
                distinctActualItems = new HashSet<>(actualItems);
            } while (distinctActualItems.size() < Math.min(numItems, 100 + lastCount)
                    && System.nanoTime() < endTime);
            lastCount = distinctActualItems.size();
            logger.info("number of committed items in the sink so far: " + lastCount);
            if (exactlyOnce) {
                String actualItemsStr = actualItems.stream()
                        .collect(groupingBy(identity(), TreeMap::new, counting()))
                        .entrySet().stream()
                        .map(Object::toString)
                        .collect(joining("\n"));
                assertEquals(expectedRows, actualItemsStr);
            } else {
                assertEquals(expectedRows, distinctActualItems.stream()
                        .map(Objects::toString)
                        .collect(joining("\n")));
            }
            // if content matches, break the loop. Otherwise restart and try again
            break;
        } catch (AssertionError e) {
            if (System.nanoTime() >= endTime) {
                throw e;
            }
        }
    }
}
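For completeness, a hedged sketch of how a caller might drive this utility. The SupplierEx argument is only invoked locally by the assertion loop, so it just needs to poll whatever the sink writes to; the instance setup, the list name, and the use of Sinks.list here are assumptions made for illustration.

import com.hazelcast.collection.IList;
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.logging.ILogger;

import java.util.ArrayList;

public class SinkStressCallerSketch {

    public static void main(String[] args) {
        Config config = new Config();
        config.getJetConfig().setEnabled(true); // the utility submits a Jet job
        HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
        ILogger logger = instance.getLoggingService().getLogger(SinkStressCallerSketch.class);

        IList<Integer> sinkList = instance.getList("sink-list");
        try {
            // The supplier runs on the test driver only, so capturing the IList proxy is fine;
            // each call returns a snapshot of what the sink has committed so far.
            SinkStressTestUtil.test_withRestarts(
                    instance, logger,
                    Sinks.list(sinkList),
                    true,  // graceful restarts
                    true,  // exactly-once guarantee
                    () -> new ArrayList<>(sinkList));
        } finally {
            instance.shutdown();
        }
    }
}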