Use of java.util.Spliterator in project jdk8u_jdk by JetBrains: class StreamSpliteratorTest, method testLongSplitting.
public void testLongSplitting() {
    List<Consumer<LongStream>> terminalOps = Arrays.asList(
            s -> s.toArray(),
            s -> s.forEach(e -> { }),
            s -> s.reduce(Long::sum));

    List<UnaryOperator<LongStream>> intermediateOps = Arrays.asList(
            s -> s.parallel(),
            // The following ensures the wrapping spliterator is tested
            s -> s.map(i -> i).parallel());

    for (int i = 0; i < terminalOps.size(); i++) {
        Consumer<LongStream> terminalOp = terminalOps.get(i);
        setContext("termOpIndex", i);
        for (int j = 0; j < intermediateOps.size(); j++) {
            setContext("intOpIndex", j);
            UnaryOperator<LongStream> intermediateOp = intermediateOps.get(j);
            for (boolean proxyEstimateSize : new boolean[] { false, true }) {
                setContext("proxyEstimateSize", proxyEstimateSize);
                // Size is assumed to be larger than the target size for no splitting
                // @@@ Need way to obtain the target size
                Spliterator.OfLong sp = intermediateOp.apply(LongStream.range(0, 1000)).spliterator();
                ProxyNoExactSizeSpliterator.OfLong psp = new ProxyNoExactSizeSpliterator.OfLong(sp, proxyEstimateSize);
                LongStream s = StreamSupport.longStream(psp, true);
                terminalOp.accept(s);
                Assert.assertTrue(psp.splits > 0,
                        String.format("Number of splits should be greater than zero when proxyEstimateSize is %s", proxyEstimateSize));
                Assert.assertTrue(psp.prefixSplits > 0,
                        String.format("Number of non-null prefix splits should be greater than zero when proxyEstimateSize is %s", proxyEstimateSize));
                Assert.assertTrue(psp.sizeOnTraversal < 1000,
                        String.format("Size on traversal of last split should be less than the size of the list, %d, when proxyEstimateSize is %s", 1000, proxyEstimateSize));
            }
        }
    }
}
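The splitting behavior this test asserts on can also be observed directly through the public Spliterator API. The following is only an illustrative sketch under that assumption: it does not use the test's ProxyNoExactSizeSpliterator helper, and SplitDemo is a made-up class name. It splits a LongStream's spliterator once and checks that the two halves together still cover all 1000 elements.

import java.util.Spliterator;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.LongStream;

public class SplitDemo {
    public static void main(String[] args) {
        // Obtain a spliterator over [0, 1000) and split it once.
        Spliterator.OfLong right = LongStream.range(0, 1000).spliterator();
        Spliterator.OfLong left = right.trySplit();   // may be null if the source declines to split

        AtomicLong count = new AtomicLong();
        if (left != null) {
            left.forEachRemaining((long v) -> count.incrementAndGet());
        }
        right.forEachRemaining((long v) -> count.incrementAndGet());

        // Both halves together still cover every element exactly once.
        System.out.println(count.get());   // 1000
    }
}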
Use of java.util.Spliterator in project jdk8u_jdk by JetBrains: class DoublePipeline, method forEachWithCancel.
@Override
final void forEachWithCancel(Spliterator<Double> spliterator, Sink<Double> sink) {
    Spliterator.OfDouble spl = adapt(spliterator);
    DoubleConsumer adaptedSink = adapt(sink);
    // Pull one element at a time so a downstream short-circuiting operation
    // can stop the traversal as soon as it requests cancellation.
    do {
    } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
}
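The same traversal pattern, advancing one element at a time and checking a stop condition between elements, can be reproduced with the public Spliterator API. Sink and cancellationRequested() are internal to java.util.stream and are not used below; the cancellation condition (stop once a value of at least 10.0 has been consumed) and the class name CancelDemo are made up for illustration.

import java.util.Spliterator;
import java.util.stream.DoubleStream;

public class CancelDemo {
    public static void main(String[] args) {
        Spliterator.OfDouble spl = DoubleStream.iterate(0.0, d -> d + 0.5).spliterator();

        final double[] last = { Double.NaN };
        final boolean[] cancelled = { false };

        do {
            // Loop body intentionally empty, mirroring forEachWithCancel:
            // all work happens in the tryAdvance consumer and the loop condition.
        } while (!cancelled[0] && spl.tryAdvance((double v) -> {
            last[0] = v;
            if (v >= 10.0) {
                cancelled[0] = true;   // hypothetical cancellation condition
            }
        }));

        System.out.println(last[0]);   // 10.0, the element that triggered cancellation
    }
}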
Use of java.util.Spliterator in project jdk8u_jdk by JetBrains: class LongPipeline, method forEachWithCancel.
@Override
final void forEachWithCancel(Spliterator<Long> spliterator, Sink<Long> sink) {
    Spliterator.OfLong spl = adapt(spliterator);
    LongConsumer adaptedSink = adapt(sink);
    do {
    } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
}
Use of java.util.Spliterator in project j2objc by google: class LongPipeline, method forEachWithCancel. This version is identical to the jdk8u_jdk snippet above except that the method is declared public.
@Override
public final void forEachWithCancel(Spliterator<Long> spliterator, Sink<Long> sink) {
    Spliterator.OfLong spl = adapt(spliterator);
    LongConsumer adaptedSink = adapt(sink);
    do {
    } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
}
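From user code, this cancellation hook is what allows short-circuiting terminal operations to stop consuming a sequential LongStream early. A small hedged illustration follows; the counter exists only to make the early exit visible, ShortCircuitDemo is a made-up class name, and the exact number of elements pulled is an implementation detail.

import java.util.OptionalLong;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.LongStream;

public class ShortCircuitDemo {
    public static void main(String[] args) {
        AtomicInteger pulled = new AtomicInteger();

        // findFirst is short-circuiting: once a match is found, the pipeline's
        // sink reports cancellationRequested() and the traversal stops advancing.
        OptionalLong first = LongStream.range(0, 1_000_000)
                .peek(v -> pulled.incrementAndGet())
                .filter(v -> v >= 10)
                .findFirst();

        System.out.println(first.getAsLong());   // 10
        System.out.println(pulled.get());        // far fewer than 1_000_000: only the
                                                 // elements up to the first match are pulled
    }
}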
Use of java.util.Spliterator in project jdk8u_jdk by JetBrains: class DistinctOps, method makeRef.
/**
* Appends a "distinct" operation to the provided stream, and returns the
* new stream.
*
* @param <T> the type of both input and output elements
* @param upstream a reference stream with element type T
* @return the new stream
*/
static <T> ReferencePipeline<T, T> makeRef(AbstractPipeline<?, T, ?> upstream) {
    return new ReferencePipeline.StatefulOp<T, T>(upstream, StreamShape.REFERENCE,
                                                  StreamOpFlag.IS_DISTINCT | StreamOpFlag.NOT_SIZED) {

        <P_IN> Node<T> reduce(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
            // If the stream is SORTED then it should also be ORDERED so the following will also
            // preserve the sort order
            TerminalOp<T, LinkedHashSet<T>> reduceOp =
                    ReduceOps.<T, LinkedHashSet<T>>makeRef(LinkedHashSet::new, LinkedHashSet::add,
                                                           LinkedHashSet::addAll);
            return Nodes.node(reduceOp.evaluateParallel(helper, spliterator));
        }

        @Override
        <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
                                          Spliterator<P_IN> spliterator,
                                          IntFunction<T[]> generator) {
            if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
                // No-op
                return helper.evaluate(spliterator, false, generator);
            } else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
                return reduce(helper, spliterator);
            } else {
                // Holder of null state since ConcurrentHashMap does not support null values
                AtomicBoolean seenNull = new AtomicBoolean(false);
                ConcurrentHashMap<T, Boolean> map = new ConcurrentHashMap<>();
                TerminalOp<T, Void> forEachOp = ForEachOps.makeRef(t -> {
                    if (t == null)
                        seenNull.set(true);
                    else
                        map.putIfAbsent(t, Boolean.TRUE);
                }, false);
                forEachOp.evaluateParallel(helper, spliterator);

                // If null has been seen then copy the key set into a HashSet that supports null values
                // and add null
                Set<T> keys = map.keySet();
                if (seenNull.get()) {
                    // TODO Implement a more efficient set-union view, rather than copying
                    keys = new HashSet<>(keys);
                    keys.add(null);
                }
                return Nodes.node(keys);
            }
        }

        @Override
        <P_IN> Spliterator<T> opEvaluateParallelLazy(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
            if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
                // No-op
                return helper.wrapSpliterator(spliterator);
            } else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
                // Not lazy, barrier required to preserve order
                return reduce(helper, spliterator).spliterator();
            } else {
                // Lazy
                return new StreamSpliterators.DistinctSpliterator<>(helper.wrapSpliterator(spliterator));
            }
        }

        @Override
        Sink<T> opWrapSink(int flags, Sink<T> sink) {
            Objects.requireNonNull(sink);

            if (StreamOpFlag.DISTINCT.isKnown(flags)) {
                return sink;
            } else if (StreamOpFlag.SORTED.isKnown(flags)) {
                // With SORTED input, duplicates are adjacent, so remembering only the
                // previous element is enough to filter them out.
                return new Sink.ChainedReference<T, T>(sink) {
                    boolean seenNull;
                    T lastSeen;

                    @Override
                    public void begin(long size) {
                        seenNull = false;
                        lastSeen = null;
                        downstream.begin(-1);
                    }

                    @Override
                    public void end() {
                        seenNull = false;
                        lastSeen = null;
                        downstream.end();
                    }

                    @Override
                    public void accept(T t) {
                        if (t == null) {
                            if (!seenNull) {
                                seenNull = true;
                                downstream.accept(lastSeen = null);
                            }
                        } else if (lastSeen == null || !t.equals(lastSeen)) {
                            downstream.accept(lastSeen = t);
                        }
                    }
                };
            } else {
                // General case: remember every element seen so far.
                return new Sink.ChainedReference<T, T>(sink) {
                    Set<T> seen;

                    @Override
                    public void begin(long size) {
                        seen = new HashSet<>();
                        downstream.begin(-1);
                    }

                    @Override
                    public void end() {
                        seen = null;
                        downstream.end();
                    }

                    @Override
                    public void accept(T t) {
                        if (!seen.contains(t)) {
                            seen.add(t);
                            downstream.accept(t);
                        }
                    }
                };
            }
        }
    };
}
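The behavior implemented above is what Stream.distinct() exposes publicly. Below is a small sketch under that assumption; the sample data and the class name DistinctDemo are arbitrary, and the comments about which sink applies follow from the opWrapSink logic shown above.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class DistinctDemo {
    public static void main(String[] args) {
        // Sequential, unsorted input: the HashSet-backed sink applies and
        // encounter order of first occurrences is preserved.
        List<String> unique = Stream.of("b", "a", "b", "c", "a")
                .distinct()
                .collect(Collectors.toList());
        System.out.println(unique);   // [b, a, c]

        // Sorted input: duplicates are adjacent, so the lastSeen-based sink applies.
        long count = Stream.of(3, 1, 2, 3, 1)
                .sorted()
                .distinct()
                .count();
        System.out.println(count);    // 3

        // null elements are permitted and collapse to a single null.
        List<String> withNull = Arrays.asList("x", null, "x", null).stream()
                .distinct()
                .collect(Collectors.toList());
        System.out.println(withNull); // [x, null]
    }
}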