Use of java.util.stream.LongStream in project jdk8u_jdk by JetBrains.
The class StreamSpliteratorTest, method testLongSplitting.
public void testLongSplitting() {
    List<Consumer<LongStream>> terminalOps = Arrays.asList(
            s -> s.toArray(),
            s -> s.forEach(e -> { }),
            s -> s.reduce(Long::sum));
    List<UnaryOperator<LongStream>> intermediateOps = Arrays.asList(
            s -> s.parallel(),
            // The following ensures the wrapping spliterator is tested
            s -> s.map(i -> i).parallel());
    for (int i = 0; i < terminalOps.size(); i++) {
        Consumer<LongStream> terminalOp = terminalOps.get(i);
        setContext("termOpIndex", i);
        for (int j = 0; j < intermediateOps.size(); j++) {
            setContext("intOpIndex", j);
            UnaryOperator<LongStream> intermediateOp = intermediateOps.get(j);
            for (boolean proxyEstimateSize : new boolean[] { false, true }) {
                setContext("proxyEstimateSize", proxyEstimateSize);
                // Size is assumed to be larger than the target size for no splitting
                // @@@ Need way to obtain the target size
                Spliterator.OfLong sp = intermediateOp.apply(LongStream.range(0, 1000)).spliterator();
                ProxyNoExactSizeSpliterator.OfLong psp = new ProxyNoExactSizeSpliterator.OfLong(sp, proxyEstimateSize);
                LongStream s = StreamSupport.longStream(psp, true);
                terminalOp.accept(s);
                Assert.assertTrue(psp.splits > 0,
                        String.format("Number of splits should be greater than zero when proxyEstimateSize is %s", proxyEstimateSize));
                Assert.assertTrue(psp.prefixSplits > 0,
                        String.format("Number of non-null prefix splits should be greater than zero when proxyEstimateSize is %s", proxyEstimateSize));
                Assert.assertTrue(psp.sizeOnTraversal < 1000,
                        String.format("Size on traversal of last split should be less than the size of the list, %d, when proxyEstimateSize is %s", 1000, proxyEstimateSize));
            }
        }
    }
}
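The pattern this test exercises is worth isolating: a pipeline's Spliterator.OfLong can be detached with spliterator() and a new (possibly parallel) stream rebuilt around it via StreamSupport.longStream. A minimal, self-contained sketch of that round trip, independent of the JDK test harness (the class name SpliteratorRoundTrip is ours, not the JDK's):

import java.util.Spliterator;
import java.util.stream.LongStream;
import java.util.stream.StreamSupport;

public class SpliteratorRoundTrip {
    public static void main(String[] args) {
        // Detach the spliterator from a source pipeline...
        Spliterator.OfLong sp = LongStream.range(0, 1000).map(i -> i).spliterator();
        // ...and rebuild a parallel LongStream around it.
        long sum = StreamSupport.longStream(sp, /* parallel */ true).sum();
        System.out.println(sum); // 499500
    }
}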
Use of java.util.stream.LongStream in project jdk8u_jdk by JetBrains.
The class CountTest, method testOps.
@Test(dataProvider = "LongStreamTestData", dataProviderClass = LongStreamTestDataProvider.class)
public void testOps(String name, TestData.OfLong data) {
    AtomicLong expectedCount = new AtomicLong();
    data.stream().forEach(e -> expectedCount.incrementAndGet());
    withData(data).terminal(LongStream::count)
                  .expectedResult(expectedCount.get())
                  .exercise();
}
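The test validates count() against counting elements one at a time with forEach; the AtomicLong is needed because a lambda cannot mutate a captured local variable. A standalone sketch of the same cross-check, without the test framework:

import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.LongStream;

public class CountCheck {
    public static void main(String[] args) {
        AtomicLong expected = new AtomicLong();
        // Count by side effect; an AtomicLong is used because a lambda
        // cannot reassign a captured local variable.
        LongStream.rangeClosed(1, 100).forEach(e -> expected.incrementAndGet());
        long actual = LongStream.rangeClosed(1, 100).count();
        System.out.println(expected.get() == actual); // true
    }
}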
Use of java.util.stream.LongStream in project incubator-systemml by apache.
The class RandSPInstruction, method generateRandData.
private void generateRandData(SparkExecutionContext sec) throws DMLRuntimeException {
    //step 1: generate pseudo-random seed (because not specified)
    //seed per invocation
    long lSeed = seed;
    if (lSeed == DataGenOp.UNSPECIFIED_SEED)
        lSeed = DataGenOp.generateRandomSeed();
    if (LOG.isTraceEnabled())
        LOG.trace("Process RandSPInstruction rand with seed = " + lSeed + ".");

    //step 2: potential in-memory rand operations if applicable
    if (isMemAvail(rows, cols, sparsity, minValue, maxValue)
        && DMLScript.rtplatform != RUNTIME_PLATFORM.SPARK) {
        RandomMatrixGenerator rgen = LibMatrixDatagen.createRandomMatrixGenerator(
                pdf, (int) rows, (int) cols, rowsInBlock, colsInBlock, sparsity, minValue, maxValue, pdfParams);
        MatrixBlock mb = MatrixBlock.randOperations(rgen, lSeed);
        sec.setMatrixOutput(output.getName(), mb);
        Statistics.decrementNoOfExecutedSPInst();
        return;
    }

    //step 3: seed generation
    JavaPairRDD<MatrixIndexes, Tuple2<Long, Long>> seedsRDD = null;
    Well1024a bigrand = LibMatrixDatagen.setupSeedsForRand(lSeed);
    LongStream nnz = LibMatrixDatagen.computeNNZperBlock(rows, cols, rowsInBlock, colsInBlock, sparsity);
    PrimitiveIterator.OfLong nnzIter = nnz.iterator();
    //overestimate for on disk, ensures hdfs block per partition
    double totalSize = OptimizerUtils.estimatePartitionedSizeExactSparsity(
            rows, cols, rowsInBlock, colsInBlock, rows * cols * sparsity);
    double hdfsBlkSize = InfrastructureAnalyzer.getHDFSBlockSize();
    long numBlocks = new MatrixCharacteristics(rows, cols, rowsInBlock, colsInBlock).getNumBlocks();
    long numColBlocks = (long) Math.ceil((double) cols / (double) colsInBlock);

    //a) in-memory seed rdd construction
    if (numBlocks < INMEMORY_NUMBLOCKS_THRESHOLD) {
        ArrayList<Tuple2<MatrixIndexes, Tuple2<Long, Long>>> seeds =
                new ArrayList<Tuple2<MatrixIndexes, Tuple2<Long, Long>>>();
        for (long i = 0; i < numBlocks; i++) {
            long r = 1 + i / numColBlocks;
            long c = 1 + i % numColBlocks;
            MatrixIndexes indx = new MatrixIndexes(r, c);
            Long seedForBlock = bigrand.nextLong();
            seeds.add(new Tuple2<MatrixIndexes, Tuple2<Long, Long>>(indx,
                    new Tuple2<Long, Long>(seedForBlock, nnzIter.nextLong())));
        }
        //for load balancing: degree of parallelism such that ~128MB per partition
        int numPartitions = (int) Math.max(Math.min(totalSize / hdfsBlkSize, numBlocks), 1);
        //create seeds rdd
        seedsRDD = sec.getSparkContext().parallelizePairs(seeds, numPartitions);
    }
    else { //b) file-based seed rdd construction (for robustness wrt large number of blocks)
        Path path = new Path(LibMatrixDatagen.generateUniqueSeedPath(dir));
        PrintWriter pw = null;
        try {
            FileSystem fs = IOUtilFunctions.getFileSystem(path);
            pw = new PrintWriter(fs.create(path));
            StringBuilder sb = new StringBuilder();
            for (long i = 0; i < numBlocks; i++) {
                sb.append(1 + i / numColBlocks);
                sb.append(',');
                sb.append(1 + i % numColBlocks);
                sb.append(',');
                sb.append(bigrand.nextLong());
                sb.append(',');
                sb.append(nnzIter.nextLong());
                pw.println(sb.toString());
                sb.setLength(0);
            }
        } catch (IOException ex) {
            throw new DMLRuntimeException(ex);
        } finally {
            IOUtilFunctions.closeSilently(pw);
        }
        //for load balancing: degree of parallelism such that ~128MB per partition
        int numPartitions = (int) Math.max(Math.min(totalSize / hdfsBlkSize, numBlocks), 1);
        //create seeds rdd
        seedsRDD = sec.getSparkContext().textFile(path.toString(), numPartitions).mapToPair(new ExtractSeedTuple());
    }

    //step 4: execute rand instruction over seed input
    JavaPairRDD<MatrixIndexes, MatrixBlock> out = seedsRDD.mapToPair(new GenerateRandomBlock(
            rows, cols, rowsInBlock, colsInBlock, sparsity, minValue, maxValue, pdf, pdfParams));

    //step 5: output handling
    MatrixCharacteristics mcOut = sec.getMatrixCharacteristics(output.getName());
    if (!mcOut.dimsKnown(true)) {
        //note: the nnz can only be derived from sparsity for the extreme values 0 and 1;
        //for anything in between it would not reflect the actual number of non-zeros.
        long lnnz = (sparsity == 0 || sparsity == 1) ? (long) (sparsity * rows * cols) : -1;
        mcOut.set(rows, cols, rowsInBlock, colsInBlock, lnnz);
    }
    sec.setRDDHandleForVariable(output.getName(), out);
}
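The LongStream here serves as a lazy per-block sequence: computeNNZperBlock yields one expected non-zero count per block, and the loop pulls values in block order through PrimitiveIterator.OfLong instead of materializing a long[] for a potentially huge number of blocks. A minimal sketch of the pattern, where the block geometry and the map(i -> 100 + i) statistic are stand-ins rather than SystemML code:

import java.util.PrimitiveIterator;
import java.util.stream.LongStream;

public class PerBlockSeeds {
    public static void main(String[] args) {
        long numColBlocks = 4, numBlocks = 12;
        // Hypothetical per-block statistic, generated lazily (one value per block).
        LongStream nnzPerBlock = LongStream.range(0, numBlocks).map(i -> 100 + i);
        PrimitiveIterator.OfLong nnzIter = nnzPerBlock.iterator();
        for (long i = 0; i < numBlocks; i++) {
            long r = 1 + i / numColBlocks; // 1-based block row
            long c = 1 + i % numColBlocks; // 1-based block column
            System.out.println("block (" + r + "," + c + ") nnz=" + nnzIter.nextLong());
        }
    }
}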
Use of java.util.stream.LongStream in project caffeine by ben-manes.
The class Simulator, method broadcast.
/** Broadcast the trace events to all of the policy actors. */
private void broadcast() throws IOException {
    try (LongStream events = eventStream()) {
        LongArrayList batch = new LongArrayList(batchSize);
        for (PrimitiveIterator.OfLong i = events.iterator(); i.hasNext(); ) {
            batch.add(i.nextLong());
            if (batch.size() == batchSize) {
                router.route(batch, getSelf());
                batch = new LongArrayList(batchSize);
            }
        }
        router.route(batch, getSelf());
        router.route(Message.FINISH, getSelf());
    } catch (Exception e) {
        router.route(Message.ERROR, getSelf());
        context().system().log().error(e, "");
        getContext().stop(getSelf());
    }
}
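Two details carry this example. LongStream is AutoCloseable (via BaseStream), so the try-with-resources block runs any onClose handlers registered by eventStream(); and the primitive iterator avoids boxing each event. A minimal sketch of the same batching loop outside Akka, with java.util.ArrayList standing in for LongArrayList and a range standing in for the trace source:

import java.util.ArrayList;
import java.util.List;
import java.util.PrimitiveIterator;
import java.util.stream.LongStream;

public class BatchedConsumer {
    public static void main(String[] args) {
        int batchSize = 3;
        // LongStream extends BaseStream, which is AutoCloseable, so
        // try-with-resources triggers any onClose handlers when done.
        try (LongStream events = LongStream.rangeClosed(1, 10)) {
            List<Long> batch = new ArrayList<>(batchSize);
            for (PrimitiveIterator.OfLong i = events.iterator(); i.hasNext(); ) {
                batch.add(i.nextLong());
                if (batch.size() == batchSize) {
                    System.out.println("batch: " + batch); // hand off a full batch
                    batch = new ArrayList<>(batchSize);
                }
            }
            System.out.println("final: " + batch); // flush the remainder
        }
    }
}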
Use of java.util.stream.LongStream in project caffeine by ben-manes.
The class BinaryTraceReader, method events.
@Override
public LongStream events() throws IOException {
    DataInputStream input = new DataInputStream(new BufferedInputStream(readFiles()));
    LongStream stream = StreamSupport.longStream(
            Spliterators.spliteratorUnknownSize(new TraceIterator(input), Spliterator.ORDERED),
            /* parallel */ false);
    return stream.onClose(() -> Closeables.closeQuietly(input));
}
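The construction pattern here, adapting a PrimitiveIterator.OfLong over an I/O resource into a lazy LongStream whose onClose hook releases the resource, can be sketched without the trace-reader classes. The anonymous iterator below stands in for TraceIterator, and the print statement stands in for closing the input stream:

import java.util.PrimitiveIterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.LongStream;
import java.util.stream.StreamSupport;

public class IteratorBackedStream {
    public static void main(String[] args) {
        // Hypothetical iterator standing in for TraceIterator reading a file.
        PrimitiveIterator.OfLong it = new PrimitiveIterator.OfLong() {
            private long next = 0;
            @Override public boolean hasNext() { return next < 5; }
            @Override public long nextLong() { return next++; }
        };
        LongStream stream = StreamSupport.longStream(
                Spliterators.spliteratorUnknownSize(it, Spliterator.ORDERED),
                /* parallel */ false)
            // onClose ties the underlying resource's lifetime to the stream;
            // here it just prints instead of closing an InputStream.
            .onClose(() -> System.out.println("closed"));
        try (LongStream s = stream) {
            s.forEach(System.out::println);
        } // prints 0..4, then "closed"
    }
}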