
Example 6 with IntFunction

Use of java.util.function.IntFunction in project lucene-solr by apache.

From the class BKDWriter, method build.

/* Recursively reorders the provided reader and writes the bkd-tree on the fly; this method is used
   * when we are writing a new segment directly from IndexWriter's indexing buffer (MutablePointsReader). */
private void build(int nodeID, int leafNodeOffset, MutablePointValues reader, int from, int to, IndexOutput out, byte[] minPackedValue, byte[] maxPackedValue, int[] parentSplits, byte[] splitPackedValues, long[] leafBlockFPs, int[] spareDocIds) throws IOException {
    if (nodeID >= leafNodeOffset) {
        // leaf node
        final int count = to - from;
        assert count <= maxPointsInLeafNode;
        // Compute common prefixes
        Arrays.fill(commonPrefixLengths, bytesPerDim);
        reader.getValue(from, scratchBytesRef1);
        for (int i = from + 1; i < to; ++i) {
            reader.getValue(i, scratchBytesRef2);
            for (int dim = 0; dim < numDims; dim++) {
                final int offset = dim * bytesPerDim;
                for (int j = 0; j < commonPrefixLengths[dim]; j++) {
                    if (scratchBytesRef1.bytes[scratchBytesRef1.offset + offset + j] != scratchBytesRef2.bytes[scratchBytesRef2.offset + offset + j]) {
                        commonPrefixLengths[dim] = j;
                        break;
                    }
                }
            }
        }
        // Find the dimension that has the least number of unique bytes at commonPrefixLengths[dim]
        FixedBitSet[] usedBytes = new FixedBitSet[numDims];
        for (int dim = 0; dim < numDims; ++dim) {
            if (commonPrefixLengths[dim] < bytesPerDim) {
                usedBytes[dim] = new FixedBitSet(256);
            }
        }
        for (int i = from + 1; i < to; ++i) {
            for (int dim = 0; dim < numDims; dim++) {
                if (usedBytes[dim] != null) {
                    byte b = reader.getByteAt(i, dim * bytesPerDim + commonPrefixLengths[dim]);
                    usedBytes[dim].set(Byte.toUnsignedInt(b));
                }
            }
        }
        int sortedDim = 0;
        int sortedDimCardinality = Integer.MAX_VALUE;
        for (int dim = 0; dim < numDims; ++dim) {
            if (usedBytes[dim] != null) {
                final int cardinality = usedBytes[dim].cardinality();
                if (cardinality < sortedDimCardinality) {
                    sortedDim = dim;
                    sortedDimCardinality = cardinality;
                }
            }
        }
        // sort by sortedDim
        MutablePointsReaderUtils.sortByDim(sortedDim, bytesPerDim, commonPrefixLengths, reader, from, to, scratchBytesRef1, scratchBytesRef2);
        // Save the block file pointer:
        leafBlockFPs[nodeID - leafNodeOffset] = out.getFilePointer();
        assert scratchOut.getPosition() == 0;
        // Write doc IDs
        int[] docIDs = spareDocIds;
        for (int i = from; i < to; ++i) {
            docIDs[i - from] = reader.getDocID(i);
        }
        //System.out.println("writeLeafBlock pos=" + out.getFilePointer());
        writeLeafBlockDocs(scratchOut, docIDs, 0, count);
        // Write the common prefixes:
        reader.getValue(from, scratchBytesRef1);
        System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset, scratch1, 0, packedBytesLength);
        writeCommonPrefixes(scratchOut, commonPrefixLengths, scratch1);
        // Write the full values:
        IntFunction<BytesRef> packedValues = new IntFunction<BytesRef>() {

            @Override
            public BytesRef apply(int i) {
                reader.getValue(from + i, scratchBytesRef1);
                return scratchBytesRef1;
            }
        };
        assert valuesInOrderAndBounds(count, sortedDim, minPackedValue, maxPackedValue, packedValues, docIDs, 0);
        writeLeafBlockPackedValues(scratchOut, commonPrefixLengths, count, sortedDim, packedValues);
        out.writeBytes(scratchOut.getBytes(), 0, scratchOut.getPosition());
        scratchOut.reset();
    } else {
        // inner node
        // compute the split dimension and partition around it
        final int splitDim = split(minPackedValue, maxPackedValue, parentSplits);
        final int mid = (from + to + 1) >>> 1;
        int commonPrefixLen = bytesPerDim;
        for (int i = 0; i < bytesPerDim; ++i) {
            if (minPackedValue[splitDim * bytesPerDim + i] != maxPackedValue[splitDim * bytesPerDim + i]) {
                commonPrefixLen = i;
                break;
            }
        }
        MutablePointsReaderUtils.partition(maxDoc, splitDim, bytesPerDim, commonPrefixLen, reader, from, to, mid, scratchBytesRef1, scratchBytesRef2);
        // set the split value
        final int address = nodeID * (1 + bytesPerDim);
        splitPackedValues[address] = (byte) splitDim;
        reader.getValue(mid, scratchBytesRef1);
        System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, splitPackedValues, address + 1, bytesPerDim);
        byte[] minSplitPackedValue = Arrays.copyOf(minPackedValue, packedBytesLength);
        byte[] maxSplitPackedValue = Arrays.copyOf(maxPackedValue, packedBytesLength);
        System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, minSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
        System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, maxSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
        // recurse
        parentSplits[splitDim]++;
        build(nodeID * 2, leafNodeOffset, reader, from, mid, out, minPackedValue, maxSplitPackedValue, parentSplits, splitPackedValues, leafBlockFPs, spareDocIds);
        build(nodeID * 2 + 1, leafNodeOffset, reader, mid, to, out, minSplitPackedValue, maxPackedValue, parentSplits, splitPackedValues, leafBlockFPs, spareDocIds);
        parentSplits[splitDim]--;
    }
}
Also used: FixedBitSet (org.apache.lucene.util.FixedBitSet), IntFunction (java.util.function.IntFunction), BytesRef (org.apache.lucene.util.BytesRef)
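
The IntFunction<BytesRef> above hands back the same scratchBytesRef1 on every call, so the writer must consume each packed value before asking for the next index. A minimal, self-contained sketch of that shared-scratch pattern follows; the byte[][] data and the class name are hypothetical stand-ins, not Lucene API:

import java.util.function.IntFunction;

// Sketch of the shared-scratch pattern above: the function refills one buffer,
// so each apply(i) invalidates the result of the previous call.
class SharedScratchDemo {
    public static void main(String[] args) {
        byte[][] packed = { { 1, 2 }, { 3, 4 }, { 5, 6 } }; // pretend leaf values
        byte[] scratch = new byte[2];                        // reused buffer

        IntFunction<byte[]> packedValues = i -> {
            System.arraycopy(packed[i], 0, scratch, 0, scratch.length);
            return scratch;                                  // same array every call
        };

        // Consumers must copy or process the bytes before the next apply():
        for (int i = 0; i < packed.length; i++) {
            byte[] v = packedValues.apply(i);
            System.out.println(v[0] + "," + v[1]);
        }
    }
}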

Example 7 with IntFunction

Use of java.util.function.IntFunction in project lucene-solr by apache.

From the class SimpleTextBKDWriter, method build.

/** The array (sized numDims) of PathSlice describes the cell we have currently recursed to. */
private void build(int nodeID, int leafNodeOffset, PathSlice[] slices, LongBitSet ordBitSet, IndexOutput out, byte[] minPackedValue, byte[] maxPackedValue, byte[] splitPackedValues, long[] leafBlockFPs, List<Closeable> toCloseHeroically) throws IOException {
    for (PathSlice slice : slices) {
        assert slice.count == slices[0].count;
    }
    if (numDims == 1 && slices[0].writer instanceof OfflinePointWriter && slices[0].count <= maxPointsSortInHeap) {
        // Special case for 1D, to cutover to heap once we recurse deeply enough:
        slices[0] = switchToHeap(slices[0], toCloseHeroically);
    }
    if (nodeID >= leafNodeOffset) {
        // Leaf node: write block
        // We can write the block in any order so by default we write it sorted by the dimension that has the
        // least number of unique bytes at commonPrefixLengths[dim], which makes compression more efficient
        int sortedDim = 0;
        int sortedDimCardinality = Integer.MAX_VALUE;
        for (int dim = 0; dim < numDims; dim++) {
            if (slices[dim].writer instanceof HeapPointWriter == false) {
                // Adversarial cases can cause this, e.g. very lopsided data, all equal points, such that we started
                // offline, but then kept splitting only in one dimension, and so never had to rewrite into heap writer
                slices[dim] = switchToHeap(slices[dim], toCloseHeroically);
            }
            PathSlice source = slices[dim];
            HeapPointWriter heapSource = (HeapPointWriter) source.writer;
            // Find common prefix by comparing first and last values, already sorted in this dimension:
            heapSource.readPackedValue(Math.toIntExact(source.start), scratch1);
            heapSource.readPackedValue(Math.toIntExact(source.start + source.count - 1), scratch2);
            int offset = dim * bytesPerDim;
            commonPrefixLengths[dim] = bytesPerDim;
            for (int j = 0; j < bytesPerDim; j++) {
                if (scratch1[offset + j] != scratch2[offset + j]) {
                    commonPrefixLengths[dim] = j;
                    break;
                }
            }
            int prefix = commonPrefixLengths[dim];
            if (prefix < bytesPerDim) {
                int cardinality = 1;
                byte previous = scratch1[offset + prefix];
                for (long i = 1; i < source.count; ++i) {
                    heapSource.readPackedValue(Math.toIntExact(source.start + i), scratch2);
                    byte b = scratch2[offset + prefix];
                    assert Byte.toUnsignedInt(previous) <= Byte.toUnsignedInt(b);
                    if (b != previous) {
                        cardinality++;
                        previous = b;
                    }
                }
                assert cardinality <= 256;
                if (cardinality < sortedDimCardinality) {
                    sortedDim = dim;
                    sortedDimCardinality = cardinality;
                }
            }
        }
        PathSlice source = slices[sortedDim];
        // We ensured that maxPointsSortInHeap was >= maxPointsInLeafNode, so we better be in heap at this point:
        HeapPointWriter heapSource = (HeapPointWriter) source.writer;
        // Save the block file pointer:
        leafBlockFPs[nodeID - leafNodeOffset] = out.getFilePointer();
        //System.out.println("  write leaf block @ fp=" + out.getFilePointer());
        // Write docIDs first, as their own chunk, so that at intersect time we can add all docIDs w/o
        // loading the values:
        int count = Math.toIntExact(source.count);
        assert count > 0 : "nodeID=" + nodeID + " leafNodeOffset=" + leafNodeOffset;
        writeLeafBlockDocs(out, heapSource.docIDs, Math.toIntExact(source.start), count);
        // TODO: minor opto: we don't really have to write the actual common prefixes, because BKDReader on recursing can regenerate it for us
        // from the index, much like how terms dict does so from the FST:
        // Write the full values:
        IntFunction<BytesRef> packedValues = new IntFunction<BytesRef>() {

            final BytesRef scratch = new BytesRef();

            {
                scratch.length = packedBytesLength;
            }

            @Override
            public BytesRef apply(int i) {
                heapSource.getPackedValueSlice(Math.toIntExact(source.start + i), scratch);
                return scratch;
            }
        };
        assert valuesInOrderAndBounds(count, sortedDim, minPackedValue, maxPackedValue, packedValues, heapSource.docIDs, Math.toIntExact(source.start));
        writeLeafBlockPackedValues(out, commonPrefixLengths, count, sortedDim, packedValues);
    } else {
        // Inner node: partition/recurse
        int splitDim;
        if (numDims > 1) {
            splitDim = split(minPackedValue, maxPackedValue);
        } else {
            splitDim = 0;
        }
        PathSlice source = slices[splitDim];
        assert nodeID < splitPackedValues.length : "nodeID=" + nodeID + " splitValues.length=" + splitPackedValues.length;
        // How many points will be in the left tree:
        long rightCount = source.count / 2;
        long leftCount = source.count - rightCount;
        byte[] splitValue = markRightTree(rightCount, splitDim, source, ordBitSet);
        int address = nodeID * (1 + bytesPerDim);
        splitPackedValues[address] = (byte) splitDim;
        System.arraycopy(splitValue, 0, splitPackedValues, address + 1, bytesPerDim);
        // Partition all PathSlice that are not the split dim into sorted left and right sets, so we can recurse:
        PathSlice[] leftSlices = new PathSlice[numDims];
        PathSlice[] rightSlices = new PathSlice[numDims];
        byte[] minSplitPackedValue = new byte[packedBytesLength];
        System.arraycopy(minPackedValue, 0, minSplitPackedValue, 0, packedBytesLength);
        byte[] maxSplitPackedValue = new byte[packedBytesLength];
        System.arraycopy(maxPackedValue, 0, maxSplitPackedValue, 0, packedBytesLength);
        // When we are on this dim, below, we clear the ordBitSet:
        int dimToClear;
        if (numDims - 1 == splitDim) {
            dimToClear = numDims - 2;
        } else {
            dimToClear = numDims - 1;
        }
        for (int dim = 0; dim < numDims; dim++) {
            if (dim == splitDim) {
                // No need to partition on this dim since it's a simple slice of the incoming already sorted slice, and we
                // will re-use its shared reader when visiting it as we recurse:
                leftSlices[dim] = new PathSlice(source.writer, source.start, leftCount);
                rightSlices[dim] = new PathSlice(source.writer, source.start + leftCount, rightCount);
                System.arraycopy(splitValue, 0, minSplitPackedValue, dim * bytesPerDim, bytesPerDim);
                System.arraycopy(splitValue, 0, maxSplitPackedValue, dim * bytesPerDim, bytesPerDim);
                continue;
            }
            // Not inside the try because we don't want to close this one now, so that after recursion is done,
            // we will have done a single full sweep of the file:
            PointReader reader = slices[dim].writer.getSharedReader(slices[dim].start, slices[dim].count, toCloseHeroically);
            try (PointWriter leftPointWriter = getPointWriter(leftCount, "left" + dim);
                PointWriter rightPointWriter = getPointWriter(source.count - leftCount, "right" + dim)) {
                long nextRightCount = reader.split(source.count, ordBitSet, leftPointWriter, rightPointWriter, dim == dimToClear);
                if (rightCount != nextRightCount) {
                    throw new IllegalStateException("wrong number of points in split: expected=" + rightCount + " but actual=" + nextRightCount);
                }
                leftSlices[dim] = new PathSlice(leftPointWriter, 0, leftCount);
                rightSlices[dim] = new PathSlice(rightPointWriter, 0, rightCount);
            } catch (Throwable t) {
                throw verifyChecksum(t, slices[dim].writer);
            }
        }
        // Recurse on left tree:
        build(2 * nodeID, leafNodeOffset, leftSlices, ordBitSet, out, minPackedValue, maxSplitPackedValue, splitPackedValues, leafBlockFPs, toCloseHeroically);
        for (int dim = 0; dim < numDims; dim++) {
            // Don't destroy the dim we split on because we just re-used what our caller above gave us for that dim:
            if (dim != splitDim) {
                leftSlices[dim].writer.destroy();
            }
        }
        // TODO: we could "tail recurse" here?  have our parent discard its refs as we recurse right?
        // Recurse on right tree:
        build(2 * nodeID + 1, leafNodeOffset, rightSlices, ordBitSet, out, minSplitPackedValue, maxPackedValue, splitPackedValues, leafBlockFPs, toCloseHeroically);
        for (int dim = 0; dim < numDims; dim++) {
            // Don't destroy the dim we split on because we just re-used what our caller above gave us for that dim:
            if (dim != splitDim) {
                rightSlices[dim].writer.destroy();
            }
        }
    }
}
Also used: PointWriter (org.apache.lucene.util.bkd.PointWriter), HeapPointWriter (org.apache.lucene.util.bkd.HeapPointWriter), OfflinePointWriter (org.apache.lucene.util.bkd.OfflinePointWriter), PointReader (org.apache.lucene.util.bkd.PointReader), OfflinePointReader (org.apache.lucene.util.bkd.OfflinePointReader), IntFunction (java.util.function.IntFunction), BytesRef (org.apache.lucene.util.BytesRef)
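
In this variant the anonymous IntFunction carries its own state: an instance initializer sizes a private BytesRef scratch once, and apply(i) refills it from the heap writer. Capturing a local buffer in a lambda gives the same single-allocation behavior; below is a hedged sketch of that idea using a StringBuilder stand-in instead of Lucene's BytesRef:

import java.util.function.IntFunction;

// Sketch of the stateful-function idea above: allocate the scratch buffer once,
// capture it, and refill it on every apply(i).
class CapturedScratchDemo {
    public static void main(String[] args) {
        String[] source = { "alpha", "beta", "gamma" };

        StringBuilder scratch = new StringBuilder();         // allocated once, like the BytesRef
        IntFunction<CharSequence> values = i -> {
            scratch.setLength(0);
            scratch.append(source[i]);
            return scratch;                                  // callers must not keep it across calls
        };

        for (int i = 0; i < source.length; i++)
            System.out.println(values.apply(i));
    }
}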

Example 8 with IntFunction

Use of java.util.function.IntFunction in project lucene-solr by apache.

From the class SimpleTextBKDWriter, method build.

/* Recursively reorders the provided reader and writes the bkd-tree on the fly. */
private void build(int nodeID, int leafNodeOffset, MutablePointValues reader, int from, int to, IndexOutput out, byte[] minPackedValue, byte[] maxPackedValue, byte[] splitPackedValues, long[] leafBlockFPs, int[] spareDocIds) throws IOException {
    if (nodeID >= leafNodeOffset) {
        // leaf node
        final int count = to - from;
        assert count <= maxPointsInLeafNode;
        // Compute common prefixes
        Arrays.fill(commonPrefixLengths, bytesPerDim);
        reader.getValue(from, scratchBytesRef1);
        for (int i = from + 1; i < to; ++i) {
            reader.getValue(i, scratchBytesRef2);
            for (int dim = 0; dim < numDims; dim++) {
                final int offset = dim * bytesPerDim;
                for (int j = 0; j < commonPrefixLengths[dim]; j++) {
                    if (scratchBytesRef1.bytes[scratchBytesRef1.offset + offset + j] != scratchBytesRef2.bytes[scratchBytesRef2.offset + offset + j]) {
                        commonPrefixLengths[dim] = j;
                        break;
                    }
                }
            }
        }
        // Find the dimension that has the least number of unique bytes at commonPrefixLengths[dim]
        FixedBitSet[] usedBytes = new FixedBitSet[numDims];
        for (int dim = 0; dim < numDims; ++dim) {
            if (commonPrefixLengths[dim] < bytesPerDim) {
                usedBytes[dim] = new FixedBitSet(256);
            }
        }
        for (int i = from + 1; i < to; ++i) {
            for (int dim = 0; dim < numDims; dim++) {
                if (usedBytes[dim] != null) {
                    byte b = reader.getByteAt(i, dim * bytesPerDim + commonPrefixLengths[dim]);
                    usedBytes[dim].set(Byte.toUnsignedInt(b));
                }
            }
        }
        int sortedDim = 0;
        int sortedDimCardinality = Integer.MAX_VALUE;
        for (int dim = 0; dim < numDims; ++dim) {
            if (usedBytes[dim] != null) {
                final int cardinality = usedBytes[dim].cardinality();
                if (cardinality < sortedDimCardinality) {
                    sortedDim = dim;
                    sortedDimCardinality = cardinality;
                }
            }
        }
        // sort by sortedDim
        MutablePointsReaderUtils.sortByDim(sortedDim, bytesPerDim, commonPrefixLengths, reader, from, to, scratchBytesRef1, scratchBytesRef2);
        // Save the block file pointer:
        leafBlockFPs[nodeID - leafNodeOffset] = out.getFilePointer();
        // Write doc IDs
        int[] docIDs = spareDocIds;
        for (int i = from; i < to; ++i) {
            docIDs[i - from] = reader.getDocID(i);
        }
        writeLeafBlockDocs(out, docIDs, 0, count);
        // Write the common prefixes:
        reader.getValue(from, scratchBytesRef1);
        System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset, scratch1, 0, packedBytesLength);
        // Write the full values:
        IntFunction<BytesRef> packedValues = new IntFunction<BytesRef>() {

            @Override
            public BytesRef apply(int i) {
                reader.getValue(from + i, scratchBytesRef1);
                return scratchBytesRef1;
            }
        };
        assert valuesInOrderAndBounds(count, sortedDim, minPackedValue, maxPackedValue, packedValues, docIDs, 0);
        writeLeafBlockPackedValues(out, commonPrefixLengths, count, sortedDim, packedValues);
    } else {
        // inner node
        // compute the split dimension and partition around it
        final int splitDim = split(minPackedValue, maxPackedValue);
        final int mid = (from + to + 1) >>> 1;
        int commonPrefixLen = bytesPerDim;
        for (int i = 0; i < bytesPerDim; ++i) {
            if (minPackedValue[splitDim * bytesPerDim + i] != maxPackedValue[splitDim * bytesPerDim + i]) {
                commonPrefixLen = i;
                break;
            }
        }
        MutablePointsReaderUtils.partition(maxDoc, splitDim, bytesPerDim, commonPrefixLen, reader, from, to, mid, scratchBytesRef1, scratchBytesRef2);
        // set the split value
        final int address = nodeID * (1 + bytesPerDim);
        splitPackedValues[address] = (byte) splitDim;
        reader.getValue(mid, scratchBytesRef1);
        System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, splitPackedValues, address + 1, bytesPerDim);
        byte[] minSplitPackedValue = Arrays.copyOf(minPackedValue, packedBytesLength);
        byte[] maxSplitPackedValue = Arrays.copyOf(maxPackedValue, packedBytesLength);
        System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, minSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
        System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, maxSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
        // recurse
        build(nodeID * 2, leafNodeOffset, reader, from, mid, out, minPackedValue, maxSplitPackedValue, splitPackedValues, leafBlockFPs, spareDocIds);
        build(nodeID * 2 + 1, leafNodeOffset, reader, mid, to, out, minSplitPackedValue, maxPackedValue, splitPackedValues, leafBlockFPs, spareDocIds);
    }
}
Also used: FixedBitSet (org.apache.lucene.util.FixedBitSet), IntFunction (java.util.function.IntFunction), BytesRef (org.apache.lucene.util.BytesRef)
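
The build recursions above rely on implicit heap-style node numbering: the root is node 1, the children of node n are 2n and 2n+1, and any nodeID at or above leafNodeOffset is a leaf whose block file pointer is stored at index nodeID - leafNodeOffset. A small standalone sketch of that addressing (the leaf count and fake file pointers are illustrative only):

// Heap-style tree addressing mirrored from the recursion above.
class HeapTreeAddressingDemo {
    public static void main(String[] args) {
        int numLeaves = 8;                    // hypothetical; a power of two keeps the tree full
        int leafNodeOffset = numLeaves;
        long[] leafBlockFPs = new long[numLeaves];

        walk(1, leafNodeOffset, leafBlockFPs, 0);
        for (int i = 0; i < numLeaves; i++)
            System.out.println("leaf " + i + " -> fp " + leafBlockFPs[i]);
    }

    // Visits leaves left to right and records a fake file pointer per leaf,
    // mirroring leafBlockFPs[nodeID - leafNodeOffset] = out.getFilePointer().
    static long walk(int nodeID, int leafNodeOffset, long[] leafBlockFPs, long fp) {
        if (nodeID >= leafNodeOffset) {
            leafBlockFPs[nodeID - leafNodeOffset] = fp;
            return fp + 100;                  // pretend each leaf block occupies 100 bytes
        }
        fp = walk(2 * nodeID, leafNodeOffset, leafBlockFPs, fp);
        return walk(2 * nodeID + 1, leafNodeOffset, leafBlockFPs, fp);
    }
}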

Example 9 with IntFunction

Use of java.util.function.IntFunction in project suite by stupidsing.

From the class AnalyzeTimeSeriesTest, method analyze.

private void analyze(float[] prices) {
    int length = prices.length;
    int log2 = Quant.log2trunc(length);
    double nYears = length * Trade_.invTradeDaysPerYear;
    float[] fds = dct.dct(Arrays.copyOfRange(prices, length - log2, length));
    float[] returns = ts.returns(prices);
    float[] logPrices = To.vector(prices, Math::log);
    float[] logReturns = ts.differences(1, logPrices);
    MeanVariance rmv = stat.meanVariance(returns);
    double variance = rmv.variance;
    double kelly = rmv.mean / variance;
    IntFltPair max = IntFltPair.of(Integer.MIN_VALUE, Float.MIN_VALUE);
    for (int i = 4; i < fds.length; i++) {
        float f = Math.abs(fds[i]);
        if (max.t1 < f)
            max.update(i, f);
    }
    IntFunction<BuySell> momFun = n -> {
        int d0 = 1 + n;
        int d1 = 1;
        return buySell(d -> Quant.sign(prices[d - d0], prices[d - d1])).start(d0);
    };
    IntFunction<BuySell> revert = d -> momFun.apply(d).scale(0f, -1f);
    IntFunction<BuySell> trend_ = d -> momFun.apply(d).scale(0f, +1f);
    BuySell[] reverts = To.array(8, BuySell.class, revert);
    BuySell[] trends_ = To.array(8, BuySell.class, trend_);
    BuySell tanh = buySell(d -> Tanh.tanh(3.2d * reverts[1].apply(d)));
    float[] holds = mt.hold(prices, 1f, 1f, 1f);
    float[] ma200 = ma.movingAvg(prices, 200);
    BuySell mat = buySell(d -> {
        int last = d - 1;
        return Quant.sign(ma200[last], prices[last]);
    }).start(1).longOnly();
    BuySell mt_ = buySell(d -> holds[d]);
    Pair<float[], float[]> bbmv = bb.meanVariances(VirtualVector.of(logReturns), 9, 0);
    float[] bbmean = bbmv.t0;
    float[] bbvariances = bbmv.t1;
    BuySell ms2 = buySell(d -> {
        int last = d - 1;
        int ref = last - 250;
        float mean = bbmean[last];
        return Quant.sign(logPrices[last], logPrices[ref] - bbvariances[last] / (2d * mean * mean));
    }).start(1 + 250);
    LogUtil.info(""
        + "\nsymbol = " + symbol
        + "\nlength = " + length
        + "\nnYears = " + nYears
        + "\nups = " + Floats_.of(returns).filter(return_ -> 0f <= return_).size()
        + "\ndct period = " + max.t0
        + Ints_.range(10).map(d -> "\ndct component [" + d + "d] = " + fds[d]).collect(As::joined)
        + "\nreturn yearly sharpe = " + rmv.mean / Math.sqrt(variance / nYears)
        + "\nreturn kelly = " + kelly
        + "\nreturn skew = " + stat.skewness(returns)
        + "\nreturn kurt = " + stat.kurtosis(returns)
        + Ints_.of(1, 2, 4, 8, 16, 32).map(d -> "\nmean reversion ols [" + d + "d] = " + ts.meanReversion(prices, d).coefficients[0]).collect(As::joined)
        + Ints_.of(4, 16).map(d -> "\nvariance ratio [" + d + "d over 1d] = " + ts.varianceRatio(prices, d)).collect(As::joined)
        + "\nreturn hurst = " + ts.hurst(prices, prices.length / 2)
        + "\nhold " + buySell(d -> 1d).invest(prices)
        + "\nkelly " + buySell(d -> kelly).invest(prices)
        + "\nma200 trend " + mat.invest(prices)
        + Ints_.range(1, 8).map(d -> "\nrevert [" + d + "d] " + reverts[d].invest(prices)).collect(As::joined)
        + Ints_.range(1, 8).map(d -> "\ntrend_ [" + d + "d] " + trends_[d].invest(prices)).collect(As::joined)
        + Ints_.range(1, 8).map(d -> "\nrevert [" + d + "d] long-only " + reverts[d].longOnly().invest(prices)).collect(As::joined)
        + Ints_.range(1, 8).map(d -> "\ntrend_ [" + d + "d] long-only " + trends_[d].longOnly().invest(prices)).collect(As::joined)
        + "\nms2 " + ms2.invest(prices)
        + "\nms2 long-only " + ms2.longOnly().invest(prices)
        + "\ntanh " + tanh.invest(prices)
        + "\ntimed " + mt_.invest(prices)
        + "\ntimed long-only " + mt_.longOnly().invest(prices));
}
Also used: Arrays (java.util.Arrays), LogUtil (suite.os.LogUtil), IntFltPair (suite.primitive.adt.pair.IntFltPair), Trade_ (suite.trade.Trade_), ConfigurationImpl (suite.trade.data.ConfigurationImpl), TimeSeries (ts.TimeSeries), Ints_ (suite.primitive.Ints_), DiscreteCosineTransform (suite.math.transform.DiscreteCosineTransform), IntFunction (java.util.function.IntFunction), Statistic (suite.math.numeric.Statistic), Test (org.junit.Test), To (suite.util.To), Quant (ts.Quant), BollingerBands (ts.BollingerBands), Tanh (suite.math.Tanh), VirtualVector (suite.math.linalg.VirtualVector), Pair (suite.adt.pair.Pair), Friends.max (suite.util.Friends.max), MeanVariance (suite.math.numeric.Statistic.MeanVariance), Time (suite.trade.Time), Floats_ (suite.primitive.Floats_), Configuration (suite.trade.data.Configuration), DataSource (suite.trade.data.DataSource), As (suite.streamlet.As), TimeRange (suite.trade.TimeRange), Int_Dbl (suite.primitive.Int_Dbl), Int_Flt (suite.primitive.Int_Flt)
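
Here IntFunction<BuySell> acts as a factory keyed by lag: momFun builds a momentum strategy for window n, and To.array(8, BuySell.class, revert) materializes one strategy per index. A hedged sketch of the same factory idea using only JDK types (Signal, momentum, and the price series are made up for illustration):

import java.util.function.IntFunction;
import java.util.stream.IntStream;

// Sketch of the lag-parameterized factory pattern above.
class LagFactoryDemo {
    interface Signal { double at(int day); }

    public static void main(String[] args) {
        double[] prices = { 10, 11, 9, 12, 13, 12, 14, 15 };

        // momentum(n): sign of the price change over the last n days
        IntFunction<Signal> momentum = n ->
            day -> Math.signum(prices[day] - prices[day - n]);

        // Analogous to To.array(8, BuySell.class, revert): one Signal per lag 1..3.
        // Note that Signal[]::new is itself an IntFunction<Signal[]>.
        Signal[] byLag = IntStream.rangeClosed(1, 3)
            .mapToObj(momentum)
            .toArray(Signal[]::new);

        for (int lag = 1; lag <= 3; lag++)
            System.out.println("lag " + lag + " signal at day 5 = " + byLag[lag - 1].at(5));
    }
}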

Example 10 with IntFunction

Use of java.util.function.IntFunction in project neo4j by neo4j.

From the class ServerPoliciesLoadBalancingIT, method shouldFallOverBetweenRules.

@Test
public void shouldFallOverBetweenRules() throws Exception {
    Map<String, IntFunction<String>> instanceCoreParams = new HashMap<>();
    instanceCoreParams.put(CausalClusteringSettings.server_groups.name(), (id) -> "core" + id + ",core");
    Map<String, IntFunction<String>> instanceReplicaParams = new HashMap<>();
    instanceReplicaParams.put(CausalClusteringSettings.server_groups.name(), (id) -> "replica" + id + ",replica");
    String defaultPolicy = "groups(core) -> min(3); groups(replica1,replica2) -> min(2);";
    Map<String, String> coreParams = stringMap(CausalClusteringSettings.cluster_allow_reads_on_followers.name(), "true", CausalClusteringSettings.load_balancing_config.name() + ".server_policies.default", defaultPolicy, CausalClusteringSettings.multi_dc_license.name(), "true");
    cluster = new Cluster(testDir.directory("cluster"), 5, 5, new HazelcastDiscoveryServiceFactory(), coreParams, instanceCoreParams, emptyMap(), instanceReplicaParams, Standard.LATEST_NAME);
    cluster.start();
    // should use the first rule: only cores for reading
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(5, 1, 4, 0));
    cluster.getCoreMemberById(3).shutdown();
    // one core reader is gone, but we are still fulfilling min(3)
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(4, 1, 3, 0));
    cluster.getCoreMemberById(0).shutdown();
    // should now fall over to the second rule: use replica1 and replica2
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 0, 2));
    cluster.getReadReplicaById(0).shutdown();
    // this does not affect replica1 and replica2
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 0, 2));
    cluster.getReadReplicaById(1).shutdown();
    // should now fall over to use the last rule: all
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 2, 3));
    cluster.addCoreMemberWithId(3).start();
    // should now go back to first rule
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(4, 1, 3, 0));
}
Also used: HazelcastDiscoveryServiceFactory (org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory), HashMap (java.util.HashMap), IntFunction (java.util.function.IntFunction), Cluster (org.neo4j.causalclustering.discovery.Cluster), Test (org.junit.Test)
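
In this test each IntFunction<String> derives an instance-specific setting from the member id, so one map of functions configures every core and read replica with its own server group. A minimal sketch of that per-instance configuration idea (the setting key, cluster size, and class name are hypothetical, not Neo4j API):

import java.util.HashMap;
import java.util.Map;
import java.util.function.IntFunction;

// Sketch: each setting maps to an IntFunction that computes the concrete
// value for a given instance id, as in instanceCoreParams above.
class InstanceParamsDemo {
    public static void main(String[] args) {
        Map<String, IntFunction<String>> instanceCoreParams = new HashMap<>();
        instanceCoreParams.put("server_groups", id -> "core" + id + ",core");

        int numCores = 3;                     // hypothetical cluster size
        for (int id = 0; id < numCores; id++) {
            for (Map.Entry<String, IntFunction<String>> e : instanceCoreParams.entrySet()) {
                System.out.println("core " + id + ": " + e.getKey() + " = " + e.getValue().apply(id));
            }
        }
    }
}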

Aggregations

IntFunction (java.util.function.IntFunction): 14 usages
Test (org.junit.Test): 6 usages
BytesRef (org.apache.lucene.util.BytesRef): 5 usages
HashMap (java.util.HashMap): 4 usages
Cluster (org.neo4j.causalclustering.discovery.Cluster): 4 usages
HazelcastDiscoveryServiceFactory (org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory): 4 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 3 usages
IOException (java.io.IOException): 2 usages
DecimalFormat (java.text.DecimalFormat): 2 usages
DecimalFormatSymbols (java.text.DecimalFormatSymbols): 2 usages
ParseException (java.text.ParseException): 2 usages
HashSet (java.util.HashSet): 2 usages
LinkedHashSet (java.util.LinkedHashSet): 2 usages
Objects (java.util.Objects): 2 usages
Set (java.util.Set): 2 usages
Spliterator (java.util.Spliterator): 2 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2 usages
BufferedChecksumIndexInput (org.apache.lucene.store.BufferedChecksumIndexInput): 2 usages
ChecksumIndexInput (org.apache.lucene.store.ChecksumIndexInput): 2 usages
IndexInput (org.apache.lucene.store.IndexInput): 2 usages