
Example 21 with IntFunction

use of java.util.function.IntFunction in project ambry by linkedin.

the class Utils method partitionList.

/**
 * Partition the input list into a List of smaller sublists, each one limited to the specified batch size. This method
 * does not copy elements, so changes to the original list will be reflected in the returned list.
 * Method inspired by the Guava utility Lists.partition(List<T> list, int size).
 * @param inputList the input list to partition.
 * @param batchSize the maximum size of the returned sublists.
 * @return the partitioned list of sublists.
 */
public static <T> List<List<T>> partitionList(List<T> inputList, int batchSize) {
    Objects.requireNonNull(inputList, "Input list cannot be null");
    if (batchSize < 1) {
        throw new IllegalArgumentException("Invalid batchSize: " + batchSize);
    }
    IntSupplier numBatches = () -> (inputList.size() + batchSize - 1) / batchSize;
    IntFunction<List<T>> batchFetcher = index -> {
        int start = index * batchSize;
        int end = Math.min(start + batchSize, inputList.size());
        return inputList.subList(start, end);
    };
    return new ListView<>(numBatches, batchFetcher);
}
Also used : RandomAccessFile(java.io.RandomAccessFile) Arrays(java.util.Arrays) ByteBufAllocator(io.netty.buffer.ByteBufAllocator) LoggerFactory(org.slf4j.LoggerFactory) AbstractList(java.util.AbstractList) Random(java.util.Random) ByteBuffer(java.nio.ByteBuffer) Unpooled(io.netty.buffer.Unpooled) JSONException(org.json.JSONException) JSONObject(org.json.JSONObject) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ThreadFactory(java.util.concurrent.ThreadFactory) Path(java.nio.file.Path) SimpleFileVisitor(java.nio.file.SimpleFileVisitor) PosixFilePermission(java.nio.file.attribute.PosixFilePermission) Collection(java.util.Collection) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) Collectors(java.util.stream.Collectors) FileNotFoundException(java.io.FileNotFoundException) StandardCharsets(java.nio.charset.StandardCharsets) Objects(java.util.Objects) FileVisitResult(java.nio.file.FileVisitResult) List(java.util.List) Stream(java.util.stream.Stream) Modifier(java.lang.reflect.Modifier) EventExecutor(io.netty.util.concurrent.EventExecutor) DataInputStream(java.io.DataInputStream) Constructor(java.lang.reflect.Constructor) Function(java.util.function.Function) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) RandomAccess(java.util.RandomAccess) ByteBuf(io.netty.buffer.ByteBuf) Charset(java.nio.charset.Charset) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ExecutorService(java.util.concurrent.ExecutorService) IntSupplier(java.util.function.IntSupplier) IntFunction(java.util.function.IntFunction) ReadableByteChannel(java.nio.channels.ReadableByteChannel) EventLoopGroup(io.netty.channel.EventLoopGroup) Properties(java.util.Properties) Logger(org.slf4j.Logger) SingleThreadEventExecutor(io.netty.util.concurrent.SingleThreadEventExecutor) Files(java.nio.file.Files) Channels(java.nio.channels.Channels) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream) BasicFileAttributes(java.nio.file.attribute.BasicFileAttributes) InputStreamReader(java.io.InputStreamReader) File(java.io.File) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) Paths(java.nio.file.Paths) BufferedReader(java.io.BufferedReader) FileChannel(java.nio.channels.FileChannel) InputStream(java.io.InputStream)
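
Ambry's ListView is a private helper inside Utils, so the snippet below is a self-contained sketch of the same pattern, not the project's actual class: the ListView stand-in is an assumption about its shape (an AbstractList backed by an IntSupplier for the size and an IntFunction for element lookup). It shows the lazy, view-based partitioning in action:

import java.util.AbstractList;
import java.util.Arrays;
import java.util.List;
import java.util.function.IntFunction;
import java.util.function.IntSupplier;

public class PartitionListSketch {

    // Hypothetical stand-in for Ambry's private ListView helper: a lazy list
    // whose size and elements are computed on demand from the two functions.
    static final class ListView<T> extends AbstractList<T> {
        private final IntSupplier size;
        private final IntFunction<T> fetcher;

        ListView(IntSupplier size, IntFunction<T> fetcher) {
            this.size = size;
            this.fetcher = fetcher;
        }

        @Override
        public T get(int index) {
            return fetcher.apply(index);
        }

        @Override
        public int size() {
            return size.getAsInt();
        }
    }

    public static void main(String[] args) {
        List<Integer> input = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        int batchSize = 3;
        // Ceiling division: 7 elements at batch size 3 -> 3 batches.
        IntSupplier numBatches = () -> (input.size() + batchSize - 1) / batchSize;
        // Each batch is a subList view, so no elements are copied.
        IntFunction<List<Integer>> batchFetcher = index -> input.subList(
                index * batchSize, Math.min((index + 1) * batchSize, input.size()));
        List<List<Integer>> batches = new ListView<>(numBatches, batchFetcher);
        System.out.println(batches); // [[1, 2, 3], [4, 5, 6], [7]]
    }
}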

Example 22 with IntFunction

use of java.util.function.IntFunction in project neo4j by neo4j.

the class ServerPoliciesLoadBalancingIT method shouldFallOverBetweenRules.

@Test
public void shouldFallOverBetweenRules() throws Exception {
    Map<String, IntFunction<String>> instanceCoreParams = new HashMap<>();
    instanceCoreParams.put(CausalClusteringSettings.server_groups.name(), (id) -> "core" + id + ",core");
    Map<String, IntFunction<String>> instanceReplicaParams = new HashMap<>();
    instanceReplicaParams.put(CausalClusteringSettings.server_groups.name(), (id) -> "replica" + id + ",replica");
    String defaultPolicy = "groups(core) -> min(3); groups(replica1,replica2) -> min(2);";
    Map<String, String> coreParams = stringMap(
            CausalClusteringSettings.cluster_allow_reads_on_followers.name(), "true",
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.default", defaultPolicy,
            CausalClusteringSettings.multi_dc_license.name(), "true");
    cluster = new Cluster(testDir.directory("cluster"), 5, 5, new HazelcastDiscoveryServiceFactory(), coreParams, instanceCoreParams, emptyMap(), instanceReplicaParams, Standard.LATEST_NAME);
    cluster.start();
    // should use the first rule: only cores for reading
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(5, 1, 4, 0));
    cluster.getCoreMemberById(3).shutdown();
    // one core reader is gone, but we are still fulfilling min(3)
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(4, 1, 3, 0));
    cluster.getCoreMemberById(0).shutdown();
    // should now fall over to the second rule: use replica1 and replica2
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 0, 2));
    cluster.getReadReplicaById(0).shutdown();
    // this does not affect replica1 and replica2
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 0, 2));
    cluster.getReadReplicaById(1).shutdown();
    // should now fall over to use the last rule: all
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 2, 3));
    cluster.addCoreMemberWithId(3).start();
    // should now go back to first rule
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(4, 1, 3, 0));
}
Also used : HazelcastDiscoveryServiceFactory(org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory) HashMap(java.util.HashMap) IntFunction(java.util.function.IntFunction) Cluster(org.neo4j.causalclustering.discovery.Cluster) Test(org.junit.Test)
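
The IntFunction values in this test derive per-member settings from the member id: when the cluster starts each member, its id is applied to every function in the map to produce that member's configuration. A minimal sketch of that expansion (class name hypothetical; the setting key is written as a plain string here):

import java.util.HashMap;
import java.util.Map;
import java.util.function.IntFunction;

public class InstanceParamsSketch {
    public static void main(String[] args) {
        Map<String, IntFunction<String>> instanceParams = new HashMap<>();
        // Same shape as the test above: member 0 lands in groups "core0" and "core".
        instanceParams.put("causal_clustering.server_groups", id -> "core" + id + ",core");

        // Expand the per-instance functions for three hypothetical members.
        for (int id = 0; id < 3; id++) {
            final int memberId = id;
            instanceParams.forEach((setting, fn) ->
                    System.out.println("member " + memberId + ": " + setting + "=" + fn.apply(memberId)));
        }
        // member 0: causal_clustering.server_groups=core0,core
        // member 1: causal_clustering.server_groups=core1,core
        // member 2: causal_clustering.server_groups=core2,core
    }
}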

Example 23 with IntFunction

use of java.util.function.IntFunction in project neo4j by neo4j.

the class ServerPoliciesLoadBalancingIT method shouldSupportSeveralPolicies.

@Test
public void shouldSupportSeveralPolicies() throws Exception {
    Map<String, IntFunction<String>> instanceCoreParams = new HashMap<>();
    instanceCoreParams.put(CausalClusteringSettings.server_groups.name(), (id) -> "core" + id + ",core");
    Map<String, IntFunction<String>> instanceReplicaParams = new HashMap<>();
    instanceReplicaParams.put(CausalClusteringSettings.server_groups.name(), (id) -> "replica" + id + ",replica");
    String defaultPolicySpec = "groups(replica0,replica1)";
    String policyOneTwoSpec = "groups(replica1,replica2)";
    String policyZeroTwoSpec = "groups(replica0,replica2)";
    String policyAllReplicasSpec = "groups(replica); halt()";
    String allPolicySpec = "all()";
    Map<String, String> coreParams = stringMap(
            CausalClusteringSettings.cluster_allow_reads_on_followers.name(), "true",
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.all", allPolicySpec,
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.default", defaultPolicySpec,
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.policy_one_two", policyOneTwoSpec,
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.policy_zero_two", policyZeroTwoSpec,
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.policy_all_replicas", policyAllReplicasSpec,
            CausalClusteringSettings.multi_dc_license.name(), "true");
    cluster = new Cluster(testDir.directory("cluster"), 3, 3, new HazelcastDiscoveryServiceFactory(), coreParams, instanceCoreParams, emptyMap(), instanceReplicaParams, Standard.LATEST_NAME);
    cluster.start();
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 2, 3), policyContext("all"));
    for (CoreClusterMember core : cluster.coreMembers()) {
        CoreGraphDatabase db = core.database();
        assertThat(getServers(db, policyContext("default")), new SpecificReplicasMatcher(0, 1));
        assertThat(getServers(db, policyContext("policy_one_two")), new SpecificReplicasMatcher(1, 2));
        assertThat(getServers(db, policyContext("policy_zero_two")), new SpecificReplicasMatcher(0, 2));
        assertThat(getServers(db, policyContext("policy_all_replicas")), new SpecificReplicasMatcher(0, 1, 2));
    }
}
Also used : HazelcastDiscoveryServiceFactory(org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory) HashMap(java.util.HashMap) CoreClusterMember(org.neo4j.causalclustering.discovery.CoreClusterMember) IntFunction(java.util.function.IntFunction) Cluster(org.neo4j.causalclustering.discovery.Cluster) CoreGraphDatabase(org.neo4j.causalclustering.core.CoreGraphDatabase) Test(org.junit.Test)

Example 24 with IntFunction

use of java.util.function.IntFunction in project jdk8u_jdk by JetBrains.

the class DistinctOps method makeRef.

/**
 * Appends a "distinct" operation to the provided stream, and returns the
 * new stream.
 *
 * @param <T> the type of both input and output elements
 * @param upstream a reference stream with element type T
 * @return the new stream
 */
static <T> ReferencePipeline<T, T> makeRef(AbstractPipeline<?, T, ?> upstream) {
    return new ReferencePipeline.StatefulOp<T, T>(upstream, StreamShape.REFERENCE, StreamOpFlag.IS_DISTINCT | StreamOpFlag.NOT_SIZED) {

        <P_IN> Node<T> reduce(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
            // If the stream is SORTED then it should also be ORDERED so the following will also
            // preserve the sort order
            TerminalOp<T, LinkedHashSet<T>> reduceOp = ReduceOps.<T, LinkedHashSet<T>>makeRef(LinkedHashSet::new, LinkedHashSet::add, LinkedHashSet::addAll);
            return Nodes.node(reduceOp.evaluateParallel(helper, spliterator));
        }

        @Override
        <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper, Spliterator<P_IN> spliterator, IntFunction<T[]> generator) {
            if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
                // No-op
                return helper.evaluate(spliterator, false, generator);
            } else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
                return reduce(helper, spliterator);
            } else {
                // Holder of null state since ConcurrentHashMap does not support null values
                AtomicBoolean seenNull = new AtomicBoolean(false);
                ConcurrentHashMap<T, Boolean> map = new ConcurrentHashMap<>();
                TerminalOp<T, Void> forEachOp = ForEachOps.makeRef(t -> {
                    if (t == null)
                        seenNull.set(true);
                    else
                        map.putIfAbsent(t, Boolean.TRUE);
                }, false);
                forEachOp.evaluateParallel(helper, spliterator);
                // If null has been seen then copy the key set into a HashSet that supports null values
                // and add null
                Set<T> keys = map.keySet();
                if (seenNull.get()) {
                    // TODO Implement a more efficient set-union view, rather than copying
                    keys = new HashSet<>(keys);
                    keys.add(null);
                }
                return Nodes.node(keys);
            }
        }

        @Override
        <P_IN> Spliterator<T> opEvaluateParallelLazy(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
            if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
                // No-op
                return helper.wrapSpliterator(spliterator);
            } else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
                // Not lazy, barrier required to preserve order
                return reduce(helper, spliterator).spliterator();
            } else {
                // Lazy
                return new StreamSpliterators.DistinctSpliterator<>(helper.wrapSpliterator(spliterator));
            }
        }

        @Override
        Sink<T> opWrapSink(int flags, Sink<T> sink) {
            Objects.requireNonNull(sink);
            if (StreamOpFlag.DISTINCT.isKnown(flags)) {
                return sink;
            } else if (StreamOpFlag.SORTED.isKnown(flags)) {
                return new Sink.ChainedReference<T, T>(sink) {

                    boolean seenNull;

                    T lastSeen;

                    @Override
                    public void begin(long size) {
                        seenNull = false;
                        lastSeen = null;
                        downstream.begin(-1);
                    }

                    @Override
                    public void end() {
                        seenNull = false;
                        lastSeen = null;
                        downstream.end();
                    }

                    @Override
                    public void accept(T t) {
                        if (t == null) {
                            if (!seenNull) {
                                seenNull = true;
                                downstream.accept(lastSeen = null);
                            }
                        } else if (lastSeen == null || !t.equals(lastSeen)) {
                            downstream.accept(lastSeen = t);
                        }
                    }
                };
            } else {
                return new Sink.ChainedReference<T, T>(sink) {

                    Set<T> seen;

                    @Override
                    public void begin(long size) {
                        seen = new HashSet<>();
                        downstream.begin(-1);
                    }

                    @Override
                    public void end() {
                        seen = null;
                        downstream.end();
                    }

                    @Override
                    public void accept(T t) {
                        if (!seen.contains(t)) {
                            seen.add(t);
                            downstream.accept(t);
                        }
                    }
                };
            }
        }
    };
}
Also used : LinkedHashSet(java.util.LinkedHashSet) HashSet(java.util.HashSet) Objects(java.util.Objects) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Spliterator(java.util.Spliterator) IntFunction(java.util.function.IntFunction)
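
The three sink strategies in opWrapSink above are observable from ordinary stream code. A small sketch, relying only on the standard java.util.stream semantics (outputs noted in comments):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class DistinctSinksSketch {
    public static void main(String[] args) {
        // Sorted upstream: duplicates are adjacent, so distinct() only needs to
        // remember the last element seen (the SORTED branch of opWrapSink).
        List<Integer> sorted = Stream.of(3, 1, 2, 2, 3)
                .sorted()
                .distinct()
                .collect(Collectors.toList());
        System.out.println(sorted); // [1, 2, 3]

        // General case: the sink keeps a HashSet of everything seen so far,
        // and the encounter order of first occurrences is preserved.
        List<Integer> unsorted = Stream.of(3, 1, 2, 2, 3)
                .distinct()
                .collect(Collectors.toList());
        System.out.println(unsorted); // [3, 1, 2]

        // Already-distinct upstream: the second distinct() is a no-op
        // pass-through (the DISTINCT branch returns the sink unchanged).
        long count = Stream.of("a", "b").distinct().distinct().count();
        System.out.println(count); // 2
    }
}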

Example 25 with IntFunction

use of java.util.function.IntFunction in project lucene-solr by apache.

the class SimpleTextDocValuesReader method getBinary.

@Override
public synchronized BinaryDocValues getBinary(FieldInfo fieldInfo) throws IOException {
    final OneField field = fields.get(fieldInfo.name);
    // SegmentCoreReaders already verifies this field is valid:
    assert field != null;
    final IndexInput in = data.clone();
    final BytesRefBuilder scratch = new BytesRefBuilder();
    final DecimalFormat decoder = new DecimalFormat(field.pattern, new DecimalFormatSymbols(Locale.ROOT));
    DocValuesIterator docsWithField = getBinaryDocsWithField(fieldInfo);
    IntFunction<BytesRef> values = new IntFunction<BytesRef>() {

        final BytesRefBuilder term = new BytesRefBuilder();

        @Override
        public BytesRef apply(int docID) {
            try {
                if (docID < 0 || docID >= maxDoc) {
                    throw new IndexOutOfBoundsException("docID must be 0 .. " + (maxDoc - 1) + "; got " + docID);
                }
                in.seek(field.dataStartFilePointer + (9 + field.pattern.length() + field.maxLength + 2) * docID);
                SimpleTextUtil.readLine(in, scratch);
                assert StringHelper.startsWith(scratch.get(), LENGTH);
                int len;
                try {
                    len = decoder.parse(new String(scratch.bytes(), LENGTH.length, scratch.length() - LENGTH.length, StandardCharsets.UTF_8)).intValue();
                } catch (ParseException pe) {
                    throw new CorruptIndexException("failed to parse int length", in, pe);
                }
                term.grow(len);
                term.setLength(len);
                in.readBytes(term.bytes(), 0, len);
                return term.get();
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    };
    return new BinaryDocValues() {

        @Override
        public int nextDoc() throws IOException {
            return docsWithField.nextDoc();
        }

        @Override
        public int docID() {
            return docsWithField.docID();
        }

        @Override
        public long cost() {
            return docsWithField.cost();
        }

        @Override
        public int advance(int target) throws IOException {
            return docsWithField.advance(target);
        }

        @Override
        public boolean advanceExact(int target) throws IOException {
            return docsWithField.advanceExact(target);
        }

        @Override
        public BytesRef binaryValue() throws IOException {
            return values.apply(docsWithField.docID());
        }
    };
}
Also used : BytesRefBuilder(org.apache.lucene.util.BytesRefBuilder) DecimalFormatSymbols(java.text.DecimalFormatSymbols) DecimalFormat(java.text.DecimalFormat) IOException(java.io.IOException) IntFunction(java.util.function.IntFunction) ChecksumIndexInput(org.apache.lucene.store.ChecksumIndexInput) BufferedChecksumIndexInput(org.apache.lucene.store.BufferedChecksumIndexInput) IndexInput(org.apache.lucene.store.IndexInput) ParseException(java.text.ParseException) BytesRef(org.apache.lucene.util.BytesRef)
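
The IntFunction here provides random access over the fixed-width SimpleText encoding: every document's record occupies the same number of bytes (9 + pattern length + maxLength + 2), so the reader can seek straight to dataStartFilePointer + width * docID. A standalone sketch of the same addressing idea (the record layout and names below are hypothetical, not Lucene's format):

import java.nio.charset.StandardCharsets;
import java.util.function.IntFunction;

public class FixedWidthRecordSketch {
    public static void main(String[] args) {
        // Three 8-byte records packed back to back; fixed width makes the
        // offset of record N simply N * recordWidth, with no index needed.
        byte[] data = "rec-0000rec-0001rec-0002".getBytes(StandardCharsets.UTF_8);
        int recordWidth = 8;
        IntFunction<String> record = docID -> {
            if (docID < 0 || docID * recordWidth >= data.length) {
                throw new IndexOutOfBoundsException("docID: " + docID);
            }
            return new String(data, docID * recordWidth, recordWidth, StandardCharsets.UTF_8);
        };
        System.out.println(record.apply(2)); // rec-0002
    }
}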

Aggregations

IntFunction (java.util.function.IntFunction): 33
List (java.util.List): 10
Collectors (java.util.stream.Collectors): 10
Test (org.junit.Test): 9
Arrays (java.util.Arrays): 8
IOException (java.io.IOException): 7
ArrayList (java.util.ArrayList): 7
IntStream (java.util.stream.IntStream): 7
LoggerFactory (org.slf4j.LoggerFactory): 6
HashMap (java.util.HashMap): 5
Map (java.util.Map): 5
Objects (java.util.Objects): 5
Set (java.util.Set): 5
Logger (org.slf4j.Logger): 5
File (java.io.File): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 4
Function (java.util.function.Function): 4
Supplier (java.util.function.Supplier): 4
BytesRef (org.apache.lucene.util.BytesRef): 4