
Example 1 with IntProcedure

Use of com.carrotsearch.hppc.procedures.IntProcedure in project elasticsearch by elastic, in the class RelocationIT, method testRelocationWhileIndexingRandom:

@TestLogging("org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.search:TRACE")
public void testRelocationWhileIndexingRandom() throws Exception {
    int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4);
    int numberOfReplicas = randomBoolean() ? 0 : 1;
    int numberOfNodes = numberOfReplicas == 0 ? 2 : 3;
    logger.info("testRelocationWhileIndexingRandom(numRelocations={}, numberOfReplicas={}, numberOfNodes={})", numberOfRelocations, numberOfReplicas, numberOfNodes);
    String[] nodes = new String[numberOfNodes];
    logger.info("--> starting [node1] ...");
    nodes[0] = internalCluster().startNode();
    logger.info("--> creating test index ...");
    prepareCreate("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", numberOfReplicas)).get();
    for (int i = 2; i <= numberOfNodes; i++) {
        logger.info("--> starting [node{}] ...", i);
        nodes[i - 1] = internalCluster().startNode();
        if (i != numberOfNodes) {
            ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(Integer.toString(i)).setWaitForGreenStatus().execute().actionGet();
            assertThat(healthResponse.isTimedOut(), equalTo(false));
        }
    }
    int numDocs = scaledRandomIntBetween(200, 2500);
    try (BackgroundIndexer indexer = new BackgroundIndexer("test", "type1", client(), numDocs)) {
        logger.info("--> waiting for {} docs to be indexed ...", numDocs);
        waitForDocs(numDocs, indexer);
        logger.info("--> {} docs indexed", numDocs);
        logger.info("--> starting relocations...");
        // if we have replicas shift those
        int nodeShiftBased = numberOfReplicas;
        for (int i = 0; i < numberOfRelocations; i++) {
            int fromNode = (i % 2);
            int toNode = fromNode == 0 ? 1 : 0;
            fromNode += nodeShiftBased;
            toNode += nodeShiftBased;
            numDocs = scaledRandomIntBetween(200, 1000);
            logger.debug("--> Allow indexer to index [{}] documents", numDocs);
            indexer.continueIndexing(numDocs);
            logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
            client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode])).get();
            if (rarely()) {
                logger.debug("--> flushing");
                client().admin().indices().prepareFlush().get();
            }
            ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
            assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
            indexer.pauseIndexing();
            logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
        }
        logger.info("--> done relocations");
        logger.info("--> waiting for indexing threads to stop ...");
        indexer.stop();
        logger.info("--> indexing threads stopped");
        logger.info("--> refreshing the index");
        client().admin().indices().prepareRefresh("test").execute().actionGet();
        logger.info("--> searching the index");
        boolean ranOnce = false;
        for (int i = 0; i < 10; i++) {
            logger.info("--> START search test round {}", i + 1);
            SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexer.totalIndexedDocs()).storedFields().execute().actionGet().getHits();
            ranOnce = true;
            if (hits.getTotalHits() != indexer.totalIndexedDocs()) {
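                // counts disagree: work out exactly which ids are extra or missing before the assertion below fails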
                int[] hitIds = new int[(int) indexer.totalIndexedDocs()];
                for (int hit = 0; hit < indexer.totalIndexedDocs(); hit++) {
                    hitIds[hit] = hit + 1;
                }
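                // seed a primitive int set with every id the test expects to see (1..totalIndexedDocs)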
                IntHashSet set = IntHashSet.from(hitIds);
                for (SearchHit hit : hits.getHits()) {
                    int id = Integer.parseInt(hit.getId());
                    if (!set.remove(id)) {
                        logger.error("Extra id [{}]", id);
                    }
                }
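                // anything still left in the set was expected but never returned by the search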
                set.forEach((IntProcedure) value -> {
                    logger.error("Missing id [{}]", value);
                });
            }
            assertThat(hits.getTotalHits(), equalTo(indexer.totalIndexedDocs()));
            logger.info("--> DONE search test round {}", i + 1);
        }
        if (!ranOnce) {
            fail();
        }
    }
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) Arrays(java.util.Arrays) TransportRequest(org.elasticsearch.transport.TransportRequest) Nullable(org.elasticsearch.common.Nullable) SearchHits(org.elasticsearch.search.SearchHits) Matchers.not(org.hamcrest.Matchers.not) IndexStats(org.elasticsearch.action.admin.indices.stats.IndexStats) MoveAllocationCommand(org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand) SeqNoStats(org.elasticsearch.index.seqno.SeqNoStats) ClusterState(org.elasticsearch.cluster.ClusterState) Settings(org.elasticsearch.common.settings.Settings) Scope(org.elasticsearch.test.ESIntegTestCase.Scope) SearchResponse(org.elasticsearch.action.search.SearchResponse) Path(java.nio.file.Path) SimpleFileVisitor(java.nio.file.SimpleFileVisitor) SearchHit(org.elasticsearch.search.SearchHit) MockIndexEventListener(org.elasticsearch.test.MockIndexEventListener) Priority(org.elasticsearch.common.Priority) PeerRecoveryTargetService(org.elasticsearch.indices.recovery.PeerRecoveryTargetService) Transport(org.elasticsearch.transport.Transport) TestLogging(org.elasticsearch.test.junit.annotations.TestLogging) Collection(java.util.Collection) RecoveryFileChunkRequest(org.elasticsearch.indices.recovery.RecoveryFileChunkRequest) ElasticsearchAssertions.assertHitCount(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount) Matchers.startsWith(org.hamcrest.Matchers.startsWith) ElasticsearchAssertions.assertSearchHits(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits) FileVisitResult(java.nio.file.FileVisitResult) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Stream(java.util.stream.Stream) IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) ESIntegTestCase(org.elasticsearch.test.ESIntegTestCase) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL(org.elasticsearch.index.IndexSettings.INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL) Matchers.equalTo(org.hamcrest.Matchers.equalTo) Optional(java.util.Optional) TransportRequestOptions(org.elasticsearch.transport.TransportRequestOptions) IntProcedure(com.carrotsearch.hppc.procedures.IntProcedure) XContentType(org.elasticsearch.common.xcontent.XContentType) IndexShardStats(org.elasticsearch.action.admin.indices.stats.IndexShardStats) ClusterService(org.elasticsearch.cluster.service.ClusterService) ShardRoutingState(org.elasticsearch.cluster.routing.ShardRoutingState) ArrayList(java.util.ArrayList) IndicesStatsResponse(org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) SequenceNumbersService(org.elasticsearch.index.seqno.SequenceNumbersService) ShardStats(org.elasticsearch.action.admin.indices.stats.ShardStats) TimeValue(org.elasticsearch.common.unit.TimeValue) MockTransportService(org.elasticsearch.test.transport.MockTransportService) TransportService(org.elasticsearch.transport.TransportService) EnableAllocationDecider(org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider) ClusterScope(org.elasticsearch.test.ESIntegTestCase.ClusterScope) IndexShardState(org.elasticsearch.index.shard.IndexShardState) QueryBuilders.matchAllQuery(org.elasticsearch.index.query.QueryBuilders.matchAllQuery) IndexEventListener(org.elasticsearch.index.shard.IndexEventListener) Files(java.nio.file.Files) Semaphore(java.util.concurrent.Semaphore) Client(org.elasticsearch.client.Client) 
IndexShard(org.elasticsearch.index.shard.IndexShard) IntHashSet(com.carrotsearch.hppc.IntHashSet) IndexFileNames(org.apache.lucene.index.IndexFileNames) Plugin(org.elasticsearch.plugins.Plugin) BackgroundIndexer(org.elasticsearch.test.BackgroundIndexer) ClusterHealthResponse(org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse) IOException(java.io.IOException) BasicFileAttributes(java.nio.file.attribute.BasicFileAttributes) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) English(org.apache.lucene.util.English) ElasticsearchAssertions.assertAcked(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked) ElasticsearchAssertions.assertNoFailures(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures)
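
The part that matters for IntProcedure is the diagnostic block at the end of the test: it seeds an IntHashSet with every id it expects, removes each id that actually came back, and then walks the leftovers with an IntProcedure to log what went missing. Below is a minimal standalone sketch of that pattern, assuming only hppc on the classpath; the class name MissingIdCheck and the returnedIds array are hypothetical stand-ins for the test's search hits and logger.

import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.procedures.IntProcedure;

public class MissingIdCheck {
    public static void main(String[] args) {
        // ids 1..5 are expected back, mirroring the 1..totalIndexedDocs seeding in the test
        IntHashSet expected = IntHashSet.from(1, 2, 3, 4, 5);

        // hypothetical stand-in for the ids extracted from the search hits
        int[] returnedIds = {1, 2, 4, 4};

        for (int id : returnedIds) {
            if (!expected.remove(id)) {
                // returned but never expected, or returned more than once
                System.err.println("Extra id [" + id + "]");
            }
        }

        // whatever is still in the set was expected but never returned
        expected.forEach((IntProcedure) id -> System.err.println("Missing id [" + id + "]"));
    }
}

Running this sketch prints an "Extra id [4]" line for the duplicated 4 and "Missing id" lines for 3 and 5 (in hash order).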

Example 2 with IntProcedure

Use of com.carrotsearch.hppc.procedures.IntProcedure in project graphhopper by graphhopper, in the class QueryGraph, method createUncachedEdgeExplorer:

private EdgeExplorer createUncachedEdgeExplorer(EdgeFilter edgeFilter) {
    // Iteration over virtual nodes needs to be thread safe if done from different explorers,
    // so we need to create the mapping on EVERY call!
    // This needs to be a HashMap (and cannot be an array) as we also need to tweak edges for some mainNodes!
    // The more query points we have, the more inefficient this map could be.
    final IntObjectMap<VirtualEdgeIterator> node2EdgeMap = new GHIntObjectHashMap<VirtualEdgeIterator>(queryResults.size() * 3);
    final EdgeExplorer mainExplorer = mainGraph.createEdgeExplorer(edgeFilter);
    final GHIntHashSet towerNodesToChange = new GHIntHashSet(queryResults.size());
    // 1. virtualEdges should also get fresh EdgeIterators on every createEdgeExplorer call!
    for (int i = 0; i < queryResults.size(); i++) {
        // create outgoing edges
        VirtualEdgeIterator virtEdgeIter = new VirtualEdgeIterator(2);
        EdgeIteratorState baseRevEdge = virtualEdges.get(i * 4 + VE_BASE_REV);
        if (edgeFilter.accept(baseRevEdge))
            virtEdgeIter.add(baseRevEdge);
        EdgeIteratorState adjEdge = virtualEdges.get(i * 4 + VE_ADJ);
        if (edgeFilter.accept(adjEdge))
            virtEdgeIter.add(adjEdge);
        int virtNode = mainNodes + i;
        node2EdgeMap.put(virtNode, virtEdgeIter);
        // replace edge list of neighboring tower nodes:
        // add virtual edges only and collect tower nodes where real edges will be added in step 2.
        //
        // base node
        int towerNode = baseRevEdge.getAdjNode();
        if (!isVirtualNode(towerNode)) {
            towerNodesToChange.add(towerNode);
            addVirtualEdges(node2EdgeMap, edgeFilter, true, towerNode, i);
        }
        // adj node
        towerNode = adjEdge.getAdjNode();
        if (!isVirtualNode(towerNode)) {
            towerNodesToChange.add(towerNode);
            addVirtualEdges(node2EdgeMap, edgeFilter, false, towerNode, i);
        }
    }
    // 2. the connected tower nodes from mainGraph need fresh EdgeIterators with possible fakes
    // where 'fresh' means independent of previous call and respecting the edgeFilter
    // -> setup fake iterators of detected tower nodes (virtual edges are already added)
    towerNodesToChange.forEach(new IntProcedure() {

        @Override
        public void apply(int value) {
            fillVirtualEdges(node2EdgeMap, value, mainExplorer);
        }
    });
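    // 3. the returned explorer serves virtual nodes (and re-wired tower nodes) from the per-call map
    //    and falls back to the main graph for everything else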
    return new EdgeExplorer() {

        @Override
        public EdgeIterator setBaseNode(int baseNode) {
            VirtualEdgeIterator iter = node2EdgeMap.get(baseNode);
            if (iter != null)
                return iter.reset();
            return mainExplorer.setBaseNode(baseNode);
        }
    };
}
Also used : GHIntHashSet(com.graphhopper.coll.GHIntHashSet) GHIntObjectHashMap(com.graphhopper.coll.GHIntObjectHashMap) IntProcedure(com.carrotsearch.hppc.procedures.IntProcedure) GHPoint(com.graphhopper.util.shapes.GHPoint)
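
Since IntProcedure declares a single apply(int) method, the anonymous class above can also be written as a lambda or method reference, as in the elasticsearch example. Below is a minimal sketch of the same iterate-and-apply pattern; it uses plain hppc collections rather than GraphHopper's GHIntHashSet wrapper, and the class name TowerNodeCallback and the process(int) callback are hypothetical stand-ins for fillVirtualEdges.

import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.procedures.IntProcedure;

public class TowerNodeCallback {
    public static void main(String[] args) {
        IntHashSet towerNodes = IntHashSet.from(3, 7, 42);

        // anonymous-class form, as in QueryGraph.createUncachedEdgeExplorer
        towerNodes.forEach(new IntProcedure() {
            @Override
            public void apply(int value) {
                process(value);
            }
        });

        // equivalent method-reference form
        towerNodes.forEach((IntProcedure) TowerNodeCallback::process);
    }

    // hypothetical stand-in for fillVirtualEdges(node2EdgeMap, value, mainExplorer)
    private static void process(int towerNode) {
        System.out.println("re-wiring edges for tower node " + towerNode);
    }
}

The cast to IntProcedure in the lambda/method-reference form disambiguates between hppc's procedure- and predicate-based forEach overloads, which is also why the elasticsearch snippet casts its lambda.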

Aggregations

IntProcedure (com.carrotsearch.hppc.procedures.IntProcedure) 2
IntHashSet (com.carrotsearch.hppc.IntHashSet) 1
GHIntHashSet (com.graphhopper.coll.GHIntHashSet) 1
GHIntObjectHashMap (com.graphhopper.coll.GHIntObjectHashMap) 1
GHPoint (com.graphhopper.util.shapes.GHPoint) 1
IOException (java.io.IOException) 1
FileVisitResult (java.nio.file.FileVisitResult) 1
Files (java.nio.file.Files) 1
Path (java.nio.file.Path) 1
SimpleFileVisitor (java.nio.file.SimpleFileVisitor) 1
BasicFileAttributes (java.nio.file.attribute.BasicFileAttributes) 1
ArrayList (java.util.ArrayList) 1
Arrays (java.util.Arrays) 1
Collection (java.util.Collection) 1
List (java.util.List) 1
Optional (java.util.Optional) 1
CountDownLatch (java.util.concurrent.CountDownLatch) 1
ExecutionException (java.util.concurrent.ExecutionException) 1
Semaphore (java.util.concurrent.Semaphore) 1
TimeUnit (java.util.concurrent.TimeUnit) 1