
Example 16 with ClusterService

Use of org.elasticsearch.cluster.ClusterService in project crate by crate.

From the class TableStatsServiceTest, method testNoUpdateIfLocalNodeNotAvailable:

@Test
public void testNoUpdateIfLocalNodeNotAvailable() throws Exception {
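    // Simulate a node that is not yet part of the cluster: localNode() is stubbed to return null.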
    final ClusterService clusterService = mock(ClusterService.class);
    when(clusterService.localNode()).thenReturn(null);
    SQLOperations sqlOperations = mock(SQLOperations.class);
    SQLOperations.Session session = mock(SQLOperations.Session.class);
    when(sqlOperations.createSession(anyString(), any(), anyInt())).thenReturn(session);
    TableStatsService statsService = new TableStatsService(
        Settings.EMPTY,
        threadPool,
        clusterService,
        new TableStats(),
        new NodeSettingsService(Settings.EMPTY),
        sqlOperations);
    statsService.run();
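    // Without a local node, the service must not issue the stats query.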
    Mockito.verify(session, times(0)).sync();
}
Also used: ClusterService (org.elasticsearch.cluster.ClusterService), NodeSettingsService (org.elasticsearch.node.settings.NodeSettingsService), SQLOperations (io.crate.action.sql.SQLOperations), Test (org.junit.Test), CrateUnitTest (io.crate.test.integration.CrateUnitTest)

Example 17 with ClusterService

Use of org.elasticsearch.cluster.ClusterService in project crate by crate.

From the class NestedLoopConsumerTest, method initPlanner:

@Before
public void initPlanner() throws Exception {
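    // NoopClusterService is a test stub: no real cluster is started, which keeps this planner test lightweight.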
    ClusterService clusterService = new NoopClusterService();
    TableStats tableStats = getTableStats();
    e = SQLExecutor.builder(clusterService).enableDefaultTables().setTableStats(tableStats).addDocTable(emptyRoutingTable).build();
    Functions functions = e.functions();
    EvaluatingNormalizer normalizer = EvaluatingNormalizer.functionOnlyNormalizer(functions, ReplaceMode.COPY);
    plannerContext = new Planner.Context(
        e.planner,
        clusterService,
        UUID.randomUUID(),
        new ConsumingPlanner(clusterService, functions, tableStats),
        normalizer,
        new TransactionContext(SessionContext.SYSTEM_SESSION),
        0,
        0);
    consumer = new NestedLoopConsumer(clusterService, functions, tableStats);
}
Also used: NoopClusterService (org.elasticsearch.test.cluster.NoopClusterService), ClusterService (org.elasticsearch.cluster.ClusterService), EvaluatingNormalizer (io.crate.analyze.EvaluatingNormalizer), Before (org.junit.Before)

Example 18 with ClusterService

Use of org.elasticsearch.cluster.ClusterService in project crate by crate.

From the class PartitionedTableConcurrentIntegrationTest, method testSelectWhileShardsAreRelocating:

/**
 * Test depends on 2 data nodes.
 */
@Test
public void testSelectWhileShardsAreRelocating() throws Throwable {
    execute("create table t (name string, p string) " + "clustered into 2 shards " + "partitioned by (p) with (number_of_replicas = 0)");
    ensureYellow();
    execute("insert into t (name, p) values (?, ?)", new Object[][] { new Object[] { "Marvin", "a" }, new Object[] { "Trillian", "a" } });
    execute("refresh table t");
    execute("set global stats.enabled=true");
    final AtomicReference<Throwable> lastThrowable = new AtomicReference<>();
    final CountDownLatch selects = new CountDownLatch(100);
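    // Background thread: keep selecting from the partitioned table while shards are relocating.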
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            while (selects.getCount() > 0) {
                try {
                    execute("select * from t");
                } catch (Throwable t) {
                    // The failed job should have three started operations
                    SQLResponse res = execute("select id from sys.jobs_log where error is not null order by started desc limit 1");
                    if (res.rowCount() > 0) {
                        String id = (String) res.rows()[0][0];
                        res = execute("select count(*) from sys.operations_log where name=? or name = ?and job_id = ?", new Object[] { "collect", "fetchContext", id });
                        if ((long) res.rows()[0][0] < 3) {
                            // set the error only if there were fewer than three attempts
                            lastThrowable.set(t);
                        }
                    }
                } finally {
                    selects.countDown();
                }
            }
        }
    });
    t.start();
    PartitionName partitionName = new PartitionName("t", Collections.singletonList(new BytesRef("a")));
    final String indexName = partitionName.asIndexName();
    ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
    DiscoveryNodes nodes = clusterService.state().nodes();
    List<String> nodeIds = new ArrayList<>(2);
    for (DiscoveryNode node : nodes) {
        if (node.dataNode()) {
            nodeIds.add(node.getId());
        }
    }
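    // Map each data node to the other one, so every reroute swaps the node a shard lives on.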
    final Map<String, String> nodeSwap = new HashMap<>(2);
    nodeSwap.put(nodeIds.get(0), nodeIds.get(1));
    nodeSwap.put(nodeIds.get(1), nodeIds.get(0));
    final CountDownLatch relocations = new CountDownLatch(20);
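    // Background thread: repeatedly move all started shards of the partition between the two data nodes.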
    Thread relocatingThread = new Thread(new Runnable() {

        @Override
        public void run() {
            while (relocations.getCount() > 0) {
                ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().setIndices(indexName).execute().actionGet();
                List<ShardRouting> shardRoutings = clusterStateResponse.getState().routingTable().allShards(indexName);
                ClusterRerouteRequestBuilder clusterRerouteRequestBuilder = admin().cluster().prepareReroute();
                int numMoves = 0;
                for (ShardRouting shardRouting : shardRoutings) {
                    if (shardRouting.currentNodeId() == null) {
                        continue;
                    }
                    if (shardRouting.state() != ShardRoutingState.STARTED) {
                        continue;
                    }
                    String toNode = nodeSwap.get(shardRouting.currentNodeId());
                    clusterRerouteRequestBuilder.add(new MoveAllocationCommand(shardRouting.shardId(), shardRouting.currentNodeId(), toNode));
                    numMoves++;
                }
                if (numMoves > 0) {
                    clusterRerouteRequestBuilder.execute().actionGet();
                    client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
                    relocations.countDown();
                }
            }
        }
    });
    relocatingThread.start();
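    // Wait for 20 relocation rounds and 100 selects, bounded by the request timeout.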
    relocations.await(SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS);
    selects.await(SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS);
    Throwable throwable = lastThrowable.get();
    if (throwable != null) {
        throw throwable;
    }
    t.join();
    relocatingThread.join();
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), ClusterRerouteRequestBuilder (org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder), ClusterStateResponse (org.elasticsearch.action.admin.cluster.state.ClusterStateResponse), MoveAllocationCommand (org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand), AtomicReference (java.util.concurrent.atomic.AtomicReference), SQLResponse (io.crate.testing.SQLResponse), CountDownLatch (java.util.concurrent.CountDownLatch), PartitionName (io.crate.metadata.PartitionName), ClusterService (org.elasticsearch.cluster.ClusterService), ShardRouting (org.elasticsearch.cluster.routing.ShardRouting), BytesRef (org.apache.lucene.util.BytesRef), DiscoveryNodes (org.elasticsearch.cluster.node.DiscoveryNodes), Test (org.junit.Test)
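The reroute loop above is the reusable core of this test. As a minimal sketch, assuming the indexName and nodeSwap built in the test plus a Client handle (the helper name swapStartedShards is illustrative, not part of the test), one relocation round looks like this:

private static void swapStartedShards(Client client, String indexName, Map<String, String> nodeSwap) {
    ClusterStateResponse stateResponse = client.admin().cluster().prepareState().setIndices(indexName).execute().actionGet();
    ClusterRerouteRequestBuilder reroute = client.admin().cluster().prepareReroute();
    int numMoves = 0;
    for (ShardRouting shard : stateResponse.getState().routingTable().allShards(indexName)) {
        // Only assigned, fully started shards can be moved.
        if (shard.currentNodeId() == null || shard.state() != ShardRoutingState.STARTED) {
            continue;
        }
        // Send each shard to the other data node, as recorded in nodeSwap.
        reroute.add(new MoveAllocationCommand(shard.shardId(), shard.currentNodeId(), nodeSwap.get(shard.currentNodeId())));
        numMoves++;
    }
    if (numMoves > 0) {
        reroute.execute().actionGet();
        // Block until relocation has finished before the next round starts.
        client.admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).execute().actionGet();
    }
}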

Example 19 with ClusterService

Use of org.elasticsearch.cluster.ClusterService in project crate by crate.

From the class MapSideDataCollectOperationTest, method testFileUriCollect:

@Test
public void testFileUriCollect() throws Exception {
    ClusterService clusterService = new NoopClusterService();
    Functions functions = getFunctions();
    CollectSourceResolver collectSourceResolver = mock(CollectSourceResolver.class);
    when(collectSourceResolver.getService(any(RoutedCollectPhase.class))).thenReturn(new FileCollectSource(functions, clusterService, Collections.<String, FileInputFactory>emptyMap()));
    MapSideDataCollectOperation collectOperation = new MapSideDataCollectOperation(collectSourceResolver, threadPool);
    File tmpFile = temporaryFolder.newFile("fileUriCollectOperation.json");
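    // Write two JSON lines for the file collector to read back.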
    try (OutputStreamWriter writer = new OutputStreamWriter(new FileOutputStream(tmpFile), StandardCharsets.UTF_8)) {
        writer.write("{\"name\": \"Arthur\", \"id\": 4, \"details\": {\"age\": 38}}\n");
        writer.write("{\"id\": 5, \"name\": \"Trillian\", \"details\": {\"age\": 33}}\n");
    }
    FileUriCollectPhase collectNode = new FileUriCollectPhase(
        UUID.randomUUID(),
        0,
        "test",
        Collections.singletonList("noop_id"),
        Literal.of(Paths.get(tmpFile.toURI()).toUri().toString()),
        Arrays.<Symbol>asList(
            createReference("name", DataTypes.STRING),
            createReference(new ColumnIdent("details", "age"), DataTypes.INTEGER)),
        Collections.emptyList(),
        null,
        false);
    String threadPoolName = JobCollectContext.threadPoolName(collectNode, "noop_id");
    TestingBatchConsumer consumer = new TestingBatchConsumer();
    JobCollectContext jobCollectContext = mock(JobCollectContext.class);
    CrateCollector collector = collectOperation.createCollector(collectNode, consumer, jobCollectContext);
    collectOperation.launchCollector(collector, threadPoolName);
    assertThat(new CollectionBucket(consumer.getResult()), contains(isRow("Arthur", 38), isRow("Trillian", 33)));
}
Also used: CollectSourceResolver (io.crate.operation.collect.sources.CollectSourceResolver), Functions (io.crate.metadata.Functions), FileUriCollectPhase (io.crate.planner.node.dql.FileUriCollectPhase), ColumnIdent (io.crate.metadata.ColumnIdent), NoopClusterService (org.elasticsearch.test.cluster.NoopClusterService), ClusterService (org.elasticsearch.cluster.ClusterService), FileCollectSource (io.crate.operation.collect.sources.FileCollectSource), FileOutputStream (java.io.FileOutputStream), FileInputFactory (io.crate.operation.collect.files.FileInputFactory), OutputStreamWriter (java.io.OutputStreamWriter), TestingBatchConsumer (io.crate.testing.TestingBatchConsumer), File (java.io.File), RoutedCollectPhase (io.crate.planner.node.dql.RoutedCollectPhase), CollectionBucket (io.crate.data.CollectionBucket), Test (org.junit.Test), CrateUnitTest (io.crate.test.integration.CrateUnitTest)

Aggregations

ClusterService (org.elasticsearch.cluster.ClusterService): 19
Test (org.junit.Test): 13
CrateUnitTest (io.crate.test.integration.CrateUnitTest): 11
NoopClusterService (org.elasticsearch.test.cluster.NoopClusterService): 9
ClusterState (org.elasticsearch.cluster.ClusterState): 6
IndexNameExpressionResolver (org.elasticsearch.cluster.metadata.IndexNameExpressionResolver): 5
TableIdent (io.crate.metadata.TableIdent): 4
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 4
AtomicReference (java.util.concurrent.atomic.AtomicReference): 4
ActionListener (org.elasticsearch.action.ActionListener): 4
Settings (org.elasticsearch.common.settings.Settings): 4
ShardId (org.elasticsearch.index.shard.ShardId): 4
Before (org.junit.Before): 4
SQLOperations (io.crate.action.sql.SQLOperations): 3
ShardResponse (io.crate.executor.transport.ShardResponse): 3
ShardUpsertRequest (io.crate.executor.transport.ShardUpsertRequest): 3
Reference (io.crate.metadata.Reference): 3
ReferenceIdent (io.crate.metadata.ReferenceIdent): 3
RowGranularity (io.crate.metadata.RowGranularity): 3
DataTypes (io.crate.types.DataTypes): 3