Use of org.elasticsearch.cluster.ClusterService in project crate by crate.
From the class TableStatsServiceTest, the method testNoUpdateIfLocalNodeNotAvailable:
@Test
public void testNoUpdateIfLocalNodeNotAvailable() throws Exception {
    final ClusterService clusterService = mock(ClusterService.class);
    when(clusterService.localNode()).thenReturn(null);

    SQLOperations sqlOperations = mock(SQLOperations.class);
    SQLOperations.Session session = mock(SQLOperations.Session.class);
    when(sqlOperations.createSession(anyString(), any(), anyInt())).thenReturn(session);

    TableStatsService statsService = new TableStatsService(
        Settings.EMPTY, threadPool, clusterService, new TableStats(),
        new NodeSettingsService(Settings.EMPTY), sqlOperations);

    statsService.run();
    Mockito.verify(session, times(0)).sync();
}
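The test holds because TableStatsService.run() is expected to bail out before any session is used when the local node is not yet available (e.g. during startup). A minimal sketch of such a guard, with the caveat that Crate's actual implementation may structure it differently:

@Override
public void run() {
    if (clusterService.localNode() == null) {
        // Local node not fully started yet; skip this stats round
        // instead of dereferencing a null node. (Hypothetical guard.)
        return;
    }
    // ... create a session via sqlOperations and issue the stats query ...
}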
Use of org.elasticsearch.cluster.ClusterService in project crate by crate.
From the class NestedLoopConsumerTest, the method initPlanner:
@Before
public void initPlanner() throws Exception {
    ClusterService clusterService = new NoopClusterService();
    TableStats tableStats = getTableStats();
    e = SQLExecutor.builder(clusterService)
        .enableDefaultTables()
        .setTableStats(tableStats)
        .addDocTable(emptyRoutingTable)
        .build();
    Functions functions = e.functions();
    EvaluatingNormalizer normalizer = EvaluatingNormalizer.functionOnlyNormalizer(functions, ReplaceMode.COPY);
    plannerContext = new Planner.Context(
        e.planner, clusterService, UUID.randomUUID(),
        new ConsumingPlanner(clusterService, functions, tableStats),
        normalizer, new TransactionContext(SessionContext.SYSTEM_SESSION), 0, 0);
    consumer = new NestedLoopConsumer(clusterService, functions, tableStats);
}
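With the fixture in place, individual tests can feed a join statement through the executor and assert on the resulting plan. A hypothetical sketch; e.plan(...) and the NestedLoop plan class are assumptions about the surrounding test API, not verified Crate internals:

@Test
public void testCrossJoinIsPlannedAsNestedLoop() throws Exception {
    // e.plan(...) is assumed to parse, analyze and plan the statement
    // using the planner context and consumer built in initPlanner().
    Plan plan = e.plan("select u1.name, u2.name from users u1, users u2");
    assertThat(plan, instanceOf(NestedLoop.class));
}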
Use of org.elasticsearch.cluster.ClusterService in project crate by crate.
From the class PartitionedTableConcurrentIntegrationTest, the method testSelectWhileShardsAreRelocating:
/**
* Test depends on 2 data nodes
*/
@Test
public void testSelectWhileShardsAreRelocating() throws Throwable {
    execute("create table t (name string, p string) " +
            "clustered into 2 shards " +
            "partitioned by (p) with (number_of_replicas = 0)");
    ensureYellow();
    execute("insert into t (name, p) values (?, ?)", new Object[][] {
        new Object[] { "Marvin", "a" },
        new Object[] { "Trillian", "a" }
    });
    execute("refresh table t");
    execute("set global stats.enabled=true");

    final AtomicReference<Throwable> lastThrowable = new AtomicReference<>();
    final CountDownLatch selects = new CountDownLatch(100);
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            while (selects.getCount() > 0) {
                try {
                    execute("select * from t");
                } catch (Throwable t) {
                    // The failed job should have three started operations
                    SQLResponse res = execute(
                        "select id from sys.jobs_log where error is not null order by started desc limit 1");
                    if (res.rowCount() > 0) {
                        String id = (String) res.rows()[0][0];
                        res = execute(
                            "select count(*) from sys.operations_log where (name = ? or name = ?) and job_id = ?",
                            new Object[] { "collect", "fetchContext", id });
                        if ((long) res.rows()[0][0] < 3) {
                            // Record the error if there were fewer than three operations
                            lastThrowable.set(t);
                        }
                    }
                } finally {
                    selects.countDown();
                }
            }
        }
    });
    t.start();

    PartitionName partitionName = new PartitionName("t", Collections.singletonList(new BytesRef("a")));
    final String indexName = partitionName.asIndexName();

    ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
    DiscoveryNodes nodes = clusterService.state().nodes();
    List<String> nodeIds = new ArrayList<>(2);
    for (DiscoveryNode node : nodes) {
        if (node.dataNode()) {
            nodeIds.add(node.getId());
        }
    }
    final Map<String, String> nodeSwap = new HashMap<>(2);
    nodeSwap.put(nodeIds.get(0), nodeIds.get(1));
    nodeSwap.put(nodeIds.get(1), nodeIds.get(0));

    final CountDownLatch relocations = new CountDownLatch(20);
    Thread relocatingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            while (relocations.getCount() > 0) {
                ClusterStateResponse clusterStateResponse =
                    admin().cluster().prepareState().setIndices(indexName).execute().actionGet();
                List<ShardRouting> shardRoutings =
                    clusterStateResponse.getState().routingTable().allShards(indexName);

                ClusterRerouteRequestBuilder clusterRerouteRequestBuilder = admin().cluster().prepareReroute();
                int numMoves = 0;
                for (ShardRouting shardRouting : shardRoutings) {
                    if (shardRouting.currentNodeId() == null) {
                        continue;
                    }
                    if (shardRouting.state() != ShardRoutingState.STARTED) {
                        continue;
                    }
                    String toNode = nodeSwap.get(shardRouting.currentNodeId());
                    clusterRerouteRequestBuilder.add(
                        new MoveAllocationCommand(shardRouting.shardId(), shardRouting.currentNodeId(), toNode));
                    numMoves++;
                }
                if (numMoves > 0) {
                    clusterRerouteRequestBuilder.execute().actionGet();
                    client().admin().cluster().prepareHealth()
                        .setWaitForEvents(Priority.LANGUID)
                        .setWaitForRelocatingShards(0)
                        .setTimeout(ACCEPTABLE_RELOCATION_TIME)
                        .execute().actionGet();
                    relocations.countDown();
                }
            }
        }
    });
    relocatingThread.start();

    relocations.await(SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS);
    selects.await(SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS);

    Throwable throwable = lastThrowable.get();
    if (throwable != null) {
        throw throwable;
    }
    t.join();
    relocatingThread.join();
}
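For orientation, the essential reroute-and-wait step inside the relocating thread can be read in isolation; this condensed sketch reuses the same indexName and nodeSwap as above:

// Condensed version of the swap loop: move every started shard to the other data node.
ClusterRerouteRequestBuilder reroute = admin().cluster().prepareReroute();
for (ShardRouting shard : clusterService.state().routingTable().allShards(indexName)) {
    if (shard.state() == ShardRoutingState.STARTED && shard.currentNodeId() != null) {
        reroute.add(new MoveAllocationCommand(
            shard.shardId(), shard.currentNodeId(), nodeSwap.get(shard.currentNodeId())));
    }
}
reroute.execute().actionGet();
// Block until nothing is still relocating before issuing the next swap.
client().admin().cluster().prepareHealth()
    .setWaitForEvents(Priority.LANGUID)
    .setWaitForRelocatingShards(0)
    .execute().actionGet();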
Use of org.elasticsearch.cluster.ClusterService in project crate by crate.
From the class MapSideDataCollectOperationTest, the method testFileUriCollect:
@Test
public void testFileUriCollect() throws Exception {
    ClusterService clusterService = new NoopClusterService();
    Functions functions = getFunctions();
    CollectSourceResolver collectSourceResolver = mock(CollectSourceResolver.class);
    when(collectSourceResolver.getService(any(RoutedCollectPhase.class)))
        .thenReturn(new FileCollectSource(functions, clusterService, Collections.<String, FileInputFactory>emptyMap()));
    MapSideDataCollectOperation collectOperation = new MapSideDataCollectOperation(collectSourceResolver, threadPool);

    File tmpFile = temporaryFolder.newFile("fileUriCollectOperation.json");
    try (OutputStreamWriter writer = new OutputStreamWriter(new FileOutputStream(tmpFile), StandardCharsets.UTF_8)) {
        writer.write("{\"name\": \"Arthur\", \"id\": 4, \"details\": {\"age\": 38}}\n");
        writer.write("{\"id\": 5, \"name\": \"Trillian\", \"details\": {\"age\": 33}}\n");
    }

    FileUriCollectPhase collectNode = new FileUriCollectPhase(
        UUID.randomUUID(), 0, "test", Collections.singletonList("noop_id"),
        Literal.of(Paths.get(tmpFile.toURI()).toUri().toString()),
        Arrays.<Symbol>asList(
            createReference("name", DataTypes.STRING),
            createReference(new ColumnIdent("details", "age"), DataTypes.INTEGER)),
        Collections.emptyList(), null, false);
    String threadPoolName = JobCollectContext.threadPoolName(collectNode, "noop_id");

    TestingBatchConsumer consumer = new TestingBatchConsumer();
    JobCollectContext jobCollectContext = mock(JobCollectContext.class);
    CrateCollector collector = collectOperation.createCollector(collectNode, consumer, jobCollectContext);
    collectOperation.launchCollector(collector, threadPoolName);

    assertThat(new CollectionBucket(consumer.getResult()),
        contains(isRow("Arthur", 38), isRow("Trillian", 33)));
}
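Note that each output row follows the order of the requested references (name, then details['age']), not the key order in the JSON lines; the second line lists id first yet still yields the row ("Trillian", 33). The test drives FileUriCollectPhase directly, but the end-to-end counterpart would look roughly like the statements below (the mapping to COPY FROM is an assumption for orientation, not taken from this test):

// Hypothetical end-to-end counterpart of the direct collect above.
execute("create table t (name string, details object as (age integer))");
execute("copy t from ?", new Object[] { tmpFile.toURI().toString() });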