use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class DiscoveryWithServiceDisruptionsIT method testStaleMasterNotHijackingMajority.
/**
* Test that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes
* that are already following another elected master node. These nodes should reject this cluster state and refuse
* to follow the stale master.
*/
@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.test.disruption:TRACE")
public void testStaleMasterNotHijackingMajority() throws Exception {
// 3 node cluster with unicast discovery and minimum_master_nodes set to 2:
final List<String> nodes = startCluster(3, 2);
// Save the current master node as old master node, because that node will get frozen
final String oldMasterNode = internalCluster().getMasterName();
for (String node : nodes) {
ensureStableCluster(3, node);
}
assertMaster(oldMasterNode, nodes);
// Simulating a painful gc by suspending all threads for a long time on the current elected master node.
SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode);
// Save the majority side
final List<String> majoritySide = new ArrayList<>(nodes);
majoritySide.remove(oldMasterNode);
// Keeps track of the previous and current master when a master node transition took place on each node on the majority side:
final Map<String, List<Tuple<String, String>>> masters = Collections.synchronizedMap(new HashMap<String, List<Tuple<String, String>>>());
for (final String node : majoritySide) {
masters.put(node, new ArrayList<Tuple<String, String>>());
internalCluster().getInstance(ClusterService.class, node).addListener(event -> {
DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode();
DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
if (!Objects.equals(previousMaster, currentMaster)) {
logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), event.previousState());
String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null;
String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null;
masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
}
});
}
final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1);
internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> {
if (event.state().nodes().getMasterNodeId() == null) {
oldMasterNodeSteppedDown.countDown();
}
});
internalCluster().setDisruptionScheme(masterNodeDisruption);
logger.info("freezing node [{}]", oldMasterNode);
masterNodeDisruption.startDisrupting();
// Wait for the majority side to get stable
assertDifferentMaster(majoritySide.get(0), oldMasterNode);
assertDifferentMaster(majoritySide.get(1), oldMasterNode);
// The test periodically trips on the following assertion. To find out which threads are blocking the nodes from
// making progress, we print a stack dump.
boolean failed = true;
try {
assertDiscoveryCompleted(majoritySide);
failed = false;
} finally {
if (failed) {
logger.error("discovery failed to complete, probably caused by a blocked thread: {}", new HotThreads().busiestThreads(Integer.MAX_VALUE).ignoreIdleThreads(false).detect());
}
}
// The old master node is frozen, so the cluster state update task submitted here is not executed now;
// it is queued and only runs once the old master node unfreezes.
// The old master node will then send this update, plus the cluster state in which it is still flagged as master,
// to the other nodes that follow the new master. These nodes should ignore this update.
internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return ClusterState.builder(currentState).build();
}
@Override
public void onFailure(String source, Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure [{}]", source), e);
}
});
// Save the new elected master node
final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0));
logger.info("new detected master node [{}]", newMasterNode);
// Stop disruption
logger.info("Unfreeze node [{}]", oldMasterNode);
masterNodeDisruption.stopDisrupting();
oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS);
// Make sure that the end state is consistent on all nodes:
assertDiscoveryCompleted(nodes);
assertMaster(newMasterNode, nodes);
assertThat(masters.size(), equalTo(2));
for (Map.Entry<String, List<Tuple<String, String>>> entry : masters.entrySet()) {
String nodeName = entry.getKey();
List<Tuple<String, String>> recordedMasterTransition = entry.getValue();
assertThat("[" + nodeName + "] Each node should only record two master node transitions", recordedMasterTransition.size(), equalTo(2));
assertThat("[" + nodeName + "] First transition's previous master should be [null]", recordedMasterTransition.get(0).v1(), equalTo(oldMasterNode));
assertThat("[" + nodeName + "] First transition's current master should be [" + newMasterNode + "]", recordedMasterTransition.get(0).v2(), nullValue());
assertThat("[" + nodeName + "] Second transition's previous master should be [null]", recordedMasterTransition.get(1).v1(), nullValue());
assertThat("[" + nodeName + "] Second transition's current master should be [" + newMasterNode + "]", recordedMasterTransition.get(1).v2(), equalTo(newMasterNode));
}
}
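The transition records above are created with new Tuple<>(previousMasterNodeName, currentMasterNodeName) and read back through v1() and v2(). As a rough sketch of the small surface of org.elasticsearch.common.collect.Tuple that these examples rely on (not the complete class, which also defines equals, hashCode and toString), the type behaves like this:

public class Tuple<V1, V2> {
    public static <V1, V2> Tuple<V1, V2> tuple(V1 v1, V2 v2) {
        return new Tuple<>(v1, v2);
    }
    private final V1 v1;
    private final V2 v2;
    public Tuple(V1 v1, V2 v2) {
        this.v1 = v1;
        this.v2 = v2;
    }
    // first element of the pair
    public V1 v1() {
        return v1;
    }
    // second element of the pair
    public V2 v2() {
        return v2;
    }
}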
use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class ClusterServiceTests method testClusterStateBatchedUpdates.
public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
AtomicInteger counter = new AtomicInteger();
class Task {
private AtomicBoolean state = new AtomicBoolean();
private final int id;
Task(int id) {
this.id = id;
}
public void execute() {
if (!state.compareAndSet(false, true)) {
throw new IllegalStateException();
} else {
counter.incrementAndGet();
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Task task = (Task) o;
return id == task.id;
}
@Override
public int hashCode() {
return id;
}
@Override
public String toString() {
return Integer.toString(id);
}
}
int numberOfThreads = randomIntBetween(2, 8);
int taskSubmissionsPerThread = randomIntBetween(1, 64);
int numberOfExecutors = Math.max(1, numberOfThreads / 4);
final Semaphore semaphore = new Semaphore(numberOfExecutors);
class TaskExecutor implements ClusterStateTaskExecutor<Task> {
private final List<Set<Task>> taskGroups;
private AtomicInteger counter = new AtomicInteger();
private AtomicInteger batches = new AtomicInteger();
private AtomicInteger published = new AtomicInteger();
TaskExecutor(List<Set<Task>> taskGroups) {
this.taskGroups = taskGroups;
}
@Override
public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
for (Set<Task> expectedSet : taskGroups) {
long count = tasks.stream().filter(expectedSet::contains).count();
assertThat("batched set should be executed together or not at all. Expected " + expectedSet + "s. Executing " + tasks, count, anyOf(equalTo(0L), equalTo((long) expectedSet.size())));
}
tasks.forEach(Task::execute);
counter.addAndGet(tasks.size());
ClusterState maybeUpdatedClusterState = currentState;
if (randomBoolean()) {
maybeUpdatedClusterState = ClusterState.builder(currentState).build();
batches.incrementAndGet();
semaphore.acquire();
}
return ClusterTasksResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
}
@Override
public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
published.incrementAndGet();
semaphore.release();
}
}
ConcurrentMap<String, AtomicInteger> processedStates = new ConcurrentHashMap<>();
List<Set<Task>> taskGroups = new ArrayList<>();
List<TaskExecutor> executors = new ArrayList<>();
for (int i = 0; i < numberOfExecutors; i++) {
executors.add(new TaskExecutor(taskGroups));
}
// randomly assign tasks to executors
List<Tuple<TaskExecutor, Set<Task>>> assignments = new ArrayList<>();
int taskId = 0;
for (int i = 0; i < numberOfThreads; i++) {
for (int j = 0; j < taskSubmissionsPerThread; j++) {
TaskExecutor executor = randomFrom(executors);
Set<Task> tasks = new HashSet<>();
for (int t = randomInt(3); t >= 0; t--) {
tasks.add(new Task(taskId++));
}
taskGroups.add(tasks);
assignments.add(Tuple.tuple(executor, tasks));
}
}
Map<TaskExecutor, Integer> counts = new HashMap<>();
int totalTaskCount = 0;
for (Tuple<TaskExecutor, Set<Task>> assignment : assignments) {
final int taskCount = assignment.v2().size();
counts.merge(assignment.v1(), taskCount, (previous, count) -> previous + count);
totalTaskCount += taskCount;
}
final CountDownLatch updateLatch = new CountDownLatch(totalTaskCount);
final ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
fail(ExceptionsHelper.detailedMessage(e));
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
processedStates.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
updateLatch.countDown();
}
};
final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
Thread thread = new Thread(() -> {
final String threadName = Thread.currentThread().getName();
try {
barrier.await();
for (int j = 0; j < taskSubmissionsPerThread; j++) {
Tuple<TaskExecutor, Set<Task>> assignment = assignments.get(index * taskSubmissionsPerThread + j);
final Set<Task> tasks = assignment.v2();
submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size());
final TaskExecutor executor = assignment.v1();
if (tasks.size() == 1) {
clusterService.submitStateUpdateTask(threadName, tasks.stream().findFirst().get(), ClusterStateTaskConfig.build(randomFrom(Priority.values())), executor, listener);
} else {
Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
tasks.stream().forEach(t -> taskListeners.put(t, listener));
clusterService.submitStateUpdateTasks(threadName, taskListeners, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executor);
}
}
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
});
thread.start();
}
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
// wait until all the cluster state updates have been processed
updateLatch.await();
// and until all of the publication callbacks have completed
semaphore.acquire(numberOfExecutors);
// assert the number of executed tasks is correct
assertEquals(totalTaskCount, counter.get());
// assert each executor executed the correct number of tasks
for (TaskExecutor executor : executors) {
if (counts.containsKey(executor)) {
assertEquals((int) counts.get(executor), executor.counter.get());
assertEquals(executor.batches.get(), executor.published.get());
}
}
// assert the correct number of clusterStateProcessed events were triggered
for (Map.Entry<String, AtomicInteger> entry : processedStates.entrySet()) {
assertThat(submittedTasksPerThread, hasKey(entry.getKey()));
assertEquals("not all tasks submitted by " + entry.getKey() + " received a processed event", entry.getValue().get(), submittedTasksPerThread.get(entry.getKey()).get());
}
}
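The per-executor totals in the test above are built by folding the Tuple assignments into a map with Map.merge. As a stand-alone illustration of that idiom outside the test framework (the executor names and counts below are made up for illustration):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.elasticsearch.common.collect.Tuple;

public class MergeCountsExample {
    public static void main(String[] args) {
        // hypothetical (executor name, task count) pairs, analogous to the assignments list above
        List<Tuple<String, Integer>> assignments = Arrays.asList(
                Tuple.tuple("executor-a", 3),
                Tuple.tuple("executor-b", 1),
                Tuple.tuple("executor-a", 2));
        Map<String, Integer> counts = new HashMap<>();
        for (Tuple<String, Integer> assignment : assignments) {
            // add this assignment's count to the running total for its executor, starting from the value itself if absent
            counts.merge(assignment.v1(), assignment.v2(), (previous, count) -> previous + count);
        }
        System.out.println(counts); // executor-a -> 5, executor-b -> 1 (iteration order not guaranteed)
    }
}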
use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class ClusterServiceTests method testClusterStateUpdateTasksAreExecutedInOrder.
// test that for a single thread, tasks are executed in the order
// that they are submitted
public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException {
class TaskExecutor implements ClusterStateTaskExecutor<Integer> {
List<Integer> tasks = new ArrayList<>();
@Override
public ClusterTasksResult<Integer> execute(ClusterState currentState, List<Integer> tasks) throws Exception {
this.tasks.addAll(tasks);
return ClusterTasksResult.<Integer>builder().successes(tasks).build(ClusterState.builder(currentState).build());
}
}
int numberOfThreads = randomIntBetween(2, 8);
TaskExecutor[] executors = new TaskExecutor[numberOfThreads];
for (int i = 0; i < numberOfThreads; i++) {
executors[i] = new TaskExecutor();
}
int tasksSubmittedPerThread = randomIntBetween(2, 1024);
CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>();
CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure: [{}]", source), e);
failures.add(new Tuple<>(source, e));
updateLatch.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
updateLatch.countDown();
}
};
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
Thread thread = new Thread(() -> {
try {
barrier.await();
for (int j = 0; j < tasksSubmittedPerThread; j++) {
clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener);
}
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
throw new AssertionError(e);
}
});
thread.start();
}
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
updateLatch.await();
assertThat(failures, empty());
for (int i = 0; i < numberOfThreads; i++) {
assertEquals(tasksSubmittedPerThread, executors[i].tasks.size());
for (int j = 0; j < tasksSubmittedPerThread; j++) {
assertNotNull(executors[i].tasks.get(j));
assertEquals("cluster state update task executed out of order", j, (int) executors[i].tasks.get(j));
}
}
}
use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class AbstractQueryTestCase method alterateQueries.
/**
 * Traverses the json tree of the valid query provided as argument and mutates it one or more times by adding one object within each
 * object encountered.
 *
 * For instance given the following valid term query:
 * {
 *     "term" : {
 *         "field" : {
 *             "value" : "foo"
 *         }
 *     }
 * }
 *
 * The following two mutations will be generated, and an exception is expected when trying to parse them:
 * {
 *     "term" : {
 *         "newField" : {
 *             "field" : {
 *                 "value" : "foo"
 *             }
 *         }
 *     }
 * }
 *
 * {
 *     "term" : {
 *         "field" : {
 *             "newField" : {
 *                 "value" : "foo"
 *             }
 *         }
 *     }
 * }
 *
 * Every mutation is then added to the list of results with a boolean flag indicating if a parsing exception is expected or not
 * for the mutation. Some specific objects do not cause any exception as they can hold arbitrary content; they are passed using the
 * arbitraryMarkers parameter.
 */
static List<Tuple<String, Boolean>> alterateQueries(Set<String> queries, Set<String> arbitraryMarkers) throws IOException {
List<Tuple<String, Boolean>> results = new ArrayList<>();
// Indicate if a part of the query can hold any arbitrary content
boolean hasArbitraryContent = (arbitraryMarkers != null && arbitraryMarkers.isEmpty() == false);
for (String query : queries) {
// Track the number of query mutations
int mutation = 0;
while (true) {
boolean expectException = true;
BytesStreamOutput out = new BytesStreamOutput();
try (XContentGenerator generator = XContentType.JSON.xContent().createGenerator(out);
XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, query)) {
int objectIndex = -1;
Deque<String> levels = new LinkedList<>();
// Parse the valid query and insert a new object level called "newField"
XContentParser.Token token;
while ((token = parser.nextToken()) != null) {
if (token == XContentParser.Token.START_OBJECT) {
objectIndex++;
levels.addLast(parser.currentName());
if (objectIndex == mutation) {
// We reached the place in the object tree where we want to insert a new object level
generator.writeStartObject();
generator.writeFieldName("newField");
XContentHelper.copyCurrentStructure(generator, parser);
generator.writeEndObject();
if (hasArbitraryContent) {
// If the insertion point is one (or a child) of the arbitrary content markers, no exception is expected when parsing the mutated query.
for (String marker : arbitraryMarkers) {
if (levels.contains(marker)) {
expectException = false;
break;
}
}
}
// Jump to next token
continue;
}
} else if (token == XContentParser.Token.END_OBJECT) {
levels.removeLast();
}
// We are walking through the object tree, so we can safely copy the current node
XContentHelper.copyCurrentEvent(generator, parser);
}
if (objectIndex < mutation) {
// We did not reach the insertion point, so there are no more mutations to try
break;
} else {
// We reached the expected insertion point, so next time we'll try one step further
mutation++;
}
}
results.add(new Tuple<>(out.bytes().utf8ToString(), expectException));
}
}
return results;
}
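A hedged sketch of how a test might consume the returned tuples follows; the query string, the marker name and the parsing step are made up for illustration, and the snippet assumes it runs inside a test method of the same class that declares throws IOException:

// Hypothetical usage: one valid query plus one marker naming an object that accepts arbitrary content.
Set<String> queries = Collections.singleton("{ \"term\" : { \"field\" : { \"value\" : \"foo\" } } }");
Set<String> arbitraryMarkers = Collections.singleton("params"); // assumed marker name, for illustration only
for (Tuple<String, Boolean> alteration : alterateQueries(queries, arbitraryMarkers)) {
    String mutatedQuery = alteration.v1();
    boolean expectException = alteration.v2();
    // a caller would now attempt to parse mutatedQuery and assert that a parsing
    // exception is thrown exactly when expectException is true
}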
use of org.elasticsearch.common.collect.Tuple in project elasticsearch-river-neo4j by sksamuel.
the class InternalSettingsPerparer method prepareSettings.
public static Tuple<Settings, Environment> prepareSettings(Settings pSettings, boolean loadConfigSettings) {
// ignore these prefixes when getting properties from the es. and elasticsearch. namespaces
String[] ignorePrefixes = new String[] { "es.default.", "elasticsearch.default." };
// just create enough settings to build the environment
ImmutableSettings.Builder settingsBuilder = settingsBuilder()
        .put(pSettings)
        .putProperties("elasticsearch.default.", System.getProperties())
        .putProperties("es.default.", System.getProperties())
        .putProperties("elasticsearch.", System.getProperties(), ignorePrefixes)
        .putProperties("es.", System.getProperties(), ignorePrefixes)
        .replacePropertyPlaceholders();
Environment environment = new Environment(settingsBuilder.build());
if (loadConfigSettings) {
boolean loadFromEnv = true;
// if the default config is specified explicitly, load it, but also still load from the environment
if (System.getProperty("es.default.config") != null) {
loadFromEnv = true;
settingsBuilder.loadFromUrl(environment.resolveConfig(System.getProperty("es.default.config")));
}
// if explicit, just load it and don't load from env
if (System.getProperty("es.config") != null) {
loadFromEnv = false;
settingsBuilder.loadFromUrl(environment.resolveConfig(System.getProperty("es.config")));
}
if (System.getProperty("elasticsearch.config") != null) {
loadFromEnv = false;
settingsBuilder.loadFromUrl(environment.resolveConfig(System.getProperty("elasticsearch.config")));
}
if (loadFromEnv) {
try {
settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.yml"));
} catch (FailedToResolveConfigException e) {
// ignore
} catch (NoClassDefFoundError e) {
// ignore, no yaml
}
try {
settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.json"));
} catch (FailedToResolveConfigException e) {
// ignore
}
try {
settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.properties"));
} catch (FailedToResolveConfigException e) {
// ignore
}
}
}
settingsBuilder.put(pSettings)
        .putProperties("elasticsearch.", System.getProperties(), ignorePrefixes)
        .putProperties("es.", System.getProperties(), ignorePrefixes)
        .replacePropertyPlaceholders();
// generate the name
if (settingsBuilder.get("name") == null) {
String name = System.getProperty("name");
if (name == null || name.isEmpty()) {
name = settingsBuilder.get("node.name");
if (name == null || name.isEmpty()) {
name = Names.randomNodeName(environment.resolveConfig("names.txt"));
}
}
if (name != null) {
settingsBuilder.put("name", name);
}
}
// put the cluster name
if (settingsBuilder.get(ClusterName.SETTING) == null) {
settingsBuilder.put(ClusterName.SETTING, ClusterName.DEFAULT.value());
}
Settings v1 = settingsBuilder.build();
environment = new Environment(v1);
// put back the env settings
settingsBuilder = settingsBuilder().put(v1);
// we put back the path.logs so we can use it in the logging configuration file
settingsBuilder.put("path.logs", cleanPath(environment.logsFile().getAbsolutePath()));
v1 = settingsBuilder.build();
return new Tuple<Settings, Environment>(v1, environment);
}
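A minimal sketch of how a caller typically unpacks the returned pair (the surrounding node or plugin bootstrap code is assumed and not shown; the empty input settings are only an example):

// assumed caller-side snippet: unpack the settings and environment produced above
Tuple<Settings, Environment> initialSettings = InternalSettingsPerparer.prepareSettings(ImmutableSettings.settingsBuilder().build(), true);
Settings settings = initialSettings.v1();       // fully resolved node settings
Environment environment = initialSettings.v2(); // environment built from those settings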