Use of org.elasticsearch.cluster.ClusterChangedEvent in project crate by crate.
The class PublicationTransportHandlerTests, method testDiffSerializationFailure:
public void testDiffSerializationFailure() {
    DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(
        Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), random());
    final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    final DiscoveryNode localNode = new DiscoveryNode("localNode", buildNewFakeTransportAddress(), Version.CURRENT);
    final TransportService transportService = new CapturingTransport().createTransportService(
        Settings.EMPTY, deterministicTaskQueue.getThreadPool(), x -> localNode, clusterSettings);
    final PublicationTransportHandler handler = new PublicationTransportHandler(
        transportService, writableRegistry(), pu -> null, (pu, l) -> {
    });
    transportService.start();
    transportService.acceptIncomingRequests();
    final DiscoveryNode otherNode = new DiscoveryNode("otherNode", buildNewFakeTransportAddress(), Version.CURRENT);
    final ClusterState clusterState = CoordinationStateTests.clusterState(
        2L, 1L,
        DiscoveryNodes.builder().add(localNode).add(otherNode).localNodeId(localNode.getId()).build(),
        VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L);
    final ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.stateUUID(), clusterState) {
        @Override
        public Diff<ClusterState> diff(ClusterState previousState) {
            return new Diff<ClusterState>() {
                @Override
                public ClusterState apply(ClusterState part) {
                    fail("this diff shouldn't be applied");
                    return part;
                }

                @Override
                public void writeTo(StreamOutput out) throws IOException {
                    throw new IOException("Simulated failure of diff serialization");
                }
            };
        }
    };
    ElasticsearchException e = expectThrows(ElasticsearchException.class,
        () -> handler.newPublicationContext(new ClusterChangedEvent("test", unserializableClusterState, clusterState)));
    assertNotNull(e.getCause());
    assertThat(e.getCause(), instanceOf(IOException.class));
    assertThat(e.getCause().getMessage(), containsString("Simulated failure of diff serialization"));
}
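For context, a ClusterChangedEvent is just a (source, new state, previous state) triple, which is why the test can hand it an unserializable new state. As a minimal sketch (a hypothetical helper, not part of the test above), and assuming the standard accessors on org.elasticsearch.cluster.ClusterChangedEvent, a consumer would inspect the delta roughly like this:

// Hypothetical helper, for illustration only; not from the crate sources.
static void describeEvent(ClusterChangedEvent event) {
    // source() is the string passed to the constructor ("test" above).
    System.out.println("source: " + event.source());
    // state() is the new cluster state, previousState() the one it replaces.
    System.out.println("version: " + event.previousState().version() + " -> " + event.state().version());
    // Convenience checks let consumers skip work when nothing relevant changed.
    if (event.nodesChanged()) {
        System.out.println("added: " + event.nodesDelta().addedNodes());
        System.out.println("removed: " + event.nodesDelta().removedNodes());
    }
    System.out.println("routing table changed: " + event.routingTableChanged());
}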
Use of org.elasticsearch.cluster.ClusterChangedEvent in project crate by crate.
The class MasterServiceTests, method testClusterStateBatchedUpdates:
public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
    AtomicInteger counter = new AtomicInteger();
    class Task {
        private AtomicBoolean state = new AtomicBoolean();
        private final int id;

        Task(int id) {
            this.id = id;
        }

        public void execute() {
            if (!state.compareAndSet(false, true)) {
                throw new IllegalStateException();
            } else {
                counter.incrementAndGet();
            }
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            Task task = (Task) o;
            return id == task.id;
        }

        @Override
        public int hashCode() {
            return id;
        }

        @Override
        public String toString() {
            return Integer.toString(id);
        }
    }
    int numberOfThreads = randomIntBetween(2, 8);
    int taskSubmissionsPerThread = randomIntBetween(1, 64);
    int numberOfExecutors = Math.max(1, numberOfThreads / 4);
    final Semaphore semaphore = new Semaphore(numberOfExecutors);
    class TaskExecutor implements ClusterStateTaskExecutor<Task> {
        private final List<Set<Task>> taskGroups;
        private AtomicInteger counter = new AtomicInteger();
        private AtomicInteger batches = new AtomicInteger();
        private AtomicInteger published = new AtomicInteger();

        TaskExecutor(List<Set<Task>> taskGroups) {
            this.taskGroups = taskGroups;
        }

        @Override
        public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
            for (Set<Task> expectedSet : taskGroups) {
                long count = tasks.stream().filter(expectedSet::contains).count();
                assertThat("batched set should be executed together or not at all. Expected " + expectedSet + "s. Executing " + tasks,
                    count, anyOf(equalTo(0L), equalTo((long) expectedSet.size())));
            }
            tasks.forEach(Task::execute);
            counter.addAndGet(tasks.size());
            ClusterState maybeUpdatedClusterState = currentState;
            if (randomBoolean()) {
                maybeUpdatedClusterState = ClusterState.builder(currentState).build();
                batches.incrementAndGet();
                semaphore.acquire();
            }
            return ClusterTasksResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
        }

        @Override
        public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
            published.incrementAndGet();
            semaphore.release();
        }
    }
    ConcurrentMap<String, AtomicInteger> processedStates = new ConcurrentHashMap<>();
    List<Set<Task>> taskGroups = new ArrayList<>();
    List<TaskExecutor> executors = new ArrayList<>();
    for (int i = 0; i < numberOfExecutors; i++) {
        executors.add(new TaskExecutor(taskGroups));
    }
    // randomly assign tasks to executors
    List<Tuple<TaskExecutor, Set<Task>>> assignments = new ArrayList<>();
    int taskId = 0;
    for (int i = 0; i < numberOfThreads; i++) {
        for (int j = 0; j < taskSubmissionsPerThread; j++) {
            TaskExecutor executor = randomFrom(executors);
            Set<Task> tasks = new HashSet<>();
            for (int t = randomInt(3); t >= 0; t--) {
                tasks.add(new Task(taskId++));
            }
            taskGroups.add(tasks);
            assignments.add(Tuple.tuple(executor, tasks));
        }
    }
    Map<TaskExecutor, Integer> counts = new HashMap<>();
    int totalTaskCount = 0;
    for (Tuple<TaskExecutor, Set<Task>> assignment : assignments) {
        final int taskCount = assignment.v2().size();
        counts.merge(assignment.v1(), taskCount, (previous, count) -> previous + count);
        totalTaskCount += taskCount;
    }
    final CountDownLatch updateLatch = new CountDownLatch(totalTaskCount);
    final ClusterStateTaskListener listener = new ClusterStateTaskListener() {
        @Override
        public void onFailure(String source, Exception e) {
            throw new AssertionError(e);
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            processedStates.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
            updateLatch.countDown();
        }
    };
    try (MasterService masterService = createMasterService(true)) {
        final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
        CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
        for (int i = 0; i < numberOfThreads; i++) {
            final int index = i;
            Thread thread = new Thread(() -> {
                final String threadName = Thread.currentThread().getName();
                try {
                    barrier.await();
                    for (int j = 0; j < taskSubmissionsPerThread; j++) {
                        Tuple<TaskExecutor, Set<Task>> assignment = assignments.get(index * taskSubmissionsPerThread + j);
                        final Set<Task> tasks = assignment.v2();
                        submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size());
                        final TaskExecutor executor = assignment.v1();
                        if (tasks.size() == 1) {
                            masterService.submitStateUpdateTask(threadName, tasks.stream().findFirst().get(),
                                ClusterStateTaskConfig.build(randomFrom(Priority.values())), executor, listener);
                        } else {
                            Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
                            tasks.forEach(t -> taskListeners.put(t, listener));
                            masterService.submitStateUpdateTasks(threadName, taskListeners,
                                ClusterStateTaskConfig.build(randomFrom(Priority.values())), executor);
                        }
                    }
                    barrier.await();
                } catch (BrokenBarrierException | InterruptedException e) {
                    throw new AssertionError(e);
                }
            });
            thread.start();
        }
        // wait for all threads to be ready
        barrier.await();
        // wait for all threads to finish
        barrier.await();
        // wait until all the cluster state updates have been processed
        updateLatch.await();
        // and until all of the publication callbacks have completed
        semaphore.acquire(numberOfExecutors);
        // assert the number of executed tasks is correct
        assertEquals(totalTaskCount, counter.get());
        // assert each executor executed the correct number of tasks
        for (TaskExecutor executor : executors) {
            if (counts.containsKey(executor)) {
                assertEquals((int) counts.get(executor), executor.counter.get());
                assertEquals(executor.batches.get(), executor.published.get());
            }
        }
        // assert the correct number of clusterStateProcessed events were triggered
        for (Map.Entry<String, AtomicInteger> entry : processedStates.entrySet()) {
            assertThat(submittedTasksPerThread, hasKey(entry.getKey()));
            assertEquals("not all tasks submitted by " + entry.getKey() + " received a processed event",
                entry.getValue().get(), submittedTasksPerThread.get(entry.getKey()).get());
        }
    }
}
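Stripped of the randomization and bookkeeping, the executor contract the test exercises is small. A minimal sketch (hypothetical, using plain String tasks instead of the local Task class) looks like this:

// Minimal sketch of the ClusterStateTaskExecutor contract shown above.
class NoopExecutor implements ClusterStateTaskExecutor<String> {

    @Override
    public ClusterTasksResult<String> execute(ClusterState currentState, List<String> tasks) {
        // The whole batch succeeds; returning the same state instance means
        // "no change", so no publication (and no clusterStatePublished call) follows.
        return ClusterTasksResult.<String>builder().successes(tasks).build(currentState);
    }

    @Override
    public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
        // Called once per batch whose execute() returned a new state, after that
        // state has been published; this is the hook the test's semaphore and
        // batches/published counters verify.
    }
}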
Use of org.elasticsearch.cluster.ClusterChangedEvent in project crate by crate.
The class MasterServiceTests, method testAcking:
public void testAcking() throws InterruptedException {
    final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
    final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
    final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
    try (MasterService masterService = new MasterService(
            Settings.builder()
                .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
                .put(Node.NODE_NAME_SETTING.getKey(), "test_node")
                .build(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            threadPool)) {
        final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
            .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).localNodeId(node1.getId()).masterNodeId(node1.getId()))
            .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
            .build();
        final AtomicReference<ClusterStatePublisher> publisherRef = new AtomicReference<>();
        masterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al));
        masterService.setClusterStateSupplier(() -> initialClusterState);
        masterService.start();
        // check that we don't time out before even committing the cluster state
        {
            final CountDownLatch latch = new CountDownLatch(1);
            publisherRef.set((clusterChangedEvent, publishListener, ackListener) ->
                publishListener.onFailure(new FailedToCommitClusterStateException("mock exception")));
            masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    return ClusterState.builder(currentState).build();
                }

                @Override
                public TimeValue ackTimeout() {
                    return TimeValue.ZERO;
                }

                @Override
                public TimeValue timeout() {
                    return null;
                }

                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    fail();
                }

                @Override
                protected Void newResponse(boolean acknowledged) {
                    fail();
                    return null;
                }

                @Override
                public void onFailure(String source, Exception e) {
                    latch.countDown();
                }

                @Override
                public void onAckTimeout() {
                    fail();
                }
            });
            latch.await();
        }
        // check that we time out if the commit took too long
        {
            final CountDownLatch latch = new CountDownLatch(2);
            final TimeValue ackTimeout = TimeValue.timeValueMillis(randomInt(100));
            publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> {
                publishListener.onResponse(null);
                ackListener.onCommit(TimeValue.timeValueMillis(ackTimeout.millis() + randomInt(100)));
                ackListener.onNodeAck(node1, null);
                ackListener.onNodeAck(node2, null);
                ackListener.onNodeAck(node3, null);
            });
            masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    return ClusterState.builder(currentState).build();
                }

                @Override
                public TimeValue ackTimeout() {
                    return ackTimeout;
                }

                @Override
                public TimeValue timeout() {
                    return null;
                }

                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    latch.countDown();
                }

                @Override
                protected Void newResponse(boolean acknowledged) {
                    fail();
                    return null;
                }

                @Override
                public void onFailure(String source, Exception e) {
                    fail();
                }

                @Override
                public void onAckTimeout() {
                    latch.countDown();
                }
            });
            latch.await();
        }
    }
}
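Neither publisher above takes the happy path: the first fails the commit, the second acks only after the ack timeout has elapsed. For contrast, a well-behaved publisher under the same ClusterStatePublisher shape (a hypothetical sketch, not part of the test) would commit immediately and ack every node in the event's new state:

// Hypothetical happy-path publisher: commit at once, then ack each node.
publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> {
    publishListener.onResponse(null);
    ackListener.onCommit(TimeValue.ZERO);
    // DiscoveryNodes is iterable, so ack every node with a null (success) exception.
    for (DiscoveryNode node : clusterChangedEvent.state().nodes()) {
        ackListener.onNodeAck(node, null);
    }
});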
Use of org.elasticsearch.cluster.ClusterChangedEvent in project crate by crate.
The class BlobStoreTestUtil, method mockClusterService:
private static ClusterService mockClusterService(ClusterState initialState) {
    final ThreadPool threadPool = mock(ThreadPool.class);
    when(threadPool.executor(ThreadPool.Names.SNAPSHOT)).thenReturn(new SameThreadExecutorService());
    when(threadPool.generic()).thenReturn(new SameThreadExecutorService());
    final ClusterService clusterService = mock(ClusterService.class);
    final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class);
    when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService);
    final AtomicReference<ClusterState> currentState = new AtomicReference<>(initialState);
    when(clusterService.state()).then(invocationOnMock -> currentState.get());
    final List<ClusterStateApplier> appliers = new CopyOnWriteArrayList<>();
    doAnswer(invocation -> {
        final ClusterStateUpdateTask task = ((ClusterStateUpdateTask) invocation.getArguments()[1]);
        final ClusterState current = currentState.get();
        final ClusterState next = task.execute(current);
        currentState.set(next);
        appliers.forEach(applier -> applier.applyClusterState(
            new ClusterChangedEvent((String) invocation.getArguments()[0], next, current)));
        task.clusterStateProcessed((String) invocation.getArguments()[0], current, next);
        return null;
    }).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class));
    doAnswer(invocation -> {
        appliers.add((ClusterStateApplier) invocation.getArguments()[0]);
        return null;
    }).when(clusterService).addStateApplier(any(ClusterStateApplier.class));
    when(clusterApplierService.threadPool()).thenReturn(threadPool);
    return clusterService;
}
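Because this mock invokes every registered applier synchronously inside submitStateUpdateTask, a test can observe each transition with a plain ClusterStateApplier. A minimal sketch (hypothetical, assuming the single-method ClusterStateApplier functional interface):

// Hypothetical applier: receives each ClusterChangedEvent the mock constructs.
ClusterStateApplier applier = event -> {
    // previousState() is the state before task.execute(), state() the one after.
    assert event.state().version() >= event.previousState().version();
};
clusterService.addStateApplier(applier);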