use of org.elasticsearch.cluster.ClusterStateUpdateTask in project crate by crate.
the class MasterServiceTests method testMasterAwareExecution.
public void testMasterAwareExecution() throws Exception {
    final MasterService nonMaster = createMasterService(false);
    final boolean[] taskFailed = { false };
    final CountDownLatch latch1 = new CountDownLatch(1);
    // A plain ClusterStateUpdateTask must be rejected on a non-master node:
    // execute() should never run, and onFailure() should be invoked instead.
    nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() {

        @Override
        public ClusterState execute(ClusterState currentState) {
            latch1.countDown();
            return currentState;
        }

        @Override
        public void onFailure(String source, Exception e) {
            taskFailed[0] = true;
            latch1.countDown();
        }
    });
    latch1.await();
    assertTrue("cluster state update task was executed on a non-master", taskFailed[0]);
    final CountDownLatch latch2 = new CountDownLatch(1);
    // A LocalClusterUpdateTask, by contrast, runs locally even without a master.
    nonMaster.submitStateUpdateTask("test", new LocalClusterUpdateTask() {

        @Override
        public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
            taskFailed[0] = false;
            latch2.countDown();
            return unchanged();
        }

        @Override
        public void onFailure(String source, Exception e) {
            taskFailed[0] = true;
            latch2.countDown();
        }
    });
    latch2.await();
    assertFalse("non-master cluster state update task was not executed", taskFailed[0]);
    nonMaster.close();
}
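The flag-plus-latch idiom above is how these tests observe a task's outcome synchronously. As a reference, here is a minimal, self-contained variant of that idiom (a hypothetical helper, not part of the test above; it assumes only the MasterService and ClusterStateUpdateTask calls already shown, plus java.util.concurrent.CountDownLatch and java.util.concurrent.atomic.AtomicBoolean):

// Hypothetical helper distilled from the test above: submit a no-op task
// and report whether it was rejected rather than executed.
static boolean submitNoOpAndCheckRejected(MasterService service) throws InterruptedException {
    final AtomicBoolean rejected = new AtomicBoolean();
    final CountDownLatch done = new CountDownLatch(1);
    service.submitStateUpdateTask("no-op", new ClusterStateUpdateTask() {

        @Override
        public ClusterState execute(ClusterState currentState) {
            done.countDown();
            return currentState; // returning the input instance means "no change"
        }

        @Override
        public void onFailure(String source, Exception e) {
            rejected.set(true); // e.g. the node is not the elected master
            done.countDown();
        }
    });
    done.await();
    return rejected.get();
}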
use of org.elasticsearch.cluster.ClusterStateUpdateTask in project crate by crate.
the class MasterServiceTests method testLongClusterStateUpdateLogging.
// To ensure that we log cluster state events on WARN level
@TestLogging("org.elasticsearch.cluster.service:WARN")
public void testLongClusterStateUpdateLogging() throws Exception {
    MockLogAppender mockAppender = new MockLogAppender();
    mockAppender.start();
    mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation(
        "test1 shouldn't log because it was fast enough",
        MasterService.class.getCanonicalName(), Level.WARN, "*took*test1*"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test2", MasterService.class.getCanonicalName(), Level.WARN,
        "*took [*], which is over [10s], to compute cluster state update for [test2]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test3", MasterService.class.getCanonicalName(), Level.WARN,
        "*took [*], which is over [10s], to compute cluster state update for [test3]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test4", MasterService.class.getCanonicalName(), Level.WARN,
        "*took [*], which is over [10s], to compute cluster state update for [test4]"));
    mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation(
        "test5 should not log despite publishing slowly",
        MasterService.class.getCanonicalName(), Level.WARN, "*took*test5*"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test6 should log due to slow and failing publication",
        MasterService.class.getCanonicalName(), Level.WARN,
        "took [*] and then failed to publish updated cluster state (version: *, uuid: *) for [test6]:*"));
    Logger clusterLogger = LogManager.getLogger(MasterService.class);
    Loggers.addAppender(clusterLogger, mockAppender);
    try (MasterService masterService = new MasterService(
            Settings.builder()
                .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
                .put(Node.NODE_NAME_SETTING.getKey(), "test_node")
                .build(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            threadPool)) {
        final DiscoveryNode localNode = new DiscoveryNode(
            "node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
        final ClusterState initialClusterState = ClusterState.builder(
                new ClusterName(MasterServiceTests.class.getSimpleName()))
            .nodes(DiscoveryNodes.builder()
                .add(localNode)
                .localNodeId(localNode.getId())
                .masterNodeId(localNode.getId()))
            .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
            .build();
        final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
        masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
            if (event.source().contains("test5")) {
                relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING
                    .get(Settings.EMPTY).millis() + randomLongBetween(1, 1000000);
            }
            if (event.source().contains("test6")) {
                relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING
                    .get(Settings.EMPTY).millis() + randomLongBetween(1, 1000000);
                throw new ElasticsearchException("simulated error during slow publication which should trigger logging");
            }
            clusterStateRef.set(event.state());
            publishListener.onResponse(null);
        });
        masterService.setClusterStateSupplier(clusterStateRef::get);
        masterService.start();
        final CountDownLatch latch = new CountDownLatch(6);
        final CountDownLatch processedFirstTask = new CountDownLatch(1);
        masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                relativeTimeInMillis += randomLongBetween(
                    0L, MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis());
                return currentState;
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown();
                processedFirstTask.countDown();
            }

            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        processedFirstTask.await();
        masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING
                    .get(Settings.EMPTY).millis() + randomLongBetween(1, 1000000);
                throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                fail();
            }

            @Override
            public void onFailure(String source, Exception e) {
                latch.countDown();
            }
        });
        masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING
                    .get(Settings.EMPTY).millis() + randomLongBetween(1, 1000000);
                return ClusterState.builder(currentState).incrementVersion().build();
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown();
            }

            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING
                    .get(Settings.EMPTY).millis() + randomLongBetween(1, 1000000);
                return currentState;
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown();
            }

            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                return ClusterState.builder(currentState).incrementVersion().build();
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown();
            }

            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                return ClusterState.builder(currentState).incrementVersion().build();
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                fail();
            }

            @Override
            public void onFailure(String source, Exception e) {
                // maybe we should notify here?
                fail();
            }
        });
        // Additional update task to make sure all previous logging made it to the loggerName.
        // We don't check logging for this one since there is no guarantee that it will occur before our check.
masterService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
latch.await();
} finally {
Loggers.removeAppender(clusterLogger, mockAppender);
mockAppender.stop();
}
mockAppender.assertAllExpectationsMatched();
}
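The MockLogAppender life cycle used above follows a fixed shape: register the expectations, attach the appender to the logger under test, run the workload, detach, and assert. Stripped down to that skeleton (using only the calls already present in the test), it looks like this:

MockLogAppender appender = new MockLogAppender();
appender.start();
appender.addExpectation(new MockLogAppender.SeenEventExpectation(
    "slow task warning",                      // name reported if the expectation is not met
    MasterService.class.getCanonicalName(),   // logger to observe
    Level.WARN,
    "*took [*], which is over [10s], to compute cluster state update for [test2]"));
Logger logger = LogManager.getLogger(MasterService.class);
Loggers.addAppender(logger, appender);
try {
    // ... run the code that is expected (or not expected) to log ...
} finally {
    Loggers.removeAppender(logger, appender);
    appender.stop();
}
appender.assertAllExpectationsMatched();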
use of org.elasticsearch.cluster.ClusterStateUpdateTask in project crate by crate.
the class BlobStoreTestUtil method mockClusterService.
private static ClusterService mockClusterService(ClusterState initialState) {
    final ThreadPool threadPool = mock(ThreadPool.class);
    when(threadPool.executor(ThreadPool.Names.SNAPSHOT)).thenReturn(new SameThreadExecutorService());
    when(threadPool.generic()).thenReturn(new SameThreadExecutorService());
    final ClusterService clusterService = mock(ClusterService.class);
    final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class);
    when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService);
    final AtomicReference<ClusterState> currentState = new AtomicReference<>(initialState);
    when(clusterService.state()).then(invocationOnMock -> currentState.get());
    final List<ClusterStateApplier> appliers = new CopyOnWriteArrayList<>();
    doAnswer(invocation -> {
        final ClusterStateUpdateTask task = ((ClusterStateUpdateTask) invocation.getArguments()[1]);
        final ClusterState current = currentState.get();
        final ClusterState next = task.execute(current);
        currentState.set(next);
        appliers.forEach(applier -> applier.applyClusterState(
            new ClusterChangedEvent((String) invocation.getArguments()[0], next, current)));
        task.clusterStateProcessed((String) invocation.getArguments()[0], current, next);
        return null;
    }).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class));
    doAnswer(invocation -> {
        appliers.add((ClusterStateApplier) invocation.getArguments()[0]);
        return null;
    }).when(clusterService).addStateApplier(any(ClusterStateApplier.class));
    when(clusterApplierService.threadPool()).thenReturn(threadPool);
    return clusterService;
}
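Because the stubbed submitStateUpdateTask runs the task inline on the calling thread, code under test sees the new state as soon as the call returns. A hypothetical caller (it would have to live in the same class, since mockClusterService is private) could rely on that as follows:

// Hypothetical usage of the mock above: execute(), the registered appliers
// and clusterStateProcessed() all run synchronously inside this call.
ClusterService clusterService = mockClusterService(initialState);
clusterService.submitStateUpdateTask("bump version", new ClusterStateUpdateTask() {

    @Override
    public ClusterState execute(ClusterState currentState) {
        return ClusterState.builder(currentState).incrementVersion().build();
    }

    @Override
    public void onFailure(String source, Exception e) {
        throw new AssertionError(source, e);
    }
});
// The mock applied the task inline, so state() already reflects the change.
assertEquals(initialState.version() + 1, clusterService.state().version());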
use of org.elasticsearch.cluster.ClusterStateUpdateTask in project crate by crate.
the class BatchedRerouteServiceTests method testBatchesReroutesTogetherAtPriorityOfHighestSubmittedReroute.
public void testBatchesReroutesTogetherAtPriorityOfHighestSubmittedReroute() throws BrokenBarrierException, InterruptedException {
    final CyclicBarrier cyclicBarrier = new CyclicBarrier(2);
    clusterService.submitStateUpdateTask("block master service", new ClusterStateUpdateTask() {

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            // notify test that we are blocked
            cyclicBarrier.await();
            // wait to be unblocked by test
            cyclicBarrier.await();
            return currentState;
        }

        @Override
        public void onFailure(String source, Exception e) {
            throw new AssertionError(source, e);
        }
    });
    // wait for master thread to be blocked
    cyclicBarrier.await();
    final AtomicBoolean rerouteExecuted = new AtomicBoolean();
    final BatchedRerouteService batchedRerouteService = new BatchedRerouteService(clusterService, (s, r) -> {
        // only called once
        assertTrue(rerouteExecuted.compareAndSet(false, true));
        return s;
    });
    final int iterations = scaledRandomIntBetween(1, 100);
    final CountDownLatch tasksSubmittedCountDown = new CountDownLatch(iterations);
    final CountDownLatch tasksCompletedCountDown = new CountDownLatch(iterations);
    final List<Runnable> actions = new ArrayList<>(iterations);
    final Function<Priority, Runnable> rerouteFromPriority = priority -> () -> {
        final AtomicBoolean alreadyRun = new AtomicBoolean();
        batchedRerouteService.reroute("reroute at " + priority, priority, ActionListener.wrap(() -> {
            assertTrue(alreadyRun.compareAndSet(false, true));
            tasksCompletedCountDown.countDown();
        }));
        tasksSubmittedCountDown.countDown();
    };
    // ensure at least one URGENT priority reroute
    actions.add(rerouteFromPriority.apply(Priority.URGENT));
    for (int i = 1; i < iterations; i++) {
        final int iteration = i;
        if (randomBoolean()) {
            actions.add(rerouteFromPriority.apply(
                randomFrom(Priority.LOW, Priority.NORMAL, Priority.HIGH, Priority.URGENT)));
        } else {
            final Priority priority = randomFrom(Priority.NORMAL, Priority.HIGH, Priority.URGENT, Priority.IMMEDIATE);
            final boolean submittedConcurrentlyWithReroute = randomBoolean();
            if (submittedConcurrentlyWithReroute == false) {
                // this task might be submitted later
                tasksSubmittedCountDown.countDown();
            }
            actions.add(() -> {
                clusterService.submitStateUpdateTask("other task " + iteration + " at " + priority,
                    new ClusterStateUpdateTask(priority) {

                        @Override
                        public ClusterState execute(ClusterState currentState) {
                            switch (priority) {
                                case IMMEDIATE:
                                    if (submittedConcurrentlyWithReroute) {
                                        assertFalse("should have rerouted after " + priority + " priority task",
                                            rerouteExecuted.get());
                                    }
                                    // else this task might be submitted too late to precede the reroute
                                    break;
                                case URGENT:
                                    // may run either before or after reroute
                                    break;
                                case HIGH:
                                case NORMAL:
                                    assertTrue("should have rerouted before " + priority + " priority task",
                                        rerouteExecuted.get());
                                    break;
                                default:
                                    fail("unexpected priority: " + priority);
                                    break;
                            }
                            return currentState;
                        }

                        @Override
                        public void onFailure(String source, Exception e) {
                            throw new AssertionError(source, e);
                        }

                        @Override
                        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                            tasksCompletedCountDown.countDown();
                        }
                    });
                if (submittedConcurrentlyWithReroute) {
                    tasksSubmittedCountDown.countDown();
                }
            });
        }
    }
    Randomness.shuffle(actions);
    actions.forEach(threadPool.generic()::execute);
    assertTrue(tasksSubmittedCountDown.await(10, TimeUnit.SECONDS));
    // allow master thread to continue
    cyclicBarrier.await();
    // wait for reroute to complete
    assertTrue(tasksCompletedCountDown.await(10, TimeUnit.SECONDS));
    // see above for assertion that it's only called once
    assertTrue(rerouteExecuted.get());
}
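The behaviour under test is that reroute requests submitted while another reroute is still pending are folded into a single cluster state update, scheduled at the highest priority any caller asked for, with every listener still notified exactly once. A condensed sketch of that contract (hypothetical listeners; the constructor and reroute signature are the same ones used above):

// Hypothetical sketch: both calls below are expected to be served by one
// cluster state task, enqueued at URGENT (the higher of the two priorities).
BatchedRerouteService rerouteService = new BatchedRerouteService(clusterService, (state, reason) -> state);
rerouteService.reroute("first", Priority.NORMAL,
    ActionListener.wrap(() -> System.out.println("first listener notified")));
rerouteService.reroute("second", Priority.URGENT,
    ActionListener.wrap(() -> System.out.println("second listener notified")));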
use of org.elasticsearch.cluster.ClusterStateUpdateTask in project crate by crate.
the class GatewayServiceTests method testRecoverStateUpdateTask.
public void testRecoverStateUpdateTask() throws Exception {
    GatewayService service = createService(Settings.builder());
    ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask();
    String nodeId = randomAlphaOfLength(10);
    DiscoveryNode masterNode = DiscoveryNode.createLocal(
        settings(Version.CURRENT).put(Node.NODE_MASTER_SETTING.getKey(), true).build(),
        new TransportAddress(TransportAddress.META_ADDRESS, 9300),
        nodeId);
    ClusterState stateWithBlock = ClusterState.builder(ClusterName.DEFAULT)
        .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
        .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build())
        .build();
    ClusterState recoveredState = clusterStateUpdateTask.execute(stateWithBlock);
    assertNotEquals(recoveredState, stateWithBlock);
    assertThat(recoveredState.blocks().global(ClusterBlockLevel.METADATA_WRITE), not(hasItem(STATE_NOT_RECOVERED_BLOCK)));
    ClusterState clusterState = clusterStateUpdateTask.execute(recoveredState);
    assertSame(recoveredState, clusterState);
}
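The final assertSame is significant: MasterService decides whether anything changed by reference identity, so a task that returns its input ClusterState unchanged publishes nothing. RecoverStateUpdateTask is therefore idempotent, a pattern that can be sketched as follows (a hypothetical task, not crate's implementation; the block-manipulation builder calls are assumed to behave as in Elasticsearch):

// Hypothetical idempotent task following the convention the test verifies:
// returning the same ClusterState instance signals "no change to publish".
ClusterStateUpdateTask removeBlockOnce = new ClusterStateUpdateTask() {

    @Override
    public ClusterState execute(ClusterState currentState) {
        if (currentState.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) {
            return currentState; // already recovered: no-op on the second run
        }
        return ClusterState.builder(currentState)
            .blocks(ClusterBlocks.builder()
                .blocks(currentState.blocks())
                .removeGlobalBlock(STATE_NOT_RECOVERED_BLOCK))
            .build();
    }

    @Override
    public void onFailure(String source, Exception e) {
        throw new AssertionError(source, e);
    }
};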