Use of org.opensearch.action.LatchedActionListener in project OpenSearch by opensearch-project.
From the class TransportActionFilterChainTests, method testTooManyContinueProcessingRequest:
public void testTooManyContinueProcessingRequest() throws InterruptedException {
    final int additionalContinueCount = randomInt(10);
    RequestTestFilter testFilter = new RequestTestFilter(randomInt(), new RequestCallback() {
        @Override
        public <Request extends ActionRequest, Response extends ActionResponse> void execute(
            Task task,
            String action,
            Request request,
            ActionListener<Response> listener,
            ActionFilterChain<Request, Response> actionFilterChain
        ) {
            for (int i = 0; i <= additionalContinueCount; i++) {
                actionFilterChain.proceed(task, action, request, listener);
            }
        }
    });
    Set<ActionFilter> filters = new HashSet<>();
    filters.add(testFilter);
    String actionName = randomAlphaOfLength(randomInt(30));
    ActionFilters actionFilters = new ActionFilters(filters);
    TransportAction<TestRequest, TestResponse> transportAction = new TransportAction<TestRequest, TestResponse>(
        actionName,
        actionFilters,
        new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())
    ) {
        @Override
        protected void doExecute(Task task, TestRequest request, ActionListener<TestResponse> listener) {
            listener.onResponse(new TestResponse());
        }
    };
    final CountDownLatch latch = new CountDownLatch(additionalContinueCount + 1);
    final AtomicInteger responses = new AtomicInteger();
    final List<Throwable> failures = new CopyOnWriteArrayList<>();
    transportAction.execute(new TestRequest(), new LatchedActionListener<>(new ActionListener<TestResponse>() {
        @Override
        public void onResponse(TestResponse testResponse) {
            responses.incrementAndGet();
        }

        @Override
        public void onFailure(Exception e) {
            failures.add(e);
        }
    }, latch));
    if (!latch.await(10, TimeUnit.SECONDS)) {
        fail("timeout waiting for the filter to notify the listener as many times as expected");
    }
    assertThat(testFilter.runs.get(), equalTo(1));
    assertThat(testFilter.lastActionName, equalTo(actionName));
    assertThat(responses.get(), equalTo(1));
    assertThat(failures.size(), equalTo(additionalContinueCount));
    for (Throwable failure : failures) {
        assertThat(failure, instanceOf(IllegalStateException.class));
    }
}
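The behaviour this test relies on is that LatchedActionListener wraps a delegate listener and counts its CountDownLatch down after every notification, successful or not, so the test thread can simply await the latch until the filter has called back the expected number of times. A minimal sketch of such a wrapper is shown below, assuming only the ActionListener contract and java.util.concurrent.CountDownLatch; it is an illustrative approximation, not the exact OpenSearch implementation.

import java.util.concurrent.CountDownLatch;
import org.opensearch.action.ActionListener; // assumed location of the listener interface

// Illustrative sketch: delegate every callback, then always release the latch,
// so a waiting test thread is unblocked whether the action succeeded or failed.
final class LatchedListenerSketch<T> implements ActionListener<T> {
    private final ActionListener<T> delegate;
    private final CountDownLatch latch;

    LatchedListenerSketch(ActionListener<T> delegate, CountDownLatch latch) {
        this.delegate = delegate;
        this.latch = latch;
    }

    @Override
    public void onResponse(T response) {
        try {
            delegate.onResponse(response);
        } finally {
            latch.countDown(); // count down even if the delegate throws
        }
    }

    @Override
    public void onFailure(Exception e) {
        try {
            delegate.onFailure(e);
        } finally {
            latch.countDown(); // count down even if the delegate throws
        }
    }
}

Because the latch above is initialized to additionalContinueCount + 1, the test only proceeds once the single success and every duplicate-proceed failure have been delivered.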
Use of org.opensearch.action.LatchedActionListener in project OpenSearch by opensearch-project.
From the class TransportSearchActionTests, method testCCSRemoteReduce:
public void testCCSRemoteReduce() throws Exception {
    int numClusters = randomIntBetween(1, 10);
    DiscoveryNode[] nodes = new DiscoveryNode[numClusters];
    Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>();
    Settings.Builder builder = Settings.builder();
    MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder);
    Settings settings = builder.build();
    boolean local = randomBoolean();
    OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null;
    int totalClusters = numClusters + (local ? 1 : 0);
    TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0);
    try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) {
        service.start();
        service.acceptIncomingRequests();
        RemoteClusterService remoteClusterService = service.getRemoteClusterService();
        {
            SearchRequest searchRequest = new SearchRequest();
            final CountDownLatch latch = new CountDownLatch(1);
            SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
            AtomicReference<SearchResponse> response = new AtomicReference<>();
            LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch);
            TransportSearchAction.ccsRemoteReduce(
                searchRequest,
                localIndices,
                remoteIndicesByCluster,
                timeProvider,
                emptyReduceContextBuilder(),
                remoteClusterService,
                threadPool,
                listener,
                (r, l) -> setOnce.set(Tuple.tuple(r, l))
            );
            if (localIndices == null) {
                assertNull(setOnce.get());
            } else {
                Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
                assertEquals("", tuple.v1().getLocalClusterAlias());
                assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
                tuple.v2().onResponse(emptySearchResponse());
            }
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            SearchResponse searchResponse = response.get();
            assertEquals(0, searchResponse.getClusters().getSkipped());
            assertEquals(totalClusters, searchResponse.getClusters().getTotal());
            assertEquals(totalClusters, searchResponse.getClusters().getSuccessful());
            assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases());
        }
        {
            SearchRequest searchRequest = new SearchRequest();
            searchRequest.preference("index_not_found");
            final CountDownLatch latch = new CountDownLatch(1);
            SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
            AtomicReference<Exception> failure = new AtomicReference<>();
            LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch);
            TransportSearchAction.ccsRemoteReduce(
                searchRequest,
                localIndices,
                remoteIndicesByCluster,
                timeProvider,
                emptyReduceContextBuilder(),
                remoteClusterService,
                threadPool,
                listener,
                (r, l) -> setOnce.set(Tuple.tuple(r, l))
            );
            if (localIndices == null) {
                assertNull(setOnce.get());
            } else {
                Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
                assertEquals("", tuple.v1().getLocalClusterAlias());
                assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
                tuple.v2().onResponse(emptySearchResponse());
            }
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            assertNotNull(failure.get());
            assertThat(failure.get(), instanceOf(RemoteTransportException.class));
            RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get();
            assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status());
        }
        int numDisconnectedClusters = randomIntBetween(1, numClusters);
        Set<DiscoveryNode> disconnectedNodes = new HashSet<>(numDisconnectedClusters);
        Set<Integer> disconnectedNodesIndices = new HashSet<>(numDisconnectedClusters);
        while (disconnectedNodes.size() < numDisconnectedClusters) {
            int i = randomIntBetween(0, numClusters - 1);
            if (disconnectedNodes.add(nodes[i])) {
                assertTrue(disconnectedNodesIndices.add(i));
            }
        }
        CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters);
        RemoteClusterServiceTests.addConnectionListener(remoteClusterService, new TransportConnectionListener() {
            @Override
            public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connection) {
                if (disconnectedNodes.remove(node)) {
                    disconnectedLatch.countDown();
                }
            }
        });
        for (DiscoveryNode disconnectedNode : disconnectedNodes) {
            service.addFailToSendNoConnectRule(disconnectedNode.getAddress());
        }
        {
            SearchRequest searchRequest = new SearchRequest();
            final CountDownLatch latch = new CountDownLatch(1);
            SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
            AtomicReference<Exception> failure = new AtomicReference<>();
            LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch);
            TransportSearchAction.ccsRemoteReduce(
                searchRequest,
                localIndices,
                remoteIndicesByCluster,
                timeProvider,
                emptyReduceContextBuilder(),
                remoteClusterService,
                threadPool,
                listener,
                (r, l) -> setOnce.set(Tuple.tuple(r, l))
            );
            if (localIndices == null) {
                assertNull(setOnce.get());
            } else {
                Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
                assertEquals("", tuple.v1().getLocalClusterAlias());
                assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
                tuple.v2().onResponse(emptySearchResponse());
            }
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            assertNotNull(failure.get());
            assertThat(failure.get(), instanceOf(RemoteTransportException.class));
            assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster ["));
            assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class));
        }
        // setting skip_unavailable to true for all the disconnected clusters will make the request succeed again
        for (int i : disconnectedNodesIndices) {
            RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true);
        }
        {
            SearchRequest searchRequest = new SearchRequest();
            final CountDownLatch latch = new CountDownLatch(1);
            SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
            AtomicReference<SearchResponse> response = new AtomicReference<>();
            LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch);
            TransportSearchAction.ccsRemoteReduce(
                searchRequest,
                localIndices,
                remoteIndicesByCluster,
                timeProvider,
                emptyReduceContextBuilder(),
                remoteClusterService,
                threadPool,
                listener,
                (r, l) -> setOnce.set(Tuple.tuple(r, l))
            );
            if (localIndices == null) {
                assertNull(setOnce.get());
            } else {
                Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
                assertEquals("", tuple.v1().getLocalClusterAlias());
                assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
                tuple.v2().onResponse(emptySearchResponse());
            }
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            SearchResponse searchResponse = response.get();
            assertEquals(disconnectedNodesIndices.size(), searchResponse.getClusters().getSkipped());
            assertEquals(totalClusters, searchResponse.getClusters().getTotal());
            int successful = totalClusters - disconnectedNodesIndices.size();
            assertEquals(successful, searchResponse.getClusters().getSuccessful());
            assertEquals(successful == 0 ? 0 : successful + 1, searchResponse.getNumReducePhases());
        }
        // give transport service enough time to realize that the node is down, and to notify the connection listeners
        // so that RemoteClusterConnection is left with no connected nodes, hence it will retry connecting next
        assertTrue(disconnectedLatch.await(5, TimeUnit.SECONDS));
        service.clearAllRules();
        if (randomBoolean()) {
            for (int i : disconnectedNodesIndices) {
                if (randomBoolean()) {
                    RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true);
                }
            }
        }
        {
            SearchRequest searchRequest = new SearchRequest();
            final CountDownLatch latch = new CountDownLatch(1);
            SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
            AtomicReference<SearchResponse> response = new AtomicReference<>();
            LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch);
            TransportSearchAction.ccsRemoteReduce(
                searchRequest,
                localIndices,
                remoteIndicesByCluster,
                timeProvider,
                emptyReduceContextBuilder(),
                remoteClusterService,
                threadPool,
                listener,
                (r, l) -> setOnce.set(Tuple.tuple(r, l))
            );
            if (localIndices == null) {
                assertNull(setOnce.get());
            } else {
                Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
                assertEquals("", tuple.v1().getLocalClusterAlias());
                assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
                tuple.v2().onResponse(emptySearchResponse());
            }
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            SearchResponse searchResponse = response.get();
            assertEquals(0, searchResponse.getClusters().getSkipped());
            assertEquals(totalClusters, searchResponse.getClusters().getTotal());
            assertEquals(totalClusters, searchResponse.getClusters().getSuccessful());
            assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases());
        }
        assertEquals(0, service.getConnectionManager().size());
    } finally {
        for (MockTransportService mockTransportService : mockTransportServices) {
            mockTransportService.close();
        }
    }
}
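Every block in the method above repeats the same capture idiom: route the success and failure paths into AtomicReference sinks with ActionListener.wrap, wrap the result in a LatchedActionListener, call awaitLatch, and then assert on whichever sink was populated. A condensed, hypothetical helper naming that idiom is sketched below; capture is an invented method name, while ActionListener.wrap and the LatchedActionListener constructor are used exactly as in the test.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical helper (not part of the OpenSearch tests): success and failure
// each land in their own AtomicReference, and the latch is released either way,
// so the caller can await completion and then assert on exactly one of the sinks.
static <T> LatchedActionListener<T> capture(
    AtomicReference<T> onSuccess,
    AtomicReference<Exception> onFailure,
    CountDownLatch latch
) {
    return new LatchedActionListener<>(ActionListener.wrap(onSuccess::set, onFailure::set), latch);
}

With such a helper, each block would reduce to creating the two references and a one-count latch, invoking ccsRemoteReduce with the captured listener, awaiting the latch, and asserting that only the expected reference was set.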
Use of org.opensearch.action.LatchedActionListener in project OpenSearch by opensearch-project.
From the class RecoverySourceHandlerTests, method testSendFileChunksStopOnError:
public void testSendFileChunksStopOnError() throws Exception {
    final List<FileChunkResponse> unrepliedChunks = new CopyOnWriteArrayList<>();
    final AtomicInteger sentChunks = new AtomicInteger();
    final TestRecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {
        final AtomicLong chunkNumberGenerator = new AtomicLong();

        @Override
        public void writeFileChunk(
            StoreFileMetadata md,
            long position,
            BytesReference content,
            boolean lastChunk,
            int totalTranslogOps,
            ActionListener<Void> listener
        ) {
            final long chunkNumber = chunkNumberGenerator.getAndIncrement();
            logger.info("--> write chunk name={} seq={}, position={}", md.name(), chunkNumber, position);
            unrepliedChunks.add(new FileChunkResponse(chunkNumber, listener));
            sentChunks.incrementAndGet();
        }
    };
    final int maxConcurrentChunks = between(1, 4);
    final int chunkSize = between(1, 16);
    final RecoverySourceHandler handler = new RecoverySourceHandler(
        null,
        new AsyncRecoveryTarget(recoveryTarget, recoveryExecutor),
        threadPool,
        getStartRecoveryRequest(),
        chunkSize,
        maxConcurrentChunks,
        between(1, 5)
    );
    Store store = newStore(createTempDir(), false);
    List<StoreFileMetadata> files = generateFiles(store, between(1, 10), () -> between(1, chunkSize * 20));
    int totalChunks = files.stream().mapToInt(md -> ((int) md.length() + chunkSize - 1) / chunkSize).sum();
    SetOnce<Exception> sendFilesError = new SetOnce<>();
    CountDownLatch sendFilesLatch = new CountDownLatch(1);
    handler.sendFiles(
        store,
        files.toArray(new StoreFileMetadata[0]),
        () -> 0,
        new LatchedActionListener<>(ActionListener.wrap(r -> sendFilesError.set(null), e -> sendFilesError.set(e)), sendFilesLatch)
    );
    assertBusy(() -> assertThat(sentChunks.get(), equalTo(Math.min(totalChunks, maxConcurrentChunks))));
    List<FileChunkResponse> failedChunks = randomSubsetOf(between(1, unrepliedChunks.size()), unrepliedChunks);
    CountDownLatch replyLatch = new CountDownLatch(failedChunks.size());
    failedChunks.forEach(c -> {
        c.listener.onFailure(new IllegalStateException("test chunk exception"));
        replyLatch.countDown();
    });
    replyLatch.await();
    unrepliedChunks.removeAll(failedChunks);
    unrepliedChunks.forEach(c -> {
        if (randomBoolean()) {
            c.listener.onFailure(new RuntimeException("test"));
        } else {
            c.listener.onResponse(null);
        }
    });
    sendFilesLatch.await();
    assertThat(sendFilesError.get(), instanceOf(IllegalStateException.class));
    assertThat(sendFilesError.get().getMessage(), containsString("test chunk exception"));
    assertThat("no more chunks should be sent", sentChunks.get(), equalTo(Math.min(totalChunks, maxConcurrentChunks)));
    store.close();
}
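Here the single terminal outcome of sendFiles is recorded through a SetOnce rather than two separate references: null marks success, any exception marks failure, and the latch is released in either case. The fragment below restates that idiom in isolation, under the assumption that the listener is handed to some asynchronous operation; it is a sketch of the pattern, not additional test code.

// Sketch of the single-outcome idiom used above: SetOnce records the result
// exactly once (a second set(...) would throw), and LatchedActionListener
// releases the latch after either branch has run.
SetOnce<Exception> outcome = new SetOnce<>();
CountDownLatch done = new CountDownLatch(1);
ActionListener<Void> listener = new LatchedActionListener<>(
    ActionListener.wrap(r -> outcome.set(null), e -> outcome.set(e)),
    done
);
// ... pass `listener` to the asynchronous operation under test ...
done.await();
// outcome.get() is null on success, or the exception that terminated the operation.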