Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class TransportIndicesShardStoresAction, method masterOperation.
@Override
protected void masterOperation(
    IndicesShardStoresRequest request,
    ClusterState state,
    ActionListener<IndicesShardStoresResponse> listener
) {
    final RoutingTable routingTables = state.routingTable();
    final RoutingNodes routingNodes = state.getRoutingNodes();
    final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
    final Set<Tuple<ShardId, String>> shardsToFetch = new HashSet<>();

    logger.trace("using cluster state version [{}] to determine shards", state.version());
    // collect the relevant shard ids of the requested indices for fetching store infos
    for (String index : concreteIndices) {
        IndexRoutingTable indexShardRoutingTables = routingTables.index(index);
        if (indexShardRoutingTables == null) {
            continue;
        }
        final String customDataPath = IndexMetadata.INDEX_DATA_PATH_SETTING.get(state.metadata().index(index).getSettings());
        for (IndexShardRoutingTable routing : indexShardRoutingTables) {
            final int shardId = routing.shardId().id();
            ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing);
            if (request.shardStatuses().contains(shardHealth.getStatus())) {
                shardsToFetch.add(Tuple.tuple(routing.shardId(), customDataPath));
            }
        }
    }

    // async fetch store infos from all the nodes
    // NOTE: instead of fetching shard store info one by one from every node (nShards * nNodes requests),
    // we could fetch all shard store info from every node once (nNodes requests); that would require
    // implementing a TransportNodesAction that operates on a list of shards, instead of reusing
    // TransportNodesListGatewayStartedShards, which operates on a single shard
    new AsyncShardStoresInfoFetches(state.nodes(), routingNodes, shardsToFetch, listener).start();
}
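The method works because Tuple carries value-based equals and hashCode, so adding the same (ShardId, customDataPath) pair twice leaves a single entry in the HashSet. A minimal, hypothetical standalone sketch of that behavior, assuming the OpenSearch library providing org.opensearch.common.collect.Tuple is on the classpath (the class name and string stand-ins are illustrative, not from the source):

import org.opensearch.common.collect.Tuple;

import java.util.HashSet;
import java.util.Set;

public class TupleSetSketch {
    public static void main(String[] args) {
        Set<Tuple<String, String>> shardsToFetch = new HashSet<>();
        // Stand-ins for the ShardId and custom data path paired above.
        shardsToFetch.add(Tuple.tuple("[index][0]", "/mnt/custom"));
        shardsToFetch.add(Tuple.tuple("[index][0]", "/mnt/custom")); // duplicate pair, collapses
        shardsToFetch.add(Tuple.tuple("[index][1]", "/mnt/custom"));
        System.out.println(shardsToFetch.size()); // prints 2: Tuple compares by value, not identity
    }
}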
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class MetadataIndexStateService, method closeIndices.
/**
 * Closes one or more indices.
 *
 * Closing indices is a three-step process: it first adds a write block to every index to close,
 * then waits for the operations on shards to terminate, and finally closes the indices by moving
 * their state to CLOSE.
 */
public void closeIndices(final CloseIndexClusterStateUpdateRequest request, final ActionListener<CloseIndexResponse> listener) {
    final Index[] concreteIndices = request.indices();
    if (concreteIndices == null || concreteIndices.length == 0) {
        throw new IllegalArgumentException("Index name is required");
    }
    List<String> writeIndices = new ArrayList<>();
    SortedMap<String, IndexAbstraction> lookup = clusterService.state().metadata().getIndicesLookup();
    for (Index index : concreteIndices) {
        IndexAbstraction ia = lookup.get(index.getName());
        if (ia != null && ia.getParentDataStream() != null && ia.getParentDataStream().getWriteIndex().getIndex().equals(index)) {
            writeIndices.add(index.getName());
        }
    }
    if (writeIndices.size() > 0) {
        throw new IllegalArgumentException(
            "cannot close the following data stream write indices ["
                + Strings.collectionToCommaDelimitedString(writeIndices)
                + "]"
        );
    }

    clusterService.submitStateUpdateTask(
        "add-block-index-to-close " + Arrays.toString(concreteIndices),
        new ClusterStateUpdateTask(Priority.URGENT) {

            private final Map<Index, ClusterBlock> blockedIndices = new HashMap<>();

            @Override
            public ClusterState execute(final ClusterState currentState) {
                return addIndexClosedBlocks(concreteIndices, blockedIndices, currentState);
            }

            @Override
            public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) {
                if (oldState == newState) {
                    assert blockedIndices.isEmpty() : "List of blocked indices is not empty but cluster state wasn't changed";
                    listener.onResponse(new CloseIndexResponse(true, false, Collections.emptyList()));
                } else {
                    assert blockedIndices.isEmpty() == false : "List of blocked indices is empty but cluster state was changed";
                    threadPool.executor(ThreadPool.Names.MANAGEMENT)
                        .execute(new WaitForClosedBlocksApplied(blockedIndices, request, ActionListener.wrap(
                            verifyResults -> clusterService.submitStateUpdateTask(
                                "close-indices",
                                new ClusterStateUpdateTask(Priority.URGENT) {

                                    private final List<IndexResult> indices = new ArrayList<>();

                                    @Override
                                    public ClusterState execute(final ClusterState currentState) throws Exception {
                                        Tuple<ClusterState, Collection<IndexResult>> closingResult =
                                            closeRoutingTable(currentState, blockedIndices, verifyResults);
                                        assert verifyResults.size() == closingResult.v2().size();
                                        indices.addAll(closingResult.v2());
                                        return allocationService.reroute(closingResult.v1(), "indices closed");
                                    }

                                    @Override
                                    public void onFailure(final String source, final Exception e) {
                                        listener.onFailure(e);
                                    }

                                    @Override
                                    public void clusterStateProcessed(
                                        final String source,
                                        final ClusterState oldState,
                                        final ClusterState newState
                                    ) {
                                        final boolean acknowledged = indices.stream().noneMatch(IndexResult::hasFailures);
                                        final String[] waitForIndices = indices.stream()
                                            .filter(result -> result.hasFailures() == false)
                                            .filter(result -> newState.routingTable().hasIndex(result.getIndex()))
                                            .map(result -> result.getIndex().getName())
                                            .toArray(String[]::new);
                                        if (waitForIndices.length > 0) {
                                            activeShardsObserver.waitForActiveShards(
                                                waitForIndices,
                                                request.waitForActiveShards(),
                                                request.ackTimeout(),
                                                shardsAcknowledged -> {
                                                    if (shardsAcknowledged == false) {
                                                        logger.debug(
                                                            "[{}] indices closed, but the operation timed out while waiting "
                                                                + "for enough shards to be started.",
                                                            Arrays.toString(waitForIndices)
                                                        );
                                                    }
                                                    // acknowledged may be false, but some indices may have been correctly
                                                    // closed, so we maintain a kind of coherency by overriding the
                                                    // shardsAcknowledged value (see ShardsAcknowledgedResponse constructor)
                                                    boolean shardsAcked = acknowledged ? shardsAcknowledged : false;
                                                    listener.onResponse(new CloseIndexResponse(acknowledged, shardsAcked, indices));
                                                },
                                                listener::onFailure
                                            );
                                        } else {
                                            listener.onResponse(new CloseIndexResponse(acknowledged, false, indices));
                                        }
                                    }
                                }
                            ),
                            listener::onFailure
                        )));
                }
            }

            @Override
            public void onFailure(final String source, final Exception e) {
                listener.onFailure(e);
            }

            @Override
            public TimeValue timeout() {
                return request.masterNodeTimeout();
            }
        }
    );
}
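The Tuple usage worth noting here is closeRoutingTable returning two values at once: the updated ClusterState as v1() and the per-index results as v2(). A simplified, hypothetical sketch of that return-two-values pattern, detached from the cluster-state machinery (the closeAll method and its strings are illustrative stand-ins, not the real API):

import org.opensearch.common.collect.Tuple;

import java.util.ArrayList;
import java.util.List;

public class TwoValueReturnSketch {
    // Hypothetical stand-in for closeRoutingTable: returns a new "state" plus per-item results.
    static Tuple<String, List<String>> closeAll(List<String> indices) {
        List<String> results = new ArrayList<>();
        for (String index : indices) {
            results.add(index + ": closed");
        }
        return Tuple.tuple("state-with-" + indices.size() + "-closed", results);
    }

    public static void main(String[] args) {
        Tuple<String, List<String>> outcome = closeAll(List.of("logs-1", "logs-2"));
        System.out.println(outcome.v1());          // the new state, analogous to closingResult.v1()
        outcome.v2().forEach(System.out::println); // the per-index results, analogous to closingResult.v2()
    }
}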
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class ScriptContextInfoTests, method testPrimitiveContext.
public void testPrimitiveContext() {
    String name = "primitive_context";
    ScriptContextInfo info = new ScriptContextInfo(name, PrimitiveContext.class);
    assertEquals(name, info.name);
    assertEquals("execute", info.execute.name);
    assertEquals("int", info.execute.returnType);
    assertEquals(4, info.execute.parameters.size());

    List<Tuple<String, String>> eparams = new ArrayList<>();
    eparams.add(new Tuple<>("boolean", "foo"));
    eparams.add(new Tuple<>("long", "bar"));
    eparams.add(new Tuple<>("short", "baz"));
    eparams.add(new Tuple<>("float", "qux"));
    for (int i = 0; i < info.execute.parameters.size(); i++) {
        assertEquals(eparams.get(i).v1(), info.execute.parameters.get(i).type);
        assertEquals(eparams.get(i).v2(), info.execute.parameters.get(i).name);
    }

    assertEquals(2, info.getters.size());
    HashMap<String, String> getters = new HashMap<String, String>() {
        {
            put("getByte", "byte");
            put("getChar", "char");
        }
    };
    for (ScriptContextInfo.ScriptMethodInfo getter : info.getters) {
        assertEquals(0, getter.parameters.size());
        String returnType = getters.remove(getter.name);
        assertNotNull(returnType);
        assertEquals(returnType, getter.returnType);
    }
    assertEquals(0, getters.size());
}
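The test uses each Tuple to bundle an expected parameter type (v1) with its expected name (v2), then walks the expected and actual lists in lockstep. A minimal, hypothetical sketch of that expected-pairs idiom with plain printing in place of the assertions (class and values are illustrative):

import org.opensearch.common.collect.Tuple;

import java.util.ArrayList;
import java.util.List;

public class ExpectedTuplesSketch {
    public static void main(String[] args) {
        List<Tuple<String, String>> eparams = new ArrayList<>();
        eparams.add(new Tuple<>("boolean", "foo"));
        eparams.add(new Tuple<>("long", "bar"));
        for (Tuple<String, String> param : eparams) {
            // v1() is the expected type, v2() the expected name, mirroring the test above.
            System.out.println(param.v1() + " " + param.v2());
        }
    }
}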
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class NestedAggregatorTests, method testNestedOrdering_random.
public void testNestedOrdering_random() throws IOException {
    int numBooks = randomIntBetween(32, 512);
    List<Tuple<String, int[]>> books = new ArrayList<>();
    for (int i = 0; i < numBooks; i++) {
        int numChapters = randomIntBetween(1, 8);
        int[] chapters = new int[numChapters];
        for (int j = 0; j < numChapters; j++) {
            chapters[j] = randomIntBetween(2, 64);
        }
        books.add(Tuple.tuple(String.format(Locale.ROOT, "%03d", i), chapters));
    }
    try (Directory directory = newDirectory()) {
        try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
            int id = 0;
            for (Tuple<String, int[]> book : books) {
                iw.addDocuments(generateBook(String.format(Locale.ROOT, "%03d", id), new String[] { book.v1() }, book.v2()));
                id++;
            }
        }
        for (Tuple<String, int[]> book : books) {
            Arrays.sort(book.v2());
        }
        books.sort((o1, o2) -> {
            int cmp = Integer.compare(o1.v2()[0], o2.v2()[0]);
            if (cmp == 0) {
                return o1.v1().compareTo(o2.v1());
            } else {
                return cmp;
            }
        });
        try (IndexReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
            MappedFieldType fieldType1 = new NumberFieldMapper.NumberFieldType("num_pages", NumberFieldMapper.NumberType.LONG);
            MappedFieldType fieldType2 = new KeywordFieldMapper.KeywordFieldType("author");
            TermsAggregationBuilder termsBuilder = new TermsAggregationBuilder("authors").userValueTypeHint(ValueType.STRING)
                .size(books.size())
                .field("author")
                .order(BucketOrder.compound(BucketOrder.aggregation("chapters>num_pages.value", true), BucketOrder.key(true)));
            NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder("chapters", "nested_chapters");
            MinAggregationBuilder minAgg = new MinAggregationBuilder("num_pages").field("num_pages");
            nestedBuilder.subAggregation(minAgg);
            termsBuilder.subAggregation(nestedBuilder);

            Terms terms = searchAndReduce(
                newSearcher(indexReader, false, true),
                new MatchAllDocsQuery(),
                termsBuilder,
                fieldType1,
                fieldType2
            );

            assertEquals(books.size(), terms.getBuckets().size());
            assertEquals("authors", terms.getName());
            for (int i = 0; i < books.size(); i++) {
                Tuple<String, int[]> book = books.get(i);
                Terms.Bucket bucket = terms.getBuckets().get(i);
                assertEquals(book.v1(), bucket.getKeyAsString());
                Min numPages = ((Nested) bucket.getAggregations().get("chapters")).getAggregations().get("num_pages");
                assertEquals(book.v2()[0], (int) numPages.getValue());
            }
        }
    }
}
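The expected ordering comes entirely from the tuple comparator: primary sort on the smallest chapter page count (v2()[0], after sorting each array), tie-break on the book key (v1()). The same comparator in isolation, on hand-built tuples (a sketch with illustrative data, not taken from the test):

import org.opensearch.common.collect.Tuple;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TupleSortSketch {
    public static void main(String[] args) {
        List<Tuple<String, int[]>> books = new ArrayList<>();
        books.add(Tuple.tuple("002", new int[] { 5, 9 }));
        books.add(Tuple.tuple("001", new int[] { 5, 2 }));
        books.add(Tuple.tuple("000", new int[] { 8 }));

        // Same preparation as the test: sort each chapters array so v2()[0] is its minimum.
        for (Tuple<String, int[]> book : books) {
            Arrays.sort(book.v2());
        }
        // Primary: smallest chapter value; tie-break: book key.
        books.sort((o1, o2) -> {
            int cmp = Integer.compare(o1.v2()[0], o2.v2()[0]);
            return cmp != 0 ? cmp : o1.v1().compareTo(o2.v1());
        });
        books.forEach(b -> System.out.println(b.v1() + " -> " + b.v2()[0]));
        // prints: 001 -> 2, then 002 -> 5, then 000 -> 8
    }
}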
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class InternalBinaryRangeTests, method setUp.
@Override
public void setUp() throws Exception {
    super.setUp();
    List<Tuple<BytesRef, BytesRef>> listOfRanges = new ArrayList<>();
    if (randomBoolean()) {
        listOfRanges.add(Tuple.tuple(null, new BytesRef(randomAlphaOfLength(15))));
    }
    if (randomBoolean()) {
        listOfRanges.add(Tuple.tuple(new BytesRef(randomAlphaOfLength(15)), null));
    }
    if (randomBoolean()) {
        listOfRanges.add(Tuple.tuple(null, null));
    }
    final int numRanges = Math.max(0, randomNumberOfBuckets() - listOfRanges.size());
    for (int i = 0; i < numRanges; i++) {
        BytesRef[] values = new BytesRef[2];
        values[0] = new BytesRef(randomAlphaOfLength(15));
        values[1] = new BytesRef(randomAlphaOfLength(15));
        Arrays.sort(values);
        listOfRanges.add(Tuple.tuple(values[0], values[1]));
    }
    Collections.shuffle(listOfRanges, random());
    ranges = Collections.unmodifiableList(listOfRanges);
}
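Here a null tuple endpoint encodes an unbounded range edge: Tuple.tuple(null, to) is open below, Tuple.tuple(from, null) is open above, and Tuple.tuple(null, null) covers everything. A standalone sketch of that encoding, using String endpoints and java.util.Random in place of BytesRef and the test framework's randomness helpers (class name and values are illustrative):

import org.opensearch.common.collect.Tuple;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class NullBoundedRangesSketch {
    public static void main(String[] args) {
        Random random = new Random();
        List<Tuple<String, String>> ranges = new ArrayList<>();
        if (random.nextBoolean()) {
            ranges.add(Tuple.tuple(null, "m")); // unbounded below
        }
        if (random.nextBoolean()) {
            ranges.add(Tuple.tuple("m", null)); // unbounded above
        }
        ranges.add(Tuple.tuple("a", "k"));      // a fully bounded range
        for (Tuple<String, String> range : ranges) {
            String from = range.v1() == null ? "-inf" : range.v1();
            String to = range.v2() == null ? "+inf" : range.v2();
            System.out.println("[" + from + ", " + to + ")");
        }
    }
}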