Use of org.opensearch.common.collect.Tuple in project geospatial by opensearch-project.
The class RestUploadGeoJSONAction, method prepareRequest.
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
    Tuple<XContentType, BytesReference> sourceTuple = restRequest.contentOrSourceParam();
    RestRequest.Method method = restRequest.getHttpRequest().method();
    UploadGeoJSONRequest request = new UploadGeoJSONRequest(method, sourceTuple.v2());
    return channel -> client.execute(UploadGeoJSONAction.INSTANCE, request, new RestToXContentListener<>(channel));
}
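The tuple returned by contentOrSourceParam() pairs the detected content type (v1) with the raw body bytes (v2); the handler above only consumes the body. A minimal, hypothetical sketch that uses both halves to reject non-JSON uploads before building the request; the helper name and the JSON-only check are illustrative rather than part of the original handler, and the import paths assume the same OpenSearch version as the snippet above:

import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.rest.RestRequest;

// Hypothetical guard: only accept JSON bodies for the upload endpoint.
static BytesReference requireJsonBody(RestRequest restRequest) {
    Tuple<XContentType, BytesReference> sourceTuple = restRequest.contentOrSourceParam();
    if (sourceTuple.v1() != XContentType.JSON) {
        throw new IllegalArgumentException("expected a JSON body but received [" + sourceTuple.v1() + "]");
    }
    return sourceTuple.v2();
}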
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class InstallPluginCommand, method checkMisspelledPlugin.
/**
 * Returns all the official plugin names that look similar to pluginId.
 */
private List<String> checkMisspelledPlugin(String pluginId) {
    LevenshteinDistance ld = new LevenshteinDistance();
    List<Tuple<Float, String>> scoredKeys = new ArrayList<>();
    for (String officialPlugin : OFFICIAL_PLUGINS) {
        float distance = ld.getDistance(pluginId, officialPlugin);
        if (distance > 0.7f) {
            scoredKeys.add(new Tuple<>(distance, officialPlugin));
        }
    }
    CollectionUtil.timSort(scoredKeys, (a, b) -> b.v1().compareTo(a.v1()));
    return scoredKeys.stream().map((a) -> a.v2()).collect(Collectors.toList());
}
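Despite the local variable name, Lucene's LevenshteinDistance.getDistance returns a normalized similarity score in [0, 1], where higher means more similar, so the 0.7f threshold keeps only close matches and the descending timSort puts the best suggestion first. A standalone sketch of the same scoring pattern, assuming lucene-suggest and the OpenSearch core jar are on the classpath (the plugin names and the misspelled input are illustrative):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.apache.lucene.search.spell.LevenshteinDistance;
import org.opensearch.common.collect.Tuple;

public class MisspellingDemo {
    public static void main(String[] args) {
        LevenshteinDistance ld = new LevenshteinDistance();
        List<String> officialPlugins = List.of("analysis-icu", "analysis-kuromoji", "repository-s3");
        List<Tuple<Float, String>> scored = new ArrayList<>();
        for (String name : officialPlugins) {
            // Similarity score in [0, 1]; higher means closer to the input.
            float score = ld.getDistance("analysis-icuu", name);
            if (score > 0.7f) {
                scored.add(new Tuple<>(score, name));
            }
        }
        // Best match first, mirroring the timSort comparator above.
        scored.sort(Comparator.comparing((Tuple<Float, String> t) -> t.v1()).reversed());
        scored.forEach(t -> System.out.println(t.v2() + " scored " + t.v1()));
    }
}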
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class TransportUpgradeAction, method newResponse.
@Override
protected UpgradeResponse newResponse(
    UpgradeRequest request,
    int totalShards,
    int successfulShards,
    int failedShards,
    List<ShardUpgradeResult> shardUpgradeResults,
    List<DefaultShardOperationFailedException> shardFailures,
    ClusterState clusterState
) {
    Map<String, Integer> successfulPrimaryShards = new HashMap<>();
    Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
    for (ShardUpgradeResult result : shardUpgradeResults) {
        successfulShards++;
        String index = result.getShardId().getIndex().getName();
        if (result.primary()) {
            Integer count = successfulPrimaryShards.get(index);
            successfulPrimaryShards.put(index, count == null ? 1 : count + 1);
        }
        Tuple<Version, org.apache.lucene.util.Version> versionTuple = versions.get(index);
        if (versionTuple == null) {
            versions.put(index, new Tuple<>(result.upgradeVersion(), result.oldestLuceneSegment()));
        } else {
            // We already have versions for this index - let's see if we need to update them based on the current shard
            Version version = versionTuple.v1();
            org.apache.lucene.util.Version luceneVersion = versionTuple.v2();
            // Since we rewrite the mapping during upgrade, the metadata is always rewritten by the latest version
            if (result.upgradeVersion().after(versionTuple.v1())) {
                version = result.upgradeVersion();
            }
            // Keep the oldest Lucene segment version that we still need to support
            if (result.oldestLuceneSegment().onOrAfter(versionTuple.v2()) == false) {
                luceneVersion = result.oldestLuceneSegment();
            }
            versions.put(index, new Tuple<>(version, luceneVersion));
        }
    }
    Map<String, Tuple<Version, String>> updatedVersions = new HashMap<>();
    Metadata metadata = clusterState.metadata();
    for (Map.Entry<String, Tuple<Version, org.apache.lucene.util.Version>> versionEntry : versions.entrySet()) {
        String index = versionEntry.getKey();
        Integer primaryCount = successfulPrimaryShards.get(index);
        int expectedPrimaryCount = metadata.index(index).getNumberOfShards();
        // Guard against a null primaryCount (no successful primaries for this index) before unboxing.
        if (primaryCount != null && primaryCount == expectedPrimaryCount) {
            updatedVersions.put(index, new Tuple<>(versionEntry.getValue().v1(), versionEntry.getValue().v2().toString()));
        } else {
            logger.warn(
                "Not updating settings for the index [{}] because upgrade of some of its primary shards failed - expected[{}], received[{}]",
                index,
                expectedPrimaryCount,
                primaryCount == null ? 0 : primaryCount
            );
        }
    }
    return new UpgradeResponse(updatedVersions, totalShards, successfulShards, failedShards, shardFailures);
}
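Each index's entry is folded across its shard results: v1 is ratcheted up to the newest upgrade version, while v2 is ratcheted down to the oldest Lucene segment version that still has to be supported. A minimal standalone sketch of that max/min merge pattern, substituting plain integers for the OpenSearch and Lucene Version types (the index name and version numbers are illustrative):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.opensearch.common.collect.Tuple;

public class VersionMergeDemo {
    public static void main(String[] args) {
        // Each row: index name, upgrade version, oldest segment version.
        List<String[]> shardResults = List.of(
            new String[] { "logs", "7", "3" },
            new String[] { "logs", "9", "5" },
            new String[] { "logs", "8", "2" }
        );
        Map<String, Tuple<Integer, Integer>> versions = new HashMap<>();
        for (String[] r : shardResults) {
            Tuple<Integer, Integer> current = new Tuple<>(Integer.parseInt(r[1]), Integer.parseInt(r[2]));
            // Keep the newest upgrade version (v1) and the oldest segment version (v2).
            versions.merge(r[0], current, (old, cur) -> new Tuple<>(Math.max(old.v1(), cur.v1()), Math.min(old.v2(), cur.v2())));
        }
        System.out.println(versions.get("logs").v1() + " / " + versions.get("logs").v2()); // prints 9 / 2
    }
}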
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class ClusterBootstrapService, method checkRequirements.
private Tuple<Set<DiscoveryNode>, List<String>> checkRequirements(Set<DiscoveryNode> nodes) {
    final Set<DiscoveryNode> selectedNodes = new HashSet<>();
    final List<String> unmatchedRequirements = new ArrayList<>();
    for (final String bootstrapRequirement : bootstrapRequirements) {
        final Set<DiscoveryNode> matchingNodes = nodes.stream()
            .filter(n -> matchesRequirement(n, bootstrapRequirement))
            .collect(Collectors.toSet());
        if (matchingNodes.isEmpty()) {
            unmatchedRequirements.add(bootstrapRequirement);
        }
        if (matchingNodes.size() > 1) {
            throw new IllegalStateException(
                "requirement [" + bootstrapRequirement + "] matches multiple nodes: " + matchingNodes
            );
        }
        for (final DiscoveryNode matchingNode : matchingNodes) {
            if (selectedNodes.add(matchingNode) == false) {
                throw new IllegalStateException(
                    "node [" + matchingNode + "] matches multiple requirements: "
                        + bootstrapRequirements.stream()
                            .filter(r -> matchesRequirement(matchingNode, r))
                            .collect(Collectors.toList())
                );
            }
        }
    }
    return Tuple.tuple(selectedNodes, unmatchedRequirements);
}
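Here Tuple serves as a lightweight two-value return: from a single pass the caller gets both the nodes selected so far (v1) and the requirements nothing matched (v2). A self-contained sketch of the same shape, with a hypothetical partition helper standing in for checkRequirements and plain strings standing in for DiscoveryNode:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.opensearch.common.collect.Tuple;

public class TwoValueReturnDemo {
    // Hypothetical analogue of checkRequirements: split required names into
    // the ones a candidate satisfies and the ones nothing matched.
    static Tuple<Set<String>, List<String>> partition(Set<String> candidates, List<String> required) {
        Set<String> selected = required.stream().filter(candidates::contains).collect(Collectors.toSet());
        List<String> unmatched = required.stream().filter(r -> !candidates.contains(r)).collect(Collectors.toList());
        return Tuple.tuple(selected, unmatched);
    }

    public static void main(String[] args) {
        Tuple<Set<String>, List<String>> result = partition(Set.of("node-a", "node-b"), List.of("node-a", "node-c"));
        System.out.println("selected: " + result.v1());   // [node-a]
        System.out.println("unmatched: " + result.v2());  // [node-c]
    }
}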
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
The class ChildrenToParentAggregatorTests, method testParentChildTerms.
public void testParentChildTerms() throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    final Map<String, Tuple<Integer, Integer>> expectedParentChildRelations = setupIndex(indexWriter);
    indexWriter.close();
    SortedMap<Integer, Long> entries = new TreeMap<>();
    for (Tuple<Integer, Integer> value : expectedParentChildRelations.values()) {
        Long l = entries.computeIfAbsent(value.v2(), integer -> 0L);
        entries.put(value.v2(), l + 1);
    }
    List<Map.Entry<Integer, Long>> sortedValues = new ArrayList<>(entries.entrySet());
    sortedValues.sort((o1, o2) -> {
        // sort larger values first
        int ret = o2.getValue().compareTo(o1.getValue());
        if (ret != 0) {
            return ret;
        }
        // on equal value, sort by key
        return o1.getKey().compareTo(o2.getKey());
    });
    IndexReader indexReader = OpenSearchDirectoryReader.wrap(
        DirectoryReader.open(directory),
        new ShardId(new Index("foo", "_na_"), 1)
    );
    // TODO set "maybeWrap" to true for IndexSearcher once #23338 is resolved
    IndexSearcher indexSearcher = newSearcher(indexReader, false, true);
    // verify a terms aggregation inside the parent aggregation
    testCaseTerms(new MatchAllDocsQuery(), indexSearcher, parent -> {
        assertNotNull(parent);
        assertTrue(JoinAggregationInspectionHelper.hasValue(parent));
        LongTerms valueTerms = parent.getAggregations().get("value_terms");
        assertNotNull(valueTerms);
        List<LongTerms.Bucket> valueTermsBuckets = valueTerms.getBuckets();
        assertNotNull(valueTermsBuckets);
        assertEquals("Had: " + parent, sortedValues.size(), valueTermsBuckets.size());
        int i = 0;
        for (Map.Entry<Integer, Long> entry : sortedValues) {
            LongTerms.Bucket bucket = valueTermsBuckets.get(i);
            assertEquals(entry.getKey().longValue(), bucket.getKeyAsNumber());
            assertEquals(entry.getValue(), (Long) bucket.getDocCount());
            i++;
        }
    });
    indexReader.close();
    directory.close();
}
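The test builds its expectations by counting how many parents map to each child value (the tuple's v2) and then ordering the counts by descending doc count with ascending key as the tie-breaker, matching the default bucket order of a terms aggregation. A standalone sketch of that counting-and-ordering step, with plain ints standing in for the tuples (the sample values are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public class BucketOrderDemo {
    public static void main(String[] args) {
        int[] childValues = { 5, 3, 5, 7, 3, 5 };
        SortedMap<Integer, Long> entries = new TreeMap<>();
        for (int v : childValues) {
            entries.merge(v, 1L, Long::sum); // count occurrences per value
        }
        List<Map.Entry<Integer, Long>> sorted = new ArrayList<>(entries.entrySet());
        // Larger counts first; ties broken by ascending key, as in the test above.
        sorted.sort((a, b) -> {
            int byCount = b.getValue().compareTo(a.getValue());
            return byCount != 0 ? byCount : a.getKey().compareTo(b.getKey());
        });
        sorted.forEach(e -> System.out.println(e.getKey() + " -> " + e.getValue()));
        // prints: 5 -> 3, 3 -> 2, 7 -> 1
    }
}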