Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
Class BlobStoreRepository, method writeUpdatedShardMetaDataAndComputeDeletes:
// Updates the shard state metadata for shards of a snapshot that is to be deleted. Also computes the files to be cleaned up.
private void writeUpdatedShardMetaDataAndComputeDeletes(
    Collection<SnapshotId> snapshotIds,
    RepositoryData oldRepositoryData,
    boolean useUUIDs,
    ActionListener<Collection<ShardSnapshotMetaDeleteResult>> onAllShardsCompleted
) {
    final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
    final List<IndexId> indices = oldRepositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotIds);
    if (indices.isEmpty()) {
        onAllShardsCompleted.onResponse(Collections.emptyList());
        return;
    }

    // Listener that flattens out the delete results for each index
    final ActionListener<Collection<ShardSnapshotMetaDeleteResult>> deleteIndexMetadataListener = new GroupedActionListener<>(
        ActionListener.map(onAllShardsCompleted, res -> res.stream().flatMap(Collection::stream).collect(Collectors.toList())),
        indices.size()
    );

    for (IndexId indexId : indices) {
        final Set<SnapshotId> survivingSnapshots = oldRepositoryData.getSnapshots(indexId)
            .stream()
            .filter(id -> snapshotIds.contains(id) == false)
            .collect(Collectors.toSet());
        final StepListener<Collection<Integer>> shardCountListener = new StepListener<>();
        final Collection<String> indexMetaGenerations = snapshotIds.stream()
            .map(id -> oldRepositoryData.indexMetaDataGenerations().indexMetaBlobId(id, indexId))
            .collect(Collectors.toSet());
        final ActionListener<Integer> allShardCountsListener = new GroupedActionListener<>(shardCountListener, indexMetaGenerations.size());
        final BlobContainer indexContainer = indexContainer(indexId);
        for (String indexMetaGeneration : indexMetaGenerations) {
            executor.execute(ActionRunnable.supply(allShardCountsListener, () -> {
                try {
                    return INDEX_METADATA_FORMAT.read(indexContainer, indexMetaGeneration, namedXContentRegistry).getNumberOfShards();
                } catch (Exception ex) {
                    logger.warn(
                        () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", indexMetaGeneration, indexId.getName()),
                        ex
                    );
                    // Ignoring it and letting the cleanup deal with it.
                    return null;
                }
            }));
        }
        shardCountListener.whenComplete(counts -> {
            final int shardCount = counts.stream().mapToInt(i -> i).max().orElse(0);
            if (shardCount == 0) {
                deleteIndexMetadataListener.onResponse(null);
                return;
            }
            // Listener for collecting the results of removing the snapshot from each shard's metadata in the current index
            final ActionListener<ShardSnapshotMetaDeleteResult> allShardsListener = new GroupedActionListener<>(
                deleteIndexMetadataListener,
                shardCount
            );
            for (int shardId = 0; shardId < shardCount; shardId++) {
                final int finalShardId = shardId;
                executor.execute(new AbstractRunnable() {
                    @Override
                    protected void doRun() throws Exception {
                        final BlobContainer shardContainer = shardContainer(indexId, finalShardId);
                        final Set<String> blobs = shardContainer.listBlobs().keySet();
                        final BlobStoreIndexShardSnapshots blobStoreIndexShardSnapshots;
                        final long newGen;
                        if (useUUIDs) {
                            newGen = -1L;
                            blobStoreIndexShardSnapshots = buildBlobStoreIndexShardSnapshots(
                                blobs,
                                shardContainer,
                                oldRepositoryData.shardGenerations().getShardGen(indexId, finalShardId)
                            ).v1();
                        } else {
                            Tuple<BlobStoreIndexShardSnapshots, Long> tuple = buildBlobStoreIndexShardSnapshots(blobs, shardContainer);
                            newGen = tuple.v2() + 1;
                            blobStoreIndexShardSnapshots = tuple.v1();
                        }
                        allShardsListener.onResponse(
                            deleteFromShardSnapshotMeta(
                                survivingSnapshots,
                                indexId,
                                finalShardId,
                                snapshotIds,
                                shardContainer,
                                blobs,
                                blobStoreIndexShardSnapshots,
                                newGen
                            )
                        );
                    }

                    @Override
                    public void onFailure(Exception ex) {
                        logger.warn(
                            () -> new ParameterizedMessage(
                                "{} failed to delete shard data for shard [{}][{}]",
                                snapshotIds,
                                indexId.getName(),
                                finalShardId
                            ),
                            ex
                        );
                        // Just passing null here to count down the listener instead of failing it; the stale data left behind
                        // here will be retried in the next delete or repository cleanup.
                        allShardsListener.onResponse(null);
                    }
                });
            }
        }, deleteIndexMetadataListener::onFailure);
    }
}
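In the non-UUID branch above, the Tuple is how buildBlobStoreIndexShardSnapshots returns two values at once: the parsed shard-level snapshot listing (v1) and the generation it was read at (v2), from which the next generation to write is computed. A minimal standalone sketch of that pattern, assuming only org.opensearch.common.collect.Tuple on the classpath; the readListingAndGeneration helper and the String listing are made up for illustration:

import org.opensearch.common.collect.Tuple;

public class ShardGenExample {
    // Hypothetical stand-in for buildBlobStoreIndexShardSnapshots: it returns the
    // listing together with the generation it was read from, as a single Tuple.
    static Tuple<String, Long> readListingAndGeneration(long currentGen) {
        String listing = "index-" + currentGen; // pretend blob contents
        return new Tuple<>(listing, currentGen);
    }

    public static void main(String[] args) {
        Tuple<String, Long> tuple = readListingAndGeneration(7L);
        long newGen = tuple.v2() + 1;   // next generation to write, as in the method above
        String listing = tuple.v1();    // the listing that was read
        System.out.println("read " + listing + ", will write generation " + newGen);
    }
}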
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
Class BlobStoreRepository, method doGetRepositoryData:
private void doGetRepositoryData(ActionListener<RepositoryData> listener) {
    // Retry loading RepositoryData in a loop in case we run into concurrent modifications of the repository.
    // Keep track of the most recent generation we failed to load so we can break out of the loop if we fail to load the same
    // generation repeatedly.
    long lastFailedGeneration = RepositoryData.UNKNOWN_REPO_GEN;
    while (true) {
        final long genToLoad;
        if (bestEffortConsistency) {
            // We're only using #latestKnownRepoGen as a hint in this mode and listing repo contents as a secondary way of trying
            // to find a higher generation
            final long generation;
            try {
                generation = latestIndexBlobId();
            } catch (IOException ioe) {
                listener.onFailure(new RepositoryException(metadata.name(), "Could not determine repository generation from root blobs", ioe));
                return;
            }
            genToLoad = latestKnownRepoGen.updateAndGet(known -> Math.max(known, generation));
            if (genToLoad > generation) {
                logger.info(
                    "Determined repository generation ["
                        + generation
                        + "] from repository contents but correct generation must be at least ["
                        + genToLoad
                        + "]"
                );
            }
        } else {
            // We only rely on the generation tracked in #latestKnownRepoGen, which is exclusively updated from the cluster state
            genToLoad = latestKnownRepoGen.get();
        }
        try {
            final Tuple<Long, BytesReference> cached = latestKnownRepositoryData.get();
            final RepositoryData loaded;
            // Caching is not used with #bestEffortConsistency; see docs on #cacheRepositoryData for details
            if (bestEffortConsistency == false && cached != null && cached.v1() == genToLoad) {
                loaded = repositoryDataFromCachedEntry(cached);
            } else {
                loaded = getRepositoryData(genToLoad);
                // We can cache the repository data serialized in the most recent version here without regard to the actual
                // repository metadata version, since we're only caching the information that we just wrote and thus won't
                // accidentally cache any information that isn't safe
                cacheRepositoryData(BytesReference.bytes(loaded.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)), genToLoad);
            }
            listener.onResponse(loaded);
            return;
        } catch (RepositoryException e) {
            // If the generation to load changed concurrently and we didn't just try loading the same generation before, we retry
            if (genToLoad != latestKnownRepoGen.get() && genToLoad != lastFailedGeneration) {
                lastFailedGeneration = genToLoad;
                logger.warn(
                    "Failed to load repository data generation ["
                        + genToLoad
                        + "] because a concurrent operation moved the current generation to ["
                        + latestKnownRepoGen.get()
                        + "]",
                    e
                );
                continue;
            }
            if (bestEffortConsistency == false && ExceptionsHelper.unwrap(e, NoSuchFileException.class) != null) {
                // We did not find the expected index-N even though the cluster state continues to point at the missing value
                // of N, so we mark this repository as corrupted.
                markRepoCorrupted(genToLoad, e, ActionListener.wrap(v -> listener.onFailure(corruptedStateException(e)), listener::onFailure));
            } else {
                listener.onFailure(e);
            }
            return;
        } catch (Exception e) {
            listener.onFailure(new RepositoryException(metadata.name(), "Unexpected exception when loading repository data", e));
            return;
        }
    }
}
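The Tuple<Long, BytesReference> read from latestKnownRepositoryData pairs the cached serialized repository data with the generation it was written for, so a cache hit requires an exact generation match (cached.v1() == genToLoad). A small sketch of that generation-stamped cache idea, with a String standing in for BytesReference; this is an illustration, not the actual OpenSearch implementation:

import java.util.concurrent.atomic.AtomicReference;
import org.opensearch.common.collect.Tuple;

public class GenStampedCache {
    // Generation number (v1) paired with the payload cached for it (v2),
    // mirroring the latestKnownRepositoryData field used above.
    private final AtomicReference<Tuple<Long, String>> cached = new AtomicReference<>();

    String load(long genToLoad) {
        Tuple<Long, String> entry = cached.get();
        if (entry != null && entry.v1() == genToLoad) {
            return entry.v2(); // hit: the entry was written for exactly this generation
        }
        String loaded = "repository-data-" + genToLoad; // pretend expensive load
        cached.set(new Tuple<>(genToLoad, loaded));
        return loaded;
    }

    public static void main(String[] args) {
        GenStampedCache cache = new GenStampedCache();
        System.out.println(cache.load(3L)); // miss: loads and caches generation 3
        System.out.println(cache.load(3L)); // hit: served from the cached Tuple
    }
}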
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
Class BaseRestHandler, method unrecognized:
protected final String unrecognized(final RestRequest request, final Set<String> invalids, final Set<String> candidates, final String detail) {
    StringBuilder message = new StringBuilder(
        String.format(Locale.ROOT, "request [%s] contains unrecognized %s%s: ", request.path(), detail, invalids.size() > 1 ? "s" : "")
    );
    boolean first = true;
    for (final String invalid : invalids) {
        final LevenshteinDistance ld = new LevenshteinDistance();
        final List<Tuple<Float, String>> scoredParams = new ArrayList<>();
        for (final String candidate : candidates) {
            final float distance = ld.getDistance(invalid, candidate);
            if (distance > 0.5f) {
                scoredParams.add(new Tuple<>(distance, candidate));
            }
        }
        CollectionUtil.timSort(scoredParams, (a, b) -> {
            // sort by distance in reverse order, then by parameter name for equal distances
            int compare = a.v1().compareTo(b.v1());
            if (compare != 0) {
                return -compare;
            } else {
                return a.v2().compareTo(b.v2());
            }
        });
        if (first == false) {
            message.append(", ");
        }
        message.append("[").append(invalid).append("]");
        final List<String> keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList());
        if (keys.isEmpty() == false) {
            message.append(" -> did you mean ");
            if (keys.size() == 1) {
                message.append("[").append(keys.get(0)).append("]");
            } else {
                message.append("any of ").append(keys.toString());
            }
            message.append("?");
        }
        first = false;
    }
    return message.toString();
}
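The Tuple<Float, String> keeps each candidate's similarity score attached to its name so both can be sorted together before the names are extracted with Tuple::v2. A self-contained sketch of that score-and-sort step; the similarity function below is a crude stand-in for Lucene's LevenshteinDistance, made up for illustration:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.opensearch.common.collect.Tuple;

public class DidYouMeanExample {
    public static void main(String[] args) {
        String invalid = "siz";
        List<String> candidates = Arrays.asList("size", "sort", "from");

        // Keep (score, candidate) pairs together, as unrecognized() does above.
        List<Tuple<Float, String>> scored = new ArrayList<>();
        for (String candidate : candidates) {
            float score = similarity(invalid, candidate);
            if (score > 0.5f) {
                scored.add(new Tuple<>(score, candidate));
            }
        }
        // Best score first; ties broken by candidate name, as in the snippet above.
        scored.sort((a, b) -> {
            int cmp = b.v1().compareTo(a.v1());
            return cmp != 0 ? cmp : a.v2().compareTo(b.v2());
        });
        scored.forEach(t -> System.out.println(t.v2() + " scored " + t.v1()));
    }

    // Toy similarity: shared-prefix length over the longer string's length.
    static float similarity(String a, String b) {
        int i = 0;
        while (i < Math.min(a.length(), b.length()) && a.charAt(i) == b.charAt(i)) {
            i++;
        }
        return (float) i / Math.max(a.length(), b.length());
    }
}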
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
Class RestRequest, method contentOrSourceParam:
/**
* Get the content of the request or the contents of the {@code source} param or throw an exception if both are missing.
* Prefer {@link #contentOrSourceParamParser()} or {@link #withContentOrSourceParamParserOrNull(CheckedConsumer)} if you need a parser.
*/
public final Tuple<XContentType, BytesReference> contentOrSourceParam() {
    if (hasContentOrSourceParam() == false) {
        throw new OpenSearchParseException("request body or source parameter is required");
    } else if (hasContent()) {
        return new Tuple<>(xContentType.get(), requiredContent());
    }

    String source = param("source");
    String typeParam = param("source_content_type");
    if (source == null || typeParam == null) {
        throw new IllegalStateException("source and source_content_type parameters are required");
    }

    BytesArray bytes = new BytesArray(source);
    final XContentType xContentType = parseContentType(Collections.singletonList(typeParam));
    if (xContentType == null) {
        throw new IllegalStateException("Unknown value for source_content_type [" + typeParam + "]");
    }
    return new Tuple<>(xContentType, bytes);
}
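Here the Tuple return type lets a single call hand the caller both the body bytes and the content type needed to parse them. A hedged sketch of the same shape, with a plain enum and byte[] standing in for XContentType and BytesReference; bodyWithType is a hypothetical helper, not an OpenSearch API:

import java.nio.charset.StandardCharsets;
import org.opensearch.common.collect.Tuple;

public class ContentPairExample {
    enum ContentType { JSON, YAML }

    // Returns the body and the type that describes it in one value, in the
    // spirit of contentOrSourceParam() above.
    static Tuple<ContentType, byte[]> bodyWithType(String source, String typeParam) {
        ContentType type = "application/yaml".equals(typeParam) ? ContentType.YAML : ContentType.JSON;
        return new Tuple<>(type, source.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        Tuple<ContentType, byte[]> body = bodyWithType("{\"field\":1}", "application/json");
        System.out.println(body.v1() + ": " + body.v2().length + " bytes"); // JSON: 11 bytes
    }
}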
Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.
Class DeleteResponseTests, method doFromXContentTestWithRandomFields:
private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException {
    final Tuple<DeleteResponse, DeleteResponse> tuple = randomDeleteResponse();
    DeleteResponse deleteResponse = tuple.v1();
    DeleteResponse expectedDeleteResponse = tuple.v2();

    boolean humanReadable = randomBoolean();
    final XContentType xContentType = randomFrom(XContentType.values());
    BytesReference originalBytes = toShuffledXContent(deleteResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);

    BytesReference mutated;
    if (addRandomFields) {
        // The ShardInfo.Failure's exception is rendered out in a "reason" object. We shouldn't add anything random there
        // because exception rendering and parsing are very permissive: any extra object or field would be rendered as
        // exception custom metadata and be parsed back as a custom header, making it impossible to compare the results
        // in this test.
        Predicate<String> excludeFilter = path -> path.contains("reason");
        mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random());
    } else {
        mutated = originalBytes;
    }

    DeleteResponse parsedDeleteResponse;
    try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
        parsedDeleteResponse = DeleteResponse.fromXContent(parser);
        assertNull(parser.nextToken());
    }

    // We can't use equals() to compare the original and the parsed delete response
    // because the random delete response can contain shard failures with exceptions,
    // and those exceptions are not parsed back with the same types.
    assertDocWriteResponse(expectedDeleteResponse, parsedDeleteResponse);
}
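randomDeleteResponse() uses a Tuple to return a paired fixture: the randomized response under test (v1) and the response the test expects to see after the XContent round trip (v2). A toy illustration of that paired-fixture pattern, with made-up strings in place of DeleteResponse; this is not the OpenSearch test code:

import org.opensearch.common.collect.Tuple;

public class FixturePairExample {
    // Hypothetical fixture: v1 is the input under test, v2 is what a round trip
    // through serialization is expected to produce.
    static Tuple<String, String> randomFixture() {
        String input = "Result{took=5, extra=transient}";
        String expected = "Result{took=5}"; // the transient detail should be dropped
        return new Tuple<>(input, expected);
    }

    public static void main(String[] args) {
        Tuple<String, String> fixture = randomFixture();
        String roundTripped = fixture.v1().replace(", extra=transient", ""); // pretend parse-back
        if (!roundTripped.equals(fixture.v2())) {
            throw new AssertionError("expected " + fixture.v2() + " but got " + roundTripped);
        }
        System.out.println("round trip matched: " + roundTripped);
    }
}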