Use of org.opensearch.index.Index in project OpenSearch by opensearch-project.
Class SharedClusterSnapshotRestoreIT, method testDeleteSnapshot.
public void testDeleteSnapshot() throws Exception {
    final int numberOfSnapshots = between(5, 15);
    Client client = client();
    Path repo = randomRepoPath();
    createRepository(
        "test-repo",
        "fs",
        Settings.builder()
            .put("location", repo)
            .put("compress", false)
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
    );
    createIndex("test-idx");
    ensureGreen();
    int[] numberOfFiles = new int[numberOfSnapshots];
    logger.info("--> creating {} snapshots ", numberOfSnapshots);
    for (int i = 0; i < numberOfSnapshots; i++) {
        for (int j = 0; j < 10; j++) {
            index("test-idx", "_doc", Integer.toString(i * 10 + j), "foo", "bar" + i * 10 + j);
        }
        refresh();
        createSnapshot("test-repo", "test-snap-" + i, Collections.singletonList("test-idx"));
        // Store number of files after each snapshot
        numberOfFiles[i] = numberOfFiles(repo);
    }
    assertDocCount("test-idx", 10L * numberOfSnapshots);
    int numberOfFilesBeforeDeletion = numberOfFiles(repo);
    logger.info("--> delete all snapshots except the first one and last one");
    if (randomBoolean()) {
        for (int i = 1; i < numberOfSnapshots - 1; i++) {
            client.admin().cluster().prepareDeleteSnapshot("test-repo", new String[] { "test-snap-" + i }).get();
        }
    } else {
        client.admin()
            .cluster()
            .prepareDeleteSnapshot(
                "test-repo",
                IntStream.range(1, numberOfSnapshots - 1).mapToObj(i -> "test-snap-" + i).toArray(String[]::new)
            )
            .get();
    }
    int numberOfFilesAfterDeletion = numberOfFiles(repo);
    assertThat(numberOfFilesAfterDeletion, lessThan(numberOfFilesBeforeDeletion));
    logger.info("--> delete index");
    cluster().wipeIndices("test-idx");
    logger.info("--> restore index");
    String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1);
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
        .cluster()
        .prepareRestoreSnapshot("test-repo", lastSnapshot)
        .setWaitForCompletion(true)
        .execute()
        .actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    assertDocCount("test-idx", 10L * numberOfSnapshots);
    startDeleteSnapshot("test-repo", lastSnapshot).get();
    logger.info("--> make sure that number of files is back to what it was when the first snapshot was made");
    assertFileCount(repo, numberOfFiles[0]);
}
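The assertions above hinge on a numberOfFiles(Path) helper from the snapshot test infrastructure that counts every file under the repository path. A minimal sketch of such a helper using only java.nio (the actual implementation in the test base class may differ; assumes imports from java.nio.file, java.nio.file.attribute, and java.util.concurrent.atomic):

// Count all regular files under the repository root; deleting snapshots
// should bring this count back down as stale blobs are cleaned up.
public static int numberOfFiles(Path dir) throws IOException {
    final AtomicInteger count = new AtomicInteger();
    Files.walkFileTree(dir, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
            count.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }
    });
    return count.get();
}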
Use of org.opensearch.index.Index in project OpenSearch by opensearch-project.
Class DocWriteResponse, method parseInnerToXContent.
/**
* Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method.
*
* This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning
* {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly
* if needed and then immediately returns.
*/
protected static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException {
    XContentParser.Token token = parser.currentToken();
    ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
    String currentFieldName = parser.currentName();
    token = parser.nextToken();
    if (token.isValue()) {
        if (_INDEX.equals(currentFieldName)) {
            // index uuid and shard id are unknown and can't be parsed back for now.
            context.setShardId(new ShardId(new Index(parser.text(), IndexMetadata.INDEX_UUID_NA_VALUE), -1));
        } else if (_ID.equals(currentFieldName)) {
            context.setId(parser.text());
        } else if (_VERSION.equals(currentFieldName)) {
            context.setVersion(parser.longValue());
        } else if (RESULT.equals(currentFieldName)) {
            String result = parser.text();
            for (Result r : Result.values()) {
                if (r.getLowercase().equals(result)) {
                    context.setResult(r);
                    break;
                }
            }
        } else if (FORCED_REFRESH.equals(currentFieldName)) {
            context.setForcedRefresh(parser.booleanValue());
        } else if (_SEQ_NO.equals(currentFieldName)) {
            context.setSeqNo(parser.longValue());
        } else if (_PRIMARY_TERM.equals(currentFieldName)) {
            context.setPrimaryTerm(parser.longValue());
        }
    } else if (token == XContentParser.Token.START_OBJECT) {
        if (_SHARDS.equals(currentFieldName)) {
            context.setShardInfo(ShardInfo.fromXContent(parser));
        } else {
            // skip potential inner objects for forward compatibility
            parser.skipChildren();
        }
    } else if (token == XContentParser.Token.START_ARRAY) {
        // skip potential inner arrays for forward compatibility
        parser.skipChildren();
    }
}
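Since parseInnerToXContent consumes exactly one field per call, a subclass parser drives it in a loop until the enclosing response object ends. A hypothetical fromXContent for a DocWriteResponse subclass could look like the following sketch (MyResponse and its Builder are illustrative names, not part of the OpenSearch API):

public static MyResponse fromXContent(XContentParser parser) throws IOException {
    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
    MyResponse.Builder context = new MyResponse.Builder();
    // Each iteration positions the parser on a FIELD_NAME token and lets the
    // parent class consume that field; unknown objects and arrays are skipped.
    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
        parseInnerToXContent(parser, context);
    }
    return context.build();
}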
Use of org.opensearch.index.Index in project OpenSearch by opensearch-project.
Class TransportBulkAction, method doInternalExecute.
protected void doInternalExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener<BulkResponse> listener) {
    final long startTime = relativeTime();
    final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
    boolean hasIndexRequestsWithPipelines = false;
    final Metadata metadata = clusterService.state().getMetadata();
    final Version minNodeVersion = clusterService.state().getNodes().getMinNodeVersion();
    for (DocWriteRequest<?> actionRequest : bulkRequest.requests) {
        IndexRequest indexRequest = getIndexWriteRequest(actionRequest);
        if (indexRequest != null) {
            // Each index request needs to be evaluated, because this method also modifies the IndexRequest
            boolean indexRequestHasPipeline = IngestService.resolvePipelines(actionRequest, indexRequest, metadata);
            hasIndexRequestsWithPipelines |= indexRequestHasPipeline;
        }
        if (actionRequest instanceof IndexRequest) {
            IndexRequest ir = (IndexRequest) actionRequest;
            ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion);
            if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) {
                throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally");
            }
        }
    }
    if (hasIndexRequestsWithPipelines) {
        // This method will be called again once ingest processing completes, with the pipeline on each
        // request resolved to IngestService.NOOP_PIPELINE_NAME, so this branch is not taken a second time.
        try {
            if (Assertions.ENABLED) {
                final boolean arePipelinesResolved = bulkRequest.requests()
                    .stream()
                    .map(TransportBulkAction::getIndexWriteRequest)
                    .filter(Objects::nonNull)
                    .allMatch(IndexRequest::isPipelineResolved);
                assert arePipelinesResolved : bulkRequest;
            }
            if (clusterService.localNode().isIngestNode()) {
                processBulkIndexIngestRequest(task, bulkRequest, executorName, listener);
            } else {
                ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener);
            }
        } catch (Exception e) {
            listener.onFailure(e);
        }
        return;
    }
    final boolean includesSystem = includesSystem(bulkRequest, clusterService.state().metadata().getIndicesLookup(), systemIndices);
    if (includesSystem || needToCheck()) {
        // Attempt to create all the indices that we're going to need during the bulk before we start.
        // Step 1: collect all the indices in the request
        final Map<String, Boolean> indices = bulkRequest.requests.stream()
            .filter(
                request -> request.opType() != DocWriteRequest.OpType.DELETE
                    || request.versionType() == VersionType.EXTERNAL
                    || request.versionType() == VersionType.EXTERNAL_GTE
            )
            .collect(Collectors.toMap(DocWriteRequest::index, DocWriteRequest::isRequireAlias, (v1, v2) -> v1 || v2));
        /* Step 2: filter that to indices that don't exist and we can create. At the same time build a map of indices we can't create
         * that we'll use when we try to run the requests. */
        final Map<String, IndexNotFoundException> indicesThatCannotBeCreated = new HashMap<>();
        Set<String> autoCreateIndices = new HashSet<>();
        ClusterState state = clusterService.state();
        for (Map.Entry<String, Boolean> indexAndFlag : indices.entrySet()) {
            boolean shouldAutoCreate;
            final String index = indexAndFlag.getKey();
            try {
                shouldAutoCreate = shouldAutoCreate(index, state);
            } catch (IndexNotFoundException e) {
                shouldAutoCreate = false;
                indicesThatCannotBeCreated.put(index, e);
            }
            // We should only auto create if we are not requiring it to be an alias
            if (shouldAutoCreate && (indexAndFlag.getValue() == false)) {
                autoCreateIndices.add(index);
            }
        }
        // Step 3: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back.
        if (autoCreateIndices.isEmpty()) {
            executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
        } else {
            final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
            for (String index : autoCreateIndices) {
                createIndex(index, bulkRequest.timeout(), minNodeVersion, new ActionListener<CreateIndexResponse>() {
                    @Override
                    public void onResponse(CreateIndexResponse result) {
                        if (counter.decrementAndGet() == 0) {
                            threadPool.executor(executorName).execute(new ActionRunnable<BulkResponse>(listener) {
                                @Override
                                protected void doRun() {
                                    executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
                                }
                            });
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        if (!(ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException)) {
                            // fail all requests involving this index, if create didn't work
                            for (int i = 0; i < bulkRequest.requests.size(); i++) {
                                DocWriteRequest<?> request = bulkRequest.requests.get(i);
                                if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) {
                                    bulkRequest.requests.set(i, null);
                                }
                            }
                        }
                        if (counter.decrementAndGet() == 0) {
                            final ActionListener<BulkResponse> wrappedListener = ActionListener.wrap(listener::onResponse, inner -> {
                                inner.addSuppressed(e);
                                listener.onFailure(inner);
                            });
                            threadPool.executor(executorName).execute(new ActionRunnable<BulkResponse>(wrappedListener) {
                                @Override
                                protected void doRun() {
                                    executeBulk(task, bulkRequest, startTime, wrappedListener, responses, indicesThatCannotBeCreated);
                                }

                                @Override
                                public void onRejection(Exception rejectedException) {
                                    rejectedException.addSuppressed(e);
                                    super.onRejection(rejectedException);
                                }
                            });
                        }
                    }
                });
            }
        }
    } else {
        executeBulk(task, bulkRequest, startTime, listener, responses, emptyMap());
    }
}
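The onFailure branch above depends on setResponseFailureIfIndexMatches to fail only the bulk items that target the index whose creation failed. A plausible sketch of that private helper (constructor shapes for BulkItemResponse vary between versions, so treat this as an approximation rather than the exact OpenSearch code):

private static boolean setResponseFailureIfIndexMatches(
    AtomicArray<BulkItemResponse> responses,
    int idx,
    DocWriteRequest<?> request,
    String index,
    Exception e
) {
    if (index.equals(request.index())) {
        // Record a per-item failure so the bulk response still lines up by position.
        BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.id(), e);
        responses.set(idx, new BulkItemResponse(idx, request.opType(), failure));
        return true;
    }
    return false;
}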
Use of org.opensearch.index.Index in project OpenSearch by opensearch-project.
Class ClusterChangedEvent, method indicesDeletedFromClusterState.
// Get the deleted indices by comparing the index metadatas in the previous and new cluster states.
// If an index exists in the previous cluster state, but not in the new cluster state, it must have been deleted.
private List<Index> indicesDeletedFromClusterState() {
    // https://github.com/elastic/elasticsearch/issues/11665
    if (metadataChanged() == false || isNewCluster()) {
        return Collections.emptyList();
    }
    Set<Index> deleted = null;
    final Metadata previousMetadata = previousState.metadata();
    final Metadata currentMetadata = state.metadata();
    for (ObjectCursor<IndexMetadata> cursor : previousMetadata.indices().values()) {
        IndexMetadata index = cursor.value;
        IndexMetadata current = currentMetadata.index(index.getIndex());
        if (current == null) {
            if (deleted == null) {
                deleted = new HashSet<>();
            }
            deleted.add(index.getIndex());
        }
    }
    final IndexGraveyard currentGraveyard = currentMetadata.indexGraveyard();
    final IndexGraveyard previousGraveyard = previousMetadata.indexGraveyard();
    // Also check the index graveyard: tombstones record deletions that may have happened while this
    // node was not part of the cluster, and each node should make sure to delete any related data.
    if (currentGraveyard != previousGraveyard) {
        final IndexGraveyardDiff indexGraveyardDiff = (IndexGraveyardDiff) currentGraveyard.diff(previousGraveyard);
        final List<IndexGraveyard.Tombstone> added = indexGraveyardDiff.getAdded();
        if (added.isEmpty() == false) {
            if (deleted == null) {
                deleted = new HashSet<>();
            }
            for (IndexGraveyard.Tombstone tombstone : added) {
                deleted.add(tombstone.getIndex());
            }
        }
    }
    return deleted == null ? Collections.<Index>emptyList() : new ArrayList<>(deleted);
}
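Consumers reach this computation through ClusterChangedEvent#indicesDeleted(). A hedged usage sketch, with the listener body illustrative rather than taken from OpenSearch:

// Hypothetical ClusterStateListener reacting to index deletions.
@Override
public void clusterChanged(ClusterChangedEvent event) {
    for (Index deleted : event.indicesDeleted()) {
        // Plain metadata diffs and graveyard tombstones both surface here, so
        // indices deleted while this node was away are cleaned up as well.
        logger.info("removing local data for deleted index {}", deleted);
    }
}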
Use of org.opensearch.index.Index in project OpenSearch by opensearch-project.
Class Gateway, method performStateRecovery.
public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
    final String[] nodesIds = clusterService.state().nodes().getMasterNodes().keys().toArray(String.class);
    logger.trace("performing state recovery from {}", Arrays.toString(nodesIds));
    final TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds, null).actionGet();
    final int requiredAllocation = 1;
    if (nodesState.hasFailures()) {
        for (final FailedNodeException failedNodeException : nodesState.failures()) {
            logger.warn("failed to fetch state from node", failedNodeException);
        }
    }
    final ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
    Metadata electedGlobalState = null;
    int found = 0;
    for (final TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
        if (nodeState.metadata() == null) {
            continue;
        }
        found++;
        if (electedGlobalState == null) {
            electedGlobalState = nodeState.metadata();
        } else if (nodeState.metadata().version() > electedGlobalState.version()) {
            electedGlobalState = nodeState.metadata();
        }
        for (final ObjectCursor<IndexMetadata> cursor : nodeState.metadata().indices().values()) {
            indices.addTo(cursor.value.getIndex(), 1);
        }
    }
    if (found < requiredAllocation) {
        listener.onFailure("found [" + found + "] metadata states, required [" + requiredAllocation + "]");
        return;
    }
    // update the global state, and clean the indices, we elect them in the next phase
    final Metadata.Builder metadataBuilder = Metadata.builder(electedGlobalState).removeAllIndices();
    assert !indices.containsKey(null);
    final Object[] keys = indices.keys;
    for (int i = 0; i < keys.length; i++) {
        if (keys[i] != null) {
            final Index index = (Index) keys[i];
            IndexMetadata electedIndexMetadata = null;
            int indexMetadataCount = 0;
            for (final TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
                if (nodeState.metadata() == null) {
                    continue;
                }
                final IndexMetadata indexMetadata = nodeState.metadata().index(index);
                if (indexMetadata == null) {
                    continue;
                }
                if (electedIndexMetadata == null) {
                    electedIndexMetadata = indexMetadata;
                } else if (indexMetadata.getVersion() > electedIndexMetadata.getVersion()) {
                    electedIndexMetadata = indexMetadata;
                }
                indexMetadataCount++;
            }
            if (electedIndexMetadata != null) {
                if (indexMetadataCount < requiredAllocation) {
                    logger.debug("[{}] found [{}], required [{}], not adding", index, indexMetadataCount, requiredAllocation);
                }
                // TODO if this logging statement is correct then we are missing an else here
                metadataBuilder.put(electedIndexMetadata, false);
            }
        }
    }
    ClusterState recoveredState = Function.<ClusterState>identity()
        .andThen(state -> ClusterStateUpdaters.upgradeAndArchiveUnknownOrInvalidSettings(state, clusterService.getClusterSettings()))
        .apply(ClusterState.builder(clusterService.getClusterName()).metadata(metadataBuilder).build());
    listener.onSuccess(recoveredState);
}
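For reference, the GatewayStateRecoveredListener that receives the recovered state is a small callback interface nested in the Gateway class; its contract is roughly the following (parameter names paraphrased):

public interface GatewayStateRecoveredListener {
    void onSuccess(ClusterState recoveredState);
    void onFailure(String message);
}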