use of org.elasticsearch.cluster.metadata.IndexGraveyard in project elasticsearch by elastic.
the class DanglingIndicesState method findNewDanglingIndices.
/**
 * Finds new dangling indices by iterating over the indices and trying to find indices
 * that have state on disk but are neither part of the provided meta data nor already
 * detected as dangling.
*/
Map<Index, IndexMetaData> findNewDanglingIndices(final MetaData metaData) {
    // Collect the index path UUIDs that must not be reported: indices that are
    // part of the cluster metadata and indices already known to be dangling.
    final Set<String> excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size());
    for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
        excludeIndexPathIds.add(cursor.value.getIndex().getUUID());
    }
    excludeIndexPathIds.addAll(danglingIndices.keySet().stream().map(Index::getUUID).collect(Collectors.toList()));
    try {
        // Load the on-disk state of every index that was not excluded above.
        final List<IndexMetaData> indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
        Map<Index, IndexMetaData> newIndices = new HashMap<>(indexMetaDataList.size());
        final IndexGraveyard graveyard = metaData.indexGraveyard();
        for (IndexMetaData indexMetaData : indexMetaDataList) {
            if (metaData.hasIndex(indexMetaData.getIndex().getName())) {
                logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata", indexMetaData.getIndex());
            } else if (graveyard.containsIndex(indexMetaData.getIndex())) {
                // The graveyard records deletions by name and UUID, so a tombstoned
                // index is never re-imported even though its data is still on disk.
                logger.warn("[{}] can not be imported as a dangling index, as an index with the same name and UUID exist in the " +
                    "index tombstones. This situation is likely caused by copying over the data directory for an index " +
                    "that was previously deleted.", indexMetaData.getIndex());
            } else {
                logger.info("[{}] dangling index exists on local file system, but not in cluster metadata, " +
                    "auto import to cluster state", indexMetaData.getIndex());
                newIndices.put(indexMetaData.getIndex(), indexMetaData);
            }
        }
        return newIndices;
    } catch (IOException e) {
        logger.warn("failed to list dangling indices", e);
        return emptyMap();
    }
}
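The graveyard check is the important gate in this method: containsIndex matches on both name and UUID, so only an exact copy of a previously deleted index is blocked. A minimal sketch of that behavior, assuming a test-scope classpath with Index, UUIDs, and IndexGraveyard imported (the index name "logs" is purely illustrative):

// Sketch: a tombstone blocks re-import only for the exact (name, UUID) pair.
Index deleted = new Index("logs", UUIDs.randomBase64UUID());
IndexGraveyard graveyard = IndexGraveyard.builder()
    .addTombstone(deleted) // record the deletion in the graveyard
    .build();
// Same name and UUID: tombstoned, so findNewDanglingIndices would skip it.
assert graveyard.containsIndex(deleted);
// Same name but a fresh UUID: a genuinely new index, not tombstoned.
assert graveyard.containsIndex(new Index("logs", UUIDs.randomBase64UUID())) == false;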
use of org.elasticsearch.cluster.metadata.IndexGraveyard in project elasticsearch by elastic.
the class DanglingIndicesStateTests method testDanglingIndicesNotImportedWhenTombstonePresent.
public void testDanglingIndicesNotImportedWhenTombstonePresent() throws Exception {
    try (NodeEnvironment env = newNodeEnvironment()) {
        MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
        DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService);
        // Write an index's state to disk without registering it in the cluster metadata.
        final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID");
        IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build();
        metaStateService.writeIndex("test_write", dangledIndex);
        // Tombstone the same index; the on-disk copy must then not be reported as dangling.
        final IndexGraveyard graveyard = IndexGraveyard.builder().addTombstone(dangledIndex.getIndex()).build();
        final MetaData metaData = MetaData.builder().indexGraveyard(graveyard).build();
        assertThat(danglingState.findNewDanglingIndices(metaData).size(), equalTo(0));
    }
}
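For contrast, a sketch of the positive path under the same assumptions (the indexSettings field and createDanglingIndicesState helper from this test class): with no tombstone and no metadata entry, the same on-disk index should be reported as a new dangling index.

public void testDanglingIndexImportedWhenNoTombstonePresent() throws Exception {
    try (NodeEnvironment env = newNodeEnvironment()) {
        MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
        DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService);
        final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID");
        IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build();
        metaStateService.writeIndex("test_write", dangledIndex);
        // Empty metadata: no index entry and no graveyard tombstone for test1.
        final MetaData metaData = MetaData.builder().build();
        assertThat(danglingState.findNewDanglingIndices(metaData).size(), equalTo(1));
    }
}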
use of org.elasticsearch.cluster.metadata.IndexGraveyard in project elasticsearch by elastic.
the class IndicesServiceTests method testIndexAndTombstoneWithSameNameOnStartup.
/**
 * This test checks an edge case: a node had an index (call it A, with UUID 1), deleted it
 * (so a tombstone entry for A exists in the cluster state), created a new index A with
 * UUID 2, and then shut down. When the node comes back online, it looks at the tombstones
 * for deletions and should proceed to delete A with UUID 1 without throwing any errors
 * about the index still existing in the cluster state. This ensures that tombstones sharing
 * a name with a currently valid index don't cause confusion by triggering deletion of an
 * index that exists.
 * See https://github.com/elastic/elasticsearch/issues/18054
 */
public void testIndexAndTombstoneWithSameNameOnStartup() throws Exception {
    final String indexName = "test";
    final Index index = new Index(indexName, UUIDs.randomBase64UUID());
    final IndicesService indicesService = getIndicesService();
    final Settings idxSettings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
        .build();
    final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName())
        .settings(idxSettings)
        .numberOfShards(1)
        .numberOfReplicas(0)
        .build();
    // Same name as the live index, but the UUID of the deleted incarnation.
    final Index tombstonedIndex = new Index(indexName, UUIDs.randomBase64UUID());
    final IndexGraveyard graveyard = IndexGraveyard.builder().addTombstone(tombstonedIndex).build();
    final MetaData metaData = MetaData.builder().put(indexMetaData, true).indexGraveyard(graveyard).build();
    final ClusterState clusterState = new ClusterState.Builder(new ClusterName("testCluster")).metaData(metaData).build();
    // if all goes well, this won't throw an exception, otherwise, it will throw an IllegalStateException
    indicesService.verifyIndexIsDeleted(tombstonedIndex, clusterState);
}
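For the failure mode the final comment alludes to, a one-line sketch under the same fixtures (expectThrows is the standard ESTestCase helper): asking the service to verify deletion of the index that is still live in the cluster state should throw.

// Sketch: the live index (same name, current UUID) is still part of the cluster
// state, so verifying its deletion is expected to fail with IllegalStateException.
expectThrows(IllegalStateException.class, () -> indicesService.verifyIndexIsDeleted(index, clusterState));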
use of org.elasticsearch.cluster.metadata.IndexGraveyard in project elasticsearch by elastic.
the class ClusterChangedEventTests method executeIndicesChangesTest.
// Executes the indices changes test by generating random index additions and deletions
// and checking the values on the resulting cluster changed event.
private static ClusterState executeIndicesChangesTest(final ClusterState previousState, final TombstoneDeletionQuantity deletionQuantity) {
    // add a random number of indices to the next cluster state
    final int numAdd = randomIntBetween(0, 5);
    final List<Index> stateIndices = new ArrayList<>();
    for (Iterator<IndexMetaData> iter = previousState.metaData().indices().valuesIt(); iter.hasNext(); ) {
        stateIndices.add(iter.next().getIndex());
    }
    final int numDel;
    switch (deletionQuantity) {
        case DELETE_ALL:
            numDel = stateIndices.size();
            break;
        case DELETE_NONE:
            numDel = 0;
            break;
        case DELETE_RANDOM:
            numDel = randomIntBetween(0, Math.max(stateIndices.size() - 1, 0));
            break;
        default:
            throw new AssertionError("Unhandled mode [" + deletionQuantity + "]");
    }
    final boolean changeClusterUUID = randomBoolean();
    final List<Index> addedIndices = addIndices(numAdd, randomAsciiOfLengthBetween(5, 10));
    List<Index> delIndices;
    if (changeClusterUUID) {
        // when the cluster UUID changes, the event is not expected to report deletions
        delIndices = new ArrayList<>();
    } else {
        delIndices = delIndices(numDel, stateIndices);
    }
    final ClusterState newState = nextState(previousState, changeClusterUUID, addedIndices, delIndices, 0);
    ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
    final List<String> addsFromEvent = event.indicesCreated();
    List<Index> delsFromEvent = event.indicesDeleted();
    assertThat(new HashSet<>(addsFromEvent), equalTo(addedIndices.stream().map(Index::getName).collect(Collectors.toSet())));
    assertThat(new HashSet<>(delsFromEvent), equalTo(new HashSet<>(delIndices)));
    assertThat(event.metaDataChanged(), equalTo(changeClusterUUID || addedIndices.size() > 0 || delIndices.size() > 0));
    // every reported deletion must have produced a corresponding graveyard tombstone
    final IndexGraveyard newGraveyard = event.state().metaData().indexGraveyard();
    final IndexGraveyard oldGraveyard = event.previousState().metaData().indexGraveyard();
    assertThat(((IndexGraveyard.IndexGraveyardDiff) newGraveyard.diff(oldGraveyard)).getAdded().size(), equalTo(delIndices.size()));
    return newState;
}
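The final assertion leans on IndexGraveyard's diff support, with the same cast to IndexGraveyard.IndexGraveyardDiff used above. A standalone sketch of reading such a diff directly (test scope, same imports assumed):

// Sketch: diffing two graveyards surfaces the newly added tombstones.
Index removed = new Index("old-index", UUIDs.randomBase64UUID());
IndexGraveyard before = IndexGraveyard.builder().build();
IndexGraveyard after = IndexGraveyard.builder().addTombstone(removed).build();
IndexGraveyard.IndexGraveyardDiff diff = (IndexGraveyard.IndexGraveyardDiff) after.diff(before);
assertThat(diff.getAdded().size(), equalTo(1)); // one tombstone added for the deleted index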
use of org.elasticsearch.cluster.metadata.IndexGraveyard in project crate by crate.
the class DanglingIndicesState method findNewDanglingIndices.
/**
 * Finds new dangling indices by iterating over the indices and trying to find indices
 * that have state on disk but are neither part of the provided metadata nor already
 * detected as dangling.
*/
Map<Index, IndexMetadata> findNewDanglingIndices(final Metadata metadata) {
    final Set<String> excludeIndexPathIds = new HashSet<>(metadata.indices().size() + danglingIndices.size());
    for (ObjectCursor<IndexMetadata> cursor : metadata.indices().values()) {
        excludeIndexPathIds.add(cursor.value.getIndex().getUUID());
    }
    excludeIndexPathIds.addAll(danglingIndices.keySet().stream().map(Index::getUUID).collect(Collectors.toList()));
    try {
        final List<IndexMetadata> indexMetadataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
        Map<Index, IndexMetadata> newIndices = new HashMap<>(indexMetadataList.size());
        final IndexGraveyard graveyard = metadata.indexGraveyard();
        for (IndexMetadata indexMetadata : indexMetadataList) {
            // Unlike the elasticsearch variant above, hasIndex is given the full Index
            // here, so the lookup covers name and UUID rather than the name alone.
            if (metadata.hasIndex(indexMetadata.getIndex())) {
                LOGGER.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata", indexMetadata.getIndex());
            } else if (graveyard.containsIndex(indexMetadata.getIndex())) {
                LOGGER.warn("[{}] can not be imported as a dangling index, as an index with the same name and UUID exist in the " +
                    "index tombstones. This situation is likely caused by copying over the data directory for an index " +
                    "that was previously deleted.", indexMetadata.getIndex());
            } else {
                LOGGER.info("[{}] dangling index exists on local file system, but not in cluster metadata, " +
                    "auto import to cluster state", indexMetadata.getIndex());
                newIndices.put(indexMetadata.getIndex(), indexMetadata);
            }
        }
        return newIndices;
    } catch (IOException e) {
        LOGGER.warn("failed to list dangling indices", e);
        return emptyMap();
    }
}