Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
The class DanglingIndicesState, method findNewDanglingIndices:
/**
 * Finds new dangling indices by iterating over the indices and trying to find indices
 * that have state on disk, but are not part of the provided meta data, or not detected
 * as dangled already.
 */
Map<Index, IndexMetaData> findNewDanglingIndices(final MetaData metaData) {
    final Set<String> excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size());
    for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
        excludeIndexPathIds.add(cursor.value.getIndex().getUUID());
    }
    excludeIndexPathIds.addAll(danglingIndices.keySet().stream().map(Index::getUUID).collect(Collectors.toList()));
    try {
        final List<IndexMetaData> indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
        Map<Index, IndexMetaData> newIndices = new HashMap<>(indexMetaDataList.size());
        final IndexGraveyard graveyard = metaData.indexGraveyard();
        for (IndexMetaData indexMetaData : indexMetaDataList) {
            if (metaData.hasIndex(indexMetaData.getIndex().getName())) {
                logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata",
                    indexMetaData.getIndex());
            } else if (graveyard.containsIndex(indexMetaData.getIndex())) {
                logger.warn("[{}] can not be imported as a dangling index, as an index with the same name and UUID exist in the " +
                    "index tombstones. This situation is likely caused by copying over the data directory for an index " +
                    "that was previously deleted.", indexMetaData.getIndex());
            } else {
                logger.info("[{}] dangling index exists on local file system, but not in cluster metadata, " +
                    "auto import to cluster state", indexMetaData.getIndex());
                newIndices.put(indexMetaData.getIndex(), indexMetaData);
            }
        }
        return newIndices;
    } catch (IOException e) {
        logger.warn("failed to list dangling indices", e);
        return emptyMap();
    }
}
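The core of this method is the exclusion set: every index UUID already known to the cluster metadata or already tracked as dangling is collected, then passed as a Predicate<String> to metaStateService.loadIndicesStates, which skips matching folders. A minimal, self-contained sketch of that filtering pattern follows; the loadStates helper and the plain-string UUIDs are illustrative stand-ins, not Elasticsearch API.

import java.util.*;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class ExclusionFilterDemo {

    // Mirrors metaStateService.loadIndicesStates(excludeIndexPathIds::contains):
    // the predicate answers "should this index folder be skipped?".
    static List<String> loadStates(List<String> onDiskUuids, Predicate<String> exclude) {
        return onDiskUuids.stream()
                .filter(uuid -> exclude.test(uuid) == false) // keep only non-excluded folders
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        Set<String> excludeIndexPathIds = new HashSet<>(Arrays.asList("uuid-1", "uuid-3"));
        List<String> onDisk = Arrays.asList("uuid-1", "uuid-2", "uuid-3");
        System.out.println(loadStates(onDisk, excludeIndexPathIds::contains)); // prints [uuid-2]
    }
}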
Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
The class IndicesService, method createIndexService:
/**
 * This creates a new IndexService without registering it
 */
private synchronized IndexService createIndexService(final String reason, IndexMetaData indexMetaData,
        IndicesQueryCache indicesQueryCache, IndicesFieldDataCache indicesFieldDataCache,
        List<IndexEventListener> builtInListeners, Consumer<ShardId> globalCheckpointSyncer,
        IndexingOperationListener... indexingOperationListeners) throws IOException {
    final Index index = indexMetaData.getIndex();
    final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopeSetting);
    logger.debug("creating Index [{}], shards [{}]/[{}{}] - reason [{}]",
        indexMetaData.getIndex(), idxSettings.getNumberOfShards(), idxSettings.getNumberOfReplicas(),
        idxSettings.isShadowReplicaIndex() ? "s" : "", reason);
    final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry);
    for (IndexingOperationListener operationListener : indexingOperationListeners) {
        indexModule.addIndexOperationListener(operationListener);
    }
    pluginsService.onIndexModule(indexModule);
    for (IndexEventListener listener : builtInListeners) {
        indexModule.addIndexEventListener(listener);
    }
    return indexModule.newIndexService(nodeEnv, xContentRegistry, this, circuitBreakerService, bigArrays,
        threadPool, scriptService, clusterService, client, indicesQueryCache, mapperRegistry,
        globalCheckpointSyncer, indicesFieldDataCache);
}
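The pattern worth noting is that the IndexModule acts as a staging area: operation listeners, plugin hooks, and event listeners are all registered on the module before newIndexService is called, and the finished service receives them at construction time. A stripped-down sketch of that collect-then-build pattern, using hypothetical ModuleSketch/ServiceSketch classes rather than the Elasticsearch types:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class ModuleSketch {

    private final List<Runnable> listeners = new ArrayList<>();

    void addListener(Runnable listener) {
        listeners.add(listener);
    }

    // Hands the collected listeners to the service; listeners added to the
    // module after this point do not reach the already-built service.
    ServiceSketch newService() {
        return new ServiceSketch(Collections.unmodifiableList(new ArrayList<>(listeners)));
    }

    public static void main(String[] args) {
        ModuleSketch module = new ModuleSketch();
        module.addListener(() -> System.out.println("plugin listener notified"));
        module.addListener(() -> System.out.println("built-in listener notified"));
        module.newService().fireEvent();
    }
}

class ServiceSketch {

    private final List<Runnable> listeners;

    ServiceSketch(List<Runnable> listeners) {
        this.listeners = listeners;
    }

    void fireEvent() {
        listeners.forEach(Runnable::run);
    }
}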
Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
The class IndicesService, method buildAliasFilter:
public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) {
    /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch
     * of dependencies we pass in a function that can perform the parsing. */
    CheckedFunction<byte[], QueryBuilder, IOException> filterParser = bytes -> {
        try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes)) {
            return new QueryParseContext(parser).parseInnerQueryBuilder();
        }
    };
    String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, expressions);
    IndexMetaData indexMetaData = state.metaData().index(index);
    return new AliasFilter(ShardSearchRequest.parseAliasFilter(filterParser, indexMetaData, aliases), aliases);
}
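As the inline comment explains, the static parseAliasFilter cannot reach the instance's parsing machinery, so the caller injects a throwing function instead of a bundle of dependencies. A minimal sketch of that inversion, assuming hypothetical names (the local CheckedFunction copy only keeps the sketch self-contained; Elasticsearch ships its own):

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class FilterParserSketch {

    @FunctionalInterface
    interface CheckedFunction<T, R, E extends Exception> {
        R apply(T t) throws E;
    }

    // Stands in for the static parseAliasFilter: it owns no parsing
    // dependencies and relies entirely on the function the caller injects.
    static String parseAliasFilter(CheckedFunction<byte[], String, IOException> filterParser,
                                   byte[] filterSource) throws IOException {
        return filterSource == null ? null : filterParser.apply(filterSource);
    }

    public static void main(String[] args) throws IOException {
        CheckedFunction<byte[], String, IOException> filterParser =
                bytes -> "parsed: " + new String(bytes, StandardCharsets.UTF_8);
        byte[] source = "{\"term\":{\"user\":\"kimchy\"}}".getBytes(StandardCharsets.UTF_8);
        System.out.println(parseAliasFilter(filterParser, source));
    }
}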
Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
The class NodeIndicesStats, method readFrom:
@Override
public void readFrom(StreamInput in) throws IOException {
    stats = new CommonStats(in);
    if (in.readBoolean()) {
        int entries = in.readVInt();
        statsByShard = new HashMap<>();
        for (int i = 0; i < entries; i++) {
            Index index = new Index(in);
            int indexShardListSize = in.readVInt();
            List<IndexShardStats> indexShardStats = new ArrayList<>(indexShardListSize);
            for (int j = 0; j < indexShardListSize; j++) {
                indexShardStats.add(IndexShardStats.readIndexShardStats(in));
            }
            statsByShard.put(index, indexShardStats);
        }
    }
}
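The wire layout being decoded here is: a boolean present flag for the optional map, a vint entry count, then for each entry the Index key followed by a length-prefixed list of shard stats. A runnable sketch of both sides of that layout, using plain java.io streams instead of Elasticsearch's StreamInput/StreamOutput (DataOutput has no writeVInt, so writeInt stands in for it, and a long stands in for each IndexShardStats):

import java.io.*;
import java.util.*;

public class StatsWireSketch {

    // Write side matching the readFrom layout above: present flag, entry
    // count, then per entry a key and a length-prefixed list of values.
    static void write(DataOutput out, Map<String, List<Long>> statsByShard) throws IOException {
        out.writeBoolean(statsByShard != null);
        if (statsByShard != null) {
            out.writeInt(statsByShard.size());
            for (Map.Entry<String, List<Long>> entry : statsByShard.entrySet()) {
                out.writeUTF(entry.getKey());          // the Index key
                out.writeInt(entry.getValue().size()); // shard stats list size
                for (long stat : entry.getValue()) {
                    out.writeLong(stat);               // one shard-stats stand-in
                }
            }
        }
    }

    static Map<String, List<Long>> read(DataInput in) throws IOException {
        if (in.readBoolean() == false) {
            return null;
        }
        int entries = in.readInt();
        Map<String, List<Long>> statsByShard = new HashMap<>();
        for (int i = 0; i < entries; i++) {
            String index = in.readUTF();
            int listSize = in.readInt();
            List<Long> shardStats = new ArrayList<>(listSize);
            for (int j = 0; j < listSize; j++) {
                shardStats.add(in.readLong());
            }
            statsByShard.put(index, shardStats);
        }
        return statsByShard;
    }

    public static void main(String[] args) throws IOException {
        Map<String, List<Long>> stats = new HashMap<>();
        stats.put("logs-2017", Arrays.asList(12L, 7L));
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), stats);
        System.out.println(read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))));
    }
}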
Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
The class SyncedFlushService, method attemptSyncedFlush:
/**
 * a utility method to perform a synced flush for all shards of multiple indices. see {@link #attemptSyncedFlush(ShardId, ActionListener)}
 * for more details.
 */
public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<SyncedFlushResponse> listener) {
    final ClusterState state = clusterService.state();
    final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
    final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
    int numberOfShards = 0;
    for (Index index : concreteIndices) {
        final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
        numberOfShards += indexMetaData.getNumberOfShards();
        results.put(index.getName(), Collections.synchronizedList(new ArrayList<>()));
    }
    if (numberOfShards == 0) {
        listener.onResponse(new SyncedFlushResponse(results));
        return;
    }
    final CountDown countDown = new CountDown(numberOfShards);
    for (final Index concreteIndex : concreteIndices) {
        final String index = concreteIndex.getName();
        final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex);
        final int indexNumberOfShards = indexMetaData.getNumberOfShards();
        for (int shard = 0; shard < indexNumberOfShards; shard++) {
            final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard);
            innerAttemptSyncedFlush(shardId, state, new ActionListener<ShardsSyncedFlushResult>() {

                @Override
                public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                    results.get(index).add(syncedFlushResult);
                    if (countDown.countDown()) {
                        listener.onResponse(new SyncedFlushResponse(results));
                    }
                }

                @Override
                public void onFailure(Exception e) {
                    logger.debug("{} unexpected error while executing synced flush", shardId);
                    final int totalShards = indexMetaData.getNumberOfReplicas() + 1;
                    results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
                    if (countDown.countDown()) {
                        listener.onResponse(new SyncedFlushResponse(results));
                    }
                }
            });
        }
    }
}
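The fan-out/fan-in here hinges on the CountDown helper: every per-shard callback, on success or failure alike, records its result and decrements the count, and whichever callback consumes the last count fires the top-level listener exactly once. A minimal runnable sketch of that pattern, with a hypothetical AtomicInteger-based stand-in for Elasticsearch's CountDown class:

import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class CountDownFanInSketch {

    // Stand-in for Elasticsearch's CountDown: countDown() returns true
    // exactly once, for whichever caller consumes the last count.
    static class CountDown {
        private final AtomicInteger remaining;

        CountDown(int count) {
            this.remaining = new AtomicInteger(count);
        }

        boolean countDown() {
            return remaining.decrementAndGet() == 0;
        }
    }

    public static void main(String[] args) {
        int numberOfShards = 3;
        List<String> results = Collections.synchronizedList(new ArrayList<>());
        CountDown countDown = new CountDown(numberOfShards);
        Consumer<List<String>> listener = all -> System.out.println("all shards reported: " + all);

        // Each per-shard callback records its result; only the one that
        // consumes the last count notifies the listener.
        for (int shard = 0; shard < numberOfShards; shard++) {
            results.add("shard-" + shard + " flushed");
            if (countDown.countDown()) {
                listener.accept(results);
            }
        }
    }
}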