use of com.carrotsearch.hppc.cursors.ObjectCursor in project elasticsearch by elastic.
the class BalanceConfigurationTests method testNoRebalanceOnPrimaryOverload.
public void testNoRebalanceOnPrimaryOverload() {
Settings.Builder settings = Settings.builder();
AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(), new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random()), new TestGatewayAllocator(), new ShardsAllocator() {
/*
* // this allocator tries to rebuild this scenario where a rebalance is
* // triggered solely by the primary overload on node [1] where a shard
* // is rebalanced to node 0
routing_nodes:
-----node_id[0][V]
--------[test][0], node[0], [R], s[STARTED]
--------[test][4], node[0], [R], s[STARTED]
-----node_id[1][V]
--------[test][0], node[1], [P], s[STARTED]
--------[test][1], node[1], [P], s[STARTED]
--------[test][3], node[1], [R], s[STARTED]
-----node_id[2][V]
--------[test][1], node[2], [R], s[STARTED]
--------[test][2], node[2], [R], s[STARTED]
--------[test][4], node[2], [P], s[STARTED]
-----node_id[3][V]
--------[test][2], node[3], [P], s[STARTED]
--------[test][3], node[3], [P], s[STARTED]
---- unassigned
*/
@Override
public void allocate(RoutingAllocation allocation) {
RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
ShardRouting[] drain = unassigned.drain();
// we have to allocate primaries first
ArrayUtil.timSort(drain, (a, b) -> {
return a.primary() ? -1 : 1;
});
for (ShardRouting sr : drain) {
switch(sr.id()) {
case 0:
if (sr.primary()) {
allocation.routingNodes().initializeShard(sr, "node1", null, -1, allocation.changes());
} else {
allocation.routingNodes().initializeShard(sr, "node0", null, -1, allocation.changes());
}
break;
case 1:
if (sr.primary()) {
allocation.routingNodes().initializeShard(sr, "node1", null, -1, allocation.changes());
} else {
allocation.routingNodes().initializeShard(sr, "node2", null, -1, allocation.changes());
}
break;
case 2:
if (sr.primary()) {
allocation.routingNodes().initializeShard(sr, "node3", null, -1, allocation.changes());
} else {
allocation.routingNodes().initializeShard(sr, "node2", null, -1, allocation.changes());
}
break;
case 3:
if (sr.primary()) {
allocation.routingNodes().initializeShard(sr, "node3", null, -1, allocation.changes());
} else {
allocation.routingNodes().initializeShard(sr, "node1", null, -1, allocation.changes());
}
break;
case 4:
if (sr.primary()) {
allocation.routingNodes().initializeShard(sr, "node2", null, -1, allocation.changes());
} else {
allocation.routingNodes().initializeShard(sr, "node0", null, -1, allocation.changes());
}
break;
}
}
}
@Override
public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
throw new UnsupportedOperationException("explain not supported");
}
}, EmptyClusterInfoService.INSTANCE);
MetaData.Builder metaDataBuilder = MetaData.builder();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1);
metaDataBuilder = metaDataBuilder.put(indexMeta);
MetaData metaData = metaDataBuilder.build();
for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
routingTableBuilder.addAsNew(cursor.value);
}
RoutingTable routingTable = routingTableBuilder.build();
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
for (int i = 0; i < 4; i++) {
DiscoveryNode node = newNode("node" + i);
nodes.add(node);
}
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
RoutingNodes routingNodes = clusterState.getRoutingNodes();
for (RoutingNode routingNode : routingNodes) {
for (ShardRouting shardRouting : routingNode) {
assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.INITIALIZING));
}
}
strategy = createAllocationService(settings.build(), new NoopGatewayAllocator());
logger.info("use the new allocator and check if it moves shards");
routingNodes = clusterState.getRoutingNodes();
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.getRoutingNodes();
for (RoutingNode routingNode : routingNodes) {
for (ShardRouting shardRouting : routingNode) {
assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
}
}
logger.info("start the replica shards");
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.getRoutingNodes();
for (RoutingNode routingNode : routingNodes) {
for (ShardRouting shardRouting : routingNode) {
assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
}
}
logger.info("rebalancing");
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.getRoutingNodes();
for (RoutingNode routingNode : routingNodes) {
for (ShardRouting shardRouting : routingNode) {
assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
}
}
}
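The ObjectCursor usage in this test is the loop over metaData.indices().values(): HPPC-backed containers such as ImmutableOpenMap hand back cursor objects instead of the elements themselves, and the element is read from the cursor's value field. Below is a minimal, self-contained sketch of that pattern against a plain HPPC map; the class and variable names are illustrative and not part of the Elasticsearch test.

import com.carrotsearch.hppc.ObjectObjectHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;

// Standalone sketch: iterate the values of an HPPC map via ObjectCursor.
public class ValuesCursorSketch {
    public static void main(String[] args) {
        ObjectObjectHashMap<String, Integer> shardsPerIndex = new ObjectObjectHashMap<>();
        shardsPerIndex.put("test", 5);
        shardsPerIndex.put("logs", 3);
        for (ObjectCursor<Integer> cursor : shardsPerIndex.values()) {
            // cursor.value holds the element; cursor.index points at the slot in the backing array
            System.out.println("number of shards: " + cursor.value);
        }
    }
}

Note that the iterator reuses a single cursor instance across iterations, which is why callers read cursor.value immediately rather than holding on to the cursor itself.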
use of com.carrotsearch.hppc.cursors.ObjectCursor in project elasticsearch by elastic.
the class RestoreService method restoreSnapshot.
/**
* Restores snapshot specified in the restore request.
*
* @param request restore request
* @param listener restore listener
*/
public void restoreSnapshot(final RestoreRequest request, final ActionListener<RestoreCompletionResponse> listener) {
try {
// Read snapshot info and metadata from the repository
Repository repository = repositoriesService.repository(request.repositoryName);
final RepositoryData repositoryData = repository.getRepositoryData();
final Optional<SnapshotId> incompatibleSnapshotId = repositoryData.getIncompatibleSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
if (incompatibleSnapshotId.isPresent()) {
throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "cannot restore incompatible snapshot");
}
final Optional<SnapshotId> matchingSnapshotId = repositoryData.getSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
if (matchingSnapshotId.isPresent() == false) {
throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "snapshot does not exist");
}
final SnapshotId snapshotId = matchingSnapshotId.get();
final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotId);
final Snapshot snapshot = new Snapshot(request.repositoryName, snapshotId);
List<String> filteredIndices = SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions());
MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices));
// Make sure that we can restore from this snapshot
validateSnapshotRestorable(request.repositoryName, snapshotInfo);
// Find list of indices that we need to restore
final Map<String, String> renamedIndices = renamedIndices(request, filteredIndices);
// Now we can start the actual restore process by adding shards to be recovered in the cluster state
// and updating cluster metadata (global and index) as needed
clusterService.submitStateUpdateTask(request.cause(), new ClusterStateUpdateTask() {
RestoreInfo restoreInfo = null;
@Override
public ClusterState execute(ClusterState currentState) {
// Check if another restore process is already running - cannot run two restore processes at the
// same time
RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
if (restoreInProgress != null && !restoreInProgress.entries().isEmpty()) {
throw new ConcurrentSnapshotExecutionException(snapshot, "Restore process is already running in this cluster");
}
// Check if the snapshot to restore is currently being deleted
SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
throw new ConcurrentSnapshotExecutionException(snapshot, "cannot restore a snapshot while a snapshot deletion is in-progress [" + deletionsInProgress.getEntries().get(0).getSnapshot() + "]");
}
// Updating cluster state
ClusterState.Builder builder = ClusterState.builder(currentState);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
ImmutableOpenMap<ShardId, RestoreInProgress.ShardRestoreStatus> shards;
Set<String> aliases = new HashSet<>();
if (!renamedIndices.isEmpty()) {
// We have some indices to restore
ImmutableOpenMap.Builder<ShardId, RestoreInProgress.ShardRestoreStatus> shardsBuilder = ImmutableOpenMap.builder();
final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion();
for (Map.Entry<String, String> indexEntry : renamedIndices.entrySet()) {
String index = indexEntry.getValue();
boolean partial = checkPartial(index);
SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(snapshot, snapshotInfo.version(), index);
String renamedIndexName = indexEntry.getKey();
IndexMetaData snapshotIndexMetaData = metaData.index(index);
snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings);
try {
snapshotIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(snapshotIndexMetaData, minIndexCompatibilityVersion);
} catch (Exception ex) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex);
}
// Check that the index is closed or doesn't exist
IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndexName);
IntSet ignoreShards = new IntHashSet();
final Index renamedIndex;
if (currentIndexMetaData == null) {
// Index doesn't exist - create it and start recovery
// Make sure that the index we are about to create has a valid name
MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState);
createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings());
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName);
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) {
// Remove all aliases - they shouldn't be restored
indexMdBuilder.removeAllAliases();
} else {
for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
aliases.add(alias.value);
}
}
IndexMetaData updatedIndexMetaData = indexMdBuilder.build();
if (partial) {
populateIgnoredShards(index, ignoreShards);
}
rtBuilder.addAsNewRestore(updatedIndexMetaData, recoverySource, ignoreShards);
blocks.addBlocks(updatedIndexMetaData);
mdBuilder.put(updatedIndexMetaData, true);
renamedIndex = updatedIndexMetaData.getIndex();
} else {
validateExistingIndex(currentIndexMetaData, snapshotIndexMetaData, renamedIndexName, partial);
// Index exists and it's closed - open it in metadata and start recovery
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);
indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1));
if (!request.includeAliases()) {
// Remove all snapshot aliases
if (!snapshotIndexMetaData.getAliases().isEmpty()) {
indexMdBuilder.removeAllAliases();
}
// Add existing aliases
for (ObjectCursor<AliasMetaData> alias : currentIndexMetaData.getAliases().values()) {
indexMdBuilder.putAlias(alias.value);
}
} else {
for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
aliases.add(alias.value);
}
}
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID()));
IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndexName).build();
rtBuilder.addAsRestore(updatedIndexMetaData, recoverySource);
blocks.updateBlocks(updatedIndexMetaData);
mdBuilder.put(updatedIndexMetaData, true);
renamedIndex = updatedIndexMetaData.getIndex();
}
for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) {
if (!ignoreShards.contains(shard)) {
shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId()));
} else {
shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE));
}
}
}
shards = shardsBuilder.build();
RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards);
builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry));
} else {
shards = ImmutableOpenMap.of();
}
checkAliasNameConflicts(renamedIndices, aliases);
// Restore global state if needed
restoreGlobalStateIfRequested(mdBuilder);
if (completed(shards)) {
// We don't have any indices to restore - we are done
restoreInfo = new RestoreInfo(snapshotId.getName(), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards.size(), shards.size() - failedShards(shards));
}
RoutingTable rt = rtBuilder.build();
ClusterState updatedState = builder.metaData(mdBuilder).blocks(blocks).routingTable(rt).build();
return allocationService.reroute(updatedState, "restored snapshot [" + snapshot + "]");
}
private void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<String> aliases) {
for (Map.Entry<String, String> renamedIndex : renamedIndices.entrySet()) {
if (aliases.contains(renamedIndex.getKey())) {
throw new SnapshotRestoreException(snapshot, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
}
}
}
private void populateIgnoredShards(String index, IntSet ignoreShards) {
for (SnapshotShardFailure failure : snapshotInfo.shardFailures()) {
if (index.equals(failure.index())) {
ignoreShards.add(failure.shardId());
}
}
}
private boolean checkPartial(String index) {
// Make sure that index was fully snapshotted
if (failed(snapshotInfo, index)) {
if (request.partial()) {
return true;
} else {
throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot restore");
}
} else {
return false;
}
}
private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) {
// Index exists - checking that it's closed
if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) {
// TODO: Enable restore for open indices
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because it's open");
}
// Index exists - checking if it's a partial restore
if (partial) {
throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists");
}
// Make sure that the number of shards is the same. That's the only thing that we cannot change
if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + "] shards from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
}
}
/**
* Optionally updates index settings in indexMetaData by removing settings listed in ignoreSettings and
* merging them with settings in changeSettings.
*/
private IndexMetaData updateIndexSettings(IndexMetaData indexMetaData, Settings changeSettings, String[] ignoreSettings) {
if (changeSettings.names().isEmpty() && ignoreSettings.length == 0) {
return indexMetaData;
}
Settings normalizedChangeSettings = Settings.builder().put(changeSettings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
Map<String, String> settingsMap = new HashMap<>(indexMetaData.getSettings().getAsMap());
List<String> simpleMatchPatterns = new ArrayList<>();
for (String ignoredSetting : ignoreSettings) {
if (!Regex.isSimpleMatchPattern(ignoredSetting)) {
if (UNREMOVABLE_SETTINGS.contains(ignoredSetting)) {
throw new SnapshotRestoreException(snapshot, "cannot remove setting [" + ignoredSetting + "] on restore");
} else {
settingsMap.remove(ignoredSetting);
}
} else {
simpleMatchPatterns.add(ignoredSetting);
}
}
if (!simpleMatchPatterns.isEmpty()) {
String[] removePatterns = simpleMatchPatterns.toArray(new String[simpleMatchPatterns.size()]);
Iterator<Map.Entry<String, String>> iterator = settingsMap.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, String> entry = iterator.next();
if (UNREMOVABLE_SETTINGS.contains(entry.getKey()) == false) {
if (Regex.simpleMatch(removePatterns, entry.getKey())) {
iterator.remove();
}
}
}
}
for (Map.Entry<String, String> entry : normalizedChangeSettings.getAsMap().entrySet()) {
if (UNMODIFIABLE_SETTINGS.contains(entry.getKey())) {
throw new SnapshotRestoreException(snapshot, "cannot modify setting [" + entry.getKey() + "] on restore");
} else {
settingsMap.put(entry.getKey(), entry.getValue());
}
}
return builder.settings(Settings.builder().put(settingsMap)).build();
}
private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) {
if (request.includeGlobalState()) {
if (metaData.persistentSettings() != null) {
Settings settings = metaData.persistentSettings();
clusterSettings.validateUpdate(settings);
mdBuilder.persistentSettings(settings);
}
if (metaData.templates() != null) {
// TODO: Should all existing templates be deleted first?
for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
mdBuilder.put(cursor.value);
}
}
if (metaData.customs() != null) {
for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
if (!RepositoriesMetaData.TYPE.equals(cursor.key)) {
// Don't restore repositories while we are working with them
// TODO: Should we restore them at the end?
mdBuilder.putCustom(cursor.key, cursor.value);
}
}
}
}
}
@Override
public void onFailure(String source, Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
listener.onFailure(e);
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo));
}
});
} catch (Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
listener.onFailure(e);
}
}
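restoreSnapshot shows both cursor shapes: ObjectCursor<String> over getAliases().keys(), ObjectCursor<AliasMetaData> over getAliases().values(), and ObjectObjectCursor<String, MetaData.Custom> when iterating metaData.customs() directly. A minimal standalone sketch of the two shapes follows; the map content and class name are illustrative only, not taken from RestoreService.

import com.carrotsearch.hppc.ObjectObjectHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

// Standalone sketch: keys() yields ObjectCursor<K>; iterating the map itself yields
// ObjectObjectCursor<K, V> carrying both key and value.
public class KeysAndEntriesSketch {
    public static void main(String[] args) {
        ObjectObjectHashMap<String, String> aliases = new ObjectObjectHashMap<>();
        aliases.put("alias-1", "index-1");
        aliases.put("alias-2", "index-2");
        for (ObjectCursor<String> alias : aliases.keys()) {
            System.out.println("alias name: " + alias.value); // keys are also exposed through cursor.value
        }
        for (ObjectObjectCursor<String, String> entry : aliases) {
            System.out.println(entry.key + " -> " + entry.value);
        }
    }
}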
use of com.carrotsearch.hppc.cursors.ObjectCursor in project elasticsearch by elastic.
the class Gateway method performStateRecovery.
public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
String[] nodesIds = clusterService.state().nodes().getMasterNodes().keys().toArray(String.class);
logger.trace("performing state recovery from {}", Arrays.toString(nodesIds));
TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds, null).actionGet();
int requiredAllocation = Math.max(1, minimumMasterNodesProvider.get());
if (nodesState.hasFailures()) {
for (FailedNodeException failedNodeException : nodesState.failures()) {
logger.warn("failed to fetch state from node", failedNodeException);
}
}
ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
MetaData electedGlobalState = null;
int found = 0;
for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
if (nodeState.metaData() == null) {
continue;
}
found++;
if (electedGlobalState == null) {
electedGlobalState = nodeState.metaData();
} else if (nodeState.metaData().version() > electedGlobalState.version()) {
electedGlobalState = nodeState.metaData();
}
for (ObjectCursor<IndexMetaData> cursor : nodeState.metaData().indices().values()) {
indices.addTo(cursor.value.getIndex(), 1);
}
}
if (found < requiredAllocation) {
listener.onFailure("found [" + found + "] metadata states, required [" + requiredAllocation + "]");
return;
}
// update the global state, and clean the indices, we elect them in the next phase
MetaData.Builder metaDataBuilder = MetaData.builder(electedGlobalState).removeAllIndices();
assert !indices.containsKey(null);
final Object[] keys = indices.keys;
for (int i = 0; i < keys.length; i++) {
if (keys[i] != null) {
Index index = (Index) keys[i];
IndexMetaData electedIndexMetaData = null;
int indexMetaDataCount = 0;
for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
if (nodeState.metaData() == null) {
continue;
}
IndexMetaData indexMetaData = nodeState.metaData().index(index);
if (indexMetaData == null) {
continue;
}
if (electedIndexMetaData == null) {
electedIndexMetaData = indexMetaData;
} else if (indexMetaData.getVersion() > electedIndexMetaData.getVersion()) {
electedIndexMetaData = indexMetaData;
}
indexMetaDataCount++;
}
if (electedIndexMetaData != null) {
if (indexMetaDataCount < requiredAllocation) {
logger.debug("[{}] found [{}], required [{}], not adding", index, indexMetaDataCount, requiredAllocation);
}
// TODO if this logging statement is correct then we are missing an else here
try {
if (electedIndexMetaData.getState() == IndexMetaData.State.OPEN) {
// verify that we can actually create this index - if not we recover it as closed with lots of warn logs
indicesService.verifyIndexMetadata(electedIndexMetaData, electedIndexMetaData);
}
} catch (Exception e) {
final Index electedIndex = electedIndexMetaData.getIndex();
logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e);
electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build();
}
metaDataBuilder.put(electedIndexMetaData, false);
}
}
}
final ClusterSettings clusterSettings = clusterService.getClusterSettings();
metaDataBuilder.persistentSettings(clusterSettings.archiveUnknownOrInvalidSettings(metaDataBuilder.persistentSettings(), e -> logUnknownSetting("persistent", e), (e, ex) -> logInvalidSetting("persistent", e, ex)));
metaDataBuilder.transientSettings(clusterSettings.archiveUnknownOrInvalidSettings(metaDataBuilder.transientSettings(), e -> logUnknownSetting("transient", e), (e, ex) -> logInvalidSetting("transient", e, ex)));
ClusterState.Builder builder = ClusterState.builder(clusterService.getClusterName());
builder.metaData(metaDataBuilder);
listener.onSuccess(builder.build());
}
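performStateRecovery counts how many nodes report each index with ObjectFloatHashMap.addTo, then scans the map's public keys backing array and skips null slots before casting each entry to Index. The sketch below reproduces that counting pattern on a standalone map and also shows the cursor-based read-back that avoids touching the backing array; all names here are illustrative.

import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.cursors.ObjectFloatCursor;

// Standalone sketch: addTo() creates the entry on first use and increments it afterwards.
public class IndexCountingSketch {
    public static void main(String[] args) {
        ObjectFloatHashMap<String> occurrences = new ObjectFloatHashMap<>();
        for (String index : new String[]{"test", "logs", "test"}) {
            occurrences.addTo(index, 1);
        }
        // Iterating the map yields ObjectFloatCursor objects instead of exposing the raw arrays.
        for (ObjectFloatCursor<String> cursor : occurrences) {
            System.out.println(cursor.key + " reported by " + (int) cursor.value + " node(s)");
        }
    }
}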
use of com.carrotsearch.hppc.cursors.ObjectCursor in project crate by crate.
the class DocSchemaInfo method update.
@Override
public void update(ClusterChangedEvent event) {
assert event.metadataChanged() : "metadataChanged must be true if update is called";
// search for aliases of deleted and created indices, they must be invalidated also
Metadata prevMetadata = event.previousState().metadata();
for (Index index : event.indicesDeleted()) {
invalidateFromIndex(index, prevMetadata);
}
Metadata newMetadata = event.state().metadata();
for (String index : event.indicesCreated()) {
invalidateAliases(newMetadata.index(index).getAliases());
}
// search for templates with changed meta data => invalidate template aliases
ImmutableOpenMap<String, IndexTemplateMetadata> newTemplates = newMetadata.templates();
ImmutableOpenMap<String, IndexTemplateMetadata> prevTemplates = prevMetadata.templates();
if (!newTemplates.equals(prevTemplates)) {
for (ObjectCursor<IndexTemplateMetadata> cursor : newTemplates.values()) {
invalidateAliases(cursor.value.aliases());
}
for (ObjectCursor<IndexTemplateMetadata> cursor : prevTemplates.values()) {
invalidateAliases(cursor.value.aliases());
}
}
// search indices with changed meta data
Iterator<String> currentTablesIt = docTableByName.keySet().iterator();
ObjectLookupContainer<String> templates = newTemplates.keys();
ImmutableOpenMap<String, IndexMetadata> indices = newMetadata.indices();
while (currentTablesIt.hasNext()) {
String tableName = currentTablesIt.next();
String indexName = getIndexName(tableName);
IndexMetadata newIndexMetadata = newMetadata.index(indexName);
if (newIndexMetadata == null) {
docTableByName.remove(tableName);
} else {
IndexMetadata oldIndexMetadata = prevMetadata.index(indexName);
if (oldIndexMetadata != null && ClusterChangedEvent.indexMetadataChanged(oldIndexMetadata, newIndexMetadata)) {
docTableByName.remove(tableName);
// invalidate aliases of changed indices
invalidateAliases(newIndexMetadata.getAliases());
invalidateAliases(oldIndexMetadata.getAliases());
} else {
// this is the case if a single partition has been modified using alter table <t> partition (...)
String possibleTemplateName = PartitionName.templateName(name(), tableName);
if (templates.contains(possibleTemplateName)) {
for (ObjectObjectCursor<String, IndexMetadata> indexEntry : indices) {
if (IndexParts.isPartitioned(indexEntry.key)) {
docTableByName.remove(tableName);
break;
}
}
}
}
}
}
// re-register UDFs for this schema
UserDefinedFunctionsMetadata udfMetadata = newMetadata.custom(UserDefinedFunctionsMetadata.TYPE);
if (udfMetadata != null) {
udfService.updateImplementations(schemaName, udfMetadata.functionsMetadata().stream().filter(f -> schemaName.equals(f.schema())));
}
}
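The detail worth noting here is that newTemplates.keys() is held as an ObjectLookupContainer<String>, so the later templates.contains(possibleTemplateName) check runs directly against the map's key set with no copy into a java.util.Set. A minimal standalone sketch of that lookup, with illustrative names and template strings:

import com.carrotsearch.hppc.ObjectLookupContainer;
import com.carrotsearch.hppc.ObjectObjectHashMap;

// Standalone sketch: keys() supports contains() without materializing a separate set.
public class TemplateLookupSketch {
    public static void main(String[] args) {
        ObjectObjectHashMap<String, String> templates = new ObjectObjectHashMap<>();
        templates.put(".partitioned.t1.", "template-1");
        ObjectLookupContainer<String> keys = templates.keys();
        System.out.println(keys.contains(".partitioned.t1.")); // true
        System.out.println(keys.contains(".partitioned.t2.")); // false
    }
}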
use of com.carrotsearch.hppc.cursors.ObjectCursor in project crate by crate.
the class Gateway method performStateRecovery.
public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
final DiscoveryNode[] nodes = clusterService.state().nodes().getMasterNodes().values().toArray(DiscoveryNode.class);
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("performing state recovery from {}", Arrays.toString(nodes));
}
var request = new TransportNodesListGatewayMetaState.Request(nodes);
PlainActionFuture<TransportNodesListGatewayMetaState.NodesGatewayMetaState> future = PlainActionFuture.newFuture();
client.executeLocally(TransportNodesListGatewayMetaState.TYPE, request, future);
final TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = future.actionGet();
final int requiredAllocation = 1;
if (nodesState.hasFailures()) {
for (final FailedNodeException failedNodeException : nodesState.failures()) {
LOGGER.warn("failed to fetch state from node", failedNodeException);
}
}
final ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
Metadata electedGlobalState = null;
int found = 0;
for (final TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
if (nodeState.metadata() == null) {
continue;
}
found++;
if (electedGlobalState == null) {
electedGlobalState = nodeState.metadata();
} else if (nodeState.metadata().version() > electedGlobalState.version()) {
electedGlobalState = nodeState.metadata();
}
for (final ObjectCursor<IndexMetadata> cursor : nodeState.metadata().indices().values()) {
indices.addTo(cursor.value.getIndex(), 1);
}
}
if (found < requiredAllocation) {
listener.onFailure("found [" + found + "] metadata states, required [" + requiredAllocation + "]");
return;
}
// update the global state, and clean the indices, we elect them in the next phase
final Metadata.Builder metadataBuilder = Metadata.builder(electedGlobalState).removeAllIndices();
assert !indices.containsKey(null);
final Object[] keys = indices.keys;
for (int i = 0; i < keys.length; i++) {
if (keys[i] != null) {
final Index index = (Index) keys[i];
IndexMetadata electedIndexMetadata = null;
int indexMetadataCount = 0;
for (final TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
if (nodeState.metadata() == null) {
continue;
}
final IndexMetadata indexMetadata = nodeState.metadata().index(index);
if (indexMetadata == null) {
continue;
}
if (electedIndexMetadata == null) {
electedIndexMetadata = indexMetadata;
} else if (indexMetadata.getVersion() > electedIndexMetadata.getVersion()) {
electedIndexMetadata = indexMetadata;
}
indexMetadataCount++;
}
if (electedIndexMetadata != null) {
if (indexMetadataCount < requiredAllocation) {
LOGGER.debug("[{}] found [{}], required [{}], not adding", index, indexMetadataCount, requiredAllocation);
}
// TODO if this logging statement is correct then we are missing an else here
metadataBuilder.put(electedIndexMetadata, false);
}
}
}
ClusterState recoveredState = Function.<ClusterState>identity().andThen(state -> ClusterStateUpdaters.upgradeAndArchiveUnknownOrInvalidSettings(state, clusterService.getClusterSettings())).apply(ClusterState.builder(clusterService.getClusterName()).metadata(metadataBuilder).build());
listener.onSuccess(recoveredState);
}