Use of org.elasticsearch.cluster.metadata.Manifest in project crate by crate.
The class IncrementalClusterStateWriter, method setCurrentTerm:
void setCurrentTerm(long currentTerm) throws WriteStateException {
    Manifest manifest = new Manifest(currentTerm, previousManifest.getClusterStateVersion(),
        previousManifest.getGlobalGeneration(), new HashMap<>(previousManifest.getIndexGenerations()));
    metaStateService.writeManifestAndCleanup("current term changed", manifest);
    previousManifest = manifest;
}
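The write happens synchronously, so the new term is durable before the node acts on it. Below is a minimal sketch of a caller reacting to a term bump; onNewTerm and the fail-fast handling are assumptions for illustration, and since setCurrentTerm has default (package) visibility above, a real caller would live in the same package.

// Hypothetical caller, only to illustrate the failure handling around setCurrentTerm.
void onNewTerm(IncrementalClusterStateWriter stateWriter, long newTerm) {
    try {
        // Rewrites only the manifest: the new term is recorded while the previously
        // written global and per-index generations are reused unchanged.
        stateWriter.setCurrentTerm(newTerm);
    } catch (WriteStateException e) {
        // See WriteStateException#isDirty(): true means the write may have partially
        // reached disk, so the on-disk state can no longer be trusted.
        if (e.isDirty()) {
            throw new IllegalStateException("possibly dirty cluster state on disk", e);
        }
        throw new IllegalStateException("failed to persist new term; on-disk state unchanged", e);
    }
}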
Use of org.elasticsearch.cluster.metadata.Manifest in project crate by crate.
The class IncrementalClusterStateWriter, method updateClusterState:
/**
 * Updates manifest and meta data on disk.
 *
 * @param newState new {@link ClusterState}
 *
 * @throws WriteStateException if exception occurs. See also {@link WriteStateException#isDirty()}.
 */
void updateClusterState(ClusterState newState) throws WriteStateException {
    Metadata newMetadata = newState.metadata();
    final long startTimeMillis = relativeTimeMillisSupplier.getAsLong();
    final AtomicClusterStateWriter writer = new AtomicClusterStateWriter(metaStateService, previousManifest);
    long globalStateGeneration = writeGlobalState(writer, newMetadata);
    Map<Index, Long> indexGenerations = writeIndicesMetadata(writer, newState);
    Manifest manifest = new Manifest(previousManifest.getCurrentTerm(), newState.version(), globalStateGeneration, indexGenerations);
    writeManifest(writer, manifest);
    previousManifest = manifest;
    previousClusterState = newState;
    final long durationMillis = relativeTimeMillisSupplier.getAsLong() - startTimeMillis;
    final TimeValue finalSlowWriteLoggingThreshold = this.slowWriteLoggingThreshold;
    if (durationMillis >= finalSlowWriteLoggingThreshold.getMillis()) {
        LOGGER.warn("writing cluster state took [{}ms] which is above the warn threshold of [{}]; " +
                    "wrote metadata for [{}] indices and skipped [{}] unchanged indices",
                    durationMillis, finalSlowWriteLoggingThreshold, writer.getIndicesWritten(), writer.getIndicesSkipped());
    } else {
        LOGGER.debug("writing cluster state took [{}ms]; wrote metadata for [{}] indices and skipped [{}] unchanged indices",
                     durationMillis, writer.getIndicesWritten(), writer.getIndicesSkipped());
    }
}
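The warn/debug lines only report how many indices were written versus skipped; the skip decision itself lives in writeIndicesMetadata, which is not part of this excerpt. The sketch below is a simplified, hypothetical version of that decision, based on how upstream Elasticsearch compares metadata versions; the crate fork may differ, and canSkipWrite is an invented name.

// Hypothetical helper illustrating the incremental idea: if the metadata version of an
// index is unchanged since the last successful write, the bytes already on disk are
// still valid and the index is counted as "skipped" rather than rewritten.
static boolean canSkipWrite(IndexMetadata previouslyWritten, IndexMetadata current) {
    return previouslyWritten != null && previouslyWritten.getVersion() == current.getVersion();
}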
Use of org.elasticsearch.cluster.metadata.Manifest in project crate by crate.
The class MetaStateService, method loadFullStateBWC:
/**
* "Manifest-less" BWC version of loading metadata from disk. See also {@link #loadFullState()}
*/
private Tuple<Manifest, Metadata> loadFullStateBWC() throws IOException {
    Map<Index, Long> indices = new HashMap<>();
    Metadata.Builder metadataBuilder;
    Tuple<Metadata, Long> metadataAndGeneration = META_DATA_FORMAT.loadLatestStateWithGeneration(LOGGER, namedXContentRegistry, nodeEnv.nodeDataPaths());
    Metadata globalMetadata = metadataAndGeneration.v1();
    long globalStateGeneration = metadataAndGeneration.v2();
    if (globalMetadata != null) {
        metadataBuilder = Metadata.builder(globalMetadata);
        // TODO https://github.com/elastic/elasticsearch/issues/38556
        // assert Version.CURRENT.major < 8 : "failed to find manifest file, which is mandatory starting with Elasticsearch version 8.0";
    } else {
        metadataBuilder = Metadata.builder();
    }
    for (String indexFolderName : nodeEnv.availableIndexFolders()) {
        Tuple<IndexMetadata, Long> indexMetadataAndGeneration = INDEX_META_DATA_FORMAT.loadLatestStateWithGeneration(LOGGER, namedXContentRegistry, nodeEnv.resolveIndexFolder(indexFolderName));
        // TODO https://github.com/elastic/elasticsearch/issues/38556
        // assert Version.CURRENT.major < 8 : "failed to find manifest file, which is mandatory starting with Elasticsearch version 8.0";
        IndexMetadata indexMetadata = indexMetadataAndGeneration.v1();
        long generation = indexMetadataAndGeneration.v2();
        if (indexMetadata != null) {
            indices.put(indexMetadata.getIndex(), generation);
            metadataBuilder.put(indexMetadata, false);
        } else {
            LOGGER.debug("[{}] failed to find metadata for existing index location", indexFolderName);
        }
    }
    Manifest manifest = Manifest.unknownCurrentTermAndVersion(globalStateGeneration, indices);
    return new Tuple<>(manifest, metadataBuilder.build());
}
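Because the legacy, manifest-less layout never stored a coordination term or cluster-state version, the returned manifest carries "unknown" sentinels for both; only the generations and the rebuilt metadata are meaningful. A minimal sketch of consuming the result, using only accessors that already appear in the snippets on this page:

// Sketch of a caller; what it does with the generations and metadata is up to the caller.
Tuple<Manifest, Metadata> legacy = loadFullStateBWC();
Manifest manifest = legacy.v1();
if (manifest.isEmpty() == false) {
    long globalGeneration = manifest.getGlobalGeneration();              // generation of the global metadata file
    Map<Index, Long> indexGenerations = manifest.getIndexGenerations();  // on-disk generation per index folder
    Metadata metadata = legacy.v2();                                     // global plus per-index metadata merged by the builder
}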
Use of org.elasticsearch.cluster.metadata.Manifest in project crate by crate.
The class IncrementalClusterStateWriterTests, method testSlowLogging:
@TestLogging(value = "org.elasticsearch.gateway:WARN")
public void testSlowLogging() throws WriteStateException, IllegalAccessException {
    final long slowWriteLoggingThresholdMillis;
    final Settings settings;
    if (randomBoolean()) {
        slowWriteLoggingThresholdMillis = PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.get(Settings.EMPTY).millis();
        settings = Settings.EMPTY;
    } else {
        slowWriteLoggingThresholdMillis = randomLongBetween(2, 100000);
        settings = Settings.builder()
            .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), slowWriteLoggingThresholdMillis + "ms")
            .build();
    }
    final DiscoveryNode localNode = newNode("node");
    final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
        .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId())).build();
    final long startTimeMillis = randomLongBetween(0L, Long.MAX_VALUE - slowWriteLoggingThresholdMillis * 10);
    final AtomicLong currentTime = new AtomicLong(startTimeMillis);
    final AtomicLong writeDurationMillis = new AtomicLong(slowWriteLoggingThresholdMillis);
    final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    final IncrementalClusterStateWriter incrementalClusterStateWriter = new IncrementalClusterStateWriter(
        settings, clusterSettings, mock(MetaStateService.class),
        new Manifest(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), Collections.emptyMap()),
        clusterState, () -> currentTime.getAndAdd(writeDurationMillis.get()));
    assertExpectedLogs(clusterState, incrementalClusterStateWriter, new MockLogAppender.SeenEventExpectation(
        "should see warning at threshold", IncrementalClusterStateWriter.class.getCanonicalName(), Level.WARN,
        "writing cluster state took [*] which is above the warn threshold of [*]; " +
        "wrote metadata for [0] indices and skipped [0] unchanged indices"));
    writeDurationMillis.set(randomLongBetween(slowWriteLoggingThresholdMillis, slowWriteLoggingThresholdMillis * 2));
    assertExpectedLogs(clusterState, incrementalClusterStateWriter, new MockLogAppender.SeenEventExpectation(
        "should see warning above threshold", IncrementalClusterStateWriter.class.getCanonicalName(), Level.WARN,
        "writing cluster state took [*] which is above the warn threshold of [*]; " +
        "wrote metadata for [0] indices and skipped [0] unchanged indices"));
    writeDurationMillis.set(randomLongBetween(1, slowWriteLoggingThresholdMillis - 1));
    assertExpectedLogs(clusterState, incrementalClusterStateWriter, new MockLogAppender.UnseenEventExpectation(
        "should not see warning below threshold", IncrementalClusterStateWriter.class.getCanonicalName(), Level.WARN, "*"));
    clusterSettings.applySettings(Settings.builder()
        .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), writeDurationMillis.get() + "ms").build());
    assertExpectedLogs(clusterState, incrementalClusterStateWriter, new MockLogAppender.SeenEventExpectation(
        "should see warning at reduced threshold", IncrementalClusterStateWriter.class.getCanonicalName(), Level.WARN,
        "writing cluster state took [*] which is above the warn threshold of [*]; " +
        "wrote metadata for [0] indices and skipped [0] unchanged indices"));
    // ensure no overflow
    assertThat(currentTime.get(), lessThan(startTimeMillis + 10 * slowWriteLoggingThresholdMillis));
}
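The helper assertExpectedLogs is part of the test class and not included in this excerpt. A sketch of what it presumably does, following the usual MockLogAppender pattern in Elasticsearch tests (attach the appender to the class logger, run one write, then verify the expectation); the actual helper in the crate test suite may differ:

private void assertExpectedLogs(ClusterState clusterState,
                                IncrementalClusterStateWriter incrementalClusterStateWriter,
                                MockLogAppender.LoggingExpectation expectation) throws IllegalAccessException, WriteStateException {
    MockLogAppender mockAppender = new MockLogAppender();
    mockAppender.start();
    mockAppender.addExpectation(expectation);
    Logger classLogger = LogManager.getLogger(IncrementalClusterStateWriter.class);
    Loggers.addAppender(classLogger, mockAppender);
    try {
        // One write either crosses the slow-write threshold (warn) or stays below it (debug).
        incrementalClusterStateWriter.updateClusterState(clusterState);
    } finally {
        Loggers.removeAppender(classLogger, mockAppender);
        mockAppender.stop();
    }
    mockAppender.assertAllExpectationsMatched();
}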
Use of org.elasticsearch.cluster.metadata.Manifest in project crate by crate.
The class GatewayMetaState, method start:
public void start(Settings settings,
                  TransportService transportService,
                  ClusterService clusterService,
                  MetaStateService metaStateService,
                  MetadataIndexUpgradeService metadataIndexUpgradeService,
                  MetadataUpgrader metadataUpgrader,
                  PersistedClusterStateService persistedClusterStateService) {
    assert persistedState.get() == null : "should only start once, but already have " + persistedState.get();
    if (DiscoveryNode.isMasterEligibleNode(settings) || DiscoveryNode.isDataNode(settings)) {
        try {
            final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState();
            Metadata metadata = onDiskState.metadata;
            long lastAcceptedVersion = onDiskState.lastAcceptedVersion;
            long currentTerm = onDiskState.currentTerm;
            if (onDiskState.empty()) {
                final Tuple<Manifest, Metadata> legacyState = metaStateService.loadFullState();
                if (legacyState.v1().isEmpty() == false) {
                    metadata = legacyState.v2();
                    lastAcceptedVersion = legacyState.v1().getClusterStateVersion();
                    currentTerm = legacyState.v1().getCurrentTerm();
                }
            }
            PersistedState persistedState = null;
            boolean success = false;
            try {
                final ClusterState clusterState = prepareInitialClusterState(transportService, clusterService,
                    ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings))
                        .version(lastAcceptedVersion)
                        .metadata(upgradeMetadataForNode(metadata, metadataIndexUpgradeService, metadataUpgrader))
                        .build());
                if (DiscoveryNode.isMasterEligibleNode(settings)) {
                    persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState);
                } else {
                    persistedState = new AsyncLucenePersistedState(settings, transportService.getThreadPool(),
                        new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState));
                }
                if (DiscoveryNode.isDataNode(settings)) {
                    // unreference legacy files (only keep them for dangling indices functionality)
                    metaStateService.unreferenceAll();
                } else {
                    // delete legacy files
                    metaStateService.deleteAll();
                }
                // write legacy node metadata to prevent accidental downgrades from spawning empty cluster state
                NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(persistedClusterStateService.getNodeId(), Version.CURRENT),
                    persistedClusterStateService.getDataPaths());
                success = true;
            } finally {
                if (success == false) {
                    IOUtils.closeWhileHandlingException(persistedState);
                }
            }
            this.persistedState.set(persistedState);
        } catch (IOException e) {
            throw new ElasticsearchException("failed to load metadata", e);
        }
    } else {
        final long currentTerm = 0L;
        final ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)).build();
        if (persistedClusterStateService.getDataPaths().length > 0) {
            // write empty cluster state just so that we have a persistent node id; there is no need to write out global
            // metadata with a cluster uuid, as coordinating-only nodes do not snap into a cluster as they carry no state
            try (PersistedClusterStateService.Writer persistenceWriter = persistedClusterStateService.createWriter()) {
                persistenceWriter.writeFullStateAndCommit(currentTerm, clusterState);
            } catch (IOException e) {
                throw new ElasticsearchException("failed to load metadata", e);
            }
            try {
                // delete legacy cluster state files
                metaStateService.deleteAll();
                // write legacy node metadata to prevent downgrades from spawning empty cluster state
                NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(persistedClusterStateService.getNodeId(), Version.CURRENT),
                    persistedClusterStateService.getDataPaths());
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
        persistedState.set(new InMemoryPersistedState(currentTerm, clusterState));
    }
}
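A hypothetical start-up sequence showing how the persisted state produced by start(...) might then be consumed. getPersistedState(), getCurrentTerm() and getLastAcceptedState() are the accessors exposed by upstream Elasticsearch's GatewayMetaState and CoordinationState.PersistedState; they are assumed to exist unchanged in the crate fork:

// Wiring sketch, not taken from the project: the collaborator instances are assumed to be
// created by the node's dependency wiring before this point.
gatewayMetaState.start(settings, transportService, clusterService, metaStateService,
                       metadataIndexUpgradeService, metadataUpgrader, persistedClusterStateService);
CoordinationState.PersistedState persisted = gatewayMetaState.getPersistedState();
long term = persisted.getCurrentTerm();                        // restored from disk, or 0 on a coordinating-only node
ClusterState lastAccepted = persisted.getLastAcceptedState();  // last accepted state, metadata already upgraded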