Example usage of org.opensearch.gateway.PersistedClusterStateService.Writer in the OpenSearch project (opensearch-project),
taken from the class PersistedClusterStateServiceTests, method testClosesWriterOnFatalError.
/**
 * Verifies that the persisted-state writer closes itself after a fatal error (a simulated
 * {@link OutOfMemoryError} thrown from {@code Directory#sync}) and that a fresh writer can
 * subsequently be created against the same node environment.
 */
public void testClosesWriterOnFatalError() throws IOException {
    final AtomicBoolean throwException = new AtomicBoolean();
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            getBigArrays(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            () -> 0L
        ) {
            @Override
            Directory createDirectory(Path path) throws IOException {
                return new FilterDirectory(super.createDirectory(path)) {
                    @Override
                    public void sync(Collection<String> names) {
                        // Only fail once the test has armed the flag; the original body threw
                        // unconditionally, leaving throwException set-but-never-read dead code.
                        if (throwException.get()) {
                            throw new OutOfMemoryError("simulated");
                        }
                    }
                };
            }
        };
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            final long newTerm = randomNonNegativeLong();
            final ClusterState newState = ClusterState.builder(clusterState)
                .metadata(
                    Metadata.builder(clusterState.metadata())
                        .clusterUUID(UUIDs.randomBase64UUID(random()))
                        .clusterUUIDCommitted(true)
                        .version(randomLongBetween(1L, Long.MAX_VALUE))
                )
                .incrementVersion()
                .build();
            throwException.set(true);
            // Both a full/incremental write and a bare commit must surface the fatal error ...
            assertThat(expectThrows(OutOfMemoryError.class, () -> {
                if (randomBoolean()) {
                    writeState(writer, newTerm, newState, clusterState);
                } else {
                    writer.commit(newTerm, newState.version());
                }
            }).getMessage(), containsString("simulated"));
            // ... and the writer must have closed itself in response.
            assertFalse(writer.isOpen());
        }
        // check if we can open writer again
        try (Writer ignored = persistedClusterStateService.createWriter()) {
        }
    }
}
Example usage of org.opensearch.gateway.PersistedClusterStateService.Writer in the OpenSearch project (opensearch-project),
taken from the class PersistedClusterStateServiceTests, method testLoadsFreshestState.
/**
 * Writes a stale (term, version) state to every data path, then a fresher state to one randomly
 * chosen path only, and verifies that loading picks the freshest copy — even if one of the paths
 * holding only stale state is deleted beforehand.
 */
public void testLoadsFreshestState() throws IOException {
    final Path[] dataPaths = createDataPaths();
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    // When the terms coincide, freshness is decided by version, so the stale version must be lower.
    final long staleVersion = staleTerm == freshTerm
        ? randomLongBetween(1L, freshVersion - 1)
        : randomLongBetween(1L, Long.MAX_VALUE);
    final HashSet<Path> unimportantPaths = Arrays.stream(dataPaths).collect(Collectors.toCollection(HashSet::new));

    // Stale state goes to every data path.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState staleState = ClusterState.builder(clusterState)
                .version(staleVersion)
                .metadata(
                    Metadata.builder(clusterState.metadata())
                        .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(staleTerm).build())
                )
                .build();
            writeState(writer, staleTerm, staleState, clusterState);
        }
    }

    // The fresh state is written to a single path, which is therefore no longer "unimportant".
    final Path freshPath = randomFrom(dataPaths);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { freshPath })) {
        unimportantPaths.remove(freshPath);
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            final ClusterState freshState = ClusterState.builder(clusterState)
                .version(freshVersion)
                .metadata(
                    Metadata.builder(clusterState.metadata())
                        .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(freshTerm).build())
                )
                .build();
            writeState(writer, freshTerm, freshState, clusterState);
        }
    }

    // Losing a path that only ever held stale state must not change the outcome.
    if (randomBoolean() && unimportantPaths.isEmpty() == false) {
        IOUtils.rm(randomFrom(unimportantPaths));
    }

    // verify that the freshest state is chosen
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState();
        final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
        assertThat(clusterState.term(), equalTo(freshTerm));
        assertThat(clusterState.version(), equalTo(freshVersion));
    }
}
Example usage of org.opensearch.gateway.PersistedClusterStateService.Writer in the OpenSearch project (opensearch-project),
taken from the class PersistedClusterStateServiceTests, method testFailsOnMismatchedCommittedClusterUUIDs.
/**
 * Commits two different cluster UUIDs onto two disjoint sets of data paths and verifies that
 * loading the combined set fails with an error naming both UUIDs and at least one path from
 * each set.
 */
public void testFailsOnMismatchedCommittedClusterUUIDs() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    final String clusterUUID1 = UUIDs.randomBase64UUID(random());
    final String clusterUUID2 = UUIDs.randomBase64UUID(random());

    // first establish consistent node IDs and write initial metadata
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writer.writeFullStateAndCommit(0L, clusterState);
        }
    }

    // Commit clusterUUID1 on the first set of paths ...
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            final Metadata.Builder committed1 = Metadata.builder(clusterState.metadata())
                .clusterUUID(clusterUUID1)
                .clusterUUIDCommitted(true)
                .version(1);
            writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState).metadata(committed1).incrementVersion().build());
        }
    }

    // ... and clusterUUID2 on the second set.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            final Metadata.Builder committed2 = Metadata.builder(clusterState.metadata())
                .clusterUUID(clusterUUID2)
                .clusterUUIDCommitted(true)
                .version(1);
            writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState).metadata(committed2).incrementVersion().build());
        }
    }

    // Loading the combined set must now fail and identify both conflicting UUIDs and their paths.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String message = expectThrows(
            IllegalStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()
        ).getMessage();
        assertThat(message, allOf(containsString("mismatched cluster UUIDs in metadata"), containsString(clusterUUID1), containsString(clusterUUID2)));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths1), Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString())));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2), Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString())));
    }
}
Example usage of org.opensearch.gateway.PersistedClusterStateService.Writer in the OpenSearch project (opensearch-project),
taken from the class PersistedClusterStateServiceTests, method testSlowLogging.
/**
 * Drives the writer with a fake clock that advances by a configurable amount per read, and
 * asserts that a WARN-level "slow write" message is logged exactly when the simulated write
 * duration reaches the (dynamically updatable) threshold, and never when it stays below it.
 */
@TestLogging(value = "org.opensearch.gateway:WARN", reason = "to ensure that we log gateway events on WARN level")
public void testSlowLogging() throws IOException, IllegalAccessException {
    // Randomly exercise both the default threshold and an explicitly configured one.
    final long slowWriteLoggingThresholdMillis;
    final Settings settings;
    if (randomBoolean()) {
        slowWriteLoggingThresholdMillis = PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.get(Settings.EMPTY).millis();
        settings = Settings.EMPTY;
    } else {
        slowWriteLoggingThresholdMillis = randomLongBetween(2, 100000);
        settings = Settings.builder()
            .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), slowWriteLoggingThresholdMillis + "ms")
            .build();
    }

    final DiscoveryNode localNode = new DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT);
    final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
        .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()))
        .build();

    // The fake clock advances by writeDurationMillis on every read; start low enough to rule out overflow.
    final long startTimeMillis = randomLongBetween(0L, Long.MAX_VALUE - slowWriteLoggingThresholdMillis * 10);
    final AtomicLong currentTime = new AtomicLong(startTimeMillis);
    final AtomicLong writeDurationMillis = new AtomicLong(slowWriteLoggingThresholdMillis);

    final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            getBigArrays(),
            clusterSettings,
            () -> currentTime.getAndAdd(writeDurationMillis.get())
        );
        final String loggerName = PersistedClusterStateService.class.getCanonicalName();
        final String fullStateWarning = "writing cluster state took [*] which is above the warn threshold of [*]; "
            + "wrote full state with [0] indices";
        final String incrementalWarning = "writing cluster state took [*] which is above the warn threshold of [*]; "
            + "wrote global metadata [false] and metadata for [1] indices and skipped [0] unchanged indices";

        try (Writer writer = persistedClusterStateService.createWriter()) {
            assertExpectedLogs(1L, null, clusterState, writer,
                new MockLogAppender.SeenEventExpectation("should see warning at threshold", loggerName, Level.WARN, fullStateWarning));

            writeDurationMillis.set(randomLongBetween(slowWriteLoggingThresholdMillis, slowWriteLoggingThresholdMillis * 2));
            assertExpectedLogs(1L, null, clusterState, writer,
                new MockLogAppender.SeenEventExpectation("should see warning above threshold", loggerName, Level.WARN, fullStateWarning));

            writeDurationMillis.set(randomLongBetween(1, slowWriteLoggingThresholdMillis - 1));
            assertExpectedLogs(1L, null, clusterState, writer,
                new MockLogAppender.UnseenEventExpectation("should not see warning below threshold", loggerName, Level.WARN, "*"));

            // Lower the threshold dynamically to the current write duration; the warning must reappear.
            clusterSettings.applySettings(
                Settings.builder()
                    .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), writeDurationMillis.get() + "ms")
                    .build()
            );
            assertExpectedLogs(1L, null, clusterState, writer,
                new MockLogAppender.SeenEventExpectation("should see warning at reduced threshold", loggerName, Level.WARN, fullStateWarning));

            final ClusterState newClusterState = ClusterState.builder(clusterState)
                .metadata(
                    Metadata.builder(clusterState.metadata())
                        .version(clusterState.version())
                        .put(
                            IndexMetadata.builder("test")
                                .settings(
                                    Settings.builder()
                                        .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                                        .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
                                        .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                                        .put(IndexMetadata.SETTING_INDEX_UUID, "test-uuid")
                                )
                        )
                )
                .incrementVersion()
                .build();

            assertExpectedLogs(1L, clusterState, newClusterState, writer,
                new MockLogAppender.SeenEventExpectation("should see warning at threshold", loggerName, Level.WARN, incrementalWarning));

            writeDurationMillis.set(randomLongBetween(0, writeDurationMillis.get() - 1));
            assertExpectedLogs(1L, clusterState, newClusterState, writer,
                new MockLogAppender.UnseenEventExpectation("should not see warning below threshold", loggerName, Level.WARN, "*"));

            // ensure no overflow
            assertThat(currentTime.get(), lessThan(startTimeMillis + 14 * slowWriteLoggingThresholdMillis));
        }
    }
}
Example usage of org.opensearch.gateway.PersistedClusterStateService.Writer in the OpenSearch project (opensearch-project),
taken from the class PersistedClusterStateServiceTests, method testPersistsAndReloadsIndexMetadataForMultipleIndices.
/**
 * Persists two indices ("updated" and "deleted"), then in a second write removes one, modifies
 * one, and adds a third ("added"), and verifies that reloading reflects exactly the final set of
 * indices with their expected UUIDs and settings.
 */
public void testPersistsAndReloadsIndexMetadataForMultipleIndices() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final long term = randomLongBetween(1L, Long.MAX_VALUE);
        final String addedIndexUuid = UUIDs.randomBase64UUID(random());
        final String updatedIndexUuid = UUIDs.randomBase64UUID(random());
        final String deletedIndexUuid = UUIDs.randomBase64UUID(random());

        // First write: create "updated" and "deleted".
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            final IndexMetadata.Builder updatedIndex = IndexMetadata.builder("updated")
                .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                .settings(
                    Settings.builder()
                        .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                        .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                        .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                        .put(IndexMetadata.SETTING_INDEX_UUID, updatedIndexUuid)
                );
            final IndexMetadata.Builder deletedIndex = IndexMetadata.builder("deleted")
                .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                .settings(
                    Settings.builder()
                        .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                        .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                        .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                        .put(IndexMetadata.SETTING_INDEX_UUID, deletedIndexUuid)
                );
            final Metadata.Builder firstMetadata = Metadata.builder(clusterState.metadata())
                .version(clusterState.metadata().version() + 1)
                .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(term).build())
                .put(updatedIndex)
                .put(deletedIndex);
            writeState(writer, 0L, ClusterState.builder(clusterState).metadata(firstMetadata).incrementVersion().build(), clusterState);
        }

        // Second write: drop "deleted", bump replicas on "updated", introduce "added".
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            assertThat(clusterState.metadata().indices().size(), equalTo(2));
            assertThat(clusterState.metadata().index("updated").getIndexUUID(), equalTo(updatedIndexUuid));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(clusterState.metadata().index("updated").getSettings()), equalTo(1));
            assertThat(clusterState.metadata().index("deleted").getIndexUUID(), equalTo(deletedIndexUuid));

            final IndexMetadata.Builder reconfiguredUpdatedIndex = IndexMetadata.builder("updated")
                .settings(
                    Settings.builder()
                        .put(clusterState.metadata().index("updated").getSettings())
                        .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                );
            final IndexMetadata.Builder addedIndex = IndexMetadata.builder("added")
                .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                .settings(
                    Settings.builder()
                        .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                        .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                        .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                        .put(IndexMetadata.SETTING_INDEX_UUID, addedIndexUuid)
                );
            final Metadata.Builder secondMetadata = Metadata.builder(clusterState.metadata())
                .version(clusterState.metadata().version() + 1)
                .remove("deleted")
                .put(reconfiguredUpdatedIndex)
                .put(addedIndex);
            writeState(writer, 0L, ClusterState.builder(clusterState).metadata(secondMetadata).incrementVersion().build(), clusterState);
        }

        // Reload and verify the final picture: "updated" (2 replicas) and "added" remain, "deleted" is gone.
        final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        assertThat(clusterState.metadata().indices().size(), equalTo(2));
        assertThat(clusterState.metadata().index("updated").getIndexUUID(), equalTo(updatedIndexUuid));
        assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(clusterState.metadata().index("updated").getSettings()), equalTo(2));
        assertThat(clusterState.metadata().index("added").getIndexUUID(), equalTo(addedIndexUuid));
        assertThat(clusterState.metadata().index("deleted"), nullValue());
    }
}
End of aggregated usage examples.