Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.
From the class PersistedClusterStateServiceTests, method testLoadsFreshestState: the test writes a stale cluster state to every data path and a fresher one to a single path, then checks that loadBestOnDiskState returns the freshest term and version.
public void testLoadsFreshestState() throws IOException {
    final Path[] dataPaths = createDataPaths();
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    final long staleVersion = staleTerm == freshTerm ? randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE);
    final HashSet<Path> unimportantPaths = Arrays.stream(dataPaths).collect(Collectors.toCollection(HashSet::new));
    // write a stale state (older term/version) across all data paths
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            writeState(writer, staleTerm, ClusterState.builder(clusterState)
                .version(staleVersion)
                .metadata(Metadata.builder(clusterState.metadata())
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(staleTerm).build()))
                .build(), clusterState);
        }
    }
    // write a fresher state (newer term/version) to one randomly-chosen data path
    final Path freshPath = randomFrom(dataPaths);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { freshPath })) {
        unimportantPaths.remove(freshPath);
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, freshTerm, ClusterState.builder(clusterState)
                .version(freshVersion)
                .metadata(Metadata.builder(clusterState.metadata())
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(freshTerm).build()))
                .build(), clusterState);
        }
    }
    if (randomBoolean() && unimportantPaths.isEmpty() == false) {
        IOUtils.rm(randomFrom(unimportantPaths));
    }
    // verify that the freshest state is chosen
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState();
        final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
        assertThat(clusterState.term(), equalTo(freshTerm));
        assertThat(clusterState.version(), equalTo(freshVersion));
    }
}
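The writeState(...) helper called throughout these tests is defined elsewhere in PersistedClusterStateServiceTests and is not reproduced on this page. As a rough, non-authoritative sketch, assuming it simply alternates between the full and incremental commit methods exposed by PersistedClusterStateService.Writer, it could look like this:

// Hypothetical sketch of the writeState(...) test helper; the real helper is not shown
// on this page, so treat the randomization and method choice here as assumptions.
private static void writeState(Writer writer, long currentTerm, ClusterState clusterState, ClusterState previousState) throws IOException {
    if (randomBoolean() || clusterState.term() != previousState.term()) {
        // rewrite and commit the entire cluster state
        writer.writeFullStateAndCommit(currentTerm, clusterState);
    } else {
        // write only the changes relative to the previous state, then commit
        writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState);
    }
}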
Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.
From the class PersistedClusterStateServiceTests, method testReloadsMetadataAcrossMultipleSegments: the test performs many successive writes so the persisted metadata ends up spread across multiple segments, then verifies that every index can still be reloaded.
public void testReloadsMetadataAcrossMultipleSegments() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final int writes = between(5, 20);
        final List<Index> indices = new ArrayList<>(writes);
        try (Writer writer = persistedClusterStateService.createWriter()) {
            for (int i = 0; i < writes; i++) {
                final Index index = new Index("test-" + i, UUIDs.randomBase64UUID(random()));
                indices.add(index);
                final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
                writeState(writer, 0L, ClusterState.builder(clusterState)
                    .metadata(Metadata.builder(clusterState.metadata())
                        .version(i + 2)
                        .put(IndexMetadata.builder(index.getName())
                            .settings(Settings.builder()
                                .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                                .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
                                .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                                .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()))))
                    .incrementVersion()
                    .build(), clusterState);
            }
        }
        final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        for (Index index : indices) {
            final IndexMetadata indexMetadata = clusterState.metadata().index(index.getName());
            assertThat(indexMetadata.getIndexUUID(), equalTo(index.getUUID()));
        }
    }
}
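The loadPersistedClusterState(...) helper is also defined outside the snippets shown here. Judging from the verification block at the end of testLoadsFreshestState above, a plausible sketch (an assumption, not the verbatim helper) is:

// Plausible sketch: reload the freshest on-disk state and rebuild a ClusterState from it,
// mirroring the verification block in testLoadsFreshestState.
private static ClusterState loadPersistedClusterState(PersistedClusterStateService persistedClusterStateService) throws IOException {
    final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState();
    return clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
}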
Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.
From the class PersistedClusterStateServiceTests, method testFailsIfGlobalMetadataIsDuplicated: the test copies one metadata directory into another so the global metadata document appears twice, then asserts that loadBestOnDiskState fails with an IllegalStateException naming the broken path.
public void testFailsIfGlobalMetadataIsDuplicated() throws IOException {
    // if someone attempted surgery on the metadata index by hand, e.g. deleting broken segments, then maybe the global metadata
    // is duplicated
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, 0L, ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build(), clusterState);
        }
        final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
        final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
        try (
            Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
            Directory dupDirectory = new NIOFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))
        ) {
            try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
                indexWriter.addIndexes(dupDirectory);
                indexWriter.commit();
            }
        }
        final String message = expectThrows(
            IllegalStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()
        ).getMessage();
        assertThat(message, allOf(containsString("duplicate global metadata found"), containsString(brokenPath.toString())));
    }
}
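The newPersistedClusterStateService(...) helper is likewise not shown on this page. Given the explicit constructor call in testCrashesWithIOErrorOnCommitFailure below, it presumably wraps that same constructor; the exact arguments in this sketch are an assumption rather than the verbatim helper:

// Sketch inferred from the constructor call in the next example; not the verbatim helper.
private PersistedClusterStateService newPersistedClusterStateService(NodeEnvironment nodeEnvironment) {
    return new PersistedClusterStateService(
        nodeEnvironment,
        xContentRegistry(),
        getBigArrays(),
        new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
        () -> 0L // relative-time supplier, matching the constructor call in the next test
    );
}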
Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.
From the class PersistedClusterStateServiceTests, method testCrashesWithIOErrorOnCommitFailure: the test injects a simulated IOException during the segments-file rename that finalizes a commit, then asserts that the failure surfaces as an IOError, that the writer closes itself, and that a new writer can still be created afterwards.
public void testCrashesWithIOErrorOnCommitFailure() throws IOException {
    final AtomicBoolean throwException = new AtomicBoolean();
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            getBigArrays(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            () -> 0L
        ) {
            @Override
            Directory createDirectory(Path path) throws IOException {
                return new FilterDirectory(super.createDirectory(path)) {
                    @Override
                    public void rename(String source, String dest) throws IOException {
                        if (throwException.get() && dest.startsWith("segments")) {
                            throw new IOException("simulated");
                        }
                    }
                };
            }
        };
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            final long newTerm = randomNonNegativeLong();
            final ClusterState newState = ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .clusterUUID(UUIDs.randomBase64UUID(random()))
                    .clusterUUIDCommitted(true)
                    .version(randomLongBetween(1L, Long.MAX_VALUE)))
                .incrementVersion()
                .build();
            throwException.set(true);
            assertThat(expectThrows(IOError.class, () -> {
                if (randomBoolean()) {
                    writeState(writer, newTerm, newState, clusterState);
                } else {
                    writer.commit(newTerm, newState.version());
                }
            }).getMessage(), containsString("simulated"));
            assertFalse(writer.isOpen());
        }
        // check if we can open writer again
        try (Writer ignored = persistedClusterStateService.createWriter()) {}
    }
}
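The clusterStateFromMetadata(...) helper used when verifying testLoadsFreshestState is not reproduced here either. A minimal sketch, assuming it only needs to restore the persisted version and metadata onto a ClusterState builder, might be:

// Minimal sketch under the stated assumption; the cluster name used here is a guess,
// not necessarily what the real test helper does.
private static ClusterState clusterStateFromMetadata(long version, Metadata metadata) {
    return ClusterState.builder(ClusterName.DEFAULT).version(version).metadata(metadata).build();
}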
Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.
From the class PersistedClusterStateServiceTests, method testPersistsAndReloadsIndexMetadataForMultipleIndices: the test persists index metadata for several indices, then adds, updates, and removes indices across commits and verifies that the reloaded state reflects each change.
public void testPersistsAndReloadsIndexMetadataForMultipleIndices() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final long term = randomLongBetween(1L, Long.MAX_VALUE);
        final String addedIndexUuid = UUIDs.randomBase64UUID(random());
        final String updatedIndexUuid = UUIDs.randomBase64UUID(random());
        final String deletedIndexUuid = UUIDs.randomBase64UUID(random());
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            writeState(writer, 0L, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .version(clusterState.metadata().version() + 1)
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(term).build())
                    .put(IndexMetadata.builder("updated")
                        .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                        .settings(Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                            .put(IndexMetadata.SETTING_INDEX_UUID, updatedIndexUuid)))
                    .put(IndexMetadata.builder("deleted")
                        .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                        .settings(Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                            .put(IndexMetadata.SETTING_INDEX_UUID, deletedIndexUuid))))
                .incrementVersion()
                .build(), clusterState);
        }
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            assertThat(clusterState.metadata().indices().size(), equalTo(2));
            assertThat(clusterState.metadata().index("updated").getIndexUUID(), equalTo(updatedIndexUuid));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(clusterState.metadata().index("updated").getSettings()), equalTo(1));
            assertThat(clusterState.metadata().index("deleted").getIndexUUID(), equalTo(deletedIndexUuid));
            writeState(writer, 0L, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .version(clusterState.metadata().version() + 1)
                    .remove("deleted")
                    .put(IndexMetadata.builder("updated")
                        .settings(Settings.builder().put(clusterState.metadata().index("updated").getSettings())
                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)))
                    .put(IndexMetadata.builder("added")
                        .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                        .settings(Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                            .put(IndexMetadata.SETTING_INDEX_UUID, addedIndexUuid))))
                .incrementVersion()
                .build(), clusterState);
        }
        final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        assertThat(clusterState.metadata().indices().size(), equalTo(2));
        assertThat(clusterState.metadata().index("updated").getIndexUUID(), equalTo(updatedIndexUuid));
        assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(clusterState.metadata().index("updated").getSettings()), equalTo(2));
        assertThat(clusterState.metadata().index("added").getIndexUUID(), equalTo(addedIndexUuid));
        assertThat(clusterState.metadata().index("deleted"), nullValue());
    }
}
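Taken together, the tests above exercise one basic round trip for PersistedClusterStateService.Writer: create a writer, commit a cluster state, and reload the best on-disk copy. The simplified sketch below illustrates that pattern using the same test helpers; it is not code taken from the test class:

// Simplified illustration of the write-then-reload pattern exercised by the tests above.
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
    final PersistedClusterStateService service = newPersistedClusterStateService(nodeEnvironment);
    try (Writer writer = service.createWriter()) {
        final ClusterState current = loadPersistedClusterState(service);
        // persist a new version of the state under term 1
        writer.writeFullStateAndCommit(1L, ClusterState.builder(current).incrementVersion().build());
    }
    // reload the freshest committed state across the data paths
    final PersistedClusterStateService.OnDiskState onDiskState = service.loadBestOnDiskState();
    assertThat(onDiskState.currentTerm, equalTo(1L));
}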