Use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.
From class IndicesServiceTests, method testVerifyIfIndexContentDeleted.
public void testVerifyIfIndexContentDeleted() throws Exception {
    final Index index = new Index("test", UUIDs.randomBase64UUID());
    final IndicesService indicesService = getIndicesService();
    final NodeEnvironment nodeEnv = getNodeEnvironment();
    final MetaStateService metaStateService = getInstanceFromNode(MetaStateService.class);
    final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
    final Settings idxSettings = Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
        .build();
    final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName())
        .settings(idxSettings)
        .numberOfShards(1)
        .numberOfReplicas(0)
        .build();
    metaStateService.writeIndex("test index being created", indexMetadata);
    final Metadata metadata = Metadata.builder(clusterService.state().metadata()).put(indexMetadata, true).build();
    final ClusterState csWithIndex = new ClusterState.Builder(clusterService.state()).metadata(metadata).build();
    try {
        indicesService.verifyIndexIsDeleted(index, csWithIndex);
        fail("Should not be able to delete index contents when the index is part of the cluster state.");
    } catch (IllegalStateException e) {
        assertThat(e.getMessage(), containsString("Cannot delete index"));
    }
    final ClusterState withoutIndex = new ClusterState.Builder(csWithIndex)
        .metadata(Metadata.builder(csWithIndex.metadata()).remove(index.getName()))
        .build();
    indicesService.verifyIndexIsDeleted(index, withoutIndex);
    assertFalse("index files should be deleted", FileSystemUtils.exists(nodeEnv.indexPaths(index)));
}
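
The try/fail/catch idiom above can also be written with the expectThrows helper that the PersistedClusterStateServiceTests examples further down this page already use. A minimal equivalent sketch, reusing the names from the test:

// Equivalent assertion using expectThrows instead of try/fail/catch.
final IllegalStateException e = expectThrows(
    IllegalStateException.class,
    () -> indicesService.verifyIndexIsDeleted(index, csWithIndex)
);
assertThat(e.getMessage(), containsString("Cannot delete index"));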
Use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.
From class ShardPath, method selectNewPathForShard.
public static ShardPath selectNewPathForShard(
    NodeEnvironment env,
    ShardId shardId,
    IndexSettings indexSettings,
    long avgShardSizeInBytes,
    Map<Path, Integer> dataPathToShardCount
) throws IOException {
    final Path dataPath;
    final Path statePath;
    if (indexSettings.hasCustomDataPath()) {
        dataPath = env.resolveCustomLocation(indexSettings.customDataPath(), shardId);
        statePath = env.nodePaths()[0].resolve(shardId);
    } else {
        BigInteger totFreeSpace = BigInteger.ZERO;
        for (NodeEnvironment.NodePath nodePath : env.nodePaths()) {
            totFreeSpace = totFreeSpace.add(BigInteger.valueOf(nodePath.fileStore.getUsableSpace()));
        }
        // TODO: this is a hack!! We should instead keep track of incoming (relocated) shards since we know
        // how large they will be once they're done copying, instead of a silly guess for such cases:
        // Very rough heuristic of how much disk space we expect the shard will use over its lifetime:
        // the max of the current average shard size across the cluster and 5% of the total available
        // free space on this node:
        BigInteger estShardSizeInBytes = BigInteger.valueOf(avgShardSizeInBytes)
            .max(totFreeSpace.divide(BigInteger.valueOf(20)));
        // TODO - do we need something more extensible? Yet, this does the job for now...
        final NodeEnvironment.NodePath[] paths = env.nodePaths();
        // If no better path is chosen, use the one with the most space by default
        NodeEnvironment.NodePath bestPath = getPathWithMostFreeSpace(env);
        if (paths.length != 1) {
            Map<NodeEnvironment.NodePath, Long> pathToShardCount = env.shardCountPerPath(shardId.getIndex());
            // Compute how much space there is on each path
            final Map<NodeEnvironment.NodePath, BigInteger> pathsToSpace = new HashMap<>(paths.length);
            for (NodeEnvironment.NodePath nodePath : paths) {
                FileStore fileStore = nodePath.fileStore;
                BigInteger usableBytes = BigInteger.valueOf(fileStore.getUsableSpace());
                pathsToSpace.put(nodePath, usableBytes);
            }
            bestPath = Arrays.stream(paths)
                .filter(path -> pathsToSpace.get(path).subtract(estShardSizeInBytes).compareTo(BigInteger.ZERO) > 0)
                .sorted((p1, p2) -> {
                    int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L), pathToShardCount.getOrDefault(p2, 0L));
                    if (cmp == 0) {
                        // if the number of shards of this index is equal, tie-break with the total shard count per path
                        cmp = Integer.compare(
                            dataPathToShardCount.getOrDefault(p1.path, 0),
                            dataPathToShardCount.getOrDefault(p2.path, 0)
                        );
                        if (cmp == 0) {
                            // if that is equal as well, tie-break with the usable bytes
                            cmp = pathsToSpace.get(p2).compareTo(pathsToSpace.get(p1));
                        }
                    }
                    return cmp;
                })
                .findFirst()
                .orElse(bestPath);
        }
        statePath = bestPath.resolve(shardId);
        dataPath = statePath;
    }
    return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId);
}
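
The size estimate above is simply max(average shard size, 5% of the node's total free space), since dividing by 20 yields 5%; any data path that cannot fit that estimate is filtered out before the tie-breaking comparator runs. A worked numeric sketch (the values are illustrative, not from the source):

// Illustrative values only: suppose the cluster-wide average shard size is 2 GiB
// and the node has 100 GiB of usable space summed across its data paths.
BigInteger avg = BigInteger.valueOf(2L * 1024 * 1024 * 1024);        // 2 GiB
BigInteger totFree = BigInteger.valueOf(100L * 1024 * 1024 * 1024);  // 100 GiB
BigInteger est = avg.max(totFree.divide(BigInteger.valueOf(20)));    // max(2 GiB, 5 GiB) = 5 GiB
// Only data paths with more than 5 GiB of usable space remain candidates.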
Use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.
From class IncrementalClusterStateWriterTests, method testAtomicityWithFailures.
public void testAtomicityWithFailures() throws IOException {
    try (NodeEnvironment env = newNodeEnvironment()) {
        MetaStateServiceWithFailures metaStateService =
            new MetaStateServiceWithFailures(randomIntBetween(100, 1000), env, xContentRegistry());
        // We only guarantee atomicity of writes if there is an initial Manifest file
        Manifest manifest = Manifest.empty();
        Metadata metadata = Metadata.EMPTY_METADATA;
        metaStateService.writeManifestAndCleanup("startup", Manifest.empty());
        long currentTerm = randomNonNegativeLong();
        long clusterStateVersion = randomNonNegativeLong();
        metaStateService.failRandomly();
        Set<Metadata> possibleMetadata = new HashSet<>();
        possibleMetadata.add(metadata);
        for (int i = 0; i < randomIntBetween(1, 5); i++) {
            IncrementalClusterStateWriter.AtomicClusterStateWriter writer =
                new IncrementalClusterStateWriter.AtomicClusterStateWriter(metaStateService, manifest);
            metadata = randomMetadataForTx();
            Map<Index, Long> indexGenerations = new HashMap<>();
            try {
                long globalGeneration = writer.writeGlobalState("global", metadata);
                for (IndexMetadata indexMetadata : metadata) {
                    long generation = writer.writeIndex("index", indexMetadata);
                    indexGenerations.put(indexMetadata.getIndex(), generation);
                }
                Manifest newManifest = new Manifest(currentTerm, clusterStateVersion, globalGeneration, indexGenerations);
                writer.writeManifestAndCleanup("manifest", newManifest);
                possibleMetadata.clear();
                possibleMetadata.add(metadata);
                manifest = newManifest;
            } catch (WriteStateException e) {
                if (e.isDirty()) {
                    possibleMetadata.add(metadata);
                    /*
                     * If a dirty WriteStateException occurred, it's only safe to proceed if there is a subsequent
                     * successful write of metadata and Manifest. We prefer to break here rather than overcomplicate
                     * the test logic. See also MetadataStateFormat#testFailRandomlyAndReadAnyState, which does not break.
                     */
                    break;
                }
            }
        }
        metaStateService.noFailures();
        Tuple<Manifest, Metadata> manifestAndMetadata = metaStateService.loadFullState();
        Metadata loadedMetadata = manifestAndMetadata.v2();
        assertTrue(possibleMetadata.stream().anyMatch(md -> metadataEquals(md, loadedMetadata)));
    }
}
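
The distinction that drives the catch block is WriteStateException.isDirty(): a dirty failure means the writer may already have mutated on-disk state, so both the previous and the attempted metadata remain plausible recovery outcomes, whereas a clean failure leaves the old state intact. A condensed sketch of that decision, reusing the names from the method above:

try {
    writer.writeManifestAndCleanup("manifest", newManifest);
} catch (WriteStateException e) {
    if (e.isDirty()) {
        // On-disk state may now be either the old or the new metadata;
        // both must be accepted as valid until a later write succeeds.
        possibleMetadata.add(metadata);
    }
    // A non-dirty failure changed nothing on disk: the old state alone still holds.
}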
Use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.
From class PersistedClusterStateServiceTests, method testFailsIfFreshestStateIsInStaleTerm.
public void testFailsIfFreshestStateIsInStaleTerm() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    final long staleCurrentTerm = randomLongBetween(1L, Long.MAX_VALUE - 1);
    final long freshCurrentTerm = randomLongBetween(staleCurrentTerm + 1, Long.MAX_VALUE);
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    final long staleVersion = staleTerm == freshTerm
        ? randomLongBetween(1L, freshVersion - 1)
        : randomLongBetween(1L, Long.MAX_VALUE);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writeState(writer, staleCurrentTerm, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .version(1)
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(staleTerm).build()))
                .version(staleVersion)
                .build(), clusterState);
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, freshCurrentTerm, clusterState, clusterState);
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final PersistedClusterStateService.OnDiskState onDiskState =
                newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState();
            final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
            writeState(writer, onDiskState.currentTerm, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .version(2)
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(freshTerm).build()))
                .version(freshVersion)
                .build(), clusterState);
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String message = expectThrows(IllegalStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage();
        assertThat(message, allOf(containsString("inconsistent terms found"),
            containsString(Long.toString(staleCurrentTerm)), containsString(Long.toString(freshCurrentTerm))));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths1),
            Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString())));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2),
            Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString())));
    }
}
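
The final assertion fires because dataPaths1 and dataPaths2 end up persisting different current terms on the same node, which loadBestOnDiskState refuses to reconcile. As a hedged sketch of the freshness ordering the test's variable names imply (an assumption for illustration, not the actual loader code):

// Hedged sketch, not the real implementation: the test's setup suggests that
// on-disk states are ordered by term first, with version as a tie-breaker
// only within the same term.
static boolean isFresher(long termA, long versionA, long termB, long versionB) {
    if (termA != termB) {
        return termA > termB; // a higher term always wins
    }
    return versionA > versionB; // same term: the higher version wins
}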
Use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.
From class PersistedClusterStateServiceTests, method testFailsIfGlobalMetadataIsMissing.
public void testFailsIfGlobalMetadataIsMissing() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, 0L, ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build(), clusterState);
        }
        final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
        try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
            // Opening an IndexWriter with OpenMode.CREATE discards the existing index,
            // so the commit below leaves a valid but empty metadata index behind.
            final IndexWriterConfig indexWriterConfig = new IndexWriterConfig();
            indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
            try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) {
                indexWriter.commit();
            }
        }
        final String message = expectThrows(IllegalStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage();
        assertThat(message, allOf(containsString("no global metadata found"), containsString(brokenPath.toString())));
    }
}
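
To see why the loader then reports "no global metadata found", note that the rewritten directory is still a valid Lucene index, just one with zero documents. A small sketch that checks this with standard Lucene APIs (org.apache.lucene.index.DirectoryReader), reusing brokenPath from the test above:

// After the CREATE + commit above, the metadata directory opens cleanly
// but contains no documents at all, so no global metadata can be loaded.
try (Directory dir = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
     DirectoryReader reader = DirectoryReader.open(dir)) {
    assert reader.numDocs() == 0;
}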