Search in sources :

Example 1 with Writer

use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.

From the class PersistedClusterStateServiceTests, the method testFailsOnMismatchedCommittedClusterUUIDs:

/**
 * Commits two different cluster UUIDs to two disjoint sets of data paths, then
 * verifies that loading state from the combined set fails with an
 * IllegalStateException whose message names both UUIDs and at least one path
 * from each set.
 */
public void testFailsOnMismatchedCommittedClusterUUIDs() throws IOException {
    final Path[] firstPaths = createDataPaths();
    final Path[] secondPaths = createDataPaths();
    final Path[] allPaths = Stream.concat(Arrays.stream(firstPaths), Arrays.stream(secondPaths)).toArray(Path[]::new);
    final String uuidOne = UUIDs.randomBase64UUID(random());
    final String uuidTwo = UUIDs.randomBase64UUID(random());
    // Establish consistent node IDs across every path and persist the initial (uncommitted) metadata.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(allPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState initialState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(initialState.metadata().clusterUUIDCommitted());
            writer.writeFullStateAndCommit(0L, initialState);
        }
    }
    // Commit uuidOne on the first set of paths only.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(firstPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState loaded = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(loaded.metadata().clusterUUIDCommitted());
            final ClusterState committedOne = ClusterState.builder(loaded)
                .metadata(Metadata.builder(loaded.metadata()).clusterUUID(uuidOne).clusterUUIDCommitted(true).version(1))
                .incrementVersion()
                .build();
            writer.writeFullStateAndCommit(0L, committedOne);
        }
    }
    // Commit uuidTwo on the second set of paths only.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(secondPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState loaded = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(loaded.metadata().clusterUUIDCommitted());
            final ClusterState committedTwo = ClusterState.builder(loaded)
                .metadata(Metadata.builder(loaded.metadata()).clusterUUID(uuidTwo).clusterUUIDCommitted(true).version(1))
                .incrementVersion()
                .build();
            writer.writeFullStateAndCommit(0L, committedTwo);
        }
    }
    // Loading across all paths must now detect the mismatch and name both UUIDs and the offending paths.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(allPaths)) {
        final String message = expectThrows(
            IllegalStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage();
        assertThat(message, allOf(containsString("mismatched cluster UUIDs in metadata"), containsString(uuidOne), containsString(uuidTwo)));
        assertTrue("[" + message + "] should match " + Arrays.toString(firstPaths),
            Arrays.stream(firstPaths).anyMatch(p -> message.contains(p.toString())));
        assertTrue("[" + message + "] should match " + Arrays.toString(secondPaths),
            Arrays.stream(secondPaths).anyMatch(p -> message.contains(p.toString())));
    }
}
Also used : Path(java.nio.file.Path) Arrays(java.util.Arrays) BigArrays(org.elasticsearch.common.util.BigArrays) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) Environment(org.elasticsearch.env.Environment) Term(org.apache.lucene.index.Term) Level(org.apache.logging.log4j.Level) CoordinationMetadata(org.elasticsearch.cluster.coordination.CoordinationMetadata) Writer(org.elasticsearch.gateway.PersistedClusterStateService.Writer) ClusterState(org.elasticsearch.cluster.ClusterState) Settings(org.elasticsearch.common.settings.Settings) Directory(org.apache.lucene.store.Directory) Matchers.nullValue(org.hamcrest.Matchers.nullValue) ClusterName(org.elasticsearch.cluster.ClusterName) MockLogAppender(org.elasticsearch.test.MockLogAppender) IOContext(org.apache.lucene.store.IOContext) Path(java.nio.file.Path) TestLogging(org.elasticsearch.test.junit.annotations.TestLogging) Matchers.allOf(org.hamcrest.Matchers.allOf) Collection(java.util.Collection) UUIDs(org.elasticsearch.common.UUIDs) MockBigArrays(org.elasticsearch.common.util.MockBigArrays) Collectors(java.util.stream.Collectors) MockPageCacheRecycler(org.elasticsearch.common.util.MockPageCacheRecycler) IndexWriter(org.apache.lucene.index.IndexWriter) List(java.util.List) Logger(org.apache.logging.log4j.Logger) Version(org.elasticsearch.Version) Stream(java.util.stream.Stream) Matchers.equalTo(org.hamcrest.Matchers.equalTo) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig) Matchers.containsString(org.hamcrest.Matchers.containsString) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Index(org.elasticsearch.index.Index) ArrayList(java.util.ArrayList) Metadata(org.elasticsearch.cluster.metadata.Metadata) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) NoneCircuitBreakerService(org.elasticsearch.indices.breaker.NoneCircuitBreakerService) Matchers.lessThan(org.hamcrest.Matchers.lessThan) ESTestCase(org.elasticsearch.test.ESTestCase) 
IndexOutput(org.apache.lucene.store.IndexOutput) Loggers(org.elasticsearch.common.logging.Loggers) SimpleFSDirectory(org.apache.lucene.store.SimpleFSDirectory) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) IOUtils(io.crate.common.io.IOUtils) IOException(java.io.IOException) IOError(java.io.IOError) AtomicLong(java.util.concurrent.atomic.AtomicLong) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) FilterDirectory(org.apache.lucene.store.FilterDirectory) LogManager(org.apache.logging.log4j.LogManager) ClusterState(org.elasticsearch.cluster.ClusterState) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) Matchers.containsString(org.hamcrest.Matchers.containsString) Writer(org.elasticsearch.gateway.PersistedClusterStateService.Writer) IndexWriter(org.apache.lucene.index.IndexWriter)

Example 2 with Writer

use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.

From the class PersistedClusterStateServiceTests, the method testFailsGracefullyOnExceptionDuringFlush:

/**
 * Simulates an I/O failure during flush: the service's directory is wrapped in a
 * FilterDirectory whose createOutput throws once a flag is set, and the resulting
 * IOException (message "simulated") must propagate out of the write.
 */
public void testFailsGracefullyOnExceptionDuringFlush() throws IOException {
    final AtomicBoolean shouldFail = new AtomicBoolean();
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService failingService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            BigArrays.NON_RECYCLING_INSTANCE,
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            () -> 0L) {

            @Override
            Directory createDirectory(Path path) throws IOException {
                // Wrap the real directory so output creation can be made to fail on demand.
                return new FilterDirectory(super.createDirectory(path)) {

                    @Override
                    public IndexOutput createOutput(String name, IOContext context) throws IOException {
                        if (shouldFail.get()) {
                            throw new IOException("simulated");
                        }
                        return super.createOutput(name, context);
                    }
                };
            }
        };
        try (Writer writer = failingService.createWriter()) {
            final ClusterState previousState = loadPersistedClusterState(failingService);
            final long term = randomNonNegativeLong();
            final ClusterState stateToWrite = ClusterState.builder(previousState)
                .metadata(Metadata.builder(previousState.metadata())
                    .clusterUUID(UUIDs.randomBase64UUID(random()))
                    .clusterUUIDCommitted(true)
                    .version(randomLongBetween(1L, Long.MAX_VALUE)))
                .incrementVersion()
                .build();
            // Arm the failure only now, so the writer itself was created successfully.
            shouldFail.set(true);
            assertThat(
                expectThrows(IOException.class, () -> writeState(writer, term, stateToWrite, previousState)).getMessage(),
                containsString("simulated"));
        }
    }
}
Also used : Path(java.nio.file.Path) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) FilterDirectory(org.apache.lucene.store.FilterDirectory) IOContext(org.apache.lucene.store.IOContext) Matchers.containsString(org.hamcrest.Matchers.containsString) IOException(java.io.IOException) Writer(org.elasticsearch.gateway.PersistedClusterStateService.Writer) IndexWriter(org.apache.lucene.index.IndexWriter)

Example 3 with Writer

use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.

From the class PersistedClusterStateServiceTests, the method testLoadsFreshestState:

/**
 * Writes a stale (term, version) state to every data path, then a strictly
 * fresher state to one randomly chosen path, and checks that
 * loadBestOnDiskState selects the freshest state.
 */
public void testLoadsFreshestState() throws IOException {
    final Path[] dataPaths = createDataPaths();
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    // When the terms coincide the stale version must be strictly lower; otherwise any version works.
    final long staleVersion = staleTerm == freshTerm ? randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final ClusterState baseState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState staleState = ClusterState.builder(baseState)
                .version(staleVersion)
                .metadata(Metadata.builder(baseState.metadata())
                    .coordinationMetadata(CoordinationMetadata.builder(baseState.coordinationMetadata()).term(staleTerm).build()))
                .build();
            writeState(writer, staleTerm, staleState, baseState);
        }
    }
    // Overwrite just one of the paths with fresher state.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { randomFrom(dataPaths) })) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState loaded = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            final ClusterState freshState = ClusterState.builder(loaded)
                .version(freshVersion)
                .metadata(Metadata.builder(loaded.metadata())
                    .coordinationMetadata(CoordinationMetadata.builder(loaded.coordinationMetadata()).term(freshTerm).build()))
                .build();
            writeState(writer, freshTerm, freshState, loaded);
        }
    }
    // verify that the freshest state is chosen
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState();
        final ClusterState bestState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
        assertThat(bestState.term(), equalTo(freshTerm));
        assertThat(bestState.version(), equalTo(freshVersion));
    }
}
Also used : Path(java.nio.file.Path) ClusterState(org.elasticsearch.cluster.ClusterState) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) Writer(org.elasticsearch.gateway.PersistedClusterStateService.Writer) IndexWriter(org.apache.lucene.index.IndexWriter)

Example 4 with Writer

use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.

From the class PersistedClusterStateServiceTests, the method testSlowLogging:

/**
 * Checks slow-write warning logging: a write whose (simulated) duration reaches the
 * configured SLOW_WRITE_LOGGING_THRESHOLD emits a WARN message, a faster write does
 * not, and the threshold is dynamically updatable via cluster settings. Time is
 * fully controlled: each read of the service's time supplier advances a fake clock
 * by writeDurationMillis, so "write duration" is deterministic within the test.
 */
@TestLogging(value = "org.elasticsearch.gateway:WARN")
public void testSlowLogging() throws IOException, IllegalAccessException {
    final long slowWriteLoggingThresholdMillis;
    final Settings settings;
    if (randomBoolean()) {
        // Half the time, exercise the default threshold...
        slowWriteLoggingThresholdMillis = PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.get(Settings.EMPTY).millis();
        settings = Settings.EMPTY;
    } else {
        // ...otherwise configure a random explicit threshold (>= 2ms so "below threshold" values exist).
        slowWriteLoggingThresholdMillis = randomLongBetween(2, 100000);
        settings = Settings.builder().put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), slowWriteLoggingThresholdMillis + "ms").build();
    }
    final DiscoveryNode localNode = new DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT);
    final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId())).build();
    // Bound the start time away from Long.MAX_VALUE so the advancing fake clock cannot overflow
    // (cross-checked by the lessThan assertion at the end).
    final long startTimeMillis = randomLongBetween(0L, Long.MAX_VALUE - slowWriteLoggingThresholdMillis * 10);
    final AtomicLong currentTime = new AtomicLong(startTimeMillis);
    final AtomicLong writeDurationMillis = new AtomicLong(slowWriteLoggingThresholdMillis);
    final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        // The time supplier advances the fake clock by the current writeDurationMillis on every sample.
        PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), usually() ? BigArrays.NON_RECYCLING_INSTANCE : new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()), clusterSettings, () -> currentTime.getAndAdd(writeDurationMillis.get()));
        try (Writer writer = persistedClusterStateService.createWriter()) {
            // A write taking exactly the threshold counts as slow.
            assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation("should see warning at threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN, "writing cluster state took [*] which is above the warn threshold of [*]; " + "wrote full state with [0] indices"));
            writeDurationMillis.set(randomLongBetween(slowWriteLoggingThresholdMillis, slowWriteLoggingThresholdMillis * 2));
            assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation("should see warning above threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN, "writing cluster state took [*] which is above the warn threshold of [*]; " + "wrote full state with [0] indices"));
            // Below the threshold there must be no warning at all.
            writeDurationMillis.set(randomLongBetween(1, slowWriteLoggingThresholdMillis - 1));
            assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.UnseenEventExpectation("should not see warning below threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN, "*"));
            // Dynamically lower the threshold to the current write duration; the same duration now warns.
            clusterSettings.applySettings(Settings.builder().put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), writeDurationMillis.get() + "ms").build());
            assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation("should see warning at reduced threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN, "writing cluster state took [*] which is above the warn threshold of [*]; " + "wrote full state with [0] indices"));
            final ClusterState newClusterState = ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).version(clusterState.version()).put(IndexMetadata.builder("test").settings(Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT).put(IndexMetadata.SETTING_INDEX_UUID, "test-uuid")))).incrementVersion().build();
            // An incremental write logs the incremental-write message shape instead of the full-state one.
            assertExpectedLogs(1L, clusterState, newClusterState, writer, new MockLogAppender.SeenEventExpectation("should see warning at threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN, "writing cluster state took [*] which is above the warn threshold of [*]; " + "wrote global metadata [false] and metadata for [1] indices and skipped [0] unchanged indices"));
            writeDurationMillis.set(randomLongBetween(1, writeDurationMillis.get() - 1));
            assertExpectedLogs(1L, clusterState, newClusterState, writer, new MockLogAppender.UnseenEventExpectation("should not see warning below threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN, "*"));
            // ensure no overflow
            assertThat(currentTime.get(), lessThan(startTimeMillis + 14 * slowWriteLoggingThresholdMillis));
        }
    }
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) MockLogAppender(org.elasticsearch.test.MockLogAppender) MockPageCacheRecycler(org.elasticsearch.common.util.MockPageCacheRecycler) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) MockBigArrays(org.elasticsearch.common.util.MockBigArrays) AtomicLong(java.util.concurrent.atomic.AtomicLong) Settings(org.elasticsearch.common.settings.Settings) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) Writer(org.elasticsearch.gateway.PersistedClusterStateService.Writer) IndexWriter(org.apache.lucene.index.IndexWriter) NoneCircuitBreakerService(org.elasticsearch.indices.breaker.NoneCircuitBreakerService) TestLogging(org.elasticsearch.test.junit.annotations.TestLogging)

Example 5 with Writer

use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.

From the class PersistedClusterStateServiceTests, the method testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges:

/**
 * Verifies the index-metadata persistence rule: a write is skipped when the index
 * metadata version is unchanged (even if its content differs), but is persisted
 * when either the index metadata version increases or the coordination term
 * changes. The number-of-replicas setting acts as the observable payload.
 */
public void testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final long globalVersion = randomLongBetween(1L, Long.MAX_VALUE);
        final String indexUUID = UUIDs.randomBase64UUID(random());
        final long indexMetadataVersion = randomLongBetween(1L, Long.MAX_VALUE);
        // oldTerm < newTerm so the final write is guaranteed to be a term change.
        final long oldTerm = randomLongBetween(1L, Long.MAX_VALUE - 1);
        final long newTerm = randomLongBetween(oldTerm + 1, Long.MAX_VALUE);
        try (Writer writer = persistedClusterStateService.createWriter()) {
            ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            // Initial full write: index "test" with 0 replicas at version indexMetadataVersion.
            writeState(writer, 0L, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).version(globalVersion).coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(oldTerm).build()).put(IndexMetadata.builder("test").version(// -1 because it's incremented in .put()
            indexMetadataVersion - 1).settings(Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT).put(IndexMetadata.SETTING_INDEX_UUID, indexUUID)))).incrementVersion().build(), clusterState);
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            IndexMetadata indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(0));
            // ensure we do not wastefully persist the same index metadata version by making a bad update with the same version
            // (replicas changed to 1 but version not incremented — the `false` flag keeps the version unchanged)
            writer.writeIncrementalStateAndCommit(0L, clusterState, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(IndexMetadata.builder(indexMetadata).settings(Settings.builder().put(indexMetadata.getSettings()).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)).build(), false)).incrementVersion().build());
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion));
            // Still 0 replicas: the same-version update above was not persisted.
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(0));
            // ensure that we do persist the same index metadata version by making an update with a higher version
            // (the `true` flag increments the index metadata version)
            writeState(writer, 0L, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(IndexMetadata.builder(indexMetadata).settings(Settings.builder().put(indexMetadata.getSettings()).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)).build(), true)).incrementVersion().build(), clusterState);
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion + 1));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(2));
            // ensure that we also persist the index metadata when the term changes
            // (same index metadata version, but oldTerm -> newTerm forces persistence of 3 replicas)
            writeState(writer, 0L, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(newTerm).build()).put(IndexMetadata.builder(indexMetadata).settings(Settings.builder().put(indexMetadata.getSettings()).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 3)).build(), false)).incrementVersion().build(), clusterState);
        }
        // Reload after closing the writer: the term-change write must have been persisted.
        final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        final IndexMetadata indexMetadata = clusterState.metadata().index("test");
        assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
        assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion + 1));
        assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(3));
    }
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) Matchers.containsString(org.hamcrest.Matchers.containsString) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) Writer(org.elasticsearch.gateway.PersistedClusterStateService.Writer) IndexWriter(org.apache.lucene.index.IndexWriter)

Aggregations

IndexWriter (org.apache.lucene.index.IndexWriter)16 NodeEnvironment (org.elasticsearch.env.NodeEnvironment)16 Writer (org.elasticsearch.gateway.PersistedClusterStateService.Writer)16 ClusterState (org.elasticsearch.cluster.ClusterState)15 Matchers.containsString (org.hamcrest.Matchers.containsString)11 Path (java.nio.file.Path)10 FilterDirectory (org.apache.lucene.store.FilterDirectory)9 ClusterSettings (org.elasticsearch.common.settings.ClusterSettings)7 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)6 IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)6 Directory (org.apache.lucene.store.Directory)6 SimpleFSDirectory (org.apache.lucene.store.SimpleFSDirectory)6 IOException (java.io.IOException)5 IndexMetadata (org.elasticsearch.cluster.metadata.IndexMetadata)5 ArrayList (java.util.ArrayList)4 Collection (java.util.Collection)4 AtomicLong (java.util.concurrent.atomic.AtomicLong)4 Term (org.apache.lucene.index.Term)4 IOContext (org.apache.lucene.store.IOContext)4 DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode)4