Use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.
From the class PersistedClusterStateServiceTests, method testFailsOnMismatchedCommittedClusterUUIDs:
public void testFailsOnMismatchedCommittedClusterUUIDs() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    final String clusterUUID1 = UUIDs.randomBase64UUID(random());
    final String clusterUUID2 = UUIDs.randomBase64UUID(random());

    // first establish consistent node IDs and write initial metadata
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writer.writeFullStateAndCommit(0L, clusterState);
        }
    }

    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .clusterUUID(clusterUUID1).clusterUUIDCommitted(true).version(1))
                .incrementVersion().build());
        }
    }

    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .clusterUUID(clusterUUID2).clusterUUIDCommitted(true).version(1))
                .incrementVersion().build());
        }
    }

    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String message = expectThrows(
            IllegalStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage();
        assertThat(message, allOf(
            containsString("mismatched cluster UUIDs in metadata"),
            containsString(clusterUUID1),
            containsString(clusterUUID2)));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths1),
            Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString())));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2),
            Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString())));
    }
}
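The snippets on this page call several private helpers of PersistedClusterStateServiceTests that are not reproduced here (createDataPaths, newNodeEnvironment(Path[]), newPersistedClusterStateService, writeState, loadPersistedClusterState). As a reading aid, the following is a minimal sketch of the first three, inferred from their call sites; it assumes the usual ESTestCase utilities (createTempDir, randomIntBetween, newNodeEnvironment(Settings)) and reuses the five-argument PersistedClusterStateService constructor that appears verbatim in later snippets. Treat the bodies as assumptions rather than the exact crate sources.

// Sketch only: helper shapes inferred from the call sites above, not the crate implementation.
private Path[] createDataPaths() {
    final Path[] dataPaths = new Path[randomIntBetween(1, 4)]; // a node may have several data paths
    for (int i = 0; i < dataPaths.length; i++) {
        dataPaths[i] = createTempDir();
    }
    return dataPaths;
}

private NodeEnvironment newNodeEnvironment(Path[] dataPaths) throws IOException {
    // build a NodeEnvironment whose path.data points at exactly these directories
    final String[] paths = new String[dataPaths.length];
    for (int i = 0; i < dataPaths.length; i++) {
        paths[i] = dataPaths[i].toString();
    }
    return newNodeEnvironment(Settings.builder()
        .putList(Environment.PATH_DATA_SETTING.getKey(), paths)
        .build());
}

private PersistedClusterStateService newPersistedClusterStateService(NodeEnvironment nodeEnvironment) {
    // same constructor arguments as the explicit instantiations in the snippets below
    return new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE,
        new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L);
}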
Use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.
From the class PersistedClusterStateServiceTests, method testFailsGracefullyOnExceptionDuringFlush:
public void testFailsGracefullyOnExceptionDuringFlush() throws IOException {
    final AtomicBoolean throwException = new AtomicBoolean();
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            BigArrays.NON_RECYCLING_INSTANCE,
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            () -> 0L) {

            @Override
            Directory createDirectory(Path path) throws IOException {
                // wrap the real directory so that createOutput can be made to fail on demand
                return new FilterDirectory(super.createDirectory(path)) {
                    @Override
                    public IndexOutput createOutput(String name, IOContext context) throws IOException {
                        if (throwException.get()) {
                            throw new IOException("simulated");
                        }
                        return super.createOutput(name, context);
                    }
                };
            }
        };
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            final long newTerm = randomNonNegativeLong();
            final ClusterState newState = ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .clusterUUID(UUIDs.randomBase64UUID(random()))
                    .clusterUUIDCommitted(true)
                    .version(randomLongBetween(1L, Long.MAX_VALUE)))
                .incrementVersion()
                .build();
            throwException.set(true);
            assertThat(expectThrows(IOException.class, () -> writeState(writer, newTerm, newState, clusterState)).getMessage(),
                containsString("simulated"));
        }
    }
}
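writeState(writer, term, newState, previousState) is another helper from the test class that is not shown on this page. Given that it receives both the new and the previous cluster state, it most plausibly alternates between the Writer's full and incremental commit paths so that both are exercised; the following is a guess at that shape (the random choice and its guard conditions are assumptions), not the crate implementation.

// Assumed helper: exercise both Writer entry points; the exact selection logic is a guess.
private static void writeState(Writer writer, long currentTerm, ClusterState clusterState,
                               ClusterState previousState) throws IOException {
    if (randomBoolean()) {
        writer.writeFullStateAndCommit(currentTerm, clusterState);
    } else {
        writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState);
    }
}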
Use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.
From the class PersistedClusterStateServiceTests, method testLoadsFreshestState:
public void testLoadsFreshestState() throws IOException {
    final Path[] dataPaths = createDataPaths();
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    final long staleVersion = staleTerm == freshTerm ? randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE);

    // write a stale state to all data paths
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            writeState(writer, staleTerm,
                ClusterState.builder(clusterState)
                    .version(staleVersion)
                    .metadata(Metadata.builder(clusterState.metadata())
                        .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(staleTerm).build()))
                    .build(),
                clusterState);
        }
    }

    // then write a fresher state to just one of the data paths
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { randomFrom(dataPaths) })) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, freshTerm,
                ClusterState.builder(clusterState)
                    .version(freshVersion)
                    .metadata(Metadata.builder(clusterState.metadata())
                        .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(freshTerm).build()))
                    .build(),
                clusterState);
        }
    }

    // verify that the freshest state is chosen
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState();
        final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
        assertThat(clusterState.term(), equalTo(freshTerm));
        assertThat(clusterState.version(), equalTo(freshVersion));
    }
}
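loadPersistedClusterState and clusterStateFromMetadata are the read-side helpers used throughout these snippets. The loadBestOnDiskState call and the OnDiskState fields referenced just above suggest the following shape; again this is a plausible sketch, not necessarily the verbatim crate code.

private ClusterState loadPersistedClusterState(PersistedClusterStateService persistedClusterStateService) throws IOException {
    // loadBestOnDiskState picks the freshest committed (term, version) across all data paths
    final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState();
    return clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
}

private ClusterState clusterStateFromMetadata(long version, Metadata metadata) {
    return ClusterState.builder(ClusterName.DEFAULT).version(version).metadata(metadata).build();
}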
Use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.
From the class PersistedClusterStateServiceTests, method testSlowLogging:
@TestLogging(value = "org.elasticsearch.gateway:WARN")
public void testSlowLogging() throws IOException, IllegalAccessException {
    final long slowWriteLoggingThresholdMillis;
    final Settings settings;
    if (randomBoolean()) {
        slowWriteLoggingThresholdMillis = PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.get(Settings.EMPTY).millis();
        settings = Settings.EMPTY;
    } else {
        slowWriteLoggingThresholdMillis = randomLongBetween(2, 100000);
        settings = Settings.builder()
            .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), slowWriteLoggingThresholdMillis + "ms")
            .build();
    }
    final DiscoveryNode localNode = new DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT);
    final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
        .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()))
        .build();
    final long startTimeMillis = randomLongBetween(0L, Long.MAX_VALUE - slowWriteLoggingThresholdMillis * 10);
    final AtomicLong currentTime = new AtomicLong(startTimeMillis);
    final AtomicLong writeDurationMillis = new AtomicLong(slowWriteLoggingThresholdMillis);
    final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            usually() ? BigArrays.NON_RECYCLING_INSTANCE
                      : new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()),
            clusterSettings,
            () -> currentTime.getAndAdd(writeDurationMillis.get()));
        try (Writer writer = persistedClusterStateService.createWriter()) {
            assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation(
                "should see warning at threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN,
                "writing cluster state took [*] which is above the warn threshold of [*]; wrote full state with [0] indices"));

            writeDurationMillis.set(randomLongBetween(slowWriteLoggingThresholdMillis, slowWriteLoggingThresholdMillis * 2));
            assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation(
                "should see warning above threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN,
                "writing cluster state took [*] which is above the warn threshold of [*]; wrote full state with [0] indices"));

            writeDurationMillis.set(randomLongBetween(1, slowWriteLoggingThresholdMillis - 1));
            assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.UnseenEventExpectation(
                "should not see warning below threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN, "*"));

            clusterSettings.applySettings(Settings.builder()
                .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), writeDurationMillis.get() + "ms").build());
            assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation(
                "should see warning at reduced threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN,
                "writing cluster state took [*] which is above the warn threshold of [*]; wrote full state with [0] indices"));

            final ClusterState newClusterState = ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .version(clusterState.version())
                    .put(IndexMetadata.builder("test").settings(Settings.builder()
                        .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                        .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
                        .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                        .put(IndexMetadata.SETTING_INDEX_UUID, "test-uuid"))))
                .incrementVersion().build();
            assertExpectedLogs(1L, clusterState, newClusterState, writer, new MockLogAppender.SeenEventExpectation(
                "should see warning at threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN,
                "writing cluster state took [*] which is above the warn threshold of [*]; "
                    + "wrote global metadata [false] and metadata for [1] indices and skipped [0] unchanged indices"));

            writeDurationMillis.set(randomLongBetween(1, writeDurationMillis.get() - 1));
            assertExpectedLogs(1L, clusterState, newClusterState, writer, new MockLogAppender.UnseenEventExpectation(
                "should not see warning below threshold", PersistedClusterStateService.class.getCanonicalName(), Level.WARN, "*"));

            // ensure no overflow
            assertThat(currentTime.get(), lessThan(startTimeMillis + 14 * slowWriteLoggingThresholdMillis));
        }
    }
}
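assertExpectedLogs performs one write through the Writer while a MockLogAppender is attached to the PersistedClusterStateService logger, then verifies that the given expectation was (or was not) seen. The helper itself is not reproduced on this page; a sketch along the common Elasticsearch MockLogAppender pattern could look like this, with the null-previous-state convention inferred from the calls above (assumed, not the crate source).

// Assumed sketch of the logging assertion helper used in testSlowLogging.
private void assertExpectedLogs(long currentTerm, ClusterState previousState, ClusterState clusterState, Writer writer,
                                MockLogAppender.LoggingExpectation expectation) throws IllegalAccessException, IOException {
    final MockLogAppender mockAppender = new MockLogAppender();
    mockAppender.start();
    mockAppender.addExpectation(expectation);
    final Logger classLogger = LogManager.getLogger(PersistedClusterStateService.class);
    Loggers.addAppender(classLogger, mockAppender);
    try {
        if (previousState == null) {
            writer.writeFullStateAndCommit(currentTerm, clusterState);                       // "wrote full state with [N] indices"
        } else {
            writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState); // incremental write message
        }
    } finally {
        Loggers.removeAppender(classLogger, mockAppender);
        mockAppender.stop();
    }
    mockAppender.assertAllExpectationsMatched();
}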
Use of org.elasticsearch.gateway.PersistedClusterStateService.Writer in project crate by crate.
From the class PersistedClusterStateServiceTests, method testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges:
public void testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final long globalVersion = randomLongBetween(1L, Long.MAX_VALUE);
        final String indexUUID = UUIDs.randomBase64UUID(random());
        final long indexMetadataVersion = randomLongBetween(1L, Long.MAX_VALUE);
        final long oldTerm = randomLongBetween(1L, Long.MAX_VALUE - 1);
        final long newTerm = randomLongBetween(oldTerm + 1, Long.MAX_VALUE);
        try (Writer writer = persistedClusterStateService.createWriter()) {
            ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            writeState(writer, 0L, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .version(globalVersion)
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(oldTerm).build())
                    .put(IndexMetadata.builder("test")
                        .version(indexMetadataVersion - 1) // -1 because it's incremented in .put()
                        .settings(Settings.builder()
                            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
                            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                            .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID))))
                .incrementVersion().build(),
                clusterState);
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            IndexMetadata indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(0));

            // ensure we do not wastefully persist the same index metadata version by making a bad update with the same version
            writer.writeIncrementalStateAndCommit(0L, clusterState, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .put(IndexMetadata.builder(indexMetadata)
                        .settings(Settings.builder()
                            .put(indexMetadata.getSettings())
                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)).build(), false))
                .incrementVersion().build());
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(0));

            // ensure that we do persist the new index metadata version by making an update with a higher version
            writeState(writer, 0L, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .put(IndexMetadata.builder(indexMetadata)
                        .settings(Settings.builder()
                            .put(indexMetadata.getSettings())
                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)).build(), true))
                .incrementVersion().build(),
                clusterState);
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion + 1));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(2));

            // ensure that we also persist the index metadata when the term changes
            writeState(writer, 0L, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(newTerm).build())
                    .put(IndexMetadata.builder(indexMetadata)
                        .settings(Settings.builder()
                            .put(indexMetadata.getSettings())
                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 3)).build(), false))
                .incrementVersion().build(),
                clusterState);
        }
        final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        final IndexMetadata indexMetadata = clusterState.metadata().index("test");
        assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
        assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion + 1));
        assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(3));
    }
}
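Taken together, these snippets document the lifecycle of PersistedClusterStateService.Writer: build the service over a NodeEnvironment, open a single Writer, commit full or incremental states, and later recover the freshest committed state via loadBestOnDiskState. A condensed usage sketch follows; currentTerm, initialState and updatedState are placeholders for whatever the caller has at hand, and the constructor arguments simply mirror the test code above.

// Condensed sketch of the Writer lifecycle exercised by the tests on this page.
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
    final PersistedClusterStateService service = new PersistedClusterStateService(
        nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE,
        new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L);

    try (Writer writer = service.createWriter()) {
        // the tests above always start with a full commit before any incremental one
        writer.writeFullStateAndCommit(currentTerm, initialState);
        writer.writeIncrementalStateAndCommit(currentTerm, initialState, updatedState);
    }

    // on recovery, the state with the highest (term, version) across the data paths wins
    final PersistedClusterStateService.OnDiskState onDiskState = service.loadBestOnDiskState();
    final Metadata recoveredMetadata = onDiskState.metadata;
}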