use of org.graylog2.migrations.Migration in project graylog2-server by Graylog2.
the class V20161215163900_MoveIndexSetDefaultConfigTest method upgrade.
@Test
@MongoDBFixtures("V20161215163900_MoveIndexSetDefaultConfigTest.json")
public void upgrade() throws Exception {
    final long count = collection.count();

    migration.upgrade();

    final MigrationCompleted migrationCompleted = clusterConfigService.get(MigrationCompleted.class);

    assertThat(collection.count())
            .withFailMessage("No document should be deleted by the migration!")
            .isEqualTo(count);
    assertThat(collection.count(Filters.exists("default")))
            .withFailMessage("The migration should have deleted the \"default\" field from the documents!")
            .isEqualTo(0L);
    assertThat(clusterConfigService.get(DefaultIndexSetConfig.class))
            .withFailMessage("The DefaultIndexSetConfig should have been written to cluster config!")
            .isNotNull();
    assertThat(clusterConfigService.get(DefaultIndexSetConfig.class).defaultIndexSetId())
            .isEqualTo("57f3d721a43c2d59cb750001");

    assertThat(migrationCompleted).isNotNull();
    assertThat(migrationCompleted.indexSetIds())
            .containsExactlyInAnyOrder("57f3d721a43c2d59cb750001", "57f3d721a43c2d59cb750003");
}
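Based on the assertions above, the migration under test is expected to find the index set flagged as default, record it in a DefaultIndexSetConfig cluster config entry, drop the "default" field from all documents without deleting any of them, and write a MigrationCompleted marker. The following is a hedged sketch of such an upgrade() method; helper methods like DefaultIndexSetConfig.create() and MigrationCompleted.create() are assumptions, not copied from the actual graylog2-server source.

// Hedged sketch only -- inferred from the test assertions, not the real
// V20161215163900_MoveIndexSetDefaultConfig implementation.
@Override
public void upgrade() {
    if (clusterConfigService.get(MigrationCompleted.class) != null) {
        return; // the migration already ran
    }
    final ImmutableSet.Builder<String> indexSetIds = ImmutableSet.builder();
    for (final Document doc : collection.find()) {
        final String id = doc.getObjectId("_id").toHexString();
        indexSetIds.add(id);
        // The index set flagged as default becomes the cluster-wide default index set config.
        if (doc.getBoolean("default", false)) {
            clusterConfigService.write(DefaultIndexSetConfig.create(id)); // assumed factory method
        }
    }
    // Remove the obsolete "default" flag without deleting any documents.
    collection.updateMany(Filters.exists("default"), Updates.unset("default"));
    clusterConfigService.write(MigrationCompleted.create(indexSetIds.build())); // assumed factory method
}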
use of org.graylog2.migrations.Migration in project graylog2-server by Graylog2.
the class V20161130141500_DefaultStreamRecalcIndexRanges method upgrade.
@Override
public void upgrade() {
    IndexSet indexSet;
    try {
        indexSet = indexSetRegistry.getDefault();
    } catch (IllegalStateException e) {
        // Try to find the default index set manually if the index set registry cannot find it.
        // This is needed if you run this migration with a 2.2.0-beta.2 state before commit 460cac6af.
        final IndexSetConfig indexSetConfig = indexSetService.findOne(DBQuery.is("default", true))
                .orElseThrow(() -> new IllegalStateException("No default index set configured! This is a bug!"));
        indexSet = indexSetFactory.create(indexSetConfig);
    }
    final IndexSet defaultIndexSet = indexSet;

    if (!cluster.isConnected()) {
        LOG.info("Cluster not connected yet, delaying migration until it is reachable.");
        while (true) {
            try {
                cluster.waitForConnectedAndDeflectorHealthy();
                break;
            } catch (InterruptedException | TimeoutException e) {
                LOG.warn("Interrupted or timed out waiting for Elasticsearch cluster, checking again.");
            }
        }
    }

    final Set<String> indexRangesWithoutStreams = indexRangeService.findAll().stream()
            .filter(indexRange -> defaultIndexSet.isManagedIndex(indexRange.indexName()))
            .filter(indexRange -> indexRange.streamIds() == null)
            .map(IndexRange::indexName)
            .collect(Collectors.toSet());

    if (indexRangesWithoutStreams.size() == 0) {
        // all ranges have a stream list, even if it is empty, nothing more to do
        return;
    }

    final String currentWriteTarget;
    try {
        currentWriteTarget = defaultIndexSet.getActiveWriteIndex();
    } catch (TooManyAliasesException e) {
        LOG.error("Multiple write targets found for write alias. Cannot continue to assign streams to older indices", e);
        return;
    }

    for (String indexName : defaultIndexSet.getManagedIndices()) {
        if (indexName.equals(currentWriteTarget)) {
            // do not recalculate for current write target
            continue;
        }
        if (!indexRangesWithoutStreams.contains(indexName)) {
            // already computed streams for this index
            continue;
        }
        LOG.info("Recalculating streams in index {}", indexName);
        final CreateNewSingleIndexRangeJob createNewSingleIndexRangeJob =
                rebuildIndexRangeJobFactory.create(indexSetRegistry.getAll(), indexName);
        createNewSingleIndexRangeJob.execute();
    }
}
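The rebuildIndexRangeJobFactory used above is not defined in this excerpt; in graylog2-server it is presumably a Guice assisted-injection factory. A minimal sketch of such a factory interface, with the name and exact signature assumed from the call site rather than copied from the source:

// Hypothetical factory shape matching rebuildIndexRangeJobFactory.create(indexSetRegistry.getAll(), indexName);
// the real interface may be nested inside CreateNewSingleIndexRangeJob and differ slightly.
public interface CreateNewSingleIndexRangeJobFactory {
    CreateNewSingleIndexRangeJob create(Set<IndexSet> indexSets, String indexName);
}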
use of org.graylog2.migrations.Migration in project graylog2-server by Graylog2.
the class IndexRangesMigrationPeriodical method doRun.
@Override
public void doRun() {
    final MongoIndexRangesMigrationComplete migrationComplete = clusterConfigService.get(MongoIndexRangesMigrationComplete.class);
    if (migrationComplete != null && migrationComplete.complete) {
        LOG.debug("Migration of index ranges (pre Graylog 1.2.2) already complete. Skipping migration process.");
        return;
    }

    while (!cluster.isConnected() || !cluster.isHealthy()) {
        Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS);
    }

    final Set<String> indexNames = ImmutableSet.copyOf(indexSetRegistry.getManagedIndices());

    // Migrate old MongoDB index ranges
    final SortedSet<IndexRange> mongoIndexRanges = legacyMongoIndexRangeService.findAll();
    for (IndexRange indexRange : mongoIndexRanges) {
        if (indexNames.contains(indexRange.indexName())) {
            LOG.info("Migrating index range from MongoDB: {}", indexRange);
            indexRangeService.save(indexRange);
        } else {
            LOG.info("Removing stale index range from MongoDB: {}", indexRange);
        }
        legacyMongoIndexRangeService.delete(indexRange.indexName());
    }

    // Check whether all index ranges have been migrated
    final int numberOfIndices = indexNames.size();
    final SortedSet<IndexRange> allIndexRanges = indexRangeService.findAll();
    final int numberOfIndexRanges = allIndexRanges.size();
    if (numberOfIndices > numberOfIndexRanges) {
        LOG.info("There are more indices ({}) than there are index ranges ({}). Notifying administrator.",
                numberOfIndices, numberOfIndexRanges);
        // remove all present index names so we can display the index sets that need manual fixing
        final Set<String> extraIndices = Sets.newHashSet(indexNames);
        allIndexRanges.forEach(indexRange -> extraIndices.remove(indexRange.indexName()));
        final Set<String> affectedIndexSetNames = extraIndices.stream()
                .map(indexSetRegistry::getForIndex)
                .filter(Optional::isPresent)
                .map(Optional::get)
                .map(IndexSet::getConfig)
                .map(IndexSetConfig::title)
                .collect(Collectors.toSet());
        final Notification notification = notificationService.buildNow()
                .addSeverity(Notification.Severity.URGENT)
                .addType(Notification.Type.INDEX_RANGES_RECALCULATION)
                .addDetail("indices", numberOfIndices)
                .addDetail("index_ranges", numberOfIndexRanges)
                .addDetail("index_sets", affectedIndexSetNames.isEmpty() ? null : Joiner.on(", ").join(affectedIndexSetNames));
        notificationService.publishIfFirst(notification);
    }
    clusterConfigService.write(new MongoIndexRangesMigrationComplete(true));
}
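The MongoIndexRangesMigrationComplete value read and written above is not shown in this excerpt. Judging from the usage (a public complete field and a single-argument constructor), it is roughly a marker class like the following sketch; the Jackson annotations for cluster-config serialization are assumptions.

// Minimal sketch consistent with the usage in doRun(); not copied from graylog2-server.
public class MongoIndexRangesMigrationComplete {
    @JsonProperty
    public boolean complete;

    @JsonCreator
    public MongoIndexRangesMigrationComplete(@JsonProperty("complete") boolean complete) {
        this.complete = complete;
    }
}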
use of org.graylog2.migrations.Migration in project graylog2-server by Graylog2.
the class V2018070614390000_EnforceUniqueGrokPatternsTest method setUp.
@Before
public void setUp() {
    collection = mongodb.mongoConnection().getMongoDatabase().getCollection("grok_patterns");
    subscriber = new TestSubscriber();
    clusterEventBus = new ClusterEventBus(MoreExecutors.newDirectExecutorService());
    clusterEventBus.registerClusterEventSubscriber(subscriber);

    migration = new V2018070614390000_EnforceUniqueGrokPatterns(collection, clusterEventBus);
}
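The TestSubscriber registered on the ClusterEventBus is not shown in this excerpt. A plausible minimal implementation is a Guava EventBus subscriber that records every posted event so the test can assert on it later; the field and method names below are assumptions. Because the bus runs on a direct executor service, events are delivered synchronously during the test.

// Hypothetical sketch of the TestSubscriber used above.
public static class TestSubscriber {
    public final List<Object> events = new ArrayList<>();

    @Subscribe
    public void handleEvent(Object event) {
        events.add(event);
    }
}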
use of org.graylog2.migrations.Migration in project graylog2-server by Graylog2.
the class V20170110150100_FixAlertConditionsMigrationTest method upgrade.
@Test
@MongoDBFixtures("V20170110150100_FixAlertConditionsMigration.json")
public void upgrade() throws Exception {
    // First check all types of the existing documents
    AlertConditionAssertions.assertThat(getAlertCondition("2fa6a415-ce0c-4a36-accc-dd9519eb06d9"))
            .hasParameter("backlog", 2)
            .hasParameter("grace", 1)
            .hasParameter("threshold_type", "MORE")
            .hasParameter("threshold", "5")
            .hasParameter("time", "1");
    AlertConditionAssertions.assertThat(getAlertCondition("393fd8b2-9b17-42d3-86b0-6e55d0f5343a"))
            .hasParameter("backlog", 0)
            .hasParameter("field", "bar")
            .hasParameter("grace", "0")
            .hasParameter("value", "baz");
    AlertConditionAssertions.assertThat(getAlertCondition("0e75404f-c0ee-40b0-8872-b1aec441ba1c"))
            .hasParameter("backlog", "0")
            .hasParameter("field", "foo")
            .hasParameter("grace", "0")
            .hasParameter("threshold_type", "HIGHER")
            .hasParameter("threshold", "0")
            .hasParameter("time", "5")
            .hasParameter("type", "MAX");

    // Run the migration that should convert all affected fields to integers
    migration.upgrade();

    // Check all types again
    AlertConditionAssertions.assertThat(getAlertCondition("2fa6a415-ce0c-4a36-accc-dd9519eb06d9"))
            .hasParameter("backlog", 2)
            .hasParameter("grace", 1)
            .hasParameter("threshold_type", "MORE")
            .hasParameter("threshold", 5)
            .hasParameter("time", 1);
    AlertConditionAssertions.assertThat(getAlertCondition("393fd8b2-9b17-42d3-86b0-6e55d0f5343a"))
            .hasParameter("backlog", 0)
            .hasParameter("field", "bar")
            .hasParameter("grace", 0)
            .hasParameter("value", "baz");
    AlertConditionAssertions.assertThat(getAlertCondition("0e75404f-c0ee-40b0-8872-b1aec441ba1c"))
            .hasParameter("backlog", 0)
            .hasParameter("field", "foo")
            .hasParameter("grace", 0)
            .hasParameter("threshold_type", "HIGHER")
            .hasParameter("threshold", 0)
            .hasParameter("time", 5)
            .hasParameter("type", "MAX");

    final MigrationCompleted migrationCompleted = clusterConfigService.get(MigrationCompleted.class);
    assertThat(migrationCompleted).isNotNull();
    assertThat(migrationCompleted.streamIds())
            .containsOnly("58458e442f857c314491344e", "58458e442f857c314491345e");
    assertThat(migrationCompleted.alertConditionIds())
            .containsOnly("2fa6a415-ce0c-4a36-accc-dd9519eb06d9",
                    "393fd8b2-9b17-42d3-86b0-6e55d0f5343a",
                    "0e75404f-c0ee-40b0-8872-b1aec441ba1c");
}
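Inferred from the assertions above, the migration converts alert-condition parameters that store numbers as strings ("backlog", "grace", "threshold", "time") into integers while leaving string-valued parameters such as "field" and "threshold_type" untouched. The following is a hedged sketch of that conversion; the helper name and the document layout are assumptions, not taken from the real migration.

// Hypothetical helper illustrating the string-to-integer conversion checked by the test above.
private static final Set<String> INTEGER_PARAMETERS = ImmutableSet.of("backlog", "grace", "threshold", "time");

private void convertNumericParameters(Document alertCondition) {
    final Document parameters = alertCondition.get("parameters", Document.class);
    if (parameters == null) {
        return;
    }
    for (final String key : INTEGER_PARAMETERS) {
        final Object value = parameters.get(key);
        if (value instanceof String) {
            parameters.put(key, Integer.parseInt((String) value));
        }
    }
}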