Use of org.elasticsearch.common.settings.ClusterSettings in project crate by crate.
From class TableStatsServiceTest, method testSettingsChanges:
@Test
public void testSettingsChanges() {
// Initially disabled
TableStatsService statsService = new TableStatsService(
    Settings.builder()
        .put(TableStatsService.STATS_SERVICE_REFRESH_INTERVAL_SETTING.getKey(), 0)
        .build(),
    THREAD_POOL,
    clusterService,
    Mockito.mock(SQLOperations.class, Answers.RETURNS_MOCKS));
Assert.assertThat(statsService.refreshInterval, Matchers.is(TimeValue.timeValueMinutes(0)));
Assert.assertThat(statsService.scheduledRefresh, Matchers.is(Matchers.nullValue()));
// Default setting
statsService = new TableStatsService(Settings.EMPTY, THREAD_POOL, clusterService, Mockito.mock(SQLOperations.class, Answers.RETURNS_MOCKS));
Assert.assertThat(statsService.refreshInterval, Matchers.is(TableStatsService.STATS_SERVICE_REFRESH_INTERVAL_SETTING.getDefault(Settings.EMPTY)));
Assert.assertThat(statsService.scheduledRefresh, Matchers.is(IsNull.notNullValue()));
ClusterSettings clusterSettings = clusterService.getClusterSettings();
// Update setting
clusterSettings.applySettings(
    Settings.builder().put(TableStatsService.STATS_SERVICE_REFRESH_INTERVAL_SETTING.getKey(), "10m").build());
Assert.assertThat(statsService.refreshInterval, Matchers.is(TimeValue.timeValueMinutes(10)));
Assert.assertThat(statsService.scheduledRefresh, Matchers.is(IsNull.notNullValue()));
// Disable
clusterSettings.applySettings(
    Settings.builder().put(TableStatsService.STATS_SERVICE_REFRESH_INTERVAL_SETTING.getKey(), 0).build());
Assert.assertThat(statsService.refreshInterval, Matchers.is(TimeValue.timeValueMillis(0)));
Assert.assertThat(statsService.scheduledRefresh, Matchers.is(Matchers.nullValue()));
// Reset setting
clusterSettings.applySettings(Settings.builder().build());
Assert.assertThat(statsService.refreshInterval, Matchers.is(TableStatsService.STATS_SERVICE_REFRESH_INTERVAL_SETTING.getDefault(Settings.EMPTY)));
Assert.assertThat(statsService.scheduledRefresh, Matchers.is(IsNull.notNullValue()));
}
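The applySettings calls above only reach the service because TableStatsService registers an update consumer for the dynamic setting on ClusterSettings. Below is a minimal sketch of that wiring, assuming a hypothetical REFRESH_INTERVAL setting; the key, default, and class names are illustrative, not CrateDB's actual code:

import java.util.HashSet;
import java.util.Set;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

class RefreshIntervalWiring {

    // Hypothetical dynamic setting, analogous to STATS_SERVICE_REFRESH_INTERVAL_SETTING.
    static final Setting<TimeValue> REFRESH_INTERVAL = Setting.timeSetting(
        "demo.stats.refresh_interval", TimeValue.timeValueHours(24),
        Setting.Property.NodeScope, Setting.Property.Dynamic);

    volatile TimeValue refreshInterval;

    RefreshIntervalWiring(Settings settings, ClusterSettings clusterSettings) {
        // Initial value comes from the node settings (or the default).
        refreshInterval = REFRESH_INTERVAL.get(settings);
        // Without this registration, applySettings() would never reach this class.
        clusterSettings.addSettingsUpdateConsumer(REFRESH_INTERVAL, newValue -> refreshInterval = newValue);
    }

    public static void main(String[] args) {
        // The setting must be part of the registered set, otherwise the update
        // is rejected as an unknown setting.
        Set<Setting<?>> all = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        all.add(REFRESH_INTERVAL);
        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, all);

        RefreshIntervalWiring wiring = new RefreshIntervalWiring(Settings.EMPTY, clusterSettings);
        clusterSettings.applySettings(Settings.builder().put(REFRESH_INTERVAL.getKey(), "10m").build());
        System.out.println(wiring.refreshInterval); // 10m
    }
}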
Use of org.elasticsearch.common.settings.ClusterSettings in project crate by crate.
From class ClusterModule, method createAllocationDeciders:
// TODO: this is public so allocation benchmark can access the default deciders...can we do that in another way?
/**
 * Returns the built-in {@link AllocationDecider}s as well as those provided by plugins.
 */
public static Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings, List<ClusterPlugin> clusterPlugins) {
// collect deciders by class so that we can detect duplicates
Map<Class, AllocationDecider> deciders = new LinkedHashMap<>();
addAllocationDecider(deciders, new MaxRetryAllocationDecider());
addAllocationDecider(deciders, new ResizeAllocationDecider());
addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider());
addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider());
addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new NodeVersionAllocationDecider());
addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider());
addAllocationDecider(deciders, new RestoreInProgressAllocationDecider());
addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings));
addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new DecommissionAllocationDecider(settings, clusterSettings));
clusterPlugins.stream()
    .flatMap(p -> p.createAllocationDeciders(settings, clusterSettings).stream())
    .forEach(d -> addAllocationDecider(deciders, d));
return deciders.values();
}
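Plugin-provided deciders enter through ClusterPlugin#createAllocationDeciders and are merged into the same duplicate-checked map by the stream at the end. A minimal sketch of a plugin contributing one custom decider; the decider itself is illustrative and vetoes nothing:

import java.util.Collection;
import java.util.Collections;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyClusterPlugin extends Plugin implements ClusterPlugin {

    // Illustrative decider: it only demonstrates the extension point.
    static class NoopAllocationDecider extends AllocationDecider {
        @Override
        public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
            return Decision.ALWAYS;
        }
    }

    @Override
    public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
        // Registering the same decider class twice would trip the duplicate check above.
        return Collections.singletonList(new NoopAllocationDecider());
    }
}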
Use of org.elasticsearch.common.settings.ClusterSettings in project crate by crate.
From class IndexWriterProjectorTest, method testIndexWriter:
@Test
public void testIndexWriter() throws Throwable {
execute("create table bulk_import (id int primary key, name string) with (number_of_replicas=0)");
ensureGreen();
InputCollectExpression sourceInput = new InputCollectExpression(1);
List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
RelationName bulkImportIdent = new RelationName(sqlExecutor.getCurrentSchema(), "bulk_import");
ClusterState state = clusterService().state();
Settings tableSettings = TableSettingsResolver.get(state.getMetadata(), bulkImportIdent, false);
ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class);
IndexWriterProjector writerProjector = new IndexWriterProjector(
    clusterService(),
    new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
    new NoopCircuitBreaker("dummy"), RamAccounting.NO_ACCOUNTING,
    threadPool.scheduler(), threadPool.executor(ThreadPool.Names.SEARCH),
    CoordinatorTxnCtx.systemTransactionContext(),
    new NodeContext(internalCluster().getInstance(Functions.class)),
    Settings.EMPTY,
    IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(tableSettings),
    NumberOfReplicas.fromSettings(tableSettings, state.getNodes().getSize()),
    internalCluster().getInstance(TransportCreatePartitionsAction.class),
    internalCluster().getInstance(TransportShardUpsertAction.class)::execute,
    IndexNameResolver.forTable(bulkImportIdent),
    new Reference(new ReferenceIdent(bulkImportIdent, DocSysColumns.RAW), RowGranularity.DOC, DataTypes.STRING, 0, null),
    Collections.singletonList(ID_IDENT),
    Collections.<Symbol>singletonList(new InputColumn(0)),
    null, null, sourceInput, collectExpressions,
    20, null, null, false, false,
    UUID.randomUUID(), UpsertResultContext.forRowCount(), false);
BatchIterator rowsIterator = InMemoryBatchIterator.of(
    IntStream.range(0, 100)
        .mapToObj(i -> new RowN(new Object[] { i, "{\"id\": " + i + ", \"name\": \"Arthur\"}" }))
        .collect(Collectors.toList()),
    SENTINEL, true);
TestingRowConsumer consumer = new TestingRowConsumer();
consumer.accept(writerProjector.apply(rowsIterator), null);
Bucket objects = consumer.getBucket();
assertThat(objects, contains(isRow(100L)));
execute("refresh table bulk_import");
execute("select count(*) from bulk_import");
assertThat(response.rowCount(), is(1L));
assertThat(response.rows()[0][0], is(100L));
}
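Note the NodeLimits argument: it is backed by a standalone ClusterSettings built over the full built-in set, the usual way to satisfy a ClusterSettings dependency in a test without a running node. A minimal sketch of that pattern on its own; the setting applied at the end is just an illustrative dynamic built-in:

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

class StandaloneClusterSettingsDemo {
    public static void main(String[] args) {
        // Covers every built-in cluster setting, so dependent components accept it.
        ClusterSettings clusterSettings =
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);

        // Dynamic built-ins can then be toggled directly in the test:
        clusterSettings.applySettings(
            Settings.builder().put("cluster.routing.rebalance.enable", "none").build());
    }
}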
Use of org.elasticsearch.common.settings.ClusterSettings in project crate by crate.
From class IndexWriterProjectorUnitTest, method testNullPKValue:
@Test
public void testNullPKValue() throws Throwable {
InputCollectExpression sourceInput = new InputCollectExpression(0);
List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
TransportCreatePartitionsAction transportCreatePartitionsAction = mock(TransportCreatePartitionsAction.class);
IndexWriterProjector indexWriter = new IndexWriterProjector(
    clusterService,
    new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
    new NoopCircuitBreaker("dummy"), RamAccounting.NO_ACCOUNTING,
    scheduler, executor,
    CoordinatorTxnCtx.systemTransactionContext(), createNodeContext(),
    Settings.EMPTY, 5, 1,
    transportCreatePartitionsAction,
    (request, listener) -> {}, // no-op stand-in for the shard upsert action
    IndexNameResolver.forTable(BULK_IMPORT_IDENT), RAW_SOURCE_REFERENCE,
    Collections.singletonList(ID_IDENT),
    Collections.<Symbol>singletonList(new InputColumn(1)),
    null, null, sourceInput, collectExpressions,
    20, null, null, false, false,
    UUID.randomUUID(), UpsertResultContext.forRowCount(), false);
RowN rowN = new RowN(new Object[] { new BytesRef("{\"y\": \"x\"}"), null });
BatchIterator<Row> batchIterator = InMemoryBatchIterator.of(Collections.singletonList(rowN), SENTINEL, true);
batchIterator = indexWriter.apply(batchIterator);
TestingRowConsumer testingBatchConsumer = new TestingRowConsumer();
testingBatchConsumer.accept(batchIterator, null);
List<Object[]> result = testingBatchConsumer.getResult();
// A NULL PK value results in an exception, so zero rows are affected.
// The exception must never bubble up, as other rows might already have been written.
assertThat(result.get(0)[0], is(0L));
}
Use of org.elasticsearch.common.settings.ClusterSettings in project crate by crate.
From class JobsLogsTest, method testLogsArentWipedOnSizeChange:
@Test
public void testLogsArentWipedOnSizeChange() {
Settings settings = Settings.builder().put(JobsLogService.STATS_ENABLED_SETTING.getKey(), true).build();
JobsLogService stats = new JobsLogService(settings, clusterService::localNode, clusterSettings, nodeCtx, scheduler, breakerService);
LogSink<JobContextLog> jobsLogSink = (LogSink<JobContextLog>) stats.get().jobsLog();
LogSink<OperationContextLog> operationsLogSink = (LogSink<OperationContextLog>) stats.get().operationsLog();
Classification classification = new Classification(SELECT, Collections.singleton("Collect"));
jobsLogSink.add(new JobContextLog(new JobContext(UUID.randomUUID(), "select 1", 1L, User.CRATE_USER, classification), null));
clusterSettings.applySettings(Settings.builder()
    .put(JobsLogService.STATS_ENABLED_SETTING.getKey(), true)
    .put(JobsLogService.STATS_JOBS_LOG_SIZE_SETTING.getKey(), 200)
    .build());
assertThat(StreamSupport.stream(stats.get().jobsLog().spliterator(), false).count(), is(1L));
operationsLogSink.add(new OperationContextLog(new OperationContext(1, UUID.randomUUID(), "foo", 2L, () -> -1), null));
operationsLogSink.add(new OperationContextLog(new OperationContext(1, UUID.randomUUID(), "foo", 3L, () -> 1), null));
clusterSettings.applySettings(Settings.builder()
    .put(JobsLogService.STATS_ENABLED_SETTING.getKey(), true)
    .put(JobsLogService.STATS_OPERATIONS_LOG_SIZE_SETTING.getKey(), 1)
    .build());
assertThat(StreamSupport.stream(stats.get().operationsLog().spliterator(), false).count(), is(1L));
}
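The test pins down a contract: resizing one sink must neither wipe its existing entries nor touch the other sink. A minimal sketch of a sink honouring that contract, assuming a hypothetical LOG_SIZE setting; CrateDB's actual LogSink implementations are more involved:

import java.util.concurrent.ConcurrentLinkedDeque;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

class ResizableLogSink {

    // Hypothetical dynamic size setting, analogous to STATS_JOBS_LOG_SIZE_SETTING.
    static final Setting<Integer> LOG_SIZE = Setting.intSetting(
        "demo.jobs_log_size", 10_000, 0, Setting.Property.NodeScope, Setting.Property.Dynamic);

    final ConcurrentLinkedDeque<String> entries = new ConcurrentLinkedDeque<>();
    volatile int maxSize = LOG_SIZE.getDefault(Settings.EMPTY);

    ResizableLogSink(ClusterSettings clusterSettings) {
        // LOG_SIZE must be part of the ClusterSettings' registered set
        // (see the registration sketch after testSettingsChanges above).
        // Resize in place: trim the oldest entries if the log shrank, never clear it.
        clusterSettings.addSettingsUpdateConsumer(LOG_SIZE, newSize -> {
            maxSize = newSize;
            while (entries.size() > newSize) {
                entries.pollFirst();
            }
        });
    }
}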