Example 71 with NoopServiceEmitter

Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.

In the class SegmentLocalCacheManagerConcurrencyTest, the method setUp:

@Before
public void setUp() throws Exception {
    EmittingLogger.registerEmitter(new NoopServiceEmitter());
    localSegmentCacheFolder = tmpFolder.newFolder("segment_cache_folder");
    final List<StorageLocationConfig> locations = new ArrayList<>();
    // Each segment is 1,000 bytes, so this cache location can hold at most 2 segments.
    final StorageLocationConfig locationConfig = new StorageLocationConfig(localSegmentCacheFolder, 2000L, null);
    locations.add(locationConfig);
    manager = new SegmentLocalCacheManager(new SegmentLoaderConfig().withLocations(locations), jsonMapper);
    executorService = Execs.multiThreaded(4, "segment-loader-local-cache-manager-concurrency-test-%d");
}
Also used : ArrayList(java.util.ArrayList) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) Before(org.junit.Before)
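
NoopServiceEmitter is a ServiceEmitter whose emit methods simply discard every event, so registering it gives EmittingLogger the global emitter it expects without wiring up any metrics backend. A minimal, self-contained sketch of that pattern (the class name EmitterSmokeTest and the alert text are illustrative only; the import paths are assumed to match Druid's core modules):

import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.server.metrics.NoopServiceEmitter;

public class EmitterSmokeTest {
    private static final EmittingLogger log = new EmittingLogger(EmitterSmokeTest.class);

    public static void main(String[] args) {
        // EmittingLogger needs a registered emitter before alerts can be emitted;
        // the no-op emitter accepts the event and silently drops it.
        EmittingLogger.registerEmitter(new NoopServiceEmitter());
        log.makeAlert("illustrative alert, discarded by the no-op emitter").emit();
    }
}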

Example 72 with NoopServiceEmitter

Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.

In the class CuratorDruidCoordinatorTest, the method setUp:

@Before
public void setUp() throws Exception {
    segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
    dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
    coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
    metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
    configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
    EasyMock.expect(configManager.watch(EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject())).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
    EasyMock.replay(configManager);
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    curator.create().creatingParentsIfNeeded().forPath(SEGPATH);
    curator.create().creatingParentsIfNeeded().forPath(SOURCE_LOAD_PATH);
    curator.create().creatingParentsIfNeeded().forPath(DESTINATION_LOAD_PATH);
    objectMapper = new DefaultObjectMapper();
    druidCoordinatorConfig = new TestDruidCoordinatorConfig(new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null, null, new Duration(COORDINATOR_PERIOD), null, null, null, null, null, null, null, null, null, null, 10, new Duration("PT0s"));
    sourceLoadQueueChildrenCache = new PathChildrenCache(curator, SOURCE_LOAD_PATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_src-%d"));
    destinationLoadQueueChildrenCache = new PathChildrenCache(curator, DESTINATION_LOAD_PATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_dest-%d"));
    sourceLoadQueuePeon = new CuratorLoadQueuePeon(curator, SOURCE_LOAD_PATH, objectMapper, peonExec, callbackExec, druidCoordinatorConfig);
    destinationLoadQueuePeon = new CuratorLoadQueuePeon(curator, DESTINATION_LOAD_PATH, objectMapper, peonExec, callbackExec, druidCoordinatorConfig);
    druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
    loadManagementPeons = new ConcurrentHashMap<>();
    scheduledExecutorFactory = (corePoolSize, nameFormat) -> Executors.newSingleThreadScheduledExecutor();
    leaderAnnouncerLatch = new CountDownLatch(1);
    leaderUnannouncerLatch = new CountDownLatch(1);
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, configManager, segmentsMetadataManager, baseView, metadataRuleManager, () -> curator, new NoopServiceEmitter(), scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, null, null, new CoordinatorCustomDutyGroups(ImmutableSet.of()), new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
}
Also used : SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) AtomicReference(java.util.concurrent.atomic.AtomicReference) Duration(org.joda.time.Duration) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) CountDownLatch(java.util.concurrent.CountDownLatch) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) DruidNode(org.apache.druid.server.DruidNode) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) Before(org.junit.Before)
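
In this example the no-op emitter is passed straight into the DruidCoordinator constructor, so any metrics the coordinator emits during the test are discarded. The anonymous NoopServiceAnnouncer subclass is what actually drives the test: its announce and unannounce overrides count down the leadership latches so the test can wait for the coordinator to become, and then stop being, the leader.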

Example 73 with NoopServiceEmitter

Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.

In the class SQLAuditManagerTest, the method testCreateAuditEntryWithSkipNullConfigTrue:

@Test(timeout = 60_000L)
public void testCreateAuditEntryWithSkipNullConfigTrue() {
    ConfigSerde<Map<String, String>> mockConfigSerde = Mockito.mock(ConfigSerde.class);
    SQLAuditManager auditManagerWithSkipNull = new SQLAuditManager(connector, derbyConnectorRule.metadataTablesConfigSupplier(), new NoopServiceEmitter(), mapper, new SQLAuditManagerConfig() {

        @Override
        public boolean isSkipNullField() {
            return true;
        }
    });
    String entry1Key = "test1Key";
    String entry1Type = "test1Type";
    AuditInfo entry1AuditInfo = new AuditInfo("testAuthor", "testComment", "127.0.0.1");
    // Entry 1's payload has a null value for one of its properties
    Map<String, String> entryPayload1WithNull = new HashMap<>();
    entryPayload1WithNull.put("version", "x");
    entryPayload1WithNull.put("something", null);
    auditManagerWithSkipNull.doAudit(entry1Key, entry1Type, entry1AuditInfo, entryPayload1WithNull, mockConfigSerde);
    Mockito.verify(mockConfigSerde).serializeToString(ArgumentMatchers.eq(entryPayload1WithNull), ArgumentMatchers.eq(true));
}
Also used : AuditInfo(org.apache.druid.audit.AuditInfo) HashMap(java.util.HashMap) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
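
Because isSkipNullField() returns true, SQLAuditManager asks the ConfigSerde to serialize with null fields skipped, which is exactly what the Mockito verification asserts via the boolean true passed to serializeToString. For context, a generic Jackson sketch of what "skip null fields" means for a serialized payload (an illustration of the effect only, not Druid's ConfigSerde implementation):

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;

public class SkipNullIllustration {
    // Simple payload with one null property, mirroring entryPayload1WithNull above.
    static class Payload {
        public String version = "x";
        public String something = null;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper skipNulls = new ObjectMapper();
        // NON_NULL omits null-valued properties from the serialized form.
        skipNulls.setSerializationInclusion(JsonInclude.Include.NON_NULL);
        System.out.println(skipNulls.writeValueAsString(new Payload()));  // {"version":"x"}

        ObjectMapper keepNulls = new ObjectMapper();
        System.out.println(keepNulls.writeValueAsString(new Payload()));  // {"version":"x","something":null}
    }
}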

Example 74 with NoopServiceEmitter

Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.

In the class SQLAuditManagerTest, the method testCreateAuditEntryWithPayloadOverSkipPayloadLimit:

@Test(timeout = 60_000L)
public void testCreateAuditEntryWithPayloadOverSkipPayloadLimit() throws IOException {
    int maxPayloadSize = 10;
    SQLAuditManager auditManagerWithMaxPayloadSizeBytes = new SQLAuditManager(connector, derbyConnectorRule.metadataTablesConfigSupplier(), new NoopServiceEmitter(), mapper, new SQLAuditManagerConfig() {

        @Override
        public long getMaxPayloadSizeBytes() {
            return maxPayloadSize;
        }
    });
    String entry1Key = "testKey";
    String entry1Type = "testType";
    AuditInfo entry1AuditInfo = new AuditInfo("testAuthor", "testComment", "127.0.0.1");
    String entry1Payload = "payload audit to store";
    auditManagerWithMaxPayloadSizeBytes.doAudit(entry1Key, entry1Type, entry1AuditInfo, entry1Payload, stringConfigSerde);
    byte[] payload = connector.lookup(derbyConnectorRule.metadataTablesConfigSupplier().get().getAuditTable(), "audit_key", "payload", "testKey");
    AuditEntry dbEntry = mapper.readValue(payload, AuditEntry.class);
    Assert.assertEquals(entry1Key, dbEntry.getKey());
    Assert.assertNotEquals(entry1Payload, dbEntry.getPayload());
    Assert.assertEquals(StringUtils.format(AuditManager.PAYLOAD_SKIP_MSG_FORMAT, maxPayloadSize), dbEntry.getPayload());
    Assert.assertEquals(entry1Type, dbEntry.getType());
    Assert.assertEquals(entry1AuditInfo, dbEntry.getAuditInfo());
}
Also used : AuditInfo(org.apache.druid.audit.AuditInfo) AuditEntry(org.apache.druid.audit.AuditEntry) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) Test(org.junit.Test)
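
Here the payload "payload audit to store" exceeds the 10-byte limit returned by getMaxPayloadSizeBytes(), so the audit manager stores the message built from AuditManager.PAYLOAD_SKIP_MSG_FORMAT in place of the real payload. The assertions confirm that the key, type, and audit info are persisted unchanged while the payload itself is replaced.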

Aggregations

NoopServiceEmitter (org.apache.druid.server.metrics.NoopServiceEmitter): 74
Test (org.junit.Test): 36
Before (org.junit.Before): 21
DefaultGenericQueryMetricsFactory (org.apache.druid.query.DefaultGenericQueryMetricsFactory): 14
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 13
IOException (java.io.IOException): 13
TaskActionClientFactory (org.apache.druid.indexing.common.actions.TaskActionClientFactory): 12
ServerConfig (org.apache.druid.server.initialization.ServerConfig): 12
Period (org.joda.time.Period): 11
DefaultTaskConfig (org.apache.druid.indexing.overlord.config.DefaultTaskConfig): 10
TaskLockConfig (org.apache.druid.indexing.overlord.config.TaskLockConfig): 10
TaskQueueConfig (org.apache.druid.indexing.overlord.config.TaskQueueConfig): 10
QueryInterruptedException (org.apache.druid.query.QueryInterruptedException): 10
ByteArrayInputStream (java.io.ByteArrayInputStream): 9
File (java.io.File): 9
DataSegment (org.apache.druid.timeline.DataSegment): 8
List (java.util.List): 7
CountDownLatch (java.util.concurrent.CountDownLatch): 7
HttpServletResponse (javax.servlet.http.HttpServletResponse): 7
Response (javax.ws.rs.core.Response): 7