Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class SegmentLocalCacheManagerConcurrencyTest, the setUp method:
@Before
public void setUp() throws Exception {
  EmittingLogger.registerEmitter(new NoopServiceEmitter());
  localSegmentCacheFolder = tmpFolder.newFolder("segment_cache_folder");
  final List<StorageLocationConfig> locations = new ArrayList<>();
  // Each segment is 1000 bytes, so this cache location can hold at most 2 segments.
  final StorageLocationConfig locationConfig = new StorageLocationConfig(localSegmentCacheFolder, 2000L, null);
  locations.add(locationConfig);
  manager = new SegmentLocalCacheManager(new SegmentLoaderConfig().withLocations(locations), jsonMapper);
  executorService = Execs.multiThreaded(4, "segment-loader-local-cache-manager-concurrency-test-%d");
}
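A minimal sketch of how this setup is typically exercised, assuming a segmentsToLoad list of DataSegment objects and a getSegmentFiles(DataSegment) method on the manager (the method name follows Druid's SegmentCacheManager interface and may vary by version); it is not part of the original test:

  // Hypothetical usage sketch: several threads request segments through the shared manager at once.
  final List<Future<File>> futures = new ArrayList<>();
  for (DataSegment segment : segmentsToLoad) {
    // getSegmentFiles pulls the segment into the local cache location (assumption).
    futures.add(executorService.submit(() -> manager.getSegmentFiles(segment)));
  }
  for (Future<File> future : futures) {
    future.get(); // an ExecutionException here would indicate the 2000-byte location ran out of space
  }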
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class CuratorDruidCoordinatorTest, the setUp method:
@Before
public void setUp() throws Exception {
  segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);
  dataSourcesSnapshot = EasyMock.createNiceMock(DataSourcesSnapshot.class);
  coordinatorRuntimeParams = EasyMock.createNiceMock(DruidCoordinatorRuntimeParams.class);
  metadataRuleManager = EasyMock.createNiceMock(MetadataRuleManager.class);
  configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
  EasyMock.expect(configManager.watch(
      EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject()
  )).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
  EasyMock.expect(configManager.watch(
      EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY), EasyMock.anyObject(Class.class), EasyMock.anyObject()
  )).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
  EasyMock.replay(configManager);
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  curator.create().creatingParentsIfNeeded().forPath(SEGPATH);
  curator.create().creatingParentsIfNeeded().forPath(SOURCE_LOAD_PATH);
  curator.create().creatingParentsIfNeeded().forPath(DESTINATION_LOAD_PATH);
  objectMapper = new DefaultObjectMapper();
  druidCoordinatorConfig = new TestDruidCoordinatorConfig(
      new Duration(COORDINATOR_START_DELAY), new Duration(COORDINATOR_PERIOD), null, null, null,
      new Duration(COORDINATOR_PERIOD), null, null, null, null, null, null, null, null, null, null,
      10, new Duration("PT0s")
  );
  sourceLoadQueueChildrenCache = new PathChildrenCache(
      curator, SOURCE_LOAD_PATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_src-%d")
  );
  destinationLoadQueueChildrenCache = new PathChildrenCache(
      curator, DESTINATION_LOAD_PATH, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_dest-%d")
  );
  sourceLoadQueuePeon = new CuratorLoadQueuePeon(curator, SOURCE_LOAD_PATH, objectMapper, peonExec, callbackExec, druidCoordinatorConfig);
  destinationLoadQueuePeon = new CuratorLoadQueuePeon(curator, DESTINATION_LOAD_PATH, objectMapper, peonExec, callbackExec, druidCoordinatorConfig);
  druidNode = new DruidNode("hey", "what", false, 1234, null, true, false);
  loadManagementPeons = new ConcurrentHashMap<>();
  scheduledExecutorFactory = (corePoolSize, nameFormat) -> Executors.newSingleThreadScheduledExecutor();
  leaderAnnouncerLatch = new CountDownLatch(1);
  leaderUnannouncerLatch = new CountDownLatch(1);
  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager,
      segmentsMetadataManager,
      baseView,
      metadataRuleManager,
      () -> curator,
      new NoopServiceEmitter(),
      scheduledExecutorFactory,
      null,
      null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      null,
      null,
      new CoordinatorCustomDutyGroups(ImmutableSet.of()),
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
}
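The two latches and the NoopServiceAnnouncer overrides above pair up in the test body. A hedged sketch of that handshake, assuming start()/stop() lifecycle calls on the coordinator (not taken from the original setUp):

  // Hypothetical test-body fragment illustrating the latch handshake wired up above.
  coordinator.start();            // assumed lifecycle call
  leaderAnnouncerLatch.await();   // released by announce(DruidNode): this coordinator is now leader
  // ... exercise segment movement between sourceLoadQueuePeon and destinationLoadQueuePeon ...
  coordinator.stop();             // assumed lifecycle call
  leaderUnannouncerLatch.await(); // released by unannounce(DruidNode): leadership given up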
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class SQLAuditManagerTest, the testCreateAuditEntryWithSkipNullConfigTrue method:
@Test(timeout = 60_000L)
public void testCreateAuditEntryWithSkipNullConfigTrue() {
  ConfigSerde<Map<String, String>> mockConfigSerde = Mockito.mock(ConfigSerde.class);
  SQLAuditManager auditManagerWithSkipNull = new SQLAuditManager(
      connector,
      derbyConnectorRule.metadataTablesConfigSupplier(),
      new NoopServiceEmitter(),
      mapper,
      new SQLAuditManagerConfig() {
        @Override
        public boolean isSkipNullField() {
          return true;
        }
      }
  );
  String entry1Key = "test1Key";
  String entry1Type = "test1Type";
  AuditInfo entry1AuditInfo = new AuditInfo("testAuthor", "testComment", "127.0.0.1");
  // The payload for entry 1 has a null value for one of its properties.
  Map<String, String> entryPayload1WithNull = new HashMap<>();
  entryPayload1WithNull.put("version", "x");
  entryPayload1WithNull.put("something", null);
  auditManagerWithSkipNull.doAudit(entry1Key, entry1Type, entry1AuditInfo, entryPayload1WithNull, mockConfigSerde);
  Mockito.verify(mockConfigSerde).serializeToString(ArgumentMatchers.eq(entryPayload1WithNull), ArgumentMatchers.eq(true));
}
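For orientation, a hedged illustration of what the verified skipNull flag usually means for a map payload; this is plain Jackson, not Druid's actual ConfigSerde implementation:

  // Illustration only: one way to build a "skip null" mapper for map payloads.
  // (writeValueAsString throws JsonProcessingException; handle or declare it.)
  ObjectMapper skipNullMapper = new ObjectMapper()
      .configure(SerializationFeature.WRITE_NULL_MAP_VALUES, false);
  String json = skipNullMapper.writeValueAsString(entryPayload1WithNull);
  // json is {"version":"x"} -- the "something" entry with its null value is dropped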
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class SQLAuditManagerTest, the testCreateAuditEntryWithPayloadOverSkipPayloadLimit method:
@Test(timeout = 60_000L)
public void testCreateAuditEntryWithPayloadOverSkipPayloadLimit() throws IOException {
  int maxPayloadSize = 10;
  SQLAuditManager auditManagerWithMaxPayloadSizeBytes = new SQLAuditManager(
      connector,
      derbyConnectorRule.metadataTablesConfigSupplier(),
      new NoopServiceEmitter(),
      mapper,
      new SQLAuditManagerConfig() {
        @Override
        public long getMaxPayloadSizeBytes() {
          return maxPayloadSize;
        }
      }
  );
  String entry1Key = "testKey";
  String entry1Type = "testType";
  AuditInfo entry1AuditInfo = new AuditInfo("testAuthor", "testComment", "127.0.0.1");
  String entry1Payload = "payload audit to store";
  auditManagerWithMaxPayloadSizeBytes.doAudit(entry1Key, entry1Type, entry1AuditInfo, entry1Payload, stringConfigSerde);
  byte[] payload = connector.lookup(
      derbyConnectorRule.metadataTablesConfigSupplier().get().getAuditTable(), "audit_key", "payload", "testKey"
  );
  AuditEntry dbEntry = mapper.readValue(payload, AuditEntry.class);
  Assert.assertEquals(entry1Key, dbEntry.getKey());
  // The stored payload is not the original payload ...
  Assert.assertNotEquals(entry1Payload, dbEntry.getPayload());
  // ... but the skip message written for payloads over the size limit.
  Assert.assertEquals(StringUtils.format(AuditManager.PAYLOAD_SKIP_MSG_FORMAT, maxPayloadSize), dbEntry.getPayload());
  Assert.assertEquals(entry1Type, dbEntry.getType());
  Assert.assertEquals(entry1AuditInfo, dbEntry.getAuditInfo());
}
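The last three assertions depend on the audit manager swapping an oversized payload for a fixed skip message. A hedged sketch of that size check (an assumption about the logic, not the actual SQLAuditManager code), using the same StringUtils helpers the test already calls:

  // Illustrative decision only, not taken from SQLAuditManager.
  String serialized = "payload audit to store"; // 22 bytes in UTF-8
  String stored = StringUtils.toUtf8(serialized).length <= maxPayloadSize
      ? serialized
      : StringUtils.format(AuditManager.PAYLOAD_SKIP_MSG_FORMAT, maxPayloadSize);
  // Here 22 > 10, so "stored" is the skip message the test reads back from the audit table.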