
Example 6 with MetadataStorageTablesConfig

Use of org.apache.druid.metadata.MetadataStorageTablesConfig in project hive by apache.

From the class TestDruidStorageHandler, method testCommitMultiInsertOverwriteTable.

@Test
public void testCommitMultiInsertOverwriteTable() throws MetaException, IOException {
    DerbyConnectorTestUtility connector = derbyConnectorRule.getConnector();
    MetadataStorageTablesConfig metadataStorageTablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
    LocalFileSystem localFileSystem = FileSystem.getLocal(config);
    druidStorageHandler.preCreateTable(tableMock);
    Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
    HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
    pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
    DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
    // This creates and publishes the segment that will be overwritten
    List<DataSegment> existingSegments = Collections.singletonList(createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
    DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME, existingSegments, true, config, dataSegmentPusher);
    // Check that there is one datasource with the published segment
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    // Sequence is the following:
    // 1) INSERT with no segments -> Original segment still present in the datasource
    // 2) INSERT OVERWRITE with no segments -> Datasource is empty
    // 3) INSERT OVERWRITE with no segments -> Datasource is empty
    // 4) INSERT with no segments -> Datasource is empty
    // 5) INSERT with one segment -> Datasource has one segment
    // 6) INSERT OVERWRITE with one segment -> Datasource has one segment
    // 7) INSERT with one segment -> Datasource has two segments
    // 8) INSERT OVERWRITE with no segments -> Datasource is empty
    // We start:
    // #1
    druidStorageHandler.commitInsertTable(tableMock, false);
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    Assert.assertEquals(1, getUsedSegmentsList(connector, metadataStorageTablesConfig).size());
    // #2
    druidStorageHandler.commitInsertTable(tableMock, true);
    Assert.assertEquals(0, getUsedSegmentsList(connector, metadataStorageTablesConfig).size());
    // #3
    druidStorageHandler.commitInsertTable(tableMock, true);
    Assert.assertEquals(0, getUsedSegmentsList(connector, metadataStorageTablesConfig).size());
    // #4
    druidStorageHandler.commitInsertTable(tableMock, false);
    Assert.assertEquals(0, getUsedSegmentsList(connector, metadataStorageTablesConfig).size());
    // #5
    DataSegment dataSegment1 = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(180, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
    Path descriptorPath1 = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment1, new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME));
    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, dataSegment1, descriptorPath1);
    druidStorageHandler.commitInsertTable(tableMock, false);
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    Assert.assertEquals(1, getUsedSegmentsList(connector, metadataStorageTablesConfig).size());
    // #6
    DataSegment dataSegment2 = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(200, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
    Path descriptorPath2 = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment2, new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME));
    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, dataSegment2, descriptorPath2);
    druidStorageHandler.commitInsertTable(tableMock, true);
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    Assert.assertEquals(1, getUsedSegmentsList(connector, metadataStorageTablesConfig).size());
    // #7
    DataSegment dataSegment3 = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 200, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
    Path descriptorPath3 = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment3, new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME));
    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, dataSegment3, descriptorPath3);
    druidStorageHandler.commitInsertTable(tableMock, false);
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    Assert.assertEquals(2, getUsedSegmentsList(connector, metadataStorageTablesConfig).size());
    // #8
    druidStorageHandler.commitInsertTable(tableMock, true);
    Assert.assertEquals(0, getUsedSegmentsList(connector, metadataStorageTablesConfig).size());
}
Also used: Path(org.apache.hadoop.fs.Path) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) DataSegmentPusher(org.apache.druid.segment.loading.DataSegmentPusher) HdfsDataSegmentPusher(org.apache.druid.storage.hdfs.HdfsDataSegmentPusher) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) HdfsDataSegmentPusherConfig(org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig) DataSegment(org.apache.druid.timeline.DataSegment) Interval(org.joda.time.Interval) Test(org.junit.Test)
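
Throughout these examples the assertions count segments through a getUsedSegmentsList helper that the snippets do not show. A minimal sketch of such a helper, assuming the JDBI handle exposed by the test connector and the standard Druid segments-table layout (names and details are illustrative, not the test's verbatim code):

private List<DataSegment> getUsedSegmentsList(DerbyConnectorTestUtility connector, MetadataStorageTablesConfig metadataStorageTablesConfig) {
    // Read back every segment row still marked used, oldest first, and
    // deserialize each JSON payload into a DataSegment.
    return connector.getDBI().withHandle(handle -> handle
        .createQuery(String.format(
            "SELECT payload FROM %s WHERE used = true ORDER BY created_date ASC",
            metadataStorageTablesConfig.getSegmentsTable()))
        .map((index, resultSet, context) -> {
            try {
                return DruidStorageHandlerUtils.JSON_MAPPER.readValue(resultSet.getBytes("payload"), DataSegment.class);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .list());
}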

Example 7 with MetadataStorageTablesConfig

Use of org.apache.druid.metadata.MetadataStorageTablesConfig in project hive by apache.

From the class TestDruidStorageHandler, method testInsertIntoAppendOneMorePartition.

@Test
public void testInsertIntoAppendOneMorePartition() throws MetaException, IOException {
    DerbyConnectorTestUtility connector = derbyConnectorRule.getConnector();
    MetadataStorageTablesConfig metadataStorageTablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
    druidStorageHandler.preCreateTable(tableMock);
    LocalFileSystem localFileSystem = FileSystem.getLocal(config);
    Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
    HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
    pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
    DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
    List<DataSegment> existingSegments = Collections.singletonList(createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
    DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME, existingSegments, true, config, dataSegmentPusher);
    DataSegment dataSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0));
    Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment, new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME));
    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, dataSegment, descriptorPath);
    druidStorageHandler.commitInsertTable(tableMock, false);
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    final List<DataSegment> dataSegmentList = getUsedSegmentsList(connector, metadataStorageTablesConfig);
    Assert.assertEquals(2, dataSegmentList.size());
    DataSegment persistedSegment = dataSegmentList.get(1);
    Assert.assertEquals("v0", persistedSegment.getVersion());
    Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
    Assert.assertEquals(1, persistedSegment.getShardSpec().getPartitionNum());
    Path expectedFinalHadoopPath = new Path(dataSegmentPusher.getPathForHadoop(), dataSegmentPusher.makeIndexPathName(persistedSegment, DruidStorageHandlerUtils.INDEX_ZIP));
    Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", expectedFinalHadoopPath.toString()), persistedSegment.getLoadSpec());
    Assert.assertEquals("dummySegmentData", FileUtils.readFileToString(new File(expectedFinalHadoopPath.toUri())));
}
Also used: Path(org.apache.hadoop.fs.Path) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) DataSegmentPusher(org.apache.druid.segment.loading.DataSegmentPusher) HdfsDataSegmentPusher(org.apache.druid.storage.hdfs.HdfsDataSegmentPusher) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) HdfsDataSegmentPusherConfig(org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig) DataSegment(org.apache.druid.timeline.DataSegment) File(java.io.File) Interval(org.joda.time.Interval) Test(org.junit.Test)
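
The staged segment carries the same interval, version, and partition number as the committed one, yet it is persisted as partition 1: appending under LinearShardSpec renumbers the incoming segment past the highest committed partition. A sketch of that allocation rule (illustrative only; the real logic lives inside the storage handler's commit path):

private static LinearShardSpec nextLinearShardSpec(List<DataSegment> committedSegmentsInInterval) {
    // One past the highest partition number already committed for the
    // overlapping interval; an empty interval yields partition 0.
    int maxPartitionNum = committedSegmentsInInterval.stream()
        .mapToInt(segment -> segment.getShardSpec().getPartitionNum())
        .max()
        .orElse(-1);
    return new LinearShardSpec(maxPartitionNum + 1);
}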

Example 8 with MetadataStorageTablesConfig

Use of org.apache.druid.metadata.MetadataStorageTablesConfig in project hive by apache.

From the class TestDruidStorageHandler, method testCommitInsertIntoTable.

@Test
public void testCommitInsertIntoTable() throws MetaException, IOException {
    DerbyConnectorTestUtility connector = derbyConnectorRule.getConnector();
    MetadataStorageTablesConfig metadataStorageTablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
    druidStorageHandler.preCreateTable(tableMock);
    LocalFileSystem localFileSystem = FileSystem.getLocal(config);
    Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
    List<DataSegment> existingSegments = Collections.singletonList(createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1)));
    HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
    pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
    DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
    DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME, existingSegments, true, config, dataSegmentPusher);
    DataSegment dataSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
    Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment, new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME));
    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, dataSegment, descriptorPath);
    druidStorageHandler.commitInsertTable(tableMock, false);
    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(DruidStorageHandlerUtils.getAllDataSourceNames(connector, metadataStorageTablesConfig)).toArray());
    final List<DataSegment> dataSegmentList = getUsedSegmentsList(connector, metadataStorageTablesConfig);
    Assert.assertEquals(2, dataSegmentList.size());
    DataSegment persistedSegment = dataSegmentList.get(1);
    // Insert into appends to old version
    Assert.assertEquals("v0", persistedSegment.getVersion());
    Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
    Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
    Path expectedFinalHadoopPath = new Path(dataSegmentPusher.getPathForHadoop(), dataSegmentPusher.makeIndexPathName(persistedSegment, DruidStorageHandlerUtils.INDEX_ZIP));
    Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", expectedFinalHadoopPath.toString()), persistedSegment.getLoadSpec());
    Assert.assertEquals("dummySegmentData", FileUtils.readFileToString(new File(expectedFinalHadoopPath.toUri())));
}
Also used: Path(org.apache.hadoop.fs.Path) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) DataSegmentPusher(org.apache.druid.segment.loading.DataSegmentPusher) HdfsDataSegmentPusher(org.apache.druid.storage.hdfs.HdfsDataSegmentPusher) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) HdfsDataSegmentPusherConfig(org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig) DataSegment(org.apache.druid.timeline.DataSegment) File(java.io.File) Interval(org.joda.time.Interval) Test(org.junit.Test)
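
The staged descriptor declares version v1, but the persisted segment reports v0 with partition number 2: an INSERT INTO commit folds the new segment into the already-committed chunk instead of opening a new version. A hypothetical helper expressing that rewrite (the handler's actual code is more involved; DataSegment.builder is the standard Druid copy-builder):

private static DataSegment alignWithCommittedChunk(DataSegment staged, DataSegment lastCommitted) {
    // Inherit the committed chunk's version and take the next linear
    // partition number, discarding whatever version staging declared.
    return DataSegment.builder(staged)
        .version(lastCommitted.getVersion())
        .shardSpec(new LinearShardSpec(lastCommitted.getShardSpec().getPartitionNum() + 1))
        .build();
}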

Example 9 with MetadataStorageTablesConfig

Use of org.apache.druid.metadata.MetadataStorageTablesConfig in project hive by apache.

From the class TestDruidStorageHandler, method testCommitInsertIntoWithNonExtendableSegment.

@Test(expected = MetaException.class)
public void testCommitInsertIntoWithNonExtendableSegment() throws MetaException, IOException {
    DerbyConnectorTestUtility connector = derbyConnectorRule.getConnector();
    MetadataStorageTablesConfig metadataStorageTablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
    druidStorageHandler.preCreateTable(tableMock);
    LocalFileSystem localFileSystem = FileSystem.getLocal(config);
    Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
    List<DataSegment> existingSegments = Arrays.asList(
        createSegment(new Path(taskDirPath, "index_old_1.zip").toString(),
            new Interval(100, 150, DateTimeZone.UTC), "v0", NoneShardSpec.instance()),
        createSegment(new Path(taskDirPath, "index_old_2.zip").toString(),
            new Interval(200, 250, DateTimeZone.UTC), "v0", new LinearShardSpec(0)),
        createSegment(new Path(taskDirPath, "index_old_3.zip").toString(),
            new Interval(250, 300, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
    HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
    pusherConfig.setStorageDirectory(taskDirPath.toString());
    DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
    DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME, existingSegments, true, config, dataSegmentPusher);
    // Try appending to non extendable shard spec
    DataSegment conflictingSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(), new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
    Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(conflictingSegment, new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME));
    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, conflictingSegment, descriptorPath);
    druidStorageHandler.commitInsertTable(tableMock, false);
}
Also used: Path(org.apache.hadoop.fs.Path) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) DataSegmentPusher(org.apache.druid.segment.loading.DataSegmentPusher) HdfsDataSegmentPusher(org.apache.druid.storage.hdfs.HdfsDataSegmentPusher) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) HdfsDataSegmentPusherConfig(org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig) DataSegment(org.apache.druid.timeline.DataSegment) Interval(org.joda.time.Interval) Test(org.junit.Test)
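
The committed chunk for the interval 100-150 uses NoneShardSpec, which cannot absorb additional partitions, so the append attempt must fail with MetaException. The guard being exercised looks roughly like this (hypothetical method name and message; only the shard-spec check reflects the documented behavior):

private static void checkAppendable(DataSegment committed) throws MetaException {
    // LinearShardSpec can be extended with more partitions; NoneShardSpec
    // cannot, so an INSERT INTO over the same interval is rejected.
    if (!(committed.getShardSpec() instanceof LinearShardSpec)) {
        throw new MetaException("Cannot append to interval " + committed.getInterval()
            + ": existing segment has a non-extendable shard spec");
    }
}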

Example 10 with MetadataStorageTablesConfig

Use of org.apache.druid.metadata.MetadataStorageTablesConfig in project druid by druid-io.

From the class BasicRoleBasedAuthorizerTest, method setUp.

@Before
public void setUp() {
    TestDerbyConnector connector = derbyConnectorRule.getConnector();
    MetadataStorageTablesConfig tablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
    connector.createConfigTable();
    BasicAttributes userAttrs = new BasicAttributes(true);
    userAttrs.put(new BasicAttribute("sAMAccountName", "druiduser"));
    userAttrs.put(new BasicAttribute("memberOf", "CN=user,OU=Druid,OU=Application,OU=Groupings,DC=corp,DC=apache,DC=org"));
    BasicAttributes adminAttrs = new BasicAttributes(true);
    adminAttrs.put(new BasicAttribute("sAMAccountName", "druidadmin"));
    adminAttrs.put(new BasicAttribute("memberOf", "CN=admin,OU=Platform,OU=Groupings,DC=corp,DC=apache,DC=org"));
    userSearchResult = new SearchResult("CN=1234,OU=Employees,OU=People", null, userAttrs);
    adminSearchResult = new SearchResult("CN=9876,OU=Employees,OU=People", null, adminAttrs);
    updater = new CoordinatorBasicAuthorizerMetadataStorageUpdater(
        new AuthorizerMapper(ImmutableMap.of(
            DB_AUTHORIZER_NAME,
            new BasicRoleBasedAuthorizer(null, DB_AUTHORIZER_NAME, null, null, null, null, null,
                new MetadataStoreRoleProvider(null)),
            LDAP_AUTHORIZER_NAME,
            new BasicRoleBasedAuthorizer(null, LDAP_AUTHORIZER_NAME, null, null, null, null, null,
                new LDAPRoleProvider(null, groupFilters)))),
        connector,
        tablesConfig,
        new BasicAuthCommonCacheConfig(null, null, null, null),
        new ObjectMapper(new SmileFactory()),
        new NoopBasicAuthorizerCacheNotifier(),
        null);
    updater.start();
    authorizer = new BasicRoleBasedAuthorizer(null, DB_AUTHORIZER_NAME, null, null, null, null, null, new MetadataStoreRoleProvider(new MetadataStoragePollingBasicAuthorizerCacheManager(updater)));
    ldapAuthorizer = new BasicRoleBasedAuthorizer(null, LDAP_AUTHORIZER_NAME, null, null, null, null, null, new LDAPRoleProvider(new MetadataStoragePollingBasicAuthorizerCacheManager(updater), groupFilters));
}
Also used: BasicAttribute(javax.naming.directory.BasicAttribute) BasicAttributes(javax.naming.directory.BasicAttributes) CoordinatorBasicAuthorizerMetadataStorageUpdater(org.apache.druid.security.basic.authorization.db.updater.CoordinatorBasicAuthorizerMetadataStorageUpdater) MetadataStoragePollingBasicAuthorizerCacheManager(org.apache.druid.security.basic.authorization.db.cache.MetadataStoragePollingBasicAuthorizerCacheManager) BasicAuthCommonCacheConfig(org.apache.druid.security.basic.BasicAuthCommonCacheConfig) SearchResult(javax.naming.directory.SearchResult) TestDerbyConnector(org.apache.druid.metadata.TestDerbyConnector) LDAPRoleProvider(org.apache.druid.security.basic.authorization.LDAPRoleProvider) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) SmileFactory(com.fasterxml.jackson.dataformat.smile.SmileFactory) AuthorizerMapper(org.apache.druid.server.security.AuthorizerMapper) BasicRoleBasedAuthorizer(org.apache.druid.security.basic.authorization.BasicRoleBasedAuthorizer) MetadataStoreRoleProvider(org.apache.druid.security.basic.authorization.MetadataStoreRoleProvider) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Before(org.junit.Before)
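
Once updater.start() has primed the metadata-backed caches, both authorizers can be exercised directly. A hedged usage sketch (the identity, resource name, and expected outcome are assumptions; the real tests first create users, roles, and permissions through the updater):

AuthenticationResult authenticationResult = new AuthenticationResult("druiduser", LDAP_AUTHORIZER_NAME, null, null);
Access access = ldapAuthorizer.authorize(authenticationResult, new Resource("testResource", ResourceType.DATASOURCE), Action.READ);
// With no roles or permissions mapped yet, the check should be denied.
Assert.assertFalse(access.isAllowed());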

Aggregations

MetadataStorageTablesConfig (org.apache.druid.metadata.MetadataStorageTablesConfig): 15 uses
Test (org.junit.Test): 8 uses
DataSegmentPusher (org.apache.druid.segment.loading.DataSegmentPusher): 7 uses
HdfsDataSegmentPusher (org.apache.druid.storage.hdfs.HdfsDataSegmentPusher): 7 uses
HdfsDataSegmentPusherConfig (org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig): 7 uses
DataSegment (org.apache.druid.timeline.DataSegment): 7 uses
LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec): 7 uses
LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem): 7 uses
Path (org.apache.hadoop.fs.Path): 7 uses
Interval (org.joda.time.Interval): 7 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 5 uses
TestDerbyConnector (org.apache.druid.metadata.TestDerbyConnector): 5 uses
Before (org.junit.Before): 5 uses
SmileFactory (com.fasterxml.jackson.dataformat.smile.SmileFactory): 4 uses
File (java.io.File): 4 uses
BasicAuthCommonCacheConfig (org.apache.druid.security.basic.BasicAuthCommonCacheConfig): 4 uses
Injector (com.google.inject.Injector): 2 uses
BasicHTTPAuthenticator (org.apache.druid.security.basic.authentication.BasicHTTPAuthenticator): 2 uses
CoordinatorBasicAuthenticatorMetadataStorageUpdater (org.apache.druid.security.basic.authentication.db.updater.CoordinatorBasicAuthenticatorMetadataStorageUpdater): 2 uses
BasicRoleBasedAuthorizer (org.apache.druid.security.basic.authorization.BasicRoleBasedAuthorizer): 2 uses