Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.
Class BasicRoleBasedAuthorizerTest, method setUp:
@Before
public void setUp() {
TestDerbyConnector connector = derbyConnectorRule.getConnector();
MetadataStorageTablesConfig tablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
connector.createConfigTable();
BasicAttributes userAttrs = new BasicAttributes(true);
userAttrs.put(new BasicAttribute("sAMAccountName", "druiduser"));
userAttrs.put(new BasicAttribute("memberOf", "CN=user,OU=Druid,OU=Application,OU=Groupings,DC=corp,DC=apache,DC=org"));
BasicAttributes adminAttrs = new BasicAttributes(true);
adminAttrs.put(new BasicAttribute("sAMAccountName", "druidadmin"));
adminAttrs.put(new BasicAttribute("memberOf", "CN=admin,OU=Platform,OU=Groupings,DC=corp,DC=apache,DC=org"));
userSearchResult = new SearchResult("CN=1234,OU=Employees,OU=People", null, userAttrs);
adminSearchResult = new SearchResult("CN=9876,OU=Employees,OU=People", null, adminAttrs);
updater = new CoordinatorBasicAuthorizerMetadataStorageUpdater(
    new AuthorizerMapper(
        ImmutableMap.of(
            DB_AUTHORIZER_NAME,
            new BasicRoleBasedAuthorizer(null, DB_AUTHORIZER_NAME, null, null, null, null, null, new MetadataStoreRoleProvider(null)),
            LDAP_AUTHORIZER_NAME,
            new BasicRoleBasedAuthorizer(null, LDAP_AUTHORIZER_NAME, null, null, null, null, null, new LDAPRoleProvider(null, groupFilters))
        )
    ),
    connector,
    tablesConfig,
    new BasicAuthCommonCacheConfig(null, null, null, null),
    new ObjectMapper(new SmileFactory()),
    new NoopBasicAuthorizerCacheNotifier(),
    null
);
updater.start();
authorizer = new BasicRoleBasedAuthorizer(null, DB_AUTHORIZER_NAME, null, null, null, null, null, new MetadataStoreRoleProvider(new MetadataStoragePollingBasicAuthorizerCacheManager(updater)));
ldapAuthorizer = new BasicRoleBasedAuthorizer(null, LDAP_AUTHORIZER_NAME, null, null, null, null, null, new LDAPRoleProvider(new MetadataStoragePollingBasicAuthorizerCacheManager(updater), groupFilters));
}
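A typical assertion built on this setup seeds a user, role, and permission through the metadata-storage updater and then checks the authorizer's decision. The following is a minimal sketch only: the user, role, and resource names are hypothetical, and it assumes the createUser/createRole/assignUserRole/setPermissions methods on CoordinatorBasicAuthorizerMetadataStorageUpdater and the authorize(AuthenticationResult, Resource, Action) signature on BasicRoleBasedAuthorizer.
// Hypothetical follow-up to setUp(); names are illustrative, imports omitted as in the snippets above.
updater.createUser(DB_AUTHORIZER_NAME, "druid");
updater.createRole(DB_AUTHORIZER_NAME, "druidRole");
updater.assignUserRole(DB_AUTHORIZER_NAME, "druid", "druidRole");
updater.setPermissions(
    DB_AUTHORIZER_NAME,
    "druidRole",
    Collections.singletonList(new ResourceAction(new Resource("testResource", ResourceType.DATASOURCE), Action.WRITE))
);
AuthenticationResult authenticationResult = new AuthenticationResult("druid", DB_AUTHORIZER_NAME, null, null);
Access access = authorizer.authorize(
    authenticationResult,
    new Resource("testResource", ResourceType.DATASOURCE),
    Action.WRITE
);
// The permission granted above should allow the write once the polling cache has picked up the new role map.
Assert.assertTrue(access.isAllowed());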
Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.
Class CoordinatorBasicAuthorizerResourceTest, method setUp:
@Before
public void setUp() {
connector = derbyConnectorRule.getConnector();
tablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
connector.createConfigTable();
AuthorizerMapper authorizerMapper = new AuthorizerMapper(
    ImmutableMap.of(
        AUTHORIZER_NAME,
        new BasicRoleBasedAuthorizer(null, AUTHORIZER_NAME, null, null, null, null, null, null),
        AUTHORIZER_NAME2,
        new BasicRoleBasedAuthorizer(null, AUTHORIZER_NAME2, null, null, null, null, null, null),
        AUTHORIZER_NAME3,
        new BasicRoleBasedAuthorizer(null, AUTHORIZER_NAME3, null, null, "adminGroupMapping", null, null, null)
    )
);
storageUpdater = new CoordinatorBasicAuthorizerMetadataStorageUpdater(
    authorizerMapper,
    connector,
    tablesConfig,
    new BasicAuthCommonCacheConfig(null, null, null, null),
    new ObjectMapper(new SmileFactory()),
    new NoopBasicAuthorizerCacheNotifier(),
    null
);
resource = new BasicAuthorizerResource(
    new CoordinatorBasicAuthorizerResourceHandler(storageUpdater, authorizerMapper, new ObjectMapper(new SmileFactory())),
    authValidator
);
storageUpdater.start();
}
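With the resource wired to the storage updater, a test can invoke the HTTP handlers directly. A minimal sketch, assuming createRole and getAllRoles methods on BasicAuthorizerResource that accept an HttpServletRequest, and a bare mock request (the role name is hypothetical):
// Hypothetical usage; the request is an EasyMock stub and the role name is illustrative.
HttpServletRequest req = EasyMock.createMock(HttpServletRequest.class);
Response response = resource.createRole(req, AUTHORIZER_NAME, "testRole");
// A 200 status indicates the role was written through the storage updater.
Assert.assertEquals(200, response.getStatus());
response = resource.getAllRoles(req, AUTHORIZER_NAME);
Assert.assertEquals(200, response.getStatus());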
Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.
Class CoordinatorBasicAuthorizerMetadataStorageUpdaterTest, method setUp:
@Before
public void setUp() {
objectMapper = new ObjectMapper(new SmileFactory());
TestDerbyConnector connector = derbyConnectorRule.getConnector();
MetadataStorageTablesConfig tablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
connector.createConfigTable();
updater = new CoordinatorBasicAuthorizerMetadataStorageUpdater(
    new AuthorizerMapper(
        ImmutableMap.of(
            AUTHORIZER_NAME,
            new BasicRoleBasedAuthorizer(null, AUTHORIZER_NAME, null, null, null, null, null, null)
        )
    ),
    connector,
    tablesConfig,
    new BasicAuthCommonCacheConfig(null, null, null, null),
    objectMapper,
    new NoopBasicAuthorizerCacheNotifier(),
    null
);
updater.start();
}
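A test against this updater typically writes an entry and then decodes the Smile-encoded snapshot it maintains. A minimal sketch, assuming createUser and getCurrentUserMapBytes on the updater and BasicAuthUtils.deserializeAuthorizerUserMap for decoding; the user name is hypothetical:
// Hypothetical follow-up to setUp(); the user name is illustrative.
updater.createUser(AUTHORIZER_NAME, "druid");
Map<String, BasicAuthorizerUser> userMap = BasicAuthUtils.deserializeAuthorizerUserMap(
    objectMapper,
    updater.getCurrentUserMapBytes(AUTHORIZER_NAME)
);
// The newly created user should appear in the deserialized snapshot.
Assert.assertTrue(userMap.containsKey("druid"));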
Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.
Class AbstractParallelIndexSupervisorTaskTest, method prepareObjectMapper:
public void prepareObjectMapper(ObjectMapper objectMapper, IndexIO indexIO) {
final TaskConfig taskConfig = new TaskConfig(null, null, null, null, null, false, null, null, null, false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name());
objectMapper.setInjectableValues(
    new InjectableValues.Std()
        .addValue(ExprMacroTable.class, LookupEnabledTestExprMacroTable.INSTANCE)
        .addValue(IndexIO.class, indexIO)
        .addValue(ObjectMapper.class, objectMapper)
        .addValue(ChatHandlerProvider.class, new NoopChatHandlerProvider())
        .addValue(AuthConfig.class, new AuthConfig())
        .addValue(AuthorizerMapper.class, null)
        .addValue(RowIngestionMetersFactory.class, new DropwizardRowIngestionMetersFactory())
        .addValue(DataSegment.PruneSpecsHolder.class, DataSegment.PruneSpecsHolder.DEFAULT)
        // AuthorizerMapper is registered twice; this later, empty mapper replaces the null value added above.
        .addValue(AuthorizerMapper.class, new AuthorizerMapper(ImmutableMap.of()))
        .addValue(AppenderatorsManager.class, TestUtils.APPENDERATORS_MANAGER)
        .addValue(LocalDataSegmentPuller.class, new LocalDataSegmentPuller())
        .addValue(CoordinatorClient.class, coordinatorClient)
        .addValue(SegmentCacheManagerFactory.class, new SegmentCacheManagerFactory(objectMapper))
        .addValue(RetryPolicyFactory.class, new RetryPolicyFactory(new RetryPolicyConfig()))
        .addValue(TaskConfig.class, taskConfig)
);
objectMapper.registerSubtypes(
    new NamedType(ParallelIndexSupervisorTask.class, ParallelIndexSupervisorTask.TYPE),
    new NamedType(CompactionTask.CompactionTuningConfig.class, CompactionTask.CompactionTuningConfig.TYPE),
    new NamedType(SinglePhaseSubTask.class, SinglePhaseSubTask.TYPE),
    new NamedType(PartialHashSegmentGenerateTask.class, PartialHashSegmentGenerateTask.TYPE),
    new NamedType(PartialRangeSegmentGenerateTask.class, PartialRangeSegmentGenerateTask.TYPE),
    new NamedType(PartialGenericSegmentMergeTask.class, PartialGenericSegmentMergeTask.TYPE),
    new NamedType(PartialDimensionDistributionTask.class, PartialDimensionDistributionTask.TYPE),
    new NamedType(PartialDimensionCardinalityTask.class, PartialDimensionCardinalityTask.TYPE)
);
}
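With the injectables and subtypes registered, the mapper can round-trip parallel-indexing task specs; the AuthorizerMapper injectable (an empty mapper here) is available to any task fields annotated with @JacksonInject AuthorizerMapper during deserialization. A minimal sketch, assuming a ParallelIndexSupervisorTask instance named task is built elsewhere in the test:
// Hypothetical round-trip check; `task` is assumed to be constructed elsewhere in the test.
byte[] bytes = objectMapper.writeValueAsBytes(task);
Task deserialized = objectMapper.readValue(bytes, Task.class);
// Task equality in these tests is usually checked by id.
Assert.assertEquals(task.getId(), deserialized.getId());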
Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.
Class AppenderatorDriverRealtimeIndexTask, method run:
@Override
public TaskStatus run(final TaskToolbox toolbox) {
runThread = Thread.currentThread();
authorizerMapper = toolbox.getAuthorizerMapper();
rowIngestionMeters = toolbox.getRowIngestionMetersFactory().createRowIngestionMeters();
parseExceptionHandler = new ParseExceptionHandler(rowIngestionMeters, spec.getTuningConfig().isLogParseExceptions(), spec.getTuningConfig().getMaxParseExceptions(), spec.getTuningConfig().getMaxSavedParseExceptions());
setupTimeoutAlert();
DataSchema dataSchema = spec.getDataSchema();
RealtimeAppenderatorTuningConfig tuningConfig = spec.getTuningConfig().withBasePersistDirectory(toolbox.getPersistDir());
final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema, new RealtimeIOConfig(null, null), null);
final TaskRealtimeMetricsMonitor metricsMonitor = TaskRealtimeMetricsMonitorBuilder.build(this, fireDepartmentForMetrics, rowIngestionMeters);
this.metrics = fireDepartmentForMetrics.getMetrics();
final Supplier<Committer> committerSupplier = Committers.nilSupplier();
DiscoveryDruidNode discoveryDruidNode = createDiscoveryDruidNode(toolbox);
appenderator = newAppenderator(dataSchema, tuningConfig, metrics, toolbox);
TaskLockType lockType = getContextValue(Tasks.USE_SHARED_LOCK, false) ? TaskLockType.SHARED : TaskLockType.EXCLUSIVE;
StreamAppenderatorDriver driver = newDriver(dataSchema, appenderator, toolbox, metrics, lockType);
try {
log.debug("Found chat handler of class[%s]", toolbox.getChatHandlerProvider().getClass().getName());
toolbox.getChatHandlerProvider().register(getId(), this, false);
if (toolbox.getAppenderatorsManager().shouldTaskMakeNodeAnnouncements()) {
toolbox.getDataSegmentServerAnnouncer().announce();
toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode);
}
driver.startJob(segmentId -> {
try {
if (lockGranularity == LockGranularity.SEGMENT) {
return toolbox.getTaskActionClient().submit(new SegmentLockAcquireAction(TaskLockType.EXCLUSIVE, segmentId.getInterval(), segmentId.getVersion(), segmentId.getShardSpec().getPartitionNum(), 1000L)).isOk();
} else {
final TaskLock lock = toolbox.getTaskActionClient().submit(new TimeChunkLockAcquireAction(TaskLockType.EXCLUSIVE, segmentId.getInterval(), 1000L));
if (lock == null) {
return false;
}
if (lock.isRevoked()) {
throw new ISE(StringUtils.format("Lock for interval [%s] was revoked.", segmentId.getInterval()));
}
return true;
}
} catch (IOException e) {
throw new RuntimeException(e);
}
});
// Set up metrics emission
toolbox.addMonitor(metricsMonitor);
// Delay firehose connection to avoid claiming input resources while the plumber is starting up.
final FirehoseFactory firehoseFactory = spec.getIOConfig().getFirehoseFactory();
final boolean firehoseDrainableByClosing = isFirehoseDrainableByClosing(firehoseFactory);
int sequenceNumber = 0;
String sequenceName = makeSequenceName(getId(), sequenceNumber);
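// Publisher used below: appends the new segments transactionally through the task action client;
// stream ingestion must never overwrite or drop existing segments, hence the checks on the first two arguments.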
final TransactionalSegmentPublisher publisher = (mustBeNullOrEmptyOverwriteSegments, mustBeNullOrEmptyDropSegments, segments, commitMetadata) -> {
if (mustBeNullOrEmptyOverwriteSegments != null && !mustBeNullOrEmptyOverwriteSegments.isEmpty()) {
throw new ISE("Stream ingestion task unexpectedly attempted to overwrite segments: %s", SegmentUtils.commaSeparatedIdentifiers(mustBeNullOrEmptyOverwriteSegments));
}
if (mustBeNullOrEmptyDropSegments != null && !mustBeNullOrEmptyDropSegments.isEmpty()) {
throw new ISE("Stream ingestion task unexpectedly attempted to drop segments: %s", SegmentUtils.commaSeparatedIdentifiers(mustBeNullOrEmptyDropSegments));
}
final SegmentTransactionalInsertAction action = SegmentTransactionalInsertAction.appendAction(segments, null, null);
return toolbox.getTaskActionClient().submit(action);
};
// Skip connecting firehose if we've been stopped before we got started.
synchronized (this) {
if (!gracefullyStopped) {
firehose = firehoseFactory.connect(Preconditions.checkNotNull(spec.getDataSchema().getParser(), "inputRowParser"), toolbox.getIndexingTmpDir());
}
}
ingestionState = IngestionState.BUILD_SEGMENTS;
// Time to read data!
while (!gracefullyStopped && firehoseDrainableByClosing && firehose.hasMore()) {
try {
InputRow inputRow = firehose.nextRow();
if (inputRow == null) {
log.debug("Discarded null row, considering thrownAway.");
rowIngestionMeters.incrementThrownAway();
} else {
AppenderatorDriverAddResult addResult = driver.add(inputRow, sequenceName, committerSupplier);
if (addResult.isOk()) {
final boolean isPushRequired = addResult.isPushRequired(tuningConfig.getPartitionsSpec().getMaxRowsPerSegment(), tuningConfig.getPartitionsSpec().getMaxTotalRowsOr(DynamicPartitionsSpec.DEFAULT_MAX_TOTAL_ROWS));
if (isPushRequired) {
publishSegments(driver, publisher, committerSupplier, sequenceName);
sequenceNumber++;
sequenceName = makeSequenceName(getId(), sequenceNumber);
}
} else {
// If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
throw new ISE("Could not allocate segment for row with timestamp[%s]", inputRow.getTimestamp());
}
}
} catch (ParseException e) {
handleParseException(e);
}
}
ingestionState = IngestionState.COMPLETED;
if (!gracefullyStopped) {
synchronized (this) {
if (gracefullyStopped) {
// Someone called stopGracefully after we checked the flag. That's okay, just stop now.
log.info("Gracefully stopping.");
} else {
finishingJob = true;
}
}
if (finishingJob) {
log.info("Finishing job...");
// Publish any remaining segments
publishSegments(driver, publisher, committerSupplier, sequenceName);
waitForSegmentPublishAndHandoff(tuningConfig.getPublishAndHandoffTimeout());
}
} else if (firehose != null) {
log.info("Task was gracefully stopped, will persist data before exiting");
persistAndWait(driver, committerSupplier.get());
}
} catch (Throwable e) {
log.makeAlert(e, "Exception aborted realtime processing[%s]", dataSchema.getDataSource()).emit();
errorMsg = Throwables.getStackTraceAsString(e);
toolbox.getTaskReportFileWriter().write(getId(), getTaskCompletionReports());
return TaskStatus.failure(getId(), errorMsg);
} finally {
toolbox.getChatHandlerProvider().unregister(getId());
CloseableUtils.closeAndSuppressExceptions(firehose, e -> log.warn("Failed to close Firehose"));
appenderator.close();
CloseableUtils.closeAndSuppressExceptions(driver, e -> log.warn("Failed to close AppenderatorDriver"));
toolbox.removeMonitor(metricsMonitor);
if (toolbox.getAppenderatorsManager().shouldTaskMakeNodeAnnouncements()) {
toolbox.getDataSegmentServerAnnouncer().unannounce();
toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode);
}
}
log.info("Job done!");
toolbox.getTaskReportFileWriter().write(getId(), getTaskCompletionReports());
return TaskStatus.success(getId());
}