Example 11 with AuthorizerMapper

Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.

In class SeekableStreamIndexTaskRunner, method runInternal:

private TaskStatus runInternal(TaskToolbox toolbox) throws Exception {
    startTime = DateTimes.nowUtc();
    status = Status.STARTING;
    setToolbox(toolbox);
    authorizerMapper = toolbox.getAuthorizerMapper();
    rowIngestionMeters = toolbox.getRowIngestionMetersFactory().createRowIngestionMeters();
    parseExceptionHandler = new ParseExceptionHandler(rowIngestionMeters, tuningConfig.isLogParseExceptions(), tuningConfig.getMaxParseExceptions(), tuningConfig.getMaxSavedParseExceptions());
    // Now we can initialize the StreamChunkParser with the given toolbox.
    final StreamChunkParser parser = new StreamChunkParser<RecordType>(this.parser, inputFormat, inputRowSchema, task.getDataSchema().getTransformSpec(), toolbox.getIndexingTmpDir(), row -> row != null && task.withinMinMaxRecordTime(row), rowIngestionMeters, parseExceptionHandler);
    initializeSequences();
    log.debug("Found chat handler of class[%s]", toolbox.getChatHandlerProvider().getClass().getName());
    toolbox.getChatHandlerProvider().register(task.getId(), this, false);
    runThread = Thread.currentThread();
    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(task.getDataSchema(), new RealtimeIOConfig(null, null), null);
    this.fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.addMonitor(TaskRealtimeMetricsMonitorBuilder.build(task, fireDepartmentForMetrics, rowIngestionMeters));
    final String lookupTier = task.getContextValue(RealtimeIndexTask.CTX_KEY_LOOKUP_TIER);
    final LookupNodeService lookupNodeService = lookupTier == null ? toolbox.getLookupNodeService() : new LookupNodeService(lookupTier);
    final DiscoveryDruidNode discoveryDruidNode = new DiscoveryDruidNode(toolbox.getDruidNode(), NodeRole.PEON, ImmutableMap.of(toolbox.getDataNodeService().getName(), toolbox.getDataNodeService(), lookupNodeService.getName(), lookupNodeService));
    Throwable caughtExceptionOuter = null;
    // milliseconds waited for created segments to be handed off
    long handoffWaitMs = 0L;
    try (final RecordSupplier<PartitionIdType, SequenceOffsetType, RecordType> recordSupplier = task.newTaskRecordSupplier()) {
        if (toolbox.getAppenderatorsManager().shouldTaskMakeNodeAnnouncements()) {
            toolbox.getDataSegmentServerAnnouncer().announce();
            toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode);
        }
        appenderator = task.newAppenderator(toolbox, fireDepartmentMetrics, rowIngestionMeters, parseExceptionHandler);
        driver = task.newDriver(appenderator, toolbox, fireDepartmentMetrics);
        // Start up, set up initial sequences.
        final Object restoredMetadata = driver.startJob(segmentId -> {
            try {
                if (lockGranularityToUse == LockGranularity.SEGMENT) {
                    return toolbox.getTaskActionClient().submit(new SegmentLockAcquireAction(TaskLockType.EXCLUSIVE, segmentId.getInterval(), segmentId.getVersion(), segmentId.getShardSpec().getPartitionNum(), 1000L)).isOk();
                } else {
                    final TaskLock lock = toolbox.getTaskActionClient().submit(new TimeChunkLockAcquireAction(TaskLockType.EXCLUSIVE, segmentId.getInterval(), 1000L));
                    if (lock == null) {
                        return false;
                    }
                    if (lock.isRevoked()) {
                        throw new ISE(StringUtils.format("Lock for interval [%s] was revoked.", segmentId.getInterval()));
                    }
                    return true;
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        if (restoredMetadata == null) {
            // no persist has happened so far
            // so either this is a brand new task or replacement of a failed task
            Preconditions.checkState(sequences.get(0).startOffsets.entrySet().stream().allMatch(partitionOffsetEntry -> createSequenceNumber(partitionOffsetEntry.getValue()).compareTo(createSequenceNumber(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().get(partitionOffsetEntry.getKey()))) >= 0), "Sequence sequences are not compatible with start sequences of task");
            currOffsets.putAll(sequences.get(0).startOffsets);
        } else {
            @SuppressWarnings("unchecked") final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final SeekableStreamEndSequenceNumbers<PartitionIdType, SequenceOffsetType> restoredNextPartitions = deserializePartitionsFromMetadata(toolbox.getJsonMapper(), restoredMetadataMap.get(METADATA_NEXT_PARTITIONS));
            currOffsets.putAll(restoredNextPartitions.getPartitionSequenceNumberMap());
            // Sanity checks.
            if (!restoredNextPartitions.getStream().equals(ioConfig.getStartSequenceNumbers().getStream())) {
                throw new ISE("Restored stream[%s] but expected stream[%s]", restoredNextPartitions.getStream(), ioConfig.getStartSequenceNumbers().getStream());
            }
            if (!currOffsets.keySet().equals(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet())) {
                throw new ISE("Restored partitions[%s] but expected partitions[%s]", currOffsets.keySet(), ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet());
            }
            // sequences size can be 0 only when all sequences got published and the task stopped before it could finish,
            // which is super rare
            if (sequences.size() == 0 || getLastSequenceMetadata().isCheckpointed()) {
                this.endOffsets.putAll(sequences.size() == 0 ? currOffsets : getLastSequenceMetadata().getEndOffsets());
            }
        }
        log.info("Initialized sequences: %s", sequences.stream().map(SequenceMetadata::toString).collect(Collectors.joining(", ")));
        // Filter out partitions with END_OF_SHARD markers since these partitions have already been fully read. This
        // should have been done by the supervisor already so this is defensive.
        int numPreFilterPartitions = currOffsets.size();
        if (currOffsets.entrySet().removeIf(x -> isEndOfShard(x.getValue()))) {
            log.info("Removed [%d] partitions from assignment which have already been closed.", numPreFilterPartitions - currOffsets.size());
        }
        // When end offsets are exclusive, we never skip the start record.
        if (!isEndOffsetExclusive()) {
            for (Map.Entry<PartitionIdType, SequenceOffsetType> entry : currOffsets.entrySet()) {
                final boolean isAtStart = entry.getValue().equals(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().get(entry.getKey()));
                if (!isAtStart || ioConfig.getStartSequenceNumbers().getExclusivePartitions().contains(entry.getKey())) {
                    lastReadOffsets.put(entry.getKey(), entry.getValue());
                }
            }
        }
        // Set up committer.
        final Supplier<Committer> committerSupplier = () -> {
            final Map<PartitionIdType, SequenceOffsetType> snapshot = ImmutableMap.copyOf(currOffsets);
            lastPersistedOffsets.clear();
            lastPersistedOffsets.putAll(snapshot);
            return new Committer() {

                @Override
                public Object getMetadata() {
                    return ImmutableMap.of(METADATA_NEXT_PARTITIONS, new SeekableStreamEndSequenceNumbers<>(stream, snapshot));
                }

                @Override
                public void run() {
                    // Do nothing.
                }
            };
        };
        // restart publishing of sequences (if any)
        maybePersistAndPublishSequences(committerSupplier);
        Set<StreamPartition<PartitionIdType>> assignment = assignPartitions(recordSupplier);
        possiblyResetDataSourceMetadata(toolbox, recordSupplier, assignment);
        seekToStartingSequence(recordSupplier, assignment);
        ingestionState = IngestionState.BUILD_SEGMENTS;
        // Main loop.
        // Could eventually support leader/follower mode (for keeping replicas more in sync)
        boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        Throwable caughtExceptionInner = null;
        try {
            while (stillReading) {
                if (possiblyPause()) {
                    // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
                    // partitions upon resuming. Don't call "seekToStartingSequence" after "assignPartitions", because there's
                    // no need to re-seek here. All we're going to be doing is dropping partitions.
                    assignment = assignPartitions(recordSupplier);
                    possiblyResetDataSourceMetadata(toolbox, recordSupplier, assignment);
                    if (assignment.isEmpty()) {
                        log.debug("All partitions have been fully read.");
                        publishOnStop.set(true);
                        stopRequested.set(true);
                    }
                }
                // if stop is requested or task's end sequence is set by call to setEndOffsets method with finish set to true
                if (stopRequested.get() || sequences.size() == 0 || getLastSequenceMetadata().isCheckpointed()) {
                    status = Status.PUBLISHING;
                }
                if (stopRequested.get()) {
                    break;
                }
                if (backgroundThreadException != null) {
                    throw new RuntimeException(backgroundThreadException);
                }
                checkPublishAndHandoffFailure();
                maybePersistAndPublishSequences(committerSupplier);
                // calling getRecord() ensures that exceptions specific to kafka/kinesis like OffsetOutOfRangeException
                // are handled in the subclasses.
                List<OrderedPartitionableRecord<PartitionIdType, SequenceOffsetType, RecordType>> records = getRecords(recordSupplier, toolbox);
                // note: getRecords() also updates assignment
                stillReading = !assignment.isEmpty();
                SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceToCheckpoint = null;
                for (OrderedPartitionableRecord<PartitionIdType, SequenceOffsetType, RecordType> record : records) {
                    final boolean shouldProcess = verifyRecordInRange(record.getPartitionId(), record.getSequenceNumber());
                    log.trace("Got stream[%s] partition[%s] sequenceNumber[%s], shouldProcess[%s].", record.getStream(), record.getPartitionId(), record.getSequenceNumber(), shouldProcess);
                    if (shouldProcess) {
                        final List<InputRow> rows = parser.parse(record.getData(), isEndOfShard(record.getSequenceNumber()));
                        boolean isPersistRequired = false;
                        final SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceToUse = sequences.stream().filter(sequenceMetadata -> sequenceMetadata.canHandle(this, record)).findFirst().orElse(null);
                        if (sequenceToUse == null) {
                            throw new ISE("Cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. Current sequences: %s", record.getPartitionId(), record.getSequenceNumber(), sequences);
                        }
                        for (InputRow row : rows) {
                            // do not allow incremental persists to happen until all the rows from this batch of rows are indexed
                            final AppenderatorDriverAddResult addResult = driver.add(row, sequenceToUse.getSequenceName(), committerSupplier, true, false);
                            if (addResult.isOk()) {
                                // If the number of rows in the segment exceeds the threshold after adding a row,
                                // move the segment out from the active segments of BaseAppenderatorDriver to make a new segment.
                                final boolean isPushRequired = addResult.isPushRequired(tuningConfig.getPartitionsSpec().getMaxRowsPerSegment(), tuningConfig.getPartitionsSpec().getMaxTotalRowsOr(DynamicPartitionsSpec.DEFAULT_MAX_TOTAL_ROWS));
                                if (isPushRequired && !sequenceToUse.isCheckpointed()) {
                                    sequenceToCheckpoint = sequenceToUse;
                                }
                                isPersistRequired |= addResult.isPersistRequired();
                            } else {
                                // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                                throw new ISE("Could not allocate segment for row with timestamp[%s]", row.getTimestamp());
                            }
                        }
                        if (isPersistRequired) {
                            Futures.addCallback(driver.persistAsync(committerSupplier.get()), new FutureCallback<Object>() {

                                @Override
                                public void onSuccess(@Nullable Object result) {
                                    log.debug("Persist completed with metadata: %s", result);
                                }

                                @Override
                                public void onFailure(Throwable t) {
                                    log.error("Persist failed, dying");
                                    backgroundThreadException = t;
                                }
                            });
                        }
                        // in kafka, we can easily get the next offset by adding 1, but for kinesis, there's no way
                        // to get the next sequence number without having to make an expensive api call. So the behavior
                        // here for kafka is to +1 while for kinesis we simply save the current sequence number
                        lastReadOffsets.put(record.getPartitionId(), record.getSequenceNumber());
                        currOffsets.put(record.getPartitionId(), getNextStartOffset(record.getSequenceNumber()));
                    }
                    // Use record.getSequenceNumber() in the moreToRead check, since currOffsets might not have been
                    // updated if we were skipping records for being beyond the end.
                    final boolean moreToReadAfterThisRecord = isMoreToReadAfterReadingRecord(record.getSequenceNumber(), endOffsets.get(record.getPartitionId()));
                    if (!moreToReadAfterThisRecord && assignment.remove(record.getStreamPartition())) {
                        log.info("Finished reading stream[%s], partition[%s].", record.getStream(), record.getPartitionId());
                        recordSupplier.assign(assignment);
                        stillReading = !assignment.isEmpty();
                    }
                }
                if (!stillReading) {
                    // We let the fireDepartmentMetrics know that all messages have been read. This way, some metrics such as
                    // high message gap need not be reported
                    fireDepartmentMetrics.markProcessingDone();
                }
                if (System.currentTimeMillis() > nextCheckpointTime) {
                    sequenceToCheckpoint = getLastSequenceMetadata();
                }
                if (sequenceToCheckpoint != null && stillReading) {
                    Preconditions.checkArgument(getLastSequenceMetadata().getSequenceName().equals(sequenceToCheckpoint.getSequenceName()), "Cannot checkpoint a sequence [%s] which is not the latest one, sequences %s", sequenceToCheckpoint, sequences);
                    requestPause();
                    final CheckPointDataSourceMetadataAction checkpointAction = new CheckPointDataSourceMetadataAction(task.getDataSource(), ioConfig.getTaskGroupId(), null, createDataSourceMetadata(new SeekableStreamStartSequenceNumbers<>(stream, sequenceToCheckpoint.getStartOffsets(), sequenceToCheckpoint.getExclusiveStartPartitions())));
                    if (!toolbox.getTaskActionClient().submit(checkpointAction)) {
                        throw new ISE("Checkpoint request with sequences [%s] failed, dying", currOffsets);
                    }
                }
            }
            ingestionState = IngestionState.COMPLETED;
        } catch (Exception e) {
            // (1) catch all exceptions while reading from kafka
            caughtExceptionInner = e;
            log.error(e, "Encountered exception in run() before persisting.");
            throw e;
        } finally {
            try {
                // persist pending data
                driver.persist(committerSupplier.get());
            } catch (Exception e) {
                if (caughtExceptionInner != null) {
                    caughtExceptionInner.addSuppressed(e);
                } else {
                    throw e;
                }
            }
        }
        synchronized (statusLock) {
            if (stopRequested.get() && !publishOnStop.get()) {
                throw new InterruptedException("Stopping without publishing");
            }
            status = Status.PUBLISHING;
        }
        // We need to copy sequences here, because the success callback in publishAndRegisterHandoff removes items from
        // the sequence list. If a publish finishes before we finish iterating through the sequence list, we can
        // end up skipping some sequences.
        List<SequenceMetadata<PartitionIdType, SequenceOffsetType>> sequencesSnapshot = new ArrayList<>(sequences);
        for (int i = 0; i < sequencesSnapshot.size(); i++) {
            final SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceMetadata = sequencesSnapshot.get(i);
            if (!publishingSequences.contains(sequenceMetadata.getSequenceName())) {
                final boolean isLast = i == (sequencesSnapshot.size() - 1);
                if (isLast) {
                    // Shorten endOffsets of the last sequence to match currOffsets.
                    sequenceMetadata.setEndOffsets(currOffsets);
                }
                // Update assignments of the sequence, which should clear them. (This will be checked later, when the
                // Committer is built.)
                sequenceMetadata.updateAssignments(currOffsets, this::isMoreToReadAfterReadingRecord);
                publishingSequences.add(sequenceMetadata.getSequenceName());
                // persist already done in finally, so directly add to publishQueue
                publishAndRegisterHandoff(sequenceMetadata);
            }
        }
        if (backgroundThreadException != null) {
            throw new RuntimeException(backgroundThreadException);
        }
        // Wait for publish futures to complete.
        Futures.allAsList(publishWaitList).get();
        // Wait for handoff futures to complete.
        // Note that every publishing task (created by calling AppenderatorDriver.publish()) has a corresponding
        // handoffFuture. handoffFuture can throw an exception if 1) the corresponding publishFuture failed or 2) it
        // failed to persist sequences. It might also return null if handoff failed, but was recoverable.
        // See publishAndRegisterHandoff() for details.
        List<SegmentsAndCommitMetadata> handedOffList = Collections.emptyList();
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOffList = Futures.allAsList(handOffWaitList).get();
        } else {
            final long start = System.nanoTime();
            try {
                handedOffList = Futures.allAsList(handOffWaitList).get(tuningConfig.getHandoffConditionTimeout(), TimeUnit.MILLISECONDS);
            } catch (TimeoutException e) {
                // Handoff timeout is not an indexing failure, but coordination failure. We simply ignore timeout exception
                // here.
                log.makeAlert("Timeout waiting for handoff").addData("taskId", task.getId()).addData("handoffConditionTimeout", tuningConfig.getHandoffConditionTimeout()).emit();
            } finally {
                handoffWaitMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
            }
        }
        for (SegmentsAndCommitMetadata handedOff : handedOffList) {
            log.info("Handoff complete for segments: %s", String.join(", ", Lists.transform(handedOff.getSegments(), DataSegment::toString)));
        }
        appenderator.close();
    } catch (InterruptedException | RejectedExecutionException e) {
        // (2) catch InterruptedException and RejectedExecutionException thrown for the whole ingestion steps including
        // the final publishing.
        caughtExceptionOuter = e;
        try {
            Futures.allAsList(publishWaitList).cancel(true);
            Futures.allAsList(handOffWaitList).cancel(true);
            if (appenderator != null) {
                appenderator.closeNow();
            }
        } catch (Exception e2) {
            e.addSuppressed(e2);
        }
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }
        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested.get()) {
            Thread.currentThread().interrupt();
            throw e;
        }
    } catch (Exception e) {
        // (3) catch all other exceptions thrown for the whole ingestion steps including the final publishing.
        caughtExceptionOuter = e;
        try {
            Futures.allAsList(publishWaitList).cancel(true);
            Futures.allAsList(handOffWaitList).cancel(true);
            if (appenderator != null) {
                appenderator.closeNow();
            }
        } catch (Exception e2) {
            e.addSuppressed(e2);
        }
        throw e;
    } finally {
        try {
            if (driver != null) {
                driver.close();
            }
            toolbox.getChatHandlerProvider().unregister(task.getId());
            if (toolbox.getAppenderatorsManager().shouldTaskMakeNodeAnnouncements()) {
                toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode);
                toolbox.getDataSegmentServerAnnouncer().unannounce();
            }
        } catch (Throwable e) {
            if (caughtExceptionOuter != null) {
                caughtExceptionOuter.addSuppressed(e);
            } else {
                throw e;
            }
        }
    }
    toolbox.getTaskReportFileWriter().write(task.getId(), getTaskCompletionReports(null, handoffWaitMs));
    return TaskStatus.success(task.getId());
}
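
The authorizerMapper captured from the toolbox at the top of runInternal is not consulted inside this method; it is held for the task's HTTP endpoints, which are registered through the chat handler a few lines later. A minimal sketch of the kind of per-request datasource check such an endpoint performs is shown below. It reuses the AuthorizationUtils call that appears in Example 12; the method name and wiring are illustrative, not the task runner's exact code.

// Hedged sketch, not the task runner's actual endpoint code: verify that the caller may
// perform the given action on the task's datasource before serving a chat-handler request.
private Access checkDatasourceAccess(HttpServletRequest req, String dataSource, Action action) {
    final ResourceAction resourceAction = new ResourceAction(new Resource(dataSource, ResourceType.DATASOURCE), action);
    final Access access = AuthorizationUtils.authorizeResourceAction(req, resourceAction, authorizerMapper);
    if (!access.isAllowed()) {
        // Surfacing the denial as a ForbiddenException results in an HTTP 403 for the caller.
        throw new ForbiddenException(access.toString());
    }
    return access;
}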

Example 12 with AuthorizerMapper

Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.

In class OverlordResource, method getTasks:

@GET
@Path("/tasks")
@Produces(MediaType.APPLICATION_JSON)
public Response getTasks(@QueryParam("state") final String state, @QueryParam("datasource") final String dataSource, @QueryParam("createdTimeInterval") final String createdTimeInterval, @QueryParam("max") final Integer maxCompletedTasks, @QueryParam("type") final String type, @Context final HttpServletRequest req) {
    // check for valid state
    if (state != null) {
        if (!API_TASK_STATES.contains(StringUtils.toLowerCase(state))) {
            return Response.status(Status.BAD_REQUEST).entity(StringUtils.format("Invalid state : %s, valid values are: %s", state, API_TASK_STATES)).build();
        }
    }
    // fail fast if user not authorized to access datasource
    if (dataSource != null) {
        final ResourceAction resourceAction = new ResourceAction(new Resource(dataSource, ResourceType.DATASOURCE), Action.READ);
        final Access authResult = AuthorizationUtils.authorizeResourceAction(req, resourceAction, authorizerMapper);
        if (!authResult.isAllowed()) {
            throw new WebApplicationException(Response.status(Response.Status.FORBIDDEN).entity(StringUtils.format("Access-Check-Result: %s", authResult.toString())).build());
        }
    }
    List<TaskStatusPlus> finalTaskList = new ArrayList<>();
    Function<AnyTask, TaskStatusPlus> activeTaskTransformFunc = workItem -> new TaskStatusPlus(workItem.getTaskId(), workItem.getTaskGroupId(), workItem.getTaskType(), workItem.getCreatedTime(), workItem.getQueueInsertionTime(), workItem.getTaskState(), workItem.getRunnerTaskState(), null, workItem.getLocation(), workItem.getDataSource(), null);
    Function<TaskInfo<Task, TaskStatus>, TaskStatusPlus> completeTaskTransformFunc = taskInfo -> new TaskStatusPlus(taskInfo.getId(), taskInfo.getTask() == null ? null : taskInfo.getTask().getGroupId(), taskInfo.getTask() == null ? null : taskInfo.getTask().getType(), taskInfo.getCreatedTime(), // Would be nice to include the real queue insertion time, but the TaskStorage API doesn't yet allow it.
    DateTimes.EPOCH, taskInfo.getStatus().getStatusCode(), RunnerTaskState.NONE, taskInfo.getStatus().getDuration(), taskInfo.getStatus().getLocation() == null ? TaskLocation.unknown() : taskInfo.getStatus().getLocation(), taskInfo.getDataSource(), taskInfo.getStatus().getErrorMsg());
    // checking for complete tasks first to avoid querying active tasks if user only wants complete tasks
    if (state == null || "complete".equals(StringUtils.toLowerCase(state))) {
        Duration createdTimeDuration = null;
        if (createdTimeInterval != null) {
            final Interval theInterval = Intervals.of(StringUtils.replace(createdTimeInterval, "_", "/"));
            createdTimeDuration = theInterval.toDuration();
        }
        final List<TaskInfo<Task, TaskStatus>> taskInfoList = taskStorageQueryAdapter.getCompletedTaskInfoByCreatedTimeDuration(maxCompletedTasks, createdTimeDuration, dataSource);
        final List<TaskStatusPlus> completedTasks = taskInfoList.stream().map(completeTaskTransformFunc::apply).collect(Collectors.toList());
        finalTaskList.addAll(completedTasks);
    }
    final List<TaskInfo<Task, TaskStatus>> allActiveTaskInfo;
    final List<AnyTask> allActiveTasks = new ArrayList<>();
    if (state == null || !"complete".equals(StringUtils.toLowerCase(state))) {
        allActiveTaskInfo = taskStorageQueryAdapter.getActiveTaskInfo(dataSource);
        for (final TaskInfo<Task, TaskStatus> task : allActiveTaskInfo) {
            allActiveTasks.add(new AnyTask(task.getId(), task.getTask() == null ? null : task.getTask().getGroupId(), task.getTask() == null ? null : task.getTask().getType(), SettableFuture.create(), task.getDataSource(), null, null, task.getCreatedTime(), DateTimes.EPOCH, TaskLocation.unknown()));
        }
    }
    if (state == null || "waiting".equals(StringUtils.toLowerCase(state))) {
        final List<AnyTask> waitingWorkItems = filterActiveTasks(RunnerTaskState.WAITING, allActiveTasks);
        List<TaskStatusPlus> transformedWaitingList = waitingWorkItems.stream().map(activeTaskTransformFunc::apply).collect(Collectors.toList());
        finalTaskList.addAll(transformedWaitingList);
    }
    if (state == null || "pending".equals(StringUtils.toLowerCase(state))) {
        final List<AnyTask> pendingWorkItems = filterActiveTasks(RunnerTaskState.PENDING, allActiveTasks);
        List<TaskStatusPlus> transformedPendingList = pendingWorkItems.stream().map(activeTaskTransformFunc::apply).collect(Collectors.toList());
        finalTaskList.addAll(transformedPendingList);
    }
    if (state == null || "running".equals(StringUtils.toLowerCase(state))) {
        final List<AnyTask> runningWorkItems = filterActiveTasks(RunnerTaskState.RUNNING, allActiveTasks);
        List<TaskStatusPlus> transformedRunningList = runningWorkItems.stream().map(activeTaskTransformFunc::apply).collect(Collectors.toList());
        finalTaskList.addAll(transformedRunningList);
    }
    final List<TaskStatusPlus> authorizedList = securedTaskStatusPlus(finalTaskList, dataSource, type, req);
    return Response.ok(authorizedList).build();
}
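
The state validation at the top of getTasks checks against an API_TASK_STATES constant that is defined elsewhere in OverlordResource and is not shown in this excerpt. Its assumed shape, matching the four states the method branches on, would be roughly:

// Assumed definition, included only for reading the method above; the real field may use a
// different collection type, but it covers exactly the four states handled by getTasks.
private static final List<String> API_TASK_STATES = ImmutableList.of("pending", "waiting", "running", "complete");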

Example 13 with AuthorizerMapper

Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.

In class OverlordResource, method securedTaskStatusPlus:

private List<TaskStatusPlus> securedTaskStatusPlus(List<TaskStatusPlus> collectionToFilter, @Nullable String dataSource, @Nullable String type, HttpServletRequest req) {
    Function<TaskStatusPlus, Iterable<ResourceAction>> raGenerator = taskStatusPlus -> {
        final String taskId = taskStatusPlus.getId();
        final String taskDatasource = taskStatusPlus.getDataSource();
        if (taskDatasource == null) {
            throw new WebApplicationException(Response.serverError().entity(StringUtils.format("No task information found for task with id: [%s]", taskId)).build());
        }
        return Collections.singletonList(new ResourceAction(new Resource(taskDatasource, ResourceType.DATASOURCE), Action.READ));
    };
    List<TaskStatusPlus> optionalTypeFilteredList = collectionToFilter;
    if (type != null) {
        optionalTypeFilteredList = collectionToFilter.stream().filter(task -> type.equals(task.getType())).collect(Collectors.toList());
    }
    if (dataSource != null) {
        // skip auth check here, as it's already done in getTasks
        return optionalTypeFilteredList;
    }
    return Lists.newArrayList(AuthorizationUtils.filterAuthorizedResources(req, optionalTypeFilteredList, raGenerator, authorizerMapper));
}
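
The return statement follows the general filterAuthorizedResources pattern: pass the request, the collection to filter, a function mapping each element to the ResourceActions required to view it, and the AuthorizerMapper; elements whose actions are denied are dropped from the result. A small, hypothetical sketch of the same pattern applied to plain datasource names follows; the datasourceNames variable is assumed to be in scope and is not part of OverlordResource.

// Hypothetical example of the same filtering pattern applied to a List<String> of datasource names.
Function<String, Iterable<ResourceAction>> datasourceRaGenerator = datasourceName ->
    Collections.singletonList(new ResourceAction(new Resource(datasourceName, ResourceType.DATASOURCE), Action.READ));
List<String> readableDatasources = Lists.newArrayList(
    AuthorizationUtils.filterAuthorizedResources(req, datasourceNames, datasourceRaGenerator, authorizerMapper));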

Example 14 with AuthorizerMapper

Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.

In class OverlordResourceTest, method setUp:

@Before
public void setUp() {
    taskRunner = EasyMock.createMock(TaskRunner.class);
    configManager = EasyMock.createMock(JacksonConfigManager.class);
    provisioningStrategy = EasyMock.createMock(ProvisioningStrategy.class);
    taskMaster = EasyMock.createStrictMock(TaskMaster.class);
    taskStorageQueryAdapter = EasyMock.createStrictMock(TaskStorageQueryAdapter.class);
    indexerMetadataStorageAdapter = EasyMock.createStrictMock(IndexerMetadataStorageAdapter.class);
    req = EasyMock.createStrictMock(HttpServletRequest.class);
    workerTaskRunnerQueryAdapter = EasyMock.createStrictMock(WorkerTaskRunnerQueryAdapter.class);
    EasyMock.expect(taskMaster.getTaskRunner()).andReturn(Optional.of(taskRunner)).anyTimes();
    AuthorizerMapper authMapper = new AuthorizerMapper(null) {

        @Override
        public Authorizer getAuthorizer(String name) {
            return new Authorizer() {

                @Override
                public Access authorize(AuthenticationResult authenticationResult, Resource resource, Action action) {
                    final String username = authenticationResult.getIdentity();
                    switch(resource.getName()) {
                        case "allow":
                            return new Access(true);
                        case Datasources.WIKIPEDIA:
                            // Only "Wiki Reader" can read "wikipedia"
                            return new Access(action == Action.READ && Users.WIKI_READER.equals(username));
                        case Datasources.BUZZFEED:
                            // Only "Buzz Reader" can read "buzzfeed"
                            return new Access(action == Action.READ && Users.BUZZ_READER.equals(username));
                        default:
                            return new Access(false);
                    }
                }
            };
        }
    };
    overlordResource = new OverlordResource(taskMaster, taskStorageQueryAdapter, indexerMetadataStorageAdapter, null, configManager, null, authMapper, workerTaskRunnerQueryAdapter, provisioningStrategy);
}
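
Exercising the anonymous mapper directly shows how it gates the test users. The sketch below is illustrative only: the authorizer name is a placeholder (the test mapper ignores it), the last two AuthenticationResult constructor arguments are nulled out for brevity, and Datasources/Users are the test's own constants.

// Illustrative use of the test mapper built above: only the "Wiki Reader" identity may read "wikipedia".
AuthenticationResult wikiReader = new AuthenticationResult(Users.WIKI_READER, "allowAll", null, null);
Access access = authMapper.getAuthorizer("allowAll").authorize(wikiReader, new Resource(Datasources.WIKIPEDIA, ResourceType.DATASOURCE), Action.READ);
// access.isAllowed() is true here; the same call with Action.WRITE, or with a different identity, yields an Access whose isAllowed() is false.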

Example 15 with AuthorizerMapper

Use of org.apache.druid.server.security.AuthorizerMapper in project druid by druid-io.

In class SeekableStreamIndexTaskRunnerAuthTest, method setUp:

@Before
public void setUp() {
    // Create an AuthorizerMapper that only allows access to a Datasource resource
    AuthorizerMapper authorizerMapper = new AuthorizerMapper(null) {

        @Override
        public Authorizer getAuthorizer(String name) {
            return (authenticationResult, resource, action) -> {
                final String username = authenticationResult.getIdentity();
                // Allow access to a Datasource resource only if:
                // - Datasource Read User requests Read access
                // - or, Datasource Write User requests Write access
                if (resource.getType().equals(ResourceType.DATASOURCE)) {
                    return new Access((action == Action.READ && username.equals(Users.DATASOURCE_READ)) || (action == Action.WRITE && username.equals(Users.DATASOURCE_WRITE)));
                }
                // Do not allow access to any other resource
                return new Access(false);
            };
        }
    };
    DataSchema dataSchema = new DataSchema("datasource", new TimestampSpec(null, null, null), new DimensionsSpec(Collections.emptyList()), new AggregatorFactory[] {}, new ArbitraryGranularitySpec(new AllGranularity(), Collections.emptyList()), TransformSpec.NONE, null, null);
    SeekableStreamIndexTaskTuningConfig tuningConfig = mock(SeekableStreamIndexTaskTuningConfig.class);
    SeekableStreamIndexTaskIOConfig<String, String> ioConfig = new TestSeekableStreamIndexTaskIOConfig();
    // Initialize task and task runner
    SeekableStreamIndexTask<String, String, ByteEntity> indexTask = new TestSeekableStreamIndexTask("id", dataSchema, tuningConfig, ioConfig);
    taskRunner = new TestSeekableStreamIndexTaskRunner(indexTask, authorizerMapper);
}
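
Unlike Example 14, which builds the Authorizer as an anonymous class, this test returns it as a lambda; that works because Authorizer declares a single abstract authorize method. Purely for comparison, the lambda returned by getAuthorizer above is equivalent to this anonymous-class form:

// Equivalent anonymous-class form of the lambda returned by getAuthorizer above.
Authorizer datasourceOnlyAuthorizer = new Authorizer() {
    @Override
    public Access authorize(AuthenticationResult authenticationResult, Resource resource, Action action) {
        final String username = authenticationResult.getIdentity();
        if (resource.getType().equals(ResourceType.DATASOURCE)) {
            return new Access((action == Action.READ && username.equals(Users.DATASOURCE_READ)) || (action == Action.WRITE && username.equals(Users.DATASOURCE_WRITE)));
        }
        // Do not allow access to any other resource.
        return new Access(false);
    }
};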

Aggregations

AuthorizerMapper (org.apache.druid.server.security.AuthorizerMapper): 18 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 10 usages
Response (javax.ws.rs.core.Response): 10 usages
Access (org.apache.druid.server.security.Access): 10 usages
Action (org.apache.druid.server.security.Action): 10 usages
List (java.util.List): 9 usages
HttpServletRequest (javax.servlet.http.HttpServletRequest): 8 usages
Set (java.util.Set): 7 usages
Resource (org.apache.druid.server.security.Resource): 7 usages
Collections (java.util.Collections): 6 usages
Map (java.util.Map): 6 usages
Nullable (javax.annotation.Nullable): 6 usages
Path (javax.ws.rs.Path): 6 usages
Produces (javax.ws.rs.Produces): 6 usages
Context (javax.ws.rs.core.Context): 6 usages
MediaType (javax.ws.rs.core.MediaType): 6 usages
ForbiddenException (org.apache.druid.server.security.ForbiddenException): 6 usages
Collectors (java.util.stream.Collectors): 5 usages
GET (javax.ws.rs.GET): 5 usages
POST (javax.ws.rs.POST): 5 usages