
Example 81 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class ReferenceCountingResourceHolder method increment.

public Releaser increment() {
    while (true) {
        int count = this.refCount.get();
        if (count <= 0) {
            throw new ISE("Already closed!");
        }
        if (refCount.compareAndSet(count, count + 1)) {
            break;
        }
    }
    // This Releaser is supposed to be used from a single thread, so no synchronization/atomicity
    return new Releaser() {

        boolean released = false;

        @Override
        public void close() {
            if (!released) {
                decrement();
                released = true;
            } else {
                log.warn(new ISE("Already closed"), "Already closed");
            }
        }
    };
}
Also used: ISE (io.druid.java.util.common.ISE)
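
The increment() above is a textbook compare-and-swap retry loop: read the current count, fail fast if the holder is already closed, and retry until the CAS lands. A minimal self-contained sketch of the same pattern (RefCounted and its members are illustrative stand-ins, not Druid's API):

import java.util.concurrent.atomic.AtomicInteger;

public class RefCounted implements AutoCloseable {

    // Starts at 1: the owner itself holds the initial reference.
    private final AtomicInteger refCount = new AtomicInteger(1);

    // Retries until the CAS succeeds; fails fast once the count has reached zero.
    public AutoCloseable increment() {
        while (true) {
            int count = refCount.get();
            if (count <= 0) {
                throw new IllegalStateException("Already closed!");
            }
            if (refCount.compareAndSet(count, count + 1)) {
                break;
            }
        }
        // Note: unlike Druid's Releaser above, this handle does not guard
        // against a caller closing it twice.
        return this::close;
    }

    @Override
    public void close() {
        if (refCount.decrementAndGet() == 0) {
            // Last reference released: free the underlying resource here.
        }
    }
}

A caller would typically hold the returned handle in a try-with-resources block, so the count is decremented exactly once when the block exits.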

Example 82 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class BlockingPool method offerBatch.

private void offerBatch(List<T> offers) {
    final ReentrantLock lock = this.lock;
    lock.lock();
    try {
        if (objects.size() + offers.size() <= maxSize) {
            for (T offer : offers) {
                objects.push(offer);
            }
            notEnough.signal();
        } else {
            throw new ISE("Cannot exceed pre-configured maximum size");
        }
    } finally {
        lock.unlock();
    }
}
Also used: ReentrantLock (java.util.concurrent.locks.ReentrantLock), ISE (io.druid.java.util.common.ISE)
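
offerBatch is the producer half of a standard lock-and-condition pool: returned objects are pushed back under the lock, and the notEnough condition is signalled so a blocked taker can wake up. A self-contained sketch of both halves of that pattern (SimplePool and its names are illustrative, not Druid's BlockingPool API):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class SimplePool<T> {

    private final ReentrantLock lock = new ReentrantLock();
    private final Condition notEnough = lock.newCondition();
    private final Deque<T> objects = new ArrayDeque<>();
    private final int maxSize;

    public SimplePool(int maxSize) {
        this.maxSize = maxSize;
    }

    // Producer half: mirrors offerBatch above, rejecting overflow.
    public void offer(T object) {
        lock.lock();
        try {
            if (objects.size() >= maxSize) {
                throw new IllegalStateException("Cannot exceed pre-configured maximum size");
            }
            objects.push(object);
            notEnough.signal();  // wake one waiting taker
        } finally {
            lock.unlock();
        }
    }

    // Consumer half: blocks until an object is available. The while loop
    // (rather than an if) guards against spurious wakeups.
    public T take() throws InterruptedException {
        lock.lock();
        try {
            while (objects.isEmpty()) {
                notEnough.await();
            }
            return objects.pop();
        } finally {
            lock.unlock();
        }
    }
}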

Example 83 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class IndexTask method generateAndPublishSegments.

private boolean generateAndPublishSegments(final TaskToolbox toolbox, final DataSchema dataSchema, final Map<Interval, List<ShardSpec>> shardSpecs, final String version, final FirehoseFactory firehoseFactory) throws IOException, InterruptedException {
    final GranularitySpec granularitySpec = dataSchema.getGranularitySpec();
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema, new RealtimeIOConfig(null, null, null), null);
    final FireDepartmentMetrics fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    final Map<String, ShardSpec> sequenceNameToShardSpecMap = Maps.newHashMap();
    if (toolbox.getMonitorScheduler() != null) {
        toolbox.getMonitorScheduler().addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics), ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));
    }
    final SegmentAllocator segmentAllocator;
    if (ingestionSchema.getIOConfig().isAppendToExisting()) {
        segmentAllocator = new ActionBasedSegmentAllocator(toolbox.getTaskActionClient(), dataSchema);
    } else {
        segmentAllocator = new SegmentAllocator() {

            @Override
            public SegmentIdentifier allocate(DateTime timestamp, String sequenceName, String previousSegmentId) throws IOException {
                Optional<Interval> interval = granularitySpec.bucketInterval(timestamp);
                if (!interval.isPresent()) {
                    throw new ISE("Could not find interval for timestamp [%s]", timestamp);
                }
                ShardSpec shardSpec = sequenceNameToShardSpecMap.get(sequenceName);
                if (shardSpec == null) {
                    throw new ISE("Could not find ShardSpec for sequenceName [%s]", sequenceName);
                }
                return new SegmentIdentifier(getDataSource(), interval.get(), version, shardSpec);
            }
        };
    }
    try (final Appenderator appenderator = newAppenderator(fireDepartmentMetrics, toolbox, dataSchema);
        final FiniteAppenderatorDriver driver = newDriver(appenderator, toolbox, segmentAllocator, fireDepartmentMetrics);
        final Firehose firehose = firehoseFactory.connect(dataSchema.getParser())) {
        final Supplier<Committer> committerSupplier = Committers.supplierFromFirehose(firehose);
        final Map<Interval, ShardSpecLookup> shardSpecLookups = Maps.newHashMap();
        if (driver.startJob() != null) {
            driver.clear();
        }
        try {
            while (firehose.hasMore()) {
                try {
                    final InputRow inputRow = firehose.nextRow();
                    final Optional<Interval> optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp());
                    if (!optInterval.isPresent()) {
                        fireDepartmentMetrics.incrementThrownAway();
                        continue;
                    }
                    final Interval interval = optInterval.get();
                    if (!shardSpecLookups.containsKey(interval)) {
                        final List<ShardSpec> intervalShardSpecs = shardSpecs.get(interval);
                        if (intervalShardSpecs == null || intervalShardSpecs.isEmpty()) {
                            throw new ISE("Failed to get shardSpec for interval[%s]", interval);
                        }
                        shardSpecLookups.put(interval, intervalShardSpecs.get(0).getLookup(intervalShardSpecs));
                    }
                    final ShardSpec shardSpec = shardSpecLookups.get(interval).getShardSpec(inputRow.getTimestampFromEpoch(), inputRow);
                    final String sequenceName = String.format("index_%s_%s_%d", interval, version, shardSpec.getPartitionNum());
                    if (!sequenceNameToShardSpecMap.containsKey(sequenceName)) {
                        final ShardSpec shardSpecForPublishing = ingestionSchema.getTuningConfig().isForceExtendableShardSpecs() || ingestionSchema.getIOConfig().isAppendToExisting() ? new NumberedShardSpec(shardSpec.getPartitionNum(), shardSpecs.get(interval).size()) : shardSpec;
                        sequenceNameToShardSpecMap.put(sequenceName, shardSpecForPublishing);
                    }
                    final SegmentIdentifier identifier = driver.add(inputRow, sequenceName, committerSupplier);
                    if (identifier == null) {
                        throw new ISE("Could not allocate segment for row with timestamp[%s]", inputRow.getTimestamp());
                    }
                    fireDepartmentMetrics.incrementProcessed();
                } catch (ParseException e) {
                    if (ingestionSchema.getTuningConfig().isReportParseExceptions()) {
                        throw e;
                    } else {
                        fireDepartmentMetrics.incrementUnparseable();
                    }
                }
            }
        } finally {
            driver.persist(committerSupplier.get());
        }
        final TransactionalSegmentPublisher publisher = new TransactionalSegmentPublisher() {

            @Override
            public boolean publishSegments(Set<DataSegment> segments, Object commitMetadata) throws IOException {
                final SegmentTransactionalInsertAction action = new SegmentTransactionalInsertAction(segments, null, null);
                return toolbox.getTaskActionClient().submit(action).isSuccess();
            }
        };
        final SegmentsAndMetadata published = driver.finish(publisher, committerSupplier.get());
        if (published == null) {
            log.error("Failed to publish segments, aborting!");
            return false;
        } else {
            log.info("Published segments[%s]", Joiner.on(", ").join(Iterables.transform(published.getSegments(), new Function<DataSegment, String>() {

                @Override
                public String apply(DataSegment input) {
                    return input.getIdentifier();
                }
            })));
            return true;
        }
    }
}
Also used: RealtimeIOConfig (io.druid.segment.indexing.RealtimeIOConfig), SortedSet (java.util.SortedSet), Set (java.util.Set), SegmentIdentifier (io.druid.segment.realtime.appenderator.SegmentIdentifier), ShardSpecLookup (io.druid.timeline.partition.ShardSpecLookup), SegmentTransactionalInsertAction (io.druid.indexing.common.actions.SegmentTransactionalInsertAction), DataSegment (io.druid.timeline.DataSegment), NoneShardSpec (io.druid.timeline.partition.NoneShardSpec), ShardSpec (io.druid.timeline.partition.ShardSpec), NumberedShardSpec (io.druid.timeline.partition.NumberedShardSpec), HashBasedNumberedShardSpec (io.druid.timeline.partition.HashBasedNumberedShardSpec), DateTime (org.joda.time.DateTime), FireDepartment (io.druid.segment.realtime.FireDepartment), TransactionalSegmentPublisher (io.druid.segment.realtime.appenderator.TransactionalSegmentPublisher), ActionBasedSegmentAllocator (io.druid.indexing.appenderator.ActionBasedSegmentAllocator), ISE (io.druid.java.util.common.ISE), Optional (com.google.common.base.Optional), Firehose (io.druid.data.input.Firehose), SegmentsAndMetadata (io.druid.segment.realtime.appenderator.SegmentsAndMetadata), IOException (java.io.IOException), FireDepartmentMetrics (io.druid.segment.realtime.FireDepartmentMetrics), Appenderator (io.druid.segment.realtime.appenderator.Appenderator), GranularitySpec (io.druid.segment.indexing.granularity.GranularitySpec), SegmentAllocator (io.druid.segment.realtime.appenderator.SegmentAllocator), FiniteAppenderatorDriver (io.druid.segment.realtime.appenderator.FiniteAppenderatorDriver), InputRow (io.druid.data.input.InputRow), RealtimeMetricsMonitor (io.druid.segment.realtime.RealtimeMetricsMonitor), Committer (io.druid.data.input.Committer), ParseException (io.druid.java.util.common.parsers.ParseException), Interval (org.joda.time.Interval)
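
One detail worth noting above is the shardSpecLookups map: a ShardSpecLookup is built the first time an interval is seen, then reused for every later row in that interval, with an ISE thrown when the interval has no shard specs to build from. With Java 8's Map.computeIfAbsent the same build-once-then-reuse pattern can be written compactly; a generic, self-contained sketch (the types here are stand-ins, not Druid's):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class BuildOnceCache<K, V> {

    private final Map<K, V> cache = new HashMap<>();
    private final Function<K, V> factory;

    public BuildOnceCache(Function<K, V> factory) {
        this.factory = factory;
    }

    // Builds the value on first access for a key and reuses it afterwards,
    // failing fast when the factory cannot produce one.
    public V get(K key) {
        return cache.computeIfAbsent(key, k -> {
            V value = factory.apply(k);
            if (value == null) {
                throw new IllegalStateException("Failed to build value for key[" + k + "]");
            }
            return value;
        });
    }
}

In the task above, the factory role is played by intervalShardSpecs.get(0).getLookup(intervalShardSpecs), and the failure branch corresponds to the "Failed to get shardSpec for interval" ISE.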

Example 84 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class KillTask method run.

@Override
public TaskStatus run(TaskToolbox toolbox) throws Exception {
    // Confirm we have a lock (will throw if there isn't exactly one element)
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));
    if (!myLock.getDataSource().equals(getDataSource())) {
        throw new ISE("WTF?! Lock dataSource[%s] != task dataSource[%s]", myLock.getDataSource(), getDataSource());
    }
    if (!myLock.getInterval().equals(getInterval())) {
        throw new ISE("WTF?! Lock interval[%s] != task interval[%s]", myLock.getInterval(), getInterval());
    }
    // List unused segments
    final List<DataSegment> unusedSegments = toolbox.getTaskActionClient().submit(new SegmentListUnusedAction(myLock.getDataSource(), myLock.getInterval()));
    // Verify none of these segments have versions > lock version
    for (final DataSegment unusedSegment : unusedSegments) {
        if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) {
            throw new ISE("WTF?! Unused segment[%s] has version[%s] > task version[%s]", unusedSegment.getIdentifier(), unusedSegment.getVersion(), myLock.getVersion());
        }
        log.info("OK to kill segment: %s", unusedSegment.getIdentifier());
    }
    // Kill segments
    for (DataSegment segment : unusedSegments) {
        toolbox.getDataSegmentKiller().kill(segment);
        toolbox.getTaskActionClient().submit(new SegmentNukeAction(ImmutableSet.of(segment)));
    }
    return TaskStatus.success(getId());
}
Also used: TaskLock (io.druid.indexing.common.TaskLock), SegmentNukeAction (io.druid.indexing.common.actions.SegmentNukeAction), ISE (io.druid.java.util.common.ISE), SegmentListUnusedAction (io.druid.indexing.common.actions.SegmentListUnusedAction), DataSegment (io.druid.timeline.DataSegment)
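
The version guard above relies on segment versions being strings that compare lexicographically; in Druid they are typically ISO-8601 timestamps, so string order matches time order. A small self-contained illustration of that check (the version values are made-up samples):

import java.util.Arrays;
import java.util.List;

public class VersionGuard {

    public static void main(String[] args) {
        final String lockVersion = "2017-03-01T00:00:00.000Z";
        final List<String> segmentVersions = Arrays.asList(
            "2017-02-28T00:00:00.000Z",  // older than the lock: safe to kill
            "2017-03-02T00:00:00.000Z"   // newer than the lock: must be rejected
        );
        for (String version : segmentVersions) {
            // Lexicographic comparison of ISO-8601 timestamps is chronological.
            if (version.compareTo(lockVersion) > 0) {
                System.out.println("Refusing to kill segment with version["
                    + version + "] > lock version[" + lockVersion + "]");
            } else {
                System.out.println("OK to kill segment with version: " + version);
            }
        }
    }
}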

Example 85 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class MergeTaskBase method isReady.

/**
   * Checks pre-existing segments in "context" to confirm that this merge query is valid. Specifically, confirm that
   * we are operating on every segment that overlaps the chosen interval.
   */
@Override
public boolean isReady(TaskActionClient taskActionClient) throws Exception {
    // Try to acquire lock
    if (!super.isReady(taskActionClient)) {
        return false;
    } else {
        final Function<DataSegment, String> toIdentifier = new Function<DataSegment, String>() {

            @Override
            public String apply(DataSegment dataSegment) {
                return dataSegment.getIdentifier();
            }
        };
        final Set<String> current = ImmutableSet.copyOf(Iterables.transform(taskActionClient.submit(new SegmentListUsedAction(getDataSource(), getInterval(), null)), toIdentifier));
        final Set<String> requested = ImmutableSet.copyOf(Iterables.transform(segments, toIdentifier));
        final Set<String> missingFromRequested = Sets.difference(current, requested);
        if (!missingFromRequested.isEmpty()) {
            throw new ISE("Merge is invalid: current segment(s) are not in the requested set: %s", Joiner.on(", ").join(missingFromRequested));
        }
        final Set<String> missingFromCurrent = Sets.difference(requested, current);
        if (!missingFromCurrent.isEmpty()) {
            throw new ISE("Merge is invalid: requested segment(s) are not in the current set: %s", Joiner.on(", ").join(missingFromCurrent));
        }
        return true;
    }
}
Also used: Function (com.google.common.base.Function), ISE (io.druid.java.util.common.ISE), SegmentListUsedAction (io.druid.indexing.common.actions.SegmentListUsedAction), DataSegment (io.druid.timeline.DataSegment)
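
The two Sets.difference calls above are a compact way to prove set equality while keeping a targeted error message for each direction of mismatch. A runnable sketch of the same check (the segment identifiers are sample values; Guava's Sets.difference returns a live view, which is only read here):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class SetEqualityCheck {

    public static void main(String[] args) {
        Set<String> current = ImmutableSet.of("seg1", "seg2", "seg3");
        Set<String> requested = ImmutableSet.of("seg1", "seg2");

        // Checking both one-way differences establishes (or refutes) equality,
        // and each direction yields its own precise diagnostic.
        Set<String> missingFromRequested = Sets.difference(current, requested);
        Set<String> missingFromCurrent = Sets.difference(requested, current);

        if (!missingFromRequested.isEmpty()) {
            System.out.println("current segment(s) not in the requested set: " + missingFromRequested);
        }
        if (!missingFromCurrent.isEmpty()) {
            System.out.println("requested segment(s) not in the current set: " + missingFromCurrent);
        }
    }
}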

Aggregations

ISE (io.druid.java.util.common.ISE): 158
IOException (java.io.IOException): 37
Map (java.util.Map): 23
Test (org.junit.Test): 21
File (java.io.File): 20
List (java.util.List): 19
DateTime (org.joda.time.DateTime): 18
ArrayList (java.util.ArrayList): 17
DataSegment (io.druid.timeline.DataSegment): 15
Interval (org.joda.time.Interval): 15
Function (com.google.common.base.Function): 14
TimeoutException (java.util.concurrent.TimeoutException): 12
IAE (io.druid.java.util.common.IAE): 10
HashMap (java.util.HashMap): 10
ExecutionException (java.util.concurrent.ExecutionException): 10
Stopwatch (com.google.common.base.Stopwatch): 9
DimensionSpec (io.druid.query.dimension.DimensionSpec): 9
ImmutableMap (com.google.common.collect.ImmutableMap): 8
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 8
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 8