use of org.apache.druid.common.guava.SettableSupplier in project druid by druid-io.
the class SegmentManager method loadSegment.
/**
 * Load a single segment.
 *
 * @param segment    segment to load
 * @param lazy       whether to lazily load the column metadata
 * @param loadFailed callback to execute if the segment's lazy load fails
 *
 * @return true if the segment was newly loaded, false if it was already loaded
 *
 * @throws SegmentLoadingException if the segment cannot be loaded
 */
public boolean loadSegment(final DataSegment segment, boolean lazy, SegmentLazyLoadFailCallback loadFailed)
    throws SegmentLoadingException
{
  final ReferenceCountingSegment adapter = getSegmentReference(segment, lazy, loadFailed);
  final SettableSupplier<Boolean> resultSupplier = new SettableSupplier<>();

  // compute() is used to ensure that the operation for a data source is executed atomically
  dataSources.compute(segment.getDataSource(), (k, v) -> {
    final DataSourceState dataSourceState = v == null ? new DataSourceState() : v;
    final VersionedIntervalTimeline<String, ReferenceCountingSegment> loadedIntervals =
        dataSourceState.getTimeline();
    final PartitionChunk<ReferenceCountingSegment> entry = loadedIntervals.findChunk(
        segment.getInterval(),
        segment.getVersion(),
        segment.getShardSpec().getPartitionNum()
    );

    if (entry != null) {
      log.warn("Told to load an adapter for segment[%s] that already exists", segment.getId());
      resultSupplier.set(false);
    } else {
      IndexedTable table = adapter.as(IndexedTable.class);
      if (table != null) {
        if (dataSourceState.isEmpty() || dataSourceState.numSegments == dataSourceState.tablesLookup.size()) {
          dataSourceState.tablesLookup.put(segment.getId(), new ReferenceCountingIndexedTable(table));
        } else {
          log.error("Cannot load segment[%s] with IndexedTable, no existing segments are joinable", segment.getId());
        }
      } else if (dataSourceState.tablesLookup.size() > 0) {
        log.error("Cannot load segment[%s] without IndexedTable, all existing segments are joinable", segment.getId());
      }

      loadedIntervals.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(adapter));
      dataSourceState.addSegment(segment);
      resultSupplier.set(true);
    }
    return dataSourceState;
  });

  return resultSupplier.get();
}
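The idiom here is worth noting: a lambda passed to a map's compute() can only capture effectively final variables, so the method uses a SettableSupplier as a mutable result holder that the lambda writes into while compute() executes atomically for the key. Below is a minimal, self-contained sketch of the same idiom; the class, map, and method names are illustrative, not from Druid.

import java.util.concurrent.ConcurrentHashMap;

import org.apache.druid.common.guava.SettableSupplier;

public class ComputeResultHolderExample
{
  private final ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();

  /** Returns true if the key was newly added, false if it was already present. */
  public boolean addIfAbsent(String key)
  {
    final SettableSupplier<Boolean> added = new SettableSupplier<>();
    // compute() runs this remapping function atomically for the given key;
    // the lambda records its outcome in the holder since it cannot assign to a local.
    counts.compute(key, (k, v) -> {
      if (v != null) {
        added.set(false);
        return v;
      }
      added.set(true);
      return 1;
    });
    return added.get();
  }
}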
use of org.apache.druid.common.guava.SettableSupplier in project druid by druid-io.
the class CompactionTask method decideRollupAndQueryGranularityCarryOver.
/**
 * Decide which rollup and queryGranularity values to propagate to the compacted segment,
 * based on the given data segments.
 *
 * @param rollup                    reference to update with the rollup value
 * @param queryGranularity          reference to update with the queryGranularity value
 * @param queryableIndexAndSegments the segments to compact
 */
private static void decideRollupAndQueryGranularityCarryOver(
    SettableSupplier<Boolean> rollup,
    SettableSupplier<Granularity> queryGranularity,
    List<NonnullPair<QueryableIndex, DataSegment>> queryableIndexAndSegments
)
{
  final SettableSupplier<Boolean> rollupIsValid = new SettableSupplier<>(true);
  for (NonnullPair<QueryableIndex, DataSegment> pair : queryableIndexAndSegments) {
    final QueryableIndex index = pair.lhs;
    if (index.getMetadata() == null) {
      throw new RE("Index metadata doesn't exist for segment[%s]", pair.rhs.getId());
    }

    // Pick the rollup value only if all segments being compacted share the same non-null value;
    // otherwise set it to false
    if (rollupIsValid.get()) {
      Boolean isRollup = index.getMetadata().isRollup();
      if (isRollup == null) {
        rollupIsValid.set(false);
        rollup.set(false);
      } else if (rollup.get() == null) {
        rollup.set(isRollup);
      } else if (!rollup.get().equals(isRollup.booleanValue())) {
        rollupIsValid.set(false);
        rollup.set(false);
      }
    }

    // Pick the finer of the non-null query granularities of the segments being compacted
    Granularity current = index.getMetadata().getQueryGranularity();
    queryGranularity.set(compareWithCurrent(queryGranularity.get(), current));
  }
}
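Here the SettableSupplier arguments act as out-parameters: the caller passes in empty holders, and the method fills both in a single pass over the segments, effectively returning two values at once. A hedged sketch of the same idiom follows; OutParameterExample and computeMinMax are hypothetical names, not Druid code.

import java.util.List;

import org.apache.druid.common.guava.SettableSupplier;

public class OutParameterExample
{
  // Fills both holders in one pass; min and max start unset, so get() returns null.
  static void computeMinMax(List<Integer> values, SettableSupplier<Integer> min, SettableSupplier<Integer> max)
  {
    for (Integer v : values) {
      if (min.get() == null || v < min.get()) {
        min.set(v);
      }
      if (max.get() == null || v > max.get()) {
        max.set(v);
      }
    }
  }

  public static void main(String[] args)
  {
    final SettableSupplier<Integer> min = new SettableSupplier<>();
    final SettableSupplier<Integer> max = new SettableSupplier<>();
    computeMinMax(List.of(3, 1, 4, 1, 5), min, max);
    System.out.println(min.get() + ", " + max.get()); // prints "1, 5"
  }
}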
use of org.apache.druid.common.guava.SettableSupplier in project druid by druid-io.
the class CompactionTask method createDataSchema.
private static DataSchema createDataSchema(
    String dataSource,
    List<NonnullPair<QueryableIndex, DataSegment>> queryableIndexAndSegments,
    @Nullable DimensionsSpec dimensionsSpec,
    @Nullable ClientCompactionTaskTransformSpec transformSpec,
    @Nullable AggregatorFactory[] metricsSpec,
    @Nonnull ClientCompactionTaskGranularitySpec granularitySpec
)
{
  // Check index metadata and decide which values to propagate (i.e. carry over) for rollup & queryGranularity
  final SettableSupplier<Boolean> rollup = new SettableSupplier<>();
  final SettableSupplier<Granularity> queryGranularity = new SettableSupplier<>();
  decideRollupAndQueryGranularityCarryOver(rollup, queryGranularity, queryableIndexAndSegments);

  final Interval totalInterval = JodaUtils.umbrellaInterval(
      queryableIndexAndSegments.stream().map(p -> p.rhs.getInterval()).collect(Collectors.toList())
  );

  final Granularity queryGranularityToUse;
  if (granularitySpec.getQueryGranularity() == null) {
    queryGranularityToUse = queryGranularity.get();
    log.info("Generate compaction task spec with segments original query granularity [%s]", queryGranularityToUse);
  } else {
    queryGranularityToUse = granularitySpec.getQueryGranularity();
    log.info("Generate compaction task spec with new query granularity overrided from input [%s]", queryGranularityToUse);
  }

  final GranularitySpec uniformGranularitySpec = new UniformGranularitySpec(
      Preconditions.checkNotNull(granularitySpec.getSegmentGranularity()),
      queryGranularityToUse,
      granularitySpec.isRollup() == null ? rollup.get() : granularitySpec.isRollup(),
      Collections.singletonList(totalInterval)
  );

  // Find unique dimensions
  final DimensionsSpec finalDimensionsSpec = dimensionsSpec == null
      ? createDimensionsSpec(queryableIndexAndSegments)
      : dimensionsSpec;
  final AggregatorFactory[] finalMetricsSpec = metricsSpec == null
      ? createMetricsSpec(queryableIndexAndSegments)
      : metricsSpec;

  return new DataSchema(
      dataSource,
      new TimestampSpec(ColumnHolder.TIME_COLUMN_NAME, "millis", null),
      finalDimensionsSpec,
      finalMetricsSpec,
      uniformGranularitySpec,
      transformSpec == null ? null : new TransformSpec(transformSpec.getFilter(), null)
  );
}
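Note the fallback pattern used twice above: an explicitly configured value in granularitySpec wins, and the value carried over from the existing segments via the SettableSupplier is used only when the configuration is null. A minimal sketch of that decision, under the assumption of a generic holder; the class and method names are illustrative.

import javax.annotation.Nullable;

import org.apache.druid.common.guava.SettableSupplier;

public class CarryOverExample
{
  static <T> T configuredOrCarriedOver(@Nullable T configured, SettableSupplier<T> carriedOver)
  {
    // An explicit user setting takes precedence; otherwise fall back to the value
    // derived from the existing segments.
    return configured != null ? configured : carriedOver.get();
  }
}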
use of org.apache.druid.common.guava.SettableSupplier in project druid by druid-io.
the class TaskLockBoxConcurrencyTest method testDoInCriticalSectionWithDifferentTasks.
@Test(timeout = 60_000L)
public void testDoInCriticalSectionWithDifferentTasks()
    throws ExecutionException, InterruptedException, EntryExistsException
{
  final Interval interval = Intervals.of("2017-01-01/2017-01-02");
  final Task lowPriorityTask = NoopTask.create(10);
  final Task highPriorityTask = NoopTask.create(100);
  lockbox.add(lowPriorityTask);
  lockbox.add(highPriorityTask);
  taskStorage.insert(lowPriorityTask, TaskStatus.running(lowPriorityTask.getId()));
  taskStorage.insert(highPriorityTask, TaskStatus.running(highPriorityTask.getId()));

  final SettableSupplier<Integer> intSupplier = new SettableSupplier<>(0);
  final CountDownLatch latch = new CountDownLatch(1);

  // lowPriorityTask acquires a lock first and increments the int of intSupplier in the critical section
  final Future<Integer> lowPriorityFuture = service.submit(() -> {
    final LockResult result = tryTimeChunkLock(TaskLockType.EXCLUSIVE, lowPriorityTask, interval);
    Assert.assertTrue(result.isOk());
    Assert.assertFalse(result.isRevoked());

    return lockbox.doInCriticalSection(
        lowPriorityTask,
        Collections.singletonList(interval),
        CriticalAction.<Integer>builder()
            .onValidLocks(() -> {
              latch.countDown();
              Thread.sleep(100);
              intSupplier.set(intSupplier.get() + 1);
              return intSupplier.get();
            })
            .onInvalidLocks(() -> {
              Assert.fail();
              return null;
            })
            .build()
    );
  });

  // highPriorityTask awaits the latch, acquires a lock, and increments the int of intSupplier
  // in the critical section
  final Future<Integer> highPriorityFuture = service.submit(() -> {
    latch.await();
    final LockResult result = acquireTimeChunkLock(TaskLockType.EXCLUSIVE, highPriorityTask, interval);
    Assert.assertTrue(result.isOk());
    Assert.assertFalse(result.isRevoked());

    return lockbox.doInCriticalSection(
        highPriorityTask,
        Collections.singletonList(interval),
        CriticalAction.<Integer>builder()
            .onValidLocks(() -> {
              Thread.sleep(100);
              intSupplier.set(intSupplier.get() + 1);
              return intSupplier.get();
            })
            .onInvalidLocks(() -> {
              Assert.fail();
              return null;
            })
            .build()
    );
  });

  Assert.assertEquals(1, lowPriorityFuture.get().intValue());
  Assert.assertEquals(2, highPriorityFuture.get().intValue());

  // The lock for lowPriorityTask must have been revoked by highPriorityTask once its work
  // in the critical section is done
  final LockResult result = tryTimeChunkLock(TaskLockType.EXCLUSIVE, lowPriorityTask, interval);
  Assert.assertFalse(result.isOk());
  Assert.assertTrue(result.isRevoked());
}
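One subtlety in this test: the get()-then-set() on the shared SettableSupplier<Integer> is a read-modify-write that is not atomic by itself, so the two increments are correct only because doInCriticalSection serializes them (with the latch enforcing ordering). The sketch below shows the same guarantee with a plain ReentrantLock instead of the lockbox; GuardedCounterExample is a hypothetical name, not Druid code.

import java.util.concurrent.locks.ReentrantLock;

import org.apache.druid.common.guava.SettableSupplier;

public class GuardedCounterExample
{
  private final ReentrantLock lock = new ReentrantLock();
  private final SettableSupplier<Integer> counter = new SettableSupplier<>(0);

  public int incrementAndGet()
  {
    lock.lock();
    try {
      // get() followed by set() is not atomic on its own; it is safe only while
      // holding the lock, just as the test's increments are safe only inside
      // doInCriticalSection.
      counter.set(counter.get() + 1);
      return counter.get();
    } finally {
      lock.unlock();
    }
  }
}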
use of org.apache.druid.common.guava.SettableSupplier in project druid by druid-io.
the class TaskLockBoxConcurrencyTest method testDoInCriticalSectionWithOverlappedIntervals.
@Test(timeout = 60_000L)
public void testDoInCriticalSectionWithOverlappedIntervals() throws Exception
{
  final List<Interval> intervals = ImmutableList.of(
      Intervals.of("2017-01-01/2017-01-02"),
      Intervals.of("2017-01-02/2017-01-03"),
      Intervals.of("2017-01-03/2017-01-04")
  );
  final Task task = NoopTask.create();
  lockbox.add(task);
  taskStorage.insert(task, TaskStatus.running(task.getId()));

  for (Interval interval : intervals) {
    final LockResult result = tryTimeChunkLock(TaskLockType.EXCLUSIVE, task, interval);
    Assert.assertTrue(result.isOk());
  }

  final SettableSupplier<Integer> intSupplier = new SettableSupplier<>(0);
  final CountDownLatch latch = new CountDownLatch(1);

  final Future<Integer> future1 = service.submit(() -> lockbox.doInCriticalSection(
      task,
      intervals.subList(0, 2),
      CriticalAction.<Integer>builder()
          .onValidLocks(() -> {
            latch.countDown();
            Thread.sleep(100);
            intSupplier.set(intSupplier.get() + 1);
            return intSupplier.get();
          })
          .onInvalidLocks(() -> {
            Assert.fail();
            return null;
          })
          .build()
  ));

  final Future<Integer> future2 = service.submit(() -> {
    latch.await();
    return lockbox.doInCriticalSection(
        task,
        intervals.subList(1, 3),
        CriticalAction.<Integer>builder()
            .onValidLocks(() -> {
              Thread.sleep(100);
              intSupplier.set(intSupplier.get() + 1);
              return intSupplier.get();
            })
            .onInvalidLocks(() -> {
              Assert.fail();
              return null;
            })
            .build()
    );
  });

  Assert.assertEquals(1, future1.get().intValue());
  Assert.assertEquals(2, future2.get().intValue());
}
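Both tests use the same CountDownLatch handshake to force an ordering: the second task blocks on await() and proceeds only after the first task, already inside its critical section, calls countDown(). A stripped-down sketch of just that handshake follows; LatchHandshakeExample and its task bodies are illustrative stand-ins for the lockbox calls in the tests.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class LatchHandshakeExample
{
  public static void main(String[] args) throws Exception
  {
    final CountDownLatch latch = new CountDownLatch(1);
    final ExecutorService service = Executors.newFixedThreadPool(2);

    final Future<String> first = service.submit(() -> {
      latch.countDown(); // signal: the first task has entered its work
      Thread.sleep(100); // stand-in for work done inside the critical section
      return "first";
    });
    final Future<String> second = service.submit(() -> {
      latch.await();     // block until the first task has started
      return "second";
    });

    System.out.println(first.get() + ", " + second.get());
    service.shutdown();
  }
}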