Example usage of java.util.concurrent.atomic.AtomicInteger in the druid project (druid-io):
class BaseSequenceTest, method testExceptionThrownInIterator.
@Test
public void testExceptionThrownInIterator() throws Exception {
  final AtomicInteger cleanupCount = new AtomicInteger(0);

  final Sequence<Integer> sequence = new BaseSequence<>(
      new BaseSequence.IteratorMaker<Integer, Iterator<Integer>>() {
        @Override
        public Iterator<Integer> make() {
          // Every operation of this iterator fails, so any traversal aborts immediately.
          return new Iterator<Integer>() {
            @Override
            public boolean hasNext() {
              throw new UnsupportedOperationException();
            }

            @Override
            public Integer next() {
              throw new UnsupportedOperationException();
            }

            @Override
            public void remove() {
              throw new UnsupportedOperationException();
            }
          };
        }

        @Override
        public void cleanup(Iterator<Integer> iterator) {
          // Record the release so the helper can verify cleanup ran.
          cleanupCount.incrementAndGet();
        }
      }
  );

  // cleanup() must still be invoked even though iteration throws.
  SequenceTestHelper.testClosed(cleanupCount, sequence);
}
Example usage of java.util.concurrent.atomic.AtomicInteger in the druid project (druid-io):
class ConcatSequenceTest, method testClosingOfSequenceSequenceWhenExceptionThrown.
@Test
public void testClosingOfSequenceSequenceWhenExceptionThrown() throws Exception {
  final AtomicInteger cleanupInvocations = new AtomicInteger(0);

  // Concatenate a well-behaved sequence with one that fails on iteration.
  final BaseSequence.IteratorMaker<Sequence<Integer>, Iterator<Sequence<Integer>>> maker =
      new BaseSequence.IteratorMaker<Sequence<Integer>, Iterator<Sequence<Integer>>>() {
        @Override
        public Iterator<Sequence<Integer>> make() {
          return Arrays.asList(Sequences.simple(Arrays.asList(1, 2, 3, 4)), new UnsupportedSequence()).iterator();
        }

        @Override
        public void cleanup(Iterator<Sequence<Integer>> iterator) {
          // Count releases so the helper can verify cleanup happened.
          cleanupInvocations.incrementAndGet();
        }
      };

  final Sequence<Integer> concatenated = Sequences.concat(new BaseSequence<>(maker));

  // Even though iteration blows up partway through, the resource must be released.
  SequenceTestHelper.testClosed(cleanupInvocations, concatenated);
}
Example usage of java.util.concurrent.atomic.AtomicInteger in the druid project (druid-io):
class WithEffectSequenceTest, method testConsistentEffectApplicationOrder.
@Test
public void testConsistentEffectApplicationOrder() {
  final AtomicInteger firstEffect = new AtomicInteger();
  final AtomicInteger secondEffect = new AtomicInteger();
  final AtomicInteger invocationClock = new AtomicInteger();

  // The inner effect wraps the source and the outer effect wraps the inner,
  // so the inner one is expected to fire first.
  final Sequence<Integer> inner = Sequences.withEffect(
      Sequences.simple(Arrays.asList(1, 2, 3)),
      () -> firstEffect.set(invocationClock.incrementAndGet()),
      MoreExecutors.sameThreadExecutor()
  );
  final Sequence<Integer> sequence = Sequences.withEffect(
      inner,
      () -> secondEffect.set(invocationClock.incrementAndGet()),
      MoreExecutors.sameThreadExecutor()
  );

  // Drive the sequence through accumulate().
  Sequences.toList(sequence, new ArrayList<Integer>());
  Assert.assertEquals(1, firstEffect.get());
  Assert.assertEquals(2, secondEffect.get());

  // Drive the sequence through a Yielder as well: LimitedSequence extends
  // YieldingSequenceBase, which implements accumulate() on top of yielder().
  // "Limiting" the 3-element sequence with 4 lets the effects execute; with a
  // limit of 1 or 2 the sequence is cut short and the effects would not run.
  final Sequence<Integer> yieldingSequence = Sequences.limit(sequence, 4);
  Sequences.toList(yieldingSequence, new ArrayList<Integer>());
  Assert.assertEquals(3, firstEffect.get());
  Assert.assertEquals(4, secondEffect.get());
}
Example usage of java.util.concurrent.atomic.AtomicInteger in the druid project (druid-io):
class OffheapIncrementalIndex, method addToFacts.
/**
 * Aggregates {@code row} into off-heap buffers. The first time {@code key} is seen, a new
 * aggregator slot is carved out of the most recent buffer (or, if it is full, a fresh buffer
 * taken from {@code bufferPool}); subsequent rows with the same key aggregate in place.
 *
 * Slot allocation and the facts-table update run under {@code synchronized (this)}; the
 * aggregation itself runs outside it, locking only the individual {@link BufferAggregator}.
 *
 * @return the current number of distinct rows (entries) in the index
 * @throws IndexSizeExceededException if adding a new row would exceed {@code maxRowCount}
 */
@Override
protected Integer addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, TimeAndDims key, ThreadLocal<InputRow> rowContainer, Supplier<InputRow> rowSupplier) throws IndexSizeExceededException {
  ByteBuffer aggBuffer;
  int bufferIndex;
  int bufferOffset;

  synchronized (this) {
    final Integer priorIndex = facts.getPriorIndex(key);
    if (null != priorIndex) {
      // Known key: look up where its aggregators live.
      final int[] indexAndOffset = indexAndOffsets.get(priorIndex);
      bufferIndex = indexAndOffset[0];
      bufferOffset = indexAndOffset[1];
      aggBuffer = aggBuffers.get(bufferIndex).get();
    } else {
      if (metrics.length > 0 && getAggs()[0] == null) {
        // note: creation of Aggregators is done lazily when at least one row from input is available
        // so that FilteredAggregators could be initialized correctly.
        rowContainer.set(row);
        for (int i = 0; i < metrics.length; i++) {
          final AggregatorFactory agg = metrics[i];
          getAggs()[i] = agg.factorizeBuffered(makeColumnSelectorFactory(agg, rowSupplier, deserializeComplexMetrics));
        }
        rowContainer.set(null);
      }

      bufferIndex = aggBuffers.size() - 1;
      ByteBuffer lastBuffer = aggBuffers.isEmpty() ? null : aggBuffers.get(aggBuffers.size() - 1).get();
      int[] lastAggregatorsIndexAndOffset = indexAndOffsets.isEmpty() ? null : indexAndOffsets.get(indexAndOffsets.size() - 1);

      // Sanity: the most recently added row must live in the most recently added buffer.
      if (lastAggregatorsIndexAndOffset != null && lastAggregatorsIndexAndOffset[0] != bufferIndex) {
        throw new ISE("last row's aggregate's buffer and last buffer index must be same");
      }

      // Candidate offset: one aggregator block past the last row's offset.
      bufferOffset = aggsTotalSize + (lastAggregatorsIndexAndOffset != null ? lastAggregatorsIndexAndOffset[1] : 0);
      if (lastBuffer != null && lastBuffer.capacity() - bufferOffset >= aggsTotalSize) {
        // Enough room remains in the current buffer.
        aggBuffer = lastBuffer;
      } else {
        // No buffer yet, or the current one is full: take a fresh one from the pool.
        ResourceHolder<ByteBuffer> bb = bufferPool.take();
        aggBuffers.add(bb);
        bufferIndex = aggBuffers.size() - 1;
        bufferOffset = 0;
        aggBuffer = bb.get();
      }

      for (int i = 0; i < metrics.length; i++) {
        getAggs()[i].init(aggBuffer, bufferOffset + aggOffsetInBuffer[i]);
      }

      // Last ditch sanity checks
      if (numEntries.get() >= maxRowCount && facts.getPriorIndex(key) == null) {
        throw new IndexSizeExceededException("Maximum number of rows [%d] reached", maxRowCount);
      }

      final Integer rowIndex = indexIncrement.getAndIncrement();

      // note that indexAndOffsets must be updated before facts, because as soon as we update facts
      // concurrent readers get hold of it and might ask for newly added row
      indexAndOffsets.add(new int[]{bufferIndex, bufferOffset});
      final Integer prev = facts.putIfAbsent(key, rowIndex);
      if (null == prev) {
        numEntries.incrementAndGet();
      } else {
        // Unreachable in principle: the getPriorIndex/putIfAbsent pair runs under this
        // object's monitor, so no other thread can insert the key in between.
        // (Fixed typo in the message: "sychronized" -> "synchronized".)
        throw new ISE("WTF! we are in synchronized block.");
      }
    }
  }

  // Aggregation happens outside the index-wide lock; each BufferAggregator is
  // locked individually so concurrent adds for different rows can proceed.
  rowContainer.set(row);
  for (int i = 0; i < metrics.length; i++) {
    final BufferAggregator agg = getAggs()[i];
    synchronized (agg) {
      try {
        agg.aggregate(aggBuffer, bufferOffset + aggOffsetInBuffer[i]);
      } catch (ParseException e) {
        // "aggregate" can throw ParseExceptions if a selector expects something but gets something else.
        if (reportParseExceptions) {
          throw new ParseException(e, "Encountered parse error for aggregator[%s]", getMetricAggs()[i].getName());
        } else {
          log.debug(e, "Encountered parse error, skipping aggregator[%s].", getMetricAggs()[i].getName());
        }
      }
    }
  }
  rowContainer.set(null);
  return numEntries.get();
}
Example usage of java.util.concurrent.atomic.AtomicInteger in the druid project (druid-io):
class OnheapIncrementalIndex, method addToFacts.
/**
 * Aggregates {@code row} on-heap: re-uses the existing Aggregator array when {@code key}
 * is already in {@code facts}, otherwise creates and registers a new one, racing other
 * writers via {@code facts.putIfAbsent}.
 *
 * @return the current number of distinct rows (entries) in the index
 * @throws IndexSizeExceededException if adding a new row would exceed {@code maxRowCount}
 */
@Override
protected Integer addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, TimeAndDims key, ThreadLocal<InputRow> rowContainer, Supplier<InputRow> rowSupplier) throws IndexSizeExceededException {
  final Integer priorIndex = facts.getPriorIndex(key);
  Aggregator[] aggs;
  if (null != priorIndex) {
    // Known key: aggregate into the existing per-row aggregators.
    aggs = concurrentGet(priorIndex);
    doAggregate(metrics, aggs, rowContainer, row, reportParseExceptions);
  } else {
    // Unseen key: build fresh aggregators, aggregate this row into them, and
    // publish them under a new row index BEFORE attempting to claim the key.
    aggs = new Aggregator[metrics.length];
    factorizeAggs(metrics, aggs, rowContainer, row);
    doAggregate(metrics, aggs, rowContainer, row, reportParseExceptions);
    final Integer rowIndex = indexIncrement.getAndIncrement();
    // NOTE(review): concurrentSet must precede facts.putIfAbsent so readers that
    // see the new index find the aggregators — presumed from ordering; confirm.
    concurrentSet(rowIndex, aggs);
    // Last ditch sanity checks
    if (numEntries.get() >= maxRowCount && facts.getPriorIndex(key) == null) {
      throw new IndexSizeExceededException("Maximum number of rows [%d] reached", maxRowCount);
    }
    final Integer prev = facts.putIfAbsent(key, rowIndex);
    if (null == prev) {
      // We won: this thread's rowIndex is now the canonical entry for the key.
      numEntries.incrementAndGet();
    } else {
      // We lost a race
      // Re-aggregate this row into the winner's aggregators; our own copy is discarded.
      aggs = concurrentGet(prev);
      doAggregate(metrics, aggs, rowContainer, row, reportParseExceptions);
      // Free up the misfire
      concurrentRemove(rowIndex);
      // This is expected to occur ~80% of the time in the worst scenarios
    }
  }
  return numEntries.get();
}
Aggregations