use of io.druid.data.input.InputRow in project druid by druid-io.
the class IndexPersistBenchmark method setup2.
@Setup(Level.Iteration)
public void setup2() throws IOException {
  incIndex = makeIncIndex();

  for (int i = 0; i < rowsPerSegment; i++) {
    InputRow row = rows.get(i);
    incIndex.add(row);
  }
}
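The rows list consumed above is assumed to be populated by an earlier @Setup method that is not shown here. For context, a minimal sketch of hand-building such a list with MapBasedInputRow, the basic map-backed InputRow implementation; the column names "dimA" and "metA" are hypothetical stand-ins for the benchmark schema:

import com.google.common.collect.ImmutableList;
import io.druid.data.input.InputRow;
import io.druid.data.input.MapBasedInputRow;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch: build the kind of InputRow list that setup2() iterates.
// MapBasedInputRow takes (timestampMillis, dimensionNames, eventMap).
int rowsPerSegment = 75000;
List<InputRow> rows = new ArrayList<>(rowsPerSegment);
long baseTimestamp = System.currentTimeMillis();
for (int i = 0; i < rowsPerSegment; i++) {
  Map<String, Object> event = new HashMap<>();
  event.put("dimA", "value" + (i % 100)); // hypothetical string dimension
  event.put("metA", i);                   // hypothetical numeric metric
  rows.add(new MapBasedInputRow(baseTimestamp + i, ImmutableList.of("dimA"), event));
}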
use of io.druid.data.input.InputRow in project druid by druid-io.
the class FilteredAggregatorBenchmark method ingest.
// Filtered agg doesn't work with ingestion, cardinality is not supported in incremental index
// See https://github.com/druid-io/druid/issues/3164
// @Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void ingest(Blackhole blackhole) throws Exception {
  incIndexFilteredAgg = makeIncIndex(filteredMetrics);

  for (InputRow row : inputRows) {
    int rv = incIndexFilteredAgg.add(row);
    blackhole.consume(rv);
  }
}
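The @Benchmark annotation is commented out because, per the linked issue, this aggregation is not supported at ingestion time by the incremental index. A hedged sketch of what the filteredMetrics argument might contain, wrapping a count aggregator in a selector filter; the output name, dimension, and value ("rowsWithDimAFoo", "dimA", "foo") are illustrative, not taken from the benchmark:

import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.query.aggregation.FilteredAggregatorFactory;
import io.druid.query.filter.SelectorDimFilter;

// Sketch: a filtered count over rows where dimA == "foo". The wrapped
// aggregator only advances when the filter matches the incoming row.
AggregatorFactory[] filteredMetrics = new AggregatorFactory[]{
    new FilteredAggregatorFactory(
        new CountAggregatorFactory("rowsWithDimAFoo"),
        new SelectorDimFilter("dimA", "foo", null)
    )
};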
use of io.druid.data.input.InputRow in project druid by druid-io.
the class TopNTypeInterfaceBenchmark method setup.
@Setup
public void setup() throws IOException {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());

  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }

  executorService = Execs.multiThreaded(numSegments, "TopNThreadPool");

  setupQueries();

  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get("basic");

  queryBuilder = SCHEMA_QUERY_MAP.get("basic").get("string");
  queryBuilder.threshold(threshold);
  stringQuery = queryBuilder.build();

  TopNQueryBuilder longBuilder = SCHEMA_QUERY_MAP.get("basic").get("long");
  longBuilder.threshold(threshold);
  longQuery = longBuilder.build();

  TopNQueryBuilder floatBuilder = SCHEMA_QUERY_MAP.get("basic").get("float");
  floatBuilder.threshold(threshold);
  floatQuery = floatBuilder.build();

  incIndexes = new ArrayList<>();
  for (int i = 0; i < numSegments; i++) {
    log.info("Generating rows for segment " + i);

    BenchmarkDataGenerator gen = new BenchmarkDataGenerator(
        schemaInfo.getColumnSchemas(),
        RNG_SEED + i,
        schemaInfo.getDataInterval(),
        rowsPerSegment
    );

    IncrementalIndex incIndex = makeIncIndex();

    for (int j = 0; j < rowsPerSegment; j++) {
      InputRow row = gen.nextRow();
      if (j % 10000 == 0) {
        log.info(j + " rows generated.");
      }
      incIndex.add(row);
    }
    incIndexes.add(incIndex);
  }

  File tmpFile = Files.createTempDir();
  log.info("Using temp dir: " + tmpFile.getAbsolutePath());
  tmpFile.deleteOnExit();

  qIndexes = new ArrayList<>();
  for (int i = 0; i < numSegments; i++) {
    File indexFile = INDEX_MERGER_V9.persist(incIndexes.get(i), tmpFile, new IndexSpec());
    QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
    qIndexes.add(qIndex);
  }

  factory = new TopNQueryRunnerFactory(
      new StupidPool<>(
          "TopNBenchmark-compute-bufferPool",
          new OffheapBufferGenerator("compute", 250000000),
          0,
          Integer.MAX_VALUE
      ),
      new TopNQueryQueryToolChest(
          new TopNQueryConfig(),
          QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()
      ),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
}
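SCHEMA_QUERY_MAP holds pre-registered TopNQueryBuilders keyed by schema name and dimension type; setup() stamps the configured threshold onto each and builds the string, long, and float queries. A hedged sketch of constructing one such builder, using an illustrative dimension, interval, and aggregator rather than the benchmark's actual "basic" schema definitions:

import java.util.Arrays;

import io.druid.granularity.QueryGranularities;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.query.spec.MultipleIntervalSegmentSpec;
import io.druid.query.topn.TopNQueryBuilder;
import org.joda.time.Interval;

// Sketch: a string-typed TopN builder like the one fetched for ("basic", "string").
// "dimSequential" and the interval are illustrative column/range names.
TopNQueryBuilder queryBuilder = new TopNQueryBuilder()
    .dataSource("basic")
    .granularity(QueryGranularities.ALL)
    .dimension("dimSequential")
    .metric("rows")
    .intervals(new MultipleIntervalSegmentSpec(
        Arrays.asList(new Interval("2000-01-01/2010-01-01"))))
    .aggregators(Arrays.<AggregatorFactory>asList(new CountAggregatorFactory("rows")));
// setup() then applies the threshold and builds: queryBuilder.threshold(threshold).build()

Note also that the compute pool handed to the factory above allocates 250,000,000-byte (roughly 250 MB) off-heap buffers for query processing.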
use of io.druid.data.input.InputRow in project druid by druid-io.
the class KafkaIndexTask method run.
@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
  log.info("Starting up!");

  startTime = DateTime.now();
  mapper = toolbox.getObjectMapper();
  status = Status.STARTING;

  if (chatHandlerProvider.isPresent()) {
    log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
    chatHandlerProvider.get().register(getId(), this, false);
  } else {
    log.warn("No chat handler detected");
  }

  runThread = Thread.currentThread();

  // Set up FireDepartmentMetrics
  final FireDepartment fireDepartmentForMetrics = new FireDepartment(
      dataSchema,
      new RealtimeIOConfig(null, null, null),
      null
  );
  fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
  toolbox.getMonitorScheduler().addMonitor(
      new RealtimeMetricsMonitor(
          ImmutableList.of(fireDepartmentForMetrics),
          ImmutableMap.of(DruidMetrics.TASK_ID, new String[]{getId()})
      )
  );

  try (
      final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
      final FiniteAppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics);
      final KafkaConsumer<byte[], byte[]> consumer = newConsumer()
  ) {
    appenderator = appenderator0;

    final String topic = ioConfig.getStartPartitions().getTopic();

    // Start up, set up initial offsets.
    final Object restoredMetadata = driver.startJob();
    if (restoredMetadata == null) {
      nextOffsets.putAll(ioConfig.getStartPartitions().getPartitionOffsetMap());
    } else {
      final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
      final KafkaPartitions restoredNextPartitions = toolbox.getObjectMapper().convertValue(
          restoredMetadataMap.get(METADATA_NEXT_PARTITIONS),
          KafkaPartitions.class
      );
      nextOffsets.putAll(restoredNextPartitions.getPartitionOffsetMap());

      // Sanity checks.
      if (!restoredNextPartitions.getTopic().equals(ioConfig.getStartPartitions().getTopic())) {
        throw new ISE(
            "WTF?! Restored topic[%s] but expected topic[%s]",
            restoredNextPartitions.getTopic(),
            ioConfig.getStartPartitions().getTopic()
        );
      }
      if (!nextOffsets.keySet().equals(ioConfig.getStartPartitions().getPartitionOffsetMap().keySet())) {
        throw new ISE(
            "WTF?! Restored partitions[%s] but expected partitions[%s]",
            nextOffsets.keySet(),
            ioConfig.getStartPartitions().getPartitionOffsetMap().keySet()
        );
      }
    }

    // Set up sequenceNames.
    final Map<Integer, String> sequenceNames = Maps.newHashMap();
    for (Integer partitionNum : nextOffsets.keySet()) {
      sequenceNames.put(partitionNum, String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
    }

    // Set up committer.
    final Supplier<Committer> committerSupplier = new Supplier<Committer>() {
      @Override
      public Committer get() {
        final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);
        return new Committer() {
          @Override
          public Object getMetadata() {
            return ImmutableMap.of(
                METADATA_NEXT_PARTITIONS,
                new KafkaPartitions(ioConfig.getStartPartitions().getTopic(), snapshot)
            );
          }

          @Override
          public void run() {
            // Do nothing.
          }
        };
      }
    };

    Set<Integer> assignment = assignPartitionsAndSeekToNext(consumer, topic);

    // Main loop.
    // Could eventually support leader/follower mode (for keeping replicas more in sync)
    boolean stillReading = !assignment.isEmpty();
    status = Status.READING;
    try {
      while (stillReading) {
        if (possiblyPause(assignment)) {
          // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
          // partitions upon resuming. This is safe even if the end offsets have not been modified.
          assignment = assignPartitionsAndSeekToNext(consumer, topic);
          if (assignment.isEmpty()) {
            log.info("All partitions have been fully read");
            publishOnStop = true;
            stopRequested = true;
          }
        }

        if (stopRequested) {
          break;
        }

        // The retrying business is because the KafkaConsumer throws OffsetOutOfRangeException if the seeked-to
        // offset is not present in the topic-partition. This can happen if we're asking a task to read from data
        // that has not been written yet (which is totally legitimate). So let's wait for it to show up.
        ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty();
        try {
          records = consumer.poll(POLL_TIMEOUT);
        } catch (OffsetOutOfRangeException e) {
          log.warn("OffsetOutOfRangeException with message [%s]", e.getMessage());
          possiblyResetOffsetsOrWait(e.offsetOutOfRangePartitions(), consumer, toolbox);
          stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
        }

        for (ConsumerRecord<byte[], byte[]> record : records) {
          if (log.isTraceEnabled()) {
            log.trace(
                "Got topic[%s] partition[%d] offset[%,d].",
                record.topic(),
                record.partition(),
                record.offset()
            );
          }

          if (record.offset() < endOffsets.get(record.partition())) {
            if (record.offset() != nextOffsets.get(record.partition())) {
              throw new ISE(
                  "WTF?! Got offset[%,d] after offset[%,d] in partition[%d].",
                  record.offset(),
                  nextOffsets.get(record.partition()),
                  record.partition()
              );
            }

            try {
              final byte[] valueBytes = record.value();
              if (valueBytes == null) {
                throw new ParseException("null value");
              }

              final InputRow row = Preconditions.checkNotNull(parser.parse(ByteBuffer.wrap(valueBytes)), "row");

              if (!ioConfig.getMinimumMessageTime().isPresent() ||
                  !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {
                final SegmentIdentifier identifier = driver.add(row, sequenceNames.get(record.partition()), committerSupplier);
                if (identifier == null) {
                  // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                  throw new ISE("Could not allocate segment for row with timestamp[%s]", row.getTimestamp());
                }
                fireDepartmentMetrics.incrementProcessed();
              } else {
                fireDepartmentMetrics.incrementThrownAway();
              }
            } catch (ParseException e) {
              if (tuningConfig.isReportParseExceptions()) {
                throw e;
              } else {
                log.debug(e, "Dropping unparseable row from partition[%d] offset[%,d].", record.partition(), record.offset());
                fireDepartmentMetrics.incrementUnparseable();
              }
            }

            nextOffsets.put(record.partition(), record.offset() + 1);
          }

          if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition())) &&
              assignment.remove(record.partition())) {
            log.info("Finished reading topic[%s], partition[%,d].", record.topic(), record.partition());
            assignPartitions(consumer, topic, assignment);
            stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
          }
        }
      }
    } finally {
      // persist pending data
      driver.persist(committerSupplier.get());
    }

    synchronized (statusLock) {
      if (stopRequested && !publishOnStop) {
        throw new InterruptedException("Stopping without publishing");
      }
      status = Status.PUBLISHING;
    }

    final TransactionalSegmentPublisher publisher = new TransactionalSegmentPublisher() {
      @Override
      public boolean publishSegments(Set<DataSegment> segments, Object commitMetadata) throws IOException {
        final KafkaPartitions finalPartitions = toolbox.getObjectMapper().convertValue(
            ((Map) commitMetadata).get(METADATA_NEXT_PARTITIONS),
            KafkaPartitions.class
        );

        // Sanity check, we should only be publishing things that match our desired end state.
        if (!endOffsets.equals(finalPartitions.getPartitionOffsetMap())) {
          throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
        }

        final SegmentTransactionalInsertAction action;
        if (ioConfig.isUseTransaction()) {
          action = new SegmentTransactionalInsertAction(
              segments,
              new KafkaDataSourceMetadata(ioConfig.getStartPartitions()),
              new KafkaDataSourceMetadata(finalPartitions)
          );
        } else {
          action = new SegmentTransactionalInsertAction(segments, null, null);
        }

        log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());
        return toolbox.getTaskActionClient().submit(action).isSuccess();
      }
    };

    final SegmentsAndMetadata published = driver.finish(publisher, committerSupplier.get());
    if (published == null) {
      throw new ISE("Transaction failure publishing segments, aborting");
    } else {
      log.info(
          "Published segments[%s] with metadata[%s].",
          Joiner.on(", ").join(
              Iterables.transform(
                  published.getSegments(),
                  new Function<DataSegment, String>() {
                    @Override
                    public String apply(DataSegment input) {
                      return input.getIdentifier();
                    }
                  }
              )
          ),
          published.getCommitMetadata()
      );
    }
  } catch (InterruptedException | RejectedExecutionException e) {
    // handle the InterruptedException that gets wrapped in a RejectedExecutionException
    if (e instanceof RejectedExecutionException &&
        (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
      throw e;
    }

    // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
    if (!stopRequested) {
      Thread.currentThread().interrupt();
      throw e;
    }

    log.info("The task was asked to stop before completing");
  } finally {
    if (chatHandlerProvider.isPresent()) {
      chatHandlerProvider.get().unregister(getId());
    }
  }

  return success();
}
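The core of the read loop is per-partition offset bookkeeping: a record is consumed only if its offset is exactly the next expected offset, nextOffsets then advances by one, and a partition leaves the assignment once its next offset reaches the configured end offset. A self-contained sketch of that invariant with no Kafka dependency; the partition number and offsets are illustrative:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class OffsetInvariantSketch {
  public static void main(String[] args) {
    // Partition 0 should be read from offset 0 up to (exclusive) end offset 3.
    Map<Integer, Long> nextOffsets = new HashMap<>();
    Map<Integer, Long> endOffsets = new HashMap<>();
    nextOffsets.put(0, 0L);
    endOffsets.put(0, 3L);
    Set<Integer> assignment = new HashSet<>();
    assignment.add(0);

    long[] incomingOffsets = {0L, 1L, 2L}; // simulated records for partition 0
    for (long offset : incomingOffsets) {
      int partition = 0;
      if (offset < endOffsets.get(partition)) {
        // Offsets must arrive contiguously; a gap or replay is a hard error.
        if (offset != nextOffsets.get(partition)) {
          throw new IllegalStateException("Unexpected offset " + offset);
        }
        // ... parse the record and add the row here ...
        nextOffsets.put(partition, offset + 1);
      }
      // Drop the partition from the assignment once it is fully read.
      if (nextOffsets.get(partition).equals(endOffsets.get(partition)) && assignment.remove(partition)) {
        System.out.println("Finished partition " + partition);
      }
    }
  }
}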
use of io.druid.data.input.InputRow in project druid by druid-io.
the class BaseFilterTest method selectColumnValuesMatchingFilterUsingRowBasedColumnSelectorFactory.
private List<String> selectColumnValuesMatchingFilterUsingRowBasedColumnSelectorFactory(
    final DimFilter filter,
    final String selectColumn
) {
  // Generate the row signature (column name -> value type) from the adapter.
  final Map<String, ValueType> rowSignature = Maps.newHashMap();
  for (String columnName : Iterables.concat(adapter.getAvailableDimensions(), adapter.getAvailableMetrics())) {
    rowSignature.put(columnName, adapter.getColumnCapabilities(columnName).getType());
  }

  // Perform the test: feed each row through a row-based column selector factory
  // and collect the selected column's raw value wherever the filter matches.
  final SettableSupplier<InputRow> rowSupplier = new SettableSupplier<>();
  final ValueMatcher matcher = makeFilter(filter).makeMatcher(
      VIRTUAL_COLUMNS.wrap(RowBasedColumnSelectorFactory.create(rowSupplier, rowSignature))
  );

  final List<String> values = Lists.newArrayList();
  for (InputRow row : rows) {
    rowSupplier.set(row);
    if (matcher.matches()) {
      values.add((String) row.getRaw(selectColumn));
    }
  }
  return values;
}
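A hedged example of how a test might call this helper, selecting dim0 from rows where dim0 equals "1"; the column name and expected result are illustrative, not taken from the actual fixture:

import com.google.common.collect.ImmutableList;
import io.druid.query.filter.SelectorDimFilter;
import java.util.List;
import org.junit.Assert;

// Hypothetical call site inside a BaseFilterTest subclass.
List<String> matched = selectColumnValuesMatchingFilterUsingRowBasedColumnSelectorFactory(
    new SelectorDimFilter("dim0", "1", null),
    "dim0"
);
Assert.assertEquals(ImmutableList.of("1"), matched);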