Use of org.apache.druid.data.input.impl.LongDimensionSchema in project druid by druid-io.
The class CompactionTaskTest, method setupClass.
@BeforeClass
public static void setupClass() {
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-01-01/2017-02-01"), new StringDimensionSchema(MIXED_TYPE_COLUMN));
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-02-01/2017-03-01"), new StringDimensionSchema(MIXED_TYPE_COLUMN));
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-03-01/2017-04-01"), new StringDimensionSchema(MIXED_TYPE_COLUMN));
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-04-01/2017-05-01"), new StringDimensionSchema(MIXED_TYPE_COLUMN));
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-05-01/2017-06-01"), new DoubleDimensionSchema(MIXED_TYPE_COLUMN));
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-06-01/2017-07-01"), new DoubleDimensionSchema(MIXED_TYPE_COLUMN));
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-06-01/2017-06-02"), new DoubleDimensionSchema(MIXED_TYPE_COLUMN));
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-06-15/2017-06-16"), new DoubleDimensionSchema(MIXED_TYPE_COLUMN));
  MIXED_TYPE_COLUMN_MAP.put(Intervals.of("2017-06-30/2017-07-01"), new DoubleDimensionSchema(MIXED_TYPE_COLUMN));
  DIMENSIONS = new HashMap<>();
  AGGREGATORS = new ArrayList<>();
  DIMENSIONS.put(ColumnHolder.TIME_COLUMN_NAME, new LongDimensionSchema(ColumnHolder.TIME_COLUMN_NAME));
  DIMENSIONS.put(TIMESTAMP_COLUMN, new LongDimensionSchema(TIMESTAMP_COLUMN));
  int numUmbrellaIntervals = 6;
  for (int i = 0; i < numUmbrellaIntervals; i++) {
    final StringDimensionSchema schema = new StringDimensionSchema("string_dim_" + i, null, null);
    DIMENSIONS.put(schema.getName(), schema);
  }
  for (int i = 0; i < numUmbrellaIntervals; i++) {
    final LongDimensionSchema schema = new LongDimensionSchema("long_dim_" + i);
    DIMENSIONS.put(schema.getName(), schema);
  }
  for (int i = 0; i < numUmbrellaIntervals; i++) {
    final FloatDimensionSchema schema = new FloatDimensionSchema("float_dim_" + i);
    DIMENSIONS.put(schema.getName(), schema);
  }
  for (int i = 0; i < numUmbrellaIntervals; i++) {
    final DoubleDimensionSchema schema = new DoubleDimensionSchema("double_dim_" + i);
    DIMENSIONS.put(schema.getName(), schema);
  }
  AGGREGATORS.add(new CountAggregatorFactory("agg_0"));
  AGGREGATORS.add(new LongSumAggregatorFactory("agg_1", "long_dim_1"));
  AGGREGATORS.add(new LongMaxAggregatorFactory("agg_2", "long_dim_2"));
  AGGREGATORS.add(new FloatFirstAggregatorFactory("agg_3", "float_dim_3", null));
  AGGREGATORS.add(new DoubleLastAggregatorFactory("agg_4", "double_dim_4", null));
  for (int i = 0; i < SEGMENT_INTERVALS.size(); i++) {
    SEGMENT_MAP.put(
        new DataSegment(
            DATA_SOURCE,
            SEGMENT_INTERVALS.get(i),
            "version_" + i,
            ImmutableMap.of(),
            findDimensions(i, SEGMENT_INTERVALS.get(i)),
            AGGREGATORS.stream().map(AggregatorFactory::getName).collect(Collectors.toList()),
            new NumberedShardSpec(0, 1),
            0,
            SEGMENT_SIZE_BYTES
        ),
        new File("file_" + i)
    );
  }
  SEGMENTS = new ArrayList<>(SEGMENT_MAP.keySet());
}
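As a hedged, standalone sketch (the class and column names below are hypothetical and not taken from CompactionTaskTest), explicitly typed schemas like the ones registered above can be collected into a DimensionsSpec:

// Standalone sketch: class and column names are hypothetical; it only illustrates
// combining typed dimension schemas into a single DimensionsSpec.
import com.google.common.collect.ImmutableList;
import org.apache.druid.data.input.impl.DimensionSchema;
import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.DoubleDimensionSchema;
import org.apache.druid.data.input.impl.FloatDimensionSchema;
import org.apache.druid.data.input.impl.LongDimensionSchema;
import org.apache.druid.data.input.impl.StringDimensionSchema;

public class TypedDimensionsSketch {
  public static DimensionsSpec buildSpec() {
    // Each schema binds one column name to one value type.
    return new DimensionsSpec(
        ImmutableList.<DimensionSchema>of(
            new StringDimensionSchema("string_dim_0"),
            new LongDimensionSchema("long_dim_0"),
            new FloatDimensionSchema("float_dim_0"),
            new DoubleDimensionSchema("double_dim_0")
        )
    );
  }
}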
Use of org.apache.druid.data.input.impl.LongDimensionSchema in project druid by druid-io.
The class BloomFilterSqlAggregatorTest, method createQuerySegmentWalker.
@Override
public SpecificSegmentsQuerySegmentWalker createQuerySegmentWalker() throws IOException {
  InputRowParser parser = new MapInputRowParser(
      new TimeAndDimsParseSpec(
          new TimestampSpec("t", "iso", null),
          new DimensionsSpec(
              ImmutableList.<DimensionSchema>builder()
                  .addAll(DimensionsSpec.getDefaultSchemas(ImmutableList.of("dim1", "dim2", "dim3")))
                  .add(new DoubleDimensionSchema("d1"))
                  .add(new FloatDimensionSchema("f1"))
                  .add(new LongDimensionSchema("l1"))
                  .build()
          )
      )
  );
  final QueryableIndex index = IndexBuilder
      .create()
      .tmpDir(temporaryFolder.newFolder())
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(
                  new CountAggregatorFactory("cnt"),
                  new DoubleSumAggregatorFactory("m1", "m1")
              )
              .withDimensionsSpec(parser)
              .withRollup(false)
              .build()
      )
      .rows(CalciteTests.ROWS1_WITH_NUMERIC_DIMS)
      .buildMMappedIndex();
  return new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
      DataSegment.builder()
          .dataSource(DATA_SOURCE)
          .interval(index.getDataInterval())
          .version("1")
          .shardSpec(new LinearShardSpec(0))
          .size(0)
          .build(),
      index
  );
}
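A minimal sketch of the pattern used in the parser above, assuming a hypothetical standalone class: DimensionsSpec.getDefaultSchemas treats plain column names as string dimensions, while numeric columns such as "l1" are declared with an explicit schema type like LongDimensionSchema.

// Standalone sketch (hypothetical class name, not part of BloomFilterSqlAggregatorTest).
import com.google.common.collect.ImmutableList;
import java.util.List;
import org.apache.druid.data.input.impl.DimensionSchema;
import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.LongDimensionSchema;

public class DefaultSchemasSketch {
  public static void main(String[] args) {
    // Untyped names become string-typed schemas.
    List<DimensionSchema> defaults =
        DimensionsSpec.getDefaultSchemas(ImmutableList.of("dim1", "dim2", "dim3"));
    defaults.forEach(schema -> System.out.println(schema.getName()));
    // A long-typed column must be declared explicitly.
    DimensionSchema longColumn = new LongDimensionSchema("l1");
    System.out.println(longColumn.getName());
  }
}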
Use of org.apache.druid.data.input.impl.LongDimensionSchema in project druid by druid-io.
The class DimensionHandlerUtilsTest, method testGetHandlerFromLongCapabilities.
@Test
public void testGetHandlerFromLongCapabilities() {
  ColumnCapabilities capabilities = ColumnCapabilitiesImpl.createSimpleNumericColumnCapabilities(ColumnType.LONG);
  DimensionHandler handler = DimensionHandlerUtils.getHandlerFromCapabilities(DIM_NAME, capabilities, null);
  Assert.assertTrue(handler instanceof LongDimensionHandler);
  Assert.assertTrue(handler.getDimensionSchema(capabilities) instanceof LongDimensionSchema);
}
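Assuming the same test fixtures (DIM_NAME and the JUnit imports), the analogous round trip for a FLOAT column would look like the sketch below; it mirrors the LONG case above rather than being copied from the project.

  // Hedged sketch: FLOAT capabilities resolve to a FloatDimensionHandler, and the
  // handler reports a FloatDimensionSchema for those capabilities.
  ColumnCapabilities floatCapabilities =
      ColumnCapabilitiesImpl.createSimpleNumericColumnCapabilities(ColumnType.FLOAT);
  DimensionHandler floatHandler =
      DimensionHandlerUtils.getHandlerFromCapabilities(DIM_NAME, floatCapabilities, null);
  Assert.assertTrue(floatHandler instanceof FloatDimensionHandler);
  Assert.assertTrue(floatHandler.getDimensionSchema(floatCapabilities) instanceof FloatDimensionSchema);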
Use of org.apache.druid.data.input.impl.LongDimensionSchema in project druid by druid-io.
The class KafkaIndexTaskTest, method testKafkaRecordEntityInputFormat.
@Test(timeout = 60_000L)
public void testKafkaRecordEntityInputFormat() throws Exception {
  // Insert data
  insertData(Iterables.limit(records, 3));
  final KafkaIndexTask task = createTask(
      null,
      new DataSchema(
          "test_ds",
          new TimestampSpec("timestamp", "iso", null),
          new DimensionsSpec(
              Arrays.asList(
                  new StringDimensionSchema("dim1"),
                  new StringDimensionSchema("dim1t"),
                  new StringDimensionSchema("dim2"),
                  new LongDimensionSchema("dimLong"),
                  new FloatDimensionSchema("dimFloat"),
                  new StringDimensionSchema("kafka.topic"),
                  new LongDimensionSchema("kafka.offset"),
                  new StringDimensionSchema("kafka.header.encoding")
              )
          ),
          new AggregatorFactory[] {
              new DoubleSumAggregatorFactory("met1sum", "met1"),
              new CountAggregatorFactory("rows")
          },
          new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, null),
          null
      ),
      new KafkaIndexTaskIOConfig(
          0,
          "sequence0",
          new SeekableStreamStartSequenceNumbers<>(topic, ImmutableMap.of(0, 0L), ImmutableSet.of()),
          new SeekableStreamEndSequenceNumbers<>(topic, ImmutableMap.of(0, 5L)),
          kafkaServer.consumerProperties(),
          KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS,
          true,
          null,
          null,
          new TestKafkaInputFormat(INPUT_FORMAT)
      )
  );
  Assert.assertTrue(task.supportsQueries());
  final ListenableFuture<TaskStatus> future = runTask(task);
  while (countEvents(task) != 3) {
    Thread.sleep(25);
  }
  Assert.assertEquals(Status.READING, task.getRunner().getStatus());
  final QuerySegmentSpec interval = OBJECT_MAPPER.readValue("\"2008/2012\"", QuerySegmentSpec.class);
  List<ScanResultValue> scanResultValues = scanData(task, interval);
  // verify that there are no records indexed in the rolled-back time period
  Assert.assertEquals(3, Iterables.size(scanResultValues));
  int i = 0;
  for (ScanResultValue result : scanResultValues) {
    final Map<String, Object> event = ((List<Map<String, Object>>) result.getEvents()).get(0);
    Assert.assertEquals((long) i++, event.get("kafka.offset"));
    Assert.assertEquals(topic, event.get("kafka.topic"));
    Assert.assertEquals("application/json", event.get("kafka.header.encoding"));
  }
  // insert remaining data
  insertData(Iterables.skip(records, 3));
  // Wait for task to exit
  Assert.assertEquals(TaskState.SUCCESS, future.get().getStatusCode());
  // Check metrics
  Assert.assertEquals(4, task.getRunner().getRowIngestionMeters().getProcessed());
  Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getUnparseable());
  Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getThrownAway());
}
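For readability, the DimensionsSpec nested inside the createTask call above can be pulled into a helper. The sketch below is hypothetical (the class and method names are not from KafkaIndexTaskTest); it only restates the schema list used in the test, where the Kafka metadata column "kafka.offset" is long-typed so offsets are ingested as numbers rather than strings.

// Standalone sketch restating the test's dimension declarations; names are hypothetical.
import java.util.Arrays;
import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.FloatDimensionSchema;
import org.apache.druid.data.input.impl.LongDimensionSchema;
import org.apache.druid.data.input.impl.StringDimensionSchema;

public class KafkaDimensionsSketch {
  public static DimensionsSpec kafkaAwareDimensions() {
    // Payload columns first, then the Kafka metadata columns exposed by the input format.
    return new DimensionsSpec(
        Arrays.asList(
            new StringDimensionSchema("dim1"),
            new StringDimensionSchema("dim1t"),
            new StringDimensionSchema("dim2"),
            new LongDimensionSchema("dimLong"),
            new FloatDimensionSchema("dimFloat"),
            new StringDimensionSchema("kafka.topic"),
            new LongDimensionSchema("kafka.offset"),
            new StringDimensionSchema("kafka.header.encoding")
        )
    );
  }
}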