Example use of org.apache.druid.data.input.MapBasedRow in the druid project by druid-io: class InDimFilterTest, method testContainsNullWhenValuesSetIsTreeSet.
@Test
public void testContainsNullWhenValuesSetIsTreeSet() {
// Regression test: programmatically-generated InDimFilters may be built on TreeSets with natural
// ordering, and such sets throw NullPointerException from contains(null). InDimFilter is expected
// to guard those contains calls with null-checking lambdas; this test exercises that guard.
final TreeSet<String> filterValues = new TreeSet<>();
filterValues.add("bar");
filterValues.add("foo");
final InDimFilter filter = new InDimFilter("dim", filterValues, null);

// Mutable row backing the selector; rewriting "dim" below drives each successive match.
final Map<String, Object> row = new HashMap<>();
row.put("dim", null);
final RowBasedColumnSelectorFactory<MapBasedRow> columnSelectorFactory = RowBasedColumnSelectorFactory.create(
    RowAdapters.standardRow(),
    () -> new MapBasedRow(0, row),
    RowSignature.builder().add("dim", ColumnType.STRING).build(),
    true
);
final ValueMatcher matcher = filter.toFilter().makeMatcher(columnSelectorFactory);

// Without the null-checking wrapper this call would throw rather than return false.
Assert.assertFalse(matcher.matches());

// A value present in the set must match.
row.put("dim", "foo");
Assert.assertTrue(matcher.matches());

// A value absent from the set must not match.
row.put("dim", "fox");
Assert.assertFalse(matcher.matches());
}
Example use of org.apache.druid.data.input.MapBasedRow in the druid project by druid-io: class TestHelper, method assertTimeseriesResultValue.
/**
 * Asserts that two timeseries results agree on timestamp and (fuzzily) on their aggregated values.
 * The value maps are compared via {@code assertRow}, which tolerates small numeric differences —
 * useful because different groupBy strategies need not produce bit-identical results
 * (merge ordering, float vs double).
 */
private static void assertTimeseriesResultValue(String msg, Result expected, Result actual) {
Assert.assertEquals(StringUtils.format("%s: timestamp", msg), expected.getTimestamp(), actual.getTimestamp());

final TimeseriesResultValue expectedValue = (TimeseriesResultValue) expected.getValue();
final TimeseriesResultValue actualValue = (TimeseriesResultValue) actual.getValue();

assertRow(
    msg,
    new MapBasedRow(expected.getTimestamp(), expectedValue.getBaseObject()),
    new MapBasedRow(actual.getTimestamp(), actualValue.getBaseObject())
);
}
Example use of org.apache.druid.data.input.MapBasedRow in the druid project by druid-io: class LimitedBufferHashGrouperTest, method testIteratorOrderByDim.
@Test
public void testIteratorOrderByDim() {
final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
final LimitedBufferHashGrouper<Integer> grouper =
    makeGrouperWithOrderBy(columnSelectorFactory, "value", OrderByColumnSpec.Direction.ASCENDING);

// The limited grouper's iterator always sorts by key ascending, even when the heap was ordered by
// value; aggregating with keys and values both descending ensures the results are not re-ordered
// by key.
for (int i = 0; i < NUM_ROWS; i++) {
    final int key = NUM_ROWS - i + KEY_BASE;
    columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", key)));
    Assert.assertTrue(String.valueOf(key), grouper.aggregate(key).isOk());
}

final List<Grouper.Entry<Integer>> entries = Lists.newArrayList(grouper.iterator(true));
Assert.assertEquals(LIMIT, entries.size());

// Expect the LIMIT smallest "value" aggregates, ascending from KEY_BASE + 1.
for (int i = 0; i < LIMIT; i++) {
    Assert.assertEquals(KEY_BASE + i + 1L, entries.get(i).getValues()[0]);
}
}
Example use of org.apache.druid.data.input.MapBasedRow in the druid project by druid-io: class LimitedBufferHashGrouperTest, method testIteratorOrderByAggsDesc.
@Test
public void testIteratorOrderByAggsDesc() {
final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
final LimitedBufferHashGrouper<Integer> grouper =
    makeGrouperWithOrderBy(columnSelectorFactory, "valueSum", OrderByColumnSpec.Direction.DESCENDING);

// The limited grouper's iterator always sorts by key ascending, even when the heap was ordered by
// value; aggregating with keys descending and values ascending ensures the results are not
// re-ordered by key.
for (int i = 0; i < NUM_ROWS; i++) {
    final int key = NUM_ROWS - i + KEY_BASE;
    columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", i + 1)));
    Assert.assertTrue(String.valueOf(key), grouper.aggregate(key).isOk());
}

final List<Grouper.Entry<Integer>> entries = Lists.newArrayList(grouper.iterator(true));
Assert.assertEquals(LIMIT, entries.size());

// Expect the LIMIT largest sums, descending from NUM_ROWS.
for (int i = 0; i < LIMIT; i++) {
    Assert.assertEquals((long) NUM_ROWS - i, entries.get(i).getValues()[0]);
}
}
Example use of org.apache.druid.data.input.MapBasedRow in the druid project by druid-io: class LimitedBufferHashGrouperTest, method testIteratorOrderByAggs.
@Test
public void testIteratorOrderByAggs() {
final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
final LimitedBufferHashGrouper<Integer> grouper =
    makeGrouperWithOrderBy(columnSelectorFactory, "valueSum", OrderByColumnSpec.Direction.ASCENDING);

// The limited grouper's iterator always sorts by key ascending, even when the heap was ordered by
// value; aggregating with keys and values both descending ensures the results are not re-ordered
// by key.
for (int i = 0; i < NUM_ROWS; i++) {
    final int key = NUM_ROWS - i + KEY_BASE;
    columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", NUM_ROWS - i)));
    Assert.assertTrue(String.valueOf(key), grouper.aggregate(key).isOk());
}

final List<Grouper.Entry<Integer>> entries = Lists.newArrayList(grouper.iterator(true));
Assert.assertEquals(LIMIT, entries.size());

// Expect the LIMIT smallest sums, ascending from 1.
for (int i = 0; i < LIMIT; i++) {
    Assert.assertEquals(i + 1L, entries.get(i).getValues()[0]);
}
}
Aggregations