Use of org.apache.hadoop.hive.accumulo.predicate.AccumuloPredicateHandler in project hive by apache.
From the class TestHiveAccumuloTableInputFormat, the method testIteratorNotInSplitsCompensation. The test mocks the predicate handler so that getIterators returns a single PrimitiveComparisonFilter setting, then verifies that getRecordReader compensates for iterators missing from the split by copying that setting onto the HiveAccumuloSplit.
@Test
public void testIteratorNotInSplitsCompensation() throws Exception {
  FileInputFormat.addInputPath(conf, new Path("unused"));
  InputSplit[] splits = inputformat.getSplits(conf, 0);
  assertEquals(1, splits.length);
  InputSplit split = splits[0];

  IteratorSetting is = new IteratorSetting(1, PrimitiveComparisonFilter.FILTER_PREFIX + 1,
      PrimitiveComparisonFilter.class);
  is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName());
  is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
  is.addOption(PrimitiveComparisonFilter.CONST_VAL, new String(Base64.encodeBase64(new byte[] { '0' })));
  is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:cq");

  // Mock out the predicate handler because it's just easier
  AccumuloPredicateHandler predicateHandler = Mockito.mock(AccumuloPredicateHandler.class);
  Mockito.when(predicateHandler.getIterators(Mockito.any(JobConf.class), Mockito.any(ColumnMapper.class)))
      .thenReturn(Arrays.asList(is));

  // Set it on our inputformat
  inputformat.predicateHandler = predicateHandler;
  inputformat.getRecordReader(split, conf, null);

  // The code should account for the bug and update the iterators on the split
  List<IteratorSetting> settingsOnSplit = ((HiveAccumuloSplit) split).getSplit().getIterators();
  assertEquals(1, settingsOnSplit.size());
  assertEquals(is, settingsOnSplit.get(0));
}
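For context, the IteratorSetting built in the test describes a server-side Accumulo filter; outside the InputFormat path, the same setting would be attached to a scan directly. The sketch below is illustrative and not part of the Hive test: it assumes the StringCompare and Equal classes live in Hive's org.apache.hadoop.hive.accumulo.predicate.compare package, uses java.util.Base64 in place of the commons-codec call in the test, and the class and method names here are hypothetical.

import java.nio.charset.StandardCharsets;
import java.util.Base64;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.ScannerBase;
import org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter;
import org.apache.hadoop.hive.accumulo.predicate.compare.Equal;
import org.apache.hadoop.hive.accumulo.predicate.compare.StringCompare;

public class PredicateIteratorSketch {

  // Build an IteratorSetting equivalent to the one in the test: a filter that keeps
  // entries whose cf:cq value equals the string "0". The constant is base64-encoded,
  // matching how the test passes CONST_VAL.
  public static IteratorSetting equalsFilter(int priority) {
    IteratorSetting is = new IteratorSetting(priority,
        PrimitiveComparisonFilter.FILTER_PREFIX + priority, PrimitiveComparisonFilter.class);
    is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName());
    is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
    is.addOption(PrimitiveComparisonFilter.CONST_VAL,
        Base64.getEncoder().encodeToString("0".getBytes(StandardCharsets.UTF_8)));
    is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:cq");
    return is;
  }

  // Attach the setting to an existing Accumulo scanner. In the test, the equivalent
  // setting is instead expected to end up on the HiveAccumuloSplit's iterator list.
  public static void attach(ScannerBase scanner, IteratorSetting is) {
    scanner.addScanIterator(is);
  }
}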