Use of org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList in project Phoenix by Apache.
From the class EncodedColumnQualifierCellsListTest, method testIterator.
@Test
public void testIterator() {
    EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
    Cell[] cells = new Cell[7];
    int i = 0;
    populateListAndArray(list, cells);
    Iterator<Cell> itr = list.iterator();
    assertTrue(itr.hasNext());
    // test itr.next()
    i = 0;
    while (itr.hasNext()) {
        assertEquals(cells[i++], itr.next());
    }
    assertEquals(7, list.size());
    // test itr.remove()
    itr = list.iterator();
    i = 0;
    int numRemoved = 0;
    try {
        itr.remove();
        fail("Remove not allowed till next() is called");
    } catch (IllegalStateException expected) {
    }
    while (itr.hasNext()) {
        assertEquals(cells[i++], itr.next());
        itr.remove();
        numRemoved++;
    }
    assertEquals("Number of elements removed should have been the size of the list", 7, numRemoved);
}
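The populateListAndArray helper these tests call is not part of this excerpt; it fills both the list and the seven-element cells array. Purely as an illustration of how such cells can be built (the qualifier value here is assumed; row and cf are the test class's byte-array constants, and KeyValue.createFirstOnRow is used the same way in testRemove below):

// Illustrative sketch only, not the project's helper: build a cell keyed by an
// encoded column qualifier and add it; EncodedColumnQualiferCellsList positions
// the cell according to that qualifier.
Cell cell = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12)); // qualifier value assumed
list.add(cell);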
Use of org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList in project Phoenix by Apache.
From the class EncodedColumnQualifierCellsListTest, method testRemove.
@Test
public void testRemove() throws Exception {
    EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
    Cell[] cells = new Cell[7];
    populateListAndArray(list, cells);
    assertTrue(list.remove(cells[0]));
    assertEquals(6, list.size());
    assertTrue(list.remove(cells[6]));
    assertEquals(5, list.size());
    assertTrue(list.remove(cells[3]));
    assertEquals(4, list.size());
    assertFalse(list.remove(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13))));
    assertEquals(4, list.size());
}
Use of org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList in project Phoenix by Apache.
From the class EncodedColumnQualifierCellsListTest, method testLastIndexOf.
@Test
public void testLastIndexOf() throws Exception {
    EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
    Cell[] cells = new Cell[7];
    populateListAndArray(list, cells);
    for (int i = 0; i < cells.length; i++) {
        assertEquals(i, list.lastIndexOf(cells[i]));
    }
}
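The assertion that lastIndexOf(cells[i]) equals i holds because the list positions cells by their encoded qualifier rather than by insertion order, so the helper's array order lines up with the list's traversal order. A small, hypothetical illustration of that property (qualifier values assumed; row and cf as in the tests above):

// Hypothetical illustration: cells added out of qualifier order still come back
// in qualifier order, so the lower qualifier ends up at position 0.
EncodedColumnQualiferCellsList ordered = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
Cell high = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(15));
Cell low = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11));
ordered.add(high);
ordered.add(low);
assertEquals(0, ordered.lastIndexOf(low));
assertEquals(1, ordered.lastIndexOf(high));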
Use of org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList in project Phoenix by Apache.
From the class EncodedColumnQualifierCellsListTest, method testIsEmpty.
@Test
public void testIsEmpty() throws Exception {
    EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
    assertTrue(list.isEmpty());
    populateList(list);
    assertFalse(list.isEmpty());
    Iterator<Cell> itr = list.iterator();
    while (itr.hasNext()) {
        itr.next();
        itr.remove();
        if (itr.hasNext()) {
            assertFalse(list.isEmpty());
        }
    }
    assertTrue(list.isEmpty());
}
Use of org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList in project Phoenix by Apache.
From the class GroupedAggregateRegionObserver, method scanUnordered.
/**
 * Used for an aggregate query in which the key order does not necessarily match the group by
 * key order. In this case, we must collect all distinct groups within a region into a map,
 * aggregating as we go. (A simplified sketch of this pattern appears after the method below.)
 * @param limit upper bound on the number of distinct groups to collect before the scan stops
 */
private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
        final RegionScanner scanner, final List<Expression> expressions,
        final ServerAggregators aggregators, long limit) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
                + ", group by " + expressions + ", aggregators " + aggregators,
                ScanUtil.getCustomAnnotations(scan)));
    }
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Configuration conf = env.getConfiguration();
    int estDistVals = conf.getInt(GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB, DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES);
    byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserver.ESTIMATED_DISTINCT_VALUES);
    if (estDistValsBytes != null) {
        // Allocate 1.5x estimation
        estDistVals = Math.max(MIN_DISTINCT_VALUES, (int) (Bytes.toInt(estDistValsBytes) * 1.5f));
    }
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    final boolean spillableEnabled = conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
    GroupByCache groupByCache = GroupByCacheFactory.INSTANCE.newCache(env, ScanUtil.getTenantId(scan),
            ScanUtil.getCustomAnnotations(scan), aggregators, estDistVals);
    boolean success = false;
    try {
        boolean hasMore;
        Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
        if (logger.isDebugEnabled()) {
            logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled,
                    ScanUtil.getCustomAnnotations(scan)));
        }
        Region region = c.getEnvironment().getRegion();
        boolean acquiredLock = false;
        try {
            region.startRegionOperation();
            acquiredLock = true;
            synchronized (scanner) {
                do {
                    List<Cell> results = useQualifierAsIndex
                            ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme)
                            : new ArrayList<Cell>();
                    // Results are potentially returned even when the return value of
                    // scanner.nextRaw() is false, since that value only indicates whether
                    // there are more values after the ones returned.
                    hasMore = scanner.nextRaw(results);
                    if (!results.isEmpty()) {
                        result.setKeyValues(results);
                        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(result, expressions);
                        Aggregator[] rowAggregators = groupByCache.cache(key);
                        // Aggregate values here
                        aggregators.aggregate(rowAggregators, result);
                    }
                } while (hasMore && groupByCache.size() < limit);
            }
        } finally {
            if (acquiredLock) {
                region.closeRegionOperation();
            }
        }
        RegionScanner regionScanner = groupByCache.getScanner(scanner);
        // Do not sort here; sort on the client instead. The reason is that if the scan ever
        // extends beyond a region (which can happen if we're basing our parallelization
        // split points on old metadata), we'll get incorrect query results.
        success = true;
        return regionScanner;
    } finally {
        if (!success) {
            Closeables.closeQuietly(groupByCache);
        }
    }
}
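The Javadoc's description of collecting all distinct groups into a map while aggregating as we go can be reduced to a small, self-contained sketch. This is not Phoenix code: it swaps GroupByCache, ServerAggregators, and the spill-to-disk handling for a plain HashMap with a running count, purely to show the shape of the unordered group-by pattern.

import java.util.HashMap;
import java.util.Map;

// Simplified sketch of unordered grouped aggregation: because rows are not sorted by the
// group-by key, each row's key is looked up (or inserted) in a map and its aggregate state
// is updated in place, so row order never matters. The real coprocessor keeps Aggregator[]
// state per key and stops once the cache reaches the caller's limit; a count stands in here.
public final class UnorderedGroupBySketch {
    public static Map<String, Long> countByKey(Iterable<String> groupByKeys) {
        Map<String, Long> groups = new HashMap<>();
        for (String key : groupByKeys) {
            groups.merge(key, 1L, Long::sum); // aggregate as we go
        }
        return groups; // per-region results are merged and sorted on the client afterwards
    }
}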