Use of org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter in project hadoop by apache.
From the class FlowScanner, method nextInternal:
/**
 * This method loops through the cells in a given row of the
 * {@link FlowRunTable}. It looks at the tags of each cell to figure out how
 * to process the contents: it calculates the sum, min, or max for each
 * column, or returns the cell as is.
 *
 * @param cells output list into which the processed cells for this row are
 *          added
 * @param scannerContext per-batch scan context supplied by HBase, used when
 *          peeking at and advancing over cells
 * @return true if a next row is available for the scanner, false otherwise
 * @throws IOException if reading or converting cell values fails
 */
private boolean nextInternal(List<Cell> cells, ScannerContext scannerContext)
    throws IOException {
  Cell cell = null;
  startNext();
  // Loop through all the cells in this row.
  // For min/max/metrics we do need to scan the entire set of cells to get
  // the right one, but with flush/compaction the number of cells being
  // scanned will go down.
  // Cells are grouped per column qualifier, then sorted by cell timestamp
  // (latest to oldest) per column qualifier, so all cells in one qualifier
  // come one after the other before we see the next column qualifier.
  ByteArrayComparator comp = new ByteArrayComparator();
  byte[] previousColumnQualifier = Separator.EMPTY_BYTES;
  AggregationOperation currentAggOp = null;
  SortedSet<Cell> currentColumnCells = new TreeSet<>(KeyValue.COMPARATOR);
  Set<String> alreadySeenAggDim = new HashSet<>();
  int addedCnt = 0;
  long currentTimestamp = System.currentTimeMillis();
  ValueConverter converter = null;
  int limit = batchSize;
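
  // Scan cell by cell; a non-positive batchSize disables the batch limit.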
  while (limit <= 0 || addedCnt < limit) {
    cell = peekAtNextCell(scannerContext);
    if (cell == null) {
      break;
    }
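    // Cells arrive grouped by column qualifier, newest timestamp first.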
    byte[] currentColumnQualifier = CellUtil.cloneQualifier(cell);
    if (previousColumnQualifier == null) {
      // first time in loop
      previousColumnQualifier = currentColumnQualifier;
    }
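    // Fetch the ValueConverter for this column; when the qualifier changes,
    // flush the previous column's aggregate before starting the new one.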
    converter = getValueConverter(currentColumnQualifier);
    if (comp.compare(previousColumnQualifier, currentColumnQualifier) != 0) {
      addedCnt += emitCells(cells, currentColumnCells, currentAggOp,
          converter, currentTimestamp);
      resetState(currentColumnCells, alreadySeenAggDim);
      previousColumnQualifier = currentColumnQualifier;
      currentAggOp = getCurrentAggOp(cell);
      converter = getValueConverter(currentColumnQualifier);
    }
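    // Buffer this cell under the current qualifier and advance the scanner.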
    collectCells(currentColumnCells, currentAggOp, cell, alreadySeenAggDim,
        converter, scannerContext);
    nextCell(scannerContext);
  }
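
  // Emit whatever is still buffered for the last column qualifier in the row.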
  if (!currentColumnCells.isEmpty() && (limit <= 0 || addedCnt < limit)) {
    addedCnt += emitCells(cells, currentColumnCells, currentAggOp, converter,
        currentTimestamp);
    if (LOG.isDebugEnabled()) {
      if (addedCnt > 0) {
        LOG.debug("emitted " + addedCnt + " cells for " + this.action
            + " rowKey="
            + FlowRunRowKey.parseRowKey(CellUtil.cloneRow(cells.get(0))));
      } else {
        LOG.debug("emitted no cells for " + this.action);
      }
    }
  }
  return hasMore();
}
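
The converter handed to emitCells above is what turns raw cell bytes into numbers that can be combined and written back. The following is a minimal, self-contained sketch of that round trip using LongConverter from the same common package; the no-arg construction of LongConverter is an assumption (some versions may expose a shared instance instead), and the driver class is purely hypothetical.

import java.io.IOException;

import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;

public class ValueConverterSumSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical stand-alone driver; assumes LongConverter has a public
    // no-arg constructor.
    ValueConverter converter = new LongConverter();

    // Encode two values as they would be stored in two cells of one column.
    byte[] cellValue1 = converter.encodeValue(40L);
    byte[] cellValue2 = converter.encodeValue(2L);

    // Decode each cell's bytes back into a Number, the way a SUM
    // aggregation must before it can combine them.
    Number v1 = (Number) converter.decodeValue(cellValue1);
    Number v2 = (Number) converter.decodeValue(cellValue2);

    // NumericValueConverter knows how to add values of its numeric type.
    Number sum = ((NumericValueConverter) converter).add(v1, v2);

    // Re-encode the aggregate so it could be written out as a single cell.
    byte[] summedValue = converter.encodeValue(sum);
    System.out.println(converter.decodeValue(summedValue)); // prints 42
  }
}

FlowScanner's summation path does roughly this over the cells buffered in currentColumnCells before emitting a single aggregated cell.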