Use of com.palantir.atlasdb.keyvalue.impl.LocalRowColumnRangeIterator in project atlasdb by palantir.
From class DbKvs, method getRowsColumnRange:
@Override
public Map<byte[], RowColumnRangeIterator> getRowsColumnRange(
        TableReference tableRef,
        Iterable<byte[]> rows,
        BatchColumnRangeSelection batchColumnRangeSelection,
        long timestamp) {
    List<byte[]> rowList = ImmutableList.copyOf(rows);
    Map<byte[], List<Map.Entry<Cell, Value>>> firstPage =
            getFirstRowsColumnRangePage(tableRef, rowList, batchColumnRangeSelection, timestamp);
    Map<byte[], RowColumnRangeIterator> ret = Maps.newHashMapWithExpectedSize(rowList.size());
    for (Entry<byte[], List<Map.Entry<Cell, Value>>> e : firstPage.entrySet()) {
        List<Map.Entry<Cell, Value>> results = e.getValue();
        if (results.isEmpty()) {
            ret.put(e.getKey(), new LocalRowColumnRangeIterator(results.iterator()));
            continue;
        }
        byte[] lastCol = results.get(results.size() - 1).getKey().getColumnName();
        RowColumnRangeIterator firstPageIter = new LocalRowColumnRangeIterator(results.iterator());
        if (isEndOfColumnRange(lastCol, batchColumnRangeSelection.getEndCol())) {
            // The first page already covers the full column range for this row.
            ret.put(e.getKey(), firstPageIter);
        } else {
            // Lazily chain an iterator that fetches the remaining pages,
            // starting just after the last column seen on the first page.
            byte[] nextCol = RangeRequests.nextLexicographicName(lastCol);
            BatchColumnRangeSelection nextColumnRangeSelection = BatchColumnRangeSelection.create(
                    nextCol, batchColumnRangeSelection.getEndCol(), batchColumnRangeSelection.getBatchHint());
            Iterator<Map.Entry<Cell, Value>> nextPagesIter =
                    getRowColumnRange(tableRef, e.getKey(), nextColumnRangeSelection, timestamp);
            ret.put(e.getKey(), new LocalRowColumnRangeIterator(Iterators.concat(firstPageIter, nextPagesIter)));
        }
    }
    return ret;
}
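For reference, LocalRowColumnRangeIterator itself never appears in these snippets, only calls to its constructor. Its role is to adapt a plain Iterator<Map.Entry<Cell, Value>> to the RowColumnRangeIterator interface. Below is a minimal sketch of that shape, assuming RowColumnRangeIterator adds nothing beyond Iterator<Map.Entry<Cell, Value>> (as its use above suggests); this is not the verbatim atlasdb source, which may carry extra bookkeeping.

    import java.util.Iterator;
    import java.util.Map;

    // Sketch of the adapter's shape, not the verbatim atlasdb class:
    // delegate every Iterator call to the wrapped iterator.
    public class LocalRowColumnRangeIterator implements RowColumnRangeIterator {
        private final Iterator<Map.Entry<Cell, Value>> delegate;

        public LocalRowColumnRangeIterator(Iterator<Map.Entry<Cell, Value>> delegate) {
            this.delegate = delegate;
        }

        @Override
        public boolean hasNext() {
            return delegate.hasNext();
        }

        @Override
        public Map.Entry<Cell, Value> next() {
            return delegate.next();
        }
    }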
Use of com.palantir.atlasdb.keyvalue.impl.LocalRowColumnRangeIterator in project atlasdb by palantir.
From class CassandraKeyValueServiceImpl, method getRowsColumnRangeIteratorForSingleHost:
private Map<byte[], RowColumnRangeIterator> getRowsColumnRangeIteratorForSingleHost(
        InetSocketAddress host,
        TableReference tableRef,
        List<byte[]> rows,
        BatchColumnRangeSelection batchColumnRangeSelection,
        long startTs) {
    try {
        RowColumnRangeExtractor.RowColumnRangeResult firstPage =
                getRowsColumnRangeForSingleHost(host, tableRef, rows, batchColumnRangeSelection, startTs);
        Map<byte[], LinkedHashMap<Cell, Value>> results = firstPage.getResults();
        Map<byte[], Column> rowsToLastCompositeColumns = firstPage.getRowsToLastCompositeColumns();
        Map<byte[], byte[]> incompleteRowsToNextColumns = Maps.newHashMap();
        for (Entry<byte[], Column> e : rowsToLastCompositeColumns.entrySet()) {
            byte[] row = e.getKey();
            byte[] col = CassandraKeyValueServices.decomposeName(e.getValue()).getLhSide();
            // If we read a version of the cell before our start timestamp, it will be the most recent
            // version readable to us and we can continue to the next column. Otherwise we have to
            // continue reading this column.
            Map<Cell, Value> rowResult = results.get(row);
            boolean completedCell = (rowResult != null) && rowResult.containsKey(Cell.create(row, col));
            boolean endOfRange = isEndOfColumnRange(
                    completedCell, col, firstPage.getRowsToRawColumnCount().get(row), batchColumnRangeSelection);
            if (!endOfRange) {
                byte[] nextCol = getNextColumnRangeColumn(completedCell, col);
                incompleteRowsToNextColumns.put(row, nextCol);
            }
        }
        Map<byte[], RowColumnRangeIterator> ret = Maps.newHashMapWithExpectedSize(rows.size());
        for (byte[] row : rowsToLastCompositeColumns.keySet()) {
            Iterator<Entry<Cell, Value>> resultIterator;
            Map<Cell, Value> result = results.get(row);
            if (result != null) {
                resultIterator = result.entrySet().iterator();
            } else {
                resultIterator = Collections.emptyIterator();
            }
            byte[] nextCol = incompleteRowsToNextColumns.get(row);
            if (nextCol == null) {
                // The first page covered the whole column range for this row.
                ret.put(row, new LocalRowColumnRangeIterator(resultIterator));
            } else {
                // Chain a lazy iterator that pages through the rest of the range.
                BatchColumnRangeSelection newColumnRange = BatchColumnRangeSelection.create(
                        nextCol, batchColumnRangeSelection.getEndCol(), batchColumnRangeSelection.getBatchHint());
                ret.put(row, new LocalRowColumnRangeIterator(Iterators.concat(
                        resultIterator, getRowColumnRange(host, tableRef, row, newColumnRange, startTs))));
            }
        }
        // We saw no Cassandra results at all for these rows, so the entire column range is empty for them.
        for (byte[] row : firstPage.getEmptyRows()) {
            ret.put(row, new LocalRowColumnRangeIterator(Collections.emptyIterator()));
        }
        return ret;
    } catch (Exception e) {
        throw QosAwareThrowables.unwrapAndThrowRateLimitExceededOrAtlasDbDependencyException(e);
    }
}
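The helpers isEndOfColumnRange and getNextColumnRangeColumn are not shown in this snippet. From the comment above, the intent is: a row is finished when Cassandra returned fewer raw columns than the batch hint, or when the last cell was fully resolved and its column sits at the end of the requested range; otherwise the next fetch resumes at the same column (if its cell is still unresolved) or at the next lexicographic column name. A hedged reconstruction of that logic follows; it is illustrative and not necessarily the exact CassandraKeyValueServiceImpl code.

    // Hypothetical reconstruction of the helpers used above; the real
    // implementations may differ in detail.
    private static boolean isEndOfColumnRange(
            boolean completedCell,
            byte[] lastCol,
            int numRawResults,
            BatchColumnRangeSelection columnRangeSelection) {
        // Fewer raw columns than requested means Cassandra has no more
        // columns in the range for this row.
        return numRawResults < columnRangeSelection.getBatchHint()
                || (completedCell
                        && (RangeRequests.isLastRowName(lastCol)
                                || Arrays.equals(
                                        RangeRequests.nextLexicographicName(lastCol),
                                        columnRangeSelection.getEndCol())));
    }

    private static byte[] getNextColumnRangeColumn(boolean completedCell, byte[] lastCol) {
        // If the cell was not resolved at our start timestamp, keep paging
        // through older versions of the same column; otherwise move past it.
        return completedCell ? RangeRequests.nextLexicographicName(lastCol) : lastCol;
    }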
Use of com.palantir.atlasdb.keyvalue.impl.LocalRowColumnRangeIterator in project atlasdb by palantir.
From class SnapshotTransaction, method partitionByRow:
/**
* Partitions a {@link RowColumnRangeIterator} into contiguous blocks that share the same row name.
* {@link KeyValueService#getRowsColumnRange(TableReference, Iterable, ColumnRangeSelection, int, long)} guarantees
* that all columns for a single row are adjacent, so this method will return an {@link Iterator} with exactly one
* entry per non-empty row.
*/
private Iterator<Map.Entry<byte[], RowColumnRangeIterator>> partitionByRow(RowColumnRangeIterator rawResults) {
    PeekingIterator<Map.Entry<Cell, Value>> peekableRawResults = Iterators.peekingIterator(rawResults);
    return new AbstractIterator<Map.Entry<byte[], RowColumnRangeIterator>>() {
        byte[] prevRowName;

        @Override
        protected Map.Entry<byte[], RowColumnRangeIterator> computeNext() {
            finishConsumingPreviousRow(peekableRawResults);
            if (!peekableRawResults.hasNext()) {
                return endOfData();
            }
            byte[] nextRowName = peekableRawResults.peek().getKey().getRowName();
            // A view over the shared underlying iterator that stops at the
            // first entry belonging to a different row.
            Iterator<Map.Entry<Cell, Value>> columnsIterator = new AbstractIterator<Map.Entry<Cell, Value>>() {
                @Override
                protected Map.Entry<Cell, Value> computeNext() {
                    if (!peekableRawResults.hasNext()
                            || !Arrays.equals(peekableRawResults.peek().getKey().getRowName(), nextRowName)) {
                        return endOfData();
                    }
                    return peekableRawResults.next();
                }
            };
            prevRowName = nextRowName;
            return Maps.immutableEntry(nextRowName, new LocalRowColumnRangeIterator(columnsIterator));
        }

        private void finishConsumingPreviousRow(PeekingIterator<Map.Entry<Cell, Value>> iter) {
            // Skip any columns of the previous row that the caller did not drain.
            int numConsumed = 0;
            while (iter.hasNext() && Arrays.equals(iter.peek().getKey().getRowName(), prevRowName)) {
                iter.next();
                numConsumed++;
            }
            if (numConsumed > 0) {
                log.warn("Not all columns for row {} were read. {} columns were discarded.",
                        UnsafeArg.of("row", Arrays.toString(prevRowName)),
                        SafeArg.of("numColumnsDiscarded", numConsumed));
            }
        }
    };
}
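One subtlety worth illustrating: the per-row iterators returned by partitionByRow are views over the same underlying RowColumnRangeIterator, so a caller must drain each row's columns before advancing to the next row; otherwise finishConsumingPreviousRow discards the remainder and logs the warning above. A usage sketch of the intended consumption pattern (partitionByRow is private to SnapshotTransaction, so rawResults and the call itself are illustrative placeholders):

    // Illustrative only: partitionByRow is private, and rawResults is a placeholder.
    Iterator<Map.Entry<byte[], RowColumnRangeIterator>> rowsIter = partitionByRow(rawResults);
    while (rowsIter.hasNext()) {
        Map.Entry<byte[], RowColumnRangeIterator> row = rowsIter.next();
        RowColumnRangeIterator columns = row.getValue();
        // Drain every column of this row before asking rowsIter for the next
        // row; unread columns would otherwise be skipped and logged as discarded.
        while (columns.hasNext()) {
            Map.Entry<Cell, Value> cell = columns.next();
            // process cell.getKey() and cell.getValue()
        }
    }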