Use of com.palantir.atlasdb.keyvalue.api.BatchColumnRangeSelection in project atlasdb by palantir.
The class DbKvs, method getRowColumnRange: lazily pages through the columns of a single row within the given column range.
private Iterator<Map.Entry<Cell, Value>> getRowColumnRange(
        TableReference tableRef,
        byte[] row,
        BatchColumnRangeSelection batchColumnRangeSelection,
        long timestamp) {
    List<byte[]> rowList = ImmutableList.of(row);
    return ClosableIterators.wrap(new AbstractPagingIterable<
            Entry<Cell, Value>,
            TokenBackedBasicResultsPage<Entry<Cell, Value>, byte[]>>() {
        @Override
        protected TokenBackedBasicResultsPage<Entry<Cell, Value>, byte[]> getFirstPage() throws Exception {
            return page(batchColumnRangeSelection.getStartCol());
        }

        @Override
        protected TokenBackedBasicResultsPage<Map.Entry<Cell, Value>, byte[]> getNextPage(
                TokenBackedBasicResultsPage<Map.Entry<Cell, Value>, byte[]> previous) throws Exception {
            return page(previous.getTokenForNextPage());
        }

        TokenBackedBasicResultsPage<Map.Entry<Cell, Value>, byte[]> page(byte[] startCol) throws Exception {
            BatchColumnRangeSelection range = BatchColumnRangeSelection.create(
                    startCol,
                    batchColumnRangeSelection.getEndCol(),
                    batchColumnRangeSelection.getBatchHint());
            List<Map.Entry<Cell, Value>> nextPage = Iterables.getOnlyElement(
                    extractRowColumnRangePage(tableRef, range, timestamp, rowList).values());
            if (nextPage.isEmpty()) {
                return SimpleTokenBackedResultsPage.create(startCol, ImmutableList.<Entry<Cell, Value>>of(), false);
            }
            byte[] lastCol = nextPage.get(nextPage.size() - 1).getKey().getColumnName();
            if (isEndOfColumnRange(lastCol, batchColumnRangeSelection.getEndCol())) {
                return SimpleTokenBackedResultsPage.create(lastCol, nextPage, false);
            }
            byte[] nextCol = RangeRequests.nextLexicographicName(lastCol);
            return SimpleTokenBackedResultsPage.create(nextCol, nextPage, true);
        }
    }.iterator());
}
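To make the paging contract concrete, here is a minimal sketch of the loop that AbstractPagingIterable drives over the page(...) method above, assuming the standard BasicResultsPage accessors getResults() and moreResultsAvailable(); process(...) is a hypothetical consumer, not part of DbKvs.

    // Minimal sketch of token-backed paging; simplified, not the actual AtlasDB internals.
    byte[] token = batchColumnRangeSelection.getStartCol();  // first page starts at the range's start column
    boolean hasMore = true;
    while (hasMore) {
        TokenBackedBasicResultsPage<Map.Entry<Cell, Value>, byte[]> page = page(token);
        for (Map.Entry<Cell, Value> entry : page.getResults()) {
            process(entry);                       // hypothetical consumer of each (cell, value) pair
        }
        hasMore = page.moreResultsAvailable();
        token = page.getTokenForNextPage();       // resumes just past the last column seen
    }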
Use of com.palantir.atlasdb.keyvalue.api.BatchColumnRangeSelection in project atlasdb by palantir.
The class DbKvs, method loadColumnsForBatches: loads column pages batch by batch, resuming partially loaded rows across batch boundaries.
private Iterator<Iterator<Map.Entry<Cell, Value>>> loadColumnsForBatches(
        TableReference tableRef,
        ColumnRangeSelection columnRangeSelection,
        long timestamp,
        Map<Sha256Hash, byte[]> rowHashesToBytes,
        Iterator<Map<Sha256Hash, Integer>> batches,
        Map<Sha256Hash, Integer> columnCountByRowHash) {
    Iterator<Iterator<Map.Entry<Cell, Value>>> results = new AbstractIterator<Iterator<Map.Entry<Cell, Value>>>() {
        private Sha256Hash lastRowHashInPreviousBatch = null;
        private byte[] lastColumnInPreviousBatch = null;

        @Override
        protected Iterator<Map.Entry<Cell, Value>> computeNext() {
            if (!batches.hasNext()) {
                return endOfData();
            }
            Map<Sha256Hash, Integer> currentBatch = batches.next();
            RowsColumnRangeBatchRequest columnRangeSelectionsByRow =
                    getBatchColumnRangeSelectionsByRow(currentBatch, columnCountByRowHash);
            Map<byte[], List<Map.Entry<Cell, Value>>> resultsByRow =
                    extractRowColumnRangePage(tableRef, columnRangeSelectionsByRow, timestamp);
            int totalEntries = resultsByRow.values().stream().mapToInt(List::size).sum();
            if (totalEntries == 0) {
                return Collections.emptyIterator();
            }
            // Ensure order matches that of the provided batch.
            List<Map.Entry<Cell, Value>> loadedColumns = new ArrayList<>(totalEntries);
            for (Sha256Hash rowHash : currentBatch.keySet()) {
                byte[] row = rowHashesToBytes.get(rowHash);
                loadedColumns.addAll(resultsByRow.get(row));
            }
            Cell lastCell = Iterables.getLast(loadedColumns).getKey();
            lastRowHashInPreviousBatch = Sha256Hash.computeHash(lastCell.getRowName());
            lastColumnInPreviousBatch = lastCell.getColumnName();
            return loadedColumns.iterator();
        }

        private RowsColumnRangeBatchRequest getBatchColumnRangeSelectionsByRow(
                Map<Sha256Hash, Integer> columnCountsByRowHashInBatch,
                Map<Sha256Hash, Integer> totalColumnCountsByRowHash) {
            ImmutableRowsColumnRangeBatchRequest.Builder rowsColumnRangeBatch =
                    ImmutableRowsColumnRangeBatchRequest.builder().columnRangeSelection(columnRangeSelection);
            Iterator<Map.Entry<Sha256Hash, Integer>> entries =
                    columnCountsByRowHashInBatch.entrySet().iterator();
            while (entries.hasNext()) {
                Map.Entry<Sha256Hash, Integer> entry = entries.next();
                Sha256Hash rowHash = entry.getKey();
                byte[] row = rowHashesToBytes.get(rowHash);
                boolean isPartialFirstRow = Objects.equals(lastRowHashInPreviousBatch, rowHash);
                if (isPartialFirstRow) {
                    byte[] startCol = RangeRequests.nextLexicographicName(lastColumnInPreviousBatch);
                    BatchColumnRangeSelection columnRange = BatchColumnRangeSelection.create(
                            startCol, columnRangeSelection.getEndCol(), entry.getValue());
                    rowsColumnRangeBatch.partialFirstRow(Maps.immutableEntry(row, columnRange));
                    continue;
                }
                boolean isFullyLoadedRow = totalColumnCountsByRowHash.get(rowHash).equals(entry.getValue());
                if (isFullyLoadedRow) {
                    rowsColumnRangeBatch.addRowsToLoadFully(row);
                } else {
                    Preconditions.checkArgument(!entries.hasNext(), "Only the last row should be partial.");
                    BatchColumnRangeSelection columnRange =
                            BatchColumnRangeSelection.create(columnRangeSelection, entry.getValue());
                    rowsColumnRangeBatch.partialLastRow(Maps.immutableEntry(row, columnRange));
                }
            }
            return rowsColumnRangeBatch.build();
        }
    };
    return results;
}
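As an illustration of how getBatchColumnRangeSelectionsByRow resumes a row that was split across batches: the carried-over row becomes the partial first row and its range starts strictly after the last column already loaded. The column name "c5" and the count below are invented for illustration, not taken from DbKvs.

    // Hypothetical resume computation for a row split across batches.
    byte[] lastLoaded = PtBytes.toBytes("c5");                       // last column loaded in the previous batch (invented)
    byte[] resumeFrom = RangeRequests.nextLexicographicName(lastLoaded);
    BatchColumnRangeSelection partialFirstRowRange = BatchColumnRangeSelection.create(
            resumeFrom,                          // start strictly after the last loaded column
            columnRangeSelection.getEndCol(),    // keep the caller's end bound
            3);                                  // invented: only 3 of this row's columns remain in the batch

Rows whose full remaining column count fits in the batch go to rowsToLoadFully; at most the final row of a batch may be partial, which the Preconditions check above enforces.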
Use of com.palantir.atlasdb.keyvalue.api.BatchColumnRangeSelection in project atlasdb by palantir.
The class DbKvs, method getRowsColumnRange: fetches the first page for every requested row, then chains further paging iterators for rows whose column ranges are not yet exhausted.
@Override
public Map<byte[], RowColumnRangeIterator> getRowsColumnRange(
        TableReference tableRef,
        Iterable<byte[]> rows,
        BatchColumnRangeSelection batchColumnRangeSelection,
        long timestamp) {
    List<byte[]> rowList = ImmutableList.copyOf(rows);
    Map<byte[], List<Map.Entry<Cell, Value>>> firstPage =
            getFirstRowsColumnRangePage(tableRef, rowList, batchColumnRangeSelection, timestamp);
    Map<byte[], RowColumnRangeIterator> ret = Maps.newHashMapWithExpectedSize(rowList.size());
    for (Entry<byte[], List<Map.Entry<Cell, Value>>> e : firstPage.entrySet()) {
        List<Map.Entry<Cell, Value>> results = e.getValue();
        if (results.isEmpty()) {
            ret.put(e.getKey(), new LocalRowColumnRangeIterator(e.getValue().iterator()));
            continue;
        }
        byte[] lastCol = results.get(results.size() - 1).getKey().getColumnName();
        RowColumnRangeIterator firstPageIter = new LocalRowColumnRangeIterator(e.getValue().iterator());
        if (isEndOfColumnRange(lastCol, batchColumnRangeSelection.getEndCol())) {
            ret.put(e.getKey(), firstPageIter);
        } else {
            byte[] nextCol = RangeRequests.nextLexicographicName(lastCol);
            BatchColumnRangeSelection nextColumnRangeSelection = BatchColumnRangeSelection.create(
                    nextCol, batchColumnRangeSelection.getEndCol(), batchColumnRangeSelection.getBatchHint());
            Iterator<Map.Entry<Cell, Value>> nextPagesIter =
                    getRowColumnRange(tableRef, e.getKey(), nextColumnRangeSelection, timestamp);
            ret.put(e.getKey(), new LocalRowColumnRangeIterator(Iterators.concat(firstPageIter, nextPagesIter)));
        }
    }
    return ret;
}
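A minimal usage sketch of the getRowsColumnRange contract follows. The kvs, tableRef, row1, row2 and timestamp bindings are assumed to exist; empty byte arrays denote an unbounded column range, 100 is an arbitrary batch hint, and handle(...) is a hypothetical consumer.

    BatchColumnRangeSelection selection = BatchColumnRangeSelection.create(
            PtBytes.EMPTY_BYTE_ARRAY,   // unbounded start of the column range
            PtBytes.EMPTY_BYTE_ARRAY,   // unbounded end: read to the end of each row
            100);                       // fetch roughly 100 columns per page
    Map<byte[], RowColumnRangeIterator> iterators =
            kvs.getRowsColumnRange(tableRef, ImmutableList.of(row1, row2), selection, timestamp);
    for (Map.Entry<byte[], RowColumnRangeIterator> entry : iterators.entrySet()) {
        // Each iterator pages lazily; only the first page was fetched eagerly above.
        entry.getValue().forEachRemaining(cellAndValue -> handle(entry.getKey(), cellAndValue));
    }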
Use of com.palantir.atlasdb.keyvalue.api.BatchColumnRangeSelection in project atlasdb by palantir.
The class KeyValueServices, method mergeGetRowsColumnRangeIntoSingleIterator: splits the batch hint across rows and concatenates the per-row iterators in the caller's row order.
public static RowColumnRangeIterator mergeGetRowsColumnRangeIntoSingleIterator(
        KeyValueService kvs,
        TableReference tableRef,
        Iterable<byte[]> rows,
        ColumnRangeSelection columnRangeSelection,
        int batchHint,
        long timestamp) {
    if (Iterables.isEmpty(rows)) {
        return new LocalRowColumnRangeIterator(Collections.emptyIterator());
    }
    int columnBatchSize = batchHint / Iterables.size(rows);
    BatchColumnRangeSelection batchColumnRangeSelection =
            BatchColumnRangeSelection.create(columnRangeSelection, columnBatchSize);
    Map<byte[], RowColumnRangeIterator> rowsColumnRanges =
            kvs.getRowsColumnRange(tableRef, rows, batchColumnRangeSelection, timestamp);
    // Return results in the same order as the provided rows.
    Iterable<RowColumnRangeIterator> orderedRanges = Iterables.transform(rows, rowsColumnRanges::get);
    return new LocalRowColumnRangeIterator(Iterators.concat(orderedRanges.iterator()));
}
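The only sizing logic here is the integer division of the batch hint across rows; a small worked example with invented numbers:

    int batchHint = 1000;                         // caller's overall hint (invented)
    int rowCount = 10;                            // Iterables.size(rows) in the method above (invented)
    int columnBatchSize = batchHint / rowCount;   // == 100 columns per row per page
    // Note: with more rows than the hint (say 2000 rows), integer division gives 0;
    // how a zero hint behaves depends on BatchColumnRangeSelection's validation,
    // which is not shown in this excerpt.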
Use of com.palantir.atlasdb.keyvalue.api.BatchColumnRangeSelection in project atlasdb by palantir.
The class CassandraKeyValueServiceImpl, method getRowsColumnRangeIteratorForSingleHost: builds per-row iterators from a first page fetched from one Cassandra host, resuming incomplete rows where the first page left off.
private Map<byte[], RowColumnRangeIterator> getRowsColumnRangeIteratorForSingleHost(
        InetSocketAddress host,
        TableReference tableRef,
        List<byte[]> rows,
        BatchColumnRangeSelection batchColumnRangeSelection,
        long startTs) {
    try {
        RowColumnRangeExtractor.RowColumnRangeResult firstPage =
                getRowsColumnRangeForSingleHost(host, tableRef, rows, batchColumnRangeSelection, startTs);
        Map<byte[], LinkedHashMap<Cell, Value>> results = firstPage.getResults();
        Map<byte[], Column> rowsToLastCompositeColumns = firstPage.getRowsToLastCompositeColumns();
        Map<byte[], byte[]> incompleteRowsToNextColumns = Maps.newHashMap();
        for (Entry<byte[], Column> e : rowsToLastCompositeColumns.entrySet()) {
            byte[] row = e.getKey();
            byte[] col = CassandraKeyValueServices.decomposeName(e.getValue()).getLhSide();
            // If we read a version of the cell before our start timestamp, it will be the most recent version
            // readable to us and we can continue to the next column. Otherwise we have to continue reading
            // this column.
            Map<Cell, Value> rowResult = results.get(row);
            boolean completedCell = (rowResult != null) && rowResult.containsKey(Cell.create(row, col));
            boolean endOfRange = isEndOfColumnRange(
                    completedCell, col, firstPage.getRowsToRawColumnCount().get(row), batchColumnRangeSelection);
            if (!endOfRange) {
                byte[] nextCol = getNextColumnRangeColumn(completedCell, col);
                incompleteRowsToNextColumns.put(row, nextCol);
            }
        }
        Map<byte[], RowColumnRangeIterator> ret = Maps.newHashMapWithExpectedSize(rows.size());
        for (byte[] row : rowsToLastCompositeColumns.keySet()) {
            Iterator<Entry<Cell, Value>> resultIterator;
            Map<Cell, Value> result = results.get(row);
            if (result != null) {
                resultIterator = result.entrySet().iterator();
            } else {
                resultIterator = Collections.emptyIterator();
            }
            byte[] nextCol = incompleteRowsToNextColumns.get(row);
            if (nextCol == null) {
                ret.put(row, new LocalRowColumnRangeIterator(resultIterator));
            } else {
                BatchColumnRangeSelection newColumnRange = BatchColumnRangeSelection.create(
                        nextCol, batchColumnRangeSelection.getEndCol(), batchColumnRangeSelection.getBatchHint());
                ret.put(row, new LocalRowColumnRangeIterator(Iterators.concat(
                        resultIterator, getRowColumnRange(host, tableRef, row, newColumnRange, startTs))));
            }
        }
        // We saw no Cassandra results at all for these rows, so the entire column range is empty for these rows.
        for (byte[] row : firstPage.getEmptyRows()) {
            ret.put(row, new LocalRowColumnRangeIterator(Collections.emptyIterator()));
        }
        return ret;
    } catch (Exception e) {
        throw QosAwareThrowables.unwrapAndThrowRateLimitExceededOrAtlasDbDependencyException(e);
    }
}
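The helper getNextColumnRangeColumn is not shown in this excerpt; based on the comment about start timestamps above, a plausible sketch of its decision is:

    // Plausible sketch, not the verbatim AtlasDB source: pick where the next
    // page of a still-incomplete row should start.
    private static byte[] getNextColumnRangeColumn(boolean completedCell, byte[] col) {
        // A completed cell means we already saw the newest version readable at
        // our start timestamp, so resume strictly after it; otherwise re-read
        // the same column to look for an older, readable version.
        return completedCell ? RangeRequests.nextLexicographicName(col) : col;
    }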