Example usage of com.palantir.nexus.db.sql.AgnosticLightResultRow in the palantir/atlasdb project: class DbKvs, method getColumnCountsUnordered.
/**
 * Counts the columns within {@code columnRangeSelection} for each requested row, keyed by the
 * SHA-256 hash of the row name. Only rows that appear in the query results are present in the
 * returned map.
 *
 * @param tableRef table to read from
 * @param rowList rows whose column counts are wanted
 * @param columnRangeSelection range of columns to count
 * @param timestamp only cells visible at this timestamp are counted
 */
private Map<Sha256Hash, Integer> getColumnCountsUnordered(TableReference tableRef, List<byte[]> rowList, ColumnRangeSelection columnRangeSelection, long timestamp) {
    return runRead(tableRef, dbReadTable -> {
        // newHashMapWithExpectedSize sizes the backing table so rowList.size() entries fit
        // without rehashing; new HashMap<>(n) sets raw capacity and would resize at 0.75*n.
        // This also matches the map-construction style used elsewhere in this class.
        Map<Sha256Hash, Integer> counts = Maps.newHashMapWithExpectedSize(rowList.size());
        try (ClosableIterator<AgnosticLightResultRow> iter = dbReadTable.getRowsColumnRangeCounts(rowList, timestamp, columnRangeSelection)) {
            while (iter.hasNext()) {
                AgnosticLightResultRow row = iter.next();
                Sha256Hash rowHash = Sha256Hash.computeHash(row.getBytes(ROW));
                counts.put(rowHash, row.getInteger("column_count"));
            }
        }
        return counts;
    });
}
Example usage of com.palantir.nexus.db.sql.AgnosticLightResultRow in the palantir/atlasdb project: class DbKvs, method getTimestampsPageInternal.
/**
 * Loads one page of cell timestamps for {@code range}: first gathers up to the batch-hint
 * number of row names via a range scan, then loads the per-cell timestamp sets for those rows
 * and groups them into {@link RowResult}s.
 *
 * @param token continuation token from the previous page, passed through to getTimestampsByCell
 * @return a page of per-row timestamp sets plus a token indicating whether more results may exist
 */
private TokenBackedBasicResultsPage<RowResult<Set<Long>>, Token> getTimestampsPageInternal(DbReadTable table, RangeRequest range, long timestamp, long batchSize, Token token) {
// NOTE(review): byte[] uses identity hashCode/equals, so this set does not de-duplicate
// row names by content — presumably the range scan never yields duplicate rows; verify.
Set<byte[]> rows = Sets.newHashSet();
int maxRows = getMaxRowsFromBatchHint(range.getBatchHint());
// Phase 1: collect the row names covered by this page of the range scan.
try (ClosableIterator<AgnosticLightResultRow> rangeResults = table.getRange(range, timestamp, maxRows)) {
while (rows.size() < maxRows && rangeResults.hasNext()) {
byte[] rowName = rangeResults.next().getBytes(ROW);
if (rowName != null) {
rows.add(rowName);
}
}
// Nothing in range: return an empty terminal page (null token, no more results).
if (rows.isEmpty()) {
return SimpleTokenBackedResultsPage.create(null, ImmutableList.<RowResult<Set<Long>>>of(), false);
}
}
// An empty column-name set on the request means "select all columns".
ColumnSelection cols = range.getColumnNames().isEmpty() ? ColumnSelection.all() : ColumnSelection.create(range.getColumnNames());
// Phase 2: fetch the timestamps for every cell in the collected rows, resuming from token.
TimestampsByCellResultWithToken result = getTimestampsByCell(table, rows, cols, timestamp, batchSize, range.isReverse(), token);
NavigableMap<byte[], SortedMap<byte[], Set<Long>>> cellsByRow = Cells.breakCellsUpByRow(Multimaps.asMap(result.entries));
// Present rows in scan order: descending when the range is reversed.
if (range.isReverse()) {
cellsByRow = cellsByRow.descendingMap();
}
List<RowResult<Set<Long>>> finalResults = cellsByRow.entrySet().stream().map(entry -> RowResult.create(entry.getKey(), entry.getValue())).collect(Collectors.toList());
return SimpleTokenBackedResultsPage.create(result.getToken(), finalResults, result.mayHaveMoreResults());
}
Example usage of com.palantir.nexus.db.sql.AgnosticLightResultRow in the palantir/atlasdb project: class DbKvs, method doGetLatestTimestamps.
/**
 * Reads the latest cells from {@code table} and returns, for each cell, the greatest
 * timestamp observed across all result rows for that cell.
 */
private static Map<Cell, Long> doGetLatestTimestamps(DbReadTable table, Map<Cell, Long> timestampByCell) {
    Map<Cell, Long> latestByCell = Maps.newHashMap();
    try (ClosableIterator<AgnosticLightResultRow> resultRows = table.getLatestCells(timestampByCell, false)) {
        while (resultRows.hasNext()) {
            AgnosticLightResultRow resultRow = resultRows.next();
            Cell cell = Cell.create(resultRow.getBytes(ROW), resultRow.getBytes(COL));
            // merge keeps the maximum timestamp if the cell was already seen.
            latestByCell.merge(cell, resultRow.getLong(TIMESTAMP), Math::max);
        }
    }
    return latestByCell;
}
Example usage of com.palantir.nexus.db.sql.AgnosticLightResultRow in the palantir/atlasdb project: class TimestampsByCellResultWithToken, method moveForward.
/**
 * Advances the shared iterator past entries that were already returned on a previous page,
 * then flushes the buffered row into {@code entries}.
 *
 * @param oldToken token from previous page, specifying if we have already processed some entries from the current
 * row and should therefore skip them. If oldToken.shouldSkip() is true, we iterate until the end or the first
 * result that is either:
 * 1. In another row
 * 2. In a greater column
 * @return this, for chaining
 */
private TimestampsByCellResultWithToken moveForward(Token oldToken) {
boolean skipping = oldToken.shouldSkip();
while (skipping && iterator.hasNext()) {
// Peek rather than consume so the first non-skipped result remains available downstream.
AgnosticLightResultRow nextResult = iterator.peek();
if (finishedSkipping(oldToken, nextResult)) {
skipping = false;
} else {
iterator.next();
}
}
// NOTE(review): rowBuffer is populated elsewhere in this class (not visible here);
// confirm it holds the current row's staged results before this flush into entries.
entries.putAll(rowBuffer);
return this;
}
Example usage of com.palantir.nexus.db.sql.AgnosticLightResultRow in the palantir/atlasdb project: class OracleOverflowValueLoader, method loadOverflowValues.
/**
 * Loads the raw overflow bytes for each requested overflow id.
 *
 * @param conns supplier of database connections used by the queries
 * @param tableRef table whose overflow values are being loaded
 * @param overflowIds ids to fetch; an empty collection short-circuits to an empty map
 * @return map from overflow id to its value bytes
 */
@Override
public Map<Long, byte[]> loadOverflowValues(ConnectionSupplier conns, TableReference tableRef, Collection<Long> overflowIds) {
if (overflowIds.isEmpty()) {
return Collections.emptyMap();
} else {
Map<Long, byte[]> ret = Maps.newHashMapWithExpectedSize(overflowIds.size());
// The ids may be split across several queries; accumulate results from all of them.
for (FullQuery query : getOverflowQueries(conns, tableRef, overflowIds)) {
try (ClosableIterator<AgnosticLightResultRow> overflowIter = select(conns, query)) {
while (overflowIter.hasNext()) {
AgnosticLightResultRow row = overflowIter.next();
// QA-94468 LONG RAW typed columns ("val" in this case) must be retrieved first from the result
// set. See https://docs.oracle.com/cd/B19306_01/java.102/b14355/jstreams.htm#i1007581
// Do NOT reorder these two reads: getBytes("val") must precede getLong("id").
byte[] val = row.getBytes("val");
long id = row.getLong("id");
ret.put(id, val);
}
}
}
return ret;
}
}
Aggregations