Use of com.palantir.util.crypto.Sha256Hash in project atlasdb by palantir.
The class DbKvs, method extractRowColumnRangePageInternal.
private Map<byte[], List<Map.Entry<Cell, Value>>> extractRowColumnRangePageInternal(
        DbReadTable table,
        TableReference tableRef,
        Supplier<ClosableIterator<AgnosticLightResultRow>> rowLoader,
        Collection<byte[]> allRows) {
    Map<Sha256Hash, byte[]> hashesToBytes = Maps.newHashMapWithExpectedSize(allRows.size());
    Map<Sha256Hash, List<Cell>> cellsByRow = Maps.newHashMap();
    for (byte[] row : allRows) {
        Sha256Hash rowHash = Sha256Hash.computeHash(row);
        hashesToBytes.put(rowHash, row);
        cellsByRow.put(rowHash, Lists.newArrayList());
    }
    boolean hasOverflow = table.hasOverflowValues();
    Map<Cell, Value> values = Maps.newHashMap();
    Map<Cell, OverflowValue> overflowValues = Maps.newHashMap();
    try (ClosableIterator<AgnosticLightResultRow> iter = rowLoader.get()) {
        while (iter.hasNext()) {
            AgnosticLightResultRow row = iter.next();
            Cell cell = Cell.create(row.getBytes(ROW), row.getBytes(COL));
            Sha256Hash rowHash = Sha256Hash.computeHash(cell.getRowName());
            cellsByRow.get(rowHash).add(cell);
            Long overflowId = hasOverflow ? row.getLongObject("overflow") : null;
            if (overflowId == null) {
                Value value = Value.create(row.getBytes(VAL), row.getLong(TIMESTAMP));
                Value oldValue = values.put(cell, value);
                // Keep whichever value for this cell carries the newer timestamp.
                if (oldValue != null && oldValue.getTimestamp() > value.getTimestamp()) {
                    values.put(cell, oldValue);
                }
            } else {
                OverflowValue ov = ImmutableOverflowValue.of(row.getLong(TIMESTAMP), overflowId);
                OverflowValue oldOv = overflowValues.put(cell, ov);
                if (oldOv != null && oldOv.ts() > ov.ts()) {
                    overflowValues.put(cell, oldOv);
                }
            }
        }
    }
    fillOverflowValues(table.getConnectionSupplier(), tableRef, overflowValues, values);
    Map<byte[], List<Map.Entry<Cell, Value>>> results = Maps.newHashMapWithExpectedSize(allRows.size());
    for (Entry<Sha256Hash, List<Cell>> e : cellsByRow.entrySet()) {
        List<Map.Entry<Cell, Value>> fullResults = Lists.newArrayListWithExpectedSize(e.getValue().size());
        for (Cell c : e.getValue()) {
            fullResults.add(Iterables.getOnlyElement(ImmutableMap.of(c, values.get(c)).entrySet()));
        }
        results.put(hashesToBytes.get(e.getKey()), fullResults);
    }
    return results;
}
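Sha256Hash is doing double duty here as a hashable stand-in for the raw byte[] row name: Java arrays use identity-based equals/hashCode, so they make poor HashMap keys, while two Sha256Hash instances computed from equal bytes compare equal. A minimal standalone sketch of that property (the class name and test data are ours, not atlasdb code):

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

import com.palantir.util.crypto.Sha256Hash;

public class RowKeyExample {
    public static void main(String[] args) {
        byte[] row = "row-1".getBytes(StandardCharsets.UTF_8);
        Map<Sha256Hash, byte[]> hashesToBytes = new HashMap<>();
        hashesToBytes.put(Sha256Hash.computeHash(row), row);

        // A fresh array with identical contents would miss as a raw byte[] key,
        // but hashes of equal bytes are equal, so this lookup succeeds.
        byte[] sameBytes = "row-1".getBytes(StandardCharsets.UTF_8);
        System.out.println(hashesToBytes.containsKey(Sha256Hash.computeHash(sameBytes))); // true
    }
}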
Use of com.palantir.util.crypto.Sha256Hash in project atlasdb by palantir.
The class DbKvs, method loadColumnsForBatches.
private Iterator<Iterator<Map.Entry<Cell, Value>>> loadColumnsForBatches(
        TableReference tableRef,
        ColumnRangeSelection columnRangeSelection,
        long timestamp,
        Map<Sha256Hash, byte[]> rowHashesToBytes,
        Iterator<Map<Sha256Hash, Integer>> batches,
        Map<Sha256Hash, Integer> columnCountByRowHash) {
    Iterator<Iterator<Map.Entry<Cell, Value>>> results = new AbstractIterator<Iterator<Map.Entry<Cell, Value>>>() {
        private Sha256Hash lastRowHashInPreviousBatch = null;
        private byte[] lastColumnInPreviousBatch = null;

        @Override
        protected Iterator<Map.Entry<Cell, Value>> computeNext() {
            if (!batches.hasNext()) {
                return endOfData();
            }
            Map<Sha256Hash, Integer> currentBatch = batches.next();
            RowsColumnRangeBatchRequest columnRangeSelectionsByRow =
                    getBatchColumnRangeSelectionsByRow(currentBatch, columnCountByRowHash);
            Map<byte[], List<Map.Entry<Cell, Value>>> resultsByRow =
                    extractRowColumnRangePage(tableRef, columnRangeSelectionsByRow, timestamp);
            int totalEntries = resultsByRow.values().stream().mapToInt(List::size).sum();
            if (totalEntries == 0) {
                return Collections.emptyIterator();
            }
            // Ensure order matches that of the provided batch.
            List<Map.Entry<Cell, Value>> loadedColumns = new ArrayList<>(totalEntries);
            for (Sha256Hash rowHash : currentBatch.keySet()) {
                byte[] row = rowHashesToBytes.get(rowHash);
                loadedColumns.addAll(resultsByRow.get(row));
            }
            // Record where this batch ended so the next batch can resume
            // immediately after the last cell emitted.
            Cell lastCell = Iterables.getLast(loadedColumns).getKey();
            lastRowHashInPreviousBatch = Sha256Hash.computeHash(lastCell.getRowName());
            lastColumnInPreviousBatch = lastCell.getColumnName();
            return loadedColumns.iterator();
        }

        private RowsColumnRangeBatchRequest getBatchColumnRangeSelectionsByRow(
                Map<Sha256Hash, Integer> columnCountsByRowHashInBatch,
                Map<Sha256Hash, Integer> totalColumnCountsByRowHash) {
            ImmutableRowsColumnRangeBatchRequest.Builder rowsColumnRangeBatch =
                    ImmutableRowsColumnRangeBatchRequest.builder().columnRangeSelection(columnRangeSelection);
            Iterator<Map.Entry<Sha256Hash, Integer>> entries = columnCountsByRowHashInBatch.entrySet().iterator();
            while (entries.hasNext()) {
                Map.Entry<Sha256Hash, Integer> entry = entries.next();
                Sha256Hash rowHash = entry.getKey();
                byte[] row = rowHashesToBytes.get(rowHash);
                boolean isPartialFirstRow = Objects.equals(lastRowHashInPreviousBatch, rowHash);
                if (isPartialFirstRow) {
                    byte[] startCol = RangeRequests.nextLexicographicName(lastColumnInPreviousBatch);
                    BatchColumnRangeSelection columnRange =
                            BatchColumnRangeSelection.create(startCol, columnRangeSelection.getEndCol(), entry.getValue());
                    rowsColumnRangeBatch.partialFirstRow(Maps.immutableEntry(row, columnRange));
                    continue;
                }
                boolean isFullyLoadedRow = totalColumnCountsByRowHash.get(rowHash).equals(entry.getValue());
                if (isFullyLoadedRow) {
                    rowsColumnRangeBatch.addRowsToLoadFully(row);
                } else {
                    Preconditions.checkArgument(!entries.hasNext(), "Only the last row should be partial.");
                    BatchColumnRangeSelection columnRange =
                            BatchColumnRangeSelection.create(columnRangeSelection, entry.getValue());
                    rowsColumnRangeBatch.partialLastRow(Maps.immutableEntry(row, columnRange));
                }
            }
            return rowsColumnRangeBatch.build();
        }
    };
    return results;
}
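The interesting part of this iterator is the lastRowHashInPreviousBatch / lastColumnInPreviousBatch pair: each batch records the last cell it emitted so that the next request can treat that row as a partial first row and resume strictly after the last column. A minimal sketch of the same resume-after-last-key pattern, with a hypothetical loadPage callback standing in for extractRowColumnRangePage (PagedIterator and its methods are our names, not atlasdb's):

import java.util.Iterator;
import java.util.List;

import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Iterables;

abstract class PagedIterator<K, V> extends AbstractIterator<Iterator<V>> {
    private K lastKeyInPreviousBatch = null;

    // Assumed to load one page starting strictly after the given key, or from
    // the beginning when the key is null; an empty list signals exhaustion.
    protected abstract List<V> loadPage(K startAfterKey);

    // Extracts the resume key from a loaded value.
    protected abstract K keyOf(V value);

    @Override
    protected Iterator<V> computeNext() {
        List<V> page = loadPage(lastKeyInPreviousBatch);
        if (page.isEmpty()) {
            return endOfData();
        }
        // Record where this page ended, just as the code above records the
        // row hash and column name of the last cell in the batch.
        lastKeyInPreviousBatch = keyOf(Iterables.getLast(page));
        return page.iterator();
    }
}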
Use of com.palantir.util.crypto.Sha256Hash in project atlasdb by palantir.
The class HotspottyDataStreamStore, method putHashIndexTask.
private void putHashIndexTask(Transaction t, Map<HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow, StreamMetadata> rowsToMetadata) {
    Multimap<HotspottyDataStreamHashAidxTable.HotspottyDataStreamHashAidxRow, HotspottyDataStreamHashAidxTable.HotspottyDataStreamHashAidxColumnValue> indexMap = HashMultimap.create();
    for (Entry<HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow, StreamMetadata> e : rowsToMetadata.entrySet()) {
        HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow row = e.getKey();
        StreamMetadata metadata = e.getValue();
        Preconditions.checkArgument(metadata.getStatus() == Status.STORED, "Should only index successfully stored streams.");
        Sha256Hash hash = Sha256Hash.EMPTY;
        if (metadata.getHash() != com.google.protobuf.ByteString.EMPTY) {
            hash = new Sha256Hash(metadata.getHash().toByteArray());
        }
        HotspottyDataStreamHashAidxTable.HotspottyDataStreamHashAidxRow hashRow = HotspottyDataStreamHashAidxTable.HotspottyDataStreamHashAidxRow.of(hash);
        HotspottyDataStreamHashAidxTable.HotspottyDataStreamHashAidxColumn column = HotspottyDataStreamHashAidxTable.HotspottyDataStreamHashAidxColumn.of(row.getId());
        HotspottyDataStreamHashAidxTable.HotspottyDataStreamHashAidxColumnValue columnValue = HotspottyDataStreamHashAidxTable.HotspottyDataStreamHashAidxColumnValue.of(column, 0L);
        indexMap.put(hashRow, columnValue);
    }
    HotspottyDataStreamHashAidxTable hiTable = tables.getHotspottyDataStreamHashAidxTable(t);
    hiTable.put(indexMap);
}
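Converting the stored protobuf hash into a Sha256Hash is the one step this task shares with every other stream-store index task: an absent hash is represented by Sha256Hash.EMPTY rather than null. A minimal sketch of that conversion extracted as a standalone helper (the class and method names are ours, not atlasdb's):

import com.google.protobuf.ByteString;
import com.palantir.util.crypto.Sha256Hash;

final class StreamHashes {
    private StreamHashes() {}

    static Sha256Hash hashFromMetadata(ByteString storedHash) {
        // The generated code compares against the ByteString.EMPTY singleton;
        // storedHash.isEmpty() would be the more defensive equivalent.
        if (storedHash != ByteString.EMPTY) {
            return new Sha256Hash(storedHash.toByteArray());
        }
        return Sha256Hash.EMPTY;
    }
}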
Use of com.palantir.util.crypto.Sha256Hash in project atlasdb by palantir.
The class DataStreamStore, method deleteStreams.
/**
 * This should only be used from the cleanup tasks.
 */
void deleteStreams(Transaction t, final Set<Long> streamIds) {
    if (streamIds.isEmpty()) {
        return;
    }
    Set<DataStreamMetadataTable.DataStreamMetadataRow> smRows = Sets.newHashSet();
    Multimap<DataStreamHashAidxTable.DataStreamHashAidxRow, DataStreamHashAidxTable.DataStreamHashAidxColumn> shToDelete = HashMultimap.create();
    for (Long streamId : streamIds) {
        smRows.add(DataStreamMetadataTable.DataStreamMetadataRow.of(streamId));
    }
    DataStreamMetadataTable table = tables.getDataStreamMetadataTable(t);
    Map<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> metadatas = table.getMetadatas(smRows);
    Set<DataStreamValueTable.DataStreamValueRow> streamValueToDelete = Sets.newHashSet();
    for (Entry<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> e : metadatas.entrySet()) {
        Long streamId = e.getKey().getId();
        long blocks = getNumberOfBlocksFromMetadata(e.getValue());
        for (long i = 0; i < blocks; i++) {
            streamValueToDelete.add(DataStreamValueTable.DataStreamValueRow.of(streamId, i));
        }
        ByteString streamHash = e.getValue().getHash();
        Sha256Hash hash = Sha256Hash.EMPTY;
        if (streamHash != com.google.protobuf.ByteString.EMPTY) {
            hash = new Sha256Hash(streamHash.toByteArray());
        } else {
            log.error("Empty hash for stream {}", streamId);
        }
        DataStreamHashAidxTable.DataStreamHashAidxRow hashRow = DataStreamHashAidxTable.DataStreamHashAidxRow.of(hash);
        DataStreamHashAidxTable.DataStreamHashAidxColumn column = DataStreamHashAidxTable.DataStreamHashAidxColumn.of(streamId);
        shToDelete.put(hashRow, column);
    }
    tables.getDataStreamHashAidxTable(t).delete(shToDelete);
    tables.getDataStreamValueTable(t).delete(streamValueToDelete);
    table.delete(smRows);
}
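Deletion fans out across three tables because a stream's contents are stored one value row per block index, and the metadata's block count drives the enumeration. A minimal sketch of that per-block fan-out, using a hypothetical BlockRow pair in place of the generated DataStreamValueRow:

import java.util.HashSet;
import java.util.Set;

final class BlockRows {
    // Hypothetical stand-in for the generated DataStreamValueRow key.
    record BlockRow(long streamId, long blockIndex) {}

    static Set<BlockRow> valueRowsToDelete(long streamId, long blocks) {
        Set<BlockRow> rows = new HashSet<>();
        // One value row exists per block index in [0, blocks), so deleting a
        // stream means enumerating every index the metadata says was written.
        for (long i = 0; i < blocks; i++) {
            rows.add(new BlockRow(streamId, i));
        }
        return rows;
    }
}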
Use of com.palantir.util.crypto.Sha256Hash in project atlasdb by palantir.
The class DataStreamStore, method putHashIndexTask.
private void putHashIndexTask(Transaction t, Map<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> rowsToMetadata) {
    Multimap<DataStreamHashAidxTable.DataStreamHashAidxRow, DataStreamHashAidxTable.DataStreamHashAidxColumnValue> indexMap = HashMultimap.create();
    for (Entry<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> e : rowsToMetadata.entrySet()) {
        DataStreamMetadataTable.DataStreamMetadataRow row = e.getKey();
        StreamMetadata metadata = e.getValue();
        Preconditions.checkArgument(metadata.getStatus() == Status.STORED, "Should only index successfully stored streams.");
        Sha256Hash hash = Sha256Hash.EMPTY;
        if (metadata.getHash() != com.google.protobuf.ByteString.EMPTY) {
            hash = new Sha256Hash(metadata.getHash().toByteArray());
        }
        DataStreamHashAidxTable.DataStreamHashAidxRow hashRow = DataStreamHashAidxTable.DataStreamHashAidxRow.of(hash);
        DataStreamHashAidxTable.DataStreamHashAidxColumn column = DataStreamHashAidxTable.DataStreamHashAidxColumn.of(row.getId());
        DataStreamHashAidxTable.DataStreamHashAidxColumnValue columnValue = DataStreamHashAidxTable.DataStreamHashAidxColumnValue.of(column, 0L);
        indexMap.put(hashRow, columnValue);
    }
    DataStreamHashAidxTable hiTable = tables.getDataStreamHashAidxTable(t);
    hiTable.put(indexMap);
}
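This index is what makes streams content-addressable: the row key is the SHA-256 of the stream's bytes and each column names one stream id that stored them, so a caller can check for existing identical content by hashing first. A minimal sketch of computing the lookup key (hypothetical data, no transaction plumbing; printing stands in for the index read):

import com.palantir.util.crypto.Sha256Hash;

public class HashLookupExample {
    public static void main(String[] args) {
        byte[] streamContents = {1, 2, 3};
        // The same digest used as the aidx row key above; reading
        // DataStreamHashAidxRow.of(hash) would yield the ids of all
        // streams whose stored bytes hash to this value.
        Sha256Hash hash = Sha256Hash.computeHash(streamContents);
        System.out.println(hash);
    }
}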