Use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
From the class SerializableTransaction, method verifyCells.
private void verifyCells(Transaction readOnlyTransaction) {
    for (Entry<TableReference, Set<Cell>> tableAndCellsEntry : cellsRead.entrySet()) {
        TableReference table = tableAndCellsEntry.getKey();
        Set<Cell> cells = tableAndCellsEntry.getValue();
        final ConcurrentNavigableMap<Cell, byte[]> readsForTable = getReadsForTable(table);
        for (Iterable<Cell> batch : Iterables.partition(cells, BATCH_SIZE)) {
            // We don't want to verify any reads that we wrote to, because we would just read our own values.
            // NB: If the value has changed between read and write, our normal SI checking handles this case.
            Iterable<Cell> batchWithoutWrites = writesByTable.get(table) != null
                    ? Iterables.filter(batch, Predicates.not(Predicates.in(writesByTable.get(table).keySet())))
                    : batch;
            ImmutableSet<Cell> batchWithoutWritesSet = ImmutableSet.copyOf(batchWithoutWrites);
            Map<Cell, byte[]> currentBatch = readOnlyTransaction.get(table, batchWithoutWritesSet);
            ImmutableMap<Cell, byte[]> originalReads = Maps.toMap(
                    Sets.intersection(batchWithoutWritesSet, readsForTable.keySet()),
                    Functions.forMap(readsForTable));
            if (!areMapsEqual(currentBatch, originalReads)) {
                handleTransactionConflict(table);
            }
        }
    }
}
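The conflict check above hinges on areMapsEqual, which is not shown in this snippet. Because the map values are byte[], a plain Map.equals would compare the arrays by reference rather than by content. The following is a minimal sketch of how such a helper could be written (using java.util.Arrays); it is an assumption, not the actual AtlasDB implementation.
private boolean areMapsEqual(Map<Cell, byte[]> left, Map<Cell, byte[]> right) {
    // Sketch only: compare byte[] values by content with Arrays.equals.
    if (left.size() != right.size()) {
        return false;
    }
    for (Entry<Cell, byte[]> entry : left.entrySet()) {
        byte[] other = right.get(entry.getKey());
        // A missing key yields null and fails the check, as does any content mismatch.
        if (other == null || !Arrays.equals(entry.getValue(), other)) {
            return false;
        }
    }
    return true;
}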
Use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
From the class SerializableTransaction, method verifyColumnRanges.
private void verifyColumnRanges(Transaction readOnlyTransaction) {
    // Verify each set of reads to ensure they are the same.
    for (Entry<TableReference, ConcurrentMap<byte[], ConcurrentMap<BatchColumnRangeSelection, byte[]>>> tableAndRange
            : columnRangeEndsByTable.entrySet()) {
        TableReference table = tableAndRange.getKey();
        Map<byte[], ConcurrentMap<BatchColumnRangeSelection, byte[]>> columnRangeEnds = tableAndRange.getValue();
        Map<Cell, byte[]> writes = writesByTable.get(table);
        Map<BatchColumnRangeSelection, List<byte[]>> rangesToRows = Maps.newHashMap();
        for (Entry<byte[], ConcurrentMap<BatchColumnRangeSelection, byte[]>> rowAndRangeEnds
                : columnRangeEnds.entrySet()) {
            byte[] row = rowAndRangeEnds.getKey();
            Map<BatchColumnRangeSelection, byte[]> rangeEnds = rowAndRangeEnds.getValue();
            for (Entry<BatchColumnRangeSelection, byte[]> e : rangeEnds.entrySet()) {
                BatchColumnRangeSelection range = e.getKey();
                byte[] rangeEnd = e.getValue();
                if (rangeEnd.length != 0 && !RangeRequests.isTerminalRow(false, rangeEnd)) {
                    range = BatchColumnRangeSelection.create(
                            range.getStartCol(),
                            RangeRequests.getNextStartRow(false, rangeEnd),
                            range.getBatchHint());
                }
                if (rangesToRows.get(range) != null) {
                    rangesToRows.get(range).add(row);
                } else {
                    // A mutable list is required here: the add() above would throw on an ImmutableList
                    // when a second row maps to the same column range.
                    rangesToRows.put(range, Lists.newArrayList(row));
                }
            }
        }
        for (Entry<BatchColumnRangeSelection, List<byte[]>> e : rangesToRows.entrySet()) {
            BatchColumnRangeSelection range = e.getKey();
            List<byte[]> rows = e.getValue();
            Map<byte[], BatchingVisitable<Map.Entry<Cell, byte[]>>> result =
                    readOnlyTransaction.getRowsColumnRange(table, rows, range);
            for (Entry<byte[], BatchingVisitable<Map.Entry<Cell, byte[]>>> res : result.entrySet()) {
                byte[] row = res.getKey();
                BatchingVisitableView<Entry<Cell, byte[]>> bv = BatchingVisitableView.of(res.getValue());
                NavigableMap<Cell, ByteBuffer> readsInRange = Maps.transformValues(
                        getReadsInColumnRange(table, row, range), input -> ByteBuffer.wrap(input));
                boolean isEqual = bv.transformBatch(input -> filterWritesFromCells(input, writes))
                        .isEqual(readsInRange.entrySet());
                if (!isEqual) {
                    handleTransactionConflict(table);
                }
            }
        }
    }
}
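The comparison above relies on filterWritesFromCells, which is also not part of this snippet. Since its output is compared via isEqual against readsInRange, whose values are ByteBuffers, a reasonable assumption is that it drops cells this transaction wrote itself (mirroring the write filtering in verifyCells) and wraps the remaining values in ByteBuffer so they compare by content. The sketch below reflects that assumption, not the actual AtlasDB implementation.
private List<Entry<Cell, ByteBuffer>> filterWritesFromCells(Iterable<Entry<Cell, byte[]>> cells, Map<Cell, byte[]> writes) {
    // Sketch only: skip cells in the write set; the normal SI conflict check covers those.
    List<Entry<Cell, ByteBuffer>> filtered = Lists.newArrayList();
    for (Entry<Cell, byte[]> cell : cells) {
        if (writes == null || !writes.containsKey(cell.getKey())) {
            // Wrap in ByteBuffer so equals() compares contents, matching the values in readsInRange.
            filtered.add(Maps.immutableEntry(cell.getKey(), ByteBuffer.wrap(cell.getValue())));
        }
    }
    return filtered;
}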
Use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
From the class KeyValueServiceMigratorsTest, method skipsTheOldScrubTable.
@Test
public void skipsTheOldScrubTable() {
    TableReference tableToMigrate = TableReference.create(Namespace.DEFAULT_NAMESPACE, "can-be-migrated");
    KeyValueService fromKvs = mock(KeyValueService.class);
    when(fromKvs.getAllTableNames())
            .thenReturn(ImmutableSet.of(AtlasDbConstants.OLD_SCRUB_TABLE, tableToMigrate));
    Set<TableReference> creatableTableNames =
            KeyValueServiceMigrators.getMigratableTableNames(fromKvs, ImmutableSet.of());
    assertThat(creatableTableNames).containsExactly(tableToMigrate);
}
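The test only pins down the observable behaviour: AtlasDbConstants.OLD_SCRUB_TABLE must not appear in the migratable set. Below is a minimal sketch of that filtering, assuming a skip set containing the old scrub table plus any caller-supplied tables; the real KeyValueServiceMigrators.getMigratableTableNames may exclude additional system tables and take different parameters, and the method name used here is hypothetical.
static Set<TableReference> getMigratableTableNamesSketch(KeyValueService fromKvs, Set<TableReference> extraTablesToSkip) {
    // Sketch only: keep every table the source KVS reports except the skipped ones.
    Set<TableReference> skip = new HashSet<>(extraTablesToSkip);
    skip.add(AtlasDbConstants.OLD_SCRUB_TABLE);
    return fromKvs.getAllTableNames().stream()
            .filter(table -> !skip.contains(table))
            .collect(Collectors.toSet());
}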
Use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
From the class ScrubberTest, method testScrubQueueIsCleared.
@Test
public void testScrubQueueIsCleared() {
    Cell cell1 = Cell.create(new byte[] { 1 }, new byte[] { 2 });
    Cell cell2 = Cell.create(new byte[] { 2 }, new byte[] { 3 });
    Cell cell3 = Cell.create(new byte[] { 3 }, new byte[] { 4 });
    TableReference tableRef = TableReference.createFromFullyQualifiedName("foo.bar");
    kvs.createTable(tableRef, new byte[] {});
    kvs.putWithTimestamps(tableRef, ImmutableMultimap.<Cell, Value>builder()
            .put(cell1, Value.create(new byte[] { 3 }, 10))
            .put(cell1, Value.create(new byte[] { 4 }, 20))
            .put(cell2, Value.create(new byte[] { 4 }, 30))
            .put(cell2, Value.create(new byte[] { 5 }, 40))
            .put(cell2, Value.create(new byte[] { 6 }, 50))
            .put(cell3, Value.create(new byte[] { 7 }, 60))
            .build());
    transactions.putUnlessExists(10, 15);
    transactions.putUnlessExists(20, 25);
    transactions.putUnlessExists(30, 35);
    transactions.putUnlessExists(50, 55);
    transactions.putUnlessExists(60, 65);
    scrubStore.queueCellsForScrubbing(ImmutableMultimap.of(cell1, tableRef), 10, 100);
    scrubStore.queueCellsForScrubbing(ImmutableMultimap.of(cell1, tableRef), 20, 100);
    scrubStore.queueCellsForScrubbing(ImmutableMultimap.of(cell2, tableRef), 40, 100);
    scrubStore.queueCellsForScrubbing(ImmutableMultimap.of(cell2, tableRef), 50, 100);
    scrubStore.queueCellsForScrubbing(ImmutableMultimap.of(cell3, tableRef), 60, 100);
    scrubber.runBackgroundScrubTask(null);
    List<SortedMap<Long, Multimap<TableReference, Cell>>> scrubQueue = BatchingVisitables.copyToList(
            scrubStore.getBatchingVisitableScrubQueue(Long.MAX_VALUE, null, null));
    Assert.assertEquals(ImmutableList.of(), scrubQueue);
}
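For contrast, the same queue query issued before scrubber.runBackgroundScrubTask(null) would be expected to return the queued cells rather than an empty list. A short hedged sketch, using only the calls already shown in the test:
List<SortedMap<Long, Multimap<TableReference, Cell>>> queuedBeforeScrub = BatchingVisitables.copyToList(
        scrubStore.getBatchingVisitableScrubQueue(Long.MAX_VALUE, null, null));
// Expected to hold entries for cell1, cell2 and cell3 at their queued scrub timestamps.
Assert.assertFalse(queuedBeforeScrub.isEmpty());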
Use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
From the class Benchmarks, method createStreamingTable.
private static void createStreamingTable(KeyValueService kvs, TableReference parentTable, String columnName) {
    StreamStoreDefinition ssd = new StreamStoreDefinitionBuilder(columnName, "Value", ValueType.VAR_LONG)
            .inMemoryThreshold(1024 * 1024)
            .build();
    ssd.getTables().forEach((tableName, tableDefinition) -> {
        TableReference streamingTable = TableReference.create(parentTable.getNamespace(), tableName);
        kvs.createTable(streamingTable, tableDefinition.toTableMetadata().persistToBytes());
    });
}
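A hypothetical call site for the helper above; the in-memory KVS and the table and column names are assumptions for illustration, and only createStreamingTable itself comes from the snippet.
// Sketch only: assumed setup, not part of the Benchmarks class.
KeyValueService kvs = new InMemoryKeyValueService(false);
TableReference parentTable = TableReference.createFromFullyQualifiedName("benchmark.parent");
// Creates every physical table that backs the stream store in the parent table's namespace.
createStreamingTable(kvs, parentTable, "data");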