use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
the class Scrubber method runBackgroundScrubTask.
@VisibleForTesting
void runBackgroundScrubTask(final TransactionManager txManager) {
    log.debug("Starting scrub task");
    // Warning: Let T be the hard delete transaction that triggered a scrub, and let S be its
    // start timestamp. If the locks for T happen to time out right after T checks that its
    // locks are held but right before T writes its commit timestamp (extremely rare case), AND
    // the unreadable timestamp is greater than S, then the scrub task could actually roll back
    // the hard delete transaction (forcing it to abort or retry). Note that this doesn't affect
    // correctness, but could be an annoying edge case that causes hard delete to take longer
    // than it otherwise would have.
    Long immutableTimestamp = immutableTimestampSupplier.get();
    Long unreadableTimestamp = unreadableTimestampSupplier.get();
    final long maxScrubTimestamp = aggressiveScrub
            ? immutableTimestamp
            : Math.min(unreadableTimestamp, immutableTimestamp);
    log.debug("Scrub task immutableTimestamp: {}, unreadableTimestamp: {}, maxScrubTimestamp: {}",
            immutableTimestamp, unreadableTimestamp, maxScrubTimestamp);
    final int batchSize = (int) Math.ceil(batchSizeSupplier.get() * ((double) threadCount / readThreadCount));
    List<byte[]> rangeBoundaries = Lists.newArrayList();
    rangeBoundaries.add(PtBytes.EMPTY_BYTE_ARRAY);
    if (readThreadCount > 1) {
        // This will actually partition into the closest higher power of 2 number of ranges.
        rangeBoundaries.addAll(Ordering.from(UnsignedBytes.lexicographicalComparator())
                .sortedCopy(new UniformRowNamePartitioner(ValueType.BLOB).getPartitions(readThreadCount - 1)));
    }
    rangeBoundaries.add(PtBytes.EMPTY_BYTE_ARRAY);
    List<Future<Void>> readerFutures = Lists.newArrayList();
    final AtomicInteger totalCellsRead = new AtomicInteger(0);
    for (int i = 0; i < rangeBoundaries.size() - 1; i++) {
        final byte[] startRow = rangeBoundaries.get(i);
        final byte[] endRow = rangeBoundaries.get(i + 1);
        readerFutures.add(readerExec.submit(() -> {
            BatchingVisitable<SortedMap<Long, Multimap<TableReference, Cell>>> scrubQueue =
                    scrubberStore.getBatchingVisitableScrubQueue(maxScrubTimestamp, startRow, endRow);
            scrubQueue.batchAccept(batchSize, batch -> {
                for (SortedMap<Long, Multimap<TableReference, Cell>> cells : batch) {
                    // We may actually get more cells than the batch size. The batch size is used
                    // for pulling off the scrub queue, and a single entry in the scrub queue may
                    // match multiple tables. These will get broken down into smaller batches later
                    // on when we actually do deletes.
                    int numCellsRead = scrubSomeCells(cells, txManager, maxScrubTimestamp);
                    int totalRead = totalCellsRead.addAndGet(numCellsRead);
                    log.debug("Scrub task processed {} cells in a batch, total {} processed so far.",
                            numCellsRead, totalRead);
                    if (!isScrubEnabled.get()) {
                        log.debug("Stopping scrub for banned hours.");
                        break;
                    }
                }
                return isScrubEnabled.get();
            });
            return null;
        }));
    }
    for (Future<Void> readerFuture : readerFutures) {
        Futures.getUnchecked(readerFuture);
    }
    log.debug("Scrub background task running at timestamp {} processed a total of {} cells",
            maxScrubTimestamp, totalCellsRead.get());
    log.debug("Finished scrub task");
}
use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
the class SnapshotTransactionTest method readRow.
private RowResult<byte[]> readRow(byte[] defaultRow) {
    Transaction readTransaction = txManager.createNewTransaction();
    SortedMap<byte[], RowResult<byte[]>> allRows =
            readTransaction.getRows(TABLE, ImmutableSet.of(defaultRow), ColumnSelection.all());
    return allRows.get(defaultRow);
}
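For context, a minimal sketch of how such a helper is typically exercised: write a cell in one transaction, commit, then verify the result through readRow. The row, column, and value names below are illustrative, not taken from the test class.

// Illustrative usage of readRow (hypothetical row/column/value names).
byte[] row = PtBytes.toBytes("row1");
Cell cell = Cell.create(row, PtBytes.toBytes("column1"));
Transaction writeTransaction = txManager.createNewTransaction();
writeTransaction.put(TABLE, ImmutableMap.of(cell, PtBytes.toBytes("value")));
writeTransaction.commit();
RowResult<byte[]> result = readRow(row);
// result.getColumns() maps the column name bytes to the committed value.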
use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
the class SnapshotTransactionTest method testWriteChangedConflictsNoThrow.
@Test
public void testWriteChangedConflictsNoThrow() {
    overrideConflictHandlerForTable(TABLE, ConflictHandler.RETRY_ON_VALUE_CHANGED);
    final Cell cell = Cell.create(PtBytes.toBytes("row1"), PtBytes.toBytes("column1"));
    Transaction t1 = txManager.createNewTransaction();
    Transaction t2 = txManager.createNewTransaction();
    t1.delete(TABLE, ImmutableSet.of(cell));
    t2.delete(TABLE, ImmutableSet.of(cell));
    t1.commit();
    t2.commit();
}
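Both deletes commit here because RETRY_ON_VALUE_CHANGED only reports a conflict when the committed value actually differs from what the later transaction wrote; two deletes of the same cell leave the value unchanged. A sketch of the analogous put case, under the assumption that the comparison is on the written bytes (TABLE, cell, and txManager reused from the test above):

// Sketch (assumption: RETRY_ON_VALUE_CHANGED compares the written values byte-for-byte),
// so two transactions writing identical bytes to the same cell should both commit.
Transaction t3 = txManager.createNewTransaction();
Transaction t4 = txManager.createNewTransaction();
t3.put(TABLE, ImmutableMap.of(cell, PtBytes.toBytes("same")));
t4.put(TABLE, ImmutableMap.of(cell, PtBytes.toBytes("same")));
t3.commit();
t4.commit();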
use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
the class SnapshotTransactionTest method testWriteChangedConflictsThrow.
@Test
public void testWriteChangedConflictsThrow() {
    overrideConflictHandlerForTable(TABLE, ConflictHandler.RETRY_ON_VALUE_CHANGED);
    final Cell cell = Cell.create(PtBytes.toBytes("row1"), PtBytes.toBytes("column1"));
    Transaction t1 = txManager.createNewTransaction();
    Transaction t2 = txManager.createNewTransaction();
    t1.delete(TABLE, ImmutableSet.of(cell));
    t2.put(TABLE, ImmutableMap.of(cell, new byte[1]));
    t1.commit();
    try {
        t2.commit();
        fail();
    } catch (TransactionConflictException e) {
        // good
    }
    t1 = txManager.createNewTransaction();
    t2 = txManager.createNewTransaction();
    t1.delete(TABLE, ImmutableSet.of(cell));
    t2.put(TABLE, ImmutableMap.of(cell, new byte[1]));
    t2.commit();
    try {
        t1.commit();
        fail();
    } catch (TransactionConflictException e) {
        // good
    }
    t1 = txManager.createNewTransaction();
    t2 = txManager.createNewTransaction();
    t2.delete(TABLE, ImmutableSet.of(cell));
    t1.put(TABLE, ImmutableMap.of(cell, new byte[1]));
    t2.commit();
    try {
        t1.commit();
        fail();
    } catch (TransactionConflictException e) {
        // good
    }
    t1 = txManager.createNewTransaction();
    t2 = txManager.createNewTransaction();
    t2.delete(TABLE, ImmutableSet.of(cell));
    t1.put(TABLE, ImmutableMap.of(cell, new byte[1]));
    t1.commit();
    try {
        t2.commit();
        fail();
    } catch (TransactionConflictException e) {
        // good
    }
}
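Application code rarely handles these conflicts by hand as the test does; the idiomatic pattern is to run the work through the transaction manager's retrying entry point, which re-executes the task when commit fails with a retriable conflict. A minimal sketch (TABLE and cell reused from the test above for illustration):

// Sketch: TransactionConflictException is retriable, so runTaskWithRetry re-executes
// the task with a fresh transaction if another writer wins the race at commit time.
txManager.runTaskWithRetry(tx -> {
    tx.put(TABLE, ImmutableMap.of(cell, new byte[1]));
    return null;
});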
use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
the class SnapshotTransactionTest method testTransactionWriteWriteConflicts.
@Test
public void testTransactionWriteWriteConflicts() throws Exception {
    // This test creates various types of conflicting writes and makes sure that write-write
    // conflicts are thrown when necessary, and not thrown when there actually isn't a conflict.
    Cell row1Column1 = Cell.create(PtBytes.toBytes("row1"), PtBytes.toBytes("column1"));
    Cell row1Column2 = Cell.create(PtBytes.toBytes("row1"), PtBytes.toBytes("column2"));
    Cell row2Column1 = Cell.create(PtBytes.toBytes("row2"), PtBytes.toBytes("column1"));
    // First transaction commits first, second tries to commit same write
    Transaction t1 = txManager.createNewTransaction();
    Transaction t2 = txManager.createNewTransaction();
    t1.put(TABLE1, ImmutableMap.of(row1Column1, BigInteger.valueOf(1).toByteArray()));
    t2.put(TABLE1, ImmutableMap.of(row1Column1, BigInteger.valueOf(1).toByteArray()));
    t1.commit();
    try {
        t2.commit();
        assertTrue(false);
    } catch (TransactionConflictException e) {
        // We expect to catch this exception
    }
    // Second transaction commits first, first tries to commit same write
    t1 = txManager.createNewTransaction();
    t2 = txManager.createNewTransaction();
    t1.put(TABLE1, ImmutableMap.of(row1Column1, BigInteger.valueOf(1).toByteArray()));
    t2.put(TABLE1, ImmutableMap.of(row1Column1, BigInteger.valueOf(1).toByteArray()));
    t2.commit();
    try {
        t1.commit();
        assertTrue(false);
    } catch (TransactionConflictException e) {
        // We expect to catch this exception
    }
    // Transactions committing to different rows
    t1 = txManager.createNewTransaction();
    t2 = txManager.createNewTransaction();
    t1.put(TABLE1, ImmutableMap.of(row1Column1, BigInteger.valueOf(1).toByteArray()));
    t2.put(TABLE1, ImmutableMap.of(row2Column1, BigInteger.valueOf(1).toByteArray()));
    t1.commit();
    t2.commit();
    // Transactions committing to different tables
    t1 = txManager.createNewTransaction();
    t2 = txManager.createNewTransaction();
    t1.put(TABLE1, ImmutableMap.of(row1Column1, BigInteger.valueOf(1).toByteArray()));
    t2.put(TABLE2, ImmutableMap.of(row1Column1, BigInteger.valueOf(1).toByteArray()));
    t1.commit();
    t2.commit();
    // Transactions committing to different columns in the same row
    t1 = txManager.createNewTransaction();
    t2 = txManager.createNewTransaction();
    t1.put(TABLE1, ImmutableMap.of(row1Column1, BigInteger.valueOf(1).toByteArray()));
    t2.put(TABLE1, ImmutableMap.of(row1Column2, BigInteger.valueOf(1).toByteArray()));
    t1.commit();
    t2.commit();
}
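The last three cases commit cleanly because write-write conflict detection is per cell: concurrent transactions only conflict when they write to the same cell of the same table, so writes to a different row, a different table, or a different column of the same row proceed independently.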
Aggregations