use of com.palantir.atlasdb.transaction.api.TransactionManager in project atlasdb by palantir.
the class AtlasDbEteServer method run.
@Override
public void run(AtlasDbEteConfiguration config, final Environment environment) throws Exception {
    TransactionManager transactionManager = tryToCreateTransactionManager(config, environment);
    environment.jersey().register(new SimpleTodoResource(new TodoClient(transactionManager)));
    environment.jersey().register(new SimpleCheckAndSetResource(new CheckAndSetClient(transactionManager)));
    environment.jersey().register(HttpRemotingJerseyFeature.INSTANCE);
    environment.jersey().register(new NotInitializedExceptionMapper());
    environment.jersey().register(new CleanupMetadataResourceImpl(
            transactionManager, config.getAtlasDbConfig().initializeAsync()));
}
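For context, the resources registered above ultimately drive the TransactionManager through its task-runner methods. Below is a minimal sketch, assuming a hypothetical client and table (the real TodoClient uses its own schema bindings, and "default.todo" and the column name are illustrative), of how such a resource might issue a write through runTaskWithRetry:

import com.google.common.collect.ImmutableMap;
import com.palantir.atlasdb.encoding.PtBytes;
import com.palantir.atlasdb.keyvalue.api.Cell;
import com.palantir.atlasdb.keyvalue.api.TableReference;
import com.palantir.atlasdb.transaction.api.TransactionManager;

// Hypothetical sketch, not the real TodoClient: shows the typical write path
// through TransactionManager.runTaskWithRetry, which opens a write transaction
// and retries the task on retriable failures such as conflicts.
public final class ExampleTodoClient {
    private static final TableReference TODO_TABLE =
            TableReference.createFromFullyQualifiedName("default.todo");

    private final TransactionManager transactionManager;

    public ExampleTodoClient(TransactionManager transactionManager) {
        this.transactionManager = transactionManager;
    }

    public void addTodo(String text) {
        transactionManager.runTaskWithRetry(txn -> {
            // Write one cell keyed by the todo text; the column name "t" is illustrative.
            txn.put(TODO_TABLE, ImmutableMap.of(
                    Cell.create(PtBytes.toBytes(text), PtBytes.toBytes("t")),
                    PtBytes.toBytes(text)));
            return null;
        });
    }
}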
use of com.palantir.atlasdb.transaction.api.TransactionManager in project atlasdb by palantir.
the class Scrubber method runBackgroundScrubTask.
@VisibleForTesting
void runBackgroundScrubTask(final TransactionManager txManager) {
    log.debug("Starting scrub task");
    // Warning: Let T be the hard delete transaction that triggered a scrub, and let S be its
    // start timestamp. If the locks for T happen to time out right after T checks that its
    // locks are held but right before T writes its commit timestamp (extremely rare case), AND
    // the unreadable timestamp is greater than S, then the scrub task could actually roll back
    // the hard delete transaction (forcing it to abort or retry). Note that this doesn't affect
    // correctness, but could be an annoying edge case that causes hard delete to take longer
    // than it otherwise would have.
    Long immutableTimestamp = immutableTimestampSupplier.get();
    Long unreadableTimestamp = unreadableTimestampSupplier.get();
    final long maxScrubTimestamp = aggressiveScrub
            ? immutableTimestamp
            : Math.min(unreadableTimestamp, immutableTimestamp);
    log.debug("Scrub task immutableTimestamp: {}, unreadableTimestamp: {}, maxScrubTimestamp: {}",
            immutableTimestamp, unreadableTimestamp, maxScrubTimestamp);
    final int batchSize = (int) Math.ceil(batchSizeSupplier.get() * ((double) threadCount / readThreadCount));
    List<byte[]> rangeBoundaries = Lists.newArrayList();
    rangeBoundaries.add(PtBytes.EMPTY_BYTE_ARRAY);
    if (readThreadCount > 1) {
        // This will actually partition into the closest higher power of 2 number of ranges.
        rangeBoundaries.addAll(Ordering.from(UnsignedBytes.lexicographicalComparator())
                .sortedCopy(new UniformRowNamePartitioner(ValueType.BLOB).getPartitions(readThreadCount - 1)));
    }
    rangeBoundaries.add(PtBytes.EMPTY_BYTE_ARRAY);
    List<Future<Void>> readerFutures = Lists.newArrayList();
    final AtomicInteger totalCellsRead = new AtomicInteger(0);
    for (int i = 0; i < rangeBoundaries.size() - 1; i++) {
        final byte[] startRow = rangeBoundaries.get(i);
        final byte[] endRow = rangeBoundaries.get(i + 1);
        readerFutures.add(readerExec.submit(() -> {
            BatchingVisitable<SortedMap<Long, Multimap<TableReference, Cell>>> scrubQueue =
                    scrubberStore.getBatchingVisitableScrubQueue(maxScrubTimestamp, startRow, endRow);
            scrubQueue.batchAccept(batchSize, batch -> {
                for (SortedMap<Long, Multimap<TableReference, Cell>> cells : batch) {
                    // We may actually get more cells than the batch size. The batch size is used
                    // for pulling off the scrub queue, and a single entry in the scrub queue may
                    // match multiple tables. These will get broken down into smaller batches later
                    // on when we actually do deletes.
                    int numCellsRead = scrubSomeCells(cells, txManager, maxScrubTimestamp);
                    int totalRead = totalCellsRead.addAndGet(numCellsRead);
                    log.debug("Scrub task processed {} cells in a batch, total {} processed so far.",
                            numCellsRead, totalRead);
                    if (!isScrubEnabled.get()) {
                        log.debug("Stopping scrub for banned hours.");
                        break;
                    }
                }
                return isScrubEnabled.get();
            });
            return null;
        }));
    }
    for (Future<Void> readerFuture : readerFutures) {
        Futures.getUnchecked(readerFuture);
    }
    log.debug("Scrub background task running at timestamp {} processed a total of {} cells",
            maxScrubTimestamp, totalCellsRead.get());
    log.debug("Finished scrub task");
}
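Two details above are easy to miss. First, the per-reader batch size is scaled by threadCount / readThreadCount: with batchSizeSupplier.get() = 1000, threadCount = 8, and readThreadCount = 4, each reader pulls ceil(1000 * 8 / 4) = 2000 queue entries per batch. Second, the boundary list (empty byte arrays marking the unbounded ends) is consumed as adjacent [startRow, endRow) pairs, one reader task per pair. A stand-alone sketch of that fan-out, where the class and processRange are hypothetical stand-ins for the scrub-queue visitation:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

// Hypothetical, stand-alone illustration of the fan-out used above.
public final class RangeFanOutSketch {
    private RangeFanOutSketch() {}

    // Each adjacent pair of range boundaries becomes one reader task on the executor.
    public static List<Future<Void>> submitRangeReaders(
            ExecutorService readerExec, List<byte[]> rangeBoundaries) {
        List<Future<Void>> futures = new ArrayList<>();
        for (int i = 0; i < rangeBoundaries.size() - 1; i++) {
            final byte[] startRow = rangeBoundaries.get(i);
            final byte[] endRow = rangeBoundaries.get(i + 1);
            futures.add(readerExec.submit(() -> {
                processRange(startRow, endRow);
                return null;
            }));
        }
        return futures;
    }

    private static void processRange(byte[] startRow, byte[] endRow) {
        // Hypothetical per-range worker; the real task visits the scrub queue
        // between startRow (inclusive) and endRow (exclusive).
    }
}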
use of com.palantir.atlasdb.transaction.api.TransactionManager in project atlasdb by palantir.
the class KeyValueServiceMigrator method internalMigrate.
private void internalMigrate() throws InterruptedException {
    Set<TableReference> tables = KeyValueServiceMigrators.getMigratableTableNames(fromKvs, unmigratableTables);
    TransactionManager txManager = toTransactionManager;
    TransactionManager readTxManager = fromTransactionManager;
    GeneralTaskCheckpointer checkpointer = new GeneralTaskCheckpointer(checkpointTable, toKvs, txManager);
    ExecutorService executor = PTExecutors.newFixedThreadPool(threads);
    try {
        migrateTables(tables, readTxManager, txManager, toKvs, migrationTimestampSupplier.get(), executor, checkpointer);
        processMessage("Data migration complete.", KvsMigrationMessageLevel.INFO);
    } catch (Throwable t) {
        processMessage("Migration failed.", t, KvsMigrationMessageLevel.ERROR);
        Throwables.throwUncheckedException(t);
    } finally {
        executor.shutdown();
        executor.awaitTermination(10000L, TimeUnit.MILLISECONDS);
    }
}
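The heavy lifting happens in migrateTables, which copies each table from the source store to the destination using the two transaction managers. A minimal sketch of the core copy step for one table, with hypothetical names and omitting the checkpointing, batching, and fixed migration timestamp the real migrator uses:

import com.palantir.atlasdb.keyvalue.api.Cell;
import com.palantir.atlasdb.keyvalue.api.RangeRequest;
import com.palantir.atlasdb.keyvalue.api.RowResult;
import com.palantir.atlasdb.keyvalue.api.TableReference;
import com.palantir.atlasdb.transaction.api.TransactionManager;
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: read every row of one table through the source
// transaction manager, then write the collected cells through the destination
// one. The real migrateTables checkpoints progress and bounds memory; this does not.
public final class TableCopySketch {
    private TableCopySketch() {}

    public static void copyTable(
            TransactionManager readTxManager, TransactionManager txManager, TableReference table) {
        Map<Cell, byte[]> buffer = new HashMap<>();
        readTxManager.runTaskReadOnly(readTxn -> {
            readTxn.getRange(table, RangeRequest.all()).batchAccept(1000, rows -> {
                for (RowResult<byte[]> row : rows) {
                    row.getColumns().forEach((col, val) ->
                            buffer.put(Cell.create(row.getRowName(), col), val));
                }
                return true; // keep visiting until the range is exhausted
            });
            return null;
        });
        txManager.runTaskWithRetry(writeTxn -> {
            writeTxn.put(table, buffer);
            return null;
        });
    }
}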
use of com.palantir.atlasdb.transaction.api.TransactionManager in project atlasdb by palantir.
the class TransactionManagerTest method shouldNotMakeRemoteCallsInAReadonlyTransactionIfNoWorkIsDone.
@Test
public void shouldNotMakeRemoteCallsInAReadonlyTransactionIfNoWorkIsDone() {
    TimestampService mockTimestampService = mock(TimestampService.class);
    LockService mockLockService = mock(LockService.class);
    TransactionManager txnManagerWithMocks = SerializableTransactionManager.createForTest(
            getKeyValueService(),
            mockTimestampService,
            LockClient.of("foo"),
            mockLockService,
            transactionService,
            () -> AtlasDbConstraintCheckingMode.FULL_CONSTRAINT_CHECKING_THROWS_EXCEPTIONS,
            conflictDetectionManager,
            sweepStrategyManager,
            NoOpCleaner.INSTANCE,
            AbstractTransactionTest.GET_RANGES_THREAD_POOL_SIZE,
            AbstractTransactionTest.DEFAULT_GET_RANGES_CONCURRENCY,
            () -> AtlasDbConstants.DEFAULT_TIMESTAMP_CACHE_SIZE,
            MultiTableSweepQueueWriter.NO_OP);
    // fetch an immutable timestamp once so it's cached
    when(mockTimestampService.getFreshTimestamp()).thenReturn(1L);
    when(mockLockService.getMinLockedInVersionId("foo")).thenReturn(1L);
    txnManagerWithMocks.getImmutableTimestamp();
    verify(mockTimestampService).getFreshTimestamp();
    verify(mockLockService).getMinLockedInVersionId("foo");
    // now execute a read transaction
    txnManagerWithMocks.runTaskReadOnly(txn -> null);
    verifyNoMoreInteractions(mockLockService);
    verifyNoMoreInteractions(mockTimestampService);
}
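The test passes because the immutable timestamp is cached by the earlier getImmutableTimestamp call, and a read-only task that performs no reads never needs a start timestamp or locks. For contrast, a hypothetical fragment (not in the original test, table and cell names illustrative, Guava's ImmutableSet assumed imported) that, if appended after the assertions above, would have to fetch a fresh start timestamp and so break verifyNoMoreInteractions:

// Hypothetical contrast: performing an actual read forces the transaction
// to fetch a start timestamp from the (mocked) timestamp service.
txnManagerWithMocks.runTaskReadOnly(txn -> txn.get(
        TableReference.createFromFullyQualifiedName("default.some_table"),
        ImmutableSet.of(Cell.create(PtBytes.toBytes("row"), PtBytes.toBytes("col")))));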
use of com.palantir.atlasdb.transaction.api.TransactionManager in project atlasdb by palantir.
the class TransactionManagerTest method shouldNotConflictIfImmutableTimestampLockExpiresIfNoReadsOrWrites.
@Test
public void shouldNotConflictIfImmutableTimestampLockExpiresIfNoReadsOrWrites() {
    TimelockService timelock = mock(TimelockService.class);
    LockService mockLockService = mock(LockService.class);
    TransactionManager txnManagerWithMocks = new SerializableTransactionManager(
            keyValueService,
            timelock,
            mockLockService,
            transactionService,
            () -> AtlasDbConstraintCheckingMode.FULL_CONSTRAINT_CHECKING_THROWS_EXCEPTIONS,
            conflictDetectionManager,
            sweepStrategyManager,
            NoOpCleaner.INSTANCE,
            TimestampTrackerImpl.createNoOpTracker(),
            () -> AtlasDbConstants.DEFAULT_TIMESTAMP_CACHE_SIZE,
            false,
            () -> AtlasDbConstants.DEFAULT_TRANSACTION_LOCK_ACQUIRE_TIMEOUT_MS,
            AbstractTransactionTest.GET_RANGES_THREAD_POOL_SIZE,
            AbstractTransactionTest.DEFAULT_GET_RANGES_CONCURRENCY,
            MultiTableSweepQueueWriter.NO_OP);
    when(timelock.getFreshTimestamp()).thenReturn(1L);
    when(timelock.lockImmutableTimestamp(any()))
            .thenReturn(LockImmutableTimestampResponse.of(2L, LockToken.of(UUID.randomUUID())));
    txnManagerWithMocks.runTaskThrowOnConflict(txn -> null);
}
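The stubbed lockImmutableTimestamp hands the transaction a lock token that nothing refreshes, yet the empty task commits: with no reads or writes there is nothing to validate at commit time. A hypothetical contrast (not in the original test, names illustrative, Guava's ImmutableMap assumed imported), if placed inside the same test body:

// Hypothetical contrast: a task that writes must validate its
// immutable-timestamp lock at commit time, so an expired lock could
// surface as an exception here.
txnManagerWithMocks.runTaskThrowOnConflict(txn -> {
    txn.put(TableReference.createFromFullyQualifiedName("default.some_table"),
            ImmutableMap.of(
                    Cell.create(PtBytes.toBytes("row"), PtBytes.toBytes("col")),
                    PtBytes.toBytes("value")));
    return null;
});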