use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
the class Scrubber method runBackgroundScrubTask.
@VisibleForTesting
void runBackgroundScrubTask(final TransactionManager txManager) {
    log.debug("Starting scrub task");

    // Warning: Let T be the hard delete transaction that triggered a scrub, and let S be its
    // start timestamp. If the locks for T happen to time out right after T checks that its
    // locks are held but right before T writes its commit timestamp (extremely rare case), AND
    // the unreadable timestamp is greater than S, then the scrub task could actually roll back
    // the hard delete transaction (forcing it to abort or retry). Note that this doesn't affect
    // correctness, but could be an annoying edge case that causes hard delete to take longer
    // than it otherwise would have.
    Long immutableTimestamp = immutableTimestampSupplier.get();
    Long unreadableTimestamp = unreadableTimestampSupplier.get();
    final long maxScrubTimestamp = aggressiveScrub
            ? immutableTimestamp
            : Math.min(unreadableTimestamp, immutableTimestamp);
    log.debug("Scrub task immutableTimestamp: {}, unreadableTimestamp: {}, maxScrubTimestamp: {}",
            immutableTimestamp, unreadableTimestamp, maxScrubTimestamp);

    final int batchSize = (int) Math.ceil(batchSizeSupplier.get() * ((double) threadCount / readThreadCount));

    List<byte[]> rangeBoundaries = Lists.newArrayList();
    rangeBoundaries.add(PtBytes.EMPTY_BYTE_ARRAY);
    if (readThreadCount > 1) {
        // This will actually partition into the closest higher power of 2 number of ranges.
        rangeBoundaries.addAll(Ordering.from(UnsignedBytes.lexicographicalComparator())
                .sortedCopy(new UniformRowNamePartitioner(ValueType.BLOB).getPartitions(readThreadCount - 1)));
    }
    rangeBoundaries.add(PtBytes.EMPTY_BYTE_ARRAY);

    List<Future<Void>> readerFutures = Lists.newArrayList();
    final AtomicInteger totalCellsRead = new AtomicInteger(0);
    for (int i = 0; i < rangeBoundaries.size() - 1; i++) {
        final byte[] startRow = rangeBoundaries.get(i);
        final byte[] endRow = rangeBoundaries.get(i + 1);
        readerFutures.add(readerExec.submit(() -> {
            BatchingVisitable<SortedMap<Long, Multimap<TableReference, Cell>>> scrubQueue =
                    scrubberStore.getBatchingVisitableScrubQueue(maxScrubTimestamp, startRow, endRow);
            scrubQueue.batchAccept(batchSize, batch -> {
                for (SortedMap<Long, Multimap<TableReference, Cell>> cells : batch) {
                    // We may actually get more cells than the batch size. The batch size is used
                    // for pulling off the scrub queue, and a single entry in the scrub queue may
                    // match multiple tables. These will get broken down into smaller batches later
                    // on when we actually do deletes.
                    int numCellsRead = scrubSomeCells(cells, txManager, maxScrubTimestamp);
                    int totalRead = totalCellsRead.addAndGet(numCellsRead);
                    log.debug("Scrub task processed {} cells in a batch, total {} processed so far.",
                            numCellsRead, totalRead);
                    if (!isScrubEnabled.get()) {
                        log.debug("Stopping scrub for banned hours.");
                        break;
                    }
                }
                return isScrubEnabled.get();
            });
            return null;
        }));
    }
    for (Future<Void> readerFuture : readerFutures) {
        Futures.getUnchecked(readerFuture);
    }

    log.debug("Scrub background task running at timestamp {} processed a total of {} cells",
            maxScrubTimestamp, totalCellsRead.get());
    log.debug("Finished scrub task");
}
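The interesting part above is how the row space is split so that each reader thread scans a disjoint range: the boundary list starts and ends with the empty byte array (unbounded on each side), the partitioner's split points are sorted in between, and consecutive boundaries form the [start, end) pairs handed to the readers. Below is a minimal, self-contained sketch of that slicing with illustrative names; it does not use AtlasDB's UniformRowNamePartitioner, just a pre-sorted list of split points.

import java.util.ArrayList;
import java.util.List;

// Sketch only: turn sorted interior boundaries into [start, end) range pairs,
// mirroring how runBackgroundScrubTask assigns one range per reader thread.
// An empty byte[] means "unbounded" on that side of the range.
public final class RangeSlicer {
    static final byte[] EMPTY = new byte[0];

    static List<byte[][]> toRanges(List<byte[]> sortedBoundaries) {
        List<byte[]> boundaries = new ArrayList<>();
        boundaries.add(EMPTY);                // open lower bound
        boundaries.addAll(sortedBoundaries);  // interior split points
        boundaries.add(EMPTY);                // open upper bound
        List<byte[][]> ranges = new ArrayList<>();
        for (int i = 0; i < boundaries.size() - 1; i++) {
            ranges.add(new byte[][] {boundaries.get(i), boundaries.get(i + 1)});
        }
        return ranges;
    }

    public static void main(String[] args) {
        // Two interior boundaries yield three disjoint ranges covering the keyspace.
        List<byte[][]> ranges = toRanges(List.of(new byte[] {0x40}, new byte[] {(byte) 0x80}));
        System.out.println(ranges.size()); // 3
    }
}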
use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
the class KeyValueServiceMigrator method internalMigrate.
private void internalMigrate() throws InterruptedException {
    Set<TableReference> tables = KeyValueServiceMigrators.getMigratableTableNames(fromKvs, unmigratableTables);

    TransactionManager txManager = toTransactionManager;
    TransactionManager readTxManager = fromTransactionManager;

    GeneralTaskCheckpointer checkpointer = new GeneralTaskCheckpointer(checkpointTable, toKvs, txManager);

    ExecutorService executor = PTExecutors.newFixedThreadPool(threads);
    try {
        migrateTables(tables, readTxManager, txManager, toKvs, migrationTimestampSupplier.get(),
                executor, checkpointer);
        processMessage("Data migration complete.", KvsMigrationMessageLevel.INFO);
    } catch (Throwable t) {
        processMessage("Migration failed.", t, KvsMigrationMessageLevel.ERROR);
        Throwables.throwUncheckedException(t);
    } finally {
        executor.shutdown();
        executor.awaitTermination(10000L, TimeUnit.MILLISECONDS);
    }
}
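The shutdown handling in the finally block is the generic part of this method: the pool is always shut down, and the wait for in-flight work is bounded so a failed migration cannot hang the caller. Here is a minimal sketch of that lifecycle pattern using only JDK types; the class and method names are illustrative, not the AtlasDB API.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Sketch of the executor lifecycle used by internalMigrate: run the work,
// rethrow failures unchecked, and always shut the pool down with a bounded wait.
public final class MigrationRunner {
    public static void run(Runnable migration, int threads) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(threads);
        try {
            executor.submit(migration).get(); // block until done, surfacing failures
        } catch (ExecutionException e) {
            throw new RuntimeException("Migration failed", e.getCause());
        } finally {
            executor.shutdown();
            // Bounded wait, mirroring awaitTermination(10000L, MILLISECONDS) above.
            if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                executor.shutdownNow(); // interrupt anything still running
            }
        }
    }
}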
use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
the class KeyValueServiceValidator method validateTables.
private void validateTables(Set<TableReference> tables) {
    ExecutorService executor = PTExecutors.newFixedThreadPool(threads);
    List<Future<Void>> futures = Lists.newArrayList();
    for (final TableReference table : tables) {
        Future<Void> future = executor.submit(() -> {
            try {
                validateTable(table);
            } catch (RuntimeException e) {
                Throwables.rewrapAndThrowUncheckedException("Exception while validating " + table, e);
            }
            return null;
        });
        futures.add(future);
    }
    futures.forEach(Futures::getUnchecked);
}
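validateTable runs once per table on a fixed pool, and the join at the end (Futures.getUnchecked on every future) is what turns any per-table failure into an exception in the calling thread. A self-contained sketch of the same fan-out/join shape follows, using plain JDK types and a hypothetical validate step.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Sketch of the fan-out/join pattern in validateTables: submit one task per
// table, then block on every future so the first failure propagates.
public final class ParallelValidation {
    public static void validateAll(Set<String> tables, int threads) {
        ExecutorService executor = Executors.newFixedThreadPool(threads);
        try {
            List<Future<?>> futures = new ArrayList<>();
            for (String table : tables) {
                futures.add(executor.submit(() -> validate(table)));
            }
            for (Future<?> future : futures) {
                try {
                    future.get(); // join: rethrows the task's exception, wrapped
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException("Exception while validating", e);
                }
            }
        } finally {
            executor.shutdown();
        }
    }

    private static void validate(String table) {
        // hypothetical per-table check; throws on mismatch
    }
}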
use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
the class Scrubber method getCommitTimestampRollBackIfNecessary.
private long getCommitTimestampRollBackIfNecessary(long startTimestamp, Multimap<TableReference, Cell> tableNameToCell) {
    Long commitTimestamp = transactionService.get(startTimestamp);
    if (commitTimestamp == null) {
        // Roll back this transaction (note that rolling back arbitrary transactions
        // can never cause correctness issues, only liveness issues).
        try {
            transactionService.putUnlessExists(startTimestamp, TransactionConstants.FAILED_COMMIT_TS);
        } catch (KeyAlreadyExistsException e) {
            String msg = "Could not roll back transaction with start timestamp " + startTimestamp + "; either"
                    + " it was already rolled back (by a different transaction), or it committed successfully"
                    + " before we could roll it back.";
            log.error("This isn't a bug but it should be very infrequent. {}", msg,
                    new TransactionFailedRetriableException(msg, e));
        }
        commitTimestamp = transactionService.get(startTimestamp);
    }
    if (commitTimestamp == null) {
        throw new RuntimeException("expected commit timestamp to be non-null for startTs: " + startTimestamp);
    }
    if (commitTimestamp == TransactionConstants.FAILED_COMMIT_TS) {
        for (TableReference table : tableNameToCell.keySet()) {
            Map<Cell, Long> toDelete = Maps2.createConstantValueMap(tableNameToCell.get(table), startTimestamp);
            keyValueService.delete(table, Multimaps.forMap(toDelete));
        }
    }
    return commitTimestamp;
}
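The correctness of this rollback hinges on putUnlessExists being an atomic put-if-absent: exactly one of the committer and the scrubber can write the commit slot, so re-reading after the attempted write always yields the authoritative outcome. Below is a toy sketch of the same protocol against an in-memory map, where FAILED stands in for TransactionConstants.FAILED_COMMIT_TS; none of these names are the AtlasDB API.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Toy model of the rollback protocol: the "transaction service" is a map
// from start timestamp to commit timestamp, and putIfAbsent is the atomic
// put-if-absent that decides the race between committer and scrubber.
public final class RollbackSketch {
    static final long FAILED = -1L;
    static final ConcurrentMap<Long, Long> commitTable = new ConcurrentHashMap<>();

    static long getCommitTsRollingBackIfNecessary(long startTs) {
        Long commitTs = commitTable.get(startTs);
        if (commitTs == null) {
            // Exactly one writer wins this slot, so a racing committer and
            // scrubber can never both "succeed".
            commitTable.putIfAbsent(startTs, FAILED);
            commitTs = commitTable.get(startTs); // re-read the authoritative value
        }
        return commitTs; // either the real commit ts or FAILED
    }

    public static void main(String[] args) {
        System.out.println(getCommitTsRollingBackIfNecessary(100L)); // -1 (rolled back)
        commitTable.put(200L, 250L);                                 // a committed transaction
        System.out.println(getCommitTsRollingBackIfNecessary(200L)); // 250
    }
}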
use of com.palantir.atlasdb.keyvalue.api.TableReference in project atlasdb by palantir.
the class AbstractTableMappingService method generateMapToFullTableNames.
@Override
public Map<TableReference, TableReference> generateMapToFullTableNames(Set<TableReference> tableRefs) {
    Map<TableReference, TableReference> shortNameToFullTableName = Maps.newHashMapWithExpectedSize(tableRefs.size());
    Set<TableReference> tablesToReload = Sets.newHashSet();

    for (TableReference inputName : tableRefs) {
        if (inputName.isFullyQualifiedName()) {
            shortNameToFullTableName.put(inputName, inputName);
        } else if (tableMap.get().containsValue(inputName)) {
            shortNameToFullTableName.put(inputName, getFullTableName(inputName));
        } else if (unmappedTables.containsKey(inputName)) {
            shortNameToFullTableName.put(inputName, inputName);
        } else {
            tablesToReload.add(inputName);
        }
    }

    if (!tablesToReload.isEmpty()) {
        updateTableMap();
        for (TableReference tableRef : Sets.difference(tablesToReload, tableMap.get().values())) {
            unmappedTables.put(tableRef, true);
            shortNameToFullTableName.put(tableRef, tableRef);
        }
        for (TableReference tableRef : Sets.intersection(tablesToReload, tableMap.get().values())) {
            shortNameToFullTableName.put(tableRef, getFullTableName(tableRef));
        }
    }
    return shortNameToFullTableName;
}
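This method is a cache-aside lookup with a negative cache: fully qualified names map to themselves, known short names resolve through the current map, names already known to be unmapped short-circuit, and only genuine misses trigger a single reload, after which anything still unresolved is negatively cached. Here is a simplified sketch of that strategy; the map direction is inverted to short -> full for brevity, and every name is illustrative rather than an AtlasDB class.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch of the resolution strategy in generateMapToFullTableNames: serve
// hits from the cached map, reload once for the whole batch of misses,
// and negatively cache names that still don't resolve.
public final class NameResolver {
    private Map<String, String> shortToFull = new HashMap<>(); // stand-in for tableMap
    private final Set<String> unmapped = new HashSet<>();      // stand-in for unmappedTables

    Map<String, String> resolve(Set<String> names) {
        Map<String, String> result = new HashMap<>();
        Set<String> toReload = new HashSet<>();
        for (String name : names) {
            if (shortToFull.containsKey(name)) {
                result.put(name, shortToFull.get(name));
            } else if (unmapped.contains(name)) {
                result.put(name, name); // known miss: identity mapping
            } else {
                toReload.add(name);
            }
        }
        if (!toReload.isEmpty()) {
            shortToFull = loadMapFromStore(); // one reload for the whole batch
            for (String name : toReload) {
                if (shortToFull.containsKey(name)) {
                    result.put(name, shortToFull.get(name));
                } else {
                    unmapped.add(name); // negative-cache so we don't reload again
                    result.put(name, name);
                }
            }
        }
        return result;
    }

    private Map<String, String> loadMapFromStore() {
        return new HashMap<>(); // hypothetical: re-read the name mapping table
    }
}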