use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
the class Scrubber method getCommitTimestampRollBackIfNecessary.
private long getCommitTimestampRollBackIfNecessary(long startTimestamp,
        Multimap<TableReference, Cell> tableNameToCell) {
    Long commitTimestamp = transactionService.get(startTimestamp);
    if (commitTimestamp == null) {
        // Roll back this transaction; rolling back arbitrary transactions
        // can never cause correctness issues, only liveness issues.
        try {
            transactionService.putUnlessExists(startTimestamp, TransactionConstants.FAILED_COMMIT_TS);
        } catch (KeyAlreadyExistsException e) {
            String msg = "Could not roll back transaction with start timestamp " + startTimestamp + "; either"
                    + " it was already rolled back (by a different transaction), or it committed successfully"
                    + " before we could roll it back.";
            log.error("This isn't a bug but it should be very infrequent. {}", msg,
                    new TransactionFailedRetriableException(msg, e));
        }
        commitTimestamp = transactionService.get(startTimestamp);
    }
    if (commitTimestamp == null) {
        throw new RuntimeException("expected commit timestamp to be non-null for startTs: " + startTimestamp);
    }
    if (commitTimestamp == TransactionConstants.FAILED_COMMIT_TS) {
        for (TableReference table : tableNameToCell.keySet()) {
            Map<Cell, Long> toDelete = Maps2.createConstantValueMap(tableNameToCell.get(table), startTimestamp);
            keyValueService.delete(table, Multimaps.forMap(toDelete));
        }
    }
    return commitTimestamp;
}
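TransactionFailedRetriableException signals a failure that is expected to clear on a fresh attempt; in the rollback above, losing the putUnlessExists race simply means another party already decided the transaction's fate. A minimal caller-side retry sketch is shown below; RetryingExecutor, runWithRetries, and MAX_ATTEMPTS are illustrative names, not part of the AtlasDB API.

import com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException;
import java.util.function.Supplier;

public final class RetryingExecutor {
    // Illustrative attempt budget; AtlasDB's own retry policy is not shown here.
    private static final int MAX_ATTEMPTS = 3;

    private RetryingExecutor() {}

    /** Runs the supplied task, retrying only when the failure is marked retriable. */
    public static <T> T runWithRetries(Supplier<T> task) {
        TransactionFailedRetriableException lastFailure = null;
        for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
            try {
                return task.get();
            } catch (TransactionFailedRetriableException e) {
                // A retriable failure is expected to clear on a later attempt,
                // so keep trying until the attempt budget is exhausted.
                lastFailure = e;
            }
        }
        throw lastFailure;
    }
}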
use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
the class TestHashComponentsStreamStore method putMetadataAndHashIndexTask.
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
TestHashComponentsStreamMetadataTable mdTable = tables.getTestHashComponentsStreamMetadataTable(t);
Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
Map<TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
Map<TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
long streamId = e.getKey();
StreamMetadata metadata = e.getValue();
StreamMetadata prevMetadata = prevMetadatas.get(streamId);
if (metadata.getStatus() == Status.STORED) {
if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
// This can happen if we cleanup old streams.
throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
}
rowsToStoredMetadata.put(TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow.of(streamId), metadata);
} else if (metadata.getStatus() == Status.STORING) {
// This will prevent two users trying to store the same id.
if (prevMetadata != null) {
throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
}
rowsToUnstoredMetadata.put(TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow.of(streamId), metadata);
}
}
putHashIndexTask(t, rowsToStoredMetadata);
Map<TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
rowsToMetadata.putAll(rowsToStoredMetadata);
rowsToMetadata.putAll(rowsToUnstoredMetadata);
mdTable.putMetadata(rowsToMetadata);
}
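The generated stream stores in this section all enforce the same two-step lifecycle: a stream id must first be written with status STORING (and must not already exist), and may only later be flipped to STORED; any other transition is rejected with a TransactionFailedRetriableException. The sketch below restates that state machine using a hypothetical StreamStatusTransitions helper and its own StreamStatus enum; it is not part of the generated code.

// Hypothetical, self-contained model of the status transitions enforced above.
enum StreamStatus { STORING, STORED }

final class StreamStatusTransitions {
    private StreamStatusTransitions() {}

    /**
     * Mirrors the checks in putMetadataAndHashIndexTask: a stream may only start in STORING
     * when no previous metadata exists, and may only move to STORED from STORING.
     */
    static boolean isValidTransition(StreamStatus previous, StreamStatus next) {
        if (next == StreamStatus.STORING) {
            return previous == null; // reusing an existing stream id is rejected
        }
        if (next == StreamStatus.STORED) {
            return previous == StreamStatus.STORING; // only a stream being stored can be marked stored
        }
        return false;
    }
}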
use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
the class StreamTestWithHashStreamStore method putMetadataAndHashIndexTask.
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
StreamTestWithHashStreamMetadataTable mdTable = tables.getStreamTestWithHashStreamMetadataTable(t);
Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
Map<StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
Map<StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
long streamId = e.getKey();
StreamMetadata metadata = e.getValue();
StreamMetadata prevMetadata = prevMetadatas.get(streamId);
if (metadata.getStatus() == Status.STORED) {
if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
// This can happen if we cleanup old streams.
throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
}
rowsToStoredMetadata.put(StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow.of(streamId), metadata);
} else if (metadata.getStatus() == Status.STORING) {
// This will prevent two users trying to store the same id.
if (prevMetadata != null) {
throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
}
rowsToUnstoredMetadata.put(StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow.of(streamId), metadata);
}
}
putHashIndexTask(t, rowsToStoredMetadata);
Map<StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
rowsToMetadata.putAll(rowsToStoredMetadata);
rowsToMetadata.putAll(rowsToUnstoredMetadata);
mdTable.putMetadata(rowsToMetadata);
}
use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
the class ValueStreamStore method putMetadataAndHashIndexTask.
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
ValueStreamMetadataTable mdTable = tables.getValueStreamMetadataTable(t);
Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
Map<ValueStreamMetadataTable.ValueStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
Map<ValueStreamMetadataTable.ValueStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
long streamId = e.getKey();
StreamMetadata metadata = e.getValue();
StreamMetadata prevMetadata = prevMetadatas.get(streamId);
if (metadata.getStatus() == Status.STORED) {
if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
// This can happen if we cleanup old streams.
throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
}
rowsToStoredMetadata.put(ValueStreamMetadataTable.ValueStreamMetadataRow.of(streamId), metadata);
} else if (metadata.getStatus() == Status.STORING) {
// This will prevent two users trying to store the same id.
if (prevMetadata != null) {
throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
}
rowsToUnstoredMetadata.put(ValueStreamMetadataTable.ValueStreamMetadataRow.of(streamId), metadata);
}
}
putHashIndexTask(t, rowsToStoredMetadata);
Map<ValueStreamMetadataTable.ValueStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
rowsToMetadata.putAll(rowsToStoredMetadata);
rowsToMetadata.putAll(rowsToUnstoredMetadata);
mdTable.putMetadata(rowsToMetadata);
}
use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
the class SnapshotTransaction method getWithPostFilteringInternal.
/**
 * This will return all the keys that still need to be postFiltered. It will output properly
 * postFiltered keys to the results output param.
 */
private <T> Map<Cell, Value> getWithPostFilteringInternal(TableReference tableRef, Map<Cell, Value> rawResults,
        @Output Map<Cell, T> results, Function<Value, T> transformer) {
    Set<Long> startTimestampsForValues = getStartTimestampsForValues(rawResults.values());
    Map<Long, Long> commitTimestamps = getCommitTimestamps(tableRef, startTimestampsForValues, true);
    Map<Cell, Long> keysToReload = Maps.newHashMapWithExpectedSize(0);
    Map<Cell, Long> keysToDelete = Maps.newHashMapWithExpectedSize(0);
    for (Map.Entry<Cell, Value> e : rawResults.entrySet()) {
        Cell key = e.getKey();
        Value value = e.getValue();
        if (value.getTimestamp() == Value.INVALID_VALUE_TIMESTAMP) {
            getMeter(AtlasDbMetricNames.CellFilterMetrics.INVALID_START_TS).mark();
            // When we perform garbage collection, we clean up old values, and this transaction
            // started at a timestamp before the garbage collection.
            switch (getReadSentinelBehavior()) {
                case IGNORE:
                    break;
                case THROW_EXCEPTION:
                    throw new TransactionFailedRetriableException("Tried to read a value that has been deleted. "
                            + " This can be caused by hard delete transactions using the type "
                            + TransactionType.AGGRESSIVE_HARD_DELETE
                            + ". It can also be caused by transactions taking too long, or"
                            + " its locks expired. Retrying it should work.");
                default:
                    throw new IllegalStateException("Invalid read sentinel behavior " + getReadSentinelBehavior());
            }
        } else {
            Long theirCommitTimestamp = commitTimestamps.get(value.getTimestamp());
            if (theirCommitTimestamp == null || theirCommitTimestamp == TransactionConstants.FAILED_COMMIT_TS) {
                keysToReload.put(key, value.getTimestamp());
                if (shouldDeleteAndRollback()) {
                    // This is from a failed transaction so we can roll it back and then reload it.
                    keysToDelete.put(key, value.getTimestamp());
                    getMeter(AtlasDbMetricNames.CellFilterMetrics.INVALID_COMMIT_TS).mark();
                }
            } else if (theirCommitTimestamp > getStartTimestamp()) {
                // The value's commit timestamp is after our start timestamp.
                // This means the value is from a transaction which committed
                // after our transaction began. We need to try reading at an
                // earlier timestamp.
                keysToReload.put(key, value.getTimestamp());
                getMeter(AtlasDbMetricNames.CellFilterMetrics.COMMIT_TS_GREATER_THAN_TRANSACTION_TS).mark();
            } else {
                // The value has a commit timestamp less than our start timestamp, and is visible and valid.
                if (value.getContents().length != 0) {
                    results.put(key, transformer.apply(value));
                }
            }
        }
    }
    if (!keysToDelete.isEmpty()) {
        // if we can't roll back the failed transactions, we should just try again
        if (!rollbackFailedTransactions(tableRef, keysToDelete, commitTimestamps, defaultTransactionService)) {
            return rawResults;
        }
    }
    if (!keysToReload.isEmpty()) {
        Map<Cell, Value> nextRawResults = keyValueService.get(tableRef, keysToReload);
        validateExternalAndCommitLocksIfNecessary(tableRef, getStartTimestamp());
        return nextRawResults;
    } else {
        return ImmutableMap.of();
    }
}
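The loop above reduces to a per-cell visibility decision driven by the writing transaction's commit timestamp relative to this transaction's start timestamp. The sketch below restates that decision as a pure function; PostFilterDecision and PostFilterLogic are hypothetical names used only to summarize the logic, and the value of TransactionConstants.FAILED_COMMIT_TS is assumed to be -1.

// Hypothetical summary of the per-cell post-filtering decision made above.
enum PostFilterDecision {
    KEEP,                // writer committed before our start timestamp: visible to this transaction
    RELOAD,              // writer committed after our start timestamp: re-read at an earlier timestamp
    ROLLBACK_AND_RELOAD  // writer never committed or was rolled back: roll it back, then re-read
}

final class PostFilterLogic {
    private static final long FAILED_COMMIT_TS = -1L; // assumed value of TransactionConstants.FAILED_COMMIT_TS

    private PostFilterLogic() {}

    static PostFilterDecision decide(Long writerCommitTs, long ourStartTs) {
        if (writerCommitTs == null || writerCommitTs == FAILED_COMMIT_TS) {
            return PostFilterDecision.ROLLBACK_AND_RELOAD;
        }
        if (writerCommitTs > ourStartTs) {
            return PostFilterDecision.RELOAD;
        }
        return PostFilterDecision.KEEP;
    }
}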