Use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
From the class DataStreamStore, method putMetadataAndHashIndexTask:
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
    DataStreamMetadataTable mdTable = tables.getDataStreamMetadataTable(t);
    Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
    Map<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
    Map<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
    for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
        long streamId = e.getKey();
        StreamMetadata metadata = e.getValue();
        StreamMetadata prevMetadata = prevMetadatas.get(streamId);
        if (metadata.getStatus() == Status.STORED) {
            if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
                // This can happen if we clean up old streams.
                throw new TransactionFailedRetriableException(
                        "Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
            }
            rowsToStoredMetadata.put(DataStreamMetadataTable.DataStreamMetadataRow.of(streamId), metadata);
        } else if (metadata.getStatus() == Status.STORING) {
            // This prevents two users from trying to store the same id.
            if (prevMetadata != null) {
                throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
            }
            rowsToUnstoredMetadata.put(DataStreamMetadataTable.DataStreamMetadataRow.of(streamId), metadata);
        }
    }
    putHashIndexTask(t, rowsToStoredMetadata);
    Map<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
    rowsToMetadata.putAll(rowsToStoredMetadata);
    rowsToMetadata.putAll(rowsToUnstoredMetadata);
    mdTable.putMetadata(rowsToMetadata);
}
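The guard above encodes a small state machine: a stream may be marked STORED only if its previous metadata shows STORING, and a stream id that already has metadata may not be claimed for STORING again. A minimal standalone sketch of that rule, using hypothetical names (Status, StreamTransitionCheck) and a plain IllegalStateException in place of TransactionFailedRetriableException so it compiles without AtlasDB on the classpath:

// Hypothetical simplification; the real statuses live on the
// protobuf-generated StreamMetadata.Status enum in AtlasDB.
enum Status { STORING, STORED, FAILED }

final class StreamTransitionCheck {
    private StreamTransitionCheck() {}

    // Mirrors the guard above: STORING -> STORED is the only legal
    // completion, and an id with any existing metadata cannot be reused.
    static void checkTransition(long streamId, Status next, Status prev) {
        if (next == Status.STORED && prev != Status.STORING) {
            // Transient: a concurrent cleanup may have removed the stream.
            throw new IllegalStateException(
                    "Cannot mark stream " + streamId + " as stored; previous status: " + prev);
        }
        if (next == Status.STORING && prev != null) {
            // Transient: another writer claimed this stream id first.
            throw new IllegalStateException("Cannot reuse stream id: " + streamId);
        }
    }
}

In the real stores the exception type matters: TransactionFailedRetriableException tells the transaction manager the failure is transient, so the whole transaction can be retried rather than aborted.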
Use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
From the class HotspottyDataStreamStore, method putMetadataAndHashIndexTask:
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
    HotspottyDataStreamMetadataTable mdTable = tables.getHotspottyDataStreamMetadataTable(t);
    Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
    Map<HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
    Map<HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
    for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
        long streamId = e.getKey();
        StreamMetadata metadata = e.getValue();
        StreamMetadata prevMetadata = prevMetadatas.get(streamId);
        if (metadata.getStatus() == Status.STORED) {
            if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
                // This can happen if we clean up old streams.
                throw new TransactionFailedRetriableException(
                        "Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
            }
            rowsToStoredMetadata.put(HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow.of(streamId), metadata);
        } else if (metadata.getStatus() == Status.STORING) {
            // This prevents two users from trying to store the same id.
            if (prevMetadata != null) {
                throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
            }
            rowsToUnstoredMetadata.put(HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow.of(streamId), metadata);
        }
    }
    putHashIndexTask(t, rowsToStoredMetadata);
    Map<HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
    rowsToMetadata.putAll(rowsToStoredMetadata);
    rowsToMetadata.putAll(rowsToUnstoredMetadata);
    mdTable.putMetadata(rowsToMetadata);
}
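Because these methods throw TransactionFailedRetriableException, the natural caller is TransactionManager.runTaskWithRetry, which re-runs the task on retriable failures. A minimal sketch, assuming a TransactionManager instance named txManager and a hypothetical storeStreamMetadata helper that drives the stream store:

import com.palantir.atlasdb.transaction.api.TransactionManager;

// runTaskWithRetry re-executes the task when it fails with a
// retriable exception such as the ones thrown above.
void storeWithRetry(TransactionManager txManager) {
    txManager.runTaskWithRetry(tx -> {
        storeStreamMetadata(tx);  // hypothetical helper invoking the stream store
        return null;
    });
}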
Use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
From the class SnapshotTransactionTest, method cleanupPreCommitConditionsOnFailure:
@Test
public void cleanupPreCommitConditionsOnFailure() {
    MutableLong counter = new MutableLong(0L);
    PreCommitCondition failsCondition = new PreCommitCondition() {
        @Override
        public void throwIfConditionInvalid(long timestamp) {
            throw new TransactionFailedRetriableException("Condition failed");
        }

        @Override
        public void cleanup() {
            counter.increment();
        }
    };
    try {
        serializableTxManager.runTaskWithConditionThrowOnConflict(failsCondition, (tx, condition) -> {
            tx.put(TABLE, ImmutableMap.of(TEST_CELL, PtBytes.toBytes("value")));
            return null;
        });
        fail();
    } catch (TransactionFailedRetriableException e) {
        // expected
    }
    assertThat(counter.intValue(), is(1));

    try {
        serializableTxManager.runTaskReadOnlyWithCondition(failsCondition, (tx, condition) ->
                tx.get(TABLE, ImmutableSet.of(TEST_CELL)));
        fail();
    } catch (TransactionFailedRetriableException e) {
        // expected
    }
    assertThat(counter.intValue(), is(2));
}
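The try/fail/catch pattern above can be written more compactly with AssertJ's assertThatThrownBy. A sketch of the first assertion in that style, reusing failsCondition and serializableTxManager from the test above (the read-only case follows the same shape):

import static org.assertj.core.api.Assertions.assertThatThrownBy;

// Asserts that the condition failure surfaces as a retriable transaction failure.
assertThatThrownBy(() ->
        serializableTxManager.runTaskWithConditionThrowOnConflict(failsCondition, (tx, condition) -> {
            tx.put(TABLE, ImmutableMap.of(TEST_CELL, PtBytes.toBytes("value")));
            return null;
        }))
        .isInstanceOf(TransactionFailedRetriableException.class);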
Use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
From the class StreamTestMaxMemStreamStore, method putMetadataAndHashIndexTask:
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
    StreamTestMaxMemStreamMetadataTable mdTable = tables.getStreamTestMaxMemStreamMetadataTable(t);
    Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
    Map<StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
    Map<StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
    for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
        long streamId = e.getKey();
        StreamMetadata metadata = e.getValue();
        StreamMetadata prevMetadata = prevMetadatas.get(streamId);
        if (metadata.getStatus() == Status.STORED) {
            if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
                // This can happen if we clean up old streams.
                throw new TransactionFailedRetriableException(
                        "Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
            }
            rowsToStoredMetadata.put(StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow.of(streamId), metadata);
        } else if (metadata.getStatus() == Status.STORING) {
            // This prevents two users from trying to store the same id.
            if (prevMetadata != null) {
                throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
            }
            rowsToUnstoredMetadata.put(StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow.of(streamId), metadata);
        }
    }
    putHashIndexTask(t, rowsToStoredMetadata);
    Map<StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
    rowsToMetadata.putAll(rowsToStoredMetadata);
    rowsToMetadata.putAll(rowsToUnstoredMetadata);
    mdTable.putMetadata(rowsToMetadata);
}
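Since the transition rule is identical across all of these generated stores, it is easy to test in isolation. A sketch of a JUnit 4.13+ test against the hypothetical StreamTransitionCheck helper introduced after the first example:

import static org.junit.Assert.assertThrows;

import org.junit.Test;

// Exercises the simplified transition rule; Status and StreamTransitionCheck
// are the illustrative names from the earlier sketch, not AtlasDB classes.
@Test
public void storedRequiresStoring() {
    // STORING -> STORED is the only legal completion.
    StreamTransitionCheck.checkTransition(1L, Status.STORED, Status.STORING);
    // Completing a stream that was never storing must fail
    // (for example, if it was cleaned up concurrently).
    assertThrows(IllegalStateException.class,
            () -> StreamTransitionCheck.checkTransition(1L, Status.STORED, null));
    // Reusing an id that already has metadata must fail.
    assertThrows(IllegalStateException.class,
            () -> StreamTransitionCheck.checkTransition(1L, Status.STORING, Status.STORED));
}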
Use of com.palantir.atlasdb.transaction.api.TransactionFailedRetriableException in project atlasdb by palantir.
From the class StreamTestStreamStore, method putMetadataAndHashIndexTask:
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
    StreamTestStreamMetadataTable mdTable = tables.getStreamTestStreamMetadataTable(t);
    Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
    Map<StreamTestStreamMetadataTable.StreamTestStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
    Map<StreamTestStreamMetadataTable.StreamTestStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
    for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
        long streamId = e.getKey();
        StreamMetadata metadata = e.getValue();
        StreamMetadata prevMetadata = prevMetadatas.get(streamId);
        if (metadata.getStatus() == Status.STORED) {
            if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
                // This can happen if we clean up old streams.
                throw new TransactionFailedRetriableException(
                        "Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
            }
            rowsToStoredMetadata.put(StreamTestStreamMetadataTable.StreamTestStreamMetadataRow.of(streamId), metadata);
        } else if (metadata.getStatus() == Status.STORING) {
            // This prevents two users from trying to store the same id.
            if (prevMetadata != null) {
                throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
            }
            rowsToUnstoredMetadata.put(StreamTestStreamMetadataTable.StreamTestStreamMetadataRow.of(streamId), metadata);
        }
    }
    putHashIndexTask(t, rowsToStoredMetadata);
    Map<StreamTestStreamMetadataTable.StreamTestStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
    rowsToMetadata.putAll(rowsToStoredMetadata);
    rowsToMetadata.putAll(rowsToUnstoredMetadata);
    mdTable.putMetadata(rowsToMetadata);
}
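For callers that cannot use runTaskWithRetry, the retriable/non-retriable split is exposed via TransactionFailedException.canTransactionBeRetried(). A sketch of a hand-rolled retry loop under that assumption; maxAttempts and runOnce are illustrative names, and in practice runTaskWithRetry already implements this policy:

import com.palantir.atlasdb.transaction.api.TransactionFailedException;
import com.palantir.atlasdb.transaction.api.TransactionManager;

void runWithManualRetry(TransactionManager txManager, int maxAttempts) {
    for (int attempt = 1; ; attempt++) {
        try {
            txManager.runTaskThrowOnConflict(tx -> {
                runOnce(tx);  // hypothetical task body
                return null;
            });
            return;
        } catch (TransactionFailedException e) {
            // TransactionFailedRetriableException reports true here;
            // non-retriable failures propagate immediately.
            if (!e.canTransactionBeRetried() || attempt >= maxAttempts) {
                throw e;
            }
        }
    }
}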