Use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
From class SnapshotTransactionTest, method concurrentlyIncrementValueThousandTimesAndGet.
private long concurrentlyIncrementValueThousandTimesAndGet() throws InterruptedException, ExecutionException {
    CompletionService<Void> executor = new ExecutorCompletionService<Void>(PTExecutors.newFixedThreadPool(8));
    final Cell cell = Cell.create(PtBytes.toBytes("row1"), PtBytes.toBytes("column1"));
    Transaction t1 = txManager.createNewTransaction();
    t1.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(0L)));
    t1.commit();
    for (int i = 0; i < 1000; i++) {
        executor.submit(() -> {
            txManager.runTaskWithRetry((TxTask) t -> {
                long prev = EncodingUtils.decodeVarLong(t.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
                t.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(prev + 1)));
                return null;
            });
            return null;
        });
    }
    for (int i = 0; i < 1000; i++) {
        Future<Void> future = executor.take();
        future.get();
    }
    t1 = txManager.createNewTransaction();
    return EncodingUtils.decodeVarLong(t1.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
}
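This pattern only yields the correct total because runTaskWithRetry re-executes the whole task when the write-write conflict on the counter cell is detected at commit time. Below is a minimal sketch of the same read-modify-write increment as a standalone helper; the parameter names counterTable and counterCell are illustrative and not taken from the test above.

// A minimal sketch, assuming a TransactionManager, a TableReference and a Cell
// are handed in by the caller (hypothetical names, not from the test above).
private long incrementCounter(TransactionManager txManager, TableReference counterTable, Cell counterCell) {
    return txManager.runTaskWithRetry((TransactionTask<Long, RuntimeException>) t -> {
        // Read the current value under this transaction's snapshot.
        byte[] current = t.get(counterTable, ImmutableSet.of(counterCell)).get(counterCell);
        long next = EncodingUtils.decodeVarLong(current) + 1;
        // The put registers a write-write conflict with concurrent increments;
        // on conflict, runTaskWithRetry re-runs the entire task.
        t.put(counterTable, ImmutableMap.of(counterCell, EncodingUtils.encodeVarLong(next)));
        return next;
    });
}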
Use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
From class SnapshotTransactionTest, method testTransactionAtomicity.
@Test
public void testTransactionAtomicity() throws Exception {
    // This test runs multiple transactions in parallel, with KeyValueService.put calls throwing
    // a RuntimeException from time to time and hanging at other times, which effectively kills the
    // thread. We ensure that every transaction either adds 5 rows to the table or adds 0 rows
    // by checking at the end that the number of rows is a multiple of 5.
    final TableReference tableRef = TABLE;
    Random random = new Random(1);
    final UnstableKeyValueService unstableKvs = new UnstableKeyValueService(keyValueService, random);
    final TestTransactionManager unstableTransactionManager = new TestTransactionManagerImpl(
            unstableKvs,
            timestampService,
            lockClient,
            lockService,
            transactionService,
            conflictDetectionManager,
            sweepStrategyManager,
            sweepQueue);
    ScheduledExecutorService service = PTExecutors.newScheduledThreadPool(20);
    for (int i = 0; i < 30; i++) {
        final int threadNumber = i;
        service.schedule((Callable<Void>) () -> {
            if (threadNumber == 10) {
                unstableKvs.setRandomlyThrow(true);
            }
            if (threadNumber == 20) {
                unstableKvs.setRandomlyHang(true);
            }
            Transaction transaction = unstableTransactionManager.createNewTransaction();
            BatchingVisitable<RowResult<byte[]>> results = transaction.getRange(tableRef, RangeRequest.builder().build());
            // Find the smallest row index not yet present in the table.
            final MutableInt nextIndex = new MutableInt(0);
            results.batchAccept(1, AbortingVisitors.batching((AbortingVisitor<RowResult<byte[]>, Exception>) row -> {
                byte[] dataBytes = row.getColumns().get(PtBytes.toBytes("data"));
                BigInteger dataValue = new BigInteger(dataBytes);
                nextIndex.setValue(Math.max(nextIndex.toInteger(), dataValue.intValue() + 1));
                return true;
            }));
            // Add 5 new rows to the table.
            for (int j = 0; j < 5; j++) {
                int rowNumber = nextIndex.toInteger() + j;
                Cell cell = Cell.create(PtBytes.toBytes("row" + rowNumber), PtBytes.toBytes("data"));
                transaction.put(tableRef, ImmutableMap.of(cell, BigInteger.valueOf(rowNumber).toByteArray()));
                Thread.yield();
            }
            transaction.commit();
            return null;
        }, i * 20, TimeUnit.MILLISECONDS);
    }
    service.shutdown();
    service.awaitTermination(1, TimeUnit.SECONDS);
    // Verify that the table has a number of rows that is a multiple of 5.
    Transaction verifyTransaction = txManager.createNewTransaction();
    BatchingVisitable<RowResult<byte[]>> results = verifyTransaction.getRange(tableRef, RangeRequest.builder().build());
    final MutableInt numRows = new MutableInt(0);
    results.batchAccept(1, AbortingVisitors.batching((AbortingVisitor<RowResult<byte[]>, Exception>) row -> {
        numRows.increment();
        return true;
    }));
    Assert.assertEquals(0, numRows.toInteger() % 5);
}
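UnstableKeyValueService is test-only infrastructure from AtlasDB's test sources. The sketch below is a simplified illustration of the fault-injection idea it embodies, not its actual implementation: a delegating wrapper whose put can randomly throw or hang. Only the put path is shown; a real wrapper would also have to delegate the rest of the KeyValueService interface.

// A simplified sketch of the fault-injection wrapper (illustrative only).
class RandomlyFailingKvs {
    private final KeyValueService delegate;
    private final Random random;
    private volatile boolean randomlyThrow = false;
    private volatile boolean randomlyHang = false;

    RandomlyFailingKvs(KeyValueService delegate, Random random) {
        this.delegate = delegate;
        this.random = random;
    }

    void setRandomlyThrow(boolean value) { randomlyThrow = value; }
    void setRandomlyHang(boolean value) { randomlyHang = value; }

    void put(TableReference tableRef, Map<Cell, byte[]> values, long timestamp) {
        if (randomlyThrow && random.nextInt(3) == 0) {
            throw new RuntimeException("injected put failure");
        }
        if (randomlyHang && random.nextInt(3) == 0) {
            try {
                // Effectively kills the writing thread, as the test comment describes.
                Thread.sleep(Long.MAX_VALUE);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        delegate.put(tableRef, values, timestamp);
    }
}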
Use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
From class StreamTestWithHashStreamStore, method makeStreamUsingTransaction.
private InputStream makeStreamUsingTransaction(Transaction parent, Long id, StreamMetadata metadata) {
    BiConsumer<Long, OutputStream> singleBlockLoader =
            (index, destination) -> loadSingleBlockToOutputStream(parent, id, index, destination);
    BlockGetter pageRefresher = new BlockLoader(singleBlockLoader, BLOCK_SIZE_IN_BYTES);
    long totalBlocks = getNumberOfBlocksFromMetadata(metadata);
    int blocksInMemory = getNumberOfBlocksThatFitInMemory();
    try {
        return BlockConsumingInputStream.create(pageRefresher, totalBlocks, blocksInMemory);
    } catch (IOException e) {
        throw Throwables.throwUncheckedException(e);
    }
}
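Because each block is fetched lazily through parent, the returned stream is only usable while that transaction is open, so a caller would consume it inside the owning task. A hedged sketch of such a caller follows, assuming the generated stream store exposes loadStream(Transaction, Long); the helper name is hypothetical.

// Hypothetical caller: consume the stream inside the transaction that owns it,
// since each page is loaded lazily through that transaction.
private byte[] readWholeStream(TransactionManager txManager, StreamTestWithHashStreamStore store, long streamId) {
    return txManager.runTaskReadOnly((TransactionTask<byte[], RuntimeException>) t -> {
        try (InputStream stream = store.loadStream(t, streamId)) {
            return ByteStreams.toByteArray(stream);
        } catch (IOException e) {
            throw Throwables.throwUncheckedException(e);
        }
    });
}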
Use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
From class AbstractTransactionTest, method testNoDirtyReads.
@Test
public void testNoDirtyReads() {
    Transaction t1 = startTransaction();
    Transaction t2 = startTransaction();
    put(t2, "row1", "col1", "v1");
    t2.commit();
    // t1 started before t2 committed, so under snapshot isolation it must not see the write.
    assertNull(get(t1, "row1", "col1"));
}
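The mirror-image check, using the same put/get test helpers, would assert that a transaction started after t2 commits does see the write. A sketch, not part of the original test suite:

// Sketch of the complementary visibility check (assumed, not from the source).
@Test
public void testReadsCommittedValue() {
    Transaction t2 = startTransaction();
    put(t2, "row1", "col1", "v1");
    t2.commit();
    // A transaction whose snapshot starts after the commit observes the value.
    Transaction t3 = startTransaction();
    assertEquals("v1", get(t3, "row1", "col1"));
}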
Use of com.palantir.atlasdb.transaction.api.Transaction in project atlasdb by palantir.
From class AbstractTransactionTest, method testRangesTransactionColumnSelection.
@Test
public void testRangesTransactionColumnSelection() {
    Transaction t = startTransaction();
    put(t, "row1", "col1", "v1");
    t.commit();
    RangeRequest range1 = RangeRequest.builder().batchHint(3).build();
    RangeRequest range2 = range1.getBuilder()
            .retainColumns(ColumnSelection.create(ImmutableSet.of(PtBytes.toBytes("col1"))))
            .build();
    t = startTransaction();
    Iterable<BatchingVisitable<RowResult<byte[]>>> ranges =
            t.getRanges(TEST_TABLE, Iterables.limit(Iterables.cycle(range1, range2), 1000));
    for (BatchingVisitable<RowResult<byte[]>> batchingVisitable : ranges) {
        final List<RowResult<byte[]>> list = BatchingVisitables.copyToList(batchingVisitable);
        assertEquals(1, list.size());
        assertEquals(1, list.get(0).getColumns().size());
    }
    // Retaining a column that was never written should yield an empty range.
    RangeRequest range3 = range1.getBuilder()
            .retainColumns(ColumnSelection.create(ImmutableSet.of(PtBytes.toBytes("col2"))))
            .build();
    verifyAllGetRangesImplsRangeSizes(t, range3, 0);
}
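The same column-retaining RangeRequest also works with a single getRange call outside the getRanges batching path. A small sketch analogous to range2 above; the helper is hypothetical, while TEST_TABLE and the column name come from the test.

// Sketch: a single column-restricted range scan (helper name is illustrative).
private List<RowResult<byte[]>> scanCol1(Transaction t) {
    RangeRequest request = RangeRequest.builder()
            .retainColumns(ColumnSelection.create(ImmutableSet.of(PtBytes.toBytes("col1"))))
            .batchHint(3)
            .build();
    // Each RowResult is restricted to the retained columns.
    return BatchingVisitables.copyToList(t.getRange(TEST_TABLE, request));
}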