Use of org.apache.tephra.TransactionContext in project phoenix by apache.
From the class FlappingTransactionIT, method testExternalTxContext.
@Test
public void testExternalTxContext() throws Exception {
    ResultSet rs;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    String fullTableName = generateUniqueName();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    TransactionSystemClient txServiceClient = pconn.getQueryServices().getTransactionSystemClient();
    Statement stmt = conn.createStatement();
    stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
    HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
    stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
    conn.commit();
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use HBase level Tephra APIs to start a new transaction
    TransactionAwareHTable txAware = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
    TransactionContext txContext = new TransactionContext(txServiceClient, txAware);
    txContext.start();
    // Use HBase APIs to add a new row
    Put put = new Put(Bytes.toBytes("z"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
    txAware.put(put);
    // Use Phoenix APIs to add new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
    // New connection should not see data as it hasn't been committed yet
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use new connection to create a row with a conflict
    Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
    // Existing connection should see data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Use Tephra APIs directly to finish (i.e. commit) the transaction
    txContext.finish();
    // Confirm that the attempt to commit the conflicting row fails
    try {
        connWithConflict.commit();
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
    }
    // New connection should now see data as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(3, rs.getInt(1));
    }
    // Repeat the same as above, but this time abort the transaction
    txContext = new TransactionContext(txServiceClient, txAware);
    txContext.start();
    // Use HBase APIs to add a new row
    put = new Put(Bytes.toBytes("j"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
    txAware.put(put);
    // Use Phoenix APIs to add new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
    // Existing connection should see data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(5, rs.getInt(1));
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
    rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    // Use Tephra APIs directly to abort (i.e. rollback) the transaction
    txContext.abort();
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Should succeed since the conflicting row was aborted
    connWithConflict.commit();
    connWithConflict.close();
    // New connection should now see data as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(4, rs.getInt(1));
    }
    // Even using HBase APIs directly, we shouldn't find 'j' since a delete marker would have been
    // written to hide it.
    Result result = htable.get(new Get(Bytes.toBytes("j")));
    assertTrue(result.isEmpty());
}
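The pattern this test exercises is the standard Tephra client lifecycle: build a TransactionContext over one or more TransactionAware participants (here a TransactionAwareHTable, plus the Phoenix connection via setTransactionContext), then drive it with start(), finish() to commit, or abort() to roll back. Below is a minimal, self-contained sketch of that lifecycle for a single transactional HBase put; the class and method names are illustrative, and the imports assume the tephra-hbase-compat module layout rather than anything taken from the Phoenix test.

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionSystemClient;
import org.apache.tephra.TxConstants;
import org.apache.tephra.hbase.TransactionAwareHTable;

public final class TransactionalPutSketch {
    // Wraps a single Put in its own Tephra transaction: start(), mutate, finish();
    // abort() discards the buffered change if anything fails before the commit completes.
    static void writeRowTransactionally(TransactionSystemClient txClient, HTableInterface htable,
                                        byte[] row, byte[] family, byte[] qualifier,
                                        byte[] value) throws Exception {
        TransactionAwareHTable txAware = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
        TransactionContext txContext = new TransactionContext(txClient, txAware);
        txContext.start();
        try {
            Put put = new Put(row);
            put.addColumn(family, qualifier, value);
            txAware.put(put);
            // finish() runs conflict detection and commits; only now do other readers see the row
            txContext.finish();
        } catch (Exception e) {
            // abort() invalidates the transaction, so the put is never exposed to readers
            txContext.abort();
            throw e;
        }
    }
}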
Use of org.apache.tephra.TransactionContext in project cdap by caskdata.
From the class QueueTest, method testOneEnqueueDequeue.
private void testOneEnqueueDequeue(DequeueStrategy strategy) throws Exception {
    // Since this is used by more than one test method, ensure uniqueness of the queue name by appending the strategy
    QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "queue1" + strategy.toString());
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(0L, 1, strategy, null), new ConsumerGroupConfig(1L, 1, strategy, null)));
    List<ConsumerConfig> consumerConfigs = ImmutableList.of(new ConsumerConfig(0L, 0, 1, strategy, null), new ConsumerConfig(1L, 0, 1, strategy, null));
    try (QueueProducer producer = queueClientFactory.createProducer(queueName)) {
        TransactionContext txContext = createTxContext(producer);
        txContext.start();
        producer.enqueue(new QueueEntry(Bytes.toBytes(55)));
        txContext.finish();
        try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 2)) {
            txContext = createTxContext(consumer);
            txContext.start();
            Assert.assertEquals(55, Bytes.toInt(consumer.dequeue().iterator().next()));
            txContext.finish();
        }
    }
    forceEviction(queueName, 2);
    // Verify that the consumer of the 2nd group can still process items: they were not evicted
    try (QueueConsumer consumer2 = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 2)) {
        TransactionContext txContext = createTxContext(consumer2);
        txContext.start();
        Assert.assertEquals(55, Bytes.toInt(consumer2.dequeue().iterator().next()));
        txContext.finish();
    }
    // Now all entries should be evicted
    verifyQueueIsEmpty(queueName, consumerConfigs);
}
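The createTxContext helper is part of the test class and its body is not shown here; presumably it wraps the queue producer or consumer, each of which takes part as a Tephra TransactionAware, together with the test's TransactionSystemClient. A hedged sketch of what such a helper could look like follows; the class and field names are assumptions, not CDAP code.

import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionSystemClient;

final class TxContextFactorySketch {
    private final TransactionSystemClient txSystemClient;

    TxContextFactorySketch(TransactionSystemClient txSystemClient) {
        this.txSystemClient = txSystemClient;
    }

    // Every participant that buffers changes (queue producer, queue consumer, dataset, ...)
    // takes part as a TransactionAware; the TransactionContext coordinates the
    // startTx/commitTx/rollbackTx callbacks across all of them.
    TransactionContext createTxContext(TransactionAware... txAwares) {
        return new TransactionContext(txSystemClient, txAwares);
    }
}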
Use of org.apache.tephra.TransactionContext in project cdap by caskdata.
From the class QueueTest, method testQueueAbortRetrySkip.
@Test(timeout = TIMEOUT_MS)
public void testQueueAbortRetrySkip() throws Exception {
    QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "queuefailure");
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(0L, 1, DequeueStrategy.FIFO, null), new ConsumerGroupConfig(1L, 1, DequeueStrategy.HASH, "key")));
    List<ConsumerConfig> consumerConfigs = ImmutableList.of(new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null), new ConsumerConfig(1, 0, 1, DequeueStrategy.HASH, "key"));
    createEnqueueRunnable(queueName, 5, 1, null).run();
    try (QueueConsumer fifoConsumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 2);
         QueueConsumer hashConsumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 2)) {
        TransactionContext txContext = createTxContext(fifoConsumer, hashConsumer);
        txContext.start();
        Assert.assertEquals(0, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(0, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        // Abort the consumer transaction
        txContext.abort();
        // Dequeue again in a new transaction, should see the same entries
        txContext.start();
        Assert.assertEquals(0, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(0, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue again, now should get next entry
        txContext.start();
        Assert.assertEquals(1, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(1, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue a result and abort.
        txContext.start();
        DequeueResult<byte[]> fifoResult = fifoConsumer.dequeue();
        DequeueResult<byte[]> hashResult = hashConsumer.dequeue();
        Assert.assertEquals(2, Bytes.toInt(fifoResult.iterator().next()));
        Assert.assertEquals(2, Bytes.toInt(hashResult.iterator().next()));
        txContext.abort();
        // Now skip the result with a new transaction.
        txContext.start();
        fifoResult.reclaim();
        hashResult.reclaim();
        txContext.finish();
        // Dequeue again, it should see a new entry
        txContext.start();
        Assert.assertEquals(3, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(3, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue again, it should see a new entry
        txContext.start();
        Assert.assertEquals(4, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(4, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
    }
    verifyQueueIsEmpty(queueName, consumerConfigs);
}
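Note how the same TransactionContext instance is reused for a sequence of short transactions: each start()/finish() pair is one transaction, and abort() discards an attempt so that the un-acknowledged entries are seen again by the next dequeue. A minimal sketch of that abort-and-retry idea, assuming a caller-supplied unit of work (the helper name and the Callable-based signature are illustrative):

import java.util.concurrent.Callable;
import org.apache.tephra.TransactionContext;

final class RetryTxSketch {
    // Runs the given work in its own transaction, aborting and retrying on failure.
    // Aborted attempts leave the queue state untouched, so the next attempt sees the same entries.
    static <T> T callWithRetry(TransactionContext txContext, Callable<T> work, int maxAttempts) throws Exception {
        Exception last = new IllegalArgumentException("maxAttempts must be positive");
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            txContext.start();
            try {
                T result = work.call();
                txContext.finish();   // commit: dequeued entries are now acknowledged
                return result;
            } catch (Exception e) {
                txContext.abort();    // roll back: entries remain available to the next attempt
                last = e;
            }
        }
        throw last;
    }
}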
Use of org.apache.tephra.TransactionContext in project cdap by caskdata.
From the class QueueTest, method testRollback.
@Test(timeout = TIMEOUT_MS)
public void testRollback() throws Exception {
    QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "queuerollback");
    ConsumerConfig consumerConfig = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
    configureGroups(queueName, ImmutableList.of(consumerConfig));
    try (QueueProducer producer = queueClientFactory.createProducer(queueName);
         QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
        TransactionContext txContext = createTxContext(producer, consumer, new TransactionAware() {
            boolean canCommit = false;
            @Override
            public void startTx(Transaction tx) {
            }
            @Override
            public void updateTx(Transaction tx) {
            }
            @Override
            public Collection<byte[]> getTxChanges() {
                return ImmutableList.of();
            }
            @Override
            public boolean commitTx() throws Exception {
                // Flip-flop between commit success/failure.
                boolean res = canCommit;
                canCommit = !canCommit;
                return res;
            }
            @Override
            public void postTxCommit() {
            }
            @Override
            public boolean rollbackTx() throws Exception {
                return true;
            }
            @Override
            public String getTransactionAwareName() {
                return "test";
            }
        });
        // First, try to enqueue; the commit should fail
        txContext.start();
        try {
            producer.enqueue(new QueueEntry(Bytes.toBytes(1)));
            txContext.finish();
            // If we reach here, it's wrong, as an exception should have been thrown.
            Assert.assertTrue(false);
        } catch (TransactionFailureException e) {
            txContext.abort();
        }
        // Try to enqueue again. Within the same transaction, dequeue should be empty.
        txContext.start();
        producer.enqueue(new QueueEntry(Bytes.toBytes(1)));
        Assert.assertTrue(consumer.dequeue().isEmpty());
        txContext.finish();
        // This time the enqueue has been committed, so the dequeue should see the item
        txContext.start();
        try {
            Assert.assertEquals(1, Bytes.toInt(consumer.dequeue().iterator().next()));
            txContext.finish();
            // If we reach here, it's wrong, as an exception should have been thrown.
            Assert.assertTrue(false);
        } catch (TransactionFailureException e) {
            txContext.abort();
        }
        // Dequeue again; since the last tx was rolled back, this dequeue should see the item again.
        txContext.start();
        Assert.assertEquals(1, Bytes.toInt(consumer.dequeue().iterator().next()));
        txContext.finish();
    }
}
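The anonymous TransactionAware above makes commitTx() alternate between false and true, so every other finish() call fails: when a participant reports a failed commit, the TransactionContext rolls all participants back via rollbackTx() and throws TransactionFailureException, which is why the test aborts and retries. For contrast, here is a sketch of a minimal well-behaved participant; the comments describe the intent of each callback as this sketch assumes it, not behavior beyond what the test shows.

import java.util.Collection;
import java.util.Collections;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionAware;

final class NoopTransactionAware implements TransactionAware {
    private Transaction currentTx;

    @Override
    public void startTx(Transaction tx) {
        currentTx = tx;                 // remember the transaction for read/write pointers
    }

    @Override
    public void updateTx(Transaction tx) {
        currentTx = tx;                 // e.g. after a checkpoint assigns a new write pointer
    }

    @Override
    public Collection<byte[]> getTxChanges() {
        return Collections.emptyList(); // change keys used for conflict detection
    }

    @Override
    public boolean commitTx() {
        return true;                    // flush buffered writes; false signals commit failure
    }

    @Override
    public void postTxCommit() {
        currentTx = null;               // the transaction is committed and visible
    }

    @Override
    public boolean rollbackTx() {
        currentTx = null;               // undo persisted changes; true means rollback succeeded
        return true;
    }

    @Override
    public String getTransactionAwareName() {
        return "noop";
    }
}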
Use of org.apache.tephra.TransactionContext in project cdap by caskdata.
From the class QueueTest, method testConcurrentEnqueue.
@Category(SlowTests.class)
@Test
public void testConcurrentEnqueue() throws Exception {
    // This test exercises multiple producers that write with a delay after their transaction has started.
    // It verifies that the consumer advances the startKey correctly.
    final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "concurrent");
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(0, 1, DequeueStrategy.FIFO, null)));
    final CyclicBarrier barrier = new CyclicBarrier(4);
    ConsumerConfig consumerConfig = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
    // Start three producers that enqueue concurrently. For each entry: start a TX, sleep, enqueue, commit.
    ExecutorService executor = Executors.newFixedThreadPool(3);
    final int entryCount = 50;
    for (int i = 0; i < 3; i++) {
        final QueueProducer producer = queueClientFactory.createProducer(queueName);
        final int producerId = i + 1;
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    barrier.await();
                    for (int i = 0; i < entryCount; i++) {
                        TransactionContext txContext = createTxContext(producer);
                        txContext.start();
                        // Sleep at a different rate so the scan in the consumer has a higher chance of seeing
                        // the transaction but not the entry (as it is not yet written)
                        TimeUnit.MILLISECONDS.sleep(producerId * 50);
                        producer.enqueue(new QueueEntry(Bytes.toBytes(i)));
                        txContext.finish();
                    }
                } catch (Exception e) {
                    LOG.error(e.getMessage(), e);
                } finally {
                    Closeables.closeQuietly(producer);
                }
            }
        });
    }
    // Each producer enqueues 0..entryCount-1, so the expected total is 3 * sum(0..entryCount-1)
    int expectedSum = entryCount * (entryCount - 1) / 2 * 3;
    try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
        // Trigger the start of the producers
        barrier.await();
        int dequeueSum = 0;
        int noProgress = 0;
        while (dequeueSum != expectedSum && noProgress < 200) {
            TransactionContext txContext = createTxContext(consumer);
            txContext.start();
            DequeueResult<byte[]> result = consumer.dequeue();
            if (!result.isEmpty()) {
                noProgress = 0;
                int value = Bytes.toInt(result.iterator().next());
                dequeueSum += value;
            } else {
                noProgress++;
                TimeUnit.MILLISECONDS.sleep(10);
            }
            txContext.finish();
        }
        Assert.assertEquals(expectedSum, dequeueSum);
    }
}
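Each producer thread above opens a fresh TransactionContext per entry, so every enqueue is its own short transaction and the consumer only ever observes fully committed entries; the deliberate sleep between start() and enqueue widens the window in which the consumer can see an in-progress transaction whose entry has not been written yet. A condensed sketch of that one-transaction-per-item pattern follows; the helper name and Runnable-based work items are illustrative, and the producer is assumed to take part as a TransactionAware, as in the helper sketched earlier.

import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionSystemClient;

final class PerEntryTxSketch {
    // One transaction per work item: a partially written batch is never visible, because
    // each item is committed (or aborted) on its own before the next one starts.
    static void runEachInOwnTx(TransactionSystemClient txClient, TransactionAware participant,
                               Iterable<Runnable> workItems) throws Exception {
        for (Runnable work : workItems) {
            TransactionContext txContext = new TransactionContext(txClient, participant);
            txContext.start();
            try {
                work.run();           // e.g. producer.enqueue(new QueueEntry(...))
                txContext.finish();   // commit just this one item
            } catch (Exception e) {
                txContext.abort();    // discard only the failed item; earlier commits are untouched
                throw e;
            }
        }
    }
}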