Example 1 with TransactionNotInProgressException

Use of org.apache.tephra.TransactionNotInProgressException in project cdap by caskdata.

From the class HBaseQueueDebugger, method scanQueue:

private void scanQueue(TransactionExecutor txExecutor, HBaseConsumerStateStore stateStore, QueueName queueName, QueueBarrier start, @Nullable QueueBarrier end, final QueueStatistics outStats) throws Exception {
    final byte[] queueRowPrefix = QueueEntryRow.getQueueRowPrefix(queueName);
    ConsumerGroupConfig groupConfig = start.getGroupConfig();
    printProgress("Got consumer group config: %s\n", groupConfig);
    HBaseQueueAdmin admin = queueClientFactory.getQueueAdmin();
    TableId tableId = admin.getDataTableId(queueName, QueueConstants.QueueType.SHARDED_QUEUE);
    HTable hTable = queueClientFactory.createHTable(tableId);
    printProgress("Looking at HBase table: %s\n", Bytes.toString(hTable.getTableName()));
    final byte[] stateColumnName = Bytes.add(QueueEntryRow.STATE_COLUMN_PREFIX, Bytes.toBytes(groupConfig.getGroupId()));
    int distributorBuckets = queueClientFactory.getDistributorBuckets(hTable.getTableDescriptor());
    ShardedHBaseQueueStrategy queueStrategy = new ShardedHBaseQueueStrategy(tableUtil, distributorBuckets);
    ScanBuilder scan = tableUtil.buildScan();
    scan.setStartRow(start.getStartRow());
    if (end != null) {
        scan.setStopRow(end.getStartRow());
    } else {
        scan.setStopRow(QueueEntryRow.getQueueEntryRowKey(queueName, Long.MAX_VALUE, Integer.MAX_VALUE));
    }
    // Need to include the meta column for rows that don't have state yet.
    scan.addColumn(QueueEntryRow.COLUMN_FAMILY, QueueEntryRow.META_COLUMN);
    scan.addColumn(QueueEntryRow.COLUMN_FAMILY, stateColumnName);
    // Don't use the block cache for the debug tool; we don't want old blocks to get cached
    scan.setCacheBlocks(false);
    scan.setMaxVersions(1);
    printProgress("Scanning section with scan: %s\n", scan.toString());
    List<Integer> instanceIds = Lists.newArrayList();
    if (groupConfig.getDequeueStrategy() == DequeueStrategy.FIFO) {
        instanceIds.add(0);
    } else {
        for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
            instanceIds.add(instanceId);
        }
    }
    final int rowsCache = Integer.parseInt(System.getProperty(PROP_ROWS_CACHE, "100000"));
    for (final int instanceId : instanceIds) {
        printProgress("Processing instance %d", instanceId);
        ConsumerConfig consConfig = new ConsumerConfig(groupConfig, instanceId);
        final QueueScanner scanner = queueStrategy.createScanner(consConfig, hTable, scan.build(), rowsCache);
        try {
            txExecutor.execute(new TransactionExecutor.Procedure<HBaseConsumerStateStore>() {

                @Override
                public void apply(HBaseConsumerStateStore input) throws Exception {
                    ImmutablePair<byte[], Map<byte[], byte[]>> result;
                    while ((result = scanner.next()) != null) {
                        byte[] rowKey = result.getFirst();
                        Map<byte[], byte[]> columns = result.getSecond();
                        visitRow(outStats, input.getTransaction(), rowKey, columns.get(stateColumnName), queueRowPrefix.length);
                        if (showProgress() && outStats.getTotal() % rowsCache == 0) {
                            System.out.printf("\rProcessing instance %d: %s", instanceId, outStats.getReport(showTxTimestampOnly()));
                        }
                    }
                }
            }, stateStore);
        } catch (TransactionFailureException e) {
            // Ignore TransactionNotInProgressException, as it is caused by a short transaction timeout on commit
            if (!(Throwables.getRootCause(e) instanceof TransactionNotInProgressException)) {
                throw Throwables.propagate(e);
            }
        }
        printProgress("\rProcessing instance %d: %s\n", instanceId, outStats.getReport(showTxTimestampOnly()));
    }
}
Also used : TableId(co.cask.cdap.data2.util.TableId) ShardedHBaseQueueStrategy(co.cask.cdap.data2.transaction.queue.hbase.ShardedHBaseQueueStrategy) ScanBuilder(co.cask.cdap.data2.util.hbase.ScanBuilder) TransactionExecutor(org.apache.tephra.TransactionExecutor) TransactionNotInProgressException(org.apache.tephra.TransactionNotInProgressException) TransactionFailureException(org.apache.tephra.TransactionFailureException) HTable(org.apache.hadoop.hbase.client.HTable) NotFoundException(co.cask.cdap.common.NotFoundException) HBaseConsumerStateStore(co.cask.cdap.data2.transaction.queue.hbase.HBaseConsumerStateStore) ImmutablePair(co.cask.cdap.common.utils.ImmutablePair) HBaseQueueAdmin(co.cask.cdap.data2.transaction.queue.hbase.HBaseQueueAdmin) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) QueueScanner(co.cask.cdap.data2.transaction.queue.QueueScanner) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) Map(java.util.Map)
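
The part of this example that involves TransactionNotInProgressException is the catch block at the end: a TransactionFailureException whose root cause is TransactionNotInProgressException is treated as benign (the transaction simply timed out before commit, which the debug tool tolerates), while any other failure is rethrown. A minimal sketch of that pattern in isolation, assuming a TransactionExecutor and a hypothetical doWork subroutine that are not part of the example above, might look like this:

import com.google.common.base.Throwables;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionNotInProgressException;

// Sketch only: "executor" and "doWork" are placeholders, not names from the example above.
private void runIgnoringTxTimeout(TransactionExecutor executor, TransactionExecutor.Subroutine doWork) throws InterruptedException {
    try {
        executor.execute(doWork);
    } catch (TransactionFailureException e) {
        // A TransactionNotInProgressException root cause means the transaction timed out
        // before commit; like the debug tool above, we ignore it and move on.
        if (!(Throwables.getRootCause(e) instanceof TransactionNotInProgressException)) {
            throw Throwables.propagate(e);
        }
    }
}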

Example 2 with TransactionNotInProgressException

Use of org.apache.tephra.TransactionNotInProgressException in project cdap by caskdata.

From the class DefaultStore, method upgrade:

/**
   * Method to add a version to the row keys in DefaultStore.
   *
   * @throws InterruptedException
   * @throws IOException
   * @throws DatasetManagementException
   */
public void upgrade() throws InterruptedException, IOException, DatasetManagementException {
    // If upgrade is already complete, then simply return.
    if (isUpgradeComplete()) {
        LOG.info("{} is already upgraded.", NAME);
        return;
    }
    final AtomicInteger maxRows = new AtomicInteger(1000);
    final AtomicInteger sleepTimeInSecs = new AtomicInteger(60);
    LOG.info("Starting upgrade of {}.", NAME);
    // Keep upgrading in batches until complete; the upgrade-complete flag tells readers whether they need to do additional scans to accommodate old data formats.
    while (!isUpgradeComplete()) {
        sleepTimeInSecs.set(60);
        try {
            Transactions.execute(transactional, new TxCallable<Void>() {

                @Override
                public Void call(DatasetContext context) throws Exception {
                    AppMetadataStore store = getAppMetadataStore(context);
                    boolean upgradeComplete = store.upgradeVersionKeys(maxRows.get());
                    if (upgradeComplete) {
                        store.setUpgradeComplete(APP_VERSION_UPGRADE_KEY);
                    }
                    return null;
                }
            });
        } catch (TransactionFailureException e) {
            if (e instanceof TransactionConflictException) {
                LOG.debug("Upgrade step faced Transaction Conflict exception. Retrying operation now.", e);
                sleepTimeInSecs.set(10);
            } else if (e instanceof TransactionNotInProgressException) {
                int currMaxRows = maxRows.get();
                if (currMaxRows > 500) {
                    maxRows.decrementAndGet();
                } else {
                    LOG.warn("Could not complete upgrade of {}, tried for 500 times", NAME);
                    return;
                }
                sleepTimeInSecs.set(10);
                LOG.debug("Upgrade step faced a Transaction Timeout exception. " + "Reducing the number of max rows to : {} and retrying the operation now.", maxRows.get(), e);
            } else {
                LOG.error("Upgrade step faced exception. Will retry operation after some delay.", e);
                sleepTimeInSecs.set(60);
            }
        }
        TimeUnit.SECONDS.sleep(sleepTimeInSecs.get());
    }
    LOG.info("Upgrade of {} is complete.", NAME);
}
Also used : TransactionFailureException(org.apache.tephra.TransactionFailureException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TransactionConflictException(org.apache.tephra.TransactionConflictException) TransactionNotInProgressException(org.apache.tephra.TransactionNotInProgressException) DatasetContext(co.cask.cdap.api.data.DatasetContext) ProgramNotFoundException(co.cask.cdap.common.ProgramNotFoundException) ApplicationNotFoundException(co.cask.cdap.common.ApplicationNotFoundException) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) NoSuchElementException(java.util.NoSuchElementException) IOException(java.io.IOException)
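
In this example (and in Example 3 below), the caught TransactionFailureException is not unwrapped with Throwables.getRootCause; it is classified directly with instanceof, which works because both TransactionConflictException and TransactionNotInProgressException are subclasses of TransactionFailureException. A conflict just triggers a quick retry, while a transaction timeout also shrinks the batch size before retrying. A condensed sketch of that retry loop, with a hypothetical runBatch method standing in for the store-specific transactional step, could look like:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.tephra.TransactionConflictException;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionNotInProgressException;

// Sketch only: runBatch() is a placeholder for the transactional upgrade step in the examples.
private void upgradeWithRetries() throws InterruptedException {
    final AtomicInteger batchSize = new AtomicInteger(1000);
    while (true) {
        int sleepSecs = 60;
        try {
            if (runBatch(batchSize.get())) {
                // all rows upgraded
                break;
            }
        } catch (TransactionFailureException e) {
            if (e instanceof TransactionConflictException) {
                // write conflict: retry soon without changing the batch size
                sleepSecs = 10;
            } else if (e instanceof TransactionNotInProgressException) {
                // transaction timed out: shrink the batch and retry soon
                batchSize.decrementAndGet();
                sleepSecs = 10;
            }
            // any other failure: retry after the longer delay
        }
        TimeUnit.SECONDS.sleep(sleepSecs);
    }
}

// Placeholder; returns true once there is nothing left to upgrade.
private boolean runBatch(int maxRows) throws TransactionFailureException {
    return true;
}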

Example 3 with TransactionNotInProgressException

Use of org.apache.tephra.TransactionNotInProgressException in project cdap by caskdata.

From the class DatasetBasedStreamSizeScheduleStore, method upgrade:

/**
   * Method to add a version to the StreamSizeSchedule row key in the SchedulerStore.
   *
   * @throws InterruptedException
   * @throws IOException
   * @throws DatasetManagementException
   */
public void upgrade() throws InterruptedException, IOException, DatasetManagementException {
    // Wait until the store is initialized
    // Use a new instance of table since Table is not thread safe
    Table metaTable = null;
    while (metaTable == null) {
        try {
            metaTable = tableUtil.getMetaTable();
        } catch (Exception e) {
            // ignore the exception; the meta table may not be available yet
        }
        TimeUnit.SECONDS.sleep(10);
    }
    if (isUpgradeComplete()) {
        LOG.info("{} is already upgraded.", NAME);
        return;
    }
    final AtomicInteger maxNumberUpdateRows = new AtomicInteger(1000);
    final AtomicInteger sleepTimeInSecs = new AtomicInteger(60);
    LOG.info("Starting upgrade of {}.", NAME);
    while (true) {
        sleepTimeInSecs.set(60);
        try {
            if (executeUpgradeInTransaction(table, maxNumberUpdateRows)) {
                break;
            }
        } catch (TransactionFailureException e) {
            if (e instanceof TransactionConflictException) {
                LOG.debug("Upgrade step faced Transaction Conflict exception. Retrying operation now.", e);
                sleepTimeInSecs.set(10);
            } else if (e instanceof TransactionNotInProgressException) {
                int currMaxRows = maxNumberUpdateRows.get();
                if (currMaxRows > 500) {
                    maxNumberUpdateRows.decrementAndGet();
                } else {
                    LOG.warn("Could not complete upgrade of {}, tried for 500 times", NAME);
                    return;
                }
                sleepTimeInSecs.set(10);
                LOG.debug("Upgrade step faced a Transaction Timeout exception. " + "Current number of max update rows is set to : {} and retrying the operation now.", maxNumberUpdateRows.get(), e);
            } else {
                LOG.error("Upgrade step faced exception. Will retry operation after some delay.", e);
                sleepTimeInSecs.set(60);
            }
        }
        TimeUnit.SECONDS.sleep(sleepTimeInSecs.get());
    }
    LOG.info("Upgrade of {} is complete.", NAME);
}
Also used : TransactionFailureException(org.apache.tephra.TransactionFailureException) Table(co.cask.cdap.api.dataset.table.Table) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TransactionConflictException(org.apache.tephra.TransactionConflictException) TransactionNotInProgressException(org.apache.tephra.TransactionNotInProgressException) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) IOException(java.io.IOException)
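
Example 3 reacts to TransactionNotInProgressException exactly as Example 2 does; what it adds is the polling loop at the top of upgrade(), which swallows exceptions and retries every ten seconds until the scheduler store's meta table can be obtained. A generic sketch of that wait loop, with a hypothetical fetchResource callable standing in for tableUtil.getMetaTable(), might read:

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

// Sketch only: fetchResource stands in for tableUtil.getMetaTable() in the example above.
private <T> T waitUntilAvailable(Callable<T> fetchResource, long pollSeconds) throws InterruptedException {
    T resource = null;
    while (resource == null) {
        try {
            resource = fetchResource.call();
        } catch (Exception e) {
            // ignore and poll again; the store may not be initialized yet
        }
        if (resource == null) {
            TimeUnit.SECONDS.sleep(pollSeconds);
        }
    }
    return resource;
}

One small difference: this sketch sleeps only while the resource is still unavailable, whereas the verbatim example sleeps for ten more seconds even after the meta table has been obtained.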

Aggregations

TransactionFailureException (org.apache.tephra.TransactionFailureException): 3
TransactionNotInProgressException (org.apache.tephra.TransactionNotInProgressException): 3
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException): 2
IOException (java.io.IOException): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
TransactionConflictException (org.apache.tephra.TransactionConflictException): 2
DatasetContext (co.cask.cdap.api.data.DatasetContext): 1
Table (co.cask.cdap.api.dataset.table.Table): 1
ApplicationNotFoundException (co.cask.cdap.common.ApplicationNotFoundException): 1
NotFoundException (co.cask.cdap.common.NotFoundException): 1
ProgramNotFoundException (co.cask.cdap.common.ProgramNotFoundException): 1
ImmutablePair (co.cask.cdap.common.utils.ImmutablePair): 1
ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig): 1
ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig): 1
QueueScanner (co.cask.cdap.data2.transaction.queue.QueueScanner): 1
HBaseConsumerStateStore (co.cask.cdap.data2.transaction.queue.hbase.HBaseConsumerStateStore): 1
HBaseQueueAdmin (co.cask.cdap.data2.transaction.queue.hbase.HBaseQueueAdmin): 1
ShardedHBaseQueueStrategy (co.cask.cdap.data2.transaction.queue.hbase.ShardedHBaseQueueStrategy): 1
TableId (co.cask.cdap.data2.util.TableId): 1
ScanBuilder (co.cask.cdap.data2.util.hbase.ScanBuilder): 1