
Example 66 with CConfiguration

Use of co.cask.cdap.common.conf.CConfiguration in project cdap by caskdata.

From class HiveExploreServiceTimeoutTest, method start().

@BeforeClass
public static void start() throws Exception {
    // Set smaller values for timeouts for testing
    CConfiguration cConfiguration = CConfiguration.create();
    cConfiguration.setLong(Constants.Explore.ACTIVE_OPERATION_TIMEOUT_SECS, ACTIVE_OPERATION_TIMEOUT_SECS);
    cConfiguration.setLong(Constants.Explore.INACTIVE_OPERATION_TIMEOUT_SECS, INACTIVE_OPERATION_TIMEOUT_SECS);
    cConfiguration.setLong(Constants.Explore.CLEANUP_JOB_SCHEDULE_SECS, CLEANUP_JOB_SCHEDULE_SECS);
    initialize(cConfiguration, tmpFolder);
    exploreService = injector.getInstance(ExploreService.class);
    datasetFramework.addModule(KEY_STRUCT_VALUE, new KeyStructValueTableDefinition.KeyStructValueTableModule());
    // Performing admin operations to create dataset instance
    datasetFramework.addInstance("keyStructValueTable", MY_TABLE, DatasetProperties.EMPTY);
    // Accessing dataset instance to perform data operations
    KeyStructValueTableDefinition.KeyStructValueTable table = datasetFramework.getDataset(MY_TABLE, DatasetDefinition.NO_ARGUMENTS, null);
    Assert.assertNotNull(table);
    Transaction tx1 = transactionManager.startShort(100);
    table.startTx(tx1);
    KeyStructValueTableDefinition.KeyValue.Value value1 = new KeyStructValueTableDefinition.KeyValue.Value("first", Lists.newArrayList(1, 2, 3, 4, 5));
    KeyStructValueTableDefinition.KeyValue.Value value2 = new KeyStructValueTableDefinition.KeyValue.Value("two", Lists.newArrayList(10, 11, 12, 13, 14));
    table.put("1", value1);
    table.put("2", value2);
    Assert.assertEquals(value1, table.get("1"));
    Assert.assertTrue(table.commitTx());
    // Check for conflicts, commit the transaction, and let the table clean up its transaction state
    transactionManager.canCommit(tx1, table.getTxChanges());
    transactionManager.commit(tx1);
    table.postTxCommit();
    // Start a second transaction to verify the committed value is visible
    Transaction tx2 = transactionManager.startShort(100);
    table.startTx(tx2);
    Assert.assertEquals(value1, table.get("1"));
}
Also used : Transaction(org.apache.tephra.Transaction) KeyStructValueTableDefinition(co.cask.cdap.explore.service.datasets.KeyStructValueTableDefinition) CConfiguration(co.cask.cdap.common.conf.CConfiguration) BeforeClass(org.junit.BeforeClass)
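
For reference, here is the CConfiguration round trip from the test above in isolation. This is a minimal sketch, not taken from the CDAP sources; it assumes cdap-common is on the classpath, and the 8-second override is an arbitrary illustrative value.

import co.cask.cdap.common.conf.CConfiguration;
import co.cask.cdap.common.conf.Constants;

public class CConfigurationOverrideSketch {
    public static void main(String[] args) {
        // Create a fresh CConfiguration, as the @BeforeClass method above does
        CConfiguration cConf = CConfiguration.create();
        // Override the Explore operation timeout (8 seconds is an arbitrary test value)
        cConf.setLong(Constants.Explore.ACTIVE_OPERATION_TIMEOUT_SECS, 8L);
        // Read it back; the second argument is the fallback used when the key is unset
        long timeoutSecs = cConf.getLong(Constants.Explore.ACTIVE_OPERATION_TIMEOUT_SECS, 600L);
        System.out.println("active operation timeout (secs): " + timeoutSecs);
    }
}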

Example 67 with CConfiguration

Use of co.cask.cdap.common.conf.CConfiguration in project cdap by caskdata.

From class MessageTableRegionObserver, method initializePruneState().

private void initializePruneState(RegionCoprocessorEnvironment env) {
    // The CConfiguration may not be available yet; in that case pruning stays uninitialized
    CConfiguration conf = topicMetadataCache.getCConfiguration();
    if (conf != null) {
        pruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);
        if (Boolean.TRUE.equals(pruneEnable)) {
            // Resolve the prune state table and flush interval (configured in seconds, used in milliseconds)
            String pruneTable = conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE);
            long pruneFlushInterval = TimeUnit.SECONDS.toMillis(conf.getLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, TxConstants.TransactionPruning.DEFAULT_PRUNE_FLUSH_INTERVAL));
            compactionState = new CompactionState(env, TableName.valueOf(pruneTable), pruneFlushInterval);
            if (LOG.isDebugEnabled()) {
                TableName tableName = env.getRegion().getRegionInfo().getTable();
                LOG.debug(String.format("Automatic invalid list pruning is enabled for table %s:%s. Compaction state " + "will be recorded in table %s", tableName.getNamespaceAsString(), tableName.getNameAsString(), pruneTable));
            }
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) CompactionState(org.apache.tephra.hbase.txprune.CompactionState) CConfiguration(co.cask.cdap.common.conf.CConfiguration)
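
The configuration reads in initializePruneState do not need the HBase coprocessor environment, so they can be exercised on their own. The following is a small sketch, not taken from the CDAP sources, that reads the same Tephra pruning keys from a plain CConfiguration.

import co.cask.cdap.common.conf.CConfiguration;
import org.apache.tephra.TxConstants;
import java.util.concurrent.TimeUnit;

public class PruneConfigSketch {
    public static void main(String[] args) {
        CConfiguration conf = CConfiguration.create();
        // Is invalid-list pruning enabled at all?
        boolean pruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);
        // Name of the HBase table that records prune state
        String pruneTable = conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE);
        // The flush interval is configured in seconds but consumed in milliseconds
        long pruneFlushIntervalMillis = TimeUnit.SECONDS.toMillis(conf.getLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, TxConstants.TransactionPruning.DEFAULT_PRUNE_FLUSH_INTERVAL));
        System.out.printf("pruneEnable=%s pruneTable=%s flushIntervalMs=%d%n", pruneEnable, pruneTable, pruneFlushIntervalMillis);
    }
}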

Example 68 with CConfiguration

Use of co.cask.cdap.common.conf.CConfiguration in project cdap by caskdata.

From class HBaseQueueRegionObserver, method initializePruneState().

private void initializePruneState(RegionCoprocessorEnvironment env) {
    CConfiguration conf = configCache.getCConf();
    if (conf != null) {
        pruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);
        if (Boolean.TRUE.equals(pruneEnable)) {
            String pruneTable = conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE);
            long pruneFlushInterval = TimeUnit.SECONDS.toMillis(conf.getLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, TxConstants.TransactionPruning.DEFAULT_PRUNE_FLUSH_INTERVAL));
            compactionState = new CompactionState(env, TableName.valueOf(pruneTable), pruneFlushInterval);
            if (LOG.isDebugEnabled()) {
                TableName tableName = env.getRegion().getRegionInfo().getTable();
                LOG.debug(String.format("Automatic invalid list pruning is enabled for table %s:%s. Compaction state " + "will be recorded in table %s", tableName.getNamespaceAsString(), tableName.getNameAsString(), pruneTable));
            }
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) CompactionState(org.apache.tephra.hbase.txprune.CompactionState) CConfiguration(co.cask.cdap.common.conf.CConfiguration)

Example 69 with CConfiguration

Use of co.cask.cdap.common.conf.CConfiguration in project cdap by caskdata.

From class HBaseQueueRegionObserver, method reloadPruneState().

private void reloadPruneState(RegionCoprocessorEnvironment env) {
    if (pruneEnable == null) {
        // If prune enable has never been initialized, try to do so now
        initializePruneState(env);
    } else {
        CConfiguration conf = configCache.getCConf();
        if (conf != null) {
            boolean newPruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);
            if (newPruneEnable != pruneEnable) {
                // pruning enable has been changed, resetting prune state
                if (LOG.isDebugEnabled()) {
                    LOG.debug(String.format("Transaction Invalid List pruning feature is set to %s now for region %s.", newPruneEnable, env.getRegion().getRegionInfo().getRegionNameAsString()));
                }
                resetPruneState();
                initializePruneState(env);
            }
        }
    }
}
Also used : CConfiguration(co.cask.cdap.common.conf.CConfiguration)
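
The reload above hinges on pruneEnable being a nullable Boolean: null means "never initialized", and a change in the freshly read flag triggers a reset. A compact sketch of that pattern, not taken from the CDAP sources, with the HBase-specific teardown reduced to a comment:

import co.cask.cdap.common.conf.CConfiguration;
import org.apache.tephra.TxConstants;

public class PruneFlagReloadSketch {
    // null until the flag has been read at least once
    private Boolean pruneEnable;

    void reload(CConfiguration conf) {
        boolean newPruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);
        if (pruneEnable == null || newPruneEnable != pruneEnable) {
            // A real coprocessor would reset its CompactionState here before re-initializing
            pruneEnable = newPruneEnable;
        }
    }
}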

Example 70 with CConfiguration

Use of co.cask.cdap.common.conf.CConfiguration in project cdap by caskdata.

From class HBaseQueueRegionObserver, method initializePruneState().

private void initializePruneState(RegionCoprocessorEnvironment env) {
    CConfiguration conf = configCache.getCConf();
    if (conf != null) {
        pruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);
        if (Boolean.TRUE.equals(pruneEnable)) {
            String pruneTable = conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE);
            long pruneFlushInterval = TimeUnit.SECONDS.toMillis(conf.getLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, TxConstants.TransactionPruning.DEFAULT_PRUNE_FLUSH_INTERVAL));
            compactionState = new CompactionState(env, TableName.valueOf(pruneTable), pruneFlushInterval);
            if (LOG.isDebugEnabled()) {
                TableName tableName = env.getRegion().getRegionInfo().getTable();
                LOG.debug(String.format("Automatic invalid list pruning is enabled for table %s:%s. Compaction state " + "will be recorded in table %s", tableName.getNamespaceAsString(), tableName.getNameAsString(), pruneTable));
            }
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) CompactionState(org.apache.tephra.hbase.txprune.CompactionState) CConfiguration(co.cask.cdap.common.conf.CConfiguration)

Aggregations

CConfiguration (co.cask.cdap.common.conf.CConfiguration): 180
Test (org.junit.Test): 52
BeforeClass (org.junit.BeforeClass): 46
ConfigModule (co.cask.cdap.common.guice.ConfigModule): 40
Injector (com.google.inject.Injector): 35
Configuration (org.apache.hadoop.conf.Configuration): 32
AbstractModule (com.google.inject.AbstractModule): 31
AuthorizationEnforcementModule (co.cask.cdap.security.authorization.AuthorizationEnforcementModule): 28
DataSetsModules (co.cask.cdap.data.runtime.DataSetsModules): 27
DiscoveryRuntimeModule (co.cask.cdap.common.guice.DiscoveryRuntimeModule): 26
AuthenticationContextModules (co.cask.cdap.security.auth.context.AuthenticationContextModules): 26
AuthorizationTestModule (co.cask.cdap.security.authorization.AuthorizationTestModule): 25
TransactionManager (org.apache.tephra.TransactionManager): 23
NonCustomLocationUnitTestModule (co.cask.cdap.common.guice.NonCustomLocationUnitTestModule): 22
UnsupportedUGIProvider (co.cask.cdap.security.impersonation.UnsupportedUGIProvider): 19
Location (org.apache.twill.filesystem.Location): 18
DefaultOwnerAdmin (co.cask.cdap.security.impersonation.DefaultOwnerAdmin): 17
SystemDatasetRuntimeModule (co.cask.cdap.data.runtime.SystemDatasetRuntimeModule): 16
File (java.io.File): 16
ZKClientModule (co.cask.cdap.common.guice.ZKClientModule): 14
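
The most common combination in the aggregation above is CConfiguration together with ConfigModule and a Guice Injector. Below is a minimal sketch of that setup, not taken from the CDAP sources, assuming ConfigModule exposes a constructor that accepts a CConfiguration and binds it for injection (which is how the test bootstraps counted here typically pass their overridden configuration).

import co.cask.cdap.common.conf.CConfiguration;
import co.cask.cdap.common.guice.ConfigModule;
import com.google.inject.Guice;
import com.google.inject.Injector;

public class ConfigInjectionSketch {
    public static void main(String[] args) {
        CConfiguration cConf = CConfiguration.create();
        // ConfigModule is assumed to make this CConfiguration available for injection elsewhere
        Injector injector = Guice.createInjector(new ConfigModule(cConf));
        CConfiguration bound = injector.getInstance(CConfiguration.class);
        System.out.println("same instance: " + (bound == cConf));
    }
}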