Example 71 with RegionCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.

The start method of the class HBaseQueueRegionObserver:

@Override
public void start(CoprocessorEnvironment env) {
    if (env instanceof RegionCoprocessorEnvironment) {
        HTableDescriptor tableDesc = ((RegionCoprocessorEnvironment) env).getRegion().getTableDesc();
        String hTableName = tableDesc.getNameAsString();
        String prefixBytes = tableDesc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES);
        try {
            // Default to SALT_BYTES for the older salted queue implementation.
            this.prefixBytes = prefixBytes == null ? SaltedHBaseQueueStrategy.SALT_BYTES : Integer.parseInt(prefixBytes);
        } catch (NumberFormatException e) {
            // Shouldn't happen for table created by cdap.
            LOG.error("Unable to parse value of '" + HBaseQueueAdmin.PROPERTY_PREFIX_BYTES + "' property. " + "Default to " + SaltedHBaseQueueStrategy.SALT_BYTES, e);
            this.prefixBytes = SaltedHBaseQueueStrategy.SALT_BYTES;
        }
        namespaceId = HTableNameConverter.from(tableDesc).getNamespace();
        appName = HBaseQueueAdmin.getApplicationName(hTableName);
        flowName = HBaseQueueAdmin.getFlowName(hTableName);
        Configuration conf = env.getConfiguration();
        String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
        final String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
        txStateCacheSupplier = new DefaultTransactionStateCacheSupplier(sysConfigTablePrefix, conf);
        txStateCache = txStateCacheSupplier.get();
        txSnapshotSupplier = new Supplier<TransactionVisibilityState>() {

            @Override
            public TransactionVisibilityState get() {
                return txStateCache.getLatestState();
            }
        };
        String queueConfigTableId = HBaseQueueAdmin.getConfigTableName();
        configTableName = HTableNameConverter.toTableName(hbaseNamespacePrefix, TableId.from(namespaceId, queueConfigTableId));
        cConfReader = new CConfigurationReader(conf, sysConfigTablePrefix);
        configCacheSupplier = createConfigCache(env);
        configCache = configCacheSupplier.get();
    }
}
Also used: RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), CConfiguration (co.cask.cdap.common.conf.CConfiguration), Configuration (org.apache.hadoop.conf.Configuration), CConfigurationReader (co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader), TransactionVisibilityState (org.apache.tephra.persist.TransactionVisibilityState), DefaultTransactionStateCacheSupplier (co.cask.cdap.data2.transaction.coprocessor.DefaultTransactionStateCacheSupplier), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
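
The table-descriptor values this start method depends on (the prefix-bytes property and the CDAP table prefix) have to be written into the descriptor when the queue table is created. Below is a minimal sketch of that setup using the plain HBase 1.x client API; the property keys, table name, and coprocessor class name are illustrative placeholders, since the real constants live in HBaseQueueAdmin and Constants.Dataset.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public final class QueueTableSetupSketch {

    public static HTableDescriptor buildDescriptor() throws Exception {
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("cdap_system", "queue"));
        desc.addFamily(new HColumnDescriptor("q"));
        // Placeholder keys: the observer reads these back via
        // tableDesc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES) and
        // tableDesc.getValue(Constants.Dataset.TABLE_PREFIX).
        desc.setValue("cdap.queue.prefix.bytes", "1");
        desc.setValue("dataset.table.prefix", "cdap");
        // Placeholder fully-qualified class name; the real observer class ships in the CDAP coprocessor jar.
        desc.addCoprocessor("co.cask.cdap.example.HBaseQueueRegionObserver");
        return desc;
    }
}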

Example 72 with RegionCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.

The start method of the class PayloadTableRegionObserver:

@Override
public void start(CoprocessorEnvironment e) throws IOException {
    if (e instanceof RegionCoprocessorEnvironment) {
        RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        HTableDescriptor tableDesc = env.getRegion().getTableDesc();
        String metadataTableNamespace = tableDesc.getValue(Constants.MessagingSystem.HBASE_METADATA_TABLE_NAMESPACE);
        String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
        prefixLength = Integer.valueOf(tableDesc.getValue(Constants.MessagingSystem.HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES));
        String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
        CConfigurationReader cConfReader = new CConfigurationReader(env.getConfiguration(), sysConfigTablePrefix);
        topicMetadataCacheSupplier = new TopicMetadataCacheSupplier(env, cConfReader, hbaseNamespacePrefix, metadataTableNamespace, new DefaultScanBuilder());
        topicMetadataCache = topicMetadataCacheSupplier.get();
    }
}
Also used: TopicMetadataCacheSupplier (co.cask.cdap.messaging.TopicMetadataCacheSupplier), RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), CConfigurationReader (co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader), DefaultScanBuilder (co.cask.cdap.data2.util.hbase.DefaultScanBuilder), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
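
Unlike the queue observer in Example 71, this start method parses HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES with Integer.valueOf and no fallback, so a missing or malformed property surfaces as an unchecked exception while the coprocessor loads. The helper below is a sketch of the defensive variant with a default; the class and method names are illustrative and not part of PayloadTableRegionObserver.

public final class PrefixLengthParseSketch {

    // Parse the prefix-length table property, falling back to a default on bad input.
    static int parsePrefixLength(String rawValue, int defaultLength) {
        if (rawValue == null) {
            return defaultLength;
        }
        try {
            return Integer.parseInt(rawValue.trim());
        } catch (NumberFormatException e) {
            // Fall back instead of failing the region open.
            return defaultLength;
        }
    }

    public static void main(String[] args) {
        System.out.println(parsePrefixLength(null, 1));   // 1
        System.out.println(parsePrefixLength("2", 1));    // 2
        System.out.println(parsePrefixLength("oops", 1)); // 1
    }
}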

Example 73 with RegionCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.

The start method of the class IncrementHandler:

@Override
public void start(CoprocessorEnvironment e) throws IOException {
    if (e instanceof RegionCoprocessorEnvironment) {
        RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        this.region = env.getRegion();
        HTableDescriptor tableDesc = env.getRegion().getTableDesc();
        this.state = new IncrementHandlerState(env.getConfiguration(), tableDesc);
        for (HColumnDescriptor columnDesc : tableDesc.getFamilies()) {
            state.initFamily(columnDesc.getName(), convertFamilyValues(columnDesc.getValues()));
        }
    }
}
Also used: RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), IncrementHandlerState (co.cask.cdap.data2.increment.hbase.IncrementHandlerState), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
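
convertFamilyValues is not shown in this excerpt. HColumnDescriptor.getValues() returns a Map keyed and valued by ImmutableBytesWritable, so the helper presumably unwraps those wrappers into raw byte arrays before handing them to IncrementHandlerState. The following is a plausible sketch of that shape, an assumption rather than the actual CDAP implementation.

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public final class FamilyValuesSketch {

    // Assumed shape of convertFamilyValues: copy ImmutableBytesWritable keys and values into
    // byte[] form, keyed with a byte-array comparator so lookups by byte[] behave as expected.
    static Map<byte[], byte[]> convertFamilyValues(Map<ImmutableBytesWritable, ImmutableBytesWritable> values) {
        Map<byte[], byte[]> converted = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : values.entrySet()) {
            converted.put(entry.getKey().copyBytes(), entry.getValue().copyBytes());
        }
        return converted;
    }
}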

Example 74 with RegionCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.

The start method of the class IncrementHandler:

@Override
public void start(CoprocessorEnvironment e) throws IOException {
    if (e instanceof RegionCoprocessorEnvironment) {
        RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        this.region = env.getRegion();
        HTableDescriptor tableDesc = env.getRegion().getTableDesc();
        this.state = new IncrementHandlerState(env.getConfiguration(), tableDesc);
        for (HColumnDescriptor columnDesc : tableDesc.getFamilies()) {
            state.initFamily(columnDesc.getName(), convertFamilyValues(columnDesc.getValues()));
        }
    }
}
Also used: RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), IncrementHandlerState (co.cask.cdap.data2.increment.hbase.IncrementHandlerState), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
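
Because this start method only touches env.getConfiguration() and env.getRegion().getTableDesc(), it can be exercised against a mocked RegionCoprocessorEnvironment (Mockito already appears in the aggregation counts below via InvocationOnMock). The sketch assumes the HBase 1.x Region interface and Mockito on the classpath; the actual IncrementHandler.start() call is left as a comment since it also constructs the rest of the CDAP increment state.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;

public final class IncrementHandlerStartSketch {

    public static void main(String[] args) throws Exception {
        // Descriptor with a single column family, matching what start() iterates over.
        HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("increments"));
        tableDesc.addFamily(new HColumnDescriptor("i"));

        // Stub only what start() reads: the region's descriptor and the configuration.
        Region region = mock(Region.class);
        when(region.getTableDesc()).thenReturn(tableDesc);

        RegionCoprocessorEnvironment env = mock(RegionCoprocessorEnvironment.class);
        when(env.getRegion()).thenReturn(region);
        when(env.getConfiguration()).thenReturn(HBaseConfiguration.create());

        // new IncrementHandler().start(env); // would call initFamily(...) once for family "i"
    }
}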

Example 75 with RegionCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.

The start method of the class HBaseQueueRegionObserver:

@Override
public void start(CoprocessorEnvironment env) {
    if (env instanceof RegionCoprocessorEnvironment) {
        HTableDescriptor tableDesc = ((RegionCoprocessorEnvironment) env).getRegion().getTableDesc();
        String hTableName = tableDesc.getNameAsString();
        String prefixBytes = tableDesc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES);
        try {
            // Default to SALT_BYTES for the older salted queue implementation.
            this.prefixBytes = prefixBytes == null ? SaltedHBaseQueueStrategy.SALT_BYTES : Integer.parseInt(prefixBytes);
        } catch (NumberFormatException e) {
            // Shouldn't happen for table created by cdap.
            LOG.error("Unable to parse value of '" + HBaseQueueAdmin.PROPERTY_PREFIX_BYTES + "' property. " + "Default to " + SaltedHBaseQueueStrategy.SALT_BYTES, e);
            this.prefixBytes = SaltedHBaseQueueStrategy.SALT_BYTES;
        }
        namespaceId = HTableNameConverter.from(tableDesc).getNamespace();
        appName = HBaseQueueAdmin.getApplicationName(hTableName);
        flowName = HBaseQueueAdmin.getFlowName(hTableName);
        Configuration conf = env.getConfiguration();
        String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
        final String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
        txStateCacheSupplier = new DefaultTransactionStateCacheSupplier(sysConfigTablePrefix, conf);
        txStateCache = txStateCacheSupplier.get();
        txSnapshotSupplier = new Supplier<TransactionVisibilityState>() {

            @Override
            public TransactionVisibilityState get() {
                return txStateCache.getLatestState();
            }
        };
        String queueConfigTableId = HBaseQueueAdmin.getConfigTableName();
        configTableName = HTableNameConverter.toTableName(hbaseNamespacePrefix, TableId.from(namespaceId, queueConfigTableId));
        cConfReader = new CConfigurationReader(conf, sysConfigTablePrefix);
        configCacheSupplier = createConfigCache(env);
        configCache = configCacheSupplier.get();
    }
}
Also used: RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), CConfiguration (co.cask.cdap.common.conf.CConfiguration), Configuration (org.apache.hadoop.conf.Configuration), CConfigurationReader (co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader), TransactionVisibilityState (org.apache.tephra.persist.TransactionVisibilityState), DefaultTransactionStateCacheSupplier (co.cask.cdap.data2.transaction.coprocessor.DefaultTransactionStateCacheSupplier), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
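
The anonymous Supplier<TransactionVisibilityState> above exists only to defer the txStateCache.getLatestState() call until the snapshot is actually needed. Guava's Supplier has a single abstract method, so on Java 8 the same wiring can be a lambda or method reference. A self-contained illustration with stand-in types (SnapshotStandIn and TxCacheStandIn are hypothetical, used only to keep the sketch compilable):

import com.google.common.base.Supplier;

public final class DeferredSupplierSketch {

    // Stand-ins for TransactionVisibilityState and the transaction state cache, for illustration only.
    static class SnapshotStandIn { }

    static class TxCacheStandIn {
        SnapshotStandIn getLatestState() {
            return new SnapshotStandIn();
        }
    }

    public static void main(String[] args) {
        final TxCacheStandIn txStateCache = new TxCacheStandIn();

        // Anonymous-class form, as written in the observer above.
        Supplier<SnapshotStandIn> anonymous = new Supplier<SnapshotStandIn>() {
            @Override
            public SnapshotStandIn get() {
                return txStateCache.getLatestState();
            }
        };

        // Equivalent Java 8 method reference; each get() still re-queries the cache.
        Supplier<SnapshotStandIn> deferred = txStateCache::getLatestState;

        System.out.println(anonymous.get() != null && deferred.get() != null);
    }
}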

Aggregations

RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 78 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 37 uses
Configuration (org.apache.hadoop.conf.Configuration): 25 uses
CConfigurationReader (co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader): 21 uses
Test (org.junit.Test): 16 uses
TopicMetadataCacheSupplier (co.cask.cdap.messaging.TopicMetadataCacheSupplier): 14 uses
Put (org.apache.hadoop.hbase.client.Put): 14 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 14 uses
DefaultScanBuilder (co.cask.cdap.data2.util.hbase.DefaultScanBuilder): 11 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 9 uses
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 9 uses
Cell (org.apache.hadoop.hbase.Cell): 8 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 8 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 8 uses
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 8 uses
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 8 uses
User (org.apache.hadoop.hbase.security.User): 8 uses
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 7 uses
IncrementHandlerState (co.cask.cdap.data2.increment.hbase.IncrementHandlerState): 7 uses
CConfigurationCacheSupplier (co.cask.cdap.data2.transaction.coprocessor.CConfigurationCacheSupplier): 7 uses