Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.
From the class PayloadTableRegionObserver, method start:
@Override
public void start(CoprocessorEnvironment e) throws IOException {
  if (e instanceof RegionCoprocessorEnvironment) {
    RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
    HTableDescriptor tableDesc = env.getRegion().getTableDesc();
    // Messaging-system settings are carried as attributes on the table descriptor.
    String metadataTableNamespace = tableDesc.getValue(Constants.MessagingSystem.HBASE_METADATA_TABLE_NAMESPACE);
    String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
    prefixLength = Integer.valueOf(
      tableDesc.getValue(Constants.MessagingSystem.HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES));
    String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
    CConfigurationReader cConfReader = new CConfigurationReader(env.getConfiguration(), sysConfigTablePrefix);
    topicMetadataCacheSupplier = new TopicMetadataCacheSupplier(
      env, cConfReader, hbaseNamespacePrefix, metadataTableNamespace, new DefaultScanBuilder());
    topicMetadataCache = topicMetadataCacheSupplier.get();
  }
}
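For context, the settings read above are plain attributes on the table descriptor, so whoever creates the payload table has to set them before attaching the coprocessor. A minimal sketch against the standard HBase 1.x admin API, assuming an existing Admin instance named admin; the namespace, table and family names, and attribute values are illustrative and not taken from the CDAP source:

HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("cdap_system", "payload.table"));
desc.setValue(Constants.MessagingSystem.HBASE_METADATA_TABLE_NAMESPACE, "system");    // illustrative value
desc.setValue(Constants.Dataset.TABLE_PREFIX, "cdap");                                // illustrative value
desc.setValue(Constants.MessagingSystem.HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES, "1"); // illustrative value
desc.addFamily(new HColumnDescriptor("d"));
desc.addCoprocessor(PayloadTableRegionObserver.class.getName());
admin.createTable(desc);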
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.
From the class MessageTableRegionObserver, method start:
@Override
public void start(CoprocessorEnvironment e) throws IOException {
  if (e instanceof RegionCoprocessorEnvironment) {
    RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
    HTableDescriptor tableDesc = env.getRegion().getTableDesc();
    // Messaging-system settings are carried as attributes on the table descriptor.
    String metadataTableNamespace = tableDesc.getValue(Constants.MessagingSystem.HBASE_METADATA_TABLE_NAMESPACE);
    String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
    prefixLength = Integer.valueOf(
      tableDesc.getValue(Constants.MessagingSystem.HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES));
    String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
    CConfigurationReader cConfReader = new CConfigurationReader(env.getConfiguration(), sysConfigTablePrefix);
    // Unlike PayloadTableRegionObserver, this observer also initializes the transaction state cache.
    txStateCacheSupplier = getTransactionStateCacheSupplier(hbaseNamespacePrefix, env.getConfiguration());
    txStateCache = txStateCacheSupplier.get();
    topicMetadataCacheSupplier = new TopicMetadataCacheSupplier(
      env, cConfReader, hbaseNamespacePrefix, metadataTableNamespace, new HBase11ScanBuilder());
    topicMetadataCache = topicMetadataCacheSupplier.get();
  }
}
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project hbase by apache.
From the class SecureBulkLoadManager, method cleanupBulkLoad:
public void cleanupBulkLoad(final Region region, final CleanupBulkLoadRequest request) throws IOException {
  List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers(region);
  if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) {
    ObserverContext<RegionCoprocessorEnvironment> ctx = new ObserverContext<>(getActiveUser());
    ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost()
        .findCoprocessorEnvironment(BulkLoadObserver.class).get(0));
    // Let every registered BulkLoadObserver run its preCleanupBulkLoad hook first.
    for (BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
      bulkLoadObserver.preCleanupBulkLoad(ctx, request);
    }
  }
  // Finally, remove the staging directory named by the bulk load token.
  fs.delete(new Path(request.getBulkToken()), true);
}
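On the receiving side, a BulkLoadObserver implementation gets this callback before the staging directory is deleted. A minimal sketch, assuming the two-argument preCleanupBulkLoad signature used at the call site above (the BulkLoadObserver interface has changed across HBase versions, and a real implementation may need to supply its other methods as well); the class name and the guard it performs are purely illustrative:

public class BulkLoadCleanupAuditor implements BulkLoadObserver {

  @Override
  public void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
                                 CleanupBulkLoadRequest request) throws IOException {
    // Illustrative guard: refuse cleanup when no staging token was supplied.
    if (request.getBulkToken().isEmpty()) {
      throw new IOException("Refusing bulk load cleanup without a staging token on region "
          + ctx.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString());
    }
  }
}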
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project hbase by apache.
From the class CoprocessorHConnection, method getConnectionForEnvironment:
/**
* Create a {@link ClusterConnection} based on the environment in which we are running the
* coprocessor. The {@link ClusterConnection} must be externally cleaned up
* (we bypass the usual HTable cleanup mechanisms since we own everything).
* @param env environment hosting the {@link ClusterConnection}
* @return instance of {@link ClusterConnection}.
* @throws IOException if we cannot create the connection
*/
public static ClusterConnection getConnectionForEnvironment(CoprocessorEnvironment env) throws IOException {
  // this bit is a little hacky - just trying to get it going for the moment
  if (env instanceof RegionCoprocessorEnvironment) {
    RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
    RegionServerServices services = e.getRegionServerServices();
    if (services instanceof HRegionServer) {
      // Running inside a region server: reuse its services directly.
      return new CoprocessorHConnection((HRegionServer) services);
    }
  }
  // Otherwise fall back to a regular connection built from the coprocessor's configuration.
  return (ClusterConnection) ConnectionFactory.createConnection(env.getConfiguration());
}
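A typical call site inside a region coprocessor hook might look like the sketch below; the table name and row key are illustrative, and, per the contract above, the caller (not HBase) is responsible for closing the connection:

ClusterConnection connection = CoprocessorHConnection.getConnectionForEnvironment(env);
try (Table lookupTable = connection.getTable(TableName.valueOf("ns:lookup"))) {
  Result row = lookupTable.get(new Get(Bytes.toBytes("some-key")));
  // ... use the row ...
} finally {
  connection.close();  // external cleanup, as the javadoc requires
}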
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project hadoop by apache.
From the class FlowRunCoprocessor, method start:
@Override
public void start(CoprocessorEnvironment e) throws IOException {
  if (e instanceof RegionCoprocessorEnvironment) {
    RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
    this.region = env.getRegion();
    isFlowRunRegion = HBaseTimelineStorageUtils.isFlowRunTable(region.getRegionInfo(), env.getConfiguration());
  }
}
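The isFlowRunRegion flag computed here is the sort of state that later region hooks consult so they can skip tables that are not flow run tables. A hypothetical illustration (the hook body is not taken from the project source), using the standard RegionObserver preGetOp signature:

@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get, List<Cell> results)
    throws IOException {
  if (!isFlowRunRegion) {
    // Not a flow run table: leave the read path untouched.
    return;
  }
  // Flow-run-specific read handling would go here.
}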