Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.
From the class TestCachingHTableFactory, the method testCacheCorrectlyExpiresTable:
@Test
public void testCacheCorrectlyExpiresTable() throws Exception {
    // setup the mocks for the tables we will request
    HTableFactory delegate = Mockito.mock(HTableFactory.class);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    ImmutableBytesPtr t1 = new ImmutableBytesPtr(Bytes.toBytes("t1"));
    ImmutableBytesPtr t2 = new ImmutableBytesPtr(Bytes.toBytes("t2"));
    ImmutableBytesPtr t3 = new ImmutableBytesPtr(Bytes.toBytes("t3"));
    HTableInterface table1 = Mockito.mock(HTableInterface.class);
    HTableInterface table2 = Mockito.mock(HTableInterface.class);
    HTableInterface table3 = Mockito.mock(HTableInterface.class);
    // setup our factory with a cache size of 2
    CachingHTableFactory factory = new CachingHTableFactory(delegate, 2, e);
    Mockito.when(delegate.getTable(t1, factory.getPool())).thenReturn(table1);
    Mockito.when(delegate.getTable(t2, factory.getPool())).thenReturn(table2);
    Mockito.when(delegate.getTable(t3, factory.getPool())).thenReturn(table3);
    HTableInterface ft1 = factory.getTable(t1);
    HTableInterface ft2 = factory.getTable(t2);
    ft1.close();
    HTableInterface ft3 = factory.getTable(t3);
    // get the same table a second time, after it has gone out of cache
    factory.getTable(t1);
    Mockito.verify(delegate, Mockito.times(2)).getTable(t1, factory.getPool());
    Mockito.verify(delegate, Mockito.times(1)).getTable(t2, factory.getPool());
    Mockito.verify(delegate, Mockito.times(1)).getTable(t3, factory.getPool());
    Mockito.verify(table1).close();
}
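
The test relies on least-recently-used eviction: the factory is built with a capacity of 2, so once ft1 is closed and t3 is requested, t1 falls out of the cache, the second getTable(t1) must hit the delegate again (hence times(2)), and the evicted table1 ends up closed. As a rough conceptual sketch only (hypothetical names, not the internals of CachingHTableFactory), an access-ordered LinkedHashMap gives the same capacity-bounded LRU behavior:

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical illustration of a capacity-bounded LRU cache; CachingHTableFactory's
// real implementation differs, but it follows the same eviction rule this test exercises.
class LruCacheSketch<K, V> extends LinkedHashMap<K, V> {
    private final int capacity;

    LruCacheSketch(int capacity) {
        // accessOrder = true so that get() moves an entry to the most-recently-used position
        super(16, 0.75f, true);
        this.capacity = capacity;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // with capacity 2, inserting a third entry evicts the least recently used one
        return size() > capacity;
    }
}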
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.
From the class GroupedAggregateRegionObserver, the method scanUnordered:
/**
 * Used for an aggregate query in which the key order does not necessarily match the group by
 * key order. In this case, we must collect all distinct groups within a region into a map,
 * aggregating as we go.
 * @param limit TODO
 */
private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
        final RegionScanner scanner, final List<Expression> expressions,
        final ServerAggregators aggregators, long limit) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
                + ", group by " + expressions + ", aggregators " + aggregators,
                ScanUtil.getCustomAnnotations(scan)));
    }
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Configuration conf = env.getConfiguration();
    int estDistVals = conf.getInt(GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB, DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES);
    byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserver.ESTIMATED_DISTINCT_VALUES);
    if (estDistValsBytes != null) {
        // Allocate 1.5x estimation
        estDistVals = Math.max(MIN_DISTINCT_VALUES, (int) (Bytes.toInt(estDistValsBytes) * 1.5f));
    }
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    final boolean spillableEnabled = conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
    GroupByCache groupByCache = GroupByCacheFactory.INSTANCE.newCache(env, ScanUtil.getTenantId(scan),
            ScanUtil.getCustomAnnotations(scan), aggregators, estDistVals);
    boolean success = false;
    try {
        boolean hasMore;
        Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
        if (logger.isDebugEnabled()) {
            logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled,
                    ScanUtil.getCustomAnnotations(scan)));
        }
        Region region = c.getEnvironment().getRegion();
        boolean acquiredLock = false;
        try {
            region.startRegionOperation();
            acquiredLock = true;
            synchronized (scanner) {
                do {
                    List<Cell> results = useQualifierAsIndex
                            ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme)
                            : new ArrayList<Cell>();
                    // Results are potentially returned even when the return value of s.next is false,
                    // since this is an indication of whether or not there are more values after the ones returned
                    hasMore = scanner.nextRaw(results);
                    if (!results.isEmpty()) {
                        result.setKeyValues(results);
                        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(result, expressions);
                        Aggregator[] rowAggregators = groupByCache.cache(key);
                        // Aggregate values here
                        aggregators.aggregate(rowAggregators, result);
                    }
                } while (hasMore && groupByCache.size() < limit);
            }
        } finally {
            if (acquiredLock)
                region.closeRegionOperation();
        }
        RegionScanner regionScanner = groupByCache.getScanner(scanner);
        // Do not sort here, but sort back on the client instead.
        // The reason is that if the scan ever extends beyond a region
        // (which can happen if we're basing our parallelization split
        // points on old metadata), we'll get incorrect query results.
        success = true;
        return regionScanner;
    } finally {
        if (!success) {
            Closeables.closeQuietly(groupByCache);
        }
    }
}
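
The javadoc above describes the approach: since rows do not arrive in group-by key order, every distinct group seen in the region is collected into a map (the GroupByCache) keyed by the concatenated group-by value, and each row updates its group's aggregators as the scan proceeds. A minimal sketch of that idea in plain Java follows, with hypothetical names and without GroupByCache's spilling support:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical, simplified illustration of unordered grouped aggregation:
// row[0] and row[1] stand in for the group-by expressions, row[2] for the aggregated column.
static Map<String, long[]> groupUnordered(List<String[]> rows) {
    Map<String, long[]> groups = new HashMap<>();
    for (String[] row : rows) {
        // concatenate the group-by values into a single key, as TupleUtil.getConcatenatedValue does
        String key = row[0] + '\0' + row[1];
        // look up (or create) the per-group aggregator state, as groupByCache.cache(key) does
        long[] state = groups.computeIfAbsent(key, k -> new long[2]);
        state[0] += Long.parseLong(row[2]); // running SUM
        state[1]++;                         // running COUNT
    }
    // like the region scanner returned above, the result is unsorted; sorting happens on the client
    return groups;
}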
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.
From the class MetaDataRegionObserver, the method postOpen:
@Override
public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
    final RegionCoprocessorEnvironment env = e.getEnvironment();
    Runnable r = new Runnable() {
        @Override
        public void run() {
            HTableInterface metaTable = null;
            HTableInterface statsTable = null;
            try {
                ReadOnlyProps props = new ReadOnlyProps(env.getConfiguration().iterator());
                Thread.sleep(1000);
                metaTable = env.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props));
                statsTable = env.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, props));
                if (UpgradeUtil.truncateStats(metaTable, statsTable)) {
                    LOG.info("Stats are successfully truncated for upgrade 4.7!!");
                }
            } catch (Exception exception) {
                LOG.warn("Exception while truncate stats..," + " please check and delete stats manually inorder to get proper result with old client!!");
                LOG.warn(exception.getStackTrace());
            } finally {
                try {
                    if (metaTable != null) {
                        metaTable.close();
                    }
                    if (statsTable != null) {
                        statsTable.close();
                    }
                } catch (IOException e) {
                    // ignore failures while closing the tables
                }
            }
        }
    };
    Thread t = new Thread(r);
    t.setDaemon(true);
    t.start();
    if (!enableRebuildIndex) {
        LOG.info("Failure Index Rebuild is skipped by configuration.");
        return;
    }
    // turn off verbose deprecation logging
    Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
    if (deprecationLogger != null) {
        deprecationLogger.setLevel(Level.WARN);
    }
    try {
        Class.forName(PhoenixDriver.class.getName());
        // starts index rebuild schedule work
        BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment());
        // run scheduled task every 10 secs
        executor.scheduleAtFixedRate(task, 10000, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS);
    } catch (ClassNotFoundException ex) {
        LOG.error("BuildIndexScheduleTask cannot start!", ex);
    }
}
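
The executor and rebuildIndexTimeInterval fields used by scheduleAtFixedRate() are not shown in this snippet. As a sketch under that assumption (hypothetical names, not the actual MetaDataRegionObserver code), a scheduler of this shape is typically a single-threaded ScheduledExecutorService backed by daemon threads, so a pending rebuild never blocks region server shutdown:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

// Hypothetical sketch of a scheduler suitable for the scheduleAtFixedRate() call above.
static ScheduledExecutorService newRebuildScheduler() {
    return Executors.newSingleThreadScheduledExecutor(runnable -> {
        Thread thread = new Thread(runnable, "index-rebuild-scheduler");
        // daemon so the JVM is never kept alive waiting on the rebuild task
        thread.setDaemon(true);
        return thread;
    });
}
// usage sketch: executor = newRebuildScheduler();
// executor.scheduleAtFixedRate(task, 10000, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS);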
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.
From the class PayloadTableRegionObserver, the method start:
@Override
public void start(CoprocessorEnvironment e) throws IOException {
    if (e instanceof RegionCoprocessorEnvironment) {
        RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        HTableDescriptor tableDesc = env.getRegion().getTableDesc();
        String metadataTableNamespace = tableDesc.getValue(Constants.MessagingSystem.HBASE_METADATA_TABLE_NAMESPACE);
        String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
        prefixLength = Integer.valueOf(tableDesc.getValue(Constants.MessagingSystem.HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES));
        String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
        CConfigurationReader cConfReader = new CConfigurationReader(env.getConfiguration(), sysConfigTablePrefix);
        topicMetadataCacheSupplier = new TopicMetadataCacheSupplier(env, cConfReader, hbaseNamespacePrefix,
                metadataTableNamespace, new DefaultScanBuilder());
        topicMetadataCache = topicMetadataCacheSupplier.get();
    }
}
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.
From the class HBaseQueueRegionObserver, the method start:
@Override
public void start(CoprocessorEnvironment env) {
    if (env instanceof RegionCoprocessorEnvironment) {
        HTableDescriptor tableDesc = ((RegionCoprocessorEnvironment) env).getRegion().getTableDesc();
        String hTableName = tableDesc.getNameAsString();
        String prefixBytes = tableDesc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES);
        try {
            // Default to SALT_BYTES for the older salted queue implementation.
            this.prefixBytes = prefixBytes == null ? SaltedHBaseQueueStrategy.SALT_BYTES : Integer.parseInt(prefixBytes);
        } catch (NumberFormatException e) {
            // Shouldn't happen for table created by cdap.
            LOG.error("Unable to parse value of '" + HBaseQueueAdmin.PROPERTY_PREFIX_BYTES + "' property. "
                    + "Default to " + SaltedHBaseQueueStrategy.SALT_BYTES, e);
            this.prefixBytes = SaltedHBaseQueueStrategy.SALT_BYTES;
        }
        namespaceId = HTableNameConverter.from(tableDesc).getNamespace();
        appName = HBaseQueueAdmin.getApplicationName(hTableName);
        flowName = HBaseQueueAdmin.getFlowName(hTableName);
        Configuration conf = env.getConfiguration();
        String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
        final String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
        txStateCacheSupplier = new DefaultTransactionStateCacheSupplier(sysConfigTablePrefix, conf);
        txStateCache = txStateCacheSupplier.get();
        txSnapshotSupplier = new Supplier<TransactionVisibilityState>() {
            @Override
            public TransactionVisibilityState get() {
                return txStateCache.getLatestState();
            }
        };
        String queueConfigTableId = HBaseQueueAdmin.getConfigTableName();
        configTableName = HTableNameConverter.toTableName(hbaseNamespacePrefix, TableId.from(namespaceId, queueConfigTableId));
        cConfReader = new CConfigurationReader(conf, sysConfigTablePrefix);
        configCacheSupplier = createConfigCache(env);
        configCache = configCacheSupplier.get();
    }
}