Example 66 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

the class TestCachingHTableFactory method testCacheCorrectlyExpiresTable.

@Test
public void testCacheCorrectlyExpiresTable() throws Exception {
    // setup the mocks for the tables we will request
    HTableFactory delegate = Mockito.mock(HTableFactory.class);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    ImmutableBytesPtr t1 = new ImmutableBytesPtr(Bytes.toBytes("t1"));
    ImmutableBytesPtr t2 = new ImmutableBytesPtr(Bytes.toBytes("t2"));
    ImmutableBytesPtr t3 = new ImmutableBytesPtr(Bytes.toBytes("t3"));
    HTableInterface table1 = Mockito.mock(HTableInterface.class);
    HTableInterface table2 = Mockito.mock(HTableInterface.class);
    HTableInterface table3 = Mockito.mock(HTableInterface.class);
    // setup our factory with a cache size of 2
    CachingHTableFactory factory = new CachingHTableFactory(delegate, 2, e);
    Mockito.when(delegate.getTable(t1, factory.getPool())).thenReturn(table1);
    Mockito.when(delegate.getTable(t2, factory.getPool())).thenReturn(table2);
    Mockito.when(delegate.getTable(t3, factory.getPool())).thenReturn(table3);
    HTableInterface ft1 = factory.getTable(t1);
    HTableInterface ft2 = factory.getTable(t2);
    // release our reference to t1 so the cache is free to evict it
    ft1.close();
    // requesting a third table overflows the size-2 cache and evicts t1
    HTableInterface ft3 = factory.getTable(t3);
    // get the same table a second time, after it has been evicted
    factory.getTable(t1);
    Mockito.verify(delegate, Mockito.times(2)).getTable(t1, factory.getPool());
    Mockito.verify(delegate, Mockito.times(1)).getTable(t2, factory.getPool());
    Mockito.verify(delegate, Mockito.times(1)).getTable(t3, factory.getPool());
    Mockito.verify(table1).close();
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Configuration(org.apache.hadoop.conf.Configuration) CachingHTableFactory(org.apache.phoenix.hbase.index.table.CachingHTableFactory) HTableFactory(org.apache.phoenix.hbase.index.table.HTableFactory) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Test(org.junit.Test)
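
The eviction behavior this test pins down (a size-2 cache where requesting a third table evicts the least-recently-used entry and closes it) is classic LRU. As a point of comparison, here is a minimal, hedged sketch of those semantics using an access-order LinkedHashMap; LruTableCache is a hypothetical name, not Phoenix's actual CachingHTableFactory implementation.

import java.util.LinkedHashMap;
import java.util.Map;

// Minimal LRU sketch of the semantics verified above. Hypothetical class;
// CachingHTableFactory's real implementation may differ.
public class LruTableCache<K, V> extends LinkedHashMap<K, V> {

    private final int maxSize;

    public LruTableCache(int maxSize) {
        // accessOrder = true, so a get() refreshes an entry's recency
        super(16, 0.75f, true);
        this.maxSize = maxSize;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // Evict the least-recently-used entry once capacity is exceeded;
        // in the test, requesting t3 is what pushes t1 out of the cache.
        return size() > maxSize;
    }
}

A real table cache must additionally close the evicted HTableInterface, which is exactly what Mockito.verify(table1).close() asserts above.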

Example 67 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

the class GroupedAggregateRegionObserver method scanUnordered.

/**
     * Used for an aggregate query in which the key order does not necessarily match the group by
     * key order. In this case, we must collect all distinct groups within a region into a map,
     * aggregating as we go.
     * @param limit maximum number of distinct groups to collect before returning
     */
private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, final RegionScanner scanner, final List<Expression> expressions, final ServerAggregators aggregators, long limit) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan + ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
    }
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Configuration conf = env.getConfiguration();
    int estDistVals = conf.getInt(GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB, DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES);
    byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserver.ESTIMATED_DISTINCT_VALUES);
    if (estDistValsBytes != null) {
        // Allocate 1.5x estimation
        estDistVals = Math.max(MIN_DISTINCT_VALUES, (int) (Bytes.toInt(estDistValsBytes) * 1.5f));
    }
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    final boolean spillableEnabled = conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
    GroupByCache groupByCache = GroupByCacheFactory.INSTANCE.newCache(env, ScanUtil.getTenantId(scan), ScanUtil.getCustomAnnotations(scan), aggregators, estDistVals);
    boolean success = false;
    try {
        boolean hasMore;
        Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
        if (logger.isDebugEnabled()) {
            logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
        }
        Region region = c.getEnvironment().getRegion();
        boolean acquiredLock = false;
        try {
            region.startRegionOperation();
            acquiredLock = true;
            synchronized (scanner) {
                do {
                    List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                    // Results may be returned even when nextRaw returns false,
                    // since its return value only indicates whether there are
                    // more rows after the ones just returned
                    hasMore = scanner.nextRaw(results);
                    if (!results.isEmpty()) {
                        result.setKeyValues(results);
                        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(result, expressions);
                        Aggregator[] rowAggregators = groupByCache.cache(key);
                        // Aggregate values here
                        aggregators.aggregate(rowAggregators, result);
                    }
                } while (hasMore && groupByCache.size() < limit);
            }
        } finally {
            if (acquiredLock)
                region.closeRegionOperation();
        }
        RegionScanner regionScanner = groupByCache.getScanner(scanner);
        // Do not sort here, but sort back on the client instead
        // The reason is that if the scan ever extends beyond a region
        // (which can happen if we're basing our parallelization split
        // points on old metadata), we'll get incorrect query results.
        success = true;
        return regionScanner;
    } finally {
        if (!success) {
            Closeables.closeQuietly(groupByCache);
        }
    }
}
Also used : EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) Configuration(org.apache.hadoop.conf.Configuration) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) PInteger(org.apache.phoenix.schema.types.PInteger) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Region(org.apache.hadoop.hbase.regionserver.Region) Cell(org.apache.hadoop.hbase.Cell) SpillableGroupByCache(org.apache.phoenix.cache.aggcache.SpillableGroupByCache) Tuple(org.apache.phoenix.schema.tuple.Tuple)
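
Independent of the Phoenix types, the javadoc's strategy reduces to: hash each row's concatenated group-by key into a map of per-group aggregators and fold rows in as they stream past, stopping once the distinct-group limit is reached. Here is a hedged sketch under that assumption, with String keys and a running sum standing in for ImmutableBytesPtr keys and Aggregator arrays:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hedged sketch of unordered group-by aggregation. Rows arrive in arbitrary
// key order, so every distinct group must stay in the map until the scan is
// exhausted; the types here are illustrative stand-ins, not Phoenix API.
class UnorderedGroupBy {

    static Map<String, Long> aggregate(List<Map.Entry<String, Long>> rows, long limit) {
        Map<String, Long> groups = new HashMap<>();
        for (Map.Entry<String, Long> row : rows) {
            // Fold the row into its group's running aggregate,
            // creating the group on first sight.
            groups.merge(row.getKey(), row.getValue(), Long::sum);
            // Mirror the loop guard above: stop once the number of
            // distinct groups reaches the limit.
            if (groups.size() >= limit) {
                break;
            }
        }
        return groups;
    }
}

This also suggests why a spillable GroupByCache exists in the imports: the map's footprint grows with the number of distinct groups rather than the number of rows, which is what the estDistVals sizing hint tries to anticipate.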

Example 68 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

the class MetaDataRegionObserver method postOpen.

@Override
public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
    final RegionCoprocessorEnvironment env = e.getEnvironment();
    Runnable r = new Runnable() {

        @Override
        public void run() {
            HTableInterface metaTable = null;
            HTableInterface statsTable = null;
            try {
                ReadOnlyProps props = new ReadOnlyProps(env.getConfiguration().iterator());
                Thread.sleep(1000);
                metaTable = env.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props));
                statsTable = env.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, props));
                if (UpgradeUtil.truncateStats(metaTable, statsTable)) {
                    LOG.info("Stats were successfully truncated for the 4.7 upgrade");
                }
            } catch (Exception exception) {
                // pass the exception itself so the full stack trace is logged;
                // getStackTrace() alone would only log the array's toString()
                LOG.warn("Exception while truncating stats; please check and delete stats manually to get correct results with old clients", exception);
            } finally {
                try {
                    if (metaTable != null) {
                        metaTable.close();
                    }
                    if (statsTable != null) {
                        statsTable.close();
                    }
                } catch (IOException e) {
                    // best-effort close; nothing useful to do on failure
                }
            }
        }
    };
    Thread t = new Thread(r);
    t.setDaemon(true);
    t.start();
    if (!enableRebuildIndex) {
        LOG.info("Failure Index Rebuild is skipped by configuration.");
        return;
    }
    // turn off verbose deprecation logging
    Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
    if (deprecationLogger != null) {
        deprecationLogger.setLevel(Level.WARN);
    }
    try {
        Class.forName(PhoenixDriver.class.getName());
        // starts index rebuild schedule work
        BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment());
        // run scheduled task every 10 secs
        executor.scheduleAtFixedRate(task, 10000, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS);
    } catch (ClassNotFoundException ex) {
        LOG.error("BuildIndexScheduleTask cannot start!", ex);
    }
}
Also used : ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) PhoenixDriver(org.apache.phoenix.jdbc.PhoenixDriver) IOException(java.io.IOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Logger(org.apache.log4j.Logger) ServiceException(com.google.protobuf.ServiceException) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) SQLException(java.sql.SQLException)
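
The last few lines follow the standard ScheduledExecutorService pattern: an initial delay of 10 seconds, then a fixed period taken from configuration, on a daemon thread so shutdown is not blocked. A self-contained sketch of just that pattern; the class and method names here are illustrative assumptions, not Phoenix API:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hedged sketch of the scheduling idiom used for BuildIndexScheduleTask.
// "RebuildScheduler" is a hypothetical name; only the pattern is the point.
class RebuildScheduler {

    private final ScheduledExecutorService executor =
            Executors.newSingleThreadScheduledExecutor(runnable -> {
                Thread t = new Thread(runnable, "index-rebuild");
                // daemon, like the truncation thread above, so the region
                // server can exit without waiting on this task
                t.setDaemon(true);
                return t;
            });

    void scheduleEvery(Runnable task, long intervalMillis) {
        // first run after 10 seconds, then repeat at the configured interval
        executor.scheduleAtFixedRate(task, 10_000L, intervalMillis, TimeUnit.MILLISECONDS);
    }
}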

Example 69 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.

the class PayloadTableRegionObserver method start.

@Override
public void start(CoprocessorEnvironment e) throws IOException {
    if (e instanceof RegionCoprocessorEnvironment) {
        RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        HTableDescriptor tableDesc = env.getRegion().getTableDesc();
        String metadataTableNamespace = tableDesc.getValue(Constants.MessagingSystem.HBASE_METADATA_TABLE_NAMESPACE);
        String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
        prefixLength = Integer.valueOf(tableDesc.getValue(Constants.MessagingSystem.HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES));
        String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
        CConfigurationReader cConfReader = new CConfigurationReader(env.getConfiguration(), sysConfigTablePrefix);
        topicMetadataCacheSupplier = new TopicMetadataCacheSupplier(env, cConfReader, hbaseNamespacePrefix, metadataTableNamespace, new DefaultScanBuilder());
        topicMetadataCache = topicMetadataCacheSupplier.get();
    }
}
Also used : TopicMetadataCacheSupplier(co.cask.cdap.messaging.TopicMetadataCacheSupplier) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) CConfigurationReader(co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader) DefaultScanBuilder(co.cask.cdap.data2.util.hbase.DefaultScanBuilder) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
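
Guarding start() with an instanceof check is the defensive idiom here (the next example does the same): the same coprocessor class can be loaded in contexts other than a region, and only a RegionCoprocessorEnvironment exposes the region and its table descriptor, so the cast is only safe after the check. A minimal skeleton of the guard; MyRegionObserver is a hypothetical name:

import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Skeleton of the instanceof guard used by the CDAP observers.
// "MyRegionObserver" is a hypothetical class name.
public class MyRegionObserver extends BaseRegionObserver {

    @Override
    public void start(CoprocessorEnvironment e) throws IOException {
        if (e instanceof RegionCoprocessorEnvironment) {
            RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
            // region-scoped initialization only happens here, e.g. reading
            // table properties from the descriptor as the examples do
            HTableDescriptor tableDesc = env.getRegion().getTableDesc();
        }
    }
}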

Example 70 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project cdap by caskdata.

the class HBaseQueueRegionObserver method start.

@Override
public void start(CoprocessorEnvironment env) {
    if (env instanceof RegionCoprocessorEnvironment) {
        HTableDescriptor tableDesc = ((RegionCoprocessorEnvironment) env).getRegion().getTableDesc();
        String hTableName = tableDesc.getNameAsString();
        String prefixBytes = tableDesc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES);
        try {
            // Default to SALT_BYTES for the older salted queue implementation.
            this.prefixBytes = prefixBytes == null ? SaltedHBaseQueueStrategy.SALT_BYTES : Integer.parseInt(prefixBytes);
        } catch (NumberFormatException e) {
            // Shouldn't happen for tables created by CDAP.
            LOG.error("Unable to parse value of '" + HBaseQueueAdmin.PROPERTY_PREFIX_BYTES + "' property. " + "Default to " + SaltedHBaseQueueStrategy.SALT_BYTES, e);
            this.prefixBytes = SaltedHBaseQueueStrategy.SALT_BYTES;
        }
        namespaceId = HTableNameConverter.from(tableDesc).getNamespace();
        appName = HBaseQueueAdmin.getApplicationName(hTableName);
        flowName = HBaseQueueAdmin.getFlowName(hTableName);
        Configuration conf = env.getConfiguration();
        String hbaseNamespacePrefix = tableDesc.getValue(Constants.Dataset.TABLE_PREFIX);
        final String sysConfigTablePrefix = HTableNameConverter.getSysConfigTablePrefix(hbaseNamespacePrefix);
        txStateCacheSupplier = new DefaultTransactionStateCacheSupplier(sysConfigTablePrefix, conf);
        txStateCache = txStateCacheSupplier.get();
        txSnapshotSupplier = new Supplier<TransactionVisibilityState>() {

            @Override
            public TransactionVisibilityState get() {
                return txStateCache.getLatestState();
            }
        };
        String queueConfigTableId = HBaseQueueAdmin.getConfigTableName();
        configTableName = HTableNameConverter.toTableName(hbaseNamespacePrefix, TableId.from(namespaceId, queueConfigTableId));
        cConfReader = new CConfigurationReader(conf, sysConfigTablePrefix);
        configCacheSupplier = createConfigCache(env);
        configCache = configCacheSupplier.get();
    }
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) CConfiguration(co.cask.cdap.common.conf.CConfiguration) Configuration(org.apache.hadoop.conf.Configuration) CConfigurationReader(co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader) TransactionVisibilityState(org.apache.tephra.persist.TransactionVisibilityState) DefaultTransactionStateCacheSupplier(co.cask.cdap.data2.transaction.coprocessor.DefaultTransactionStateCacheSupplier) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
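
The prefix-bytes handling above is a parse-with-fallback idiom: a missing or malformed table property degrades to a known default instead of failing region startup. A hedged sketch of the idiom in isolation; the class and method names are hypothetical, not CDAP API:

// Parse-with-fallback sketch; "PropertyParsing" and "parseIntOrDefault"
// are hypothetical names for illustration only.
final class PropertyParsing {

    static int parseIntOrDefault(String value, int defaultValue) {
        if (value == null) {
            // property absent: use the known default
            return defaultValue;
        }
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException e) {
            // malformed value: a real implementation would log, as the
            // example above does, before falling back
            return defaultValue;
        }
    }
}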

Aggregations

RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) 78
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 37
Configuration (org.apache.hadoop.conf.Configuration) 25
CConfigurationReader (co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader) 21
Test (org.junit.Test) 16
TopicMetadataCacheSupplier (co.cask.cdap.messaging.TopicMetadataCacheSupplier) 14
Put (org.apache.hadoop.hbase.client.Put) 14
Region (org.apache.hadoop.hbase.regionserver.Region) 14
DefaultScanBuilder (co.cask.cdap.data2.util.hbase.DefaultScanBuilder) 11
Mutation (org.apache.hadoop.hbase.client.Mutation) 9
InvocationOnMock (org.mockito.invocation.InvocationOnMock) 9
Cell (org.apache.hadoop.hbase.Cell) 8
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 8
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 8
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface) 8
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException) 8
User (org.apache.hadoop.hbase.security.User) 8
CConfiguration (co.cask.cdap.common.conf.CConfiguration) 7
IncrementHandlerState (co.cask.cdap.data2.increment.hbase.IncrementHandlerState) 7
CConfigurationCacheSupplier (co.cask.cdap.data2.transaction.coprocessor.CConfigurationCacheSupplier) 7