
Example 11 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

Class SkipScanAfterManualSplitIT, method testManualSplit.

@Test
public void testManualSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    initTable(tableName);
    Connection conn = getConnection();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegions = services.getAllTableRegions(tableNameBytes).size();
    int nInitialRegions = nRegions;
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.split(tableName);
        int nTries = 0;
        while (nRegions == nInitialRegions && nTries < 10) {
            Thread.sleep(1000);
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            nTries++;
        }
        // Split finished by this time, but cache isn't updated until
        // table is accessed
        assertEquals(nRegions, nInitialRegions);
        int nRows = 2;
        String query = "SELECT count(*) FROM " + tableName + " WHERE a IN ('tl','jt',' a',' b',' c',' d')";
        ResultSet rs1 = conn.createStatement().executeQuery(query);
        assertTrue(rs1.next());
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        // Region cache has been updated, as there are more regions now
        assertNotEquals(nRegions, nInitialRegions);
        /*
            if (nRows != rs1.getInt(1)) {
                // Run the same query again and it always passes now
                // (as region cache is up-to-date)
                ResultSet r2 = conn.createStatement().executeQuery(query);
                assertTrue(r2.next());
                assertEquals(nRows, r2.getInt(1));
            }
            */
        assertEquals(nRows, rs1.getInt(1));
    } finally {
        admin.close();
    }
}
Also used: HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), Connection (java.sql.Connection), ResultSet (java.sql.ResultSet), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Test (org.junit.Test)
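
For reference, the unwrap pattern this test relies on can be isolated into a minimal, self-contained sketch. The JDBC URL and table name below are placeholders, not values from the test:

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

public class RegionCachePeek {
    public static void main(String[] args) throws Exception {
        // Placeholder URL and table name for this sketch.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            ConnectionQueryServices services =
                    conn.unwrap(PhoenixConnection.class).getQueryServices();
            // Returns the client-side cached region boundaries; as the test above
            // shows, the cache is only refreshed when the table is next accessed.
            int nRegions = services.getAllTableRegions(Bytes.toBytes("MY_TABLE")).size();
            System.out.println("Cached region count: " + nRegions);
        }
    }
}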

Example 12 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

Class ScanPlan, method getEstimateOfDataSizeToScanIfWithinThreshold.

/**
 * @return a pair in which the first element is the estimated number of bytes that will be
 *         scanned and the second is the estimated number of rows. The returned value is null
 *         if the estimated size of the data to scan is beyond a threshold.
 * @throws SQLException
 */
private static Pair<Long, Long> getEstimateOfDataSizeToScanIfWithinThreshold(StatementContext context, PTable table, Integer perScanLimit) throws SQLException {
    Scan scan = context.getScan();
    ConnectionQueryServices services = context.getConnection().getQueryServices();
    long estRowSize = SchemaUtil.estimateRowSize(table);
    long regionSize = services.getProps().getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    if (perScanLimit == null || scan.getFilter() != null) {
        /*
         * If a limit is not provided or if we have a filter, then we are not able to decide whether
         * the amount of data we need to scan is less than the threshold.
         */
        return null;
    }
    float factor = services.getProps().getFloat(QueryServices.LIMITED_QUERY_SERIAL_THRESHOLD, QueryServicesOptions.DEFAULT_LIMITED_QUERY_SERIAL_THRESHOLD);
    long threshold = (long) (factor * regionSize);
    long estimatedBytes = perScanLimit * estRowSize;
    long estimatedRows = perScanLimit;
    return (estimatedBytes < threshold) ? new Pair<>(estimatedBytes, estimatedRows) : null;
}
Also used: Scan (org.apache.hadoop.hbase.client.Scan), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
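
To make the threshold decision concrete, the same arithmetic can be restated with illustrative values. The region size, serial threshold factor, and row size below are assumptions for this sketch; in the real method they come from configuration (HREGION_MAX_FILESIZE, LIMITED_QUERY_SERIAL_THRESHOLD) and schema estimates:

public class SerialThresholdSketch {
    public static void main(String[] args) {
        long regionSize = 10L * 1024 * 1024 * 1024; // assume a 10 GB max region file size
        float factor = 0.2f;                        // assumed serial threshold factor
        long estRowSize = 1024;                     // assume ~1 KB per row
        int perScanLimit = 1000;                    // e.g. LIMIT 1000 on the query

        long threshold = (long) (factor * regionSize);   // 2 GB
        long estimatedBytes = perScanLimit * estRowSize; // ~1 MB
        // Well under the threshold, so the plan would qualify for a serial scan.
        System.out.println(estimatedBytes < threshold);  // true
    }
}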

Example 13 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

Class PhoenixRecordReader, method initialize.

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    final PhoenixInputSplit pSplit = (PhoenixInputSplit) split;
    final List<Scan> scans = pSplit.getScans();
    try {
        List<PeekingResultIterator> iterators = Lists.newArrayListWithExpectedSize(scans.size());
        StatementContext ctx = queryPlan.getContext();
        ReadMetricQueue readMetrics = ctx.getReadMetricsQueue();
        String tableName = queryPlan.getTableRef().getTable().getPhysicalName().getString();
        String snapshotName = this.configuration.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY);
        // Clear the table region boundary cache to make sure long running jobs stay up to date
        byte[] tableNameBytes = queryPlan.getTableRef().getTable().getPhysicalName().getBytes();
        ConnectionQueryServices services = queryPlan.getContext().getConnection().getQueryServices();
        services.clearTableRegionCache(tableNameBytes);
        long renewScannerLeaseThreshold = queryPlan.getContext().getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
        boolean isRequestMetricsEnabled = readMetrics.isRequestMetricsEnabled();
        for (Scan scan : scans) {
            // For MR, skip the region boundary check exception if we encounter a split. ref: PHOENIX-2599
            scan.setAttribute(BaseScannerRegionObserver.SKIP_REGION_BOUNDARY_CHECK, Bytes.toBytes(true));
            PeekingResultIterator peekingResultIterator;
            ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan, isRequestMetricsEnabled);
            if (snapshotName != null) {
                // result iterator to read snapshots
                final TableSnapshotResultIterator tableSnapshotResultIterator = new TableSnapshotResultIterator(configuration, scan, scanMetricsHolder);
                peekingResultIterator = LookAheadResultIterator.wrap(tableSnapshotResultIterator);
            } else {
                final TableResultIterator tableResultIterator = new TableResultIterator(queryPlan.getContext().getConnection().getMutationState(), scan, scanMetricsHolder, renewScannerLeaseThreshold, queryPlan, MapReduceParallelScanGrouper.getInstance());
                peekingResultIterator = LookAheadResultIterator.wrap(tableResultIterator);
            }
            iterators.add(peekingResultIterator);
        }
        ResultIterator iterator = queryPlan.useRoundRobinIterator() ? RoundRobinResultIterator.newIterator(iterators, queryPlan) : ConcatResultIterator.newIterator(iterators);
        if (queryPlan.getContext().getSequenceManager().getSequenceCount() > 0) {
            iterator = new SequenceResultIterator(iterator, queryPlan.getContext().getSequenceManager());
        }
        this.resultIterator = iterator;
        // Clone the row projector as it's not thread safe and would be used simultaneously by
        // multiple threads otherwise.
        this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
    } catch (SQLException e) {
        LOG.error(String.format("Error [%s] initializing PhoenixRecordReader.", e.getMessage()));
        Throwables.propagate(e);
    }
}
Also used: ReadMetricQueue (org.apache.phoenix.monitoring.ReadMetricQueue), SQLException (java.sql.SQLException), ScanMetricsHolder (org.apache.phoenix.monitoring.ScanMetricsHolder), StatementContext (org.apache.phoenix.compile.StatementContext), PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet), Scan (org.apache.hadoop.hbase.client.Scan), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
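
The two cache-related calls in this method, clearTableRegionCache and the SKIP_REGION_BOUNDARY_CHECK scan attribute, can be sketched on their own. Both APIs are taken from the snippet above; the connection URL and table name are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

public class MapReduceScanSetup {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            ConnectionQueryServices services =
                    conn.unwrap(PhoenixConnection.class).getQueryServices();
            // Drop the cached region boundaries so a long-running job re-reads
            // them from meta, as initialize() does above.
            services.clearTableRegionCache(Bytes.toBytes("MY_TABLE"));

            // Tolerate splits that happen after the scan ranges were computed
            // (PHOENIX-2599), as initialize() does for each per-split scan.
            Scan scan = new Scan();
            scan.setAttribute(BaseScannerRegionObserver.SKIP_REGION_BOUNDARY_CHECK,
                    Bytes.toBytes(true));
            // The scan would then be handed to a result iterator, as above.
        }
    }
}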

Example 14 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

Class MetaDataClient, method updateCache.

private MetaDataMutationResult updateCache(PName origTenantId, String schemaName, String tableName, boolean alwaysHitServer, Long resolvedTimestamp) throws SQLException {
    // TODO: pass byte[] here
    boolean systemTable = SYSTEM_CATALOG_SCHEMA.equals(schemaName);
    // System tables must always have a null tenantId
    PName tenantId = systemTable ? null : origTenantId;
    PTable table = null;
    PTableRef tableRef = null;
    String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    long tableTimestamp = HConstants.LATEST_TIMESTAMP;
    long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP;
    try {
        tableRef = connection.getTableRef(new PTableKey(tenantId, fullTableName));
        table = tableRef.getTable();
        tableTimestamp = table.getTimeStamp();
        tableResolvedTimestamp = tableRef.getResolvedTimeStamp();
    } catch (TableNotFoundException e) {
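        // Ignore: the table is simply not in the client-side cache yet.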
    }
    boolean defaultTransactional = connection.getQueryServices().getProps().getBoolean(QueryServices.DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB, QueryServicesOptions.DEFAULT_TRANSACTIONAL);
    // Start a txn if all tables are transactional by default, or if we found the table in the cache and it is transactional
    // TODO if system tables become transactional remove the check
    boolean isTransactional = defaultTransactional || (table != null && table.isTransactional());
    if (!systemTable && isTransactional && !connection.getMutationState().isTransactionStarted()) {
        connection.getMutationState().startTransaction();
    }
    resolvedTimestamp = resolvedTimestamp == null ? TransactionUtil.getResolvedTimestamp(connection, isTransactional, HConstants.LATEST_TIMESTAMP) : resolvedTimestamp;
    // Avoid hitting the server when the cached table can be used, i.e. when:
    // 1. the table is a system table, or
    // 2. the table was already resolved as of that timestamp, or
    // 3. the cached entry is younger than the table's update cache frequency
    if (table != null && !alwaysHitServer && (systemTable || resolvedTimestamp == tableResolvedTimestamp || connection.getMetaDataCache().getAge(tableRef) < table.getUpdateCacheFrequency())) {
        return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, QueryConstants.UNSET_TIMESTAMP, table);
    }
    int maxTryCount = tenantId == null ? 1 : 2;
    int tryCount = 0;
    MetaDataMutationResult result;
    do {
        final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName);
        final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName);
        ConnectionQueryServices queryServices = connection.getQueryServices();
        result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, resolvedTimestamp);
        // if the table was assumed to be transactional, but is actually not transactional then re-resolve as of the right timestamp (and vice versa)
        if (table == null && result.getTable() != null && result.getTable().isTransactional() != isTransactional) {
            result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, TransactionUtil.getResolvedTimestamp(connection, result.getTable().isTransactional(), HConstants.LATEST_TIMESTAMP));
        }
        if (SYSTEM_CATALOG_SCHEMA.equals(schemaName)) {
            return result;
        }
        MutationCode code = result.getMutationCode();
        PTable resultTable = result.getTable();
        // We found an updated table, so update our cache
        if (resultTable != null) {
            // Cache table, even if multi-tenant table found for null tenant_id
            // These may be accessed by tenant-specific connections, as the
            // tenant_id will always be added to mask other tenants data.
            // Otherwise, a tenant would be required to create a VIEW first
            // which is not really necessary unless you want to filter or add
            // columns
            addTableToCache(result);
            return result;
        } else {
            // No table was returned from the server. If we queried with a tenant id,
            // the loop below retries once with the global tenant id.
            if (table != null) {
                // Ensures that table in result is set to table found in our cache.
                if (code == MutationCode.TABLE_ALREADY_EXISTS) {
                    result.setTable(table);
                    // Although this table is up-to-date, the parent table may not be.
                    // In this case, we update the parent table which may in turn pull
                    // in indexes to add to this table.
                    long resolvedTime = TransactionUtil.getResolvedTime(connection, result);
                    if (addIndexesFromParentTable(result, resolvedTimestamp)) {
                        connection.addTable(result.getTable(), resolvedTime);
                    } else {
                        // if we aren't adding the table, we still need to update the resolved time of the table
                        connection.updateResolvedTimestamp(table, resolvedTime);
                    }
                    return result;
                }
                // If the table still isn't found on the final try, evict the stale entry from the cache.
                if (code == MutationCode.TABLE_NOT_FOUND && tryCount + 1 == maxTryCount) {
                    connection.removeTable(origTenantId, fullTableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                }
            }
        }
        // Try again with global tenantId
        tenantId = null;
    } while (++tryCount < maxTryCount);
    return result;
}
Also used: IndexKeyConstraint (org.apache.phoenix.parse.IndexKeyConstraint), PrimaryKeyConstraint (org.apache.phoenix.parse.PrimaryKeyConstraint), ColumnDefInPkConstraint (org.apache.phoenix.parse.ColumnDefInPkConstraint), MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode), MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
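
The freshness check near the top of this method (getAge(tableRef) < table.getUpdateCacheFrequency()) is driven by the UPDATE_CACHE_FREQUENCY table property. A minimal sketch of declaring it follows; the URL, table name, and 60s value are placeholders for illustration:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class UpdateCacheFrequencyExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                Statement stmt = conn.createStatement()) {
            // With a 60s update cache frequency, updateCache() above can serve
            // repeated resolutions of this table from the client cache without
            // issuing a getTable RPC to the server.
            stmt.execute("CREATE TABLE IF NOT EXISTS MY_TABLE ("
                    + "  id BIGINT PRIMARY KEY, v VARCHAR"
                    + ") UPDATE_CACHE_FREQUENCY=60000");
        }
    }
}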

Example 15 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

Class QueryTimeoutIT, method testSetRPCTimeOnConnection.

@Test
public void testSetRPCTimeOnConnection() throws Exception {
    Properties overriddenProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    overriddenProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    overriddenProps.setProperty("hbase.rpc.timeout", Long.toString(100));
    String url = QueryUtil.getConnectionUrl(overriddenProps, config, "longRunning");
    Connection conn1 = DriverManager.getConnection(url, overriddenProps);
    ConnectionQueryServices s1 = conn1.unwrap(PhoenixConnection.class).getQueryServices();
    ReadOnlyProps configProps = s1.getProps();
    assertEquals("100", configProps.get("hbase.rpc.timeout"));
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    ConnectionQueryServices s2 = conn2.unwrap(PhoenixConnection.class).getQueryServices();
    assertFalse(s1 == s2);
    Connection conn3 = DriverManager.getConnection(getUrl(), props);
    ConnectionQueryServices s3 = conn3.unwrap(PhoenixConnection.class).getQueryServices();
    assertTrue(s2 == s3);
    Connection conn4 = DriverManager.getConnection(url, overriddenProps);
    ConnectionQueryServices s4 = conn4.unwrap(PhoenixConnection.class).getQueryServices();
    assertTrue(s1 == s4);
}
Also used: ReadOnlyProps (org.apache.phoenix.util.ReadOnlyProps), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), Connection (java.sql.Connection), Properties (java.util.Properties), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Test (org.junit.Test)
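
The identity assertions above reflect a deliberate design: Phoenix keeps one ConnectionQueryServices (and its underlying HBase connection and caches) per distinct connection URL plus properties, so creating JDBC connections stays cheap while cluster state is shared. A minimal sketch of that sharing, with a placeholder URL and empty properties:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

import org.apache.phoenix.jdbc.PhoenixConnection;

public class SharedServicesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        try (Connection a = DriverManager.getConnection("jdbc:phoenix:localhost", props);
                Connection b = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
            // Same URL + properties => the same ConnectionQueryServices instance.
            System.out.println(a.unwrap(PhoenixConnection.class).getQueryServices()
                    == b.unwrap(PhoenixConnection.class).getQueryServices()); // true
        }
    }
}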

Aggregations

ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 38
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23
Connection (java.sql.Connection): 14
SQLException (java.sql.SQLException): 12
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 9
PTable (org.apache.phoenix.schema.PTable): 9
Test (org.junit.Test): 9
ResultSet (java.sql.ResultSet): 8
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 8
ArrayList (java.util.ArrayList): 7
Properties (java.util.Properties): 7
PreparedStatement (java.sql.PreparedStatement): 5
Put (org.apache.hadoop.hbase.client.Put): 5
Hint (org.apache.phoenix.parse.HintNode.Hint): 5
Scan (org.apache.hadoop.hbase.client.Scan): 4
MutationState (org.apache.phoenix.execute.MutationState): 4
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 4
PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet): 4
DelegateConnectionQueryServices (org.apache.phoenix.query.DelegateConnectionQueryServices): 4
PColumn (org.apache.phoenix.schema.PColumn): 4