Example 71 with PTableKey

use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

the class ProjectionCompiler method projectAllIndexColumns.

private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns, List<? extends PDatum> targetColumns) throws SQLException {
    ColumnResolver resolver = context.getResolver();
    PTable index = tableRef.getTable();
    int projectedOffset = projectedExpressions.size();
    PhoenixConnection conn = context.getConnection();
    PName tenantId = conn.getTenantId();
    String tableName = index.getParentName().getString();
    PTable dataTable = null;
    try {
        dataTable = conn.getTable(new PTableKey(tenantId, tableName));
    } catch (TableNotFoundException e) {
        if (tenantId != null) {
            // Check with null tenantId
            dataTable = conn.getTable(new PTableKey(null, tableName));
        } else {
            throw e;
        }
    }
    int tableOffset = dataTable.getBucketNum() == null ? 0 : 1;
    int minTablePKOffset = getMinPKOffset(dataTable, tenantId);
    int minIndexPKOffset = getMinPKOffset(index, tenantId);
    if (index.getIndexType() != IndexType.LOCAL) {
        if (index.getColumns().size() - minIndexPKOffset != dataTable.getColumns().size() - minTablePKOffset) {
            // The index does not cover all data table columns, so the optimizer won't use this plan; just throw
            String schemaNameStr = dataTable.getSchemaName() == null ? null : dataTable.getSchemaName().getString();
            String tableNameStr = dataTable.getTableName() == null ? null : dataTable.getTableName().getString();
            throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, WildcardParseNode.INSTANCE.toString());
        }
    }
    for (int i = tableOffset, j = tableOffset; i < dataTable.getColumns().size(); i++) {
        PColumn column = dataTable.getColumns().get(i);
        // Skip tenant ID column (which may not be the first column, but is the first PK column)
        if (SchemaUtil.isPKColumn(column) && j++ < minTablePKOffset) {
            tableOffset++;
            continue;
        }
        PColumn tableColumn = dataTable.getColumns().get(i);
        String indexColName = IndexUtil.getIndexColumnName(tableColumn);
        PColumn indexColumn = null;
        ColumnRef ref = null;
        try {
            indexColumn = index.getColumnForColumnName(indexColName);
            ref = new ColumnRef(tableRef, indexColumn.getPosition());
        } catch (ColumnNotFoundException e) {
            if (index.getIndexType() == IndexType.LOCAL) {
                try {
                    ref = new LocalIndexDataColumnRef(context, indexColName);
                    indexColumn = ref.getColumn();
                } catch (ColumnFamilyNotFoundException c) {
                    throw e;
                }
            } else {
                throw e;
            }
        }
        String colName = tableColumn.getName().getString();
        String tableAlias = tableRef.getTableAlias();
        if (resolveColumn) {
            try {
                if (tableAlias != null) {
                    ref = resolver.resolveColumn(null, tableAlias, indexColName);
                } else {
                    String schemaName = index.getSchemaName().getString();
                    ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, index.getTableName().getString(), indexColName);
                }
            } catch (AmbiguousColumnException e) {
                if (indexColumn.getFamilyName() != null) {
                    ref = resolver.resolveColumn(tableAlias != null ? tableAlias : index.getTableName().getString(), indexColumn.getFamilyName().getString(), indexColName);
                } else {
                    throw e;
                }
            }
        }
        Expression expression = ref.newColumnExpression();
        expression = coerceIfNecessary(i - tableOffset + projectedOffset, targetColumns, expression);
        // We do not need to check if the column is a viewConstant, because view constants never
        // appear as a column in an index
        projectedExpressions.add(expression);
        boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
        ExpressionProjector projector = new ExpressionProjector(colName, tableRef.getTableAlias() == null ? dataTable.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive);
        projectedColumns.add(projector);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) LocalIndexDataColumnRef(org.apache.phoenix.schema.LocalIndexDataColumnRef) PTable(org.apache.phoenix.schema.PTable) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) PColumn(org.apache.phoenix.schema.PColumn) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) BaseTerminalExpression(org.apache.phoenix.expression.BaseTerminalExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) PName(org.apache.phoenix.schema.PName) ColumnRef(org.apache.phoenix.schema.ColumnRef) LocalIndexDataColumnRef(org.apache.phoenix.schema.LocalIndexDataColumnRef) AmbiguousColumnException(org.apache.phoenix.schema.AmbiguousColumnException) PTableKey(org.apache.phoenix.schema.PTableKey)
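
The pattern worth noting here is the tenant-aware lookup: the data table is resolved under the connection's tenant ID first and, if that misses, under the global (null-tenant) key. A minimal sketch of that lookup in isolation, assuming conn is a java.sql.Connection to Phoenix; the table name is illustrative:

PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
PName tenantId = phxConn.getTenantId();   // null on a non-tenant connection
PTable table;
try {
    table = phxConn.getTable(new PTableKey(tenantId, "MY_SCHEMA.MY_TABLE"));
} catch (TableNotFoundException e) {
    if (tenantId == null) {
        throw e;
    }
    // global tables are cached under a null tenant ID
    table = phxConn.getTable(new PTableKey(null, "MY_SCHEMA.MY_TABLE"));
}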

Example 72 with PTableKey

use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

the class ExplainPlanWithStatsEnabledIT method assertUseStatsForQueryFlag.

private static void assertUseStatsForQueryFlag(String tableName, PhoenixConnection conn, Boolean expected) throws TableNotFoundException, SQLException {
    assertEquals(expected, conn.unwrap(PhoenixConnection.class).getMetaDataCache().getTableRef(new PTableKey(null, tableName)).getTable().useStatsForParallelization());
    String query = "SELECT USE_STATS_FOR_PARALLELIZATION FROM SYSTEM.CATALOG WHERE TABLE_NAME = ? AND COLUMN_NAME IS NULL AND COLUMN_FAMILY IS NULL AND TENANT_ID IS NULL";
    PreparedStatement stmt = conn.prepareStatement(query);
    stmt.setString(1, tableName);
    ResultSet rs = stmt.executeQuery();
    rs.next();
    boolean b = rs.getBoolean(1);
    if (expected == null) {
        assertTrue(rs.wasNull());
    } else {
        assertEquals(expected, b);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultSet(java.sql.ResultSet) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) PreparedStatement(java.sql.PreparedStatement) PTableKey(org.apache.phoenix.schema.PTableKey)
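
The first assertion reads the property from the client-side metadata cache keyed by PTableKey rather than from SYSTEM.CATALOG. A minimal sketch of that cache lookup on its own, assuming the table has already been resolved on this connection (the helper name cachedUseStats is illustrative):

static Boolean cachedUseStats(Connection conn, String tableName) throws SQLException {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getMetaDataCache()
            .getTableRef(new PTableKey(null, tableName)) // null tenant ID = global table
            .getTable();
    return table.useStatsForParallelization();           // null when the property is unset
}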

Example 73 with PTableKey

use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

the class ExplainPlanWithStatsEnabledIT method testUseStatsForParallelizationProperyOnViewIndex.

@Test
public void testUseStatsForParallelizationProperyOnViewIndex() throws SQLException {
    String tableName = generateUniqueName();
    String viewName = generateUniqueName();
    String tenantViewName = generateUniqueName();
    String viewIndexName = generateUniqueName();
    boolean useStats = !DEFAULT_USE_STATS_FOR_PARALLELIZATION;
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        conn.createStatement().execute("create table " + tableName + "(tenantId CHAR(15) NOT NULL, pk1 integer NOT NULL, v varchar CONSTRAINT PK PRIMARY KEY " + "(tenantId, pk1)) MULTI_TENANT=true");
        try (Connection tenantConn = getTenantConnection("tenant1")) {
            conn.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName);
            conn.createStatement().execute("CREATE INDEX " + viewIndexName + " on " + viewName + " (v) ");
            tenantConn.createStatement().execute("CREATE VIEW " + tenantViewName + " AS SELECT * FROM " + viewName);
            conn.createStatement().execute("ALTER TABLE " + tableName + " set USE_STATS_FOR_PARALLELIZATION=" + useStats);
            // fetch the latest view ptable
            PhoenixRuntime.getTableNoCache(tenantConn, viewName);
            PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
            PTable viewIndex = phxConn.getTable(new PTableKey(phxConn.getTenantId(), viewIndexName));
            assertEquals("USE_STATS_FOR_PARALLELIZATION property set incorrectly", useStats, PhoenixConfigurationUtil.getStatsForParallelizationProp(tenantConn.unwrap(PhoenixConnection.class), viewIndex));
        }
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PTableKey(org.apache.phoenix.schema.PTableKey) PTable(org.apache.phoenix.schema.PTable) Test(org.junit.Test)
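
The view index is fetched on the global connection, so phxConn.getTenantId() is null and the lookup uses the global key. For tenant-owned views the key carries the tenant ID instead; a hedged sketch, assuming a tenant-specific connection opened with the TenantId connection property (presumably what the test's getTenantConnection helper does):

Properties props = new Properties();
props.setProperty("TenantId", "tenant1");   // "tenant1" is illustrative
try (Connection tenantConn = DriverManager.getConnection(getUrl(), props)) {
    PhoenixConnection tenantPhxConn = tenantConn.unwrap(PhoenixConnection.class);
    // tenant-owned views are cached under the connection's tenant PName
    PTable tenantView = tenantPhxConn.getTable(
            new PTableKey(tenantPhxConn.getTenantId(), tenantViewName));
}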

Example 74 with PTableKey

use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

the class PartialIndexRebuilderIT method mutateRandomly.

private static boolean mutateRandomly(Connection conn, String fullTableName, int nRows, boolean checkForInactive, String fullIndexName) throws SQLException, InterruptedException {
    PTableKey key = new PTableKey(null, fullTableName);
    PMetaData metaCache = conn.unwrap(PhoenixConnection.class).getMetaDataCache();
    boolean hasInactiveIndex = false;
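    // Use small commit batches while probing for the index to go INACTIVE; once an
    // inactive index is seen, stop checking and switch back to the large batch size.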
    int batchSize = 200;
    if (checkForInactive) {
        batchSize = 3;
    }
    for (int i = 0; i < 10000; i++) {
        int pk = Math.abs(RAND.nextInt()) % nRows;
        int v1 = Math.abs(RAND.nextInt()) % nRows;
        int v2 = Math.abs(RAND.nextInt()) % nRows;
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + pk + "," + v1 + "," + v2 + ")");
        if (i % batchSize == 0) {
            conn.commit();
            if (checkForInactive) {
                if (hasInactiveIndex(metaCache, key)) {
                    checkForInactive = false;
                    hasInactiveIndex = true;
                    batchSize = 200;
                }
            }
        }
    }
    conn.commit();
    for (int i = 0; i < 10000; i++) {
        int pk = Math.abs(RAND.nextInt()) % nRows;
        conn.createStatement().execute("DELETE FROM " + fullTableName + " WHERE k= " + pk);
        if (i % batchSize == 0) {
            conn.commit();
            if (checkForInactive) {
                if (hasInactiveIndex(metaCache, key)) {
                    checkForInactive = false;
                    hasInactiveIndex = true;
                    batchSize = 200;
                }
            }
        }
    }
    conn.commit();
    for (int i = 0; i < 10000; i++) {
        int pk = Math.abs(RAND.nextInt()) % nRows;
        int v1 = Math.abs(RAND.nextInt()) % nRows;
        int v2 = Math.abs(RAND.nextInt()) % nRows;
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + pk + "," + v1 + "," + v2 + ")");
        if (i % batchSize == 0) {
            conn.commit();
            if (checkForInactive) {
                if (hasInactiveIndex(metaCache, key)) {
                    checkForInactive = false;
                    hasInactiveIndex = true;
                    batchSize = 200;
                }
            }
        }
    }
    conn.commit();
    return hasInactiveIndex;
}
Also used : PMetaData(org.apache.phoenix.schema.PMetaData) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PTableKey(org.apache.phoenix.schema.PTableKey)
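
hasInactiveIndex(...) is a private helper of the test and is not shown above. A plausible sketch of such a check against the PMetaData cache (an assumption about the helper, not necessarily its actual implementation):

static boolean hasInactiveIndex(PMetaData metaCache, PTableKey key) throws SQLException {
    // Resolve the data table from the client-side cache and scan its index list
    PTable table = metaCache.getTableRef(key).getTable();
    for (PTable index : table.getIndexes()) {
        if (index.getIndexState() == PIndexState.INACTIVE) {
            return true;
        }
    }
    return false;
}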

Example 75 with PTableKey

use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

the class PartialIndexRebuilderIT method testBatchingDuringRebuild.

@Test
public void testBatchingDuringRebuild() throws Throwable {
    String schemaName = generateUniqueName();
    String tableName = generateUniqueName();
    String indexName = generateUniqueName();
    final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
    PTableKey key = new PTableKey(null, fullTableName);
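    // Inject a controllable clock so the test can advance time deterministically
    // across the index-disable, WAIT_AFTER_DISABLED and REBUILD_PERIOD boundaries.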
    final MyClock clock = new MyClock(1000);
    EnvironmentEdgeManager.injectEdge(clock);
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        PMetaData metaCache = conn.unwrap(PhoenixConnection.class).getMetaDataCache();
        conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true");
        clock.time += 100;
        conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1, v2)");
        clock.time += 100;
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0')");
        conn.commit();
        clock.time += 100;
        HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
        long disableTime = clock.currentTime();
        IndexUtil.updateIndexState(fullIndexName, disableTime, metaTable, PIndexState.DISABLE);
        clock.time += 100;
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('bb','bb', '11')");
        conn.commit();
        clock.time += REBUILD_PERIOD;
        assertTrue(hasDisabledIndex(metaCache, key));
        assertEquals(2, TestUtil.getRowCount(conn, fullTableName));
        assertEquals(1, TestUtil.getRowCount(conn, fullIndexName));
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('ccc','ccc','222')");
        conn.commit();
        assertEquals(3, TestUtil.getRowCount(conn, fullTableName));
        assertEquals(1, TestUtil.getRowCount(conn, fullIndexName));
        clock.time += 100;
        waitForIndexState(conn, fullTableName, fullIndexName, PIndexState.INACTIVE);
        clock.time += WAIT_AFTER_DISABLED;
        runIndexRebuilder(fullTableName);
        assertEquals(2, TestUtil.getRowCount(conn, fullIndexName));
        clock.time += REBUILD_PERIOD;
        runIndexRebuilder(fullTableName);
        // Verify that other batches were processed
        assertTrue(TestUtil.checkIndexState(conn, fullIndexName, PIndexState.ACTIVE, 0L));
        IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
    } finally {
        EnvironmentEdgeManager.injectEdge(null);
    }
}
Also used : PMetaData(org.apache.phoenix.schema.PMetaData) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PTableKey(org.apache.phoenix.schema.PTableKey) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Test(org.junit.Test)
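
hasDisabledIndex(...) and waitForIndexState(...) are likewise private test helpers not shown here. A sketch of what a direct state check on the cached index PTable might look like (an assumption; note the client cache can lag behind server-side state changes such as the IndexUtil.updateIndexState call above):

PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
PTable indexTable = phxConn.getTable(new PTableKey(null, fullIndexName));
boolean disabled = indexTable.getIndexState() == PIndexState.DISABLE;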

Aggregations

PTableKey (org.apache.phoenix.schema.PTableKey): 89 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 77 usages
PTable (org.apache.phoenix.schema.PTable): 55 usages
Connection (java.sql.Connection): 48 usages
Test (org.junit.Test): 40 usages
Properties (java.util.Properties): 23 usages
ResultSet (java.sql.ResultSet): 14 usages
PColumn (org.apache.phoenix.schema.PColumn): 14 usages
PreparedStatement (java.sql.PreparedStatement): 13 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12 usages
SQLException (java.sql.SQLException): 11 usages
TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException): 11 usages
PMetaData (org.apache.phoenix.schema.PMetaData): 10 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 9 usages
PName (org.apache.phoenix.schema.PName): 9 usages
BaseTest (org.apache.phoenix.query.BaseTest): 8 usages
Result (org.apache.hadoop.hbase.client.Result): 7 usages
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 7 usages
Scan (org.apache.hadoop.hbase.client.Scan): 7 usages
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 6 usages