
Example 1 with ColumnResolver

Use of org.apache.phoenix.compile.ColumnResolver in project phoenix by apache.

From the class QueryOptimizer, the method getApplicablePlans:

private List<QueryPlan> getApplicablePlans(QueryPlan dataPlan, PhoenixStatement statement, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, boolean stopAtBestPlan) throws SQLException {
    if (!useIndexes) {
        return Collections.singletonList(dataPlan);
    }
    if (dataPlan instanceof BaseQueryPlan) {
        return getApplicablePlans((BaseQueryPlan) dataPlan, statement, targetColumns, parallelIteratorFactory, stopAtBestPlan);
    }
    SelectStatement select = (SelectStatement) dataPlan.getStatement();
    ColumnResolver resolver = FromCompiler.getResolverForQuery(select, statement.getConnection());
    Map<TableRef, QueryPlan> dataPlans = null;
    // Find the optimal index plan for each join table in a join query or a
    // non-correlated sub-query, then rewrite the query with found index tables.
    if (select.isJoin() || (select.getWhere() != null && select.getWhere().hasSubquery())) {
        JoinCompiler.JoinTable join = JoinCompiler.compile(statement, select, resolver);
        Map<TableRef, TableRef> replacement = null;
        for (JoinCompiler.Table table : join.getTables()) {
            if (table.isSubselect())
                continue;
            TableRef tableRef = table.getTableRef();
            SelectStatement stmt = table.getAsSubqueryForOptimization(tableRef.equals(dataPlan.getTableRef()));
            // Replace non-correlated sub-queries in the WHERE clause with dummy values
            // so the filter conditions can be taken into account in optimization.
            if (stmt.getWhere() != null && stmt.getWhere().hasSubquery()) {
                StatementContext context = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));
                ParseNode dummyWhere = GenSubqueryParamValuesRewriter.replaceWithDummyValues(stmt.getWhere(), context);
                stmt = FACTORY.select(stmt, dummyWhere);
            }
            // TODO: It seems inefficient to be recompiling the statement again inside of this optimize call
            QueryPlan subDataPlan = new QueryCompiler(statement, stmt, FromCompiler.getResolverForQuery(stmt, statement.getConnection()), false, false, null).compile();
            QueryPlan subPlan = optimize(statement, subDataPlan);
            TableRef newTableRef = subPlan.getTableRef();
            if (!newTableRef.equals(tableRef)) {
                if (replacement == null) {
                    replacement = new HashMap<TableRef, TableRef>();
                    dataPlans = new HashMap<TableRef, QueryPlan>();
                }
                replacement.put(tableRef, newTableRef);
                dataPlans.put(newTableRef, subDataPlan);
            }
        }
        if (replacement != null) {
            select = rewriteQueryWithIndexReplacement(statement.getConnection(), resolver, select, replacement);
            resolver = FromCompiler.getResolverForQuery(select, statement.getConnection());
        }
    }
    // Re-compile the plan with option "optimizeSubquery" turned on, so that enclosed
    // sub-queries can be optimized recursively.
    QueryCompiler compiler = new QueryCompiler(statement, select, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), true, true, dataPlans);
    return Collections.singletonList(compiler.compile());
}
Also used: JoinCompiler(org.apache.phoenix.compile.JoinCompiler) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) QueryPlan(org.apache.phoenix.compile.QueryPlan) QueryCompiler(org.apache.phoenix.compile.QueryCompiler) SequenceManager(org.apache.phoenix.compile.SequenceManager) StatementContext(org.apache.phoenix.compile.StatementContext) SelectStatement(org.apache.phoenix.parse.SelectStatement) ColumnParseNode(org.apache.phoenix.parse.ColumnParseNode) AndParseNode(org.apache.phoenix.parse.AndParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) Scan(org.apache.hadoop.hbase.client.Scan) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) TableRef(org.apache.phoenix.schema.TableRef)
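
The pattern above reduces to: parse (or reuse) a SELECT, build a ColumnResolver for its FROM clause, and hand both to the compilers. A minimal standalone sketch of just the resolution step, assuming a hypothetical connection URL and an existing table MY_TABLE (the class and names here are illustrative, not part of Phoenix):

import java.sql.DriverManager;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;

public class ResolverForQuerySketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection URL and table name; adjust for your cluster.
        try (PhoenixConnection conn = DriverManager.getConnection("jdbc:phoenix:localhost")
                .unwrap(PhoenixConnection.class)) {
            // Parse the query text into a SelectStatement.
            SelectStatement select = new SQLParser("SELECT * FROM MY_TABLE").parseQuery();
            // The resolver binds the table and column names in the parse tree to
            // schema objects; QueryCompiler and JoinCompiler both require one.
            ColumnResolver resolver = FromCompiler.getResolverForQuery(select, conn);
            // Each resolved TableRef carries the PTable metadata.
            System.out.println(resolver.getTables().get(0).getTable().getName().getString());
        }
    }
}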

Example 2 with ColumnResolver

Use of org.apache.phoenix.compile.ColumnResolver in project phoenix by apache.

From the class BaseIndexIT, the test createIndexOnTableWithSpecifiedDefaultCF:

@Test
public void createIndexOnTableWithSpecifiedDefaultCF() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String tableName = "TBL_" + generateUniqueName();
    String indexName = "IND_" + generateUniqueName();
    String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
    String fullIndexName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        conn.setAutoCommit(false);
        String query;
        ResultSet rs;
        String ddl = "CREATE TABLE " + fullTableName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) DEFAULT_COLUMN_FAMILY='A'" + (!tableDDLOptions.isEmpty() ? "," + tableDDLOptions : "");
        Statement stmt = conn.createStatement();
        stmt.execute(ddl);
        query = "SELECT * FROM " + tableName;
        rs = conn.createStatement().executeQuery(query);
        assertFalse(rs.next());
        String options = localIndex ? "SALT_BUCKETS=10, MULTI_TENANT=true, IMMUTABLE_ROWS=true, DISABLE_WAL=true" : "";
        conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2) " + options);
        query = "SELECT * FROM " + fullIndexName;
        rs = conn.createStatement().executeQuery(query);
        assertFalse(rs.next());
        // check options set correctly on index
        TableName indexTableName = TableName.create(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
        NamedTableNode indexNode = NamedTableNode.create(null, indexTableName, null);
        ColumnResolver resolver = FromCompiler.getResolver(indexNode, conn.unwrap(PhoenixConnection.class));
        PTable indexTable = resolver.getTables().get(0).getTable();
        // Can't set IMMUTABLE_ROWS, MULTI_TENANT or DEFAULT_COLUMN_FAMILY_NAME on an index
        assertNull(indexTable.getDefaultFamilyName());
        assertFalse(indexTable.isMultiTenant());
        // Should match table
        assertEquals(mutable, !indexTable.isImmutableRows());
        if (localIndex) {
            assertEquals(10, indexTable.getBucketNum().intValue());
            assertTrue(indexTable.isWALDisabled());
        }
    }
}
Also used: TableName(org.apache.phoenix.parse.TableName) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PreparedStatement(java.sql.PreparedStatement) Statement(java.sql.Statement) Connection(java.sql.Connection) ResultSet(java.sql.ResultSet) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) Properties(java.util.Properties) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) PTable(org.apache.phoenix.schema.PTable) BaseTest(org.apache.phoenix.query.BaseTest) Test(org.junit.Test)
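
The metadata check in this test is reusable outside of it: build a NamedTableNode for any table or index and let FromCompiler resolve it to a PTable. A minimal sketch, assuming a hypothetical index MY_SCHEMA.MY_INDEX already exists on the cluster:

import java.sql.DriverManager;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.parse.NamedTableNode;
import org.apache.phoenix.parse.TableName;
import org.apache.phoenix.schema.PTable;

public class IndexMetadataSketch {
    public static void main(String[] args) throws Exception {
        try (PhoenixConnection conn = DriverManager.getConnection("jdbc:phoenix:localhost")
                .unwrap(PhoenixConnection.class)) {
            // Hypothetical schema and index names.
            TableName name = TableName.create("MY_SCHEMA", "MY_INDEX");
            NamedTableNode node = NamedTableNode.create(null, name, null);
            // Resolving the node fetches the PTable from the client cache or the server.
            ColumnResolver resolver = FromCompiler.getResolver(node, conn);
            PTable index = resolver.getTables().get(0).getTable();
            System.out.println(index.getType() + " immutable=" + index.isImmutableRows());
        }
    }
}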

Example 3 with ColumnResolver

Use of org.apache.phoenix.compile.ColumnResolver in project phoenix by apache.

From the class IndexUtil, the method rewriteViewStatement:

/**
 * Rewrite a view statement to be valid against an index.
 * @param conn connection used to compile and validate the rewritten WHERE clause
 * @param index the index to rewrite the view statement against
 * @param table the data table the view statement currently references
 * @param viewStatement the view's SELECT statement, or null
 * @return the rewritten view statement, or null if viewStatement is null
 * @throws SQLException if the rewritten WHERE clause fails to compile
 */
public static String rewriteViewStatement(PhoenixConnection conn, PTable index, PTable table, String viewStatement) throws SQLException {
    if (viewStatement == null) {
        return null;
    }
    SelectStatement select = new SQLParser(viewStatement).parseQuery();
    ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table));
    SelectStatement translatedSelect = IndexStatementRewriter.translate(select, resolver);
    ParseNode whereNode = translatedSelect.getWhere();
    PhoenixStatement statement = new PhoenixStatement(conn);
    TableRef indexTableRef = new TableRef(index) {

        @Override
        public String getColumnDisplayName(ColumnRef ref, boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
            return '"' + ref.getColumn().getName().getString() + '"';
        }
    };
    ColumnResolver indexResolver = FromCompiler.getResolver(indexTableRef);
    StatementContext context = new StatementContext(statement, indexResolver);
    // Compile to ensure validity
    WhereCompiler.compile(context, whereNode);
    StringBuilder buf = new StringBuilder();
    whereNode.toSQL(indexResolver, buf);
    return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(), buf.toString());
}
Also used: SelectStatement(org.apache.phoenix.parse.SelectStatement) SQLParser(org.apache.phoenix.parse.SQLParser) ParseNode(org.apache.phoenix.parse.ParseNode) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) TableRef(org.apache.phoenix.schema.TableRef) StatementContext(org.apache.phoenix.compile.StatementContext)
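
A condensed sketch of the translate-and-render steps above, omitting the WhereCompiler validation pass; dataTable and indexTable are assumed to be PTable instances resolved elsewhere (for example as in Example 2), and the class and method names are illustrative:

import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.compile.IndexStatementRewriter;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;

public class ViewRewriteSketch {
    // Translate a view's WHERE clause from data-table columns to index columns
    // and render it back to SQL text.
    static String rewriteWhere(PTable dataTable, PTable indexTable, String viewSql) throws Exception {
        SelectStatement select = new SQLParser(viewSql).parseQuery();
        // Resolve names against the data table first...
        ColumnResolver dataResolver = FromCompiler.getResolver(new TableRef(dataTable));
        // ...then rewrite each data-table column reference into its index equivalent.
        SelectStatement translated = IndexStatementRewriter.translate(select, dataResolver);
        ParseNode where = translated.getWhere();
        if (where == null) {
            return null; // the view statement has no WHERE clause
        }
        // Render the translated WHERE clause as SQL text against the index.
        StringBuilder buf = new StringBuilder();
        where.toSQL(FromCompiler.getResolver(new TableRef(indexTable)), buf);
        return buf.toString();
    }
}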

Example 4 with ColumnResolver

Use of org.apache.phoenix.compile.ColumnResolver in project phoenix by apache.

From the class MetaDataEndpointImpl, the method dropColumnsFromChildViews:

private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete, int clientVersion) throws IOException, SQLException {
    List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
    // Collect the Delete mutations that drop columns from this base table; they are
    // replayed against each child view below.
    for (Mutation m : tableMetadata) {
        if (m instanceof Delete) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnDeletesForBaseTable.add((Delete) m);
            }
        }
    }
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short numColsDeleted = 0;
        byte[] viewTenantId = viewInfo.getTenantId();
        byte[] viewSchemaName = viewInfo.getSchemaName();
        byte[] viewName = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
        // lock the rows corresponding to views so that no other thread can modify the view
        // meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock, clientVersion);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        int numCols = view.getColumns().size();
        int minDroppedColOrdinalPos = Integer.MAX_VALUE;
        for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
            // ignore since it means that the column family is not present for the column to
            // be dropped.
            } catch (ColumnNotFoundException e) {
            // ignore since it means the column is not present in the view
            }
            // Check if the view WHERE expression contains the column being dropped, and prevent
            // dropping it if so.
            if (existingViewColumn != null && view.getViewStatement() != null) {
                ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
                PhoenixConnection conn = null;
                try {
                    conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                } catch (ClassNotFoundException e) {
                    // ignore; the Phoenix driver class is expected to be present on the server
                }
                PhoenixStatement statement = new PhoenixStatement(conn);
                TableRef baseTableRef = new TableRef(basePhysicalTable);
                ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
                StatementContext context = new StatementContext(statement, columnResolver);
                Expression whereExpression = WhereCompiler.compile(context, viewWhere);
                Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
                ColumnFinder columnFinder = new ColumnFinder(colExpression);
                whereExpression.accept(columnFinder);
                if (columnFinder.getColumnFound()) {
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
                }
            }
            if (existingViewColumn != null) {
                minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
                --numColsDeleted;
                if (ordinalPositionList.size() == 0) {
                    ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
                    for (PColumn col : view.getColumns()) {
                        ordinalPositionList.addColumn(getColumnKey(viewKey, col));
                    }
                }
                ordinalPositionList.dropColumn(columnKey);
                Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
                mutationsForAddingColumnsToViews.add(viewColumnDelete);
                // drop any view indexes that need this column
                dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete, clientVersion);
            }
        }
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
    }
    return null;
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ArrayList(java.util.ArrayList) ByteString(com.google.protobuf.ByteString) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) StatementContext(org.apache.phoenix.compile.StatementContext) PColumn(org.apache.phoenix.schema.PColumn) ParseNode(org.apache.phoenix.parse.ParseNode) LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) SQLParser(org.apache.phoenix.parse.SQLParser) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) Mutation(org.apache.hadoop.hbase.client.Mutation) ColumnRef(org.apache.phoenix.schema.ColumnRef) TableRef(org.apache.phoenix.schema.TableRef)
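
The ColumnFinder visitor used above is private to MetaDataEndpointImpl, but the compilation step it depends on is plain ColumnResolver usage: parse the view statement, resolve against the base table, and compile the WHERE clause into an Expression tree that a visitor can walk. A minimal sketch of that step, with hypothetical class and parameter names:

import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.compile.WhereCompiler;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;

public class ViewWhereCheckSketch {
    // Compile a view's WHERE clause against its base table. The resulting
    // Expression tree is what a visitor (like the ColumnFinder above) walks
    // to test whether a given column is referenced.
    static Expression compileViewWhere(PhoenixConnection conn, PTable baseTable, String viewStatement)
            throws Exception {
        ParseNode where = new SQLParser(viewStatement).parseQuery().getWhere();
        ColumnResolver resolver = FromCompiler.getResolver(new TableRef(baseTable));
        StatementContext context = new StatementContext(new PhoenixStatement(conn), resolver);
        return WhereCompiler.compile(context, where);
    }
}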

Example 5 with ColumnResolver

Use of org.apache.phoenix.compile.ColumnResolver in project phoenix by apache.

From the class MetaDataClient, the method dropColumn:

public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        PName tenantId = connection.getTenantId();
        TableName tableNameNode = statement.getTable().getName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
        boolean retried = false;
        while (true) {
            final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
            TableRef tableRef = resolver.getTables().get(0);
            PTable table = tableRef.getTable();
            List<ColumnName> columnRefs = statement.getColumnRefs();
            if (columnRefs == null) {
                columnRefs = Lists.newArrayListWithCapacity(0);
            }
            List<ColumnRef> columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size());
            List<TableRef> indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
            List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size()));
            List<PColumn> tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
            for (ColumnName column : columnRefs) {
                ColumnRef columnRef = null;
                try {
                    columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName());
                } catch (ColumnNotFoundException e) {
                    if (statement.ifExists()) {
                        return new MutationState(0, 0, connection);
                    }
                    throw e;
                }
                PColumn columnToDrop = columnRef.getColumn();
                tableColumnsToDrop.add(columnToDrop);
                if (SchemaUtil.isPKColumn(columnToDrop)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK).setColumnName(columnToDrop.getName().getString()).build().buildException();
                } else if (table.isAppendOnlySchema()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA).setColumnName(columnToDrop.getName().getString()).build().buildException();
                }
                columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
            }
            dropColumnMutations(table, tableColumnsToDrop);
            boolean removedIndexTableOrColumn = false;
            Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
            for (PTable index : table.getIndexes()) {
                IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
                // get the covered columns
                List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
                Set<Pair<String, String>> indexedColsInfo = indexMaintainer.getIndexedColumnInfo();
                Set<ColumnReference> coveredCols = indexMaintainer.getCoveredColumns();
                for (PColumn columnToDrop : tableColumnsToDrop) {
                    Pair<String, String> columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString());
                    ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes());
                    boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo);
                    if (isColumnIndexed) {
                        if (index.getViewIndexId() == null) {
                            indexesToDrop.add(new TableRef(index));
                        }
                        connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
                        removedIndexTableOrColumn = true;
                    } else if (coveredCols.contains(colDropRef)) {
                        String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
                        PColumn indexColumn = index.getColumnForColumnName(indexColumnName);
                        indexColumnsToDrop.add(indexColumn);
                        // add the index column to be dropped so that we actually delete the column values
                        columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
                        removedIndexTableOrColumn = true;
                    }
                }
                if (!indexColumnsToDrop.isEmpty()) {
                    long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
                    dropColumnMutations(index, indexColumnsToDrop);
                    long clientTimestamp = MutationState.getTableTimestamp(timeStamp, connection.getSCN());
                    connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
                }
            }
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), null, null);
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            // Force table header to be first in list
            Collections.reverse(tableMetaData);
            /*
                 * Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column
                 * in a column family that was the empty column family. In that case, we have to pick another one. If there are no other
                 * ones, then we need to create our default empty column family. Note that this may no longer be necessary once we
                 * support declaring what the empty column family is on a table, as:
                 * - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it
                 * - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically updating
                 *    the empty column family name on the PTable.
                 */
            for (ColumnRef columnRefToDrop : columnsToDrop) {
                PTable tableContainingColumnToDrop = columnRefToDrop.getTable();
                byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn());
                if (emptyCF != null) {
                    try {
                        tableContainingColumnToDrop.getColumnFamily(emptyCF);
                    } catch (ColumnFamilyNotFoundException e) {
                        // Only if it's not already a column family do we need to ensure it's created
                        Map<String, List<Pair<String, Object>>> family = new HashMap<>(1);
                        family.put(Bytes.toString(emptyCF), Collections.<Pair<String, Object>>emptyList());
                        // Just use a Put without any key values as the Mutation, as addColumn will treat this specially
                        // TODO: pass through schema name and table name instead to these methods as it's cleaner
                        byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                        if (tenantIdBytes == null)
                            tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
                        connection.getQueryServices().addColumn(Collections.<Mutation>singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), tableContainingColumnToDrop.getTableName().getBytes()))), tableContainingColumnToDrop, family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.<PColumn>emptyList());
                    }
                }
            }
            MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, statement.getTableType());
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_NOT_FOUND) {
                    addTableToCache(result);
                    if (!statement.ifExists()) {
                        throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
                    }
                    return new MutationState(0, 0, connection);
                }
                // Update the client-side metadata cache: if an index table or column was
                // removed, evict the whole table and let the client pull it back over from
                // the server when needed.
                if (tableColumnsToDrop.size() > 0) {
                    if (removedIndexTableOrColumn)
                        connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                    else
                        connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
                }
                // If we have a VIEW, then only delete the metadata, and leave the table data alone
                if (table.getType() != PTableType.VIEW) {
                    MutationState state = null;
                    connection.setAutoCommit(true);
                    Long scn = connection.getSCN();
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    PostDDLCompiler compiler = new PostDDLCompiler(connection);
                    boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                    // if the index is a local index or view index it uses a shared physical table
                    // so we need to issue deletes markers for all the rows of the index
                    final List<TableRef> tableRefsToDrop = Lists.newArrayList();
                    Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
                    if (result.getSharedTablesToDelete() != null) {
                        for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
                            PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
                            TableRef indexTableRef = new TableRef(viewIndexTable);
                            PName indexTableTenantId = sharedTableState.getTenantId();
                            if (indexTableTenantId == null) {
                                tableRefsToDrop.add(indexTableRef);
                            } else {
                                if (!tenantIdTableRefMap.containsKey(indexTableTenantId)) {
                                    tenantIdTableRefMap.put(indexTableTenantId.getString(), Lists.<TableRef>newArrayList());
                                }
                                tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef);
                            }
                        }
                    }
                    // If we're not dropping metadata, issue the deletes for the dropped index tables here (otherwise
                    // they would have been dropped in ConnectionQueryServices.dropColumn)
                    if (!dropMetaData) {
                        tableRefsToDrop.addAll(indexesToDrop);
                    }
                    // Drop any index tables that had the dropped column in the PK
                    state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
                    // Drop any tenant-specific indexes
                    if (!tenantIdTableRefMap.isEmpty()) {
                        for (Entry<String, List<TableRef>> entry : tenantIdTableRefMap.entrySet()) {
                            String indexTenantId = entry.getKey();
                            Properties props = new Properties(connection.getClientInfo());
                            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
                            try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
                                PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
                                state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
                            }
                        }
                    }
                    // See https://issues.apache.org/jira/browse/PHOENIX-3605
                    if (!table.isImmutableRows() || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
                        // Update empty key value column if necessary
                        for (ColumnRef droppedColumnRef : columnsToDrop) {
                            // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
                            // to get any updates from the region server.
                            // TODO: move this into PostDDLCompiler
                            // TODO: consider filtering mutable indexes here, but then the issue is that
                            // we'd need to force an update of the data row empty key value if a mutable
                            // secondary index is changing its empty key value family.
                            droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
                            TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
                            PColumn droppedColumn = droppedColumnRef.getColumn();
                            MutationPlan plan = compiler.compile(Collections.singletonList(droppedColumnTableRef), getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), null, Collections.singletonList(droppedColumn), ts);
                            state = connection.getQueryServices().updateData(plan);
                        }
                    }
                    // Return the last MutationState
                    return state;
                }
                return new MutationState(0, 0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                table = connection.getTable(new PTableKey(tenantId, fullTableName));
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) Properties(java.util.Properties) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) ArrayList(java.util.ArrayList) List(java.util.List) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) Pair(org.apache.hadoop.hbase.util.Pair) MutationPlan(org.apache.phoenix.compile.MutationPlan) Put(org.apache.hadoop.hbase.client.Put) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) TableName(org.apache.phoenix.parse.TableName) ColumnName(org.apache.phoenix.parse.ColumnName) SharedTableState(org.apache.phoenix.coprocessor.MetaDataProtocol.SharedTableState) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
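
The resolver work in this method is concentrated in the first loop: each named column is resolved to a ColumnRef and PK columns are rejected. A minimal standalone sketch of that validation, assuming a PTable obtained elsewhere; the method name and the null-return IF EXISTS convention are illustrative simplifications:

import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.ColumnRef;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.util.SchemaUtil;

public class ResolveColumnSketch {
    // Resolve a (family, column) pair against a table and refuse PK columns,
    // mirroring the validation loop at the top of dropColumn above.
    // Returns null when the column doesn't exist (IF EXISTS semantics).
    static ColumnRef resolveForDrop(PTable table, String family, String column) throws Exception {
        ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table));
        ColumnRef ref;
        try {
            ref = resolver.resolveColumn(null, family, column);
        } catch (ColumnNotFoundException e) {
            return null;
        }
        PColumn toDrop = ref.getColumn();
        if (SchemaUtil.isPKColumn(toDrop)) {
            throw new IllegalArgumentException("Cannot drop PK column " + toDrop.getName().getString());
        }
        return ref;
    }
}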

Aggregations

ColumnResolver (org.apache.phoenix.compile.ColumnResolver): 13
StatementContext (org.apache.phoenix.compile.StatementContext): 8
PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement): 8
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 7
ParseNode (org.apache.phoenix.parse.ParseNode): 6
TableRef (org.apache.phoenix.schema.TableRef): 6
MutationState (org.apache.phoenix.execute.MutationState): 5
PTable (org.apache.phoenix.schema.PTable): 5
ArrayList (java.util.ArrayList): 4
List (java.util.List): 4
Properties (java.util.Properties): 4
Mutation (org.apache.hadoop.hbase.client.Mutation): 4
TableName (org.apache.phoenix.parse.TableName): 4
PreparedStatement (java.sql.PreparedStatement): 3
Expression (org.apache.phoenix.expression.Expression): 3
RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression): 3
PDataType (org.apache.phoenix.schema.types.PDataType): 3
ByteString (com.google.protobuf.ByteString): 2
Connection (java.sql.Connection): 2
ResultSet (java.sql.ResultSet): 2