Example 1 with MutationPlan

Use of org.apache.phoenix.compile.MutationPlan in project phoenix by apache.

From class MetaDataClient, method getMutationPlanForBuildingIndex:

private MutationPlan getMutationPlanForBuildingIndex(PTable index, TableRef dataTableRef) throws SQLException {
    MutationPlan mutationPlan;
    if (index.getIndexType() == IndexType.LOCAL) {
        PostLocalIndexDDLCompiler compiler = new PostLocalIndexDDLCompiler(connection, getFullTableName(dataTableRef));
        mutationPlan = compiler.compile(index);
    } else {
        PostIndexDDLCompiler compiler = new PostIndexDDLCompiler(connection, dataTableRef);
        mutationPlan = compiler.compile(index);
    }
    return mutationPlan;
}
Also used: PostIndexDDLCompiler (org.apache.phoenix.compile.PostIndexDDLCompiler), MutationPlan (org.apache.phoenix.compile.MutationPlan), PostLocalIndexDDLCompiler (org.apache.phoenix.compile.PostLocalIndexDDLCompiler)
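The plan compiled here is not executed in this method; callers hand it to the query services, as Examples 3 and 4 below show. A minimal consuming-side sketch, assuming the same connection field as above (every call used here also appears verbatim in the later examples):

MutationPlan plan = getMutationPlanForBuildingIndex(index, dataTableRef);
// the plan carries the scan that drives the index-population upsert select
Scan scan = plan.getContext().getScan();
// executing the plan returns a MutationState tracking the rows written
MutationState state = connection.getQueryServices().updateData(plan);
long rowsWritten = state.getUpdateCount();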

Example 2 with MutationPlan

Use of org.apache.phoenix.compile.MutationPlan in project phoenix by apache.

From class MetaDataClient, method dropColumn:

public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        PName tenantId = connection.getTenantId();
        TableName tableNameNode = statement.getTable().getName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
        boolean retried = false;
        while (true) {
            final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
            TableRef tableRef = resolver.getTables().get(0);
            PTable table = tableRef.getTable();
            List<ColumnName> columnRefs = statement.getColumnRefs();
            if (columnRefs == null) {
                columnRefs = Lists.newArrayListWithCapacity(0);
            }
            List<ColumnRef> columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size());
            List<TableRef> indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
            List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size()));
            List<PColumn> tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
            for (ColumnName column : columnRefs) {
                ColumnRef columnRef = null;
                try {
                    columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName());
                } catch (ColumnNotFoundException e) {
                    if (statement.ifExists()) {
                        return new MutationState(0, 0, connection);
                    }
                    throw e;
                }
                PColumn columnToDrop = columnRef.getColumn();
                tableColumnsToDrop.add(columnToDrop);
                if (SchemaUtil.isPKColumn(columnToDrop)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK).setColumnName(columnToDrop.getName().getString()).build().buildException();
                } else if (table.isAppendOnlySchema()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA).setColumnName(columnToDrop.getName().getString()).build().buildException();
                }
                columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
            }
            dropColumnMutations(table, tableColumnsToDrop);
            boolean removedIndexTableOrColumn = false;
            Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
            for (PTable index : table.getIndexes()) {
                IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
                // get the indexed and covered column info for this index
                List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
                Set<Pair<String, String>> indexedColsInfo = indexMaintainer.getIndexedColumnInfo();
                Set<ColumnReference> coveredCols = indexMaintainer.getCoveredColumns();
                for (PColumn columnToDrop : tableColumnsToDrop) {
                    Pair<String, String> columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString());
                    ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes());
                    boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo);
                    if (isColumnIndexed) {
                        if (index.getViewIndexId() == null) {
                            indexesToDrop.add(new TableRef(index));
                        }
                        connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
                        removedIndexTableOrColumn = true;
                    } else if (coveredCols.contains(colDropRef)) {
                        String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
                        PColumn indexColumn = index.getColumnForColumnName(indexColumnName);
                        indexColumnsToDrop.add(indexColumn);
                        // add the index column to be dropped so that we actually delete the column values
                        columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
                        removedIndexTableOrColumn = true;
                    }
                }
                if (!indexColumnsToDrop.isEmpty()) {
                    long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
                    dropColumnMutations(index, indexColumnsToDrop);
                    long clientTimestamp = MutationState.getMutationTimestamp(timeStamp, connection.getSCN());
                    connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
                }
            }
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), null, null);
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            // Force table header to be first in list
            Collections.reverse(tableMetaData);
            /*
                 * Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column
                 * in a column family that was the empty column family. In that case, we have to pick another one. If there are no other
                 * ones, then we need to create our default empty column family. Note that this may no longer be necessary once we
                 * support declaring what the empty column family is on a table, as:
                 * - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it
                 * - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically updating
                 *    the empty column family name on the PTable.
                 */
            for (ColumnRef columnRefToDrop : columnsToDrop) {
                PTable tableContainingColumnToDrop = columnRefToDrop.getTable();
                byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn());
                if (emptyCF != null) {
                    try {
                        tableContainingColumnToDrop.getColumnFamily(emptyCF);
                    } catch (ColumnFamilyNotFoundException e) {
                        // Only if it's not already a column family do we need to ensure it's created
                        Map<String, List<Pair<String, Object>>> family = new HashMap<>(1);
                        family.put(Bytes.toString(emptyCF), Collections.<Pair<String, Object>>emptyList());
                        // Just use a Put without any key values as the Mutation, as addColumn will treat this specially
                        // TODO: pass through schema name and table name instead to these methods as it's cleaner
                        byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                        if (tenantIdBytes == null) {
                            tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
                        }
                        connection.getQueryServices().addColumn(Collections.<Mutation>singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), tableContainingColumnToDrop.getTableName().getBytes()))), tableContainingColumnToDrop, family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.<PColumn>emptyList());
                    }
                }
            }
            MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, statement.getTableType());
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_NOT_FOUND) {
                    addTableToCache(result);
                    if (!statement.ifExists()) {
                        throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
                    }
                    return new MutationState(0, 0, connection);
                }
                // Update the client-side metadata cache: if an index table or index column was
                // removed, invalidate the whole cached table so it is re-fetched from the
                // server when needed; otherwise just drop the columns from the cached table.
                if (tableColumnsToDrop.size() > 0) {
                    if (removedIndexTableOrColumn) {
                        connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                    } else {
                        connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
                    }
                }
                // If we have a VIEW, then only delete the metadata, and leave the table data alone
                if (table.getType() != PTableType.VIEW) {
                    MutationState state = null;
                    connection.setAutoCommit(true);
                    Long scn = connection.getSCN();
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    PostDDLCompiler compiler = new PostDDLCompiler(connection);
                    boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                    // if the index is a local index or view index it uses a shared physical table
                    // so we need to issue deletes markers for all the rows of the index
                    final List<TableRef> tableRefsToDrop = Lists.newArrayList();
                    Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
                    if (result.getSharedTablesToDelete() != null) {
                        for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
                            PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
                            TableRef indexTableRef = new TableRef(viewIndexTable);
                            PName indexTableTenantId = sharedTableState.getTenantId();
                            if (indexTableTenantId == null) {
                                tableRefsToDrop.add(indexTableRef);
                            } else {
                                if (!tenantIdTableRefMap.containsKey(indexTableTenantId.getString())) {
                                    tenantIdTableRefMap.put(indexTableTenantId.getString(), Lists.<TableRef>newArrayList());
                                }
                                tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef);
                            }
                        }
                    }
                    // if dropMetaData is false, delete all rows for the indexes (if it was true,
                    // they would have been dropped in ConnectionQueryServices.dropColumn)
                    if (!dropMetaData) {
                        tableRefsToDrop.addAll(indexesToDrop);
                    }
                    // Drop any index tables that had the dropped column in the PK
                    state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
                    // Drop any tenant-specific indexes
                    if (!tenantIdTableRefMap.isEmpty()) {
                        for (Entry<String, List<TableRef>> entry : tenantIdTableRefMap.entrySet()) {
                            String indexTenantId = entry.getKey();
                            Properties props = new Properties(connection.getClientInfo());
                            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
                            try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
                                PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
                                state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
                            }
                        }
                    }
                    // See https://issues.apache.org/jira/browse/PHOENIX-3605
                    if (!table.isImmutableRows() || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
                        // Update empty key value column if necessary
                        for (ColumnRef droppedColumnRef : columnsToDrop) {
                            // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
                            // to get any updates from the region server.
                            // TODO: move this into PostDDLCompiler
                            // TODO: consider filtering mutable indexes here, but then the issue is that
                            // we'd need to force an update of the data row empty key value if a mutable
                            // secondary index is changing its empty key value family.
                            droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
                            TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
                            PColumn droppedColumn = droppedColumnRef.getColumn();
                            MutationPlan plan = compiler.compile(Collections.singletonList(droppedColumnTableRef), getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), null, Collections.singletonList(droppedColumn), ts);
                            state = connection.getQueryServices().updateData(plan);
                        }
                    }
                    // Return the last MutationState
                    return state;
                }
                return new MutationState(0, 0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                table = connection.getTable(new PTableKey(tenantId, fullTableName));
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), PostDDLCompiler (org.apache.phoenix.compile.PostDDLCompiler), Properties (java.util.Properties), IndexMaintainer (org.apache.phoenix.index.IndexMaintainer), ArrayList (java.util.ArrayList), List (java.util.List), SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo), MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult), ColumnResolver (org.apache.phoenix.compile.ColumnResolver), Pair (org.apache.hadoop.hbase.util.Pair), MutationPlan (org.apache.phoenix.compile.MutationPlan), Put (org.apache.hadoop.hbase.client.Put), MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode), TableName (org.apache.phoenix.parse.TableName), ColumnName (org.apache.phoenix.parse.ColumnName), SharedTableState (org.apache.phoenix.coprocessor.MetaDataProtocol.SharedTableState), MutationState (org.apache.phoenix.execute.MutationState), PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong), PLong (org.apache.phoenix.schema.types.PLong), Mutation (org.apache.hadoop.hbase.client.Mutation), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
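Note the shape of the outer loop above: the entire drop is retried exactly once if another client mutated the table concurrently, after refreshing the client-side table cache. A condensed sketch of just that pattern, with doDropWork standing in (hypothetically) for the metadata work shown in the example:

boolean retried = false;
while (true) {
    // re-resolve on every attempt so each try sees the latest table metadata
    final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
    PTable table = resolver.getTables().get(0).getTable();
    try {
        return doDropWork(table); // hypothetical stand-in for the body above
    } catch (ConcurrentTableMutationException e) {
        if (retried) {
            throw e; // second conflict in a row: surface it to the caller
        }
        // refresh the cached PTable and go around the loop once more
        table = connection.getTable(new PTableKey(tenantId, fullTableName));
        retried = true;
    }
}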

Example 3 with MutationPlan

Use of org.apache.phoenix.compile.MutationPlan in project phoenix by apache.

From class MetaDataClient, method buildIndex:

private MutationState buildIndex(PTable index, TableRef dataTableRef) throws SQLException {
    AlterIndexStatement indexStatement = null;
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(true);
        MutationPlan mutationPlan = getMutationPlanForBuildingIndex(index, dataTableRef);
        Scan scan = mutationPlan.getContext().getScan();
        Long scn = connection.getSCN();
        try {
            if (ScanUtil.isDefaultTimeRange(scan.getTimeRange())) {
                if (scn == null) {
                    scn = mutationPlan.getContext().getCurrentTime();
                }
                scan.setTimeRange(dataTableRef.getLowerBoundTimeStamp(), scn);
            }
        } catch (IOException e) {
            throw new SQLException(e);
        }
        // execute index population upsert select
        long startTime = System.currentTimeMillis();
        MutationState state = connection.getQueryServices().updateData(mutationPlan);
        long firstUpsertSelectTime = System.currentTimeMillis() - startTime;
        // for global indexes on non transactional tables we might have to
        // run a second index population upsert select to handle data rows
        // that were being written on the server while the index was created
        long sleepTime = connection.getQueryServices().getProps().getLong(QueryServices.INDEX_POPULATION_SLEEP_TIME, QueryServicesOptions.DEFAULT_INDEX_POPULATION_SLEEP_TIME);
        if (!dataTableRef.getTable().isTransactional() && sleepTime > 0) {
            long delta = sleepTime - firstUpsertSelectTime;
            if (delta > 0) {
                try {
                    Thread.sleep(delta);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
                }
            }
            // set the min timestamp of second index upsert select some time before the index
            // was created
            long minTimestamp = index.getTimeStamp() - firstUpsertSelectTime;
            try {
                mutationPlan.getContext().getScan().setTimeRange(minTimestamp, scn);
            } catch (IOException e) {
                throw new SQLException(e);
            }
            MutationState newMutationState = connection.getQueryServices().updateData(mutationPlan);
            state.join(newMutationState);
        }
        indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null, TableName.create(index.getSchemaName().getString(), index.getTableName().getString())), dataTableRef.getTable().getTableName().getString(), false, PIndexState.ACTIVE);
        alterIndex(indexStatement);
        return state;
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used: MutationState (org.apache.phoenix.execute.MutationState), SQLException (java.sql.SQLException), PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong), PLong (org.apache.phoenix.schema.types.PLong), AlterIndexStatement (org.apache.phoenix.parse.AlterIndexStatement), Scan (org.apache.hadoop.hbase.client.Scan), IOException (java.io.IOException), MutationPlan (org.apache.phoenix.compile.MutationPlan)
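The second population pass exists because, on non-transactional tables, servers may still be writing data rows while the index is being created; the wait before that pass is governed by QueryServices.INDEX_POPULATION_SLEEP_TIME. A hedged sketch of tuning it at connection time (the JDBC URL and the 5000 ms value are illustrative assumptions):

Properties props = new Properties();
// milliseconds to wait before the second index-population upsert select
props.setProperty(QueryServices.INDEX_POPULATION_SLEEP_TIME, "5000");
Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181", props);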

Example 4 with MutationPlan

Use of org.apache.phoenix.compile.MutationPlan in project phoenix by apache.

From class MetaDataClient, method buildIndex (time-range overload):

/**
     * For new mutations only. This should not be used if deletes were done in the data table
     * between the start time and end time passed to the method.
     */
public MutationState buildIndex(PTable index, TableRef dataTableRef, long startTime, long endTime) throws SQLException {
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null, TableName.create(index.getSchemaName().getString(), index.getTableName().getString())), dataTableRef.getTable().getTableName().getString(), false, PIndexState.INACTIVE);
        alterIndex(indexStatement);
        connection.setAutoCommit(true);
        MutationPlan mutationPlan = getMutationPlanForBuildingIndex(index, dataTableRef);
        Scan scan = mutationPlan.getContext().getScan();
        try {
            scan.setTimeRange(startTime, endTime);
        } catch (IOException e) {
            throw new SQLException(e);
        }
        MutationState state = connection.getQueryServices().updateData(mutationPlan);
        indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null, TableName.create(index.getSchemaName().getString(), index.getTableName().getString())), dataTableRef.getTable().getTableName().getString(), false, PIndexState.ACTIVE);
        alterIndex(indexStatement);
        return state;
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used: MutationState (org.apache.phoenix.execute.MutationState), SQLException (java.sql.SQLException), AlterIndexStatement (org.apache.phoenix.parse.AlterIndexStatement), Scan (org.apache.hadoop.hbase.client.Scan), IOException (java.io.IOException), MutationPlan (org.apache.phoenix.compile.MutationPlan)
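Unlike the overload in Example 3, this variant rebuilds only the rows written inside a caller-supplied window, marking the index INACTIVE first and ACTIVE when done. A hedged call-site sketch (the window bounds and the metaDataClient variable are assumptions):

// rebuild only the rows written while the index was out of sync;
// per the Javadoc, this is unsafe if deletes hit the data table in the window
long startTime = indexDisableTimestamp; // assumed: when index writes began failing
long endTime = EnvironmentEdgeManager.currentTimeMillis();
MutationState state = metaDataClient.buildIndex(index, dataTableRef, startTime, endTime);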

Example 5 with MutationPlan

Use of org.apache.phoenix.compile.MutationPlan in project phoenix by apache.

From class MetaDataClient, method updateStatisticsInternal:

private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map<String, Object> statsProps, List<byte[]> cfs) throws SQLException {
    ReadOnlyProps props = connection.getQueryServices().getProps();
    final long msMinBetweenUpdates = props.getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB, props.getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS) / 2);
    byte[] tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
    Long scn = connection.getSCN();
    // Always invalidate the cache
    long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
    String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
    ResultSet rs = connection.createStatement().executeQuery(query);
    long msSinceLastUpdate = Long.MAX_VALUE;
    if (rs.next()) {
        msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
    }
    long rowCount = 0;
    if (msSinceLastUpdate >= msMinBetweenUpdates) {
        /*
             * Execute a COUNT(*) through PostDDLCompiler as we need to use the logicalTable passed through,
             * since it may not represent a "real" table in the case of the view indexes of a base table.
             */
        PostDDLCompiler compiler = new PostDDLCompiler(connection);
        // Even if the table is transactional, we scan it non-transactionally while calculating
        // stats so that we see all the data belonging to the table.
        PTable nonTxnLogicalTable = new DelegateTable(logicalTable) {

            @Override
            public boolean isTransactional() {
                return false;
            }
        };
        TableRef tableRef = new TableRef(null, nonTxnLogicalTable, clientTimeStamp, false);
        MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), null, cfs, null, clientTimeStamp);
        Scan scan = plan.getContext().getScan();
        scan.setCacheBlocks(false);
        scan.setAttribute(ANALYZE_TABLE, TRUE_BYTES);
        boolean runUpdateStatsAsync = props.getBoolean(QueryServices.RUN_UPDATE_STATS_ASYNC, DEFAULT_RUN_UPDATE_STATS_ASYNC);
        scan.setAttribute(RUN_UPDATE_STATS_ASYNC_ATTRIB, runUpdateStatsAsync ? TRUE_BYTES : FALSE_BYTES);
        if (statsProps != null) {
            Object gpWidth = statsProps.get(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB);
            if (gpWidth != null) {
                scan.setAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES, PLong.INSTANCE.toBytes(gpWidth));
            }
            Object gpPerRegion = statsProps.get(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB);
            if (gpPerRegion != null) {
                scan.setAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION, PInteger.INSTANCE.toBytes(gpPerRegion));
            }
        }
        MutationState mutationState = plan.execute();
        rowCount = mutationState.getUpdateCount();
    }
    /*
         *  Update the stats table so that client will pull the new one with the updated stats.
         *  Even if we don't run the command due to the last update time, invalidate the cache.
         *  This supports scenarios in which a major compaction was manually initiated and the
         *  client wants the modified stats to be reflected immediately.
         */
    if (cfs == null) {
        List<PColumnFamily> families = logicalTable.getColumnFamilies();
        if (families.isEmpty()) {
            connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), SchemaUtil.getEmptyColumnFamily(logicalTable)));
        } else {
            for (PColumnFamily family : families) {
                connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), family.getName().getBytes()));
            }
        }
    } else {
        for (byte[] cf : cfs) {
            connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), cf));
        }
    }
    return rowCount;
}
Also used: GuidePostsKey (org.apache.phoenix.schema.stats.GuidePostsKey), PostDDLCompiler (org.apache.phoenix.compile.PostDDLCompiler), MutationPlan (org.apache.phoenix.compile.MutationPlan), ReadOnlyProps (org.apache.phoenix.util.ReadOnlyProps), MutationState (org.apache.phoenix.execute.MutationState), PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong), PLong (org.apache.phoenix.schema.types.PLong), ResultSet (java.sql.ResultSet), Scan (org.apache.hadoop.hbase.client.Scan)
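The throttle at the top of this method is easy to miss: stats are recomputed only if at least msMinBetweenUpdates has elapsed, which defaults to half of STATS_UPDATE_FREQ_MS_ATTRIB unless MIN_STATS_UPDATE_FREQ_MS_ATTRIB overrides it. A small sketch of that check with assumed values:

// assumed: stats update frequency left at 15 minutes, no explicit minimum configured
long statsUpdateFreqMs = 15 * 60 * 1000L;
long msMinBetweenUpdates = statsUpdateFreqMs / 2;   // 7.5 minutes
long currentDate = rs.getLong(1);                   // SELECT CURRENT_DATE() ...
long lastStatsUpdateTime = rs.getLong(2);           // ... LAST_STATS_UPDATE_TIME
if (currentDate - lastStatsUpdateTime >= msMinBetweenUpdates) {
    // run the COUNT(*) / ANALYZE scan through PostDDLCompiler as shown above
}
// the guideposts cache is invalidated afterwards either way, so stats changed by a
// manually triggered major compaction show up immediately even when the scan is skipped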

Aggregations

MutationPlan (org.apache.phoenix.compile.MutationPlan): 10
MutationState (org.apache.phoenix.execute.MutationState): 9
PLong (org.apache.phoenix.schema.types.PLong): 7
PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong): 7
PostDDLCompiler (org.apache.phoenix.compile.PostDDLCompiler): 6
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 5
Mutation (org.apache.hadoop.hbase.client.Mutation): 4
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 4
IOException (java.io.IOException): 3
PreparedStatement (java.sql.PreparedStatement): 3
SQLException (java.sql.SQLException): 3
ArrayList (java.util.ArrayList): 3
Scan (org.apache.hadoop.hbase.client.Scan): 3
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 3
HashMap (java.util.HashMap): 2
LinkedHashMap (java.util.LinkedHashMap): 2
List (java.util.List): 2
Pair (org.apache.hadoop.hbase.util.Pair): 2
ColumnResolver (org.apache.phoenix.compile.ColumnResolver): 2
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 2