
Example 86 with PColumn

Use of org.apache.phoenix.schema.PColumn in the Apache Phoenix project.

From class ConnectionQueryServicesImpl, the method addColumnQualifierColumn:

// Special method for adding the column qualifier column for 4.10.
private PhoenixConnection addColumnQualifierColumn(PhoenixConnection oldMetaConnection, Long timestamp) throws SQLException {
    Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo());
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
    // Cannot go through DriverManager or you end up in an infinite loop because it'll call init again
    PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props);
    PTable sysCatalogPTable = metaConnection.getTable(new PTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
    int numColumns = sysCatalogPTable.getColumns().size();
    try (PreparedStatement mutateTable = metaConnection.prepareStatement(MetaDataClient.MUTATE_TABLE)) {
        mutateTable.setString(1, null);
        mutateTable.setString(2, SYSTEM_CATALOG_SCHEMA);
        mutateTable.setString(3, SYSTEM_CATALOG_TABLE);
        mutateTable.setString(4, PTableType.SYSTEM.getSerializedValue());
        mutateTable.setLong(5, sysCatalogPTable.getSequenceNumber() + 1);
        mutateTable.setInt(6, numColumns + 1);
        mutateTable.execute();
    }
    List<Mutation> tableMetadata = new ArrayList<>();
    tableMetadata.addAll(metaConnection.getMutationState().toMutations(metaConnection.getSCN()).next().getSecond());
    metaConnection.rollback();
    PColumn column = new PColumnImpl(PNameFactory.newName("COLUMN_QUALIFIER"), PNameFactory.newName(DEFAULT_COLUMN_FAMILY_NAME), PVarbinary.INSTANCE, null, null, true, numColumns, SortOrder.ASC, null, null, false, null, false, false, Bytes.toBytes("COLUMN_QUALIFIER"));
    String upsertColumnMetadata = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "," + DATA_TYPE + "," + NULLABLE + "," + COLUMN_SIZE + "," + DECIMAL_DIGITS + "," + ORDINAL_POSITION + "," + SORT_ORDER + "," + DATA_TABLE_NAME + "," + ARRAY_SIZE + "," + VIEW_CONSTANT + "," + IS_VIEW_REFERENCED + "," + PK_NAME + "," + KEY_SEQ + "," + COLUMN_DEF + "," + IS_ROW_TIMESTAMP + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
    try (PreparedStatement colUpsert = metaConnection.prepareStatement(upsertColumnMetadata)) {
        colUpsert.setString(1, null);
        colUpsert.setString(2, SYSTEM_CATALOG_SCHEMA);
        colUpsert.setString(3, SYSTEM_CATALOG_TABLE);
        colUpsert.setString(4, "COLUMN_QUALIFIER");
        colUpsert.setString(5, DEFAULT_COLUMN_FAMILY);
        colUpsert.setInt(6, column.getDataType().getSqlType());
        colUpsert.setInt(7, ResultSetMetaData.columnNullable);
        colUpsert.setNull(8, Types.INTEGER);
        colUpsert.setNull(9, Types.INTEGER);
        colUpsert.setInt(10, sysCatalogPTable.getBucketNum() != null ? numColumns : (numColumns + 1));
        colUpsert.setInt(11, SortOrder.ASC.getSystemValue());
        colUpsert.setString(12, null);
        colUpsert.setNull(13, Types.INTEGER);
        colUpsert.setBytes(14, null);
        colUpsert.setBoolean(15, false);
        colUpsert.setString(16, sysCatalogPTable.getPKName() == null ? null : sysCatalogPTable.getPKName().getString());
        colUpsert.setNull(17, Types.SMALLINT);
        colUpsert.setNull(18, Types.VARCHAR);
        colUpsert.setBoolean(19, false);
        colUpsert.execute();
    }
    tableMetadata.addAll(metaConnection.getMutationState().toMutations(metaConnection.getSCN()).next().getSecond());
    metaConnection.rollback();
    metaConnection.getQueryServices().addColumn(tableMetadata, sysCatalogPTable, Collections.<String, List<Pair<String, Object>>>emptyMap(), Collections.<String>emptySet(), Lists.newArrayList(column));
    metaConnection.removeTable(null, SYSTEM_CATALOG_NAME, null, timestamp);
    ConnectionQueryServicesImpl.this.removeTable(null, SYSTEM_CATALOG_NAME, null, timestamp);
    clearCache();
    return metaConnection;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PColumnImpl(org.apache.phoenix.schema.PColumnImpl) ArrayList(java.util.ArrayList) PreparedStatement(java.sql.PreparedStatement) Properties(java.util.Properties) PTable(org.apache.phoenix.schema.PTable) PTinyint(org.apache.phoenix.schema.types.PTinyint) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) PColumn(org.apache.phoenix.schema.PColumn) Mutation(org.apache.hadoop.hbase.client.Mutation) PTableKey(org.apache.phoenix.schema.PTableKey) Pair(org.apache.hadoop.hbase.util.Pair)
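
The 15-argument PColumnImpl constructor in the snippet above is easy to misread. The sketch below repeats the same call with each positional argument labelled; the labels are inferred from this call site and from the column-metadata upsert that follows, so treat them as a reading aid rather than the authoritative parameter names, which may differ across Phoenix versions. numColumns and DEFAULT_COLUMN_FAMILY_NAME come from the surrounding method.

// Annotated repeat of the PColumnImpl call above; argument labels are inferred, not authoritative.
PColumn column = new PColumnImpl(
    PNameFactory.newName("COLUMN_QUALIFIER"),           // column name
    PNameFactory.newName(DEFAULT_COLUMN_FAMILY_NAME),   // column family name
    PVarbinary.INSTANCE,                                 // data type
    null,                                                // max length
    null,                                                // scale
    true,                                                // nullable
    numColumns,                                          // ordinal position
    SortOrder.ASC,                                       // sort order
    null,                                                // array size
    null,                                                // view constant
    false,                                               // is view referenced
    null,                                                // expression string
    false,                                               // is row timestamp
    false,                                               // is dynamic
    Bytes.toBytes("COLUMN_QUALIFIER"));                  // column qualifier bytes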

Example 87 with PColumn

Use of org.apache.phoenix.schema.PColumn in the Apache Phoenix project.

From class DeleteCompiler, the method compile:

public MutationPlan compile(DeleteStatement delete) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final boolean isAutoCommit = connection.getAutoCommit();
    final boolean hasPostProcessing = delete.getLimit() != null;
    final ConnectionQueryServices services = connection.getQueryServices();
    List<QueryPlan> queryPlans;
    NamedTableNode tableNode = delete.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    SelectStatement select = null;
    ColumnResolver resolverToBe = null;
    DeletingParallelIteratorFactory parallelIteratorFactoryToBe;
    resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
    final TableRef targetTableRef = resolverToBe.getTables().get(0);
    PTable table = targetTableRef.getTable();
    // TODO: SchemaUtil.isReadOnly(PTable, connection)?
    if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(schemaName, tableName);
    } else if (table.isTransactional() && connection.getSCN() != null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    }
    List<PTable> clientSideIndexes = getClientSideMaintainedIndexes(targetTableRef);
    final boolean hasClientSideIndexes = !clientSideIndexes.isEmpty();
    boolean isSalted = table.getBucketNum() != null;
    boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int pkColumnOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
    final int pkColumnCount = table.getPKColumns().size() - pkColumnOffset;
    int selectColumnCount = pkColumnCount;
    for (PTable index : clientSideIndexes) {
        selectColumnCount += index.getPKColumns().size() - pkColumnCount;
    }
    Set<PColumn> projectedColumns = new LinkedHashSet<PColumn>(selectColumnCount + pkColumnOffset);
    List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount);
    for (int i = isSalted ? 1 : 0; i < pkColumnOffset; i++) {
        PColumn column = table.getPKColumns().get(i);
        projectedColumns.add(column);
    }
    for (int i = pkColumnOffset; i < table.getPKColumns().size(); i++) {
        PColumn column = table.getPKColumns().get(i);
        projectedColumns.add(column);
        aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
    }
    // Project all non PK indexed columns so that we can do the proper index maintenance
    for (PTable index : table.getIndexes()) {
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        // Go through maintainer as it handles functional indexes correctly
        for (Pair<String, String> columnInfo : maintainer.getIndexedColumnInfo()) {
            String familyName = columnInfo.getFirst();
            if (familyName != null) {
                String columnName = columnInfo.getSecond();
                boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
                PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
                if (!projectedColumns.contains(column)) {
                    projectedColumns.add(column);
                    aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
                }
            }
        }
    }
    select = FACTORY.select(delete.getTable(), delete.getHint(), false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
    select = StatementNormalizer.normalize(select, resolverToBe);
    SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
    boolean hasPreProcessing = transformedSelect != select;
    if (transformedSelect != select) {
        resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
        select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
    }
    final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
    boolean noQueryReqd = !hasPreOrPostProcessing;
    // No limit and no sub queries, joins, etc in where clause
    // Can't run on same server for transactional data, as we need the row keys for the data
    // that is being upserted for conflict detection purposes.
    // If we have immutable indexes, we'd increase the number of bytes scanned by executing
    // separate queries against each index, so better to drive from a single table in that case.
    boolean runOnServer = isAutoCommit && !hasPreOrPostProcessing && !table.isTransactional() && !hasClientSideIndexes;
    HintNode hint = delete.getHint();
    if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
        select = SelectStatement.create(select, HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE));
    }
    parallelIteratorFactoryToBe = hasPreOrPostProcessing ? null : new DeletingParallelIteratorFactory(connection);
    QueryOptimizer optimizer = new QueryOptimizer(services);
    QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe, new SequenceManager(statement));
    final QueryPlan dataPlan = compiler.compile();
    // TODO: the select clause should know that there's a sub query, but doesn't seem to currently
    queryPlans = Lists.newArrayList(!clientSideIndexes.isEmpty() ? optimizer.getApplicablePlans(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe) : optimizer.getBestPlan(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe));
    // Filter out any local indexes that don't contain all indexed columns.
    // We have to do this manually because local indexes are still used
    // when referenced columns aren't in the index, so they won't be
    // filtered by the optimizer.
    queryPlans = new ArrayList<>(queryPlans);
    Iterator<QueryPlan> iterator = queryPlans.iterator();
    while (iterator.hasNext()) {
        QueryPlan plan = iterator.next();
        if (plan.getTableRef().getTable().getIndexType() == IndexType.LOCAL) {
            if (!plan.getContext().getDataColumns().isEmpty()) {
                iterator.remove();
            }
        }
    }
    if (queryPlans.isEmpty()) {
        queryPlans = Collections.singletonList(dataPlan);
    }
    runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
    // We need to have all indexed columns available in all immutable indexes in order
    // to generate the delete markers from the query. We also cannot have any filters
    // except for our SkipScanFilter for point lookups.
    // A simple check of the non existence of a where clause in the parse node is not sufficient, as the where clause
    // may have been optimized out. Instead, we check that there's a single SkipScanFilter
    // If we can generate a plan for every index, that means all the required columns are available in every index,
    // hence we can drive the delete from any of the plans.
    noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
    int queryPlanIndex = 0;
    while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
        QueryPlan plan = queryPlans.get(queryPlanIndex++);
        StatementContext context = plan.getContext();
        noQueryReqd &= (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup();
    }
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    // If we're doing a query for a set of rows with no where clause, then we don't need to contact the server at all.
    if (noQueryReqd) {
        // Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
        // from the data table, while the others will be for deleting rows from immutable indexes.
        List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(queryPlans.size());
        for (final QueryPlan plan : queryPlans) {
            mutationPlans.add(new SingleRowDeleteMutationPlan(plan, connection, maxSize, maxSizeBytes));
        }
        return new MultiRowDeleteMutationPlan(dataPlan, mutationPlans);
    } else if (runOnServer) {
        // TODO: better abstraction
        final StatementContext context = dataPlan.getContext();
        Scan scan = context.getScan();
        scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
        // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
        // The coprocessor will delete each row returned from the scan
        // Ignoring ORDER BY, since with auto commit on and no LIMIT it makes no difference
        SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
        RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
        if (dataPlan.getProjector().projectEveryRow()) {
            projectorToBe = new RowProjector(projectorToBe, true);
        }
        final RowProjector projector = projectorToBe;
        final QueryPlan aggPlan = new AggregatePlan(context, select, dataPlan.getTableRef(), projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, dataPlan);
        return new ServerSelectDeleteMutationPlan(dataPlan, connection, aggPlan, projector, maxSize, maxSizeBytes);
    } else {
        final DeletingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
        List<PColumn> adjustedProjectedColumns = Lists.newArrayListWithExpectedSize(projectedColumns.size());
        final int offset = table.getBucketNum() == null ? 0 : 1;
        Iterator<PColumn> projectedColsItr = projectedColumns.iterator();
        int i = 0;
        while (projectedColsItr.hasNext()) {
            final int position = i++;
            adjustedProjectedColumns.add(new DelegateColumn(projectedColsItr.next()) {

                @Override
                public int getPosition() {
                    return position + offset;
                }
            });
        }
        PTable projectedTable = PTableImpl.makePTable(table, PTableType.PROJECTED, adjustedProjectedColumns);
        final TableRef projectedTableRef = new TableRef(projectedTable, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp());
        QueryPlan bestPlanToBe = dataPlan;
        for (QueryPlan plan : queryPlans) {
            PTable planTable = plan.getTableRef().getTable();
            if (planTable.getIndexState() != PIndexState.BUILDING) {
                bestPlanToBe = plan;
                break;
            }
        }
        final QueryPlan bestPlan = bestPlanToBe;
        final List<TableRef> otherTableRefs = Lists.newArrayListWithExpectedSize(clientSideIndexes.size());
        for (PTable index : clientSideIndexes) {
            if (!bestPlan.getTableRef().getTable().equals(index)) {
                otherTableRefs.add(new TableRef(index, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp()));
            }
        }
        if (!bestPlan.getTableRef().getTable().equals(targetTableRef.getTable())) {
            otherTableRefs.add(projectedTableRef);
        }
        return new ClientSelectDeleteMutationPlan(targetTableRef, dataPlan, bestPlan, hasPreOrPostProcessing, parallelIteratorFactory, otherTableRefs, projectedTableRef, maxSize, maxSizeBytes, connection);
    }
}
Also used : LinkedHashSet(java.util.LinkedHashSet) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) ResultIterator(org.apache.phoenix.iterate.ResultIterator) Iterator(java.util.Iterator) DelegateColumn(org.apache.phoenix.schema.DelegateColumn) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) ArrayList(java.util.ArrayList) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) AliasedNode(org.apache.phoenix.parse.AliasedNode) Hint(org.apache.phoenix.parse.HintNode.Hint) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) Scan(org.apache.hadoop.hbase.client.Scan) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef)
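
The final branch of compile wraps every projected column in an anonymous DelegateColumn so that its reported position is shifted past the salt byte. A minimal standalone sketch of that wrapping pattern is below; the helper name shiftPositions is hypothetical and used only for illustration, while DelegateColumn and PColumn are the Phoenix types already listed above.

import java.util.ArrayList;
import java.util.List;
import org.apache.phoenix.schema.DelegateColumn;
import org.apache.phoenix.schema.PColumn;

// Hypothetical helper mirroring the loop in DeleteCompiler.compile: wrap each column so that
// its reported position is shifted by 'offset' (1 for salted tables, 0 otherwise).
static List<PColumn> shiftPositions(Iterable<PColumn> columns, final int offset) {
    List<PColumn> shifted = new ArrayList<>();
    int i = 0;
    for (PColumn col : columns) {
        final int position = i++;
        shifted.add(new DelegateColumn(col) {

            @Override
            public int getPosition() {
                // Position as seen by the projected table, not the original table.
                return position + offset;
            }
        });
    }
    return shifted;
}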

Example 88 with PColumn

Use of org.apache.phoenix.schema.PColumn in the Apache Phoenix project.

From class MutationState, the method generateMutations:

private void generateMutations(final TableRef tableRef, final long mutationTimestamp, final long serverTimestamp, final MultiRowMutationState values, final List<Mutation> mutationList, final List<Mutation> mutationsPertainingToIndex) {
    final PTable table = tableRef.getTable();
    boolean tableWithRowTimestampCol = table.getRowTimestampColPos() != -1;
    Iterator<Map.Entry<ImmutableBytesPtr, RowMutationState>> iterator = values.entrySet().iterator();
    long timestampToUse = mutationTimestamp;
    MultiRowMutationState modifiedValues = new MultiRowMutationState(16);
    while (iterator.hasNext()) {
        Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry = iterator.next();
        byte[] onDupKeyBytes = rowEntry.getValue().getOnDupKeyBytes();
        boolean hasOnDupKey = onDupKeyBytes != null;
        ImmutableBytesPtr key = rowEntry.getKey();
        RowMutationState state = rowEntry.getValue();
        if (tableWithRowTimestampCol) {
            RowTimestampColInfo rowTsColInfo = state.getRowTimestampColInfo();
            if (rowTsColInfo.useServerTimestamp()) {
                // regenerate the key with this timestamp.
                key = getNewRowKeyWithRowTimestamp(key, serverTimestamp, table);
                // since we are about to modify the byte[] stored in key (which changes its hashcode)
                // we need to remove the entry from the values map and add a new entry with the modified byte[]
                modifiedValues.put(key, state);
                iterator.remove();
                timestampToUse = serverTimestamp;
            } else {
                if (rowTsColInfo.getTimestamp() != null) {
                    timestampToUse = rowTsColInfo.getTimestamp();
                }
            }
        }
        PRow row = table.newRow(connection.getKeyValueBuilder(), timestampToUse, key, hasOnDupKey);
        List<Mutation> rowMutations, rowMutationsPertainingToIndex;
        if (rowEntry.getValue().getColumnValues() == PRow.DELETE_MARKER) {
            // means delete
            row.delete();
            rowMutations = row.toRowMutations();
            // The DeleteCompiler already generates the deletes for indexes, so no need to do it again
            rowMutationsPertainingToIndex = Collections.emptyList();
        } else {
            for (Map.Entry<PColumn, byte[]> valueEntry : rowEntry.getValue().getColumnValues().entrySet()) {
                row.setValue(valueEntry.getKey(), valueEntry.getValue());
            }
            rowMutations = row.toRowMutations();
            // TODO: use our ServerCache
            for (Mutation mutation : rowMutations) {
                if (onDupKeyBytes != null) {
                    mutation.setAttribute(PhoenixIndexBuilder.ATOMIC_OP_ATTRIB, onDupKeyBytes);
                }
            }
            rowMutationsPertainingToIndex = rowMutations;
        }
        mutationList.addAll(rowMutations);
        if (mutationsPertainingToIndex != null)
            mutationsPertainingToIndex.addAll(rowMutationsPertainingToIndex);
    }
    values.putAll(modifiedValues);
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) Entry(java.util.Map.Entry) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map)
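
Note how generateMutations never re-keys an entry in place: the row key is a byte[] wrapped in the map key, and regenerating it changes the key's hashcode, so the entry is removed through the iterator, parked in modifiedValues, and merged back with putAll once iteration is done. The sketch below shows the same pattern with plain java.util collections; the String keys and the startsWith check are illustrative stand-ins for the Phoenix types and the useServerTimestamp condition.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

// Stand-in for the re-keying pattern above: entries whose key must change are removed via the
// iterator, collected in a side map, and merged back after iteration completes.
static <V> void rekeySelectedEntries(Map<String, V> values, String suffix) {
    Map<String, V> modified = new HashMap<>();
    Iterator<Map.Entry<String, V>> iterator = values.entrySet().iterator();
    while (iterator.hasNext()) {
        Map.Entry<String, V> entry = iterator.next();
        if (entry.getKey().startsWith("rekey:")) {                   // stand-in for useServerTimestamp()
            modified.put(entry.getKey() + suffix, entry.getValue()); // stand-in for the regenerated row key
            iterator.remove();                                       // safe removal while iterating
        }
    }
    values.putAll(modified);                                         // merge the re-keyed entries back in
}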

Example 89 with PColumn

Use of org.apache.phoenix.schema.PColumn in the Apache Phoenix project.

From class MutationState, the method joinMutationState:

private void joinMutationState(TableRef tableRef, MultiRowMutationState srcRows, Map<TableRef, MultiRowMutationState> dstMutations) {
    PTable table = tableRef.getTable();
    boolean isIndex = table.getType() == PTableType.INDEX;
    boolean incrementRowCount = dstMutations == this.mutations;
    MultiRowMutationState existingRows = dstMutations.put(tableRef, srcRows);
    if (existingRows != null) {
        // Loop through new rows and replace existing with new
        for (Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry : srcRows.entrySet()) {
            // Replace existing row with new row
            RowMutationState existingRowMutationState = existingRows.put(rowEntry.getKey(), rowEntry.getValue());
            if (existingRowMutationState != null) {
                Map<PColumn, byte[]> existingValues = existingRowMutationState.getColumnValues();
                if (existingValues != PRow.DELETE_MARKER) {
                    Map<PColumn, byte[]> newRow = rowEntry.getValue().getColumnValues();
                    // if new row is PRow.DELETE_MARKER, it means delete, and we don't need to merge it with existing row.
                    if (newRow != PRow.DELETE_MARKER) {
                        // decrement estimated size by the size of the old row
                        estimatedSize -= existingRowMutationState.calculateEstimatedSize();
                        // Merge existing column values with new column values
                        existingRowMutationState.join(rowEntry.getValue());
                        // increment estimated size by the size of the new row
                        estimatedSize += existingRowMutationState.calculateEstimatedSize();
                        // Now that the existing row has been merged with the new row, replace it back
                        // again (since it was merged with the new one above).
                        existingRows.put(rowEntry.getKey(), existingRowMutationState);
                    }
                }
            } else {
                if (incrementRowCount && !isIndex) {
                    // Don't count index rows in row count
                    numRows++;
                    // increment estimated size by the size of the new row
                    estimatedSize += rowEntry.getValue().calculateEstimatedSize();
                }
            }
        }
        // Put the existing one back now that it's merged
        dstMutations.put(tableRef, existingRows);
    } else {
        // Size new map at batch size as that's what it'll likely grow to.
        MultiRowMutationState newRows = new MultiRowMutationState(connection.getMutateBatchSize());
        newRows.putAll(srcRows);
        dstMutations.put(tableRef, newRows);
        if (incrementRowCount && !isIndex) {
            numRows += srcRows.size();
            // since we added all the rows from srcRows, we can simply increment
            // estimatedSize by srcRows.estimatedSize
            estimatedSize += srcRows.estimatedSize;
        }
    }
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Map(java.util.Map) PTable(org.apache.phoenix.schema.PTable)
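
joinMutationState keeps estimatedSize consistent by subtracting the old row's size before the merge and adding the merged row's size back afterwards. The sketch below isolates that bookkeeping; MergeableRow and mergeWithSizeTracking are hypothetical stand-ins for RowMutationState and the merge logic above, not Phoenix APIs.

import java.util.Map;

// Hypothetical stand-in for RowMutationState: something that can report its size and absorb another row.
interface MergeableRow {
    long calculateEstimatedSize();
    void join(MergeableRow other);
}

// Merge 'incoming' into 'existing' under 'key' and return the delta to apply to a running size estimate.
static long mergeWithSizeTracking(Map<String, MergeableRow> existing, String key, MergeableRow incoming) {
    MergeableRow current = existing.put(key, incoming);
    if (current == null) {
        return incoming.calculateEstimatedSize();        // new row: add its full size
    }
    long delta = -current.calculateEstimatedSize();      // subtract the old row's size
    current.join(incoming);                              // merge the new values into the old row
    delta += current.calculateEstimatedSize();           // add the merged row's size
    existing.put(key, current);                          // put the merged row back
    return delta;
}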

Example 90 with PColumn

Use of org.apache.phoenix.schema.PColumn in the Apache Phoenix project.

From class BaseResultIterators, the method computeColumnsInCommon:

private int computeColumnsInCommon() {
    PTable dataTable;
    if ((dataTable = dataPlan.getTableRef().getTable()).getBucketNum() != null) {
        // unable to compute prefix range for salted data table
        return 0;
    }
    PTable table = getTable();
    int nColumnsOffset = dataTable.isMultiTenant() ? 1 : 0;
    int nColumnsInCommon = nColumnsOffset;
    List<PColumn> dataPKColumns = dataTable.getPKColumns();
    List<PColumn> indexPKColumns = table.getPKColumns();
    int nIndexPKColumns = indexPKColumns.size();
    int nDataPKColumns = dataPKColumns.size();
    // Skip INDEX_ID and tenant ID columns
    for (int i = 1 + nColumnsInCommon; i < nIndexPKColumns; i++) {
        PColumn indexColumn = indexPKColumns.get(i);
        String indexColumnName = indexColumn.getName().getString();
        String cf = IndexUtil.getDataColumnFamilyName(indexColumnName);
        if (cf.length() != 0) {
            break;
        }
        if (i > nDataPKColumns) {
            break;
        }
        PColumn dataColumn = dataPKColumns.get(i - 1);
        String dataColumnName = dataColumn.getName().getString();
        // The loop breaks out at the first name or type mismatch, since it wouldn't make sense to have an index with every column in common.
        if (indexColumn.getDataType() == dataColumn.getDataType() && dataColumnName.equals(IndexUtil.getDataColumnName(indexColumnName))) {
            nColumnsInCommon++;
            continue;
        }
        break;
    }
    return nColumnsInCommon;
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint)
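
Stripped of the Phoenix-specific name mangling handled by IndexUtil.getDataColumnFamilyName and IndexUtil.getDataColumnName, computeColumnsInCommon is a prefix comparison over two PK definitions: count the leading columns that match by both name and type and stop at the first mismatch. A simplified standalone sketch of that core loop is below; ColumnDesc and columnsInCommon are hypothetical names used only for illustration.

import java.util.List;
import java.util.Objects;

// Hypothetical minimal column descriptor used only for this sketch.
static final class ColumnDesc {
    final String name;
    final String type;
    ColumnDesc(String name, String type) { this.name = name; this.type = type; }
}

// Count the leading PK columns shared by a data table and an index, matching on name and type.
// startOffset mirrors nColumnsOffset above (e.g. 1 when the table is multi-tenant).
static int columnsInCommon(List<ColumnDesc> dataPk, List<ColumnDesc> indexPk, int startOffset) {
    int inCommon = startOffset;
    for (int i = startOffset; i < indexPk.size() && i < dataPk.size(); i++) {
        ColumnDesc idx = indexPk.get(i);
        ColumnDesc data = dataPk.get(i);
        if (!Objects.equals(idx.type, data.type) || !idx.name.equals(data.name)) {
            break;                                       // stop at the first mismatch
        }
        inCommon++;
    }
    return inCommon;
}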

Aggregations

PColumn (org.apache.phoenix.schema.PColumn): 101 usages
PTable (org.apache.phoenix.schema.PTable): 59 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 26 usages
Expression (org.apache.phoenix.expression.Expression): 21 usages
TableRef (org.apache.phoenix.schema.TableRef): 20 usages
ArrayList (java.util.ArrayList): 19 usages
PName (org.apache.phoenix.schema.PName): 18 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 17 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 17 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 17 usages
ColumnRef (org.apache.phoenix.schema.ColumnRef): 17 usages
Hint (org.apache.phoenix.parse.HintNode.Hint): 14 usages
PTableKey (org.apache.phoenix.schema.PTableKey): 14 usages
ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException): 13 usages
PColumnFamily (org.apache.phoenix.schema.PColumnFamily): 13 usages
PSmallint (org.apache.phoenix.schema.types.PSmallint): 13 usages
SQLException (java.sql.SQLException): 12 usages
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 12 usages
PColumnImpl (org.apache.phoenix.schema.PColumnImpl): 12 usages
Map (java.util.Map): 11 usages