Example 81 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class MetaDataEndpointImpl, method dropColumn:

@Override
public void dropColumn(RpcController controller, final DropColumnRequest request, RpcCallback<MetaDataResponse> done) {
    List<Mutation> tableMetaData = null;
    final List<byte[]> tableNamesToDelete = Lists.newArrayList();
    final List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
    try {
        tableMetaData = ProtobufUtil.getMutations(request);
        MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {

            @Override
            public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData, List<Mutation> tableMetaData, Region region, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, long clientTimeStamp) throws IOException, SQLException {
                byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
                byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
                byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
                boolean deletePKColumn = false;
                getCoprocessorHost().preAlterTable(Bytes.toString(tenantId), SchemaUtil.getTableName(schemaName, tableName), TableName.valueOf(table.getPhysicalName().getBytes()), getParentPhysicalTableName(table), table.getType());
                List<Mutation> additionalTableMetaData = Lists.newArrayList();
                PTableType type = table.getType();
                if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
                    TableViewFinder childViewsResult = new TableViewFinder();
                    findAllChildViews(region, tenantId, table, childViewsResult, clientTimeStamp, request.getClientVersion());
                    if (childViewsResult.hasViews()) {
                        MetaDataMutationResult mutationResult = dropColumnsFromChildViews(region, table, locks, tableMetaData, additionalTableMetaData, schemaName, tableName, invalidateList, clientTimeStamp, childViewsResult, tableNamesToDelete, sharedTablesToDelete, request.getClientVersion());
                        // return if we were not able to drop the column successfully
                        if (mutationResult != null)
                            return mutationResult;
                    }
                }
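                // Each Delete mutation in the batch identifies one column row in SYSTEM.CATALOG;
                // its row key is decoded below to locate the PColumn being dropped.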
                for (Mutation m : tableMetaData) {
                    if (m instanceof Delete) {
                        byte[] key = m.getRow();
                        int pkCount = getVarChars(key, rowKeyMetaData);
                        if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) {
                            PColumn columnToDelete = null;
                            try {
                                if (pkCount > FAMILY_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0) {
                                    PColumnFamily family = table.getColumnFamily(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
                                    columnToDelete = family.getPColumnForColumnNameBytes(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]);
                                } else if (pkCount > COLUMN_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length > 0) {
                                    deletePKColumn = true;
                                    columnToDelete = table.getPKColumn(new String(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]));
                                } else {
                                    continue;
                                }
                                if (table.getType() == PTableType.VIEW) {
                                    if (table.getBaseColumnCount() != DIVERGED_VIEW_BASE_COLUMN_COUNT && columnToDelete.getPosition() < table.getBaseColumnCount()) {
                                        /*
                                         * If the column being dropped is inherited from the base table, then the
                                         * view is about to diverge itself from the base table. The consequence of
                                         * this divergence is that any further meta-data changes made to the
                                         * base table will not be propagated to the hierarchy of views where this
                                         * view is the root.
                                         */
                                        byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
                                        Put updateBaseColumnCountPut = new Put(viewKey);
                                        byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
                                        PInteger.INSTANCE.getCodec().encodeInt(DIVERGED_VIEW_BASE_COLUMN_COUNT, baseColumnCountPtr, 0);
                                        updateBaseColumnCountPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp, baseColumnCountPtr);
                                        additionalTableMetaData.add(updateBaseColumnCountPut);
                                    }
                                }
                                if (columnToDelete.isViewReferenced()) {
                                    // Disallow deletion of column referenced in WHERE clause of view
                                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
                                }
                                // drop any indexes that need the column that is going to be dropped
                                dropIndexes(table, region, invalidateList, locks, clientTimeStamp, schemaName, tableName, additionalTableMetaData, columnToDelete, tableNamesToDelete, sharedTablesToDelete, request.getClientVersion());
                            } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) {
                                return new MetaDataMutationResult(MutationCode.COLUMN_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
                            }
                        }
                    }
                }
                if (deletePKColumn && table.getPKColumns().size() == 1) {
                    return new MetaDataMutationResult(MutationCode.NO_PK_COLUMNS, EnvironmentEdgeManager.currentTimeMillis(), null);
                }
                tableMetaData.addAll(additionalTableMetaData);
                long currentTime = MetaDataUtil.getClientTimeStamp(tableMetaData);
                return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, null, tableNamesToDelete, sharedTablesToDelete);
            }
        }, request.getClientVersion());
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
        }
    } catch (Throwable e) {
        logger.error("Drop column failed: ", e);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException("Error when dropping column: ", e));
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) SQLException(java.sql.SQLException) ByteString(com.google.protobuf.ByteString) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) FilterList(org.apache.hadoop.hbase.filter.FilterList) ArrayList(java.util.ArrayList) List(java.util.List) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTableType(org.apache.phoenix.schema.PTableType) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) Put(org.apache.hadoop.hbase.client.Put) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation)
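
For reference, the family-vs-PK lookup this method performs on the server can be reproduced from a client. A minimal sketch, assuming a reachable Phoenix cluster at jdbc:phoenix:localhost and a hypothetical table MY_SCHEMA.MY_TABLE with a key-value column CF.COL1 and a primary-key column ID:

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnFamily;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.PhoenixRuntime;

public class ResolveColumnSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical JDBC URL and object names; adjust for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PTable table = PhoenixRuntime.getTable(conn, "MY_SCHEMA.MY_TABLE");
            // Key-value column: resolved through its column family, as in dropColumn above.
            PColumnFamily family = table.getColumnFamily("CF");
            PColumn kvColumn = family.getPColumnForColumnName("COL1");
            // Primary-key column: resolved directly on the table, no family involved.
            PColumn pkColumn = table.getPKColumn("ID");
            System.out.println(kvColumn.getName() + " at position " + kvColumn.getPosition()
                    + ", " + pkColumn.getName() + " at position " + pkColumn.getPosition());
        }
    }
}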

Example 82 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class MetaDataEndpointImpl, method addColumnsAndTablePropertiesToChildViews:

private MetaDataMutationResult addColumnsAndTablePropertiesToChildViews(PTable basePhysicalTable, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, Region region, List<RowLock> locks, int clientVersion) throws IOException, SQLException {
    List<PutWithOrdinalPosition> columnPutsForBaseTable = Lists.newArrayListWithExpectedSize(tableMetadata.size());
    Map<TableProperty, Cell> tablePropertyCellMap = Maps.newHashMapWithExpectedSize(tableMetadata.size());
    // Isolate the puts relevant to adding columns. Also figure out what kind of columns are being added.
    for (Mutation m : tableMetadata) {
        if (m instanceof Put) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            // check if this put is for adding a column
            if (pkCount > COLUMN_NAME_INDEX && rkmd[COLUMN_NAME_INDEX] != null && rkmd[COLUMN_NAME_INDEX].length > 0 && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnPutsForBaseTable.add(new PutWithOrdinalPosition((Put) m, getInteger((Put) m, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES)));
            } else if (pkCount <= COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                // check if the put is for a table property
                for (Cell cell : m.getFamilyCellMap().get(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)) {
                    for (TableProperty tableProp : TableProperty.values()) {
                        byte[] propNameBytes = Bytes.toBytes(tableProp.getPropertyName());
                        if (Bytes.compareTo(propNameBytes, 0, propNameBytes.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) == 0 && tableProp.isValidOnView() && tableProp.isMutable()) {
                            Cell tablePropCell = CellUtil.createCell(cell.getRow(), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell));
                            tablePropertyCellMap.put(tableProp, tablePropCell);
                        }
                    }
                }
            }
        }
    }
    // Sort the puts by ordinal position
    Collections.sort(columnPutsForBaseTable);
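    // Walking the puts in ordinal order means every child view receives the new
    // columns in the same relative order in which they appear in the base table.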
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short deltaNumPkColsSoFar = 0;
        short columnsAddedToView = 0;
        short columnsAddedToBaseTable = 0;
        byte[] tenantId = viewInfo.getTenantId();
        byte[] schema = viewInfo.getSchemaName();
        byte[] table = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(tenantId, schema, table);
        // lock the rows corresponding to views so that no other thread can modify the view meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock, clientVersion);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        List<PColumn> viewPkCols = new ArrayList<>(view.getPKColumns());
        boolean addingExistingPkCol = false;
        int numCols = view.getColumns().size();
        // add the new columns to the child view
        for (PutWithOrdinalPosition p : columnPutsForBaseTable) {
            Put baseTableColumnPut = p.put;
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(baseTableColumnPut.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
                // ignore since it means that the column family is not present for the column to be added
            } catch (ColumnNotFoundException e) {
                // ignore since it means the column is not present in the view
            }
            boolean isPkCol = columnFamily == null;
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            if (existingViewColumn != null) {
                MetaDataMutationResult result = validateColumnForAddToBaseTable(existingViewColumn, baseTableColumnPut, basePhysicalTable, isPkCol, view);
                if (result != null) {
                    return result;
                }
                if (isPkCol) {
                    viewPkCols.remove(existingViewColumn);
                    addingExistingPkCol = true;
                }
                /*
                 * For views that are not diverged, we need to make sure that the existing columns
                 * have the same ordinal position as in the base table. This is important because
                 * we rely on the ordinal position of the column to figure out whether dropping a
                 * column from the view will end up diverging the view from the base table.
                 *
                 * For already diverged views, we don't care about the ordinal position of the existing column.
                 */
                if (!isDivergedView(view)) {
                    int newOrdinalPosition = p.ordinalPosition;
                    // Check if the ordinal position of the column was getting updated from previous add column
                    // mutations.
                    int existingOrdinalPos = ordinalPositionList.getOrdinalPositionOfColumn(columnKey);
                    if (ordinalPositionList.size() == 0) {
                        /*
                         * No ordinal positions to be updated are in the list. In that case, check whether the
                         * existing ordinal position of the column is different from its new ordinal position.
                         * If yes, then initialize the ordinal position list with this column's ordinal position
                         * as the offset.
                         */
                        existingOrdinalPos = getOrdinalPosition(view, existingViewColumn);
                        if (existingOrdinalPos != newOrdinalPosition) {
                            ordinalPositionList.setOffset(newOrdinalPosition);
                            ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                            for (PColumn col : view.getColumns()) {
                                int ordinalPos = getOrdinalPosition(view, col);
                                if (ordinalPos >= newOrdinalPosition) {
                                    if (ordinalPos == existingOrdinalPos) {
                                        /*
                                         * No need to update ordinal positions of columns beyond the existing column's
                                         * old ordinal position.
                                         */
                                        break;
                                    }
                                    // increment ordinal position of columns occurring after this column by 1
                                    int updatedPos = ordinalPos + 1;
                                    ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
                                }
                            }
                        }
                    } else {
                        if (existingOrdinalPos != newOrdinalPosition) {
                            ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                        }
                    }
                    columnsAddedToBaseTable++;
                }
            } else {
                // The column doesn't exist in the view.
                Put viewColumnPut = new Put(columnKey, clientTimeStamp);
                for (Cell cell : baseTableColumnPut.getFamilyCellMap().values().iterator().next()) {
                    viewColumnPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell)));
                }
                if (isDivergedView(view)) {
                    if (isPkCol) {
                        /*
                         * Only pk cols of the base table are added to the diverged views. These pk
                         * cols are added at the end.
                         */
                        int lastOrdinalPos = getOrdinalPosition(view, view.getColumns().get(numCols - 1));
                        int newPosition = ++lastOrdinalPos;
                        byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
                        PInteger.INSTANCE.getCodec().encodeInt(newPosition, ptr, 0);
                        viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr);
                        mutationsForAddingColumnsToViews.add(viewColumnPut);
                    } else {
                        // move on to the next column
                        continue;
                    }
                } else {
                    int newOrdinalPosition = p.ordinalPosition;
                    /*
                     * For a non-diverged view, we need to make sure that the base table column
                     * is added at the right position.
                     */
                    if (ordinalPositionList.size() == 0) {
                        ordinalPositionList.setOffset(newOrdinalPosition);
                        ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                        for (PColumn col : view.getColumns()) {
                            int ordinalPos = getOrdinalPosition(view, col);
                            if (ordinalPos >= newOrdinalPosition) {
                                // increment ordinal position of columns by 1
                                int updatedPos = ordinalPos + 1;
                                ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
                            }
                        }
                    } else {
                        ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                    }
                    mutationsForAddingColumnsToViews.add(viewColumnPut);
                }
                if (isPkCol) {
                    deltaNumPkColsSoFar++;
                    // Set the key sequence for the pk column to be added
                    short currentKeySeq = SchemaUtil.getMaxKeySeq(view);
                    short newKeySeq = (short) (currentKeySeq + deltaNumPkColsSoFar);
                    byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
                    PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
                    viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
                    addMutationsForAddingPkColsToViewIndexes(mutationsForAddingColumnsToViews, clientTimeStamp, view, deltaNumPkColsSoFar, columnName, viewColumnPut);
                }
                columnsAddedToView++;
                columnsAddedToBaseTable++;
            }
        }
        /*
         * Allow adding pk columns to the base table only if: 1. all the view pk columns are exactly
         * the same as the base table pk columns, or 2. we are adding all the existing view pk columns
         * to the base table.
         */
        if (addingExistingPkCol && !viewPkCols.equals(basePhysicalTable.getPKColumns())) {
            return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
        }
        addViewIndexesHeaderRowMutations(mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, view, deltaNumPkColsSoFar);
        // set table properties in child view
        if (!tablePropertyCellMap.isEmpty()) {
            Put viewHeaderRowPut = new Put(viewKey, clientTimeStamp);
            for (TableProperty tableProp : TableProperty.values()) {
                Cell tablePropertyCell = tablePropertyCellMap.get(tableProp);
                if (tablePropertyCell != null) {
                    // set this table property on the view :
                    // 1. if it is not mutable on a view (which means the property is always the same as the base table)
                    // 2. or if it is mutable on a view and if it doesn't exist on the view
                    // 3. or if it is mutable on a view and the property value is the same as the base table property (which means it wasn't changed on the view)
                    Object viewProp = tableProp.getPTableValue(view);
                    if (!tableProp.isMutableOnView() || viewProp == null || viewProp.equals(tableProp.getPTableValue(basePhysicalTable))) {
                        viewHeaderRowPut.add(CellUtil.createCell(viewKey, CellUtil.cloneFamily(tablePropertyCell), CellUtil.cloneQualifier(tablePropertyCell), clientTimeStamp, tablePropertyCell.getTypeByte(), CellUtil.cloneValue(tablePropertyCell)));
                    }
                }
            }
            byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()];
            PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0);
            viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr);
            // invalidate the view so that it is removed from the cache
            invalidateList.add(new ImmutableBytesPtr(viewKey));
            mutationsForAddingColumnsToViews.add(viewHeaderRowPut);
        }
        /*
         * Increment the sequence number by 1 if:
         * 1) For a diverged view, there were columns (pk columns) added to the view.
         * 2) For a non-diverged view, the base column count changed.
         */
        boolean changeSequenceNumber = (isDivergedView(view) && columnsAddedToView > 0) || (!isDivergedView(view) && columnsAddedToBaseTable > 0);
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, columnsAddedToView, columnsAddedToBaseTable, viewKey, view, ordinalPositionList, numCols, changeSequenceNumber);
    }
    return null;
}
Also used : ArrayList(java.util.ArrayList) ByteString(com.google.protobuf.ByteString) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) Cell(org.apache.hadoop.hbase.Cell) TableProperty(org.apache.phoenix.schema.TableProperty) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Put(org.apache.hadoop.hbase.client.Put) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) Mutation(org.apache.hadoop.hbase.client.Mutation)
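
The propagation above is what a client observes when a base table with child views is altered. A minimal end-to-end sketch, assuming a reachable cluster at jdbc:phoenix:localhost (all object names here are made up for illustration):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class AddColumnPropagationSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS BASE_T (ID BIGINT PRIMARY KEY, V1 VARCHAR)");
            stmt.execute("CREATE VIEW IF NOT EXISTS CHILD_V AS SELECT * FROM BASE_T WHERE V1 = 'a'");
            // Adding a column to the base table drives the server-side logic above,
            // which rewrites the view metadata and its ordinal positions.
            stmt.execute("ALTER TABLE BASE_T ADD V2 INTEGER");
            // The new column is immediately visible through the (non-diverged) view.
            try (ResultSet rs = stmt.executeQuery("SELECT ID, V2 FROM CHILD_V")) {
                while (rs.next()) {
                    System.out.println(rs.getLong(1) + " -> " + rs.getInt(2));
                }
            }
        }
    }
}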

Example 83 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class MetaDataEndpointImpl, method createTable:

@Override
public void createTable(RpcController controller, CreateTableRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[][] rowKeyMetaData = new byte[3][];
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        int clientVersion = request.getClientVersion();
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        boolean isNamespaceMapped = MetaDataUtil.isNameSpaceMapped(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
        final IndexType indexType = MetaDataUtil.getIndexType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
        byte[] parentSchemaName = null;
        byte[] parentTableName = null;
        PTableType tableType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
        byte[] parentTableKey = null;
        Mutation viewPhysicalTableRow = null;
        Set<TableName> indexes = new HashSet<TableName>();
        byte[] cPhysicalName = SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped).getBytes();
        byte[] cParentPhysicalName = null;
        if (tableType == PTableType.VIEW) {
            byte[][] parentSchemaTableNames = new byte[3][];
            byte[][] parentPhysicalSchemaTableNames = new byte[3][];
            /*
             * For a view, we lock the base physical table row. For a mapped view, there is
             * no link present to the physical table. So the viewPhysicalTableRow is null
             * in that case.
             */
            viewPhysicalTableRow = getPhysicalTableRowForView(tableMetadata, parentSchemaTableNames, parentPhysicalSchemaTableNames);
            long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            if (parentPhysicalSchemaTableNames[2] != null) {
                parentTableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, parentPhysicalSchemaTableNames[1], parentPhysicalSchemaTableNames[2]);
                PTable parentTable = getTable(env, parentTableKey, new ImmutableBytesPtr(parentTableKey), clientTimeStamp, clientTimeStamp, clientVersion);
                if (parentTable == null) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                cParentPhysicalName = parentTable.getPhysicalName().getBytes();
                if (parentSchemaTableNames[2] != null && Bytes.compareTo(parentSchemaTableNames[2], parentPhysicalSchemaTableNames[2]) != 0) {
                    // if view is created on view
                    byte[] parentKey = SchemaUtil.getTableKey(parentSchemaTableNames[0] == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentSchemaTableNames[0], parentSchemaTableNames[1], parentSchemaTableNames[2]);
                    parentTable = getTable(env, parentKey, new ImmutableBytesPtr(parentKey), clientTimeStamp, clientTimeStamp, clientVersion);
                    if (parentTable == null) {
                        // it could be a global view
                        parentKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, parentSchemaTableNames[1], parentSchemaTableNames[2]);
                        parentTable = getTable(env, parentKey, new ImmutableBytesPtr(parentKey), clientTimeStamp, clientTimeStamp, clientVersion);
                    }
                }
                if (parentTable == null) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                for (PTable index : parentTable.getIndexes()) {
                    indexes.add(TableName.valueOf(index.getPhysicalName().getBytes()));
                }
            } else {
                // Mapped View
                cParentPhysicalName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
            }
            parentSchemaName = parentPhysicalSchemaTableNames[1];
            parentTableName = parentPhysicalSchemaTableNames[2];
        } else if (tableType == PTableType.INDEX) {
            parentSchemaName = schemaName;
            /*
             * For an index we lock the parent table's row which could be a physical table or a view.
             * If the parent table is a physical table, then the tenantIdBytes is empty because
             * we allow creating an index with a tenant connection only if the parent table is a view.
             */
            parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
            parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, parentSchemaName, parentTableName);
            long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            PTable parentTable = loadTable(env, parentTableKey, new ImmutableBytesPtr(parentTableKey), clientTimeStamp, clientTimeStamp, clientVersion);
            if (IndexType.LOCAL == indexType) {
                cPhysicalName = parentTable.getPhysicalName().getBytes();
                cParentPhysicalName = parentTable.getPhysicalName().getBytes();
            } else if (parentTable.getType() == PTableType.VIEW) {
                cPhysicalName = MetaDataUtil.getViewIndexPhysicalName(parentTable.getPhysicalName().getBytes());
                cParentPhysicalName = parentTable.getPhysicalName().getBytes();
            } else {
                cParentPhysicalName = SchemaUtil.getPhysicalHBaseTableName(parentSchemaName, parentTableName, isNamespaceMapped).getBytes();
            }
        }
        getCoprocessorHost().preCreateTable(Bytes.toString(tenantIdBytes), SchemaUtil.getTableName(schemaName, tableName), (tableType == PTableType.VIEW) ? null : TableName.valueOf(cPhysicalName), cParentPhysicalName == null ? null : TableName.valueOf(cParentPhysicalName), tableType, /* TODO: During initial create we may not need the family map */
        Collections.<byte[]>emptySet(), indexes);
        Region region = env.getRegion();
        List<RowLock> locks = Lists.newArrayList();
        // Place a lock using key for the table to be created
        byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
        try {
            acquireLock(region, tableKey, locks);
            // If the table key resides outside the region, return without doing anything
            MetaDataMutationResult result = checkTableKeyInRegion(tableKey, region);
            if (result != null) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            ImmutableBytesPtr parentCacheKey = null;
            PTable parentTable = null;
            if (parentTableName != null) {
                // Check if the parent table resides in the same region. If not, don't worry about locking the parent table row
                // or loading the parent table. For a view, the parent table that needs to be locked is the base physical table.
                // For an index on view, the view header row needs to be locked.
                result = checkTableKeyInRegion(parentTableKey, region);
                if (result == null) {
                    acquireLock(region, parentTableKey, locks);
                    parentCacheKey = new ImmutableBytesPtr(parentTableKey);
                    parentTable = loadTable(env, parentTableKey, parentCacheKey, clientTimeStamp, clientTimeStamp, clientVersion);
                    if (parentTable == null || isTableDeleted(parentTable)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        done.run(builder.build());
                        return;
                    }
                    // make sure we haven't gone over our threshold for indexes on this table.
                    if (execeededIndexQuota(tableType, parentTable)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        done.run(builder.build());
                        return;
                    }
                    long parentTableSeqNumber;
                    if (tableType == PTableType.VIEW && viewPhysicalTableRow != null && request.hasClientVersion()) {
                        // Starting 4.5, the client passes the sequence number of the physical table in the table metadata.
                        parentTableSeqNumber = MetaDataUtil.getSequenceNumber(viewPhysicalTableRow);
                    } else if (tableType == PTableType.VIEW && !request.hasClientVersion()) {
                        // Before 4.5, due to a bug, the parent table key wasn't available.
                        // So don't do anything and prevent the exception from being thrown.
                        parentTableSeqNumber = parentTable.getSequenceNumber();
                    } else {
                        parentTableSeqNumber = MetaDataUtil.getParentSequenceNumber(tableMetadata);
                    }
                    // If parent table isn't at the expected sequence number, then return
                    if (parentTable.getSequenceNumber() != parentTableSeqNumber) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.CONCURRENT_TABLE_MUTATION);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.setTable(PTableImpl.toProto(parentTable));
                        done.run(builder.build());
                        return;
                    }
                }
            }
            // Load child table next
            ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey);
            // Get as of latest timestamp so we can detect if we have a newer table that already
            // exists without making an additional query
            PTable table = loadTable(env, tableKey, cacheKey, clientTimeStamp, HConstants.LATEST_TIMESTAMP, clientVersion);
            if (table != null) {
                if (table.getTimeStamp() < clientTimeStamp) {
                    // If the table is older than the client timestamp and has been deleted, fall through and recreate it
                    if (!isTableDeleted(table)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.setTable(PTableImpl.toProto(table));
                        done.run(builder.build());
                        return;
                    }
                } else {
                    builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_TABLE_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    builder.setTable(PTableImpl.toProto(table));
                    done.run(builder.build());
                    return;
                }
            }
            // Add the ROW_KEY_ORDER_OPTIMIZABLE cell for new tables; for a VIEW the client
            // sends it over depending on its base physical table.
            if (tableType != PTableType.VIEW) {
                UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, tableKey, clientTimeStamp);
            }
            // If the parent table of the view has the auto partition sequence name attribute, modify the
            // tableMetadata and set the view statement and partition column correctly
            if (parentTable != null && parentTable.getAutoPartitionSeqName() != null) {
                long autoPartitionNum = 1;
                try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                    Statement stmt = connection.createStatement()) {
                    String seqName = parentTable.getAutoPartitionSeqName();
                    // Not going through the standard route of using statement.execute() as that code path
                    // is blocked if the metadata hasn't been upgraded to the new minor release.
                    String seqNextValueSql = String.format("SELECT NEXT VALUE FOR %s", seqName);
                    PhoenixStatement ps = stmt.unwrap(PhoenixStatement.class);
                    QueryPlan plan = ps.compileQuery(seqNextValueSql);
                    ResultIterator resultIterator = plan.iterator();
                    PhoenixResultSet rs = ps.newResultSet(resultIterator, plan.getProjector(), plan.getContext());
                    rs.next();
                    autoPartitionNum = rs.getLong(1);
                } catch (SequenceNotFoundException e) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.AUTO_PARTITION_SEQUENCE_NOT_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                PColumn autoPartitionCol = parentTable.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parentTable));
                if (!PLong.INSTANCE.isCoercibleTo(autoPartitionCol.getDataType(), autoPartitionNum)) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.CANNOT_COERCE_AUTO_PARTITION_ID);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                builder.setAutoPartitionNum(autoPartitionNum);
                // set the VIEW STATEMENT column of the header row
                Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
                NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
                List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                Cell cell = cells.get(0);
                String autoPartitionWhere = QueryUtil.getViewPartitionClause(MetaDataUtil.getAutoPartitionColumnName(parentTable), autoPartitionNum);
                String hbaseVersion = VersionInfo.getVersion();
                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                KeyValueBuilder kvBuilder = KeyValueBuilder.get(hbaseVersion);
                MetaDataUtil.getMutationValue(tableHeaderPut, VIEW_STATEMENT_BYTES, kvBuilder, ptr);
                byte[] value = ptr.copyBytesIfNecessary();
                byte[] viewStatement = null;
                // if we have an existing where clause add the auto partition where clause to it
                if (!Bytes.equals(value, QueryConstants.EMPTY_COLUMN_VALUE_BYTES)) {
                    viewStatement = Bytes.add(value, Bytes.toBytes(" AND "), Bytes.toBytes(autoPartitionWhere));
                } else {
                    viewStatement = Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), parentTable.getTableName().getString(), autoPartitionWhere));
                }
                Cell viewStatementCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_STATEMENT_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), viewStatement);
                cells.add(viewStatementCell);
                // set the IS_VIEW_REFERENCED column of the auto partition column row
                Put autoPartitionPut = MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata);
                familyCellMap = autoPartitionPut.getFamilyCellMap();
                cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                cell = cells.get(0);
                PDataType dataType = autoPartitionCol.getDataType();
                Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
                byte[] bytes = new byte[dataType.getByteSize() + 1];
                dataType.toBytes(val, bytes, 0);
                Cell viewConstantCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_CONSTANT_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
                cells.add(viewConstantCell);
            }
            Short indexId = null;
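            // A view index shares one physical HBase table with every other index on views of the
            // same base table, so each is allocated a distinct short id from a server-side sequence.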
            if (request.hasAllocateIndexId() && request.getAllocateIndexId()) {
                String tenantIdStr = tenantIdBytes.length == 0 ? null : Bytes.toString(tenantIdBytes);
                try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class)) {
                    PName physicalName = parentTable.getPhysicalName();
                    int nSequenceSaltBuckets = connection.getQueryServices().getSequenceSaltBuckets();
                    SequenceKey key = MetaDataUtil.getViewIndexSequenceKey(tenantIdStr, physicalName, nSequenceSaltBuckets, parentTable.isNamespaceMapped());
                    // TODO Review: earlier the sequence was created at (SCN-1/LATEST_TIMESTAMP) and incremented at the client
                    // at max(SCN, dataTable.getTimestamp()), but it seems we should always use LATEST_TIMESTAMP
                    // to avoid different connections (with or without an SCN) seeing wrong sequence values.
                    long sequenceTimestamp = HConstants.LATEST_TIMESTAMP;
                    try {
                        connection.getQueryServices().createSequence(key.getTenantId(), key.getSchemaName(), key.getSequenceName(), Short.MIN_VALUE, 1, 1, Long.MIN_VALUE, Long.MAX_VALUE, false, sequenceTimestamp);
                    } catch (SequenceAlreadyExistsException e) {
                        // The sequence already exists from an earlier view index; reuse it.
                    }
                    long[] seqValues = new long[1];
                    SQLException[] sqlExceptions = new SQLException[1];
                    connection.getQueryServices().incrementSequences(Collections.singletonList(new SequenceAllocation(key, 1)), HConstants.LATEST_TIMESTAMP, seqValues, sqlExceptions);
                    if (sqlExceptions[0] != null) {
                        throw sqlExceptions[0];
                    }
                    long seqValue = seqValues[0];
                    if (seqValue > Short.MAX_VALUE) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        done.run(builder.build());
                        return;
                    }
                    Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
                    NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
                    List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                    Cell cell = cells.get(0);
                    PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
                    Object val = dataType.toObject(seqValue, PLong.INSTANCE);
                    byte[] bytes = new byte[dataType.getByteSize() + 1];
                    dataType.toBytes(val, bytes, 0);
                    Cell indexIdCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_INDEX_ID_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
                    cells.add(indexIdCell);
                    indexId = (short) seqValue;
                }
            }
            // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the
            // system table. Basically, we get all the locks that we don't already hold for all the
            // tableMetadata rows. This ensures we don't have deadlock situations (ensuring
            // primary and then index table locks are held, in that order). For now, we just don't support
            // indexing on the system table. This is an issue because of the way we manage batch mutation
            // in the Indexer.
            mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            // Invalidate the cache - the next getTable call will add it
            // TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            if (parentCacheKey != null) {
                metaDataCache.invalidate(parentCacheKey);
            }
            metaDataCache.invalidate(cacheKey);
            // Get timeStamp from mutations - the above method sets it if it's unset
            long currentTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
            if (indexId != null) {
                builder.setViewIndexId(indexId);
            }
            builder.setMutationTime(currentTimeStamp);
            done.run(builder.build());
            return;
        } finally {
            releaseRowLocks(region, locks);
        }
    } catch (Throwable t) {
        logger.error("createTable failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used : ByteString(com.google.protobuf.ByteString) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) PDataType(org.apache.phoenix.schema.types.PDataType) FilterList(org.apache.hadoop.hbase.filter.FilterList) ArrayList(java.util.ArrayList) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) HashSet(java.util.HashSet) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ResultIterator(org.apache.phoenix.iterate.ResultIterator) SequenceAllocation(org.apache.phoenix.schema.SequenceAllocation) TableName(org.apache.hadoop.hbase.TableName) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) KeyValue(org.apache.hadoop.hbase.KeyValue) SQLException(java.sql.SQLException) SequenceAlreadyExistsException(org.apache.phoenix.schema.SequenceAlreadyExistsException) QueryPlan(org.apache.phoenix.compile.QueryPlan) PColumn(org.apache.phoenix.schema.PColumn) SequenceKey(org.apache.phoenix.schema.SequenceKey) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) SequenceNotFoundException(org.apache.phoenix.schema.SequenceNotFoundException) IndexType(org.apache.phoenix.schema.PTable.IndexType) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Statement(java.sql.Statement) PTableType(org.apache.phoenix.schema.PTableType) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) GenericKeyValueBuilder(org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) Put(org.apache.hadoop.hbase.client.Put) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) PName(org.apache.phoenix.schema.PName)
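
Each branch of the parent-resolution logic above is reached by a different DDL shape. A minimal sketch of the three shapes, again assuming a cluster at jdbc:phoenix:localhost and hypothetical object names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateTableShapesSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // tableType == TABLE: no parent row needs to be locked.
            stmt.execute("CREATE TABLE IF NOT EXISTS T (ID BIGINT PRIMARY KEY, V1 VARCHAR)");
            // tableType == VIEW: the server locks the base physical table row of T.
            stmt.execute("CREATE VIEW IF NOT EXISTS GV AS SELECT * FROM T WHERE V1 = 'x'");
            // tableType == INDEX with IndexType.LOCAL: the index shares T's physical HBase table.
            stmt.execute("CREATE LOCAL INDEX IF NOT EXISTS LIDX ON T (V1)");
        }
    }
}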

Example 84 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From the class UngroupedAggregateRegionObserver, method doPostScannerOpen:

@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException, SQLException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Region region = env.getRegion();
    long ts = scan.getTimeRange().getMax();
    boolean localIndexScan = ScanUtil.isLocalIndex(scan);
    if (ScanUtil.isAnalyzeTable(scan)) {
        byte[] gp_width_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
        byte[] gp_per_region_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
        // Let this throw, as this scan is being done for the sole purpose of collecting stats
        StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(env, region.getRegionInfo().getTable().getNameAsString(), ts, gp_width_bytes, gp_per_region_bytes);
        return collectStats(s, statsCollector, region, scan, env.getConfiguration());
    } else if (ScanUtil.isIndexRebuild(scan)) {
        return rebuildIndices(s, region, scan, env.getConfiguration());
    }
    int offsetToBe = 0;
    if (localIndexScan) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offsetToBe);
    }
    final int offset = offsetToBe;
    PTable projectedTable = null;
    PTable writeToTable = null;
    byte[][] values = null;
    byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
    boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
    if (isDescRowKeyOrderUpgrade) {
        logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
        projectedTable = deserializeTable(descRowKeyTableBytes);
        try {
            writeToTable = PTableImpl.makePTable(projectedTable, true);
        } catch (SQLException e) {
            // Impossible
            ServerUtil.throwIOException("Upgrade failed", e);
        }
        values = new byte[projectedTable.getPKColumns().size()][];
    }
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    boolean useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
    MutationList indexMutations = localIndexBytes == null ? new MutationList() : new MutationList(1024);
    RegionScanner theScanner = s;
    byte[] replayMutations = scan.getAttribute(BaseScannerRegionObserver.REPLAY_WRITES);
    byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
    byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
    List<Expression> selectExpressions = null;
    byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
    boolean isUpsert = false;
    boolean isDelete = false;
    byte[] deleteCQ = null;
    byte[] deleteCF = null;
    byte[] emptyCF = null;
    HTable targetHTable = null;
    boolean isPKChanging = false;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
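    // The scan attributes stamped by the client decide which server-side write path this
    // scanner drives: an UPSERT SELECT, a DELETE, or a plain aggregation over the region.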
    if (upsertSelectTable != null) {
        isUpsert = true;
        projectedTable = deserializeTable(upsertSelectTable);
        targetHTable = new HTable(upsertSelectConfig, projectedTable.getPhysicalName().getBytes());
        selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
        values = new byte[projectedTable.getPKColumns().size()][];
        isPKChanging = ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
    } else {
        byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
        isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
        if (!isDelete) {
            deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
            deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
        }
        emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
    }
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        theScanner = getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env, useQualifierAsIndex, useNewValueColumnQualifier);
    }
    int maxBatchSize = 0;
    long maxBatchSizeBytes = 0L;
    MutationList mutations = new MutationList();
    boolean needToWrite = false;
    Configuration conf = env.getConfiguration();
    long flushSize = region.getTableDesc().getMemStoreFlushSize();
    if (flushSize <= 0) {
        flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
    }
    /*
     * Slow down the writes if the memstore size is more than
     * (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
     * bytes. This avoids a flush storm to HDFS for cases like index building, where reads and
     * writes happen across all the table regions in the server.
     */
    final long blockingMemStoreSize = flushSize * (conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1);
    boolean buildLocalIndex = indexMaintainers != null && dataColumns == null && !localIndexScan;
    if (buildLocalIndex) {
        checkForLocalIndexColumnFamilies(region, indexMaintainers);
    }
    if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
        needToWrite = true;
        maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        mutations = new MutationList(Ints.saturatedCast(maxBatchSize + maxBatchSize / 10));
        maxBatchSizeBytes = conf.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
    }
    Aggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), conf);
    Aggregator[] rowAggregators = aggregators.getAggregators();
    boolean hasMore;
    boolean hasAny = false;
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " " + region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
    }
    int rowCount = 0;
    final RegionScanner innerScanner = theScanner;
    boolean useIndexProto = true;
    byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    // for backward compatibility, fall back to looking up the old attribute
    if (indexMaintainersPtr == null) {
        indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
        useIndexProto = false;
    }
    byte[] clientVersionBytes = scan.getAttribute(PhoenixIndexCodec.CLIENT_VERSION);
    boolean acquiredLock = false;
    boolean incrScanRefCount = false;
    final TenantCache tenantCache = GlobalCache.getTenantCache(env, ScanUtil.getTenantId(scan));
    try (MemoryChunk em = tenantCache.getMemoryManager().allocate(0)) {
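        // The zero-byte chunk reserved here is grown below (em.resize) as the
        // aggregate state grows, so per-tenant memory accounting reflects the
        // scan's actual usage.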
        if (needToWrite) {
            synchronized (lock) {
                if (isRegionClosingOrSplitting) {
                    throw new IOException("Temporarily unable to write from scan because region is closing or splitting");
                }
                scansReferenceCount++;
                incrScanRefCount = true;
                lock.notifyAll();
            }
        }
        region.startRegionOperation();
        acquiredLock = true;
        long size = 0;
        synchronized (innerScanner) {
            do {
                List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                // Results are potentially returned even when the return value of
                // nextRaw() is false, since that return value only indicates whether
                // there are more rows after the ones just returned
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    rowCount++;
                    result.setKeyValues(results);
                    if (isDescRowKeyOrderUpgrade) {
                        Arrays.fill(values, null);
                        Cell firstKV = results.get(0);
                        RowKeySchema schema = projectedTable.getRowKeySchema();
                        int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
                        for (int i = 0; i < schema.getFieldCount(); i++) {
                            Boolean hasValue = schema.next(ptr, i, maxOffset);
                            if (hasValue == null) {
                                break;
                            }
                            Field field = schema.getField(i);
                            if (field.getSortOrder() == SortOrder.DESC) {
                                // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
                                if (field.getDataType().isArrayType()) {
                                    // Pass true to force use of the correct separator byte
                                    field.getDataType().coerceBytes(ptr, null, field.getDataType(), field.getMaxLength(), field.getScale(), field.getSortOrder(), field.getMaxLength(), field.getScale(), field.getSortOrder(), true);
                                // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
                                } else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
                                    int len = ptr.getLength();
                                    while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                        len--;
                                    }
                                    ptr.set(ptr.get(), ptr.getOffset(), len);
                                // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
                                } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
                                    byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
                                    ptr.set(invertedBytes);
                                }
                            } else if (field.getDataType() == PBinary.INSTANCE) {
                                // Remove trailing space characters so that the setValues call below will replace them
                                // with the correct zero byte character. Note this is somewhat dangerous as these
                                // could be legit, but I don't know what the alternative is.
                                int len = ptr.getLength();
                                while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                    len--;
                                }
                                ptr.set(ptr.get(), ptr.getOffset(), len);
                            }
                            values[i] = ptr.copyBytes();
                        }
                        writeToTable.newKey(ptr, values);
                        if (Bytes.compareTo(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr.get(), ptr.getOffset() + offset, ptr.getLength()) == 0) {
                            continue;
                        }
                        byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
                        if (offset > 0) {
                            // for local indexes (prepend region start key)
                            byte[] newRowWithOffset = new byte[offset + newRow.length];
                            System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);
                            System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
                            newRow = newRowWithOffset;
                        }
                        byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
                        for (Cell cell : results) {
                            // Copy existing cell but with new row key
                            Cell newCell = new KeyValue(newRow, 0, newRow.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()), cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                            switch(KeyValue.Type.codeToType(cell.getTypeByte())) {
                                case Put:
                                    // If Put, point delete old Put
                                    Delete del = new Delete(oldRow);
                                    del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
                                    mutations.add(del);
                                    Put put = new Put(newRow);
                                    put.add(newCell);
                                    mutations.add(put);
                                    break;
                                case Delete:
                                case DeleteColumn:
                                case DeleteFamily:
                                case DeleteFamilyVersion:
                                    Delete delete = new Delete(newRow);
                                    delete.addDeleteMarker(newCell);
                                    mutations.add(delete);
                                    break;
                            }
                        }
                    } else if (buildLocalIndex) {
                        for (IndexMaintainer maintainer : indexMaintainers) {
                            if (!results.isEmpty()) {
                                result.getKey(ptr);
                                ValueGetter valueGetter = maintainer.createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), results);
                                Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, results.get(0).getTimestamp(), env.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
                                indexMutations.add(put);
                            }
                        }
                        result.setKeyValues(results);
                    } else if (isDelete) {
                        // FIXME: the version of the Delete constructor without the lock
                        // args was introduced in 0.94.4, thus if we try to use it here
                        // we can no longer use the 0.94.2 version of the client.
                        Cell firstKV = results.get(0);
                        Delete delete = new Delete(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), ts);
                        if (replayMutations != null) {
                            delete.setAttribute(REPLAY_WRITES, replayMutations);
                        }
                        mutations.add(delete);
                        // force Tephra to ignore this delete
                        delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                    } else if (isUpsert) {
                        Arrays.fill(values, null);
                        int bucketNumOffset = 0;
                        if (projectedTable.getBucketNum() != null) {
                            values[0] = new byte[] { 0 };
                            bucketNumOffset = 1;
                        }
                        int i = bucketNumOffset;
                        List<PColumn> projectedColumns = projectedTable.getColumns();
                        for (; i < projectedTable.getPKColumns().size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                values[i] = ptr.copyBytes();
                                // If the sort order of the expression doesn't match that of the
                                // column being projected into then invert the bits.
                                if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                                    SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                                }
                            } else {
                                values[i] = ByteUtil.EMPTY_BYTE_ARRAY;
                            }
                        }
                        projectedTable.newKey(ptr, values);
                        PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
                        for (; i < projectedColumns.size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                PColumn column = projectedColumns.get(i);
                                if (!column.getDataType().isSizeCompatible(ptr, null, expression.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), column.getMaxLength(), column.getScale())) {
                                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), column.getScale(), column.getName().getString(), ptr);
                                }
                                column.getDataType().coerceBytes(ptr, null, expression.getDataType(), expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
                                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                                row.setValue(column, bytes);
                            }
                        }
                        for (Mutation mutation : row.toRowMutations()) {
                            if (replayMutations != null) {
                                mutation.setAttribute(REPLAY_WRITES, replayMutations);
                            }
                            mutations.add(mutation);
                        }
                        for (i = 0; i < selectExpressions.size(); i++) {
                            selectExpressions.get(i).reset();
                        }
                    } else if (deleteCF != null && deleteCQ != null) {
                        // No need to search for the delete column, since we project only it
                        // if no empty key value is being set
                        if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
                            Delete delete = new Delete(results.get(0).getRowArray(), results.get(0).getRowOffset(), results.get(0).getRowLength());
                            delete.deleteColumns(deleteCF, deleteCQ, ts);
                            // force Tephra to ignore this delete
                            delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                            mutations.add(delete);
                        }
                    }
                    if (emptyCF != null) {
                        /*
                         * If we've specified an emptyCF, then we need to insert an empty
                         * key value "retroactively" for any key value that is visible at
                         * the timestamp that the DDL was issued. Key values that are not
                         * visible at this timestamp will never be projected to scans past
                         * this timestamp, so they don't need to be considered. We insert
                         * one empty key value per row per timestamp.
                         */
                        Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
                        for (Cell kv : results) {
                            long kvts = kv.getTimestamp();
                            if (!timeStamps.contains(kvts)) {
                                Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                                put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
                                mutations.add(put);
                            }
                        }
                    }
                    if (ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, targetHTable, useIndexProto, isPKChanging, clientVersionBytes);
                        mutations.clear();
                    }
                    if (ServerUtil.readyToCommit(indexMutations.size(), indexMutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        setIndexAndTransactionProperties(indexMutations, indexUUID, indexMaintainersPtr, txState, clientVersionBytes, useIndexProto);
                        commitBatch(region, indexMutations, blockingMemStoreSize);
                        indexMutations.clear();
                    }
                    size += aggregators.aggregate(rowAggregators, result);
                    while (size > em.getSize()) {
                        logger.info("Request: {}, resizing {} by 1024*1024", size, em.getSize());
                        em.resize(em.getSize() + 1024 * 1024);
                    }
                    hasAny = true;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, targetHTable, useIndexProto, isPKChanging, clientVersionBytes);
                mutations.clear();
            }
            if (!indexMutations.isEmpty()) {
                commitBatch(region, indexMutations, blockingMemStoreSize);
                indexMutations.clear();
            }
        }
    } finally {
        if (needToWrite && incrScanRefCount) {
            synchronized (lock) {
                scansReferenceCount--;
                if (scansReferenceCount < 0) {
                    logger.warn("Scan reference count went below zero. Something isn't correct. Resetting it back to zero");
                    scansReferenceCount = 0;
                }
                lock.notifyAll();
            }
        }
        try {
            if (targetHTable != null) {
                targetHTable.close();
            }
        } finally {
            try {
                innerScanner.close();
            } finally {
                if (acquiredLock)
                    region.closeRegionOperation();
            }
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
    }
    final boolean hadAny = hasAny;
    KeyValue keyValue = null;
    if (hadAny) {
        byte[] value = aggregators.toBytes(rowAggregators);
        keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    }
    final KeyValue aggKeyValue = keyValue;
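    // The scanner below emits at most one synthetic row: the serialized aggregate
    // state under a fixed row key, which the client then merges across regions.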
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {

        private boolean done = !hadAny;

        @Override
        public boolean isFilterDone() {
            return done;
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (done)
                return false;
            done = true;
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Configuration(org.apache.hadoop.conf.Configuration) TupleProjector(org.apache.phoenix.execute.TupleProjector) PTable(org.apache.phoenix.schema.PTable) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) ArrayList(java.util.ArrayList) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) TenantCache(org.apache.phoenix.cache.TenantCache) MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) Aggregators(org.apache.phoenix.expression.aggregator.Aggregators) ServerAggregators(org.apache.phoenix.expression.aggregator.ServerAggregators) PLong(org.apache.phoenix.schema.types.PLong) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) KeyValue(org.apache.hadoop.hbase.KeyValue) StatisticsCollector(org.apache.phoenix.schema.stats.StatisticsCollector) SQLException(java.sql.SQLException) HTable(org.apache.hadoop.hbase.client.HTable) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) Field(org.apache.phoenix.schema.ValueSchema.Field) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) RowKeySchema(org.apache.phoenix.schema.RowKeySchema) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Put(org.apache.hadoop.hbase.client.Put) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Expression(org.apache.phoenix.expression.Expression) HashJoinInfo(org.apache.phoenix.join.HashJoinInfo) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) TableRef(org.apache.phoenix.schema.TableRef) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Tuple(org.apache.phoenix.schema.tuple.Tuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple)
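
The loop above batches mutations by row count and by serialized size before each server-side commit. A minimal, self-contained sketch of that pattern follows; SimpleBatcher and flush() are illustrative names, not Phoenix API, and the two thresholds mirror the roles of phoenix.mutate.batchSize and phoenix.mutate.batchSizeBytes.

import java.util.ArrayList;
import java.util.List;

// Minimal sketch of the batch-and-flush pattern used in the scan loop above.
public class SimpleBatcher {

    private final int maxBatchSize;
    private final long maxBatchSizeBytes;
    private final List<byte[]> batch = new ArrayList<>();
    private long batchBytes = 0L;

    public SimpleBatcher(int maxBatchSize, long maxBatchSizeBytes) {
        this.maxBatchSize = maxBatchSize;
        this.maxBatchSizeBytes = maxBatchSizeBytes;
    }

    public void add(byte[] mutation) {
        batch.add(mutation);
        batchBytes += mutation.length;
        // Flush when either the row-count or the byte-size threshold is reached,
        // the same condition shape as ServerUtil.readyToCommit(...) above.
        if (batch.size() >= maxBatchSize || batchBytes >= maxBatchSizeBytes) {
            flush();
        }
    }

    // Mirrors the post-loop commit above: any partial batch is flushed at the end.
    public void close() {
        if (!batch.isEmpty()) {
            flush();
        }
    }

    private void flush() {
        System.out.println("flushing " + batch.size() + " mutations, " + batchBytes + " bytes");
        batch.clear();
        batchBytes = 0L;
    }
}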

Example 85 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class BaseQueryPlan method iterator.

public final ResultIterator iterator(final Map<ImmutableBytesPtr, ServerCache> caches, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = context.getScan();
    }
    ScanRanges scanRanges = context.getScanRanges();
    /*
     * For aggregate queries, we still need to let the AggregationPlan
     * proceed so that we can return proper aggregates even if there are
     * no rows to be scanned.
     */
    if (scanRanges == ScanRanges.NOTHING && !getStatement().isAggregate()) {
        return getWrappedIterator(caches, ResultIterator.EMPTY_ITERATOR);
    }
    if (tableRef == TableRef.EMPTY_TABLE_REF) {
        return newIterator(scanGrouper, scan, caches);
    }
    // Set miscellaneous scan attributes. This is the last chance to set them before we
    // clone the scan for each parallelized chunk.
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    if (dynamicFilter != null) {
        WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        ScanUtil.setReversed(scan);
        // Hack for working around PHOENIX-3121 and HBASE-16296.
        // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
        int scannerCacheSize = context.getStatement().getFetchSize();
        if (limit != null && limit % scannerCacheSize == 0) {
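            // Illustrative numbers: with fetchSize = 100 and limit = 1000, caching
            // becomes 101, so the limit no longer falls exactly on a cache boundary.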
            scan.setCaching(scannerCacheSize + 1);
        }
    }
    PhoenixConnection connection = context.getConnection();
    final int smallScanThreshold = connection.getQueryServices().getProps().getInt(QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB, QueryServicesOptions.DEFAULT_SMALL_SCAN_THRESHOLD);
    if (statement.getHint().hasHint(Hint.SMALL) || (scanRanges.isPointLookup() && scanRanges.getPointLookupCount() < smallScanThreshold)) {
        scan.setSmall(true);
    }
    // set read consistency
    if (table.getType() != PTableType.SYSTEM) {
        scan.setConsistency(connection.getConsistency());
    }
    // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
    if (!table.isTransactional()) {
        // Get the time range of row_timestamp column
        TimeRange rowTimestampRange = scanRanges.getRowTimestampRange();
        // Get the already existing time range on the scan.
        TimeRange scanTimeRange = scan.getTimeRange();
        Long scn = connection.getSCN();
        if (scn == null) {
            // Always use latest timestamp unless scn is set or transactional (see PHOENIX-4089)
            scn = HConstants.LATEST_TIMESTAMP;
        }
        try {
            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
            if (timeRangeToUse == null) {
                return ResultIterator.EMPTY_ITERATOR;
            }
            scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    byte[] tenantIdBytes;
    if (table.isMultiTenant()) {
        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
    } else {
        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    }
    ScanUtil.setTenantId(scan, tenantIdBytes);
    String customAnnotations = LogUtil.customAnnotationsToString(connection);
    ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
    // Set local index related scan attributes.
    if (table.getIndexType() == IndexType.LOCAL) {
        ScanUtil.setLocalIndex(scan);
        Set<PColumn> dataColumns = context.getDataColumns();
        // If any data columns to join back from the data table are present, set:
        // 1. the data columns to be projected and their key value schema, and
        // 2. the index maintainer and view constants (if any), to build the data
        //    row key from the index row key.
        // TODO: a hint could skip joining back to the data table; in that case, if any
        // column to project is not present in the index then we need to skip this plan.
        if (!dataColumns.isEmpty()) {
            // Resolve the data table to join back to.
            PTable parentTable = context.getCurrentTable().getTable();
            String parentSchemaName = parentTable.getParentSchemaName().getString();
            String parentTableName = parentTable.getParentTableName().getString();
            final ParseNodeFactory FACTORY = new ParseNodeFactory();
            // TODO: is it necessary to re-resolve the table?
            TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
            PTable dataTable = dataTableRef.getTable();
            // Set data columns to be joined back from the data table.
            serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
            KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
            // Set key value schema of the data columns.
            serializeSchemaIntoScan(scan, schema);
            // Set index maintainer of the local index.
            serializeIndexMaintainerIntoScan(scan, dataTable);
            // Set view constants if exists.
            serializeViewConstantsIntoScan(scan, dataTable);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
    }
    ResultIterator iterator = newIterator(scanGrouper, scan, caches);
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
    }
    // wrap the iterator so we start/end tracing as we expect
    TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
    return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) TracingIterator(org.apache.phoenix.trace.TracingIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) DelegateResultIterator(org.apache.phoenix.iterate.DelegateResultIterator) TraceScope(org.apache.htrace.TraceScope) IOException(java.io.IOException) ScanRanges(org.apache.phoenix.compile.ScanRanges) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) PColumn(org.apache.phoenix.schema.PColumn) TimeRange(org.apache.hadoop.hbase.io.TimeRange) KeyValueSchema(org.apache.phoenix.schema.KeyValueSchema) TableRef(org.apache.phoenix.schema.TableRef) ParseNodeFactory(org.apache.phoenix.parse.ParseNodeFactory)
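
The non-transactional branch above intersects the ROW_TIMESTAMP range with the scan's existing time range, capped by the SCN, and returns an empty iterator when the intersection is empty. A rough, self-contained sketch of that shape follows; the TimeRangeSketch type and intersect() helper are illustrative, not the HBase TimeRange or Phoenix ScanUtil API.

// Illustrative stand-in for a [min, max) time range; not HBase's TimeRange.
final class TimeRangeSketch {

    final long min; // inclusive
    final long max; // exclusive

    TimeRangeSketch(long min, long max) {
        this.min = min;
        this.max = max;
    }

    // Intersect two ranges and cap the upper end at scn; a null result means
    // the scan can be skipped entirely, as in the code above.
    static TimeRangeSketch intersect(TimeRangeSketch a, TimeRangeSketch b, long scn) {
        long lo = Math.max(a.min, b.min);
        long hi = Math.min(Math.min(a.max, b.max), scn);
        return lo < hi ? new TimeRangeSketch(lo, hi) : null;
    }
}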

Aggregations

PColumn (org.apache.phoenix.schema.PColumn): 101 usages
PTable (org.apache.phoenix.schema.PTable): 59 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 26 usages
Expression (org.apache.phoenix.expression.Expression): 21 usages
TableRef (org.apache.phoenix.schema.TableRef): 20 usages
ArrayList (java.util.ArrayList): 19 usages
PName (org.apache.phoenix.schema.PName): 18 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 17 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 17 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 17 usages
ColumnRef (org.apache.phoenix.schema.ColumnRef): 17 usages
Hint (org.apache.phoenix.parse.HintNode.Hint): 14 usages
PTableKey (org.apache.phoenix.schema.PTableKey): 14 usages
ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException): 13 usages
PColumnFamily (org.apache.phoenix.schema.PColumnFamily): 13 usages
PSmallint (org.apache.phoenix.schema.types.PSmallint): 13 usages
SQLException (java.sql.SQLException): 12 usages
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 12 usages
PColumnImpl (org.apache.phoenix.schema.PColumnImpl): 12 usages
Map (java.util.Map): 11 usages