Example 96 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

In the class MetaDataEndpointImpl, method dropSchema.

@Override
public void dropSchema(RpcController controller, DropSchemaRequest request, RpcCallback<MetaDataResponse> done) {
    String schemaName = null;
    try {
        List<Mutation> schemaMetaData = ProtobufUtil.getMutations(request);
        schemaName = request.getSchemaName();
        byte[] lockKey = SchemaUtil.getSchemaKey(schemaName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMetaData);
        try {
            acquireLock(region, lockKey, locks);
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>(1);
            result = doDropSchema(clientTimeStamp, schemaName, lockKey, schemaMetaData, invalidateList);
            if (result.getMutationCode() != MutationCode.SCHEMA_ALREADY_EXISTS) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            region.mutateRowsWithLocks(schemaMetaData, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            long currentTime = MetaDataUtil.getClientTimeStamp(schemaMetaData);
            for (ImmutableBytesPtr ptr : invalidateList) {
                metaDataCache.invalidate(ptr);
                metaDataCache.put(ptr, newDeletedSchemaMarker(currentTime));
            }
            done.run(MetaDataMutationResult.toProto(result));
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("drop schema failed:", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
    }
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), ByteString (com.google.protobuf.ByteString), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), Region (org.apache.hadoop.hbase.regionserver.Region), Mutation (org.apache.hadoop.hbase.client.Mutation), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock)
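
The invalidation loop above works because ImmutableBytesPtr compares and hashes by byte content (the hash code is computed once and cached), so a freshly constructed pointer over the same row key finds the existing cache entry. A minimal sketch of that property, assuming Phoenix and HBase on the classpath; the demo class itself is hypothetical:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class ImmutableBytesPtrKeyDemo {
    public static void main(String[] args) {
        // Two pointers over equal byte content are equal and share a hash code,
        // which is what makes ImmutableBytesPtr usable as a cache key.
        ImmutableBytesPtr a = new ImmutableBytesPtr(Bytes.toBytes("MY_SCHEMA"));
        ImmutableBytesPtr b = new ImmutableBytesPtr(Bytes.toBytes("MY_SCHEMA"));
        System.out.println(a.equals(b));                  // true
        System.out.println(a.hashCode() == b.hashCode()); // true
    }
}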

Example 97 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

In the class MetaDataEndpointImpl, method loadTable.

private PTable loadTable(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp) throws IOException, SQLException {
    Region region = env.getRegion();
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp)) != null) {
        return table;
    }
    // Not found: check whether the table was deleted, and if so, build and cache a deleted-table marker
    if (table == null && (table = buildDeletedTable(key, cacheKey, region, clientTimeStamp)) != null) {
        return table;
    }
    return null;
}
Also used: PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Region (org.apache.hadoop.hbase.regionserver.Region), PTable (org.apache.phoenix.schema.PTable)
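
loadTable treats the Guava cache returned by GlobalCache as a read-through store keyed by ImmutableBytesPtr: hit the cache first, otherwise fault the table in. A minimal sketch of that shape, assuming Guava on the classpath and with the value type simplified to String; the demo class is hypothetical:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class MetaDataCacheSketch {
    public static void main(String[] args) {
        // A bounded Guava cache keyed by ImmutableBytesPtr, mirroring the shape
        // of the metadata cache used in loadTable (value type simplified here).
        Cache<ImmutableBytesPtr, String> cache = CacheBuilder.newBuilder()
                .maximumSize(1000)
                .build();
        cache.put(new ImmutableBytesPtr(Bytes.toBytes("T1")), "cached-table");
        // A fresh pointer over the same bytes hits the same entry.
        System.out.println(cache.getIfPresent(new ImmutableBytesPtr(Bytes.toBytes("T1"))));
    }
}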

Example 98 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

In the class MetaDataEndpointImpl, method createFunction.

@Override
public void createFunction(RpcController controller, CreateFunctionRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[][] rowKeyMetaData = new byte[2][];
    byte[] functionName = null;
    try {
        List<Mutation> functionMetaData = ProtobufUtil.getMutations(request);
        boolean temporaryFunction = request.getTemporary();
        MetaDataUtil.getTenantIdAndFunctionName(functionMetaData, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX];
        byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData);
        try {
            acquireLock(region, lockKey, locks);
            // Get as of latest timestamp so we can detect if we have a newer function that already
            // exists without making an additional query
            ImmutableBytesPtr cacheKey = new FunctionBytesPtr(lockKey);
            PFunction function = loadFunction(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp, request.getReplace(), functionMetaData);
            if (function != null) {
                if (function.getTimeStamp() < clientTimeStamp) {
                    // The function is older than the client timestamp; if it was deleted, continue on and recreate it
                    if (!isFunctionDeleted(function)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_ALREADY_EXISTS);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.addFunction(PFunction.toProto(function));
                        done.run(builder.build());
                        if (!request.getReplace()) {
                            return;
                        }
                    }
                } else {
                    builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_FUNCTION_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    builder.addFunction(PFunction.toProto(function));
                    done.run(builder.build());
                    return;
                }
            }
            // Don't store function info for temporary functions.
            if (!temporaryFunction) {
                region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            }
            // Invalidate the cache - the next getFunction call will add it
            // TODO: consider loading the function that was just created here, patching up the parent function, and updating the cache
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            metaDataCache.invalidate(cacheKey);
            // Get timeStamp from mutations - the above method sets it if it's unset
            long currentTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData);
            builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND);
            builder.setMutationTime(currentTimeStamp);
            done.run(builder.build());
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("createFunction failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(Bytes.toString(functionName), t));
    }
}
Also used: MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), PFunction (org.apache.phoenix.parse.PFunction), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), Region (org.apache.hadoop.hbase.regionserver.Region), Mutation (org.apache.hadoop.hbase.client.Mutation), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock), FunctionBytesPtr (org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr)
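
Note the cache key here is a FunctionBytesPtr rather than a plain ImmutableBytesPtr: it only compares equal to other FunctionBytesPtr instances, which keeps function entries distinct from table entries stored in the same shared metadata cache. A sketch of that key-namespacing pattern; NamespacedBytesPtr is a hypothetical stand-in for GlobalCache.FunctionBytesPtr:

import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

// Hypothetical subclass illustrating the pattern: same bytes, distinct type,
// so entries of different kinds never collide in one cache.
class NamespacedBytesPtr extends ImmutableBytesPtr {
    NamespacedBytesPtr(byte[] key) {
        super(key);
    }

    @Override
    public boolean equals(Object obj) {
        // Equal only to keys of the same subtype over the same bytes.
        return (obj instanceof NamespacedBytesPtr) && super.equals(obj);
    }

    @Override
    public int hashCode() {
        return super.hashCode();
    }
}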

Example 99 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

In the class MetaDataEndpointImpl, method clearTableFromCache.

@Override
public void clearTableFromCache(RpcController controller, ClearTableFromCacheRequest request, RpcCallback<ClearTableFromCacheResponse> done) {
    byte[] schemaName = request.getSchemaName().toByteArray();
    byte[] tableName = request.getTableName().toByteArray();
    try {
        byte[] tenantId = request.getTenantId().toByteArray();
        byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
        Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
        metaDataCache.invalidate(cacheKey);
    } catch (Throwable t) {
        logger.error("incrementTableTimeStamp failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used: PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)
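
The cache key is simply the SYSTEM.CATALOG row key wrapped in an ImmutableBytesPtr. A minimal sketch of building one outside the coprocessor, assuming an empty tenant id; the demo class is hypothetical:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.SchemaUtil;

public class TableCacheKeyDemo {
    public static void main(String[] args) {
        // getTableKey concatenates tenant id, schema name and table name into
        // the SYSTEM.CATALOG row key; wrapping it yields the cache key.
        byte[] key = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
                Bytes.toBytes("MY_SCHEMA"), Bytes.toBytes("MY_TABLE"));
        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
        System.out.println(Bytes.toStringBinary(key) + " hash=" + cacheKey.hashCode());
    }
}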

Example 100 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

In the class MetaDataEndpointImpl, method addColumnsAndTablePropertiesToChildViews.

private MetaDataMutationResult addColumnsAndTablePropertiesToChildViews(PTable basePhysicalTable, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, Region region, List<RowLock> locks) throws IOException, SQLException {
    List<PutWithOrdinalPosition> columnPutsForBaseTable = Lists.newArrayListWithExpectedSize(tableMetadata.size());
    Map<TableProperty, Cell> tablePropertyCellMap = Maps.newHashMapWithExpectedSize(tableMetadata.size());
    // Isolate the puts relevant to adding columns. Also figure out what kind of columns are being added.
    for (Mutation m : tableMetadata) {
        if (m instanceof Put) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            // check if this put is for adding a column
            if (pkCount > COLUMN_NAME_INDEX && rkmd[COLUMN_NAME_INDEX] != null && rkmd[COLUMN_NAME_INDEX].length > 0 && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnPutsForBaseTable.add(new PutWithOrdinalPosition((Put) m, getInteger((Put) m, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES)));
            } else if (pkCount <= COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                // otherwise, check if the put is for a table property
                for (Cell cell : m.getFamilyCellMap().get(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)) {
                    for (TableProperty tableProp : TableProperty.values()) {
                        byte[] propNameBytes = Bytes.toBytes(tableProp.getPropertyName());
                        if (Bytes.compareTo(propNameBytes, 0, propNameBytes.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) == 0 && tableProp.isValidOnView() && tableProp.isMutable()) {
                            Cell tablePropCell = CellUtil.createCell(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell));
                            tablePropertyCellMap.put(tableProp, tablePropCell);
                        }
                    }
                }
            }
        }
    }
    // Sort the puts by ordinal position 
    Collections.sort(columnPutsForBaseTable);
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short deltaNumPkColsSoFar = 0;
        short columnsAddedToView = 0;
        short columnsAddedToBaseTable = 0;
        byte[] tenantId = viewInfo.getTenantId();
        byte[] schema = viewInfo.getSchemaName();
        byte[] table = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(tenantId, schema, table);
        // lock the rows corresponding to views so that no other thread can modify the view meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        List<PColumn> viewPkCols = new ArrayList<>(view.getPKColumns());
        boolean addingExistingPkCol = false;
        int numCols = view.getColumns().size();
        // add the new columns to the child view
        for (PutWithOrdinalPosition p : columnPutsForBaseTable) {
            Put baseTableColumnPut = p.put;
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(baseTableColumnPut.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
            // ignore since it means that the column family is not present for the column to be added.
            } catch (ColumnNotFoundException e) {
            // ignore since it means the column is not present in the view
            }
            boolean isPkCol = columnFamily == null;
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            if (existingViewColumn != null) {
                MetaDataMutationResult result = validateColumnForAddToBaseTable(existingViewColumn, baseTableColumnPut, basePhysicalTable, isPkCol, view);
                if (result != null) {
                    return result;
                }
                if (isPkCol) {
                    viewPkCols.remove(existingViewColumn);
                    addingExistingPkCol = true;
                }
                /*
                 * For views that are not diverged, we need to make sure that the existing columns
                 * have the same ordinal position as in the base table. This is important because
                 * we rely on the ordinal position of the column to figure out whether dropping a
                 * column from the view will end up diverging the view from the base table.
                 *
                 * For already diverged views, we don't care about the ordinal position of the existing column.
                 */
                if (!isDivergedView(view)) {
                    int newOrdinalPosition = p.ordinalPosition;
                    // Check if the ordinal position of the column was getting updated from previous add column
                    // mutations.
                    int existingOrdinalPos = ordinalPositionList.getOrdinalPositionOfColumn(columnKey);
                    if (ordinalPositionList.size() == 0) {
                        /*
                         * No ordinal positions to be updated are in the list. In that case, check whether the
                         * existing ordinal position of the column is different from its new ordinal position.
                         * If yes, then initialize the ordinal position list with this column's ordinal position
                         * as the offset.
                         */
                        existingOrdinalPos = getOrdinalPosition(view, existingViewColumn);
                        if (existingOrdinalPos != newOrdinalPosition) {
                            ordinalPositionList.setOffset(newOrdinalPosition);
                            ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                            for (PColumn col : view.getColumns()) {
                                int ordinalPos = getOrdinalPosition(view, col);
                                if (ordinalPos >= newOrdinalPosition) {
                                    if (ordinalPos == existingOrdinalPos) {
                                        /*
                                         * No need to update ordinal positions of columns beyond the existing column's
                                         * old ordinal position.
                                         */
                                        break;
                                    }
                                    // increment ordinal position of columns occurring after this column by 1
                                    int updatedPos = ordinalPos + 1;
                                    ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
                                }
                            }
                        }
                    } else {
                        if (existingOrdinalPos != newOrdinalPosition) {
                            ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                        }
                    }
                    columnsAddedToBaseTable++;
                }
            } else {
                // The column doesn't exist in the view.
                Put viewColumnPut = new Put(columnKey, clientTimeStamp);
                for (Cell cell : baseTableColumnPut.getFamilyCellMap().values().iterator().next()) {
                    viewColumnPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell)));
                }
                if (isDivergedView(view)) {
                    if (isPkCol) {
                        /*
                         * Only pk cols of the base table are added to the diverged views. These pk
                         * cols are added at the end.
                         */
                        int lastOrdinalPos = getOrdinalPosition(view, view.getColumns().get(numCols - 1));
                        int newPosition = lastOrdinalPos + 1;
                        byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
                        PInteger.INSTANCE.getCodec().encodeInt(newPosition, ptr, 0);
                        viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr);
                        mutationsForAddingColumnsToViews.add(viewColumnPut);
                    } else {
                        // move on to the next column
                        continue;
                    }
                } else {
                    int newOrdinalPosition = p.ordinalPosition;
                    /*
                     * For a non-diverged view, we need to make sure that the base table column
                     * is added at the right position.
                     */
                    if (ordinalPositionList.size() == 0) {
                        ordinalPositionList.setOffset(newOrdinalPosition);
                        ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                        for (PColumn col : view.getColumns()) {
                            int ordinalPos = getOrdinalPosition(view, col);
                            if (ordinalPos >= newOrdinalPosition) {
                                // increment ordinal position of columns by 1
                                int updatedPos = ordinalPos + 1;
                                ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
                            }
                        }
                    } else {
                        ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                    }
                    mutationsForAddingColumnsToViews.add(viewColumnPut);
                }
                if (isPkCol) {
                    deltaNumPkColsSoFar++;
                    // Set the key sequence for the pk column to be added
                    short currentKeySeq = SchemaUtil.getMaxKeySeq(view);
                    short newKeySeq = (short) (currentKeySeq + deltaNumPkColsSoFar);
                    byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
                    PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
                    viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
                    addMutationsForAddingPkColsToViewIndexes(mutationsForAddingColumnsToViews, clientTimeStamp, view, deltaNumPkColsSoFar, columnName, viewColumnPut);
                }
                columnsAddedToView++;
                columnsAddedToBaseTable++;
            }
        }
        /*
         * Allow adding pk columns to the base table only if: 1. all the view pk columns are exactly
         * the same as the base table pk columns, or 2. we are adding all the existing view pk columns
         * to the base table.
         */
        if (addingExistingPkCol && !viewPkCols.equals(basePhysicalTable.getPKColumns())) {
            return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
        }
        addViewIndexesHeaderRowMutations(mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, view, deltaNumPkColsSoFar);
        // set table properties in child view
        if (!tablePropertyCellMap.isEmpty()) {
            Put viewHeaderRowPut = new Put(viewKey, clientTimeStamp);
            for (TableProperty tableProp : TableProperty.values()) {
                Cell tablePropertyCell = tablePropertyCellMap.get(tableProp);
                if (tablePropertyCell != null) {
                    // Set the property on the view if it is not mutable on a view (so it always matches the base table),
                    // or if it is mutable on a view and its value still matches the base table property (meaning it wasn't changed on the view)
                    if (!tableProp.isMutableOnView() || tableProp.getPTableValue(view).equals(tableProp.getPTableValue(basePhysicalTable))) {
                        viewHeaderRowPut.add(CellUtil.createCell(viewKey, CellUtil.cloneFamily(tablePropertyCell), CellUtil.cloneQualifier(tablePropertyCell), clientTimeStamp, tablePropertyCell.getTypeByte(), CellUtil.cloneValue(tablePropertyCell)));
                    }
                }
            }
            byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()];
            PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0);
            viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr);
            // invalidate the view so that it is removed from the cache
            invalidateList.add(new ImmutableBytesPtr(viewKey));
            mutationsForAddingColumnsToViews.add(viewHeaderRowPut);
        }
        /*
         * Increment the sequence number by 1 if:
         * 1) for a diverged view, columns (pk columns) were added to the view, or
         * 2) for a non-diverged view, the base column count changed.
         */
        boolean changeSequenceNumber = (isDivergedView(view) && columnsAddedToView > 0) || (!isDivergedView(view) && columnsAddedToBaseTable > 0);
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, columnsAddedToView, columnsAddedToBaseTable, viewKey, view, ordinalPositionList, numCols, changeSequenceNumber);
    }
    return null;
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), ByteString (com.google.protobuf.ByteString), Put (org.apache.hadoop.hbase.client.Put), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint), PTable (org.apache.phoenix.schema.PTable), ColumnFamilyNotFoundException (org.apache.phoenix.schema.ColumnFamilyNotFoundException), PColumn (org.apache.phoenix.schema.PColumn), ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException), Mutation (org.apache.hadoop.hbase.client.Mutation), Cell (org.apache.hadoop.hbase.Cell), TableProperty (org.apache.phoenix.schema.TableProperty), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock)
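
Several steps above serialize ordinal positions, key sequences, and sequence numbers through Phoenix's type codecs (PInteger, PSmallint, PLong). A minimal round-trip sketch with PInteger; the demo class is hypothetical:

import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PInteger;

public class OrdinalPositionCodecDemo {
    public static void main(String[] args) {
        // The codec writes a fixed-width, sort-order-aware encoding, matching
        // how ORDINAL_POSITION values are written in the example above.
        byte[] buf = new byte[PInteger.INSTANCE.getByteSize()];
        PInteger.INSTANCE.getCodec().encodeInt(42, buf, 0);
        int decoded = PInteger.INSTANCE.getCodec().decodeInt(buf, 0, SortOrder.ASC);
        System.out.println(decoded); // prints 42
    }
}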

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 120
Mutation (org.apache.hadoop.hbase.client.Mutation): 31
PTable (org.apache.phoenix.schema.PTable): 28
ArrayList (java.util.ArrayList): 27
Region (org.apache.hadoop.hbase.regionserver.Region): 22
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 22
Test (org.junit.Test): 21
Cell (org.apache.hadoop.hbase.Cell): 20
Put (org.apache.hadoop.hbase.client.Put): 18
List (java.util.List): 15
Scan (org.apache.hadoop.hbase.client.Scan): 15
Pair (org.apache.hadoop.hbase.util.Pair): 15
IOException (java.io.IOException): 14
Expression (org.apache.phoenix.expression.Expression): 14
PColumn (org.apache.phoenix.schema.PColumn): 14
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 13
PSmallint (org.apache.phoenix.schema.types.PSmallint): 12
HashMap (java.util.HashMap): 11
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 11
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 11