Example 21 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class MetaDataEndpointImpl, method loadSchema.

private PSchema loadSchema(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp) throws IOException, SQLException {
    Region region = env.getRegion();
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PSchema schema = (PSchema) metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (schema != null) {
        return schema;
    }
    ArrayList<byte[]> arrayList = new ArrayList<byte[]>(1);
    arrayList.add(key);
    List<PSchema> schemas = buildSchemas(arrayList, region, asOfTimeStamp, cacheKey);
    if (schemas != null) {
        return schemas.get(0);
    }
    // if not found, check whether a newer schema already exists and, if so,
    // add a delete marker for the timestamp found
    if (schema == null && (schema = buildDeletedSchema(key, cacheKey, region, clientTimeStamp)) != null) {
        return schema;
    }
    return null;
}
Also used: PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), Region (org.apache.hadoop.hbase.regionserver.Region), PSchema (org.apache.phoenix.parse.PSchema)
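
The cache above is keyed by ImmutableBytesPtr rather than by a raw byte[] because byte[] uses identity hashCode/equals, so two arrays with identical contents would never hit the same entry; ImmutableBytesPtr compares and hashes the wrapped byte range (and caches the hash). A minimal sketch of this property using a Guava cache; the class name CacheKeyDemo is hypothetical:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class CacheKeyDemo {
    public static void main(String[] args) {
        Cache<ImmutableBytesPtr, String> cache =
                CacheBuilder.newBuilder().maximumSize(100).build();
        cache.put(new ImmutableBytesPtr(Bytes.toBytes("MY_SCHEMA")), "cached entity");
        // A distinct ImmutableBytesPtr instance over equal bytes still hits:
        System.out.println(cache.getIfPresent(new ImmutableBytesPtr(Bytes.toBytes("MY_SCHEMA"))));
    }
}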

Example 22 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class MetaDataEndpointImpl, method dropTable.

@Override
public void dropTable(RpcController controller, DropTableRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    boolean isCascade = request.getCascade();
    byte[][] rowKeyMetaData = new byte[3][];
    String tableType = request.getTableType();
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        // Disallow deletion of a system table
        if (tableType.equals(PTableType.SYSTEM.getSerializedValue())) {
            builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
            builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
            done.run(builder.build());
            return;
        }
        List<byte[]> tableNamesToDelete = Lists.newArrayList();
        List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
        byte[] parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
        byte[] lockTableName = parentTableName == null ? tableName : parentTableName;
        byte[] lockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, lockTableName);
        byte[] key = parentTableName == null ? lockKey : SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        try {
            acquireLock(region, lockKey, locks);
            if (key != lockKey) {
                acquireLock(region, key, locks);
            }
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
            result = doDropTable(key, tenantIdBytes, schemaName, tableName, parentTableName, PTableType.fromSerializedValue(tableType), tableMetadata, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, isCascade);
            if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            // Commit the list of deletions.
            region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            for (ImmutableBytesPtr ckey : invalidateList) {
                metaDataCache.put(ckey, newDeletedTableMarker(currentTime));
            }
            if (parentTableName != null) {
                ImmutableBytesPtr parentCacheKey = new ImmutableBytesPtr(lockKey);
                metaDataCache.invalidate(parentCacheKey);
            }
            done.run(MetaDataMutationResult.toProto(result));
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("dropTable failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used: MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), ByteString (com.google.protobuf.ByteString), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), Region (org.apache.hadoop.hbase.regionserver.Region), Mutation (org.apache.hadoop.hbase.client.Mutation), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock)
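
Two details of the locking above are easy to miss: the parent row is locked before the child row (and key == lockKey is compared by reference, because key is assigned from lockKey when there is no parent), and every acquired lock is released in a finally block so no failure path can leak it. A standalone analogue of that discipline using plain JDK locks; RowLockDemo and its acquire helper are hypothetical stand-ins for the coprocessor's acquireLock and Region.releaseRowLocks:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

public class RowLockDemo {
    private final ConcurrentHashMap<String, ReentrantLock> locks = new ConcurrentHashMap<>();

    private void acquire(byte[] key, List<ReentrantLock> held) {
        ReentrantLock lock = locks.computeIfAbsent(Arrays.toString(key), k -> new ReentrantLock());
        lock.lock();
        held.add(lock);
    }

    public void dropUnderLocks(byte[] lockKey, byte[] key) {
        List<ReentrantLock> held = new ArrayList<>();
        try {
            acquire(lockKey, held);             // parent (or self) first
            if (!Arrays.equals(key, lockKey)) {
                acquire(key, held);             // child only when distinct
            }
            // ... perform the metadata mutations here ...
        } finally {
            for (ReentrantLock l : held) {      // mirrors region.releaseRowLocks(locks)
                l.unlock();
            }
        }
    }
}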

Example 23 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class MetaDataEndpointImpl, method loadFunction.

private PFunction loadFunction(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp, boolean isReplace, List<Mutation> deleteMutationsForReplace) throws IOException, SQLException {
    Region region = env.getRegion();
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PFunction function = (PFunction) metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (function != null && !isReplace) {
        return function;
    }
    ArrayList<byte[]> arrayList = new ArrayList<byte[]>(1);
    arrayList.add(key);
    List<PFunction> functions = buildFunctions(arrayList, region, asOfTimeStamp, isReplace, deleteMutationsForReplace);
    if (functions != null) {
        return functions.get(0);
    }
    // if not found, check whether a newer function already exists and, if so,
    // add a delete marker for the timestamp found
    if (function == null && (function = buildDeletedFunction(key, cacheKey, region, clientTimeStamp)) != null) {
        return function;
    }
    return null;
}
Also used: PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), PFunction (org.apache.phoenix.parse.PFunction), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), Region (org.apache.hadoop.hbase.regionserver.Region)
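
loadFunction has the same fault-in shape as loadSchema in Example 21: consult the cache, rebuild the entity from the region's rows on a miss, and finally fall back to a deleted-entity marker so callers can distinguish "dropped" from "never existed". A hypothetical distillation of that pattern with JDK types only; FaultInCache is not a Phoenix class:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class FaultInCache<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Function<K, V> buildFromStore;     // plays the role of buildFunctions(...)
    private final Function<K, V> buildDeletedMarker; // plays the role of buildDeletedFunction(...)

    public FaultInCache(Function<K, V> buildFromStore, Function<K, V> buildDeletedMarker) {
        this.buildFromStore = buildFromStore;
        this.buildDeletedMarker = buildDeletedMarker;
    }

    public V load(K key) {
        V cached = cache.get(key);
        if (cached != null) {
            return cached;                    // the latest version is always cached
        }
        V built = buildFromStore.apply(key);
        if (built != null) {
            cache.put(key, built);            // fault in on a miss
            return built;
        }
        return buildDeletedMarker.apply(key); // may be null: entity never existed
    }
}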

Example 24 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class MetaDataEndpointImpl, method doGetFunctions.

private List<PFunction> doGetFunctions(List<byte[]> keys, long clientTimeStamp) throws IOException, SQLException {
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    Region region = env.getRegion();
    Collections.sort(keys, new Comparator<byte[]>() {

        @Override
        public int compare(byte[] o1, byte[] o2) {
            return Bytes.compareTo(o1, o2);
        }
    });
    /*
     * Lock directly on each key. This will just prevent a function from
     * getting rebuilt too often.
     */
    List<RowLock> rowLocks = new ArrayList<Region.RowLock>(keys.size());
    try {
        for (int i = 0; i < keys.size(); i++) {
            Region.RowLock rowLock = region.getRowLock(keys.get(i), false);
            if (rowLock == null) {
                throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(keys.get(i)));
            }
            rowLocks.add(rowLock);
        }
        List<PFunction> functionsAvailable = new ArrayList<PFunction>(keys.size());
        int numFunctions = keys.size();
        Iterator<byte[]> iterator = keys.iterator();
        while (iterator.hasNext()) {
            byte[] key = iterator.next();
            PFunction function = (PFunction) metaDataCache.getIfPresent(new FunctionBytesPtr(key));
            if (function != null && function.getTimeStamp() < clientTimeStamp) {
                if (isFunctionDeleted(function)) {
                    return null;
                }
                functionsAvailable.add(function);
                iterator.remove();
            }
        }
        if (functionsAvailable.size() == numFunctions)
            return functionsAvailable;
        // Query for the latest functions next, since they're not cached
        List<PFunction> buildFunctions = buildFunctions(keys, region, clientTimeStamp, false, Collections.<Mutation>emptyList());
        if (buildFunctions == null || buildFunctions.isEmpty()) {
            return null;
        }
        functionsAvailable.addAll(buildFunctions);
        if (functionsAvailable.size() == numFunctions)
            return functionsAvailable;
        return null;
    } finally {
        for (Region.RowLock lock : rowLocks) {
            lock.release();
        }
        rowLocks.clear();
    }
}
Also used: PFunction (org.apache.phoenix.parse.PFunction), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), IOException (java.io.IOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), Region (org.apache.hadoop.hbase.regionserver.Region), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock), FunctionBytesPtr (org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr)
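
Note that the keys are sorted before any row lock is taken. Acquiring locks in one consistent global order is the standard way to avoid deadlock when concurrent callers lock overlapping key sets. A small sketch of the idea, assuming HBase's Bytes.BYTES_COMPARATOR (the same byte-wise ordering the anonymous Comparator above implements); SortedLockOrder is a hypothetical name:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;

public class SortedLockOrder {
    public static void main(String[] args) {
        // Whatever order callers receive keys in, sorting first means every
        // caller acquires the corresponding row locks in the same order.
        List<byte[]> keys = Arrays.asList(Bytes.toBytes("FN_B"), Bytes.toBytes("FN_A"));
        keys.sort(Bytes.BYTES_COMPARATOR);
        for (byte[] k : keys) {
            System.out.println(Bytes.toStringBinary(k)); // FN_A, then FN_B
        }
    }
}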

Example 25 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class GroupedAggregateRegionObserver, method doPostScannerOpen.

/**
 * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the
 * list of expressions from the scan and returns the aggregated rows of each group. For
 * example, given the following original rows in the RegionScanner:
 *
 *   KEY   COL1
 *   row1  a
 *   row2  b
 *   row3  a
 *   row4  a
 *
 * the following rows will be returned for COUNT(*):
 *
 *   KEY  COUNT
 *   a    3
 *   b    1
 *
 * The client is required to do a sort and a final aggregation, since multiple rows with
 * the same key may be returned from different regions.
 */
@Override
protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
    boolean keyOrdered = false;
    byte[] expressionBytes = scan.getAttribute(BaseScannerRegionObserver.UNORDERED_GROUP_BY_EXPRESSIONS);
    if (expressionBytes == null) {
        expressionBytes = scan.getAttribute(BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS);
        keyOrdered = true;
    }
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        /*
             * For local indexes, we need to set an offset on row key expressions to skip
             * the region start key.
             */
        Region region = c.getEnvironment().getRegion();
        offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offset);
    }
    List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
    ServerAggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), c.getEnvironment().getConfiguration());
    RegionScanner innerScanner = s;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    boolean useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    if (ScanUtil.isLocalIndex(scan) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
        innerScanner = getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, c.getEnvironment().getRegion(), indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        innerScanner = new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan), c.getEnvironment(), useQualifierAsIndex, useNewValueColumnQualifier);
    }
    long limit = Long.MAX_VALUE;
    byte[] limitBytes = scan.getAttribute(GROUP_BY_LIMIT);
    if (limitBytes != null) {
        limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault());
    }
    if (keyOrdered) {
        // already in the required group by key order
        return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit);
    } else {
        // Otherwise, collect them all up in an in-memory map
        return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit);
    }
}
Also used: TupleProjector (org.apache.phoenix.execute.TupleProjector), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ServerAggregators (org.apache.phoenix.expression.aggregator.ServerAggregators), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), IndexMaintainer (org.apache.phoenix.index.IndexMaintainer), Expression (org.apache.phoenix.expression.Expression), HashJoinInfo (org.apache.phoenix.join.HashJoinInfo), Region (org.apache.hadoop.hbase.regionserver.Region), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
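
The tempPtr handed to getWrappedScanner illustrates the other common use of ImmutableBytesPtr: a single mutable pointer allocated once and repointed at each row's bytes via set(...), so the per-row scan path does not create a fresh wrapper for every evaluated expression. A minimal sketch; TempPtrDemo is a hypothetical name:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class TempPtrDemo {
    public static void main(String[] args) {
        ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
        byte[] row = Bytes.toBytes("row1a-payload");
        tempPtr.set(row, 0, 4); // point at a sub-range without copying
        System.out.println(Bytes.toStringBinary(tempPtr.get(), tempPtr.getOffset(), tempPtr.getLength()));
    }
}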

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 120 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 31 usages
PTable (org.apache.phoenix.schema.PTable): 28 usages
ArrayList (java.util.ArrayList): 27 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 22 usages
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 22 usages
Test (org.junit.Test): 21 usages
Cell (org.apache.hadoop.hbase.Cell): 20 usages
Put (org.apache.hadoop.hbase.client.Put): 18 usages
List (java.util.List): 15 usages
Scan (org.apache.hadoop.hbase.client.Scan): 15 usages
Pair (org.apache.hadoop.hbase.util.Pair): 15 usages
IOException (java.io.IOException): 14 usages
Expression (org.apache.phoenix.expression.Expression): 14 usages
PColumn (org.apache.phoenix.schema.PColumn): 14 usages
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 13 usages
PSmallint (org.apache.phoenix.schema.types.PSmallint): 12 usages
HashMap (java.util.HashMap): 11 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 11 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 11 usages