
Example 1 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache.

From class MetaDataEndpointImpl, method loadSchema:

private PSchema loadSchema(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp) throws IOException, SQLException {
    Region region = env.getRegion();
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PSchema schema = (PSchema) metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (schema != null) {
        return schema;
    }
    ArrayList<byte[]> arrayList = new ArrayList<byte[]>(1);
    arrayList.add(key);
    List<PSchema> schemas = buildSchemas(arrayList, region, asOfTimeStamp, cacheKey);
    if (schemas != null) {
        return schemas.get(0);
    }
    // if not found, check whether a newer deleted schema exists as of the client timestamp
    if (schema == null && (schema = buildDeletedSchema(key, cacheKey, region, clientTimeStamp)) != null) {
        return schema;
    }
    return null;
}
Also used: PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), Region (org.apache.hadoop.hbase.regionserver.Region), PSchema (org.apache.phoenix.parse.PSchema)
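
The method above is a read-through (fault-in) cache: ask the Guava Cache first, and only fall back to scanning the region when the entry is missing. A minimal standalone sketch of the same get-or-build flow follows; the String value type, the size bound, and the buildEntity helper are illustrative stand-ins, not Phoenix's actual configuration (GlobalCache sizes its metadata cache by weight and stores PMetaDataEntity values).

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class ReadThroughCacheSketch {
    // Illustrative bound; GlobalCache sizes its cache by entry weight in practice
    private final Cache<String, String> cache =
            CacheBuilder.newBuilder().maximumSize(1000).build();

    public String load(String key) {
        // We always cache the latest version - check the cache first
        String cached = cache.getIfPresent(key);
        if (cached != null) {
            return cached;
        }
        // Fault in: build from the backing store (a region scan in Phoenix)
        String built = buildEntity(key);
        if (built != null) {
            cache.put(key, built); // buildSchemas populates the cache via cacheKey
        }
        return built; // null if the entity never existed
    }

    // Hypothetical stand-in for buildSchemas/buildFunctions/buildTable
    private String buildEntity(String key) {
        return null;
    }
}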

Example 2 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache.

From class MetaDataEndpointImpl, method loadFunction:

private PFunction loadFunction(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp, boolean isReplace, List<Mutation> deleteMutationsForReplace) throws IOException, SQLException {
    Region region = env.getRegion();
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PFunction function = (PFunction) metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (function != null && !isReplace) {
        return function;
    }
    ArrayList<byte[]> arrayList = new ArrayList<byte[]>(1);
    arrayList.add(key);
    List<PFunction> functions = buildFunctions(arrayList, region, asOfTimeStamp, isReplace, deleteMutationsForReplace);
    if (functions != null) {
        return functions.get(0);
    }
    // if not found, check whether a newer deleted function exists as of the client timestamp
    if (function == null && (function = buildDeletedFunction(key, cacheKey, region, clientTimeStamp)) != null) {
        return function;
    }
    return null;
}
Also used: PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), PFunction (org.apache.phoenix.parse.PFunction), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), Region (org.apache.hadoop.hbase.regionserver.Region)
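
loadFunction differs from loadSchema in one respect: when isReplace is true, a cache hit is deliberately ignored so the function is rebuilt from the stored rows and delete mutations for the old definition can be collected. A hedged sketch of that bypass-on-replace shape, with illustrative String values standing in for PFunction:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.List;

public class ReplaceAwareCacheSketch {
    private final Cache<String, String> cache =
            CacheBuilder.newBuilder().maximumSize(1000).build();

    // A cache hit is returned only when we are NOT replacing: a replace must
    // re-read the stored rows so the old definition's deletes can be collected.
    public String load(String key, boolean isReplace, List<String> deletesForReplace) {
        String cached = cache.getIfPresent(key);
        if (cached != null && !isReplace) {
            return cached; // fast path
        }
        if (isReplace && cached != null) {
            deletesForReplace.add("delete:" + key); // illustrative delete mutation
        }
        String rebuilt = "v2:" + key; // stand-in for buildFunctions(...)
        cache.put(key, rebuilt);
        return rebuilt;
    }
}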

Example 3 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache.

From class MetaDataEndpointImpl, method mutateColumn:

private MetaDataMutationResult mutateColumn(List<Mutation> tableMetadata, ColumnMutator mutator) throws IOException {
    byte[][] rowKeyMetaData = new byte[5][];
    MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
    byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    try {
        Region region = env.getRegion();
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            return result;
        }
        List<RowLock> locks = Lists.newArrayList();
        try {
            acquireLock(region, key, locks);
            ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
            invalidateList.add(cacheKey);
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
            if (logger.isDebugEnabled()) {
                if (table == null) {
                    logger.debug("Table " + Bytes.toStringBinary(key) + " not found in cache. Will build through scan");
                } else {
                    logger.debug("Table " + Bytes.toStringBinary(key) + " found in cache with timestamp " + table.getTimeStamp() + " seqNum " + table.getSequenceNumber());
                }
            }
            // Get client timeStamp from mutations
            long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            if (table == null && (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP)) == null) {
                // if not found then call newerTableExists and add delete marker for timestamp found
                table = buildDeletedTable(key, cacheKey, region, clientTimeStamp);
                if (table != null) {
                    logger.info("Found newer table deleted as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
                    return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
                }
                return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
            }
            if (table.getTimeStamp() >= clientTimeStamp) {
                logger.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
                return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table);
            } else if (isTableDeleted(table)) {
                return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
            }
            // lookup the sequence number in tableMetadata
            long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1;
            if (logger.isDebugEnabled()) {
                logger.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum " + expectedSeqNum + " and found seqNum " + table.getSequenceNumber() + " with " + table.getColumns().size() + " columns: " + table.getColumns());
            }
            if (expectedSeqNum != table.getSequenceNumber()) {
                if (logger.isDebugEnabled()) {
                    logger.debug("For table " + Bytes.toStringBinary(key) + " returning CONCURRENT_TABLE_MUTATION due to unexpected seqNum");
                }
                return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), table);
            }
            PTableType type = table.getType();
            if (type == PTableType.INDEX) {
                // Disallow mutation of an index table
                return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
            } else {
                // server-side, except for indexing, we always expect the keyvalues to be standard KeyValues
                PTableType expectedType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
                // We said to drop a table, but found a view or vice versa
                if (type != expectedType) {
                    return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
                }
            }
            result = mutator.updateMutation(table, rowKeyMetaData, tableMetadata, region, invalidateList, locks, clientTimeStamp);
            // if the update mutation caused tables to be deleted, the mutation code returned will be MutationCode.TABLE_ALREADY_EXISTS 
            if (result != null && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                return result;
            }
            region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            // Invalidate from cache
            for (ImmutableBytesPtr invalidateKey : invalidateList) {
                metaDataCache.invalidate(invalidateKey);
            }
            // Get client timeStamp from mutations, since it may get updated by the
            // mutateRowsWithLocks call
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            // if the update mutation caused tables to be deleted just return the result which will contain the table to be deleted
            if (result != null) {
                return result;
            } else {
                table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP);
                return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, table);
            }
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        ServerUtil.throwIOException(SchemaUtil.getTableName(schemaName, tableName), t);
        // impossible
        return null;
    }
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PTableType (org.apache.phoenix.schema.PTableType), ArrayList (java.util.ArrayList), PTable (org.apache.phoenix.schema.PTable), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), Region (org.apache.hadoop.hbase.regionserver.Region), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock)
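
The sequence-number comparison in the middle of mutateColumn is an optimistic-concurrency guard: the client's mutations carry the sequence number the table will advance to, so the server expects the stored table to be exactly one behind. A minimal sketch of that check in isolation (the class and method names are illustrative):

public class SeqNumGuardSketch {
    // The client's mutation carries the sequence number the table will advance
    // to, so the server expects the stored table to be exactly one behind it.
    // mutateColumn answers MutationCode.CONCURRENT_TABLE_MUTATION when this
    // returns true: another client mutated the table after this one read it.
    static boolean isConcurrentMutation(long storedSeqNum, long clientSeqNum) {
        long expectedSeqNum = clientSeqNum - 1;
        return expectedSeqNum != storedSeqNum;
    }
}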

Example 4 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache.

From class MetaDataEndpointImpl, method clearCache:

@Override
public void clearCache(RpcController controller, ClearCacheRequest request, RpcCallback<ClearCacheResponse> done) {
    GlobalCache cache = GlobalCache.getInstance(this.env);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    metaDataCache.invalidateAll();
    long unfreedBytes = cache.clearTenantCache();
    ClearCacheResponse.Builder builder = ClearCacheResponse.newBuilder();
    builder.setUnfreedBytes(unfreedBytes);
    done.run(builder.build());
}
Also used: GlobalCache (org.apache.phoenix.cache.GlobalCache), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ClearCacheResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse)
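
clearCache is the server half of an RPC endpoint. On the client side, the usual way to reach it is through the connection's query services; the sketch below assumes ConnectionQueryServices exposes a clearCache() method in this Phoenix version (the unwrap call itself is standard JDBC):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import org.apache.phoenix.jdbc.PhoenixConnection;

public class ClearCacheClientSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Assumption: ConnectionQueryServices.clearCache() exists in this
            // Phoenix version and fans the ClearCacheRequest RPC out to the
            // regions of SYSTEM.CATALOG, which invoke the endpoint above.
            conn.unwrap(PhoenixConnection.class).getQueryServices().clearCache();
        }
    }
}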

Example 5 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache.

From class MetaDataEndpointImpl, method doGetTable:

private PTable doGetTable(byte[] key, long clientTimeStamp, RowLock rowLock) throws IOException, SQLException {
    ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    // Even if we don't take the lock, we still won't get partial results
    // TODO: check that key is within region.getStartKey() and region.getEndKey()
    // and return special code to force client to lookup region from meta.
    Region region = env.getRegion();
    /*
         * Lock directly on key, though it may be an index table. This will just prevent a table
         * from getting rebuilt too often.
         */
    final boolean wasLocked = (rowLock != null);
    boolean blockWriteRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE, QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
    if (!wasLocked) {
        rowLock = region.getRowLock(key, false);
        if (rowLock == null) {
            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
        }
    }
    try {
        // Check the cache now that we hold the lock - another request may have
        // faulted the table in while we were waiting. We always cache the latest version.
        PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
        if (table != null && table.getTimeStamp() < clientTimeStamp) {
            // Table on client is up-to-date with table on server, so just return
            if (isTableDeleted(table)) {
                return null;
            }
            return table;
        }
        // Query for the latest table first, since it's not cached
        table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP);
        // Guard against a null table before dereferencing it for the index-disable check
        if (table != null && (table.getTimeStamp() < clientTimeStamp || (blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0))) {
            return table;
        }
        // Otherwise, query for an older version of the table - it won't be cached
        return buildTable(key, cacheKey, region, clientTimeStamp);
    } finally {
        if (!wasLocked)
            rowLock.release();
    }
}
Also used: PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Region (org.apache.hadoop.hbase.regionserver.Region), IOException (java.io.IOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), PTable (org.apache.phoenix.schema.PTable)
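
doGetTable is a lock-then-recheck fault-in: take the row lock unless the caller already holds one, check the cache once the lock is held (another request may have built the table while we waited), and only then scan. A generic sketch of the same shape, with a plain Java lock and map standing in for the HBase RowLock and the Guava cache:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

public class DoubleCheckedFaultInSketch {
    private final ConcurrentHashMap<String, String> cache = new ConcurrentHashMap<>();
    private final ReentrantLock lock = new ReentrantLock(); // stands in for region.getRowLock(key, false)

    public String get(String key, boolean callerHoldsLock) {
        boolean wasLocked = callerHoldsLock;
        if (!wasLocked) {
            lock.lock();
        }
        try {
            // Check the cache now that we hold the lock - another request may
            // have built and cached the entry while we were waiting.
            String value = cache.get(key);
            if (value != null) {
                return value;
            }
            value = buildFromStore(key); // stands in for buildTable(...)
            if (value != null) {
                cache.put(key, value);
            }
            return value;
        } finally {
            if (!wasLocked) {
                lock.unlock();
            }
        }
    }

    private String buildFromStore(String key) {
        return "built:" + key; // illustrative
    }
}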

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 27 uses
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 27 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 18 uses
PTable (org.apache.phoenix.schema.PTable): 14 uses
ArrayList (java.util.ArrayList): 11 uses
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 10 uses
Scan (org.apache.hadoop.hbase.client.Scan): 9 uses
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 9 uses
Cell (org.apache.hadoop.hbase.Cell): 7 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 7 uses
PSmallint (org.apache.phoenix.schema.types.PSmallint): 7 uses
PTinyint (org.apache.phoenix.schema.types.PTinyint): 7 uses
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 5 uses
PFunction (org.apache.phoenix.parse.PFunction): 5 uses
ByteString (com.google.protobuf.ByteString): 4 uses
PSchema (org.apache.phoenix.parse.PSchema): 4 uses
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 3 uses
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 3 uses
FunctionBytesPtr (org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr): 3 uses
PName (org.apache.phoenix.schema.PName): 3 uses