
Example 11 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache, in class MetaDataEndpointImpl, method buildDeletedSchema.

private PSchema buildDeletedSchema(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    List<Cell> results = Lists.<Cell>newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        Cell kv = results.get(0);
        if (kv.getTypeByte() == Type.Delete.getCode()) {
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            PSchema schema = newDeletedSchemaMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, schema);
            return schema;
        }
    }
    return null;
}
Also used: RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PSchema(org.apache.phoenix.parse.PSchema) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell)
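
The trick in buildDeletedSchema is the raw scan: because HBase ignores the scan's time range in raw mode (HBASE-7362), the method fetches the newest cell unconditionally and checks its timestamp and type byte by hand to see whether the row's most recent operation was a delete. Below is a minimal client-side sketch of the same probe, assuming an HBase 1.x Table handle instead of a coprocessor's Region; the class and method names are illustrative, not Phoenix API.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

final class DeleteMarkerProbe {

    /**
     * Returns the timestamp of the delete marker heading the given row, or -1
     * if the row's newest cell is not a delete marker newer than maxTimestamp.
     */
    static long findDeleteMarkerTimestamp(Table table, byte[] rowKey, long maxTimestamp)
            throws IOException {
        Scan scan = new Scan(rowKey);
        // Only the first (newest) cell of the row is needed.
        scan.setFilter(new FirstKeyOnlyFilter());
        // Raw mode returns delete markers alongside regular cells.
        scan.setRaw(true);
        try (ResultScanner scanner = table.getScanner(scan)) {
            Result result = scanner.next();
            // Guard against the scan sliding past a nonexistent row.
            if (result != null && !result.isEmpty() && Bytes.equals(result.getRow(), rowKey)) {
                Cell first = result.rawCells()[0];
                // Raw scans ignore time ranges (HBASE-7362), so filter by hand.
                if (first.getTimestamp() > maxTimestamp
                        && first.getTypeByte() == Type.Delete.getCode()) {
                    return first.getTimestamp();
                }
            }
        }
        return -1;
    }
}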

Example 12 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache, in class MetaDataEndpointImpl, method buildDeletedFunction.

private PFunction buildDeletedFunction(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    List<Cell> results = Lists.<Cell>newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        Cell kv = results.get(0);
        if (kv.getTypeByte() == Type.Delete.getCode()) {
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            PFunction function = newDeletedFunctionMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, function);
            return function;
        }
    }
    return null;
}
Also used: RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) PFunction(org.apache.phoenix.parse.PFunction) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell)
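
Both deleted-entity builders end the same way: once the delete marker is found, a sentinel "deleted" object is put into the server-side metadata cache under the same key, so later lookups short-circuit on the tombstone instead of re-scanning SYSTEM.CATALOG. A minimal sketch of that tombstone pattern with a Guava cache follows; the DeletedMarker type is a hypothetical stand-in for the marker PSchema/PFunction instances that Phoenix's newDeletedSchemaMarker/newDeletedFunctionMarker helpers build.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

final class TombstoneCacheSketch {

    /** Hypothetical stand-in for Phoenix's deleted-entity marker objects. */
    static final class DeletedMarker {
        final long deleteTimestamp;
        DeletedMarker(long deleteTimestamp) { this.deleteTimestamp = deleteTimestamp; }
    }

    private final Cache<String, Object> metaDataCache =
            CacheBuilder.newBuilder().maximumSize(10_000).build();

    /** Records that the entity under cacheKey was deleted at deleteTimestamp. */
    void markDeleted(String cacheKey, long deleteTimestamp) {
        // Subsequent lookups hit the tombstone and can answer "deleted at T"
        // without scanning the catalog table again.
        metaDataCache.put(cacheKey, new DeletedMarker(deleteTimestamp));
    }

    /** Returns the cached delete timestamp, or -1 if no tombstone is cached. */
    long deletedAt(String cacheKey) {
        Object cached = metaDataCache.getIfPresent(cacheKey);
        return cached instanceof DeletedMarker ? ((DeletedMarker) cached).deleteTimestamp : -1;
    }
}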

Example 13 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache, in class MetaDataEndpointImpl, method loadTable.

private PTable loadTable(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp) throws IOException, SQLException {
    Region region = env.getRegion();
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp)) != null) {
        return table;
    }
    // Not found live: check whether a newer deleted table exists and, if so,
    // return the delete marker for the timestamp found.
    if (table == null && (table = buildDeletedTable(key, cacheKey, region, clientTimeStamp)) != null) {
        return table;
    }
    return null;
}
Also used: PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Region(org.apache.hadoop.hbase.regionserver.Region) PTable(org.apache.phoenix.schema.PTable)
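
loadTable is a compact cache-aside ("fault in") lookup: try the cache, build from the region on a miss (buildTable also caches what it builds), and only then fall back to looking for a deleted version. Note that the first condition leans on Java's short-circuit ||, so buildTable runs only when getIfPresent returned null. A stripped-down sketch of the same control flow, with plain Function loaders standing in for Phoenix's buildTable/buildDeletedTable:

import java.util.function.Function;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

final class FaultInLookupSketch {

    private final Cache<String, Object> cache =
            CacheBuilder.newBuilder().maximumSize(10_000).build();

    /**
     * Cache-aside lookup mirroring loadTable. Both loaders may return null;
     * 'build' is assumed to cache whatever it constructs, as buildTable does.
     */
    Object load(String key, Function<String, Object> build, Function<String, Object> buildDeleted) {
        Object value = cache.getIfPresent(key);
        // Short-circuit ||: build.apply(key) only runs on a cache miss.
        if (value != null || (value = build.apply(key)) != null) {
            return value;
        }
        // Not found live; check whether a newer deleted version exists.
        return buildDeleted.apply(key);
    }
}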

Example 14 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache, in class MetaDataEndpointImpl, method doDropTable.

private MetaDataMutationResult doDropTable(byte[] key, byte[] tenantId, byte[] schemaName, byte[] tableName, byte[] parentTableName, PTableType tableType, List<Mutation> rowsToDelete, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete, boolean isCascade) throws IOException, SQLException {
    long clientTimeStamp = MetaDataUtil.getClientTimeStamp(rowsToDelete);
    Region region = env.getRegion();
    ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (table != null || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP)) != null) {
        if (table.getTimeStamp() < clientTimeStamp) {
            if (isTableDeleted(table) || tableType != table.getType()) {
                return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
            }
        } else {
            return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
    }
    // We didn't find a live table, so either there was never a table or
    // there was a table, but it's been deleted. In either case we want to return.
    if (table == null) {
        if (buildDeletedTable(key, cacheKey, region, clientTimeStamp) != null) {
            return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
        return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
    }
    // Make sure we're not deleting the "wrong" child
    if (parentTableName != null && table.getParentTableName() != null && !Arrays.equals(parentTableName, table.getParentTableName().getBytes())) {
        return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
    }
    // Since we don't allow back in time DDL, we know if we have a table it's the one
    // we want to delete. FIXME: we shouldn't need a scan here, but should be able to
    // use the table to generate the Delete markers.
    Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
    List<byte[]> indexNames = Lists.newArrayList();
    List<Cell> results = Lists.newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
        if (results.isEmpty()) {
            // Should not be possible
            return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
        if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
            // Handle any child views that exist
            TableViewFinder tableViewFinderResult = findChildViews(region, tenantId, table);
            if (tableViewFinderResult.hasViews()) {
                if (isCascade) {
                    if (tableViewFinderResult.allViewsInMultipleRegions()) {
                        // view metadata spans multiple regions
                        return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
                    } else if (tableViewFinderResult.allViewsInSingleRegion()) {
                        // Recursively delete views - safe as all the views are in the same region
                        for (ViewInfo viewInfo : tableViewFinderResult.getViewInfoList()) {
                            byte[] viewTenantId = viewInfo.getTenantId();
                            byte[] viewSchemaName = viewInfo.getSchemaName();
                            byte[] viewName = viewInfo.getViewName();
                            byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
                            Delete delete = new Delete(viewKey, clientTimeStamp);
                            rowsToDelete.add(delete);
                            acquireLock(region, viewKey, locks);
                            MetaDataMutationResult result = doDropTable(viewKey, viewTenantId, viewSchemaName, viewName, null, PTableType.VIEW, rowsToDelete, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, false);
                            if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                                return result;
                            }
                        }
                    }
                } else {
                    // DROP without CASCADE on tables with child views is not permitted
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
                }
            }
        }
        // Add to list of HTables to delete, unless it's a view or it's a shared index
        if (tableType != PTableType.VIEW && table.getViewIndexId() == null) {
            tableNamesToDelete.add(table.getPhysicalName().getBytes());
        } else {
            sharedTablesToDelete.add(new SharedTableState(table));
        }
        invalidateList.add(cacheKey);
        byte[][] rowKeyMetaData = new byte[5][];
        do {
            Cell kv = results.get(LINK_TYPE_INDEX);
            int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, rowKeyMetaData);
            if (nColumns == 5 && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0 && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0) {
                LinkType linkType = LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]);
                if (rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0 && linkType == LinkType.INDEX_TABLE) {
                    indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
                } else if (linkType == LinkType.PARENT_TABLE || linkType == LinkType.PHYSICAL_TABLE) {
                    // delete parent->child link for views
                    Cell parentTenantIdCell = MetaDataUtil.getCell(results, PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES);
                    PName parentTenantId = parentTenantIdCell != null ? PNameFactory.newName(parentTenantIdCell.getValueArray(), parentTenantIdCell.getValueOffset(), parentTenantIdCell.getValueLength()) : null;
                    byte[] linkKey = MetaDataUtil.getChildLinkKey(parentTenantId, table.getParentSchemaName(), table.getParentTableName(), table.getTenantId(), table.getName());
                    Delete linkDelete = new Delete(linkKey, clientTimeStamp);
                    rowsToDelete.add(linkDelete);
                }
            }
            // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
            // FIXME: the version of the Delete constructor without the lock args was introduced
            // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
            // of the client.
            Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp);
            rowsToDelete.add(delete);
            results.clear();
            scanner.next(results);
        } while (!results.isEmpty());
    }
    // Recursively delete indexes
    for (byte[] indexName : indexNames) {
        byte[] indexKey = SchemaUtil.getTableKey(tenantId, schemaName, indexName);
        // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
        // FIXME: the version of the Delete constructor without the lock args was introduced
        // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
        // of the client.
        Delete delete = new Delete(indexKey, clientTimeStamp);
        rowsToDelete.add(delete);
        acquireLock(region, indexKey, locks);
        MetaDataMutationResult result = doDropTable(indexKey, tenantId, schemaName, indexName, tableName, PTableType.INDEX, rowsToDelete, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, false);
        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
            return result;
        }
    }
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, EnvironmentEdgeManager.currentTimeMillis(), table, tableNamesToDelete);
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PName(org.apache.phoenix.schema.PName) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) LinkType(org.apache.phoenix.schema.PTable.LinkType) Cell(org.apache.hadoop.hbase.Cell)
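
The heart of doDropTable is the cascade: child views (and then indexes) are dropped by recursing with the same accumulator lists (rowsToDelete, invalidateList, locks, and so on), and any recursive result other than TABLE_ALREADY_EXISTS aborts the whole drop before the staged mutations are applied. A skeletal sketch of that accumulate-and-recurse shape over a hypothetical parent-to-children map (no HBase types; the names and the Code enum are illustrative):

import java.util.Collections;
import java.util.List;
import java.util.Map;

final class CascadeDropSketch {

    enum Code { OK, NOT_FOUND, NOT_ALLOWED }

    /**
     * Drops 'table' and, when cascade is set, every child reachable from it.
     * Deletes accumulate in rowsToDelete; the first non-OK result aborts the
     * recursion, mirroring doDropTable's early returns.
     */
    static Code drop(String table, Map<String, List<String>> children,
                     boolean cascade, List<String> rowsToDelete) {
        List<String> kids = children.getOrDefault(table, Collections.emptyList());
        if (!kids.isEmpty() && !cascade) {
            // DROP without CASCADE on a table with child views is refused.
            return Code.NOT_ALLOWED;
        }
        for (String child : kids) {
            rowsToDelete.add(child);  // stage the child's delete before recursing
            // The source likewise recurses with isCascade = false for each view.
            Code result = drop(child, children, false, rowsToDelete);
            if (result != Code.OK) {
                return result;  // abort: nothing is applied unless every drop succeeds
            }
        }
        rowsToDelete.add(table);  // finally stage the table's own delete
        return Code.OK;
    }
}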

Example 15 with PMetaDataEntity

Use of org.apache.phoenix.schema.PMetaDataEntity in project phoenix by apache, in class MetaDataEndpointImpl, method buildTable.

private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException, SQLException {
    Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
    RegionScanner scanner = region.getScanner(scan);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    try {
        PTable oldTable = (PTable) metaDataCache.getIfPresent(cacheKey);
        long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP - 1 : oldTable.getTimeStamp();
        PTable newTable;
        boolean blockWriteRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE, QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
        newTable = getTable(scanner, clientTimeStamp, tableTimeStamp);
        if (newTable == null) {
            return null;
        }
        if (oldTable == null || tableTimeStamp < newTable.getTimeStamp() || (blockWriteRebuildIndex && newTable.getIndexDisableTimestamp() > 0)) {
            if (logger.isDebugEnabled()) {
                logger.debug("Caching table " + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(), cacheKey.getLength()) + " at seqNum " + newTable.getSequenceNumber() + " with newer timestamp " + newTable.getTimeStamp() + " versus " + tableTimeStamp);
            }
            metaDataCache.put(cacheKey, newTable);
        }
        return newTable;
    } finally {
        scanner.close();
    }
}
Also used: RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Scan(org.apache.hadoop.hbase.client.Scan) PTable(org.apache.phoenix.schema.PTable)
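
Note the guarded put at the end of buildTable: the freshly built table only replaces the cached entry when the cache held nothing or something older (or when blockWriteRebuildIndex forces a refresh for a disabled index), so a rebuild can never downgrade the cache to a stale version. Here is the guard in isolation, omitting the index-disable escape hatch; the Timestamped interface is illustrative, standing in for PTable's getTimeStamp():

import com.google.common.cache.Cache;

final class GuardedCachePut {

    /** Minimal contract for the sketch; Phoenix's PTable exposes the same getter. */
    interface Timestamped {
        long getTimeStamp();
    }

    /** Replaces the cached value only when the new one is strictly newer. */
    static <K, V extends Timestamped> void putIfNewer(Cache<K, V> cache, K key, V fresh) {
        V cached = cache.getIfPresent(key);
        // Mirrors buildTable: an absent entry counts as older than anything.
        if (cached == null || cached.getTimeStamp() < fresh.getTimeStamp()) {
            cache.put(key, fresh);
        }
    }
}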

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 27
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 27
Region (org.apache.hadoop.hbase.regionserver.Region): 18
PTable (org.apache.phoenix.schema.PTable): 14
ArrayList (java.util.ArrayList): 11
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 10
Scan (org.apache.hadoop.hbase.client.Scan): 9
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 9
Cell (org.apache.hadoop.hbase.Cell): 7
Mutation (org.apache.hadoop.hbase.client.Mutation): 7
PSmallint (org.apache.phoenix.schema.types.PSmallint): 7
PTinyint (org.apache.phoenix.schema.types.PTinyint): 7
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 5
PFunction (org.apache.phoenix.parse.PFunction): 5
ByteString (com.google.protobuf.ByteString): 4
PSchema (org.apache.phoenix.parse.PSchema): 4
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 3
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 3
FunctionBytesPtr (org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr): 3
PName (org.apache.phoenix.schema.PName): 3