
Example 31 with PName

Use of org.apache.phoenix.schema.PName in project phoenix by apache.

The class BaseQueryPlan, method serializeIndexMaintainerIntoScan.

private void serializeIndexMaintainerIntoScan(Scan scan, PTable dataTable) throws SQLException {
    PName name = context.getCurrentTable().getTable().getName();
    List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
    // Find the local index that matches the table currently being scanned
    for (PTable index : dataTable.getIndexes()) {
        if (index.getName().equals(name) && index.getIndexType() == IndexType.LOCAL) {
            indexes.add(index);
            break;
        }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    IndexMaintainer.serialize(dataTable, ptr, indexes, context.getConnection());
    scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr));
    if (dataTable.isTransactional()) {
        scan.setAttribute(BaseScannerRegionObserver.TX_STATE, context.getConnection().getMutationState().encodeTransaction());
    }
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PName(org.apache.phoenix.schema.PName) PTable(org.apache.phoenix.schema.PTable)
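
For context beyond this method: PName is Phoenix's immutable name abstraction, carrying both the String and the byte[] form of an identifier, which is why comparisons like index.getName().equals(name) above need no re-encoding. A minimal sketch of creating and reading one, using PNameFactory as the later examples do; the "MY_TABLE" literal is our own placeholder:

import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;

// A minimal sketch: PNameFactory wraps a name and caches both representations.
PName tableName = PNameFactory.newName("MY_TABLE"); // assumed example name
String asString = tableName.getString();            // "MY_TABLE"
byte[] asBytes = tableName.getBytes();              // UTF-8 bytes of the same name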

Example 32 with PName

Use of org.apache.phoenix.schema.PName in project phoenix by apache.

The class PFunction, method createFromProto.

public static PFunction createFromProto(org.apache.phoenix.coprocessor.generated.PFunctionProtos.PFunction function) {
    PName tenantId = null;
    if (function.hasTenantId()) {
        tenantId = PNameFactory.newName(function.getTenantId().toByteArray());
    }
    String functionName = function.getFunctionName();
    long timeStamp = function.getTimeStamp();
    String className = function.getClassname();
    String jarPath = function.getJarPath();
    String returnType = function.getReturnType();
    List<FunctionArgument> args = new ArrayList<FunctionArgument>(function.getArgumentsCount());
    for (PFunctionArg arg : function.getArgumentsList()) {
        String argType = arg.getArgumentType();
        boolean isArrayType = arg.hasIsArrayType() ? arg.getIsArrayType() : false;
        PDataType dataType = isArrayType
                ? PDataType.fromTypeId(PDataType.sqlArrayType(SchemaUtil.normalizeIdentifier(argType)))
                : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(argType));
        boolean isConstant = arg.hasIsConstant() ? arg.getIsConstant() : false;
        String defaultValue = arg.hasDefaultValue() ? arg.getDefaultValue() : null;
        String minValue = arg.hasMinValue() ? arg.getMinValue() : null;
        String maxValue = arg.hasMaxValue() ? arg.getMaxValue() : null;
        args.add(new FunctionArgument(argType, isArrayType, isConstant,
                defaultValue == null ? null : LiteralExpression.newConstant(new LiteralParseNode(dataType.toObject(defaultValue)).getValue()),
                minValue == null ? null : LiteralExpression.newConstant(new LiteralParseNode(dataType.toObject(minValue)).getValue()),
                maxValue == null ? null : LiteralExpression.newConstant(new LiteralParseNode(dataType.toObject(maxValue)).getValue())));
    }
    return new PFunction(tenantId, functionName, args, returnType, className, jarPath, timeStamp, false, function.hasIsReplace());
}
Also used : PDataType(org.apache.phoenix.schema.types.PDataType) PFunctionArg(org.apache.phoenix.coprocessor.generated.PFunctionProtos.PFunctionArg) PName(org.apache.phoenix.schema.PName) ArrayList(java.util.ArrayList)
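
The argument loop above leans on PDataType's name-based lookup. Below is a hedged sketch of that resolution for a scalar (non-array) argument, assuming an unnormalized type name as input; the "varchar" literal is our own placeholder:

import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.util.SchemaUtil;

// A minimal sketch: normalize the SQL type name, then resolve the PDataType,
// as createFromProto does for non-array arguments.
String argType = "varchar";                                  // assumed input
String normalized = SchemaUtil.normalizeIdentifier(argType); // "VARCHAR"
PDataType dataType = PDataType.fromSqlTypeName(normalized);
// Array arguments instead map the name through PDataType.sqlArrayType(...) and
// resolve via PDataType.fromTypeId(...), as in the loop above.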

Example 33 with PName

Use of org.apache.phoenix.schema.PName in project phoenix by apache.

The class ProjectionCompiler, method projectAllIndexColumns.

private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns, List<? extends PDatum> targetColumns) throws SQLException {
    ColumnResolver resolver = context.getResolver();
    PTable index = tableRef.getTable();
    int projectedOffset = projectedExpressions.size();
    PhoenixConnection conn = context.getConnection();
    PName tenantId = conn.getTenantId();
    String tableName = index.getParentName().getString();
    PTable dataTable = null;
    try {
        dataTable = conn.getTable(new PTableKey(tenantId, tableName));
    } catch (TableNotFoundException e) {
        if (tenantId != null) {
            // Check with null tenantId
            dataTable = conn.getTable(new PTableKey(null, tableName));
        } else {
            throw e;
        }
    }
    int tableOffset = dataTable.getBucketNum() == null ? 0 : 1;
    int minTablePKOffset = getMinPKOffset(dataTable, tenantId);
    int minIndexPKOffset = getMinPKOffset(index, tenantId);
    if (index.getIndexType() != IndexType.LOCAL) {
        if (index.getColumns().size() - minIndexPKOffset != dataTable.getColumns().size() - minTablePKOffset) {
            // The optimizer will end up not using this plan anyway, so just throw
            String schemaNameStr = dataTable.getSchemaName() == null ? null : dataTable.getSchemaName().getString();
            String tableNameStr = dataTable.getTableName() == null ? null : dataTable.getTableName().getString();
            throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, WildcardParseNode.INSTANCE.toString());
        }
    }
    for (int i = tableOffset, j = tableOffset; i < dataTable.getColumns().size(); i++) {
        PColumn column = dataTable.getColumns().get(i);
        // Skip tenant ID column (which may not be the first column, but is the first PK column)
        if (SchemaUtil.isPKColumn(column) && j++ < minTablePKOffset) {
            tableOffset++;
            continue;
        }
        String indexColName = IndexUtil.getIndexColumnName(column);
        PColumn indexColumn = null;
        ColumnRef ref = null;
        try {
            indexColumn = index.getColumnForColumnName(indexColName);
            ref = new ColumnRef(tableRef, indexColumn.getPosition());
        } catch (ColumnNotFoundException e) {
            if (index.getIndexType() == IndexType.LOCAL) {
                try {
                    ref = new LocalIndexDataColumnRef(context, indexColName);
                    indexColumn = ref.getColumn();
                } catch (ColumnFamilyNotFoundException c) {
                    throw e;
                }
            } else {
                throw e;
            }
        }
        String colName = column.getName().getString();
        String tableAlias = tableRef.getTableAlias();
        if (resolveColumn) {
            try {
                if (tableAlias != null) {
                    ref = resolver.resolveColumn(null, tableAlias, indexColName);
                } else {
                    String schemaName = index.getSchemaName().getString();
                    ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, index.getTableName().getString(), indexColName);
                }
            } catch (AmbiguousColumnException e) {
                if (indexColumn.getFamilyName() != null) {
                    ref = resolver.resolveColumn(tableAlias != null ? tableAlias : index.getTableName().getString(), indexColumn.getFamilyName().getString(), indexColName);
                } else {
                    throw e;
                }
            }
        }
        Expression expression = ref.newColumnExpression();
        expression = coerceIfNecessary(i - tableOffset + projectedOffset, targetColumns, expression);
        // We do not need to check if the column is a viewConstant, because view constants never
        // appear as a column in an index
        projectedExpressions.add(expression);
        boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
        ExpressionProjector projector = new ExpressionProjector(colName, tableRef.getTableAlias() == null ? dataTable.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive);
        projectedColumns.add(projector);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) LocalIndexDataColumnRef(org.apache.phoenix.schema.LocalIndexDataColumnRef) PTable(org.apache.phoenix.schema.PTable) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) PColumn(org.apache.phoenix.schema.PColumn) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) BaseTerminalExpression(org.apache.phoenix.expression.BaseTerminalExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) PName(org.apache.phoenix.schema.PName) ColumnRef(org.apache.phoenix.schema.ColumnRef) LocalIndexDataColumnRef(org.apache.phoenix.schema.LocalIndexDataColumnRef) AmbiguousColumnException(org.apache.phoenix.schema.AmbiguousColumnException) PTableKey(org.apache.phoenix.schema.PTableKey)
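
The try/catch around conn.getTable above implements a tenant-then-global lookup: resolve the table under the current tenant first, and fall back to the global (null-tenant) entry only if that fails. Here is a sketch of the same pattern extracted into a helper; the name resolveTable is ours, not Phoenix's:

import java.sql.SQLException;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.TableNotFoundException;

// Hypothetical helper mirroring the fallback in projectAllIndexColumns.
static PTable resolveTable(PhoenixConnection conn, PName tenantId, String fullName) throws SQLException {
    try {
        return conn.getTable(new PTableKey(tenantId, fullName));
    } catch (TableNotFoundException e) {
        if (tenantId == null) {
            // No global entry left to fall back to
            throw e;
        }
        return conn.getTable(new PTableKey(null, fullName));
    }
}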

Example 34 with PName

Use of org.apache.phoenix.schema.PName in project phoenix by apache.

The class MetaDataEndpointImpl, method doDropTable.

private MetaDataMutationResult doDropTable(byte[] key, byte[] tenantId, byte[] schemaName, byte[] tableName, byte[] parentTableName, PTableType tableType, List<Mutation> rowsToDelete, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete, boolean isCascade) throws IOException, SQLException {
    long clientTimeStamp = MetaDataUtil.getClientTimeStamp(rowsToDelete);
    Region region = env.getRegion();
    ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (table != null || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP)) != null) {
        if (table.getTimeStamp() < clientTimeStamp) {
            if (isTableDeleted(table) || tableType != table.getType()) {
                return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
            }
        } else {
            return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
    }
    // We didn't find a table at the latest timestamp, so either there never was
    // a table, or there was a table but it's been deleted. In either case we want to return.
    if (table == null) {
        if (buildDeletedTable(key, cacheKey, region, clientTimeStamp) != null) {
            return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
        return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
    }
    // Make sure we're not deleting the "wrong" child
    if (parentTableName != null && table.getParentTableName() != null && !Arrays.equals(parentTableName, table.getParentTableName().getBytes())) {
        return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
    }
    // Since we don't allow back in time DDL, we know if we have a table it's the one
    // we want to delete. FIXME: we shouldn't need a scan here, but should be able to
    // use the table to generate the Delete markers.
    Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
    List<byte[]> indexNames = Lists.newArrayList();
    List<Cell> results = Lists.newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
        if (results.isEmpty()) {
            // Should not be possible
            return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
        if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
            // Handle any child views that exist
            TableViewFinder tableViewFinderResult = findChildViews(region, tenantId, table);
            if (tableViewFinderResult.hasViews()) {
                if (isCascade) {
                    if (tableViewFinderResult.allViewsInMultipleRegions()) {
                        // view metadata spans multiple regions
                        return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
                    } else if (tableViewFinderResult.allViewsInSingleRegion()) {
                        // Recursively delete views - safe as all the views are in the same region
                        for (ViewInfo viewInfo : tableViewFinderResult.getViewInfoList()) {
                            byte[] viewTenantId = viewInfo.getTenantId();
                            byte[] viewSchemaName = viewInfo.getSchemaName();
                            byte[] viewName = viewInfo.getViewName();
                            byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
                            Delete delete = new Delete(viewKey, clientTimeStamp);
                            rowsToDelete.add(delete);
                            acquireLock(region, viewKey, locks);
                            MetaDataMutationResult result = doDropTable(viewKey, viewTenantId, viewSchemaName, viewName, null, PTableType.VIEW, rowsToDelete, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, false);
                            if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                                return result;
                            }
                        }
                    }
                } else {
                    // DROP without CASCADE on tables with child views is not permitted
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
                }
            }
        }
        // Add to list of HTables to delete, unless it's a view or a shared index
        if (tableType != PTableType.VIEW && table.getViewIndexId() == null) {
            tableNamesToDelete.add(table.getPhysicalName().getBytes());
        } else {
            sharedTablesToDelete.add(new SharedTableState(table));
        }
        invalidateList.add(cacheKey);
        byte[][] rowKeyMetaData = new byte[5][];
        do {
            Cell kv = results.get(LINK_TYPE_INDEX);
            int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, rowKeyMetaData);
            if (nColumns == 5 && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0 && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0) {
                LinkType linkType = LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]);
                if (rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0 && linkType == LinkType.INDEX_TABLE) {
                    indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
                } else if (linkType == LinkType.PARENT_TABLE || linkType == LinkType.PHYSICAL_TABLE) {
                    // delete parent->child link for views
                    Cell parentTenantIdCell = MetaDataUtil.getCell(results, PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES);
                    PName parentTenantId = parentTenantIdCell != null ? PNameFactory.newName(parentTenantIdCell.getValueArray(), parentTenantIdCell.getValueOffset(), parentTenantIdCell.getValueLength()) : null;
                    byte[] linkKey = MetaDataUtil.getChildLinkKey(parentTenantId, table.getParentSchemaName(), table.getParentTableName(), table.getTenantId(), table.getName());
                    Delete linkDelete = new Delete(linkKey, clientTimeStamp);
                    rowsToDelete.add(linkDelete);
                }
            }
            // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
            // FIXME: the version of the Delete constructor without the lock args was introduced
            // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
            // of the client.
            Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp);
            rowsToDelete.add(delete);
            results.clear();
            scanner.next(results);
        } while (!results.isEmpty());
    }
    // Recursively delete indexes
    for (byte[] indexName : indexNames) {
        byte[] indexKey = SchemaUtil.getTableKey(tenantId, schemaName, indexName);
        // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
        // FIXME: the version of the Delete constructor without the lock args was introduced
        // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
        // of the client.
        Delete delete = new Delete(indexKey, clientTimeStamp);
        rowsToDelete.add(delete);
        acquireLock(region, indexKey, locks);
        MetaDataMutationResult result = doDropTable(indexKey, tenantId, schemaName, indexName, tableName, PTableType.INDEX, rowsToDelete, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, false);
        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
            return result;
        }
    }
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, EnvironmentEdgeManager.currentTimeMillis(), table, tableNamesToDelete);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PName(org.apache.phoenix.schema.PName) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) LinkType(org.apache.phoenix.schema.PTable.LinkType) Cell(org.apache.hadoop.hbase.Cell)
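
Both the child-view and index branches above build SYSTEM.CATALOG row keys from (tenantId, schemaName, tableName) byte triples and issue Deletes bounded by the client timestamp. A minimal sketch of that key construction, with placeholder names of our choosing:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.util.SchemaUtil;

// A minimal sketch: compose the catalog row key as doDropTable does, then create
// a Delete whose timestamp bounds the cells to remove.
byte[] tenantId = Bytes.toBytes("");            // assumed: global (no tenant)
byte[] schemaName = Bytes.toBytes("MY_SCHEMA"); // assumed schema
byte[] tableName = Bytes.toBytes("MY_TABLE");   // assumed table
long clientTimeStamp = System.currentTimeMillis();

byte[] rowKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
Delete delete = new Delete(rowKey, clientTimeStamp);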

Example 35 with PName

Use of org.apache.phoenix.schema.PName in project phoenix by apache.

The class SequenceManager, method newSequenceReference.

public SequenceValueExpression newSequenceReference(SequenceValueParseNode node) throws SQLException {
    PName tenantName = statement.getConnection().getTenantId();
    String tenantId = tenantName == null ? null : tenantName.getString();
    TableName tableName = node.getTableName();
    if (tableName.getSchemaName() == null && statement.getConnection().getSchema() != null) {
        tableName = TableName.create(statement.getConnection().getSchema(), tableName.getTableName());
    }
    int nSaltBuckets = statement.getConnection().getQueryServices().getSequenceSaltBuckets();
    ParseNode numToAllocateNode = node.getNumToAllocateNode();
    long numToAllocate = determineNumToAllocate(tableName, numToAllocateNode);
    SequenceKey key = new SequenceKey(tenantId, tableName.getSchemaName(), tableName.getTableName(), nSaltBuckets);
    SequenceValueExpression expression = sequenceMap.get(key);
    if (expression == null) {
        int index = sequenceMap.size();
        expression = new SequenceValueExpression(key, node.getOp(), index, numToAllocate);
        sequenceMap.put(key, expression);
    } else if (expression.op != node.getOp() || expression.getNumToAllocate() < numToAllocate) {
        // Keep the maximum allocation size we see in a statement
        SequenceValueExpression oldExpression = expression;
        expression = new SequenceValueExpression(key, node.getOp(), expression.getIndex(), Math.max(expression.getNumToAllocate(), numToAllocate));
        if (oldExpression.getNumToAllocate() < numToAllocate) {
            // If we found a NEXT VALUE expression with a higher number to allocate,
            // we override the original expression
            sequenceMap.put(key, expression);
        }
    }
    // If we see a NEXT and a CURRENT, treat the CURRENT just like a NEXT
    if (node.getOp() == Op.NEXT_VALUE) {
        isNextSequence.set(expression.getIndex());
    }
    return expression;
}
Also used : TableName(org.apache.phoenix.parse.TableName) SequenceKey(org.apache.phoenix.schema.SequenceKey) PName(org.apache.phoenix.schema.PName) ParseNode(org.apache.phoenix.parse.ParseNode) SequenceValueParseNode(org.apache.phoenix.parse.SequenceValueParseNode)
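
The first two lines of newSequenceReference show the recurring null-safe PName idiom: a null tenant PName means the global namespace, and the String form is extracted only when a tenant is present. A minimal sketch, assuming a PhoenixConnection named connection is in scope and using placeholder schema and sequence names:

import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.SequenceKey;

// A minimal sketch: convert a nullable tenant PName into the String tenantId
// expected by SequenceKey; null means "no tenant".
PName tenantName = connection.getTenantId();
String tenantId = tenantName == null ? null : tenantName.getString();
SequenceKey key = new SequenceKey(tenantId, "MY_SCHEMA", "MY_SEQ", 0); // assumed names, 0 salt buckets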

Aggregations

PName (org.apache.phoenix.schema.PName): 45 usages
PTable (org.apache.phoenix.schema.PTable): 26 usages
PColumn (org.apache.phoenix.schema.PColumn): 18 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 13 usages
Test (org.junit.Test): 10 usages
SQLException (java.sql.SQLException): 9 usages
Cell (org.apache.hadoop.hbase.Cell): 9 usages
PColumnImpl (org.apache.phoenix.schema.PColumnImpl): 9 usages
PTableKey (org.apache.phoenix.schema.PTableKey): 9 usages
TableRef (org.apache.phoenix.schema.TableRef): 8 usages
PSmallint (org.apache.phoenix.schema.types.PSmallint): 8 usages
Connection (java.sql.Connection): 7 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 7 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 7 usages
PTinyint (org.apache.phoenix.schema.types.PTinyint): 7 usages
List (java.util.List): 6 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 6 usages
Scan (org.apache.hadoop.hbase.client.Scan): 6 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 6 usages
IOException (java.io.IOException): 5 usages