Example 6 with PSchema

Use of org.apache.phoenix.parse.PSchema in the Apache Phoenix project.

From the class MetaDataEndpointImpl, the method getSchema:

private PSchema getSchema(RegionScanner scanner, long clientTimeStamp) throws IOException, SQLException {
    List<Cell> results = Lists.newArrayList();
    scanner.next(results);
    if (results.isEmpty()) {
        return null;
    }
    // The row key of the first cell encodes an optional tenant ID, a zero-byte
    // separator, and the schema name.
    Cell keyValue = results.get(0);
    byte[] keyBuffer = keyValue.getRowArray();
    int keyLength = keyValue.getRowLength();
    int keyOffset = keyValue.getRowOffset();
    PName tenantId = newPName(keyBuffer, keyOffset, keyLength);
    int tenantIdLength = (tenantId == null) ? 0 : tenantId.getBytes().length;
    if (tenantIdLength == 0) {
        // Global schema row: no tenant prefix.
        tenantId = null;
    }
    // The schema name starts after the tenant ID and its separator byte.
    PName schemaName = newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength - tenantIdLength - 1);
    long timeStamp = keyValue.getTimestamp();
    return new PSchema(schemaName.getString(), timeStamp);
}
Also used : PName(org.apache.phoenix.schema.PName) PSchema(org.apache.phoenix.parse.PSchema) Cell(org.apache.hadoop.hbase.Cell) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint)
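
The offset arithmetic above reflects the row-key layout of the schema row: an optional tenant ID, a zero-byte separator, then the schema name. A minimal, hypothetical sketch of that layout follows; SchemaKeyLayout and its separator scan are illustrations written to mirror the arithmetic above, not Phoenix code.

import java.nio.charset.StandardCharsets;

// Hypothetical illustration of the row key parsed by getSchema:
// tenantId, a zero-byte separator, then the schema name.
public class SchemaKeyLayout {
    public static void main(String[] args) {
        byte[] key = "tenant1\0MY_SCHEMA".getBytes(StandardCharsets.UTF_8);
        int sep = 0;
        while (key[sep] != 0) {
            // Scan to the zero-byte separator, roughly what newPName does.
            sep++;
        }
        String tenantId = new String(key, 0, sep, StandardCharsets.UTF_8);
        String schemaName = new String(key, sep + 1, key.length - sep - 1, StandardCharsets.UTF_8);
        System.out.println(tenantId + " / " + schemaName); // prints: tenant1 / MY_SCHEMA
    }
}

For the global (non-tenant) schema row the tenant portion is empty, which is why getSchema resets tenantId to null when its length is zero.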

Example 7 with PSchema

Use of org.apache.phoenix.parse.PSchema in the Apache Phoenix project.

From the class ParallelIteratorsSplitTest, the method getSplits:

private static List<KeyRange> getSplits(final TableRef tableRef, final Scan scan, final List<HRegionLocation> regions, final ScanRanges scanRanges) throws SQLException {
    final List<TableRef> tableRefs = Collections.singletonList(tableRef);
    ColumnResolver resolver = new ColumnResolver() {

        @Override
        public List<PFunction> getFunctions() {
            return Collections.emptyList();
        }

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            return null;
        }

        @Override
        public List<PSchema> getSchemas() {
            return null;
        }
    };
    PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    final PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    context.setScanRanges(scanRanges);
    ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() {

        private final Set<TableRef> tableRefs = ImmutableSet.of(tableRef);

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return ExplainPlan.EMPTY_PLAN;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator() throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public long getEstimatedSize() {
            return 0;
        }

        @Override
        public Set<TableRef> getSourceRefs() {
            return tableRefs;
        }

        @Override
        public TableRef getTableRef() {
            return tableRef;
        }

        @Override
        public RowProjector getProjector() {
            return RowProjector.EMPTY_PROJECTOR;
        }

        @Override
        public Integer getLimit() {
            return null;
        }

        @Override
        public Integer getOffset() {
            return null;
        }

        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }

        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }

        @Override
        public List<KeyRange> getSplits() {
            return null;
        }

        @Override
        public FilterableStatement getStatement() {
            return SelectStatement.SELECT_ONE;
        }

        @Override
        public boolean isDegenerate() {
            return false;
        }

        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }

        @Override
        public List<List<Scan>> getScans() {
            return null;
        }

        @Override
        public Operation getOperation() {
            return Operation.QUERY;
        }

        @Override
        public boolean useRoundRobinIterator() {
            return false;
        }

        @Override
        public Long getEstimatedRowsToScan() {
            return null;
        }

        @Override
        public Long getEstimatedBytesToScan() {
            return null;
        }
    }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false);
    List<KeyRange> keyRanges = parallelIterators.getSplits();
    return keyRanges;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) PFunction(org.apache.phoenix.parse.PFunction) SQLException(java.sql.SQLException) Operation(org.apache.phoenix.jdbc.PhoenixStatement.Operation) QueryPlan(org.apache.phoenix.compile.QueryPlan) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) SequenceManager(org.apache.phoenix.compile.SequenceManager) StatementContext(org.apache.phoenix.compile.StatementContext) FilterableStatement(org.apache.phoenix.parse.FilterableStatement) List(java.util.List) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) OrderBy(org.apache.phoenix.compile.OrderByCompiler.OrderBy) ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) GroupBy(org.apache.phoenix.compile.GroupByCompiler.GroupBy) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) PSchema(org.apache.phoenix.parse.PSchema) ParallelScanGrouper(org.apache.phoenix.iterate.ParallelScanGrouper) RowProjector(org.apache.phoenix.compile.RowProjector) Scan(org.apache.hadoop.hbase.client.Scan) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) TableRef(org.apache.phoenix.schema.TableRef) ParameterMetaData(java.sql.ParameterMetaData) PhoenixParameterMetaData(org.apache.phoenix.jdbc.PhoenixParameterMetaData) ExplainPlan(org.apache.phoenix.compile.ExplainPlan)
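
Nearly all of this example is scaffolding: the anonymous ColumnResolver and QueryPlan stub out every method except the handful the split calculation actually reads (the StatementContext, the TableRef, and the scan ranges); everything else returns an empty constant or throws. A hedged, self-contained sketch of the same test pattern, with a hypothetical Greeter interface standing in for the Phoenix types:

// Hypothetical interface and stub illustrating the test pattern used above:
// implement an interface anonymously and override only what the test exercises.
public class StubPatternExample {
    interface Greeter {
        String greet(String name);
        String farewell(String name);
    }

    public static void main(String[] args) {
        Greeter stub = new Greeter() {
            @Override
            public String greet(String name) {
                return "hello " + name; // the one method the test actually uses
            }

            @Override
            public String farewell(String name) {
                throw new UnsupportedOperationException(); // fail fast if touched
            }
        };
        System.out.println(stub.greet("phoenix"));
    }
}

Throwing UnsupportedOperationException from the unused methods, as the resolver does above, turns any unexpected code path into an immediate test failure rather than a silent wrong answer.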

Example 8 with PSchema

Use of org.apache.phoenix.parse.PSchema in the Apache Phoenix project.

From the class MetaDataClient, the method dropSchema:

public MutationState dropSchema(DropSchemaStatement executableDropSchemaStatement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PSchema schema = new PSchema(executableDropSchemaStatement.getSchemaName());
        String schemaName = schema.getSchemaName();
        boolean ifExists = executableDropSchemaStatement.ifExists();
        byte[] key = SchemaUtil.getSchemaKey(schemaName);
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> schemaMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete schemaDelete = new Delete(key, clientTimeStamp);
        schemaMetaData.add(schemaDelete);
        MetaDataMutationResult result = connection.getQueryServices().dropSchema(schemaMetaData, schemaName);
        MutationCode code = result.getMutationCode();
        schema = result.getSchema();
        switch(code) {
            case SCHEMA_NOT_FOUND:
                if (!ifExists) {
                    throw new SchemaNotFoundException(schemaName);
                }
                break;
            case NEWER_SCHEMA_FOUND:
                throw new NewerSchemaAlreadyExistsException(schemaName);
            case TABLES_EXIST_ON_SCHEMA:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_SCHEMA).setSchemaName(schemaName).build().buildException();
            default:
                connection.removeSchema(schema, result.getMutationTime());
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PSchema(org.apache.phoenix.parse.PSchema) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
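
On the client, dropSchema is reached through the DROP SCHEMA statement. A minimal JDBC sketch, assuming a Phoenix cluster reachable at jdbc:phoenix:localhost:2181 with phoenix.schema.isNamespaceMappingEnabled=true (both the URL and the schema name are assumptions for illustration):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropSchemaExample {
    public static void main(String[] args) throws Exception {
        // The URL and schema name are placeholders; adjust for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS maps to the ifExists flag above, so a missing schema
            // falls through the SCHEMA_NOT_FOUND case instead of throwing.
            stmt.execute("DROP SCHEMA IF EXISTS MY_SCHEMA");
        }
    }
}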

Example 9 with PSchema

Use of org.apache.phoenix.parse.PSchema in the Apache Phoenix project.

From the class MetaDataClient, the method updateCache:

public MetaDataMutationResult updateCache(String schemaName, boolean alwaysHitServer) throws SQLException {
    long clientTimeStamp = getClientTimeStamp();
    PSchema schema = null;
    try {
        // Consult the client-side metadata cache first.
        schema = connection.getMetaDataCache().getSchema(new PTableKey(null, schemaName));
        if (schema != null && !alwaysHitServer) {
            return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, QueryConstants.UNSET_TIMESTAMP);
        }
    } catch (SchemaNotFoundException e) {
        // Cache miss: fall through and fetch the schema from the server.
    }
    return connection.getQueryServices().getSchema(schemaName, clientTimeStamp);
}
Also used : PSchema(org.apache.phoenix.parse.PSchema) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
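
updateCache is a look-aside lookup: serve the schema from the client-side metadata cache when possible, and fall back to the server on a miss or when alwaysHitServer forces a refresh. The same pattern in a generic, hypothetical sketch; the Map-based cache and Function-based server are stand-ins, not Phoenix types, and unlike updateCache this sketch also writes the fetched value back (in Phoenix that write-back happens elsewhere):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class LookAsideCache {
    // Generic look-aside lookup mirroring the shape of updateCache above.
    static <K, V> V lookup(Map<K, V> cache, Function<K, V> server, K key, boolean alwaysHitServer) {
        V cached = cache.get(key);
        if (cached != null && !alwaysHitServer) {
            return cached; // fresh enough: answer from the local cache
        }
        V fetched = server.apply(key); // miss or forced refresh: ask the server
        cache.put(key, fetched);
        return fetched;
    }

    public static void main(String[] args) {
        Map<String, String> cache = new HashMap<>();
        Function<String, String> server = name -> "schema:" + name;
        System.out.println(lookup(cache, server, "MY_SCHEMA", false)); // server fetch
        System.out.println(lookup(cache, server, "MY_SCHEMA", false)); // cache hit
    }
}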

Example 10 with PSchema

Use of org.apache.phoenix.parse.PSchema in the Apache Phoenix project.

From the class MetaDataEndpointImpl, the method buildDeletedSchema:

private PSchema buildDeletedSchema(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    // Scan raw so that delete markers are returned along with regular cells.
    scan.setRaw(true);
    List<Cell> results = Lists.newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        Cell kv = results.get(0);
        if (kv.getTypeByte() == Type.Delete.getCode()) {
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            PSchema schema = newDeletedSchemaMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, schema);
            return schema;
        }
    }
    return null;
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PSchema(org.apache.phoenix.parse.PSchema) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell)
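
The key move here is scan.setRaw(true): a raw scan returns delete markers as ordinary cells, which is what lets the code detect that the schema row was deleted after the client's timestamp. A hedged sketch of the same technique against a plain HBase table; the table name "t" and the default connection configuration are assumptions for illustration:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class RawScanSketch {
    public static void main(String[] args) throws Exception {
        // Assumes a reachable HBase cluster and an existing table named "t".
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("t"))) {
            Scan scan = new Scan();
            scan.setRaw(true); // delete markers are returned alongside puts
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    for (Cell cell : result.rawCells()) {
                        if (cell.getTypeByte() == KeyValue.Type.Delete.getCode()) {
                            System.out.println("delete marker, ts=" + cell.getTimestamp());
                        }
                    }
                }
            }
        }
    }
}

As the comment in buildDeletedSchema notes, HBase ignores the scan time range on raw scans (HBASE-7362), so the timestamp comparison has to happen after the scan returns.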

Aggregations

Types used alongside PSchema across the examples, with usage counts:

PSchema (org.apache.phoenix.parse.PSchema): 13 uses
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 6 uses
Scan (org.apache.hadoop.hbase.client.Scan): 5 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 4 uses
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 4 uses
SQLException (java.sql.SQLException): 3 uses
Cell (org.apache.hadoop.hbase.Cell): 3 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 3 uses
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 3 uses
MutationState (org.apache.phoenix.execute.MutationState): 3 uses
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 3 uses
ByteString (com.google.protobuf.ByteString): 2 uses
ArrayList (java.util.ArrayList): 2 uses
List (java.util.List): 2 uses
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 2 uses
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 2 uses
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 2 uses
ResultIterator (org.apache.phoenix.iterate.ResultIterator): 2 uses
PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement): 2 uses
PFunction (org.apache.phoenix.parse.PFunction): 2 uses