Example 6 with SequenceKey

Use of org.apache.phoenix.schema.SequenceKey in the Apache Phoenix project.

From the class ConnectionQueryServicesImpl, the method dropSequence:

@Override
public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, drop the sequence
        Append append = sequence.dropSequence(timestamp);
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        try {
            Result result = htable.append(append);
            return sequence.dropSequence(result);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
Also used : Append(org.apache.hadoop.hbase.client.Append) SequenceKey(org.apache.phoenix.schema.SequenceKey) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result(org.apache.hadoop.hbase.client.Result)
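
As a point of reference, here is a minimal caller-side sketch of reaching dropSequence through the public ConnectionQueryServices interface, using only the signature shown above. The JDBC URL and the schema/sequence names are placeholders, not part of the Phoenix source.

import java.sql.DriverManager;
import java.sql.SQLException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

public class DropSequenceSketch {
    public static void main(String[] args) throws SQLException {
        // Placeholder URL; point this at a real cluster.
        try (PhoenixConnection conn = DriverManager
                .getConnection("jdbc:phoenix:localhost")
                .unwrap(PhoenixConnection.class)) {
            ConnectionQueryServices services = conn.getQueryServices();
            // A null tenantId targets a global sequence; LATEST_TIMESTAMP is
            // the usual value when the connection has no SCN set.
            services.dropSequence(null, "MY_SCHEMA", "MY_SEQ",
                    HConstants.LATEST_TIMESTAMP);
        }
    }
}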

Example 7 with SequenceKey

Use of org.apache.phoenix.schema.SequenceKey in the Apache Phoenix project.

From the class MetaDataEndpointImpl, the method createTable:

@Override
public void createTable(RpcController controller, CreateTableRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[][] rowKeyMetaData = new byte[3][];
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        byte[] parentSchemaName = null;
        byte[] parentTableName = null;
        PTableType tableType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
        byte[] parentTableKey = null;
        Mutation viewPhysicalTableRow = null;
        if (tableType == PTableType.VIEW) {
            byte[][] parentSchemaTableNames = new byte[2][];
            /*
                 * For a view, we lock the base physical table row. For a mapped view, there is 
                 * no link present to the physical table. So the viewPhysicalTableRow is null
                 * in that case.
                 */
            viewPhysicalTableRow = getPhysicalTableForView(tableMetadata, parentSchemaTableNames);
            parentSchemaName = parentSchemaTableNames[0];
            parentTableName = parentSchemaTableNames[1];
            if (parentTableName != null) {
                parentTableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, parentSchemaName, parentTableName);
            }
        } else if (tableType == PTableType.INDEX) {
            parentSchemaName = schemaName;
            /* 
                 * For an index we lock the parent table's row which could be a physical table or a view.
                 * If the parent table is a physical table, then the tenantIdBytes is empty because
                 * we allow creating an index with a tenant connection only if the parent table is a view.
                 */
            parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
            parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, parentSchemaName, parentTableName);
        }
        Region region = env.getRegion();
        List<RowLock> locks = Lists.newArrayList();
        // Place a lock using key for the table to be created
        byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
        try {
            acquireLock(region, tableKey, locks);
            // If the table key resides outside the region, return without doing anything
            MetaDataMutationResult result = checkTableKeyInRegion(tableKey, region);
            if (result != null) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            ImmutableBytesPtr parentCacheKey = null;
            PTable parentTable = null;
            if (parentTableName != null) {
                // Check if the parent table resides in the same region. If not, don't worry about locking the parent table row
                // or loading the parent table. For a view, the parent table that needs to be locked is the base physical table.
                // For an index on view, the view header row needs to be locked. 
                result = checkTableKeyInRegion(parentTableKey, region);
                if (result == null) {
                    acquireLock(region, parentTableKey, locks);
                    parentCacheKey = new ImmutableBytesPtr(parentTableKey);
                    parentTable = loadTable(env, parentTableKey, parentCacheKey, clientTimeStamp, clientTimeStamp);
                    if (parentTable == null || isTableDeleted(parentTable)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        done.run(builder.build());
                        return;
                    }
                    long parentTableSeqNumber;
                    if (tableType == PTableType.VIEW && viewPhysicalTableRow != null && request.hasClientVersion()) {
                        // Starting 4.5, the client passes the sequence number of the physical table in the table metadata.
                        parentTableSeqNumber = MetaDataUtil.getSequenceNumber(viewPhysicalTableRow);
                    } else if (tableType == PTableType.VIEW && !request.hasClientVersion()) {
                        // Before 4.5, due to a bug, the client did not pass the parent
                        // table's sequence number. Fall back to its current value so the
                        // concurrency check below cannot spuriously fail.
                        parentTableSeqNumber = parentTable.getSequenceNumber();
                    } else {
                        parentTableSeqNumber = MetaDataUtil.getParentSequenceNumber(tableMetadata);
                    }
                    // If parent table isn't at the expected sequence number, then return
                    if (parentTable.getSequenceNumber() != parentTableSeqNumber) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.CONCURRENT_TABLE_MUTATION);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.setTable(PTableImpl.toProto(parentTable));
                        done.run(builder.build());
                        return;
                    }
                }
            }
            // Load child table next
            ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey);
            // Get as of latest timestamp so we can detect if we have a newer table that already
            // exists without making an additional query
            PTable table = loadTable(env, tableKey, cacheKey, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
            if (table != null) {
                if (table.getTimeStamp() < clientTimeStamp) {
                    // continue
                    if (!isTableDeleted(table)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.setTable(PTableImpl.toProto(table));
                        done.run(builder.build());
                        return;
                    }
                } else {
                    builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_TABLE_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    builder.setTable(PTableImpl.toProto(table));
                    done.run(builder.build());
                    return;
                }
            }
            // Add cell for ROW_KEY_ORDER_OPTIMIZABLE = true, as we know that new tables
            // conform the row key. The exception is for a VIEW, which the client
            // sends over depending on its base physical table.
            if (tableType != PTableType.VIEW) {
                UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, tableKey, clientTimeStamp);
            }
            // If the parent table of the view has the auto partition sequence name attribute,
            // modify the tableMetadata and set the view statement and partition column correctly
            if (parentTable != null && parentTable.getAutoPartitionSeqName() != null) {
                long autoPartitionNum = 1;
                try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                    Statement stmt = connection.createStatement()) {
                    String seqName = parentTable.getAutoPartitionSeqName();
                    // Not going through the standard route of using statement.execute() as that code path
                    // is blocked if the metadata hasn't been upgraded to the new minor release.
                    String seqNextValueSql = String.format("SELECT NEXT VALUE FOR %s", seqName);
                    PhoenixStatement ps = stmt.unwrap(PhoenixStatement.class);
                    QueryPlan plan = ps.compileQuery(seqNextValueSql);
                    ResultIterator resultIterator = plan.iterator();
                    PhoenixResultSet rs = ps.newResultSet(resultIterator, plan.getProjector(), plan.getContext());
                    rs.next();
                    autoPartitionNum = rs.getLong(1);
                } catch (SequenceNotFoundException e) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.AUTO_PARTITION_SEQUENCE_NOT_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                PColumn autoPartitionCol = parentTable.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parentTable));
                if (!PLong.INSTANCE.isCoercibleTo(autoPartitionCol.getDataType(), autoPartitionNum)) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.CANNOT_COERCE_AUTO_PARTITION_ID);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                builder.setAutoPartitionNum(autoPartitionNum);
                // set the VIEW STATEMENT column of the header row
                Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
                NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
                List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                Cell cell = cells.get(0);
                String autoPartitionWhere = QueryUtil.getViewPartitionClause(MetaDataUtil.getAutoPartitionColumnName(parentTable), autoPartitionNum);
                String hbaseVersion = VersionInfo.getVersion();
                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                KeyValueBuilder kvBuilder = KeyValueBuilder.get(hbaseVersion);
                MetaDataUtil.getMutationValue(tableHeaderPut, VIEW_STATEMENT_BYTES, kvBuilder, ptr);
                byte[] value = ptr.copyBytesIfNecessary();
                byte[] viewStatement = null;
                // if we have an existing where clause add the auto partition where clause to it
                if (!Bytes.equals(value, QueryConstants.EMPTY_COLUMN_VALUE_BYTES)) {
                    viewStatement = Bytes.add(value, Bytes.toBytes(" AND "), Bytes.toBytes(autoPartitionWhere));
                } else {
                    viewStatement = Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), parentTable.getTableName().getString(), autoPartitionWhere));
                }
                Cell viewStatementCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_STATEMENT_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), viewStatement);
                cells.add(viewStatementCell);
                // set the IS_VIEW_REFERENCED column of the auto partition column row
                Put autoPartitionPut = MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata);
                familyCellMap = autoPartitionPut.getFamilyCellMap();
                cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                cell = cells.get(0);
                PDataType dataType = autoPartitionCol.getDataType();
                Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
                byte[] bytes = new byte[dataType.getByteSize() + 1];
                dataType.toBytes(val, bytes, 0);
                Cell viewConstantCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_CONSTANT_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
                cells.add(viewConstantCell);
            }
            Short indexId = null;
            if (request.hasAllocateIndexId() && request.getAllocateIndexId()) {
                String tenantIdStr = tenantIdBytes.length == 0 ? null : Bytes.toString(tenantIdBytes);
                try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class)) {
                    PName physicalName = parentTable.getPhysicalName();
                    int nSequenceSaltBuckets = connection.getQueryServices().getSequenceSaltBuckets();
                    SequenceKey key = MetaDataUtil.getViewIndexSequenceKey(tenantIdStr, physicalName, nSequenceSaltBuckets, parentTable.isNamespaceMapped());
                    // TODO: Review. Earlier the sequence was created at (SCN-1/LATEST_TIMESTAMP)
                    // and incremented at the client at max(SCN, dataTable.getTimestamp()), but it
                    // seems we should always use LATEST_TIMESTAMP so that connections with and
                    // without an SCN never see inconsistent sequence values.
                    long sequenceTimestamp = HConstants.LATEST_TIMESTAMP;
                    try {
                        connection.getQueryServices().createSequence(key.getTenantId(), key.getSchemaName(), key.getSequenceName(), Short.MIN_VALUE, 1, 1, Long.MIN_VALUE, Long.MAX_VALUE, false, sequenceTimestamp);
                    } catch (SequenceAlreadyExistsException e) {
                        // Expected on every call after the first: the sequence is
                        // already seeded, so fall through and increment it below.
                    }
                    long[] seqValues = new long[1];
                    SQLException[] sqlExceptions = new SQLException[1];
                    connection.getQueryServices().incrementSequences(Collections.singletonList(new SequenceAllocation(key, 1)), HConstants.LATEST_TIMESTAMP, seqValues, sqlExceptions);
                    if (sqlExceptions[0] != null) {
                        throw sqlExceptions[0];
                    }
                    long seqValue = seqValues[0];
                    if (seqValue > Short.MAX_VALUE) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        done.run(builder.build());
                        return;
                    }
                    Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
                    NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
                    List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                    Cell cell = cells.get(0);
                    PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
                    Object val = dataType.toObject(seqValue, PLong.INSTANCE);
                    byte[] bytes = new byte[dataType.getByteSize() + 1];
                    dataType.toBytes(val, bytes, 0);
                    Cell indexIdCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_INDEX_ID_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
                    cells.add(indexIdCell);
                    indexId = (short) seqValue;
                }
            }
            // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the
            // system table. Basically, we get all the locks that we don't already hold for all the
            // tableMetadata rows. This ensures we don't have deadlock situations (ensuring
            // primary and then index table locks are held, in that order). For now, we just don't support
            // indexing on the system table. This is an issue because of the way we manage batch mutation
            // in the Indexer.
            region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            // Invalidate the cache - the next getTable call will add it
            // TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            if (parentCacheKey != null) {
                metaDataCache.invalidate(parentCacheKey);
            }
            metaDataCache.invalidate(cacheKey);
            // Get timeStamp from mutations - the above method sets it if it's unset
            long currentTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
            if (indexId != null) {
                builder.setViewIndexId(indexId);
            }
            builder.setMutationTime(currentTimeStamp);
            done.run(builder.build());
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("createTable failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) KeyValue(org.apache.hadoop.hbase.KeyValue) SQLException(java.sql.SQLException) SequenceAlreadyExistsException(org.apache.phoenix.schema.SequenceAlreadyExistsException) ByteString(com.google.protobuf.ByteString) QueryPlan(org.apache.phoenix.compile.QueryPlan) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) PDataType(org.apache.phoenix.schema.types.PDataType) SequenceKey(org.apache.phoenix.schema.SequenceKey) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) SequenceNotFoundException(org.apache.phoenix.schema.SequenceNotFoundException) FilterList(org.apache.hadoop.hbase.filter.FilterList) ArrayList(java.util.ArrayList) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Statement(java.sql.Statement) PTableType(org.apache.phoenix.schema.PTableType) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ResultIterator(org.apache.phoenix.iterate.ResultIterator) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) GenericKeyValueBuilder(org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder) SequenceAllocation(org.apache.phoenix.schema.SequenceAllocation) Put(org.apache.hadoop.hbase.client.Put) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) PName(org.apache.phoenix.schema.PName) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation)
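
The view-index id allocation buried in the middle of createTable follows a reusable pattern: create the sequence, swallow SequenceAlreadyExistsException, then increment. Below is a condensed sketch of just that pattern, using only the ConnectionQueryServices calls visible above; the helper name nextViewIndexId is hypothetical.

import java.sql.SQLException;
import java.util.Collections;

import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.schema.SequenceAllocation;
import org.apache.phoenix.schema.SequenceAlreadyExistsException;
import org.apache.phoenix.schema.SequenceKey;

public class ViewIndexIdSketch {

    // Hypothetical helper distilling the allocation pattern in createTable.
    static long nextViewIndexId(ConnectionQueryServices services, SequenceKey key)
            throws SQLException {
        try {
            // Seed the sequence exactly as the snippet does: start at
            // Short.MIN_VALUE, increment by 1, cache size 1, no cycling.
            services.createSequence(key.getTenantId(), key.getSchemaName(),
                    key.getSequenceName(), Short.MIN_VALUE, 1, 1,
                    Long.MIN_VALUE, Long.MAX_VALUE, false,
                    HConstants.LATEST_TIMESTAMP);
        } catch (SequenceAlreadyExistsException e) {
            // Already seeded by an earlier call; fall through and increment.
        }
        long[] values = new long[1];
        SQLException[] exceptions = new SQLException[1];
        services.incrementSequences(
                Collections.singletonList(new SequenceAllocation(key, 1)),
                HConstants.LATEST_TIMESTAMP, values, exceptions);
        if (exceptions[0] != null) {
            throw exceptions[0];
        }
        return values[0];
    }
}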

Example 8 with SequenceKey

Use of org.apache.phoenix.schema.SequenceKey in the Apache Phoenix project.

From the class SequenceManager, the method validateSequences:

public void validateSequences(Sequence.ValueOp action) throws SQLException {
    if (sequenceMap.isEmpty()) {
        return;
    }
    int maxSize = sequenceMap.size();
    long[] dstSequenceValues = new long[maxSize];
    sequencePosition = new int[maxSize];
    nextSequences = Lists.newArrayListWithExpectedSize(maxSize);
    currentSequences = Lists.newArrayListWithExpectedSize(maxSize);
    for (Map.Entry<SequenceKey, SequenceValueExpression> entry : sequenceMap.entrySet()) {
        if (isNextSequence.get(entry.getValue().getIndex())) {
            nextSequences.add(new SequenceAllocation(entry.getKey(), entry.getValue().getNumToAllocate()));
        } else {
            currentSequences.add(entry.getKey());
        }
    }
    long[] srcSequenceValues = new long[nextSequences.size()];
    SQLException[] sqlExceptions = new SQLException[nextSequences.size()];
    // Sort the next sequences to prevent deadlocks
    Collections.sort(nextSequences);
    // Create reverse indexes
    for (int i = 0; i < nextSequences.size(); i++) {
        sequencePosition[i] = sequenceMap.get(nextSequences.get(i)).getIndex();
    }
    int offset = nextSequences.size();
    for (int i = 0; i < currentSequences.size(); i++) {
        sequencePosition[i + offset] = sequenceMap.get(currentSequences.get(i)).getIndex();
    }
    ConnectionQueryServices services = this.statement.getConnection().getQueryServices();
    Long scn = statement.getConnection().getSCN();
    long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
    services.validateSequences(nextSequences, timestamp, srcSequenceValues, sqlExceptions, action);
    setSequenceValues(srcSequenceValues, dstSequenceValues, sqlExceptions);
}
Also used : SequenceKey(org.apache.phoenix.schema.SequenceKey) SQLException(java.sql.SQLException) PLong(org.apache.phoenix.schema.types.PLong) SequenceAllocation(org.apache.phoenix.schema.SequenceAllocation) Map(java.util.Map) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
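
The Collections.sort(nextSequences) call is what makes the "prevent deadlocks" comment true: every client acquires sequence locks in the same global order. A small self-contained sketch of that ordering, assuming salting is disabled (0 salt buckets) and using placeholder sequence names:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.phoenix.schema.SequenceAllocation;
import org.apache.phoenix.schema.SequenceKey;

public class SequenceOrderingSketch {
    public static void main(String[] args) {
        List<SequenceAllocation> allocations = new ArrayList<>();
        // Deliberately added out of order; names are placeholders.
        allocations.add(new SequenceAllocation(
                new SequenceKey(null, "S", "SEQ_B", 0), 5));
        allocations.add(new SequenceAllocation(
                new SequenceKey(null, "S", "SEQ_A", 0), 1));
        // After sorting, every client touches SEQ_A before SEQ_B, so two
        // concurrent statements can never wait on each other in a cycle.
        Collections.sort(allocations);
    }
}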

Example 9 with SequenceKey

Use of org.apache.phoenix.schema.SequenceKey in the Apache Phoenix project.

From the class SequenceManager, the method newSequenceReference:

public SequenceValueExpression newSequenceReference(SequenceValueParseNode node) throws SQLException {
    PName tenantName = statement.getConnection().getTenantId();
    String tenantId = tenantName == null ? null : tenantName.getString();
    TableName tableName = node.getTableName();
    if (tableName.getSchemaName() == null && statement.getConnection().getSchema() != null) {
        tableName = TableName.create(statement.getConnection().getSchema(), tableName.getTableName());
    }
    int nSaltBuckets = statement.getConnection().getQueryServices().getSequenceSaltBuckets();
    ParseNode numToAllocateNode = node.getNumToAllocateNode();
    long numToAllocate = determineNumToAllocate(tableName, numToAllocateNode);
    SequenceKey key = new SequenceKey(tenantId, tableName.getSchemaName(), tableName.getTableName(), nSaltBuckets);
    SequenceValueExpression expression = sequenceMap.get(key);
    if (expression == null) {
        int index = sequenceMap.size();
        expression = new SequenceValueExpression(key, node.getOp(), index, numToAllocate);
        sequenceMap.put(key, expression);
    } else if (expression.op != node.getOp() || expression.getNumToAllocate() < numToAllocate) {
        // Keep the maximum allocation size we see in a statement
        SequenceValueExpression oldExpression = expression;
        expression = new SequenceValueExpression(key, node.getOp(), expression.getIndex(), Math.max(expression.getNumToAllocate(), numToAllocate));
        if (oldExpression.getNumToAllocate() < numToAllocate) {
            // If we found a NEXT VALUE expression with a higher number to allocate
            // We override the original expression
            sequenceMap.put(key, expression);
        }
    }
    // If we see a NEXT and a CURRENT, treat the CURRENT just like a NEXT
    if (node.getOp() == Op.NEXT_VALUE) {
        isNextSequence.set(expression.getIndex());
    }
    return expression;
}
Also used : TableName(org.apache.phoenix.parse.TableName) SequenceKey(org.apache.phoenix.schema.SequenceKey) PName(org.apache.phoenix.schema.PName) ParseNode(org.apache.phoenix.parse.ParseNode) SequenceValueParseNode(org.apache.phoenix.parse.SequenceValueParseNode)
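
This method backs NEXT VALUE FOR and CURRENT VALUE FOR expressions at compile time, including the bulk form NEXT <n> VALUES FOR that feeds getNumToAllocate(). A hedged end-to-end sketch over plain JDBC; the URL and names are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SequenceReferenceSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; point this at a real cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE SEQUENCE IF NOT EXISTS MY_SCHEMA.MY_SEQ");
            // NEXT 5 VALUES FOR reserves a block of five values and returns the
            // first; newSequenceReference records 5 as the numToAllocate.
            try (ResultSet rs = stmt.executeQuery(
                    "SELECT NEXT 5 VALUES FOR MY_SCHEMA.MY_SEQ")) {
                if (rs.next()) {
                    System.out.println(rs.getLong(1));
                }
            }
        }
    }
}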

Aggregations

SequenceKey (org.apache.phoenix.schema.SequenceKey) 9
SQLException (java.sql.SQLException) 5
IOException (java.io.IOException) 4
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface) 4
Result (org.apache.hadoop.hbase.client.Result) 4
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) 4
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException) 4
Sequence (org.apache.phoenix.schema.Sequence) 4
SequenceAllocation (org.apache.phoenix.schema.SequenceAllocation) 4
Append (org.apache.hadoop.hbase.client.Append) 3
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo) 3
PTinyint (org.apache.phoenix.schema.types.PTinyint) 3
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) 2
EmptySequenceCacheException (org.apache.phoenix.schema.EmptySequenceCacheException) 2
PName (org.apache.phoenix.schema.PName) 2
SequenceInfo (org.apache.phoenix.schema.SequenceInfo) 2
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint) 2
ByteString (com.google.protobuf.ByteString) 1
Statement (java.sql.Statement) 1
ArrayList (java.util.ArrayList) 1