Example 1 with PhoenixTransactionContext

Use of org.apache.phoenix.transaction.PhoenixTransactionContext in project phoenix by apache.

From class FlappingTransactionIT, method testExternalTxContext.

@Test
public void testExternalTxContext() throws Exception {
    ResultSet rs;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    String fullTableName = generateUniqueName();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    Statement stmt = conn.createStatement();
    stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
    HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
    stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
    conn.commit();
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    PhoenixTransactionContext txContext = TransactionFactory.getTransactionProvider().getTransactionContext(pconn);
    PhoenixTransactionalTable txTable = TransactionFactory.getTransactionProvider().getTransactionalTable(txContext, htable);
    txContext.begin();
    // Use HBase APIs to add a new row
    Put put = new Put(Bytes.toBytes("z"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
    txTable.put(put);
    // Use Phoenix APIs to add new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
    // New connection should not see data as it hasn't been committed yet
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use new connection to create a row with a conflict
    Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
    // Existing connection should see data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Use TM APIs directly to finish (i.e. commit) the transaction
    txContext.commit();
    // Confirm that attempt to commit row with conflict fails
    try {
        connWithConflict.commit();
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
    }
    // New connection should now see data as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(3, rs.getInt(1));
    }
    // Repeat the same as above, but this time abort the transaction
    txContext = TransactionFactory.getTransactionProvider().getTransactionContext(pconn);
    txTable = TransactionFactory.getTransactionProvider().getTransactionalTable(txContext, htable);
    txContext.begin();
    // Use HBase APIs to add a new row
    put = new Put(Bytes.toBytes("j"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
    txTable.put(put);
    // Use Phoenix APIs to add new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
    // Existing connection should see data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(5, rs.getInt(1));
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
    rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    // Use TM APIs directly to abort (i.e. rollback) the transaction
    txContext.abort();
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Should succeed since conflicting row was aborted
    connWithConflict.commit();
    // New connection should now see data as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(4, rs.getInt(1));
    }
    // Even using HBase APIs directly, we shouldn't find 'j' since a delete marker would have been
    // written to hide it.
    Result result = htable.get(new Get(Bytes.toBytes("j")));
    assertTrue(result.isEmpty());
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), SQLException (java.sql.SQLException), PreparedStatement (java.sql.PreparedStatement), Statement (java.sql.Statement), Connection (java.sql.Connection), Properties (java.util.Properties), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), PhoenixTransactionalTable (org.apache.phoenix.transaction.PhoenixTransactionalTable), Get (org.apache.hadoop.hbase.client.Get), ResultSet (java.sql.ResultSet), PhoenixTransactionContext (org.apache.phoenix.transaction.PhoenixTransactionContext), Test (org.junit.Test)
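
Distilled from the test above, the pattern is small: obtain a PhoenixTransactionContext from the provider, begin it, attach it to the PhoenixConnection so subsequent SQL joins the externally managed transaction, and finish through the context rather than Connection.commit(). A minimal sketch, assuming a three-column transactional table T already exists and url points at a running cluster (both names are illustrative assumptions; the API calls are the ones used in the test):

try (Connection conn = DriverManager.getConnection(url)) {
    conn.setAutoCommit(false);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    // Obtain a context from the pluggable transaction provider
    PhoenixTransactionContext txContext =
            TransactionFactory.getTransactionProvider().getTransactionContext(pconn);
    txContext.begin();
    // SQL on this connection now participates in the external transaction
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("UPSERT INTO T VALUES ('k', 'v1', 'v2')");
    // Commit (or abort) through the transaction manager API, not conn.commit()
    txContext.commit();
}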

Example 2 with PhoenixTransactionContext

Use of org.apache.phoenix.transaction.PhoenixTransactionContext in project phoenix by apache.

From class NonAggregateRegionScannerFactory, method getRegionScanner.

@Override
public RegionScanner getRegionScanner(final Scan scan, final RegionScanner s) throws Throwable {
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        Region region = getRegion();
        offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offset);
    }
    byte[] scanOffsetBytes = scan.getAttribute(BaseScannerRegionObserver.SCAN_OFFSET);
    Integer scanOffset = null;
    if (scanOffsetBytes != null) {
        scanOffset = (Integer) PInteger.INSTANCE.toObject(scanOffsetBytes);
    }
    RegionScanner innerScanner = s;
    Set<KeyValueColumnExpression> arrayKVRefs = Sets.newHashSet();
    Expression[] arrayFuncRefs = deserializeArrayPostionalExpressionInfoFromScan(scan, innerScanner, arrayKVRefs);
    TupleProjector tupleProjector = null;
    Region dataRegion = null;
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
    PhoenixTransactionContext tx = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    if (dataColumns != null) {
        tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
        dataRegion = env.getRegion();
        byte[] localIndexBytes = scan.getAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO);
        boolean useProto = localIndexBytes != null;
        if (localIndexBytes == null) {
            localIndexBytes = scan.getAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD);
        }
        List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
        indexMaintainer = indexMaintainers.get(0);
        viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
        tx = MutationState.decodeTransaction(txState);
    }
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(getMinMaxQualifiersFromScan(scan)) && scan.getAttribute(BaseScannerRegionObserver.TOPN) != null;
    // setting dataRegion in case of a non-coprocessor environment
    if (dataRegion == null && env.getConfiguration().get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY) != null) {
        dataRegion = env.getRegion();
    }
    innerScanner = getWrappedScanner(env, innerScanner, arrayKVRefs, arrayFuncRefs, offset, scan, dataColumns, tupleProjector, dataRegion, indexMaintainer, tx, viewConstants, kvSchema, kvSchemaBitSet, j == null ? p : null, ptr, useQualifierAsIndex);
    final ImmutableBytesPtr tenantId = ScanUtil.getTenantId(scan);
    if (j != null) {
        innerScanner = new HashJoinRegionScanner(innerScanner, p, j, tenantId, env, useQualifierAsIndex, useNewValueColumnQualifier);
    }
    if (scanOffset != null) {
        innerScanner = getOffsetScanner(innerScanner, new OffsetResultIterator(new RegionScannerResultIterator(innerScanner, getMinMaxQualifiersFromScan(scan), encodingScheme), scanOffset), scan.getAttribute(QueryConstants.LAST_SCAN) != null);
    }
    final OrderedResultIterator iterator = deserializeFromScan(scan, innerScanner);
    if (iterator == null) {
        return innerScanner;
    }
    // TODO: the above wrapped scanner should be used here also
    return getTopNScanner(env, innerScanner, iterator, tenantId);
}
Also used: TupleProjector (org.apache.phoenix.execute.TupleProjector), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PInteger (org.apache.phoenix.schema.types.PInteger), BaseRegionScanner (org.apache.phoenix.coprocessor.BaseRegionScanner), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), HashJoinRegionScanner (org.apache.phoenix.coprocessor.HashJoinRegionScanner), IndexMaintainer (org.apache.phoenix.index.IndexMaintainer), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression), Expression (org.apache.phoenix.expression.Expression), SingleCellColumnExpression (org.apache.phoenix.expression.SingleCellColumnExpression), OrderByExpression (org.apache.phoenix.expression.OrderByExpression), HashJoinInfo (org.apache.phoenix.join.HashJoinInfo), Region (org.apache.hadoop.hbase.regionserver.Region), PhoenixTransactionContext (org.apache.phoenix.transaction.PhoenixTransactionContext), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
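
The transaction handling here is one half of a round trip: the client serializes its transaction state into the BaseScannerRegionObserver.TX_STATE scan attribute, and the server decodes it back into a PhoenixTransactionContext. A minimal sketch of just the server-side step (the null guard is an assumption; the method above only decodes when a data-table join is present):

// Recover the client's transaction from the scan attribute, if any
byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
PhoenixTransactionContext tx =
        txState == null ? null : MutationState.decodeTransaction(txState);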

Example 3 with PhoenixTransactionContext

Use of org.apache.phoenix.transaction.PhoenixTransactionContext in project phoenix by apache.

From class PhoenixIndexMetaDataBuilder, method getIndexMetaDataCache.

private static IndexMetaDataCache getIndexMetaDataCache(RegionCoprocessorEnvironment env, Map<String, byte[]> attributes) throws IOException {
    if (attributes == null) {
        return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE;
    }
    byte[] uuid = attributes.get(PhoenixIndexCodec.INDEX_UUID);
    if (uuid == null) {
        return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE;
    }
    byte[] md = attributes.get(PhoenixIndexCodec.INDEX_PROTO_MD);
    // Record whether the protobuf encoding was found before falling back to the
    // legacy attribute; computing this inside the md != null branch would always
    // yield true.
    boolean useProto = md != null;
    if (md == null) {
        md = attributes.get(PhoenixIndexCodec.INDEX_MD);
    }
    if (md != null) {
        byte[] txState = attributes.get(BaseScannerRegionObserver.TX_STATE);
        final List<IndexMaintainer> indexMaintainers = IndexMaintainer.deserialize(md, useProto);
        final PhoenixTransactionContext txnContext = TransactionFactory.getTransactionProvider().getTransactionContext(txState);
        byte[] clientVersionBytes = attributes.get(PhoenixIndexCodec.CLIENT_VERSION);
        final int clientVersion = clientVersionBytes == null ? IndexMetaDataCache.UNKNOWN_CLIENT_VERSION : Bytes.toInt(clientVersionBytes);
        return new IndexMetaDataCache() {

            @Override
            public void close() throws IOException {
            }

            @Override
            public List<IndexMaintainer> getIndexMaintainers() {
                return indexMaintainers;
            }

            @Override
            public PhoenixTransactionContext getTransactionContext() {
                return txnContext;
            }

            @Override
            public int getClientVersion() {
                return clientVersion;
            }
        };
    } else {
        byte[] tenantIdBytes = attributes.get(PhoenixRuntime.TENANT_ID_ATTRIB);
        ImmutableBytesPtr tenantId = tenantIdBytes == null ? null : new ImmutableBytesPtr(tenantIdBytes);
        TenantCache cache = GlobalCache.getTenantCache(env, tenantId);
        IndexMetaDataCache indexCache = (IndexMetaDataCache) cache.getServerCache(new ImmutableBytesPtr(uuid));
        if (indexCache == null) {
            String msg = "key=" + ServerCacheClient.idToString(uuid) + " region=" + env.getRegion() + " host=" + env.getRegionServerServices().getServerName();
            SQLException e = new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_METADATA_NOT_FOUND).setMessage(msg).build().buildException();
            // will not return
            ServerUtil.throwIOException("Index update failed", e);
        }
        return indexCache;
    }
}
Also used: IndexMetaDataCache (org.apache.phoenix.cache.IndexMetaDataCache), TenantCache (org.apache.phoenix.cache.TenantCache), SQLException (java.sql.SQLException), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PhoenixTransactionContext (org.apache.phoenix.transaction.PhoenixTransactionContext), SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo)
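
Whichever path produces it, inline mutation attributes or the tenant's server cache, callers see the same IndexMetaDataCache interface. A hypothetical consumer (the method name useCache is an assumption; the getters are the interface shown above):

static void useCache(IndexMetaDataCache cache) throws IOException {
    List<IndexMaintainer> maintainers = cache.getIndexMaintainers();
    PhoenixTransactionContext txnContext = cache.getTransactionContext();
    int clientVersion = cache.getClientVersion();
    // ... drive index maintenance with maintainers/txnContext/clientVersion ...
    cache.close();
}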

Example 4 with PhoenixTransactionContext

Use of org.apache.phoenix.transaction.PhoenixTransactionContext in project phoenix by apache.

From class IndexMetaDataCacheFactory, method newCache.

@Override
public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, final MemoryChunk chunk, boolean useProtoForIndexMaintainer, final int clientVersion) throws SQLException {
    // just use the standard keyvalue builder - this doesn't really need to be fast
    final List<IndexMaintainer> maintainers = IndexMaintainer.deserialize(cachePtr, GenericKeyValueBuilder.INSTANCE, useProtoForIndexMaintainer);
    final PhoenixTransactionContext txnContext;
    try {
        txnContext = txState.length != 0 ? TransactionFactory.getTransactionProvider().getTransactionContext(txState) : null;
    } catch (IOException e) {
        throw new SQLException(e);
    }
    return new IndexMetaDataCache() {

        @Override
        public void close() throws IOException {
            chunk.close();
        }

        @Override
        public List<IndexMaintainer> getIndexMaintainers() {
            return maintainers;
        }

        @Override
        public PhoenixTransactionContext getTransactionContext() {
            return txnContext;
        }

        @Override
        public int getClientVersion() {
            return clientVersion;
        }
    };
}
Also used: IndexMetaDataCache (org.apache.phoenix.cache.IndexMetaDataCache), SQLException (java.sql.SQLException), IOException (java.io.IOException), PhoenixTransactionContext (org.apache.phoenix.transaction.PhoenixTransactionContext)
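
Note the two "no transaction" encodings across these examples: the metadata builder above passes a possibly-null txState straight through, while this factory is handed a byte array and treats zero length as "no transaction". A sketch of a guard covering both conventions (merging the two checks is an assumption; getTransactionContext(byte[]) throws IOException, as the try/catch in the factory shows):

// Null context for non-transactional writes, whichever encoding arrives
PhoenixTransactionContext txnContext = (txState != null && txState.length != 0)
        ? TransactionFactory.getTransactionProvider().getTransactionContext(txState)
        : null;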

Example 5 with PhoenixTransactionContext

Use of org.apache.phoenix.transaction.PhoenixTransactionContext in project phoenix by apache.

From class PhoenixTransactionalIndexer, method preBatchMutate.

@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Mutation m = miniBatchOp.getOperation(0);
    if (!codec.isEnabled(m)) {
        super.preBatchMutate(c, miniBatchOp);
        return;
    }
    PhoenixIndexMetaData indexMetaData = new PhoenixIndexMetaDataBuilder(c.getEnvironment()).getIndexMetaData(miniBatchOp);
    if (indexMetaData.getClientVersion() >= PhoenixDatabaseMetaData.MIN_TX_CLIENT_SIDE_MAINTENANCE && !indexMetaData.hasLocalIndexes()) {
        // Still generate index updates server side for local indexes
        super.preBatchMutate(c, miniBatchOp);
        return;
    }
    BatchMutateContext context = new BatchMutateContext();
    setBatchMutateContext(c, context);
    Collection<Pair<Mutation, byte[]>> indexUpdates = null;
    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }
        RegionCoprocessorEnvironment env = c.getEnvironment();
        PhoenixTransactionContext txnContext = indexMetaData.getTransactionContext();
        if (txnContext == null) {
            throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
        }
        PhoenixTxIndexMutationGenerator generator = new PhoenixTxIndexMutationGenerator(env.getConfiguration(), indexMetaData, env.getRegionInfo().getTable().getName(), env.getRegionInfo().getStartKey(), env.getRegionInfo().getEndKey());
        try (HTableInterface htable = env.getTable(env.getRegionInfo().getTable())) {
            // get the index updates for all elements in this batch
            indexUpdates = generator.getIndexUpdates(htable, getMutationIterator(miniBatchOp));
        }
        byte[] tableName = c.getEnvironment().getRegion().getTableDesc().getTableName().getName();
        Iterator<Pair<Mutation, byte[]>> indexUpdatesItr = indexUpdates.iterator();
        List<Mutation> localUpdates = new ArrayList<Mutation>(indexUpdates.size());
        while (indexUpdatesItr.hasNext()) {
            Pair<Mutation, byte[]> next = indexUpdatesItr.next();
            if (Bytes.compareTo(next.getSecond(), tableName) == 0) {
                // These mutations will not go through the preDelete hooks, so we
                // must manually convert them here.
                Mutation mutation = TransactionUtil.convertIfDelete(next.getFirst());
                localUpdates.add(mutation);
                indexUpdatesItr.remove();
            }
        }
        if (!localUpdates.isEmpty()) {
            miniBatchOp.addOperationsFromCP(0, localUpdates.toArray(new Mutation[localUpdates.size()]));
        }
        if (!indexUpdates.isEmpty()) {
            context.indexUpdates = indexUpdates;
        }
        current.addTimelineAnnotation("Built index updates, doing preStep");
        TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size());
    } catch (Throwable t) {
        String msg = "Failed to update index with entries:" + indexUpdates;
        LOG.error(msg, t);
        ServerUtil.throwIOException(msg, t);
    }
}
Also used: TraceScope (org.apache.htrace.TraceScope), ArrayList (java.util.ArrayList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Span (org.apache.htrace.Span), NullSpan (org.apache.phoenix.trace.util.NullSpan), PhoenixTxIndexMutationGenerator (org.apache.phoenix.execute.PhoenixTxIndexMutationGenerator), RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), DelegateRegionCoprocessorEnvironment (org.apache.phoenix.coprocessor.DelegateRegionCoprocessorEnvironment), Mutation (org.apache.hadoop.hbase.client.Mutation), PhoenixTransactionContext (org.apache.phoenix.transaction.PhoenixTransactionContext), Pair (org.apache.hadoop.hbase.util.Pair)
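
One detail worth reusing from this method is its tracing idiom: rather than guarding every annotation with an if, it substitutes NullSpan.INSTANCE when no trace is active, so annotations become no-ops. A minimal sketch of just that idiom (the annotation text is an assumption; all calls appear in the method above):

try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
    Span current = scope.getSpan();
    if (current == null) {
        // Null-object stand-in: annotation calls are safe even when tracing is off
        current = NullSpan.INSTANCE;
    }
    current.addTimelineAnnotation("doing the work");
}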

Aggregations

PhoenixTransactionContext (org.apache.phoenix.transaction.PhoenixTransactionContext): 5
SQLException (java.sql.SQLException): 3
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 2
IndexMetaDataCache (org.apache.phoenix.cache.IndexMetaDataCache): 2
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 2
IOException (java.io.IOException): 1
Connection (java.sql.Connection): 1
PreparedStatement (java.sql.PreparedStatement): 1
ResultSet (java.sql.ResultSet): 1
Statement (java.sql.Statement): 1
ArrayList (java.util.ArrayList): 1
Properties (java.util.Properties): 1
Get (org.apache.hadoop.hbase.client.Get): 1
Mutation (org.apache.hadoop.hbase.client.Mutation): 1
Put (org.apache.hadoop.hbase.client.Put): 1
Result (org.apache.hadoop.hbase.client.Result): 1
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 1
Region (org.apache.hadoop.hbase.regionserver.Region): 1
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 1
Pair (org.apache.hadoop.hbase.util.Pair): 1