Example 36 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From the class MutationState, method commit().

public void commit() throws SQLException {
    Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> txMutations = Collections.emptyMap();
    int retryCount = 0;
    do {
        boolean sendSuccessful = false;
        boolean retryCommit = false;
        SQLException sqlE = null;
        try {
            send();
            txMutations = this.txMutations;
            sendSuccessful = true;
        } catch (SQLException e) {
            sqlE = e;
        } finally {
            try {
                if (txContext != null && isTransactionStarted()) {
                    TransactionFailureException txFailure = null;
                    boolean finishSuccessful = false;
                    try {
                        if (sendSuccessful) {
                            txContext.finish();
                            finishSuccessful = true;
                        }
                    } catch (TransactionFailureException e) {
                        if (logger.isInfoEnabled())
                            logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer() + " with retry count of " + retryCount);
                        retryCommit = (e instanceof TransactionConflictException && retryCount < MAX_COMMIT_RETRIES);
                        txFailure = e;
                        SQLException nextE = TransactionUtil.getTransactionFailureException(e);
                        if (sqlE == null) {
                            sqlE = nextE;
                        } else {
                            sqlE.setNextException(nextE);
                        }
                    } finally {
                        // If send fails or finish fails, abort the tx
                        if (!finishSuccessful) {
                            try {
                                txContext.abort(txFailure);
                                if (logger.isInfoEnabled())
                                    logger.info("Abort successful");
                            } catch (TransactionFailureException e) {
                                if (logger.isInfoEnabled())
                                    logger.info("Abort failed with " + e);
                                SQLException nextE = TransactionUtil.getTransactionFailureException(e);
                                if (sqlE == null) {
                                    sqlE = nextE;
                                } else {
                                    sqlE.setNextException(nextE);
                                }
                            }
                        }
                    }
                }
            } finally {
                try {
                    resetState();
                } finally {
                    if (retryCommit) {
                        startTransaction();
                        // Add back read fences
                        Set<TableRef> txTableRefs = txMutations.keySet();
                        for (TableRef tableRef : txTableRefs) {
                            PTable dataTable = tableRef.getTable();
                            addDMLFence(dataTable);
                        }
                        try {
                            // Only retry if an index was added
                            retryCommit = shouldResubmitTransaction(txTableRefs);
                        } catch (SQLException e) {
                            retryCommit = false;
                            if (sqlE == null) {
                                sqlE = e;
                            } else {
                                sqlE.setNextException(e);
                            }
                        }
                    }
                    if (sqlE != null && !retryCommit) {
                        throw sqlE;
                    }
                }
            }
        }
        // Retry commit once if conflict occurred and index was added
        if (!retryCommit) {
            break;
        }
        retryCount++;
        mutations.putAll(txMutations);
    } while (true);
}
Also used: TransactionFailureException (org.apache.tephra.TransactionFailureException), SQLException (java.sql.SQLException), TransactionConflictException (org.apache.tephra.TransactionConflictException), Map (java.util.Map), PTableRef (org.apache.phoenix.schema.PTableRef), TableRef (org.apache.phoenix.schema.TableRef), PTable (org.apache.phoenix.schema.PTable).
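
Stripped of the Phoenix and Tephra specifics, the control flow above is: send the mutations, finish the transaction, and on a transaction conflict abort and retry at most MAX_COMMIT_RETRIES times. A minimal sketch of that skeleton, using hypothetical stand-ins (TxClient, ConflictException) rather than the real Tephra API:

interface TxClient {
    void send() throws Exception;   // analogous to send() above
    void finish() throws Exception; // analogous to txContext.finish()
    void abort();                   // analogous to txContext.abort(txFailure)
}

class ConflictException extends Exception {
}

class CommitRetrySketch {
    static final int MAX_COMMIT_RETRIES = 1; // the comment above says "retry commit once"

    static void commit(TxClient tx) throws Exception {
        int retryCount = 0;
        do {
            try {
                tx.send();
                tx.finish(); // commit succeeded
                return;
            } catch (ConflictException e) {
                tx.abort(); // abort before deciding whether to retry
                if (retryCount >= MAX_COMMIT_RETRIES) {
                    throw e; // out of retries: surface the conflict
                }
            }
            retryCount++;
        } while (true);
    }
}

The real method layers exception chaining (setNextException) and state reset on top of this skeleton, and only actually retries when shouldResubmitTransaction reports that an index was added concurrently.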

Example 37 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From the class ScanPlan, method newIterator().

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    // Set any scan attributes before creating the scanner, as it will be too late afterwards
    scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
    ResultIterator scanner;
    TableRef tableRef = this.getTableRef();
    PTable table = tableRef.getTable();
    boolean isSalted = table.getBucketNum() != null;
    /*
     * If no limit or topN, use parallel iterator so that we get results faster.
     * Otherwise, if limit is provided, run query serially.
     */
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    Integer perScanLimit = !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset);
    boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType());
    /*
     * For queries that order by row key and do not possibly scan more than a
     * threshold worth of data, we only need to initialize scanners corresponding
     * to the first (or last, if reverse) scan per region.
     */
    boolean initFirstScanOnly = (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) && isDataToScanWithinThreshold;
    BaseResultIterators iterators;
    if (isOffsetOnServer) {
        iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan);
    } else if (isSerial) {
        iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan);
    } else {
        iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly);
    }
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    if (isOffsetOnServer) {
        scanner = new ConcatResultIterator(iterators);
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    } else if (isOrdered) {
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) {
            /*
             * For salted tables or local indexes, a merge sort is needed if:
             * 1) the config phoenix.query.force.rowkeyorder is set to true, or
             * 2) the query has an order by that wants to sort the results by
             *    the row key (forward or reverse ordering).
             */
            scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
        } else if (useRoundRobinIterator()) {
            /*
             * For any kind of table, round robin is possible if there is no
             * ordering of rows needed.
             */
            scanner = new RoundRobinResultIterator(iterators, this);
        } else {
            scanner = new ConcatResultIterator(iterators);
        }
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
Also used: ParallelIterators (org.apache.phoenix.iterate.ParallelIterators), MergeSortRowKeyResultIterator (org.apache.phoenix.iterate.MergeSortRowKeyResultIterator), OffsetResultIterator (org.apache.phoenix.iterate.OffsetResultIterator), SerialIterators (org.apache.phoenix.iterate.SerialIterators), MergeSortTopNResultIterator (org.apache.phoenix.iterate.MergeSortTopNResultIterator), SequenceResultIterator (org.apache.phoenix.iterate.SequenceResultIterator), SpoolingResultIterator (org.apache.phoenix.iterate.SpoolingResultIterator), LimitingResultIterator (org.apache.phoenix.iterate.LimitingResultIterator), ConcatResultIterator (org.apache.phoenix.iterate.ConcatResultIterator), RoundRobinResultIterator (org.apache.phoenix.iterate.RoundRobinResultIterator), ResultIterator (org.apache.phoenix.iterate.ResultIterator), ChunkedResultIterator (org.apache.phoenix.iterate.ChunkedResultIterator), BaseResultIterators (org.apache.phoenix.iterate.BaseResultIterators), PTable (org.apache.phoenix.schema.PTable), TableRef (org.apache.phoenix.schema.TableRef).
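
The tail of newIterator is a plain decorator chain: each wrapper adds one behavior (skip an offset, enforce a limit, merge-sort, and so on) around the underlying scanner. A hedged miniature of that composition, where Rows, OffsetRows, and LimitRows are hypothetical stand-ins for the Phoenix ResultIterator types:

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

interface Rows extends Iterator<String> {
}

class ListRows implements Rows {
    private final Iterator<String> it;
    ListRows(List<String> rows) { this.it = rows.iterator(); }
    public boolean hasNext() { return it.hasNext(); }
    public String next() { return it.next(); }
}

// Analogous to OffsetResultIterator: skip the first 'offset' rows.
class OffsetRows implements Rows {
    private final Rows delegate;
    OffsetRows(Rows delegate, int offset) {
        this.delegate = delegate;
        for (int i = 0; i < offset && delegate.hasNext(); i++) {
            delegate.next();
        }
    }
    public boolean hasNext() { return delegate.hasNext(); }
    public String next() { return delegate.next(); }
}

// Analogous to LimitingResultIterator: stop after 'limit' rows.
class LimitRows implements Rows {
    private final Rows delegate;
    private int remaining;
    LimitRows(Rows delegate, int limit) { this.delegate = delegate; this.remaining = limit; }
    public boolean hasNext() { return remaining > 0 && delegate.hasNext(); }
    public String next() { remaining--; return delegate.next(); }
}

With these, new LimitRows(new OffsetRows(new ListRows(Arrays.asList("a", "b", "c", "d")), 1), 2) yields "b" then "c", mirroring how ScanPlan stacks OffsetResultIterator and LimitingResultIterator over the base scanner.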

Example 38 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From the class MutationState, method joinMutationState().

private void joinMutationState(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> srcMutations, Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> dstMutations) {
    // Merge srcMutations into dstMutations, keeping state from srcMutations for any overlaps
    for (Map.Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> entry : srcMutations.entrySet()) {
        // Replace existing entries for the table with new entries
        TableRef tableRef = entry.getKey();
        Map<ImmutableBytesPtr, RowMutationState> srcRows = entry.getValue();
        joinMutationState(tableRef, srcRows, dstMutations);
    }
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Map (java.util.Map), PTableRef (org.apache.phoenix.schema.PTableRef), TableRef (org.apache.phoenix.schema.TableRef).
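
The per-table overload joinMutationState(tableRef, srcRows, dstMutations) is not shown here, but the comment states the contract: merge the source's nested map into the destination, with the source's row state winning on overlaps. A hedged sketch of that map-level shape (the real per-row join also reconciles column values inside RowMutationState):

import java.util.HashMap;
import java.util.Map;

class JoinSketch {
    // Merge src into dst; where a table exists in both maps, incoming rows
    // replace overlapping row keys. T, R, and S stand in for TableRef,
    // ImmutableBytesPtr, and RowMutationState.
    static <T, R, S> void join(Map<T, Map<R, S>> src, Map<T, Map<R, S>> dst) {
        for (Map.Entry<T, Map<R, S>> entry : src.entrySet()) {
            dst.merge(entry.getKey(), new HashMap<>(entry.getValue()),
                    (existing, incoming) -> {
                        existing.putAll(incoming); // incoming wins on overlap
                        return existing;
                    });
        }
    }
}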

Example 39 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From the class WhereCompilerTest, method testTenantConstraintsAddedToScanWithNullTenantTypeId().

@Test
public void testTenantConstraintsAddedToScanWithNullTenantTypeId() throws SQLException {
    String tenantId = "000000000000123";
    createTestTable(getUrl(), "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, " + "id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, id)) multi_tenant=true");
    createTestTable(getUrl(tenantId), "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST");
    String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'";
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(tenantId), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
    PTable table = plan.getTableRef().getTable();
    Expression aInteger = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression();
    Expression aString = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression();
    assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOp.EQUAL, aInteger, 0), constantComparison(CompareOp.EQUAL, aString, "foo")), TWO_BYTE_QUALIFIERS), filter);
    byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId);
    assertArrayEquals(startRow, scan.getStartRow());
    byte[] stopRow = startRow;
    assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter), TestUtil.multiEncodedKVFilter (org.apache.phoenix.util.TestUtil.multiEncodedKVFilter), SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), Filter (org.apache.hadoop.hbase.filter.Filter), TestUtil.singleKVFilter (org.apache.phoenix.util.TestUtil.singleKVFilter), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression), Expression (org.apache.phoenix.expression.Expression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression), Scan (org.apache.hadoop.hbase.client.Scan), ColumnRef (org.apache.phoenix.schema.ColumnRef), PhoenixPreparedStatement (org.apache.phoenix.jdbc.PhoenixPreparedStatement), PTable (org.apache.phoenix.schema.PTable), TableRef (org.apache.phoenix.schema.TableRef), Test (org.junit.Test), BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest).
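
The last two assertions pin the scan to a single tenant: the start row is the tenant id itself and the stop row is the next key after it, so exactly the keys carrying that 15-character tenant prefix fall in range. A hedged sketch of what such a "next key" increment does (an illustration, not Phoenix's actual ByteUtil.nextKey source):

import java.util.Arrays;

class NextKeySketch {
    // Smallest byte[] strictly greater than every key prefixed by 'key':
    // increment the last byte, carrying past 0xFF.
    static byte[] nextKey(byte[] key) {
        byte[] next = Arrays.copyOf(key, key.length);
        for (int i = next.length - 1; i >= 0; i--) {
            if (next[i] != (byte) 0xFF) {
                next[i]++;
                return next;
            }
            next[i] = 0; // carry into the preceding byte
        }
        throw new IllegalStateException("key is all 0xFF; no next key exists");
    }
}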

Example 40 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From the class MetaDataEndpointImpl, method dropColumnsFromChildViews().

private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
    List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
    // Collect the Delete mutations that correspond to the columns being dropped from the base table.
    for (Mutation m : tableMetadata) {
        if (m instanceof Delete) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnDeletesForBaseTable.add((Delete) m);
            }
        }
    }
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short numColsDeleted = 0;
        byte[] viewTenantId = viewInfo.getTenantId();
        byte[] viewSchemaName = viewInfo.getSchemaName();
        byte[] viewName = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
        // lock the rows corresponding to views so that no other thread can modify the view
        // meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        int numCols = view.getColumns().size();
        int minDroppedColOrdinalPos = Integer.MAX_VALUE;
        for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
                // ignore since it means that the column family is not present for the column to
                // be dropped
            } catch (ColumnNotFoundException e) {
                // ignore since it means the column is not present in the view
            }
            // If the column being dropped is referenced in the view's WHERE clause, we cannot drop it.
            if (existingViewColumn != null && view.getViewStatement() != null) {
                ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
                PhoenixConnection conn = null;
                try {
                    conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                } catch (ClassNotFoundException e) {
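                    // ignore; the PhoenixConnection class is expected to be available on the server classpath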
                }
                PhoenixStatement statement = new PhoenixStatement(conn);
                TableRef baseTableRef = new TableRef(basePhysicalTable);
                ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
                StatementContext context = new StatementContext(statement, columnResolver);
                Expression whereExpression = WhereCompiler.compile(context, viewWhere);
                Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
                ColumnFinder columnFinder = new ColumnFinder(colExpression);
                whereExpression.accept(columnFinder);
                if (columnFinder.getColumnFound()) {
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
                }
            }
            minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
            if (existingViewColumn != null) {
                --numColsDeleted;
                if (ordinalPositionList.size() == 0) {
                    ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
                    for (PColumn col : view.getColumns()) {
                        ordinalPositionList.addColumn(getColumnKey(viewKey, col));
                    }
                }
                ordinalPositionList.dropColumn(columnKey);
                Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
                mutationsForAddingColumnsToViews.add(viewColumnDelete);
                // drop any view indexes that need this column
                dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
            }
        }
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
    }
    return null;
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ArrayList (java.util.ArrayList), ByteString (com.google.protobuf.ByteString), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), PTable (org.apache.phoenix.schema.PTable), StatementContext (org.apache.phoenix.compile.StatementContext), PColumn (org.apache.phoenix.schema.PColumn), ParseNode (org.apache.phoenix.parse.ParseNode), LiteralParseNode (org.apache.phoenix.parse.LiteralParseNode), ColumnResolver (org.apache.phoenix.compile.ColumnResolver), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint), ColumnFamilyNotFoundException (org.apache.phoenix.schema.ColumnFamilyNotFoundException), ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException), SQLParser (org.apache.phoenix.parse.SQLParser), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression), Expression (org.apache.phoenix.expression.Expression), ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression), Mutation (org.apache.hadoop.hbase.client.Mutation), ColumnRef (org.apache.phoenix.schema.ColumnRef), TableRef (org.apache.phoenix.schema.TableRef).
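
The notable move above is ColumnFinder: the view's WHERE clause is compiled into an Expression tree, the tree is visited looking for the column being dropped, and the drop is rejected (UNALLOWED_TABLE_MUTATION) if the column is referenced. A hedged miniature of that visitor pattern, with hypothetical Expr and ExprVisitor types in place of Phoenix's expression classes:

interface Expr {
    void accept(ExprVisitor v);
}

interface ExprVisitor {
    void visit(Expr e);
}

class Col implements Expr {
    final String name;
    Col(String name) { this.name = name; }
    public void accept(ExprVisitor v) { v.visit(this); }
}

class And implements Expr {
    final Expr left, right;
    And(Expr left, Expr right) { this.left = left; this.right = right; }
    public void accept(ExprVisitor v) {
        v.visit(this);
        left.accept(v);  // the visitor sees every node in the subtree
        right.accept(v);
    }
}

class ColumnFinderSketch implements ExprVisitor {
    private final Expr target;
    private boolean found;
    ColumnFinderSketch(Expr target) { this.target = target; }
    public void visit(Expr e) { if (e == target) found = true; }
    boolean getColumnFound() { return found; }
}

Given Expr a = new Col("A") and Expr where = new And(a, new Col("B")), visiting where with new ColumnFinderSketch(a) leaves getColumnFound() true, which is exactly the condition under which dropColumnsFromChildViews refuses the mutation.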

Aggregations

TableRef (org.apache.phoenix.schema.TableRef): 43
PTable (org.apache.phoenix.schema.PTable): 30
PColumn (org.apache.phoenix.schema.PColumn): 16
Expression (org.apache.phoenix.expression.Expression): 14
SQLException (java.sql.SQLException): 13
ColumnRef (org.apache.phoenix.schema.ColumnRef): 13
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 12
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 12
Scan (org.apache.hadoop.hbase.client.Scan): 11
ParseNode (org.apache.phoenix.parse.ParseNode): 11
SelectStatement (org.apache.phoenix.parse.SelectStatement): 10
ArrayList (java.util.ArrayList): 9
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 9
PTableRef (org.apache.phoenix.schema.PTableRef): 8
List (java.util.List): 7
Map (java.util.Map): 7
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 7
Hint (org.apache.phoenix.parse.HintNode.Hint): 7
Tuple (org.apache.phoenix.schema.tuple.Tuple): 6
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 5