
Example 11 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class PartialCommitIT, method getConnectionWithTableOrderPreservingMutationState.

private PhoenixConnection getConnectionWithTableOrderPreservingMutationState() throws SQLException {
    Connection con = driver.connect(url, new Properties());
    PhoenixConnection phxCon = new PhoenixConnection(con.unwrap(PhoenixConnection.class));
    final Map<TableRef, Map<ImmutableBytesPtr, MutationState.RowMutationState>> mutations = Maps.newTreeMap(new TableRefComparator());
    // passing a null mutation state forces the connection.newMutationState() to be used to create the MutationState
    return new PhoenixConnection(phxCon, null) {

        @Override
        protected MutationState newMutationState(int maxSize, int maxSizeBytes) {
            return new MutationState(maxSize, maxSizeBytes, this, mutations, null, null);
        }

    };
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Properties(java.util.Properties) Map(java.util.Map) TableRef(org.apache.phoenix.schema.TableRef)
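
The TableRefComparator used with Maps.newTreeMap above is not shown in this snippet; its job is simply to give the TreeMap a deterministic key ordering so that mutations are flushed table by table in a stable order. A minimal sketch of such a comparator, assuming ordering by physical table name, might look like this:

import java.util.Comparator;

import org.apache.phoenix.schema.TableRef;

// Illustrative only: order TableRefs by their physical HBase table name so that a
// TreeMap keyed on TableRef iterates its entries in a predictable order.
public class TableRefComparator implements Comparator<TableRef> {

    @Override
    public int compare(TableRef a, TableRef b) {
        return a.getTable().getPhysicalName().getString()
                .compareTo(b.getTable().getPhysicalName().getString());
    }
}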

Example 12 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class BaseQueryPlan, method iterator.

public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = context.getScan();
    }
    /*
     * For aggregate queries, we still need to let the AggregationPlan
     * proceed so that we can return proper aggregates even if there are
     * no rows to be scanned.
     */
    if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    if (tableRef == TableRef.EMPTY_TABLE_REF) {
        return newIterator(scanGrouper, scan);
    }
    // Set miscellaneous scan attributes. This is the last chance to set them before we
    // clone the scan for each parallelized chunk.
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    if (dynamicFilter != null) {
        WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        ScanUtil.setReversed(scan);
        // Hack for working around PHOENIX-3121 and HBASE-16296.
        // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
        int scannerCacheSize = context.getStatement().getFetchSize();
        if (limit != null && limit % scannerCacheSize == 0) {
            scan.setCaching(scannerCacheSize + 1);
        }
    }
    if (statement.getHint().hasHint(Hint.SMALL)) {
        scan.setSmall(true);
    }
    PhoenixConnection connection = context.getConnection();
    // set read consistency
    if (table.getType() != PTableType.SYSTEM) {
        scan.setConsistency(connection.getConsistency());
    }
    // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
    if (!table.isTransactional()) {
        // Get the time range of row_timestamp column
        TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
        // Get the already existing time range on the scan.
        TimeRange scanTimeRange = scan.getTimeRange();
        Long scn = connection.getSCN();
        if (scn == null) {
            // If we haven't resolved the time at the beginning of compilation, don't
            // force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
            scn = tableRef.getTimeStamp();
            if (scn == QueryConstants.UNSET_TIMESTAMP) {
                scn = HConstants.LATEST_TIMESTAMP;
            }
        }
        try {
            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
            if (timeRangeToUse == null) {
                return ResultIterator.EMPTY_ITERATOR;
            }
            scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    byte[] tenantIdBytes;
    if (table.isMultiTenant()) {
        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
    } else {
        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    }
    ScanUtil.setTenantId(scan, tenantIdBytes);
    String customAnnotations = LogUtil.customAnnotationsToString(connection);
    ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
    // Set local index related scan attributes. 
    if (table.getIndexType() == IndexType.LOCAL) {
        ScanUtil.setLocalIndex(scan);
        Set<PColumn> dataColumns = context.getDataColumns();
        // If the query projects data columns that are not present in the local index,
        // set up the scan to join them back from the data table.
        if (!dataColumns.isEmpty()) {
            // Set data columns to be joined back from the data table.
            PTable parentTable = context.getCurrentTable().getTable();
            String parentSchemaName = parentTable.getParentSchemaName().getString();
            String parentTableName = parentTable.getParentTableName().getString();
            final ParseNodeFactory FACTORY = new ParseNodeFactory();
            // TODO: is it necessary to re-resolve the table?
            TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
            PTable dataTable = dataTableRef.getTable();
            // Set data columns to be joined back from the data table.
            serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
            KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
            // Set key value schema of the data columns.
            serializeSchemaIntoScan(scan, schema);
            // Set index maintainer of the local index.
            serializeIndexMaintainerIntoScan(scan, dataTable);
            // Set view constants if exists.
            serializeViewConstantsIntoScan(scan, dataTable);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
    }
    ResultIterator iterator = newIterator(scanGrouper, scan);
    iterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {

        @Override
        public void close() throws SQLException {
            try {
                super.close();
            } finally {
                SQLCloseables.closeAll(dependencies);
            }
        }
    };
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
    }
    // wrap the iterator so we start/end tracing as we expect
    TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
    return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) TracingIterator(org.apache.phoenix.trace.TracingIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) DelegateResultIterator(org.apache.phoenix.iterate.DelegateResultIterator) TraceScope(org.apache.htrace.TraceScope) DelegateResultIterator(org.apache.phoenix.iterate.DelegateResultIterator) IOException(java.io.IOException) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) PColumn(org.apache.phoenix.schema.PColumn) TimeRange(org.apache.hadoop.hbase.io.TimeRange) KeyValueSchema(org.apache.phoenix.schema.KeyValueSchema) TableRef(org.apache.phoenix.schema.TableRef) ParseNodeFactory(org.apache.phoenix.parse.ParseNodeFactory)
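
Client code does not normally call BaseQueryPlan.iterator() directly; it is driven through the JDBC layer when a query is executed and its ResultSet is consumed. A minimal sketch of that path (the connection URL and table name are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class QueryPlanUsage {

    public static void main(String[] args) throws Exception {
        // Placeholder URL and table; executing the SELECT compiles a query plan, and
        // consuming the ResultSet drives the plan's iterator() to set up the HBase scan.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT * FROM MY_TABLE LIMIT 10")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}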

Example 13 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class MutationState, method validateAll.

/**
     * Validates that the meta data is valid against the server meta data if we haven't yet done so.
     * Otherwise, for every UPSERT VALUES call, we'd need to hit the server to see if the meta data
     * has changed.
     * @return the server time to use for the upsert
     * @throws SQLException if the table or any columns no longer exist
     */
private long[] validateAll() throws SQLException {
    int i = 0;
    long[] timeStamps = new long[this.mutations.size()];
    for (Map.Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> entry : mutations.entrySet()) {
        TableRef tableRef = entry.getKey();
        timeStamps[i++] = validate(tableRef, entry.getValue());
    }
    return timeStamps;
}
Also used : Map(java.util.Map) PTableRef(org.apache.phoenix.schema.PTableRef) TableRef(org.apache.phoenix.schema.TableRef)
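
This validation runs when a commit flushes pending mutations for several tables at once. A sketch of client code that would exercise it (connection URL and table names are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class MultiTableCommit {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.setAutoCommit(false);
            try (Statement stmt = conn.createStatement()) {
                stmt.executeUpdate("UPSERT INTO T1 (ID, VAL) VALUES (1, 'a')");
                stmt.executeUpdate("UPSERT INTO T2 (ID, VAL) VALUES (2, 'b')");
            }
            // commit() flushes the MutationState; each pending TableRef is validated
            // against the server metadata before its mutations are sent
            conn.commit();
        }
    }
}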

Example 14 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class MutationState, method send.

@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
    int i = 0;
    long[] serverTimeStamps = null;
    boolean sendAll = false;
    if (tableRefIterator == null) {
        serverTimeStamps = validateAll();
        tableRefIterator = mutations.keySet().iterator();
        sendAll = true;
    }
    Map<ImmutableBytesPtr, RowMutationState> valuesMap;
    List<TableRef> txTableRefs = Lists.newArrayListWithExpectedSize(mutations.size());
    Map<TableInfo, List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
    // add tracing for this operation
    try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
        Span span = trace.getSpan();
        ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
        boolean isTransactional;
        while (tableRefIterator.hasNext()) {
            // at this point we are going through mutations for each table
            final TableRef tableRef = tableRefIterator.next();
            valuesMap = mutations.get(tableRef);
            if (valuesMap == null || valuesMap.isEmpty()) {
                continue;
            }
            // Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
            long serverTimestamp = serverTimeStamps == null ? validate(tableRef, valuesMap) : serverTimeStamps[i++];
            final PTable table = tableRef.getTable();
            Iterator<Pair<PName, List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false, sendAll);
            // build map from physical table to mutation list
            boolean isDataTable = true;
            while (mutationsIterator.hasNext()) {
                Pair<PName, List<Mutation>> pair = mutationsIterator.next();
                PName hTableName = pair.getFirst();
                List<Mutation> mutationList = pair.getSecond();
                TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
                List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
                if (oldMutationList != null)
                    mutationList.addAll(0, oldMutationList);
                isDataTable = false;
            }
            // For transactions, track the statement indexes as we send data over, since none of
            // them would have been committed in the event of a failure.
            if (table.isTransactional()) {
                addUncommittedStatementIndexes(valuesMap.values());
                if (txMutations.isEmpty()) {
                    txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
                }
                // Keep all mutations we've encountered until a commit or rollback.
                // This is not ideal, but there's no good way to get the values back
                // in the event that we need to replay the commit.
                // Copy TableRef so we have the original PTable and know when the
                // indexes have changed.
                joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
            }
        }
        long serverTimestamp = HConstants.LATEST_TIMESTAMP;
        Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
        while (mutationsIterator.hasNext()) {
            Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
            TableInfo tableInfo = pair.getKey();
            byte[] htableName = tableInfo.getHTableName().getBytes();
            List<Mutation> mutationList = pair.getValue();
            //create a span per target table
            //TODO maybe we can be smarter about the table name to string here?
            Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));
            int retryCount = 0;
            boolean shouldRetry = false;
            do {
                TableRef origTableRef = tableInfo.getOrigTableRef();
                PTable table = origTableRef.getTable();
                table.getIndexMaintainers(indexMetaDataPtr, connection);
                final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
                // If we haven't retried yet, retry for this case only, as it's possible that
                // a split will occur after we send the index metadata cache to all known
                // region servers.
                shouldRetry = cache != null;
                SQLException sqlE = null;
                HTableInterface hTable = connection.getQueryServices().getTable(htableName);
                try {
                    if (table.isTransactional()) {
                        // Track tables to which we've sent uncommitted data
                        txTableRefs.add(origTableRef);
                        addDMLFence(table);
                        uncommittedPhysicalNames.add(table.getPhysicalName().getString());
                        // If the table has indexes, wrap the HTable so that the necessary
                        // index metadata is attached in the event of a rollback
                        if (!table.getIndexes().isEmpty()) {
                            hTable = new MetaDataAwareHTable(hTable, origTableRef);
                        }
                        TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table.isImmutableRows());
                        // Only the data table is added as a transaction participant; index tables (the only
                        // other tables written during a commit) don't need conflict detection.
                        if (tableInfo.isDataTable()) {
                            // Even for immutable, we need to do this so that an abort has the state
                            // necessary to generate the rows to delete.
                            addTransactionParticipant(txnAware);
                        } else {
                            txnAware.startTx(getTransaction());
                        }
                        hTable = txnAware;
                    }
                    long numMutations = mutationList.size();
                    GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
                    long startTime = System.currentTimeMillis();
                    child.addTimelineAnnotation("Attempt " + retryCount);
                    List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
                    for (List<Mutation> mutationBatch : mutationBatchList) {
                        hTable.batch(mutationBatch);
                        batchCount++;
                    }
                    if (logger.isDebugEnabled())
                        logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                    child.stop();
                    shouldRetry = false;
                    long mutationCommitTime = System.currentTimeMillis() - startTime;
                    GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
                    long mutationSizeBytes = calculateMutationSize(mutationList);
                    MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
                    mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
                    if (tableInfo.isDataTable()) {
                        numRows -= numMutations;
                    }
                    // Remove batches as we process them
                    mutations.remove(origTableRef);
                } catch (Exception e) {
                    serverTimestamp = ServerUtil.parseServerTimestamp(e);
                    SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
                    if (inferredE != null) {
                        if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
                            // Swallow this exception once, as it's possible that we split after sending the index metadata
                            // and one of the region servers doesn't have it. This will cause it to have it the next go around.
                            // If it fails again, we don't retry.
                            String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
                            logger.warn(LogUtil.addCustomAnnotations(msg, connection));
                            connection.getQueryServices().clearTableRegionCache(htableName);
                            // add a new child span as this one failed
                            child.addTimelineAnnotation(msg);
                            child.stop();
                            child = Tracing.child(span, "Failed batch, attempting retry");
                            continue;
                        }
                        e = inferredE;
                    }
                    // Throw to client an exception that indicates the statements that
                    // were not committed successfully.
                    sqlE = new CommitException(e, getUncommittedStatementIndexes(), serverTimestamp);
                } finally {
                    try {
                        if (cache != null)
                            cache.close();
                    } finally {
                        try {
                            hTable.close();
                        } catch (IOException e) {
                            if (sqlE != null) {
                                sqlE.setNextException(ServerUtil.parseServerException(e));
                            } else {
                                sqlE = ServerUtil.parseServerException(e);
                            }
                        }
                        if (sqlE != null) {
                            throw sqlE;
                        }
                    }
                }
            } while (shouldRetry && retryCount++ < 1);
        }
    }
}
Also used : ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) SQLException(java.sql.SQLException) MutationMetric(org.apache.phoenix.monitoring.MutationMetricQueue.MutationMetric) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Span(org.apache.htrace.Span) PTable(org.apache.phoenix.schema.PTable) Entry(java.util.Map.Entry) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) TransactionAwareHTable(org.apache.tephra.hbase.TransactionAwareHTable) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) TraceScope(org.apache.htrace.TraceScope) IOException(java.io.IOException) TransactionFailureException(org.apache.tephra.TransactionFailureException) IllegalDataException(org.apache.phoenix.schema.IllegalDataException) TimeoutException(java.util.concurrent.TimeoutException) TransactionConflictException(org.apache.tephra.TransactionConflictException) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) SQLException(java.sql.SQLException) IOException(java.io.IOException) PName(org.apache.phoenix.schema.PName) Mutation(org.apache.hadoop.hbase.client.Mutation) PTableRef(org.apache.phoenix.schema.PTableRef) TableRef(org.apache.phoenix.schema.TableRef)
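
The getMutationBatchList(batchSize, batchSizeBytes, mutationList) call above splits each per-table mutation list into smaller batches before handing them to HTableInterface.batch(). Its implementation is not shown here; a rough sketch of that kind of splitting, capped by both row count and approximate byte size (the per-mutation size estimate via Mutation.heapSize() is an assumption for illustration), could look like this:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Mutation;

public final class MutationBatchSplitter {

    // Illustrative only: split mutations into batches of at most maxRows rows or
    // roughly maxBytes bytes, whichever limit is reached first.
    public static List<List<Mutation>> split(List<Mutation> mutations, int maxRows, long maxBytes) {
        List<List<Mutation>> batches = new ArrayList<>();
        List<Mutation> current = new ArrayList<>();
        long currentBytes = 0;
        for (Mutation m : mutations) {
            long size = m.heapSize(); // rough per-mutation size estimate
            if (!current.isEmpty() && (current.size() >= maxRows || currentBytes + size > maxBytes)) {
                batches.add(current);
                current = new ArrayList<>();
                currentBytes = 0;
            }
            current.add(m);
            currentBytes += size;
        }
        if (!current.isEmpty()) {
            batches.add(current);
        }
        return batches;
    }
}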

Example 15 with TableRef

Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

From class MutationState, method addRowMutations.

private Iterator<Pair<PName, List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, RowMutationState> values, final long timestamp, boolean includeAllIndexes, final boolean sendAll) {
    final PTable table = tableRef.getTable();
    // Only maintain tables with immutable rows through this client-side mechanism.
    // TODO: remove check for isWALDisabled once PHOENIX-3137 is fixed.
    final Iterator<PTable> indexes = includeAllIndexes || table.isWALDisabled()
            ? IndexMaintainer.nonDisabledIndexIterator(table.getIndexes().iterator())
            : table.isImmutableRows()
                ? IndexMaintainer.enabledGlobalIndexIterator(table.getIndexes().iterator())
                : Iterators.<PTable>emptyIterator();
    final List<Mutation> mutationList = Lists.newArrayListWithExpectedSize(values.size());
    final List<Mutation> mutationsPertainingToIndex = indexes.hasNext() ? Lists.<Mutation>newArrayListWithExpectedSize(values.size()) : null;
    generateMutations(tableRef, timestamp, values, mutationList, mutationsPertainingToIndex);
    return new Iterator<Pair<PName, List<Mutation>>>() {

        boolean isFirst = true;

        @Override
        public boolean hasNext() {
            return isFirst || indexes.hasNext();
        }

        @Override
        public Pair<PName, List<Mutation>> next() {
            if (isFirst) {
                isFirst = false;
                return new Pair<PName, List<Mutation>>(table.getPhysicalName(), mutationList);
            }
            PTable index = indexes.next();
            List<Mutation> indexMutations;
            try {
                indexMutations = IndexUtil.generateIndexData(table, index, values, mutationsPertainingToIndex, connection.getKeyValueBuilder(), connection);
                // we may also have to include delete mutations for immutable tables if we are not processing all the tables in the mutations map
                if (!sendAll) {
                    TableRef key = new TableRef(index);
                    Map<ImmutableBytesPtr, RowMutationState> rowToColumnMap = mutations.remove(key);
                    if (rowToColumnMap != null) {
                        final List<Mutation> deleteMutations = Lists.newArrayList();
                        generateMutations(tableRef, timestamp, rowToColumnMap, deleteMutations, null);
                        indexMutations.addAll(deleteMutations);
                    }
                }
            } catch (SQLException e) {
                throw new IllegalDataException(e);
            }
            return new Pair<PName, List<Mutation>>(index.getPhysicalName(), indexMutations);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
Also used : SQLException(java.sql.SQLException) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PName(org.apache.phoenix.schema.PName) Iterator(java.util.Iterator) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) PTableRef(org.apache.phoenix.schema.PTableRef) TableRef(org.apache.phoenix.schema.TableRef) Pair(org.apache.hadoop.hbase.util.Pair) IllegalDataException(org.apache.phoenix.schema.IllegalDataException)
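
The anonymous iterator above follows a simple pattern: it returns one precomputed element first (the data table's physical name and mutation list) and then lazily produces one element per index. Stripped of the Phoenix specifics, the shape of that pattern is roughly the following (a generic illustration, not Phoenix API):

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.function.Function;

// Generic shape of the iterator above: emit a fixed head element first, then lazily
// map each element of an underlying iterator into the output.
public class HeadThenMappedIterator<S, T> implements Iterator<T> {

    private final T head;
    private final Iterator<S> rest;
    private final Function<S, T> mapper;
    private boolean isFirst = true;

    public HeadThenMappedIterator(T head, Iterator<S> rest, Function<S, T> mapper) {
        this.head = head;
        this.rest = rest;
        this.mapper = mapper;
    }

    @Override
    public boolean hasNext() {
        return isFirst || rest.hasNext();
    }

    @Override
    public T next() {
        if (isFirst) {
            isFirst = false;
            return head;
        }
        if (!rest.hasNext()) {
            throw new NoSuchElementException();
        }
        return mapper.apply(rest.next());
    }
}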

Aggregations

TableRef (org.apache.phoenix.schema.TableRef) 43
PTable (org.apache.phoenix.schema.PTable) 30
PColumn (org.apache.phoenix.schema.PColumn) 16
Expression (org.apache.phoenix.expression.Expression) 14
SQLException (java.sql.SQLException) 13
ColumnRef (org.apache.phoenix.schema.ColumnRef) 13
LiteralExpression (org.apache.phoenix.expression.LiteralExpression) 12
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 12
Scan (org.apache.hadoop.hbase.client.Scan) 11
ParseNode (org.apache.phoenix.parse.ParseNode) 11
SelectStatement (org.apache.phoenix.parse.SelectStatement) 10
ArrayList (java.util.ArrayList) 9
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable) 9
PTableRef (org.apache.phoenix.schema.PTableRef) 8
List (java.util.List) 7
Map (java.util.Map) 7
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) 7
Hint (org.apache.phoenix.parse.HintNode.Hint) 7
Tuple (org.apache.phoenix.schema.tuple.Tuple) 6
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression) 5