Example 36 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From the class MetaDataUtil, the method getPutOnlyAutoPartitionColumn:

public static Put getPutOnlyAutoPartitionColumn(PTable parentTable, List<Mutation> tableMetaData) {
    int autoPartitionPutIndex = parentTable.isMultiTenant() ? 2 : 1;
    int i = 0;
    for (Mutation m : tableMetaData) {
        if (m instanceof Put && i++ == autoPartitionPutIndex) {
            return (Put) m;
        }
    }
    throw new IllegalStateException("No auto partition column row found in table metadata");
}
Also used: Mutation(org.apache.hadoop.hbase.client.Mutation) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) Put(org.apache.hadoop.hbase.client.Put)
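
A minimal, runnable sketch of the selection rule above: the counter advances only on Puts, so interleaved Deletes don't shift the auto partition index. The row keys, the Delete, and the class name are hypothetical stand-ins, not Phoenix's real metadata layout.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class AutoPartitionIndexSketch {
    public static void main(String[] args) {
        // Deletes fail the instanceof check, so the index counts Puts only:
        // Put index 1 below is the row "b", even though it is the third mutation.
        List<Mutation> tableMetaData = Arrays.asList(
                new Put(Bytes.toBytes("a")),    // Put index 0
                new Delete(Bytes.toBytes("x")), // skipped by the instanceof check
                new Put(Bytes.toBytes("b")));   // Put index 1

        int autoPartitionPutIndex = 1; // the non-multi-tenant case
        int i = 0;
        for (Mutation m : tableMetaData) {
            if (m instanceof Put && i++ == autoPartitionPutIndex) {
                System.out.println(Bytes.toString(m.getRow())); // prints "b"
            }
        }
    }
}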

Example 37 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From the class MetaDataUtil, the method getTenantIdAndFunctionName:

public static void getTenantIdAndFunctionName(List<Mutation> functionMetadata, byte[][] rowKeyMetaData) {
    Mutation m = getTableHeaderRow(functionMetadata);
    getVarChars(m.getRow(), 2, rowKeyMetaData);
}
Also used: Mutation(org.apache.hadoop.hbase.client.Mutation)
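
The header row key packs the tenant id and the function name as null-separated varchars, and getVarChars splits them back out into rowKeyMetaData. Below is a simplified, self-contained stand-in for that split, assuming Phoenix's usual zero-byte separator; the helper logic is illustrative only, not Phoenix's actual getVarChars implementation.

import org.apache.hadoop.hbase.util.Bytes;

public class RowKeySplitSketch {
    public static void main(String[] args) {
        // hypothetical row key: tenant id "t1", zero-byte separator, function name "MY_FUNC"
        byte[] rowKey = Bytes.add(Bytes.toBytes("t1"), new byte[] { 0 }, Bytes.toBytes("MY_FUNC"));

        byte[][] rowKeyMetaData = new byte[2][];
        int sep = Bytes.indexOf(rowKey, (byte) 0);
        rowKeyMetaData[0] = Bytes.copy(rowKey, 0, sep);
        rowKeyMetaData[1] = Bytes.copy(rowKey, sep + 1, rowKey.length - sep - 1);

        System.out.println(Bytes.toString(rowKeyMetaData[0])); // t1
        System.out.println(Bytes.toString(rowKeyMetaData[1])); // MY_FUNC
    }
}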

Example 38 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From the class MutationState, the method send:

@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
    int i = 0;
    long[] serverTimeStamps = null;
    boolean sendAll = false;
    if (tableRefIterator == null) {
        serverTimeStamps = validateAll();
        tableRefIterator = mutations.keySet().iterator();
        sendAll = true;
    }
    Map<ImmutableBytesPtr, RowMutationState> valuesMap;
    List<TableRef> txTableRefs = Lists.newArrayListWithExpectedSize(mutations.size());
    Map<TableInfo, List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
    // add tracing for this operation
    try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
        Span span = trace.getSpan();
        ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
        boolean isTransactional;
        while (tableRefIterator.hasNext()) {
            // at this point we are going through mutations for each table
            final TableRef tableRef = tableRefIterator.next();
            valuesMap = mutations.get(tableRef);
            if (valuesMap == null || valuesMap.isEmpty()) {
                continue;
            }
            // Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
            long serverTimestamp = serverTimeStamps == null ? validate(tableRef, valuesMap) : serverTimeStamps[i++];
            final PTable table = tableRef.getTable();
            Iterator<Pair<PName, List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false, sendAll);
            // build map from physical table to mutation list
            boolean isDataTable = true;
            while (mutationsIterator.hasNext()) {
                Pair<PName, List<Mutation>> pair = mutationsIterator.next();
                PName hTableName = pair.getFirst();
                List<Mutation> mutationList = pair.getSecond();
                TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
                List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
                if (oldMutationList != null)
                    mutationList.addAll(0, oldMutationList);
                isDataTable = false;
            }
            // For transactions, track the statement indexes as we send data over, because
            // the CommitException should include all statements involved in the transaction:
            // none of them would have been committed in the event of a failure.
            if (table.isTransactional()) {
                addUncommittedStatementIndexes(valuesMap.values());
                if (txMutations.isEmpty()) {
                    txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
                }
                // Keep all mutations we've encountered until a commit or rollback.
                // This is not ideal, but there's not good way to get the values back
                // in the event that we need to replay the commit.
                // Copy TableRef so we have the original PTable and know when the
                // indexes have changed.
                joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
            }
        }
        long serverTimestamp = HConstants.LATEST_TIMESTAMP;
        Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
        while (mutationsIterator.hasNext()) {
            Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
            TableInfo tableInfo = pair.getKey();
            byte[] htableName = tableInfo.getHTableName().getBytes();
            List<Mutation> mutationList = pair.getValue();
            //create a span per target table
            //TODO maybe we can be smarter about the table name to string here?
            Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));
            int retryCount = 0;
            boolean shouldRetry = false;
            do {
                TableRef origTableRef = tableInfo.getOrigTableRef();
                PTable table = origTableRef.getTable();
                table.getIndexMaintainers(indexMetaDataPtr, connection);
                final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
                // If we haven't retried yet, retry for this case only, as it's possible that
                // a split will occur after we send the index metadata cache to all known
                // region servers.
                shouldRetry = cache != null;
                SQLException sqlE = null;
                HTableInterface hTable = connection.getQueryServices().getTable(htableName);
                try {
                    if (table.isTransactional()) {
                        // Track tables to which we've sent uncommitted data
                        txTableRefs.add(origTableRef);
                        addDMLFence(table);
                        uncommittedPhysicalNames.add(table.getPhysicalName().getString());
                        // If the table has indexes, wrap the HTable in a delegate that attaches
                        // the index metadata needed to replay the mutations in the event of a
                        // rollback
                        if (!table.getIndexes().isEmpty()) {
                            hTable = new MetaDataAwareHTable(hTable, origTableRef);
                        }
                        TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table.isImmutableRows());
                        // Don't add immutable indexes (the only non-data tables that participate
                        // during a commit) as transaction participants, as we don't need conflict
                        // detection for these.
                        if (tableInfo.isDataTable()) {
                            // Even for immutable, we need to do this so that an abort has the state
                            // necessary to generate the rows to delete.
                            addTransactionParticipant(txnAware);
                        } else {
                            txnAware.startTx(getTransaction());
                        }
                        hTable = txnAware;
                    }
                    long numMutations = mutationList.size();
                    GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
                    long startTime = System.currentTimeMillis();
                    child.addTimelineAnnotation("Attempt " + retryCount);
                    List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
                    for (List<Mutation> mutationBatch : mutationBatchList) {
                        hTable.batch(mutationBatch);
                        batchCount++;
                    }
                    if (logger.isDebugEnabled())
                        logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                    child.stop();
                    shouldRetry = false;
                    long mutationCommitTime = System.currentTimeMillis() - startTime;
                    GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
                    long mutationSizeBytes = calculateMutationSize(mutationList);
                    MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
                    mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
                    if (tableInfo.isDataTable()) {
                        numRows -= numMutations;
                    }
                    // Remove batches as we process them
                    mutations.remove(origTableRef);
                } catch (Exception e) {
                    serverTimestamp = ServerUtil.parseServerTimestamp(e);
                    SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
                    if (inferredE != null) {
                        if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
                            // Swallow this exception once, as it's possible that we split after sending the index metadata
                            // and one of the region servers doesn't have it. This will cause it to have it the next go around.
                            // If it fails again, we don't retry.
                            String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
                            logger.warn(LogUtil.addCustomAnnotations(msg, connection));
                            connection.getQueryServices().clearTableRegionCache(htableName);
                            // add a new child span as this one failed
                            child.addTimelineAnnotation(msg);
                            child.stop();
                            child = Tracing.child(span, "Failed batch, attempting retry");
                            continue;
                        }
                        e = inferredE;
                    }
                    // Throw to client an exception that indicates the statements that
                    // were not committed successfully.
                    sqlE = new CommitException(e, getUncommittedStatementIndexes(), serverTimestamp);
                } finally {
                    try {
                        if (cache != null)
                            cache.close();
                    } finally {
                        try {
                            hTable.close();
                        } catch (IOException e) {
                            if (sqlE != null) {
                                sqlE.setNextException(ServerUtil.parseServerException(e));
                            } else {
                                sqlE = ServerUtil.parseServerException(e);
                            }
                        }
                        if (sqlE != null) {
                            throw sqlE;
                        }
                    }
                }
            } while (shouldRetry && retryCount++ < 1);
        }
    }
}
Also used: ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) SQLException(java.sql.SQLException) MutationMetric(org.apache.phoenix.monitoring.MutationMetricQueue.MutationMetric) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Span(org.apache.htrace.Span) PTable(org.apache.phoenix.schema.PTable) Entry(java.util.Map.Entry) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) TransactionAwareHTable(org.apache.tephra.hbase.TransactionAwareHTable) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) TraceScope(org.apache.htrace.TraceScope) IOException(java.io.IOException) TransactionFailureException(org.apache.tephra.TransactionFailureException) IllegalDataException(org.apache.phoenix.schema.IllegalDataException) TimeoutException(java.util.concurrent.TimeoutException) TransactionConflictException(org.apache.tephra.TransactionConflictException) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) PName(org.apache.phoenix.schema.PName) Mutation(org.apache.hadoop.hbase.client.Mutation) PTableRef(org.apache.phoenix.schema.PTableRef) TableRef(org.apache.phoenix.schema.TableRef)
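
The do/while at the end of send implements a swallow-once retry: an INDEX_METADATA_NOT_FOUND failure on the first attempt clears the region cache and loops exactly one more time before propagating. A stripped-down sketch of just that control flow, with a hypothetical attempt() standing in for the batch write:

public class RetryOnceSketch {
    public static void main(String[] args) {
        int retryCount = 0;
        boolean shouldRetry;
        do {
            // in send(), shouldRetry is only true when index metadata was shipped (cache != null)
            shouldRetry = true;
            try {
                attempt(retryCount);
                shouldRetry = false; // success: the loop condition below falls through
            } catch (RuntimeException e) {
                if (shouldRetry && retryCount == 0) {
                    // swallow the first failure and go around once more;
                    // continue jumps to the while condition, which bumps retryCount
                    System.out.println("retrying after: " + e.getMessage());
                    continue;
                }
                throw e; // a second failure propagates to the caller
            }
        } while (shouldRetry && retryCount++ < 1);
    }

    static void attempt(int n) {
        // hypothetical stand-in for hTable.batch(...): fails once, then succeeds
        if (n == 0) throw new RuntimeException("INDEX_METADATA_NOT_FOUND");
        System.out.println("batch sent on attempt " + n);
    }
}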

Example 39 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From the class MutationState, the method addRowMutations:

private Iterator<Pair<PName, List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, RowMutationState> values, final long timestamp, boolean includeAllIndexes, final boolean sendAll) {
    final PTable table = tableRef.getTable();
    // Only maintain tables with immutable rows through this client-side mechanism.
    // TODO: remove check for isWALDisabled once PHOENIX-3137 is fixed.
    final Iterator<PTable> indexes = includeAllIndexes || table.isWALDisabled()
            ? IndexMaintainer.nonDisabledIndexIterator(table.getIndexes().iterator())
            : table.isImmutableRows()
                    ? IndexMaintainer.enabledGlobalIndexIterator(table.getIndexes().iterator())
                    : Iterators.<PTable>emptyIterator();
    final List<Mutation> mutationList = Lists.newArrayListWithExpectedSize(values.size());
    final List<Mutation> mutationsPertainingToIndex = indexes.hasNext() ? Lists.<Mutation>newArrayListWithExpectedSize(values.size()) : null;
    generateMutations(tableRef, timestamp, values, mutationList, mutationsPertainingToIndex);
    return new Iterator<Pair<PName, List<Mutation>>>() {

        boolean isFirst = true;

        @Override
        public boolean hasNext() {
            return isFirst || indexes.hasNext();
        }

        @Override
        public Pair<PName, List<Mutation>> next() {
            if (isFirst) {
                isFirst = false;
                return new Pair<PName, List<Mutation>>(table.getPhysicalName(), mutationList);
            }
            PTable index = indexes.next();
            List<Mutation> indexMutations;
            try {
                indexMutations = IndexUtil.generateIndexData(table, index, values, mutationsPertainingToIndex, connection.getKeyValueBuilder(), connection);
                // we may also have to include delete mutations for immutable tables if we are not processing all the tables in the mutations map
                if (!sendAll) {
                    TableRef key = new TableRef(index);
                    Map<ImmutableBytesPtr, RowMutationState> rowToColumnMap = mutations.remove(key);
                    if (rowToColumnMap != null) {
                        final List<Mutation> deleteMutations = Lists.newArrayList();
                        generateMutations(tableRef, timestamp, rowToColumnMap, deleteMutations, null);
                        indexMutations.addAll(deleteMutations);
                    }
                }
            } catch (SQLException e) {
                throw new IllegalDataException(e);
            }
            return new Pair<PName, List<Mutation>>(index.getPhysicalName(), indexMutations);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
Also used: SQLException(java.sql.SQLException) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PName(org.apache.phoenix.schema.PName) Iterator(java.util.Iterator) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) PTableRef(org.apache.phoenix.schema.PTableRef) TableRef(org.apache.phoenix.schema.TableRef) Pair(org.apache.hadoop.hbase.util.Pair) IllegalDataException(org.apache.phoenix.schema.IllegalDataException)
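
addRowMutations hands send() an iterator of (physical table name, mutation list) pairs: the data table first, then one pair per maintained index. A hypothetical consumer of that shape; the single Pair fabricated here via PNameFactory.newName is for illustration only, not how Phoenix builds these pairs.

import java.util.Collections;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;

public class RowMutationConsumerSketch {
    public static void main(String[] args) {
        // a stand-in pair list; send() gets these from addRowMutations(...)
        List<Mutation> dataMutations =
                Collections.<Mutation>singletonList(new Put(Bytes.toBytes("row-1")));
        Iterator<Pair<PName, List<Mutation>>> pairs = Collections.singletonList(
                new Pair<PName, List<Mutation>>(PNameFactory.newName("MY_TABLE"), dataMutations)).iterator();

        boolean isDataTable = true; // first pair is the data table, the rest are its indexes
        while (pairs.hasNext()) {
            Pair<PName, List<Mutation>> pair = pairs.next();
            System.out.println((isDataTable ? "data:  " : "index: ")
                    + pair.getFirst().getString()
                    + " (" + pair.getSecond().size() + " mutations)");
            isDataTable = false;
        }
    }
}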

Example 40 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From the class MutationState, the method getMutationBatchList:

/**
 * Split the list of mutations into multiple lists that don't exceed row and byte thresholds
 * @param batchSize maximum number of mutations per batch
 * @param batchSizeBytes maximum cumulative size, in bytes, of the mutations in a batch
 * @param allMutationList List of HBase mutations
 * @return List of lists of mutations
 */
public static List<List<Mutation>> getMutationBatchList(long batchSize, long batchSizeBytes, List<Mutation> allMutationList) {
    List<List<Mutation>> mutationBatchList = Lists.newArrayList();
    List<Mutation> currentList = Lists.newArrayList();
    long currentBatchSizeBytes = 0L;
    for (Mutation mutation : allMutationList) {
        long mutationSizeBytes = KeyValueUtil.calculateMutationDiskSize(mutation);
        if (currentList.size() == batchSize || currentBatchSizeBytes + mutationSizeBytes > batchSizeBytes) {
            if (currentList.size() > 0) {
                mutationBatchList.add(currentList);
                currentList = Lists.newArrayList();
                currentBatchSizeBytes = 0L;
            }
        }
        currentList.add(mutation);
        currentBatchSizeBytes += mutationSizeBytes;
    }
    if (currentList.size() > 0) {
        mutationBatchList.add(currentList);
    }
    return mutationBatchList;
}
Also used: List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation)
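
A minimal, hypothetical driver for the batching helper, assuming an HBase 1.x client (Put.addColumn) and mutations far below the byte threshold, so only the row-count limit triggers splits:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.execute.MutationState;

public class BatchSplitSketch {
    public static void main(String[] args) {
        List<Mutation> all = new ArrayList<Mutation>();
        for (int i = 0; i < 10; i++) {
            Put put = new Put(Bytes.toBytes("row-" + i));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
            all.add(put);
        }
        // cap each batch at 4 rows or 1 MB, whichever limit is hit first
        List<List<Mutation>> batches = MutationState.getMutationBatchList(4, 1024 * 1024, all);
        System.out.println(batches.size() + " batches"); // 3: rows split 4 + 4 + 2
    }
}

Because each Put here is tiny, only the row-count cap fires; in practice the batchSizeBytes threshold guards against a few very wide rows inflating a single round trip.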

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation) 139
Put (org.apache.hadoop.hbase.client.Put) 53
ArrayList (java.util.ArrayList) 46
IOException (java.io.IOException) 35
Delete (org.apache.hadoop.hbase.client.Delete) 32
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) 31
List (java.util.List) 28
Cell (org.apache.hadoop.hbase.Cell) 25
Pair (org.apache.hadoop.hbase.util.Pair) 23
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) 23
HashMap (java.util.HashMap) 19
PTable (org.apache.phoenix.schema.PTable) 18
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 17
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) 15
Region (org.apache.hadoop.hbase.regionserver.Region) 14
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock) 14
Test (org.junit.Test) 14
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) 13
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface) 12
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) 12