
Example 1 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From class MetaTableAccessor, method multiMutate.

/**
   * Performs an atomic multi-mutate operation against the given table.
   */
// Used by the RSGroup Coprocessor Endpoint. It had a copy/paste of the below. Need to reveal
// this facility for CPEP use or at least those CPEPs that are on their way to becoming part of
// core as is the intent for RSGroup eventually.
public static void multiMutate(Connection connection, final Table table, byte[] row,
        final List<Mutation> mutations) throws IOException {
    if (METALOG.isDebugEnabled()) {
        METALOG.debug(mutationsToString(mutations));
    }
    // TODO: Need rollback!!!!
    // TODO: Need Retry!!!
    // TODO: What for a timeout? Default write timeout? GET FROM HTABLE?
    // TODO: Review when we come through with ProcedureV2.
    RegionServerCallable<MutateRowsResponse,
            MultiRowMutationProtos.MultiRowMutationService.BlockingInterface> callable =
        new RegionServerCallable<MutateRowsResponse,
                MultiRowMutationProtos.MultiRowMutationService.BlockingInterface>(
                connection, table.getName(), row, null /* RpcController not used in this CPEP! */) {

        @Override
        protected MutateRowsResponse rpcCall() throws Exception {
            final MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
            for (Mutation mutation : mutations) {
                if (mutation instanceof Put) {
                    builder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
                } else if (mutation instanceof Delete) {
                    builder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
                } else {
                    throw new DoNotRetryIOException("multi in MetaEditor doesn't support " + mutation.getClass().getName());
                }
            }
            // The call to #prepare that ran before this invocation will have populated HRegionLocation.
            HRegionLocation hrl = getLocation();
            RegionSpecifier region = ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hrl.getRegionInfo().getRegionName());
            builder.setRegion(region);
            // Pass a null RpcController: this CPEP does not use it (see the constructor above).
            return getStub().mutateRows(null, builder.build());
        }

        // Called at the end of the super.prepare call. Sets the stub.
        @Override
        protected void setStubByServiceName(ServerName serviceName /* Ignored */)
                throws IOException {
            CoprocessorRpcChannel channel = table.coprocessorService(getRow());
            setStub(MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel));
        }
    };
    int writeTimeout = connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
        connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
            HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
    // The region location should be cached in connection. Call prepare so this callable picks
    // up the region location (see super.prepare method).
    callable.prepare(false);
    callable.call(writeTimeout);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) MultiRowMutationProtos(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos) Put(org.apache.hadoop.hbase.client.Put) RegionSpecifier(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) RegionServerCallable(org.apache.hadoop.hbase.client.RegionServerCallable) MutateRowsResponse(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse) MutateRowsRequest(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest) Mutation(org.apache.hadoop.hbase.client.Mutation)
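
A minimal usage sketch for the method above, assuming an open Connection and mutations whose rows live in the same hbase:meta region; the wrapper method, row key, and qualifiers below are illustrative, not taken from the source:

// Hypothetical caller of MetaTableAccessor.multiMutate. The Put and Delete target
// the same row, so a single region serves them and they commit atomically.
static void atomicMetaUpdate(Connection conn) throws IOException {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
        byte[] row = Bytes.toBytes("example-row"); // illustrative row key
        Put put = new Put(row);
        put.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
        Delete delete = new Delete(row);
        delete.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("stale"));
        MetaTableAccessor.multiMutate(conn, meta, row, Arrays.<Mutation>asList(put, delete));
    }
}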

Example 2 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From class RSGroupInfoManagerImpl, method flushConfigTable.

private synchronized Map<TableName, String> flushConfigTable(Map<String, RSGroupInfo> groupMap) throws IOException {
    Map<TableName, String> newTableMap = Maps.newHashMap();
    List<Mutation> mutations = Lists.newArrayList();
    // populate deletes
    for (String groupName : prevRSGroups) {
        if (!groupMap.containsKey(groupName)) {
            Delete d = new Delete(Bytes.toBytes(groupName));
            mutations.add(d);
        }
    }
    // populate puts
    for (RSGroupInfo rsGroupInfo : groupMap.values()) {
        RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(rsGroupInfo);
        Put p = new Put(Bytes.toBytes(rsGroupInfo.getName()));
        p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
        mutations.add(p);
        for (TableName entry : rsGroupInfo.getTables()) {
            newTableMap.put(entry, rsGroupInfo.getName());
        }
    }
    if (mutations.size() > 0) {
        multiMutate(mutations);
    }
    return newTableMap;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) TableName(org.apache.hadoop.hbase.TableName) RSGroupProtos(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put)
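
flushConfigTable follows a simple reconcile pattern: delete the row of every group that disappeared since the last flush, then unconditionally rewrite every live group. A hedged, generic sketch of the same pattern; the reconcile helper and its parameters are hypothetical, only Put, Delete, and Bytes are HBase API:

// Hypothetical generic form of the delete-then-rewrite reconcile used above.
static List<Mutation> reconcile(Set<String> previousKeys, Map<String, byte[]> current,
        byte[] family, byte[] qualifier) {
    List<Mutation> mutations = new ArrayList<>();
    for (String key : previousKeys) {
        if (!current.containsKey(key)) {
            mutations.add(new Delete(Bytes.toBytes(key))); // key vanished: drop its row
        }
    }
    for (Map.Entry<String, byte[]> e : current.entrySet()) {
        Put p = new Put(Bytes.toBytes(e.getKey()));
        p.addColumn(family, qualifier, e.getValue()); // rewrite every live row
        mutations.add(p);
    }
    return mutations;
}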

Example 3 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From class RSGroupInfoManagerImpl, method multiMutate.

private void multiMutate(List<Mutation> mutations) throws IOException {
    CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY);
    MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder =
        MultiRowMutationProtos.MutateRowsRequest.newBuilder();
    for (Mutation mutation : mutations) {
        if (mutation instanceof Put) {
            mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(
                org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.PUT,
                mutation));
        } else if (mutation instanceof Delete) {
            mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(
                org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.DELETE,
                mutation));
        } else {
            throw new DoNotRetryIOException("multiMutate doesn't support "
                + mutation.getClass().getName());
        }
    }
    MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
        MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
    try {
        service.mutateRows(null, mmrBuilder.build());
    } catch (ServiceException ex) {
        // Surface the remote failure to the caller as an IOException.
        throw ProtobufUtil.toIOException(ex);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ServiceException(com.google.protobuf.ServiceException) CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put)
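
A mutateRows call like the one above is only served if the MultiRowMutationEndpoint coprocessor is attached to the target table. A hedged setup sketch using the descriptor API of the same HBase era, assuming an open Admin handle; the table and family names are illustrative:

// Hypothetical table setup so that MultiRowMutationService calls are served.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example_table"));
desc.addFamily(new HColumnDescriptor("m"));
desc.addCoprocessor(MultiRowMutationEndpoint.class.getName());
admin.createTable(desc);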

Example 4 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From class ReplicationSink, method replicateEntries.

/**
   * Replicate this array of entries directly into the local cluster using the native client.
   * Operates only against the raw protobuf types, saving a conversion from pb to pojo.
   * @param entries the WAL entries to apply to the local cluster
   * @param cells a scanner over the cells that back the passed-in entries
   * @param replicationClusterId Id which will uniquely identify source cluster FS client
   *          configurations in the replication configuration directory
   * @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
   *          directory
   * @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
   * @throws IOException If failed to replicate the data
   */
public void replicateEntries(List<WALEntry> entries, final CellScanner cells,
        String replicationClusterId, String sourceBaseNamespaceDirPath,
        String sourceHFileArchiveDirPath) throws IOException {
    if (entries.isEmpty())
        return;
    if (cells == null)
        throw new NullPointerException("TODO: Add handling of null CellScanner");
    // Very simple optimization where we batch sequences of rows going to the same table.
    try {
        long totalReplicated = 0;
        // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
        // invocation of this method per table and cluster id.
        Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
        // Map of table name Vs list of pair of family and list of hfile paths from its namespace
        Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = null;
        for (WALEntry entry : entries) {
            TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
            Cell previousCell = null;
            Mutation m = null;
            int count = entry.getAssociatedCellCount();
            for (int i = 0; i < count; i++) {
                // Throw index out of bounds if our cell count is off
                if (!cells.advance()) {
                    throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
                }
                Cell cell = cells.current();
                // Handle bulk load hfiles replication
                if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
                    if (bulkLoadHFileMap == null) {
                        bulkLoadHFileMap = new HashMap<>();
                    }
                    buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell);
                } else {
                    // Handle wal replication
                    if (isNewRowOrType(previousCell, cell)) {
                        // Create new mutation
                        m = CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                        List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
                        for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
                            clusterIds.add(toUUID(clusterId));
                        }
                        m.setClusterIds(clusterIds);
                        addToHashMultiMap(rowMap, table, clusterIds, m);
                    }
                    if (CellUtil.isDelete(cell)) {
                        ((Delete) m).addDeleteMarker(cell);
                    } else {
                        ((Put) m).add(cell);
                    }
                    previousCell = cell;
                }
            }
            totalReplicated++;
        }
        // TODO Replicating mutations and bulk loaded data can be made parallel
        if (!rowMap.isEmpty()) {
            LOG.debug("Started replicating mutations.");
            for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
                batch(entry.getKey(), entry.getValue().values());
            }
            LOG.debug("Finished replicating mutations.");
        }
        if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
            LOG.debug("Started replicating bulk loaded data.");
            HFileReplicator hFileReplicator = new HFileReplicator(
                this.provider.getConf(this.conf, replicationClusterId), sourceBaseNamespaceDirPath,
                sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, getConnection());
            hFileReplicator.replicate();
            LOG.debug("Finished replicating bulk loaded data.");
        }
        int size = entries.size();
        this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
        this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
        this.totalReplicatedEdits.addAndGet(totalReplicated);
    } catch (IOException ex) {
        LOG.error("Unable to accept edit because:", ex);
        throw ex;
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) ArrayList(java.util.ArrayList) List(java.util.List) UUID(java.util.UUID) Cell(org.apache.hadoop.hbase.Cell) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) TreeMap(java.util.TreeMap) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) Row(org.apache.hadoop.hbase.client.Row) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) Mutation(org.apache.hadoop.hbase.client.Mutation) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)
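
isNewRowOrType above is a small private predicate in ReplicationSink. A sketch of its likely shape, reconstructed from the equivalent inline check in Example 5 below rather than copied from the source:

// A new mutation starts whenever the cell type or the row changes.
private boolean isNewRowOrType(final Cell previousCell, final Cell cell) {
    return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte()
        || !CellUtil.matchingRow(previousCell, cell);
}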

Example 5 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From class WALSplitter, method getMutationsFromWALEntry.

/**
   * Constructs mutations from a WALEntry. It also reconstructs the WALKey &amp; WALEdit from
   * the passed-in WALEntry.
   * @param entry the WALEntry to convert
   * @param cells a scanner over the cells that back the passed-in entry
   * @param logEntry pair that receives the WALKey and WALEdit instances extracted from the
   *          passed-in WALEntry
   * @return list of Pair&lt;MutationType, Mutation&gt; to be replayed
   * @throws IOException if the CellScanner cannot be advanced
   */
public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells,
        Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
    if (entry == null) {
        // return an empty array
        return new ArrayList<>();
    }
    long replaySeqId = (entry.getKey().hasOrigSequenceNumber())
        ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
    int count = entry.getAssociatedCellCount();
    List<MutationReplay> mutations = new ArrayList<>();
    Cell previousCell = null;
    Mutation m = null;
    WALKey key = null;
    WALEdit val = null;
    if (logEntry != null)
        val = new WALEdit();
    for (int i = 0; i < count; i++) {
        // Throw index out of bounds if our cell count is off
        if (!cells.advance()) {
            throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
        }
        Cell cell = cells.current();
        if (val != null)
            val.add(cell);
        boolean isNewRowOrType = previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() || !CellUtil.matchingRow(previousCell, cell);
        if (isNewRowOrType) {
            // Create new mutation
            if (CellUtil.isDelete(cell)) {
                m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                // Deletes don't have nonces.
                mutations.add(new MutationReplay(MutationType.DELETE, m, HConstants.NO_NONCE, HConstants.NO_NONCE));
            } else {
                m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                // Puts might come from increment or append, thus we need nonces.
                long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
                long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
                mutations.add(new MutationReplay(MutationType.PUT, m, nonceGroup, nonce));
            }
        }
        if (CellUtil.isDelete(cell)) {
            ((Delete) m).addDeleteMarker(cell);
        } else {
            ((Put) m).add(cell);
        }
        m.setDurability(durability);
        previousCell = cell;
    }
    // reconstruct WALKey
    if (logEntry != null) {
        org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto = entry.getKey();
        List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
        for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
            clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
        }
        key = new WALKey(walKeyProto.getEncodedRegionName().toByteArray(),
            TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId,
            walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(),
            walKeyProto.getNonce(), null);
        logEntry.setFirst(key);
        logEntry.setSecond(val);
    }
    return mutations;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Mutation(org.apache.hadoop.hbase.client.Mutation) UUID(java.util.UUID) Cell(org.apache.hadoop.hbase.Cell)
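
A hedged sketch of driving this method during replay, assuming entry and cells arrive as in an AdminProtos replay call and that a LOG instance is in scope; variable names are illustrative:

// Hypothetical replay loop: convert a WALEntry into mutations and inspect them.
Pair<WALKey, WALEdit> logEntry = new Pair<>();
List<MutationReplay> replays =
    WALSplitter.getMutationsFromWALEntry(entry, cells, logEntry, Durability.USE_DEFAULT);
for (MutationReplay replay : replays) {
    // MutationReplay exposes the protobuf MutationType plus the rebuilt client Mutation.
    LOG.debug("Replaying " + replay.type + " for row "
        + Bytes.toStringBinary(replay.mutation.getRow()));
}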

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 139 usages
Put (org.apache.hadoop.hbase.client.Put): 53 usages
ArrayList (java.util.ArrayList): 46 usages
IOException (java.io.IOException): 35 usages
Delete (org.apache.hadoop.hbase.client.Delete): 32 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 31 usages
List (java.util.List): 28 usages
Cell (org.apache.hadoop.hbase.Cell): 25 usages
Pair (org.apache.hadoop.hbase.util.Pair): 23 usages
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 23 usages
HashMap (java.util.HashMap): 19 usages
PTable (org.apache.phoenix.schema.PTable): 18 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 17 usages
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 15 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 14 usages
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 14 usages
Test (org.junit.Test): 14 usages
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 13 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12 usages
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 12 usages