Example 81 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From the class TestCoprocessorMetrics, method testRegionObserverEndpoint.

@Test
public void testRegionObserverEndpoint() throws IOException, ServiceException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
        Admin admin = connection.getAdmin()) {
        admin.createTable(new HTableDescriptor(tableName).addFamily(new HColumnDescriptor(foo)).addCoprocessor(CustomRegionEndpoint.class.getName()));
        try (Table table = connection.getTable(tableName)) {
            // foo and bar are byte[] row-key fixtures defined elsewhere on the test class.
            List<Mutation> mutations = Lists.newArrayList(new Put(foo), new Put(bar));
            MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
            for (Mutation mutation : mutations) {
                mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
            }
            CoprocessorRpcChannel channel = table.coprocessorService(bar);
            MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
            MutateRowsRequest mrm = mrmBuilder.build();
            service.mutateRows(null, mrm);
        }
    }
    // Find out the MetricRegistry used by the CP using the global registries
    MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForRegionCoprocessor(CustomRegionEndpoint.class.getName());
    Optional<MetricRegistry> registry = MetricRegistries.global().get(info);
    assertTrue(registry.isPresent());
    Optional<Metric> metric = registry.get().get("EndpointExecution");
    assertTrue(metric.isPresent());
    Timer endpointExecutions = (Timer) metric.get();
    assertEquals(1, endpointExecutions.getHistogram().getCount());
}
Also used : Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) MultiRowMutationService(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService) MetricRegistry(org.apache.hadoop.hbase.metrics.MetricRegistry) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) MutateRowsRequest(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest) Timer(org.apache.hadoop.hbase.metrics.Timer) Metric(org.apache.hadoop.hbase.metrics.Metric) Mutation(org.apache.hadoop.hbase.client.Mutation) MetricRegistryInfo(org.apache.hadoop.hbase.metrics.MetricRegistryInfo) Test(org.junit.Test)
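
The assertions above rely on CustomRegionEndpoint registering a Timer named "EndpointExecution". Below is a minimal sketch of such an endpoint, assuming the HBase metrics API (MetricRegistry.timer, Timer.updateNanos) and the RegionCoprocessorEnvironment.getMetricRegistryForRegionServer() accessor; it is an illustration, not a verbatim copy of the project's class:

public static class CustomRegionEndpoint extends MultiRowMutationEndpoint {

    // Timer from org.apache.hadoop.hbase.metrics; created when the coprocessor starts.
    private Timer endpointExecution;

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
        super.start(env);
        if (env instanceof RegionCoprocessorEnvironment) {
            MetricRegistry registry =
                ((RegionCoprocessorEnvironment) env).getMetricRegistryForRegionServer();
            // Registers the metric the test later looks up by name.
            endpointExecution = registry.timer("EndpointExecution");
        }
    }

    @Override
    public void mutateRows(RpcController controller, MutateRowsRequest request,
            RpcCallback<MutateRowsResponse> done) {
        long start = System.nanoTime();
        super.mutateRows(controller, request, done);
        // Each timed invocation bumps the histogram count asserted in the test.
        endpointExecution.updateNanos(System.nanoTime() - start);
    }
}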

Example 82 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From the class MetaTableAccessor, method mergeRegions.

/**
   * Merges the two regions into one in an atomic operation. Deletes the two
   * merging regions from hbase:meta and adds the merged region, annotated with
   * the information of the two merging regions.
   * @param connection connection we're using
   * @param mergedRegion the merged region
   * @param regionA the first region being merged
   * @param regionB the second region being merged
   * @param sn the location of the merged region
   * @param regionReplication number of replicas configured for the table
   * @param masterSystemTime the time of the merge as observed on the master
   * @param saveBarrier true if the replication barrier needs to be saved in meta; used for serial replication
   * @throws IOException if writing to hbase:meta fails
   */
public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB, ServerName sn, int regionReplication, long masterSystemTime, boolean saveBarrier) throws IOException {
    Table meta = getMetaHTable(connection);
    try {
        HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
        // use the maximum of what master passed us vs local time.
        long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
        // Put for the merged region
        Put putOfMerged = makePutFromRegionInfo(copyOfMerged, time);
        putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, regionA.toByteArray());
        putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, regionB.toByteArray());
        // Deletes for merging regions
        Delete deleteA = makeDeleteFromRegionInfo(regionA, time);
        Delete deleteB = makeDeleteFromRegionInfo(regionB, time);
        // The merged region is new, so openSeqNum = 1 is fine.
        addLocation(putOfMerged, sn, 1, -1, mergedRegion.getReplicaId());
        // Add empty locations for region replicas of the merged region so that the number of
        // replicas can be cached whenever the primary region is looked up from meta.
        for (int i = 1; i < regionReplication; i++) {
            addEmptyLocation(putOfMerged, i);
        }
        byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString() + HConstants.DELIMITER);
        Mutation[] mutations;
        if (saveBarrier) {
            Put putBarrierA = makeDaughterPut(regionA.getEncodedNameAsBytes(), mergedRegion.getEncodedNameAsBytes());
            Put putBarrierB = makeDaughterPut(regionB.getEncodedNameAsBytes(), mergedRegion.getEncodedNameAsBytes());
            Put putDaughter = makeParentPut(mergedRegion.getEncodedNameAsBytes(), Bytes.toBytes(regionA.getEncodedName() + "," + regionB.getEncodedName()));
            mutations = new Mutation[] { putOfMerged, deleteA, deleteB, putBarrierA, putBarrierB, putDaughter };
        } else {
            mutations = new Mutation[] { putOfMerged, deleteA, deleteB };
        }
        multiMutate(connection, meta, tableRow, mutations);
    } finally {
        meta.close();
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put)
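
A hedged invocation sketch for the method above; the region and server objects are placeholders, and the argument meanings follow directly from the signature shown:

// Hypothetical call: merge regionA and regionB into mergedRegion in hbase:meta,
// with a single replica and no serial-replication barrier.
MetaTableAccessor.mergeRegions(connection, mergedRegion, regionA, regionB,
    serverName,                            // ServerName hosting the merged region
    1,                                     // regionReplication
    EnvironmentEdgeManager.currentTime(),  // masterSystemTime
    false);                                // saveBarrier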

Example 83 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From the class MetaTableAccessor, method splitRegion.

/**
   * Splits the region into two in an atomic operation. Offlines the parent
   * region with the information that it is split into two, and also adds
   * the daughter regions. Does not add location information to the daughter
   * regions since they are not open yet.
   * @param connection connection we're using
   * @param parent the parent region which is split
   * @param splitA daughter region A
   * @param splitB daughter region B
   * @param sn the location of the region
   * @param regionReplication number of replicas configured for the table
   * @param saveBarrier true if the replication barrier needs to be saved in meta; used for serial replication
   * @throws IOException if writing to hbase:meta fails
   */
public static void splitRegion(final Connection connection, HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB, ServerName sn, int regionReplication, boolean saveBarrier) throws IOException {
    Table meta = getMetaHTable(connection);
    try {
        HRegionInfo copyOfParent = new HRegionInfo(parent);
        copyOfParent.setOffline(true);
        copyOfParent.setSplit(true);
        // Put for parent
        Put putParent = makePutFromRegionInfo(copyOfParent);
        addDaughtersToPut(putParent, splitA, splitB);
        // Puts for daughters
        Put putA = makePutFromRegionInfo(splitA);
        Put putB = makePutFromRegionInfo(splitB);
        // New regions, so openSeqNum = 1 is fine.
        addLocation(putA, sn, 1, -1, splitA.getReplicaId());
        addLocation(putB, sn, 1, -1, splitB.getReplicaId());
        // Add empty locations for region replicas of the daughters so that the number of
        // replicas can be cached whenever the primary region is looked up from meta.
        for (int i = 1; i < regionReplication; i++) {
            addEmptyLocation(putA, i);
            addEmptyLocation(putB, i);
        }
        Mutation[] mutations;
        if (saveBarrier) {
            Put parentPut = makeDaughterPut(parent.getEncodedNameAsBytes(), Bytes.toBytes(splitA.getEncodedName() + "," + splitB.getEncodedName()));
            Put daughterPutA = makeParentPut(splitA.getEncodedNameAsBytes(), parent.getEncodedNameAsBytes());
            Put daughterPutB = makeParentPut(splitB.getEncodedNameAsBytes(), parent.getEncodedNameAsBytes());
            mutations = new Mutation[] { putParent, putA, putB, parentPut, daughterPutA, daughterPutB };
        } else {
            mutations = new Mutation[] { putParent, putA, putB };
        }
        byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
        multiMutate(connection, meta, tableRow, mutations);
    } finally {
        meta.close();
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put)
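
For comparison, a hedged sketch of calling splitRegion, constructing the daughter HRegionInfo objects from the parent's key range and a chosen split key (all names here are placeholders):

// Hypothetical call: split `parent` at `splitKey` and record both daughters in hbase:meta.
byte[] splitKey = Bytes.toBytes("m");
HRegionInfo daughterA = new HRegionInfo(parent.getTable(), parent.getStartKey(), splitKey);
HRegionInfo daughterB = new HRegionInfo(parent.getTable(), splitKey, parent.getEndKey());
MetaTableAccessor.splitRegion(connection, parent, daughterA, daughterB,
    serverName, 1 /* regionReplication */, false /* saveBarrier */);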

Example 84 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From the class MetaTableAccessor, method mutationsToString.

private static String mutationsToString(List<? extends Mutation> mutations) throws IOException {
    StringBuilder sb = new StringBuilder();
    String prefix = "";
    for (Mutation mutation : mutations) {
        sb.append(prefix).append(mutationToString(mutation));
        prefix = ", ";
    }
    return sb.toString();
}
Also used : Mutation(org.apache.hadoop.hbase.client.Mutation)
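
The prefix-flip in the loop above is a standard delimiter idiom; java.util.StringJoiner expresses the same thing more directly. A sketch of an equivalent:

private static String mutationsToString(List<? extends Mutation> mutations) throws IOException {
    // StringJoiner inserts ", " only between elements, replacing the prefix trick.
    StringJoiner joiner = new StringJoiner(", ");
    for (Mutation mutation : mutations) {
        joiner.add(mutationToString(mutation));
    }
    return joiner.toString();
}

A Stream.map version is less convenient here because mutationToString throws a checked IOException.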

Example 85 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From the class BulkDeleteEndpoint, method delete.

@Override
public void delete(RpcController controller, BulkDeleteRequest request, RpcCallback<BulkDeleteResponse> done) {
    long totalRowsDeleted = 0L;
    long totalVersionsDeleted = 0L;
    Region region = env.getRegion();
    int rowBatchSize = request.getRowBatchSize();
    Long timestamp = null;
    if (request.hasTimestamp()) {
        timestamp = request.getTimestamp();
    }
    DeleteType deleteType = request.getDeleteType();
    boolean hasMore = true;
    RegionScanner scanner = null;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        if (scan.getFilter() == null && deleteType == DeleteType.ROW) {
            // What we need is just the rowkeys. So only 1st KV from any row is enough.
            // Only when it is a row delete, we can apply this filter.
            // In other types we rely on the scan to know which all columns to be deleted.
            scan.setFilter(new FirstKeyOnlyFilter());
        }
        // Here we assume that the scan is set up correctly, with the appropriate
        // filter and the necessary column(s).
        scanner = region.getScanner(scan);
        while (hasMore) {
            List<List<Cell>> deleteRows = new ArrayList<>(rowBatchSize);
            for (int i = 0; i < rowBatchSize; i++) {
                List<Cell> results = new ArrayList<>();
                hasMore = scanner.next(results);
                if (results.size() > 0) {
                    deleteRows.add(results);
                }
                if (!hasMore) {
                    // There are no more rows.
                    break;
                }
            }
            if (deleteRows.size() > 0) {
                Mutation[] deleteArr = new Mutation[deleteRows.size()];
                int i = 0;
                for (List<Cell> deleteRow : deleteRows) {
                    deleteArr[i++] = createDeleteMutation(deleteRow, deleteType, timestamp);
                }
                OperationStatus[] opStatus = region.batchMutate(deleteArr, HConstants.NO_NONCE, HConstants.NO_NONCE);
                for (i = 0; i < opStatus.length; i++) {
                    if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
                        break;
                    }
                    totalRowsDeleted++;
                    if (deleteType == DeleteType.VERSION) {
                        byte[] versionsDeleted = deleteArr[i].getAttribute(NO_OF_VERSIONS_TO_DELETE);
                        if (versionsDeleted != null) {
                            totalVersionsDeleted += Bytes.toInt(versionsDeleted);
                        }
                    }
                }
            }
        }
    } catch (IOException ioe) {
        LOG.error(ioe);
        // Call ServerRpcController#getFailedOn() to retrieve this IOException at client side.
        CoprocessorRpcUtils.setControllerException(controller, ioe);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ioe) {
                LOG.error(ioe);
            }
        }
    }
    Builder responseBuilder = BulkDeleteResponse.newBuilder();
    responseBuilder.setRowsDeleted(totalRowsDeleted);
    if (deleteType == DeleteType.VERSION) {
        responseBuilder.setVersionsDeleted(totalVersionsDeleted);
    }
    BulkDeleteResponse result = responseBuilder.build();
    done.run(result);
}
Also used : BulkDeleteResponse(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) Builder(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder) ArrayList(java.util.ArrayList) IOException(java.io.IOException) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) OperationStatus(org.apache.hadoop.hbase.regionserver.OperationStatus) Region(org.apache.hadoop.hbase.regionserver.Region) DeleteType(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType) Scan(org.apache.hadoop.hbase.client.Scan) ArrayList(java.util.ArrayList) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell)
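
On the client side this endpoint is usually reached through Table.coprocessorService with a Batch.Call. Below is a minimal sketch, assuming the generated BulkDeleteService stub and the 2.0-era ipc helpers (ServerRpcController, CoprocessorRpcUtils.BlockingRpcCallback); exact class locations vary between HBase versions, so treat the wiring as an assumption:

final Scan scan = new Scan();  // selects the rows to delete
Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
    new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
        private final ServerRpcController controller = new ServerRpcController();
        private final CoprocessorRpcUtils.BlockingRpcCallback<BulkDeleteResponse> callback =
            new CoprocessorRpcUtils.BlockingRpcCallback<>();

        @Override
        public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
            BulkDeleteRequest request = BulkDeleteRequest.newBuilder()
                .setScan(ProtobufUtil.toScan(scan))
                .setDeleteType(DeleteType.ROW)
                .setRowBatchSize(500)
                .build();
            service.delete(controller, request, callback);
            return callback.get();
        }
    };
// coprocessorService fans the call out to every region in the key range and
// returns one response per region; summing gives the total rows deleted.
// (The real method declares `throws Throwable`; error handling is elided here.)
long rowsDeleted = 0;
for (BulkDeleteResponse response : table.coprocessorService(BulkDeleteService.class,
        scan.getStartRow(), scan.getStopRow(), callable).values()) {
    rowsDeleted += response.getRowsDeleted();
}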

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 139 uses
Put (org.apache.hadoop.hbase.client.Put): 53 uses
ArrayList (java.util.ArrayList): 46 uses
IOException (java.io.IOException): 35 uses
Delete (org.apache.hadoop.hbase.client.Delete): 32 uses
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 31 uses
List (java.util.List): 28 uses
Cell (org.apache.hadoop.hbase.Cell): 25 uses
Pair (org.apache.hadoop.hbase.util.Pair): 23 uses
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 23 uses
HashMap (java.util.HashMap): 19 uses
PTable (org.apache.phoenix.schema.PTable): 18 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 17 uses
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 15 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 14 uses
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 14 uses
Test (org.junit.Test): 14 uses
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 13 uses
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12 uses
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 12 uses