Example 1 with DeleteType

Use of org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType in project hbase by apache.

The delete method of the class BulkDeleteEndpoint. It converts the Scan sent in the request, reads the region in batches of rowBatchSize rows, and issues a server-side batch of deletes for each batch according to the requested DeleteType:

@Override
public void delete(RpcController controller, BulkDeleteRequest request, RpcCallback<BulkDeleteResponse> done) {
    long totalRowsDeleted = 0L;
    long totalVersionsDeleted = 0L;
    Region region = env.getRegion();
    int rowBatchSize = request.getRowBatchSize();
    Long timestamp = null;
    if (request.hasTimestamp()) {
        timestamp = request.getTimestamp();
    }
    DeleteType deleteType = request.getDeleteType();
    boolean hasMore = true;
    RegionScanner scanner = null;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        if (scan.getFilter() == null && deleteType == DeleteType.ROW) {
            // We only need the row keys, so the first KV of each row is enough.
            // This filter can be applied only for a row delete; for the other
            // delete types the scan itself determines which columns get deleted.
            scan.setFilter(new FirstKeyOnlyFilter());
        }
        // From here on we assume the scan is set up correctly, with the
        // appropriate filter and the necessary column(s).
        scanner = region.getScanner(scan);
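        // Collect up to rowBatchSize rows per iteration and delete them with a
        // single batchMutate() call.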
        while (hasMore) {
            List<List<Cell>> deleteRows = new ArrayList<>(rowBatchSize);
            for (int i = 0; i < rowBatchSize; i++) {
                List<Cell> results = new ArrayList<>();
                hasMore = scanner.next(results);
                if (results.size() > 0) {
                    deleteRows.add(results);
                }
                if (!hasMore) {
                    // There are no more rows.
                    break;
                }
            }
            if (deleteRows.size() > 0) {
                Mutation[] deleteArr = new Mutation[deleteRows.size()];
                int i = 0;
                for (List<Cell> deleteRow : deleteRows) {
                    deleteArr[i++] = createDeleteMutation(deleteRow, deleteType, timestamp);
                }
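                // HConstants.NO_NONCE: no nonce group/nonce is attached to this batch.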
                OperationStatus[] opStatus = region.batchMutate(deleteArr, HConstants.NO_NONCE, HConstants.NO_NONCE);
                for (i = 0; i < opStatus.length; i++) {
                    if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
                        break;
                    }
                    totalRowsDeleted++;
                    if (deleteType == DeleteType.VERSION) {
                        byte[] versionsDeleted = deleteArr[i].getAttribute(NO_OF_VERSIONS_TO_DELETE);
                        if (versionsDeleted != null) {
                            totalVersionsDeleted += Bytes.toInt(versionsDeleted);
                        }
                    }
                }
            }
        }
    } catch (IOException ioe) {
        LOG.error(ioe);
        // Call ServerRpcController#getFailedOn() to retrieve this IOException at client side.
        CoprocessorRpcUtils.setControllerException(controller, ioe);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ioe) {
                LOG.error(ioe);
            }
        }
    }
    Builder responseBuilder = BulkDeleteResponse.newBuilder();
    responseBuilder.setRowsDeleted(totalRowsDeleted);
    if (deleteType == DeleteType.VERSION) {
        responseBuilder.setVersionsDeleted(totalVersionsDeleted);
    }
    BulkDeleteResponse result = responseBuilder.build();
    done.run(result);
}
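
The helper createDeleteMutation is not shown on this page. As a minimal sketch (not the actual HBase implementation), a helper with this signature could turn one row's cells into a Delete for the requested DeleteType; it assumes the standard Delete and CellUtil client APIs and that a null timestamp means "up to the latest version":

private Delete createDeleteMutation(List<Cell> deleteRow, DeleteType deleteType, Long timestamp) {
    // Assumption: a null timestamp means "delete up to the latest version".
    long ts = (timestamp == null) ? HConstants.LATEST_TIMESTAMP : timestamp;
    // Every cell in deleteRow belongs to the same row; take the key from the first cell.
    Delete delete = new Delete(CellUtil.cloneRow(deleteRow.get(0)), ts);
    if (deleteType == DeleteType.ROW) {
        // A Delete carrying only the row key removes the entire row.
        return delete;
    }
    for (Cell cell : deleteRow) {
        byte[] family = CellUtil.cloneFamily(cell);
        byte[] qualifier = CellUtil.cloneQualifier(cell);
        if (deleteType == DeleteType.FAMILY) {
            delete.addFamily(family, ts);
        } else if (deleteType == DeleteType.COLUMN) {
            delete.addColumns(family, qualifier, ts);
        } else if (deleteType == DeleteType.VERSION) {
            // Delete only this cell's version. The real endpoint also records the
            // number of versions in the NO_OF_VERSIONS_TO_DELETE attribute, which
            // delete() above reads when accumulating totalVersionsDeleted.
            delete.addColumn(family, qualifier, cell.getTimestamp());
        }
    }
    return delete;
}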
Also used:

BulkDeleteResponse (org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse)
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter)
Builder (org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder)
ArrayList (java.util.ArrayList)
IOException (java.io.IOException)
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner)
OperationStatus (org.apache.hadoop.hbase.regionserver.OperationStatus)
Region (org.apache.hadoop.hbase.regionserver.Region)
DeleteType (org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType)
Scan (org.apache.hadoop.hbase.client.Scan)
List (java.util.List)
Mutation (org.apache.hadoop.hbase.client.Mutation)
Cell (org.apache.hadoop.hbase.Cell)
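
For completeness, here is a hedged sketch of how a client could call this endpoint through Table.coprocessorService. The table name "myTable", the DeleteType.ROW choice, and the batch size of 500 are placeholders, and the callback class (CoprocessorRpcUtils.BlockingRpcCallback below) differs between HBase versions, so treat this as an illustration rather than a drop-in snippet:

static long bulkDeleteRows(Connection connection, Scan scan) throws Throwable {
    // Placeholder table name; the BulkDeleteEndpoint coprocessor must already be
    // loaded on this table's regions for the service call to succeed.
    try (Table table = connection.getTable(TableName.valueOf("myTable"))) {
        Map<byte[], BulkDeleteResponse> responses = table.coprocessorService(
            BulkDeleteService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
                @Override
                public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    CoprocessorRpcUtils.BlockingRpcCallback<BulkDeleteResponse> callback =
                        new CoprocessorRpcUtils.BlockingRpcCallback<>();
                    BulkDeleteRequest request = BulkDeleteRequest.newBuilder()
                        .setScan(ProtobufUtil.toScan(scan))
                        .setDeleteType(DeleteType.ROW) // or FAMILY / COLUMN / VERSION
                        .setRowBatchSize(500)          // placeholder batch size
                        .build();
                    service.delete(controller, request, callback);
                    return callback.get();
                }
            });
        long rowsDeleted = 0L;
        for (BulkDeleteResponse response : responses.values()) {
            rowsDeleted += response.getRowsDeleted();
        }
        return rowsDeleted;
    }
}

Each region in the scanned key range runs the delete on the server side and returns its own BulkDeleteResponse, which is why the per-region counts are summed on the client.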
