use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
the class TestCoprocessorMetrics method testRegionObserverEndpoint.
@Test
public void testRegionObserverEndpoint() throws IOException, ServiceException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
      Admin admin = connection.getAdmin()) {
    admin.createTable(new HTableDescriptor(tableName)
        .addFamily(new HColumnDescriptor(foo))
        .addCoprocessor(CustomRegionEndpoint.class.getName()));
    try (Table table = connection.getTable(tableName)) {
      List<Mutation> mutations = Lists.newArrayList(new Put(foo), new Put(bar));
      MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
      for (Mutation mutation : mutations) {
        mrmBuilder.addMutationRequest(
            ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
      }
      CoprocessorRpcChannel channel = table.coprocessorService(bar);
      MultiRowMutationService.BlockingInterface service =
          MultiRowMutationService.newBlockingStub(channel);
      MutateRowsRequest mrm = mrmBuilder.build();
      service.mutateRows(null, mrm);
    }
  }
  // Find out the MetricRegistry used by the CP using the global registries
  MetricRegistryInfo info =
      MetricsCoprocessor.createRegistryInfoForRegionCoprocessor(CustomRegionEndpoint.class.getName());
  Optional<MetricRegistry> registry = MetricRegistries.global().get(info);
  assertTrue(registry.isPresent());
  Optional<Metric> metric = registry.get().get("EndpointExecution");
  assertTrue(metric.isPresent());
  Timer endpointExecutions = (Timer) metric.get();
  assertEquals(1, endpointExecutions.getHistogram().getCount());
}
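The test asserts that the CustomRegionEndpoint coprocessor recorded exactly one execution in a Timer named "EndpointExecution". Below is a minimal sketch of how such an endpoint could record that metric, assuming an endpoint that extends MultiRowMutationEndpoint and uses the coprocessor MetricRegistry exposed by RegionCoprocessorEnvironment.getMetricRegistryForRegionServer() (the HBase 2.0+ coprocessor metrics API); the class body and exact overrides here are illustrative, not the test's actual implementation.

public static class CustomRegionEndpoint extends MultiRowMutationEndpoint {
  // Timer registered in the coprocessor's MetricRegistry; the name matches the
  // metric the test looks up ("EndpointExecution").
  private Timer endpointExecution;

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    super.start(env);
    if (env instanceof RegionCoprocessorEnvironment) {
      MetricRegistry registry =
          ((RegionCoprocessorEnvironment) env).getMetricRegistryForRegionServer();
      endpointExecution = registry.timer("EndpointExecution");
    }
  }

  @Override
  public void mutateRows(RpcController controller, MutateRowsRequest request,
      RpcCallback<MutateRowsResponse> done) {
    long start = System.nanoTime();
    super.mutateRows(controller, request, done);
    // One timer update per endpoint invocation, which is why the histogram
    // count asserted in the test above is 1.
    endpointExecution.updateNanos(System.nanoTime() - start);
  }
}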
use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
the class MetaTableAccessor method mergeRegions.
/**
 * Merge the two regions into one in an atomic operation. Deletes the two
 * merging regions in hbase:meta and adds the merged region with the information of
 * two merging regions.
 * @param connection connection we're using
 * @param mergedRegion the merged region
 * @param regionA merge parent region A
 * @param regionB merge parent region B
 * @param sn the location of the region
 * @param regionReplication the number of replicas configured for the table
 * @param masterSystemTime time passed by the master; the larger of this and local time is used for the meta edits
 * @param saveBarrier true if we need to save the replication barrier in meta, used for serial replication
 * @throws IOException if writing to hbase:meta fails
 */
public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion,
    HRegionInfo regionA, HRegionInfo regionB, ServerName sn, int regionReplication,
    long masterSystemTime, boolean saveBarrier) throws IOException {
  Table meta = getMetaHTable(connection);
  try {
    HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
    // use the maximum of what master passed us vs local time.
    long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
    // Put for parent
    Put putOfMerged = makePutFromRegionInfo(copyOfMerged, time);
    putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
        regionA.toByteArray());
    putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
        regionB.toByteArray());
    // Deletes for merging regions
    Delete deleteA = makeDeleteFromRegionInfo(regionA, time);
    Delete deleteB = makeDeleteFromRegionInfo(regionB, time);
    // The merged is a new region, openSeqNum = 1 is fine.
    addLocation(putOfMerged, sn, 1, -1, mergedRegion.getReplicaId());
    // Add empty locations for region replicas of the merged region so that they can
    // be cached whenever the primary region is looked up from meta
    for (int i = 1; i < regionReplication; i++) {
      addEmptyLocation(putOfMerged, i);
    }
    byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString() + HConstants.DELIMITER);
    Mutation[] mutations;
    if (saveBarrier) {
      Put putBarrierA = makeDaughterPut(regionA.getEncodedNameAsBytes(),
          mergedRegion.getEncodedNameAsBytes());
      Put putBarrierB = makeDaughterPut(regionB.getEncodedNameAsBytes(),
          mergedRegion.getEncodedNameAsBytes());
      Put putDaughter = makeParentPut(mergedRegion.getEncodedNameAsBytes(),
          Bytes.toBytes(regionA.getEncodedName() + "," + regionB.getEncodedName()));
      mutations = new Mutation[] { putOfMerged, deleteA, deleteB, putBarrierA, putBarrierB,
          putDaughter };
    } else {
      mutations = new Mutation[] { putOfMerged, deleteA, deleteB };
    }
    multiMutate(connection, meta, tableRow, mutations);
  } finally {
    meta.close();
  }
}
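Both mergeRegions and splitRegion hand their batched Puts and Deletes to multiMutate, which applies them atomically against the meta table via the MultiRowMutationService coprocessor, the same endpoint exercised by the test at the top of this page. A rough sketch of such a helper, assuming the blocking-stub invocation path shown in that test (the exact exception translation and error message are simplified here):

private static void multiMutate(Connection connection, Table table, byte[] row,
    Mutation... mutations) throws IOException {
  // Convert each client Mutation to its protobuf form.
  MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
  for (Mutation mutation : mutations) {
    if (mutation instanceof Put) {
      builder.addMutationRequest(
          ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
    } else if (mutation instanceof Delete) {
      builder.addMutationRequest(
          ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
    } else {
      throw new DoNotRetryIOException(
          "Unsupported mutation type in meta multiMutate: " + mutation.getClass().getName());
    }
  }
  // Route the request to the region holding 'row' and execute all mutations atomically.
  CoprocessorRpcChannel channel = table.coprocessorService(row);
  MultiRowMutationService.BlockingInterface service =
      MultiRowMutationService.newBlockingStub(channel);
  try {
    service.mutateRows(null, builder.build());
  } catch (ServiceException e) {
    // Translate the RPC-layer exception; the real helper unwraps the remote
    // exception using HBase's protobuf utilities.
    throw new IOException(e);
  }
}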
use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
the class MetaTableAccessor method splitRegion.
/**
 * Splits the region into two in an atomic operation. Offlines the parent
 * region with the information that it is split into two, and also adds
 * the daughter regions. Does not add the location information to the daughter
 * regions since they are not open yet.
 * @param connection connection we're using
 * @param parent the parent region which is split
 * @param splitA Split daughter region A
 * @param splitB Split daughter region B
 * @param sn the location of the region
 * @param regionReplication the number of replicas configured for the table
 * @param saveBarrier true if we need to save the replication barrier in meta, used for serial replication
 */
public static void splitRegion(final Connection connection, HRegionInfo parent,
    HRegionInfo splitA, HRegionInfo splitB, ServerName sn, int regionReplication,
    boolean saveBarrier) throws IOException {
  Table meta = getMetaHTable(connection);
  try {
    HRegionInfo copyOfParent = new HRegionInfo(parent);
    copyOfParent.setOffline(true);
    copyOfParent.setSplit(true);
    // Put for parent
    Put putParent = makePutFromRegionInfo(copyOfParent);
    addDaughtersToPut(putParent, splitA, splitB);
    // Puts for daughters
    Put putA = makePutFromRegionInfo(splitA);
    Put putB = makePutFromRegionInfo(splitB);
    // new regions, openSeqNum = 1 is fine.
    addLocation(putA, sn, 1, -1, splitA.getReplicaId());
    addLocation(putB, sn, 1, -1, splitB.getReplicaId());
    // Add empty locations for region replicas of the daughters so that they can be
    // cached whenever the primary region is looked up from meta
    for (int i = 1; i < regionReplication; i++) {
      addEmptyLocation(putA, i);
      addEmptyLocation(putB, i);
    }
    Mutation[] mutations;
    if (saveBarrier) {
      Put parentPut = makeDaughterPut(parent.getEncodedNameAsBytes(),
          Bytes.toBytes(splitA.getEncodedName() + "," + splitB.getEncodedName()));
      Put daughterPutA = makeParentPut(splitA.getEncodedNameAsBytes(),
          parent.getEncodedNameAsBytes());
      Put daughterPutB = makeParentPut(splitB.getEncodedNameAsBytes(),
          parent.getEncodedNameAsBytes());
      mutations = new Mutation[] { putParent, putA, putB, parentPut, daughterPutA, daughterPutB };
    } else {
      mutations = new Mutation[] { putParent, putA, putB };
    }
    byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
    multiMutate(connection, meta, tableRow, mutations);
  } finally {
    meta.close();
  }
}
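Both mergeRegions and splitRegion loop from replica 1 to regionReplication - 1 adding empty location columns, so that a location slot for every replica exists in hbase:meta even before the replicas are assigned. A sketch of what such an addEmptyLocation helper can look like, assuming the per-replica column-name helpers MetaTableAccessor uses (getServerColumn, getStartCodeColumn, getSeqNumColumn); treat the exact signature as illustrative:

private static Put addEmptyLocation(final Put p, int replicaId) {
  long now = EnvironmentEdgeManager.currentTime();
  // Write empty server, startcode and seqnum cells for the given replica so the
  // replica's location slot is present (but unassigned) in hbase:meta.
  p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now, null);
  p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now, null);
  p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now, null);
  return p;
}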
use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
the class MetaTableAccessor method mutationsToString.
private static String mutationsToString(List<? extends Mutation> mutations) throws IOException {
  StringBuilder sb = new StringBuilder();
  String prefix = "";
  for (Mutation mutation : mutations) {
    sb.append(prefix).append(mutationToString(mutation));
    prefix = ", ";
  }
  return sb.toString();
}
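This is a logging helper: it joins the per-mutation string forms with ", " using the empty-then-comma prefix idiom. A hypothetical usage (the row keys and the log statement are made up for illustration; the per-mutation text depends on mutationToString's format):

List<Mutation> metaMutations = Arrays.asList(
    new Put(Bytes.toBytes("region-a")),
    new Delete(Bytes.toBytes("region-b")));
LOG.debug("Applying meta mutations: " + mutationsToString(metaMutations));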
use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
the class BulkDeleteEndpoint method delete.
@Override
public void delete(RpcController controller, BulkDeleteRequest request,
    RpcCallback<BulkDeleteResponse> done) {
  long totalRowsDeleted = 0L;
  long totalVersionsDeleted = 0L;
  Region region = env.getRegion();
  int rowBatchSize = request.getRowBatchSize();
  Long timestamp = null;
  if (request.hasTimestamp()) {
    timestamp = request.getTimestamp();
  }
  DeleteType deleteType = request.getDeleteType();
  boolean hasMore = true;
  RegionScanner scanner = null;
  try {
    Scan scan = ProtobufUtil.toScan(request.getScan());
    if (scan.getFilter() == null && deleteType == DeleteType.ROW) {
      // We only need the row keys, so the first KV from each row is enough.
      // This filter can be applied only for row deletes; for other delete types
      // we rely on the scan itself to select the columns to be deleted.
      scan.setFilter(new FirstKeyOnlyFilter());
    }
    // Here we assume that the scan is set up correctly, with the appropriate
    // filter and the necessary column(s).
    scanner = region.getScanner(scan);
    while (hasMore) {
      List<List<Cell>> deleteRows = new ArrayList<>(rowBatchSize);
      for (int i = 0; i < rowBatchSize; i++) {
        List<Cell> results = new ArrayList<>();
        hasMore = scanner.next(results);
        if (results.size() > 0) {
          deleteRows.add(results);
        }
        if (!hasMore) {
          // There are no more rows.
          break;
        }
      }
      if (deleteRows.size() > 0) {
        Mutation[] deleteArr = new Mutation[deleteRows.size()];
        int i = 0;
        for (List<Cell> deleteRow : deleteRows) {
          deleteArr[i++] = createDeleteMutation(deleteRow, deleteType, timestamp);
        }
        OperationStatus[] opStatus = region.batchMutate(deleteArr, HConstants.NO_NONCE,
            HConstants.NO_NONCE);
        for (i = 0; i < opStatus.length; i++) {
          if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
            break;
          }
          totalRowsDeleted++;
          if (deleteType == DeleteType.VERSION) {
            byte[] versionsDeleted = deleteArr[i].getAttribute(NO_OF_VERSIONS_TO_DELETE);
            if (versionsDeleted != null) {
              totalVersionsDeleted += Bytes.toInt(versionsDeleted);
            }
          }
        }
      }
    }
  } catch (IOException ioe) {
    LOG.error(ioe);
    // Call ServerRpcController#getFailedOn() to retrieve this IOException at client side.
    CoprocessorRpcUtils.setControllerException(controller, ioe);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ioe) {
        LOG.error(ioe);
      }
    }
  }
  Builder responseBuilder = BulkDeleteResponse.newBuilder();
  responseBuilder.setRowsDeleted(totalRowsDeleted);
  if (deleteType == DeleteType.VERSION) {
    responseBuilder.setVersionsDeleted(totalVersionsDeleted);
  }
  BulkDeleteResponse result = responseBuilder.build();
  done.run(result);
}
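For context, a client invokes this endpoint through Table.coprocessorService over the row range of its scan and gets back one BulkDeleteResponse per region. Below is a condensed sketch of that call, modeled on the usual coprocessor-service invocation pattern; the variable names (table, scan), the batch size, and the use of CoprocessorRpcUtils.BlockingRpcCallback are assumptions for this sketch, and the coprocessorService call itself declares checked exceptions the caller must handle.

Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
    new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
  ServerRpcController controller = new ServerRpcController();
  CoprocessorRpcUtils.BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
      new CoprocessorRpcUtils.BlockingRpcCallback<>();

  @Override
  public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
    BulkDeleteRequest.Builder builder = BulkDeleteRequest.newBuilder();
    builder.setScan(ProtobufUtil.toScan(scan));
    builder.setDeleteType(DeleteType.ROW);
    builder.setRowBatchSize(500);
    service.delete(controller, builder.build(), rpcCallback);
    return rpcCallback.get();
  }
};
// One response per region covered by the scan's row range.
Map<byte[], BulkDeleteResponse> results = table.coprocessorService(
    BulkDeleteService.class, scan.getStartRow(), scan.getStopRow(), callable);
long rowsDeleted = 0;
for (BulkDeleteResponse response : results.values()) {
  rowsDeleted += response.getRowsDeleted();
}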