Use of org.apache.hadoop.hbase.client.Delete in project camel by apache.
The class HBaseProducer, method process.
public void process(Exchange exchange) throws Exception {
    try (Table table = endpoint.getTable()) {
        updateHeaders(exchange);
        String operation = (String) exchange.getIn().getHeader(HBaseConstants.OPERATION);
        Integer maxScanResult = exchange.getIn().getHeader(HBaseConstants.HBASE_MAX_SCAN_RESULTS, Integer.class);
        String fromRowId = (String) exchange.getIn().getHeader(HBaseConstants.FROM_ROW);
        String stopRowId = (String) exchange.getIn().getHeader(HBaseConstants.STOP_ROW);
        CellMappingStrategy mappingStrategy = endpoint.getCellMappingStrategyFactory().getStrategy(exchange.getIn());
        HBaseData data = mappingStrategy.resolveModel(exchange.getIn());
        List<Put> putOperations = new LinkedList<>();
        List<Delete> deleteOperations = new LinkedList<>();
        List<HBaseRow> getOperationResult = new LinkedList<>();
        List<HBaseRow> scanOperationResult = new LinkedList<>();
        for (HBaseRow hRow : data.getRows()) {
            hRow.apply(rowModel);
            if (HBaseConstants.PUT.equals(operation)) {
                putOperations.add(createPut(hRow));
            } else if (HBaseConstants.GET.equals(operation)) {
                HBaseRow getResultRow = getCells(table, hRow);
                getOperationResult.add(getResultRow);
            } else if (HBaseConstants.DELETE.equals(operation)) {
                deleteOperations.add(createDeleteRow(hRow));
            } else if (HBaseConstants.SCAN.equals(operation)) {
                scanOperationResult = scanCells(table, hRow, fromRowId, stopRowId, maxScanResult, endpoint.getFilters());
            }
        }
        // Check if we have something to add.
        if (!putOperations.isEmpty()) {
            table.put(putOperations);
        } else if (!deleteOperations.isEmpty()) {
            table.delete(deleteOperations);
        } else if (!getOperationResult.isEmpty()) {
            mappingStrategy.applyGetResults(exchange.getOut(), new HBaseData(getOperationResult));
        } else if (!scanOperationResult.isEmpty()) {
            mappingStrategy.applyScanResults(exchange.getOut(), new HBaseData(scanOperationResult));
        }
    }
}
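For orientation, here is a minimal route sketch showing how this producer is typically driven; the route endpoint and table name "myTable" are illustrative assumptions, not taken from the snippet above. Setting the operation header to DELETE steers process() into the branch that batches Delete operations.

import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.hbase.HBaseConstants;

public class HBaseDeleteRouteSketch extends RouteBuilder {
    @Override
    public void configure() throws Exception {
        // With HBaseConstants.OPERATION set to DELETE, HBaseProducer.process collects
        // one Delete per mapped row and issues a single table.delete(deleteOperations).
        from("direct:deleteRow")
            .setHeader(HBaseConstants.OPERATION, constant(HBaseConstants.DELETE))
            .to("hbase://myTable");
    }
}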
Use of org.apache.hadoop.hbase.client.Delete in project camel by apache.
The class HBaseIdempotentRepository, method remove.
@Override
public boolean remove(Object o) {
    try {
        byte[] b = HBaseHelper.toBytes(o);
        if (table.exists(new Get(b))) {
            Delete delete = new Delete(b);
            table.delete(delete);
            return true;
        } else {
            return false;
        }
    } catch (Exception e) {
        // Pass the exception as the last argument so its stack trace is logged
        // rather than silently swallowed.
        LOG.warn("Error removing object {} from HBase repository.", o, e);
        return false;
    }
}
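The remove method above is a check-then-act against the client API. A self-contained sketch of the same calls (table and row names are placeholders) makes the non-atomic window between exists() and delete() easier to see.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ExistsThenDeleteSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("idempotentTable"))) {
            byte[] row = Bytes.toBytes("some-message-id");
            // Same check-then-delete pattern as remove(); note it is not atomic,
            // so a concurrent writer can re-add the row between the two calls.
            if (table.exists(new Get(row))) {
                table.delete(new Delete(row));
            }
        }
    }
}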
Use of org.apache.hadoop.hbase.client.Delete in project hbase by apache.
The class MetaTableAccessor, method multiMutate.
/**
 * Performs an atomic multi-mutate operation against the given table.
 */
// Used by the RSGroup Coprocessor Endpoint. It had a copy/paste of the below. Need to reveal
// this facility for CPEP use or at least those CPEPs that are on their way to becoming part of
// core as is the intent for RSGroup eventually.
public static void multiMutate(Connection connection, final Table table, byte[] row,
        final List<Mutation> mutations) throws IOException {
    if (METALOG.isDebugEnabled()) {
        METALOG.debug(mutationsToString(mutations));
    }
    // TODO: Need rollback!!!!
    // TODO: Need Retry!!!
    // TODO: What for a timeout? Default write timeout? GET FROM HTABLE?
    // TODO: Review when we come through with ProcedureV2.
    RegionServerCallable<MutateRowsResponse, MultiRowMutationProtos.MultiRowMutationService.BlockingInterface> callable =
        new RegionServerCallable<MutateRowsResponse, MultiRowMutationProtos.MultiRowMutationService.BlockingInterface>(
            connection, table.getName(), row, null /* RpcController not used in this CPEP! */) {
        @Override
        protected MutateRowsResponse rpcCall() throws Exception {
            final MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
            for (Mutation mutation : mutations) {
                if (mutation instanceof Put) {
                    builder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
                } else if (mutation instanceof Delete) {
                    builder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
                } else {
                    throw new DoNotRetryIOException("multi in MetaEditor doesn't support " + mutation.getClass().getName());
                }
            }
            // The call to #prepare that ran before this invocation will have populated HRegionLocation.
            HRegionLocation hrl = getLocation();
            RegionSpecifier region = ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hrl.getRegionInfo().getRegionName());
            builder.setRegion(region);
            // Pass null for the RpcController; it is not used by this coprocessor endpoint.
            return getStub().mutateRows(null, builder.build());
        }

        // Called at the end of the super.prepare call. Sets the stub.
        @Override
        protected void setStubByServiceName(ServerName serviceName /* ignored */) throws IOException {
            CoprocessorRpcChannel channel = table.coprocessorService(getRow());
            setStub(MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel));
        }
    };
    int writeTimeout = connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
        connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
    // The region location should be cached in connection. Call prepare so this callable picks
    // up the region location (see super.prepare method).
    callable.prepare(false);
    callable.call(writeTimeout);
}
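A hedged usage sketch of the method (the wrapper class, row keys, and column values are illustrative assumptions): it batches a Put and a Delete into one atomic call, with the proviso that every mutation must belong to the region that owns the passed row.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiMutateSketch {
    static void replaceRow(Connection connection, byte[] oldRow, byte[] newRow) throws Exception {
        List<Mutation> mutations = new ArrayList<>();
        mutations.add(new Put(newRow)
            .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        mutations.add(new Delete(oldRow));
        try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
            // The row argument only locates the region that executes the atomic
            // batch; oldRow and newRow must both fall inside that region.
            MetaTableAccessor.multiMutate(connection, meta, oldRow, mutations);
        }
    }
}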
Use of org.apache.hadoop.hbase.client.Delete in project hbase by apache.
The class MetaTableAccessor, method removeRegionReplicasFromMeta.
/**
 * Deletes the replica location columns for a range of replica IDs from the passed rows.
 * @param metaRows rows in hbase:meta
 * @param replicaIndexToDeleteFrom the replica ID to start deleting from (inclusive)
 * @param numReplicasToRemove how many replicas to remove
 * @param connection connection we're using to access the meta table
 * @throws IOException if the delete against hbase:meta fails
 */
public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows, int replicaIndexToDeleteFrom,
        int numReplicasToRemove, Connection connection) throws IOException {
    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
    for (byte[] row : metaRows) {
        long now = EnvironmentEdgeManager.currentTime();
        Delete deleteReplicaLocations = new Delete(row);
        for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
            deleteReplicaLocations.addColumns(getCatalogFamily(), getServerColumn(i), now);
            deleteReplicaLocations.addColumns(getCatalogFamily(), getSeqNumColumn(i), now);
            deleteReplicaLocations.addColumns(getCatalogFamily(), getStartCodeColumn(i), now);
        }
        deleteFromMetaTable(connection, deleteReplicaLocations);
    }
}
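As a worked example of the loop bounds (a sketch assuming the column helpers on MetaTableAccessor are accessible to the caller; the row key is a placeholder): replicaIndexToDeleteFrom = 1 and numReplicasToRemove = 2 give absoluteIndex = 3, so only replica IDs 1 and 2 lose their columns and replica 0 is untouched.

import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class ReplicaDeleteSketch {
    public static void main(String[] args) {
        byte[] row = Bytes.toBytes("placeholder-meta-row");
        long now = EnvironmentEdgeManager.currentTime();
        // replicaIndexToDeleteFrom = 1, numReplicasToRemove = 2 => absoluteIndex = 3,
        // so the loop touches replica IDs 1 and 2 only; replica 0 keeps its columns.
        Delete d = new Delete(row);
        for (int i = 1; i < 3; i++) {
            d.addColumns(MetaTableAccessor.getCatalogFamily(), MetaTableAccessor.getServerColumn(i), now);
            d.addColumns(MetaTableAccessor.getCatalogFamily(), MetaTableAccessor.getSeqNumColumn(i), now);
            d.addColumns(MetaTableAccessor.getCatalogFamily(), MetaTableAccessor.getStartCodeColumn(i), now);
        }
    }
}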
Use of org.apache.hadoop.hbase.client.Delete in project hbase by apache.
The class MetaTableAccessor, method deleteTableState.
/**
 * Remove the state for a table from hbase:meta.
 * @param connection connection to use for the deletion
 * @param table table to delete the state for
 */
public static void deleteTableState(Connection connection, TableName table) throws IOException {
    long time = EnvironmentEdgeManager.currentTime();
    Delete delete = new Delete(table.getName());
    delete.addColumns(getTableFamily(), getStateColumn(), time);
    deleteFromMetaTable(connection, delete);
    LOG.info("Deleted table " + table + " state from META");
}
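A short usage sketch (connection setup and the table name are placeholder assumptions): the cleanup call made when a table's state column should disappear from its hbase:meta row.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableStateSketch {
    public static void main(String[] args) throws Exception {
        try (Connection connection =
                 ConnectionFactory.createConnection(HBaseConfiguration.create())) {
            // Removes only the state column from the table's hbase:meta row.
            MetaTableAccessor.deleteTableState(connection, TableName.valueOf("ns:myTable"));
        }
    }
}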