Use of org.apache.hadoop.hbase.client.Row in project phoenix by apache.
The class DynamicColumnIT, method initTable.
@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (HBaseAdmin admin = services.getAdmin()) {
            HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
            htd.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_A));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_B));
            admin.createTable(htd);
        }
        try (HTableInterface hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");
            Put put = new Put(key);
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.add(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.add(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.add(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.add(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);
            hTable.batch(mutations);
            // Create Phoenix table after HBase table was created through the native APIs
            // The timestamp of the table creation must be later than the timestamp of the data
            pconn.createStatement().execute("create table " + tableName +
                " (entry varchar not null," +
                " F varchar," +
                " A.F1v1 varchar," +
                " A.F1v2 varchar," +
                " B.F2v1 varchar" +
                " CONSTRAINT pk PRIMARY KEY (entry)) COLUMN_ENCODED_BYTES=NONE");
        }
    }
}
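The Phoenix schema above deliberately omits some columns written through the raw HBase API (the DV column in the default family and B.F2v2). A minimal sketch of reading one of them back as a Phoenix dynamic column, declared in the FROM clause; this is a hypothetical follow-up, not part of the test, and it assumes the test's own tableName field and getUrl() helper:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Hypothetical follow-up (not part of the test): reads the undeclared column
// B.F2v2 back by declaring it as a dynamic column in the FROM clause.
// tableName and getUrl() are assumed to be the test's own helpers.
public void readDynamicColumn() throws Exception {
    try (Connection conn = DriverManager.getConnection(getUrl());
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT entry, F2v2 FROM " + tableName + " (B.F2v2 VARCHAR)")) {
        while (rs.next()) {
            // Expected to print "entry1 -> f2value2" given the Put in initTable()
            System.out.println(rs.getString(1) + " -> " + rs.getString(2));
        }
    }
}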
Use of org.apache.hadoop.hbase.client.Row in project atlas by apache.
The class HBaseStoreManager, method mutateMany.
@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws BackendException {
    logger.debug("Enter mutateMany");
    final MaskedTimestamp commitTime = new MaskedTimestamp(txh);
    // In case of an addition and deletion with identical timestamps, the
    // deletion tombstone wins.
    // http://hbase.apache.org/book/versions.html#d244e4250
    Map<StaticBuffer, Pair<Put, Delete>> commandsPerKey = convertToCommands(mutations,
        commitTime.getAdditionTime(times.getUnit()),
        commitTime.getDeletionTime(times.getUnit()));
    // actual batch operation
    List<Row> batch = new ArrayList<>(commandsPerKey.size());
    // convert sorted commands into representation required for 'batch' operation
    for (Pair<Put, Delete> commands : commandsPerKey.values()) {
        if (commands.getFirst() != null)
            batch.add(commands.getFirst());
        if (commands.getSecond() != null)
            batch.add(commands.getSecond());
    }
    try {
        TableMask table = null;
        try {
            table = cnx.getTable(tableName);
            logger.debug("mutateMany : batch mutate started size {} ", batch.size());
            table.batch(batch, new Object[batch.size()]);
            logger.debug("mutateMany : batch mutate finished {} ", batch.size());
        } finally {
            IOUtils.closeQuietly(table);
        }
    } catch (IOException | InterruptedException e) {
        throw new TemporaryBackendException(e);
    }
    sleepAfterWrite(txh, commitTime);
}
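convertToCommands is not shown here, but its contract is visible: each key maps to at most one Put (stamped with the addition time) and one Delete (stamped with the deletion time), which is what makes the tombstone-wins comment above hold. A hedged sketch of that per-key construction using the standard HBase client API; the class and method names are illustrative, not Atlas's:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;

// Illustrative helpers, not Atlas's actual code: additions are stamped with
// additionTime and deletions with deletionTime, so an addition and deletion
// carrying identical timestamps resolve in favor of the delete tombstone.
final class KeyCommandSketch {
    static Put putFor(byte[] cf, byte[] key, byte[] qualifier, byte[] value, long additionTime) {
        Put put = new Put(key);
        put.addColumn(cf, qualifier, additionTime, value); // explicit version timestamp
        return put;
    }

    static Delete deleteFor(byte[] cf, byte[] key, byte[] qualifier, long deletionTime) {
        Delete del = new Delete(key);
        del.addColumns(cf, qualifier, deletionTime); // removes versions <= deletionTime
        return del;
    }
}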
Use of org.apache.hadoop.hbase.client.Row in project hbase by apache.
The class RequestConverter, method buildNoDataRegionActions.
/**
* Create a protocol buffer multirequest with NO data for a list of actions (data is carried
* otherwise than via protobuf). This means it just notes attributes, whether to write the
* WAL, etc., and the presence in protobuf serves as a placeholder for the data which is
* coming along otherwise. Note that Get is different. It does not contain 'data' and is always
* carried by protobuf. We return references to the data by adding them to the passed in
* <code>data</code> param.
* <p> Propagates each Action's original index.
* <p> The passed in multiRequestBuilder will be populated with region actions.
* @param regionName The region name of the actions.
* @param actions The actions that are grouped by the same region name.
* @param cells Place to stuff references to actual data.
* @param multiRequestBuilder The multiRequestBuilder to be populated with region actions.
* @param regionActionBuilder regionActionBuilder to be used to build region action.
* @param actionBuilder actionBuilder to be used to build action.
* @param mutationBuilder mutationBuilder to be used to build mutation.
* @param nonceGroup nonceGroup to be applied.
* @param indexMap Map of created RegionAction to the original index for a
* RowMutations/CheckAndMutate within the original list of actions
* @throws IOException
*/
public static void buildNoDataRegionActions(final byte[] regionName,
        final Iterable<Action> actions, final List<CellScannable> cells,
        final MultiRequest.Builder multiRequestBuilder,
        final RegionAction.Builder regionActionBuilder,
        final ClientProtos.Action.Builder actionBuilder,
        final MutationProto.Builder mutationBuilder,
        long nonceGroup, final Map<Integer, Integer> indexMap) throws IOException {
    regionActionBuilder.clear();
    RegionAction.Builder builder = getRegionActionBuilderWithRegion(regionActionBuilder, regionName);
    ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
    boolean hasNonce = false;
    List<Action> rowMutationsList = new ArrayList<>();
    List<Action> checkAndMutates = new ArrayList<>();
    for (Action action : actions) {
        Row row = action.getAction();
        actionBuilder.clear();
        actionBuilder.setIndex(action.getOriginalIndex());
        mutationBuilder.clear();
        if (row instanceof Get) {
            Get g = (Get) row;
            builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
        } else if (row instanceof Put) {
            buildNoDataRegionAction((Put) row, cells, builder, actionBuilder, mutationBuilder);
        } else if (row instanceof Delete) {
            buildNoDataRegionAction((Delete) row, cells, builder, actionBuilder, mutationBuilder);
        } else if (row instanceof Append) {
            buildNoDataRegionAction((Append) row, cells, action.getNonce(), builder, actionBuilder,
                mutationBuilder);
            hasNonce = true;
        } else if (row instanceof Increment) {
            buildNoDataRegionAction((Increment) row, cells, action.getNonce(), builder, actionBuilder,
                mutationBuilder);
            hasNonce = true;
        } else if (row instanceof RegionCoprocessorServiceExec) {
            RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
            // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
            org.apache.hbase.thirdparty.com.google.protobuf.ByteString value =
                org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations
                    .unsafeWrap(exec.getRequest().toByteArray());
            if (cpBuilder == null) {
                cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
            } else {
                cpBuilder.clear();
            }
            builder.addAction(actionBuilder.setServiceCall(cpBuilder
                .setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
                .setServiceName(exec.getMethod().getService().getFullName())
                .setMethodName(exec.getMethod().getName())
                .setRequest(value)));
        } else if (row instanceof RowMutations) {
            rowMutationsList.add(action);
        } else if (row instanceof CheckAndMutate) {
            checkAndMutates.add(action);
        } else {
            throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
        }
    }
    if (builder.getActionCount() > 0) {
        multiRequestBuilder.addRegionAction(builder.build());
    }
    // We maintain a map to keep track of this RegionAction and the original Action index.
    for (Action action : rowMutationsList) {
        builder.clear();
        getRegionActionBuilderWithRegion(builder, regionName);
        boolean hasIncrementOrAppend = buildNoDataRegionAction((RowMutations) action.getAction(),
            cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
        if (hasIncrementOrAppend) {
            hasNonce = true;
        }
        builder.setAtomic(true);
        multiRequestBuilder.addRegionAction(builder.build());
        // This rowMutations region action is at (multiRequestBuilder.getRegionActionCount() - 1)
        // in the overall multiRequest.
        indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
    }
    // Likewise, map each CheckAndMutate RegionAction back to its original Action index.
    for (Action action : checkAndMutates) {
        builder.clear();
        getRegionActionBuilderWithRegion(builder, regionName);
        CheckAndMutate cam = (CheckAndMutate) action.getAction();
        builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(),
            cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(),
            cam.getTimeRange()));
        if (cam.getAction() instanceof Put) {
            actionBuilder.clear();
            mutationBuilder.clear();
            buildNoDataRegionAction((Put) cam.getAction(), cells, builder, actionBuilder, mutationBuilder);
        } else if (cam.getAction() instanceof Delete) {
            actionBuilder.clear();
            mutationBuilder.clear();
            buildNoDataRegionAction((Delete) cam.getAction(), cells, builder, actionBuilder, mutationBuilder);
        } else if (cam.getAction() instanceof Increment) {
            actionBuilder.clear();
            mutationBuilder.clear();
            buildNoDataRegionAction((Increment) cam.getAction(), cells, action.getNonce(), builder,
                actionBuilder, mutationBuilder);
            hasNonce = true;
        } else if (cam.getAction() instanceof Append) {
            actionBuilder.clear();
            mutationBuilder.clear();
            buildNoDataRegionAction((Append) cam.getAction(), cells, action.getNonce(), builder,
                actionBuilder, mutationBuilder);
            hasNonce = true;
        } else if (cam.getAction() instanceof RowMutations) {
            boolean hasIncrementOrAppend = buildNoDataRegionAction((RowMutations) cam.getAction(),
                cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
            if (hasIncrementOrAppend) {
                hasNonce = true;
            }
            builder.setAtomic(true);
        } else {
            throw new DoNotRetryIOException("CheckAndMutate doesn't support " + cam.getAction().getClass().getName());
        }
        multiRequestBuilder.addRegionAction(builder.build());
        // This CheckAndMutate region action is at (multiRequestBuilder.getRegionActionCount() - 1)
        // in the overall multiRequest.
        indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
    }
    if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
        multiRequestBuilder.setNonceGroup(nonceGroup);
    }
}
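The indexMap bookkeeping deserves a closer look: every RowMutations or CheckAndMutate becomes its own RegionAction, and its position within the MultiRequest is recorded against the original action index so that responses can later be matched back to the submitted actions. A simplified model of that mapping (illustrative names, not the HBase API):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Simplified model of the bookkeeping, not HBase code: each atomic action is
// appended as its own RegionAction, and indexMap records
// (position of the RegionAction in the multi-request) -> (original action index).
static Map<Integer, Integer> mapAtomicActions(List<Integer> originalIndexes,
        int regionActionsAlreadyAdded) {
    Map<Integer, Integer> indexMap = new HashMap<>();
    int position = regionActionsAlreadyAdded;
    for (int originalIndex : originalIndexes) {
        indexMap.put(position++, originalIndex);
    }
    return indexMap;
}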
Use of org.apache.hadoop.hbase.client.Row in project hbase by apache.
The class ReplicationSink, method replicateEntries.
/**
* Replicate this array of entries directly into the local cluster using the native client. Only
* operates against the raw protobuf type, saving a conversion from pb to pojo.
* @param replicationClusterId Id which will uniquely identify source cluster FS client
* configurations in the replication configuration directory
* @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
* directory
* @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
* @throws IOException If failed to replicate the data
*/
public void replicateEntries(List<WALEntry> entries, final CellScanner cells,
        String replicationClusterId, String sourceBaseNamespaceDirPath,
        String sourceHFileArchiveDirPath) throws IOException {
    if (entries.isEmpty()) {
        return;
    }
    // Very simple optimization where we batch sequences of rows going to the same table.
    try {
        long totalReplicated = 0;
        // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
        // invocation of this method per table and cluster id.
        Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
        Map<List<String>, Map<String, List<Pair<byte[], List<String>>>>> bulkLoadsPerClusters = null;
        for (WALEntry entry : entries) {
            TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
            if (this.walEntrySinkFilter != null) {
                if (this.walEntrySinkFilter.filter(table, entry.getKey().getWriteTime())) {
                    // Skip Cells in CellScanner associated with this entry.
                    int count = entry.getAssociatedCellCount();
                    for (int i = 0; i < count; i++) {
                        // Throw index out of bounds if our cell count is off
                        if (!cells.advance()) {
                            throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
                        }
                    }
                    continue;
                }
            }
            Cell previousCell = null;
            Mutation mutation = null;
            int count = entry.getAssociatedCellCount();
            for (int i = 0; i < count; i++) {
                // Throw index out of bounds if our cell count is off
                if (!cells.advance()) {
                    throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
                }
                Cell cell = cells.current();
                // Handle bulk load hfiles replication
                if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
                    BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell);
                    if (bld.getReplicate()) {
                        if (bulkLoadsPerClusters == null) {
                            bulkLoadsPerClusters = new HashMap<>();
                        }
                        // Map of table name Vs list of pair of family and list of
                        // hfile paths from its namespace
                        Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap =
                            bulkLoadsPerClusters.computeIfAbsent(bld.getClusterIdsList(), k -> new HashMap<>());
                        buildBulkLoadHFileMap(bulkLoadHFileMap, table, bld);
                    }
                } else {
                    // Handle wal replication
                    if (isNewRowOrType(previousCell, cell)) {
                        // Create new mutation
                        mutation = CellUtil.isDelete(cell)
                            ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
                            : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                        List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
                        for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
                            clusterIds.add(toUUID(clusterId));
                        }
                        mutation.setClusterIds(clusterIds);
                        mutation.setAttribute(ReplicationUtils.REPLICATION_ATTR_NAME, HConstants.EMPTY_BYTE_ARRAY);
                        addToHashMultiMap(rowMap, table, clusterIds, mutation);
                    }
                    if (CellUtil.isDelete(cell)) {
                        ((Delete) mutation).add(cell);
                    } else {
                        ((Put) mutation).add(cell);
                    }
                    previousCell = cell;
                }
            }
            totalReplicated++;
        }
        // TODO Replicating mutations and bulk loaded data can be made parallel
        if (!rowMap.isEmpty()) {
            LOG.debug("Started replicating mutations.");
            for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
                batch(entry.getKey(), entry.getValue().values(), rowSizeWarnThreshold);
            }
            LOG.debug("Finished replicating mutations.");
        }
        if (bulkLoadsPerClusters != null) {
            for (Entry<List<String>, Map<String, List<Pair<byte[], List<String>>>>> entry : bulkLoadsPerClusters.entrySet()) {
                Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = entry.getValue();
                if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
                    LOG.debug("Replicating {} bulk loaded data", entry.getKey().toString());
                    Configuration providerConf = this.provider.getConf(this.conf, replicationClusterId);
                    try (HFileReplicator hFileReplicator = new HFileReplicator(providerConf,
                            sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap,
                            conf, getConnection(), entry.getKey())) {
                        hFileReplicator.replicate();
                        LOG.debug("Finished replicating {} bulk loaded data", entry.getKey().toString());
                    }
                }
            }
        }
        int size = entries.size();
        this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
        this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
        this.totalReplicatedEdits.addAndGet(totalReplicated);
    } catch (IOException ex) {
        LOG.error("Unable to accept edit because:", ex);
        throw ex;
    }
}
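The grouping of cells into mutations hinges on isNewRowOrType, which is not shown above. Judging from its call sites, it plausibly looks like the sketch below: a new Put or Delete is started whenever the row key or the cell type byte changes between consecutive cells.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

// Plausible shape of the helper, inferred from its call sites above; not
// necessarily the exact HBase source.
private static boolean isNewRowOrType(Cell previousCell, Cell cell) {
    return previousCell == null
        || previousCell.getTypeByte() != cell.getTypeByte()
        || !CellUtil.matchingRows(previousCell, cell);
}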
Use of org.apache.hadoop.hbase.client.Row in project incubator-atlas by apache.
The class HBaseStoreManager, method mutateMany.
@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws BackendException {
    logger.debug("Enter mutateMany");
    final MaskedTimestamp commitTime = new MaskedTimestamp(txh);
    // In case of an addition and deletion with identical timestamps, the
    // deletion tombstone wins.
    // http://hbase.apache.org/book/versions.html#d244e4250
    Map<StaticBuffer, Pair<Put, Delete>> commandsPerKey = convertToCommands(mutations,
        commitTime.getAdditionTime(times.getUnit()),
        commitTime.getDeletionTime(times.getUnit()));
    // actual batch operation
    List<Row> batch = new ArrayList<>(commandsPerKey.size());
    // convert sorted commands into representation required for 'batch' operation
    for (Pair<Put, Delete> commands : commandsPerKey.values()) {
        if (commands.getFirst() != null)
            batch.add(commands.getFirst());
        if (commands.getSecond() != null)
            batch.add(commands.getSecond());
    }
    try {
        TableMask table = null;
        try {
            table = cnx.getTable(tableName);
            logger.debug("mutateMany : batch mutate started size {} ", batch.size());
            table.batch(batch, new Object[batch.size()]);
            logger.debug("mutateMany : batch mutate finished {} ", batch.size());
        } finally {
            IOUtils.closeQuietly(table);
        }
    } catch (IOException | InterruptedException e) {
        throw new TemporaryBackendException(e);
    }
    sleepAfterWrite(txh, commitTime);
}
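A note on the resource handling shared by both Atlas variants: the null-then-closeQuietly pattern around TableMask can be written with try-with-resources instead, assuming TableMask implements AutoCloseable (which the IOUtils.closeQuietly(table) call suggests). A sketch using the same identifiers as mutateMany above:

// Equivalent sketch of the batch call using try-with-resources; assumes
// TableMask is AutoCloseable. Identifiers (cnx, tableName, batch, logger)
// are those of the surrounding mutateMany method.
try (TableMask table = cnx.getTable(tableName)) {
    logger.debug("mutateMany : batch mutate started size {} ", batch.size());
    table.batch(batch, new Object[batch.size()]);
    logger.debug("mutateMany : batch mutate finished {} ", batch.size());
} catch (IOException | InterruptedException e) {
    throw new TemporaryBackendException(e);
}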