Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
The class AccessControlLists, method addUserPermission.
/**
 * Stores a new user permission grant in the access control lists table.
 * @param conf the configuration
 * @param userPerm the details of the permission to be granted
 * @param t acl table instance. It is closed upon method return.
 * @param mergeExistingPermissions whether to merge the granted actions with any
 *          actions already stored for this user, rather than overwrite them
 * @throws IOException in the case of an error accessing the metadata table
 */
static void addUserPermission(Configuration conf, UserPermission userPerm, Table t,
    boolean mergeExistingPermissions) throws IOException {
  Permission.Action[] actions = userPerm.getActions();
  byte[] rowKey = userPermissionRowKey(userPerm);
  Put p = new Put(rowKey);
  byte[] key = userPermissionKey(userPerm);
  if ((actions == null) || (actions.length == 0)) {
    String msg = "No actions associated with user '" + Bytes.toString(userPerm.getUser()) + "'";
    LOG.warn(msg);
    throw new IOException(msg);
  }
  Set<Permission.Action> actionSet = new TreeSet<Permission.Action>();
  if (mergeExistingPermissions) {
    List<UserPermission> perms = getUserPermissions(conf, rowKey);
    UserPermission currentPerm = null;
    for (UserPermission perm : perms) {
      if (Bytes.equals(perm.getUser(), userPerm.getUser())
          && ((userPerm.isGlobal() && ACL_TABLE_NAME.equals(perm.getTableName()))
              || perm.tableFieldsEqual(userPerm))) {
        currentPerm = perm;
        break;
      }
    }
    if (currentPerm != null && currentPerm.getActions() != null) {
      actionSet.addAll(Arrays.asList(currentPerm.getActions()));
    }
  }
  // merge current action with new action.
  actionSet.addAll(Arrays.asList(actions));
  // serialize to byte array.
  byte[] value = new byte[actionSet.size()];
  int index = 0;
  for (Permission.Action action : actionSet) {
    value[index++] = action.code();
  }
  p.addImmutable(ACL_LIST_FAMILY, key, value);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " "
        + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
  }
  try {
    t.put(p);
  } finally {
    t.close();
  }
}
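The interesting step here is how existing grants are merged with the new ones before the single Put is written: the actions end up as a sorted set and are serialized as one byte per action code in the ACL cell value. A minimal sketch of that merge-and-encode step pulled out on its own (the class and helper names are hypothetical, not part of HBase):

import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hbase.security.access.Permission;

public class ActionMergeSketch {
  // Merge existing and newly granted actions, then serialize them to the
  // single-byte action codes stored in the ACL table cell value.
  static byte[] mergeAndEncode(Permission.Action[] existing, Permission.Action[] granted) {
    Set<Permission.Action> actionSet = new TreeSet<>();
    if (existing != null) {
      actionSet.addAll(Arrays.asList(existing));
    }
    actionSet.addAll(Arrays.asList(granted));
    byte[] value = new byte[actionSet.size()];
    int index = 0;
    for (Permission.Action action : actionSet) {
      value[index++] = action.code();
    }
    return value;
  }
}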
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
The class ReplicationSink, method replicateEntries.
/**
 * Replicate this array of entries directly into the local cluster using the native client. Only
 * operates against the raw protobuf types, saving a conversion from pb to pojo.
 * @param entries the WAL entries to replicate
 * @param cells scanner over the cells referenced by the entries
 * @param replicationClusterId Id which will uniquely identify source cluster FS client
 *          configurations in the replication configuration directory
 * @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
 *          directory
 * @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
 * @throws IOException If failed to replicate the data
 */
public void replicateEntries(List<WALEntry> entries, final CellScanner cells,
    String replicationClusterId, String sourceBaseNamespaceDirPath,
    String sourceHFileArchiveDirPath) throws IOException {
  if (entries.isEmpty())
    return;
  if (cells == null)
    throw new NullPointerException("TODO: Add handling of null CellScanner");
  // Very simple optimization where we batch sequences of rows going
  // to the same table.
  try {
    long totalReplicated = 0;
    // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
    // invocation of this method per table and cluster id.
    Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
    // Map of table name Vs list of pair of family and list of hfile paths from its namespace
    Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = null;
    for (WALEntry entry : entries) {
      TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
      Cell previousCell = null;
      Mutation m = null;
      int count = entry.getAssociatedCellCount();
      for (int i = 0; i < count; i++) {
        // Throw index out of bounds if our cell count is off
        if (!cells.advance()) {
          throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
        }
        Cell cell = cells.current();
        // Handle bulk load hfiles replication
        if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
          if (bulkLoadHFileMap == null) {
            bulkLoadHFileMap = new HashMap<>();
          }
          buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell);
        } else {
          // Handle wal replication
          if (isNewRowOrType(previousCell, cell)) {
            // Create new mutation
            m = CellUtil.isDelete(cell)
                ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
                : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
            for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
              clusterIds.add(toUUID(clusterId));
            }
            m.setClusterIds(clusterIds);
            addToHashMultiMap(rowMap, table, clusterIds, m);
          }
          if (CellUtil.isDelete(cell)) {
            ((Delete) m).addDeleteMarker(cell);
          } else {
            ((Put) m).add(cell);
          }
          previousCell = cell;
        }
      }
      totalReplicated++;
    }
    // TODO Replicating mutations and bulk loaded data can be made parallel
    if (!rowMap.isEmpty()) {
      LOG.debug("Started replicating mutations.");
      for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
        batch(entry.getKey(), entry.getValue().values());
      }
      LOG.debug("Finished replicating mutations.");
    }
    if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
      LOG.debug("Started replicating bulk loaded data.");
      HFileReplicator hFileReplicator =
          new HFileReplicator(this.provider.getConf(this.conf, replicationClusterId),
              sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf,
              getConnection());
      hFileReplicator.replicate();
      LOG.debug("Finished replicating bulk loaded data.");
    }
    int size = entries.size();
    this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
    this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
    this.totalReplicatedEdits.addAndGet(totalReplicated);
  } catch (IOException ex) {
    LOG.error("Unable to accept edit because:", ex);
    throw ex;
  }
}
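The grouping above leans on the isNewRowOrType(previousCell, cell) helper to decide when a fresh Mutation must be started. Its body is not shown in this snippet, but judging from the identical boundary check used in WALSplitter.getMutationsFromWALEntry below, it presumably looks roughly like this (a sketch, not quoted from the HBase sources):

// Sketch: start a new Mutation whenever the row changes or the cell type flips
// between Put-type and Delete-type cells.
private boolean isNewRowOrType(final Cell previousCell, final Cell cell) {
  return previousCell == null
      || previousCell.getTypeByte() != cell.getTypeByte()
      || !CellUtil.matchingRow(previousCell, cell);
}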
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
The class WALSplitter, method getMutationsFromWALEntry.
/**
 * This function is used to construct mutations from a WALEntry. It also
 * reconstructs the WALKey and WALEdit from the passed in WALEntry.
 * @param entry the WALEntry to convert
 * @param cells scanner over the cells referenced by the entry
 * @param logEntry pair of WALKey and WALEdit that receives the WALKey and WALEdit instances
 *          extracted from the passed in WALEntry
 * @param durability durability to set on each reconstructed mutation
 * @return list of MutationReplay (mutation type, mutation and nonces) to be replayed
 * @throws IOException if advancing the cell scanner fails
 */
public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells,
    Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
  if (entry == null) {
    // return an empty array
    return new ArrayList<>();
  }
  long replaySeqId = (entry.getKey().hasOrigSequenceNumber())
      ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
  int count = entry.getAssociatedCellCount();
  List<MutationReplay> mutations = new ArrayList<>();
  Cell previousCell = null;
  Mutation m = null;
  WALKey key = null;
  WALEdit val = null;
  if (logEntry != null)
    val = new WALEdit();
  for (int i = 0; i < count; i++) {
    // Throw index out of bounds if our cell count is off
    if (!cells.advance()) {
      throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
    }
    Cell cell = cells.current();
    if (val != null)
      val.add(cell);
    boolean isNewRowOrType = previousCell == null
        || previousCell.getTypeByte() != cell.getTypeByte()
        || !CellUtil.matchingRow(previousCell, cell);
    if (isNewRowOrType) {
      // Create new mutation
      if (CellUtil.isDelete(cell)) {
        m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
        // Deletes don't have nonces.
        mutations.add(new MutationReplay(MutationType.DELETE, m, HConstants.NO_NONCE,
            HConstants.NO_NONCE));
      } else {
        m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
        // Puts might come from increment or append, thus we need nonces.
        long nonceGroup = entry.getKey().hasNonceGroup()
            ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
        long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
        mutations.add(new MutationReplay(MutationType.PUT, m, nonceGroup, nonce));
      }
    }
    if (CellUtil.isDelete(cell)) {
      ((Delete) m).addDeleteMarker(cell);
    } else {
      ((Put) m).add(cell);
    }
    m.setDurability(durability);
    previousCell = cell;
  }
  // reconstruct WALKey
  if (logEntry != null) {
    org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto =
        entry.getKey();
    List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
    for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
      clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
    }
    key = new WALKey(walKeyProto.getEncodedRegionName().toByteArray(),
        TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId,
        walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(),
        walKeyProto.getNonce(), null);
    logEntry.setFirst(key);
    logEntry.setSecond(val);
  }
  return mutations;
}
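For context, a caller typically hands this method the pair to populate and then applies the rebuilt mutations. A hedged usage sketch follows; walEntry, cellScanner and the logging are illustrative, and it assumes MutationReplay exposes its type and mutation to the caller:

// Rebuild the mutations carried by one WALEntry, along with its WALKey/WALEdit.
Pair<WALKey, WALEdit> reconstructed = new Pair<>();
List<MutationReplay> replays = WALSplitter.getMutationsFromWALEntry(
    walEntry, cellScanner, reconstructed, Durability.USE_DEFAULT);
for (MutationReplay replay : replays) {
  // Each entry wraps a rebuilt Put or Delete ready to be applied to the region.
  LOG.debug("Replaying " + replay.type + " for row "
      + Bytes.toStringBinary(replay.mutation.getRow()));
}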
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
The class HBaseTestingUtility, method loadNumericRows.
public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Put put = new Put(data);
    put.addColumn(f, null, data);
    t.put(put);
  }
}
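A hedged usage sketch of how a test might call this; the table name, family and row range are illustrative, and it assumes a mini-cluster has already been started and the table created:

HBaseTestingUtility util = new HBaseTestingUtility();
// ... mini-cluster started and the "test" table created elsewhere ...
Table table = util.getConnection().getTable(TableName.valueOf("test"));
byte[] family = Bytes.toBytes("f");
// Writes rows "0" .. "99"; each row key doubles as the cell value.
util.loadNumericRows(table, family, 0, 100);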
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
The class HBaseTestingUtility, method loadTable.
/**
 * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
 * @param t Table to load
 * @param f Array of families to load
 * @param value the values of the cells. If null is passed, the row key is used as value
 * @param writeToWAL if false, the puts are written with Durability.SKIP_WAL
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
    throws IOException {
  List<Put> puts = new ArrayList<>();
  for (byte[] row : HBaseTestingUtility.ROWS) {
    Put put = new Put(row);
    put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
    for (int i = 0; i < f.length; i++) {
      byte[] value1 = value != null ? value : row;
      put.addColumn(f[i], f[i], value1);
    }
    puts.add(put);
  }
  t.put(puts);
  return puts.size();
}
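A hedged usage sketch, assuming `util` and `table` from the previous example and two column families that exist on the table: every row from 'aaa' to 'zzz' is loaded into both families, the row key is used as the value (null is passed), and the puts go through the WAL:

byte[][] families = { Bytes.toBytes("f1"), Bytes.toBytes("f2") };
// Returns the number of rows written (one Put per row in HBaseTestingUtility.ROWS).
int rowsLoaded = util.loadTable(table, families, null, true);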