Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class ReplicationSink, method replicateEntries:
/**
 * Replicate this array of entries directly into the local cluster using the native client. Only
 * operates against raw protobuf types, saving a conversion from pb to pojo.
 * @param entries the WAL entries to be replicated
 * @param cells a CellScanner over the cells associated with the entries
 * @param replicationClusterId Id which will uniquely identify the source cluster FS client
 *          configurations in the replication configuration directory
 * @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
 *          directory
 * @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
 * @throws IOException if the data could not be replicated
 */
public void replicateEntries(List<WALEntry> entries, final CellScanner cells,
    String replicationClusterId, String sourceBaseNamespaceDirPath,
    String sourceHFileArchiveDirPath) throws IOException {
  if (entries.isEmpty()) return;
  if (cells == null) throw new NullPointerException("TODO: Add handling of null CellScanner");
  // Very simple optimization where we batch sequences of rows going
  // to the same table.
  try {
    long totalReplicated = 0;
    // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
    // invocation of this method per table and cluster id.
    Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
    // Map of table name Vs list of pair of family and list of hfile paths from its namespace
    Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = null;
    for (WALEntry entry : entries) {
      TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
      Cell previousCell = null;
      Mutation m = null;
      int count = entry.getAssociatedCellCount();
      for (int i = 0; i < count; i++) {
        // Throw index out of bounds if our cell count is off
        if (!cells.advance()) {
          throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
        }
        Cell cell = cells.current();
        // Handle bulk load hfiles replication
        if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
          if (bulkLoadHFileMap == null) {
            bulkLoadHFileMap = new HashMap<>();
          }
          buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell);
        } else {
          // Handle wal replication
          if (isNewRowOrType(previousCell, cell)) {
            // Create new mutation
            m = CellUtil.isDelete(cell)
                ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
                : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
            for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
              clusterIds.add(toUUID(clusterId));
            }
            m.setClusterIds(clusterIds);
            addToHashMultiMap(rowMap, table, clusterIds, m);
          }
          if (CellUtil.isDelete(cell)) {
            ((Delete) m).addDeleteMarker(cell);
          } else {
            ((Put) m).add(cell);
          }
          previousCell = cell;
        }
      }
      totalReplicated++;
    }
    // TODO Replicating mutations and bulk loaded data can be made parallel
    if (!rowMap.isEmpty()) {
      LOG.debug("Started replicating mutations.");
      for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
        batch(entry.getKey(), entry.getValue().values());
      }
      LOG.debug("Finished replicating mutations.");
    }
    if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
      LOG.debug("Started replicating bulk loaded data.");
      HFileReplicator hFileReplicator =
          new HFileReplicator(this.provider.getConf(this.conf, replicationClusterId),
              sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf,
              getConnection());
      hFileReplicator.replicate();
      LOG.debug("Finished replicating bulk loaded data.");
    }
    int size = entries.size();
    this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
    this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
    this.totalReplicatedEdits.addAndGet(totalReplicated);
  } catch (IOException ex) {
    LOG.error("Unable to accept edit because:", ex);
    throw ex;
  }
}
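The excerpt refers to two private helpers that are not shown, isNewRowOrType and addToHashMultiMap. A minimal sketch of what they could look like, assuming the grouping semantics described by the comments above (this is illustrative, not the actual ReplicationSink source):

// Sketch only: the real ReplicationSink helpers may differ in detail.
// A new Mutation is started whenever the row or the cell type changes, so
// consecutive cells for the same row and type are folded into one Put/Delete.
private boolean isNewRowOrType(final Cell previousCell, final Cell cell) {
  return previousCell == null
      || previousCell.getTypeByte() != cell.getTypeByte()
      || !CellUtil.matchingRow(previousCell, cell);
}

// Group mutations first by table, then by the list of cluster ids that have
// already seen the edit, so each (table, clusterIds) combination is batched together.
private <K1, K2, V> void addToHashMultiMap(Map<K1, Map<K2, List<V>>> map, K1 key1, K2 key2, V value) {
  map.computeIfAbsent(key1, k -> new HashMap<>())
      .computeIfAbsent(key2, k -> new ArrayList<>())
      .add(value);
}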
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class AccessControlLists, method loadAll:
/**
 * Loads all of the permission grants stored in a region of the {@code _acl_}
 * table.
 *
 * @param aclRegion the region of the {@code _acl_} table to scan
 * @return a map of ACL entry name (table or namespace) to the permissions granted on it
 * @throws IOException if the ACL region cannot be scanned
 */
static Map<byte[], ListMultimap<String, TablePermission>> loadAll(Region aclRegion)
    throws IOException {
  if (!isAclRegion(aclRegion)) {
    throw new IOException("Can only load permissions from " + ACL_TABLE_NAME);
  }
  Map<byte[], ListMultimap<String, TablePermission>> allPerms =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
  // do a full scan of _acl_ table
  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);
  InternalScanner iScanner = null;
  try {
    iScanner = aclRegion.getScanner(scan);
    while (true) {
      List<Cell> row = new ArrayList<>();
      boolean hasNext = iScanner.next(row);
      ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
      byte[] entry = null;
      for (Cell kv : row) {
        if (entry == null) {
          entry = CellUtil.cloneRow(kv);
        }
        Pair<String, TablePermission> permissionsOfUserOnTable = parsePermissionRecord(entry, kv);
        if (permissionsOfUserOnTable != null) {
          String username = permissionsOfUserOnTable.getFirst();
          TablePermission permissions = permissionsOfUserOnTable.getSecond();
          perms.put(username, permissions);
        }
      }
      if (entry != null) {
        allPerms.put(entry, perms);
      }
      if (!hasNext) {
        break;
      }
    }
  } finally {
    if (iScanner != null) {
      iScanner.close();
    }
  }
  return allPerms;
}
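A short illustration of how the returned map could be consumed; the dumpAcls name, the Region handle and the logging are assumptions for this sketch, not part of AccessControlLists:

// Illustrative only: dump every ACL entry loaded from the _acl_ region.
static void dumpAcls(Region aclRegion) throws IOException {
  Map<byte[], ListMultimap<String, TablePermission>> allPerms = loadAll(aclRegion);
  for (Map.Entry<byte[], ListMultimap<String, TablePermission>> e : allPerms.entrySet()) {
    String entryName = Bytes.toString(e.getKey());
    // entries() flattens the multimap into (user, permission) pairs
    for (Map.Entry<String, TablePermission> perm : e.getValue().entries()) {
      LOG.debug("entry=" + entryName + ", user=" + perm.getKey() + ", permission=" + perm.getValue());
    }
  }
}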
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class AccessControlLists, method parsePermissions:
private static ListMultimap<String, TablePermission> parsePermissions(byte[] entryName,
    Result result) {
  ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
  if (result != null && result.size() > 0) {
    for (Cell kv : result.rawCells()) {
      Pair<String, TablePermission> permissionsOfUserOnTable = parsePermissionRecord(entryName, kv);
      if (permissionsOfUserOnTable != null) {
        String username = permissionsOfUserOnTable.getFirst();
        TablePermission permissions = permissionsOfUserOnTable.getSecond();
        perms.put(username, permissions);
      }
    }
  }
  return perms;
}
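A hypothetical caller might obtain the Result from a Get against the ACL table and hand it to this method; the loadTablePerms helper and the Table handle below are illustrative assumptions, not the actual AccessControlLists entry point:

// Sketch: read the ACL row for one table and parse the permissions stored in it.
static ListMultimap<String, TablePermission> loadTablePerms(Table aclTable, TableName tableName)
    throws IOException {
  Get get = new Get(tableName.getName());
  get.addFamily(ACL_LIST_FAMILY);
  Result result = aclTable.get(get);
  return parsePermissions(tableName.getName(), result);
}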
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class ChainWALEntryFilter, method filterCells:
private void filterCells(Entry entry) {
  if (entry == null || cellFilters.length == 0) {
    return;
  }
  ArrayList<Cell> cells = entry.getEdit().getCells();
  int size = cells.size();
  for (int i = size - 1; i >= 0; i--) {
    Cell cell = cells.get(i);
    for (WALCellFilter filter : cellFilters) {
      cell = filter.filterCell(entry, cell);
      if (cell != null) {
        cells.set(i, cell);
      } else {
        cells.remove(i);
        break;
      }
    }
  }
  if (cells.size() < size / 2) {
    cells.trimToSize();
  }
}
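For context, a WALCellFilter that this loop could run might look like the sketch below; the class name and excluded family are invented for illustration, only the contract (return the cell to keep it, return null to drop it) comes from the code above:

// Sketch of a cell filter: drop every cell in one column family, keep the rest.
public class DropFamilyCellFilter implements WALCellFilter {
  private final byte[] excludedFamily = Bytes.toBytes("excluded_cf");

  @Override
  public Cell filterCell(Entry entry, Cell cell) {
    // Returning null makes filterCells() remove the cell from the WALEdit.
    return CellUtil.matchingFamily(cell, excludedFamily) ? null : cell;
  }
}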
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class ProtobufLogWriter, method append:
@Override
public void append(Entry entry) throws IOException {
  entry.setCompressionContext(compressionContext);
  entry.getKey().getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build()
      .writeDelimitedTo(output);
  for (Cell cell : entry.getEdit().getCells()) {
    // cellEncoder must assume little about the stream, since we write PB and cells in turn.
    cellEncoder.write(cell);
  }
  length.set(output.getPos());
}
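The setFollowingKvCount call is what tells a reader how many cells to decode after each protobuf-encoded key. A rough sketch of the matching read path, with input, cellDecoder and the exception handling as assumed placeholder names rather than the actual ProtobufLogReader fields:

// Sketch: read one delimited WALKey, then exactly followingKvCount cells.
// The real ProtobufLogReader handles more cases (trailers, partial writes, compression).
WALProtos.WALKey.Builder builder = WALProtos.WALKey.newBuilder();
if (!builder.mergeDelimitedFrom(input)) {
  throw new EOFException("No more entries in this WAL");
}
WALProtos.WALKey walKey = builder.build();
int expectedCells = walKey.getFollowingKvCount();
for (int i = 0; i < expectedCells; i++) {
  if (!cellDecoder.advance()) {
    throw new EOFException("Expected " + expectedCells + " cells, got " + i);
  }
  Cell cell = cellDecoder.current();
  // ... add the cell to the WALEdit being rebuilt for this entry
}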