use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class ReplicationSink method replicateEntries.
/**
 * Replicate this array of entries directly into the local cluster using the native client. It
 * operates only against the raw protobuf types, saving a conversion from pb to pojo.
 * @param entries the WAL entries to replicate
 * @param cells a CellScanner positioned over the cells backing the entries
 * @param replicationClusterId Id which will uniquely identify source cluster FS client
 *          configurations in the replication configuration directory
 * @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
 *          directory
 * @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
 * @throws IOException If failed to replicate the data
 */
public void replicateEntries(List<WALEntry> entries, final CellScanner cells,
    String replicationClusterId, String sourceBaseNamespaceDirPath,
    String sourceHFileArchiveDirPath) throws IOException {
  if (entries.isEmpty()) {
    return;
  }
  if (cells == null) {
    throw new NullPointerException("TODO: Add handling of null CellScanner");
  }
  // Very simple optimization where we batch sequences of rows going to the same table.
  try {
    long totalReplicated = 0;
    // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
    // invocation of this method per table and cluster id.
    Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
    // Map of table name vs list of pair of family and list of hfile paths from its namespace
    Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = null;
    for (WALEntry entry : entries) {
      TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
      Cell previousCell = null;
      Mutation m = null;
      int count = entry.getAssociatedCellCount();
      for (int i = 0; i < count; i++) {
        // Throw index out of bounds if our cell count is off
        if (!cells.advance()) {
          throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
        }
        Cell cell = cells.current();
        // Handle bulk load hfiles replication
        if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
          if (bulkLoadHFileMap == null) {
            bulkLoadHFileMap = new HashMap<>();
          }
          buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell);
        } else {
          // Handle wal replication
          if (isNewRowOrType(previousCell, cell)) {
            // Create new mutation
            m = CellUtil.isDelete(cell)
                ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
                : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
            for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
              clusterIds.add(toUUID(clusterId));
            }
            m.setClusterIds(clusterIds);
            addToHashMultiMap(rowMap, table, clusterIds, m);
          }
          if (CellUtil.isDelete(cell)) {
            ((Delete) m).addDeleteMarker(cell);
          } else {
            ((Put) m).add(cell);
          }
          previousCell = cell;
        }
      }
      totalReplicated++;
    }
    // TODO Replicating mutations and bulk loaded data can be made parallel
    if (!rowMap.isEmpty()) {
      LOG.debug("Started replicating mutations.");
      for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
        batch(entry.getKey(), entry.getValue().values());
      }
      LOG.debug("Finished replicating mutations.");
    }
    if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
      LOG.debug("Started replicating bulk loaded data.");
      HFileReplicator hFileReplicator =
          new HFileReplicator(this.provider.getConf(this.conf, replicationClusterId),
              sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf,
              getConnection());
      hFileReplicator.replicate();
      LOG.debug("Finished replicating bulk loaded data.");
    }
    int size = entries.size();
    this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
    this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
    this.totalReplicatedEdits.addAndGet(totalReplicated);
  } catch (IOException ex) {
    LOG.error("Unable to accept edit because:", ex);
    throw ex;
  }
}
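The method leans on two private helpers whose bodies are not shown here: isNewRowOrType decides when a cell starts a new Mutation, and addToHashMultiMap does the table/cluster-id grouping. Below is a minimal sketch of what they plausibly look like, inferred purely from the call sites above; the actual implementations in ReplicationSink may differ.

// Sketch only, inferred from usage above: a cell starts a new mutation when
// there is no previous cell, the cell type flipped (Put vs Delete), or the row changed.
private boolean isNewRowOrType(final Cell previousCell, final Cell cell) {
  return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte()
      || !CellUtil.matchingRow(previousCell, cell);
}

// Sketch only: files a Row under its table and the cluster-id list that has
// already seen it, creating the nested map/list on first use.
private <K1, K2, V> List<V> addToHashMultiMap(Map<K1, Map<K2, List<V>>> map, K1 key1, K2 key2,
    V value) {
  Map<K2, List<V>> innerMap = map.computeIfAbsent(key1, k -> new HashMap<>());
  List<V> values = innerMap.computeIfAbsent(key2, k -> new ArrayList<>());
  values.add(value);
  return values;
}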
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class ReaderBase method next.
@Override
public Entry next(Entry reuse) throws IOException {
  Entry e = reuse;
  if (e == null) {
    e = new Entry(new WALKey(), new WALEdit());
  }
  if (compressionContext != null) {
    e.setCompressionContext(compressionContext);
  }
  boolean hasEntry = false;
  try {
    hasEntry = readNext(e);
  } catch (IllegalArgumentException iae) {
    TableName tableName = e.getKey().getTablename();
    if (tableName != null && tableName.equals(TableName.OLD_ROOT_TABLE_NAME)) {
      // It is an old ROOT table edit, ignore it
      LOG.info("Got an old ROOT edit, ignoring ");
      return next(e);
    } else {
      throw iae;
    }
  }
  edit++;
  if (compressionContext != null && emptyCompressionContext) {
    emptyCompressionContext = false;
  }
  return hasEntry ? e : null;
}
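For context, callers usually drive this method indirectly through the no-argument WAL.Reader.next(), which delegates to next(null). A minimal, hypothetical read loop follows; fs, walPath, and conf are placeholders supplied by the caller, while WALFactory.createReader and WAL.Reader are the HBase APIs of this era.

// Hypothetical usage sketch: iterate every entry in one WAL file.
try (WAL.Reader reader = WALFactory.createReader(fs, walPath, conf)) {
  WAL.Entry entry;
  while ((entry = reader.next()) != null) { // next() delegates to next(null)
    System.out.println(entry.getKey() + ": " + entry.getEdit().size() + " cell(s)");
  }
}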
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class ZKDataMigrator method queryForTableStates.
/**
 * Method for table states migration.
 * Used when upgrading from pre-2.0 to 2.0: reads each table's state from zk
 * so it can be applied to the internal state and then deleted.
 * Used by the master to clean up the migration from zk-based table states to
 * table-descriptor-based states.
 */
@Deprecated
public static Map<TableName, TableState.State> queryForTableStates(ZooKeeperWatcher zkw)
    throws KeeperException, InterruptedException {
  Map<TableName, TableState.State> rv = new HashMap<>();
  List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.tableZNode);
  if (children == null) {
    return rv;
  }
  for (String child : children) {
    TableName tableName = TableName.valueOf(child);
    ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
    TableState.State newState = TableState.State.ENABLED;
    if (state != null) {
      switch (state) {
        case ENABLED:
          newState = TableState.State.ENABLED;
          break;
        case DISABLED:
          newState = TableState.State.DISABLED;
          break;
        case DISABLING:
          newState = TableState.State.DISABLING;
          break;
        case ENABLING:
          newState = TableState.State.ENABLING;
          break;
        default:
      }
    }
    rv.put(tableName, newState);
  }
  return rv;
}
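A caller on the master side would then persist the returned map into the new table-descriptor based store. The sketch below is hypothetical: the tableStateManager variable and the setTableState(...) call are assumptions for illustration, not wiring taken from the snippet above.

// Hypothetical sketch of consuming the migrated states.
Map<TableName, TableState.State> migrated = ZKDataMigrator.queryForTableStates(zkw);
for (Map.Entry<TableName, TableState.State> e : migrated.entrySet()) {
  // Persist each migrated state into the new table-descriptor based store (assumed API).
  tableStateManager.setTableState(e.getKey(), e.getValue());
}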
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class HFileCorruptionChecker method createMobRegionDirChecker.
/**
* Creates an instance of MobRegionDirChecker.
* @param tableDir The current table directory.
* @return An instance of MobRegionDirChecker.
*/
private MobRegionDirChecker createMobRegionDirChecker(Path tableDir) {
  TableName tableName = FSUtils.getTableName(tableDir);
  Path mobDir = MobUtils.getMobRegionPath(conf, tableName);
  return new MobRegionDirChecker(mobDir);
}
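FSUtils.getTableName recovers the namespace and table from the on-disk layout, where a table directory sits at <rootdir>/data/<namespace>/<table>. A quick illustration of the expected mapping; the /hbase prefix here is just an assumed hbase.rootdir.

// Illustration only: a table dir like this maps back to the TableName "ns1:mytable".
Path tableDir = new Path("/hbase/data/ns1/mytable");
TableName tn = FSUtils.getTableName(tableDir);
assert tn.equals(TableName.valueOf("ns1", "mytable"));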
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class IntegrationTestBulkLoad method installSlowingCoproc.
/**
* Modify table {@code getTablename()} to carry {@link SlowMeCoproScanOperations}.
*/
private void installSlowingCoproc() throws IOException, InterruptedException {
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) {
    return;
  }
  TableName t = getTablename();
  Admin admin = util.getAdmin();
  HTableDescriptor desc = admin.getTableDescriptor(t);
  desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());
  HBaseTestingUtility.modifyTableSync(admin, desc);
}
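The coprocessor being installed exists to slow scans down and widen race windows in the integration test. A minimal sketch of such an observer follows; it is not the actual SlowMeCoproScanOperations source, and it assumes the pre-2.0 BaseRegionObserver API with an illustrative sleep interval.

// Sketch of a scan-slowing coprocessor, assuming the pre-2.0 observer API.
public static class SlowMeCoproScanOperations extends BaseRegionObserver {
  @Override
  public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e,
      Scan scan, RegionScanner s) throws IOException {
    try {
      Thread.sleep(100); // delay every scanner open to slow scans down (interval is illustrative)
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
    return s;
  }
}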