Usage example of org.apache.hadoop.hbase.client.replication.TableCFs in the Apache HBase project:
class DumpReplicationQueues, method dumpReplicationQueues.
/**
 * Dumps the replication peer configurations and replication queues, either by
 * polling each RegionServer (when {@code --distributed} is set) or by reading
 * the replication znodes directly from ZooKeeper.
 *
 * @param opts parsed command-line options controlling distributed/HDFS output
 * @return 0 on success, -1 if an {@link IOException} occurred
 * @throws Exception if connection setup or ZooKeeper watcher creation fails
 */
private int dumpReplicationQueues(DumpOptions opts) throws Exception {
  Configuration conf = getConf();
  // try-with-resources: the original leaked the Connection and Admin on every
  // exit path (only the ZKWatcher was closed); all three are AutoCloseable.
  try (Connection connection = ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin();
    ZKWatcher zkw = new ZKWatcher(conf,
      "DumpReplicationQueues" + EnvironmentEdgeManager.currentTime(),
      new WarnOnlyAbortable(), true)) {
    // Our zk watcher
    LOG.info("Our Quorum: " + zkw.getQuorum());
    List<TableCFs> replicatedTableCFs = admin.listReplicatedTableCFs();
    if (replicatedTableCFs.isEmpty()) {
      LOG.info("No tables with a configured replication peer were found.");
      return 0;
    } else {
      LOG.info("Replicated Tables: " + replicatedTableCFs);
    }
    List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
    if (peers.isEmpty()) {
      LOG.info("Replication is enabled but no peer configuration was found.");
    }
    System.out.println("Dumping replication peers and configurations:");
    System.out.println(dumpPeersState(peers));
    if (opts.isDistributed()) {
      LOG.info("Found [--distributed], will poll each RegionServer.");
      Set<String> peerIds =
        peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet());
      System.out.println(dumpQueues(zkw, peerIds, opts.isHdfs()));
      System.out.println(dumpReplicationSummary());
    } else {
      // use ZK instead
      System.out.print("Dumping replication znodes via ZooKeeper:");
      System.out.println(ZKDump.getReplicationZnodesDump(zkw));
    }
    return 0;
  } catch (IOException e) {
    // Log before reporting failure — the original silently swallowed the cause.
    LOG.error("Failed to dump replication queues", e);
    return -1;
  }
}
Usage example of org.apache.hadoop.hbase.client.replication.TableCFs in the Apache HBase project:
class HBaseAdmin, method listReplicatedTableCFs.
/**
 * Lists, for every table, the column families whose replication scope is not
 * {@link HConstants#REPLICATION_SCOPE_LOCAL}, i.e. the families eligible for
 * replication to a peer cluster.
 *
 * @return one {@code TableCFs} entry per table that has at least one
 *         replicated column family; empty list if none do
 * @throws IOException if listing the tables fails
 */
@Override
public List<TableCFs> listReplicatedTableCFs() throws IOException {
  List<TableCFs> result = new ArrayList<>();
  for (HTableDescriptor tableDesc : listTables()) {
    // family name -> replication scope, for non-local families only
    Map<String, Integer> replicatedFamilies = new HashMap<>();
    for (HColumnDescriptor family : tableDesc.getColumnFamilies()) {
      int scope = family.getScope();
      if (scope != HConstants.REPLICATION_SCOPE_LOCAL) {
        replicatedFamilies.put(family.getNameAsString(), scope);
      }
    }
    // Tables with no replicated families are omitted entirely.
    if (!replicatedFamilies.isEmpty()) {
      result.add(new TableCFs(tableDesc.getTableName(), replicatedFamilies));
    }
  }
  return result;
}
Usage example of org.apache.hadoop.hbase.client.replication.TableCFs in the Apache HBase project:
class RawAsyncHBaseAdmin, method listReplicatedTableCFs.
/**
 * Async variant: lists, per table, the column families whose replication scope
 * is not {@link HConstants#REPLICATION_SCOPE_LOCAL}.
 *
 * @return a future completing with one {@code TableCFs} per table that has at
 *         least one replicated column family; completes exceptionally if the
 *         underlying table-descriptor listing fails
 */
@Override
public CompletableFuture<List<TableCFs>> listReplicatedTableCFs() {
  CompletableFuture<List<TableCFs>> future = new CompletableFuture<>();
  addListener(listTableDescriptors(), (tables, error) -> {
    // Guard clause: if listing failed, the future was already completed
    // exceptionally by completeExceptionally() and there is nothing to do.
    if (completeExceptionally(future, error)) {
      return;
    }
    List<TableCFs> replicated = new ArrayList<>();
    tables.forEach(table -> {
      // family name -> replication scope, for non-local families only
      Map<String, Integer> scopes = new HashMap<>();
      Stream.of(table.getColumnFamilies())
        .filter(cf -> cf.getScope() != HConstants.REPLICATION_SCOPE_LOCAL)
        .forEach(cf -> scopes.put(cf.getNameAsString(), cf.getScope()));
      if (!scopes.isEmpty()) {
        replicated.add(new TableCFs(table.getTableName(), scopes));
      }
    });
    future.complete(replicated);
  });
  return future;
}
Usage example of org.apache.hadoop.hbase.client.replication.TableCFs in the Apache HBase project:
class TestReplicationSmallTests, method testVerifyListReplicatedTable.
/**
 * Test for HBASE-8663
 * <p>
 * Create two new Tables with colfamilies enabled for replication then run
 * {@link Admin#listReplicatedTableCFs()}. Finally verify the table:colfamilies.
 */
@Test
public void testVerifyListReplicatedTable() throws Exception {
  LOG.info("testVerifyListReplicatedTable");
  final String tName = "VerifyListReplicated_";
  final String colFam = "cf1";
  final int numOfTables = 3;
  Admin hadmin = UTIL1.getAdmin();
  // try/finally so the test tables are dropped and the admin is closed even
  // when an assertion fails — the original leaked both on failure, which
  // could pollute subsequent tests in the class.
  try {
    // Create Tables
    for (int i = 0; i < numOfTables; i++) {
      hadmin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(tName + i))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(colFam))
          .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .build());
    }
    // verify the result
    List<TableCFs> replicationColFams = hbaseAdmin.listReplicatedTableCFs();
    // array of 3 with init value of zero
    int[] match = new int[numOfTables];
    for (int i = 0; i < replicationColFams.size(); i++) {
      TableCFs replicationEntry = replicationColFams.get(i);
      String tn = replicationEntry.getTable().getNameAsString();
      if (tn.startsWith(tName) && replicationEntry.getColumnFamilyMap().containsKey(colFam)) {
        // get the last digit; valid because numOfTables < 10
        int m = Integer.parseInt(tn.substring(tn.length() - 1));
        // should only increase once
        match[m]++;
      }
    }
    // check the matching result
    for (int i = 0; i < match.length; i++) {
      assertTrue("listReplicated() does not match table " + i, (match[i] == 1));
    }
  } finally {
    // drop tables; guard with tableExists in case creation failed part-way,
    // so cleanup does not mask the original failure
    for (int i = 0; i < numOfTables; i++) {
      TableName tableName = TableName.valueOf(tName + i);
      if (hadmin.tableExists(tableName)) {
        hadmin.disableTable(tableName);
        hadmin.deleteTable(tableName);
      }
    }
    hadmin.close();
  }
}
Aggregations