Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class SplitAllRegionOfTableAction, method perform.
@Override
public void perform() throws Exception {
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  // Don't try the split if we're stopping
  if (context.isStopping()) {
    return;
  }
  // Don't always split. This should allow splitting of a full table later in the run
  if (ThreadLocalRandom.current().nextDouble()
      < (((double) splits) / ((double) maxFullTableSplits)) / ((double) 2)) {
    splits++;
    getLogger().info("Performing action: Split all regions of {}", tableName);
    admin.split(tableName);
  } else {
    getLogger().info("Skipping split of all regions.");
  }
}
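The split decision above is a plain threshold comparison: the chance of a full-table split starts at zero and grows as more splits complete, which is what lets a full table get split later in the run. A minimal standalone sketch of that computation, using a hypothetical cap of 3 full-table splits (the counter values are illustrative, not taken from the action):

import java.util.concurrent.ThreadLocalRandom;

public class SplitChanceSketch {
  public static void main(String[] args) {
    int maxFullTableSplits = 3; // hypothetical cap on full-table splits
    for (int splits = 0; splits <= maxFullTableSplits; splits++) {
      // Same expression as in perform(): the threshold rises by
      // 1 / (2 * maxFullTableSplits) after each completed split.
      double threshold = (((double) splits) / ((double) maxFullTableSplits)) / ((double) 2);
      boolean wouldSplit = ThreadLocalRandom.current().nextDouble() < threshold;
      System.out.printf("splits=%d threshold=%.3f wouldSplit=%b%n", splits, threshold, wouldSplit);
    }
  }
}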
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class TestHFileOutputFormat2, method doIncrementalLoadTest.
private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality,
    boolean putSortReducer, List<String> tableStr) throws Exception {
  util = new HBaseTestingUtil();
  Configuration conf = util.getConfiguration();
  conf.setBoolean(MultiTableHFileOutputFormat.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
  int hostCount = 1;
  int regionNum = 5;
  if (shouldKeepLocality) {
    // We should raise the host count above the HDFS replica count once MiniHBaseCluster
    // supports an explicit hostnames parameter, just like MiniDFSCluster does.
    hostCount = 3;
    regionNum = 20;
  }
  String[] hostnames = new String[hostCount];
  for (int i = 0; i < hostCount; ++i) {
    hostnames[i] = "datanode_" + i;
  }
  StartTestingClusterOption option = StartTestingClusterOption.builder()
      .numRegionServers(hostCount).dataNodeHosts(hostnames).build();
  util.startMiniCluster(option);
  Map<String, Table> allTables = new HashMap<>(tableStr.size());
  List<HFileOutputFormat2.TableInfo> tableInfo = new ArrayList<>(tableStr.size());
  boolean writeMultipleTables = tableStr.size() > 1;
  for (String tableStrSingle : tableStr) {
    byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
    TableName tableName = TableName.valueOf(tableStrSingle);
    Table table = util.createTable(tableName, FAMILIES, splitKeys);
    RegionLocator r = util.getConnection().getRegionLocator(tableName);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    int numRegions = r.getStartKeys().length;
    assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);
    allTables.put(tableStrSingle, table);
    tableInfo.add(new HFileOutputFormat2.TableInfo(table.getDescriptor(), r));
  }
  Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
  // Generate the bulk load files
  runIncrementalPELoad(conf, tableInfo, testDir, putSortReducer);
  if (writeMultipleTables) {
    testDir = new Path(testDir, "default");
  }
  for (Table tableSingle : allTables.values()) {
    // This doesn't write into the table, just makes files
    assertEquals("HFOF should not touch actual table", 0, util.countRows(tableSingle));
  }
  int numTableDirs = 0;
  FileStatus[] fss = testDir.getFileSystem(conf).listStatus(testDir);
  for (FileStatus tf : fss) {
    Path tablePath = testDir;
    if (writeMultipleTables) {
      if (allTables.containsKey(tf.getPath().getName())) {
        ++numTableDirs;
        tablePath = tf.getPath();
      } else {
        continue;
      }
    }
    // Make sure that a directory was created for every CF
    int dir = 0;
    fss = tablePath.getFileSystem(conf).listStatus(tablePath);
    for (FileStatus f : fss) {
      for (byte[] family : FAMILIES) {
        if (Bytes.toString(family).equals(f.getPath().getName())) {
          ++dir;
        }
      }
    }
    assertEquals("Column family not found in FS.", FAMILIES.length, dir);
  }
  if (writeMultipleTables) {
    assertEquals("Dir for all input tables not created", numTableDirs, allTables.size());
  }
  Admin admin = util.getConnection().getAdmin();
  try {
    // handle the split case
    if (shouldChangeRegions) {
      Table chosenTable = allTables.values().iterator().next();
      // Choose a semi-random table if multiple tables are available
      LOG.info("Changing regions in table " + chosenTable.getName().getNameAsString());
      admin.disableTable(chosenTable.getName());
      util.waitUntilNoRegionsInTransition();
      util.deleteTable(chosenTable.getName());
      byte[][] newSplitKeys = generateRandomSplitKeys(14);
      Table table = util.createTable(chosenTable.getName(), FAMILIES, newSplitKeys);
      while (util.getConnection().getRegionLocator(chosenTable.getName())
          .getAllRegionLocations().size() != 15 || !admin.isTableAvailable(table.getName())) {
        Thread.sleep(200);
        LOG.info("Waiting for new region assignment to happen");
      }
    }
    // Perform the actual load
    for (HFileOutputFormat2.TableInfo singleTableInfo : tableInfo) {
      Path tableDir = testDir;
      String tableNameStr = singleTableInfo.getTableDescriptor().getTableName().getNameAsString();
      LOG.info("Running BulkLoadHFiles on table " + tableNameStr);
      if (writeMultipleTables) {
        tableDir = new Path(testDir, tableNameStr);
      }
      Table currentTable = allTables.get(tableNameStr);
      TableName currentTableName = currentTable.getName();
      BulkLoadHFiles.create(conf).bulkLoad(currentTableName, tableDir);
      // Ensure data shows up
      int expectedRows = 0;
      if (putSortReducer) {
        // no rows should be extracted
        assertEquals("BulkLoadHFiles should put expected data in table", expectedRows,
            util.countRows(currentTable));
      } else {
        expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
        assertEquals("BulkLoadHFiles should put expected data in table", expectedRows,
            util.countRows(currentTable));
        Scan scan = new Scan();
        ResultScanner results = currentTable.getScanner(scan);
        for (Result res : results) {
          assertEquals(FAMILIES.length, res.rawCells().length);
          Cell first = res.rawCells()[0];
          for (Cell kv : res.rawCells()) {
            assertTrue(CellUtil.matchingRows(first, kv));
            assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
          }
        }
        results.close();
      }
      String tableDigestBefore = util.checksumRows(currentTable);
      // Check region locality
      HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
      for (HRegion region : util.getHBaseCluster().getRegions(currentTableName)) {
        hbd.add(region.getHDFSBlocksDistribution());
      }
      for (String hostname : hostnames) {
        float locality = hbd.getBlockLocalityIndex(hostname);
        LOG.info("locality of [" + hostname + "]: " + locality);
        assertEquals(100, (int) (locality * 100));
      }
      // Cause regions to reopen
      admin.disableTable(currentTableName);
      while (!admin.isTableDisabled(currentTableName)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(currentTableName);
      util.waitTableAvailable(currentTableName);
      assertEquals("Data should remain after reopening of regions", tableDigestBefore,
          util.checksumRows(currentTable));
    }
  } finally {
    for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) {
      tableInfoSingle.getRegionLocator().close();
    }
    for (Entry<String, Table> singleTable : allTables.entrySet()) {
      singleTable.getValue().close();
      util.deleteTable(singleTable.getValue().getName());
    }
    testDir.getFileSystem(conf).delete(testDir, true);
    util.shutdownMiniCluster();
  }
}
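Stripped of assertions and mini-cluster plumbing, the incremental-load flow the test exercises is compact: configure the job against the table's current layout, run it to produce HFiles, then hand those files to the region servers. A hedged sketch under the assumption that an input format and a mapper emitting Put objects are configured separately; the table name and output path are placeholders, not from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("exampleTable"); // placeholder name
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(tableName);
        RegionLocator locator = conn.getRegionLocator(tableName)) {
      Job job = Job.getInstance(conf, "bulk-load-sketch");
      // ... input format and a mapper emitting ImmutableBytesWritable/Put go here ...
      // Configure partitioning and HFile output to match the table's regions
      HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
      Path out = new Path("/tmp/hfiles-out"); // placeholder output directory
      FileOutputFormat.setOutputPath(job, out);
      if (job.waitForCompletion(true)) {
        // Hand the generated HFiles to the region servers
        BulkLoadHFiles.create(conf).bulkLoad(tableName, out);
      }
    }
  }
}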
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class TestHFileOutputFormat2, method manualTest.
public void manualTest(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  util = new HBaseTestingUtil(conf);
  if ("newtable".equals(args[0])) {
    TableName tname = TableName.valueOf(args[1]);
    byte[][] splitKeys = generateRandomSplitKeys(4);
    Table table = util.createTable(tname, FAMILIES, splitKeys);
  } else if ("incremental".equals(args[0])) {
    TableName tname = TableName.valueOf(args[1]);
    try (Connection c = ConnectionFactory.createConnection(conf);
        Admin admin = c.getAdmin();
        RegionLocator regionLocator = c.getRegionLocator(tname)) {
      Path outDir = new Path("incremental-out");
      runIncrementalPELoad(conf,
          Arrays.asList(new HFileOutputFormat2.TableInfo(admin.getDescriptor(tname), regionLocator)),
          outDir, false);
    }
  } else {
    throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental");
  }
}
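As the branches show, the driver takes a mode plus a table name: "newtable" creates a pre-split table, while "incremental" runs the load against an existing table and writes HFiles under "incremental-out". A hypothetical programmatic invocation (the table name is illustrative):

// Hypothetical invocation of the manual driver above:
TestHFileOutputFormat2 driver = new TestHFileOutputFormat2();
driver.manualTest(new String[] { "newtable", "exampleTable" });
driver.manualTest(new String[] { "incremental", "exampleTable" });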
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class TestHFileOutputFormat2, method testMRIncrementalLoadWithLocalityMultiCluster.
@Test
public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception {
  // Start cluster A
  util = new HBaseTestingUtil();
  Configuration confA = util.getConfiguration();
  int hostCount = 3;
  int regionNum = 20;
  String[] hostnames = new String[hostCount];
  for (int i = 0; i < hostCount; ++i) {
    hostnames[i] = "datanode_" + i;
  }
  StartTestingClusterOption option = StartTestingClusterOption.builder()
      .numRegionServers(hostCount).dataNodeHosts(hostnames).build();
  util.startMiniCluster(option);
  // Start cluster B
  HBaseTestingUtil utilB = new HBaseTestingUtil();
  Configuration confB = utilB.getConfiguration();
  utilB.startMiniCluster(option);
  Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
  byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
  TableName tableName = TableName.valueOf("table");
  // Create table in cluster B
  try (Table table = utilB.createTable(tableName, FAMILIES, splitKeys);
      RegionLocator r = utilB.getConnection().getRegionLocator(tableName)) {
    // Generate the bulk load files
    // Job has zookeeper configuration for cluster A
    // Assume we read from cluster A via TableInputFormat and write HFiles to cluster B
    Job job = new Job(confA, "testLocalMRIncrementalLoad");
    Configuration jobConf = job.getConfiguration();
    final UUID key = ConfigurationCaptorConnection.configureConnectionImpl(jobConf);
    job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
    setupRandomGeneratorMapper(job, false);
    HFileOutputFormat2.configureIncrementalLoad(job, table, r);
    assertEquals(confB.get(HConstants.ZOOKEEPER_QUORUM),
        jobConf.get(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY));
    assertEquals(confB.get(HConstants.ZOOKEEPER_CLIENT_PORT),
        jobConf.get(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY));
    assertEquals(confB.get(HConstants.ZOOKEEPER_ZNODE_PARENT),
        jobConf.get(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY));
    String bSpecificConfigKey = "my.override.config.for.b";
    String bSpecificConfigValue = "b-specific-value";
    jobConf.set(HFileOutputFormat2.REMOTE_CLUSTER_CONF_PREFIX + bSpecificConfigKey,
        bSpecificConfigValue);
    FileOutputFormat.setOutputPath(job, testDir);
    assertFalse(util.getTestFileSystem().exists(testDir));
    assertTrue(job.waitForCompletion(true));
    final List<Configuration> configs = ConfigurationCaptorConnection.getCapturedConfigarutions(key);
    assertFalse(configs.isEmpty());
    for (Configuration config : configs) {
      assertEquals(confB.get(HConstants.ZOOKEEPER_QUORUM), config.get(HConstants.ZOOKEEPER_QUORUM));
      assertEquals(confB.get(HConstants.ZOOKEEPER_CLIENT_PORT),
          config.get(HConstants.ZOOKEEPER_CLIENT_PORT));
      assertEquals(confB.get(HConstants.ZOOKEEPER_ZNODE_PARENT),
          config.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
      assertEquals(bSpecificConfigValue, config.get(bSpecificConfigKey));
    }
  } finally {
    utilB.deleteTable(tableName);
    testDir.getFileSystem(confA).delete(testDir, true);
    util.shutdownMiniCluster();
    utilB.shutdownMiniCluster();
  }
}
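The assertions above pin down how configureIncrementalLoad copies cluster B's ZooKeeper settings into the job under HFileOutputFormat2's remote-cluster keys, and how a prefixed key is forwarded to the remote connection unprefixed. A hedged sketch of wiring those same keys by hand when no RegionLocator for the target cluster is at hand; the quorum, port, and znode values are placeholders:

// Hypothetical manual wiring of the remote-cluster keys asserted in the test.
Configuration jobConf = job.getConfiguration();
jobConf.set(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY, "zk1,zk2,zk3");
jobConf.set(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY, "2181");
jobConf.set(HFileOutputFormat2.REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY, "/hbase");
// Overrides travel under the prefix and reach the remote connection unprefixed:
jobConf.set(HFileOutputFormat2.REMOTE_CLUSTER_CONF_PREFIX + "hbase.client.retries.number", "10");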
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class TestReplicaWithCluster, method testReplicaAndReplication.
@SuppressWarnings("deprecation")
@Test
public void testReplicaAndReplication() throws Exception {
  TableDescriptorBuilder builder = HTU.createModifyableTableDescriptor("testReplicaAndReplication");
  builder.setRegionReplication(NB_SERVERS);
  builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(row)
      .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build());
  builder.setCoprocessor(SlowMeCopro.class.getName());
  TableDescriptor tableDescriptor = builder.build();
  HTU.getAdmin().createTable(tableDescriptor, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE);
  Configuration conf2 = HBaseConfiguration.create(HTU.getConfiguration());
  conf2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  MiniZooKeeperCluster miniZK = HTU.getZkCluster();
  HTU2 = new HBaseTestingUtil(conf2);
  HTU2.setZkCluster(miniZK);
  HTU2.startMiniCluster(NB_SERVERS);
  LOG.info("Setup second Zk");
  HTU2.getAdmin().createTable(tableDescriptor, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE);
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Admin admin = connection.getAdmin()) {
    ReplicationPeerConfig rpc =
        ReplicationPeerConfig.newBuilder().setClusterKey(HTU2.getClusterKey()).build();
    admin.addReplicationPeer("2", rpc);
  }
  Put p = new Put(row);
  p.addColumn(row, row, row);
  final Table table = HTU.getConnection().getTable(tableDescriptor.getTableName());
  table.put(p);
  HTU.getAdmin().flush(table.getName());
  LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster.");
  Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        SlowMeCopro.cdl.set(new CountDownLatch(1));
        Get g = new Get(row);
        g.setConsistency(Consistency.TIMELINE);
        Result r = table.get(g);
        Assert.assertTrue(r.isStale());
        return !r.isEmpty();
      } finally {
        SlowMeCopro.cdl.get().countDown();
        SlowMeCopro.sleepTime.set(0);
      }
    }
  });
  table.close();
  LOG.info("stale get on the first cluster done. Now for the second.");
  final Table table2 = HTU2.getConnection().getTable(tableDescriptor.getTableName());
  Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        SlowMeCopro.cdl.set(new CountDownLatch(1));
        Get g = new Get(row);
        g.setConsistency(Consistency.TIMELINE);
        Result r = table2.get(g);
        Assert.assertTrue(r.isStale());
        return !r.isEmpty();
      } finally {
        SlowMeCopro.cdl.get().countDown();
        SlowMeCopro.sleepTime.set(0);
      }
    }
  });
  table2.close();
  HTU.getAdmin().disableTable(tableDescriptor.getTableName());
  HTU.deleteTable(tableDescriptor.getTableName());
  HTU2.getAdmin().disableTable(tableDescriptor.getTableName());
  HTU2.deleteTable(tableDescriptor.getTableName());
  // We shut down the HTU2 minicluster later, in afterClass(), as shutting down
  // the minicluster has the negative side effect of deleting all HConnections in the JVM.
}
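Both waiters follow the same client-side idiom: a timeline-consistent read that must come back stale because SlowMeCopro holds up the primary replica. The core pattern in isolation, assuming table and row exist as in the test:

// Minimal sketch of a timeline-consistent read, as exercised above.
Get g = new Get(row);
g.setConsistency(Consistency.TIMELINE); // allow secondary region replicas to answer
Result r = table.get(g);
if (r.isStale()) {
  // Served by a secondary replica; the value may lag the primary.
}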