Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestClassLoading, method testHBase3810.
@Test
// less strict
public void testHBase3810() throws Exception {
// allowed value pattern: [path] | class name | [priority] | [key values]
File jarFile1 = buildCoprocessorJar(cpName1);
File jarFile2 = buildCoprocessorJar(cpName2);
File jarFile5 = buildCoprocessorJar(cpName5);
File jarFile6 = buildCoprocessorJar(cpName6);
String cpKey1 = "COPROCESSOR$1";
String cpKey2 = " Coprocessor$2 ";
String cpKey3 = " coprocessor$03 ";
String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER;
String cpValue2 = getLocalPath(jarFile2) + " | " + cpName2 + " | ";
// load from default class loader
String cpValue3 = " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v ";
// create a table that references the jar
TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName);
tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
// add 3 coprocessors by setting htd attributes directly.
tdb.setValue(cpKey1, cpValue1);
tdb.setValue(cpKey2, cpValue2);
tdb.setValue(cpKey3, cpValue3);
// add 2 coprocessors by using the new htd.setCoprocessor() API
CoprocessorDescriptor coprocessorDescriptor = CoprocessorDescriptorBuilder.newBuilder(cpName5)
  .setJarPath(new Path(getLocalPath(jarFile5)).toString())
  .setPriority(Coprocessor.PRIORITY_USER)
  .setProperties(Collections.emptyMap())
  .build();
tdb.setCoprocessor(coprocessorDescriptor);
Map<String, String> kvs = new HashMap<>();
kvs.put("k1", "v1");
kvs.put("k2", "v2");
kvs.put("k3", "v3");
coprocessorDescriptor = CoprocessorDescriptorBuilder.newBuilder(cpName6)
  .setJarPath(new Path(getLocalPath(jarFile6)).toString())
  .setPriority(Coprocessor.PRIORITY_USER)
  .setProperties(kvs)
  .build();
tdb.setCoprocessor(coprocessorDescriptor);
Admin admin = TEST_UTIL.getAdmin();
if (admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
}
admin.deleteTable(tableName);
}
TableDescriptor tableDescriptor = tdb.build();
admin.createTable(tableDescriptor);
waitForTable(tableDescriptor.getTableName());
// verify that the coprocessors were loaded
boolean found_2 = false, found_1 = false, found_3 = false, found_5 = false, found_6 = false;
boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, found6_k4 = false;
SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
found_1 = found_1 || (region.getCoprocessorHost().findCoprocessor(cpName1) != null);
found_2 = found_2 || (region.getCoprocessorHost().findCoprocessor(cpName2) != null);
found_3 = found_3 || (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") != null);
found_5 = found_5 || (region.getCoprocessorHost().findCoprocessor(cpName5) != null);
CoprocessorEnvironment env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName6);
if (env != null) {
found_6 = true;
Configuration conf = env.getConfiguration();
found6_k1 = conf.get("k1") != null;
found6_k2 = conf.get("k2") != null;
found6_k3 = conf.get("k3") != null;
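// found6_k4 is intentionally never set: "k4" was not configured on cpName6, so it must stay false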
}
}
}
assertTrue("Class " + cpName1 + " was missing on a region", found_1);
assertTrue("Class " + cpName2 + " was missing on a region", found_2);
assertTrue("Class SimpleRegionObserver was missing on a region", found_3);
assertTrue("Class " + cpName5 + " was missing on a region", found_5);
assertTrue("Class " + cpName6 + " was missing on a region", found_6);
assertTrue("Configuration key 'k1' was missing on a region", found6_k1);
assertTrue("Configuration key 'k2' was missing on a region", found6_k2);
assertTrue("Configuration key 'k3' was missing on a region", found6_k3);
assertFalse("Configuration key 'k4' wasn't configured", found6_k4);
}
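The test above registers coprocessors in two ways: by writing raw table attributes in the "[path] | class name | [priority] | [key values]" format, and through the structured CoprocessorDescriptorBuilder API. Below is a minimal, self-contained sketch contrasting the two equivalent styles; the table name, jar path, and observer class are hypothetical placeholders, not values from the test.

import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CoprocessorRegistrationSketch {
  public static void main(String[] args) throws IOException {
    TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")));

    // Style 1: raw attribute value, "[path] | class name | [priority] | [key values]".
    // Path and priority are optional; an empty path means "load from the default class loader".
    tdb.setValue("COPROCESSOR$1",
      "hdfs:///tmp/cp.jar | com.example.MyObserver | " + Coprocessor.PRIORITY_USER + " | k=v");

    // Style 2: the structured builder API, equivalent to the raw attribute above.
    tdb.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder("com.example.MyObserver")
      .setJarPath("hdfs:///tmp/cp.jar")
      .setPriority(Coprocessor.PRIORITY_USER)
      .setProperties(Collections.singletonMap("k", "v"))
      .build());
  }
}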
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestSeparateClientZKCluster, method testMetaRegionMove.
@Test
public void testMetaRegionMove() throws Exception {
TableName tn = name.getTableName();
// create table
Connection conn = TEST_UTIL.getConnection();
try (Admin admin = conn.getAdmin();
Table table = conn.getTable(tn);
RegionLocator locator = conn.getRegionLocator(tn)) {
SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
ColumnFamilyDescriptorBuilder cfDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family);
TableDescriptorBuilder tableDescBuilder = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build());
admin.createTable(tableDescBuilder.build());
// issue some requests to cache the region location
Put put = new Put(row);
put.addColumn(family, qualifier, value);
table.put(put);
Get get = new Get(row);
Result result = table.get(get);
// move the meta region and confirm the client can detect the move
ServerName destServerName = null;
for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
ServerName name = rst.getRegionServer().getServerName();
if (!name.equals(cluster.getServerHoldingMeta())) {
destServerName = name;
break;
}
}
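// assuming the mini cluster runs more than one region server, destServerName is non-null here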
admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), destServerName);
LOG.debug("Finished moving meta");
// invalidate client cache
RegionInfo region = locator.getRegionLocation(row).getRegion();
ServerName currentServer = cluster.getServerHoldingRegion(tn, region.getRegionName());
for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
ServerName name = rst.getRegionServer().getServerName();
if (!name.equals(currentServer)) {
destServerName = name;
break;
}
}
admin.move(region.getEncodedNameAsBytes(), destServerName);
LOG.debug("Finished moving user region");
put = new Put(row);
put.addColumn(family, qualifier, newVal);
table.put(put);
result = table.get(get);
LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier)));
assertArrayEquals(newVal, result.getValue(family, qualifier));
}
}
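Both moves above scan the live region servers for any server other than the current holder. That pick-another-server loop can be extracted into a small helper; here is a minimal sketch (the class and method names are ours, not from the test):

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;

final class MoveTargets {
  /** Returns the first live region server whose name differs from current, or null if none. */
  static ServerName otherThan(SingleProcessHBaseCluster cluster, ServerName current) {
    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      ServerName candidate = rst.getRegionServer().getServerName();
      if (!candidate.equals(current)) {
        return candidate;
      }
    }
    // only reachable when the cluster has a single region server
    return null;
  }
}

With this helper, the user-region move above reduces to admin.move(region.getEncodedNameAsBytes(), MoveTargets.otherThan(cluster, currentServer)).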
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestBlockReorderMultiBlocks, method testHBaseCluster.
/**
* Test that the hook works within HBase, including when there are multiple blocks.
*/
@Test
public void testHBaseCluster() throws Exception {
byte[] sb = Bytes.toBytes("sb");
htu.startMiniZKCluster();
SingleProcessHBaseCluster hbm = htu.startMiniHBaseCluster();
hbm.waitForActiveAndReadyMaster();
HRegionServer targetRs = hbm.getRegionServer(0);
// We want to have a datanode with the same name as the region server, so
// we're going to get the region server's name and start a new datanode with that name.
String host4 = targetRs.getServerName().getHostname();
LOG.info("Starting a new datanode with the name=" + host4);
cluster.startDataNodes(conf, 1, true, null, new String[] { "/r4" }, new String[] { host4 }, null);
cluster.waitClusterUp();
final int repCount = 3;
// We use the regionserver file system & conf as we expect it to have the hook.
conf = targetRs.getConfiguration();
HFileSystem rfs = (HFileSystem) targetRs.getFileSystem();
Table h = htu.createTable(TableName.valueOf(name.getMethodName()), sb);
// Now, we have 4 datanodes and a replication count of 3, so we don't know whether the datanode
// with the same name will be used. We can't really stop an existing datanode, as that would
// expose us to nasty hdfs bugs/issues. So we're going to try multiple times.
// Now we need to find the log file, its locations, and look at it
String rootDir = new Path(CommonFSUtils.getWALRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME
  + "/" + targetRs.getServerName().toString()).toUri().getPath();
DistributedFileSystem mdfs =
  (DistributedFileSystem) hbm.getMaster().getMasterFileSystem().getFileSystem();
int nbTest = 0;
while (nbTest < 10) {
final List<HRegion> regions = targetRs.getRegions(h.getName());
final CountDownLatch latch = new CountDownLatch(regions.size());
// listen for successful log rolls
final WALActionsListener listener = new WALActionsListener() {
@Override
public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
latch.countDown();
}
};
for (HRegion region : regions) {
region.getWAL().registerWALActionsListener(listener);
}
htu.getAdmin().rollWALWriter(targetRs.getServerName());
// wait
try {
latch.await();
} catch (InterruptedException exception) {
LOG.warn("Interrupted while waiting for the wal of '" + targetRs + "' to roll. If later " + "tests fail, it's probably because we should still be waiting.");
Thread.currentThread().interrupt();
}
for (Region region : regions) {
((HRegion) region).getWAL().unregisterWALActionsListener(listener);
}
// We need a sleep as the namenode is informed asynchronously
Thread.sleep(100);
// insert one put to ensure a minimal size
Put p = new Put(sb);
p.addColumn(sb, sb, sb);
h.put(p);
DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
HdfsFileStatus[] hfs = dl.getPartialListing();
// As we wrote a put, we should have at least one log file.
Assert.assertTrue(hfs.length >= 1);
for (HdfsFileStatus hf : hfs) {
// Because this is a live cluster, log files might get archived while we're processing
try {
LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
String logFile = rootDir + "/" + hf.getLocalName();
FileStatus fsLog = rfs.getFileStatus(new Path(logFile));
LOG.info("Checking log file: " + logFile);
// Now checking that the hook is up and running
// We can't call getBlockLocations directly; it's not available in HFileSystem
// We're trying multiple times to be sure, as the order is random
BlockLocation[] bls = rfs.getFileBlockLocations(fsLog, 0, 1);
if (bls.length > 0) {
BlockLocation bl = bls[0];
LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile + " ");
for (int i = 0; i < bl.getHosts().length - 1; i++) {
LOG.info(bl.getHosts()[i] + " " + logFile);
Assert.assertNotSame(bl.getHosts()[i], host4);
}
String last = bl.getHosts()[bl.getHosts().length - 1];
LOG.info(last + " " + logFile);
if (host4.equals(last)) {
nbTest++;
LOG.info(logFile + " is on the new datanode and is ok");
if (bl.getHosts().length == 3) {
// We can test this case from the file system as well
// Checking the underlying file system. Multiple times as the order is random
testFromDFS(dfs, logFile, repCount, host4);
// now from the master
testFromDFS(mdfs, logFile, repCount, host4);
}
}
}
} catch (FileNotFoundException exception) {
LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + "archived out from under us so we'll ignore and retry. If this test hangs " + "indefinitely you should treat this failure as a symptom.", exception);
} catch (RemoteException exception) {
if (exception.unwrapRemoteException() instanceof FileNotFoundException) {
LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + "archived out from under us so we'll ignore and retry. If this test hangs " + "indefinitely you should treat this failure as a symptom.", exception);
} else {
throw exception;
}
}
}
}
}
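The testFromDFS helper called above is defined elsewhere in the test class and is not shown here. A simplified sketch of the kind of check it performs, assuming it verifies that each block of the file carries repCount replicas and that the block-reordering hook pushed the datanode co-located with the region server to the end of the replica list (the class and method shape are ours):

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;

final class BlockOrderChecks {
  /** Asserts every block of the file has repCount replicas, with host as the last one. */
  static void assertHostIsLastReplica(FileSystem fs, String file, int repCount, String host)
      throws Exception {
    FileStatus status = fs.getFileStatus(new Path(file));
    BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation block : blocks) {
      String[] hosts = block.getHosts();
      Assert.assertEquals(repCount, hosts.length);
      // the reorder hook should have pushed the co-located datanode to the end
      Assert.assertEquals(host, hosts[hosts.length - 1]);
    }
  }
}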
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestIncrementalBackup, method TestIncBackupRestore.
// implement all test cases in 1 test since incremental
// backup/restore has dependencies
@Test
public void TestIncBackupRestore() throws Exception {
int ADD_ROWS = 99;
// #1 - create full backup for all tables
LOG.info("create full backup image for all tables");
List<TableName> tables = Lists.newArrayList(table1, table2);
final byte[] fam3Name = Bytes.toBytes("f3");
final byte[] mobName = Bytes.toBytes("mob");
TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName)
    .setMobEnabled(true).setMobThreshold(5L).build())
  .build();
TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
try (Connection conn = ConnectionFactory.createConnection(conf1)) {
int NB_ROWS_FAM3 = 6;
insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
Admin admin = conn.getAdmin();
BackupAdminImpl client = new BackupAdminImpl(conn);
BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
String backupIdFull = client.backupTables(request);
assertTrue(checkSucceeded(backupIdFull));
// #2 - insert some data to table
Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
Assert.assertEquals(HBaseTestingUtil.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
LOG.debug("written " + ADD_ROWS + " rows to " + table1);
// additionally, insert rows to MOB cf
int NB_ROWS_MOB = 111;
insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF");
t1.close();
Assert.assertEquals(HBaseTestingUtil.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
Table t2 = conn.getTable(table2);
Put p2;
for (int i = 0; i < 5; i++) {
p2 = new Put(Bytes.toBytes("row-t2" + i));
p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
t2.put(p2);
}
Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
t2.close();
LOG.debug("written " + 5 + " rows to " + table2);
// split table1
SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
List<HRegion> regions = cluster.getRegions(table1);
byte[] name = regions.get(0).getRegionInfo().getRegionName();
long startSplitTime = EnvironmentEdgeManager.currentTime();
try {
admin.splitRegionAsync(name).get();
} catch (Exception e) {
// even if the split fails and an exception is thrown here, it should not
// affect the availability check that follows in the current API
LOG.debug("region is not splittable, because " + e);
}
while (!admin.isTableAvailable(table1)) {
Thread.sleep(100);
}
long endSplitTime = EnvironmentEdgeManager.currentTime();
// split finished
LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
// #3 - incremental backup for multiple tables
tables = Lists.newArrayList(table1, table2);
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdIncMultiple = client.backupTables(request);
assertTrue(checkSucceeded(backupIdIncMultiple));
// add column family f2 to table1
// drop column family f3
final byte[] fam2Name = Bytes.toBytes("f2");
newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name))
  .removeColumnFamily(fam3Name)
  .build();
TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
int NB_ROWS_FAM2 = 7;
Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
t3.close();
// Wait for 5 sec to make sure that old WALs were deleted
Thread.sleep(5000);
// #4 - additional incremental backup for multiple tables
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdIncMultiple2 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdIncMultiple2));
// #5 - restore full backup for all tables
TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
LOG.debug("Restoring full " + backupIdFull);
client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
  tablesRestoreFull, tablesMapFull, true));
// #6.1 - check tables for full restore
Admin hAdmin = TEST_UTIL.getAdmin();
assertTrue(hAdmin.tableExists(table1_restore));
assertTrue(hAdmin.tableExists(table2_restore));
hAdmin.close();
// #6.2 - checking row count of tables for full restore
Table hTable = conn.getTable(table1_restore);
Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
hTable.close();
hTable = conn.getTable(table2_restore);
Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
hTable.close();
// #7 - restore incremental backup for multiple tables, with overwrite
TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
  tablesRestoreIncMultiple, tablesMapIncMultiple, true));
hTable = conn.getTable(table1_restore);
LOG.debug("After incremental restore: " + hTable.getDescriptor());
int countFamName = TEST_UTIL.countRows(hTable, famName);
LOG.debug("f1 has " + countFamName + " rows");
Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
LOG.debug("f2 has " + countFam2Name + " rows");
Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
int countMobName = TEST_UTIL.countRows(hTable, mobName);
LOG.debug("mob has " + countMobName + " rows");
Assert.assertEquals(countMobName, NB_ROWS_MOB);
hTable.close();
hTable = conn.getTable(table2_restore);
Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
hTable.close();
admin.close();
}
}
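createBackupRequest(...) is a helper from the test's base class. A minimal sketch of constructing an equivalent request directly with BackupRequest.Builder, assuming its withBackupType/withTableList/withTargetRootDir methods:

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;

final class BackupRequests {
  static BackupRequest of(BackupType type, List<TableName> tables, String rootDir) {
    return new BackupRequest.Builder()
      .withBackupType(type)       // FULL or INCREMENTAL
      .withTableList(tables)      // tables to include in the image
      .withTargetRootDir(rootDir) // where the backup image is written
      .build();
  }
}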
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestRSGroupMajorCompactionTTL, method setUp.
@Before
@Override
public void setUp() throws Exception {
utility = new HBaseTestingUtil();
Configuration conf = utility.getConfiguration();
RSGroupUtil.enableRSGroup(conf);
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, NUM_SLAVES_BASE);
conf.setInt("hbase.hfile.compaction.discharger.interval", 10);
utility.startMiniCluster(NUM_SLAVES_BASE);
SingleProcessHBaseCluster cluster = utility.getHBaseCluster();
final HMaster master = cluster.getMaster();
// wait for balancer to come online
utility.waitFor(60000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() {
return master.isInitialized() && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline();
}
});
admin = utility.getAdmin();
}
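Waiter.Predicate declares a single evaluate() method, so the anonymous class in setUp above can also be written as a lambda; a drop-in sketch of the same wait:

utility.waitFor(60000,
  () -> master.isInitialized() && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline());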