
Example 46 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestReplicationChangingPeerRegionservers, method testChangingNumberOfPeerRegionServers.

@Test
public void testChangingNumberOfPeerRegionServers() throws IOException, InterruptedException {
    LOG.info("testSimplePutDelete");
    SingleProcessHBaseCluster peerCluster = UTIL2.getMiniHBaseCluster();
    // This test wants two RS's up. We only run one generally so add one.
    peerCluster.startRegionServer();
    Waiter.waitFor(peerCluster.getConfiguration(), 30000, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return peerCluster.getLiveRegionServerThreads().size() > 1;
        }
    });
    int numRS = peerCluster.getRegionServerThreads().size();
    doPutTest(Bytes.toBytes(1));
    int rsToStop = peerCluster.getServerWithMeta() == 0 ? 1 : 0;
    peerCluster.stopRegionServer(rsToStop);
    peerCluster.waitOnRegionServer(rsToStop);
    // Sanity check
    assertEquals(numRS - 1, peerCluster.getRegionServerThreads().size());
    doPutTest(Bytes.toBytes(2));
    peerCluster.startRegionServer();
    // Sanity check
    assertEquals(numRS, peerCluster.getRegionServerThreads().size());
    doPutTest(Bytes.toBytes(3));
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Waiter(org.apache.hadoop.hbase.Waiter) IOException(java.io.IOException) Test(org.junit.Test)
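
The anonymous Waiter.Predicate above has a single abstract method, so on a Java 8+ codebase the wait can also be written as a lambda. A minimal sketch of the equivalent call, meant as a drop-in replacement for the waitFor block in the test above (not taken from the project source):

Waiter.waitFor(peerCluster.getConfiguration(), 30000,
    () -> peerCluster.getLiveRegionServerThreads().size() > 1);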

Example 47 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestMasterObserver, method testSnapshotOperations.

@Test
public void testSnapshotOperations() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
    cp.resetStates();
    // create a table
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build();
    Admin admin = UTIL.getAdmin();
    tableCreationLatch = new CountDownLatch(1);
    admin.createTable(tableDescriptor);
    tableCreationLatch.await();
    tableCreationLatch = new CountDownLatch(1);
    admin.disableTable(tableName);
    assertTrue(admin.isTableDisabled(tableName));
    try {
        // Test snapshot operation
        assertFalse("Coprocessor should not have been called yet", cp.wasSnapshotCalled());
        admin.snapshot(TEST_SNAPSHOT, tableName);
        assertTrue("Coprocessor should have been called on snapshot", cp.wasSnapshotCalled());
        // Test list operation
        admin.listSnapshots();
        assertTrue("Coprocessor should have been called on snapshot list", cp.wasListSnapshotCalled());
        // Test clone operation
        admin.cloneSnapshot(TEST_SNAPSHOT, TEST_CLONE);
        assertTrue("Coprocessor should have been called on snapshot clone", cp.wasCloneSnapshotCalled());
        assertFalse("Coprocessor restore should not have been called on snapshot clone", cp.wasRestoreSnapshotCalled());
        admin.disableTable(TEST_CLONE);
        assertTrue(admin.isTableDisabled(TEST_CLONE));
        deleteTable(admin, TEST_CLONE);
        // Test restore operation
        cp.resetStates();
        admin.restoreSnapshot(TEST_SNAPSHOT);
        assertTrue("Coprocessor should have been called on snapshot restore", cp.wasRestoreSnapshotCalled());
        assertFalse("Coprocessor clone should not have been called on snapshot restore", cp.wasCloneSnapshotCalled());
        admin.deleteSnapshot(TEST_SNAPSHOT);
        assertTrue("Coprocessor should have been called on snapshot delete", cp.wasDeleteSnapshotCalled());
    } finally {
        deleteTable(admin, tableName);
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) TableName(org.apache.hadoop.hbase.TableName) MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) HMaster(org.apache.hadoop.hbase.master.HMaster) Admin(org.apache.hadoop.hbase.client.Admin) CountDownLatch(java.util.concurrent.CountDownLatch) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)

Example 48 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestMasterCoprocessorExceptionWithAbort, method testExceptionFromCoprocessorWhenCreatingTable.

@Test
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
    SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class);
    assertFalse("No table created yet", cp.wasCreateTableCalled());
    // set a watch on the zookeeper /hbase/master node. If the master dies,
    // the node will be deleted.
    ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZK error: " + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZK master tracker error, why=", e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    masterTracker.start();
    zkw.registerListener(masterTracker);
    // Test (part of the) output that should have be printed by master when it aborts:
    // (namely the part that shows the set of loaded coprocessors).
    // In this test, there is only a single coprocessor (BuggyMasterObserver).
    assertTrue(HMaster.getLoadedCoprocessors().contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
    CreateTableThread createTableThread = new CreateTableThread(UTIL);
    // Attempting to create a table (using createTableThread above) triggers an NPE in BuggyMasterObserver.
    // Master will then abort and the /hbase/master zk node will be deleted.
    createTableThread.start();
    // Wait up to 30 seconds for master's /hbase/master zk node to go away after master aborts.
    for (int i = 0; i < 30; i++) {
        if (masterTracker.masterZKNodeWasDeleted) {
            break;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            fail("InterruptedException while waiting for master zk node to " + "be deleted.");
        }
    }
    assertTrue("Master aborted on coprocessor exception, as expected.", masterTracker.masterZKNodeWasDeleted);
    createTableThread.interrupt();
    try {
        createTableThread.join(1000);
    } catch (InterruptedException e) {
        assertTrue("Ignoring InterruptedException while waiting for " + " createTableThread.join().", true);
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) Abortable(org.apache.hadoop.hbase.Abortable) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) HMaster(org.apache.hadoop.hbase.master.HMaster) Test(org.junit.Test)
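
The MasterTracker helper used above is not included in this excerpt. A minimal sketch of what such a tracker could look like, assuming it extends org.apache.hadoop.hbase.zookeeper.ZKNodeTracker (consistent with the three-argument constructor, the start() call, and the masterZKNodeWasDeleted field seen in the test); the real class in the HBase test may differ in detail:

// Hypothetical sketch: a ZooKeeper node tracker that records deletion of the master znode.
public static class MasterTracker extends ZKNodeTracker {

    // Flipped to true once the watched /hbase/master node disappears.
    public volatile boolean masterZKNodeWasDeleted = false;

    public MasterTracker(ZKWatcher zkw, String masterNode, Abortable abortable) {
        super(zkw, masterNode, abortable);
    }

    @Override
    public synchronized void nodeDeleted(String path) {
        if (path.equals("/hbase/master")) {
            masterZKNodeWasDeleted = true;
        }
    }
}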

Example 49 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestClassLoading, method testPrivateClassLoader.

@Test
// HBASE-6308: Test CP classloader is the CoprocessorClassLoader
public void testPrivateClassLoader() throws Exception {
    File jarFile = buildCoprocessorJar(cpName4);
    // create a table that references the jar
    TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName4));
    tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
    tdb.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER);
    TableDescriptor tableDescriptor = tdb.build();
    Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(tableDescriptor);
    waitForTable(tableDescriptor.getTableName());
    // verify that the coprocessor was loaded correctly
    boolean found = false;
    SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
        if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) {
            Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4);
            if (cp != null) {
                found = true;
                assertEquals("Class " + cpName4 + " was not loaded by CoprocessorClassLoader", cp.getClass().getClassLoader().getClass(), CoprocessorClassLoader.class);
            }
        }
    }
    assertTrue("Class " + cpName4 + " was missing on a region", found);
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Coprocessor(org.apache.hadoop.hbase.Coprocessor) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) Admin(org.apache.hadoop.hbase.client.Admin) File(java.io.File) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)

Example 50 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestClassLoading, method testClassLoadingFromHDFS.

@Test
// HBASE-3516: Test CP class loading from HDFS
public void testClassLoadingFromHDFS() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    File jarFile1 = buildCoprocessorJar(cpName1);
    File jarFile2 = buildCoprocessorJar(cpName2);
    // copy the jars into dfs
    fs.copyFromLocalFile(new Path(jarFile1.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR));
    String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName();
    Path pathOnHDFS1 = new Path(jarFileOnHDFS1);
    assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS1));
    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1);
    fs.copyFromLocalFile(new Path(jarFile2.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR));
    String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName();
    Path pathOnHDFS2 = new Path(jarFileOnHDFS2);
    assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS2));
    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2);
    // create a table that references the coprocessors
    TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName);
    tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
    // without configuration values
    tdb.setValue("COPROCESSOR$1", jarFileOnHDFS1 + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER);
    // with configuration values
    tdb.setValue("COPROCESSOR$2", jarFileOnHDFS2 + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
    Admin admin = TEST_UTIL.getAdmin();
    if (admin.tableExists(tableName)) {
        if (admin.isTableEnabled(tableName)) {
            admin.disableTable(tableName);
        }
        admin.deleteTable(tableName);
    }
    CoprocessorClassLoader.clearCache();
    byte[] startKey = { 10, 63 };
    byte[] endKey = { 12, 43 };
    TableDescriptor tableDescriptor = tdb.build();
    admin.createTable(tableDescriptor, startKey, endKey, 4);
    waitForTable(tableDescriptor.getTableName());
    // verify that the coprocessors were loaded
    boolean foundTableRegion = false;
    boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true;
    Map<Region, Set<ClassLoader>> regionsActiveClassLoaders = new HashMap<>();
    SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
        if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
            foundTableRegion = true;
            CoprocessorEnvironment env;
            env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
            found1 = found1 && (env != null);
            env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
            found2 = found2 && (env != null);
            if (env != null) {
                Configuration conf = env.getConfiguration();
                found2_k1 = found2_k1 && (conf.get("k1") != null);
                found2_k2 = found2_k2 && (conf.get("k2") != null);
                found2_k3 = found2_k3 && (conf.get("k3") != null);
            } else {
                found2_k1 = false;
                found2_k2 = false;
                found2_k3 = false;
            }
            regionsActiveClassLoaders.put(region, ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders());
        }
    }
    assertTrue("No region was found for table " + tableName, foundTableRegion);
    assertTrue("Class " + cpName1 + " was missing on a region", found1);
    assertTrue("Class " + cpName2 + " was missing on a region", found2);
    assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
    assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
    assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
    // check if CP classloaders are cached
    assertNotNull(jarFileOnHDFS1 + " was not cached", CoprocessorClassLoader.getIfCached(pathOnHDFS1));
    assertNotNull(jarFileOnHDFS2 + " was not cached", CoprocessorClassLoader.getIfCached(pathOnHDFS2));
    // two external jar used, should be one classloader per jar
    assertEquals("The number of cached classloaders should be equal to the number" + " of external jar files", 2, CoprocessorClassLoader.getAllCached().size());
    // check if region active classloaders are shared across all RS regions
    Set<ClassLoader> externalClassLoaders = new HashSet<>(CoprocessorClassLoader.getAllCached());
    for (Map.Entry<Region, Set<ClassLoader>> regionCP : regionsActiveClassLoaders.entrySet()) {
        assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached." + " ClassLoader Cache:" + externalClassLoaders + " Region ClassLoaders:" + regionCP.getValue(), externalClassLoaders.containsAll(regionCP.getValue()));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) HashSet(java.util.HashSet) Set(java.util.Set) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) Map(java.util.Map) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) CoprocessorEnvironment(org.apache.hadoop.hbase.CoprocessorEnvironment) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) FileSystem(org.apache.hadoop.fs.FileSystem) CoprocessorClassLoader(org.apache.hadoop.hbase.util.CoprocessorClassLoader) File(java.io.File) Test(org.junit.Test)
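
Both class-loading examples attach coprocessors through the raw table attribute value of the form path|class|priority[|key=value,...]. HBase also offers a descriptor builder for the same declaration; a minimal sketch of the HDFS example's second coprocessor expressed that way, assuming org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder is available in the HBase version in use (an illustrative alternative, not what the quoted test does):

// Hypothetical equivalent of
// tdb.setValue("COPROCESSOR$2", jarFileOnHDFS2 + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3")
tdb.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(cpName2)
    .setJarPath(jarFileOnHDFS2)
    .setPriority(Coprocessor.PRIORITY_USER)
    .setProperty("k1", "v1")
    .setProperty("k2", "v2")
    .setProperty("k3", "v3")
    .build());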

Aggregations

Classes most often used together with SingleProcessHBaseCluster across these examples, with occurrence counts:

SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 85
Test (org.junit.Test): 69
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 31
TableName (org.apache.hadoop.hbase.TableName): 26
Admin (org.apache.hadoop.hbase.client.Admin): 24
Table (org.apache.hadoop.hbase.client.Table): 22
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 22
HMaster (org.apache.hadoop.hbase.master.HMaster): 21
ServerName (org.apache.hadoop.hbase.ServerName): 18
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 18
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil): 14
MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost): 13
IOException (java.io.IOException): 12
Configuration (org.apache.hadoop.conf.Configuration): 12
Put (org.apache.hadoop.hbase.client.Put): 12
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 12
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 10
File (java.io.File): 9
Path (org.apache.hadoop.fs.Path): 9
RegionMoverBuilder (org.apache.hadoop.hbase.util.RegionMover.RegionMoverBuilder): 9
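
Each example obtains its cluster from a shared test utility (UTIL, UTIL2, or TEST_UTIL). A minimal sketch of how such a SingleProcessHBaseCluster is usually brought up and torn down with HBaseTestingUtil, as listed in the aggregations above (the exact setup in each quoted test class may differ):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class ClusterSetupSketch {

    private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        // Starts an in-process master, region server, and ZooKeeper quorum.
        UTIL.startMiniCluster();
    }

    @AfterClass
    public static void tearDownAfterClass() throws Exception {
        UTIL.shutdownMiniCluster();
    }

    // Tests then grab the running cluster, as the examples above do.
    static SingleProcessHBaseCluster cluster() {
        return UTIL.getHBaseCluster();
    }
}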