Usage example of org.apache.hadoop.hbase.HBaseTestingUtil from the Apache HBase project.
From class TestBackupBase, method setUpHelper:
/**
 * Starts the backing mini cluster(s) and applies the backup-related configuration shared by
 * all backup tests.
 * <p>
 * Expects {@code TEST_UTIL} and {@code conf1} to have been initialized by the caller (see
 * {@code setUp()}). When {@code useSecondCluster} is true, a second cluster sharing the first
 * cluster's ZooKeeper is started for remote-backup scenarios.
 *
 * @throws Exception if starting any mini cluster or creating the test tables fails
 */
public static void setUpHelper() throws Exception {
  BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
  BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";

  if (secure) {
    // set the always on security provider
    UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
      HadoopSecurityEnabledUserProviderForTesting.class);
    // setup configuration
    SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
  }
  conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
  BackupManager.decorateMasterConfiguration(conf1);
  BackupManager.decorateRegionServerConfiguration(conf1);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  // Set TTL for old WALs to 1 sec to enforce fast cleaning of an archived
  // WAL files
  conf1.setLong(TimeToLiveLogCleaner.TTL_CONF_KEY, 1000);
  conf1.setLong(LogCleaner.OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC, 1000);
  // Set MultiWAL (with 2 default WAL files per RS)
  conf1.set(WALFactory.WAL_PROVIDER, provider);
  TEST_UTIL.startMiniCluster();

  if (useSecondCluster) {
    conf2 = HBaseConfiguration.create(conf1);
    // Distinct znode parent so both clusters can share the first cluster's ZooKeeper quorum.
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    TEST_UTIL2 = new HBaseTestingUtil(conf2);
    TEST_UTIL2.setZkCluster(TEST_UTIL.getZkCluster());
    TEST_UTIL2.startMiniDFSCluster(3);
    String root2 = TEST_UTIL2.getConfiguration().get("fs.defaultFS");
    Path p = new Path(new Path(root2), "/tmp/wal");
    CommonFSUtils.setWALRootDir(TEST_UTIL2.getConfiguration(), p);
    TEST_UTIL2.startMiniCluster();
  }
  conf1 = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniMapReduceCluster();
  // Qualify the backup root against the cluster's default filesystem.
  BACKUP_ROOT_DIR =
    new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR)
      .toString();
  LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
  if (useSecondCluster) {
    // Use the two-argument Path(parent, child) constructor for consistency with
    // BACKUP_ROOT_DIR above; the previous string concatenation produced the same result
    // only because BACKUP_REMOTE_ROOT_DIR starts with Path.SEPARATOR.
    BACKUP_REMOTE_ROOT_DIR =
      new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")),
        BACKUP_REMOTE_ROOT_DIR).toString();
    LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
  }
  createTables();
  populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
}
Usage example of org.apache.hadoop.hbase.HBaseTestingUtil from the Apache HBase project.
From class TestBackupBase, method setUp:
/**
 * Setup Cluster with appropriate configurations before running tests.
 * <p>
 * Defaults to a single, non-secure cluster: {@code useSecondCluster} is false and automatic
 * restore on backup failure is enabled. These static flags are presumably adjusted by
 * subclasses before they invoke {@code setUpHelper()} — verify against subclass overrides.
 *
 * @throws Exception if starting the mini cluster or setting up the tables fails
 */
@BeforeClass
public static void setUp() throws Exception {
TEST_UTIL = new HBaseTestingUtil();
// conf1 must alias TEST_UTIL's configuration before setUpHelper() decorates it.
conf1 = TEST_UTIL.getConfiguration();
autoRestoreOnFailure = true;
useSecondCluster = false;
setUpHelper();
}
Usage example of org.apache.hadoop.hbase.HBaseTestingUtil from the Apache HBase project.
From class TestTableMapReduceUtil, method testInitCredentialsForCluster4:
// Verifies that when the job's own cluster (util1) is insecure but the target cluster
// (util2) is secured with Kerberos, initCredentialsForCluster still obtains exactly one
// delegation token for the secure target cluster.
@Test
@SuppressWarnings("unchecked")
public void testInitCredentialsForCluster4() throws Exception {
HBaseTestingUtil util1 = new HBaseTestingUtil();
// Assume util1 is insecure cluster
// Do not start util1 because cannot boot secured mini cluster and insecure mini cluster at once
HBaseTestingUtil util2 = new HBaseTestingUtil();
// Stand up a mini KDC so util2 can run as a Kerberos-secured cluster.
File keytab = new File(util2.getDataTestDir("keytab").toUri().getPath());
MiniKdc kdc = util2.setupMiniKdc(keytab);
try {
String username = UserGroupInformation.getLoginUser().getShortUserName();
String userPrincipal = username + "/localhost";
kdc.createPrincipal(keytab, userPrincipal, HTTP_PRINCIPAL);
// Login must happen before the secure mini cluster is started.
loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath());
try (Closeable util2Closeable = startSecureMiniCluster(util2, kdc, userPrincipal)) {
Configuration conf1 = util1.getConfiguration();
Job job = Job.getInstance(conf1);
// Request credentials for the remote (secure) cluster util2.
TableMapReduceUtil.initCredentialsForCluster(job, util2.getConfiguration());
Credentials credentials = job.getCredentials();
Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
// Exactly one token: the delegation token for the secure target cluster.
assertEquals(1, tokens.size());
// Tokens are keyed by the target cluster's id as stored in ZooKeeper.
String clusterId = ZKClusterId.readClusterIdZNode(util2.getZooKeeperWatcher());
Token<AuthenticationTokenIdentifier> tokenForCluster = (Token<AuthenticationTokenIdentifier>) credentials.getToken(new Text(clusterId));
// The token identity must match the principal we logged in as.
assertEquals(userPrincipal + '@' + kdc.getRealm(), tokenForCluster.decodeIdentifier().getUsername());
}
} finally {
kdc.stop();
}
}
Usage example of org.apache.hadoop.hbase.HBaseTestingUtil from the Apache HBase project.
From class TestTableMapReduceUtil, method testInitCredentialsForCluster3:
// Verifies that when the job's own cluster (util1) is Kerberos-secured but the target
// cluster (util2) is insecure, initCredentialsForCluster obtains no tokens for the target.
@Test
public void testInitCredentialsForCluster3() throws Exception {
HBaseTestingUtil util1 = new HBaseTestingUtil();
// Stand up a mini KDC so util1 can run as a Kerberos-secured cluster.
File keytab = new File(util1.getDataTestDir("keytab").toUri().getPath());
MiniKdc kdc = util1.setupMiniKdc(keytab);
try {
String username = UserGroupInformation.getLoginUser().getShortUserName();
String userPrincipal = username + "/localhost";
kdc.createPrincipal(keytab, userPrincipal, HTTP_PRINCIPAL);
// Login must happen before the secure mini cluster is started.
loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath());
try (Closeable util1Closeable = startSecureMiniCluster(util1, kdc, userPrincipal)) {
HBaseTestingUtil util2 = new HBaseTestingUtil();
// Assume util2 is insecure cluster
// Do not start util2 because cannot boot secured mini cluster and insecure mini cluster at
// once
Configuration conf1 = util1.getConfiguration();
Job job = Job.getInstance(conf1);
// Request credentials for the remote (insecure) cluster util2.
TableMapReduceUtil.initCredentialsForCluster(job, util2.getConfiguration());
Credentials credentials = job.getCredentials();
Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
// The insecure target yields no delegation tokens.
assertTrue(tokens.isEmpty());
}
} finally {
kdc.stop();
}
}
Usage example of org.apache.hadoop.hbase.HBaseTestingUtil from the Apache HBase project.
From class TestTableMapReduceUtil, method testInitCredentialsForCluster1:
/**
 * Verifies that initCredentialsForCluster is a no-op between two insecure clusters: no
 * delegation tokens should be added to the job's credential set.
 */
@Test
public void testInitCredentialsForCluster1() throws Exception {
  HBaseTestingUtil sourceUtil = new HBaseTestingUtil();
  HBaseTestingUtil targetUtil = new HBaseTestingUtil();
  sourceUtil.startMiniCluster();
  try {
    targetUtil.startMiniCluster();
    try {
      Job job = Job.getInstance(sourceUtil.getConfiguration());
      // Request credentials for the remote cluster; neither cluster is secured.
      TableMapReduceUtil.initCredentialsForCluster(job, targetUtil.getConfiguration());
      Collection<Token<? extends TokenIdentifier>> obtained =
        job.getCredentials().getAllTokens();
      assertTrue(obtained.isEmpty());
    } finally {
      targetUtil.shutdownMiniCluster();
    }
  } finally {
    sourceUtil.shutdownMiniCluster();
  }
}
End of aggregated usage examples.