Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestCatalogJanitor, method testCleanParent.
@Test
public void testCleanParent() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  MasterServices services = new MockMasterServices(htu);
  try {
    CatalogJanitor janitor = new CatalogJanitor(services);
    // Create regions.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, we do not remove the parent.
    Result r = createResult(parent, splita, splitb);
    // Add a reference under splita's directory so we don't clear out the parent.
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
    Path storedir = HStore.getStoreHomedir(tabledir, splita, htd.getColumnFamilies()[0].getName());
    Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    Path path = ref.write(fs, p);
    assertTrue(fs.exists(path));
    assertFalse(janitor.cleanParent(parent, r));
    // Remove the reference file and try again.
    assertTrue(fs.delete(p, true));
    assertTrue(janitor.cleanParent(parent, r));
  } finally {
    services.stop("shutdown");
  }
}
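The createResult(...) helper is not shown on this page. A minimal sketch of what it might look like, assuming it fabricates the hbase:meta row that CatalogJanitor reads for a split parent (the body below is an illustration, not the project's exact code):

private Result createResult(HRegionInfo parent, HRegionInfo a, HRegionInfo b) throws IOException {
  List<Cell> cells = new ArrayList<>();
  long now = System.currentTimeMillis();
  // The parent's serialized HRegionInfo under info:regioninfo.
  cells.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.REGIONINFO_QUALIFIER, now, parent.toByteArray()));
  // The two daughters under info:splitA and info:splitB; their presence is what
  // makes cleanParent(...) check for live references before deleting the parent.
  cells.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.SPLITA_QUALIFIER, now, a.toByteArray()));
  cells.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.SPLITB_QUALIFIER, now, b.toByteArray()));
  return Result.create(cells);
}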
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestCatalogJanitor, method testDuplicateHFileResolution.
/**
 * Test that archiving a store file with the same name as a file already in the
 * archive causes the previously archived file to be renamed with a timestamped
 * backup suffix.
 */
@Test
public void testDuplicateHFileResolution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  MasterServices services = new MockMasterServices(htu);
  // Create the janitor.
  CatalogJanitor janitor = new CatalogJanitor(services);
  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Build the meta row for a parent whose two daughter regions are in place.
  Result r = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // We have to set the root directory since we use it in HFileDisposer to figure out
  // the archive directory. Otherwise it just picks the first root directory it can
  // find (so the single test passes, but when the full suite is run, things break).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  System.out.println("Old root:" + rootdir);
  System.out.println("Old table:" + tabledir);
  System.out.println("Old store:" + storedir);
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
      tabledir, htd.getColumnFamilies()[0].getName());
  System.out.println("Old archive:" + storeArchive);
  // Enable archiving and make sure that files get archived.
  addMockStoreFiles(2, services, storedir);
  // Get the current store files for comparison.
  FileStatus[] storeFiles = fs.listStatus(storedir);
  // Do the cleaning of the parent.
  assertTrue(janitor.cleanParent(parent, r));
  // Now check that the files have actually been archived.
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
  // Add store files with the same names as before to check the backup behavior.
  addMockStoreFiles(2, services, storedir);
  // Clean the parent again.
  assertTrue(janitor.cleanParent(parent, r));
  // Check that the duplicates were archived alongside timestamped backups.
  archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
  // Cleanup.
  services.stop("Test finished");
  janitor.cancel(true);
}
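Likewise, addMockStoreFiles(...) is a private helper not shown here. A plausible sketch, assuming the janitor archives whatever files exist under the store directory regardless of content (the method body and file names are assumptions):

private void addMockStoreFiles(int count, MasterServices services, Path storedir) throws IOException {
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  fs.mkdirs(storedir);
  // Write dummy files into the parent's store directory; the content is
  // irrelevant because archiving moves files by path, not by HFile content.
  for (int i = 0; i < count; i++) {
    Path storeFile = new Path(storedir, "_store" + i);
    try (FSDataOutputStream dos = fs.create(storeFile, true)) {
      dos.writeBytes("Some data: " + i);
    }
  }
  // Sanity-check that the mock files are in place before the janitor runs.
  assertEquals(count, fs.listStatus(storedir).length);
}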
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestDistributedLogSplitting, method setup.
@BeforeClass
public static void setup() throws Exception {
  TEST_UTIL = new HBaseTestingUtility(HBaseConfiguration.create());
  dfsCluster = TEST_UTIL.startMiniDFSCluster(1);
  zkCluster = TEST_UTIL.startMiniZKCluster();
  originalConf = TEST_UTIL.getConfiguration();
}
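A setup like this is normally paired with an @AfterClass teardown that releases the mini clusters. A minimal sketch, assuming only the fields initialized above (the actual test's teardown is not part of this excerpt):

@AfterClass
public static void tearDown() throws Exception {
  // Shut down in reverse order of startup.
  TEST_UTIL.shutdownMiniZKCluster();
  TEST_UTIL.shutdownMiniDFSCluster();
}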
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestAssignmentManagerMetrics, method startCluster.
@BeforeClass
public static void startCluster() throws Exception {
  LOG.info("Starting cluster");
  TEST_UTIL = new HBaseTestingUtility();
  conf = TEST_UTIL.getConfiguration();
  // Disable the sanity check for coprocessors.
  conf.setBoolean("hbase.table.sanity.checks", false);
  // Set the RIT stuck warning threshold to a small value.
  conf.setInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 20);
  // Set msgInterval to 1 second.
  conf.setInt("hbase.regionserver.msginterval", msgInterval);
  // Keep no tables on the master.
  conf.set("hbase.balancer.tablesOnMaster", "none");
  // Set the client sync wait timeout to 2.5 seconds.
  conf.setInt("hbase.client.sync.wait.timeout.msec", 2500);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 2500);
  TEST_UTIL.startMiniCluster(1);
  cluster = TEST_UTIL.getHBaseCluster();
  master = cluster.getMaster();
}
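The matching shutdown is not shown in the excerpt. A minimal sketch, assuming nothing beyond the TEST_UTIL field initialized above:

@AfterClass
public static void after() throws Exception {
  if (TEST_UTIL != null) {
    // Stops the master, region server, ZooKeeper and DFS started by startMiniCluster(1).
    TEST_UTIL.shutdownMiniCluster();
  }
}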
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestCanaryTool, method setUp.
@Before
public void setUp() throws Exception {
  testingUtility = new HBaseTestingUtility();
  testingUtility.startMiniCluster();
  LogManager.getRootLogger().addAppender(mockAppender);
}
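Because setUp() both starts a mini cluster and attaches a mock Log4j appender, a matching @After method would undo both. A hypothetical sketch (the method name and ordering are assumptions, not the project's exact code):

@After
public void tearDown() throws Exception {
  // Detach the mock appender first so shutdown logging does not hit it.
  LogManager.getRootLogger().removeAppender(mockAppender);
  testingUtility.shutdownMiniCluster();
}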