Example 21 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

In class TestMasterReplication, method loadAndValidateHFileReplication:

private void loadAndValidateHFileReplication(String testName, int masterNumber, int[] slaveNumbers, byte[] row, byte[] fam, Table[] tables, byte[][][] hfileRanges, int numOfRows, int[] expectedCounts, boolean toValidate) throws Exception {
    HBaseTestingUtility util = utilities[masterNumber];
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(fam));
    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
        byte[] from = range[0];
        byte[] to = range[1];
        HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows);
    }
    Table source = tables[masterNumber];
    final TableName tableName = source.getName();
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
    String[] args = { dir.toString(), tableName.toString() };
    loader.run(args);
    if (toValidate) {
        for (int slaveClusterNumber : slaveNumbers) {
            wait(slaveClusterNumber, tables[slaveClusterNumber], expectedCounts[slaveClusterNumber]);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), TableName (org.apache.hadoop.hbase.TableName), Table (org.apache.hadoop.hbase.client.Table), LoadIncrementalHFiles (org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), FileSystem (org.apache.hadoop.fs.FileSystem)
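
A hedged sketch of how this helper might be invoked; the test name, cluster indices, column family, and row counts below are illustrative assumptions, not values from this page. Two HFiles of 100 rows each are bulk loaded on cluster 0, and 200 replicated rows are expected on slave clusters 1 and 2:

    byte[][][] hfileRanges = new byte[][][] {
        { Bytes.toBytes("aaa"), Bytes.toBytes("ccc") },
        { Bytes.toBytes("ccc"), Bytes.toBytes("eee") } };
    // expectedCounts is indexed by cluster number, matching the slaveNumbers entries.
    loadAndValidateHFileReplication("testHFileReplication", 0, new int[] { 1, 2 },
        Bytes.toBytes("r1"), Bytes.toBytes("f"), tables, hfileRanges, 100,
        new int[] { 0, 200, 200 }, true);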

Example 22 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

In class TestFSUtils, method testVersion:

@Test
public void testVersion() throws DeserializationException, IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final FileSystem fs = htu.getTestFileSystem();
    final Path rootdir = htu.getDataTestDir();
    assertNull(FSUtils.getVersion(fs, rootdir));
    // Write out old format version file.  See if we can read it in and convert.
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    FSDataOutputStream s = fs.create(versionFile);
    final String version = HConstants.FILE_SYSTEM_VERSION;
    s.writeUTF(version);
    s.close();
    assertTrue(fs.exists(versionFile));
    FileStatus[] status = fs.listStatus(versionFile);
    assertNotNull(status);
    assertTrue(status.length > 0);
    String newVersion = FSUtils.getVersion(fs, rootdir);
    assertEquals(version.length(), newVersion.length());
    assertEquals(version, newVersion);
    // The file will have been converted. Exercise the pb format.
    assertEquals(version, FSUtils.getVersion(fs, rootdir));
    FSUtils.checkVersion(fs, rootdir, true);
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), FileSystem (org.apache.hadoop.fs.FileSystem), HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
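
As a companion sketch, the same round trip can be driven through FSUtils directly, assuming the htu, fs, and rootdir set up above; setVersion writes the current pb-format version file that the tail of this test reads back:

    FSUtils.setVersion(fs, rootdir);
    // getVersion should now parse the pb-format file without conversion.
    assertEquals(HConstants.FILE_SYSTEM_VERSION, FSUtils.getVersion(fs, rootdir));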

Example 23 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

In class TestFSUtils, method verifyFileInDirWithStoragePolicy:

private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    conf.set(HConstants.WAL_STORAGE_POLICY, policy);
    MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
    try {
        assertTrue(FSUtils.isHDFS(conf));
        FileSystem fs = FileSystem.get(conf);
        Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
        fs.mkdirs(testDir);
        FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
        String file = UUID.randomUUID().toString();
        Path p = new Path(testDir, file);
        WriteDataToHDFS(fs, p, 4096);
        // cleanupFile asserts existence before deleting.
        cleanupFile(fs, testDir);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), FileSystem (org.apache.hadoop.fs.FileSystem), HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem)
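
One plausible way this private helper is driven, as a hedged sketch; the test method name is hypothetical and the policy names are standard HDFS storage policies, not values quoted from this page:

    @Test
    public void testSetWALStoragePolicies() throws Exception {
        // Each iteration starts and shuts down its own MiniDFSCluster.
        for (String policy : new String[] { "ALL_SSD", "ONE_SSD", "HOT" }) {
            verifyFileInDirWithStoragePolicy(policy);
        }
    }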

Example 24 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

In class TestFSUtils, method testRemoveWALRootPath:

@Test
public void testRemoveWALRootPath() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    FSUtils.setRootDir(conf, new Path("file:///user/hbase"));
    Path testFile = new Path(FSUtils.getRootDir(conf), "test/testfile");
    Path tmpFile = new Path("file:///test/testfile");
    assertEquals(FSUtils.removeWALRootPath(testFile, conf), "test/testfile");
    assertEquals(FSUtils.removeWALRootPath(tmpFile, conf), tmpFile.toString());
    FSUtils.setWALRootDir(conf, new Path("file:///user/hbaseLogDir"));
    assertEquals(FSUtils.removeWALRootPath(testFile, conf), testFile.toString());
    Path logFile = new Path(FSUtils.getWALRootDir(conf), "test/testlog");
    assertEquals(FSUtils.removeWALRootPath(logFile, conf), "test/testlog");
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), Test (org.junit.Test)
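
A small follow-on sketch of the fallback behavior the first two asserts rely on, under the assumption that FSUtils.getWALRootDir returns the root dir until a separate WAL root is configured:

    Configuration conf2 = HBaseConfiguration.create();
    FSUtils.setRootDir(conf2, new Path("file:///user/hbase"));
    // No separate WAL root configured, so the WAL root falls back to the root dir.
    assertEquals(FSUtils.getRootDir(conf2), FSUtils.getWALRootDir(conf2));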

Example 25 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

In class TestCatalogJanitor, method parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst:

/**
   * Make sure a parent with the specified end key gets cleaned up even if its daughter is cleaned up before it.
   *
   * @param rootDir the test case name, used as the HBase testing utility root
   * @param lastEndKey the end key of the split parent
   * @throws IOException
   * @throws InterruptedException
   */
private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(final String rootDir, final byte[] lastEndKey) throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, rootDir);
    MasterServices services = new MockMasterServices(htu);
    CatalogJanitor janitor = new CatalogJanitor(services);
    final HTableDescriptor htd = createHTableDescriptor();
    // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
    // Parent
    HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), lastEndKey);
    // Sleep a second; otherwise the encoded names of regions with the same
    // start key, created in the same second, come out identical.
    Thread.sleep(1001);
    // Daughter a
    HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    Thread.sleep(1001);
    // Make daughters of daughter a: splitaa and splitab.
    HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
    HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
    // Daughter b
    HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), lastEndKey);
    Thread.sleep(1001);
    // Make daughters of daughter b: splitba and splitbb.
    HRegionInfo splitba = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
    HRegionInfo splitbb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"), lastEndKey);
    // First test that our comparator up in CatalogJanitor sorts correctly.
    // Just for kicks.
    SortedMap<HRegionInfo, Result> regions = new TreeMap<>(new CatalogJanitor.SplitParentFirstComparator());
    // Now make sure that this regions map sorts as we expect it to.
    regions.put(parent, createResult(parent, splita, splitb));
    regions.put(splitb, createResult(splitb, splitba, splitbb));
    regions.put(splita, createResult(splita, splitaa, splitab));
    // Assert it's properly sorted.
    int index = 0;
    for (Map.Entry<HRegionInfo, Result> e : regions.entrySet()) {
        if (index == 0) {
            assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
        } else if (index == 1) {
            assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
        } else if (index == 2) {
            assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
        }
        index++;
    }
    // Now play around with the cleanParent function.  Create a ref from splita
    // up to the parent.
    Path splitaRef = createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
    // Make sure actual super parent sticks around because splita has a ref.
    assertFalse(janitor.cleanParent(parent, regions.get(parent)));
    // splitba and splitbb do not have dirs in fs, so if we test splitb,
    // it should get cleaned up.
    assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));
    // Now remove ref from splita to parent... so parent can be let go and so
    // the daughter splita can be split (can't split if still references).
    // BUT make the timing such that the daughter gets cleaned up before we
    // can get a chance to let go of the parent.
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    assertTrue(fs.delete(splitaRef, true));
    // Create the refs from daughters of splita.
    Path splitaaRef = createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
    Path splitabRef = createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true);
    // Test splita. It should stick around because of the references from splitaa and splitab.
    assertFalse(janitor.cleanParent(splita, regions.get(splita)));
    // Now clean up daughter splita first: remove the references from its daughters.
    assertTrue(fs.delete(splitaaRef, true));
    assertTrue(fs.delete(splitabRef, true));
    assertTrue(janitor.cleanParent(splita, regions.get(splita)));
    // Super parent should get cleaned up now that both splita and splitb are gone.
    assertTrue(janitor.cleanParent(parent, regions.get(parent)));
    services.stop("test finished");
    janitor.cancel(true);
}
Also used: Path (org.apache.hadoop.fs.Path), SplitParentFirstComparator (org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator), TreeMap (java.util.TreeMap), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Result (org.apache.hadoop.hbase.client.Result), RegionActionResult (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), FileSystem (org.apache.hadoop.fs.FileSystem), Map (java.util.Map), SortedMap (java.util.SortedMap)

Aggregations

HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 136 uses
Configuration (org.apache.hadoop.conf.Configuration): 50 uses
BeforeClass (org.junit.BeforeClass): 49 uses
Test (org.junit.Test): 42 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 35 uses
Path (org.apache.hadoop.fs.Path): 29 uses
Admin (org.apache.hadoop.hbase.client.Admin): 24 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 22 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 20 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 18 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 16 uses
Before (org.junit.Before): 14 uses
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 11 uses
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 11 uses
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 10 uses
Table (org.apache.hadoop.hbase.client.Table): 8 uses
HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem): 8 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 7 uses
Result (org.apache.hadoop.hbase.client.Result): 7 uses