
Example 31 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestFileLink, method testHDFSLinkReadDuringRename.

/**
 * Test, on HDFS, that the FileLink is still readable
 * even when the current file gets renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    Configuration conf = testUtil.getConfiguration();
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());
    try {
        testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
    } finally {
        testUtil.shutdownMiniCluster();
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Test(org.junit.Test)
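
The shared helper testLinkReadDuringRename is not shown on this page. A minimal sketch of what it could look like, reusing the writeSomeData and dataVerify helpers that appear in Example 32 below (an illustration of the idea, not the project's actual method body):

private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws Exception {
    Path originalPath = new Path(rootDir, "test.file");
    Path archivedPath = new Path(rootDir, "archived.file");
    writeSomeData(fs, originalPath, 1 << 20, (byte) 2);
    // The link lists both locations; it resolves to whichever one exists
    List<Path> files = new ArrayList<>();
    files.add(originalPath);
    files.add(archivedPath);
    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
        byte[] data = new byte[8192];
        // The first read comes from the original location
        int n = in.read(data);
        dataVerify(data, n, (byte) 2);
        // Rename the file out from under the open stream
        fs.rename(originalPath, archivedPath);
        // The FileLink should fail over to the archived location
        n = in.read(data);
        dataVerify(data, n, (byte) 2);
    } finally {
        in.close();
    }
}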

Example 32 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestFileLink, method testHDFSLinkReadDuringDelete.

/**
 * Test that the link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a read request goes beyond the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    Configuration conf = testUtil.getConfiguration();
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());
    try {
        List<Path> files = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            Path path = new Path(String.format("test-data-%d", i));
            writeSomeData(fs, path, 1 << 20, (byte) i);
            files.add(path);
        }
        FileLink link = new FileLink(files);
        FSDataInputStream in = link.open(fs);
        try {
            byte[] data = new byte[8192];
            int n;
            // Switch to file 1
            n = in.read(data);
            dataVerify(data, n, (byte) 0);
            fs.delete(files.get(0), true);
            skipBuffer(in, (byte) 0);
            // Switch to file 2
            n = in.read(data);
            dataVerify(data, n, (byte) 1);
            fs.delete(files.get(1), true);
            skipBuffer(in, (byte) 1);
            // Switch to file 3
            n = in.read(data);
            dataVerify(data, n, (byte) 2);
            fs.delete(files.get(2), true);
            skipBuffer(in, (byte) 2);
            // No more files available: expect EOF or FileNotFoundException
            try {
                n = in.read(data);
                // Use a JUnit assertion; the plain `assert` keyword is a
                // no-op unless the JVM runs with -ea
                assertTrue(n <= 0);
            } catch (FileNotFoundException e) {
                // Expected: the last file in the link was deleted
            }
        } finally {
            in.close();
        }
    } finally {
        testUtil.shutdownMiniCluster();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Test(org.junit.Test)
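
The writeSomeData, dataVerify, and skipBuffer helpers above are defined elsewhere in TestFileLink. Minimal sketches with signatures inferred from the call sites, assuming each file is filled with one repeated byte value (not the project's exact bodies; FSDataOutputStream is org.apache.hadoop.fs.FSDataOutputStream):

// Fill a file of the given size with a single repeated byte value.
private static void writeSomeData(FileSystem fs, Path path, long size, byte v) throws IOException {
    byte[] data = new byte[4096];
    java.util.Arrays.fill(data, v);
    FSDataOutputStream out = fs.create(path);
    try {
        for (long written = 0; written < size; written += data.length) {
            out.write(data, 0, (int) Math.min(data.length, size - written));
        }
    } finally {
        out.close();
    }
}

// Assert that the first n bytes of the buffer all carry the expected value.
private static void dataVerify(byte[] data, int n, byte v) {
    for (int i = 0; i < n; i++) {
        assertEquals(v, data[i]);
    }
}

// Consume the remainder of the current file; the 1 MB test files are a
// multiple of the buffer size, so reads stay aligned to file boundaries.
private static void skipBuffer(FSDataInputStream in, byte v) throws IOException {
    byte[] data = new byte[8192];
    int n;
    while ((n = in.read(data)) == data.length) {
        for (int i = 0; i < n; i++) {
            if (data[i] != v) {
                return; // crossed into data from the next file in the link
            }
        }
    }
}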

Example 33 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestCacheConfig, method testFileBucketCacheConfig.

@Test
public void testFileBucketCacheConfig() throws IOException {
    HBaseTestingUtil htu = new HBaseTestingUtil(this.conf);
    try {
        Path p = new Path(htu.getDataTestDir(), "bc.txt");
        FileSystem fs = FileSystem.get(this.conf);
        fs.create(p).close();
        this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + p);
        doBucketCacheConfigTest();
    } finally {
        htu.cleanupTestDir();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Test(org.junit.Test)
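
doBucketCacheConfigTest is defined elsewhere in TestCacheConfig. A plausible sketch of the check it performs, assuming the cache is sized via HConstants.BUCKET_CACHE_SIZE_KEY and built through BlockCacheFactory from org.apache.hadoop.hbase.io.hfile (an assumption based on public HBase APIs, not the method's actual body):

private void doBucketCacheConfigTest() {
    // Size the bucket cache (value is in MB) so the factory will create it
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    // With an ioengine configured, the factory should return a combined
    // cache: an on-heap L1 cache fronting the file-backed bucket cache
    assertTrue(blockCache instanceof CombinedBlockCache);
}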

Example 34 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestMobCompactionWithDefaults, method htuStart.

@BeforeClass
public static void htuStart() throws Exception {
    HTU = new HBaseTestingUtil();
    conf = HTU.getConfiguration();
    conf.setInt("hfile.format.version", 3);
    // Disable automatic MOB compaction
    conf.setLong(MobConstants.MOB_COMPACTION_CHORE_PERIOD, 0);
    // Disable automatic MOB file cleaner chore
    conf.setLong(MobConstants.MOB_CLEANER_PERIOD, 0);
    // Set minimum age to archive to 10 sec
    conf.setLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, minAgeToArchive);
    // Set the compacted file discharger interval to half of minAgeToArchive
    conf.setLong("hbase.hfile.compaction.discharger.interval", minAgeToArchive / 2);
    conf.setBoolean("hbase.regionserver.compaction.enabled", false);
    HTU.startMiniCluster();
}
Also used: HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) BeforeClass(org.junit.BeforeClass)
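
With the MOB chores and regionserver compactions disabled above, tests in this class have to drive compaction explicitly and wait for it to finish. A hedged sketch of that pattern; the method name majorCompactMob is hypothetical, and CompactionState is org.apache.hadoop.hbase.client.CompactionState:

// Illustrative only: trigger MOB compaction by hand, since the chore is off.
private static void majorCompactMob(TableName table) throws Exception {
    Admin admin = HTU.getAdmin();
    admin.majorCompact(table, fam); // fam is the MOB-enabled column family
    // Poll until the compaction finishes before asserting on MOB file counts
    while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(100);
    }
}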

Example 35 with HBaseTestingUtil

Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.

The class TestMobFileCleanerChore, method setUp.

@Before
public void setUp() throws Exception {
    HTU = new HBaseTestingUtil();
    conf = HTU.getConfiguration();
    initConf();
    HTU.startMiniCluster();
    admin = HTU.getAdmin();
    chore = new MobFileCleanerChore();
    familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(fam)
        .setMobEnabled(true).setMobThreshold(mobLen).setMaxVersions(1).build();
    tableDescriptor = HTU.createModifyableTableDescriptor("testMobCompactTable")
        .setColumnFamily(familyDescriptor).build();
    table = HTU.createTable(tableDescriptor, null);
}
Also used: HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Before(org.junit.Before)
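
The initConf helper is not shown on this page. Given the settings in Example 34's htuStart, a plausible sketch (the exact values here are an assumption mirroring that example):

private void initConf() {
    conf.setInt("hfile.format.version", 3);
    // Disable the automatic chores so the test controls cleanup timing
    conf.setLong(MobConstants.MOB_COMPACTION_CHORE_PERIOD, 0);
    conf.setLong(MobConstants.MOB_CLEANER_PERIOD, 0);
    // Minimum age before compacted MOB files may be archived
    conf.setLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, minAgeToArchive);
}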

Aggregations

HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil): 144 usages
Configuration (org.apache.hadoop.conf.Configuration): 42 usages
Test (org.junit.Test): 42 usages
Before (org.junit.Before): 41 usages
BeforeClass (org.junit.BeforeClass): 37 usages
Path (org.apache.hadoop.fs.Path): 24 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 22 usages
Admin (org.apache.hadoop.hbase.client.Admin): 22 usages
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 15 usages
StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption): 14 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 13 usages
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 12 usages
TableName (org.apache.hadoop.hbase.TableName): 10 usages
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 10 usages
SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 9 usages
ServerName (org.apache.hadoop.hbase.ServerName): 8 usages
Table (org.apache.hadoop.hbase.client.Table): 8 usages
ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher): 8 usages
IOException (java.io.IOException): 7 usages
ArrayList (java.util.ArrayList): 7 usages