
Example 11 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class TestMobCompactor, method verifyEncryption.

private boolean verifyEncryption(TableName tableName, String familyName) throws IOException {
    Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
    boolean hasFiles = false;
    if (fs.exists(mobDirPath)) {
        FileStatus[] files = fs.listStatus(mobDirPath);
        hasFiles = files != null && files.length > 0;
        Assert.assertTrue(hasFiles);
        Path path = files[0].getPath();
        CacheConfig cacheConf = new CacheConfig(conf);
        StoreFile sf = new StoreFile(TEST_UTIL.getTestFileSystem(), path, conf, cacheConf, BloomType.NONE);
        HFile.Reader reader = sf.createReader().getHFileReader();
        // The trailer of an encrypted HFile carries the encryption key material.
        byte[] encryptionKey = reader.getTrailer().getEncryptionKey();
        Assert.assertNotNull(encryptionKey);
        Assert.assertEquals(HConstants.CIPHER_AES, reader.getFileContext().getEncryptionContext().getCipher().getName());
    }
    return hasFiles;
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) HFile(org.apache.hadoop.hbase.io.hfile.HFile) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig)
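
If only the trailer needs checking, the StoreFile indirection can be skipped and the HFile opened directly. A minimal sketch, not from the test, assuming the HBase 1.x-era HFile.createReader(fs, path, cacheConf, conf) signature; the helper name isEncrypted is hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

// Hypothetical helper: true if the HFile at 'path' carries an encryption key in its trailer.
private static boolean isEncrypted(Configuration conf, FileSystem fs, Path path) throws IOException {
    CacheConfig cacheConf = new CacheConfig(conf);
    HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
    try {
        return reader.getTrailer().getEncryptionKey() != null;
    } finally {
        reader.close();
    }
}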

Example 12 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class TestCompoundBloomFilter, method setUp.

@Before
public void setUp() throws IOException {
    conf = TEST_UTIL.getConfiguration();
    // This test requires the most recent HFile format, so pin the maximum supported version.
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
    fs = FileSystem.get(conf);
    cacheConf = new CacheConfig(conf);
    blockCache = cacheConf.getBlockCache();
    assertNotNull(blockCache);
}
Also used : CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Before(org.junit.Before)
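
For context, CacheConfig derives all of its behavior from the Configuration handed to it, so cache tuning has to happen before construction. A minimal sketch, not from the test; the 0.4f heap fraction is an arbitrary example value:

Configuration conf = HBaseConfiguration.create();
// Size the block cache at 40% of the heap (example value) before CacheConfig reads the setting.
conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.4f);
CacheConfig cacheConf = new CacheConfig(conf);
BlockCache cache = cacheConf.getBlockCache();
System.out.println("cache data on read: " + cacheConf.shouldCacheDataOnRead());
System.out.println("current cache size: " + cache.getCurrentSize());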

Example 13 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class DataBlockEncodingTool, method main.

/**
   * A command line interface to benchmarks. Parses command-line arguments and
   * runs the appropriate benchmarks.
   * @param args should have length at least 1 and hold the path to an HFile
   * @throws IOException if the specified file cannot be read
   */
public static void main(final String[] args) throws IOException {
    // set up user arguments
    Options options = new Options();
    options.addOption(OPT_HFILE_NAME, true, "HFile to analyse (REQUIRED)");
    options.getOption(OPT_HFILE_NAME).setArgName("FILENAME");
    options.addOption(OPT_KV_LIMIT, true, "Maximum number of KeyValues to process. A benchmark stops running " + "after iterating over this many KV pairs.");
    options.getOption(OPT_KV_LIMIT).setArgName("NUMBER");
    options.addOption(OPT_MEASURE_THROUGHPUT, false, "Measure read throughput");
    options.addOption(OPT_OMIT_CORRECTNESS_TEST, false, "Omit correctness tests.");
    options.addOption(OPT_ENCODING_ALGORITHM, true, "What kind of compression algorithm to use for comparison.");
    options.addOption(OPT_BENCHMARK_N_TIMES, true, "Number of times to run each benchmark. Default value: " + DEFAULT_BENCHMARK_N_TIMES);
    options.addOption(OPT_BENCHMARK_N_OMIT, true, "Number of first runs of every benchmark to exclude from " + "statistics (" + DEFAULT_BENCHMARK_N_OMIT + " by default, so that " + "only the last " + (DEFAULT_BENCHMARK_N_TIMES - DEFAULT_BENCHMARK_N_OMIT) + " times are included in statistics.)");
    // parse arguments
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        System.err.println("Could not parse arguments!");
        System.exit(-1);
        // avoid warning
        return;
    }
    int kvLimit = Integer.MAX_VALUE;
    if (cmd.hasOption(OPT_KV_LIMIT)) {
        kvLimit = Integer.parseInt(cmd.getOptionValue(OPT_KV_LIMIT));
    }
    // basic argument sanity checks
    if (!cmd.hasOption(OPT_HFILE_NAME)) {
        LOG.error("Please specify HFile name using the " + OPT_HFILE_NAME + " option");
        printUsage(options);
        System.exit(-1);
    }
    String pathName = cmd.getOptionValue(OPT_HFILE_NAME);
    String compressionName = DEFAULT_COMPRESSION.getName();
    if (cmd.hasOption(OPT_ENCODING_ALGORITHM)) {
        compressionName = cmd.getOptionValue(OPT_ENCODING_ALGORITHM).toLowerCase(Locale.ROOT);
    }
    boolean doBenchmark = cmd.hasOption(OPT_MEASURE_THROUGHPUT);
    boolean doVerify = !cmd.hasOption(OPT_OMIT_CORRECTNESS_TEST);
    if (cmd.hasOption(OPT_BENCHMARK_N_TIMES)) {
        benchmarkNTimes = Integer.parseInt(cmd.getOptionValue(OPT_BENCHMARK_N_TIMES));
    }
    if (cmd.hasOption(OPT_BENCHMARK_N_OMIT)) {
        benchmarkNOmit = Integer.parseInt(cmd.getOptionValue(OPT_BENCHMARK_N_OMIT));
    }
    if (benchmarkNTimes < benchmarkNOmit) {
        LOG.error("The number of times to run each benchmark (" + benchmarkNTimes + ") must be greater than the number of benchmark runs to exclude " + "from statistics (" + benchmarkNOmit + ")");
        System.exit(1);
    }
    LOG.info("Running benchmark " + benchmarkNTimes + " times. " + "Excluding the first " + benchmarkNOmit + " times from statistics.");
    final Configuration conf = HBaseConfiguration.create();
    try {
        testCodecs(conf, kvLimit, pathName, compressionName, doBenchmark, doVerify);
    } finally {
        (new CacheConfig(conf)).getBlockCache().shutdown();
    }
}
Also used : Options(org.apache.commons.cli.Options) CommandLine(org.apache.commons.cli.CommandLine) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) PosixParser(org.apache.commons.cli.PosixParser) CommandLineParser(org.apache.commons.cli.CommandLineParser) ParseException(org.apache.commons.cli.ParseException) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig)
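
The finally block works because, in the HBase versions these examples are drawn from, the block cache behind CacheConfig is a process-wide singleton: a freshly constructed CacheConfig hands back the same instance the benchmark populated. A minimal sketch of the shutdown pattern, assuming that singleton behavior:

Configuration conf = HBaseConfiguration.create();
try {
    // ... run work that reads HFiles and populates the global block cache ...
} finally {
    BlockCache cache = new CacheConfig(conf).getBlockCache();
    if (cache != null) {
        // Stop eviction threads and release resources so the JVM can exit cleanly.
        cache.shutdown();
    }
}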

Example 14 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class TestBlocksRead, method initHRegion.

/**
   * Callers must afterward call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}.
   * @param tableName name of the table the region belongs to
   * @param callingMethod used to build a distinct on-disk path for each test
   * @param conf configuration to create the region with
   * @param family base name for the column families (one is added per bloom type)
   * @throws IOException if region creation fails
   * @return created and initialized region.
   */
private Region initHRegion(byte[] tableName, String callingMethod, Configuration conf, String family) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    // Add one column family per bloom filter type, with a tiny (1-byte) target
    // block size so essentially every value lands in its own block.
    for (BloomType bloomType : BLOOM_TYPE) {
        HColumnDescriptor familyDesc = new HColumnDescriptor(family + "_" + bloomType).setBlocksize(1).setBloomFilterType(bloomType);
        htd.addFamily(familyDesc);
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
    blockCache = new CacheConfig(conf).getBlockCache();
    return r;
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Path(org.apache.hadoop.fs.Path) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
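
Once blockCache is captured this way, tests can assert on read behavior through the cache statistics. A minimal sketch, not from the test, using the BlockCache.getStats() API from the same HBase era; the baseline variable in the commented assertion is hypothetical:

// After issuing gets/scans against the region, inspect block cache traffic.
CacheStats stats = blockCache.getStats();
long hits = stats.getHitCount();
long misses = stats.getMissCount();
// Example assertion shape, against a hypothetical pre-recorded baseline:
// assertEquals(missesBeforeWarmup, misses);  // no new misses after warm-up
System.out.println("block cache hits=" + hits + ", misses=" + misses);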

Example 15 with CacheConfig

Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

From class TestFSErrorsExposed, method testStoreFileScannerThrowsErrors.

/**
   * Injects errors into the pread calls of an on-disk file, and makes
   * sure those bubble up to the StoreFileScanner
   */
@Test
public void testStoreFileScannerThrowsErrors() throws IOException {
    Path hfilePath = new Path(new Path(util.getDataTestDir("internalScannerExposesErrors"), "regionname"), "familyname");
    HFileSystem hfs = (HFileSystem) util.getTestFileSystem();
    FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
    HFileSystem fs = new HFileSystem(faultyfs);
    CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
    HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
    StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs).withOutputDir(hfilePath).withFileContext(meta).build();
    TestStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
    StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf, BloomType.NONE);
    List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(Collections.singletonList(sf), false, true, false, false, // 0 is passed as readpoint because this test operates on StoreFile directly
    0);
    KeyValueScanner scanner = scanners.get(0);
    FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
    assertNotNull(inStream);
    scanner.seek(KeyValue.LOWESTKEY);
    // Do at least one successful read
    assertNotNull(scanner.next());
    faultyfs.startFaults();
    try {
        // Keep scanning; the injected fault should surface as an IOException.
        while (scanner.next() != null) {
            // consume and discard
        }
        fail("Scanner didn't throw after faults injected");
    } catch (IOException ioe) {
        LOG.info("Got expected exception", ioe);
        assertTrue(ioe.getMessage().contains("Could not iterate"));
    }
    scanner.close();
}
Also used : Path(org.apache.hadoop.fs.Path) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) IOException(java.io.IOException) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
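
FaultyFileSystem and FaultyInputStream are inner classes of TestFSErrorsExposed and are not reusable as-is. Below is a hypothetical, much-simplified fault injector in the same spirit, built on Hadoop's FilterFileSystem. Unlike the real FaultyFileSystem, which wraps already-open streams so faults fire on later preads, this sketch simply refuses new opens once faults are switched on:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical minimal fault injector; not the test's FaultyFileSystem.
class ThrowingFileSystem extends FilterFileSystem {
    private final AtomicBoolean faultsOn = new AtomicBoolean(false);

    ThrowingFileSystem(FileSystem backing) {
        super(backing);
    }

    /** After this call, every open() throws, simulating a failing disk. */
    void startFaults() {
        faultsOn.set(true);
    }

    @Override
    public FSDataInputStream open(Path p, int bufferSize) throws IOException {
        if (faultsOn.get()) {
            throw new IOException("injected fault opening " + p);
        }
        return super.open(p, bufferSize);
    }
}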

Aggregations

CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 63 usages
Path (org.apache.hadoop.fs.Path): 28
Test (org.junit.Test): 26
Configuration (org.apache.hadoop.conf.Configuration): 21
HFile (org.apache.hadoop.hbase.io.hfile.HFile): 21
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 21
FileSystem (org.apache.hadoop.fs.FileSystem): 20
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 20
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 15
KeyValue (org.apache.hadoop.hbase.KeyValue): 14
TableName (org.apache.hadoop.hbase.TableName): 14
Region (org.apache.hadoop.hbase.regionserver.Region): 13
Store (org.apache.hadoop.hbase.regionserver.Store): 13
Cell (org.apache.hadoop.hbase.Cell): 10
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 10
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 10
IOException (java.io.IOException): 9
CountDownLatch (java.util.concurrent.CountDownLatch): 8
FileStatus (org.apache.hadoop.fs.FileStatus): 8
HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner): 8