Example 11 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class HBaseFsck, the method rebuildMeta:

/**
   * Rebuilds meta from information in hdfs/fs.  Depends on configuration settings passed into
   * hbck constructor to point to a particular fs/dir. Assumes HBase is OFFLINE.
   *
   * @param fix flag that determines if method should attempt to fix holes
   * @return true if successful, false if attempt failed.
   */
public boolean rebuildMeta(boolean fix) throws IOException, InterruptedException {
    // TODO check to make sure hbase is offline. (or at least the table
    // currently being worked on is off line)
    // Determine what's on HDFS
    LOG.info("Loading HBase regioninfo from HDFS...");
    // populating regioninfo table.
    loadHdfsRegionDirs();
    int errs = errors.getErrorList().size();
    // update tableInfos based on region info in fs.
    tablesInfo = loadHdfsRegionInfos();
    checkHdfsIntegrity(false, false);
    // make sure ok.
    if (errors.getErrorList().size() != errs) {
        // While in error state, iterate until no more fixes possible
        while (true) {
            fixes = 0;
            suggestFixes(tablesInfo);
            errors.clear();
            // update tableInfos based on region info in fs.
            loadHdfsRegionInfos();
            checkHdfsIntegrity(shouldFixHdfsHoles(), shouldFixHdfsOverlaps());
            int errCount = errors.getErrorList().size();
            if (fixes == 0) {
                if (errCount > 0) {
                    // failed to fix problems.
                    return false;
                } else {
                    // no fixes and no problems? drop out and fix stuff!
                    break;
                }
            }
        }
    }
    // we can rebuild, move old meta out of the way and start
    LOG.info("HDFS regioninfo's seems good.  Sidelining old hbase:meta");
    Path backupDir = sidelineOldMeta();
    LOG.info("Creating new hbase:meta");
    String walFactoryId = "hbck-meta-recovery-" + RandomStringUtils.randomNumeric(8);
    HRegion meta = createNewMeta(walFactoryId);
    // populate meta
    List<Put> puts = generatePuts(tablesInfo);
    if (puts == null) {
        LOG.fatal("Problem encountered when creating new hbase:meta entries.  " + "You may need to restore the previously sidelined hbase:meta");
        return false;
    }
    meta.batchMutate(puts.toArray(new Put[puts.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
    meta.close();
    if (meta.getWAL() != null) {
        meta.getWAL().close();
    }
    // clean up the temporary hbck meta recovery WAL directory
    removeHBCKMetaRecoveryWALDir(walFactoryId);
    LOG.info("Success! hbase:meta table rebuilt.");
    LOG.info("Old hbase:meta is moved into " + backupDir);
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Put(org.apache.hadoop.hbase.client.Put)
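A minimal driver sketch for calling this method (hypothetical invocation code, not part of the example above; it assumes the cluster is offline, as the javadoc requires, and that the Configuration already points hbck at the right fs/dir):

Configuration conf = HBaseConfiguration.create();
// The HBaseFsck(Configuration) constructor may throw IOException or ClassNotFoundException.
HBaseFsck fsck = new HBaseFsck(conf);
// Pass true to let rebuildMeta attempt to fix holes it finds.
boolean rebuilt = fsck.rebuildMeta(true);
if (!rebuilt) {
    // On failure, the log indicates whether the old hbase:meta was already
    // sidelined and may need to be restored by hand.
}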

Example 12 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class HBaseFsckRepair, the method createHDFSRegionDir:

/**
   * Creates, flushes, and closes a new region.
   */
public static HRegion createHDFSRegionDir(Configuration conf, HRegionInfo hri, HTableDescriptor htd) throws IOException {
    // Create HRegion
    Path root = FSUtils.getRootDir(conf);
    HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
    // Close the new region to flush to disk. Close log file too.
    region.close();
    return region;
}
Also used : Path(org.apache.hadoop.fs.Path) HRegion(org.apache.hadoop.hbase.regionserver.HRegion)
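A hedged usage sketch (the table name, column family, and region boundaries below are made up for illustration):

Configuration conf = HBaseConfiguration.create();
TableName tableName = TableName.valueOf("example_table"); // hypothetical table
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("cf")); // hypothetical family
HRegionInfo hri = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
// The returned HRegion is already closed; its directory now exists on HDFS.
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, hri, htd);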

Example 13 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestAsyncTableGetMultiThreaded, the method test:

@Test
public void test() throws IOException, InterruptedException, ExecutionException {
    int numThreads = 20;
    AtomicBoolean stop = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads, Threads.newDaemonThreadFactory("TestAsyncGet-"));
    List<Future<?>> futures = new ArrayList<>();
    IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
        run(stop);
        return null;
    })));
    Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
    Admin admin = TEST_UTIL.getAdmin();
    for (byte[] splitPoint : SPLIT_KEYS) {
        admin.split(TABLE_NAME, splitPoint);
        for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME)) {
            region.compact(true);
        }
        Thread.sleep(5000);
        admin.balancer(true);
        Thread.sleep(5000);
        ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
        ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
            .map(t -> t.getRegionServer().getServerName())
            .filter(s -> !s.equals(metaServer))
            .findAny()
            .get();
        admin.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), Bytes.toBytes(newMetaServer.getServerName()));
        Thread.sleep(5000);
    }
    stop.set(true);
    executor.shutdown();
    for (Future<?> future : futures) {
        future.get();
    }
}
Also used : IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) BeforeClass(org.junit.BeforeClass) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Random(java.util.Random) ClientTests(org.apache.hadoop.hbase.testclassification.ClientTests) ArrayList(java.util.ArrayList) Future(java.util.concurrent.Future) ExecutorService(java.util.concurrent.ExecutorService) Threads(org.apache.hadoop.hbase.util.Threads) Bytes(org.apache.hadoop.hbase.util.Bytes) TABLES_ON_MASTER(org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.TABLES_ON_MASTER) AfterClass(org.junit.AfterClass) CompactingMemStore(org.apache.hadoop.hbase.regionserver.CompactingMemStore) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) org.apache.hadoop.hbase(org.apache.hadoop.hbase) IOUtils(org.apache.commons.io.IOUtils) List(java.util.List) ByteBufferPool(org.apache.hadoop.hbase.io.ByteBufferPool) HBASE_CLIENT_META_OPERATION_TIMEOUT(org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals)
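The run(stop) worker submitted to each thread is elided above. A minimal sketch of what such a reader loop might look like, written against the synchronous client API for brevity (the real test uses the async client; COUNT, FAMILY, and QUALIFIER are assumed fixture constants, and the row-key format is an assumption):

private void run(AtomicBoolean stop) throws IOException {
    try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
        while (!stop.get()) {
            for (int i = 0; i < COUNT; i++) {
                // Assumes each row stores its own index as an int value.
                Result result = table.get(new Get(Bytes.toBytes(String.format("%03d", i))));
                assertEquals(i, Bytes.toInt(result.getValue(FAMILY, QUALIFIER)));
            }
        }
    }
}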

Example 14 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestFromClientSide3, the method assertNoLocks:

private static void assertNoLocks(final TableName tableName) throws IOException, InterruptedException {
    HRegion region = (HRegion) find(tableName);
    assertEquals(0, region.getLockedRows().size());
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion)
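The find(tableName) helper is elided here; a plausible sketch, assuming the table has a single region in the mini cluster:

private static Region find(final TableName tableName) {
    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
    assertEquals(1, regions.size());
    return regions.get(0);
}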

Example 15 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestCacheOnWrite, the method testNotCachingDataBlocksDuringCompactionInternals:

private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) throws IOException, InterruptedException {
    // TODO: need to change this test if we add a cache size threshold for
    // compactions, or if we implement some other kind of intelligent logic for
    // deciding what blocks to cache-on-write on compaction.
    final String table = "CompactionCacheOnWrite";
    final String cf = "myCF";
    final byte[] cfBytes = Bytes.toBytes(cf);
    final int maxVersions = 3;
    Region region = TEST_UTIL.createTestRegion(table,
        new HColumnDescriptor(cf)
            .setCompressionType(compress)
            .setBloomFilterType(BLOOM_TYPE)
            .setMaxVersions(maxVersions)
            .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()));
    int rowIdx = 0;
    long ts = EnvironmentEdgeManager.currentTime();
    for (int iFile = 0; iFile < 5; ++iFile) {
        for (int iRow = 0; iRow < 500; ++iRow) {
            String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + iRow;
            Put p = new Put(Bytes.toBytes(rowStr));
            ++rowIdx;
            for (int iCol = 0; iCol < 10; ++iCol) {
                String qualStr = "col" + iCol;
                String valueStr = "value_" + rowStr + "_" + qualStr;
                for (int iTS = 0; iTS < 5; ++iTS) {
                    if (useTags) {
                        Tag t = new ArrayBackedTag((byte) 1, "visibility");
                        Tag[] tags = new Tag[1];
                        tags[0] = t;
                        KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr), HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
                        p.add(kv);
                    } else {
                        p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
                    }
                }
            }
            p.setDurability(Durability.ASYNC_WAL);
            region.put(p);
        }
        region.flush(true);
    }
    clearBlockCache(blockCache);
    assertEquals(0, blockCache.getBlockCount());
    region.compact(false);
    LOG.debug("compactStores() returned");
    for (CachedBlock block : blockCache) {
        assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
        assertNotEquals(BlockType.DATA, block.getBlockType());
    }
    ((HRegion) region).close();
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) Put(org.apache.hadoop.hbase.client.Put) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) Tag(org.apache.hadoop.hbase.Tag)
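This test asserts that compaction does not populate the block cache with DATA or ENCODED_DATA blocks. For contrast, cache-on-write for ordinary writes is toggled per column family; a minimal sketch using the same HColumnDescriptor API as above:

HColumnDescriptor family = new HColumnDescriptor("myCF")
    .setCacheDataOnWrite(true)     // cache DATA blocks as they are written
    .setCacheIndexesOnWrite(true)  // also cache index blocks on write
    .setCacheBloomsOnWrite(true);  // and Bloom filter blocks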

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 148
Test (org.junit.Test): 88
Put (org.apache.hadoop.hbase.client.Put): 56
Path (org.apache.hadoop.fs.Path): 40
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40
Scan (org.apache.hadoop.hbase.client.Scan): 37
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 36
Cell (org.apache.hadoop.hbase.Cell): 35
TableId (co.cask.cdap.data2.util.TableId): 32
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 28
IOException (java.io.IOException): 26
WAL (org.apache.hadoop.hbase.wal.WAL): 25
FileSystem (org.apache.hadoop.fs.FileSystem): 24
ArrayList (java.util.ArrayList): 22
TableName (org.apache.hadoop.hbase.TableName): 22
Configuration (org.apache.hadoop.conf.Configuration): 21
Result (org.apache.hadoop.hbase.client.Result): 21
Region (org.apache.hadoop.hbase.regionserver.Region): 21
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 19
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 19