Example 1 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class RestoreSnapshotHelper, the method cloneHdfsRegions:

/**
   * Clone the specified regions. For each region, create a new region
   * and an HFileLink for each hfile.
   */
private HRegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec, final Map<String, SnapshotRegionManifest> regionManifests, final List<HRegionInfo> regions) throws IOException {
    if (regions == null || regions.isEmpty())
        return null;
    final Map<String, HRegionInfo> snapshotRegions = new HashMap<>(regions.size());
    // clone region info (change embedded tableName with the new one)
    HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
    for (int i = 0; i < clonedRegionsInfo.length; ++i) {
        // clone the region info from the snapshot region info
        HRegionInfo snapshotRegionInfo = regions.get(i);
        clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);
        // add the region name mapping between snapshot and cloned
        String snapshotRegionName = snapshotRegionInfo.getEncodedName();
        String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
        regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
        LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);
        // Add mapping between cloned region name and snapshot region info
        snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
    }
    // create the regions on disk
    ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {

        @Override
        public void fillRegion(final HRegion region) throws IOException {
            HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
            cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
        }
    });
    return clonedRegionsInfo;
}
Also used: HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HashMap(java.util.HashMap) ModifyRegionUtils(org.apache.hadoop.hbase.util.ModifyRegionUtils) IOException(java.io.IOException)
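
The per-region remapping is delegated to cloneRegionInfo, which is not shown above. As a minimal sketch, assuming the standard three-argument HRegionInfo constructor, that step amounts to rebuilding the descriptor under the clone table's name while keeping the snapshot region's key range (the class name below is hypothetical, and the real helper also carries over state such as split/offline flags):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;

public final class CloneRegionInfoSketch {
    // Rebuild the region descriptor under the clone table's name, keeping the
    // snapshot region's start/end keys so the clone has the same boundaries.
    static HRegionInfo cloneRegionInfo(TableName cloneTable, HRegionInfo snapshotRegion) {
        return new HRegionInfo(cloneTable,
                snapshotRegion.getStartKey(),
                snapshotRegion.getEndKey());
    }
}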

Example 2 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestZooKeeperTableArchiveClient, the method testArchivingOnSingleTable:

@Test(timeout = 300000)
public void testArchivingOnSingleTable() throws Exception {
    createArchiveDirectory();
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    Configuration conf = UTIL.getConfiguration();
    // setup the delegate
    Stoppable stop = new StoppableImplementation();
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<Region> regions = new ArrayList<>();
    regions.add(region);
    when(rss.getOnlineRegions()).thenReturn(regions);
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
    compactionCleaner.chore();
    // get the current hfiles in the archive directory
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
        throw new RuntimeException("Didn't archive any files!");
    }
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
    runCleaner(cleaner, finished, stop);
    // We know the cleaner ran, so now check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
    // but we still have the archive directory
    assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Also used: Path(org.apache.hadoop.fs.Path) BaseHFileCleanerDelegate(org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) Configuration(org.apache.hadoop.conf.Configuration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) ArrayList(java.util.ArrayList) Stoppable(org.apache.hadoop.hbase.Stoppable) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) CountDownLatch(java.util.concurrent.CountDownLatch) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) FileSystem(org.apache.hadoop.fs.FileSystem) Region(org.apache.hadoop.hbase.regionserver.Region) Test(org.junit.Test)
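
Both the cleaner and the compaction chore are wired to a Stoppable so the test can shut them down between assertions. The test uses the StoppableImplementation utility; a minimal sketch of an equivalent implementation, assuming only the two methods declared by org.apache.hadoop.hbase.Stoppable:

import org.apache.hadoop.hbase.Stoppable;

// Simple shared stop flag: once stop() is called, each chore observes
// isStopped() and winds down.
public class SimpleStoppable implements Stoppable {
    private volatile boolean stopped = false;

    @Override
    public void stop(String why) {
        this.stopped = true;
    }

    @Override
    public boolean isStopped() {
        return stopped;
    }
}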

Example 3 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestAdmin1, the method testSplitAndMergeWithReplicaTable:

@Test
public void testSplitAndMergeWithReplicaTable() throws Exception {
    // The test tries to directly split replica regions and directly merge replica regions. These
    // are not allowed. The test validates that. Then the test does a valid split/merge of allowed
    // regions.
    // Set up a table with 3 regions and replication set to 3
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setRegionReplication(3);
    byte[] cf = "f".getBytes();
    HColumnDescriptor hcd = new HColumnDescriptor(cf);
    desc.addFamily(hcd);
    byte[][] splitRows = new byte[2][];
    splitRows[0] = new byte[] { (byte) '4' };
    splitRows[1] = new byte[] { (byte) '7' };
    TEST_UTIL.getAdmin().createTable(desc, splitRows);
    List<HRegion> oldRegions;
    do {
        oldRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
        Thread.sleep(10);
    } while (oldRegions.size() != 9); // 3 regions * 3 replicas
    // write some data to the table
    Table ht = TEST_UTIL.getConnection().getTable(tableName);
    List<Put> puts = new ArrayList<>();
    byte[] qualifier = "c".getBytes();
    Put put = new Put(new byte[] { (byte) '1' });
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    put = new Put(new byte[] { (byte) '6' });
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    put = new Put(new byte[] { (byte) '8' });
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    ht.put(puts);
    ht.close();
    List<Pair<HRegionInfo, ServerName>> regions = MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName);
    boolean gotException = false;
    // Try splitting a replica region directly via the split API. Should fail.
    try {
        TEST_UTIL.getAdmin().splitRegion(regions.get(1).getFirst().getRegionName());
    } catch (IllegalArgumentException ex) {
        gotException = true;
    }
    assertTrue(gotException);
    gotException = false;
    // Try splitting a replica region via the regionserver (this API goes direct
    // to the regionserver, skipping any checks in the admin). Should fail.
    try {
        TEST_UTIL.getHBaseAdmin().split(regions.get(1).getSecond(), regions.get(1).getFirst(), new byte[] { (byte) '1' });
    } catch (IOException ex) {
        gotException = true;
    }
    assertTrue(gotException);
    gotException = false;
    // Try merging a replica with another. Should fail.
    try {
        TEST_UTIL.getHBaseAdmin().mergeRegionsSync(regions.get(1).getFirst().getEncodedNameAsBytes(), regions.get(2).getFirst().getEncodedNameAsBytes(), true);
    } catch (IllegalArgumentException m) {
        gotException = true;
    }
    assertTrue(gotException);
    // Try going to the master directly (that will skip the check in admin)
    try {
        byte[][] nameofRegionsToMerge = new byte[2][];
        nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
        nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
        MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
        ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster().mergeTableRegions(null, request);
    } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException m) {
        Throwable t = m.getCause();
        do {
            if (t instanceof MergeRegionException) {
                gotException = true;
                break;
            }
            t = t.getCause();
        } while (t != null);
    }
    assertTrue(gotException);
}
Also used: HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) MergeTableRegionsRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) MergeRegionException(org.apache.hadoop.hbase.exceptions.MergeRegionException) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
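
All of the rejected operations fail for the same reason: only the primary (default) replica of a region may be split or merged. A sketch of the kind of guard involved, assuming RegionReplicaUtil.isDefaultReplica from org.apache.hadoop.hbase.client; the class and method names below are illustrative, not the actual admin code:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

public final class ReplicaGuardSketch {
    // Reject split/merge requests that target a non-primary replica, mirroring
    // the IllegalArgumentException paths the test expects.
    static void checkNotReplica(HRegionInfo hri) {
        if (!RegionReplicaUtil.isDefaultReplica(hri)) {
            throw new IllegalArgumentException("Can't split/merge replica region "
                    + hri.getEncodedName() + " (replicaId=" + hri.getReplicaId() + ")");
        }
    }
}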

Example 4 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestAdmin1, the method testHFileReplication:

/*
   * Test DFS replication for column families, where one CF has the default
   * replication (3) and the other is set to 1.
   */
@Test(timeout = 300000)
public void testHFileReplication() throws Exception {
    final TableName tableName = TableName.valueOf(this.name.getMethodName());
    String fn1 = "rep1";
    HColumnDescriptor hcd1 = new HColumnDescriptor(fn1);
    hcd1.setDFSReplication((short) 1);
    String fn = "defaultRep";
    HColumnDescriptor hcd = new HColumnDescriptor(fn);
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    htd.addFamily(hcd1);
    Table table = TEST_UTIL.createTable(htd, null);
    TEST_UTIL.waitTableAvailable(tableName);
    Put p = new Put(Bytes.toBytes("defaultRep_rk"));
    byte[] q1 = Bytes.toBytes("q1");
    byte[] v1 = Bytes.toBytes("v1");
    p.addColumn(Bytes.toBytes(fn), q1, v1);
    List<Put> puts = new ArrayList<>(2);
    puts.add(p);
    p = new Put(Bytes.toBytes("rep1_rk"));
    p.addColumn(Bytes.toBytes(fn1), q1, v1);
    puts.add(p);
    try {
        table.put(puts);
        admin.flush(tableName);
        List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(tableName);
        for (HRegion r : regions) {
            Store store = r.getStore(Bytes.toBytes(fn));
            for (StoreFile sf : store.getStorefiles()) {
                assertTrue(sf.toString().contains(fn));
                assertTrue("Column family " + fn + " should have 3 copies", FSUtils.getDefaultReplication(TEST_UTIL.getTestFileSystem(), sf.getPath()) == (sf.getFileInfo().getFileStatus().getReplication()));
            }
            store = r.getStore(Bytes.toBytes(fn1));
            for (StoreFile sf : store.getStorefiles()) {
                assertTrue(sf.toString().contains(fn1));
                assertTrue("Column family " + fn1 + " should have only 1 copy", 1 == sf.getFileInfo().getFileStatus().getReplication());
            }
        }
    } finally {
        if (admin.isTableEnabled(tableName)) {
            this.admin.disableTable(tableName);
            this.admin.deleteTable(tableName);
        }
    }
}
Also used: HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) Store(org.apache.hadoop.hbase.regionserver.Store) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) Test(org.junit.Test)
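
Stripped of the assertions, the setup reduces to one descriptor with two families at different DFS replication levels. A condensed sketch of that setup, using only calls that appear in the test itself (the class and method names are illustrative):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public final class DfsReplicationSketch {
    // One family inherits the DFS default replication (typically 3); the other
    // is pinned to a single copy via setDFSReplication.
    static HTableDescriptor mixedReplicationTable(TableName name) {
        HColumnDescriptor defaultRep = new HColumnDescriptor("defaultRep");
        HColumnDescriptor singleRep = new HColumnDescriptor("rep1");
        singleRep.setDFSReplication((short) 1); // store this family's hfiles once
        HTableDescriptor htd = new HTableDescriptor(name);
        htd.addFamily(defaultRep);
        htd.addFamily(singleRep);
        return htd;
    }
}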

Example 5 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From the class TestHFileArchiving, the method testDeleteRegionWithNoStoreFiles:

/**
   * Test that the region directory is removed when we archive a region that has
   * no store files but still has hidden files.
   * @throws Exception
   */
@Test
public void testDeleteRegionWithNoStoreFiles() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    UTIL.createTable(tableName, TEST_FAM);
    // get the current store files for the region
    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    // make sure there are some files in the regiondir
    Path rootDir = FSUtils.getRootDir(fs.getConf());
    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
    FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
    Assert.assertNotNull("No files in the region directory", regionFiles);
    if (LOG.isDebugEnabled()) {
        List<Path> files = new ArrayList<>();
        for (FileStatus file : regionFiles) {
            files.add(file.getPath());
        }
        LOG.debug("Current files:" + files);
    }
    // delete the visible folders so we just have hidden files/folders
    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
    PathFilter nonHidden = new PathFilter() {

        @Override
        public boolean accept(Path file) {
            return dirFilter.accept(file) && !file.getName().startsWith(".");
        }
    };
    FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
    for (FileStatus store : storeDirs) {
        LOG.debug("Deleting store for test");
        fs.delete(store.getPath(), true);
    }
    // then archive the region
    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
    // and check to make sure the region directory got deleted
    assertFalse("Region directory (" + regionDir + "), still exists.", fs.exists(regionDir));
    UTIL.deleteTable(tableName);
}
Also used: Path(org.apache.hadoop.fs.Path) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) PathFilter(org.apache.hadoop.fs.PathFilter) FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) ArrayList(java.util.ArrayList) Test(org.junit.Test)
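
The dot-prefix check in the anonymous PathFilter above encodes HBase's on-disk convention: dot-prefixed entries in a region directory (such as .regioninfo or .tmp) are hidden metadata rather than store data. As a standalone sketch, the same filter can be written as:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

// Accept only visible (non-hidden) entries; combined with a directory filter,
// this selects exactly the store directories the test deletes.
public class NonHiddenFilter implements PathFilter {
    @Override
    public boolean accept(Path file) {
        return !file.getName().startsWith(".");
    }
}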

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 148
Test (org.junit.Test): 88
Put (org.apache.hadoop.hbase.client.Put): 56
Path (org.apache.hadoop.fs.Path): 40
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40
Scan (org.apache.hadoop.hbase.client.Scan): 37
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 36
Cell (org.apache.hadoop.hbase.Cell): 35
TableId (co.cask.cdap.data2.util.TableId): 32
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 28
IOException (java.io.IOException): 26
WAL (org.apache.hadoop.hbase.wal.WAL): 25
FileSystem (org.apache.hadoop.fs.FileSystem): 24
ArrayList (java.util.ArrayList): 22
TableName (org.apache.hadoop.hbase.TableName): 22
Configuration (org.apache.hadoop.conf.Configuration): 21
Result (org.apache.hadoop.hbase.client.Result): 21
Region (org.apache.hadoop.hbase.regionserver.Region): 21
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 19
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 19