
Example 46 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestAssignmentManagerOnCluster, method testAssignDisabledRegion.

/**
   * Test force-unassign/assign of a region of a disabled table.
   */
@Test(timeout = 60000)
public void testAssignDisabledRegion() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    MyMaster master = null;
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(FAMILY));
        admin.createTable(desc);
        Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
        HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
        MetaTableAccessor.addRegionToMeta(meta, hri);
        // Assign the region
        master = (MyMaster) cluster.getMaster();
        AssignmentManager am = master.getAssignmentManager();
        RegionStates regionStates = am.getRegionStates();
        assertTrue(TEST_UTIL.assignRegion(hri));
        // Disable the table
        admin.disableTable(tableName);
        assertTrue(regionStates.isRegionOffline(hri));
        // You can't assign a disabled region
        am.assign(hri, true);
        assertTrue(regionStates.isRegionOffline(hri));
        // You can't unassign a disabled region either
        am.unassign(hri);
        assertTrue(regionStates.isRegionOffline(hri));
    } finally {
        TEST_UTIL.deleteTable(tableName);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
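
The test above drives assignment through master internals. For orientation, here is a minimal standalone sketch (not taken from the HBase source) of the same create/disable lifecycle through the public Admin API; the connection setup and the table name "demo" are assumptions for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisabledTableSketch {
    public static void main(String[] args) throws Exception {
        // Reads hbase-site.xml from the classpath; assumes a reachable cluster.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName tableName = TableName.valueOf("demo"); // hypothetical table name
            HTableDescriptor desc = new HTableDescriptor(tableName);
            desc.addFamily(new HColumnDescriptor("f")); // single family, as in the test
            admin.createTable(desc);
            // Disabling takes the table's regions offline; the test above asserts
            // that assign/unassign requests against such regions stay no-ops.
            admin.disableTable(tableName);
            admin.deleteTable(tableName); // deletion requires the table to be disabled first
        }
    }
}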

Example 47 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestCatalogJanitor, method testCleanParent.

@Test
public void testCleanParent() throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    MasterServices services = new MockMasterServices(htu);
    try {
        CatalogJanitor janitor = new CatalogJanitor(services);
        // Create regions.
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
        htd.addFamily(new HColumnDescriptor("f"));
        HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
        HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
        HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
        // Test that when both daughter regions are in place, we do not
        // remove the parent.
        Result r = createResult(parent, splita, splitb);
        // Add a reference under splitA directory so we don't clear out the parent.
        Path rootdir = services.getMasterFileSystem().getRootDir();
        Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
        Path storedir = HStore.getStoreHomedir(tabledir, splita, htd.getColumnFamilies()[0].getName());
        Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
        long now = System.currentTimeMillis();
        // Reference name has this format: StoreFile#REF_NAME_PARSER
        Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
        FileSystem fs = services.getMasterFileSystem().getFileSystem();
        Path path = ref.write(fs, p);
        assertTrue(fs.exists(path));
        assertFalse(janitor.cleanParent(parent, r));
        // Remove the reference file and try again.
        assertTrue(fs.delete(p, true));
        assertTrue(janitor.cleanParent(parent, r));
    } finally {
        services.stop("shutdown");
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Path(org.apache.hadoop.fs.Path) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Reference(org.apache.hadoop.hbase.io.Reference) FileSystem(org.apache.hadoop.fs.FileSystem) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Test(org.junit.Test)
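
Note that HColumnDescriptor and HTableDescriptor are the pre-2.0 descriptor classes; HBase 2.0 deprecated them in favor of immutable descriptors built through builders. A sketch of the equivalent builder-based construction of the descriptor used above; the table name is a placeholder.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorBuilderSketch {
    // Builder-based equivalent of:
    //   HTableDescriptor htd = new HTableDescriptor(tableName);
    //   htd.addFamily(new HColumnDescriptor("f"));
    static TableDescriptor build() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testCleanParent")) // placeholder table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
            .build();
    }
}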

Example 48 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestCatalogJanitor, method testDuplicateHFileResolution.

/**
   * Test that archiving a store file with the same name as one already backed
   * up causes the previously archived file to be renamed as a timestamped backup.
   */
@Test
public void testDuplicateHFileResolution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    MasterServices services = new MockMasterServices(htu);
    // create the janitor
    CatalogJanitor janitor = new CatalogJanitor(services);
    // Create regions.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, we do not
    // remove the parent.
    Result r = createResult(parent, splita, splitb);
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    Path rootdir = services.getMasterFileSystem().getRootDir();
    // have to set the root directory since we use it in HFileDisposer to figure out how to get to
    // the archive directory. Otherwise, it just seems to pick the first root directory it can find
    // (so the single test passes, but when the full suite is run, things get borked).
    FSUtils.setRootDir(fs.getConf(), rootdir);
    Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
    Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
    System.out.println("Old root:" + rootdir);
    System.out.println("Old table:" + tabledir);
    System.out.println("Old store:" + storedir);
    Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent, tabledir, htd.getColumnFamilies()[0].getName());
    System.out.println("Old archive:" + storeArchive);
    // enable archiving, make sure that files get archived
    addMockStoreFiles(2, services, storedir);
    // get the current store files for comparison
    FileStatus[] storeFiles = fs.listStatus(storedir);
    // do the cleaning of the parent
    assertTrue(janitor.cleanParent(parent, r));
    // and now check to make sure that the files have actually been archived
    FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
    // now add store files with the same names as before to check backup
    // enable archiving, make sure that files get archived
    addMockStoreFiles(2, services, storedir);
    // do the cleaning of the parent
    assertTrue(janitor.cleanParent(parent, r));
    // and now check to make sure that the files have actually been archived
    archivedStoreFiles = fs.listStatus(storeArchive);
    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
    // cleanup
    services.stop("Test finished");
    janitor.cancel(true);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Test(org.junit.Test)
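
The janitor tests only need the bare single-argument constructor, but HColumnDescriptor also carries the per-family tuning knobs most production schemas set. A sketch with illustrative values (not recommendations); the setters chain because each returns the descriptor itself.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.compress.Compression;

public class FamilyTuningSketch {
    static HColumnDescriptor tunedFamily() {
        return new HColumnDescriptor("f")
            .setMaxVersions(3)            // retain up to three versions per cell
            .setTimeToLive(24 * 60 * 60)  // expire cells after one day (seconds)
            .setBlocksize(64 * 1024)      // HFile block size in bytes
            .setCompressionType(Compression.Algorithm.SNAPPY); // needs Snappy on the servers
    }
}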

Example 49 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestDistributedLogSplitting, method testSameVersionUpdatesRecoveryWithCompaction.

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testSameVersionUpdatesRecoveryWithCompaction() throws Exception {
    LOG.info("testSameVersionUpdatesRecoveryWithCompaction");
    conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 30 * 1024);
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    startCluster(NUM_RS);
    final AtomicLong sequenceId = new AtomicLong(100);
    final int NUM_REGIONS_TO_CREATE = 40;
    final int NUM_LOG_LINES = 2000;
    // turn off load balancing to prevent regions from moving around otherwise
    // they will consume recovered.edits
    master.balanceSwitch(false);
    List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
    try {
        List<HRegionInfo> regions = null;
        HRegionServer hrs = null;
        for (int i = 0; i < NUM_RS; i++) {
            boolean isCarryingMeta = false;
            hrs = rsts.get(i).getRegionServer();
            regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
            for (HRegionInfo region : regions) {
                if (region.isMetaRegion()) {
                    isCarryingMeta = true;
                    break;
                }
            }
            if (isCarryingMeta) {
                continue;
            }
            break;
        }
        LOG.info("#regions = " + regions.size());
        Iterator<HRegionInfo> it = regions.iterator();
        while (it.hasNext()) {
            HRegionInfo region = it.next();
            if (region.isMetaTable() || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
                it.remove();
            }
        }
        if (regions.isEmpty())
            return;
        HRegionInfo curRegionInfo = regions.get(0);
        byte[] startRow = curRegionInfo.getStartKey();
        if (startRow == null || startRow.length == 0) {
            startRow = new byte[] { 0, 0, 0, 0, 1 };
        }
        byte[] row = Bytes.incrementBytes(startRow, 1);
        // use the last 5 bytes because HBaseTestingUtility.createMultiRegions uses 5-byte keys
        row = Arrays.copyOfRange(row, 3, 8);
        long value = 0;
        final TableName tableName = TableName.valueOf(name.getMethodName());
        byte[] family = Bytes.toBytes("family");
        byte[] qualifier = Bytes.toBytes("c1");
        long timeStamp = System.currentTimeMillis();
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(family));
        final WAL wal = hrs.getWAL(curRegionInfo);
        for (int i = 0; i < NUM_LOG_LINES; i += 1) {
            WALEdit e = new WALEdit();
            value++;
            e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
            wal.append(curRegionInfo, new WALKey(curRegionInfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), e, true);
        }
        wal.sync();
        wal.shutdown();
        // wait for the abort to complete
        this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
        // verify we got the last value
        LOG.info("Verification Starts...");
        Get g = new Get(row);
        Result r = ht.get(g);
        long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
        assertEquals(value, theStoredVal);
        // after flush & compaction
        LOG.info("Verification after flush...");
        TEST_UTIL.getAdmin().flush(tableName);
        TEST_UTIL.getAdmin().compact(tableName);
        // wait for the compaction to complete
        TEST_UTIL.waitFor(30000, 200, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return (TEST_UTIL.getAdmin().getCompactionState(tableName) == CompactionState.NONE);
            }
        });
        r = ht.get(g);
        theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
        assertEquals(value, theStoredVal);
    } finally {
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) KeyValue(org.apache.hadoop.hbase.KeyValue) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) OperationConflictException(org.apache.hadoop.hbase.exceptions.OperationConflictException) RegionInRecoveryException(org.apache.hadoop.hbase.exceptions.RegionInRecoveryException) IOException(java.io.IOException) TimeoutException(java.util.concurrent.TimeoutException) RetriesExhaustedWithDetailsException(org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) AtomicLong(java.util.concurrent.atomic.AtomicLong) Get(org.apache.hadoop.hbase.client.Get) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Waiter(org.apache.hadoop.hbase.Waiter) Ignore(org.junit.Ignore) Test(org.junit.Test)
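
The verification step above reduces to a plain Get on the family/qualifier pair that was written through the WAL. A minimal sketch of that write-then-read-back pattern against an already-open Table; the 42L value is arbitrary.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackSketch {
    // Assumes 'table' was obtained from Connection.getTable(...) and is open.
    static long writeAndReadBack(Table table, byte[] row) throws IOException {
        byte[] family = Bytes.toBytes("family");
        byte[] qualifier = Bytes.toBytes("c1");
        table.put(new Put(row).addColumn(family, qualifier, Bytes.toBytes(42L)));
        Result r = table.get(new Get(row));
        return Bytes.toLong(r.getValue(family, qualifier)); // 42 once the write is visible
    }
}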

Example 50 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestMaster, method testMoveThrowsUnknownRegionException.

@Test
public void testMoveThrowsUnknownRegionException() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor("value");
    htd.addFamily(hcd);
    admin.createTable(htd, null);
    try {
        HRegionInfo hri = new HRegionInfo(tableName, Bytes.toBytes("A"), Bytes.toBytes("Z"));
        admin.move(hri.getEncodedNameAsBytes(), null);
        fail("Region should not be moved since it is fake");
    } catch (IOException ioe) {
        assertTrue(ioe instanceof UnknownRegionException);
    } finally {
        TEST_UTIL.deleteTable(tableName);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) UnknownRegionException(org.apache.hadoop.hbase.UnknownRegionException) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
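
The test hands Admin.move an encoded name for a region that was never created, which is why UnknownRegionException is expected. For contrast, a sketch of moving a real region by looking it up first; passing null as the destination lets the master pick a target server.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class MoveRegionSketch {
    // Assumes 'admin' is an open Admin and 'tableName' names an existing table.
    static void moveFirstRegion(Admin admin, TableName tableName) throws IOException {
        List<HRegionInfo> regions = admin.getTableRegions(tableName);
        if (!regions.isEmpty()) {
            // null destination: the master chooses a region server.
            admin.move(regions.get(0).getEncodedNameAsBytes(), null);
        }
    }
}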

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 679
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 561
Test (org.junit.Test): 358
TableName (org.apache.hadoop.hbase.TableName): 200
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 137
Put (org.apache.hadoop.hbase.client.Put): 132
Table (org.apache.hadoop.hbase.client.Table): 118
IOException (java.io.IOException): 112
Admin (org.apache.hadoop.hbase.client.Admin): 112
Path (org.apache.hadoop.fs.Path): 81
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 74
ArrayList (java.util.ArrayList): 66
Configuration (org.apache.hadoop.conf.Configuration): 65
Connection (org.apache.hadoop.hbase.client.Connection): 52
Scan (org.apache.hadoop.hbase.client.Scan): 50
Result (org.apache.hadoop.hbase.client.Result): 45
FileSystem (org.apache.hadoop.fs.FileSystem): 44
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 42
Connection (java.sql.Connection): 41
Properties (java.util.Properties): 38