Search in sources:

Example 91 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestChangingEncoding, the method prepareTest:

/**
 * Creates a fresh test table named after the given test id, with a single
 * column family {@code CF}, and resets the per-test batch counter.
 *
 * @param testId suffix used to build a unique table name
 * @throws IOException if the table cannot be created
 */
private void prepareTest(String testId) throws IOException {
    tableName = TableName.valueOf("test_table_" + testId);
    hcd = new HColumnDescriptor(CF);
    HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
    tableDescriptor.addFamily(hcd);
    // Admin is closed immediately after table creation; the connection is shared.
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
        admin.createTable(tableDescriptor);
    }
    numBatchesWritten = 0;
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 92 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestRegionPlacement, the method verifyRegionServerUpdated:

/**
   * Verify all the online region servers has been updated to the
   * latest assignment plan
   * @param plan
   * @throws IOException
   */
/**
 * Verifies that every online region server has been updated to the
 * latest assignment plan: user regions must report exactly the favored
 * nodes from the plan, while regions absent from the plan (ROOT and
 * hbase:meta) must report no favored nodes at all.
 *
 * @param plan the favored-nodes assignment plan to check against
 * @throws IOException if reading region state from a region server fails
 */
private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException {
    // Verify all region servers contain the correct favored nodes information
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    for (int i = 0; i < SLAVES; i++) {
        HRegionServer rs = cluster.getRegionServer(i);
        for (Region region : rs.getOnlineRegions(TableName.valueOf("testRegionAssignment"))) {
            InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion(region.getRegionInfo().getEncodedName());
            List<ServerName> favoredServerList = plan.getAssignmentMap().get(region.getRegionInfo());
            // except for hbase:meta and ROOT
            if (favoredServerList == null) {
                HTableDescriptor desc = region.getTableDesc();
                // Verify they are ROOT and hbase:meta regions since no favored nodes
                assertNull(favoredSocketAddress);
                // Reuse the descriptor fetched above instead of calling getTableDesc() twice.
                assertTrue("User region " + desc.getTableName() + " should have favored nodes", (desc.isRootRegion() || desc.isMetaRegion()));
            } else {
                // For user region, the favored nodes in the region server should be
                // identical to favored nodes in the assignmentPlan
                assertTrue("Favored node count mismatch for region " + region.getRegionInfo().getRegionNameAsString() + ": server reports " + favoredSocketAddress.length + " but plan has " + favoredServerList.size(), favoredSocketAddress.length == favoredServerList.size());
                assertTrue("Plan should contain at least one favored node", favoredServerList.size() > 0);
                for (int j = 0; j < favoredServerList.size(); j++) {
                    InetSocketAddress addrFromRS = favoredSocketAddress[j];
                    InetSocketAddress addrFromPlan = InetSocketAddress.createUnresolved(favoredServerList.get(j).getHostname(), favoredServerList.get(j).getPort());
                    assertNotNull(addrFromRS);
                    assertNotNull(addrFromPlan);
                    assertTrue("Region server " + rs.getServerName().getHostAndPort() + " has the " + positions[j] + " for region " + region.getRegionInfo().getRegionNameAsString() + " is " + addrFromRS + " which is inconsistent with the plan " + addrFromPlan, addrFromRS.equals(addrFromPlan));
                }
            }
        }
    }
}
Also used : InetSocketAddress(java.net.InetSocketAddress) ServerName(org.apache.hadoop.hbase.ServerName) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Region(org.apache.hadoop.hbase.regionserver.Region) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 93 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestSimpleRegionNormalizerOnCluster, the method testRegionNormalizationSplitOnCluster:

/**
 * Exercises the region normalizer's split path on a mini cluster: creates a
 * five-region table with skewed data sizes, enables normalization, triggers
 * a normalizer run, and waits for the expected outcome.
 *
 * @param limitedByQuota when true, the table lives in a namespace whose
 *        region quota blocks the split, and we assert the split was skipped;
 *        when false, we wait for the split daughters to appear
 * @throws Exception on any cluster or admin failure
 */
void testRegionNormalizationSplitOnCluster(boolean limitedByQuota) throws Exception {
    TableName TABLENAME;
    if (limitedByQuota) {
        String nsp = "np2";
        NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5").addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
        admin.createNamespace(nspDesc);
        TABLENAME = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + name.getMethodName());
    } else {
        TABLENAME = TableName.valueOf(name.getMethodName());
    }
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();
    try (Table ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILYNAME, 5)) {
        // Need to get sorted list of regions here
        List<HRegion> generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(TABLENAME);
        Collections.sort(generatedRegions, new Comparator<HRegion>() {

            @Override
            public int compare(HRegion o1, HRegion o2) {
                return o1.getRegionInfo().compareTo(o2.getRegionInfo());
            }
        });
        // Skew the data: the last region is much larger than the average,
        // which is what should make the normalizer decide to split it.
        HRegion region = generatedRegions.get(0);
        generateTestData(region, 1);
        region.flush(true);
        region = generatedRegions.get(1);
        generateTestData(region, 1);
        region.flush(true);
        region = generatedRegions.get(2);
        generateTestData(region, 2);
        region.flush(true);
        region = generatedRegions.get(3);
        generateTestData(region, 2);
        region.flush(true);
        region = generatedRegions.get(4);
        generateTestData(region, 5);
        region.flush(true);
    }
    HTableDescriptor htd = admin.getTableDescriptor(TABLENAME);
    htd.setNormalizationEnabled(true);
    admin.modifyTable(TABLENAME, htd);
    admin.flush(TABLENAME);
    assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), TABLENAME));
    // Now trigger a split and stop when the split is in progress
    // to let region load to update
    Thread.sleep(5000);
    m.normalizeRegions();
    if (limitedByQuota) {
        long skippedSplitcnt = 0;
        do {
            skippedSplitcnt = m.getRegionNormalizer().getSkippedCount(PlanType.SPLIT);
            Thread.sleep(100);
        } while (skippedSplitcnt == 0L);
        assert (skippedSplitcnt > 0);
    } else {
        while (true) {
            List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(TABLENAME);
            int cnt = 0;
            for (HRegion region : regions) {
                String regionName = region.getRegionInfo().getRegionNameAsString();
                if (regionName.startsWith("testRegionNormalizationSplitOnCluster,zzzzz")) {
                    cnt++;
                }
            }
            if (cnt >= 2) {
                break;
            }
            // Bug fix: the original loop busy-spun with no pause, pegging a CPU
            // core while waiting for the split daughters to come online.
            Thread.sleep(100);
        }
    }
    admin.disableTable(TABLENAME);
    admin.deleteTable(TABLENAME);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HMaster(org.apache.hadoop.hbase.master.HMaster) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor)

Example 94 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestFSHLog, the method testSyncRunnerIndexOverflow:

/**
 * Regression test: the WAL must keep accepting appends after its internal
 * sync-runner index passes Integer.MAX_VALUE. The index is pushed to the
 * edge of overflow via reflection, then a burst of edits is appended.
 */
@Test
public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException {
    final String walName = this.name.getMethodName();
    FSHLog wal = new FSHLog(FS, FSUtils.getRootDir(CONF), walName, HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null);
    try {
        // Reach into the WAL internals and set the sync-runner index one
        // step below overflow so the appends below exercise the wrap-around.
        Field handlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler");
        handlerField.setAccessible(true);
        FSHLog.RingBufferEventHandler handler = (FSHLog.RingBufferEventHandler) handlerField.get(wal);
        Field indexField = FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex");
        indexField.setAccessible(true);
        indexField.set(handler, Integer.MAX_VALUE - 1);
        HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(this.name.getMethodName())).addFamily(new HColumnDescriptor("row"));
        // Replication scope 0 (local only) for every family of the table.
        NavigableMap<byte[], Integer> replicationScopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (byte[] family : tableDesc.getFamiliesKeys()) {
            replicationScopes.put(family, 0);
        }
        HRegionInfo regionInfo = new HRegionInfo(tableDesc.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
        for (int edit = 0; edit < 10; edit++) {
            addEdits(wal, regionInfo, tableDesc, 1, mvcc, replicationScopes);
        }
    } finally {
        wal.close();
    }
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) TreeMap(java.util.TreeMap) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Field(java.lang.reflect.Field) Test(org.junit.Test)

Example 95 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestLogRollAbort, the method testRSAbortWithUnflushedEdits:

/**
   * Tests that RegionServer aborts if we hit an error closing the WAL when
   * there are unsynced WAL edits.  See HBASE-4282.
   */
/**
 * Tests that RegionServer aborts if we hit an error closing the WAL when
 * there are unsynced WAL edits.  See HBASE-4282.
 *
 * @throws Exception on cluster/admin failure, or AssertionError if the WAL
 *         roll fails with an unexpected exception type
 */
@Test
public void testRSAbortWithUnflushedEdits() throws Exception {
    LOG.info("Starting testRSAbortWithUnflushedEdits()");
    // When the hbase:meta table can be opened, the region servers are running
    TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close();
    // Create the test table and open it
    TableName tableName = TableName.valueOf(this.getClass().getSimpleName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc);
    // try-with-resources replaces the manual try/finally + close() of the original.
    try (Table table = TEST_UTIL.getConnection().getTable(desc.getTableName())) {
        HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
        WAL log = server.getWAL(null);
        // First edit is explicitly synced so the WAL holds a durable append.
        Put p = new Put(Bytes.toBytes("row2001"));
        p.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2001));
        table.put(p);
        log.sync();
        p = new Put(Bytes.toBytes("row2002"));
        p.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2002));
        table.put(p);
        // Restarting the datanodes invalidates the WAL's open output stream,
        // so the roll below should fail while closing the old writer.
        dfsCluster.restartDataNodes();
        LOG.info("Restarted datanodes");
        try {
            log.rollWriter(true);
        } catch (FailedLogCloseException flce) {
        // Expected exception.  We used to expect that there would be unsynced appends but this
        // not reliable now that sync plays a roll in wall rolling.  The above puts also now call
        // sync.
        } catch (Throwable t) {
            // Bug fix: the original only logged here, so the test passed even
            // when the roll failed with the wrong exception.  Fail explicitly
            // and preserve the cause.
            LOG.fatal("FAILED TEST: Got wrong exception", t);
            throw new AssertionError("WAL roll failed with unexpected exception", t);
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) WAL(org.apache.hadoop.hbase.wal.WAL) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)867 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)555 Test (org.junit.Test)425 TableName (org.apache.hadoop.hbase.TableName)258 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)171 IOException (java.io.IOException)167 Put (org.apache.hadoop.hbase.client.Put)149 Table (org.apache.hadoop.hbase.client.Table)134 Path (org.apache.hadoop.fs.Path)127 Admin (org.apache.hadoop.hbase.client.Admin)121 Configuration (org.apache.hadoop.conf.Configuration)87 HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin)77 ArrayList (java.util.ArrayList)75 FileSystem (org.apache.hadoop.fs.FileSystem)66 Result (org.apache.hadoop.hbase.client.Result)62 Connection (org.apache.hadoop.hbase.client.Connection)57 Scan (org.apache.hadoop.hbase.client.Scan)51 Cell (org.apache.hadoop.hbase.Cell)44 Delete (org.apache.hadoop.hbase.client.Delete)44 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)43