Example 81 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestMasterMetricsWrapper method testOfflineRegion.

/**
 * Tests the online and offline region counts.
 */
@Test
public void testOfflineRegion() throws Exception {
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master);
    TableName table = TableName.valueOf("testRegionNumber");
    try {
        RegionInfo hri;
        byte[] FAMILY = Bytes.toBytes("FAMILY");
        TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(table).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
        TEST_UTIL.getAdmin().createTable(tableDescriptor, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
        // wait till the table is assigned
        long timeoutTime = EnvironmentEdgeManager.currentTime() + 1000;
        while (true) {
            List<RegionInfo> regions = master.getAssignmentManager().getRegionStates().getRegionsOfTable(table);
            if (regions.size() > 3) {
                hri = regions.get(2);
                break;
            }
            long now = EnvironmentEdgeManager.currentTime();
            if (now > timeoutTime) {
                fail("Could not find an online region");
            }
            Thread.sleep(10);
        }
        PairOfSameType<Integer> regionNumberPair = info.getRegionCounts();
        assertEquals(5, regionNumberPair.getFirst().intValue());
        assertEquals(0, regionNumberPair.getSecond().intValue());
        TEST_UTIL.getAdmin().offline(hri.getRegionName());
        timeoutTime = EnvironmentEdgeManager.currentTime() + 800;
        RegionStates regionStates = master.getAssignmentManager().getRegionStates();
        while (true) {
            if (regionStates.getRegionByStateOfTable(table).get(RegionState.State.OFFLINE).contains(hri)) {
                break;
            }
            long now = EnvironmentEdgeManager.currentTime();
            if (now > timeoutTime) {
                fail("Failed to offline the region in time");
                break;
            }
            Thread.sleep(10);
        }
        regionNumberPair = info.getRegionCounts();
        assertEquals(4, regionNumberPair.getFirst().intValue());
        assertEquals(1, regionNumberPair.getSecond().intValue());
    } finally {
        TEST_UTIL.deleteTable(table);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionStates(org.apache.hadoop.hbase.master.assignment.RegionStates) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
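
Note: the hand-rolled wait loop above (EnvironmentEdgeManager.currentTime() plus Thread.sleep) can be written more compactly with the Waiter-based helper that the later examples use. A minimal sketch, reusing the master and table handles from the test:

TEST_UTIL.waitFor(1000, 10, () -> master.getAssignmentManager()
    .getRegionStates().getRegionsOfTable(table).size() > 3);
// waitFor fails the test with a timeout message if the predicate never
// holds, replacing the explicit fail(...) branch; the region handle (hri)
// can then be fetched once the wait returns.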

Example 82 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestMasterOperationsForRegionReplicas method testCreateTableWithSingleReplica.

@Test
public void testCreateTableWithSingleReplica() throws Exception {
    final int numRegions = 3;
    final int numReplica = 1;
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(numReplica).setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build();
        ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
        TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
        TEST_UTIL.waitUntilNoRegionsInTransition();
        validateNumberOfRowsInMeta(tableName, numRegions, ADMIN.getConnection());
        List<RegionInfo> hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName);
        assertEquals(numRegions * numReplica, hris.size());
    } finally {
        ADMIN.disableTable(tableName);
        ADMIN.deleteTable(tableName);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
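
Note: the same createTable overload also drives the multi-replica variants of this test. A hedged sketch, assuming the ADMIN handle above (the replica count here is illustrative; the test itself uses 1):

int numReplica = 2; // illustrative; the single-replica test uses 1
TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
    .setRegionReplication(numReplica)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family"))
    .build();
ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
// Each region now carries numReplica RegionInfo entries, so the meta
// check above would expect numRegions * numReplica entries in total.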

Example 83 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestEncryptionKeyRotation method testCFKeyRotation.

@Test
public void testCFKeyRotation() throws Exception {
    // Create the table schema
    TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf("default", name.getMethodName()));
    ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"));
    String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
    columnFamilyDescriptorBuilder.setEncryptionType(algorithm);
    columnFamilyDescriptorBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey));
    tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build());
    TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
    // Create the table and some on disk files
    createTableAndFlush(tableDescriptor);
    // Verify we have store file(s) with the initial key
    final List<Path> initialPaths = findStorefilePaths(tableDescriptor.getTableName());
    assertTrue(initialPaths.size() > 0);
    for (Path path : initialPaths) {
        assertTrue("Store file " + path + " has incorrect key", Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }
    // Update the schema with a new encryption key
    columnFamilyDescriptorBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), secondCFKey));
    TEST_UTIL.getAdmin().modifyColumnFamily(tableDescriptor.getTableName(), columnFamilyDescriptorBuilder.build());
    // Need a predicate for online schema change
    Thread.sleep(5000);
    // And major compact
    TEST_UTIL.getAdmin().majorCompact(tableDescriptor.getTableName());
    // waiting for the major compaction to complete
    TEST_UTIL.waitFor(30000, new Waiter.Predicate<IOException>() {

        @Override
        public boolean evaluate() throws IOException {
            return TEST_UTIL.getAdmin().getCompactionState(tableDescriptor.getTableName()) == CompactionState.NONE;
        }
    });
    List<Path> pathsAfterCompaction = findStorefilePaths(tableDescriptor.getTableName());
    assertTrue(pathsAfterCompaction.size() > 0);
    for (Path path : pathsAfterCompaction) {
        assertTrue("Store file " + path + " has incorrect key", Bytes.equals(secondCFKey.getEncoded(), extractHFileKey(path)));
    }
    List<Path> compactedPaths = findCompactedStorefilePaths(tableDescriptor.getTableName());
    assertTrue(compactedPaths.size() > 0);
    for (Path path : compactedPaths) {
        assertTrue("Store file " + path + " retains initial key", Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) IOException(java.io.IOException) Waiter(org.apache.hadoop.hbase.Waiter) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
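
Note: the Thread.sleep(5000) above stands in for a missing predicate on the online schema change (the in-code comment says as much). A hedged alternative reusing the wait helpers that Example 82 relies on, so the modifyColumnFamily procedure can reopen every region before the major compaction is requested:

// Wait for the schema change to reopen all regions instead of sleeping.
TEST_UTIL.waitUntilAllRegionsAssigned(tableDescriptor.getTableName());
TEST_UTIL.waitUntilNoRegionsInTransition();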

Example 84 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestEndToEndSplitTransaction method testCanSplitJustAfterASplit.

/**
 * Test for HBASE-20940. This test splits a region and then opens a reference
 * over a store file. While a store file has any open reference, the test
 * verifies that the region can't be split again.
 */
@Test
public void testCanSplitJustAfterASplit() throws Exception {
    LOG.info("Starting testCanSplitJustAfterASplit");
    byte[] fam = Bytes.toBytes("cf_split");
    CompactSplit compactSplit = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread();
    TableName tableName = TableName.valueOf("CanSplitTable");
    Table source = TEST_UTIL.getConnection().getTable(tableName);
    Admin admin = TEST_UTIL.getAdmin();
    // Compactions are disabled below (setCompactionsEnabled(false)) so a
    // compaction right after the split does not clean up the reference files.
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
    Map<String, StoreFileReader> scanner = Maps.newHashMap();
    try {
        admin.createTable(htd);
        TEST_UTIL.loadTable(source, fam);
        compactSplit.setCompactionsEnabled(false);
        admin.split(tableName);
        TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getHBaseCluster().getRegions(tableName).size() == 2);
        List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
        regions.stream().forEach(r -> r.getStores().get(0).getStorefiles().stream().filter(s -> s.isReference() && !scanner.containsKey(r.getRegionInfo().getEncodedName())).forEach(sf -> {
            StoreFileReader reader = ((HStoreFile) sf).getReader();
            reader.getStoreFileScanner(true, false, false, 0, 0, false);
            scanner.put(r.getRegionInfo().getEncodedName(), reader);
            LOG.info("Got reference to file = " + sf.getPath() + ",for region = " + r.getRegionInfo().getEncodedName());
        }));
        assertTrue("Regions did not split properly", regions.size() > 1);
        assertTrue("Could not get reference any of the store file", scanner.size() > 1);
        compactSplit.setCompactionsEnabled(true);
        for (HRegion region : regions) {
            region.compact(true);
        }
        regions.stream().filter(region -> scanner.containsKey(region.getRegionInfo().getEncodedName())).forEach(r -> assertFalse("Contains an open file reference which can be split", r.getStores().get(0).canSplit()));
    } finally {
        scanner.values().forEach(s -> {
            try {
                s.close(true);
            } catch (IOException ioe) {
                LOG.error("Failed while closing store file", ioe);
            }
        });
        scanner.clear();
        Closeables.close(source, true);
        if (!compactSplit.isCompactionsEnabled()) {
            compactSplit.setCompactionsEnabled(true);
        }
        TEST_UTIL.deleteTableIfAny(tableName);
    }
}
Also used : CatalogFamilyFormat(org.apache.hadoop.hbase.CatalogFamilyFormat) Result(org.apache.hadoop.hbase.client.Result) ChoreService(org.apache.hadoop.hbase.ChoreService) LoggerFactory(org.slf4j.LoggerFactory) Random(java.util.Random) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) Closeables(org.apache.hbase.thirdparty.com.google.common.io.Closeables) ClassRule(org.junit.ClassRule) Pair(org.apache.hadoop.hbase.util.Pair) Maps(org.apache.hbase.thirdparty.com.google.common.collect.Maps) AfterClass(org.junit.AfterClass) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Get(org.apache.hadoop.hbase.client.Get) Set(java.util.Set) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) Stoppable(org.apache.hadoop.hbase.Stoppable) List(java.util.List) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Assert.assertFalse(org.junit.Assert.assertFalse) EnvironmentEdgeManager(org.apache.hadoop.hbase.util.EnvironmentEdgeManager) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) MetaTableAccessor(org.apache.hadoop.hbase.MetaTableAccessor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Waiter(org.apache.hadoop.hbase.Waiter) BeforeClass(org.junit.BeforeClass) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) HConstants(org.apache.hadoop.hbase.HConstants) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) TestName(org.junit.rules.TestName) Assert.assertArrayEquals(org.junit.Assert.assertArrayEquals) Threads(org.apache.hadoop.hbase.util.Threads) Bytes(org.apache.hadoop.hbase.util.Bytes) Iterators(org.apache.hbase.thirdparty.com.google.common.collect.Iterators) TableName(org.apache.hadoop.hbase.TableName) Logger(org.slf4j.Logger) Put(org.apache.hadoop.hbase.client.Put) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) Test(org.junit.Test) ConnectionFactory(org.apache.hadoop.hbase.client.ConnectionFactory) Rule(org.junit.Rule) Admin(org.apache.hadoop.hbase.client.Admin) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) PairOfSameType(org.apache.hadoop.hbase.util.PairOfSameType) Connection(org.apache.hadoop.hbase.client.Connection) Table(org.apache.hadoop.hbase.client.Table) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Assert.assertEquals(org.junit.Assert.assertEquals) ScheduledChore(org.apache.hadoop.hbase.ScheduledChore)
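
Note: the property this test pins down can be phrased as a small guard. A minimal sketch, assuming HRegion handles obtained from the cluster as above (the helper name is ours, not HBase API):

// Hypothetical helper: a region may be split only if every store can be,
// and a store that still contains reference files (kept alive here by the
// open readers) reports canSplit() == false.
static boolean isSplittable(HRegion region) {
    return region.getStores().stream().allMatch(store -> store.canSplit());
}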

Example 85 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestHRegion method testCellTTLs.

@Test
public void testCellTTLs() throws IOException {
    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(edge);
    final byte[] row = Bytes.toBytes("testRow");
    final byte[] q1 = Bytes.toBytes("q1");
    final byte[] q2 = Bytes.toBytes("q2");
    final byte[] q3 = Bytes.toBytes("q3");
    final byte[] q4 = Bytes.toBytes("q4");
    // 10 seconds
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(10).build()).build();
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
    region = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(), TEST_UTIL.getDataTestDir(), conf, tableDescriptor);
    assertNotNull(region);
    long now = EnvironmentEdgeManager.currentTime();
    // Add a cell that will expire in 5 seconds via cell TTL
    region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { // TTL tags specify ts in milliseconds
    new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
    // Add a cell that will expire after 10 seconds via family setting
    region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
    // Add a cell that will expire in 15 seconds via cell TTL
    region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { // TTL tags specify ts in milliseconds
    new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
    // Add a cell that will expire in 20 seconds via family setting
    region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
    // Flush so we are sure store scanning gets this right
    region.flush(true);
    // A query at time T+0 should return all cells
    Result r = region.get(new Get(row));
    assertNotNull(r.getValue(fam1, q1));
    assertNotNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Increment time to T+5 seconds
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNotNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Increment time to T+10 seconds
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Increment time to T+15 seconds
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Increment time to T+20 seconds
    edge.incrementTime(10000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNull(r.getValue(fam1, q3));
    assertNull(r.getValue(fam1, q4));
    // Fun with disappearing increments
    // Start at 1
    region.put(new Put(row).addColumn(fam1, q1, Bytes.toBytes(1L)));
    r = region.get(new Get(row));
    byte[] val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(1L, Bytes.toLong(val));
    // Increment with a TTL of 5 seconds
    Increment incr = new Increment(row).addColumn(fam1, q1, 1L);
    incr.setTTL(5000);
    // 2
    region.increment(incr);
    // New value should be 2
    r = region.get(new Get(row));
    val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(2L, Bytes.toLong(val));
    // Increment time to T+25 seconds
    edge.incrementTime(5000);
    // Value should be back to 1
    r = region.get(new Get(row));
    val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(1L, Bytes.toLong(val));
    // Increment time to T+30 seconds
    edge.incrementTime(5000);
    // Original value written at T+20 should be gone now via family TTL
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) IncrementingEnvironmentEdge(org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
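
Note: building KeyValues with TTL tags by hand, as above, is the low-level route; Mutation.setTTL (already used on the Increment later in this test) achieves the same per-cell expiry on a plain Put. A minimal sketch of the equivalent 5-second TTL:

// Same 5-second cell TTL as the hand-built TTL_TAG_TYPE tag, set via the
// client API; the TTL is in milliseconds and applies to all cells of the Put.
Put put = new Put(row).addColumn(fam1, q1, HConstants.EMPTY_BYTE_ARRAY);
put.setTTL(5000L);
region.put(put);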

Aggregations

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 639
Test (org.junit.Test): 356
TableName (org.apache.hadoop.hbase.TableName): 237
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 180
IOException (java.io.IOException): 151
Put (org.apache.hadoop.hbase.client.Put): 142
Admin (org.apache.hadoop.hbase.client.Admin): 136
Path (org.apache.hadoop.fs.Path): 124
Table (org.apache.hadoop.hbase.client.Table): 121
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 96
Configuration (org.apache.hadoop.conf.Configuration): 91
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 66
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 64
Connection (org.apache.hadoop.hbase.client.Connection): 59
Scan (org.apache.hadoop.hbase.client.Scan): 50
Get (org.apache.hadoop.hbase.client.Get): 49
List (java.util.List): 39
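
Distilled from the examples above: TableDescriptor is immutable, so all configuration happens on TableDescriptorBuilder and ColumnFamilyDescriptorBuilder before build(). A self-contained sketch (table and family names are illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDescriptorExample {
    public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))  // hypothetical table name
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setTimeToLive(10)  // seconds, as in Example 85
                .build())
            .build();
        System.out.println(desc.getTableName() + " has "
            + desc.getColumnFamilyCount() + " column family");
    }
}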