Search in sources:

Example 66 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class TestAsyncTableAdminApi, method testEnableTableRetainAssignment.

@Test(timeout = 300000)
public void testEnableTableRetainAssignment() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Nine split keys {1,1,1} .. {9,9,9}, so we expect splits + 1 regions.
    byte[][] splitKeys = new byte[9][];
    for (int i = 0; i < splitKeys.length; i++) {
        byte b = (byte) (i + 1);
        splitKeys[i] = new byte[] { b, b, b };
    }
    int expectedRegions = splitKeys.length + 1;
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, splitKeys).join();
    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        List<HRegionLocation> locationsBefore = locator.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions but only found "
            + locationsBefore.size(), expectedRegions, locationsBefore.size());
        // Cycle the table through disable/enable; enabling should retain the
        // previous region-to-server assignment.
        admin.disableTable(tableName).join();
        admin.enableTable(tableName).join();
        List<HRegionLocation> locationsAfter = locator.getAllRegionLocations();
        // Same regions on the same servers as before the disable/enable cycle.
        assertEquals(locationsBefore.size(), locationsAfter.size());
        assertTrue(locationsAfter.containsAll(locationsBefore));
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)

Example 67 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class TestAsyncTableAdminApi, method testCreateTableWithRegions.

@Test(timeout = 300000)
public void testCreateTableWithRegions() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 },
        new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 },
        new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } };
    int expectedRegions = splitKeys.length + 1;
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, splitKeys).join();
    List<HRegionLocation> regions;
    ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        regions = l.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size());
        System.err.println("Found " + regions.size() + " regions");
        assertRegionBoundaries(regions, splitKeys);
        verifyRoundRobinDistribution(conn, l, expectedRegions);
    }
    // Now test using start/end with a number of regions
    // Use 80 bit numbers to make sure we aren't limited
    byte[] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
    byte[] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
    // Splitting into 10 regions, we expect (null,1) ... (9, null)
    // with (1,2) (2,3) (3,4) (4,5) (5,6) (6,7) (7,8) (8,9) in the middle
    expectedRegions = 10;
    final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "_2");
    desc = new HTableDescriptor(tableName2);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, startKey, endKey, expectedRegions).join();
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName2)) {
        regions = l.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size());
        System.err.println("Found " + regions.size() + " regions");
        // With 10 evenly split regions between the 80-bit start/end keys, the
        // interior boundaries are {1,1,...,1} .. {9,9,...,9} (10 bytes each).
        byte[][] boundaries = new byte[9][];
        for (int i = 0; i < boundaries.length; i++) {
            byte[] key = new byte[10];
            Arrays.fill(key, (byte) (i + 1));
            boundaries[i] = key;
        }
        assertRegionBoundaries(regions, boundaries);
        verifyRoundRobinDistribution(conn, l, expectedRegions);
    }
    // Try once more with something that divides into something infinite
    startKey = new byte[] { 0, 0, 0, 0, 0, 0 };
    endKey = new byte[] { 1, 0, 0, 0, 0, 0 };
    expectedRegions = 5;
    final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "_3");
    desc = new HTableDescriptor(tableName3);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, startKey, endKey, expectedRegions).join();
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName3)) {
        regions = l.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size());
        System.err.println("Found " + regions.size() + " regions");
        verifyRoundRobinDistribution(conn, l, expectedRegions);
    }
    // Try an invalid case where there are duplicate split keys
    splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, new byte[] { 2, 2, 2 } };
    final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "_4");
    desc = new HTableDescriptor(tableName4);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    try {
        admin.createTable(desc, splitKeys).join();
        fail("Should not be able to create this table because of " + "duplicate split keys");
    } catch (CompletionException e) {
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }
}

/**
 * Asserts that {@code regions}, in iteration order, tile the whole key space
 * split at {@code boundaries}: the first region spans (empty, boundaries[0]),
 * each middle region spans (boundaries[i-1], boundaries[i]), and the last
 * region spans (boundaries[last], empty). Replaces the hand-unrolled
 * per-region assertion blocks that were duplicated for each table layout.
 */
private static void assertRegionBoundaries(List<HRegionLocation> regions, byte[][] boundaries) {
    Iterator<HRegionLocation> hris = regions.iterator();
    HRegionInfo hri = hris.next().getRegionInfo();
    // First region: open start key up to the first boundary.
    assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0);
    assertTrue(Bytes.equals(hri.getEndKey(), boundaries[0]));
    // Middle regions: [boundaries[i-1], boundaries[i]).
    for (int i = 1; i < boundaries.length; i++) {
        hri = hris.next().getRegionInfo();
        assertTrue(Bytes.equals(hri.getStartKey(), boundaries[i - 1]));
        assertTrue(Bytes.equals(hri.getEndKey(), boundaries[i]));
    }
    // Last region: last boundary up to the open end key.
    hri = hris.next().getRegionInfo();
    assertTrue(Bytes.equals(hri.getStartKey(), boundaries[boundaries.length - 1]));
    assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) CompletionException(java.util.concurrent.CompletionException) Test(org.junit.Test)

Example 68 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class TestAsyncTableAdminApi, method testCreateTable.

@Test(timeout = 300000)
public void testCreateTable() throws Exception {
    // Snapshot the table count before creating anything.
    int tablesBefore = admin.listTables().get().length;
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Build the descriptor step by step rather than as a chained expression.
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc).join();
    // Exactly one table should have been added, and it must come up enabled.
    HTableDescriptor[] tablesAfter = admin.listTables().get();
    assertEquals(tablesBefore + 1, tablesAfter.length);
    assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster()
        .getTableStateManager().isTableState(tableName, TableState.State.ENABLED));
    assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName));
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)

Example 69 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class TestAsyncTableAdminApi, method testDeleteTables.

@Test(timeout = 300000)
public void testDeleteTables() throws Exception {
    TableName[] tables = { TableName.valueOf(name.getMethodName() + "1"),
        TableName.valueOf(name.getMethodName() + "2"),
        TableName.valueOf(name.getMethodName() + "3") };
    // Create each table, verify it exists, then best-effort disable it so the
    // bulk delete below can proceed.
    Arrays.stream(tables).map(HTableDescriptor::new)
        .map((table) -> table.addFamily(new HColumnDescriptor(FAMILY))).forEach((table) -> {
            admin.createTable(table).join();
            admin.tableExists(table.getTableName()).thenAccept((exist) -> assertTrue(exist)).join();
            try {
                TEST_UTIL.getAdmin().disableTable(table.getTableName());
            } catch (Exception ignored) {
                // Intentionally swallowed: disabling is only a precondition for
                // deletion, which is what this test actually verifies.
            }
        });
    // Derive the pattern from the test-method name instead of hard-coding
    // "testDeleteTables" so a method rename cannot silently break the match.
    HTableDescriptor[] failed = admin.deleteTables(Pattern.compile(name.getMethodName() + ".*")).get();
    assertEquals(0, failed.length);
    // All three tables must be gone after the pattern delete.
    Arrays.stream(tables).forEach((table) -> {
        admin.tableExists(table).thenAccept((exist) -> assertFalse(exist)).join();
    });
}
Also used : Arrays(java.util.Arrays) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException) AsyncMetaTableAccessor(org.apache.hadoop.hbase.AsyncMetaTableAccessor) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HashMap(java.util.HashMap) ClientTests(org.apache.hadoop.hbase.testclassification.ClientTests) ArrayList(java.util.ArrayList) HConstants(org.apache.hadoop.hbase.HConstants) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TestName(org.junit.rules.TestName) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) Assert.fail(org.junit.Assert.fail) ServerName(org.apache.hadoop.hbase.ServerName) Bytes(org.apache.hadoop.hbase.util.Bytes) TableName(org.apache.hadoop.hbase.TableName) Iterator(java.util.Iterator) Assert.assertTrue(org.junit.Assert.assertTrue) Set(java.util.Set) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) IOException(java.io.IOException) CompletionException(java.util.concurrent.CompletionException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) List(java.util.List) Rule(org.junit.Rule) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) Assert.assertFalse(org.junit.Assert.assertFalse) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) Assert(org.junit.Assert) FSUtils(org.apache.hadoop.hbase.util.FSUtils) Assert.assertEquals(org.junit.Assert.assertEquals) TableName(org.apache.hadoop.hbase.TableName) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException) IOException(java.io.IOException) CompletionException(java.util.concurrent.CompletionException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)

Example 70 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class TestAsyncTableBatch, method testPartialSuccess.

@Test
public void testPartialSuccess() throws IOException, InterruptedException, ExecutionException {
    Admin admin = TEST_UTIL.getAdmin();
    // Install the error-injecting coprocessor so requests to one region fail.
    HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME);
    htd.addCoprocessor(ErrorInjectObserver.class.getName());
    admin.modifyTable(TABLE_NAME, htd);
    AsyncTableBase table = tableGetter.apply(TABLE_NAME);
    table.putAll(Arrays.asList(SPLIT_KEYS).stream().map(k -> new Put(k).addColumn(FAMILY, CQ, k)).collect(Collectors.toList())).get();
    List<CompletableFuture<Result>> futures = table.get(Arrays.asList(SPLIT_KEYS).stream().map(k -> new Get(k)).collect(Collectors.toList()));
    // Every get except the one routed to the error-injected region succeeds.
    for (int i = 0; i < SPLIT_KEYS.length - 1; i++) {
        assertArrayEquals(SPLIT_KEYS[i], futures.get(i).get().getValue(FAMILY, CQ));
    }
    try {
        futures.get(SPLIT_KEYS.length - 1).get();
        // BUG FIX: the original fell through silently when no exception was
        // thrown, so a regression in error propagation would go unnoticed.
        // (AssertionError is used because Assert.fail is not imported here.)
        throw new AssertionError("Get against the error-injected region should have failed");
    } catch (ExecutionException e) {
        assertThat(e.getCause(), instanceOf(RetriesExhaustedException.class));
    }
}
Also used : IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) BeforeClass(org.junit.BeforeClass) RunWith(org.junit.runner.RunWith) Parameters(org.junit.runners.Parameterized.Parameters) CompletableFuture(java.util.concurrent.CompletableFuture) ClientTests(org.apache.hadoop.hbase.testclassification.ClientTests) Function(java.util.function.Function) ArrayList(java.util.ArrayList) CoreMatchers.instanceOf(org.hamcrest.CoreMatchers.instanceOf) Assert.assertThat(org.junit.Assert.assertThat) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Assert.assertArrayEquals(org.junit.Assert.assertArrayEquals) After(org.junit.After) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Parameterized(org.junit.runners.Parameterized) Cell(org.apache.hadoop.hbase.Cell) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) AfterClass(org.junit.AfterClass) RegionObserver(org.apache.hadoop.hbase.coprocessor.RegionObserver) Parameter(org.junit.runners.Parameterized.Parameter) Assert.assertTrue(org.junit.Assert.assertTrue) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) UncheckedIOException(java.io.UncheckedIOException) ExecutionException(java.util.concurrent.ExecutionException) List(java.util.List) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) ForkJoinPool(java.util.concurrent.ForkJoinPool) ObserverContext(org.apache.hadoop.hbase.coprocessor.ObserverContext) Assert.assertEquals(org.junit.Assert.assertEquals) CompletableFuture(java.util.concurrent.CompletableFuture) ExecutionException(java.util.concurrent.ExecutionException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)867 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)555 Test (org.junit.Test)425 TableName (org.apache.hadoop.hbase.TableName)258 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)171 IOException (java.io.IOException)167 Put (org.apache.hadoop.hbase.client.Put)149 Table (org.apache.hadoop.hbase.client.Table)134 Path (org.apache.hadoop.fs.Path)127 Admin (org.apache.hadoop.hbase.client.Admin)121 Configuration (org.apache.hadoop.conf.Configuration)87 HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin)77 ArrayList (java.util.ArrayList)75 FileSystem (org.apache.hadoop.fs.FileSystem)66 Result (org.apache.hadoop.hbase.client.Result)62 Connection (org.apache.hadoop.hbase.client.Connection)57 Scan (org.apache.hadoop.hbase.client.Scan)51 Cell (org.apache.hadoop.hbase.Cell)44 Delete (org.apache.hadoop.hbase.client.Delete)44 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)43