
Example 26 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestAdmin1, method testSplitAndMergeWithReplicaTable.

@Test
public void testSplitAndMergeWithReplicaTable() throws Exception {
    // The test tries to directly split replica regions and directly merge replica regions. These
    // are not allowed. The test validates that. Then the test does a valid split/merge of allowed
    // regions.
    // Set up a table with 3 regions and replication set to 3
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setRegionReplication(3);
    byte[] cf = "f".getBytes();
    HColumnDescriptor hcd = new HColumnDescriptor(cf);
    desc.addFamily(hcd);
    byte[][] splitRows = new byte[2][];
    splitRows[0] = new byte[] { (byte) '4' };
    splitRows[1] = new byte[] { (byte) '7' };
    TEST_UTIL.getAdmin().createTable(desc, splitRows);
    List<HRegion> oldRegions;
    do {
        oldRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
        Thread.sleep(10);
    } while (oldRegions.size() != 9); // 3 regions * 3 replicas
    // write some data to the table
    Table ht = TEST_UTIL.getConnection().getTable(tableName);
    List<Put> puts = new ArrayList<>();
    byte[] qualifier = "c".getBytes();
    Put put = new Put(new byte[] { (byte) '1' });
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    put = new Put(new byte[] { (byte) '6' });
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    put = new Put(new byte[] { (byte) '8' });
    put.addColumn(cf, qualifier, "100".getBytes());
    puts.add(put);
    ht.put(puts);
    ht.close();
    List<Pair<HRegionInfo, ServerName>> regions = MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName);
    boolean gotException = false;
    // The element at index 1 would be a replica (since the meta reader gives us ordered
    // regions). Try splitting that region via the split API; it should fail.
    try {
        TEST_UTIL.getAdmin().splitRegion(regions.get(1).getFirst().getRegionName());
    } catch (IllegalArgumentException ex) {
        gotException = true;
    }
    assertTrue(gotException);
    gotException = false;
    // Try splitting the same replica via a different split API (the difference is that
    // this API goes directly to the regionserver, skipping any checks in the admin). Should fail.
    try {
        TEST_UTIL.getHBaseAdmin().split(regions.get(1).getSecond(), regions.get(1).getFirst(), new byte[] { (byte) '1' });
    } catch (IOException ex) {
        gotException = true;
    }
    assertTrue(gotException);
    gotException = false;
    // Try merging a replica with another. Should fail.
    try {
        TEST_UTIL.getHBaseAdmin().mergeRegionsSync(regions.get(1).getFirst().getEncodedNameAsBytes(), regions.get(2).getFirst().getEncodedNameAsBytes(), true);
    } catch (IllegalArgumentException m) {
        gotException = true;
    }
    assertTrue(gotException);
    // Try going to the master directly (that will skip the check in admin)
    try {
        byte[][] nameofRegionsToMerge = new byte[2][];
        nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
        nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
        MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
        ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster().mergeTableRegions(null, request);
    } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException m) {
        Throwable t = m.getCause();
        do {
            if (t instanceof MergeRegionException) {
                gotException = true;
                break;
            }
            t = t.getCause();
        } while (t != null);
    }
    assertTrue(gotException);
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) MergeTableRegionsRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) MergeRegionException(org.apache.hadoop.hbase.exceptions.MergeRegionException) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
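
Note that HTableDescriptor and HColumnDescriptor are deprecated since HBase 2.0 in favor of the immutable builder API. Below is a minimal sketch of the same table setup (two split points, region replication 3) written against TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; the wrapper class and method names are illustrative, not part of the test above.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

public class ReplicaTableSetup {
    // Builder-based equivalent of the HTableDescriptor setup in the test above.
    static void createReplicaTable(Admin admin, TableName tableName) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
            // one primary plus two read replicas per region, as in setRegionReplication(3)
            .setRegionReplication(3)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
            .build();
        // two split points -> 3 regions, i.e. 9 region replicas in total
        byte[][] splitRows = { Bytes.toBytes("4"), Bytes.toBytes("7") };
        admin.createTable(desc, splitRows);
    }
}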

Example 27 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestAdmin1, method testHFileReplication.

/*
   * Test DFS replication for column families, where one CF has the default replication (3)
   * and the other is set to 1.
   */
@Test(timeout = 300000)
public void testHFileReplication() throws Exception {
    final TableName tableName = TableName.valueOf(this.name.getMethodName());
    String fn1 = "rep1";
    HColumnDescriptor hcd1 = new HColumnDescriptor(fn1);
    hcd1.setDFSReplication((short) 1);
    String fn = "defaultRep";
    HColumnDescriptor hcd = new HColumnDescriptor(fn);
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    htd.addFamily(hcd1);
    Table table = TEST_UTIL.createTable(htd, null);
    TEST_UTIL.waitTableAvailable(tableName);
    Put p = new Put(Bytes.toBytes("defaultRep_rk"));
    byte[] q1 = Bytes.toBytes("q1");
    byte[] v1 = Bytes.toBytes("v1");
    p.addColumn(Bytes.toBytes(fn), q1, v1);
    List<Put> puts = new ArrayList<>(2);
    puts.add(p);
    p = new Put(Bytes.toBytes("rep1_rk"));
    p.addColumn(Bytes.toBytes(fn1), q1, v1);
    puts.add(p);
    try {
        table.put(puts);
        admin.flush(tableName);
        List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(tableName);
        for (HRegion r : regions) {
            Store store = r.getStore(Bytes.toBytes(fn));
            for (StoreFile sf : store.getStorefiles()) {
                assertTrue(sf.toString().contains(fn));
                assertTrue("Column family " + fn + " should have 3 copies", FSUtils.getDefaultReplication(TEST_UTIL.getTestFileSystem(), sf.getPath()) == (sf.getFileInfo().getFileStatus().getReplication()));
            }
            store = r.getStore(Bytes.toBytes(fn1));
            for (StoreFile sf : store.getStorefiles()) {
                assertTrue(sf.toString().contains(fn1));
                assertTrue("Column family " + fn1 + " should have only 1 copy", 1 == sf.getFileInfo().getFileStatus().getReplication());
            }
        }
    } finally {
        if (admin.isTableEnabled(tableName)) {
            this.admin.disableTable(tableName);
            this.admin.deleteTable(tableName);
        }
    }
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) Store(org.apache.hadoop.hbase.regionserver.Store) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) Test(org.junit.Test)
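
The same per-family DFS replication setting is available on the builder API as well. A short sketch, assuming an HBase 2.x client (the wrapper class and method are hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

public class DfsReplicationSetup {
    static void createTableWithMixedReplication(Admin admin, TableName tableName) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
            // "defaultRep" keeps the HDFS default replication factor (typically 3)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("defaultRep"))
            // "rep1" stores its HFiles with a single HDFS replica
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rep1"))
                .setDFSReplication((short) 1)
                .build())
            .build();
        admin.createTable(desc);
    }
}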

Example 28 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestAdmin1, method testTableAvailableWithRandomSplitKeys.

@Test(timeout = 300000)
public void testTableAvailableWithRandomSplitKeys() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor("col"));
    byte[][] splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 } };
    // The table is created WITHOUT the split keys, so the availability check against
    // them should report the table as not available (META holds a single region row).
    admin.createTable(desc);
    boolean tableAvailable = admin.isTableAvailable(tableName, splitKeys);
    assertFalse("Table should be created with 1 row in META", tableAvailable);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
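
For contrast, when the table is actually created with those split keys, the same check can succeed; a minimal sketch under that assumption (the helper class and method are hypothetical):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

import java.io.IOException;

public class SplitKeyAvailability {
    static boolean createWithSplitsAndCheck(Admin admin, TableName tableName) throws IOException {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor("col"));
        byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 } };
        // Create the table WITH the split keys this time, so META gets one row per region
        // boundary and the availability check against the same keys can return true.
        admin.createTable(desc, splitKeys);
        return admin.isTableAvailable(tableName, splitKeys);
    }
}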

Example 29 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestAdmin1, method testOnlineChangeTableSchema.

/**
   * Verify that schema modifications take effect while the table stays online.
   * @throws IOException
   * @throws InterruptedException
   */
@Test(timeout = 300000)
public void testOnlineChangeTableSchema() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor[] tables = admin.listTables();
    int numTables = tables.length;
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
    tables = this.admin.listTables();
    assertEquals(numTables + 1, tables.length);
    // FIRST, do htabledescriptor changes.
    HTableDescriptor htd = this.admin.getTableDescriptor(tableName);
    // Make a copy and assert copy is good.
    HTableDescriptor copy = new HTableDescriptor(htd);
    assertTrue(htd.equals(copy));
    // Now amend the copy. Introduce differences.
    long newFlushSize = htd.getMemStoreFlushSize() / 2;
    if (newFlushSize <= 0) {
        newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
    }
    copy.setMemStoreFlushSize(newFlushSize);
    final String key = "anyoldkey";
    assertTrue(htd.getValue(key) == null);
    copy.setValue(key, key);
    boolean expectedException = false;
    try {
        admin.modifyTable(tableName, copy);
    } catch (TableNotDisabledException re) {
        expectedException = true;
    }
    assertFalse(expectedException);
    HTableDescriptor modifiedHtd = this.admin.getTableDescriptor(tableName);
    assertFalse(htd.equals(modifiedHtd));
    assertTrue(copy.equals(modifiedHtd));
    assertEquals(newFlushSize, modifiedHtd.getMemStoreFlushSize());
    assertEquals(key, modifiedHtd.getValue(key));
    // Now work on column family changes.
    int countOfFamilies = modifiedHtd.getFamilies().size();
    assertTrue(countOfFamilies > 0);
    HColumnDescriptor hcd = modifiedHtd.getFamilies().iterator().next();
    int maxversions = hcd.getMaxVersions();
    final int newMaxVersions = maxversions + 1;
    hcd.setMaxVersions(newMaxVersions);
    final byte[] hcdName = hcd.getName();
    expectedException = false;
    try {
        this.admin.modifyColumnFamily(tableName, hcd);
    } catch (TableNotDisabledException re) {
        expectedException = true;
    }
    assertFalse(expectedException);
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    HColumnDescriptor modifiedHcd = modifiedHtd.getFamily(hcdName);
    assertEquals(newMaxVersions, modifiedHcd.getMaxVersions());
    // Try adding a column
    assertFalse(this.admin.isTableDisabled(tableName));
    final String xtracolName = "xtracol";
    HColumnDescriptor xtracol = new HColumnDescriptor(xtracolName);
    xtracol.setValue(xtracolName, xtracolName);
    expectedException = false;
    try {
        this.admin.addColumnFamily(tableName, xtracol);
    } catch (TableNotDisabledException re) {
        expectedException = true;
    }
    // Add column should work even if the table is enabled
    assertFalse(expectedException);
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    hcd = modifiedHtd.getFamily(xtracol.getName());
    assertTrue(hcd != null);
    assertTrue(hcd.getValue(xtracolName).equals(xtracolName));
    // Delete the just-added column.
    this.admin.deleteColumnFamily(tableName, xtracol.getName());
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    hcd = modifiedHtd.getFamily(xtracol.getName());
    assertTrue(hcd == null);
    // Delete the table
    this.admin.disableTable(tableName);
    this.admin.deleteTable(tableName);
    this.admin.listTables();
    assertFalse(this.admin.tableExists(tableName));
}
Also used : TableNotDisabledException(org.apache.hadoop.hbase.TableNotDisabledException) TableName(org.apache.hadoop.hbase.TableName) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
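
On HBase 2.x the equivalent online schema change is written against immutable descriptors: fetch the current TableDescriptor, derive a modified copy with the builder, and hand it back to modifyTable. A sketch, with illustrative class and method names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

import java.io.IOException;

public class OnlineSchemaChange {
    static void halveMemStoreFlushSize(Admin admin, TableName tableName) throws IOException {
        // Fetch the current (immutable) descriptor and derive a modified copy.
        TableDescriptor current = admin.getDescriptor(tableName);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .setMemStoreFlushSize(current.getMemStoreFlushSize() / 2)
            .build();
        // As in the test above, no disableTable() is needed: the change is applied online.
        admin.modifyTable(modified);
    }
}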

Example 30 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestAdmin1, method testEnableTableRetainAssignment.

/**
   * Test retain assignment on enableTable.
   *
   * @throws IOException
   */
@Test(timeout = 300000)
public void testEnableTableRetainAssignment() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } };
    int expectedRegions = splitKeys.length + 1;
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, splitKeys);
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        List<HRegionLocation> regions = l.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size());
        // Disable table.
        admin.disableTable(tableName);
        // Enable table, use retain assignment to assign regions.
        admin.enableTable(tableName);
        List<HRegionLocation> regions2 = l.getAllRegionLocations();
        // Check the assignment.
        assertEquals(regions.size(), regions2.size());
        assertTrue(regions2.containsAll(regions));
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
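
To make "retain assignment" concrete, the region-to-server mapping can be dumped before and after the disable/enable cycle with the same RegionLocator API; with retained assignment the printed pairs should match. A small sketch (the helper class is hypothetical):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

import java.io.IOException;

public class RegionAssignmentDump {
    static void dumpAssignment(Connection connection, TableName tableName) throws IOException {
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            for (HRegionLocation location : locator.getAllRegionLocations()) {
                // One line per region: encoded region name and the server hosting it.
                System.out.println(location.getRegionInfo().getEncodedName()
                    + " -> " + location.getServerName());
            }
        }
    }
}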

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 867
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 555
Test (org.junit.Test): 425
TableName (org.apache.hadoop.hbase.TableName): 258
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 171
IOException (java.io.IOException): 167
Put (org.apache.hadoop.hbase.client.Put): 149
Table (org.apache.hadoop.hbase.client.Table): 134
Path (org.apache.hadoop.fs.Path): 127
Admin (org.apache.hadoop.hbase.client.Admin): 121
Configuration (org.apache.hadoop.conf.Configuration): 87
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 62
Connection (org.apache.hadoop.hbase.client.Connection): 57
Scan (org.apache.hadoop.hbase.client.Scan): 51
Cell (org.apache.hadoop.hbase.Cell): 44
Delete (org.apache.hadoop.hbase.client.Delete): 44
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 43