Example 51 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache hbase project.

From the class TestMasterOperationsForRegionReplicas, method testCreateTableWithSingleReplica.

@Test
public void testCreateTableWithSingleReplica() throws Exception {
    final int numRegions = 3;
    final int numReplica = 1;
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.setRegionReplication(numReplica);
        desc.addFamily(new HColumnDescriptor("family"));
        ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
        validateNumberOfRowsInMeta(tableName, numRegions, ADMIN.getConnection());
        List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName);
        // With numReplica == 1, hbase:meta should list exactly numRegions regions.
        assertEquals(numRegions * numReplica, hris.size());
    } finally {
        ADMIN.disableTable(tableName);
        ADMIN.deleteTable(tableName);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
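
In practice the column family descriptor is usually tuned before the table is created rather than used bare as above. A minimal sketch of common per-family settings, assuming an existing Admin handle named admin; the table name and values are illustrative, not part of the original test, and Compression here is org.apache.hadoop.hbase.io.compress.Compression:

HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
HColumnDescriptor family = new HColumnDescriptor("family");
// Keep up to three versions of each cell.
family.setMaxVersions(3);
// Compress store file blocks on disk.
family.setCompressionType(Compression.Algorithm.SNAPPY);
// Expire cells after one week (TTL is in seconds).
family.setTimeToLive(7 * 24 * 60 * 60);
desc.addFamily(family);
admin.createTable(desc);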

Example 52 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache hbase project.

From the class TestAssignmentManagerMetrics, method testRITAssignmentManagerMetrics.

@Test
public void testRITAssignmentManagerMetrics() throws Exception {
    final TableName TABLENAME = TableName.valueOf(name.getMethodName());
    final byte[] FAMILY = Bytes.toBytes("family");
    Table table = null;
    try {
        table = TEST_UTIL.createTable(TABLENAME, FAMILY);
        final byte[] row = Bytes.toBytes("row");
        final byte[] qualifier = Bytes.toBytes("qualifier");
        final byte[] value = Bytes.toBytes("value");
        Put put = new Put(row);
        put.addColumn(FAMILY, qualifier, value);
        table.put(put);
        // Sleep three report intervals so the doMetrics chore can catch up
        Thread.sleep(msgInterval * 3);
        // check the RIT is 0
        MetricsAssignmentManagerSource amSource = master.getAssignmentManager().getAssignmentManagerMetrics().getMetricsProcSource();
        metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_NAME, 0, amSource);
        metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_OVER_THRESHOLD_NAME, 0, amSource);
        // alter table with a non-existing coprocessor
        HTableDescriptor htd = new HTableDescriptor(TABLENAME);
        HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
        htd.addFamily(hcd);
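        // Coprocessor spec format: <jar path>|<class name>|<priority>|<key=value args>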
        String spec = "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2";
        htd.addCoprocessorWithSpec(spec);
        try {
            TEST_UTIL.getAdmin().modifyTable(TABLENAME, htd);
            fail("Expected the region open to fail");
        } catch (IOException e) {
        // Expected: the RS will crash and the assignment will spin forever waiting for an RS
        // to assign the region. The region will not go to FAILED_OPEN because in this case
        // we have just one RS and it will do only one retry.
        }
        // Sleep three report intervals so the doMetrics chore can catch up
        Thread.sleep(msgInterval * 3);
        metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_NAME, 1, amSource);
        metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_OVER_THRESHOLD_NAME, 1, amSource);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
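
The test deliberately points the spec at a jar and class that do not exist. For comparison, attaching a coprocessor that is already on the region server classpath uses the simpler overload; a sketch, reusing the hypothetical com.foo.FooRegionObserver class name from the spec above:

HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
htd.addFamily(new HColumnDescriptor("family"));
// Loads the class from the classpath, with default priority and no arguments.
htd.addCoprocessor("com.foo.FooRegionObserver");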

Example 53 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache hbase project.

From the class TestAssignmentManagerOnCluster, method testOpenFailedUnrecoverable.

/**
   * Tests a region open failure that is not recoverable.
   */
@Test(timeout = 60000)
public void testOpenFailedUnrecoverable() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(FAMILY));
        admin.createTable(desc);
        Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
        HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
        MetaTableAccessor.addRegionToMeta(meta, hri);
        FileSystem fs = FileSystem.get(conf);
        Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableName);
        Path regionDir = new Path(tableDir, hri.getEncodedName());
        // Create a file with the same name as the region dir to
        // interfere with region opening
        fs.create(regionDir, true);
        HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
        AssignmentManager am = master.getAssignmentManager();
        assertFalse(TEST_UTIL.assignRegion(hri));
        RegionState state = am.getRegionStates().getRegionState(hri);
        assertEquals(RegionState.State.FAILED_OPEN, state.getState());
        // Failed to open due to file system issue. Region state should
        // carry the opening region server so that we can force close it
        // later on before opening it again. See HBASE-9092.
        assertNotNull(state.getServerName());
        // remove the blocking file, so that region can be opened
        fs.delete(regionDir, true);
        assertTrue(TEST_UTIL.assignRegion(hri));
        ServerName serverName = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
        TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
    } finally {
        TEST_UTIL.deleteTable(tableName);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) ServerName(org.apache.hadoop.hbase.ServerName) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
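
The HRegionInfo constructor used above defines the region's key range explicitly. A sketch of the same pattern splitting a table into two regions at key "M"; the table name is hypothetical, and HConstants.EMPTY_BYTE_ARRAY marks the open-ended table boundaries:

TableName tn = TableName.valueOf("example");
// First region covers everything up to (but excluding) "M".
HRegionInfo first = new HRegionInfo(tn, HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("M"));
// Second region covers "M" to the end of the key space.
HRegionInfo second = new HRegionInfo(tn, Bytes.toBytes("M"), HConstants.EMPTY_BYTE_ARRAY);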

Example 54 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache hbase project.

From the class TestAssignmentManagerOnCluster, method testOpenFailed.

/**
   * Tests a region open failure that is recoverable once an assignment plan is available.
   */
@Test(timeout = 60000)
public void testOpenFailed() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(FAMILY));
        admin.createTable(desc);
        Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
        HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
        MetaTableAccessor.addRegionToMeta(meta, hri);
        MyLoadBalancer.controledRegion = hri;
        HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
        AssignmentManager am = master.getAssignmentManager();
        assertFalse(TEST_UTIL.assignRegion(hri));
        RegionState state = am.getRegionStates().getRegionState(hri);
        assertEquals(RegionState.State.FAILED_OPEN, state.getState());
        // Failed to open because there was no assignment plan, so the region is on no server
        assertNull(state.getServerName());
        MyLoadBalancer.controledRegion = null;
        assertTrue(TEST_UTIL.assignRegion(hri));
        ServerName serverName = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
        TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
    } finally {
        MyLoadBalancer.controledRegion = null;
        TEST_UTIL.deleteTable(tableName);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ServerName(org.apache.hadoop.hbase.ServerName) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
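
Both of these tests register a region in hbase:meta by hand via MetaTableAccessor.addRegionToMeta. When a test does not simply drop the whole table afterwards, the inverse cleanup is a call like the following, assuming the MetaTableAccessor.deleteRegion overload is available in the HBase version at hand:

// Remove the manually registered region row from hbase:meta.
MetaTableAccessor.deleteRegion(TEST_UTIL.getConnection(), hri);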

Example 55 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache hbase project.

From the class TestStore, method testCreateWriter.

/**
   * Verify that compression and data block encoding are respected by the
   * Store.createWriterInTmp() method, used on store flush.
   */
@Test
public void testCreateWriter() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setCompressionType(Compression.Algorithm.GZ);
    hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
    init(name.getMethodName(), conf, hcd);
    // Test createWriterInTmp()
    StoreFileWriter writer = store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false);
    Path path = writer.getPath();
    writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
    writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
    writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
    writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
    writer.close();
    // Verify that compression and encoding settings are respected
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
    Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
    reader.close();
}
Also used : Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) CompactionConfiguration(org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) HFile(org.apache.hadoop.hbase.io.hfile.HFile) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
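
The same pattern extends to other per-family settings that end up persisted in store files. A brief sketch; the values are illustrative, and BloomType is org.apache.hadoop.hbase.regionserver.BloomType:

HColumnDescriptor hcd = new HColumnDescriptor("family");
// Block compression for data on disk.
hcd.setCompressionType(Compression.Algorithm.GZ);
// Delta-encode keys inside data blocks.
hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
// Maintain a row+column bloom filter per store file.
hcd.setBloomFilterType(BloomType.ROWCOL);
// 64 KB HFile block size.
hcd.setBlocksize(64 * 1024);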

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 679 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 561 usages
Test (org.junit.Test): 358 usages
TableName (org.apache.hadoop.hbase.TableName): 200 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 137 usages
Put (org.apache.hadoop.hbase.client.Put): 132 usages
Table (org.apache.hadoop.hbase.client.Table): 118 usages
IOException (java.io.IOException): 112 usages
Admin (org.apache.hadoop.hbase.client.Admin): 112 usages
Path (org.apache.hadoop.fs.Path): 81 usages
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 74 usages
ArrayList (java.util.ArrayList): 66 usages
Configuration (org.apache.hadoop.conf.Configuration): 65 usages
Connection (org.apache.hadoop.hbase.client.Connection): 52 usages
Scan (org.apache.hadoop.hbase.client.Scan): 50 usages
Result (org.apache.hadoop.hbase.client.Result): 45 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 44 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 42 usages
Connection (java.sql.Connection): 41 usages
Properties (java.util.Properties): 38 usages
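
The aggregation reflects the dominant pattern across these examples: an HColumnDescriptor added to an HTableDescriptor, created through Admin, then written to through Table and Put. A condensed sketch of that flow; the table name and cell values are illustrative:

try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
     Admin admin = connection.getAdmin()) {
    TableName tn = TableName.valueOf("example");
    HTableDescriptor desc = new HTableDescriptor(tn);
    desc.addFamily(new HColumnDescriptor("family"));
    admin.createTable(desc);
    try (Table table = connection.getTable(tn)) {
        Put put = new Put(Bytes.toBytes("row"));
        put.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qualifier"), Bytes.toBytes("value"));
        table.put(put);
    }
}

Note that in HBase 2.x these descriptor classes are deprecated in favor of TableDescriptorBuilder and ColumnFamilyDescriptorBuilder, which is why new code rarely constructs HColumnDescriptor directly.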