Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestMasterOperationsForRegionReplicas, method testCreateTableWithSingleReplica.
@Test
public void testCreateTableWithSingleReplica() throws Exception {
  final int numRegions = 3;
  final int numReplica = 1;
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setRegionReplication(numReplica);
    desc.addFamily(new HColumnDescriptor("family"));
    ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
    validateNumberOfRowsInMeta(tableName, numRegions, ADMIN.getConnection());
    List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName);
    // With a single replica, meta should hold exactly one entry per region
    assert (hris.size() == numRegions * numReplica);
  } finally {
    ADMIN.disableTable(tableName);
    ADMIN.deleteTable(tableName);
  }
}
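The test above exercises the single-replica case. For context, here is a minimal standalone sketch of creating the same pre-split layout with region replication raised to three. It assumes a running cluster reachable through the default client configuration; the table name is a hypothetical placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateReplicatedTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "replicated_table" is a placeholder name
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("replicated_table"));
      desc.setRegionReplication(3); // three replicas of every region
      desc.addFamily(new HColumnDescriptor("family"));
      // pre-split into 3 regions across the key range [A, Z), as in the test
      admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 3);
    }
  }
}

The assertion pattern in the test, hris.size() == numRegions * numReplica, is how it counts the expected region entries for the table.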
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestAssignmentManagerMetrics, method testRITAssignmentManagerMetrics.
@Test
public void testRITAssignmentManagerMetrics() throws Exception {
  final TableName TABLENAME = TableName.valueOf(name.getMethodName());
  final byte[] FAMILY = Bytes.toBytes("family");
  Table table = null;
  try {
    table = TEST_UTIL.createTable(TABLENAME, FAMILY);
    final byte[] row = Bytes.toBytes("row");
    final byte[] qualifier = Bytes.toBytes("qualifier");
    final byte[] value = Bytes.toBytes("value");
    Put put = new Put(row);
    put.addColumn(FAMILY, qualifier, value);
    table.put(put);
    // Sleep for msgInterval * 3 ms so the doMetrics chore can catch up
    Thread.sleep(msgInterval * 3);
    // Check that the RIT count is 0
    MetricsAssignmentManagerSource amSource =
        master.getAssignmentManager().getAssignmentManagerMetrics().getMetricsProcSource();
    metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_NAME, 0, amSource);
    metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_OVER_THRESHOLD_NAME, 0, amSource);
    // Alter the table with a non-existent coprocessor
    HTableDescriptor htd = new HTableDescriptor(TABLENAME);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    String spec = "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2";
    htd.addCoprocessorWithSpec(spec);
    try {
      TEST_UTIL.getAdmin().modifyTable(TABLENAME, htd);
      fail("Expected region failed to open");
    } catch (IOException e) {
      // Expected: the RS will crash and the assignment will spin forever waiting for an RS
      // to open the region. The region will not go to FAILED_OPEN because in this case
      // we have just one RS and it will do only one retry.
    }
    // Sleep for msgInterval * 3 ms so the doMetrics chore can catch up
    Thread.sleep(msgInterval * 3);
    metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_NAME, 1, amSource);
    metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_OVER_THRESHOLD_NAME, 1, amSource);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
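The test deliberately points the coprocessor spec at a jar that does not exist so the region fails to open. For reference, here is a hedged sketch of the same spec format used with a jar that does exist; the HDFS path and observer class name are hypothetical placeholders, and admin is assumed to be an open Admin handle.

// Spec format: <jar location> | <class name> | <priority> | <key=value args>
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("observed_table")); // placeholder name
htd.addFamily(new HColumnDescriptor("family"));
String spec = "hdfs:///user/hbase/observer.jar"   // hypothetical jar location
    + "|com.example.MyRegionObserver"             // hypothetical RegionObserver class
    + "|1001"                                     // load priority
    + "|arg1=1,arg2=2";                           // arguments handed to the coprocessor
htd.addCoprocessorWithSpec(spec);
admin.modifyTable(htd.getTableName(), htd);       // regions pick up the observer on reopen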
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestAssignmentManagerOnCluster, method testOpenFailedUnrecoverable.
/**
 * Tests a region open failure that is not recoverable.
 */
@Test(timeout = 60000)
public void testOpenFailedUnrecoverable() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    FileSystem fs = FileSystem.get(conf);
    Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableName);
    Path regionDir = new Path(tableDir, hri.getEncodedName());
    // Create a file with the same name as the region dir to interfere with region opening
    fs.create(regionDir, true);
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    AssignmentManager am = master.getAssignmentManager();
    assertFalse(TEST_UTIL.assignRegion(hri));
    RegionState state = am.getRegionStates().getRegionState(hri);
    assertEquals(RegionState.State.FAILED_OPEN, state.getState());
    // Failed to open due to a file system issue. The region state should
    // carry the opening region server so that we can force-close it
    // later on before opening it again. See HBASE-9092.
    assertNotNull(state.getServerName());
    // Remove the blocking file so that the region can be opened
    fs.delete(regionDir, true);
    assertTrue(TEST_UTIL.assignRegion(hri));
    ServerName serverName =
        master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
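The comment above references HBASE-9092: the region state keeps the opening server so the region can be force-closed before it is reassigned. A minimal sketch of the client-side equivalent, assuming a Connection named conn and an HRegionInfo named hri for the stuck region:

try (Admin admin = conn.getAdmin()) {
  // force=true closes the region even though it never opened cleanly
  admin.unassign(hri.getRegionName(), true);
  // ask the master to pick a server and open the region again
  admin.assign(hri.getRegionName());
}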
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestAssignmentManagerOnCluster, method testOpenFailed.
/**
 * Tests a failed region open: the load balancer provides no plan for the region.
 */
@Test(timeout = 60000)
public void testOpenFailed() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    MyLoadBalancer.controledRegion = hri;
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    AssignmentManager am = master.getAssignmentManager();
    assertFalse(TEST_UTIL.assignRegion(hri));
    RegionState state = am.getRegionStates().getRegionState(hri);
    assertEquals(RegionState.State.FAILED_OPEN, state.getState());
    // Failed to open since there was no plan, so the region is on no server
    assertNull(state.getServerName());
    MyLoadBalancer.controledRegion = null;
    assertTrue(TEST_UTIL.assignRegion(hri));
    ServerName serverName =
        master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
  } finally {
    MyLoadBalancer.controledRegion = null;
    TEST_UTIL.deleteTable(tableName);
  }
}
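Both failure tests drive the region into FAILED_OPEN and read its state through the master's AssignmentManager. From a plain client, the closest equivalent is the regions-in-transition view on ClusterStatus; a sketch, assuming an HBase 1.x client, a Connection named conn, and that getRegionsInTransition() returns a Map keyed by encoded region name as it does in that line:

// RegionState comes from org.apache.hadoop.hbase.master.RegionState
try (Admin admin = conn.getAdmin()) {
  Map<String, RegionState> rit = admin.getClusterStatus().getRegionsInTransition();
  for (RegionState state : rit.values()) {
    // print each region stuck in transition and its current state, e.g. FAILED_OPEN
    System.out.println(state.getRegion().getRegionNameAsString() + " -> " + state.getState());
  }
}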
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestStore, method testCreateWriter.
/**
* Verify that compression and data block encoding are respected by the
* Store.createWriterInTmp() method, used on store flush.
*/
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);
  // Test createWriterInTmp()
  StoreFileWriter writer = store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();
  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
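What this test verifies at the HFile level corresponds to settings that are normally applied per column family when the table is created. A minimal sketch, assuming an open Admin named admin; the table name is a placeholder:

HColumnDescriptor hcd = new HColumnDescriptor("family");
hcd.setCompressionType(Compression.Algorithm.GZ);  // GZIP-compress store file blocks
hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);  // delta-encode keys inside data blocks
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("encoded_table")); // placeholder
htd.addFamily(hcd);
admin.createTable(htd);
// Store files flushed for this family should then report GZ + DIFF,
// which is exactly what the HFile.Reader assertions above check.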