use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class TestAdmin1 method testSplitAndMergeWithReplicaTable.
@Test
public void testSplitAndMergeWithReplicaTable() throws Exception {
// The test tries to directly split replica regions and directly merge replica regions. These
// are not allowed. The test validates that. Then the test does a valid split/merge of allowed
// regions.
// Set up a table with 3 regions and replication set to 3
final TableName tableName = TableName.valueOf(name.getMethodName());
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.setRegionReplication(3);
byte[] cf = "f".getBytes();
HColumnDescriptor hcd = new HColumnDescriptor(cf);
desc.addFamily(hcd);
byte[][] splitRows = new byte[2][];
splitRows[0] = new byte[] { (byte) '4' };
splitRows[1] = new byte[] { (byte) '7' };
TEST_UTIL.getAdmin().createTable(desc, splitRows);
List<HRegion> oldRegions;
do {
oldRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
Thread.sleep(10);
} while (oldRegions.size() != 9); // 3 regions * 3 replicas
// write some data to the table
Table ht = TEST_UTIL.getConnection().getTable(tableName);
List<Put> puts = new ArrayList<>();
byte[] qualifier = "c".getBytes();
Put put = new Put(new byte[] { (byte) '1' });
put.addColumn(cf, qualifier, "100".getBytes());
puts.add(put);
put = new Put(new byte[] { (byte) '6' });
put.addColumn(cf, qualifier, "100".getBytes());
puts.add(put);
put = new Put(new byte[] { (byte) '8' });
put.addColumn(cf, qualifier, "100".getBytes());
puts.add(put);
ht.put(puts);
ht.close();
List<Pair<HRegionInfo, ServerName>> regions = MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName);
boolean gotException = false;
// the element at index 1 would be a replica (since the meta reader gives us the regions in
// order). Try splitting that region via the split API. Should fail.
try {
TEST_UTIL.getAdmin().splitRegion(regions.get(1).getFirst().getRegionName());
} catch (IllegalArgumentException ex) {
gotException = true;
}
assertTrue(gotException);
gotException = false;
// Try splitting the same replica via a different split API (the difference is that
// this API goes direct to the regionserver, skipping any checks in the admin). Should fail.
try {
TEST_UTIL.getHBaseAdmin().split(regions.get(1).getSecond(), regions.get(1).getFirst(), new byte[] { (byte) '1' });
} catch (IOException ex) {
gotException = true;
}
assertTrue(gotException);
gotException = false;
// Try merging a replica with another. Should fail.
try {
TEST_UTIL.getHBaseAdmin().mergeRegionsSync(regions.get(1).getFirst().getEncodedNameAsBytes(), regions.get(2).getFirst().getEncodedNameAsBytes(), true);
} catch (IllegalArgumentException m) {
gotException = true;
}
assertTrue(gotException);
// Try going to the master directly (that will skip the check in admin)
try {
byte[][] nameofRegionsToMerge = new byte[2][];
nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster().mergeTableRegions(null, request);
} catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException m) {
Throwable t = m.getCause();
do {
if (t instanceof MergeRegionException) {
gotException = true;
break;
}
t = t.getCause();
} while (t != null);
}
assertTrue(gotException);
}
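As a rough illustration of the valid path, the sketch below is a minimal standalone example, assuming a reachable cluster and a hypothetical table named "replicatedDemo": it locates a default (primary) replica of a region-replicated table and splits only that one, since passing a replica region to splitRegion() is what the test above shows being rejected.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SplitPrimaryReplicaExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("replicatedDemo"); // hypothetical table
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      for (HRegionInfo hri : admin.getTableRegions(tableName)) {
        // Only the default replica (replicaId 0) may be split; handing a replica's
        // region name to splitRegion() fails, as the test above demonstrates.
        if (hri.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
          admin.splitRegion(hri.getRegionName());
          break;
        }
      }
    }
  }
}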
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class TestAdmin1 method testHFileReplication.
/*
* Test DFS replication for column families, where one CF has the default replication (3) and the other
* is set to 1.
*/
@Test(timeout = 300000)
public void testHFileReplication() throws Exception {
final TableName tableName = TableName.valueOf(this.name.getMethodName());
String fn1 = "rep1";
HColumnDescriptor hcd1 = new HColumnDescriptor(fn1);
hcd1.setDFSReplication((short) 1);
String fn = "defaultRep";
HColumnDescriptor hcd = new HColumnDescriptor(fn);
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(hcd);
htd.addFamily(hcd1);
Table table = TEST_UTIL.createTable(htd, null);
TEST_UTIL.waitTableAvailable(tableName);
Put p = new Put(Bytes.toBytes("defaultRep_rk"));
byte[] q1 = Bytes.toBytes("q1");
byte[] v1 = Bytes.toBytes("v1");
p.addColumn(Bytes.toBytes(fn), q1, v1);
List<Put> puts = new ArrayList<>(2);
puts.add(p);
p = new Put(Bytes.toBytes("rep1_rk"));
p.addColumn(Bytes.toBytes(fn1), q1, v1);
puts.add(p);
try {
table.put(puts);
admin.flush(tableName);
List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(tableName);
for (HRegion r : regions) {
Store store = r.getStore(Bytes.toBytes(fn));
for (StoreFile sf : store.getStorefiles()) {
assertTrue(sf.toString().contains(fn));
assertTrue("Column family " + fn + " should have 3 copies", FSUtils.getDefaultReplication(TEST_UTIL.getTestFileSystem(), sf.getPath()) == (sf.getFileInfo().getFileStatus().getReplication()));
}
store = r.getStore(Bytes.toBytes(fn1));
for (StoreFile sf : store.getStorefiles()) {
assertTrue(sf.toString().contains(fn1));
assertTrue("Column family " + fn1 + " should have only 1 copy", 1 == sf.getFileInfo().getFileStatus().getReplication());
}
}
} finally {
if (admin.isTableEnabled(tableName)) {
this.admin.disableTable(tableName);
this.admin.deleteTable(tableName);
}
}
}
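For comparison, here is a minimal sketch of creating such a table outside the test harness, assuming a running cluster and a hypothetical table name "dfsRepDemo": one family keeps the cluster-wide HDFS replication while the other is pinned to a single copy via setDFSReplication.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DfsReplicationExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("dfsRepDemo")); // hypothetical name
      // "defaultRep" inherits the cluster-wide dfs.replication (typically 3).
      htd.addFamily(new HColumnDescriptor("defaultRep"));
      // "rep1" asks HDFS to keep only one copy of its HFiles.
      HColumnDescriptor singleCopy = new HColumnDescriptor("rep1");
      singleCopy.setDFSReplication((short) 1);
      htd.addFamily(singleCopy);
      admin.createTable(htd);
    }
  }
}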
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class TestAdmin1 method testDeleteLastColumnFamily.
@Test(timeout = 300000)
public void testDeleteLastColumnFamily() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
while (!this.admin.isTableEnabled(TableName.valueOf(name.getMethodName()))) {
Thread.sleep(10);
}
// test for enabled table
try {
this.admin.deleteColumnFamily(tableName, HConstants.CATALOG_FAMILY);
fail("Should have failed to delete the only column family of a table");
} catch (InvalidFamilyOperationException ex) {
// expected
}
// test for disabled table
this.admin.disableTable(tableName);
try {
this.admin.deleteColumnFamily(tableName, HConstants.CATALOG_FAMILY);
fail("Should have failed to delete the only column family of a table");
} catch (InvalidFamilyOperationException ex) {
// expected
}
this.admin.deleteTable(tableName);
}
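A minimal standalone sketch of the same rule, assuming a running cluster and a hypothetical table "cfDemo" with two families: deleting one family succeeds, while deleting the last remaining family raises InvalidFamilyOperationException whether the table is enabled or disabled.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteColumnFamilyExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("cfDemo"); // hypothetical name
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("f1"));
      htd.addFamily(new HColumnDescriptor("f2"));
      admin.createTable(htd);
      // Removing one of two families is fine...
      admin.deleteColumnFamily(tableName, Bytes.toBytes("f2"));
      try {
        // ...but removing the last remaining family is rejected.
        admin.deleteColumnFamily(tableName, Bytes.toBytes("f1"));
      } catch (InvalidFamilyOperationException expected) {
        // a table must always have at least one column family
      }
    }
  }
}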
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class TestAdmin1 method splitTest.
void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, int numVersions, int blockSize) throws Exception {
TableName tableName = TableName.valueOf("testForceSplit");
StringBuilder sb = new StringBuilder();
// Add tail to String so can see better in logs where a test is running.
for (int i = 0; i < rowCounts.length; i++) {
sb.append("_").append(Integer.toString(rowCounts[i]));
}
assertFalse(admin.tableExists(tableName));
try (final Table table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize);
final RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
int rowCount = 0;
byte[] q = new byte[0];
// Insert rows into the column families. The number of rows that have values
// in a specific column family is decided by rowCounts[familyIndex]
for (int index = 0; index < familyNames.length; index++) {
ArrayList<Put> puts = new ArrayList<>(rowCounts[index]);
for (int i = 0; i < rowCounts[index]; i++) {
byte[] k = Bytes.toBytes(i);
Put put = new Put(k);
put.addColumn(familyNames[index], q, k);
puts.add(put);
}
table.put(puts);
if (rowCount < rowCounts[index]) {
rowCount = rowCounts[index];
}
}
// get the initial layout (should just be one region)
List<HRegionLocation> m = locator.getAllRegionLocations();
LOG.info("Initial regions (" + m.size() + "): " + m);
assertTrue(m.size() == 1);
// Verify row count
Scan scan = new Scan();
ResultScanner scanner = table.getScanner(scan);
int rows = 0;
for (@SuppressWarnings("unused") Result result : scanner) {
rows++;
}
scanner.close();
assertEquals(rowCount, rows);
// Have an outstanding scan going on to make sure we can scan over splits.
scan = new Scan();
scanner = table.getScanner(scan);
// Scan first row so we are into first region before split happens.
scanner.next();
// Split the table
this.admin.split(tableName, splitPoint);
final AtomicInteger count = new AtomicInteger(0);
Thread t = new Thread("CheckForSplit") {
@Override
public void run() {
for (int i = 0; i < 45; i++) {
try {
sleep(1000);
} catch (InterruptedException e) {
continue;
}
// check again
List<HRegionLocation> regions = null;
try {
regions = locator.getAllRegionLocations();
} catch (IOException e) {
e.printStackTrace();
}
if (regions == null)
continue;
count.set(regions.size());
if (count.get() >= 2) {
LOG.info("Found: " + regions);
break;
}
LOG.debug("Cycle waiting on split");
}
LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
}
};
t.setPriority(Thread.NORM_PRIORITY - 2);
t.start();
t.join();
// Verify row count
// We counted one row above.
rows = 1;
for (@SuppressWarnings("unused") Result result : scanner) {
rows++;
if (rows > rowCount) {
scanner.close();
assertTrue("Scanned more than expected (" + rowCount + ")", false);
}
}
scanner.close();
assertEquals(rowCount, rows);
List<HRegionLocation> regions = null;
try {
regions = locator.getAllRegionLocations();
} catch (IOException e) {
e.printStackTrace();
}
assertEquals(2, regions.size());
if (splitPoint != null) {
// make sure the split point matches our explicit configuration
assertEquals(Bytes.toString(splitPoint), Bytes.toString(regions.get(0).getRegionInfo().getEndKey()));
assertEquals(Bytes.toString(splitPoint), Bytes.toString(regions.get(1).getRegionInfo().getStartKey()));
LOG.debug("Properly split on " + Bytes.toString(splitPoint));
} else {
if (familyNames.length > 1) {
int splitKey = Bytes.toInt(regions.get(0).getRegionInfo().getEndKey());
// check if splitKey is based on the largest column family
// in terms of its store size
int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey);
LOG.debug("SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily + ", r=" + regions.get(0).getRegionInfo());
for (int index = 0; index < familyNames.length; index++) {
int delta = Math.abs(rowCounts[index] / 2 - splitKey);
if (delta < deltaForLargestFamily) {
assertTrue("Delta " + delta + " for family " + index + " should be at least " + "deltaForLargestFamily " + deltaForLargestFamily, false);
}
}
}
}
TEST_UTIL.deleteTable(tableName);
}
}
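Outside the test harness, a force split can be issued with just the public Admin API. The sketch below is a minimal example, assuming a running cluster and that the table already exists with integer row keys like the ones splitTest writes; the split point 500 is an arbitrary illustrative value.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ForceSplitExample {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("testForceSplit"); // name used by the test above
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin();
         RegionLocator locator = connection.getRegionLocator(tableName)) {
      // Ask the master to split at an explicit row key; without a split point
      // the region server picks the midpoint of the largest store.
      admin.split(tableName, Bytes.toBytes(500));
      // Splits are asynchronous: poll the locator until the daughter regions show up.
      while (locator.getAllRegionLocations().size() < 2) {
        Thread.sleep(1000);
      }
    }
  }
}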
use of org.apache.hadoop.hbase.TableName in project hbase by apache.
the class TestAdmin1 method testDisableAndEnableTable.
@Test(timeout = 300000)
public void testDisableAndEnableTable() throws IOException {
final byte[] row = Bytes.toBytes("row");
final byte[] qualifier = Bytes.toBytes("qualifier");
final byte[] value = Bytes.toBytes("value");
final TableName table = TableName.valueOf(name.getMethodName());
Table ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
Put put = new Put(row);
put.addColumn(HConstants.CATALOG_FAMILY, qualifier, value);
ht.put(put);
Get get = new Get(row);
get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
ht.get(get);
this.admin.disableTable(ht.getName());
assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster().getMaster().getTableStateManager().isTableState(ht.getName(), TableState.State.DISABLED));
assertEquals(TableState.State.DISABLED, getStateFromMeta(table));
// Test that table is disabled
get = new Get(row);
get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
boolean ok = false;
try {
ht.get(get);
} catch (TableNotEnabledException e) {
ok = true;
}
ok = false;
// verify that scan encounters correct exception
Scan scan = new Scan();
try {
ResultScanner scanner = ht.getScanner(scan);
Result res = null;
do {
res = scanner.next();
} while (res != null);
} catch (TableNotEnabledException e) {
ok = true;
}
assertTrue(ok);
this.admin.enableTable(table);
assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster().getTableStateManager().isTableState(ht.getName(), TableState.State.ENABLED));
assertEquals(TableState.State.ENABLED, getStateFromMeta(table));
// Test that table is enabled
try {
ht.get(get);
} catch (RetriesExhaustedException e) {
ok = false;
}
assertTrue(ok);
ht.close();
}
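A condensed sketch of the same disable/enable round trip against a hypothetical, already-populated table "disableDemo", assuming a reachable cluster: reads fail with TableNotEnabledException while the table is disabled and succeed again after enableTable.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DisableEnableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("disableDemo"); // hypothetical name
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin();
         Table table = connection.getTable(tableName)) {
      admin.disableTable(tableName);
      try {
        // Reads against a disabled table fail fast.
        table.get(new Get(Bytes.toBytes("row")));
      } catch (TableNotEnabledException expected) {
        // expected while the table is disabled
      }
      admin.enableTable(tableName);
      if (admin.isTableEnabled(tableName)) {
        table.get(new Get(Bytes.toBytes("row"))); // works again once enabled
      }
    }
  }
}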