use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestAssignmentManagerOnCluster method testAssignDisabledRegion.
/**
* Test force unassign/assign a region of a disabled table
*/
@Test(timeout = 60000)
public void testAssignDisabledRegion() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  MyMaster master = null;
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    // Assign the region
    master = (MyMaster) cluster.getMaster();
    AssignmentManager am = master.getAssignmentManager();
    RegionStates regionStates = am.getRegionStates();
    assertTrue(TEST_UTIL.assignRegion(hri));
    // Disable the table
    admin.disableTable(tableName);
    assertTrue(regionStates.isRegionOffline(hri));
    // You can't assign a disabled region
    am.assign(hri, true);
    assertTrue(regionStates.isRegionOffline(hri));
    // You can't unassign a disabled region either
    am.unassign(hri);
    assertTrue(regionStates.isRegionOffline(hri));
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
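The tests on this page pass only a family name to HColumnDescriptor. For reference, a minimal sketch of a more fully configured family; the values (three versions, one-day TTL, GZ compression, ROW bloom filter) and the table name are illustrative, not taken from the tests above:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnFamilyExample {
  public static HTableDescriptor describeTable() {
    HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("f"));
    // Keep up to three cell versions instead of the default.
    family.setMaxVersions(3);
    // Expire cells one day after they are written (TTL is in seconds).
    family.setTimeToLive(24 * 60 * 60);
    // Compress store files and enable a row-level bloom filter.
    family.setCompressionType(Compression.Algorithm.GZ);
    family.setBloomFilterType(BloomType.ROW);
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("example"));
    table.addFamily(family);
    return table;
  }
}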
use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestCatalogJanitor method testCleanParent.
@Test
public void testCleanParent() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  MasterServices services = new MockMasterServices(htu);
  try {
    CatalogJanitor janitor = new CatalogJanitor(services);
    // Create regions.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, we do not
    // remove the parent.
    Result r = createResult(parent, splita, splitb);
    // Add a reference under splitA's directory so we don't clear out the parent.
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
    Path storedir = HStore.getStoreHomedir(tabledir, splita, htd.getColumnFamilies()[0].getName());
    Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    Path path = ref.write(fs, p);
    assertTrue(fs.exists(path));
    assertFalse(janitor.cleanParent(parent, r));
    // Remove the reference file and try again.
    assertTrue(fs.delete(p, true));
    assertTrue(janitor.cleanParent(parent, r));
  } finally {
    services.stop("shutdown");
  }
}
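The reference file written above follows the split naming convention the REF_NAME_PARSER comment alludes to: the referenced store file's name, a dot, then the parent region's encoded name. A self-contained sketch of parsing that convention; the regex and helper are illustrative, not HBase's actual parser:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ReferenceNameExample {
  // Illustrative pattern: "<storeFileName>.<parentEncodedRegionName>",
  // mirroring the name constructed in testCleanParent above.
  private static final Pattern REF_NAME = Pattern.compile("^(.+)\\.([0-9a-f]+)$");

  /** Returns the encoded parent region name, or null if this is not a reference file name. */
  public static String referencedRegion(String fileName) {
    Matcher m = REF_NAME.matcher(fileName);
    return m.matches() ? m.group(2) : null;
  }
}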
use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestCatalogJanitor method testDuplicateHFileResolution.
/**
 * Test that archiving a store file whose name matches one already in the archive
 * causes the previously archived file to be moved aside to a timestamped backup.
 */
@Test
public void testDuplicateHFileResolution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  MasterServices services = new MockMasterServices(htu);
  // create the janitor
  CatalogJanitor janitor = new CatalogJanitor(services);
  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place, we do not
  // remove the parent.
  Result r = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // Have to set the root directory since we use it in HFileDisposer to figure out how to get
  // to the archive directory. Otherwise it just seems to pick the first root directory it can
  // find (so the single test passes, but when the full suite is run, things get borked).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  System.out.println("Old root:" + rootdir);
  System.out.println("Old table:" + tabledir);
  System.out.println("Old store:" + storedir);
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
      tabledir, htd.getColumnFamilies()[0].getName());
  System.out.println("Old archive:" + storeArchive);
  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);
  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));
  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
  // now add store files with the same names as before to check backup
  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);
  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));
  // and now check to make sure that the files have actually been archived
  archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
  // cleanup
  services.stop("Test finished");
  janitor.cancel(true);
}
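The backup behaviour this test asserts can be sketched with plain Hadoop FileSystem calls: before moving a file into the archive, any already-archived file of the same name is renamed aside rather than overwritten. A minimal illustration of the idea, not HBase's actual archiving code; the timestamp suffix format is an assumption:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveBackupSketch {
  /**
   * Moves src into archiveDir. If a file with the same name has already been
   * archived, the old copy is renamed aside with a timestamp suffix so it is
   * never silently overwritten.
   */
  public static void archive(FileSystem fs, Path src, Path archiveDir) throws IOException {
    Path dest = new Path(archiveDir, src.getName());
    if (fs.exists(dest)) {
      // Illustrative backup name: "<name>.<currentTimeMillis>".
      Path backup = new Path(archiveDir, src.getName() + "." + System.currentTimeMillis());
      if (!fs.rename(dest, backup)) {
        throw new IOException("Could not back up previously archived file " + dest);
      }
    }
    if (!fs.rename(src, dest)) {
      throw new IOException("Could not archive " + src);
    }
  }
}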
use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestDistributedLogSplitting method testSameVersionUpdatesRecoveryWithCompaction.
@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testSameVersionUpdatesRecoveryWithCompaction() throws Exception {
  LOG.info("testSameVersionUpdatesRecoveryWithCompaction");
  conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 30 * 1024);
  conf.setInt("hbase.hstore.compactionThreshold", 3);
  startCluster(NUM_RS);
  final AtomicLong sequenceId = new AtomicLong(100);
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_LOG_LINES = 2000;
  // Turn off load balancing to prevent regions from moving around;
  // otherwise they will consume the recovered.edits.
  master.balanceSwitch(false);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
  try {
    // Find a region server that is not carrying hbase:meta.
    List<HRegionInfo> regions = null;
    HRegionServer hrs = null;
    for (int i = 0; i < NUM_RS; i++) {
      boolean isCarryingMeta = false;
      hrs = rsts.get(i).getRegionServer();
      regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
      for (HRegionInfo region : regions) {
        if (region.isMetaRegion()) {
          isCarryingMeta = true;
          break;
        }
      }
      if (isCarryingMeta) {
        continue;
      }
      break;
    }
    LOG.info("#regions = " + regions.size());
    Iterator<HRegionInfo> it = regions.iterator();
    while (it.hasNext()) {
      HRegionInfo region = it.next();
      if (region.isMetaTable()
          || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
        it.remove();
      }
    }
    if (regions.isEmpty()) {
      return;
    }
    HRegionInfo curRegionInfo = regions.get(0);
    byte[] startRow = curRegionInfo.getStartKey();
    if (startRow == null || startRow.length == 0) {
      startRow = new byte[] { 0, 0, 0, 0, 1 };
    }
    byte[] row = Bytes.incrementBytes(startRow, 1);
    // Use the last 5 bytes because HBaseTestingUtility.createMultiRegions uses 5-byte keys.
    row = Arrays.copyOfRange(row, 3, 8);
    long value = 0;
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] family = Bytes.toBytes("family");
    byte[] qualifier = Bytes.toBytes("c1");
    long timeStamp = System.currentTimeMillis();
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(family));
    final WAL wal = hrs.getWAL(curRegionInfo);
    for (int i = 0; i < NUM_LOG_LINES; i += 1) {
      WALEdit e = new WALEdit();
      value++;
      e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
      wal.append(curRegionInfo,
          new WALKey(curRegionInfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()),
          e, true);
    }
    wal.sync();
    wal.shutdown();
    // Wait for the abort to complete.
    this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
    // Verify we got the last value.
    LOG.info("Verification Starts...");
    Get g = new Get(row);
    Result r = ht.get(g);
    long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
    assertEquals(value, theStoredVal);
    // Verify again after a flush and compaction.
    LOG.info("Verification after flush...");
    TEST_UTIL.getAdmin().flush(tableName);
    TEST_UTIL.getAdmin().compact(tableName);
    // Wait for the compaction to complete.
    TEST_UTIL.waitFor(30000, 200, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return TEST_UTIL.getAdmin().getCompactionState(tableName) == CompactionState.NONE;
      }
    });
    r = ht.get(g);
    theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
    assertEquals(value, theStoredVal);
  } finally {
    if (ht != null) {
      ht.close();
    }
    if (zkw != null) {
      zkw.close();
    }
  }
}
use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestMaster method testMoveThrowsUnknownRegionException.
@Test
public void testMoveThrowsUnknownRegionException() throws IOException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor hcd = new HColumnDescriptor("value");
  htd.addFamily(hcd);
  admin.createTable(htd, null);
  try {
    HRegionInfo hri = new HRegionInfo(tableName, Bytes.toBytes("A"), Bytes.toBytes("Z"));
    admin.move(hri.getEncodedNameAsBytes(), null);
    fail("Region should not be moved since it is fake");
  } catch (IOException ioe) {
    assertTrue(ioe instanceof UnknownRegionException);
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
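For contrast with the fabricated region above, a legitimate move first looks the region up through a RegionLocator, so the master recognizes its encoded name. A minimal sketch assuming an open Connection and Admin; passing null as the destination lets the master choose a target server:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MoveRegionExample {
  /** Moves the first region of the given table to a master-chosen server. */
  public static void moveFirstRegion(Connection conn, Admin admin, TableName table)
      throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      HRegionLocation location = locator.getAllRegionLocations().get(0);
      HRegionInfo region = location.getRegionInfo();
      // Unlike the fake HRegionInfo in the test, this encoded name is known
      // to the master, so the move does not throw UnknownRegionException.
      admin.move(region.getEncodedNameAsBytes(), null);
    }
  }
}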