Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
Class HBaseFsck, method rebuildMeta.
/**
* Rebuilds meta from information in hdfs/fs. Depends on configuration settings passed into
* hbck constructor to point to a particular fs/dir. Assumes HBase is OFFLINE.
*
* @param fix flag that determines if method should attempt to fix holes
* @return true if successful, false if attempt failed.
*/
public boolean rebuildMeta(boolean fix) throws IOException, InterruptedException {
  // TODO check to make sure hbase is offline. (or at least the table
  // currently being worked on is offline)
  // Determine what's on HDFS
  LOG.info("Loading HBase regioninfo from HDFS...");
  // populating regioninfo table.
  loadHdfsRegionDirs();
  int errs = errors.getErrorList().size();
  // update tableInfos based on region info in fs.
  tablesInfo = loadHdfsRegionInfos();
  checkHdfsIntegrity(false, false);
  // make sure ok.
  if (errors.getErrorList().size() != errs) {
    // While in error state, iterate until no more fixes possible
    while (true) {
      fixes = 0;
      suggestFixes(tablesInfo);
      errors.clear();
      // update tableInfos based on region info in fs.
      loadHdfsRegionInfos();
      checkHdfsIntegrity(shouldFixHdfsHoles(), shouldFixHdfsOverlaps());
      int errCount = errors.getErrorList().size();
      if (fixes == 0) {
        if (errCount > 0) {
          // failed to fix problems.
          return false;
        } else {
          // no fixes and no problems? drop out and fix stuff!
          break;
        }
      }
    }
  }
  // we can rebuild, move old meta out of the way and start
  LOG.info("HDFS regioninfo's seems good. Sidelining old hbase:meta");
  Path backupDir = sidelineOldMeta();
  LOG.info("Creating new hbase:meta");
  String walFactoryId = "hbck-meta-recovery-" + RandomStringUtils.randomNumeric(8);
  HRegion meta = createNewMeta(walFactoryId);
  // populate meta
  List<Put> puts = generatePuts(tablesInfo);
  if (puts == null) {
    LOG.fatal("Problem encountered when creating new hbase:meta entries. "
        + "You may need to restore the previously sidelined hbase:meta");
    return false;
  }
  meta.batchMutate(puts.toArray(new Put[puts.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
  meta.close();
  if (meta.getWAL() != null) {
    meta.getWAL().close();
  }
  // clean up the temporary hbck meta recovery WAL directory
  removeHBCKMetaRecoveryWALDir(walFactoryId);
  LOG.info("Success! hbase:meta table rebuilt.");
  LOG.info("Old hbase:meta is moved into " + backupDir);
  return true;
}
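For context, this method is usually driven by an offline meta-repair tool rather than called directly. Below is a minimal sketch of such a caller; it assumes the HBaseFsck(Configuration) constructor, an hbase-site.xml on the classpath pointing at the cluster, and a fully shut-down cluster. The class name is ours, not part of hbase.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class RebuildMetaExample {
  public static void main(String[] args) throws Exception {
    // HBase must be OFFLINE: rebuildMeta reads region dirs straight from
    // HDFS and replaces hbase:meta, which no live master may be serving.
    Configuration conf = HBaseConfiguration.create();
    HBaseFsck fsck = new HBaseFsck(conf);
    // fix = false: do not attempt to fix holes found in the region metadata.
    boolean rebuilt = fsck.rebuildMeta(false);
    System.exit(rebuilt ? 0 : 1);
  }
}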
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
Class HBaseFsckRepair, method createHDFSRegionDir.
/**
* Creates, flushes, and closes a new region.
*/
public static HRegion createHDFSRegionDir(Configuration conf, HRegionInfo hri,
    HTableDescriptor htd) throws IOException {
  // Create HRegion
  Path root = FSUtils.getRootDir(conf);
  HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
  // Close the new region to flush to disk. Close log file too.
  region.close();
  return region;
}
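A hedged usage sketch: the snippet below builds the descriptor and region info for a single missing region and materializes its directory on HDFS via the method above. The class name and the table/key values are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HBaseFsckRepair;

public class CreateRegionDirExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Describe the table and the key range the recreated region should cover.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo hri = new HRegionInfo(htd.getTableName(),
        Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
    // Writes the region directory (including .regioninfo) under the HBase
    // root dir, then closes the region so everything is flushed to disk.
    HBaseFsckRepair.createHDFSRegionDir(conf, hri, htd);
  }
}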
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
Class TestAsyncTableGetMultiThreaded, method test.
@Test
public void test() throws IOException, InterruptedException, ExecutionException {
  int numThreads = 20;
  AtomicBoolean stop = new AtomicBoolean(false);
  ExecutorService executor =
      Executors.newFixedThreadPool(numThreads, Threads.newDaemonThreadFactory("TestAsyncGet-"));
  List<Future<?>> futures = new ArrayList<>();
  IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
    run(stop);
    return null;
  })));
  Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
  Admin admin = TEST_UTIL.getAdmin();
  for (byte[] splitPoint : SPLIT_KEYS) {
    admin.split(TABLE_NAME, splitPoint);
    for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME)) {
      region.compact(true);
    }
    Thread.sleep(5000);
    admin.balancer(true);
    Thread.sleep(5000);
    ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
    ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
        .map(t -> t.getRegionServer().getServerName())
        .filter(s -> !s.equals(metaServer))
        .findAny().get();
    admin.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
        Bytes.toBytes(newMetaServer.getServerName()));
    Thread.sleep(5000);
  }
  stop.set(true);
  executor.shutdown();
  for (Future<?> future : futures) {
    future.get();
  }
}
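The run(stop) worker submitted to each thread is defined elsewhere in TestAsyncTableGetMultiThreaded and is not part of this snippet. A plausible reconstruction is sketched below; CONN, FAMILY, QUALIFIER and COUNT stand in for the test's actual static fields, and the exact row format and assertion are assumptions.

// Hypothetical reader loop: each thread gets every row over and over until
// the main thread flips the stop flag, showing that reads keep succeeding
// while regions split, compact, balance and hbase:meta moves concurrently.
private void run(AtomicBoolean stop) throws InterruptedException, ExecutionException {
  while (!stop.get()) {
    for (int i = 0; i < COUNT; i++) {
      Result result = CONN.getTable(TABLE_NAME)
          .get(new Get(Bytes.toBytes(String.format("%03d", i))))
          .get();
      assertEquals(i, Bytes.toInt(result.getValue(FAMILY, QUALIFIER)));
    }
  }
}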
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
Class TestFromClientSide3, method assertNoLocks.
private static void assertNoLocks(final TableName tableName)
    throws IOException, InterruptedException {
  HRegion region = (HRegion) find(tableName);
  assertEquals(0, region.getLockedRows().size());
}
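The find(tableName) helper used here is defined elsewhere in TestFromClientSide3. A plausible shape is sketched below, assuming HBaseTestingUtility's getRSForFirstRegionInTable, a single-region table, and the HRegionServer.getRegions accessor (named getOnlineRegions on older branches).

private static Region find(final TableName tableName)
    throws IOException, InterruptedException {
  // Locate the region server hosting the table's first (and only) region,
  // then hand back the live Region so the test can inspect its row locks.
  HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName);
  List<Region> regions = rs.getRegions(tableName);
  assertEquals(1, regions.size());
  return regions.get(0);
}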
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
Class TestCacheOnWrite, method testNotCachingDataBlocksDuringCompactionInternals.
private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
    throws IOException, InterruptedException {
  // TODO: need to change this test if we add a cache size threshold for
  // compactions, or if we implement some other kind of intelligent logic for
  // deciding what blocks to cache-on-write on compaction.
  final String table = "CompactionCacheOnWrite";
  final String cf = "myCF";
  final byte[] cfBytes = Bytes.toBytes(cf);
  final int maxVersions = 3;
  Region region = TEST_UTIL.createTestRegion(table,
      new HColumnDescriptor(cf)
          .setCompressionType(compress)
          .setBloomFilterType(BLOOM_TYPE)
          .setMaxVersions(maxVersions)
          .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()));
  int rowIdx = 0;
  long ts = EnvironmentEdgeManager.currentTime();
  for (int iFile = 0; iFile < 5; ++iFile) {
    for (int iRow = 0; iRow < 500; ++iRow) {
      String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + iRow;
      Put p = new Put(Bytes.toBytes(rowStr));
      ++rowIdx;
      for (int iCol = 0; iCol < 10; ++iCol) {
        String qualStr = "col" + iCol;
        String valueStr = "value_" + rowStr + "_" + qualStr;
        for (int iTS = 0; iTS < 5; ++iTS) {
          if (useTags) {
            Tag t = new ArrayBackedTag((byte) 1, "visibility");
            Tag[] tags = new Tag[1];
            tags[0] = t;
            KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
            p.add(kv);
          } else {
            p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
          }
        }
      }
      p.setDurability(Durability.ASYNC_WAL);
      region.put(p);
    }
    region.flush(true);
  }
  clearBlockCache(blockCache);
  assertEquals(0, blockCache.getBlockCount());
  region.compact(false);
  LOG.debug("compactStores() returned");
  for (CachedBlock block : blockCache) {
    assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
    assertNotEquals(BlockType.DATA, block.getBlockType());
  }
  ((HRegion) region).close();
}
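The Internals suffix suggests this private method is a shared driver. A plausible public entry point, exercising both the tagged and untagged write paths, might look like the following; the @Test method name is an assumption, not confirmed by this snippet.

@Test
public void testNotCachingDataBlocksDuringCompaction() throws IOException, InterruptedException {
  // Run once without cell tags and once with them, since the tag-aware
  // KeyValue branch above builds the Puts differently.
  testNotCachingDataBlocksDuringCompactionInternals(false);
  testNotCachingDataBlocksDuringCompactionInternals(true);
}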