Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class TestSplitTableRegionProcedure, the method testRollbackAndDoubleExecution.
@Test(timeout = 60000)
public void testRollbackAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
  insertData(tableName);
  int splitRowNum = startRowNum + rowCount / 2;
  byte[] splitKey = Bytes.toBytes("" + splitRowNum);
  assertTrue("not able to find a splittable region", regions != null);
  assertTrue("not able to find a splittable region", regions.length == 1);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
  // Split a region of the table.
  long procId = procExec.submitProcedure(
    new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
  // Failing before SPLIT_TABLE_REGION_UPDATE_META should trigger the rollback.
  // NOTE: the 5 (the number of steps before SPLIT_TABLE_REGION_UPDATE_META) is
  // hardcoded, so revisit this test whenever a new step is added.
  int numberOfSteps = 5;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps);
}
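For readers new to the split machinery, the splitKey above is the row at which the parent region's key range is cut in two. Below is a minimal standalone sketch, not part of the test, of how a split key maps onto two daughter HRegionInfo key ranges; the class name SplitKeySketch, the table name, and the row keys are illustrative assumptions.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitKeySketch {
  public static void main(String[] args) {
    TableName tn = TableName.valueOf("demo");
    byte[] splitKey = Bytes.toBytes("50");
    // The parent covers the whole keyspace; the daughters meet at the split key.
    HRegionInfo parent = new HRegionInfo(tn);
    HRegionInfo daughterA = new HRegionInfo(tn, parent.getStartKey(), splitKey);
    HRegionInfo daughterB = new HRegionInfo(tn, splitKey, parent.getEndKey());
    // containsRow resolves each row to exactly one daughter.
    System.out.println(daughterA.containsRow(Bytes.toBytes("25"))); // true
    System.out.println(daughterB.containsRow(Bytes.toBytes("75"))); // true
  }
}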
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class TestMobFileCache, the method createMobStoreFile.
/**
 * Create the mob store file.
 */
private Path createMobStoreFile(HColumnDescriptor hcd) throws IOException {
  // Setting up a Store.
  TableName tn = TableName.valueOf(TABLE);
  HTableDescriptor htd = new HTableDescriptor(tn);
  htd.addFamily(hcd);
  HMobStore mobStore = (HMobStore) region.getStore(hcd.getName());
  KeyValue key1 = new KeyValue(ROW, hcd.getName(), QF1, 1, VALUE);
  KeyValue key2 = new KeyValue(ROW, hcd.getName(), QF2, 1, VALUE);
  KeyValue key3 = new KeyValue(ROW2, hcd.getName(), QF3, 1, VALUE2);
  KeyValue[] keys = new KeyValue[] { key1, key2, key3 };
  int maxKeyCount = keys.length;
  HRegionInfo regionInfo = new HRegionInfo(tn);
  StoreFileWriter mobWriter = mobStore.createWriterInTmp(
    currentDate, maxKeyCount, hcd.getCompactionCompression(), regionInfo.getStartKey(), false);
  Path mobFilePath = mobWriter.getPath();
  String fileName = mobFilePath.getName();
  mobWriter.append(key1);
  mobWriter.append(key2);
  mobWriter.append(key3);
  mobWriter.close();
  String targetPathName = MobUtils.formatDate(currentDate);
  Path targetPath = new Path(mobStore.getPath(), targetPathName);
  mobStore.commitFile(mobFilePath, targetPath);
  return new Path(targetPath, fileName);
}
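The regionInfo.getStartKey() argument passed to createWriterInTmp above deserves a note: a fresh HRegionInfo constructed with only a TableName spans the whole keyspace, so both boundary keys come back empty. A minimal sketch confirming this; the class name MobRegionSketch and the table name are illustrative assumptions.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;

public class MobRegionSketch {
  public static void main(String[] args) {
    HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf("t"));
    System.out.println(regionInfo.getStartKey().length); // 0: empty start key
    System.out.println(regionInfo.getEndKey().length);   // 0: empty end key
    System.out.println(regionInfo.getRegionNameAsString());
  }
}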
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class TestBlocksRead, the method initHRegion.
/**
 * Callers must afterward call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}.
 * @param tableName name of the table the region belongs to
 * @param callingMethod test method name, used to build the region directory
 * @param conf configuration to create the region with
 * @param family base name for the column families (one per bloom type)
 * @throws IOException if region creation fails
 * @return created and initialized region.
 */
private Region initHRegion(byte[] tableName, String callingMethod, Configuration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    HColumnDescriptor familyDesc = new HColumnDescriptor(family + "_" + bloomType)
      .setBlocksize(1)
      .setBloomFilterType(bloomType);
    htd.addFamily(familyDesc);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
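The four-argument constructor used here, new HRegionInfo(tableName, null, null, false), normalizes the null boundaries to empty byte arrays (a whole-keyspace region) and records the region as not split. A small sketch of those semantics; the class name RegionBoundsSketch and the table name are illustrative assumptions.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionBoundsSketch {
  public static void main(String[] args) {
    HRegionInfo info = new HRegionInfo(TableName.valueOf("t"), null, null, false);
    System.out.println(info.isSplit()); // false: live region, not a split parent
    // A whole-keyspace region contains every possible row range.
    System.out.println(info.containsRange(Bytes.toBytes("a"), Bytes.toBytes("z"))); // true
  }
}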
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class TestHRegionInfo, the method testReadAndWriteHRegionInfoFile.
@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  Path basedir = htu.getDataTestDir();
  // Create a region. That'll write the .regioninfo file.
  FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
  HRegion r = HBaseTestingUtility.createRegionAndWAL(
    hri, basedir, htu.getConfiguration(), fsTableDescriptors.get(TableName.META_TABLE_NAME));
  // Get the modtime on the file.
  long modtime = getModTime(r);
  HBaseTestingUtility.closeRegionAndWAL(r);
  Thread.sleep(1001);
  r = HRegion.openHRegion(
    basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, htu.getConfiguration());
  // Ensure the file is not written a second time.
  long modtime2 = getModTime(r);
  assertEquals(modtime, modtime2);
  // Now load the file.
  HRegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent(
    r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
  assertTrue(hri.equals(deserializedHri));
  HBaseTestingUtility.closeRegionAndWAL(r);
}
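The .regioninfo file this test round-trips is backed by HRegionInfo's protobuf serialization. A minimal sketch of the same round trip in memory, via toByteArray() and parseFrom(); the class name HriRoundTripSketch is an illustrative assumption.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

public class HriRoundTripSketch {
  public static void main(String[] args) throws DeserializationException {
    HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
    byte[] bytes = hri.toByteArray();       // PB-prefixed serialized form
    HRegionInfo back = HRegionInfo.parseFrom(bytes);
    System.out.println(hri.equals(back));   // true: the round trip preserves identity
  }
}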
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class TestHMobStore, the method init.
private void init(String methodName, Configuration conf, HTableDescriptor htd, HColumnDescriptor hcd, boolean testStore) throws IOException {
  // Setting up the Region and Store.
  Path basedir = new Path(DIR + methodName);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  String logName = "logs";
  Path logdir = new Path(basedir, logName);
  FileSystem fs = FileSystem.get(conf);
  fs.delete(logdir, true);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  final WALFactory wals = new WALFactory(walConf, null, methodName);
  region = new HRegion(tableDir,
    wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
    fs, conf, info, htd, null);
  store = new HMobStore(region, hcd, conf);
  if (testStore) {
    init(conf, hcd);
  }
}
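The wals.getWAL(...) call above keys the WAL by the region's encoded name, the hash-based identifier that also names the region's directory on disk. A short sketch of those identifiers; the class name EncodedNameSketch and the table name are illustrative assumptions.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;

public class EncodedNameSketch {
  public static void main(String[] args) {
    HRegionInfo info = new HRegionInfo(TableName.valueOf("demo"), null, null, false);
    System.out.println(info.getRegionNameAsString()); // table,startkey,regionid.encodedname.
    System.out.println(info.getEncodedName());        // hash-based suffix, used as the WAL/directory key
    System.out.println(info.getTable().getNamespace().length); // namespace bytes, as passed to wals.getWAL(...)
  }
}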