Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
The class TestWALEntryStream, method setUp.
@Before
public void setUp() throws Exception {
  walQueue = new PriorityBlockingQueue<>();
  List<WALActionsListener> listeners = new ArrayList<>();
  // Register a listener that watches for new WAL files as they are rolled,
  // so the test can locate the active WAL path.
  pathWatcher = new PathWatcher();
  listeners.add(pathWatcher);
  final WALFactory wals = new WALFactory(conf, listeners, tn.getMethodName());
  log = wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
}
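No matching tearDown appears in this excerpt; a minimal sketch, assuming the wals factory is promoted to an instance field so it is still reachable after setUp:
@After
public void tearDown() throws Exception {
  // WALFactory.close() shuts down every WAL this factory handed out.
  wals.close();
}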
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
The class AbstractTestWALReplay, method setUp.
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(this.conf);
  this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  String serverName = ServerName.valueOf(
      currentTest.getMethodName() + "-manual", 16010, System.currentTimeMillis()).toString();
  this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName);
  this.logDir = new Path(this.hbaseRootDir, logName);
  // Start from a clean root directory so earlier runs cannot leak WALs into this one.
  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false)
      ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
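The tearDown for this class is not shown either; a plausible counterpart, assuming the test should close its factory and drop the root directory between runs:
@After
public void tearDown() throws Exception {
  // Close the factory's WALs, then remove the root dir created for this test.
  this.wals.close();
  TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
}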
Use of org.apache.hadoop.hbase.wal.WALFactory in project phoenix by apache.
The class WALReplayWithIndexWritesAndCompressedWALIT, method testReplayEditsWrittenViaHRegion.
/**
 * Test writing edits into a region, closing it, splitting logs, and opening the region
 * again. Verify seqids.
 * @throws Exception on failure
 */
@SuppressWarnings("deprecation")
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri =
      new HRegionInfo(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr), null, null, false);
  final Path basedir =
      FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
  // Set up basic indexing for the table: enable indexing to a non-existent index table,
  // so the first write fails and the edit has to be recovered from the WAL.
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);
  // Create the region and its WAL.
  // FIXME: Uses private type
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getWAL().close();
  WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234");
  WAL wal = createWAL(this.conf, walFactory);
  // Mock out some of the internals of the RegionServerServices, so we can run coprocessors.
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  when(mockRS.getWAL(null)).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
  when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS));
  region.initialize();
  when(region.getSequenceId()).thenReturn(0L);
  // Make an attempted write to the primary that should also be indexed.
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(p);
  // Because the index table does not exist yet, we should see the server go down.
  Mockito.verify(mockRS, Mockito.times(1))
      .abort(Mockito.anyString(), Mockito.any(Exception.class));
  // Then create the index table so we are successful on WAL replay.
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);
  // Run the WAL split and set up the region.
  runWALSplit(this.conf, walFactory);
  WAL wal2 = createWAL(this.conf, walFactory);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);
  // Initialize the region - this should replay the WALEdits from the WAL.
  region1.initialize();
  // Now check to ensure that we wrote to the index table.
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't properly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());
  // Clean up the index table.
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}
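The createWAL and runWALSplit helpers invoked above are not part of this excerpt. A sketch of what they might look like under HBase 1.x-era APIs; the empty WAL identifier, the "WALs/localhost,1234" log path, and the hbaseRootDir/oldLogDir fields are assumptions, not taken from the source:
// Hand a WAL out of the factory for the fake server "localhost,1234".
private WAL createWAL(final Configuration c, final WALFactory walFactory) throws IOException {
  return walFactory.getWAL(new byte[]{}, null);
}

// Split the fake server's WALs into per-region recovered.edits files.
private Path runWALSplit(final Configuration c, final WALFactory walFactory) throws IOException {
  FileSystem fs = FileSystem.get(c);
  List<Path> splits = WALSplitter.split(hbaseRootDir,
      new Path(hbaseRootDir, "WALs/localhost,1234"), oldLogDir, fs, c, walFactory);
  return splits.get(0);
}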
Use of org.apache.hadoop.hbase.wal.WALFactory in project phoenix by apache.
The class TestPerRegionIndexWriteCache, method setUp.
@SuppressWarnings("deprecation")
@Before
public void setUp() throws Exception {
  Path hbaseRootDir = TEST_UTIL.getDataTestDir();
  TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString());
  FileSystem newFS = FileSystem.newInstance(TEST_UTIL.getConfiguration());
  // Randomize the table name first, so the HRegionInfo, table dir, and descriptor all agree.
  Random rn = new Random();
  tableName = TableName.valueOf("TestPerRegion" + rn.nextInt());
  HRegionInfo hri = new HRegionInfo(tableName, null, null, false);
  Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
  WALFactory walFactory =
      new WALFactory(TEST_UTIL.getConfiguration(), null, "TestPerRegionIndexWriteCache");
  wal = walFactory.getWAL(Bytes.toBytes("logs"), null);
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
  htd.addFamily(a);
  // Two regions share the same WAL but report distinct identities,
  // so the per-region write cache can tell them apart.
  r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
    @Override
    public int hashCode() {
      return 1;
    }

    @Override
    public String toString() {
      return "testRegion1";
    }
  };
  r2 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
    @Override
    public int hashCode() {
      return 2;
    }

    @Override
    public String toString() {
      return "testRegion2";
    }
  };
}
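A cleanup counterpart would close the shared WAL through its factory; a sketch, assuming walFactory and newFS are kept in fields rather than locals:
@After
public void cleanUp() throws Exception {
  walFactory.close(); // closes the WAL shared by r1 and r2
  newFS.close();
  TEST_UTIL.cleanupTestDir();
}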
Use of org.apache.hadoop.hbase.wal.WALFactory in project cdap by caskdata.
The class IncrementSummingScannerTest, method createRegion.
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId,
    HColumnDescriptor cfd) throws Exception {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
  // Keep all versions and deleted cells visible, so the IncrementHandler coprocessor
  // can sum increment deltas at read time.
  cfd.setMaxVersions(Integer.MAX_VALUE);
  cfd.setKeepDeletedCells(true);
  htd.addFamily(cfd);
  htd.addCoprocessor(IncrementHandler.class.getName());
  HTableDescriptor desc = htd.build();
  String tableName = desc.getNameAsString();
  Path tablePath = new Path("/tmp/" + tableName);
  Path hlogPath = new Path("/tmp/hlog-" + tableName);
  FileSystem fs = FileSystem.get(hConf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
  WAL hLog = walFactory.getWAL(new byte[] { 1 });
  HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
  HRegionFileSystem regionFS =
      HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, hConf, desc,
      new LocalRegionServerServices(hConf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
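Since the WALFactory created here is never retained, the caller is responsible for disposing of both the region and its WAL. For illustration, a sketch of a call site; the tableId and column family values are hypothetical, not part of the source:
HRegion region = createRegion(hConf, cConf, tableId, new HColumnDescriptor("i"));
try {
  // ... exercise the region (puts, increments, scans) ...
} finally {
  region.close();           // flush and close the region
  region.getWAL().close();  // shut down the WAL obtained from the WALFactory
}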