Example 36 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.

From the class TestWALEntryStream, method setUp.

@Before
public void setUp() throws Exception {
    walQueue = new PriorityBlockingQueue<>();
    // listener that tracks WAL file paths as the factory creates and rolls them
    List<WALActionsListener> listeners = new ArrayList<>();
    pathWatcher = new PathWatcher();
    listeners.add(pathWatcher);
    // one WALFactory per test method; fetch the WAL for the test region
    final WALFactory wals = new WALFactory(conf, listeners, tn.getMethodName());
    log = wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
}
Also used : ArrayList(java.util.ArrayList) WALActionsListener(org.apache.hadoop.hbase.regionserver.wal.WALActionsListener) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Before(org.junit.Before)
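
A matching cleanup is not part of the snippet. A minimal tearDown sketch, assuming the wals reference is promoted from a local variable to a field, could release the WALs the factory created (WALFactory.close() closes the WALs it handed out):

@After
public void tearDown() throws Exception {
    // close the WALs created by the factory in setUp(); 'wals' is assumed to be a field here
    if (wals != null) {
        wals.close();
    }
}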

Example 37 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.

From the class AbstractTestWALReplay, method setUp.

@Before
public void setUp() throws Exception {
    this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
    this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
    this.hbaseRootDir = FSUtils.getRootDir(this.conf);
    this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    String serverName = ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010, System.currentTimeMillis()).toString();
    this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName);
    this.logDir = new Path(this.hbaseRootDir, logName);
    // start from a clean HBase root directory
    if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
        TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
    }
    this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
    // one WALFactory per test method, named after the running test
    this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
Also used : Path(org.apache.hadoop.fs.Path) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Before(org.junit.Before)
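
The factory built in this setUp is what the replay tests later draw their WALs from. A minimal sketch of that step, where hri is a hypothetical HRegionInfo for the region under test and the getWAL signature is the one shown in Example 36:

// fetch the region's WAL from the factory created in setUp(); 'hri' is a placeholder
WAL wal = this.wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());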

Example 38 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project phoenix by apache.

From the class WALReplayWithIndexWritesAndCompressedWALIT, method testReplayEditsWrittenViaHRegion.

/**
   * Test writing edits into a region, closing it, splitting logs, and opening the region again.
   * Verify seqids.
   * @throws Exception on failure
   */
@SuppressWarnings("deprecation")
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
    final String tableNameStr = "testReplayEditsWrittenViaHRegion";
    final HRegionInfo hri = new HRegionInfo(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr), null, null, false);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
    // set up basic indexing for the table
    // enable indexing to a non-existent index table
    byte[] family = new byte[] { 'a' };
    ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
    builder.addIndexGroup(fam1);
    builder.build(htd);
    // create the region + its WAL
    // FIXME: Uses private type
    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    region0.close();
    region0.getWAL().close();
    WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234");
    WAL wal = createWAL(this.conf, walFactory);
    RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
    // mock out some of the internals of the RSS, so we can run CPs
    when(mockRS.getWAL(null)).thenReturn(wal);
    RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
    when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
    ServerName mockServerName = Mockito.mock(ServerName.class);
    when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
    when(mockRS.getServerName()).thenReturn(mockServerName);
    HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS));
    region.initialize();
    when(region.getSequenceId()).thenReturn(0L);
    // make an attempted write to the primary table that should also be indexed
    byte[] rowkey = Bytes.toBytes("indexed_row_key");
    Put p = new Put(rowkey);
    p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
    region.put(p);
    // we should then see the server go down
    Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(), Mockito.any(Exception.class));
    // then create the index table so we are successful on WAL replay
    CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);
    // run the WAL split and setup the region
    runWALSplit(this.conf, walFactory);
    WAL wal2 = createWAL(this.conf, walFactory);
    HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);
    // initialize the region - this should replay the WALEdits from the WAL
    region1.initialize();
    // now check to ensure that we wrote to the index table
    HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
    int indexSize = getKeyValueCount(index);
    assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
    Get g = new Get(rowkey);
    final Result result = region1.get(g);
    assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());
    // cleanup the index table
    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.disableTable(INDEX_TABLE_NAME);
    admin.deleteTable(INDEX_TABLE_NAME);
    admin.close();
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) RegionServerAccounting(org.apache.hadoop.hbase.regionserver.RegionServerAccounting) HTable(org.apache.hadoop.hbase.client.HTable) Put(org.apache.hadoop.hbase.client.Put) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) CoveredColumn(org.apache.phoenix.hbase.index.covered.example.CoveredColumn) CoveredColumnIndexSpecifierBuilder(org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder) ServerName(org.apache.hadoop.hbase.ServerName) Get(org.apache.hadoop.hbase.client.Get) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) ColumnGroup(org.apache.phoenix.hbase.index.covered.example.ColumnGroup) Test(org.junit.Test) NeedsOwnMiniClusterTest(org.apache.phoenix.end2end.NeedsOwnMiniClusterTest)
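
The test calls two local helpers, createWAL and runWALSplit, whose bodies are not shown above. A rough sketch of what they might look like, assuming the WALSplitter.split(rootDir, logDir, oldLogDir, fs, conf, factory) API of the HBase 1.x line; the WAL identifier and the "logs" directory name are placeholders:

// Hypothetical sketch of the helpers referenced above, not the actual Phoenix source.
private WAL createWAL(Configuration c, WALFactory factory) throws IOException {
    // hand back a WAL from the factory; identifier and namespace are placeholders
    return factory.getWAL(Bytes.toBytes("testReplayEditsWrittenViaHRegion"), null);
}

private void runWALSplit(Configuration c, WALFactory factory) throws IOException {
    // split the WAL directory so its edits can be replayed when the region is reopened
    WALSplitter.split(hbaseRootDir, new Path(hbaseRootDir, "logs"),
        new Path(hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME), this.fs, c, factory);
}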

Example 39 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project phoenix by apache.

From the class TestPerRegionIndexWriteCache, method setUp.

@SuppressWarnings("deprecation")
@Before
public void setUp() throws Exception {
    Path hbaseRootDir = TEST_UTIL.getDataTestDir();
    TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString());
    FileSystem newFS = FileSystem.newInstance(TEST_UTIL.getConfiguration());
    HRegionInfo hri = new HRegionInfo(tableName, null, null, false);
    Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    Random rn = new Random();
    tableName = TableName.valueOf("TestPerRegion" + rn.nextInt());
    WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestPerRegionIndexWriteCache");
    wal = walFactory.getWAL(Bytes.toBytes("logs"), null);
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
    htd.addFamily(a);
    r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {

        @Override
        public int hashCode() {
            return 1;
        }

        @Override
        public String toString() {
            return "testRegion1";
        }
    };
    r2 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {

        @Override
        public int hashCode() {
            return 2;
        }

        @Override
        public String toString() {
            return "testRegion1";
        }
    };
}
Also used : Path(org.apache.hadoop.fs.Path) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Random(java.util.Random) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Before(org.junit.Before)
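
The snippet creates two regions, a WAL, and a fresh FileSystem instance but leaves their cleanup to the reader. A minimal tearDown sketch, assuming walFactory and newFS are promoted from locals to fields alongside r1, r2, and wal:

@After
public void tearDown() throws Exception {
    // release everything created in setUp(); walFactory and newFS are assumed to be fields
    r1.close();
    r2.close();
    walFactory.close();
    newFS.close();
}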

Example 40 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project cdap by caskdata.

From the class IncrementSummingScannerTest, method createRegion.

static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
    cfd.setMaxVersions(Integer.MAX_VALUE);
    cfd.setKeepDeletedCells(true);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());
    HTableDescriptor desc = htd.build();
    String tableName = desc.getNameAsString();
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    // each table gets its own WAL directory and WALFactory
    WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
    WAL hLog = walFactory.getWAL(new byte[] { 1 });
    HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, hConf, desc, new LocalRegionServerServices(hConf, ServerName.valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Also used : Path(org.apache.hadoop.fs.Path) HTableDescriptorBuilder(co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder) WAL(org.apache.hadoop.hbase.wal.WAL) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) HBaseTableUtilFactory(co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory) WALFactory(org.apache.hadoop.hbase.wal.WALFactory)
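
A call site for this helper might look like the sketch below; the TableId.from(...) call and the column family name are placeholders rather than values from the test:

// hypothetical invocation of createRegion; table id and family name are made up for illustration
HRegion region = createRegion(hConf, cConf, TableId.from("default", "incrementTest"),
    new HColumnDescriptor("i"));
try {
    // exercise the region, e.g. apply increments and verify IncrementSummingScanner output
} finally {
    region.close();
}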

Aggregations

WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 42
Path (org.apache.hadoop.fs.Path): 30
WAL (org.apache.hadoop.hbase.wal.WAL): 24
Configuration (org.apache.hadoop.conf.Configuration): 19
Test (org.junit.Test): 17
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 16
FileSystem (org.apache.hadoop.fs.FileSystem): 15
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 14
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 12
MetricsWAL (org.apache.hadoop.hbase.regionserver.wal.MetricsWAL): 11
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 10
KeyValue (org.apache.hadoop.hbase.KeyValue): 8
WALKey (org.apache.hadoop.hbase.wal.WALKey): 8
TreeMap (java.util.TreeMap): 7
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 7
Result (org.apache.hadoop.hbase.client.Result): 7
WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit): 7
IOException (java.io.IOException): 6
ArrayList (java.util.ArrayList): 6
Before (org.junit.Before): 6