
Example 51 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.

From class TestReplicateToReplica, method setUp.

@Before
public void setUp() throws IOException {
    TO_ADD_AFTER_PREPARE_FLUSH = new ArrayList<>();
    tableName = name.getTableName();
    testDir = UTIL.getDataTestDir(tableName.getNameAsString());
    Configuration conf = UTIL.getConfiguration();
    conf.set(HConstants.HBASE_DIR, testDir.toString());
    td = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(2).setRegionMemStoreReplication(true).build();
    reqAndResps = new ArrayDeque<>();
    queueReqAndResps = true;
    conn = mock(AsyncClusterConnection.class);
    when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).thenAnswer(i -> {
        if (queueReqAndResps) {
            @SuppressWarnings("unchecked") List<WAL.Entry> entries = i.getArgument(1, List.class);
            CompletableFuture<Void> future = new CompletableFuture<>();
            reqAndResps.add(Pair.newPair(entries, future));
            return future;
        } else {
            return CompletableFuture.completedFuture(null);
        }
    });
    flushRequester = mock(FlushRequester.class);
    rss = mock(RegionServerServices.class);
    when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1));
    when(rss.getConfiguration()).thenReturn(conf);
    when(rss.getRegionServerAccounting()).thenReturn(new RegionServerAccounting(conf));
    when(rss.getExecutorService()).thenReturn(EXEC);
    when(rss.getAsyncClusterConnection()).thenReturn(conn);
    when(rss.getFlushRequester()).thenReturn(flushRequester);
    manager = new RegionReplicationBufferManager(rss);
    when(rss.getRegionReplicationBufferManager()).thenReturn(manager);
    RegionInfo primaryHri = RegionInfoBuilder.newBuilder(td.getTableName()).build();
    RegionInfo secondaryHri = RegionReplicaUtil.getRegionInfoForReplica(primaryHri, 1);
    walFactory = new WALFactory(conf, UUID.randomUUID().toString());
    WAL wal = walFactory.getWAL(primaryHri);
    primary = HRegion.createHRegion(primaryHri, testDir, conf, td, wal);
    primary.close();
    primary = HRegion.openHRegion(testDir, primaryHri, td, wal, conf, rss, null);
    secondary = HRegion.openHRegion(secondaryHri, td, null, conf, rss, null);
    when(rss.getRegions()).then(i -> {
        return Arrays.asList(primary, secondary);
    });
    // process the open events
    replicateAll();
}
Also used: WAL (org.apache.hadoop.hbase.wal.WAL), Configuration (org.apache.hadoop.conf.Configuration), AsyncClusterConnection (org.apache.hadoop.hbase.client.AsyncClusterConnection), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), WALEntry (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry), CompletableFuture (java.util.concurrent.CompletableFuture), WALFactory (org.apache.hadoop.hbase.wal.WALFactory), RegionReplicationBufferManager (org.apache.hadoop.hbase.regionserver.regionreplication.RegionReplicationBufferManager), Before (org.junit.Before)
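
A test like this usually needs a matching cleanup so the regions, the WALFactory and the buffer manager created in setUp do not leak between test methods. The sketch below is illustrative only and is not the actual tearDown of TestReplicateToReplica; it assumes a JUnit @After method (org.junit.After) and reuses the fields initialized above.

@After
public void tearDown() throws IOException {
    // Close the secondary and primary regions opened in setUp.
    if (secondary != null) {
        secondary.close();
    }
    if (primary != null) {
        primary.close();
    }
    // Closing the WALFactory also closes the WAL handed to the primary region.
    if (walFactory != null) {
        walFactory.close();
    }
    // Assumption: the buffer manager exposes stop() to shut down its internal executor.
    if (manager != null) {
        manager.stop();
    }
}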

Example 52 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.

From class TestReplicationSourceManager, method testLogRoll.

@Test
public void testLogRoll() throws Exception {
    long baseline = 1000;
    long time = baseline;
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    KeyValue kv = new KeyValue(r1, f1, r1);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    WALFactory wals = new WALFactory(utility.getConfiguration(), URLEncoder.encode("regionserver:60020", "UTF8"));
    ReplicationSourceManager replicationManager = replication.getReplicationManager();
    wals.getWALProvider().addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager));
    final WAL wal = wals.getWAL(hri);
    manager.init();
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("tableame")).setColumnFamily(ColumnFamilyDescriptorBuilder.of(f1)).build();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getColumnFamilyNames()) {
        scopes.put(fam, 0);
    }
    // Testing normal log rolling every 20
    for (long i = 1; i < 101; i++) {
        if (i > 1 && i % 20 == 0) {
            wal.rollWriter();
        }
        LOG.info(Long.toString(i));
        final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
        wal.sync(txid);
    }
    // Simulate a rapid insert that's followed
    // by a report that's still not totally complete (missing last one)
    LOG.info(baseline + " and " + time);
    baseline += 101;
    time = baseline;
    LOG.info(baseline + " and " + time);
    for (int i = 0; i < 3; i++) {
        wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
    }
    wal.sync();
    int logNumber = 0;
    for (Map.Entry<String, NavigableSet<String>> entry : manager.getWALs().get(slaveId).entrySet()) {
        logNumber += entry.getValue().size();
    }
    assertEquals(6, logNumber);
    wal.rollWriter();
    ReplicationSourceInterface source = mock(ReplicationSourceInterface.class);
    when(source.getQueueId()).thenReturn("1");
    when(source.isRecovered()).thenReturn(false);
    when(source.isSyncReplication()).thenReturn(false);
    manager.logPositionAndCleanOldLogs(source, new WALEntryBatch(0, manager.getSources().get(0).getCurrentPath()));
    wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
    wal.sync();
    assertEquals(1, manager.getWALs().size());
// TODO Need a case with only 2 WALs and we only want to delete the first one
}
Also used: NavigableSet (java.util.NavigableSet), KeyValue (org.apache.hadoop.hbase.KeyValue), WAL (org.apache.hadoop.hbase.wal.WAL), MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl), TreeMap (java.util.TreeMap), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), WALEdit (org.apache.hadoop.hbase.wal.WALEdit), WALFactory (org.apache.hadoop.hbase.wal.WALFactory), WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl), Map (java.util.Map), NavigableMap (java.util.NavigableMap), HashMap (java.util.HashMap), Test (org.junit.Test)
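
The append-then-sync pattern repeated in the loops above can be read as a small helper. This is an illustrative sketch, not code from TestReplicationSourceManager; the helper name appendAndSync is hypothetical, and the parameters mirror the values used in the test (hri, test, mvcc, scopes).

private long appendAndSync(WAL wal, RegionInfo hri, TableName tableName, WALEdit edit,
        MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes) throws IOException {
    // Build a WAL key for the region with the current time and replication scopes, then append the edit.
    long txid = wal.appendData(hri,
        new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, EnvironmentEdgeManager.currentTime(), mvcc, scopes),
        edit);
    // Block until the appended entry is durable before continuing.
    wal.sync(txid);
    return txid;
}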

Example 53 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.

From class WALEntryStreamTestBase, method initWAL.

protected void initWAL() throws IOException {
    ReplicationSource source = mock(ReplicationSource.class);
    MetricsSource metricsSource = new MetricsSource("2");
    // Source with the same id is shared and carries values from the last run
    metricsSource.clear();
    logQueue = new ReplicationSourceLogQueue(CONF, metricsSource, source);
    pathWatcher = new PathWatcher();
    final WALFactory wals = new WALFactory(CONF, TableNameTestRule.cleanUpTestName(tn.getMethodName()));
    wals.getWALProvider().addWALActionsListener(pathWatcher);
    log = wals.getWAL(info);
}
Also used: WALFactory (org.apache.hadoop.hbase.wal.WALFactory)
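
PathWatcher is defined elsewhere in WALEntryStreamTestBase and is registered on the WALProvider so the test can observe the currently active WAL file. As an illustration of that kind of listener (a hypothetical sketch, not the real PathWatcher), a WALActionsListener can simply record the path it is rolled to:

// Hypothetical path-tracking listener; WALActionsListener lives in org.apache.hadoop.hbase.regionserver.wal.
class CurrentPathTracker implements WALActionsListener {

    // The WAL file the provider most recently rolled to; read by the test after log rolls.
    volatile Path currentPath;

    @Override
    public void preLogRoll(Path oldPath, Path newPath) {
        currentPath = newPath;
    }
}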

Example 54 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.

From class HBaseTestingUtility, method createWal.

/**
 * Create an unmanaged WAL. Be sure to close it when you're through.
 */
public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri) throws IOException {
    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
    // unless I pass along via the conf.
    Configuration confForWAL = new Configuration(conf);
    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), WALFactory (org.apache.hadoop.hbase.wal.WALFactory)
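
As the javadoc says, the returned WAL is unmanaged, so the caller must close it. A minimal usage sketch follows; conf, rootDir and regionInfo are placeholders supplied by the caller, and since WAL extends Closeable, try-with-resources works:

try (WAL wal = HBaseTestingUtility.createWal(conf, rootDir, regionInfo)) {
    // Use the WAL, e.g. pass it to HRegion.createHRegion(...) or HRegion.openHRegion(...).
}
// The WAL is closed here; no WALFactory or region will close it for us.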

Example 55 with WALFactory

Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.

From class TestHStore, method initHRegion.

private void initHRegion(String methodName, Configuration conf, TableDescriptorBuilder builder, ColumnFamilyDescriptor hcd, MyStoreHook hook, boolean switchToPread) throws IOException {
    TableDescriptor htd = builder.setColumnFamily(hcd).build();
    Path basedir = new Path(DIR + methodName);
    Path tableDir = CommonFSUtils.getTableDir(basedir, htd.getTableName());
    final Path logdir = new Path(basedir, AbstractFSWALProvider.getWALDirectoryName(methodName));
    FileSystem fs = FileSystem.get(conf);
    fs.delete(logdir, true);
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, MemStoreLABImpl.CHUNK_SIZE_DEFAULT, 1, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    Configuration walConf = new Configuration(conf);
    CommonFSUtils.setRootDir(walConf, basedir);
    WALFactory wals = new WALFactory(walConf, methodName);
    region = new HRegion(new HRegionFileSystem(conf, fs, tableDir, info), wals.getWAL(info), conf, htd, null);
    region.regionServicesForStores = Mockito.spy(region.regionServicesForStores);
    ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(1);
    Mockito.when(region.regionServicesForStores.getInMemoryCompactionPool()).thenReturn(pool);
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), CompactionConfiguration (org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), FilterFileSystem (org.apache.hadoop.fs.FilterFileSystem), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), WALFactory (org.apache.hadoop.hbase.wal.WALFactory), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)
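
Resources created in initHRegion are not released by the method itself, so the test's cleanup has to close the region and the WAL behind it. The following is a minimal sketch under the assumption that the WALFactory and the executor are kept as fields (in the snippet above, wals and pool are locals); it is not the actual tearDown of TestHStore.

@After
public void tearDown() throws IOException {
    if (region != null) {
        // Closing the region closes its stores, but not the WAL it writes to.
        region.close();
        region = null;
    }
    if (wals != null) {
        // Closing the WALFactory releases the WAL created for this region.
        wals.close();
    }
    if (pool != null) {
        pool.shutdownNow();
    }
}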

Aggregations

WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 64
Path (org.apache.hadoop.fs.Path): 42
Configuration (org.apache.hadoop.conf.Configuration): 33
WAL (org.apache.hadoop.hbase.wal.WAL): 28
Test (org.junit.Test): 23
FileSystem (org.apache.hadoop.fs.FileSystem): 22
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 13
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 13
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 12
WALEdit (org.apache.hadoop.hbase.wal.WALEdit): 12
IOException (java.io.IOException): 11
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 11
KeyValue (org.apache.hadoop.hbase.KeyValue): 11
WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl): 10
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 9
WALProvider (org.apache.hadoop.hbase.wal.WALProvider): 9
TreeMap (java.util.TreeMap): 8
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 8
Before (org.junit.Before): 8
Result (org.apache.hadoop.hbase.client.Result): 7