Example 36 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

Class TestBulkLoad, method verifyBulkLoadEvent.

@Test
public void verifyBulkLoadEvent() throws IOException {
    TableName tableName = TableName.valueOf("test", "test");
    List<Pair<byte[], String>> familyPaths = withFamilyPathsFor(family1);
    byte[] familyName = familyPaths.get(0).getFirst();
    String storeFileName = familyPaths.get(0).getSecond();
    storeFileName = (new Path(storeFileName)).getName();
    List<String> storeFileNames = new ArrayList<>();
    storeFileNames.add(storeFileName);
    when(log.appendMarker(any(), any(), argThat(bulkLogWalEdit(WALEdit.BULK_LOAD, tableName.toBytes(), familyName, storeFileNames)))).thenAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocation) {
            WALKeyImpl walKey = invocation.getArgument(1);
            MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
            if (mvcc != null) {
                MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
                walKey.setWriteEntry(we);
            }
            // Return a dummy transaction id for the stubbed marker append.
            return 1L;
        }
    });
    testRegionWithFamiliesAndSpecifiedTableName(tableName, family1).bulkLoadHFiles(familyPaths, false, null);
    verify(log).sync(anyLong());
}
Also used : Path(org.apache.hadoop.fs.Path) ArrayList(java.util.ArrayList) TableName(org.apache.hadoop.hbase.TableName) Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
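
Because log is a Mockito mock, the stubbed appendMarker answer has to do the one piece of real work the bulk load path depends on: attach an MVCC write entry to the WALKeyImpl. A minimal sketch of that handshake in isolation, assuming only the public MultiVersionConcurrencyControl and WALKeyImpl APIs used above (the helper name attachWriteEntry is hypothetical):

import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

// Hypothetical helper mirroring what the stubbed appendMarker does for the key's MVCC.
static void attachWriteEntry(WALKeyImpl walKey) {
    MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
    if (mvcc != null) {
        // Begin a write transaction; the entry carries the write number for this append.
        MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
        walKey.setWriteEntry(we);
        // The caller is expected to complete the entry later (e.g. mvcc.completeAndWait(we))
        // once the append is durable, which is why the stub only needs begin() + setWriteEntry().
    }
}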

Example 37 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

Class TestReplicationSourceManager, method testBulkLoadWALEdits.

@Test
public void testBulkLoadWALEdits() throws Exception {
    // 1. Get the bulk load wal edit event
    NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    WALEdit logEdit = getBulkLoadWALEdit(scope);
    // 2. Create wal key
    WALKeyImpl logKey = new WALKeyImpl(scope);
    // 3. Enable bulk load hfile replication
    Configuration bulkLoadConf = HBaseConfiguration.create(conf);
    bulkLoadConf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
    // 4. Get the scopes for the key
    ReplicationSourceWALActionListener.scopeWALEdits(logKey, logEdit, bulkLoadConf);
    NavigableMap<byte[], Integer> scopes = logKey.getReplicationScopes();
    // Assert family with replication scope global is present in the key scopes
    assertTrue("This family scope is set to global, should be part of replication key scopes.", scopes.containsKey(f1));
    // Assert family with replication scope local is not present in the key scopes
    assertFalse("This family scope is set to local, should not be part of replication key scopes", scopes.containsKey(f2));
}
Also used : WALEdit(org.apache.hadoop.hbase.wal.WALEdit) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) TreeMap(java.util.TreeMap) Test(org.junit.Test)
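
For context, the per-family replication scope map used here pairs each family name with HConstants.REPLICATION_SCOPE_GLOBAL or HConstants.REPLICATION_SCOPE_LOCAL, and the test asserts that only globally scoped families end up in the key's replication scopes. A small illustrative sketch of building such a map (the "f1"/"f2" string names stand in for the test's family byte arrays):

import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

// Per-family replication scopes, keyed with the byte-array comparator as in the test.
NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
// f1 replicates to peers, f2 stays local; only f1 should appear in logKey.getReplicationScopes().
scope.put(Bytes.toBytes("f1"), HConstants.REPLICATION_SCOPE_GLOBAL);
scope.put(Bytes.toBytes("f2"), HConstants.REPLICATION_SCOPE_LOCAL);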

Example 38 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

Class TestReplicationSourceManager, method testBulkLoadWALEditsWithoutBulkLoadReplicationEnabled.

@Test
public void testBulkLoadWALEditsWithoutBulkLoadReplicationEnabled() throws Exception {
    NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    // 1. Get the bulk load wal edit event
    WALEdit logEdit = getBulkLoadWALEdit(scope);
    // 2. Create wal key
    WALKeyImpl logKey = new WALKeyImpl(scope);
    // 3. Get the scopes for the key
    ReplicationSourceWALActionListener.scopeWALEdits(logKey, logEdit, conf);
    // 4. Assert that no bulk load entry scopes are added if bulk load hfile replication is disabled
    assertNull("No bulk load entries scope should be added if bulk load replication is disabled.", logKey.getReplicationScopes());
}
Also used : WALEdit(org.apache.hadoop.hbase.wal.WALEdit) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) TreeMap(java.util.TreeMap) Test(org.junit.Test)
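
Read together with Example 37, this pins down the flag's effect: scopeWALEdits records bulk load scopes on the WALKeyImpl only when HConstants.REPLICATION_BULKLOAD_ENABLE_KEY is set to true in the configuration; with the flag left at its default, the key's replication scopes remain null.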

Example 39 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

Class TestReplicationSourceManager, method testCompactionWALEdits.

/**
 * Test for HBASE-9038, Replication.scopeWALEdits would NPE if it wasn't filtering out the
 * compaction WALEdit.
 */
@Test
public void testCompactionWALEdits() throws Exception {
    TableName tableName = TableName.valueOf("testCompactionWALEdits");
    WALProtos.CompactionDescriptor compactionDescriptor = WALProtos.CompactionDescriptor.getDefaultInstance();
    RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).build();
    WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
    ReplicationSourceWALActionListener.scopeWALEdits(new WALKeyImpl(), edit, conf);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) WALProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos) Test(org.junit.Test)
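
As the javadoc above notes, HBASE-9038 was an NPE on compaction markers: they are meta edits written under WALEdit.METAFAMILY and carry no per-family replication scope, so a scoping listener has to skip them. A minimal sketch of that guard, assuming only the public WALEdit API (shouldScope is a hypothetical helper, not the listener's actual code):

import org.apache.hadoop.hbase.wal.WALEdit;

// Hypothetical guard: compaction (and other meta) edits should be ignored when
// computing replication scopes, since they have no column-family scope of their own.
static boolean shouldScope(WALEdit edit) {
    return !edit.isMetaEdit();
}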

Example 40 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

Class TestReplicationSourceManager, method testLogRoll.

@Test
public void testLogRoll() throws Exception {
    long baseline = 1000;
    long time = baseline;
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    KeyValue kv = new KeyValue(r1, f1, r1);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    WALFactory wals = new WALFactory(utility.getConfiguration(), URLEncoder.encode("regionserver:60020", "UTF8"));
    ReplicationSourceManager replicationManager = replication.getReplicationManager();
    wals.getWALProvider().addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager));
    final WAL wal = wals.getWAL(hri);
    manager.init();
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("tablename")).setColumnFamily(ColumnFamilyDescriptorBuilder.of(f1)).build();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getColumnFamilyNames()) {
        scopes.put(fam, 0);
    }
    // Testing normal log rolling every 20
    for (long i = 1; i < 101; i++) {
        if (i > 1 && i % 20 == 0) {
            wal.rollWriter();
        }
        LOG.info(Long.toString(i));
        final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
        wal.sync(txid);
    }
    // Simulate a rapid insert that's followed
    // by a report that's still not totally complete (missing last one)
    LOG.info(baseline + " and " + time);
    baseline += 101;
    time = baseline;
    LOG.info(baseline + " and " + time);
    for (int i = 0; i < 3; i++) {
        wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
    }
    wal.sync();
    int logNumber = 0;
    for (Map.Entry<String, NavigableSet<String>> entry : manager.getWALs().get(slaveId).entrySet()) {
        logNumber += entry.getValue().size();
    }
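    // 5 rolls (at i = 20, 40, 60, 80, 100) plus the current writer leave 6 WALs queued for the slave.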
    assertEquals(6, logNumber);
    wal.rollWriter();
    ReplicationSourceInterface source = mock(ReplicationSourceInterface.class);
    when(source.getQueueId()).thenReturn("1");
    when(source.isRecovered()).thenReturn(false);
    when(source.isSyncReplication()).thenReturn(false);
    manager.logPositionAndCleanOldLogs(source, new WALEntryBatch(0, manager.getSources().get(0).getCurrentPath()));
    wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
    wal.sync();
    assertEquals(1, manager.getWALs().size());
// TODO Need a case with only 2 WALs and we only want to delete the first one
}
Also used : NavigableSet(java.util.NavigableSet) KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) TreeMap(java.util.TreeMap) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Map(java.util.Map) NavigableMap(java.util.NavigableMap) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) Test(org.junit.Test)

Aggregations

WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl) 59
WALEdit (org.apache.hadoop.hbase.wal.WALEdit) 44
Test (org.junit.Test) 42
KeyValue (org.apache.hadoop.hbase.KeyValue) 24
TreeMap (java.util.TreeMap) 22
WAL (org.apache.hadoop.hbase.wal.WAL) 20
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 17
Path (org.apache.hadoop.fs.Path) 16
IOException (java.io.IOException) 13
TableName (org.apache.hadoop.hbase.TableName) 12
MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) 12
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 11
WALFactory (org.apache.hadoop.hbase.wal.WALFactory) 10
ArrayList (java.util.ArrayList) 9
Entry (org.apache.hadoop.hbase.wal.WAL.Entry) 9
FileSystem (org.apache.hadoop.fs.FileSystem) 8
WALProvider (org.apache.hadoop.hbase.wal.WALProvider) 8
CompletableFuture (java.util.concurrent.CompletableFuture) 7
AtomicLong (java.util.concurrent.atomic.AtomicLong) 7
Configuration (org.apache.hadoop.conf.Configuration) 7