Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class TestBulkLoad, method verifyBulkLoadEvent.
@Test
public void verifyBulkLoadEvent() throws IOException {
  TableName tableName = TableName.valueOf("test", "test");
  List<Pair<byte[], String>> familyPaths = withFamilyPathsFor(family1);
  byte[] familyName = familyPaths.get(0).getFirst();
  String storeFileName = familyPaths.get(0).getSecond();
  storeFileName = (new Path(storeFileName)).getName();
  List<String> storeFileNames = new ArrayList<>();
  storeFileNames.add(storeFileName);
  when(log.appendMarker(any(), any(),
    argThat(bulkLogWalEdit(WALEdit.BULK_LOAD, tableName.toBytes(), familyName, storeFileNames))))
      .thenAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) {
          WALKeyImpl walKey = invocation.getArgument(1);
          MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
          if (mvcc != null) {
            MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
            walKey.setWriteEntry(we);
          }
          return 01L;
        }
      });
  testRegionWithFamiliesAndSpecifiedTableName(tableName, family1).bulkLoadHFiles(familyPaths, false, null);
  verify(log).sync(anyLong());
}
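The part of this test that exercises WALKeyImpl directly is the mocked Answer: whoever appends the marker is expected to open an MVCC write transaction and hand the resulting write entry back to the key. A minimal sketch of that handshake, factored into a hypothetical helper (the name beginMvccForKey is illustrative, not part of TestBulkLoad):

// Sketch only: mirrors what the stubbed appendMarker(...) answer above does
// with the WALKeyImpl it receives as its second argument.
private static long beginMvccForKey(WALKeyImpl walKey) {
  MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
  if (mvcc != null) {
    // begin() opens a write transaction; handing the entry back to the key
    // lets callers that wait on the append complete the transaction later.
    MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
    walKey.setWriteEntry(we);
  }
  return 1L; // stand-in transaction id, like the stubbed return value above
}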
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class TestReplicationSourceManager, method testBulkLoadWALEdits.
@Test
public void testBulkLoadWALEdits() throws Exception {
  // 1. Get the bulk load wal edit event
  NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  WALEdit logEdit = getBulkLoadWALEdit(scope);
  // 2. Create wal key
  WALKeyImpl logKey = new WALKeyImpl(scope);
  // 3. Enable bulk load hfile replication
  Configuration bulkLoadConf = HBaseConfiguration.create(conf);
  bulkLoadConf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
  // 4. Get the scopes for the key
  ReplicationSourceWALActionListener.scopeWALEdits(logKey, logEdit, bulkLoadConf);
  NavigableMap<byte[], Integer> scopes = logKey.getReplicationScopes();
  // Assert family with replication scope global is present in the key scopes
  assertTrue("This family scope is set to global, should be part of replication key scopes.",
    scopes.containsKey(f1));
  // Assert family with replication scope local is not present in the key scopes
  assertFalse("This family scope is set to local, should not be part of replication key scopes",
    scopes.containsKey(f2));
}
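getBulkLoadWALEdit(scope) is not shown in this listing, but the assertions imply how it populates the scope map: f1 is marked with global replication scope and f2 with local scope, and scopeWALEdits keeps only the global family in the key. A hedged sketch of that map, using the scope constants from HConstants:

// Sketch only: the per-family scope map the assertions above rely on.
NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
scope.put(f1, HConstants.REPLICATION_SCOPE_GLOBAL); // 1: replicated, ends up in the key scopes
scope.put(f2, HConstants.REPLICATION_SCOPE_LOCAL);  // 0: not replicated, filtered out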
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class TestReplicationSourceManager, method testBulkLoadWALEditsWithoutBulkLoadReplicationEnabled.
@Test
public void testBulkLoadWALEditsWithoutBulkLoadReplicationEnabled() throws Exception {
  NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  // 1. Get the bulk load wal edit event
  WALEdit logEdit = getBulkLoadWALEdit(scope);
  // 2. Create wal key
  WALKeyImpl logKey = new WALKeyImpl(scope);
  // 3. Get the scopes for the key
  ReplicationSourceWALActionListener.scopeWALEdits(logKey, logEdit, conf);
  // 4. Assert that no bulk load entry scopes are added if bulk load hfile replication is disabled
  assertNull("No bulk load entries scope should be added if bulk load replication is disabled.",
    logKey.getReplicationScopes());
}
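The only difference from the previous test is that the configuration never enables bulk load replication, so scopeWALEdits leaves the key without replication scopes. A hedged sketch of the switch that separates the two cases (the default of false is an assumption here; the tests only ever set the flag explicitly):

// Sketch only: reading the flag that testBulkLoadWALEdits sets to true above.
Configuration conf = HBaseConfiguration.create();
boolean bulkLoadReplicationEnabled =
    conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, false);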
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class TestReplicationSourceManager, method testCompactionWALEdits.
/**
 * Test for HBASE-9038, Replication.scopeWALEdits would NPE if it wasn't filtering out the
 * compaction WALEdit.
 */
@Test
public void testCompactionWALEdits() throws Exception {
  TableName tableName = TableName.valueOf("testCompactionWALEdits");
  WALProtos.CompactionDescriptor compactionDescriptor =
    WALProtos.CompactionDescriptor.getDefaultInstance();
  RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)
    .setStartKey(HConstants.EMPTY_START_ROW)
    .setEndKey(HConstants.EMPTY_END_ROW)
    .build();
  WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
  ReplicationSourceWALActionListener.scopeWALEdits(new WALKeyImpl(), edit, conf);
}
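This is the only snippet in the listing that uses the no-argument WALKeyImpl constructor; the log roll test below builds fully populated keys instead. A side-by-side sketch of the two constructions, reusing the hri, test, mvcc and scopes variables that appear in testLogRoll below:

// Bare key, as passed to scopeWALEdits in the compaction regression test above.
WALKeyImpl bareKey = new WALKeyImpl();

// Fully populated key, as built before every append in testLogRoll below:
// encoded region name, table name, timestamp, MVCC and per-family scopes.
WALKeyImpl fullKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), test,
    EnvironmentEdgeManager.currentTime(), mvcc, scopes);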
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class TestReplicationSourceManager, method testLogRoll.
@Test
public void testLogRoll() throws Exception {
  long baseline = 1000;
  long time = baseline;
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  KeyValue kv = new KeyValue(r1, f1, r1);
  WALEdit edit = new WALEdit();
  edit.add(kv);
  WALFactory wals =
    new WALFactory(utility.getConfiguration(), URLEncoder.encode("regionserver:60020", "UTF8"));
  ReplicationSourceManager replicationManager = replication.getReplicationManager();
  wals.getWALProvider()
    .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager));
  final WAL wal = wals.getWAL(hri);
  manager.init();
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("tableame"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f1)).build();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  // Testing normal log rolling every 20
  for (long i = 1; i < 101; i++) {
    if (i > 1 && i % 20 == 0) {
      wal.rollWriter();
    }
    LOG.info(Long.toString(i));
    final long txid = wal.appendData(hri,
      new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes),
      edit);
    wal.sync(txid);
  }
  // Simulate a rapid insert that's followed
  // by a report that's still not totally complete (missing last one)
  LOG.info(baseline + " and " + time);
  baseline += 101;
  time = baseline;
  LOG.info(baseline + " and " + time);
  for (int i = 0; i < 3; i++) {
    wal.appendData(hri,
      new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes),
      edit);
  }
  wal.sync();
  int logNumber = 0;
  for (Map.Entry<String, NavigableSet<String>> entry : manager.getWALs().get(slaveId).entrySet()) {
    logNumber += entry.getValue().size();
  }
  assertEquals(6, logNumber);
  wal.rollWriter();
  ReplicationSourceInterface source = mock(ReplicationSourceInterface.class);
  when(source.getQueueId()).thenReturn("1");
  when(source.isRecovered()).thenReturn(false);
  when(source.isSyncReplication()).thenReturn(false);
  manager.logPositionAndCleanOldLogs(source,
    new WALEntryBatch(0, manager.getSources().get(0).getCurrentPath()));
  wal.appendData(hri,
    new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(), mvcc, scopes),
    edit);
  wal.sync();
  assertEquals(1, manager.getWALs().size());
  // TODO Need a case with only 2 WALs and we only want to delete the first one
}
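testLogRoll builds the same five-argument WALKeyImpl before every append. A small hypothetical helper (the name newKey is illustrative, not part of the test; test is the TableName field used above) would keep that construction in one place:

// Hypothetical helper: one fresh key per append, stamped with the current time.
private WALKeyImpl newKey(RegionInfo hri, MultiVersionConcurrencyControl mvcc,
    NavigableMap<byte[], Integer> scopes) {
  return new WALKeyImpl(hri.getEncodedNameAsBytes(), test,
      EnvironmentEdgeManager.currentTime(), mvcc, scopes);
}

// Each append in the loops above would then read:
// wal.appendData(hri, newKey(hri, mvcc, scopes), edit);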