Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestReplicationSourceManager, the method getBulkLoadWALEdit.
private WALEdit getBulkLoadWALEdit(NavigableMap<byte[], Integer> scope) {
  // 1. Create store files for the families
  Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
  Map<String, Long> storeFilesSize = new HashMap<>(1);
  List<Path> p = new ArrayList<>(1);
  Path hfilePath1 = new Path(Bytes.toString(f1));
  p.add(hfilePath1);
  try {
    storeFilesSize.put(hfilePath1.getName(), fs.getFileStatus(hfilePath1).getLen());
  } catch (IOException e) {
    LOG.debug("Failed to calculate the size of hfile " + hfilePath1);
    storeFilesSize.put(hfilePath1.getName(), 0L);
  }
  storeFiles.put(f1, p);
  scope.put(f1, 1);
  p = new ArrayList<>(1);
  Path hfilePath2 = new Path(Bytes.toString(f2));
  p.add(hfilePath2);
  try {
    storeFilesSize.put(hfilePath2.getName(), fs.getFileStatus(hfilePath2).getLen());
  } catch (IOException e) {
    LOG.debug("Failed to calculate the size of hfile " + hfilePath2);
    storeFilesSize.put(hfilePath2.getName(), 0L);
  }
  storeFiles.put(f2, p);
  // 2. Create bulk load descriptor
  BulkLoadDescriptor desc = ProtobufUtil.toBulkLoadDescriptor(hri.getTable(),
    UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()), storeFiles, storeFilesSize, 1);
  // 3. Create bulk load wal edit event
  WALEdit logEdit = WALEdit.createBulkLoadEvent(hri, desc);
  return logEdit;
}
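A hedged sketch of what the helper produces (the names cell and parsed are illustrative, and the getStoresCount accessor on the generated BulkLoadDescriptor is an assumption about the protobuf getters): the event is a single METAFAMILY cell from which WALEdit.getBulkLoadDescriptor can read the descriptor back.

  WALEdit logEdit = getBulkLoadWALEdit(new TreeMap<>(Bytes.BYTES_COMPARATOR));
  Cell cell = logEdit.getCells().get(0);
  // The bulk load event is stored under WALEdit.METAFAMILY rather than a user family
  assertTrue(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY));
  // Round-trip the descriptor attached above; both families f1 and f2 contribute a store
  BulkLoadDescriptor parsed = WALEdit.getBulkLoadDescriptor(cell);
  assertEquals(2, parsed.getStoresCount());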
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestReplicationSourceManager, the method testBulkLoadWALEdits.
@Test
public void testBulkLoadWALEdits() throws Exception {
  // 1. Get the bulk load wal edit event
  NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  WALEdit logEdit = getBulkLoadWALEdit(scope);
  // 2. Create wal key
  WALKeyImpl logKey = new WALKeyImpl(scope);
  // 3. Enable bulk load hfile replication
  Configuration bulkLoadConf = HBaseConfiguration.create(conf);
  bulkLoadConf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
  // 4. Get the scopes for the key
  ReplicationSourceWALActionListener.scopeWALEdits(logKey, logEdit, bulkLoadConf);
  NavigableMap<byte[], Integer> scopes = logKey.getReplicationScopes();
  // Assert family with replication scope global is present in the key scopes
  assertTrue("This family scope is set to global, should be part of replication key scopes.",
    scopes.containsKey(f1));
  // Assert family with replication scope local is not present in the key scopes
  assertFalse("This family scope is set to local, should not be part of replication key scopes",
    scopes.containsKey(f2));
}
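Outside the test, the per-family scope does not come from a hand-built map but from the table schema; a minimal sketch, assuming the standard ColumnFamilyDescriptorBuilder.setScope API, of declaring a family that replicates globally (the same value 1 that scope.put(f1, 1) uses above):

  // Hypothetical schema setup, not part of the test above
  ColumnFamilyDescriptor globalFamily = ColumnFamilyDescriptorBuilder.newBuilder(f1)
    .setScope(HConstants.REPLICATION_SCOPE_GLOBAL) // REPLICATION_SCOPE_GLOBAL == 1
    .build();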
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestReplicationSourceManager, the method testBulkLoadWALEditsWithoutBulkLoadReplicationEnabled.
@Test
public void testBulkLoadWALEditsWithoutBulkLoadReplicationEnabled() throws Exception {
  NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  // 1. Get the bulk load wal edit event
  WALEdit logEdit = getBulkLoadWALEdit(scope);
  // 2. Create wal key
  WALKeyImpl logKey = new WALKeyImpl(scope);
  // 3. Get the scopes for the key
  ReplicationSourceWALActionListener.scopeWALEdits(logKey, logEdit, conf);
  // 4. Assert that no bulk load entry scopes are added if bulk load hfile replication is disabled
  assertNull("No bulk load entries scope should be added if bulk load replication is disabled.",
    logKey.getReplicationScopes());
}
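This test leans on the shared conf keeping bulk load replication at its default; a quick hedged check of that assumption, using the HConstants default constant:

  Configuration defaultConf = HBaseConfiguration.create();
  // hbase.replication.bulkload.enabled defaults to false, so no bulk load scopes are added
  assertFalse(defaultConf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
    HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT));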
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestReplicationSourceManager, the method testCompactionWALEdits.
/**
 * Test for HBASE-9038: Replication.scopeWALEdits would NPE if it wasn't filtering out the
 * compaction WALEdit.
 */
@Test
public void testCompactionWALEdits() throws Exception {
  TableName tableName = TableName.valueOf("testCompactionWALEdits");
  WALProtos.CompactionDescriptor compactionDescriptor =
    WALProtos.CompactionDescriptor.getDefaultInstance();
  RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(HConstants.EMPTY_START_ROW)
    .setEndKey(HConstants.EMPTY_END_ROW).build();
  WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
  ReplicationSourceWALActionListener.scopeWALEdits(new WALKeyImpl(), edit, conf);
}
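The reason scopeWALEdits has to filter this edit out is that the compaction marker is a meta edit with no user family to scope; a hedged sketch of that property, assuming WALEdit's isMetaEdit and isCompactionMarker helpers:

  WALEdit compactionEdit = WALEdit.createCompaction(hri, compactionDescriptor);
  // The marker lives in METAFAMILY, so there is no data family for the scoping logic to look up
  assertTrue(compactionEdit.isMetaEdit());
  assertTrue(WALEdit.isCompactionMarker(compactionEdit.getCells().get(0)));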
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestReplicationSourceManager, the method testLogRoll.
@Test
public void testLogRoll() throws Exception {
  long baseline = 1000;
  long time = baseline;
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  KeyValue kv = new KeyValue(r1, f1, r1);
  WALEdit edit = new WALEdit();
  edit.add(kv);
  WALFactory wals =
    new WALFactory(utility.getConfiguration(), URLEncoder.encode("regionserver:60020", "UTF8"));
  ReplicationSourceManager replicationManager = replication.getReplicationManager();
  wals.getWALProvider()
    .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager));
  final WAL wal = wals.getWAL(hri);
  manager.init();
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("tableame"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f1)).build();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  // Testing normal log rolling every 20
  for (long i = 1; i < 101; i++) {
    if (i > 1 && i % 20 == 0) {
      wal.rollWriter();
    }
    LOG.info(Long.toString(i));
    final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test,
      EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
    wal.sync(txid);
  }
  // Simulate a rapid insert that's followed
  // by a report that's still not totally complete (missing last one)
  LOG.info(baseline + " and " + time);
  baseline += 101;
  time = baseline;
  LOG.info(baseline + " and " + time);
  for (int i = 0; i < 3; i++) {
    wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test,
      EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
  }
  wal.sync();
  int logNumber = 0;
  for (Map.Entry<String, NavigableSet<String>> entry : manager.getWALs().get(slaveId).entrySet()) {
    logNumber += entry.getValue().size();
  }
  assertEquals(6, logNumber);
  wal.rollWriter();
  ReplicationSourceInterface source = mock(ReplicationSourceInterface.class);
  when(source.getQueueId()).thenReturn("1");
  when(source.isRecovered()).thenReturn(false);
  when(source.isSyncReplication()).thenReturn(false);
  manager.logPositionAndCleanOldLogs(source,
    new WALEntryBatch(0, manager.getSources().get(0).getCurrentPath()));
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test,
    EnvironmentEdgeManager.currentTime(), mvcc, scopes), edit);
  wal.sync();
  assertEquals(1, manager.getWALs().size());
  // TODO Need a case with only 2 WALs and we only want to delete the first one
}
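The assertEquals(6, logNumber) above is worth unpacking: the loop rolls the writer at i = 20, 40, 60, 80 and 100, and the WAL that was already open before the first roll is tracked as well, which is consistent with 5 + 1 = 6 logs in the queue. A throwaway sketch of just that counting, mirroring the loop bounds of the test:

  int rolls = 0;
  for (long i = 1; i < 101; i++) {
    if (i > 1 && i % 20 == 0) {
      rolls++; // rolls happen at i = 20, 40, 60, 80, 100
    }
  }
  // Five rolled WALs plus the one that was current before the first roll => six tracked logs
  assertEquals(5, rolls);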