Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.
The class TestReplicationSink, method testReplicateEntriesForHFiles.
/**
 * Test replicateEntries with a bulk load entry for 25 HFiles
 */
@Test
public void testReplicateEntriesForHFiles() throws Exception {
  Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
  Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
  int numRows = 10;
  List<Path> p = new ArrayList<>(1);
  // 1. Generate 25 hfile ranges (two boundary values per hfile, hence 50 numbers)
  Random rng = new SecureRandom();
  Set<Integer> numbers = new HashSet<>();
  while (numbers.size() < 50) {
    numbers.add(rng.nextInt(1000));
  }
  List<Integer> numberList = new ArrayList<>(numbers);
  Collections.sort(numberList);
  Map<String, Long> storeFilesSize = new HashMap<>(1);
  // 2. Create 25 hfiles
  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = dir.getFileSystem(conf);
  Iterator<Integer> numbersItr = numberList.iterator();
  for (int i = 0; i < 25; i++) {
    Path hfilePath = new Path(familyDir, "hfile_" + i);
    HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1,
      Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
    p.add(hfilePath);
    storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
  }
  // 3. Create a BulkLoadDescriptor and a WALEdit
  Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
  storeFiles.put(FAM_NAME1, p);
  WALEdit edit = null;
  WALProtos.BulkLoadDescriptor loadDescriptor = null;
  try (Connection c = ConnectionFactory.createConnection(conf);
      RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
    HRegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegionInfo();
    loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,
      UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()),
      storeFiles, storeFilesSize, 1);
    edit = WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
  }
  List<WALEntry> entries = new ArrayList<>(1);
  // 4. Create a WALEntryBuilder
  WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);
  // 5. Copy the hfiles to the path as it is in reality
  for (int i = 0; i < 25; i++) {
    String pathToHfileFromNS = new StringBuilder(100)
      .append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR)
      .append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR)
      .append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray()))
      .append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR)
      .append("hfile_" + i).toString();
    String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;
    // Note: each destination name receives a copy of the first hfile
    FileUtil.copy(fs, p.get(0), fs, new Path(dst), false, conf);
  }
  entries.add(builder.build());
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 6. Assert no existing data in table
    assertEquals(0, scanner.next(numRows).length);
  }
  // 7. Replicate the bulk loaded entry
  SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()),
    replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 8. Assert data is replicated
    assertEquals(numRows, scanner.next(numRows).length);
  }
}
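The tests in this listing call a createWALEntryBuilder helper that is not shown. A minimal sketch of such a helper, assuming the shaded WALProtos.WALKey and HBaseProtos.UUID builders and HConstants.DEFAULT_CLUSTER_ID as the cluster id (the field choices here are an assumption, not the verbatim helper):
private WALEntry.Builder createWALEntryBuilder(TableName table) {
  WALEntry.Builder builder = WALEntry.newBuilder();
  // Exactly one cell in the accompanying CellScanner belongs to this entry
  builder.setAssociatedCellCount(1);
  WALKey.Builder keyBuilder = WALKey.newBuilder();
  UUID.Builder uuidBuilder = UUID.newBuilder();
  uuidBuilder.setLeastSigBits(HConstants.DEFAULT_CLUSTER_ID.getLeastSignificantBits());
  uuidBuilder.setMostSigBits(HConstants.DEFAULT_CLUSTER_ID.getMostSignificantBits());
  keyBuilder.setClusterId(uuidBuilder.build());
  keyBuilder.setTableName(UnsafeByteOperations.unsafeWrap(table.getName()));
  keyBuilder.setWriteTime(System.currentTimeMillis());
  keyBuilder.setEncodedRegionName(UnsafeByteOperations.unsafeWrap(HConstants.EMPTY_BYTE_ARRAY));
  keyBuilder.setLogSequenceNumber(-1);
  builder.setKey(keyBuilder.build());
  return builder;
}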
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.
The class TestReplicationSink, method testMixedPutTables.
/**
 * Insert into 2 different tables
 */
@Test
public void testMixedPutTables() throws Exception {
  List<WALEntry> entries = new ArrayList<>(BATCH_SIZE / 2);
  List<Cell> cells = new ArrayList<>();
  for (int i = 0; i < BATCH_SIZE; i++) {
    entries.add(createEntry(i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1, i, KeyValue.Type.Put, cells));
  }
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
    replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  Scan scan = new Scan();
  ResultScanner scanRes = table2.getScanner(scan);
  for (Result res : scanRes) {
    // Even-numbered rows were routed to TABLE_NAME2
    assertTrue(Bytes.toInt(res.getRow()) % 2 == 0);
  }
}
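The loop above only asserts the even-numbered rows that landed in table2. A symmetric check for table1 (an illustrative addition, not part of the original test) would be:
try (ResultScanner scanRes1 = table1.getScanner(new Scan())) {
  for (Result res : scanRes1) {
    // Odd-numbered rows were routed to TABLE_NAME1
    assertTrue(Bytes.toInt(res.getRow()) % 2 != 0);
  }
}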
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.
The class TestReplicationSink, method createEntry.
private WALEntry createEntry(TableName table, int row, KeyValue.Type type, List<Cell> cells) {
  byte[] fam = table.equals(TABLE_NAME1) ? FAM_NAME1 : FAM_NAME2;
  byte[] rowBytes = Bytes.toBytes(row);
  // Sleep so two consecutive entries for the same key don't get the same timestamp
  try {
    Thread.sleep(1);
  } catch (InterruptedException e) {
    LOG.info("Was interrupted while sleep, meh", e);
  }
  final long now = System.currentTimeMillis();
  KeyValue kv = null;
  if (type.getCode() == KeyValue.Type.Put.getCode()) {
    kv = new KeyValue(rowBytes, fam, fam, now, KeyValue.Type.Put, Bytes.toBytes(row));
  } else if (type.getCode() == KeyValue.Type.DeleteColumn.getCode()) {
    kv = new KeyValue(rowBytes, fam, fam, now, KeyValue.Type.DeleteColumn);
  } else if (type.getCode() == KeyValue.Type.DeleteFamily.getCode()) {
    kv = new KeyValue(rowBytes, fam, null, now, KeyValue.Type.DeleteFamily);
  }
  WALEntry.Builder builder = createWALEntryBuilder(table);
  // The cell travels in the caller's cells list (the CellScanner side channel),
  // not inside the WALEntry itself
  cells.add(kv);
  return builder.build();
}
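Each builder produced by createWALEntryBuilder declares how many cells of the side-channel CellScanner belong to that entry, so the entries and cells collections must stay in sync. A hypothetical sanity check, using the names from the tests above:
// Sum of per-entry cell counts must equal the number of cells handed to the scanner
int expectedCells = 0;
for (WALEntry e : entries) {
  expectedCells += e.getAssociatedCellCount();
}
assert expectedCells == cells.size(); // one cell per entry in these tests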
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.
The class TestReplicationSink, method testMixedDeletes.
/**
 * Insert then do different types of deletes
 */
@Test
public void testMixedDeletes() throws Exception {
  List<WALEntry> entries = new ArrayList<>(3);
  List<Cell> cells = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
  }
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
    replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  entries = new ArrayList<>(3);
  cells = new ArrayList<>();
  entries.add(createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn, cells));
  entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells));
  entries.add(createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn, cells));
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
    replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  Scan scan = new Scan();
  ResultScanner scanRes = table1.getScanner(scan);
  assertEquals(0, scanRes.next(3).length);
}
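A regular scan hides deleted cells, which is what the final assertion relies on. To confirm that the delete markers themselves were replicated, an illustrative follow-up (assuming HBase's raw-scan mode) could be:
Scan raw = new Scan();
raw.setRaw(true); // also return delete markers, not just data cells
try (ResultScanner rawRes = table1.getScanner(raw)) {
  for (Result res : rawRes) {
    // Each of the three rows should still surface its DeleteColumn/DeleteFamily marker
    assertTrue(res.rawCells().length > 0);
  }
}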
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.
The class RSRpcServices, method replicateWALEntry.
/**
 * Replicate WAL entries on the region server.
 *
 * @param controller the RPC controller
 * @param request the request
 */
@Override
@QosPriority(priority = HConstants.REPLICATION_QOS)
public ReplicateWALEntryResponse replicateWALEntry(final RpcController controller,
    final ReplicateWALEntryRequest request) throws ServiceException {
  try {
    checkOpen();
    if (regionServer.replicationSinkHandler != null) {
      requestCount.increment();
      List<WALEntry> entries = request.getEntryList();
      CellScanner cellScanner = ((HBaseRpcController) controller).cellScanner();
      regionServer.getRegionServerCoprocessorHost().preReplicateLogEntries(entries, cellScanner);
      regionServer.replicationSinkHandler.replicateLogEntries(entries, cellScanner,
        request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(),
        request.getSourceHFileArchiveDirPath());
      regionServer.getRegionServerCoprocessorHost().postReplicateLogEntries(entries, cellScanner);
      return ReplicateWALEntryResponse.newBuilder().build();
    } else {
      throw new ServiceException("Replication services are not initialized yet");
    }
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
}
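For context, a minimal sketch of how a caller could assemble the request this endpoint consumes, using only the accessors visible above; the entries, replicationClusterId, baseNamespaceDir, and hfileArchiveDir variables are hypothetical stand-ins:
ReplicateWALEntryRequest request = ReplicateWALEntryRequest.newBuilder()
    .addAllEntry(entries) // the AdminProtos.WALEntry list; cells travel in the RPC CellScanner
    .setReplicationClusterId(replicationClusterId)
    .setSourceBaseNamespaceDirPath(baseNamespaceDir) // path strings, as in the tests above
    .setSourceHFileArchiveDirPath(hfileArchiveDir)
    .build();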