Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor in project hbase by apache.
From the class TestReplicationSourceManager, method getBulkLoadWALEdit:
private WALEdit getBulkLoadWALEdit(NavigableMap<byte[], Integer> scope) {
  // 1. Create store files for the families
  Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
  Map<String, Long> storeFilesSize = new HashMap<>(1);
  List<Path> p = new ArrayList<>(1);
  Path hfilePath1 = new Path(Bytes.toString(f1));
  p.add(hfilePath1);
  try {
    storeFilesSize.put(hfilePath1.getName(), fs.getFileStatus(hfilePath1).getLen());
  } catch (IOException e) {
    LOG.debug("Failed to calculate the size of hfile " + hfilePath1);
    storeFilesSize.put(hfilePath1.getName(), 0L);
  }
  storeFiles.put(f1, p);
  scope.put(f1, 1);
  p = new ArrayList<>(1);
  Path hfilePath2 = new Path(Bytes.toString(f2));
  p.add(hfilePath2);
  try {
    storeFilesSize.put(hfilePath2.getName(), fs.getFileStatus(hfilePath2).getLen());
  } catch (IOException e) {
    LOG.debug("Failed to calculate the size of hfile " + hfilePath2);
    storeFilesSize.put(hfilePath2.getName(), 0L);
  }
  storeFiles.put(f2, p);
  // 2. Create bulk load descriptor
  BulkLoadDescriptor desc = ProtobufUtil.toBulkLoadDescriptor(hri.getTable(),
    UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()), storeFiles, storeFilesSize, 1);
  // 3. Create bulk load WAL edit event
  WALEdit logEdit = WALEdit.createBulkLoadEvent(hri, desc);
  return logEdit;
}
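For reference, the edit built here can be unpacked with the same accessor the replication sink uses below. A minimal round-trip sketch, assuming this test class's fixtures (f1, f2, hri, fs) and JUnit's assertEquals; getStoresCount() is the protobuf-generated getter for the descriptor's repeated stores field:

NavigableMap<byte[], Integer> scope = new TreeMap<>(Bytes.BYTES_COMPARATOR);
WALEdit logEdit = getBulkLoadWALEdit(scope);
// createBulkLoadEvent serializes the descriptor into a single metadata cell
Cell cell = logEdit.getCells().get(0);
BulkLoadDescriptor parsed = WALEdit.getBulkLoadDescriptor(cell); // throws IOException
assertEquals(2, parsed.getStoresCount()); // one StoreDescriptor per family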
From the class ReplicationSink, method buildBulkLoadHFileMap:
private void buildBulkLoadHFileMap(
    final Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap, TableName table,
    Cell cell) throws IOException {
  BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell);
  List<StoreDescriptor> storesList = bld.getStoresList();
  int storesSize = storesList.size();
  for (int j = 0; j < storesSize; j++) {
    StoreDescriptor storeDescriptor = storesList.get(j);
    List<String> storeFileList = storeDescriptor.getStoreFileList();
    int storeFilesSize = storeFileList.size();
    hfilesReplicated += storeFilesSize;
    for (int k = 0; k < storeFilesSize; k++) {
      byte[] family = storeDescriptor.getFamilyName().toByteArray();
      // Build the hfile relative path from its namespace
      String pathToHfileFromNS = getHFilePath(table, bld, storeFileList.get(k), family);
      String tableName = table.getNameWithNamespaceInclAsString();
      if (bulkLoadHFileMap.containsKey(tableName)) {
        List<Pair<byte[], List<String>>> familyHFilePathsList = bulkLoadHFileMap.get(tableName);
        boolean foundFamily = false;
        for (int i = 0; i < familyHFilePathsList.size(); i++) {
          Pair<byte[], List<String>> familyHFilePathsPair = familyHFilePathsList.get(i);
          if (Bytes.equals(familyHFilePathsPair.getFirst(), family)) {
            // Family already present; just add the path to the existing list
            familyHFilePathsPair.getSecond().add(pathToHfileFromNS);
            foundFamily = true;
            break;
          }
        }
        if (!foundFamily) {
          // Family not found; add this family and its hfile paths pair to the list
          addFamilyAndItsHFilePathToTableInMap(family, pathToHfileFromNS, familyHFilePathsList);
        }
      } else {
        // Add this table entry into the map
        addNewTableEntryInMap(bulkLoadHFileMap, family, pathToHfileFromNS, tableName);
      }
    }
  }
}
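The two helpers invoked above are not part of this excerpt. A plausible sketch of their bodies, inferred purely from the call sites (hypothetical, not necessarily the exact HBase implementation):

// Appends a new (family -> hfile paths) pair to an existing table entry.
private void addFamilyAndItsHFilePathToTableInMap(byte[] family, String pathToHfileFromNS,
    List<Pair<byte[], List<String>>> familyHFilePathsList) {
  List<String> hfilePaths = new ArrayList<>(1);
  hfilePaths.add(pathToHfileFromNS); // hypothetical body inferred from the call site
  familyHFilePathsList.add(new Pair<>(family, hfilePaths));
}

// Creates the first (family -> hfile paths) entry for a table not yet in the map.
private void addNewTableEntryInMap(
    final Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap, byte[] family,
    String pathToHfileFromNS, String tableName) {
  List<String> hfilePaths = new ArrayList<>(1);
  hfilePaths.add(pathToHfileFromNS); // hypothetical body inferred from the call site
  List<Pair<byte[], List<String>>> familyHFilePathsList = new ArrayList<>();
  familyHFilePathsList.add(new Pair<>(family, hfilePaths));
  bulkLoadHFileMap.put(tableName, familyHFilePathsList);
}

getHFilePath (also omitted here) presumably joins namespace, table, encoded region name, family, and file name into the relative path that the sink later resolves when copying the hfiles.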