Use of org.apache.hadoop.hbase.regionserver.CellSet in project hbase by apache.
The append method of the class BoundedRecoveredHFilesOutputSink.
@Override
void append(RegionEntryBuffer buffer) throws IOException {
  Map<String, CellSet> familyCells = new HashMap<>();
  Map<String, Long> familySeqIds = new HashMap<>();
  boolean isMetaTable = buffer.tableName.equals(META_TABLE_NAME);
  // First stamp each cell with its entry's sequence id, then group the cells by column family.
  for (WAL.Entry entry : buffer.entryBuffer) {
    long seqId = entry.getKey().getSequenceId();
    List<Cell> cells = entry.getEdit().getCells();
    for (Cell cell : cells) {
      if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
        continue;
      }
      PrivateCellUtil.setSequenceId(cell, seqId);
      String familyName = Bytes.toString(CellUtil.cloneFamily(cell));
      // The comparator needs to be specified for the meta table.
      familyCells
        .computeIfAbsent(familyName,
          key -> new CellSet(
            isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR))
        .add(cell);
      familySeqIds.compute(familyName, (k, v) -> v == null ? seqId : Math.max(v, seqId));
    }
  }
  // Create a new hfile writer for each column family, write the edits, then close the writer.
  String regionName = Bytes.toString(buffer.encodedRegionName);
  for (Map.Entry<String, CellSet> cellsEntry : familyCells.entrySet()) {
    String familyName = cellsEntry.getKey();
    StoreFileWriter writer = createRecoveredHFileWriter(buffer.tableName, regionName,
      familySeqIds.get(familyName), familyName, isMetaTable);
    LOG.trace("Created {}", writer.getPath());
    openingWritersNum.incrementAndGet();
    try {
      for (Cell cell : cellsEntry.getValue()) {
        writer.append(cell);
      }
      // Append the max sequence id to the hfile; it is used during recovery.
      writer.appendMetadata(familySeqIds.get(familyName), false);
      regionEditsWrittenMap.compute(Bytes.toString(buffer.encodedRegionName),
        (k, v) -> v == null ? buffer.entryBuffer.size() : v + buffer.entryBuffer.size());
      splits.add(writer.getPath());
      openingWritersNum.decrementAndGet();
    } finally {
      writer.close();
      LOG.trace("Closed {}, edits={}", writer.getPath(), familyCells.size());
    }
  }
}
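
For context, CellSet is a NavigableSet of Cells ordered by a CellComparator, and the method above relies on that ordering so each family's cells are already sorted when they are appended to the recovered HFile. Below is a minimal, self-contained sketch of the same grouping pattern, assuming the same HBase version as the snippet above; the class name CellSetGroupingSketch and the sample rows are illustrative and not part of the project.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.CellSet;
import org.apache.hadoop.hbase.util.Bytes;

public class CellSetGroupingSketch {
  public static void main(String[] args) {
    // Group cells by column family; each group stays sorted by the default comparator.
    Map<String, CellSet> familyCells = new HashMap<>();

    // Sample cells, deliberately added out of row order.
    Cell[] cells = new Cell[] {
      new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("cf1"), Bytes.toBytes("q"),
        Bytes.toBytes("v2")),
      new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf1"), Bytes.toBytes("q"),
        Bytes.toBytes("v1")),
      new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf2"), Bytes.toBytes("q"),
        Bytes.toBytes("v3")) };

    for (Cell cell : cells) {
      String familyName = Bytes.toString(CellUtil.cloneFamily(cell));
      familyCells.computeIfAbsent(familyName, key -> new CellSet(CellComparatorImpl.COMPARATOR))
        .add(cell);
    }

    // Iterating each CellSet yields cells in comparator order, e.g. row1 before row2 for cf1.
    familyCells.forEach((family, set) -> set.forEach(
      cell -> System.out.println(family + " -> " + Bytes.toString(CellUtil.cloneRow(cell)))));
  }
}

Running the sketch prints the cf1 rows in row order even though they were inserted out of order, which is the property the per-family CellSet provides before the HFile writer appends the cells.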