Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor in project hbase by apache.
The class HRegion, method getStoresToFlush:
private Collection<HStore> getStoresToFlush(FlushDescriptor flushDesc) {
  List<HStore> storesToFlush = new ArrayList<>();
  for (StoreFlushDescriptor storeFlush : flushDesc.getStoreFlushesList()) {
    byte[] family = storeFlush.getFamilyName().toByteArray();
    HStore store = getStore(family);
    if (store == null) {
      LOG.warn(getRegionInfo().getEncodedName() + " : "
        + "Received a flush start marker from primary, but the family is not found. "
        + "Ignoring StoreFlushDescriptor:" + TextFormat.shortDebugString(storeFlush));
      continue;
    }
    storesToFlush.add(store);
  }
  return storesToFlush;
}
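
For context, the protobuf message this method consumes can be sketched with the generated builder API. This is a minimal, illustrative construction, not taken from the hbase source; the table name, encoded region name, and sequence number are hypothetical, and the ByteString import assumes HBase's relocated thirdparty protobuf:

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

// Hypothetical START_FLUSH descriptor carrying a single store flush for "family".
FlushDescriptor flushDesc = FlushDescriptor.newBuilder()
  .setAction(FlushAction.START_FLUSH)
  .setTableName(ByteString.copyFromUtf8("testtable"))      // hypothetical table name
  .setEncodedRegionName(ByteString.copyFromUtf8("abc123")) // hypothetical encoded region name
  .setFlushSequenceNumber(42L)
  .addStoreFlushes(StoreFlushDescriptor.newBuilder()
    .setFamilyName(ByteString.copyFromUtf8("family"))
    .setStoreHomeDir("family")
    .build())
  .build();

Passing such a descriptor to getStoresToFlush would resolve the single StoreFlushDescriptor to the HStore for "family", or skip it with a warning if the region has no such family.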
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor in project hbase by apache.
The class TestHRegion, method testFlushMarkers:
@Test
public void testFlushMarkers() throws Exception {
  // tests that flush markers are written to WAL and handled at recovered edits
  byte[] family = Bytes.toBytes("family");
  Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
  final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
  CommonFSUtils.setRootDir(walConf, logDir);
  final WALFactory wals = new WALFactory(walConf, method);
  final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build());
  this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
    CONF, false, Durability.USE_DEFAULT, wal, family);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();

    long maxSeqId = 3;
    long minSeqId = 0;
    for (long i = minSeqId; i < maxSeqId; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
      region.put(put);
      region.flush(true);
    }
    // this will create a region with 3 store files from the flushes
    assertEquals(3, region.getStore(family).getStorefilesCount());
    List<String> storeFiles = new ArrayList<>(3);
    for (HStoreFile sf : region.getStore(family).getStorefiles()) {
      storeFiles.add(sf.getPath().getName());
    }
    // now verify that the flush markers are written
    wal.shutdown();
    WAL.Reader reader = WALFactory.createReader(fs,
      AbstractFSWALProvider.getCurrentFileName(wal), TEST_UTIL.getConfiguration());
    try {
      List<WAL.Entry> flushDescriptors = new ArrayList<>();
      long lastFlushSeqId = -1;
      while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
          break;
        }
        Cell cell = entry.getEdit().getCells().get(0);
        if (WALEdit.isMetaEditFamily(cell)) {
          FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
          assertNotNull(flushDesc);
          assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
          if (flushDesc.getAction() == FlushAction.START_FLUSH) {
            assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
          } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
            assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
          }
          lastFlushSeqId = flushDesc.getFlushSequenceNumber();
          assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
          // only one store
          assertEquals(1, flushDesc.getStoreFlushesCount());
          StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
          assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
          assertEquals("family", storeFlushDesc.getStoreHomeDir());
          if (flushDesc.getAction() == FlushAction.START_FLUSH) {
            assertEquals(0, storeFlushDesc.getFlushOutputCount());
          } else {
            // only one file from the flush
            assertEquals(1, storeFlushDesc.getFlushOutputCount());
            assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
          }
          flushDescriptors.add(entry);
        }
      }
      // one START_FLUSH and one COMMIT_FLUSH marker per flush
      assertEquals(3 * 2, flushDescriptors.size());

      // now write those markers to the recovered edits again.
      Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      for (WAL.Entry entry : flushDescriptors) {
        writer.append(entry);
      }
      writer.close();
    } finally {
      if (null != reader) {
        try {
          reader.close();
        } catch (IOException exception) {
          LOG.warn("Problem closing wal: " + exception.getMessage());
          LOG.debug("exception details", exception);
        }
      }
    }

    // close the region now, and reopen again
    HBaseTestingUtil.closeRegionAndWAL(this.region);
    region = HRegion.openHRegion(region, null);

    // now check whether we can read back the data from the region
    for (long i = minSeqId; i < maxSeqId; i++) {
      Get get = new Get(Bytes.toBytes(i));
      Result result = region.get(get);
      byte[] value = result.getValue(family, Bytes.toBytes(i));
      assertArrayEquals(Bytes.toBytes(i), value);
    }
  } finally {
    HBaseTestingUtil.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
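
Taken together, the assertions encode the marker protocol: each flush writes a START_FLUSH marker with no flush outputs, followed by a COMMIT_FLUSH marker carrying the same flush sequence number and exactly one flush output, so three flushes yield the six meta edits counted above. Replaying those markers through recovered edits and then reading every row back verifies that flush-marker replay neither drops nor corrupts data.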
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor in project hbase by apache.
The class HRegion, method replayFlushInStores:
/**
 * Replays the given flush descriptor by opening the flush files in stores and dropping the
 * memstore snapshots if requested.
 * @deprecated Since 3.0.0, will be removed in 4.0.0. Only kept for compatibility with the old
 *             region replica implementation.
 */
@Deprecated
private void replayFlushInStores(FlushDescriptor flush, PrepareFlushResult prepareFlushResult,
  boolean dropMemstoreSnapshot) throws IOException {
  for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) {
    byte[] family = storeFlush.getFamilyName().toByteArray();
    HStore store = getStore(family);
    if (store == null) {
      LOG.warn(getRegionInfo().getEncodedName() + " : "
        + "Received a flush commit marker from primary, but the family is not found. "
        + "Ignoring StoreFlushDescriptor:" + storeFlush);
      continue;
    }
    List<String> flushFiles = storeFlush.getFlushOutputList();
    StoreFlushContext ctx = null;
    long startTime = EnvironmentEdgeManager.currentTime();
    if (prepareFlushResult == null || prepareFlushResult.storeFlushCtxs == null) {
      ctx = store.createFlushContext(flush.getFlushSequenceNumber(), FlushLifeCycleTracker.DUMMY);
    } else {
      ctx = prepareFlushResult.storeFlushCtxs.get(family);
      startTime = prepareFlushResult.startTime;
    }
    if (ctx == null) {
      LOG.warn(getRegionInfo().getEncodedName() + " : "
        + "Unexpected: flush commit marker received from store " + Bytes.toString(family)
        + " but no associated flush context. Ignoring");
      continue;
    }
    // replay the flush
    ctx.replayFlush(flushFiles, dropMemstoreSnapshot);
    // Record latest flush time
    this.lastStoreFlushTimeMap.put(store, startTime);
  }
}
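
For orientation, a hedged sketch of where this method sits in marker replay: a caller would switch on the descriptor's FlushAction and invoke replayFlushInStores for the commit case. The control flow and variable names below are illustrative, not HRegion's actual replay entry point; the FlushAction constants are those declared for FlushDescriptor in WAL.proto:

// Illustrative dispatch only, not the real HRegion replay code.
switch (flushDesc.getAction()) {
  case START_FLUSH:
    // a prepare step would snapshot the memstore and record a PrepareFlushResult
    break;
  case COMMIT_FLUSH:
    // open the flushed files in the stores and drop the memstore snapshot
    replayFlushInStores(flushDesc, prepareFlushResult, true);
    break;
  case ABORT_FLUSH:
  case CANNOT_FLUSH:
  default:
    // nothing for the stores to open
    break;
}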