
Example 1 with StoreFlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor in project hbase by apache.

From the class HRegion, method getStoresToFlush:

// Resolves each StoreFlushDescriptor in the flush marker to its HStore,
// skipping column families that no longer exist in this region.
private Collection<HStore> getStoresToFlush(FlushDescriptor flushDesc) {
    List<HStore> storesToFlush = new ArrayList<>();
    for (StoreFlushDescriptor storeFlush : flushDesc.getStoreFlushesList()) {
        byte[] family = storeFlush.getFamilyName().toByteArray();
        HStore store = getStore(family);
        if (store == null) {
            LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush start marker from primary, but the family is not found. Ignoring" + " StoreFlushDescriptor:" + TextFormat.shortDebugString(storeFlush));
            continue;
        }
        storesToFlush.add(store);
    }
    return storesToFlush;
}
Also used : ArrayList(java.util.ArrayList) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor)
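
For context, FlushDescriptor and its nested StoreFlushDescriptor are plain protobuf-generated messages, so builders with setters mirroring the getters used above are available. Below is a minimal, illustrative sketch of assembling a single-family START_FLUSH marker; every literal value is a placeholder, not something HBase itself produces:

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

// Sketch only: a region server derives these values from live region and
// store state; here they are hard-coded placeholders.
private static FlushDescriptor exampleStartFlushMarker() {
    byte[] family = Bytes.toBytes("family");
    StoreFlushDescriptor storeFlush = StoreFlushDescriptor.newBuilder()
        .setFamilyName(ByteString.copyFrom(family))
        // the family directory name under the region dir; Example 2 below
        // asserts this equals "family" for a family named "family"
        .setStoreHomeDir("family")
        .build();
    return FlushDescriptor.newBuilder()
        .setAction(FlushAction.START_FLUSH)
        .setTableName(ByteString.copyFrom(Bytes.toBytes("testtable")))
        .setEncodedRegionName(ByteString.copyFrom(Bytes.toBytes("encoded-region-name")))
        .setFlushSequenceNumber(42L)
        .addStoreFlushes(storeFlush)
        .build();
}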

Example 2 with StoreFlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor in project hbase by apache.

From the class TestHRegion, method testFlushMarkers:

@Test
public void testFlushMarkers() throws Exception {
    // tests that flush markers are written to WAL and handled at recovered edits
    byte[] family = Bytes.toBytes("family");
    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
    CommonFSUtils.setRootDir(walConf, logDir);
    final WALFactory wals = new WALFactory(walConf, method);
    final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build());
    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        long maxSeqId = 3;
        long minSeqId = 0;
        for (long i = minSeqId; i < maxSeqId; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
            region.put(put);
            region.flush(true);
        }
        // this will create a region with 3 files from flush
        assertEquals(3, region.getStore(family).getStorefilesCount());
        List<String> storeFiles = new ArrayList<>(3);
        for (HStoreFile sf : region.getStore(family).getStorefiles()) {
            storeFiles.add(sf.getPath().getName());
        }
        // now verify that the flush markers are written
        wal.shutdown();
        WAL.Reader reader = WALFactory.createReader(fs, AbstractFSWALProvider.getCurrentFileName(wal), TEST_UTIL.getConfiguration());
        try {
            List<WAL.Entry> flushDescriptors = new ArrayList<>();
            long lastFlushSeqId = -1;
            while (true) {
                WAL.Entry entry = reader.next();
                if (entry == null) {
                    break;
                }
                Cell cell = entry.getEdit().getCells().get(0);
                if (WALEdit.isMetaEditFamily(cell)) {
                    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
                    assertNotNull(flushDesc);
                    assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
                    if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                        assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
                    } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                        assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
                    }
                    lastFlushSeqId = flushDesc.getFlushSequenceNumber();
                    assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
                    // only one store
                    assertEquals(1, flushDesc.getStoreFlushesCount());
                    StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
                    assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
                    assertEquals("family", storeFlushDesc.getStoreHomeDir());
                    if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                        assertEquals(0, storeFlushDesc.getFlushOutputCount());
                    } else {
                        // only one file from flush
                        assertEquals(1, storeFlushDesc.getFlushOutputCount());
                        assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
                    }
                    flushDescriptors.add(entry);
                }
            }
            // one START_FLUSH and one COMMIT_FLUSH marker per flush
            assertEquals(3 * 2, flushDescriptors.size());
            // now write those markers to the recovered edits again.
            Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            for (WAL.Entry entry : flushDescriptors) {
                writer.append(entry);
            }
            writer.close();
        } finally {
            if (null != reader) {
                try {
                    reader.close();
                } catch (IOException exception) {
                    LOG.warn("Problem closing wal: " + exception.getMessage());
                    LOG.debug("exception details", exception);
                }
            }
        }
        // close the region now, and reopen again
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        region = HRegion.openHRegion(region, null);
        // now check whether we can read back the data from the region
        for (long i = minSeqId; i < maxSeqId; i++) {
            Get get = new Get(Bytes.toBytes(i));
            Result result = region.get(get);
            byte[] value = result.getValue(family, Bytes.toBytes(i));
            assertArrayEquals(Bytes.toBytes(i), value);
        }
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ArrayList(java.util.ArrayList) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Put(org.apache.hadoop.hbase.client.Put) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Get(org.apache.hadoop.hbase.client.Get) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Cell(org.apache.hadoop.hbase.Cell) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) Test(org.junit.Test)
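
The assertions above rely on marker pairing: each flush writes a START_FLUSH and then a COMMIT_FLUSH that carries the same flush sequence number. As a hedged sketch, here is a hypothetical helper (not part of HBase) that derives the last committed flush sequence id from decoded markers, using only the getters exercised in the test:

import java.util.List;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;

// Hypothetical helper: returns the flush sequence number of the most recent
// COMMIT_FLUSH marker, or -1 if no flush ever committed.
private static long lastCommittedFlushSeqId(List<FlushDescriptor> markers) {
    long last = -1;
    for (FlushDescriptor desc : markers) {
        if (desc.getAction() == FlushAction.COMMIT_FLUSH) {
            last = Math.max(last, desc.getFlushSequenceNumber());
        }
    }
    return last;
}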

Example 3 with StoreFlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor in project hbase by apache.

From the class HRegion, method replayFlushInStores:

/**
 * Replays the given flush descriptor by opening the flush files in stores and dropping the
 * memstore snapshots if requested.
 * @deprecated Since 3.0.0, will be removed in 4.0.0. Kept only for compatibility with the old
 *             region replica implementation.
 */
@Deprecated
private void replayFlushInStores(FlushDescriptor flush, PrepareFlushResult prepareFlushResult, boolean dropMemstoreSnapshot) throws IOException {
    for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) {
        byte[] family = storeFlush.getFamilyName().toByteArray();
        HStore store = getStore(family);
        if (store == null) {
            LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush commit marker from primary, but the family is not found. " + "Ignoring StoreFlushDescriptor:" + storeFlush);
            continue;
        }
        List<String> flushFiles = storeFlush.getFlushOutputList();
        StoreFlushContext ctx = null;
        long startTime = EnvironmentEdgeManager.currentTime();
        if (prepareFlushResult == null || prepareFlushResult.storeFlushCtxs == null) {
            ctx = store.createFlushContext(flush.getFlushSequenceNumber(), FlushLifeCycleTracker.DUMMY);
        } else {
            ctx = prepareFlushResult.storeFlushCtxs.get(family);
            startTime = prepareFlushResult.startTime;
        }
        if (ctx == null) {
            LOG.warn(getRegionInfo().getEncodedName() + " : " + "Unexpected: flush commit marker received from store " + Bytes.toString(family) + " but no associated flush context. Ignoring");
            continue;
        }
        // replay the flush
        ctx.replayFlush(flushFiles, dropMemstoreSnapshot);
        // Record latest flush time
        this.lastStoreFlushTimeMap.put(store, startTime);
    }
}
Also used : StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor)
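
replayFlushInStores covers the COMMIT_FLUSH leg of the marker protocol; the START_FLUSH leg prepares the memstore snapshot that is later dropped. As an illustrative sketch only (the dispatch shape is assumed, and the surrounding method is hypothetical), a consumer of flush markers might branch on the action like this:

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;

// Hypothetical dispatcher; the comments stand in for region logic such as
// replayFlushInStores above.
private void handleFlushMarker(FlushDescriptor flush) {
    switch (flush.getAction()) {
        case START_FLUSH:
            // snapshot the memstores named by the StoreFlushDescriptors
            break;
        case COMMIT_FLUSH:
            // open the flush output files and drop the memstore snapshot,
            // e.g. via logic like replayFlushInStores above
            break;
        default:
            // remaining actions are out of scope for this sketch
            break;
    }
}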

Aggregations

StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor): 3 usages
ArrayList (java.util.ArrayList): 2 usages
IOException (java.io.IOException): 1 usage
InterruptedIOException (java.io.InterruptedIOException): 1 usage
Configuration (org.apache.hadoop.conf.Configuration): 1 usage
FileSystem (org.apache.hadoop.fs.FileSystem): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
Cell (org.apache.hadoop.hbase.Cell): 1 usage
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 1 usage
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 1 usage
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 1 usage
Get (org.apache.hadoop.hbase.client.Get): 1 usage
Put (org.apache.hadoop.hbase.client.Put): 1 usage
Result (org.apache.hadoop.hbase.client.Result): 1 usage
FaultyFileSystem (org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem): 1 usage
FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor): 1 usage
AbstractFSWALProvider (org.apache.hadoop.hbase.wal.AbstractFSWALProvider): 1 usage
WAL (org.apache.hadoop.hbase.wal.WAL): 1 usage
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 1 usage
WALProvider (org.apache.hadoop.hbase.wal.WALProvider): 1 usage