
Example 1 with CompactionDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor in project hbase by apache.

In class RSRpcServices, method doReplayBatchOp.

/**
   * Execute a list of Put/Delete mutations. The function returns OperationStatus instead of
   * constructing MultiResponse to save a possible loop if caller doesn't need MultiResponse.
   * @param region
   * @param mutations
   * @param replaySeqId
   * @return an array of OperationStatus which internally contains the OperationStatusCode and the
   *         exceptionMessage if any
   * @throws IOException
   */
private OperationStatus[] doReplayBatchOp(final Region region, final List<WALSplitter.MutationReplay> mutations, long replaySeqId) throws IOException {
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
        for (Iterator<WALSplitter.MutationReplay> it = mutations.iterator(); it.hasNext(); ) {
            WALSplitter.MutationReplay m = it.next();
            if (m.type == MutationType.PUT) {
                batchContainsPuts = true;
            } else {
                batchContainsDelete = true;
            }
            NavigableMap<byte[], List<Cell>> map = m.mutation.getFamilyCellMap();
            List<Cell> metaCells = map.get(WALEdit.METAFAMILY);
            if (metaCells != null && !metaCells.isEmpty()) {
                for (Cell metaCell : metaCells) {
                    CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell);
                    boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
                    HRegion hRegion = (HRegion) region;
                    if (compactionDesc != null) {
                        // replay the compaction. Remove the files from stores only if we are the primary
                        // region replica (thus own the files)
                        hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica, replaySeqId);
                        continue;
                    }
                    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell);
                    if (flushDesc != null && !isDefaultReplica) {
                        hRegion.replayWALFlushMarker(flushDesc, replaySeqId);
                        continue;
                    }
                    RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell);
                    if (regionEvent != null && !isDefaultReplica) {
                        hRegion.replayWALRegionEventMarker(regionEvent);
                        continue;
                    }
                    BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell);
                    if (bulkLoadEvent != null) {
                        hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent);
                        continue;
                    }
                }
                it.remove();
            }
        }
        requestCount.add(mutations.size());
        if (!region.getRegionInfo().isMetaTable()) {
            regionServer.cacheFlusher.reclaimMemStoreMemory();
        }
        return region.batchReplay(mutations.toArray(new WALSplitter.MutationReplay[mutations.size()]), replaySeqId);
    } finally {
        if (regionServer.metricsRegionServer != null) {
            long after = EnvironmentEdgeManager.currentTime();
            if (batchContainsPuts) {
                regionServer.metricsRegionServer.updatePut(after - before);
            }
            if (batchContainsDelete) {
                regionServer.metricsRegionServer.updateDelete(after - before);
            }
        }
    }
}
Also used : BulkLoadDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) ArrayList(java.util.ArrayList) List(java.util.List) CompactionDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) WALSplitter(org.apache.hadoop.hbase.wal.WALSplitter) Cell(org.apache.hadoop.hbase.Cell) ByteBufferCell(org.apache.hadoop.hbase.ByteBufferCell)
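
A note on where these markers come from: the replay path above only consumes them, while Example 3 below shows the producer side in a test. As a bridge, here is a minimal, hedged sketch (not taken from the project) that builds a CompactionDescriptor, wraps it in a compaction-marker WALEdit, and reads it back with the same WALEdit.getCompaction call used in doReplayBatchOp. The column family and file paths are made up for illustration, and the WALEdit/ProtobufUtil package names vary between HBase versions.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public final class CompactionMarkerSketch {

    /** Round-trips a compaction marker through a WALEdit. Illustrative only. */
    static CompactionDescriptor roundTrip(HRegion region) throws IOException {
        byte[] family = Bytes.toBytes("cf"); // hypothetical column family
        List<Path> inputs = Arrays.asList(new Path("/store/a"), new Path("/store/b")); // hypothetical store files
        List<Path> outputs = Arrays.asList(new Path("/store/ab")); // hypothetical compacted file
        Path storeDir = new Path("/store");
        // Same ProtobufUtil call as in TestHRegion (Example 3); null means "use the
        // region's own encoded name" rather than a mismatched one.
        CompactionDescriptor desc = ProtobufUtil.toCompactionDescriptor(region.getRegionInfo(), null, family, inputs, outputs, storeDir);
        // The marker travels as a cell under WALEdit.METAFAMILY.
        WALEdit markerEdit = WALEdit.createCompaction(region.getRegionInfo(), desc);
        // On replay, doReplayBatchOp recovers it exactly like this and then calls
        // replayWALCompactionMarker(...).
        for (Cell cell : markerEdit.getCells()) {
            CompactionDescriptor replayed = WALEdit.getCompaction(cell);
            if (replayed != null) {
                return replayed;
            }
        }
        return null;
    }

    private CompactionMarkerSketch() {
    }
}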

Example 2 with CompactionDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor in project hbase by apache.

In class HRegion, method replayRecoveredEdits.

/*
   * @param edits File of recovered edits.
   * @param maxSeqIdInStores Maximum sequenceid found in each store.  Edits in wal
   * must be larger than this to be replayed for each store.
   * @param reporter
   * @return the sequence id of the last edit added to this region out of the
   * recovered edits log or <code>minSeqId</code> if nothing added from editlogs.
   * @throws IOException
   */
private long replayRecoveredEdits(final Path edits, Map<byte[], Long> maxSeqIdInStores, final CancelableProgressable reporter) throws IOException {
    String msg = "Replaying edits from " + edits;
    LOG.info(msg);
    MonitoredTask status = TaskMonitor.get().createStatus(msg);
    FileSystem fs = this.fs.getFileSystem();
    status.setStatus("Opening recovered edits");
    WAL.Reader reader = null;
    try {
        reader = WALFactory.createReader(fs, edits, conf);
        long currentEditSeqId = -1;
        long currentReplaySeqId = -1;
        long firstSeqIdInLog = -1;
        long skippedEdits = 0;
        long editsCount = 0;
        long intervalEdits = 0;
        WAL.Entry entry;
        HStore store = null;
        boolean reported_once = false;
        ServerNonceManager ng = this.rsServices == null ? null : this.rsServices.getNonceManager();
        try {
            // How many edits seen before we check elapsed time
            int interval = this.conf.getInt("hbase.hstore.report.interval.edits", 2000);
            // How often to send a progress report (default 1/2 master timeout)
            int period = this.conf.getInt("hbase.hstore.report.period", 300000);
            long lastReport = EnvironmentEdgeManager.currentTime();
            if (coprocessorHost != null) {
                coprocessorHost.preReplayWALs(this.getRegionInfo(), edits);
            }
            while ((entry = reader.next()) != null) {
                WALKey key = entry.getKey();
                WALEdit val = entry.getEdit();
                if (ng != null) {
                    // some test, or nonces disabled
                    ng.reportOperationFromWal(key.getNonceGroup(), key.getNonce(), key.getWriteTime());
                }
                if (reporter != null) {
                    intervalEdits += val.size();
                    if (intervalEdits >= interval) {
                        // Number of edits interval reached
                        intervalEdits = 0;
                        long cur = EnvironmentEdgeManager.currentTime();
                        if (lastReport + period <= cur) {
                            status.setStatus("Replaying edits..." + " skipped=" + skippedEdits + " edits=" + editsCount);
                            // Timeout reached
                            if (!reporter.progress()) {
                                msg = "Progressable reporter failed, stopping replay";
                                LOG.warn(msg);
                                status.abort(msg);
                                throw new IOException(msg);
                            }
                            reported_once = true;
                            lastReport = cur;
                        }
                    }
                }
                if (firstSeqIdInLog == -1) {
                    firstSeqIdInLog = key.getLogSeqNum();
                }
                if (currentEditSeqId > key.getLogSeqNum()) {
                    // when this condition is true, it means we have a serious defect because we need to
                    // maintain increasing SeqId for WAL edits per region
                    LOG.error(getRegionInfo().getEncodedName() + " : " + "Found decreasing SeqId. PreId=" + currentEditSeqId + " key=" + key + "; edit=" + val);
                } else {
                    currentEditSeqId = key.getLogSeqNum();
                }
                currentReplaySeqId = (key.getOrigLogSeqNum() > 0) ? key.getOrigLogSeqNum() : currentEditSeqId;
                // Start coprocessor replay here. The coprocessor is for each WALEdit
                // instead of a KeyValue.
                if (coprocessorHost != null) {
                    status.setStatus("Running pre-WAL-restore hook in coprocessors");
                    if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
                        // if bypass this wal entry, ignore it ...
                        continue;
                    }
                }
                boolean checkRowWithinBoundary = false;
                // Check this edit is for this region.
                if (!Bytes.equals(key.getEncodedRegionName(), this.getRegionInfo().getEncodedNameAsBytes())) {
                    checkRowWithinBoundary = true;
                }
                boolean flush = false;
                MemstoreSize memstoreSize = new MemstoreSize();
                for (Cell cell : val.getCells()) {
                    // Check this edit is for me. Also, guard against writing the special
                    // METACOLUMN info such as HBASE::CACHEFLUSH entries
                    if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
                        // if region names don't match, skip replaying the compaction marker
                        if (!checkRowWithinBoundary) {
                            //this is a special edit, we should handle it
                            CompactionDescriptor compaction = WALEdit.getCompaction(cell);
                            if (compaction != null) {
                                //replay the compaction
                                replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE);
                            }
                        }
                        skippedEdits++;
                        continue;
                    }
                    // Figure which store the edit is meant for.
                    if (store == null || !CellUtil.matchingFamily(cell, store.getFamily().getName())) {
                        store = getHStore(cell);
                    }
                    if (store == null) {
                        // This should never happen.  Perhaps schema was changed between
                        // crash and redeploy?
                        LOG.warn("No family for " + cell);
                        skippedEdits++;
                        continue;
                    }
                    if (checkRowWithinBoundary && !rowIsInRange(this.getRegionInfo(), cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) {
                        LOG.warn("Row of " + cell + " is not within region boundary");
                        skippedEdits++;
                        continue;
                    }
                    // Now, figure if we should skip this edit.
                    if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily().getName())) {
                        skippedEdits++;
                        continue;
                    }
                    CellUtil.setSequenceId(cell, currentReplaySeqId);
                    restoreEdit(store, cell, memstoreSize);
                    editsCount++;
                }
                if (this.rsAccounting != null) {
                    rsAccounting.addRegionReplayEditsSize(getRegionInfo().getRegionName(), memstoreSize);
                }
                flush = isFlushSize(this.addAndGetMemstoreSize(memstoreSize));
                if (flush) {
                    internalFlushcache(null, currentEditSeqId, stores.values(), status, false);
                }
                if (coprocessorHost != null) {
                    coprocessorHost.postWALRestore(this.getRegionInfo(), key, val);
                }
            }
            if (coprocessorHost != null) {
                coprocessorHost.postReplayWALs(this.getRegionInfo(), edits);
            }
        } catch (EOFException eof) {
            Path p = WALSplitter.moveAsideBadEditsFile(fs, edits);
            msg = "Encountered EOF. Most likely due to Master failure during " + "wal splitting, so we have this data in another edit.  " + "Continuing, but renaming " + edits + " as " + p;
            LOG.warn(msg, eof);
            status.abort(msg);
        } catch (IOException ioe) {
            // If the IOE resulted from bad file format,
            // then this problem is idempotent and retrying won't help
            if (ioe.getCause() instanceof ParseException) {
                Path p = WALSplitter.moveAsideBadEditsFile(fs, edits);
                msg = "File corruption encountered!  " + "Continuing, but renaming " + edits + " as " + p;
                LOG.warn(msg, ioe);
                status.setStatus(msg);
            } else {
                status.abort(StringUtils.stringifyException(ioe));
                // other IO errors may be transient (bad network connection,
                // checksum exception on one datanode, etc).  throw & retry
                throw ioe;
            }
        }
        if (reporter != null && !reported_once) {
            reporter.progress();
        }
        msg = "Applied " + editsCount + ", skipped " + skippedEdits + ", firstSequenceIdInLog=" + firstSeqIdInLog + ", maxSequenceIdInLog=" + currentEditSeqId + ", path=" + edits;
        status.markComplete(msg);
        LOG.debug(msg);
        return currentEditSeqId;
    } finally {
        status.cleanup();
        if (reader != null) {
            reader.close();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) TimeoutIOException(org.apache.hadoop.hbase.exceptions.TimeoutIOException) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) EOFException(java.io.EOFException) ParseException(java.text.ParseException) CompactionDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) Cell(org.apache.hadoop.hbase.Cell) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask)
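
The central decision in the loop above is whether an edit still needs to be applied: meta-family cells are treated as markers, and ordinary cells are skipped when their WAL sequence id is at or below the store's flushed maximum in maxSeqIdInStores. A hedged restatement of just that sequence-id gate, kept deliberately small; only Bytes.BYTES_COMPARATOR is borrowed from HBase, and the family and sequence-id values are invented.

import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

public final class SeqIdGateSketch {

    /**
     * Mirrors the "should we skip this edit" check in replayRecoveredEdits: an edit
     * is replayed only if its sequence id is strictly greater than the highest
     * sequence id already persisted for that column family.
     */
    static boolean shouldReplay(Map<byte[], Long> maxSeqIdInStores, byte[] family, long editSeqId) {
        Long flushedUpTo = maxSeqIdInStores.get(family);
        return flushedUpTo == null || editSeqId > flushedUpTo;
    }

    public static void main(String[] args) {
        // byte[] keys need an explicit comparator (HBase uses Bytes.BYTES_COMPARATOR);
        // a HashMap would compare array references and never find the family.
        Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        maxSeqIdInStores.put(Bytes.toBytes("cf"), 42L);
        System.out.println(shouldReplay(maxSeqIdInStores, Bytes.toBytes("cf"), 40L)); // false: already flushed
        System.out.println(shouldReplay(maxSeqIdInStores, Bytes.toBytes("cf"), 43L)); // true: newer than the flush
    }
}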

Example 3 with CompactionDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor in project hbase by apache.

In class TestHRegion, method testRecoveredEditsReplayCompaction.

public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception {
    CONF.setClass(HConstants.REGION_IMPL, HRegionForTesting.class, Region.class);
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        long maxSeqId = 3;
        long minSeqId = 0;
        for (long i = minSeqId; i < maxSeqId; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
            region.put(put);
            region.flush(true);
        }
        // this will create a region with 3 files
        assertEquals(3, region.getStore(family).getStorefilesCount());
        List<Path> storeFiles = new ArrayList<>(3);
        for (HStoreFile sf : region.getStore(family).getStorefiles()) {
            storeFiles.add(sf.getPath());
        }
        // disable compaction completion
        CONF.setBoolean("hbase.hstore.compaction.complete", false);
        region.compactStores();
        // ensure that nothing changed
        assertEquals(3, region.getStore(family).getStorefilesCount());
        // now find the compacted file, and manually add it to the recovered edits
        Path tmpDir = new Path(region.getRegionFileSystem().getTempDir(), Bytes.toString(family));
        FileStatus[] files = CommonFSUtils.listStatus(fs, tmpDir);
        String errorMsg = "Expected to find 1 file in the region temp directory " + "from the compaction, could not find any";
        assertNotNull(errorMsg, files);
        assertEquals(errorMsg, 1, files.length);
        // move the file inside region dir
        Path newFile = region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), files[0].getPath());
        byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes();
        byte[] fakeEncodedNameAsBytes = new byte[encodedNameAsBytes.length];
        for (int i = 0; i < encodedNameAsBytes.length; i++) {
            // Mix the byte array to have a new encodedName
            fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1);
        }
        CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(this.region.getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family, storeFiles, Lists.newArrayList(newFile), region.getRegionFileSystem().getStoreDir(Bytes.toString(family)));
        WALUtil.writeCompactionMarker(region.getWAL(), this.region.getReplicationScope(), this.region.getRegionInfo(), compactionDescriptor, region.getMVCC(), null);
        Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
        fs.create(recoveredEdits);
        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
        long time = System.nanoTime();
        writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, 10, time, HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(), compactionDescriptor)));
        writer.close();
        // close the region now, and reopen again
        region.getTableDescriptor();
        region.getRegionInfo();
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        try {
            region = HRegion.openHRegion(region, null);
        } catch (WrongRegionException wre) {
            fail("Matching encoded region name should not have produced WrongRegionException");
        }
        // now check whether we have only one store file, the compacted one
        Collection<HStoreFile> sfs = region.getStore(family).getStorefiles();
        for (HStoreFile sf : sfs) {
            LOG.info(Objects.toString(sf.getPath()));
        }
        if (!mismatchedRegionName) {
            assertEquals(1, region.getStore(family).getStorefilesCount());
        }
        files = CommonFSUtils.listStatus(fs, tmpDir);
        assertTrue("Expected to find 0 files inside " + tmpDir, files == null || files.length == 0);
        for (long i = minSeqId; i < maxSeqId; i++) {
            Get get = new Get(Bytes.toBytes(i));
            Result result = region.get(get);
            byte[] value = result.getValue(family, Bytes.toBytes(i));
            assertArrayEquals(Bytes.toBytes(i), value);
        }
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
        CONF.setClass(HConstants.REGION_IMPL, HRegion.class, Region.class);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) WAL(org.apache.hadoop.hbase.wal.WAL) ArrayList(java.util.ArrayList) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) Put(org.apache.hadoop.hbase.client.Put) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Get(org.apache.hadoop.hbase.client.Get) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) CompactionDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider)
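
The helper above is parameterized on whether the compaction marker carries a mismatched encoded region name. The actual @Test wrappers are not part of this excerpt; a plausible pair of JUnit drivers inside the same test class (hypothetical method names) would simply exercise both branches:

// Hypothetical JUnit drivers; the real TestHRegion defines its own @Test methods.
@Test
public void testRecoveredEditsReplayCompaction() throws Exception {
    testRecoveredEditsReplayCompaction(false);
}

@Test
public void testRecoveredEditsReplayCompactionWithMismatchedRegionName() throws Exception {
    testRecoveredEditsReplayCompaction(true);
}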

Example 4 with CompactionDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor in project hbase by apache.

In class RSRpcServices, method doReplayBatchOp.

/**
 * Execute a list of Put/Delete mutations. The function returns OperationStatus instead of
 * constructing MultiResponse to save a possible loop if caller doesn't need MultiResponse.
 * @return an array of OperationStatus which internally contains the OperationStatusCode and the
 *         exceptionMessage if any
 * @deprecated Since 3.0.0, will be removed in 4.0.0. We do not use this method for replaying
 *             edits for secondary replicas any more, see
 *             {@link #replicateToReplica(RpcController, ReplicateWALEntryRequest)}.
 */
@Deprecated
private OperationStatus[] doReplayBatchOp(final HRegion region, final List<MutationReplay> mutations, long replaySeqId) throws IOException {
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
        for (Iterator<MutationReplay> it = mutations.iterator(); it.hasNext(); ) {
            MutationReplay m = it.next();
            if (m.getType() == MutationType.PUT) {
                batchContainsPuts = true;
            } else {
                batchContainsDelete = true;
            }
            NavigableMap<byte[], List<Cell>> map = m.mutation.getFamilyCellMap();
            List<Cell> metaCells = map.get(WALEdit.METAFAMILY);
            if (metaCells != null && !metaCells.isEmpty()) {
                for (Cell metaCell : metaCells) {
                    CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell);
                    boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
                    HRegion hRegion = region;
                    if (compactionDesc != null) {
                        // replay the compaction. Remove the files from stores only if we are the primary
                        // region replica (thus own the files)
                        hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica, replaySeqId);
                        continue;
                    }
                    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell);
                    if (flushDesc != null && !isDefaultReplica) {
                        hRegion.replayWALFlushMarker(flushDesc, replaySeqId);
                        continue;
                    }
                    RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell);
                    if (regionEvent != null && !isDefaultReplica) {
                        hRegion.replayWALRegionEventMarker(regionEvent);
                        continue;
                    }
                    BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell);
                    if (bulkLoadEvent != null) {
                        hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent);
                        continue;
                    }
                }
                it.remove();
            }
        }
        requestCount.increment();
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        return region.batchReplay(mutations.toArray(new MutationReplay[mutations.size()]), replaySeqId);
    } finally {
        updateMutationMetrics(region, before, batchContainsPuts, batchContainsDelete);
    }
}
Also used : BulkLoadDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor) MutationReplay(org.apache.hadoop.hbase.wal.WALSplitUtil.MutationReplay) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) ArrayList(java.util.ArrayList) List(java.util.List) ImmutableList(org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList) CompactionDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell)
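
Compared with Example 1, this newer version moves the timing metrics out of the finally block into an updateMutationMetrics helper that is not shown in the excerpt. A hypothetical reconstruction, modeled only on the finally block of Example 1 (the metrics accessor and any table-level bookkeeping in the real method are assumptions):

// Hypothetical reconstruction, not the actual hbase source. The region parameter is
// kept only to match the call site above; the real helper may use it for table metrics.
private void updateMutationMetrics(HRegion region, long starttime, boolean batchContainsPuts,
        boolean batchContainsDelete) {
    MetricsRegionServer metrics = server.getMetrics(); // assumed accessor
    if (metrics == null) {
        return;
    }
    long after = EnvironmentEdgeManager.currentTime();
    if (batchContainsPuts) {
        metrics.updatePut(after - starttime); // as in Example 1
    }
    if (batchContainsDelete) {
        metrics.updateDelete(after - starttime); // as in Example 1
    }
}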

Example 5 with CompactionDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor in project hbase by apache.

In class HRegion, method replayRecoveredEdits.

/**
 * @param edits File of recovered edits.
 * @param maxSeqIdInStores Maximum sequenceid found in each store. Edits in wal must be larger
 *          than this to be replayed for each store.
 * @return the sequence id of the last edit added to this region out of the recovered edits log or
 *         <code>minSeqId</code> if nothing added from editlogs.
 */
private long replayRecoveredEdits(final Path edits, Map<byte[], Long> maxSeqIdInStores, final CancelableProgressable reporter, FileSystem fs) throws IOException {
    String msg = "Replaying edits from " + edits;
    LOG.info(msg);
    MonitoredTask status = TaskMonitor.get().createStatus(msg);
    status.setStatus("Opening recovered edits");
    WAL.Reader reader = null;
    try {
        reader = WALFactory.createReader(fs, edits, conf);
        long currentEditSeqId = -1;
        long currentReplaySeqId = -1;
        long firstSeqIdInLog = -1;
        long skippedEdits = 0;
        long editsCount = 0;
        long intervalEdits = 0;
        WAL.Entry entry;
        HStore store = null;
        boolean reported_once = false;
        ServerNonceManager ng = this.rsServices == null ? null : this.rsServices.getNonceManager();
        try {
            // How many edits seen before we check elapsed time
            int interval = this.conf.getInt("hbase.hstore.report.interval.edits", 2000);
            // How often to send a progress report (default 1/2 master timeout)
            int period = this.conf.getInt("hbase.hstore.report.period", 300000);
            long lastReport = EnvironmentEdgeManager.currentTime();
            if (coprocessorHost != null) {
                coprocessorHost.preReplayWALs(this.getRegionInfo(), edits);
            }
            while ((entry = reader.next()) != null) {
                WALKey key = entry.getKey();
                WALEdit val = entry.getEdit();
                if (ng != null) {
                    // some test, or nonces disabled
                    ng.reportOperationFromWal(key.getNonceGroup(), key.getNonce(), key.getWriteTime());
                }
                if (reporter != null) {
                    intervalEdits += val.size();
                    if (intervalEdits >= interval) {
                        // Number of edits interval reached
                        intervalEdits = 0;
                        long cur = EnvironmentEdgeManager.currentTime();
                        if (lastReport + period <= cur) {
                            status.setStatus("Replaying edits..." + " skipped=" + skippedEdits + " edits=" + editsCount);
                            // Timeout reached
                            if (!reporter.progress()) {
                                msg = "Progressable reporter failed, stopping replay for region " + this;
                                LOG.warn(msg);
                                status.abort(msg);
                                throw new IOException(msg);
                            }
                            reported_once = true;
                            lastReport = cur;
                        }
                    }
                }
                if (firstSeqIdInLog == -1) {
                    firstSeqIdInLog = key.getSequenceId();
                }
                if (currentEditSeqId > key.getSequenceId()) {
                    // when this condition is true, it means we have a serious defect because we need to
                    // maintain increasing SeqId for WAL edits per region
                    LOG.error(getRegionInfo().getEncodedName() + " : " + "Found decreasing SeqId. PreId=" + currentEditSeqId + " key=" + key + "; edit=" + val);
                } else {
                    currentEditSeqId = key.getSequenceId();
                }
                currentReplaySeqId = (key.getOrigLogSeqNum() > 0) ? key.getOrigLogSeqNum() : currentEditSeqId;
                // Start coprocessor replay here. The coprocessor is for each WALEdit
                // instead of a KeyValue.
                if (coprocessorHost != null) {
                    status.setStatus("Running pre-WAL-restore hook in coprocessors");
                    if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
                        // if bypass this wal entry, ignore it ...
                        continue;
                    }
                }
                boolean checkRowWithinBoundary = false;
                // Check this edit is for this region.
                if (!Bytes.equals(key.getEncodedRegionName(), this.getRegionInfo().getEncodedNameAsBytes())) {
                    checkRowWithinBoundary = true;
                }
                boolean flush = false;
                MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing();
                for (Cell cell : val.getCells()) {
                    // Check this edit is for me. Also, guard against writing the special
                    // METACOLUMN info such as HBASE::CACHEFLUSH entries
                    if (WALEdit.isMetaEditFamily(cell)) {
                        // if region names don't match, skip replaying the compaction marker
                        if (!checkRowWithinBoundary) {
                            // this is a special edit, we should handle it
                            CompactionDescriptor compaction = WALEdit.getCompaction(cell);
                            if (compaction != null) {
                                // replay the compaction
                                replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE);
                            }
                        }
                        skippedEdits++;
                        continue;
                    }
                    // Figure which store the edit is meant for.
                    if (store == null || !CellUtil.matchingFamily(cell, store.getColumnFamilyDescriptor().getName())) {
                        store = getStore(cell);
                    }
                    if (store == null) {
                        // This should never happen.  Perhaps schema was changed between
                        // crash and redeploy?
                        LOG.warn("No family for cell {} in region {}", cell, this);
                        skippedEdits++;
                        continue;
                    }
                    if (checkRowWithinBoundary && !rowIsInRange(this.getRegionInfo(), cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) {
                        LOG.warn("Row of {} is not within region boundary for region {}", cell, this);
                        skippedEdits++;
                        continue;
                    }
                    // Now, figure if we should skip this edit.
                    if (key.getSequenceId() <= maxSeqIdInStores.get(store.getColumnFamilyDescriptor().getName())) {
                        skippedEdits++;
                        continue;
                    }
                    PrivateCellUtil.setSequenceId(cell, currentReplaySeqId);
                    restoreEdit(store, cell, memStoreSizing);
                    editsCount++;
                }
                MemStoreSize mss = memStoreSizing.getMemStoreSize();
                incMemStoreSize(mss);
                flush = isFlushSize(this.memStoreSizing.getMemStoreSize());
                if (flush) {
                    internalFlushcache(null, currentEditSeqId, stores.values(), status, false, FlushLifeCycleTracker.DUMMY);
                }
                if (coprocessorHost != null) {
                    coprocessorHost.postWALRestore(this.getRegionInfo(), key, val);
                }
            }
            if (coprocessorHost != null) {
                coprocessorHost.postReplayWALs(this.getRegionInfo(), edits);
            }
        } catch (EOFException eof) {
            Path p = WALSplitUtil.moveAsideBadEditsFile(walFS, edits);
            msg = "Encountered EOF. Most likely due to Master failure during " + "wal splitting, so we have this data in another edit. Continuing, but renaming " + edits + " as " + p + " for region " + this;
            LOG.warn(msg, eof);
            status.abort(msg);
        } catch (IOException ioe) {
            // If the IOE resulted from bad file format,
            // then this problem is idempotent and retrying won't help
            if (ioe.getCause() instanceof ParseException) {
                Path p = WALSplitUtil.moveAsideBadEditsFile(walFS, edits);
                msg = "File corruption encountered!  " + "Continuing, but renaming " + edits + " as " + p;
                LOG.warn(msg, ioe);
                status.setStatus(msg);
            } else {
                status.abort(StringUtils.stringifyException(ioe));
                // other IO errors may be transient (bad network connection,
                // checksum exception on one datanode, etc).  throw & retry
                throw ioe;
            }
        }
        if (reporter != null && !reported_once) {
            reporter.progress();
        }
        msg = "Applied " + editsCount + ", skipped " + skippedEdits + ", firstSequenceIdInLog=" + firstSeqIdInLog + ", maxSequenceIdInLog=" + currentEditSeqId + ", path=" + edits;
        status.markComplete(msg);
        LOG.debug(msg);
        return currentEditSeqId;
    } finally {
        status.cleanup();
        if (reader != null) {
            reader.close();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) TimeoutIOException(org.apache.hadoop.hbase.exceptions.TimeoutIOException) InterruptedIOException(java.io.InterruptedIOException) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) EOFException(java.io.EOFException) ParseException(java.text.ParseException) CompactionDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask)
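
Relative to Example 2, the replay loop here accumulates per-entry sizes in a local MemStoreSizing, folds them into the region-wide total, and flushes once that total crosses the configured flush size. A hedged, plain-Java illustration of that pattern follows; the threshold and entry sizes are invented, and only the shape mirrors the code above.

// Simplified stand-in for the accumulate-then-maybe-flush pattern in replayRecoveredEdits.
public final class FlushGateSketch {
    public static void main(String[] args) {
        long regionMemStoreBytes = 0L;                  // plays the role of the region-wide MemStoreSizing
        final long flushSizeBytes = 128L * 1024 * 1024; // hypothetical hbase.hregion.memstore.flush.size
        for (long entrySizeBytes : new long[] { 10_000L, 50_000L, 200_000_000L }) {
            regionMemStoreBytes += entrySizeBytes;       // incMemStoreSize(mss) after each WAL entry
            if (regionMemStoreBytes >= flushSizeBytes) { // isFlushSize(...)
                System.out.println("flush after " + regionMemStoreBytes + " bytes");
                regionMemStoreBytes = 0L;                // internalFlushcache(...) drains the memstore
            }
        }
    }
}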

Aggregations

CompactionDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) 11
Path (org.apache.hadoop.fs.Path) 8
ArrayList (java.util.ArrayList) 4
Cell (org.apache.hadoop.hbase.Cell) 4
WAL (org.apache.hadoop.hbase.wal.WAL) 4
IOException (java.io.IOException) 3
FileSystem (org.apache.hadoop.fs.FileSystem) 3
FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) 3
EOFException (java.io.EOFException) 2
InterruptedIOException (java.io.InterruptedIOException) 2
ParseException (java.text.ParseException) 2
List (java.util.List) 2
ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell) 2
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 2
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 2
TimeoutIOException (org.apache.hadoop.hbase.exceptions.TimeoutIOException) 2
MonitoredTask (org.apache.hadoop.hbase.monitoring.MonitoredTask) 2
BulkLoadDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor) 2
RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) 2
WALKey (org.apache.hadoop.hbase.wal.WALKey) 2