
Example 6 with Reader

Use of org.apache.hadoop.hbase.wal.WAL.Reader in project hbase by apache.

From the class WALSplitter, the method splitLogFile:

/**
   * log splitting implementation, splits one log file.
   * @param logfile should be an actual log file.
   * @return false if the split was stopped because progress reporting failed, true otherwise.
   */
@VisibleForTesting
boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) throws IOException {
    Preconditions.checkState(status == null);
    Preconditions.checkArgument(logfile.isFile(), "passed in file status is for something other than a regular file.");
    boolean isCorrupted = false;
    boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", SPLIT_SKIP_ERRORS_DEFAULT);
    int interval = conf.getInt("hbase.splitlog.report.interval.loglines", 1024);
    Path logPath = logfile.getPath();
    boolean outputSinkStarted = false;
    boolean progress_failed = false;
    int editsCount = 0;
    int editsSkipped = 0;
    status = TaskMonitor.get().createStatus("Splitting log file " + logfile.getPath() + " into a temporary staging area.");
    Reader in = null;
    this.fileBeingSplit = logfile;
    try {
        long logLength = logfile.getLen();
        LOG.info("Splitting wal: " + logPath + ", length=" + logLength);
        LOG.info("DistributedLogReplay = " + this.distributedLogReplay);
        status.setStatus("Opening log file");
        if (reporter != null && !reporter.progress()) {
            progress_failed = true;
            return false;
        }
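        // getReader() opens a WAL.Reader for this file via the WALFactory; it returns null when
        // there is nothing to read (for example a zero-length file), in which case we are done.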
        in = getReader(logfile, skipErrors, reporter);
        if (in == null) {
            LOG.warn("Nothing to split in log file " + logPath);
            return true;
        }
        int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
        int numOpenedFilesLastCheck = 0;
        outputSink.setReporter(reporter);
        outputSink.startWriterThreads();
        outputSinkStarted = true;
        Entry entry;
        Long lastFlushedSequenceId = -1L;
        // THIS IS BROKEN!!!! GETTING SERVERNAME FROM PATH IS NOT GOING TO WORK IF LAYOUT CHANGES!!!
        // TODO: Fix.
        ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(logPath);
        failedServerName = (serverName == null) ? "" : serverName.getServerName();
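        // Main read loop: getNextLogLine() pulls the next entry from the WAL.Reader, returning
        // null at end of file; read errors are handled according to the skipErrors setting.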
        while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
            byte[] region = entry.getKey().getEncodedRegionName();
            String encodedRegionNameAsStr = Bytes.toString(region);
            lastFlushedSequenceId = lastFlushedSequenceIds.get(encodedRegionNameAsStr);
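            // First time this region is seen in this WAL: look up how far it had already been
            // flushed so edits that are already persisted can be skipped below.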
            if (lastFlushedSequenceId == null) {
                if (this.distributedLogReplay) {
                    RegionStoreSequenceIds ids = csm.getSplitLogWorkerCoordination().getRegionFlushedSequenceId(failedServerName, encodedRegionNameAsStr);
                    if (ids != null) {
                        lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("DLR Last flushed sequenceid for " + encodedRegionNameAsStr + ": " + TextFormat.shortDebugString(ids));
                        }
                    }
                } else if (sequenceIdChecker != null) {
                    RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(region);
                    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
                    for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) {
                        maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(), storeSeqId.getSequenceId());
                    }
                    regionMaxSeqIdInStores.put(encodedRegionNameAsStr, maxSeqIdInStores);
                    lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("DLS Last flushed sequenceid for " + encodedRegionNameAsStr + ": " + TextFormat.shortDebugString(ids));
                    }
                }
                if (lastFlushedSequenceId == null) {
                    lastFlushedSequenceId = -1L;
                }
                lastFlushedSequenceIds.put(encodedRegionNameAsStr, lastFlushedSequenceId);
            }
            if (lastFlushedSequenceId >= entry.getKey().getSequenceId()) {
                editsSkipped++;
                continue;
            }
            // Don't send Compaction/Close/Open region events to recovered edit type sinks.
            if (entry.getEdit().isMetaEdit() && !outputSink.keepRegionEvent(entry)) {
                editsSkipped++;
                continue;
            }
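            // Hand the entry to the buffering layer; the writer threads started above drain these
            // buffers into per-region recovered-edits output.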
            entryBuffers.appendEntry(entry);
            editsCount++;
            int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck;
            // If sufficient edits have passed, check if we should report progress.
            if (editsCount % interval == 0 || moreWritersFromLastCheck > numOpenedFilesBeforeReporting) {
                numOpenedFilesLastCheck = this.getNumOpenWriters();
                String countsStr = (editsCount - (editsSkipped + outputSink.getSkippedEdits())) + " edits, skipped " + editsSkipped + " edits.";
                status.setStatus("Split " + countsStr);
                if (reporter != null && !reporter.progress()) {
                    progress_failed = true;
                    return false;
                }
            }
        }
    } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
    } catch (CorruptedLogFileException e) {
        LOG.warn("Could not parse, corrupted log file " + logPath, e);
        if (this.csm != null) {
            // Some tests pass in a csm of null.
            this.csm.getSplitLogWorkerCoordination().markCorrupted(rootDir, logfile.getPath().getName(), fs);
        } else {
            // for tests only
            ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
        }
        isCorrupted = true;
    } catch (IOException e) {
        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
        throw e;
    } finally {
        LOG.debug("Finishing writing output logs and closing down.");
        try {
            if (null != in) {
                in.close();
            }
        } catch (IOException exception) {
            LOG.warn("Could not close wal reader: " + exception.getMessage());
            LOG.debug("exception details", exception);
        }
        try {
            if (outputSinkStarted) {
                // Set progress_failed to true first; the next statement resets it on success, so if
                // finishWritingAndClose() throws, progress_failed keeps the right value.
                progress_failed = true;
                progress_failed = outputSink.finishWritingAndClose() == null;
            }
        } finally {
            String msg = "Processed " + editsCount + " edits across " + outputSink.getNumberOfRecoveredRegions() + " regions; edits skipped=" + editsSkipped + "; log file=" + logPath + ", length=" + // See if length got updated post lease recovery
            logfile.getLen() + ", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
            LOG.info(msg);
            status.markComplete(msg);
        }
    }
    return !progress_failed;
}
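
For orientation, here is a minimal sketch of the underlying WAL.Reader pattern that splitLogFile builds on through its getReader()/getNextLogLine() helpers: open a reader for one WAL file, iterate entries until next() returns null, and close the reader. The class and method names below (WalReadSketch, countCells) are illustrative only, and an already-initialized WALFactory, FileSystem, and Path are assumed to be supplied by the caller; this is not part of WALSplitter.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalReadSketch {

    /**
     * Counts the cells in every edit of a single WAL file.
     * Illustrative sketch only; assumes the caller provides an initialized WALFactory.
     */
    static long countCells(WALFactory wals, FileSystem fs, Path walFile) throws IOException {
        long cells = 0;
        // WAL.Reader extends Closeable, so try-with-resources closes it for us.
        try (WAL.Reader in = wals.createReader(fs, walFile)) {
            WAL.Entry entry;
            // next() returns null once the end of the file is reached.
            while ((entry = in.next()) != null) {
                cells += entry.getEdit().size();
            }
        }
        return cells;
    }
}

In splitLogFile itself the same loop feeds each entry into entryBuffers instead of counting cells, and the reader is closed in the finally block shown above.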
Also used:
Path (org.apache.hadoop.fs.Path)
InterruptedIOException (java.io.InterruptedIOException)
Reader (org.apache.hadoop.hbase.wal.WAL.Reader)
IOException (java.io.IOException)
MultipleIOException (org.apache.hadoop.io.MultipleIOException)
RegionStoreSequenceIds (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)
StoreSequenceId (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId)
Entry (org.apache.hadoop.hbase.wal.WAL.Entry)
WALEntry (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry)
ServerName (org.apache.hadoop.hbase.ServerName)
AtomicLong (java.util.concurrent.atomic.AtomicLong)
RemoteException (org.apache.hadoop.ipc.RemoteException)
Map (java.util.Map)
TreeMap (java.util.TreeMap)
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)
VisibleForTesting (com.google.common.annotations.VisibleForTesting)

Aggregations

Reader (org.apache.hadoop.hbase.wal.WAL.Reader): 6
Path (org.apache.hadoop.fs.Path): 4
FaultyProtobufLogReader (org.apache.hadoop.hbase.regionserver.wal.FaultyProtobufLogReader): 4
ProtobufLogReader (org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader): 4
Entry (org.apache.hadoop.hbase.wal.WAL.Entry): 4
IOException (java.io.IOException): 2
InterruptedIOException (java.io.InterruptedIOException): 2
Map (java.util.Map): 2
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString): 2
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
ImmutableList (com.google.common.collect.ImmutableList): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1
EOFException (java.io.EOFException): 1
FileNotFoundException (java.io.FileNotFoundException): 1
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
List (java.util.List): 1
TreeMap (java.util.TreeMap): 1