Example 56 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class WALSplitter, method splitLogFile.

/**
   * Log splitting implementation; splits one log file.
   * @param logfile should be an actual log file.
   */
@VisibleForTesting
boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) throws IOException {
    Preconditions.checkState(status == null);
    Preconditions.checkArgument(logfile.isFile(), "passed in file status is for something other than a regular file.");
    boolean isCorrupted = false;
    boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", SPLIT_SKIP_ERRORS_DEFAULT);
    int interval = conf.getInt("hbase.splitlog.report.interval.loglines", 1024);
    Path logPath = logfile.getPath();
    boolean outputSinkStarted = false;
    boolean progress_failed = false;
    int editsCount = 0;
    int editsSkipped = 0;
    status = TaskMonitor.get().createStatus("Splitting log file " + logfile.getPath() + " into a temporary staging area.");
    Reader in = null;
    this.fileBeingSplit = logfile;
    try {
        long logLength = logfile.getLen();
        LOG.info("Splitting wal: " + logPath + ", length=" + logLength);
        LOG.info("DistributedLogReplay = " + this.distributedLogReplay);
        status.setStatus("Opening log file");
        if (reporter != null && !reporter.progress()) {
            progress_failed = true;
            return false;
        }
        in = getReader(logfile, skipErrors, reporter);
        if (in == null) {
            LOG.warn("Nothing to split in log file " + logPath);
            return true;
        }
        int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
        int numOpenedFilesLastCheck = 0;
        outputSink.setReporter(reporter);
        outputSink.startWriterThreads();
        outputSinkStarted = true;
        Entry entry;
        Long lastFlushedSequenceId = -1L;
        // THIS IS BROKEN!!!! GETTING SERVERNAME FROM PATH IS NOT GOING TO WORK IF LAYOUT CHANGES!!!
        // TODO: Fix.
        ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(logPath);
        failedServerName = (serverName == null) ? "" : serverName.getServerName();
        while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
            byte[] region = entry.getKey().getEncodedRegionName();
            String encodedRegionNameAsStr = Bytes.toString(region);
            lastFlushedSequenceId = lastFlushedSequenceIds.get(encodedRegionNameAsStr);
            if (lastFlushedSequenceId == null) {
                if (this.distributedLogReplay) {
                    RegionStoreSequenceIds ids = csm.getSplitLogWorkerCoordination().getRegionFlushedSequenceId(failedServerName, encodedRegionNameAsStr);
                    if (ids != null) {
                        lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("DLR Last flushed sequenceid for " + encodedRegionNameAsStr + ": " + TextFormat.shortDebugString(ids));
                        }
                    }
                } else if (sequenceIdChecker != null) {
                    RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(region);
                    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
                    for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) {
                        maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(), storeSeqId.getSequenceId());
                    }
                    regionMaxSeqIdInStores.put(encodedRegionNameAsStr, maxSeqIdInStores);
                    lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("DLS Last flushed sequenceid for " + encodedRegionNameAsStr + ": " + TextFormat.shortDebugString(ids));
                    }
                }
                if (lastFlushedSequenceId == null) {
                    lastFlushedSequenceId = -1L;
                }
                lastFlushedSequenceIds.put(encodedRegionNameAsStr, lastFlushedSequenceId);
            }
            if (lastFlushedSequenceId >= entry.getKey().getSequenceId()) {
                editsSkipped++;
                continue;
            }
            // Don't send Compaction/Close/Open region events to recovered edit type sinks.
            if (entry.getEdit().isMetaEdit() && !outputSink.keepRegionEvent(entry)) {
                editsSkipped++;
                continue;
            }
            entryBuffers.appendEntry(entry);
            editsCount++;
            int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck;
            // If sufficient edits have passed, check if we should report progress.
            if (editsCount % interval == 0 || moreWritersFromLastCheck > numOpenedFilesBeforeReporting) {
                numOpenedFilesLastCheck = this.getNumOpenWriters();
                String countsStr = (editsCount - (editsSkipped + outputSink.getSkippedEdits())) + " edits, skipped " + editsSkipped + " edits.";
                status.setStatus("Split " + countsStr);
                if (reporter != null && !reporter.progress()) {
                    progress_failed = true;
                    return false;
                }
            }
        }
    } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
    } catch (CorruptedLogFileException e) {
        LOG.warn("Could not parse, corrupted log file " + logPath, e);
        if (this.csm != null) {
            // Some tests pass in a csm of null.
            this.csm.getSplitLogWorkerCoordination().markCorrupted(rootDir, logfile.getPath().getName(), fs);
        } else {
            // for tests only
            ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
        }
        isCorrupted = true;
    } catch (IOException e) {
        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
        throw e;
    } finally {
        LOG.debug("Finishing writing output logs and closing down.");
        try {
            if (null != in) {
                in.close();
            }
        } catch (IOException exception) {
            LOG.warn("Could not close wal reader: " + exception.getMessage());
            LOG.debug("exception details", exception);
        }
        try {
            if (outputSinkStarted) {
                // Set progress_failed to true in advance: the next statement overwrites it on success,
                // but if finishWritingAndClose() throws, progress_failed keeps the right value.
                progress_failed = true;
                progress_failed = outputSink.finishWritingAndClose() == null;
            }
        } finally {
            String msg = "Processed " + editsCount + " edits across " + outputSink.getNumberOfRecoveredRegions()
                + " regions; edits skipped=" + editsSkipped + "; log file=" + logPath
                // See if length got updated post lease recovery
                + ", length=" + logfile.getLen() + ", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
            LOG.info(msg);
            status.markComplete(msg);
        }
    }
    return !progress_failed;
}
Also used : Path(org.apache.hadoop.fs.Path) InterruptedIOException(java.io.InterruptedIOException) Reader(org.apache.hadoop.hbase.wal.WAL.Reader) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) RegionStoreSequenceIds(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) StoreSequenceId(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId) Entry(org.apache.hadoop.hbase.wal.WAL.Entry) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) ServerName(org.apache.hadoop.hbase.ServerName) AtomicLong(java.util.concurrent.atomic.AtomicLong) RemoteException(org.apache.hadoop.ipc.RemoteException) Map(java.util.Map) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
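
The RemoteException-specific part of this example is the IOException catch block: an exception that arrives wrapped in RemoteException is unwrapped before being rethrown, so the caller sees the real server-side exception type rather than the RPC wrapper. Below is a minimal standalone sketch of that unwrap-and-rethrow pattern; the readWal helper is a hypothetical stand-in for the WAL reader, not HBase code.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;

public class UnwrapAndRethrowSketch {

    // Hypothetical stand-in for opening and reading a WAL; HDFS failures may
    // surface here wrapped in RemoteException.
    static void readWal(FileSystem fs, Path walPath) throws IOException {
        fs.open(walPath).close();
    }

    // Unwrap RemoteException so callers see the underlying server-side
    // exception (e.g. a FileNotFoundException) instead of the wrapper.
    public static void readWithUnwrap(FileSystem fs, Path walPath) throws IOException {
        try {
            readWal(fs, walPath);
        } catch (IOException e) {
            throw e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
        }
    }
}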

Example 57 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class ProtobufUtil, method toException.

/**
   * Convert a stringified protocol buffer exception Parameter to a Java Throwable.
   *
   * @param parameter the protocol buffer Parameter to convert
   * @return the converted Throwable
   * @throws IOException if the parameter could not be deserialized
   */
@SuppressWarnings("unchecked")
public static Throwable toException(final NameBytesPair parameter) throws IOException {
    if (parameter == null || !parameter.hasValue())
        return null;
    String desc = parameter.getValue().toStringUtf8();
    String type = parameter.getName();
    try {
        Class<? extends Throwable> c = (Class<? extends Throwable>) Class.forName(type, true, CLASS_LOADER);
        Constructor<? extends Throwable> cn = null;
        try {
            cn = c.getDeclaredConstructor(String.class);
            return cn.newInstance(desc);
        } catch (NoSuchMethodException e) {
            // Could be a raw RemoteException. See HBASE-8987.
            cn = c.getDeclaredConstructor(String.class, String.class);
            return cn.newInstance(type, desc);
        }
    } catch (Exception e) {
        throw new IOException(e);
    }
}
Also used : ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) InvalidProtocolBufferException(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) RemoteException(org.apache.hadoop.ipc.RemoteException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException)
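
The interesting wrinkle here is the fallback constructor lookup: most Throwables expose a (String message) constructor, but a raw RemoteException only has the (String className, String message) form, which is why the NoSuchMethodException branch exists (HBASE-8987). The following is a simplified, self-contained sketch of the same reconstruction logic without the shaded protobuf types; class and method names are illustrative.

import java.io.IOException;
import java.lang.reflect.Constructor;
import org.apache.hadoop.ipc.RemoteException;

public class RebuildExceptionSketch {

    // Simplified (type, message) -> Throwable reconstruction: try the usual
    // single-String constructor first, then fall back to the two-String
    // constructor that a raw RemoteException requires.
    public static Throwable rebuild(String type, String desc) throws IOException {
        try {
            Class<? extends Throwable> c = Class.forName(type).asSubclass(Throwable.class);
            try {
                Constructor<? extends Throwable> cn = c.getDeclaredConstructor(String.class);
                return cn.newInstance(desc);
            } catch (NoSuchMethodException e) {
                Constructor<? extends Throwable> cn = c.getDeclaredConstructor(String.class, String.class);
                return cn.newInstance(type, desc);
            }
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    public static void main(String[] args) throws IOException {
        // RemoteException has no single-String constructor, so this exercises the fallback path.
        Throwable t = rebuild(RemoteException.class.getName(), "region server went away");
        System.out.println(t.getClass().getSimpleName() + ": " + t.getMessage());
    }
}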

Example 58 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class CleanerChore, method checkAndDeleteFiles.

/**
   * Run the given files through each of the cleaners to see if they should be deleted, deleting
   * them if necessary.
   * @param files List of FileStatus for the files to check (and possibly delete)
   * @return true iff successfully deleted all files
   */
private boolean checkAndDeleteFiles(List<FileStatus> files) {
    // first check to see if the path is valid
    List<FileStatus> validFiles = Lists.newArrayListWithCapacity(files.size());
    List<FileStatus> invalidFiles = Lists.newArrayList();
    for (FileStatus file : files) {
        if (validate(file.getPath())) {
            validFiles.add(file);
        } else {
            LOG.warn("Found a wrongly formatted file: " + file.getPath() + " - will delete it.");
            invalidFiles.add(file);
        }
    }
    Iterable<FileStatus> deletableValidFiles = validFiles;
    // check each of the cleaners for the valid files
    for (T cleaner : cleanersChain) {
        if (cleaner.isStopped() || this.getStopper().isStopped()) {
            LOG.warn("A file cleaner" + this.getName() + " is stopped, won't delete any more files in:" + this.oldFileDir);
            return false;
        }
        Iterable<FileStatus> filteredFiles = cleaner.getDeletableFiles(deletableValidFiles);
        // trace which cleaner is holding on to each file
        if (LOG.isTraceEnabled()) {
            ImmutableSet<FileStatus> filteredFileSet = ImmutableSet.copyOf(filteredFiles);
            for (FileStatus file : deletableValidFiles) {
                if (!filteredFileSet.contains(file)) {
                    LOG.trace(file.getPath() + " is not deletable according to: " + cleaner);
                }
            }
        }
        deletableValidFiles = filteredFiles;
    }
    Iterable<FileStatus> filesToDelete = Iterables.concat(invalidFiles, deletableValidFiles);
    int deletedFileCount = 0;
    for (FileStatus file : filesToDelete) {
        Path filePath = file.getPath();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Removing: " + filePath + " from archive");
        }
        try {
            boolean success = this.fs.delete(filePath, false);
            if (success) {
                deletedFileCount++;
            } else {
                LOG.warn("Attempted to delete:" + filePath + ", but couldn't. Run cleaner chain and attempt to delete on next pass.");
            }
        } catch (IOException e) {
            e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
            LOG.warn("Error while deleting: " + filePath, e);
        }
    }
    return deletedFileCount == files.size();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException)
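
The delete loop treats RemoteException as a logging concern rather than a fatal one: the wrapper is unwrapped so the warning shows the real HDFS cause, and the loop moves on to the next file. A compact sketch of that best-effort pattern follows; deleteAll is an illustrative helper, not part of the CleanerChore API.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;

public class BestEffortDeleteSketch {

    // Best-effort delete loop: unwrap RemoteException so the log shows the
    // server-side cause, but keep going so one bad file does not stop the pass.
    public static int deleteAll(FileSystem fs, List<Path> paths) {
        int deleted = 0;
        for (Path p : paths) {
            try {
                if (fs.delete(p, false)) {
                    deleted++;
                }
            } catch (IOException e) {
                IOException cause =
                    e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
                System.err.println("Error while deleting " + p + ": " + cause);
            }
        }
        return deleted;
    }
}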

Example 59 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class FSUtils, method invokeSetStoragePolicy.

/*
   * All args have been checked and are good. Run the setStoragePolicy invocation.
   */
private static void invokeSetStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy) {
    Method m = null;
    try {
        m = fs.getClass().getDeclaredMethod("setStoragePolicy", new Class<?>[] { Path.class, String.class });
        m.setAccessible(true);
    } catch (NoSuchMethodException e) {
        LOG.info("FileSystem doesn't support setStoragePolicy; HDFS-6584 not available " + "(hadoop-2.6.0+): " + e.getMessage());
    } catch (SecurityException e) {
        LOG.info("Don't have access to setStoragePolicy on FileSystems; HDFS-6584 not available " + "(hadoop-2.6.0+): ", e);
        // could happen on setAccessible()
        m = null;
    }
    if (m != null) {
        try {
            m.invoke(fs, path, storagePolicy);
            LOG.info("Set storagePolicy=" + storagePolicy + " for path=" + path);
        } catch (Exception e) {
            // check for lack of HDFS-7228
            boolean probablyBadPolicy = false;
            if (e instanceof InvocationTargetException) {
                final Throwable exception = e.getCause();
                if (exception instanceof RemoteException && HadoopIllegalArgumentException.class.getName().equals(((RemoteException) exception).getClassName())) {
                    LOG.warn("Given storage policy, '" + storagePolicy + "', was rejected and probably " + "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " + "trying to use SSD related policies then you're likely missing HDFS-7228. For " + "more information see the 'ArchivalStorage' docs for your Hadoop release.");
                    LOG.debug("More information about the invalid storage policy.", exception);
                    probablyBadPolicy = true;
                }
            }
            if (!probablyBadPolicy) {
                // This swallows FNFE; should we be throwing it? It seems more likely to indicate dev
                // misuse than a runtime problem with HDFS.
                LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) Method(java.lang.reflect.Method) RemoteException(org.apache.hadoop.ipc.RemoteException) URISyntaxException(java.net.URISyntaxException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) EOFException(java.io.EOFException) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) FileNotFoundException(java.io.FileNotFoundException) InvocationTargetException(java.lang.reflect.InvocationTargetException) InterruptedIOException(java.io.InterruptedIOException) HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) ExecutionException(java.util.concurrent.ExecutionException) InvocationTargetException(java.lang.reflect.InvocationTargetException)
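
Here RemoteException is not unwrapped at all: the code only needs to know which server-side exception class came back, and RemoteException.getClassName() carries that as a string that can be compared directly. A small sketch of that check in isolation; the class and method names are illustrative.

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.ipc.RemoteException;

public class ClassNameCheckSketch {

    // Mirror of the storage-policy check: decide whether a remote failure was
    // the server rejecting the policy, using only the wrapped class name.
    public static boolean isBadPolicyRejection(Throwable t) {
        return t instanceof RemoteException
            && HadoopIllegalArgumentException.class.getName()
                .equals(((RemoteException) t).getClassName());
    }

    public static void main(String[] args) {
        RemoteException re = new RemoteException(
            HadoopIllegalArgumentException.class.getName(), "unknown storage policy");
        System.out.println(isBadPolicyRejection(re)); // prints true
    }
}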

Example 60 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class FSUtils, method checkFileSystemAvailable.

/**
   * Checks to see if the specified file system is available.
   *
   * @param fs filesystem
   * @throws IOException if the file system is not available
   */
public static void checkFileSystemAvailable(final FileSystem fs) throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
        return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
        if (dfs.exists(new Path("/"))) {
            return;
        }
    } catch (IOException e) {
        exception = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
    }
    try {
        fs.close();
    } catch (Exception e) {
        LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
}
Also used : Path(org.apache.hadoop.fs.Path) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RemoteException(org.apache.hadoop.ipc.RemoteException) URISyntaxException(java.net.URISyntaxException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) EOFException(java.io.EOFException) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) FileNotFoundException(java.io.FileNotFoundException) InvocationTargetException(java.lang.reflect.InvocationTargetException) InterruptedIOException(java.io.InterruptedIOException) HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) ExecutionException(java.util.concurrent.ExecutionException)
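
Besides the no-argument unwrapRemoteException() used above, RemoteException also offers a selective overload, unwrapRemoteException(Class...), which re-creates the wrapped exception only when its class is one of the given types and otherwise returns the RemoteException itself. Below is a short sketch of using it for a similar filesystem probe; the rootExists helper is illustrative, not HBase code.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

public class SelectiveUnwrapSketch {

    // Probe the filesystem root; selectively unwrap only the remote exception
    // types we know how to handle, leaving anything else wrapped.
    public static boolean rootExists(FileSystem fs) throws IOException {
        try {
            return fs.exists(new Path("/"));
        } catch (RemoteException re) {
            throw re.unwrapRemoteException(AccessControlException.class,
                FileNotFoundException.class);
        }
    }
}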

Aggregations

RemoteException (org.apache.hadoop.ipc.RemoteException): 99
IOException (java.io.IOException): 53
Test (org.junit.Test): 39
Path (org.apache.hadoop.fs.Path): 36
Configuration (org.apache.hadoop.conf.Configuration): 20
FileNotFoundException (java.io.FileNotFoundException): 19
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 12
InterruptedIOException (java.io.InterruptedIOException): 10
AccessControlException (org.apache.hadoop.security.AccessControlException): 10
ServerName (org.apache.hadoop.hbase.ServerName): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7
EOFException (java.io.EOFException): 6
ArrayList (java.util.ArrayList): 6
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6