
Example 71 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class ZkSplitLogWorkerCoordination method grabTask.

/**
 * Try to grab a 'lock' on the task zk node to own and execute the task.
 * <p>
 * @param path zk node for the task
 * @return true if the task was successfully grabbed; false otherwise
 */
private boolean grabTask(String path) {
    Stat stat = new Stat();
    byte[] data;
    synchronized (grabTaskLock) {
        currentTask = path;
        workerInGrabTask = true;
        if (Thread.interrupted()) {
            return false;
        }
    }
    try {
        try {
            if ((data = ZKUtil.getDataNoWatch(watcher, path, stat)) == null) {
                SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.increment();
                return false;
            }
        } catch (KeeperException e) {
            LOG.warn("Failed to get data for znode " + path, e);
            SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
            return false;
        }
        SplitLogTask slt;
        try {
            slt = SplitLogTask.parseFrom(data);
        } catch (DeserializationException e) {
            LOG.warn("Failed parse data for znode " + path, e);
            SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
            return false;
        }
        if (!slt.isUnassigned()) {
            SplitLogCounters.tot_wkr_failed_to_grab_task_owned.increment();
            return false;
        }
        // Attempt to own the task via a conditional update keyed on the znode version read
        // above; a negative return means another worker claimed it first.
        currentVersion = attemptToOwnTask(true, watcher, server.getServerName(), path, stat.getVersion());
        if (currentVersion < 0) {
            SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.increment();
            return false;
        }
        // Rescan znodes are marker tasks created by the split log manager; acknowledge them
        // as done immediately rather than treating them as real split work.
        if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
            ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
            splitTaskDetails.setTaskNode(currentTask);
            splitTaskDetails.setCurTaskZKVersion(new MutableInt(currentVersion));
            endTask(new SplitLogTask.Done(server.getServerName()), SplitLogCounters.tot_wkr_task_acquired_rescan, splitTaskDetails);
            return false;
        }
        LOG.info("worker " + server.getServerName() + " acquired task " + path);
        SplitLogCounters.tot_wkr_task_acquired.increment();
        getDataSetWatchAsync();
        submitTask(path, currentVersion, reportPeriod);
        // After a successful submit, sleep briefly so other region servers get a chance to
        // grab the remaining tasks.
        try {
            int sleepTime = RandomUtils.nextInt(0, 500) + 500;
            Thread.sleep(sleepTime);
        } catch (InterruptedException e) {
            LOG.warn("Interrupted while yielding for other region servers", e);
            Thread.currentThread().interrupt();
        }
        return true;
    } finally {
        synchronized (grabTaskLock) {
            workerInGrabTask = false;
            // clear the interrupt from stopTask() otherwise the next task will
            // suffer
            Thread.interrupted();
        }
    }
}
Also used : Stat(org.apache.zookeeper.data.Stat) MutableInt(org.apache.commons.lang3.mutable.MutableInt) SplitLogTask(org.apache.hadoop.hbase.SplitLogTask) KeeperException(org.apache.zookeeper.KeeperException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
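
The race between workers in grabTask is settled by ZooKeeper's versioned writes: attemptToOwnTask can only succeed if the znode version still matches the one read by getDataNoWatch above. Below is a minimal sketch of that compare-and-swap idiom against the raw ZooKeeper API; tryClaim is a hypothetical helper, not the HBase implementation.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Hypothetical helper: claim a task znode only if nobody has modified it since we read it.
static int tryClaim(ZooKeeper zk, String path, byte[] ownedState, int readVersion)
        throws KeeperException, InterruptedException {
    try {
        // setData is conditional on the version; BADVERSION means another worker won the race.
        return zk.setData(path, ownedState, readVersion).getVersion();
    } catch (KeeperException.BadVersionException e) {
        return -1; // lost the race, mirroring attemptToOwnTask's negative return
    }
}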

Example 72 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class MasterFileSystem method checkRootDir.

/**
 * Check hbase.rootdir: make sure it exists and is wholesome, bootstrapping it (populating
 * the directory with the necessary startup files) if needed.
 * @param rd the configured hbase.rootdir path
 * @param c the cluster configuration
 * @param fs the filesystem holding the root directory
 */
private void checkRootDir(final Path rd, final Configuration c, final FileSystem fs) throws IOException {
    int threadWakeFrequency = c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
    // If FS is in safe mode wait till out of it.
    FSUtils.waitOnSafeMode(c, threadWakeFrequency);
    // Filesystem is good. Go ahead and check for hbase.rootdir.
    FileStatus status;
    try {
        status = fs.getFileStatus(rd);
    } catch (FileNotFoundException e) {
        status = null;
    }
    int versionFileWriteAttempts = c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
    try {
        if (status == null) {
            if (!fs.mkdirs(rd)) {
                throw new IOException("Can not create configured '" + HConstants.HBASE_DIR + "' " + rd);
            }
            // DFS leaves safe mode with 0 DNs when there are 0 blocks.
            // We used to handle this by checking the current DN count and waiting until
            // it is nonzero. With security, the check for datanode count doesn't work --
            // it is a privileged op. So instead we adopt the strategy of the jobtracker
            // and simply retry file creation during bootstrap indefinitely. As soon as
            // there is one datanode it will succeed. Permission problems should have
            // already been caught by mkdirs above.
            FSUtils.setVersion(fs, rd, threadWakeFrequency, versionFileWriteAttempts);
        } else {
            if (!status.isDirectory()) {
                throw new IllegalArgumentException("Configured '" + HConstants.HBASE_DIR + "' " + rd + " is not a directory.");
            }
            // as above
            FSUtils.checkVersion(fs, rd, true, threadWakeFrequency, versionFileWriteAttempts);
        }
    } catch (DeserializationException de) {
        LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for '{}' {}", HConstants.HBASE_DIR, rd, de);
        throw new IOException(de);
    } catch (IllegalArgumentException iae) {
        LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for '{}' {}", HConstants.HBASE_DIR, rd, iae);
        throw iae;
    }
    // Make sure cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd, threadWakeFrequency)) {
        FSUtils.setClusterId(fs, rd, new ClusterId(), threadWakeFrequency);
    }
    clusterId = FSUtils.getClusterId(fs, rd);
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) ClusterId(org.apache.hadoop.hbase.ClusterId) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
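
Note the translation at the module boundary: DeserializationException is a checked, HBase-internal exception, so checkRootDir wraps it in IOException before letting it escape (Example 74 below does the same). A minimal sketch of that pattern, reusing ClusterId.parseFrom from Example 75:

import java.io.IOException;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

// Sketch: callers of this hypothetical boundary method only ever see IOException.
static ClusterId readClusterId(byte[] raw) throws IOException {
    try {
        return ClusterId.parseFrom(raw); // may throw DeserializationException
    } catch (DeserializationException e) {
        throw new IOException("Corrupt or unreadable cluster id", e);
    }
}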

Example 73 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class FSTableDescriptors method getTableDescriptorFromFs.

private static Optional<Pair<FileStatus, TableDescriptor>> getTableDescriptorFromFs(FileSystem fs, Path tableDir, boolean readonly) throws IOException {
    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
    FileStatus[] descFiles = CommonFSUtils.listStatus(fs, tableInfoDir, TABLEINFO_PATHFILTER);
    if (descFiles == null || descFiles.length < 1) {
        return Optional.empty();
    }
    Arrays.sort(descFiles, TABLEINFO_FILESTATUS_COMPARATOR);
    int i = 0;
    TableDescriptor td = null;
    FileStatus descFile = null;
    for (; i < descFiles.length; i++) {
        descFile = descFiles[i];
        Path file = descFile.getPath();
        // get file length from file name if present
        int fileLength = getTableInfoSequenceIdAndFileLength(file).fileLength;
        byte[] content = new byte[fileLength > 0 ? fileLength : Ints.checkedCast(descFile.getLen())];
        try (FSDataInputStream in = fs.open(file)) {
            in.readFully(content);
        } catch (EOFException e) {
            LOG.info("Failed to load file {} due to EOF, it should be half written: {}", file, e.toString());
            if (!readonly) {
                deleteMalformedFile(fs, file);
            }
            continue;
        }
        try {
            td = TableDescriptorBuilder.parseFrom(content);
            break;
        } catch (DeserializationException e) {
            LOG.info("Failed to parse file {} due to malformed protobuf message: {}", file, e.toString());
            if (!readonly) {
                deleteMalformedFile(fs, file);
            }
        }
    }
    if (!readonly) {
        // start at i + 1 to skip the descriptor we just loaded
        for (i = i + 1; i < descFiles.length; i++) {
            Path file = descFiles[i].getPath();
            LOG.info("Delete old table descriptor file {}", file);
            if (!fs.delete(file, false)) {
                LOG.info("Failed to delete old table descriptor file {}", file);
            }
        }
    }
    return td != null ? Optional.of(Pair.newPair(descFile, td)) : Optional.empty();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) EOFException(java.io.EOFException) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
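
The loop distinguishes two failure modes: a file shorter than its recorded length (EOFException from readFully, i.e. half-written) and a full-length file whose bytes are not a valid protobuf (DeserializationException from parseFrom). A minimal sketch of the first check in isolation, with a hypothetical helper and an assumed known expected length:

import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: a null return means the file is shorter than expected (half-written).
static byte[] readExactly(FileSystem fs, Path file, int expectedLength) throws IOException {
    byte[] content = new byte[expectedLength];
    try (FSDataInputStream in = fs.open(file)) {
        in.readFully(content); // throws EOFException if the stream ends early
        return content;
    } catch (EOFException e) {
        return null; // caller decides whether to delete or skip the file
    }
}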

Example 74 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class VisibilityLabelsCache method refreshUserAuthsCache.

public void refreshUserAuthsCache(byte[] data) throws IOException {
    MultiUserAuthorizations multiUserAuths = null;
    try {
        multiUserAuths = VisibilityUtils.readUserAuthsFromZKData(data);
    } catch (DeserializationException dse) {
        throw new IOException(dse);
    }
    this.lock.writeLock().lock();
    try {
        this.userAuths.clear();
        this.groupAuths.clear();
        for (UserAuthorizations userAuths : multiUserAuths.getUserAuthsList()) {
            String user = Bytes.toString(userAuths.getUser().toByteArray());
            if (AuthUtil.isGroupPrincipal(user)) {
                this.groupAuths.put(AuthUtil.getGroupName(user), new HashSet<>(userAuths.getAuthList()));
            } else {
                this.userAuths.put(user, new HashSet<>(userAuths.getAuthList()));
            }
        }
    } finally {
        this.lock.writeLock().unlock();
    }
}
Also used : IOException(java.io.IOException) UserAuthorizations(org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.UserAuthorizations) MultiUserAuthorizations(org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.MultiUserAuthorizations) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
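
The refresh itself follows the standard ReadWriteLock idiom: protobuf parsing happens before any lock is taken, and the write lock is held only for the clear-and-repopulate step so readers are blocked as briefly as possible. A minimal sketch of that discipline with a hypothetical cache class:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical cache illustrating the lock discipline used by refreshUserAuthsCache.
class AuthCacheSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final Map<String, Integer> auths = new HashMap<>();

    void refresh(Map<String, Integer> parsed) { // parse before locking, as above
        lock.writeLock().lock();
        try {
            auths.clear();
            auths.putAll(parsed);
        } finally {
            lock.writeLock().unlock();
        }
    }

    Integer lookup(String user) {
        lock.readLock().lock();
        try {
            return auths.get(user);
        } finally {
            lock.readLock().unlock();
        }
    }
}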

Example 75 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class ClusterId method parseFrom.

/**
 * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
 * @return An instance of {@link ClusterId} made from <code>bytes</code>
 * @throws DeserializationException if the bytes carry the pb magic prefix but cannot be parsed
 * @see #toByteArray()
 */
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
    if (ProtobufUtil.isPBMagicPrefix(bytes)) {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
        ClusterIdProtos.ClusterId cid = null;
        try {
            ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
            cid = builder.build();
        } catch (IOException e) {
            throw new DeserializationException(e);
        }
        return convert(cid);
    } else {
        // Presume it was written out this way, the old way.
        return new ClusterId(Bytes.toString(bytes));
    }
}
Also used : ClusterIdProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterIdProtos) IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
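
Because parseFrom falls back to treating un-prefixed bytes as the legacy plain-string format, both the old and the new on-disk encodings round-trip. A short usage sketch, assuming ClusterId.toString() returns the raw id string:

import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;

static void roundTrip() throws DeserializationException {
    ClusterId original = new ClusterId();
    // pb branch: toByteArray() prepends the pb magic prefix
    ClusterId viaPb = ClusterId.parseFrom(original.toByteArray());
    // legacy branch: no magic prefix, parsed as the old plain-string format
    ClusterId viaLegacy = ClusterId.parseFrom(Bytes.toBytes(original.toString()));
}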

Aggregations

DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException): 83
IOException (java.io.IOException): 57
InvalidProtocolBufferException (org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException): 15
FilterProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos): 13
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 12
KeeperException (org.apache.zookeeper.KeeperException): 12
ArrayList (java.util.ArrayList): 11
ServerName (org.apache.hadoop.hbase.ServerName): 9
Cell (org.apache.hadoop.hbase.Cell): 8
CompareOperator (org.apache.hadoop.hbase.CompareOperator): 8
InterruptedIOException (java.io.InterruptedIOException): 7
CellVisibility (org.apache.hadoop.hbase.security.visibility.CellVisibility): 7
ByteArrayInputStream (java.io.ByteArrayInputStream): 6
Tag (org.apache.hadoop.hbase.Tag): 6
HBaseProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos): 6
Map (java.util.Map): 5
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 5
TableName (org.apache.hadoop.hbase.TableName): 5
FilterList (org.apache.hadoop.hbase.filter.FilterList): 5
List (java.util.List): 4