use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
the class ZkSplitLogWorkerCoordination method grabTask.
/**
 * Try to grab a 'lock' on the task zk node to own and execute the task.
 * <p>
 * @param path zk node for the task
 * @return true if the task was successfully grabbed, false otherwise
 */
private boolean grabTask(String path) {
  Stat stat = new Stat();
  byte[] data;
  synchronized (grabTaskLock) {
    currentTask = path;
    workerInGrabTask = true;
    if (Thread.interrupted()) {
      return false;
    }
  }
  try {
    try {
      if ((data = ZKUtil.getDataNoWatch(watcher, path, stat)) == null) {
        SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.increment();
        return false;
      }
    } catch (KeeperException e) {
      LOG.warn("Failed to get data for znode " + path, e);
      SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
      return false;
    }
    SplitLogTask slt;
    try {
      slt = SplitLogTask.parseFrom(data);
    } catch (DeserializationException e) {
      LOG.warn("Failed parse data for znode " + path, e);
      SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
      return false;
    }
    if (!slt.isUnassigned()) {
      SplitLogCounters.tot_wkr_failed_to_grab_task_owned.increment();
      return false;
    }
    currentVersion =
      attemptToOwnTask(true, watcher, server.getServerName(), path, stat.getVersion());
    if (currentVersion < 0) {
      SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.increment();
      return false;
    }
    if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
      ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails =
        new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
      splitTaskDetails.setTaskNode(currentTask);
      splitTaskDetails.setCurTaskZKVersion(new MutableInt(currentVersion));
      endTask(new SplitLogTask.Done(server.getServerName()),
        SplitLogCounters.tot_wkr_task_acquired_rescan, splitTaskDetails);
      return false;
    }
    LOG.info("worker " + server.getServerName() + " acquired task " + path);
    SplitLogCounters.tot_wkr_task_acquired.increment();
    getDataSetWatchAsync();
    submitTask(path, currentVersion, reportPeriod);
    // after a successful submit, sleep a little bit to allow other RSs to grab the rest tasks
    try {
      int sleepTime = RandomUtils.nextInt(0, 500) + 500;
      Thread.sleep(sleepTime);
    } catch (InterruptedException e) {
      LOG.warn("Interrupted while yielding for other region servers", e);
      Thread.currentThread().interrupt();
    }
    return true;
  } finally {
    synchronized (grabTaskLock) {
      workerInGrabTask = false;
      // clear the interrupt from stopTask() otherwise the next task will
      // suffer
      Thread.interrupted();
    }
  }
}
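The DeserializationException here is raised by SplitLogTask.parseFrom and is treated as "cannot own this task": the worker counts the failure and returns false. Below is a minimal sketch of that parse step in isolation; the import paths and the corrupt payload are illustrative assumptions, not code from ZkSplitLogWorkerCoordination.

// Hedged sketch: exercising SplitLogTask.parseFrom on bytes that are not a valid serialized task.
import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

public class ParseSplitLogTaskSketch {
  public static void main(String[] args) {
    // Illustrative payload only: it lacks the pb magic prefix a real task znode carries.
    byte[] corrupt = new byte[] { 0x00, 0x01, 0x02 };
    try {
      SplitLogTask slt = SplitLogTask.parseFrom(corrupt);
      System.out.println("unassigned? " + slt.isUnassigned());
    } catch (DeserializationException e) {
      // grabTask maps this case to tot_wkr_failed_to_grab_task_exception and returns false.
      System.out.println("could not deserialize task: " + e);
    }
  }
}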
use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
the class MasterFileSystem method checkRootDir.
/**
 * Check hbase.rootdir. Make sure it exists, is a directory, and has been bootstrapped
 * (populated with the necessary bootup files) before returning.
 */
private void checkRootDir(final Path rd, final Configuration c, final FileSystem fs)
  throws IOException {
  int threadWakeFrequency = c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
  // If FS is in safe mode wait till out of it.
  FSUtils.waitOnSafeMode(c, threadWakeFrequency);
  // Filesystem is good. Go ahead and check for hbase.rootdir.
  FileStatus status;
  try {
    status = fs.getFileStatus(rd);
  } catch (FileNotFoundException e) {
    status = null;
  }
  int versionFileWriteAttempts = c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
    HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  try {
    if (status == null) {
      if (!fs.mkdirs(rd)) {
        throw new IOException("Can not create configured '" + HConstants.HBASE_DIR + "' " + rd);
      }
      // DFS leaves safe mode with 0 DNs when there are 0 blocks.
      // We used to handle this by checking the current DN count and waiting until
      // it is nonzero. With security, the check for datanode count doesn't work --
      // it is a privileged op. So instead we adopt the strategy of the jobtracker
      // and simply retry file creation during bootstrap indefinitely. As soon as
      // there is one datanode it will succeed. Permission problems should have
      // already been caught by mkdirs above.
      FSUtils.setVersion(fs, rd, threadWakeFrequency, versionFileWriteAttempts);
    } else {
      if (!status.isDirectory()) {
        throw new IllegalArgumentException(
          "Configured '" + HConstants.HBASE_DIR + "' " + rd + " is not a directory.");
      }
      // as above
      FSUtils.checkVersion(fs, rd, true, threadWakeFrequency, versionFileWriteAttempts);
    }
  } catch (DeserializationException de) {
    LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for '{}' {}",
      HConstants.HBASE_DIR, rd, de);
    throw new IOException(de);
  } catch (IllegalArgumentException iae) {
    LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for '{}' {}",
      HConstants.HBASE_DIR, rd, iae);
    throw iae;
  }
  // Make sure cluster ID exists
  if (!FSUtils.checkClusterIdExists(fs, rd, threadWakeFrequency)) {
    FSUtils.setClusterId(fs, rd, new ClusterId(), threadWakeFrequency);
  }
  clusterId = FSUtils.getClusterId(fs, rd);
}
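The DeserializationException here surfaces from FSUtils.checkVersion (or setVersion) when the hbase.version file under the root directory cannot be parsed; checkRootDir rewraps it as an IOException so callers only see IO failures. A hedged sketch of that version check in isolation follows; the helper method and its parameters are illustrative, not MasterFileSystem code.

// Hedged sketch: verify the hbase.version file under a given root dir, wrapping parse errors.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.FSUtils;

public class RootDirVersionCheckSketch {
  static void verifyVersionFile(FileSystem fs, Path rootDir) throws IOException {
    try {
      // Same call checkRootDir makes in the "rootdir already exists" branch.
      FSUtils.checkVersion(fs, rootDir, true, 10 * 1000,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
    } catch (DeserializationException de) {
      // A corrupt or unreadable version file is reported to the caller as an IO problem.
      throw new IOException("Invalid hbase.version under " + rootDir, de);
    }
  }
}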
use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
the class FSTableDescriptors method getTableDescriptorFromFs.
private static Optional<Pair<FileStatus, TableDescriptor>> getTableDescriptorFromFs(FileSystem fs,
  Path tableDir, boolean readonly) throws IOException {
  Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
  FileStatus[] descFiles = CommonFSUtils.listStatus(fs, tableInfoDir, TABLEINFO_PATHFILTER);
  if (descFiles == null || descFiles.length < 1) {
    return Optional.empty();
  }
  Arrays.sort(descFiles, TABLEINFO_FILESTATUS_COMPARATOR);
  int i = 0;
  TableDescriptor td = null;
  FileStatus descFile = null;
  for (; i < descFiles.length; i++) {
    descFile = descFiles[i];
    Path file = descFile.getPath();
    // get file length from file name if present
    int fileLength = getTableInfoSequenceIdAndFileLength(file).fileLength;
    byte[] content = new byte[fileLength > 0 ? fileLength : Ints.checkedCast(descFile.getLen())];
    try (FSDataInputStream in = fs.open(file)) {
      in.readFully(content);
    } catch (EOFException e) {
      LOG.info("Failed to load file {} due to EOF, it should be half written: {}", file,
        e.toString());
      if (!readonly) {
        deleteMalformedFile(fs, file);
      }
      continue;
    }
    try {
      td = TableDescriptorBuilder.parseFrom(content);
      break;
    } catch (DeserializationException e) {
      LOG.info("Failed to parse file {} due to malformed protobuf message: {}", file,
        e.toString());
      if (!readonly) {
        deleteMalformedFile(fs, file);
      }
    }
  }
  if (!readonly) {
    // i + 1 to skip the one we load
    for (i = i + 1; i < descFiles.length; i++) {
      Path file = descFiles[i].getPath();
      LOG.info("Delete old table descriptor file {}", file);
      if (!fs.delete(file, false)) {
        LOG.info("Failed to delete old table descriptor file {}", file);
      }
    }
  }
  return td != null ? Optional.of(Pair.newPair(descFile, td)) : Optional.empty();
}
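Here TableDescriptorBuilder.parseFrom decodes the content of a .tableinfo file; a malformed file is reported as DeserializationException and, when not read-only, deleted so the next candidate can be tried. A hedged round-trip sketch of that encode/decode pair follows; the table and family names are made up, and toByteArray is assumed to be the writing-side counterpart of parseFrom.

// Hedged sketch: serialize a descriptor and read it back the way getTableDescriptorFromFs does.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

public class TableDescriptorRoundTripSketch {
  public static void main(String[] args) throws DeserializationException {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .build();
    // Produce bytes in the same form a .tableinfo file stores (assumed counterpart of parseFrom).
    byte[] content = TableDescriptorBuilder.toByteArray(td);
    // parseFrom throws DeserializationException if the bytes are truncated or corrupt,
    // which is what makes getTableDescriptorFromFs skip and delete a malformed file.
    TableDescriptor back = TableDescriptorBuilder.parseFrom(content);
    System.out.println(back.getTableName());
  }
}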
use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
the class VisibilityLabelsCache method refreshUserAuthsCache.
public void refreshUserAuthsCache(byte[] data) throws IOException {
  MultiUserAuthorizations multiUserAuths = null;
  try {
    multiUserAuths = VisibilityUtils.readUserAuthsFromZKData(data);
  } catch (DeserializationException dse) {
    throw new IOException(dse);
  }
  this.lock.writeLock().lock();
  try {
    this.userAuths.clear();
    this.groupAuths.clear();
    for (UserAuthorizations userAuths : multiUserAuths.getUserAuthsList()) {
      String user = Bytes.toString(userAuths.getUser().toByteArray());
      if (AuthUtil.isGroupPrincipal(user)) {
        this.groupAuths.put(AuthUtil.getGroupName(user), new HashSet<>(userAuths.getAuthList()));
      } else {
        this.userAuths.put(user, new HashSet<>(userAuths.getAuthList()));
      }
    }
  } finally {
    this.lock.writeLock().unlock();
  }
}
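The payload parsed here is the protobuf-encoded user-auths data mirrored from ZooKeeper; VisibilityUtils.readUserAuthsFromZKData does the deserialization and the cache merely rewraps failures as IOException before rebuilding its maps. A small hedged sketch of that parse step by itself follows; the generated protobuf import path is an assumption based on HBase's VisibilityLabelsProtos.

// Hedged sketch: decode a user-auths znode payload and print each principal's auth ordinals.
import java.io.IOException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.MultiUserAuthorizations;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.UserAuthorizations;
import org.apache.hadoop.hbase.security.visibility.VisibilityUtils;
import org.apache.hadoop.hbase.util.Bytes;

public class UserAuthsParseSketch {
  static void dump(byte[] zkData) throws IOException {
    MultiUserAuthorizations multi;
    try {
      multi = VisibilityUtils.readUserAuthsFromZKData(zkData);
    } catch (DeserializationException dse) {
      // Same wrapping refreshUserAuthsCache applies: surface the parse failure as an IO error.
      throw new IOException(dse);
    }
    for (UserAuthorizations ua : multi.getUserAuthsList()) {
      System.out.println(Bytes.toString(ua.getUser().toByteArray()) + " -> " + ua.getAuthList());
    }
  }
}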
use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
the class ClusterId method parseFrom.
/**
* @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
* @return An instance of {@link ClusterId} made from <code>bytes</code>
* @throws DeserializationException
* @see #toByteArray()
*/
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
    ClusterIdProtos.ClusterId cid = null;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      cid = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return convert(cid);
  } else {
    // Presume it was written out this way, the old way.
    return new ClusterId(Bytes.toString(bytes));
  }
}
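parseFrom accepts both the current encoding, a protobuf message preceded by the pb magic prefix as produced by toByteArray(), and the legacy layout where the cluster id was stored as a plain UUID string; only the protobuf branch can raise DeserializationException. A hedged round-trip sketch exercising both branches follows; the UUID literal is made up.

// Hedged sketch: round-trip a ClusterId through both the pb-prefixed and the legacy string form.
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;

public class ClusterIdRoundTripSketch {
  public static void main(String[] args) throws DeserializationException {
    // New-style: pb-serialized with the magic prefix, as written by toByteArray().
    ClusterId id = new ClusterId();
    ClusterId back = ClusterId.parseFrom(id.toByteArray());

    // Old-style: the id stored as a raw UUID string, handled by the else branch above.
    ClusterId legacy = ClusterId.parseFrom(Bytes.toBytes("11111111-2222-3333-4444-555555555555"));

    System.out.println(back + " / " + legacy);
  }
}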