Use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hive by apache.
The class ReplUtils, method handleException:
public static int handleException(boolean isReplication, Throwable e, String nonRecoverablePath,
    ReplicationMetricCollector metricCollector, String stageName, HiveConf conf) {
  int errorCode;
  // Snapshot failures during replication map to the dedicated snapshot error code; everything
  // else is classified from the exception message.
  if (isReplication && e instanceof SnapshotException) {
    errorCode = ErrorMsg.getErrorMsg("SNAPSHOT_ERROR").getErrorCode();
  } else {
    errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
  }
  if (isReplication) {
    try {
      if (nonRecoverablePath != null) {
        final int recoverableLimit = ErrorMsg.GENERIC_ERROR.getErrorCode();
        String metricStage = getMetricStageName(stageName, metricCollector);
        if (errorCode > recoverableLimit) {
          // Non-recoverable failure: persist the stack trace as a marker file and report the
          // stage as requiring admin intervention.
          Path nonRecoverableMarker = new Path(new Path(nonRecoverablePath), ReplAck.NON_RECOVERABLE_MARKER.toString());
          Utils.writeStackTrace(e, nonRecoverableMarker, conf);
          metricCollector.reportStageEnd(metricStage, Status.FAILED_ADMIN, nonRecoverableMarker.toString());
        } else {
          metricCollector.reportStageEnd(metricStage, Status.FAILED);
        }
      }
    } catch (Exception ex) {
      LOG.error("Failed to collect Metrics ", ex);
    }
  }
  return errorCode;
}
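The method above centralizes failure handling for replication: a caller typically wraps its work in a try/catch and returns the code that handleException produces. Below is a minimal caller sketch; the surrounding task class, its fields, the "REPL_DUMP" stage name, and the exact import locations are assumptions for illustration, not code taken from Hive.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;

// Hypothetical caller: not a real Hive class, only an illustration of the error-code contract.
public class ReplDumpSketch {

  private final HiveConf conf;
  private final Path dumpRoot;                              // root of the current dump directory (illustrative)
  private final ReplicationMetricCollector metricCollector;

  ReplDumpSketch(HiveConf conf, Path dumpRoot, ReplicationMetricCollector metricCollector) {
    this.conf = conf;
    this.dumpRoot = dumpRoot;
    this.metricCollector = metricCollector;
  }

  /** Runs the dump; on failure, maps the exception to a return code via handleException. */
  public int execute() {
    try {
      // ... the actual replication dump work would go here ...
      return 0;
    } catch (Exception e) {
      // Codes above ErrorMsg.GENERIC_ERROR are treated as non-recoverable: handleException
      // writes the stack trace to a NON_RECOVERABLE marker under dumpRoot and reports the
      // stage as FAILED_ADMIN; otherwise the stage is reported as FAILED.
      return ReplUtils.handleException(true, e, dumpRoot.toString(), metricCollector, "REPL_DUMP", conf);
    }
  }
}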
Use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hive by apache.
The class DirCopyTask, method copyUsingDistCpSnapshots:
boolean copyUsingDistCpSnapshots(Path sourcePath, Path targetPath, UserGroupInformation proxyUser,
    HiveConf clonedConf) throws IOException {
  DistributedFileSystem targetFs = SnapshotUtils.getDFS(targetPath, clonedConf);
  boolean result = false;
  if (getWork().getCopyMode().equals(SnapshotUtils.SnapshotCopyMode.DIFF_COPY)) {
    LOG.info("Using snapshot diff copy for source: {} and target: {}", sourcePath, targetPath);
    boolean overwriteTarget = clonedConf.getBoolVar(REPL_SNAPSHOT_OVERWRITE_TARGET_FOR_EXTERNAL_TABLE_COPY);
    LOG.debug("Overwrite target in case the target location is modified is turned {}", overwriteTarget ? "on" : "off");
    result = FileUtils.distCpWithSnapshot(firstSnapshot(work.getSnapshotPrefix()), secondSnapshot(work.getSnapshotPrefix()),
        Collections.singletonList(sourcePath), targetPath, overwriteTarget, clonedConf,
        ShimLoader.getHadoopShims(), proxyUser);
    if (result) {
      // Delete the older snapshot from the last iteration.
      targetFs.deleteSnapshot(targetPath, firstSnapshot(work.getSnapshotPrefix()));
    } else {
      throw new SnapshotException("Can not successfully copy external table data using snapshot diff. source: "
          + sourcePath + " and target: " + targetPath);
    }
  } else if (getWork().getCopyMode().equals(SnapshotUtils.SnapshotCopyMode.INITIAL_COPY)) {
    LOG.info("Using snapshot initial copy for source: {} and target: {}", sourcePath, targetPath);
    // Get the path relative to the initial snapshot for the copy.
    Path snapRelPath = new Path(sourcePath, HdfsConstants.DOT_SNAPSHOT_DIR + "/" + secondSnapshot(work.getSnapshotPrefix()));
    // This is the first copy: if the target is not yet snapshottable, attempt to allow snapshots on it.
    SnapshotUtils.allowSnapshot(targetFs, work.getFullyQualifiedTargetPath(), clonedConf);
    // If this is a bootstrap after a failed incremental, a snapshot may already exist; since a
    // bootstrap starts from scratch, delete any pre-existing snapshot.
    SnapshotUtils.deleteSnapshotIfExists(targetFs, targetPath, firstSnapshot(work.getSnapshotPrefix()), clonedConf);
    // Copy from the initial snapshot path.
    result = runFallbackDistCp(snapRelPath, targetPath, proxyUser, clonedConf);
  }
  // Create a new snapshot on the target filesystem for the next iteration.
  if (result) {
    SnapshotUtils.createSnapshot(targetFs, targetPath, firstSnapshot(work.getSnapshotPrefix()), clonedConf);
  }
  return result;
}
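For context, the diff-copy mode relies on HDFS directory snapshots, and on the target the method keeps a single snapshot alive between iterations: after a successful diff copy it deletes the old snapshot, then recreates one with the same name at the end of the run. The sketch below shows the underlying HDFS snapshot calls directly; the class, method names, and the snapshot name "repl-old" are illustrative assumptions, the DistCp step is elided, and this is not Hive's SnapshotUtils.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Minimal sketch of the target-side snapshot lifecycle using plain HDFS APIs.
public class TargetSnapshotLifecycleSketch {

  /** Initial copy: make the target snapshottable, copy the data, then take the baseline snapshot. */
  static void afterInitialCopy(DistributedFileSystem targetFs, Path targetPath) throws IOException {
    targetFs.allowSnapshot(targetPath);
    // ... full copy of the source data into targetPath (e.g. via DistCp) happens here ...
    targetFs.createSnapshot(targetPath, "repl-old");
  }

  /** Diff copy: after a successful incremental copy, roll the snapshot forward for the next run. */
  static void afterDiffCopy(DistributedFileSystem targetFs, Path targetPath) throws IOException {
    targetFs.deleteSnapshot(targetPath, "repl-old");
    targetFs.createSnapshot(targetPath, "repl-old");
  }
}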