Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class TestFileList, method setupFileList:
private FileList setupFileList(boolean... retryParams) throws Exception {
  HiveConf hiveConf = Mockito.mock(HiveConf.class);
  FileSystem mockFs = Mockito.mock(FileSystem.class);
  Path backingFile = Mockito.spy(new Path("/tmp/backingFile"));
  FileList fileList = Mockito.spy(new FileList(backingFile, hiveConf));
  outStream = Mockito.spy(new FSDataOutputStream(null, null));
  Retryable retryable = Retryable.builder()
      .withTotalDuration(60)
      .withInitialDelay(1)
      .withBackoff(1.0)
      .withRetryOnException(IOException.class)
      .build();
  if (retryParams.length == 0) {
    // Setup for the normal flow, without failures.
    Path noRetryPath = new Path(new Path(TEST_DATA_DIR), "noRetry");
    testFileStream = Mockito.spy(noRetryPath.getFileSystem(conf).create(noRetryPath));
    Mockito.doReturn(retryable).when(fileList).buildRetryable();
    Mockito.doReturn(true).when(hiveConf).getBoolVar(HiveConf.ConfVars.REPL_COPY_FILE_LIST_ITERATOR_RETRY);
    Mockito.doReturn(testFileStream).when(fileList).initWriter();
  } else if (retryParams.length == 1) {
    // Setup for retries.
    Mockito.doReturn(true).when(hiveConf).getBoolVar(HiveConf.ConfVars.REPL_COPY_FILE_LIST_ITERATOR_RETRY);
    Mockito.doReturn(retryable).when(fileList).buildRetryable();
    Mockito.doReturn(mockFs).when(backingFile).getFileSystem(hiveConf);
    if (retryParams[0]) {
      // Setup for a retry caused by a create failure.
      Mockito.doReturn(false).when(mockFs).exists(backingFile);
      Mockito.doThrow(testException).when(fileList).getWriterCreateMode();
    } else {
      // Setup for a retry caused by a failure during writes.
      Mockito.when(mockFs.exists(backingFile)).thenReturn(false).thenReturn(true);
      Mockito.doReturn(outStream).when(fileList).getWriterAppendMode();
      Mockito.doReturn(outStream).when(fileList).getWriterCreateMode();
      Mockito.doThrow(testException).when(outStream).writeBytes(Mockito.anyString());
    }
  } else if (retryParams.length == 2) {
    // Setup for a failure with no retry.
    Mockito.doReturn(false).when(hiveConf).getBoolVar(HiveConf.ConfVars.REPL_COPY_FILE_LIST_ITERATOR_RETRY);
    Mockito.doReturn(outStream).when(fileList).getWriterCreateMode();
    Mockito.doThrow(testException).when(outStream).writeBytes(Mockito.anyString());
  } else if (retryParams.length == 3) {
    // Setup for the abort case.
    Mockito.doReturn(true).when(hiveConf).getBoolVar(HiveConf.ConfVars.REPL_COPY_FILE_LIST_ITERATOR_RETRY);
    Mockito.doReturn(outStream).when(fileList).initWriter();
  }
  return fileList;
}
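The policy built here retries on IOException with a fixed one-second delay (backoff factor 1.0) for at most 60 seconds in total. Below is a minimal, standalone sketch of how such a policy drives a flaky operation; the attempt counter and the transient failure are illustrative, not part of the Hive test, and the duration parameters are assumed to be in seconds:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hive.ql.exec.util.Retryable;

public class RetryableSketch {
  public static void main(String[] args) throws Exception {
    Retryable retryable = Retryable.builder()
        .withTotalDuration(60)                    // overall retry budget (assumed seconds)
        .withInitialDelay(1)                      // delay before the first retry (assumed seconds)
        .withBackoff(1.0)                         // factor 1.0 keeps the delay constant
        .withRetryOnException(IOException.class)  // only IOExceptions trigger a retry
        .build();
    AtomicInteger attempts = new AtomicInteger(); // illustrative failure counter
    String result = retryable.executeCallable(() -> {
      if (attempts.incrementAndGet() < 3) {
        throw new IOException("transient failure"); // retried twice, then succeeds
      }
      return "succeeded on attempt " + attempts.get();
    });
    System.out.println(result);
  }
}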
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class AtlasDumpTask, method lastStoredTimeStamp:
private long lastStoredTimeStamp() throws SemanticException {
  Path prevMetadataPath = new Path(work.getPrevAtlasDumpDir(), EximUtil.METADATA_NAME);
  Retryable retryable = Retryable.builder()
      .withHiveConf(conf)
      .withRetryOnException(IOException.class)
      .withFailOnException(FileNotFoundException.class)
      .build();
  try {
    return retryable.executeCallable(() -> {
      BufferedReader br = null;
      try {
        FileSystem fs = prevMetadataPath.getFileSystem(conf);
        br = new BufferedReader(new InputStreamReader(fs.open(prevMetadataPath), Charset.defaultCharset()));
        String line = br.readLine();
        if (line == null) {
          throw new SemanticException(ErrorMsg.REPL_INVALID_INTERNAL_CONFIG_FOR_SERVICE.format(
              "Could not read lastStoredTimeStamp from atlas metadata file", ReplUtils.REPL_ATLAS_SERVICE));
        }
        String[] lineContents = line.split("\t", 5);
        return Long.parseLong(lineContents[1]);
      } finally {
        if (br != null) {
          try {
            br.close();
          } catch (IOException e) {
            // Do nothing.
          }
        }
      }
    });
  } catch (SemanticException e) {
    throw e;
  } catch (Exception e) {
    throw new SemanticException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
  }
}
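Read together with getStoredFsUri below, the first line of the Atlas metadata file is evidently tab-separated, with the stored source FS URI in field 0 and the last stored timestamp in field 1. A hypothetical round trip of that inferred layout (the URI and timestamp values are made up):

String line = String.join("\t", "hdfs://source-cluster:8020", "1687443200000");
String[] lineContents = line.split("\t", 5);
String storedFsUri = lineContents[0];                       // consumed by getStoredFsUri
long lastStoredTimeStamp = Long.parseLong(lineContents[1]); // consumed by lastStoredTimeStamp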
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class AtlasLoadTask, method getStoredFsUri:
private String getStoredFsUri(Path atlasDumpDir) throws SemanticException {
  Path metadataPath = new Path(atlasDumpDir, EximUtil.METADATA_NAME);
  Retryable retryable = Retryable.builder()
      .withHiveConf(conf)
      .withRetryOnException(IOException.class)
      .build();
  try {
    return retryable.executeCallable(() -> {
      BufferedReader br = null;
      try {
        FileSystem fs = metadataPath.getFileSystem(conf);
        br = new BufferedReader(new InputStreamReader(fs.open(metadataPath), Charset.defaultCharset()));
        String line = br.readLine();
        if (line == null) {
          throw new SemanticException(ErrorMsg.REPL_INVALID_INTERNAL_CONFIG_FOR_SERVICE.format(
              "Could not read stored src FS Uri from atlas metadata file", ReplUtils.REPL_ATLAS_SERVICE));
        }
        String[] lineContents = line.split("\t", 5);
        return lineContents[0];
      } finally {
        if (br != null) {
          br.close();
        }
      }
    });
  } catch (Exception e) {
    throw new SemanticException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
  }
}
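Note the difference from lastStoredTimeStamp above: that builder adds withFailOnException(FileNotFoundException.class), while this one does not. Since FileNotFoundException is a subclass of IOException, a missing metadata file fails fast there but, presumably, keeps being retried here until the total duration is exhausted. Side by side:

// Fails fast when the file is missing (FileNotFoundException is carved out of the retry set):
Retryable failFast = Retryable.builder()
    .withHiveConf(conf)
    .withRetryOnException(IOException.class)
    .withFailOnException(FileNotFoundException.class)
    .build();

// Treats a missing file like any other IOException and retries it:
Retryable retryAll = Retryable.builder()
    .withHiveConf(conf)
    .withRetryOnException(IOException.class)
    .build();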
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class DirCopyTask, method execute:
@Override
public int execute() {
  LOG.info("Started DirCopyTask for source: {} to target: {}", work.getFullyQualifiedSourcePath(),
      work.getFullyQualifiedTargetPath());
  HiveConf clonedConf = getConf(conf);
  String distCpDoAsUser = clonedConf.getVar(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER);
  Retryable retryable = Retryable.builder()
      .withHiveConf(clonedConf)
      .withRetryOnException(IOException.class)
      .withFailOnException(SnapshotException.class)
      .build();
  long startTime = System.currentTimeMillis();
  AtomicInteger retries = new AtomicInteger(-1);
  AtomicBoolean result = new AtomicBoolean(false);
  try {
    return retryable.executeCallable(() -> {
      retries.getAndIncrement();
      UserGroupInformation proxyUser = null;
      Path sourcePath = work.getFullyQualifiedSourcePath();
      Path targetPath = work.getFullyQualifiedTargetPath();
      try {
        if (clonedConf.getBoolVar(HiveConf.ConfVars.REPL_ADD_RAW_RESERVED_NAMESPACE)) {
          sourcePath = reservedRawPath(work.getFullyQualifiedSourcePath().toUri());
          targetPath = reservedRawPath(work.getFullyQualifiedTargetPath().toUri());
        }
        UserGroupInformation ugi = Utils.getUGI();
        String currentUser = ugi.getShortUserName();
        if (distCpDoAsUser != null && !currentUser.equals(distCpDoAsUser)) {
          proxyUser = UserGroupInformation.createProxyUser(distCpDoAsUser, UserGroupInformation.getLoginUser());
        }
        setTargetPathOwner(targetPath, sourcePath, proxyUser, clonedConf);
        try {
          if (!checkIfPathExist(sourcePath, proxyUser, clonedConf)) {
            LOG.info("Source path is missing. Ignoring exception.");
            return 0;
          }
        } catch (Exception ex) {
          LOG.warn("Source path missing check failed. ", ex);
          // Should be retried.
          throw new IOException(ex);
        }
        if (!getWork().getCopyMode().equals(SnapshotUtils.SnapshotCopyMode.FALLBACK_COPY)) {
          LOG.info("Using Snapshot mode of copy for source: {} and target: {}", sourcePath, targetPath);
          // Use distcp with snapshots for copy.
          result.set(copyUsingDistCpSnapshots(sourcePath, targetPath, proxyUser, clonedConf));
        } else {
          LOG.info("Using Normal copy for source: {} and target: {}", sourcePath, targetPath);
          result.set(runFallbackDistCp(sourcePath, targetPath, proxyUser, clonedConf));
        }
        return 0;
      } finally {
        if (proxyUser != null) {
          FileSystem.closeAllForUGI(proxyUser);
        }
      }
    });
  } catch (Exception e) {
    LOG.error("Replication failed ", e);
    Exception ex = new SecurityException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
    setException(ex);
    return ReplUtils.handleException(true, ex, work.getDumpDirectory(), work.getMetricCollector(), getName(), clonedConf);
  } finally {
    String jobId = clonedConf.get(ReplUtils.DISTCP_JOB_ID_CONF, ReplUtils.DISTCP_JOB_ID_CONF_DEFAULT);
    LOG.info("DirCopyTask status for source: {} to target: {}. Took {}. DistCp JobId {}. Number of retries {}. "
        + "Result: {}", work.getFullyQualifiedSourcePath(), work.getFullyQualifiedTargetPath(),
        ReplUtils.convertToHumanReadableTime(System.currentTimeMillis() - startTime), jobId, retries.get(),
        result.get() ? "SUCCEEDED" : "FAILED");
  }
}
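The AtomicInteger and AtomicBoolean here are not about thread safety: locals captured by the retried lambda must be effectively final, so mutable holders are the usual way to carry an attempt count and a result out of the closure. A stripped-down sketch of the pattern (doCopy is a hypothetical stand-in for the DistCp calls):

AtomicInteger retries = new AtomicInteger(-1);   // -1 so the first attempt counts as zero retries
AtomicBoolean result = new AtomicBoolean(false);
retryable.executeCallable(() -> {
  retries.getAndIncrement();  // bumped on every attempt, including the first
  result.set(doCopy());       // hypothetical operation whose outcome must outlive the lambda
  return 0;
});
LOG.info("retries={}, succeeded={}", retries.get(), result.get());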
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class ReplDumpTask, method cleanFailedEventDirIfExists:
private void cleanFailedEventDirIfExists(Path dumpDir, long resumeFrom) throws SemanticException {
  Path nextEventRoot = new Path(dumpDir, String.valueOf(resumeFrom + 1));
  Retryable retryable = Retryable.builder()
      .withHiveConf(conf)
      .withRetryOnException(IOException.class)
      .build();
  try {
    retryable.executeCallable((Callable<Void>) () -> {
      FileSystem fs = FileSystem.get(nextEventRoot.toUri(), conf);
      try {
        fs.delete(nextEventRoot, true);
      } catch (FileNotFoundException e) {
        // No worries: the directory is already gone.
      }
      return null;
    });
  } catch (Exception e) {
    throw new SemanticException(e);
  }
}
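Because executeCallable takes a Callable, void-returning work is wrapped in a Callable<Void> that ends with return null, as above. If this idiom recurs, a small hypothetical helper can hide the cast (ThrowingRunnable and runWithRetry are illustrative names, not Hive API):

// Hypothetical convenience wrapper for void operations under a Retryable policy.
@FunctionalInterface
interface ThrowingRunnable {
  void run() throws Exception;
}

static void runWithRetry(Retryable retryable, ThrowingRunnable action) throws Exception {
  retryable.executeCallable((Callable<Void>) () -> {
    action.run();  // the retried body; thrown exceptions drive the retry policy
    return null;   // Callable<Void> must still return something
  });
}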