Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class ReplDumpWork, method managedTableCopyTasks.
public List<Task<?>> managedTableCopyTasks(TaskTracker tracker, HiveConf conf) throws IOException {
  if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_SKIP_IMMUTABLE_DATA_COPY)) {
    return Collections.emptyList();
  }
  List<Task<?>> tasks = new ArrayList<>();
  Retryable retryable = Retryable.builder().withHiveConf(conf)
      .withRetryOnException(UncheckedIOException.class).build();
  try {
    retryable.executeCallable((Callable<Void>) () -> {
      try {
        int numEntriesToSkip = tasks == null ? 0 : tasks.size();
        while (managedTblCopyPathIterator.hasNext() && tracker.canAddMoreTasks()) {
          if (numEntriesToSkip > 0) {
            // skip tasks added in previous attempts of this retryable block
            managedTblCopyPathIterator.next();
            numEntriesToSkip--;
            continue;
          }
          ReplicationSpec replSpec = new ReplicationSpec();
          replSpec.setIsReplace(true);
          replSpec.setInReplicationScope(true);
          EximUtil.DataCopyPath managedTableCopyPath = new EximUtil.DataCopyPath(replSpec);
          managedTableCopyPath.loadFromString(managedTblCopyPathIterator.next());
          // If it's incremental, in the checkpointing case the dump dir may exist; we will delete the event dir.
          // In case of bootstrap checkpointing we will not delete the entire dir, just do a sync.
          Task<?> copyTask = ReplCopyTask.getDumpCopyTask(
              managedTableCopyPath.getReplicationSpec(), managedTableCopyPath.getSrcPath(),
              managedTableCopyPath.getTargetPath(), conf, false, shouldOverwrite, !isBootstrap(),
              getCurrentDumpPath().toString(), getMetricCollector());
          tasks.add(copyTask);
          tracker.addTask(copyTask);
          LOG.debug("added task for {}", managedTableCopyPath);
        }
      } catch (UncheckedIOException e) {
        LOG.error("Reading entry for data copy failed for managed tables, attempting retry.", e);
        throw e;
      }
      return null;
    });
  } catch (Exception e) {
    throw new IOException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()));
  }
  return tasks;
}
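The numEntriesToSkip counter is what keeps the retry block idempotent: entries consumed during a failed attempt are already represented in tasks, so the next attempt fast-forwards past them instead of adding duplicate copy tasks. Below is a minimal, self-contained sketch of that skip-on-retry idiom, assuming each attempt re-reads the entries from the start; RetrySketch and every name in it are illustrative stand-ins, not part of the Hive API.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Supplier;

// Illustrative stand-in for the skip-on-retry idiom; not Hive code.
public class RetrySketch {

  /** Runs work up to maxAttempts times, rethrowing the last failure. */
  static void runWithRetry(int maxAttempts, Runnable work) {
    RuntimeException last = null;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        work.run();
        return;
      } catch (RuntimeException e) {
        last = e;
      }
    }
    throw last;
  }

  /**
   * Collects processed entries. entrySource must hand out a fresh iterator per
   * attempt so that skipping results.size() entries is safe after a failure.
   */
  static List<String> collect(Supplier<Iterator<String>> entrySource) {
    List<String> results = new ArrayList<>();
    runWithRetry(3, () -> {
      int toSkip = results.size();           // entries already handled by an earlier attempt
      Iterator<String> entries = entrySource.get();
      while (entries.hasNext()) {
        String entry = entries.next();       // may throw and trigger a retry
        if (toSkip > 0) {
          toSkip--;                          // fast-forward past work done before the failure
          continue;
        }
        results.add(entry.toUpperCase());    // stand-in for building and registering a copy task
      }
    });
    return results;
  }
}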
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class ReplDumpWork, method externalTableCopyTasks.
public List<Task<?>> externalTableCopyTasks(TaskTracker tracker, HiveConf conf) throws IOException {
  if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_SKIP_IMMUTABLE_DATA_COPY)) {
    return Collections.emptyList();
  }
  List<Task<?>> tasks = new ArrayList<>();
  Retryable retryable = Retryable.builder().withHiveConf(conf)
      .withRetryOnException(UncheckedIOException.class).build();
  try {
    retryable.executeCallable((Callable<Void>) () -> {
      try {
        int numEntriesToSkip = tasks == null ? 0 : tasks.size();
        while (externalTblCopyPathIterator.hasNext() && tracker.canAddMoreTasks()) {
          if (numEntriesToSkip > 0) {
            // skip tasks added in previous attempts of this retryable block
            externalTblCopyPathIterator.next();
            numEntriesToSkip--;
            continue;
          }
          DirCopyWork dirCopyWork = new DirCopyWork(metricCollector, currentDumpPath.toString());
          dirCopyWork.loadFromString(externalTblCopyPathIterator.next());
          Task<DirCopyWork> task = TaskFactory.get(dirCopyWork, conf);
          tasks.add(task);
          tracker.addTask(task);
          LOG.debug("added task for {}", dirCopyWork);
        }
      } catch (UncheckedIOException e) {
        LOG.error("Reading entry for data copy failed for external tables, attempting retry.", e);
        throw e;
      }
      return null;
    });
  } catch (Exception e) {
    throw new IOException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()));
  }
  return tasks;
}
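externalTableCopyTasks mirrors the managed-table variant almost line for line; only the payload differs (a DirCopyWork handed to TaskFactory instead of a ReplCopyTask). Stripped of the task-building details, the shared retry wrapper is roughly the following; the class name and exception message are illustrative, and only the Retryable calls are taken from the code above.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.Callable;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.util.Retryable;

// Skeleton of the retry wrapper shared by both copy-task methods: retry only
// on UncheckedIOException and surface an exhausted retry budget as IOException.
class CopyTaskRetrySkeleton {
  void run(HiveConf conf, Callable<Void> body) throws IOException {
    Retryable retryable = Retryable.builder()
        .withHiveConf(conf)
        .withRetryOnException(UncheckedIOException.class)
        .build();
    try {
      retryable.executeCallable(body);
    } catch (Exception e) {
      // The Hive methods use ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()) here.
      throw new IOException("Replication retry exhausted: " + e.getMessage(), e);
    }
  }
}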
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class ReplDumpTask, method getResumeFrom.
private long getResumeFrom(Path ackFile) throws SemanticException {
  Retryable retryable = Retryable.builder().withHiveConf(conf)
      .withRetryOnException(Exception.class).build();
  try {
    return retryable.executeCallable(() -> {
      BufferedReader br = null;
      try {
        FileSystem fs = ackFile.getFileSystem(conf);
        br = new BufferedReader(new InputStreamReader(fs.open(ackFile), Charset.defaultCharset()));
        long lastEventID = Long.parseLong(br.readLine());
        return lastEventID;
      } finally {
        if (br != null) {
          try {
            br.close();
          } catch (Exception e) {
            // Do nothing
          }
        }
      }
    });
  } catch (Exception e) {
    throw new SemanticException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
  }
}
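Here the retry policy is configured for Exception, so effectively any failure while reading the ack file triggers another attempt. The manual finally/close can be expressed more compactly with try-with-resources; the sketch below is a minimal equivalent of the read itself, with a hypothetical class name, and would still run inside the Retryable as above.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal equivalent of the ack-file read: the first line holds the last
// replicated event id; try-with-resources closes the reader on any outcome.
class AckFileReadSketch {
  static long readLastEventId(Path ackFile, Configuration conf) throws IOException {
    FileSystem fs = ackFile.getFileSystem(conf);
    try (BufferedReader br = new BufferedReader(
        new InputStreamReader(fs.open(ackFile), Charset.defaultCharset()))) {
      return Long.parseLong(br.readLine());
    }
  }
}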
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class AtlasRequestBuilder, method getFileAsList.
public List<String> getFileAsList(Path listOfTablesFile, HiveConf conf) throws SemanticException {
  Retryable retryable = Retryable.builder().withHiveConf(conf)
      .withRetryOnException(IOException.class).build();
  try {
    return retryable.executeCallable(() -> {
      List<String> list = new ArrayList<>();
      InputStream is = null;
      try {
        FileSystem fs = getFileSystem(listOfTablesFile, conf);
        FileStatus fileStatus = fs.getFileStatus(listOfTablesFile);
        if (fileStatus == null) {
          throw new SemanticException("Table list file not found: " + listOfTablesFile);
        }
        is = fs.open(listOfTablesFile);
        list.addAll(IOUtils.readLines(is, Charset.defaultCharset()));
        return list;
      } finally {
        org.apache.hadoop.io.IOUtils.closeStream(is);
      }
    });
  } catch (Exception e) {
    throw new SemanticException(e);
  }
}
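Two different IOUtils classes appear here: readLines seems to come from Apache Commons IO (Hadoop's own IOUtils offers no readLines), which is presumably why the close goes through the fully qualified org.apache.hadoop.io.IOUtils.closeStream. A stripped-down version of the read, with an illustrative class name, follows; in the method above it runs inside a Retryable that retries on IOException.

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.Charset;
import java.util.List;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal line-oriented read of a table-list file from the backing FileSystem;
// the short IOUtils name is Commons IO, the Hadoop one stays fully qualified.
class TableListReadSketch {
  static List<String> readAllLines(Path file, Configuration conf) throws IOException {
    FileSystem fs = file.getFileSystem(conf);
    InputStream is = null;
    try {
      is = fs.open(file);
      return IOUtils.readLines(is, Charset.defaultCharset());
    } finally {
      org.apache.hadoop.io.IOUtils.closeStream(is);
    }
  }
}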
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class FileList, method writeEntry.
private synchronized void writeEntry(String entry) throws IOException {
  // retry only during creating the file, no retry during writes
  if (backingFileWriter == null) {
    try {
      Retryable retryable = buildRetryable();
      retryable.executeCallable((Callable<Void>) () -> {
        if (this.abortOperation) {
          LOG.debug("Aborting write operation for entry {} to file {}.", entry, backingFile);
          return null;
        }
        backingFileWriter = getWriterCreateMode();
        return null;
      });
    } catch (Exception e) {
      this.abortOperation = true;
      throw new IOException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()));
    }
  }
  if (this.abortOperation) {
    LOG.debug("Aborting write operation for entry {} to file {}.", entry, backingFile);
    return;
  }
  try {
    backingFileWriter.writeBytes(getEntryWithNewline(entry));
    LOG.info("Writing entry {} to file list backed by {}", entry, backingFile);
  } catch (IOException e) {
    this.abortOperation = true;
    LOG.error("Writing entry {} to file list {} failed.", entry, backingFile, e);
    throw e;
  }
}
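Only the creation of backingFileWriter is retried; once the writer exists, an individual write failure sets abortOperation so later writeEntry calls become no-ops instead of producing a partially duplicated file list. buildRetryable() is not shown on this page; judging from the other call sites it presumably configures a Retryable with the HiveConf and an IO exception type. The sketch below is a self-contained stand-in for the lazy-create-then-fail-fast idiom, not the actual FileList implementation.

import java.io.IOException;

// Stand-in for the FileList write path: retry only the lazy creation of the
// writer; after any failure, flip 'aborted' so further writes are skipped.
class LazyWriterSketch {
  interface LineWriter { void write(String line) throws IOException; }
  interface WriterFactory { LineWriter create() throws IOException; }

  private final WriterFactory factory;
  private LineWriter writer;
  private boolean aborted;

  LazyWriterSketch(WriterFactory factory) {
    this.factory = factory;
  }

  synchronized void writeEntry(String entry) throws IOException {
    if (writer == null && !aborted) {
      writer = createWithRetry(3);        // creation is the only retried step
    }
    if (aborted) {
      return;                             // a previous failure poisoned this list
    }
    try {
      writer.write(entry + "\n");         // writes themselves are not retried
    } catch (IOException e) {
      aborted = true;
      throw e;
    }
  }

  private LineWriter createWithRetry(int attempts) throws IOException {
    IOException last = null;
    for (int i = 0; i < attempts; i++) {
      try {
        return factory.create();
      } catch (IOException e) {
        last = e;
      }
    }
    aborted = true;
    throw last;
  }
}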