Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class ReplLoadWork, method externalTableCopyTasks.
public List<Task<?>> externalTableCopyTasks(TaskTracker tracker, HiveConf conf) throws IOException {
  if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_SKIP_IMMUTABLE_DATA_COPY)) {
    return Collections.emptyList();
  }
  List<Task<?>> tasks = new ArrayList<>();
  Retryable retryable = Retryable.builder().withHiveConf(conf)
      .withRetryOnException(UncheckedIOException.class).build();
  try {
    retryable.executeCallable((Callable<Void>) () -> {
      try {
        int numEntriesToSkip = tasks == null ? 0 : tasks.size();
        while (externalTableDataCopyItr.hasNext() && tracker.canAddMoreTasks()) {
          if (numEntriesToSkip > 0) {
            // skip entries added in the previous attempts of this retryable block
            externalTableDataCopyItr.next();
            numEntriesToSkip--;
            continue;
          }
          DirCopyWork dirCopyWork = new DirCopyWork(metricCollector, (new Path(dumpDirectory).getParent()).toString());
          dirCopyWork.loadFromString(externalTableDataCopyItr.next());
          Task<DirCopyWork> task = TaskFactory.get(dirCopyWork, conf);
          tasks.add(task);
          tracker.addTask(task);
          LOG.debug("Added task for {}", dirCopyWork);
        }
      } catch (UncheckedIOException e) {
        LOG.error("Reading entry for data copy failed for external tables, attempting retry.", e);
        throw e;
      }
      return null;
    });
  } catch (Exception e) {
    throw new IOException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()));
  }
  LOG.info("Added total {} tasks for external table locations copy.", tasks.size());
  return tasks;
}
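The skip-and-retry idiom above can be isolated into a small sketch. In the sketch below, collectEntries and entrySource (a supplier assumed to yield a fresh iterator over the copy entries on every attempt) are hypothetical names introduced for illustration; only the Retryable builder calls that appear in the snippet above are used.

import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.function.Supplier;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.util.Retryable;

public class RetrySkipSketch {
  // Collects entries from a flaky source, retrying on UncheckedIOException and
  // skipping whatever earlier attempts already collected (hypothetical example).
  public static List<String> collectEntries(Supplier<Iterator<String>> entrySource, HiveConf conf) throws Exception {
    List<String> collected = new ArrayList<>();
    Retryable retryable = Retryable.builder()
        .withHiveConf(conf)
        .withRetryOnException(UncheckedIOException.class)
        .build();
    retryable.executeCallable((Callable<Void>) () -> {
      Iterator<String> it = entrySource.get();
      // Entries gathered in previous attempts are skipped, mirroring the
      // numEntriesToSkip logic in externalTableCopyTasks.
      int numEntriesToSkip = collected.size();
      while (it.hasNext()) {
        String entry = it.next();
        if (numEntriesToSkip > 0) {
          numEntriesToSkip--;
          continue;
        }
        collected.add(entry);
      }
      return null;
    });
    return collected;
  }
}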
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class SnapshotUtils, method isSnapshotAvailable.
/**
* Checks whether a given snapshot exists or not.
* @param dfs DistributedFileSystem.
* @param path path of snapshot.
* @param snapshotPrefix snapshot name prefix.
* @param snapshotName name of snapshot.
* @param conf Hive configuration.
* @return true if the snapshot exists.
* @throws IOException in case of any error.
*/
public static boolean isSnapshotAvailable(DistributedFileSystem dfs, Path path, String snapshotPrefix,
    String snapshotName, HiveConf conf) throws IOException {
  AtomicBoolean isSnapAvlb = new AtomicBoolean(false);
  Retryable retryable = Retryable.builder().withHiveConf(conf).withRetryOnException(IOException.class)
      .withFailOnException(SnapshotException.class).build();
  try {
    retryable.executeCallable(() -> {
      isSnapAvlb.set(dfs.exists(new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR + "/" + snapshotPrefix + snapshotName)));
      LOG.debug("Snapshot for path {} is {}", path, isSnapAvlb.get() ? "available" : "unavailable");
      return null;
    });
  } catch (Exception e) {
    throw new SnapshotException("Failed to check if snapshot is available on " + path, e);
  }
  return isSnapAvlb.get();
}
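A minimal sketch of a caller follows, assuming the default file system is HDFS so the cast is valid; the table location and snapshot names are illustrative, and SnapshotUtils refers to the class shown above (its import is omitted here).

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hive.conf.HiveConf;
// plus an import for the SnapshotUtils class shown above

public class SnapshotCheckSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Assumes fs.defaultFS points at an HDFS cluster so this cast succeeds.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path tableLocation = new Path("/warehouse/external/sales"); // illustrative path
    // Effectively checks for /warehouse/external/sales/.snapshot/repl_snap1,
    // retrying transient IOExceptions but failing fast on SnapshotException.
    boolean available = SnapshotUtils.isSnapshotAvailable(dfs, tableLocation, "repl_", "snap1", conf);
    System.out.println("Snapshot present: " + available);
  }
}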
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class RangerRestClientImpl, method exportRangerPolicies.
public RangerExportPolicyList exportRangerPolicies(String sourceRangerEndpoint, String dbName,
    String rangerHiveServiceName, HiveConf hiveConf) throws Exception {
  LOG.info("Ranger endpoint for cluster " + sourceRangerEndpoint);
  if (StringUtils.isEmpty(rangerHiveServiceName)) {
    throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.format("Ranger Service Name "
        + "cannot be empty", ReplUtils.REPL_RANGER_SERVICE));
  }
  String finalUrl = getRangerExportUrl(sourceRangerEndpoint, rangerHiveServiceName, dbName);
  LOG.debug("Url to export policies from source Ranger: {}", finalUrl);
  Retryable retryable = Retryable.builder().withHiveConf(hiveConf).withFailOnException(RuntimeException.class)
      .withRetryOnException(Exception.class).build();
  try {
    return retryable.executeCallable(() -> exportRangerPoliciesPlain(finalUrl, hiveConf));
  } catch (RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new SemanticException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
  }
}
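Both Ranger methods in this class share the same split: retry any Exception (for instance a transient network failure) but fail immediately on RuntimeException. A standalone sketch of that wrapper follows, assuming a hypothetical RangerCall interface that stands in for a single REST call such as exportRangerPoliciesPlain.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.util.Retryable;

public class FailFastRetrySketch {
  // Hypothetical stand-in for one Ranger REST call, e.g. exportRangerPoliciesPlain.
  interface RangerCall<T> {
    T callOnce() throws Exception;
  }

  public static <T> T withRangerRetry(RangerCall<T> call, HiveConf conf) throws Exception {
    // Retry generic Exceptions, but let a RuntimeException abort immediately,
    // matching the builder configuration used in exportRangerPolicies.
    Retryable retryable = Retryable.builder()
        .withHiveConf(conf)
        .withFailOnException(RuntimeException.class)
        .withRetryOnException(Exception.class)
        .build();
    return retryable.executeCallable(call::callOnce);
  }
}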
Use of org.apache.hadoop.hive.ql.exec.util.Retryable in project hive by apache.
The class RangerRestClientImpl, method importRangerPolicies.
@Override
public RangerExportPolicyList importRangerPolicies(RangerExportPolicyList rangerExportPolicyList, String dbName,
    String baseUrl, String rangerHiveServiceName, HiveConf hiveConf) throws Exception {
  String sourceClusterServiceName = null;
  String serviceMapJsonFileName = "hive_servicemap.json";
  String rangerPoliciesJsonFileName = "hive_replicationPolicies.json";
  if (!rangerExportPolicyList.getPolicies().isEmpty()) {
    sourceClusterServiceName = rangerExportPolicyList.getPolicies().get(0).getService();
  }
  if (StringUtils.isEmpty(sourceClusterServiceName)) {
    sourceClusterServiceName = rangerHiveServiceName;
  }
  Map<String, String> serviceMap = new LinkedHashMap<String, String>();
  if (!StringUtils.isEmpty(sourceClusterServiceName) && !StringUtils.isEmpty(rangerHiveServiceName)) {
    serviceMap.put(sourceClusterServiceName, rangerHiveServiceName);
  }
  Gson gson = new GsonBuilder().create();
  String jsonServiceMap = gson.toJson(serviceMap);
  String jsonRangerExportPolicyList = gson.toJson(rangerExportPolicyList);
  String finalUrl = getRangerImportUrl(baseUrl, dbName);
  LOG.debug("URL to import policies on target Ranger: {}", finalUrl);
  Retryable retryable = Retryable.builder().withHiveConf(hiveConf).withFailOnException(RuntimeException.class)
      .withRetryOnException(Exception.class).build();
  try {
    return retryable.executeCallable(() -> importRangerPoliciesPlain(jsonRangerExportPolicyList,
        rangerPoliciesJsonFileName, serviceMapJsonFileName, jsonServiceMap, finalUrl, rangerExportPolicyList, hiveConf));
  } catch (RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new SemanticException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
  }
}
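The service mapping that importRangerPolicies ships alongside the policies is plain Gson serialization of a one-entry map from the source service name to the target one. A small self-contained sketch, with illustrative service names:

import java.util.LinkedHashMap;
import java.util.Map;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

public class ServiceMapJsonSketch {
  public static void main(String[] args) {
    // Map the service name recorded in the exported policies to the target
    // cluster's Ranger Hive service, as importRangerPolicies does; the names
    // here are examples only.
    Map<String, String> serviceMap = new LinkedHashMap<>();
    serviceMap.put("cm_hive_source", "cm_hive_target");

    Gson gson = new GsonBuilder().create();
    String jsonServiceMap = gson.toJson(serviceMap);
    // Prints: {"cm_hive_source":"cm_hive_target"}
    System.out.println(jsonServiceMap);
  }
}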