Use of com.google.common.util.concurrent.ListenableFuture in project Minigames by AddstarMC.
In class BackendCommand, the method onCommand:
@Override
public boolean onCommand(final CommandSender sender, Minigame minigame, String label, String[] args) {
    if (args == null || args.length != 2) {
        return false;
    }
    BackendManager manager = Minigames.plugin.getBackend();
    if (args[0].equalsIgnoreCase("export")) {
        try {
            ListenableFuture<Void> future = manager.exportTo(args[1], Minigames.plugin.getConfig(), new Notifier(sender));
            sender.sendMessage(ChatColor.GOLD + "Exporting backend to " + args[1] + "...");
            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onFailure(Throwable t) {
                    sender.sendMessage(ChatColor.RED + "An internal error occurred while exporting.");
                }

                @Override
                public void onSuccess(Void result) {
                }
            });
        } catch (IllegalArgumentException e) {
            sender.sendMessage(ChatColor.RED + e.getMessage());
        }
    } else if (args[0].equalsIgnoreCase("switch")) {
        try {
            ListenableFuture<Void> future = manager.switchBackend(args[1], Minigames.plugin.getConfig());
            sender.sendMessage(ChatColor.GOLD + "Switching minigames backend to " + args[1] + "...");
            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onFailure(Throwable t) {
                    sender.sendMessage(ChatColor.RED + "An internal error occurred while switching backend.");
                }

                @Override
                public void onSuccess(Void result) {
                    sender.sendMessage(ChatColor.GOLD + "The backend has been successfully switched");
                    sender.sendMessage(ChatColor.GOLD + "!!! This change is " + ChatColor.BOLD + "temporary" + ChatColor.GOLD + ". Please update the config !!!");
                }
            });
        } catch (IllegalArgumentException e) {
            sender.sendMessage(ChatColor.RED + e.getMessage());
        }
    } else {
        sender.sendMessage(ChatColor.RED + "Unknown option " + args[0]);
    }
    return true;
}
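The callback pattern is the core of this snippet: the command returns immediately while the export runs, and the FutureCallback reports the outcome to the sender later. Note that the two-argument Futures.addCallback overload used above was deprecated in Guava 23.0 and has since been removed; newer releases require an explicit Executor. Below is a minimal, runnable sketch of the same pattern against current Guava, with a placeholder task standing in for manager.exportTo(...):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class CallbackSketch {
    public static void main(String[] args) {
        // Decorate a plain ExecutorService so that submit() returns ListenableFuture.
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<Void> future = executor.submit(() -> {
            // Placeholder for the long-running export work.
            return null;
        });
        // Guava >= 23.0: addCallback takes an Executor; directExecutor() runs the
        // callback on whichever thread completes the future.
        Futures.addCallback(future, new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                System.out.println("Export finished.");
            }

            @Override
            public void onFailure(Throwable t) {
                System.err.println("Export failed: " + t.getMessage());
            }
        }, MoreExecutors.directExecutor());
        executor.shutdown();
    }
}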
Use of com.google.common.util.concurrent.ListenableFuture in project metacat by Netflix.
In class ElasticSearchMetacatRefresh, the method _processDatabases:
/**
* Process the list of databases.
*
* @param catalogName catalog name
* @param databaseNames database names
* @return future
*/
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processDatabases(final QualifiedName catalogName, final List<QualifiedName> databaseNames) {
    ListenableFuture<Void> resultFuture = null;
    log.info("Full refresh of catalog {} for databases({}): {}", catalogName, databaseNames.size(), databaseNames);
    final List<ListenableFuture<DatabaseDto>> getDatabaseFutures = databaseNames.stream()
            .map(databaseName -> service.submit(() -> {
                DatabaseDto result = null;
                try {
                    result = getDatabase(databaseName);
                } catch (Exception e) {
                    log.error("Failed to retrieve database: {}", databaseName);
                    elasticSearchUtil.log("ElasticSearchMetacatRefresh.getDatabase", ElasticSearchDoc.Type.database.name(),
                            databaseName.toString(), null, e.getMessage(), e, true);
                }
                return result;
            }))
            .collect(Collectors.toList());
    if (getDatabaseFutures != null && !getDatabaseFutures.isEmpty()) {
        resultFuture = Futures.transformAsync(Futures.successfulAsList(getDatabaseFutures), input -> {
            final ListenableFuture<Void> processDatabaseFuture = indexDatabaseDtos(catalogName, input);
            final List<ListenableFuture<Void>> processDatabaseFutures = input.stream()
                    .filter(NOT_NULL)
                    .map(databaseDto -> {
                        final List<QualifiedName> tableNames = databaseDto.getTables().stream()
                                .map(s -> QualifiedName.ofTable(databaseDto.getName().getCatalogName(),
                                        databaseDto.getName().getDatabaseName(), s))
                                .collect(Collectors.toList());
                        log.info("Full refresh of database {} for tables({}): {}", databaseDto.getName().toString(),
                                databaseDto.getTables().size(), databaseDto.getTables());
                        return processTables(databaseDto.getName(), tableNames);
                    })
                    .filter(NOT_NULL)
                    .collect(Collectors.toList());
            processDatabaseFutures.add(processDatabaseFuture);
            return Futures.transform(Futures.successfulAsList(processDatabaseFutures), Functions.constant(null));
        });
    }
    return resultFuture;
}
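Two details carry the design here: Futures.successfulAsList never fails as a whole (a failed input simply yields a null slot in the result list, which is why the body filters with NOT_NULL), and the final Futures.transform with Functions.constant(null) collapses the aggregate into a ListenableFuture<Void>. A self-contained sketch of that fan-out/fan-in shape, using trivial stand-in lookups and the three-argument transformAsync that newer Guava versions require (the snippet above relies on an older two-argument overload):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class FanOutSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService service =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        // Fan out: one future per item, as _processDatabases does per database.
        List<ListenableFuture<String>> lookups = IntStream.range(0, 5)
                .mapToObj(i -> service.submit(() -> "db-" + i))
                .collect(Collectors.toList());
        // successfulAsList yields null entries for failed inputs instead of
        // failing the aggregate; filter them out, as the original's NOT_NULL does.
        ListenableFuture<Void> done = Futures.transformAsync(
                Futures.successfulAsList(lookups),
                names -> {
                    names.stream().filter(Objects::nonNull)
                            .forEach(n -> System.out.println("indexing " + n));
                    // Collapse to a Void-typed future, mirroring Functions.constant(null).
                    return Futures.<Void>immediateFuture(null);
                },
                MoreExecutors.directExecutor());
        done.get();
        service.shutdown();
    }
}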
Use of com.google.common.util.concurrent.ListenableFuture in project metacat by Netflix.
In class HiveConnectorFastPartitionService, the method getpartitions:
private List<PartitionInfo> getpartitions(@Nonnull @NonNull final String databaseName, @Nonnull @NonNull final String tableName,
        @Nullable final List<String> partitionIds, final String filterExpression, final Sort sort, final Pageable pageable,
        final boolean includePartitionDetails) {
    final FilterPartition filter = new FilterPartition();
    // Does the filter expression reference the batch id field?
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetHandler<List<PartitionDetail>> handler = rs -> {
        final List<PartitionDetail> result = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression)
                    || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionDetail(id, sdId, serdeId,
                        PartitionInfo.builder()
                                .name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name))
                                .auditInfo(auditInfo)
                                .serde(storageInfo)
                                .build()));
            }
        }
        return result;
    };
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    final List<PartitionDetail> partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds,
            SQL_GET_PARTITIONS, handler, sort, pageable);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionDetail partitionDetail : partitions) {
            partIds.add(partitionDetail.getId());
            sdIds.add(partitionDetail.getSdId());
            serdeIds.add(partitionDetail.getSerdeId());
        }
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(
                () -> populateParameters(partIds, SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                    () -> populateParameters(sdIds, SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                    () -> populateParameters(serdeIds, SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        try {
            // Wait at most one hour for all three parameter loads to finish.
            Futures.transform(Futures.successfulAsList(futures), Functions.constant(null)).get(1, TimeUnit.HOURS);
        } catch (Exception e) {
            Throwables.propagate(e);
        }
        for (PartitionDetail partitionDetail : partitions) {
            partitionDetail.getPartitionInfo().setMetadata(partitionParams.get(partitionDetail.getId()));
            partitionDetail.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionDetail.getSdId()));
            partitionDetail.getPartitionInfo().getSerde().setSerdeInfoParameters(serdeParams.get(partitionDetail.getSerdeId()));
        }
    }
    for (PartitionDetail partitionDetail : partitions) {
        partitionInfos.add(partitionDetail.getPartitionInfo());
    }
    return partitionInfos;
}
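The includePartitionDetails branch is a bounded fan-out: up to three parameter queries run in parallel on the executor, and the caller blocks on the aggregate future with get(1, TimeUnit.HOURS), so a hung query cannot stall the request indefinitely. A compact sketch of that idiom, with hypothetical in-memory tasks standing in for populateParameters(...):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BoundedFanOutSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(3));
        // Each task fills its own map, mirroring how partitionParams, sdParams and
        // serdeParams are populated concurrently in the snippet above.
        Map<Long, String> partParams = new ConcurrentHashMap<>();
        Map<Long, String> sdParams = new ConcurrentHashMap<>();
        List<ListenableFuture<?>> futures = new ArrayList<>();
        futures.add(executor.submit(() -> partParams.put(1L, "loaded partition params")));
        futures.add(executor.submit(() -> sdParams.put(2L, "loaded storage params")));
        // Bounded wait: a TimeoutException aborts the batch if the loads exceed
        // the budget, rather than blocking forever.
        Futures.successfulAsList(futures).get(1, TimeUnit.MINUTES);
        System.out.println(partParams + " " + sdParams);
        executor.shutdown();
    }
}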
Use of com.google.common.util.concurrent.ListenableFuture in project gerrit by GerritCodeReview.
In class ReviewDbBatchUpdate, the method executeChangeOps:
private List<ChangeTask> executeChangeOps(boolean parallel, boolean dryrun) throws UpdateException, RestApiException {
    List<ChangeTask> tasks;
    boolean success = false;
    Stopwatch sw = Stopwatch.createStarted();
    try {
        logDebug("Executing change ops (parallel? {})", parallel);
        ListeningExecutorService executor = parallel ? changeUpdateExector : MoreExecutors.newDirectExecutorService();
        tasks = new ArrayList<>(ops.keySet().size());
        try {
            if (notesMigration.commitChangeWrites() && repoView != null) {
                // A NoteDb change may have been rebuilt since the repo was originally
                // opened, so make sure we see that.
                logDebug("Preemptively scanning for repo changes");
                repoView.getRepository().scanForRepoChanges();
            }
            if (!ops.isEmpty() && notesMigration.failChangeWrites()) {
                // Fail fast before attempting any writes if changes are read-only, as
                // this is a programmer error.
                logDebug("Failing early due to read-only Changes table");
                throw new OrmException(NoteDbUpdateManager.CHANGES_READ_ONLY);
            }
            List<ListenableFuture<?>> futures = new ArrayList<>(ops.keySet().size());
            for (Map.Entry<Change.Id, Collection<BatchUpdateOp>> e : ops.asMap().entrySet()) {
                ChangeTask task = new ChangeTask(e.getKey(), e.getValue(), Thread.currentThread(), dryrun);
                tasks.add(task);
                if (!parallel) {
                    logDebug("Direct execution of task for ops: {}", ops);
                }
                futures.add(executor.submit(task));
            }
            if (parallel) {
                logDebug("Waiting on futures for {} ops spanning {} changes", ops.size(), ops.keySet().size());
            }
            Futures.allAsList(futures).get();
            if (notesMigration.commitChangeWrites()) {
                if (!dryrun) {
                    executeNoteDbUpdates(tasks);
                }
            }
            success = true;
        } catch (ExecutionException | InterruptedException e) {
            Throwables.throwIfInstanceOf(e.getCause(), UpdateException.class);
            Throwables.throwIfInstanceOf(e.getCause(), RestApiException.class);
            throw new UpdateException(e);
        } catch (OrmException | IOException e) {
            throw new UpdateException(e);
        }
    } finally {
        metrics.executeChangeOpsLatency.record(success, sw.elapsed(NANOSECONDS), NANOSECONDS);
    }
    return tasks;
}
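In contrast to successfulAsList in the metacat snippets, Futures.allAsList fails as soon as any input future fails; the catch block then unwraps the ExecutionException cause with Throwables.throwIfInstanceOf so that known checked exceptions propagate as themselves rather than wrapped. A sketch of that unwrapping idiom, with a hypothetical task that throws IOException:

import com.google.common.base.Throwables;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class AllAsListSketch {
    public static void main(String[] args) throws IOException {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        List<ListenableFuture<?>> futures = new ArrayList<>();
        futures.add(executor.submit(() -> System.out.println("task 1")));
        futures.add(executor.submit(() -> {
            throw new IOException("task 2 failed");
        }));
        try {
            // allAsList fails fast: the first failed input fails the aggregate.
            Futures.allAsList(futures).get();
        } catch (ExecutionException e) {
            // Re-throw a known checked cause as itself, as executeChangeOps does
            // for UpdateException and RestApiException.
            Throwables.throwIfInstanceOf(e.getCause(), IOException.class);
            throw new RuntimeException(e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } finally {
            executor.shutdown();
        }
    }
}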
Use of com.google.common.util.concurrent.ListenableFuture in project gerrit by GerritCodeReview.
In class RebuildNoteDb, the method run:
@Override
public int run() throws Exception {
    mustHaveValidSite();
    dbInjector = createDbInjector(MULTI_USER);
    threads = ThreadLimiter.limitThreads(dbInjector, threads);
    LifecycleManager dbManager = new LifecycleManager();
    dbManager.add(dbInjector);
    dbManager.start();
    sysInjector = createSysInjector();
    sysInjector.injectMembers(this);
    if (!notesMigration.enabled()) {
        throw die("NoteDb is not enabled.");
    }
    LifecycleManager sysManager = new LifecycleManager();
    sysManager.add(sysInjector);
    sysManager.start();
    ListeningExecutorService executor = newExecutor();
    System.out.println("Rebuilding the NoteDb");
    ImmutableListMultimap<Project.NameKey, Change.Id> changesByProject = getChangesByProject();
    boolean ok;
    Stopwatch sw = Stopwatch.createStarted();
    try (Repository allUsersRepo = repoManager.openRepository(allUsersName)) {
        deleteRefs(RefNames.REFS_DRAFT_COMMENTS, allUsersRepo);
        List<ListenableFuture<Boolean>> futures = new ArrayList<>();
        List<Project.NameKey> projectNames = Ordering.usingToString().sortedCopy(changesByProject.keySet());
        for (Project.NameKey project : projectNames) {
            ListenableFuture<Boolean> future = executor.submit(() -> {
                try (ReviewDb db = unwrapDb(schemaFactory.open())) {
                    return rebuildProject(db, changesByProject, project, allUsersRepo);
                } catch (Exception e) {
                    log.error("Error rebuilding project " + project, e);
                    return false;
                }
            });
            futures.add(future);
        }
        try {
            ok = Iterables.all(Futures.allAsList(futures).get(), Predicates.equalTo(true));
        } catch (InterruptedException | ExecutionException e) {
            log.error("Error rebuilding projects", e);
            ok = false;
        }
    }
    double t = sw.elapsed(TimeUnit.MILLISECONDS) / 1000d;
    System.out.format("Rebuilt %d changes in %.01fs (%.01f/s)\n", changesByProject.size(), t, changesByProject.size() / t);
    return ok ? 0 : 1;
}
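Because each per-project task traps its own exceptions and returns a Boolean, allAsList(...).get() cannot fail on task errors here, and overall success reduces to "did every task return true", computed with Iterables.all and Predicates.equalTo. A minimal sketch of that aggregation, with trivial stand-in tasks in place of rebuildProject(...):

import com.google.common.base.Predicates;
import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class AllTrueSketch {
    public static void main(String[] args) {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        List<ListenableFuture<Boolean>> futures = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            final int n = i;
            // Each task reports its own success or failure as a Boolean, the way
            // rebuildProject(...) does; a real task would catch and map to false.
            futures.add(executor.submit(() -> {
                return n % 2 == 0; // stand-in for per-project work
            }));
        }
        boolean ok;
        try {
            ok = Iterables.all(Futures.allAsList(futures).get(), Predicates.equalTo(true));
        } catch (InterruptedException | ExecutionException e) {
            ok = false;
        }
        executor.shutdown();
        System.out.println(ok ? "all projects rebuilt" : "some rebuilds failed");
    }
}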