Use of java.util.concurrent.TimeUnit.SECONDS in project che by eclipse.
The class JGitConnection, method commit.
@Override
public Revision commit(CommitParams params) throws GitException {
    try {
        // Check repository state
        RepositoryState repositoryState = repository.getRepositoryState();
        if (!repositoryState.canCommit()) {
            throw new GitException(format(MESSAGE_COMMIT_NOT_POSSIBLE, repositoryState.getDescription()));
        }
        if (params.isAmend() && !repositoryState.canAmend()) {
            throw new GitException(format(MESSAGE_COMMIT_AMEND_NOT_POSSIBLE, repositoryState.getDescription()));
        }
        // Check committer
        GitUser committer = getUser();
        if (committer == null) {
            throw new GitException("Committer can't be null");
        }
        String committerName = committer.getName();
        String committerEmail = committer.getEmail();
        if (committerName == null || committerEmail == null) {
            throw new GitException("Git user name and (or) email wasn't set", ErrorCodes.NO_COMMITTER_NAME_OR_EMAIL_DEFINED);
        }
        // Check commit message
        String message = params.getMessage();
        if (message == null) {
            throw new GitException("Message wasn't set");
        }
        Status status = status(StatusFormat.SHORT);
        List<String> specified = params.getFiles();
        List<String> staged = new ArrayList<>();
        staged.addAll(status.getAdded());
        staged.addAll(status.getChanged());
        staged.addAll(status.getRemoved());
        List<String> changed = new ArrayList<>(staged);
        changed.addAll(status.getModified());
        changed.addAll(status.getMissing());
        List<String> specifiedStaged = specified.stream()
                .filter(path -> staged.stream().anyMatch(s -> s.startsWith(path)))
                .collect(Collectors.toList());
        List<String> specifiedChanged = specified.stream()
                .filter(path -> changed.stream().anyMatch(c -> c.startsWith(path)))
                .collect(Collectors.toList());
        // Check that there are changes present for commit, if 'isAmend' is disabled
        if (!params.isAmend()) {
            // Check that there are staged changes present for commit, or any changes if 'isAll' is enabled
            if (status.isClean()) {
                throw new GitException("Nothing to commit, working directory clean");
            } else if (!params.isAll() && (specified.isEmpty() ? staged.isEmpty() : specifiedStaged.isEmpty())) {
                throw new GitException("No changes added to commit");
            }
        } else {
            /*
             * By default JGit does not allow committing specified paths that are unchanged. According to the setAllowEmpty
             * method documentation, setting this flag to true should allow such a commit, but it does not because of a JGit
             * bug: https://bugs.eclipse.org/bugs/show_bug.cgi?id=510685. As a workaround, the paths passed to the commit
             * command will contain only paths that are both specified and changed. If other changes are present but the list
             * of specified-and-changed paths is empty, throw an exception to prevent committing other paths.
             * TODO Remove this check when the bug is fixed.
             */
            if (!specified.isEmpty() && !(params.isAll() ? changed.isEmpty() : staged.isEmpty()) && specifiedChanged.isEmpty()) {
                throw new GitException(format("Changes are present but not changed path%s specified for commit.", specified.size() > 1 ? "s were" : " was"));
            }
        }
        // TODO add 'setAllowEmpty(params.isAmend())' when https://bugs.eclipse.org/bugs/show_bug.cgi?id=510685 is fixed
        CommitCommand commitCommand = getGit().commit()
                .setCommitter(committerName, committerEmail)
                .setAuthor(committerName, committerEmail)
                .setMessage(message)
                .setAll(params.isAll())
                .setAmend(params.isAmend());
        if (!params.isAll()) {
            // TODO change to 'specified.forEach(commitCommand::setOnly)' when https://bugs.eclipse.org/bugs/show_bug.cgi?id=510685 is fixed. See description above.
            specifiedChanged.forEach(commitCommand::setOnly);
        }
        // Check if repository is configured with Gerrit support
        String gerritSupportConfigValue = repository.getConfig()
                .getString(ConfigConstants.CONFIG_GERRIT_SECTION, null, ConfigConstants.CONFIG_KEY_CREATECHANGEID);
        boolean isGerritSupportConfigured = gerritSupportConfigValue != null ? Boolean.valueOf(gerritSupportConfigValue) : false;
        commitCommand.setInsertChangeId(isGerritSupportConfigured);
        RevCommit result = commitCommand.call();
        GitUser gitUser = newDto(GitUser.class).withName(committerName).withEmail(committerEmail);
        return newDto(Revision.class)
                .withBranch(getCurrentBranch())
                .withId(result.getId().getName())
                .withMessage(result.getFullMessage())
                .withCommitTime(MILLISECONDS.convert(result.getCommitTime(), SECONDS))
                .withCommitter(gitUser);
    } catch (GitAPIException exception) {
        throw new GitException(exception.getMessage(), exception);
    }
}
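The only place SECONDS appears here is in withCommitTime(MILLISECONDS.convert(result.getCommitTime(), SECONDS)): JGit's RevCommit.getCommitTime() reports the commit time as seconds since the epoch, while the Revision DTO expects milliseconds. The following standalone sketch isolates that conversion; the commitTimeSeconds value is made up and simply stands in for a real JGit commit.

import java.util.Date;

import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;

public class CommitTimeConversionSketch {
    public static void main(String[] args) {
        // RevCommit.getCommitTime() returns seconds since the epoch as an int;
        // this value is hypothetical and stands in for a real commit.
        int commitTimeSeconds = 1_490_000_000;

        // The form used in JGitConnection.commit(): "convert to MILLISECONDS from SECONDS".
        long viaConvert = MILLISECONDS.convert(commitTimeSeconds, SECONDS);
        // The equivalent, more direct form.
        long viaToMillis = SECONDS.toMillis(commitTimeSeconds);

        System.out.println(viaConvert == viaToMillis);  // true
        System.out.println(new Date(viaConvert));       // the commit time as a readable date
    }
}

SECONDS.toMillis(...) is the shorter spelling of the same widening conversion; the che code uses the convert(...) form, which reads as "target unit, convert from source unit".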
Use of java.util.concurrent.TimeUnit.SECONDS in project presto by prestodb.
The class ServerMainModule, method setup.
@Override
protected void setup(Binder binder) {
ServerConfig serverConfig = buildConfigObject(ServerConfig.class);
if (serverConfig.isCoordinator()) {
    install(new CoordinatorModule());
    binder.bind(new TypeLiteral<Optional<QueryPerformanceFetcher>>() {})
            .toProvider(QueryPerformanceFetcherProvider.class)
            .in(Scopes.SINGLETON);
} else {
    binder.bind(new TypeLiteral<Optional<QueryPerformanceFetcher>>() {})
            .toInstance(Optional.empty());
    // Install no-op resource group manager on workers, since only coordinators manage resource groups.
    binder.bind(ResourceGroupManager.class).to(NoOpResourceGroupManager.class).in(Scopes.SINGLETON);
    // HACK: this binding is needed by SystemConnectorModule, but will only be used on the coordinator
    binder.bind(QueryManager.class).toInstance(newProxy(QueryManager.class, (proxy, method, args) -> {
        throw new UnsupportedOperationException();
    }));
}
configBinder(binder).bindConfig(FeaturesConfig.class);
binder.bind(SqlParser.class).in(Scopes.SINGLETON);
binder.bind(SqlParserOptions.class).toInstance(sqlParserOptions);
bindFailureDetector(binder, serverConfig.isCoordinator());
jaxrsBinder(binder).bind(ThrowableMapper.class);
configBinder(binder).bindConfig(QueryManagerConfig.class);
jsonCodecBinder(binder).bindJsonCodec(ViewDefinition.class);
// session properties
binder.bind(SessionPropertyManager.class).in(Scopes.SINGLETON);
binder.bind(SystemSessionProperties.class).in(Scopes.SINGLETON);
// schema properties
binder.bind(SchemaPropertyManager.class).in(Scopes.SINGLETON);
// table properties
binder.bind(TablePropertyManager.class).in(Scopes.SINGLETON);
// node manager
discoveryBinder(binder).bindSelector("presto");
binder.bind(DiscoveryNodeManager.class).in(Scopes.SINGLETON);
binder.bind(InternalNodeManager.class).to(DiscoveryNodeManager.class).in(Scopes.SINGLETON);
newExporter(binder).export(DiscoveryNodeManager.class).withGeneratedName();
httpClientBinder(binder).bindHttpClient("node-manager", ForNodeManager.class)
        .withTracing()
        .withConfigDefaults(config -> {
            config.setIdleTimeout(new Duration(30, SECONDS));
            config.setRequestTimeout(new Duration(10, SECONDS));
        });
// node scheduler
// TODO: remove from NodePartitioningManager and move to CoordinatorModule
configBinder(binder).bindConfig(NodeSchedulerConfig.class);
binder.bind(NodeScheduler.class).in(Scopes.SINGLETON);
binder.bind(NodeSchedulerExporter.class).in(Scopes.SINGLETON);
binder.bind(NodeTaskMap.class).in(Scopes.SINGLETON);
newExporter(binder).export(NodeScheduler.class).withGeneratedName();
// network topology
// TODO: move to CoordinatorModule when NodeScheduler is moved
install(installModuleIf(
        NodeSchedulerConfig.class,
        config -> LEGACY.equalsIgnoreCase(config.getNetworkTopology()),
        moduleBinder -> moduleBinder.bind(NetworkTopology.class).to(LegacyNetworkTopology.class).in(Scopes.SINGLETON)));
install(installModuleIf(
        NodeSchedulerConfig.class,
        config -> FLAT.equalsIgnoreCase(config.getNetworkTopology()),
        moduleBinder -> moduleBinder.bind(NetworkTopology.class).to(FlatNetworkTopology.class).in(Scopes.SINGLETON)));
// task execution
jaxrsBinder(binder).bind(TaskResource.class);
newExporter(binder).export(TaskResource.class).withGeneratedName();
binder.bind(TaskManager.class).to(SqlTaskManager.class).in(Scopes.SINGLETON);
// workaround for CodeCache GC issue
if (JavaVersion.current().getMajor() == 8) {
    configBinder(binder).bindConfig(CodeCacheGcConfig.class);
    binder.bind(CodeCacheGcTrigger.class).in(Scopes.SINGLETON);
}
// Add monitoring for JVM pauses
binder.bind(PauseMeter.class).in(Scopes.SINGLETON);
newExporter(binder).export(PauseMeter.class).withGeneratedName();
configBinder(binder).bindConfig(MemoryManagerConfig.class);
configBinder(binder).bindConfig(NodeMemoryConfig.class);
configBinder(binder).bindConfig(ReservedSystemMemoryConfig.class);
binder.bind(LocalMemoryManager.class).in(Scopes.SINGLETON);
binder.bind(LocalMemoryManagerExporter.class).in(Scopes.SINGLETON);
newExporter(binder).export(TaskManager.class).withGeneratedName();
binder.bind(TaskExecutor.class).in(Scopes.SINGLETON);
newExporter(binder).export(TaskExecutor.class).withGeneratedName();
binder.bind(LocalExecutionPlanner.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(CompilerConfig.class);
binder.bind(ExpressionCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(ExpressionCompiler.class).withGeneratedName();
configBinder(binder).bindConfig(TaskManagerConfig.class);
binder.bind(IndexJoinLookupStats.class).in(Scopes.SINGLETON);
newExporter(binder).export(IndexJoinLookupStats.class).withGeneratedName();
binder.bind(AsyncHttpExecutionMBean.class).in(Scopes.SINGLETON);
newExporter(binder).export(AsyncHttpExecutionMBean.class).withGeneratedName();
binder.bind(JoinFilterFunctionCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(JoinFilterFunctionCompiler.class).withGeneratedName();
binder.bind(JoinCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(JoinCompiler.class).withGeneratedName();
binder.bind(OrderingCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(OrderingCompiler.class).withGeneratedName();
binder.bind(PagesIndex.Factory.class).to(PagesIndex.DefaultFactory.class);
binder.bind(JoinProbeCompiler.class).in(Scopes.SINGLETON);
newExporter(binder).export(JoinProbeCompiler.class).withGeneratedName();
binder.bind(LookupJoinOperators.class).in(Scopes.SINGLETON);
jsonCodecBinder(binder).bindJsonCodec(TaskStatus.class);
jsonCodecBinder(binder).bindJsonCodec(StageInfo.class);
jsonCodecBinder(binder).bindJsonCodec(TaskInfo.class);
jaxrsBinder(binder).bind(PagesResponseWriter.class);
// exchange client
binder.bind(new TypeLiteral<ExchangeClientSupplier>() {}).to(ExchangeClientFactory.class).in(Scopes.SINGLETON);
httpClientBinder(binder).bindHttpClient("exchange", ForExchange.class)
        .withTracing()
        .withConfigDefaults(config -> {
            config.setIdleTimeout(new Duration(30, SECONDS));
            config.setRequestTimeout(new Duration(10, SECONDS));
            config.setMaxConnectionsPerServer(250);
            config.setMaxContentLength(new DataSize(32, MEGABYTE));
        });
configBinder(binder).bindConfig(ExchangeClientConfig.class);
binder.bind(ExchangeExecutionMBean.class).in(Scopes.SINGLETON);
newExporter(binder).export(ExchangeExecutionMBean.class).withGeneratedName();
// execution
binder.bind(LocationFactory.class).to(HttpLocationFactory.class).in(Scopes.SINGLETON);
// memory manager
jaxrsBinder(binder).bind(MemoryResource.class);
jsonCodecBinder(binder).bindJsonCodec(MemoryInfo.class);
jsonCodecBinder(binder).bindJsonCodec(MemoryPoolAssignmentsRequest.class);
// transaction manager
configBinder(binder).bindConfig(TransactionManagerConfig.class);
// data stream provider
binder.bind(PageSourceManager.class).in(Scopes.SINGLETON);
binder.bind(PageSourceProvider.class).to(PageSourceManager.class).in(Scopes.SINGLETON);
// page sink provider
binder.bind(PageSinkManager.class).in(Scopes.SINGLETON);
binder.bind(PageSinkProvider.class).to(PageSinkManager.class).in(Scopes.SINGLETON);
// metadata
binder.bind(StaticCatalogStore.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(StaticCatalogStoreConfig.class);
binder.bind(MetadataManager.class).in(Scopes.SINGLETON);
binder.bind(Metadata.class).to(MetadataManager.class).in(Scopes.SINGLETON);
// type
binder.bind(TypeRegistry.class).in(Scopes.SINGLETON);
binder.bind(TypeManager.class).to(TypeRegistry.class).in(Scopes.SINGLETON);
jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class);
newSetBinder(binder, Type.class);
// split manager
binder.bind(SplitManager.class).in(Scopes.SINGLETON);
// node partitioning manager
binder.bind(NodePartitioningManager.class).in(Scopes.SINGLETON);
// index manager
binder.bind(IndexManager.class).in(Scopes.SINGLETON);
// handle resolver
binder.install(new HandleJsonModule());
// connector
binder.bind(ConnectorManager.class).in(Scopes.SINGLETON);
// system connector
binder.install(new SystemConnectorModule());
// splits
jsonCodecBinder(binder).bindJsonCodec(TaskUpdateRequest.class);
jsonCodecBinder(binder).bindJsonCodec(ConnectorSplit.class);
jsonBinder(binder).addSerializerBinding(Slice.class).to(SliceSerializer.class);
jsonBinder(binder).addDeserializerBinding(Slice.class).to(SliceDeserializer.class);
jsonBinder(binder).addSerializerBinding(Expression.class).to(ExpressionSerializer.class);
jsonBinder(binder).addDeserializerBinding(Expression.class).to(ExpressionDeserializer.class);
jsonBinder(binder).addDeserializerBinding(FunctionCall.class).to(FunctionCallDeserializer.class);
// query monitor
configBinder(binder).bindConfig(QueryMonitorConfig.class);
binder.bind(QueryMonitor.class).in(Scopes.SINGLETON);
// Determine the NodeVersion
String prestoVersion = serverConfig.getPrestoVersion();
if (prestoVersion == null) {
    prestoVersion = getClass().getPackage().getImplementationVersion();
}
checkState(prestoVersion != null, "presto.version must be provided when it cannot be automatically determined");
NodeVersion nodeVersion = new NodeVersion(prestoVersion);
binder.bind(NodeVersion.class).toInstance(nodeVersion);
// presto announcement
discoveryBinder(binder).bindHttpAnnouncement("presto")
        .addProperty("node_version", nodeVersion.toString())
        .addProperty("coordinator", String.valueOf(serverConfig.isCoordinator()))
        .addProperty("connectorIds", nullToEmpty(serverConfig.getDataSources()));
// server info resource
jaxrsBinder(binder).bind(ServerInfoResource.class);
// plugin manager
binder.bind(PluginManager.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(PluginManagerConfig.class);
binder.bind(CatalogManager.class).in(Scopes.SINGLETON);
// optimizers
binder.bind(PlanOptimizers.class).in(Scopes.SINGLETON);
// block encodings
binder.bind(BlockEncodingManager.class).in(Scopes.SINGLETON);
binder.bind(BlockEncodingSerde.class).to(BlockEncodingManager.class).in(Scopes.SINGLETON);
newSetBinder(binder, new TypeLiteral<BlockEncodingFactory<?>>() {});
jsonBinder(binder).addSerializerBinding(Block.class).to(BlockJsonSerde.Serializer.class);
jsonBinder(binder).addDeserializerBinding(Block.class).to(BlockJsonSerde.Deserializer.class);
// thread visualizer
jaxrsBinder(binder).bind(ThreadResource.class);
// PageSorter
binder.bind(PageSorter.class).to(PagesIndexPageSorter.class).in(Scopes.SINGLETON);
// PageIndexer
binder.bind(PageIndexerFactory.class).to(GroupByHashPageIndexerFactory.class).in(Scopes.SINGLETON);
// Finalizer
binder.bind(FinalizerService.class).in(Scopes.SINGLETON);
// Spiller
binder.bind(SpillerFactory.class).to(BinarySpillerFactory.class).in(Scopes.SINGLETON);
newExporter(binder).export(SpillerFactory.class).withGeneratedName();
}
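Both HTTP client bindings above express their timeouts as io.airlift.units.Duration values built with TimeUnit.SECONDS. The sketch below exercises only the pieces of that type that already appear in these Presto excerpts (the (value, TimeUnit) constructor, toMillis(), and roundTo(...)); it assumes the Airlift units library is on the classpath and is not a substitute for the Guice wiring shown above.

import io.airlift.units.Duration;

import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;

public class AirliftDurationSketch {
    public static void main(String[] args) {
        // Same values as the "node-manager" and "exchange" client defaults above.
        Duration idleTimeout = new Duration(30, SECONDS);
        Duration requestTimeout = new Duration(10, SECONDS);

        // toMillis() and roundTo(...) are the conversions the other Presto excerpts rely on.
        System.out.println("idle timeout in ms: " + idleTimeout.toMillis());
        System.out.println("request timeout in ms: " + requestTimeout.roundTo(MILLISECONDS));
    }
}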
Use of java.util.concurrent.TimeUnit.SECONDS in project presto by prestodb.
The class TaskResource, method getResults.
@GET
@Path("{taskId}/results/{bufferId}/{token}")
@Produces(PRESTO_PAGES)
public void getResults(
        @PathParam("taskId") TaskId taskId,
        @PathParam("bufferId") OutputBufferId bufferId,
        @PathParam("token") final long token,
        @HeaderParam(PRESTO_MAX_SIZE) DataSize maxSize,
        @Suspended AsyncResponse asyncResponse) throws InterruptedException {
    requireNonNull(taskId, "taskId is null");
    requireNonNull(bufferId, "bufferId is null");
    long start = System.nanoTime();
    ListenableFuture<BufferResult> bufferResultFuture = taskManager.getTaskResults(taskId, bufferId, token, maxSize);
    Duration waitTime = randomizeWaitTime(DEFAULT_MAX_WAIT_TIME);
    bufferResultFuture = addTimeout(
            bufferResultFuture,
            () -> BufferResult.emptyResults(taskManager.getTaskInstanceId(taskId), token, false),
            waitTime,
            timeoutExecutor);
    ListenableFuture<Response> responseFuture = Futures.transform(bufferResultFuture, result -> {
        List<SerializedPage> serializedPages = result.getSerializedPages();
        GenericEntity<?> entity = null;
        Status status;
        if (serializedPages.isEmpty()) {
            status = Status.NO_CONTENT;
        } else {
            entity = new GenericEntity<>(serializedPages, new TypeToken<List<Page>>() {}.getType());
            status = Status.OK;
        }
        return Response.status(status)
                .entity(entity)
                .header(PRESTO_TASK_INSTANCE_ID, result.getTaskInstanceId())
                .header(PRESTO_PAGE_TOKEN, result.getToken())
                .header(PRESTO_PAGE_NEXT_TOKEN, result.getNextToken())
                .header(PRESTO_BUFFER_COMPLETE, result.isBufferComplete())
                .build();
    });
    // For hard timeout, add an additional 5 seconds to max wait for thread scheduling contention and GC
    Duration timeout = new Duration(waitTime.toMillis() + 5000, MILLISECONDS);
    bindAsyncResponse(asyncResponse, responseFuture, responseExecutor)
            .withTimeout(timeout, Response.status(Status.NO_CONTENT)
                    .header(PRESTO_TASK_INSTANCE_ID, taskManager.getTaskInstanceId(taskId))
                    .header(PRESTO_PAGE_TOKEN, token)
                    .header(PRESTO_PAGE_NEXT_TOKEN, token)
                    .header(PRESTO_BUFFER_COMPLETE, false)
                    .build());
    responseFuture.addListener(() -> readFromOutputBufferTime.add(Duration.nanosSince(start)), directExecutor());
    asyncResponse.register((CompletionCallback) throwable -> resultsRequestTime.add(Duration.nanosSince(start)));
}
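The time-related piece worth noting here is addTimeout(...), a Presto helper (not shown in this excerpt) that completes the buffer-result future with an empty default result if nothing arrives within waitTime. A rough standalone analogue of that behavior can be assembled from Guava's Futures.withTimeout and Futures.catching; the names pendingResult and "empty-result" below are made up for illustration, and this is a sketch of the idea rather than the actual Presto implementation.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeoutException;

import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import static java.util.concurrent.TimeUnit.SECONDS;

public class FutureTimeoutSketch {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService timeoutExecutor = Executors.newSingleThreadScheduledExecutor();

        // A result that never arrives, standing in for taskManager.getTaskResults(...).
        SettableFuture<String> pendingResult = SettableFuture.create();

        // Fail the future after one second ...
        ListenableFuture<String> withTimeout =
                Futures.withTimeout(pendingResult, 1, SECONDS, timeoutExecutor);

        // ... and map the timeout to a default value, which is roughly what the
        // addTimeout(...) call above does with BufferResult.emptyResults(...).
        ListenableFuture<String> withDefault =
                Futures.catching(withTimeout, TimeoutException.class, t -> "empty-result", directExecutor());

        System.out.println(withDefault.get()); // prints "empty-result" after about a second
        timeoutExecutor.shutdown();
    }
}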
Use of java.util.concurrent.TimeUnit.SECONDS in project neo4j by neo4j.
The class ReadReplicaReplicationIT, method shouldBeAbleToCopyStoresFromCoreToReadReplica.
@Test
public void shouldBeAbleToCopyStoresFromCoreToReadReplica() throws Exception {
    // given
    Map<String, String> params = stringMap(
            CausalClusteringSettings.raft_log_rotation_size.name(), "1k",
            CausalClusteringSettings.raft_log_pruning_frequency.name(), "500ms",
            CausalClusteringSettings.state_machine_flush_window_size.name(), "1",
            CausalClusteringSettings.raft_log_pruning_strategy.name(), "1 entries");
    Cluster cluster = clusterRule.withNumberOfReadReplicas(0)
            .withSharedCoreParams(params)
            .withRecordFormat(HighLimit.NAME)
            .startCluster();
    cluster.coreTx((db, tx) -> {
        Node node = db.createNode(Label.label("L"));
        for (int i = 0; i < 10; i++) {
            node.setProperty("prop-" + i, "this is a quite long string to get to the log limit soonish");
        }
        tx.success();
    });
    long baseVersion = versionBy(cluster.awaitLeader().raftLogDirectory(), Math::max);
    CoreClusterMember coreGraphDatabase = null;
    for (int j = 0; j < 2; j++) {
        coreGraphDatabase = cluster.coreTx((db, tx) -> {
            Node node = db.createNode(Label.label("L"));
            for (int i = 0; i < 10; i++) {
                node.setProperty("prop-" + i, "this is a quite long string to get to the log limit soonish");
            }
            tx.success();
        });
    }
    File raftLogDir = coreGraphDatabase.raftLogDirectory();
    assertEventually("pruning happened", () -> versionBy(raftLogDir, Math::min), greaterThan(baseVersion), 5, SECONDS);
    // when
    cluster.addReadReplicaWithIdAndRecordFormat(4, HighLimit.NAME).start();
    // then
    for (final ReadReplica readReplica : cluster.readReplicas()) {
        assertEventually("read replica available", () -> readReplica.database().isAvailable(0), is(true), 10, SECONDS);
    }
}
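assertEventually(...) comes from Neo4j's test utilities and repeatedly polls a value until a Hamcrest matcher accepts it or the timeout, given here as 5 and 10 SECONDS, expires. The sketch below is a simplified stand-in (plain equality instead of a matcher, a fixed 100 ms poll interval, hypothetical names) that shows how such a helper typically turns a (timeout, TimeUnit) pair into a deadline.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

import static java.util.concurrent.TimeUnit.SECONDS;

public class AssertEventuallySketch {
    // Polls the supplier until it yields the expected value or the timeout elapses.
    static <T> void assertEventually(String message, Supplier<T> actual, T expected,
                                     long timeout, TimeUnit unit) throws Exception {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        T last;
        do {
            last = actual.get();
            if (expected.equals(last)) {
                return;
            }
            TimeUnit.MILLISECONDS.sleep(100);
        } while (System.nanoTime() < deadline);
        throw new TimeoutException(message + ": expected " + expected + " but was " + last);
    }

    public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        // The condition becomes true roughly one second after start, well inside the 10-second budget.
        assertEventually("condition became true",
                () -> System.currentTimeMillis() - start > 1_000, true, 10, SECONDS);
        System.out.println("condition observed within the timeout");
    }
}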
Use of java.util.concurrent.TimeUnit.SECONDS in project presto by prestodb.
The class TestHttpRemoteTask, method testRemoteTaskMismatch.
// This timeout should never be reached, because a daemon thread in the test should fail the test and perform proper cleanup first.
@Test(timeOut = 30000)
public void testRemoteTaskMismatch() throws InterruptedException, ExecutionException {
    Duration idleTimeout = new Duration(3, SECONDS);
    Duration failTimeout = new Duration(20, SECONDS);
    JsonCodec<TaskStatus> taskStatusCodec = JsonCodec.jsonCodec(TaskStatus.class);
    JsonCodec<TaskInfo> taskInfoCodec = JsonCodec.jsonCodec(TaskInfo.class);
    TaskManagerConfig taskManagerConfig = new TaskManagerConfig();
    // Shorten status refresh wait and info update interval so that we can have a shorter test timeout
    taskManagerConfig.setStatusRefreshMaxWait(new Duration(idleTimeout.roundTo(MILLISECONDS) / 100, MILLISECONDS));
    taskManagerConfig.setInfoUpdateInterval(new Duration(idleTimeout.roundTo(MILLISECONDS) / 10, MILLISECONDS));
    AtomicLong lastActivityNanos = new AtomicLong(System.nanoTime());
    HttpProcessor httpProcessor = new HttpProcessor(taskStatusCodec, taskInfoCodec, lastActivityNanos);
    TestingHttpClient testingHttpClient = new TestingHttpClient(httpProcessor);
    HttpRemoteTaskFactory httpRemoteTaskFactory = new HttpRemoteTaskFactory(
            new QueryManagerConfig(),
            taskManagerConfig,
            testingHttpClient,
            new TestSqlTaskManager.MockLocationFactory(),
            taskStatusCodec,
            taskInfoCodec,
            JsonCodec.jsonCodec(TaskUpdateRequest.class),
            new RemoteTaskStats());
    RemoteTask remoteTask = httpRemoteTaskFactory.createRemoteTask(
            TEST_SESSION,
            new TaskId("test", 1, 2),
            new PrestoNode("node-id", URI.create("http://192.0.1.2"), new NodeVersion("version"), false),
            TaskTestUtils.PLAN_FRAGMENT,
            ImmutableMultimap.of(),
            createInitialEmptyOutputBuffers(OutputBuffers.BufferType.BROADCAST),
            new NodeTaskMap.PartitionedSplitCountTracker(i -> {
            }),
            true);
    httpProcessor.setInitialTaskInfo(remoteTask.getTaskInfo());
    remoteTask.start();
    CompletableFuture<Void> testComplete = new CompletableFuture<>();
    asyncRun(
            idleTimeout.roundTo(MILLISECONDS),
            failTimeout.roundTo(MILLISECONDS),
            lastActivityNanos,
            () -> testComplete.complete(null),
            (message, cause) -> testComplete.completeExceptionally(new AssertionError(message, cause)));
    testComplete.get();
    httpRemoteTaskFactory.stop();
    assertTrue(remoteTask.getTaskStatus().getState().isDone(), format("TaskStatus is not in a done state: %s", remoteTask.getTaskStatus()));
    assertEquals(getOnlyElement(remoteTask.getTaskStatus().getFailures()).getErrorCode(), REMOTE_TASK_MISMATCH.toErrorCode());
    assertTrue(remoteTask.getTaskInfo().getTaskStatus().getState().isDone(), format("TaskInfo is not in a done state: %s", remoteTask.getTaskInfo()));
}
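The test drives its idle and fail timeouts from lastActivityNanos, which is presumably bumped by HttpProcessor on every simulated request; asyncRun, a helper not shown in this excerpt, apparently compares System.nanoTime() against that counter and completes testComplete once traffic has been quiet for idleTimeout, or fails it after failTimeout. The snippet below is only a minimal sketch of that kind of nanoTime-based idle detection, with made-up sleep durations and threshold, not the test's actual helper.

import java.util.concurrent.atomic.AtomicLong;

import static java.util.concurrent.TimeUnit.NANOSECONDS;

public class IdleDetectionSketch {
    public static void main(String[] args) throws InterruptedException {
        long idleTimeoutMillis = 300; // stands in for idleTimeout.roundTo(MILLISECONDS)
        AtomicLong lastActivityNanos = new AtomicLong(System.nanoTime());

        Thread.sleep(100);
        lastActivityNanos.set(System.nanoTime()); // simulated activity, as a request handler would record
        Thread.sleep(400);                        // then a quiet period longer than the idle timeout

        long idleMillis = NANOSECONDS.toMillis(System.nanoTime() - lastActivityNanos.get());
        if (idleMillis >= idleTimeoutMillis) {
            System.out.println("no activity for " + idleMillis + " ms; treating the exchange as idle");
        }
    }
}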