Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project fess-crawler by codelibs.
The class EsUrlQueueService, method updateSessionId:
@Override
public void updateSessionId(final String oldSessionId, final String newSessionId) {
    SearchResponse response = null;
    while (true) {
        if (response == null) {
            // first iteration: open a scroll over all queue entries that belong to the old session
            response = getClient().get(c -> c.prepareSearch(index).setTypes(type)
                    .setScroll(new TimeValue(scrollTimeout))
                    .setQuery(QueryBuilders.boolQuery().filter(QueryBuilders.termQuery(SESSION_ID, oldSessionId)))
                    .setSize(scrollSize).execute());
        } else {
            // subsequent iterations: fetch the next scroll page
            final String scrollId = response.getScrollId();
            response = getClient().get(c -> c.prepareSearchScroll(scrollId).setScroll(new TimeValue(scrollTimeout)).execute());
        }
        final SearchHits searchHits = response.getHits();
        if (searchHits.getHits().length == 0) {
            break;
        }
        // rewrite the session id of every hit in a single bulk request
        final BulkResponse bulkResponse = getClient().get(c -> {
            final BulkRequestBuilder builder = c.prepareBulk();
            for (final SearchHit searchHit : searchHits) {
                final UpdateRequestBuilder updateRequest = c.prepareUpdate(index, type, searchHit.getId()).setDoc(SESSION_ID, newSessionId);
                builder.add(updateRequest);
            }
            return builder.execute();
        });
        if (bulkResponse.hasFailures()) {
            throw new EsAccessException(bulkResponse.buildFailureMessage());
        }
    }
}
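Here new TimeValue(scrollTimeout) interprets the long value as milliseconds. A minimal sketch of the equivalent TimeValue factory methods, assuming an example timeout of one minute (the 60000 value is illustrative, not from the source):

import org.elasticsearch.common.unit.TimeValue;

long scrollTimeout = 60000L;                                     // assumed example: one minute in milliseconds
TimeValue byConstructor = new TimeValue(scrollTimeout);          // constructor takes milliseconds
TimeValue byFactory = TimeValue.timeValueMillis(scrollTimeout);  // same duration via factory
TimeValue byMinutes = TimeValue.timeValueMinutes(1);             // also one minute, more readable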
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project flink by apache.
The class Elasticsearch6ApiCallBridge, method configureBulkProcessorBackoff:
@Override
public void configureBulkProcessorBackoff(BulkProcessor.Builder builder, @Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy flushBackoffPolicy) {
    BackoffPolicy backoffPolicy;
    if (flushBackoffPolicy != null) {
        switch (flushBackoffPolicy.getBackoffType()) {
            case CONSTANT:
                // retry with a fixed delay between attempts
                backoffPolicy = BackoffPolicy.constantBackoff(new TimeValue(flushBackoffPolicy.getDelayMillis()), flushBackoffPolicy.getMaxRetryCount());
                break;
            case EXPONENTIAL:
            default:
                // retry with an exponentially growing delay, starting from the configured value
                backoffPolicy = BackoffPolicy.exponentialBackoff(new TimeValue(flushBackoffPolicy.getDelayMillis()), flushBackoffPolicy.getMaxRetryCount());
        }
    } else {
        backoffPolicy = BackoffPolicy.noBackoff();
    }
    builder.setBackoffPolicy(backoffPolicy);
}
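For context, a hedged sketch of how a Flink job might build the BulkFlushBackoffPolicy this bridge consumes; the setter names follow the Flink Elasticsearch connector as recalled here and should be verified against your connector version:

// Assumed API: ElasticsearchSinkBase.BulkFlushBackoffPolicy and ElasticsearchSinkBase.FlushBackoffType
ElasticsearchSinkBase.BulkFlushBackoffPolicy policy = new ElasticsearchSinkBase.BulkFlushBackoffPolicy();
policy.setBackoffType(ElasticsearchSinkBase.FlushBackoffType.EXPONENTIAL); // maps to BackoffPolicy.exponentialBackoff above
policy.setDelayMillis(50);   // initial delay before the first retry (assumed example value)
policy.setMaxRetryCount(8);  // give up after eight retries (assumed example value)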
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch-jdbc by jprante.
The class ClientTests, method testClient:
@Test
public void testClient() throws IOException {
    // disable DNS caching for failover
    Security.setProperty("networkaddress.cache.ttl", "0");
    Settings settings = Settings.settingsBuilder()
            .putArray("elasticsearch.host", new String[] { "localhost:9300", "localhost:9301" })
            .put("transport.type", "org.elasticsearch.transport.netty.FoundNettyTransport")
            .put("transport.found.ssl-ports", 9443)
            .build();
    Integer maxbulkactions = settings.getAsInt("max_bulk_actions", 10000);
    Integer maxconcurrentbulkrequests = settings.getAsInt("max_concurrent_bulk_requests", Runtime.getRuntime().availableProcessors() * 2);
    ByteSizeValue maxvolume = settings.getAsBytesSize("max_bulk_volume", ByteSizeValue.parseBytesSizeValue("10m", ""));
    TimeValue flushinterval = settings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
    Settings.Builder clientSettings = Settings.settingsBuilder()
            .put("cluster.name", settings.get("elasticsearch.cluster", "elasticsearch"))
            .putArray("host", settings.getAsArray("elasticsearch.host"))
            .put("port", settings.getAsInt("elasticsearch.port", 9300))
            .put("sniff", settings.getAsBoolean("elasticsearch.sniff", false))
            .put("autodiscover", settings.getAsBoolean("elasticsearch.autodiscover", false))
            // prevents lookup of names.txt, we don't have it, and marks this node as "feeder".
            // See also module load skipping in JDBCRiverPlugin
            .put("name", "feeder")
            // do not ignore cluster name setting
            .put("client.transport.ignore_cluster_name", false)
            // ping timeout
            .put("client.transport.ping_timeout", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(10)))
            // for sniff sampling
            .put("client.transport.nodes_sampler_interval", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5)))
            // pointing to a non-existing folder disables loading of site plugins
            .put("path.plugins", ".dontexist")
            .put("path.home", System.getProperty("path.home"));
    if (settings.get("transport.type") != null) {
        clientSettings.put("transport.type", settings.get("transport.type"));
    }
    // copy found.no transport settings
    Settings foundTransportSettings = settings.getAsSettings("transport.found");
    if (foundTransportSettings != null) {
        Map<String, String> foundTransportSettingsMap = foundTransportSettings.getAsMap();
        for (Map.Entry<String, String> entry : foundTransportSettingsMap.entrySet()) {
            clientSettings.put("transport.found." + entry.getKey(), entry.getValue());
        }
    }
    try {
        ClientAPI clientAPI = ClientBuilder.builder()
                .put(settings)
                .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxbulkactions)
                .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, maxconcurrentbulkrequests)
                .put(ClientBuilder.MAX_VOLUME_PER_REQUEST, maxvolume)
                .put(ClientBuilder.FLUSH_INTERVAL, flushinterval)
                .setMetric(new ElasticsearchIngestMetric())
                .toBulkTransportClient();
    } catch (NoNodeAvailableException e) {
        // ok
    }
}
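The test leans on the Settings parsers for sizes and durations; a minimal sketch of the helpers behind getAsBytesSize and getAsTime, assuming the Elasticsearch 2.x API used throughout this test (the "90s" value is an illustrative example):

import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

// "10m" parses to ten megabytes; the second argument names the setting in error messages
ByteSizeValue maxVolume = ByteSizeValue.parseBytesSizeValue("10m", "max_bulk_volume");

// "90s" parses to ninety seconds; the default applies when the raw value is null
TimeValue interval = TimeValue.parseTimeValue("90s", TimeValue.timeValueSeconds(5), "flush_interval");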
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch-jdbc by jprante.
The class ColumnContext, method prepareContext:
@Override
protected void prepareContext(S source, Sink sink) throws IOException {
    super.prepareContext(source, sink);
    // map the configurable column names onto the source, with sensible defaults
    source.columnCreatedAt(getSettings().get("created_at", "created_at"));
    source.columnUpdatedAt(getSettings().get("updated_at", "updated_at"));
    source.columnDeletedAt(getSettings().get("deleted_at"));
    source.columnEscape(getSettings().getAsBoolean("column_escape", true));
    // optional overlap applied to the last run timestamp; defaults to no overlap
    TimeValue lastRunTimeStampOverlap = getSettings().getAsTime("last_run_timestamp_overlap", TimeValue.timeValueSeconds(0));
    setLastRunTimeStampOverlap(lastRunTimeStampOverlap);
}
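A minimal sketch of how getAsTime resolves the overlap setting, assuming the same Settings builder API used elsewhere on this page (the "30s" value is illustrative, not from the source):

Settings s = Settings.settingsBuilder()
        .put("last_run_timestamp_overlap", "30s") // assumed example value
        .build();
TimeValue overlap = s.getAsTime("last_run_timestamp_overlap", TimeValue.timeValueSeconds(0));
// overlap.seconds() == 30; when the key is absent, the zero-second default applies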
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
The class ClusterService, method submitStateUpdateTasks:
/**
 * Submits a batch of cluster state update tasks; submitted updates are guaranteed to be processed together,
 * potentially with more tasks of the same executor.
 *
 * @param source   the source of the cluster state update task
 * @param tasks    a map of update tasks and their corresponding listeners
 * @param config   the cluster state update task configuration
 * @param executor the cluster state update task executor; tasks
 *                 that share the same executor will be executed
 *                 in batches on this executor
 * @param <T>      the type of the cluster state update task state
 */
public <T> void submitStateUpdateTasks(final String source, final Map<T, ClusterStateTaskListener> tasks,
        final ClusterStateTaskConfig config, final ClusterStateTaskExecutor<T> executor) {
    if (!lifecycle.started()) {
        return;
    }
    if (tasks.isEmpty()) {
        return;
    }
    try {
        @SuppressWarnings("unchecked")
        ClusterStateTaskExecutor<Object> taskExecutor = (ClusterStateTaskExecutor<Object>) executor;
        // convert to an identity map to check for dups based on update tasks' semantics of using identity instead of equals
        final IdentityHashMap<Object, ClusterStateTaskListener> tasksIdentity = new IdentityHashMap<>(tasks);
        final List<UpdateTask> updateTasks = tasksIdentity.entrySet().stream()
                .map(entry -> new UpdateTask(source, entry.getKey(), config.priority(), taskExecutor, safe(entry.getValue(), logger)))
                .collect(Collectors.toList());
        synchronized (updateTasksPerExecutor) {
            LinkedHashSet<UpdateTask> existingTasks =
                    updateTasksPerExecutor.computeIfAbsent(executor, k -> new LinkedHashSet<>(updateTasks.size()));
            for (UpdateTask existing : existingTasks) {
                if (tasksIdentity.containsKey(existing.task)) {
                    throw new IllegalStateException("task [" + taskExecutor.describeTasks(Collections.singletonList(existing.task))
                            + "] with source [" + source + "] is already queued");
                }
            }
            existingTasks.addAll(updateTasks);
        }
        final UpdateTask firstTask = updateTasks.get(0);
        final TimeValue timeout = config.timeout();
        if (timeout != null) {
            // schedule a timeout handler so listeners are notified if the tasks are not processed in time
            threadPoolExecutor.execute(firstTask, threadPool.scheduler(), timeout, () -> onTimeout(updateTasks, source, timeout));
        } else {
            threadPoolExecutor.execute(firstTask);
        }
    } catch (EsRejectedExecutionException e) {
        // ignore cases where we are shutting down; there is really nothing interesting to be done here
        if (!lifecycle.stoppedOrClosed()) {
            throw e;
        }
    }
}
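A minimal sketch of the timeout side of this path, assuming the standard ClusterStateTaskConfig.build factory (the priority and the 30-second timeout are illustrative values, not from the source):

import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;

// tasks submitted with this config hit the onTimeout(...) callback above if not processed within 30 seconds
ClusterStateTaskConfig config = ClusterStateTaskConfig.build(Priority.NORMAL, TimeValue.timeValueSeconds(30));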