Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
In the class ClusterService, the method submitStateUpdateTasks:
/**
 * Submits a batch of cluster state update tasks; submitted updates are guaranteed to be processed together,
 * potentially with more tasks of the same executor.
 *
 * @param source   the source of the cluster state update task
 * @param tasks    a map of update tasks and their corresponding listeners
 * @param config   the cluster state update task configuration
 * @param executor the cluster state update task executor; tasks
 *                 that share the same executor will be executed
 *                 in batches on this executor
 * @param <T>      the type of the cluster state update task state
 */
public <T> void submitStateUpdateTasks(final String source,
                                       final Map<T, ClusterStateTaskListener> tasks,
                                       final ClusterStateTaskConfig config,
                                       final ClusterStateTaskExecutor<T> executor) {
    if (!lifecycle.started()) {
        return;
    }
    if (tasks.isEmpty()) {
        return;
    }
    try {
        @SuppressWarnings("unchecked")
        ClusterStateTaskExecutor<Object> taskExecutor = (ClusterStateTaskExecutor<Object>) executor;
        // convert to an identity map to check for dups, based on the update tasks' semantics of using identity instead of equals
        final IdentityHashMap<Object, ClusterStateTaskListener> tasksIdentity = new IdentityHashMap<>(tasks);
        final List<UpdateTask> updateTasks = tasksIdentity.entrySet().stream()
            .map(entry -> new UpdateTask(source, entry.getKey(), config.priority(), taskExecutor, safe(entry.getValue(), logger)))
            .collect(Collectors.toList());
        synchronized (updateTasksPerExecutor) {
            LinkedHashSet<UpdateTask> existingTasks =
                updateTasksPerExecutor.computeIfAbsent(executor, k -> new LinkedHashSet<>(updateTasks.size()));
            for (UpdateTask existing : existingTasks) {
                if (tasksIdentity.containsKey(existing.task)) {
                    throw new IllegalStateException("task [" + taskExecutor.describeTasks(Collections.singletonList(existing.task))
                        + "] with source [" + source + "] is already queued");
                }
            }
            existingTasks.addAll(updateTasks);
        }
        final UpdateTask firstTask = updateTasks.get(0);
        final TimeValue timeout = config.timeout();
        if (timeout != null) {
            threadPoolExecutor.execute(firstTask, threadPool.scheduler(), timeout, () -> onTimeout(updateTasks, source, timeout));
        } else {
            threadPoolExecutor.execute(firstTask);
        }
    } catch (EsRejectedExecutionException e) {
        // ignore cases where we are shutting down; there is really nothing to be done here
        if (!lifecycle.stoppedOrClosed()) {
            throw e;
        }
    }
}
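For illustration, here is a hedged sketch of how a caller might submit a batch through this method. The task type, executor, and listener names are hypothetical placeholders, and ClusterStateTaskConfig.build(Priority, TimeValue) is assumed to be available as in the 5.x code above. Note the identity semantics of the duplicate check: resubmitting the same task instance while it is still queued throws IllegalStateException, whereas two distinct-but-equal instances are accepted.

// Hypothetical task type; not part of the code above.
class RefreshMappingTask {
    final String index;
    RefreshMappingTask(String index) { this.index = index; }
}

// Assuming clusterService, refreshExecutor (a ClusterStateTaskExecutor<RefreshMappingTask>)
// and refreshListener (a ClusterStateTaskListener) already exist:
Map<RefreshMappingTask, ClusterStateTaskListener> batch = new HashMap<>();
batch.put(new RefreshMappingTask("index-1"), refreshListener);
batch.put(new RefreshMappingTask("index-2"), refreshListener);
clusterService.submitStateUpdateTasks("refresh-mapping", batch,
        ClusterStateTaskConfig.build(Priority.HIGH, TimeValue.timeValueSeconds(30)),
        refreshExecutor);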
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
In the class ClusterService, the method executeTasks:
private ClusterTasksResult<Object> executeTasks(TaskInputs taskInputs, long startTimeNS, ClusterState previousClusterState) {
    ClusterTasksResult<Object> clusterTasksResult;
    try {
        List<Object> inputs = taskInputs.updateTasks.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
        clusterTasksResult = taskInputs.executor.execute(previousClusterState, inputs);
    } catch (Exception e) {
        TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
        if (logger.isTraceEnabled()) {
            logger.trace(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
                    executionTime, previousClusterState.version(), taskInputs.summary,
                    previousClusterState.nodes(), previousClusterState.routingTable(), previousClusterState.getRoutingNodes()),
                e);
        }
        warnAboutSlowTaskIfNeeded(executionTime, taskInputs.summary);
        clusterTasksResult = ClusterTasksResult.builder()
            .failures(taskInputs.updateTasks.stream().map(updateTask -> updateTask.task)::iterator, e)
            .build(previousClusterState);
    }
    assert clusterTasksResult.executionResults != null;
    assert clusterTasksResult.executionResults.size() == taskInputs.updateTasks.size()
        : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]",
            taskInputs.updateTasks.size(), taskInputs.updateTasks.size() == 1 ? "" : "s",
            clusterTasksResult.executionResults.size());
    boolean assertsEnabled = false;
    assert (assertsEnabled = true);
    if (assertsEnabled) {
        for (UpdateTask updateTask : taskInputs.updateTasks) {
            assert clusterTasksResult.executionResults.containsKey(updateTask.task) : "missing task result for " + updateTask;
        }
    }
    return clusterTasksResult;
}
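The assertsEnabled dance near the end is a standard Java idiom: the assignment inside the assert statement only executes when the JVM runs with assertions enabled (-ea), so the flag reports whether the per-task verification loop should run. A minimal standalone illustration:

public class AssertProbe {
    public static void main(String[] args) {
        boolean assertsEnabled = false;
        // the assignment evaluates (and the assert trivially passes) only when assertions are on
        assert (assertsEnabled = true);
        System.out.println("assertions enabled: " + assertsEnabled);
    }
}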
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
In the class DelayedAllocationService, the method scheduleIfNeeded:
/**
* Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule.
*/
private void scheduleIfNeeded(long currentNanoTime, ClusterState state) {
    assertClusterStateThread();
    long nextDelayNanos = UnassignedInfo.findNextDelayedAllocation(currentNanoTime, state);
    if (nextDelayNanos < 0) {
        logger.trace("no need to schedule reroute - no delayed unassigned shards");
        removeTaskAndCancel();
    } else {
        TimeValue nextDelay = TimeValue.timeValueNanos(nextDelayNanos);
        final boolean earlierRerouteNeeded;
        DelayedRerouteTask existingTask = delayedRerouteTask.get();
        DelayedRerouteTask newTask = new DelayedRerouteTask(nextDelay, currentNanoTime);
        if (existingTask == null) {
            earlierRerouteNeeded = true;
        } else if (newTask.scheduledTimeToRunInNanos() < existingTask.scheduledTimeToRunInNanos()) {
            // we need an earlier delayed reroute
            logger.trace("cancelling existing delayed reroute task as delayed reroute has to happen [{}] earlier",
                TimeValue.timeValueNanos(existingTask.scheduledTimeToRunInNanos() - newTask.scheduledTimeToRunInNanos()));
            existingTask.cancelScheduling();
            earlierRerouteNeeded = true;
        } else {
            earlierRerouteNeeded = false;
        }
        if (earlierRerouteNeeded) {
            logger.info("scheduling reroute for delayed shards in [{}] ({} delayed shards)",
                nextDelay, UnassignedInfo.getNumberOfDelayedUnassigned(state));
            DelayedRerouteTask currentTask = delayedRerouteTask.getAndSet(newTask);
            assert existingTask == currentTask || currentTask == null;
            newTask.schedule();
        } else {
            logger.trace("no need to reschedule delayed reroute - currently scheduled delayed reroute in [{}] is enough", nextDelay);
        }
    }
}
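The method hinges on a keep-or-replace decision over an AtomicReference: a new task only displaces the current one when it would fire earlier. A minimal sketch of that pattern, independent of the Elasticsearch classes (Task, cancel, and schedule below are illustrative placeholders, not the real DelayedRerouteTask API):

import java.util.concurrent.atomic.AtomicReference;

class RescheduleSketch {
    static class Task {
        final long runAtNanos;
        Task(long runAtNanos) { this.runAtNanos = runAtNanos; }
        void cancel() { /* cancel the underlying scheduled future */ }
        void schedule() { /* hand off to a scheduler */ }
    }

    private final AtomicReference<Task> current = new AtomicReference<>();

    void scheduleIfEarlier(Task candidate) {
        Task existing = current.get();
        if (existing != null && candidate.runAtNanos >= existing.runAtNanos) {
            return; // the already-scheduled task runs early enough; keep it
        }
        if (existing != null) {
            existing.cancel(); // the candidate must run earlier, so drop the old task
        }
        current.set(candidate);
        candidate.schedule();
    }
}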
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch-jdbc by jprante.
In the class ClientTests, the method testFoundSettings:
@Test
public void testFoundSettings() throws IOException {
    // disable DNS caching for failover
    Security.setProperty("networkaddress.cache.ttl", "0");
    Settings settings = Settings.settingsBuilder()
        .putArray("elasticsearch.host", new String[] { "localhost:9300", "localhost:9301" })
        .put("transport.type", "org.elasticsearch.transport.netty.FoundNettyTransport")
        .put("transport.found.ssl-ports", 9443)
        .build();
    Integer maxbulkactions = settings.getAsInt("max_bulk_actions", 10000);
    Integer maxconcurrentbulkrequests = settings.getAsInt("max_concurrent_bulk_requests",
        Runtime.getRuntime().availableProcessors() * 2);
    ByteSizeValue maxvolume = settings.getAsBytesSize("max_bulk_volume", ByteSizeValue.parseBytesSizeValue("10m", ""));
    TimeValue flushinterval = settings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
    Settings.Builder clientSettings = Settings.settingsBuilder()
        .put("cluster.name", settings.get("elasticsearch.cluster", "elasticsearch"))
        .putArray("host", settings.getAsArray("elasticsearch.host"))
        .put("port", settings.getAsInt("elasticsearch.port", 9300))
        .put("sniff", settings.getAsBoolean("elasticsearch.sniff", false))
        .put("autodiscover", settings.getAsBoolean("elasticsearch.autodiscover", false))
        // prevents lookup of names.txt, we don't have it, and marks this node as "feeder".
        // See also module load skipping in JDBCRiverPlugin
        .put("name", "feeder")
        // do not ignore cluster name setting
        .put("client.transport.ignore_cluster_name", false)
        // ping timeout
        .put("client.transport.ping_timeout", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5)))
        // for sniff sampling
        .put("client.transport.nodes_sampler_interval", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5)))
        // pointing to a non-existing folder disables loading site plugins
        .put("path.plugins", ".dontexist")
        .put("path.home", System.getProperty("path.home"));
    try {
        ClientAPI clientAPI = ClientBuilder.builder()
            .put(settings)
            .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxbulkactions)
            .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, maxconcurrentbulkrequests)
            .put(ClientBuilder.MAX_VOLUME_PER_REQUEST, maxvolume)
            .put(ClientBuilder.FLUSH_INTERVAL, flushinterval)
            .setMetric(new ElasticsearchIngestMetric())
            .toBulkTransportClient();
    } catch (NoNodeAvailableException e) {
        // ok, no live node available in this test environment
    }
}
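For context, getAsTime in this 2.x-era Settings API parses duration strings such as "30s" or "1m" into a TimeValue and falls back to the supplied default when the key is absent. A small sketch under that assumption:

Settings s = Settings.settingsBuilder().put("flush_interval", "30s").build();
TimeValue flush = s.getAsTime("flush_interval", TimeValue.timeValueSeconds(5)); // parsed: 30s
TimeValue ping = s.getAsTime("ping_timeout", TimeValue.timeValueSeconds(5));    // key absent: default 5s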
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch-jdbc by jprante.
In the class StandardContext, the method prepareContext:
@SuppressWarnings("unchecked")
protected void prepareContext(S source, Sink sink) throws IOException {
    Map<String, Object> params = settings.getAsStructuredMap();
    List<SQLCommand> sql = SQLCommand.parse(params);
    String rounding = XContentMapValues.nodeStringValue(params.get("rounding"), null);
    int scale = XContentMapValues.nodeIntegerValue(params.get("scale"), 2);
    boolean autocommit = XContentMapValues.nodeBooleanValue(params.get("autocommit"), false);
    int fetchsize = 10;
    String fetchSizeStr = XContentMapValues.nodeStringValue(params.get("fetchsize"), null);
    if ("min".equals(fetchSizeStr)) {
        // for MySQL streaming mode
        fetchsize = Integer.MIN_VALUE;
    } else if (fetchSizeStr != null) {
        try {
            fetchsize = Integer.parseInt(fetchSizeStr);
        } catch (Exception e) {
            // ignore unparseable
        }
    } else {
        // if MySQL, enable streaming mode hack by default
        String url = XContentMapValues.nodeStringValue(params.get("url"), null);
        if (url != null && url.startsWith("jdbc:mysql")) {
            // for MySQL streaming mode
            fetchsize = Integer.MIN_VALUE;
        }
    }
    int maxrows = XContentMapValues.nodeIntegerValue(params.get("max_rows"), 0);
    int maxretries = XContentMapValues.nodeIntegerValue(params.get("max_retries"), 3);
    TimeValue maxretrywait = XContentMapValues.nodeTimeValue(params.get("max_retries_wait"), TimeValue.timeValueSeconds(30));
    String resultSetType = XContentMapValues.nodeStringValue(params.get("resultset_type"), "TYPE_FORWARD_ONLY");
    String resultSetConcurrency = XContentMapValues.nodeStringValue(params.get("resultset_concurrency"), "CONCUR_UPDATABLE");
    boolean shouldIgnoreNull = XContentMapValues.nodeBooleanValue(params.get("ignore_null_values"), false);
    boolean shouldDetectGeo = XContentMapValues.nodeBooleanValue(params.get("detect_geo"), true);
    boolean shouldDetectJson = XContentMapValues.nodeBooleanValue(params.get("detect_json"), true);
    boolean shouldPrepareDatabaseMetadata = XContentMapValues.nodeBooleanValue(params.get("prepare_database_metadata"), false);
    boolean shouldPrepareResultSetMetadata = XContentMapValues.nodeBooleanValue(params.get("prepare_resultset_metadata"), false);
    Map<String, Object> columnNameMap = (Map<String, Object>) params.get("column_name_map");
    int queryTimeout = XContentMapValues.nodeIntegerValue(params.get("query_timeout"), 1800);
    Map<String, Object> connectionProperties = (Map<String, Object>) params.get("connection_properties");
    boolean shouldTreatBinaryAsString = XContentMapValues.nodeBooleanValue(params.get("treat_binary_as_string"), false);
    source.setRounding(rounding)
        .setScale(scale)
        .setStatements(sql)
        .setAutoCommit(autocommit)
        .setMaxRows(maxrows)
        .setFetchSize(fetchsize)
        .setRetries(maxretries)
        .setMaxRetryWait(maxretrywait)
        .setResultSetType(resultSetType)
        .setResultSetConcurrency(resultSetConcurrency)
        .shouldIgnoreNull(shouldIgnoreNull)
        .shouldDetectGeo(shouldDetectGeo)
        .shouldDetectJson(shouldDetectJson)
        .shouldPrepareDatabaseMetadata(shouldPrepareDatabaseMetadata)
        .shouldPrepareResultSetMetadata(shouldPrepareResultSetMetadata)
        .setColumnNameMap(columnNameMap)
        .setQueryTimeout(queryTimeout)
        .setConnectionProperties(connectionProperties)
        .shouldTreatBinaryAsString(shouldTreatBinaryAsString);
    setSource(source);
    setSink(sink);
}
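XContentMapValues.nodeTimeValue follows the same default-on-missing pattern as the other readers above; assuming it parses duration strings the way the max_retries_wait default suggests, a brief sketch:

Map<String, Object> params = new HashMap<>();
params.put("max_retries_wait", "45s");
// key present: parsed into a 45-second TimeValue
TimeValue wait = XContentMapValues.nodeTimeValue(params.get("max_retries_wait"), TimeValue.timeValueSeconds(30));
// key absent: falls back to the 30-second default
TimeValue fallback = XContentMapValues.nodeTimeValue(params.get("no_such_key"), TimeValue.timeValueSeconds(30));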