Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
From the class VerifyNodeRepositoryAction, method verify:
public void verify(String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
    final DiscoveryNodes discoNodes = clusterService.state().nodes();
    final DiscoveryNode localNode = discoNodes.getLocalNode();
    final ObjectContainer<DiscoveryNode> masterAndDataNodes = discoNodes.getMasterAndDataNodes().values();
    final List<DiscoveryNode> nodes = new ArrayList<>();
    for (ObjectCursor<DiscoveryNode> cursor : masterAndDataNodes) {
        DiscoveryNode node = cursor.value;
        nodes.add(node);
    }
    final CopyOnWriteArrayList<VerificationFailure> errors = new CopyOnWriteArrayList<>();
    final AtomicInteger counter = new AtomicInteger(nodes.size());
    for (final DiscoveryNode node : nodes) {
        if (node.equals(localNode)) {
            try {
                doVerify(repository, verificationToken, localNode);
            } catch (Exception e) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e);
                errors.add(new VerificationFailure(node.getId(), e));
            }
            if (counter.decrementAndGet() == 0) {
                finishVerification(listener, nodes, errors);
            }
        } else {
            transportService.sendRequest(node, ACTION_NAME, new VerifyNodeRepositoryRequest(repository, verificationToken),
                new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                    @Override
                    public void handleResponse(TransportResponse.Empty response) {
                        if (counter.decrementAndGet() == 0) {
                            finishVerification(listener, nodes, errors);
                        }
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        errors.add(new VerificationFailure(node.getId(), exp));
                        if (counter.decrementAndGet() == 0) {
                            finishVerification(listener, nodes, errors);
                        }
                    }
                });
        }
    }
}
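The warn call above is the pattern this page collects: the ParameterizedMessage is built inside a Log4j Supplier lambda, so the message is only constructed when the WARN level is actually enabled, and the cast to Supplier<?> pins the call to that overload (Logger also offers MessageSupplier variants, so an uncast lambda would be ambiguous). A minimal self-contained sketch of the same idiom, with a made-up logger, repository name, and exception:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyWarnSketch {
    // hypothetical logger; any Log4j 2 Logger behaves the same way
    private static final Logger logger = LogManager.getLogger(LazyWarnSketch.class);

    public static void main(String[] args) {
        String repository = "my-repo";                         // hypothetical repository name
        Exception e = new IllegalStateException("token mismatch"); // stand-in for the verification failure
        // The lambda body runs only if WARN is enabled; the cast selects the Supplier<?> overload.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e);
    }
}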
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
From the class FsProbe, method ioStats:
final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, final FsInfo previous) {
    try {
        final Map<Tuple<Integer, Integer>, FsInfo.DeviceStats> deviceMap = new HashMap<>();
        if (previous != null && previous.getIoStats() != null && previous.getIoStats().devicesStats != null) {
            for (int i = 0; i < previous.getIoStats().devicesStats.length; i++) {
                FsInfo.DeviceStats deviceStats = previous.getIoStats().devicesStats[i];
                deviceMap.put(Tuple.tuple(deviceStats.majorDeviceNumber, deviceStats.minorDeviceNumber), deviceStats);
            }
        }
        List<FsInfo.DeviceStats> devicesStats = new ArrayList<>();
        List<String> lines = readProcDiskStats();
        if (!lines.isEmpty()) {
            for (String line : lines) {
                String[] fields = line.trim().split("\\s+");
                final int majorDeviceNumber = Integer.parseInt(fields[0]);
                final int minorDeviceNumber = Integer.parseInt(fields[1]);
                if (!devicesNumbers.contains(Tuple.tuple(majorDeviceNumber, minorDeviceNumber))) {
                    continue;
                }
                final String deviceName = fields[2];
                final long readsCompleted = Long.parseLong(fields[3]);
                final long sectorsRead = Long.parseLong(fields[5]);
                final long writesCompleted = Long.parseLong(fields[7]);
                final long sectorsWritten = Long.parseLong(fields[9]);
                final FsInfo.DeviceStats deviceStats = new FsInfo.DeviceStats(majorDeviceNumber, minorDeviceNumber, deviceName,
                    readsCompleted, sectorsRead, writesCompleted, sectorsWritten,
                    deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)));
                devicesStats.add(deviceStats);
            }
        }
        return new FsInfo.IoStats(devicesStats.toArray(new FsInfo.DeviceStats[devicesStats.size()]));
    } catch (Exception e) {
        // do not fail Elasticsearch if something unexpected
        // happens here
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e);
        return null;
    }
}
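The field indices above follow the fixed column layout of /proc/diskstats: after the major number, minor number, and device name, the whitespace-split line holds reads completed at index 3, sectors read at index 5, writes completed at index 7, and sectors written at index 9. A standalone sketch of the same extraction, using an illustrative sample line rather than a real read of /proc/diskstats:

public class DiskStatsLineSketch {
    public static void main(String[] args) {
        // Illustrative sample only; real lines come from /proc/diskstats.
        String line = " 253       0 vda 4133 12 204586 1270 1892 402 30970 2583 0 1600 3850";
        String[] fields = line.trim().split("\\s+");
        int majorDeviceNumber = Integer.parseInt(fields[0]);
        int minorDeviceNumber = Integer.parseInt(fields[1]);
        String deviceName = fields[2];
        long readsCompleted = Long.parseLong(fields[3]);
        long sectorsRead = Long.parseLong(fields[5]);
        long writesCompleted = Long.parseLong(fields[7]);
        long sectorsWritten = Long.parseLong(fields[9]);
        System.out.printf("%d:%d %s reads=%d sectorsRead=%d writes=%d sectorsWritten=%d%n",
            majorDeviceNumber, minorDeviceNumber, deviceName,
            readsCompleted, sectorsRead, writesCompleted, sectorsWritten);
    }
}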
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
From the class IndicesClusterStateServiceRandomUpdatesTests, method testRandomClusterStateUpdates:
public void testRandomClusterStateUpdates() {
    // we have an IndicesClusterStateService per node in the cluster
    final Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap = new HashMap<>();
    ClusterState state = randomInitialClusterState(clusterStateServiceMap, MockIndicesService::new);
    // each of the following iterations represents a new cluster state update processed on all nodes
    for (int i = 0; i < 30; i++) {
        logger.info("Iteration {}", i);
        final ClusterState previousState = state;
        // calculate new cluster state
        for (int j = 0; j < randomInt(3); j++) {
            // multiple iterations to simulate batching of cluster states
            try {
                state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new);
            } catch (AssertionError error) {
                ClusterState finalState = state;
                logger.error((org.apache.logging.log4j.util.Supplier<?>) () ->
                    new ParameterizedMessage("failed to random change state. last good state: \n{}", finalState), error);
                throw error;
            }
        }
        // apply cluster state to nodes (incl. master)
        for (DiscoveryNode node : state.nodes()) {
            IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
            ClusterState localState = adaptClusterStateToLocalNode(state, node);
            ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node);
            final ClusterChangedEvent event = new ClusterChangedEvent("simulated change " + i, localState, previousLocalState);
            try {
                indicesClusterStateService.applyClusterState(event);
            } catch (AssertionError error) {
                logger.error((org.apache.logging.log4j.util.Supplier<?>) () ->
                    new ParameterizedMessage("failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}",
                        node, event.previousState(), event.state()), error);
                throw error;
            }
            // check that cluster state has been properly applied to node
            assertClusterStateMatchesNodeState(localState, indicesClusterStateService);
        }
    }
    // TODO: check if we can go to green by starting all shards and finishing all iterations
    logger.info("Final cluster state: {}", state);
}
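The test defers message construction exactly like the production code; it merely spells out org.apache.logging.log4j.util.Supplier in full, presumably to avoid a clash with java.util.function.Supplier elsewhere in the class. For reference, a ParameterizedMessage substitutes its arguments into the {} placeholders in order, which can be observed directly with getFormattedMessage(); the argument values below are illustrative stand-ins:

import org.apache.logging.log4j.message.ParameterizedMessage;

public class PlaceholderSketch {
    public static void main(String[] args) {
        // Arguments fill the {} placeholders in the order given.
        ParameterizedMessage message = new ParameterizedMessage(
            "failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}",
            "node_t0", "previous-state-summary", "new-state-summary");
        System.out.println(message.getFormattedMessage());
    }
}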
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
From the class TransportCloseIndexAction, method masterOperation:
@Override
protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) {
    final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
    CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
        .ackTimeout(request.timeout())
        .masterNodeTimeout(request.masterNodeTimeout())
        .indices(concreteIndices);
    indexStateService.closeIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            listener.onResponse(new CloseIndexResponse(response.isAcknowledged()));
        }

        @Override
        public void onFailure(Exception t) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
            listener.onFailure(t);
        }
    });
}
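Note the (Object) cast on concreteIndices: without it the Index[] array would be taken as the whole varargs argument list, so only the first index would reach the single {}; with the cast the array is one argument and ParameterizedMessage renders its elements together. The sketch below shows the difference with made-up index names (the delete-index example that follows passes a Set, whose toString already lists every element, so it needs no cast):

import org.apache.logging.log4j.message.ParameterizedMessage;

public class ArrayArgumentSketch {
    public static void main(String[] args) {
        String[] concreteIndices = { "logs-2017.03", "metrics-2017.03" };  // hypothetical index names
        // No cast: the array becomes the argument list, so only its first element fills the placeholder.
        System.out.println(new ParameterizedMessage("failed to close indices [{}]", concreteIndices).getFormattedMessage());
        // With the cast: the whole array is a single argument and all elements are listed.
        System.out.println(new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices).getFormattedMessage());
    }
}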
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
From the class TransportDeleteIndexAction, method masterOperation:
@Override
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) {
    final Set<Index> concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request)));
    if (concreteIndices.isEmpty()) {
        listener.onResponse(new DeleteIndexResponse(true));
        return;
    }
    DeleteIndexClusterStateUpdateRequest deleteRequest = new DeleteIndexClusterStateUpdateRequest()
        .ackTimeout(request.timeout())
        .masterNodeTimeout(request.masterNodeTimeout())
        .indices(concreteIndices.toArray(new Index[concreteIndices.size()]));
    deleteIndexService.deleteIndices(deleteRequest, new ActionListener<ClusterStateUpdateResponse>() {

        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            listener.onResponse(new DeleteIndexResponse(response.isAcknowledged()));
        }

        @Override
        public void onFailure(Exception t) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
            listener.onFailure(t);
        }
    });
}