Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class Engine, method getSegmentInfo.
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
    ensureOpen();
    Map<String, Segment> segments = new HashMap<>();
    // first, go over and compute the search ones...
    Searcher searcher = acquireSearcher("segments");
    try {
        for (LeafReaderContext reader : searcher.reader().leaves()) {
            SegmentCommitInfo info = segmentReader(reader.reader()).getSegmentInfo();
            assert !segments.containsKey(info.info.name);
            Segment segment = new Segment(info.info.name);
            segment.search = true;
            segment.docCount = reader.reader().numDocs();
            segment.delDocCount = reader.reader().numDeletedDocs();
            segment.version = info.info.getVersion();
            segment.compound = info.info.getUseCompoundFile();
            try {
                segment.sizeInBytes = info.sizeInBytes();
            } catch (IOException e) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
            }
            final SegmentReader segmentReader = segmentReader(reader.reader());
            segment.memoryInBytes = segmentReader.ramBytesUsed();
            if (verbose) {
                segment.ramTree = Accountables.namedAccountable("root", segmentReader);
            }
            // TODO: add more fine grained mem stats values to per segment info here
            segments.put(info.info.name, segment);
        }
    } finally {
        searcher.close();
    }
    // now, correlate or add the committed ones...
    if (lastCommittedSegmentInfos != null) {
        SegmentInfos infos = lastCommittedSegmentInfos;
        for (SegmentCommitInfo info : infos) {
            Segment segment = segments.get(info.info.name);
            if (segment == null) {
                segment = new Segment(info.info.name);
                segment.search = false;
                segment.committed = true;
                segment.docCount = info.info.maxDoc();
                segment.delDocCount = info.getDelCount();
                segment.version = info.info.getVersion();
                segment.compound = info.info.getUseCompoundFile();
                try {
                    segment.sizeInBytes = info.sizeInBytes();
                } catch (IOException e) {
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
                }
                segments.put(info.info.name, segment);
            } else {
                segment.committed = true;
            }
        }
    }
    Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
    // sort by generation; Long.compare avoids the overflow that casting a
    // long subtraction down to int can produce
    Arrays.sort(segmentsArr, new Comparator<Segment>() {
        @Override
        public int compare(Segment o1, Segment o2) {
            return Long.compare(o1.getGeneration(), o2.getGeneration());
        }
    });
    return segmentsArr;
}
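All of these snippets rely on the same Log4j 2 idiom: casting the lambda to Supplier<?> selects the lazy trace(Supplier<?>, Throwable) overload, so the ParameterizedMessage is only constructed when TRACE is actually enabled. A minimal self-contained sketch of the idiom (the class name and failure scenario are illustrative, not from the Elasticsearch source):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyTraceSketch {

    private static final Logger logger = LogManager.getLogger(LazyTraceSketch.class);

    void reportSizeFailure(String segmentName, Exception e) {
        // The lambda body runs only if TRACE is enabled, so no message object
        // is allocated on the hot path otherwise. The cast to the Log4j
        // Supplier disambiguates from the trace(Object, Throwable) overload.
        logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", segmentName), e);
    }
}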
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class PeerRecoveryTargetService, method doRecovery.
private void doRecovery(final long recoveryId) {
    final StartRecoveryRequest request;
    final CancellableThreads cancellableThreads;
    final RecoveryState.Timer timer;
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
        if (recoveryRef == null) {
            logger.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId);
            return;
        }
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        cancellableThreads = recoveryTarget.cancellableThreads();
        timer = recoveryTarget.state().getTimer();
        try {
            assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node";
            request = getStartRecoveryRequest(recoveryTarget);
            logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
            recoveryTarget.indexShard().prepareForIndexRecovery();
        } catch (final Exception e) {
            // this will be logged as warning later on...
            logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e);
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), true);
            return;
        }
    }
    try {
        logger.trace("{} starting recovery from {}", request.shardId(), request.sourceNode());
        final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
        cancellableThreads.execute(() -> responseHolder.set(
            transportService.submitRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request,
                new FutureTransportResponseHandler<RecoveryResponse>() {
                    @Override
                    public RecoveryResponse newInstance() {
                        return new RecoveryResponse();
                    }
                }).txGet()));
        final RecoveryResponse recoveryResponse = responseHolder.get();
        final TimeValue recoveryTime = new TimeValue(timer.time());
        // do this through ongoing recoveries to remove it from the collection
        onGoingRecoveries.markRecoveryAsDone(recoveryId);
        if (logger.isTraceEnabled()) {
            StringBuilder sb = new StringBuilder();
            sb.append('[').append(request.shardId().getIndex().getName()).append(']')
                .append('[').append(request.shardId().id()).append("] ");
            sb.append("recovery completed from ").append(request.sourceNode())
                .append(", took[").append(recoveryTime).append("]\n");
            sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]")
                .append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
                .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time))
                .append("], throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']')
                .append("\n");
            sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size())
                .append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
            sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
            sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]")
                .append(" transaction log operations")
                .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
                .append("\n");
            logger.trace("{}", sb);
        } else {
            logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), request.sourceNode(), recoveryTime);
        }
    } catch (CancellableThreads.ExecutionCancelledException e) {
        logger.trace("recovery cancelled", e);
    } catch (Exception e) {
        if (logger.isTraceEnabled()) {
            logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] Got exception on recovery",
                request.shardId().getIndex().getName(), request.shardId().id()), e);
        }
        Throwable cause = ExceptionsHelper.unwrapCause(e);
        if (cause instanceof CancellableThreads.ExecutionCancelledException) {
            // this can also come from the source wrapped in a RemoteTransportException
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(request, "source has canceled the recovery", cause), false);
            return;
        }
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        // do it twice, in case we have double transport exception
        cause = ExceptionsHelper.unwrapCause(cause);
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException
                || cause instanceof ShardNotFoundException) {
            // if the target is not ready yet, retry
            retryRecovery(recoveryId, "remote shard not ready", recoverySettings.retryDelayStateSync(),
                recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof DelayRecoveryException) {
            retryRecovery(recoveryId, cause, recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof ConnectTransportException) {
            logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", request.shardId(),
                recoverySettings.retryDelayNetwork(), cause.getMessage());
            retryRecovery(recoveryId, cause.getMessage(), recoverySettings.retryDelayNetwork(),
                recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof AlreadyClosedException) {
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(request, "source shard is closed", cause), false);
            return;
        }
        onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true);
    }
}
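The two unwrap rounds in the catch block handle a cause that arrives double-wrapped, for example a RecoveryEngineException nested inside a RemoteTransportException. A standalone sketch of the peel-then-inspect pattern, with a hypothetical WrapperException marker standing in for what ExceptionsHelper.unwrapCause does in Elasticsearch:

public class CauseUnwrapSketch {

    /** Hypothetical marker for exceptions that merely wrap a more meaningful cause. */
    interface WrapperException {}

    static class RemoteWrapper extends RuntimeException implements WrapperException {
        RemoteWrapper(Throwable cause) { super(cause); }
    }

    /** Stand-in for ExceptionsHelper.unwrapCause: peel wrapper layers until a real cause surfaces. */
    static Throwable unwrapCause(Throwable t) {
        Throwable result = t;
        while (result instanceof WrapperException && result.getCause() != null) {
            result = result.getCause();
        }
        return result;
    }

    public static void main(String[] args) {
        Throwable e = new RemoteWrapper(new IllegalStateException("shard not ready"));
        // prints IllegalStateException: the wrapper has been peeled away
        System.out.println(unwrapCause(e).getClass().getSimpleName());
    }
}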
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class RepositoriesService, method createRepository.
/**
 * Creates a repository holder.
 */
private Repository createRepository(RepositoryMetaData repositoryMetaData) {
    logger.debug("creating repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name());
    Repository.Factory factory = typesRegistry.get(repositoryMetaData.type());
    if (factory == null) {
        throw new RepositoryException(repositoryMetaData.name(), "repository type [" + repositoryMetaData.type() + "] does not exist");
    }
    try {
        Repository repository = factory.create(repositoryMetaData);
        repository.start();
        return repository;
    } catch (Exception e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}][{}]",
            repositoryMetaData.type(), repositoryMetaData.name()), e);
        throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e);
    }
}
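createRepository is a registry lookup with fail-fast semantics: an unknown type throws immediately with the type name in the message, while a failing factory is logged lazily and rethrown wrapped. A minimal generic sketch of the same shape, using hypothetical names and java.util.function.Function in place of Repository.Factory:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class FactoryRegistrySketch {

    private final Map<String, Function<String, AutoCloseable>> factories = new HashMap<>();

    AutoCloseable create(String type, String name) {
        Function<String, AutoCloseable> factory = factories.get(type);
        if (factory == null) {
            // fail fast, naming the offending type in the message
            throw new IllegalArgumentException("repository type [" + type + "] does not exist");
        }
        try {
            return factory.apply(name);
        } catch (Exception e) {
            // wrap, preserving the original cause for diagnostics
            throw new IllegalStateException("failed to create repository [" + name + "]", e);
        }
    }
}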
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class RepositoriesService, method registerRepository.
/**
 * Registers a new repository in the cluster.
 * <p>
 * This method can only be called on the master node. It tries to create the new repository on the master
 * and, if successful, adds it to the cluster metadata.
 *
 * @param request  register repository request
 * @param listener register repository listener
 */
public void registerRepository(final RegisterRepositoryRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
    final RepositoryMetaData newRepositoryMetaData = new RepositoryMetaData(request.name, request.type, request.settings);
    final ActionListener<ClusterStateUpdateResponse> registrationListener;
    if (request.verify) {
        registrationListener = new VerifyingRegisterRepositoryListener(request.name, listener);
    } else {
        registrationListener = listener;
    }
    clusterService.submitStateUpdateTask(request.cause,
        new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, registrationListener) {

            @Override
            protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
                return new ClusterStateUpdateResponse(acknowledged);
            }

            @Override
            public ClusterState execute(ClusterState currentState) throws IOException {
                ensureRepositoryNotInUse(currentState, request.name);
                // Trying to create the new repository on master to make sure it works
                if (!registerRepository(newRepositoryMetaData)) {
                    // The new repository has the same settings as the old one - ignore
                    return currentState;
                }
                MetaData metaData = currentState.metaData();
                MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
                RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
                if (repositories == null) {
                    logger.info("put repository [{}]", request.name);
                    repositories = new RepositoriesMetaData(new RepositoryMetaData(request.name, request.type, request.settings));
                } else {
                    boolean found = false;
                    List<RepositoryMetaData> repositoriesMetaData = new ArrayList<>(repositories.repositories().size() + 1);
                    for (RepositoryMetaData repositoryMetaData : repositories.repositories()) {
                        if (repositoryMetaData.name().equals(newRepositoryMetaData.name())) {
                            found = true;
                            repositoriesMetaData.add(newRepositoryMetaData);
                        } else {
                            repositoriesMetaData.add(repositoryMetaData);
                        }
                    }
                    if (!found) {
                        logger.info("put repository [{}]", request.name);
                        repositoriesMetaData.add(new RepositoryMetaData(request.name, request.type, request.settings));
                    } else {
                        logger.info("update repository [{}]", request.name);
                    }
                    repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()]));
                }
                mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories);
                return ClusterState.builder(currentState).metaData(mdBuilder).build();
            }

            @Override
            public void onFailure(String source, Exception e) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), e);
                super.onFailure(source, e);
            }

            @Override
            public boolean mustAck(DiscoveryNode discoveryNode) {
                return discoveryNode.isMasterNode();
            }
        });
}
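Inside execute, the else branch is a replace-or-append over the existing repository list: copy each entry, substitute the one whose name matches the new metadata, and append if nothing matched. The same logic isolated as a sketch (Repo is a hypothetical stand-in for RepositoryMetaData):

import java.util.ArrayList;
import java.util.List;

public class ReplaceOrAppendSketch {

    static final class Repo {
        final String name;
        Repo(String name) { this.name = name; }
    }

    // Copy existing entries, swap in the updated one by name, append if absent.
    static List<Repo> replaceOrAppend(List<Repo> existing, Repo updated) {
        List<Repo> result = new ArrayList<>(existing.size() + 1);
        boolean found = false;
        for (Repo repo : existing) {
            if (repo.name.equals(updated.name)) {
                found = true;
                result.add(updated); // same name: take the new definition
            } else {
                result.add(repo);
            }
        }
        if (!found) {
            result.add(updated); // no match: register as a new entry
        }
        return result;
    }
}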
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class VerifyNodeRepositoryAction, method verify.
public void verify(String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
    final DiscoveryNodes discoNodes = clusterService.state().nodes();
    final DiscoveryNode localNode = discoNodes.getLocalNode();
    final ObjectContainer<DiscoveryNode> masterAndDataNodes = discoNodes.getMasterAndDataNodes().values();
    final List<DiscoveryNode> nodes = new ArrayList<>();
    for (ObjectCursor<DiscoveryNode> cursor : masterAndDataNodes) {
        nodes.add(cursor.value);
    }
    final CopyOnWriteArrayList<VerificationFailure> errors = new CopyOnWriteArrayList<>();
    final AtomicInteger counter = new AtomicInteger(nodes.size());
    for (final DiscoveryNode node : nodes) {
        if (node.equals(localNode)) {
            try {
                doVerify(repository, verificationToken, localNode);
            } catch (Exception e) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e);
                errors.add(new VerificationFailure(node.getId(), e));
            }
            if (counter.decrementAndGet() == 0) {
                finishVerification(listener, nodes, errors);
            }
        } else {
            transportService.sendRequest(node, ACTION_NAME, new VerifyNodeRepositoryRequest(repository, verificationToken),
                new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {

                    @Override
                    public void handleResponse(TransportResponse.Empty response) {
                        if (counter.decrementAndGet() == 0) {
                            finishVerification(listener, nodes, errors);
                        }
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        errors.add(new VerificationFailure(node.getId(), exp));
                        if (counter.decrementAndGet() == 0) {
                            finishVerification(listener, nodes, errors);
                        }
                    }
                });
        }
    }
}
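verify completes exactly once: the counter starts at the number of nodes, every local result, remote response, or remote failure decrements it, and whichever handler brings it to zero calls finishVerification. A self-contained sketch of that countdown fan-out (names are illustrative; in the real code each task is an asynchronous transport request whose handlers perform the decrement):

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class CountdownFanOutSketch {

    /**
     * Run every task, collect failures, and invoke the callback exactly once
     * when the last task (successful or not) has completed.
     */
    static void fanOut(List<Runnable> tasks, Consumer<List<Throwable>> onAllDone) {
        final AtomicInteger counter = new AtomicInteger(tasks.size());
        final List<Throwable> errors = new CopyOnWriteArrayList<>();
        for (Runnable task : tasks) {
            try {
                task.run();
            } catch (Exception e) {
                errors.add(e); // collect, don't abort: remaining tasks still count down
            }
            if (counter.decrementAndGet() == 0) {
                onAllDone.accept(errors); // exactly one path observes zero
            }
        }
    }
}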