Use of org.apache.solr.util.RTimer in project lucene-solr by apache.
Class IndexFetcher, method markReplicationStart.
@SuppressForbidden(reason = "Need currentTimeMillis for debugging/stats")
private void markReplicationStart() {
  replicationTimer = new RTimer();
  replicationStartTimeStamp = new Date();
}
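An RTimer begins timing as soon as it is constructed, which is why the snippets on this page never call an explicit start method. A minimal sketch of the pattern, assuming only the two accessors these snippets themselves use (getTime() and stop(), both returning elapsed milliseconds); the timed work is a placeholder:

import org.apache.solr.util.RTimer;

public class RTimerSketch {
  public static void main(String[] args) throws InterruptedException {
    RTimer timer = new RTimer(); // starts timing immediately on construction
    Thread.sleep(50);            // placeholder for the work being measured
    double running = timer.getTime(); // read elapsed ms without stopping the timer
    double total = timer.stop();      // stop and get the final elapsed ms
    System.out.printf("running=%.1fms total=%.1fms%n", running, total);
  }
}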
Use of org.apache.solr.util.RTimer in project lucene-solr by apache.
Class SolrConfigHandler, method waitForAllReplicasState.
/**
 * Block up to a specified maximum time until we see agreement on the schema
 * version in ZooKeeper across all replicas for a collection.
 */
private static void waitForAllReplicasState(String collection, ZkController zkController, String prop, int expectedVersion, int maxWaitSecs) {
  final RTimer timer = new RTimer();
  // get a list of active replica cores to query for the schema zk version (skipping this core, of course)
  List<PerReplicaCallable> concurrentTasks = new ArrayList<>();
  for (String coreUrl : getActiveReplicaCoreUrls(zkController, collection)) {
    PerReplicaCallable e = new PerReplicaCallable(coreUrl, prop, expectedVersion, maxWaitSecs);
    concurrentTasks.add(e);
  }
  // nothing to wait for ...
  if (concurrentTasks.isEmpty())
    return;
  log.info(formatString("Waiting up to {0} secs for {1} replicas to set the property {2} to be of version {3} for collection {4}", maxWaitSecs, concurrentTasks.size(), prop, expectedVersion, collection));
  // use an executor service to invoke schema zk version requests in parallel with a max wait time
  int poolSize = Math.min(concurrentTasks.size(), 10);
  ExecutorService parallelExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(poolSize, new DefaultSolrThreadFactory("solrHandlerExecutor"));
  try {
    List<Future<Boolean>> results = parallelExecutor.invokeAll(concurrentTasks, maxWaitSecs, TimeUnit.SECONDS);
    // determine whether all replicas have the update; the failure list is lazily init'd
    List<String> failedList = null;
    for (int f = 0; f < results.size(); f++) {
      Boolean success = false;
      Future<Boolean> next = results.get(f);
      if (next.isDone() && !next.isCancelled()) {
        // looks to have finished, but need to check if it succeeded
        try {
          success = next.get();
        } catch (ExecutionException e) {
          // shouldn't happen since we checked isCancelled
        }
      }
      if (!success) {
        String coreUrl = concurrentTasks.get(f).coreUrl;
        log.warn("Core " + coreUrl + " could not get the expected version " + expectedVersion);
        if (failedList == null)
          failedList = new ArrayList<>();
        failedList.add(coreUrl);
      }
    }
    // if any tasks haven't completed within the specified timeout, it's an error
    if (failedList != null)
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, formatString("{0} out of {1} replicas failed to set the property {2} to version {3} within {4} seconds! Failed cores: {5}", failedList.size(), concurrentTasks.size() + 1, prop, expectedVersion, maxWaitSecs, failedList));
  } catch (InterruptedException ie) {
    log.warn(formatString("Core was interrupted while trying to set the property {0} to version {1} to propagate to {2} replicas for collection {3}", prop, expectedVersion, concurrentTasks.size(), collection));
    Thread.currentThread().interrupt();
  } finally {
    ExecutorUtil.shutdownAndAwaitTermination(parallelExecutor);
  }
  log.info("Took {}ms to set the property {} to be of version {} for collection {}", timer.getTime(), prop, expectedVersion, collection);
}
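The isDone()/isCancelled() check above matters because invokeAll with a timeout cancels any task still running when the deadline passes, and calling get() on a cancelled future throws CancellationException. A self-contained sketch of those JDK semantics (task durations and pool size are arbitrary illustrative choices):

import java.util.List;
import java.util.concurrent.*;

public class InvokeAllTimeoutSketch {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    List<Callable<Boolean>> tasks = List.of(
        () -> true,                                  // finishes well within the timeout
        () -> { Thread.sleep(5_000); return true; }  // still running at the deadline
    );
    try {
      // invokeAll blocks until every task finishes or the timeout expires,
      // cancelling whatever is still running at that point
      List<Future<Boolean>> results = pool.invokeAll(tasks, 1, TimeUnit.SECONDS);
      for (Future<Boolean> f : results) {
        if (f.isDone() && !f.isCancelled()) {
          System.out.println("completed: " + f.get()); // safe: done and not cancelled
        } else {
          System.out.println("timed out and was cancelled");
        }
      }
    } catch (ExecutionException e) {
      // a task threw; unreachable here since both tasks return normally
    } finally {
      pool.shutdownNow();
    }
  }
}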
Use of org.apache.solr.util.RTimer in project lucene-solr by apache.
Class IndexFingerprint, method getFingerprint.
/** Opens a new realtime searcher and returns its (possibly cached) fingerprint */
public static IndexFingerprint getFingerprint(SolrCore core, long maxVersion) throws IOException {
  RTimer timer = new RTimer();
  core.getUpdateHandler().getUpdateLog().openRealtimeSearcher();
  RefCounted<SolrIndexSearcher> newestSearcher = core.getUpdateHandler().getUpdateLog().uhandler.core.getRealtimeSearcher();
  try {
    IndexFingerprint f = newestSearcher.get().getIndexFingerprint(maxVersion);
    final double duration = timer.stop();
    log.info("IndexFingerprint millis:{} result:{}", duration, f);
    return f;
  } finally {
    if (newestSearcher != null) {
      newestSearcher.decref();
    }
  }
}
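The decref() in the finally block releases the reference acquired when the searcher was checked out; skipping it would leak the searcher. A simplified sketch of the reference-counting idea behind Solr's RefCounted wrapper (this toy class is illustrative only, not Solr's actual implementation):

import java.util.concurrent.atomic.AtomicInteger;

// Toy stand-in for org.apache.solr.common.util.RefCounted: the wrapped
// resource is released only when the last holder calls decref().
abstract class SimpleRefCounted<T> {
  private final T resource;
  private final AtomicInteger refCount = new AtomicInteger(1);

  SimpleRefCounted(T resource) {
    this.resource = resource;
  }

  public T get() {
    return resource;
  }

  public void incref() {
    refCount.incrementAndGet();
  }

  public void decref() {
    if (refCount.decrementAndGet() == 0) {
      close(); // last reference gone; release the underlying resource
    }
  }

  protected abstract void close();
}

Callers follow the same shape as getFingerprint above: acquire the reference, use it inside try, and decref() in finally.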
Use of org.apache.solr.util.RTimer in project lucene-solr by apache.
Class SimpleFacets, method getTermCounts.
/**
 * Term counts for use in field faceting that respect the specified mincount;
 * if mincount is null, the "zeros" param is consulted for the appropriate
 * backcompat default.
 *
 * @see FacetParams#FACET_ZEROS
 */
private NamedList<Integer> getTermCounts(String field, Integer mincount, ParsedParams parsed) throws IOException {
  final SolrParams params = parsed.params;
  final DocSet docs = parsed.docs;
  final int threads = parsed.threads;
  int offset = params.getFieldInt(field, FacetParams.FACET_OFFSET, 0);
  int limit = params.getFieldInt(field, FacetParams.FACET_LIMIT, 100);
  if (limit == 0)
    return new NamedList<>();
  if (mincount == null) {
    Boolean zeros = params.getFieldBool(field, FacetParams.FACET_ZEROS);
    // mincount = (zeros!=null && zeros) ? 0 : 1;
    mincount = (zeros != null && !zeros) ? 1 : 0;
    // current default is to include zeros.
  }
  boolean missing = params.getFieldBool(field, FacetParams.FACET_MISSING, false);
  // default to sorting if there is a limit.
  String sort = params.getFieldParam(field, FacetParams.FACET_SORT, limit > 0 ? FacetParams.FACET_SORT_COUNT : FacetParams.FACET_SORT_INDEX);
  String prefix = params.getFieldParam(field, FacetParams.FACET_PREFIX);
  final Predicate<BytesRef> termFilter = newBytesRefFilter(field, params);
  boolean exists = params.getFieldBool(field, FacetParams.FACET_EXISTS, false);
  NamedList<Integer> counts;
  SchemaField sf = searcher.getSchema().getField(field);
  if (sf.getType().isPointField() && !sf.hasDocValues()) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't facet on a PointField without docValues");
  }
  FieldType ft = sf.getType();
  // determine what type of faceting method to use
  final String methodStr = params.getFieldParam(field, FacetParams.FACET_METHOD);
  final FacetMethod requestedMethod;
  if (FacetParams.FACET_METHOD_enum.equals(methodStr)) {
    requestedMethod = FacetMethod.ENUM;
  } else if (FacetParams.FACET_METHOD_fcs.equals(methodStr)) {
    requestedMethod = FacetMethod.FCS;
  } else if (FacetParams.FACET_METHOD_fc.equals(methodStr)) {
    requestedMethod = FacetMethod.FC;
  } else if (FacetParams.FACET_METHOD_uif.equals(methodStr)) {
    requestedMethod = FacetMethod.UIF;
  } else {
    requestedMethod = null;
  }
  final boolean multiToken = sf.multiValued() || ft.multiValuedFieldCache();
  FacetMethod appliedFacetMethod = selectFacetMethod(field, sf, requestedMethod, mincount, exists);
  RTimer timer = null;
  if (fdebug != null) {
    fdebug.putInfoItem("requestedMethod", requestedMethod == null ? "not specified" : requestedMethod.name());
    fdebug.putInfoItem("appliedMethod", appliedFacetMethod.name());
    fdebug.putInfoItem("inputDocSetSize", docs.size());
    fdebug.putInfoItem("field", field);
    timer = new RTimer();
  }
  if (params.getFieldBool(field, GroupParams.GROUP_FACET, false)) {
    counts = getGroupedCounts(searcher, docs, field, multiToken, offset, limit, mincount, missing, sort, prefix, termFilter);
  } else {
    assert appliedFacetMethod != null;
    switch (appliedFacetMethod) {
      case ENUM:
        assert TrieField.getMainValuePrefix(ft) == null;
        counts = getFacetTermEnumCounts(searcher, docs, field, offset, limit, mincount, missing, sort, prefix, termFilter, exists);
        break;
      case FCS:
        assert ft.isPointField() || !multiToken;
        if (ft.isPointField() || (ft.getNumberType() != null && !sf.multiValued())) {
          if (prefix != null) {
            throw new SolrException(ErrorCode.BAD_REQUEST, FacetParams.FACET_PREFIX + " is not supported on numeric types");
          }
          if (termFilter != null) {
            throw new SolrException(ErrorCode.BAD_REQUEST, "BytesRef term filters (" + FacetParams.FACET_CONTAINS + ", " + FacetParams.FACET_EXCLUDETERMS + ") are not supported on numeric types");
          }
          // We should do this, but mincount=0 is currently the default
          // if (ft.isPointField() && mincount <= 0) {
          //   throw new SolrException(ErrorCode.BAD_REQUEST, FacetParams.FACET_MINCOUNT + " <= 0 is not supported on point types");
          // }
          counts = NumericFacets.getCounts(searcher, docs, field, offset, limit, mincount, missing, sort);
        } else {
          PerSegmentSingleValuedFaceting ps = new PerSegmentSingleValuedFaceting(searcher, docs, field, offset, limit, mincount, missing, sort, prefix, termFilter);
          Executor executor = threads == 0 ? directExecutor : facetExecutor;
          ps.setNumThreads(threads);
          counts = ps.getFacetCounts(executor);
        }
        break;
      case UIF:
        // Emulate the JSON Faceting structure so we can use the same parsing classes
        Map<String, Object> jsonFacet = new HashMap<>(13);
        jsonFacet.put("type", "terms");
        jsonFacet.put("field", field);
        jsonFacet.put("offset", offset);
        jsonFacet.put("limit", limit);
        jsonFacet.put("mincount", mincount);
        jsonFacet.put("missing", missing);
        jsonFacet.put("prefix", prefix);
        jsonFacet.put("numBuckets", params.getFieldBool(field, "numBuckets", false));
        jsonFacet.put("allBuckets", params.getFieldBool(field, "allBuckets", false));
        jsonFacet.put("method", "uif");
        jsonFacet.put("cacheDf", 0);
        jsonFacet.put("perSeg", false);
        final String sortVal;
        switch (sort) {
          case FacetParams.FACET_SORT_COUNT_LEGACY:
            sortVal = FacetParams.FACET_SORT_COUNT;
            break;
          case FacetParams.FACET_SORT_INDEX_LEGACY:
            sortVal = FacetParams.FACET_SORT_INDEX;
            break;
          default:
            sortVal = sort;
        }
        jsonFacet.put(SORT, sortVal);
        Map<String, Object> topLevel = new HashMap<>();
        topLevel.put(field, jsonFacet);
        topLevel.put("processEmpty", true);
        FacetProcessor fproc = FacetProcessor.createProcessor(rb.req, topLevel, docs); // rb.getResults().docSet
        // TODO do we handle debug? Should probably already be handled by the legacy code
        fproc.process();
        // Go through the response to build the expected output for SimpleFacets
        Object res = fproc.getResponse();
        counts = new NamedList<Integer>();
        if (res != null) {
          SimpleOrderedMap<Object> som = (SimpleOrderedMap<Object>) res;
          SimpleOrderedMap<Object> asdf = (SimpleOrderedMap<Object>) som.get(field);
          List<SimpleOrderedMap<Object>> buckets = (List<SimpleOrderedMap<Object>>) asdf.get("buckets");
          for (SimpleOrderedMap<Object> b : buckets) {
            counts.add(b.get("val").toString(), (Integer) b.get("count"));
          }
          if (missing) {
            SimpleOrderedMap<Object> missingCounts = (SimpleOrderedMap<Object>) asdf.get("missing");
            counts.add(null, (Integer) missingCounts.get("count"));
          }
        }
        break;
      case FC:
        counts = DocValuesFacets.getCounts(searcher, docs, field, offset, limit, mincount, missing, sort, prefix, termFilter, fdebug);
        break;
      default:
        throw new AssertionError();
    }
  }
  if (fdebug != null) {
    long timeElapsed = (long) timer.getTime();
    fdebug.setElapse(timeElapsed);
  }
  return counts;
}
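The knobs this method reads all arrive as per-field facet.* request parameters. A hedged SolrJ sketch of a query exercising the main ones (the field name "category" is a placeholder; the f.<field>.facet.* syntax is Solr's standard per-field override form):

import org.apache.solr.client.solrj.SolrQuery;

public class FacetParamsSketch {
  public static void main(String[] args) {
    SolrQuery q = new SolrQuery("*:*");
    q.setFacet(true);
    q.addFacetField("category");             // placeholder field name
    q.set("f.category.facet.limit", "50");   // per-field override of facet.limit
    q.set("f.category.facet.mincount", "1"); // skip zero-count buckets
    q.set("f.category.facet.method", "uif"); // request the UIF branch shown above
    q.set("f.category.facet.sort", "count"); // sort buckets by count
    System.out.println(q);
  }
}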
Use of org.apache.solr.util.RTimer in project lucene-solr by apache.
Class LeaderInitiatedRecoveryOnCommitTest, method sendCommitWithRetry.
protected void sendCommitWithRetry(Replica replica) throws Exception {
  String replicaCoreUrl = replica.getCoreUrl();
  log.info("Sending commit request to: " + replicaCoreUrl);
  final RTimer timer = new RTimer();
  try (HttpSolrClient client = getHttpSolrClient(replicaCoreUrl)) {
    try {
      client.commit();
      log.info("Sent commit request to {} OK, took {}ms", replicaCoreUrl, timer.getTime());
    } catch (Exception exc) {
      Throwable rootCause = SolrException.getRootCause(exc);
      if (rootCause instanceof NoHttpResponseException) {
        log.warn("No HTTP response from sending commit request to " + replicaCoreUrl + "; will re-try after waiting 3 seconds");
        Thread.sleep(3000);
        client.commit();
        log.info("Second attempt at sending commit to " + replicaCoreUrl + " succeeded.");
      } else {
        throw exc;
      }
    }
  }
}
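The retry above is hard-coded to one extra attempt. A generalized sketch of the same root-cause-gated retry idea (the Action interface, attempt limit, and delay are illustrative choices, not part of the test above):

import org.apache.http.NoHttpResponseException;
import org.apache.solr.common.SolrException;

public final class RetryOnNoResponse {

  @FunctionalInterface
  public interface Action {
    void run() throws Exception;
  }

  /** Retries only when the root cause is a dropped HTTP response; rethrows everything else. */
  public static void withRetry(Action action, int maxAttempts, long delayMs) throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        action.run();
        return; // succeeded
      } catch (Exception exc) {
        Throwable rootCause = SolrException.getRootCause(exc);
        if (!(rootCause instanceof NoHttpResponseException) || attempt >= maxAttempts) {
          throw exc; // not retryable, or out of attempts
        }
        Thread.sleep(delayMs); // give the node time to come back
      }
    }
  }
}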