Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
The class PrepRecoveryOp, method execute:
@Override
public void execute(CallInfo it) throws Exception {
  assert TestInjection.injectPrepRecoveryOpPauseForever();

  final SolrParams params = it.req.getParams();

  String cname = params.get(CoreAdminParams.CORE);
  if (cname == null) {
    cname = "";
  }

  String nodeName = params.get("nodeName");
  String coreNodeName = params.get("coreNodeName");
  Replica.State waitForState = Replica.State.getState(params.get(ZkStateReader.STATE_PROP));
  Boolean checkLive = params.getBool("checkLive");
  Boolean onlyIfLeader = params.getBool("onlyIfLeader");
  Boolean onlyIfLeaderActive = params.getBool("onlyIfLeaderActive");

  CoreContainer coreContainer = it.handler.coreContainer;
  // wait long enough for the leader conflict to work itself out plus a little extra
  int conflictWaitMs = coreContainer.getZkController().getLeaderConflictResolveWait();
  int maxTries = (int) Math.round(conflictWaitMs / 1000) + 3;
  log.info("Going to wait for coreNodeName: {}, state: {}, checkLive: {}, onlyIfLeader: {}, onlyIfLeaderActive: {}, maxTime: {} s",
      coreNodeName, waitForState, checkLive, onlyIfLeader, onlyIfLeaderActive, maxTries);

  Replica.State state = null;
  boolean live = false;
  int retry = 0;
  while (true) {
    try (SolrCore core = coreContainer.getCore(cname)) {
      if (core == null && retry == Math.min(30, maxTries)) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
      }
      if (core != null) {
        if (onlyIfLeader != null && onlyIfLeader) {
          if (!core.getCoreDescriptor().getCloudDescriptor().isLeader()) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "We are not the leader");
          }
        }

        // wait until we are sure the recovering node is ready
        // to accept updates
        CloudDescriptor cloudDescriptor = core.getCoreDescriptor().getCloudDescriptor();
        String collectionName = cloudDescriptor.getCollectionName();

        if (retry % 15 == 0) {
          if (retry > 0 && log.isInfoEnabled()) {
            log.info("After " + retry + " seconds, core " + cname + " (" + cloudDescriptor.getShardId()
                + " of " + cloudDescriptor.getCollectionName() + ") still does not have state: "
                + waitForState + "; forcing ClusterState update from ZooKeeper");
          }
          // force a cluster state update
          coreContainer.getZkController().getZkStateReader().forceUpdateCollection(collectionName);
        }

        ClusterState clusterState = coreContainer.getZkController().getClusterState();
        DocCollection collection = clusterState.getCollection(collectionName);
        Slice slice = collection.getSlice(cloudDescriptor.getShardId());
        if (slice != null) {
          final Replica replica = slice.getReplicasMap().get(coreNodeName);
          if (replica != null) {
            state = replica.getState();
            live = clusterState.liveNodesContain(nodeName);

            final Replica.State localState = cloudDescriptor.getLastPublished();

            // TODO: This is funky but I've seen this in testing where the replica asks the
            // leader to be in recovery? Need to track down how that happens ... in the meantime,
            // this is a safeguard
            boolean leaderDoesNotNeedRecovery = (onlyIfLeader != null && onlyIfLeader
                && core.getName().equals(replica.getStr("core"))
                && waitForState == Replica.State.RECOVERING
                && localState == Replica.State.ACTIVE
                && state == Replica.State.ACTIVE);

            if (leaderDoesNotNeedRecovery) {
              log.warn("Leader " + core.getName() + " ignoring request to be in the recovering state because it is live and active.");
            }

            boolean onlyIfActiveCheckResult = onlyIfLeaderActive != null && onlyIfLeaderActive && localState != Replica.State.ACTIVE;
            log.info("In WaitForState(" + waitForState + "): collection=" + collectionName + ", shard=" + slice.getName()
                + ", thisCore=" + core.getName() + ", leaderDoesNotNeedRecovery=" + leaderDoesNotNeedRecovery
                + ", isLeader? " + core.getCoreDescriptor().getCloudDescriptor().isLeader()
                + ", live=" + live + ", checkLive=" + checkLive + ", currentState=" + state.toString()
                + ", localState=" + localState + ", nodeName=" + nodeName + ", coreNodeName=" + coreNodeName
                + ", onlyIfActiveCheckResult=" + onlyIfActiveCheckResult + ", nodeProps: " + replica);

            if (!onlyIfActiveCheckResult && replica != null && (state == waitForState || leaderDoesNotNeedRecovery)) {
              if (checkLive == null) {
                break;
              } else if (checkLive && live) {
                break;
              } else if (!checkLive && !live) {
                break;
              }
            }
          }
        }
      }

      if (retry++ == maxTries) {
        String collection = null;
        String leaderInfo = null;
        String shardId = null;
        try {
          CloudDescriptor cloudDescriptor = core.getCoreDescriptor().getCloudDescriptor();
          collection = cloudDescriptor.getCollectionName();
          shardId = cloudDescriptor.getShardId();
          leaderInfo = coreContainer.getZkController().getZkStateReader().getLeaderUrl(collection, shardId, 5000);
        } catch (Exception exc) {
          leaderInfo = "Not available due to: " + exc;
        }
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            "I was asked to wait on state " + waitForState + " for " + shardId + " in " + collection
                + " on " + nodeName + " but I still do not see the requested state. I see state: "
                + Objects.toString(state) + " live:" + live + " leader from ZK: " + leaderInfo);
      }

      if (coreContainer.isShutDown()) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Solr is shutting down");
      }

      // solrcloud_debug
      if (log.isDebugEnabled() && core != null) {
        try {
          LocalSolrQueryRequest r = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
          CommitUpdateCommand commitCmd = new CommitUpdateCommand(r, false);
          commitCmd.softCommit = true;
          core.getUpdateHandler().commit(commitCmd);
          RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
          SolrIndexSearcher searcher = searchHolder.get();
          try {
            log.debug(core.getCoreContainer().getZkController().getNodeName() + " to replicate "
                + searcher.search(new MatchAllDocsQuery(), 1).totalHits
                + " gen:" + core.getDeletionPolicy().getLatestCommit().getGeneration()
                + " data:" + core.getDataDir());
          } finally {
            searchHolder.decref();
          }
        } catch (Exception e) {
          log.debug("Error in solrcloud_debug block", e);
        }
      }
    }
    Thread.sleep(1000);
  }

  log.info("Waited coreNodeName: " + coreNodeName + ", state: " + waitForState + ", checkLive: " + checkLive
      + ", onlyIfLeader: " + onlyIfLeader + " for: " + retry + " seconds.");
}
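For orientation, here is a minimal sketch of how this PREPRECOVERY action might be invoked through the CoreAdmin API with SolrJ. The base URL, core name, and node names are hypothetical placeholders, not the actual call site in lucene-solr; the parameter names mirror those read by execute() above.

// Hedged sketch: trigger PREPRECOVERY on a leader node via the CoreAdmin API.
// All host/core/node identifiers below are made-up examples.
ModifiableSolrParams prepParams = new ModifiableSolrParams();
prepParams.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.PREPRECOVERY.toString());
prepParams.set(CoreAdminParams.CORE, "collection1_shard1_replica1");      // leader's core (hypothetical)
prepParams.set("nodeName", "127.0.0.1:8983_solr");                        // recovering node (hypothetical)
prepParams.set("coreNodeName", "core_node2");                             // recovering replica (hypothetical)
prepParams.set(ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
prepParams.set("checkLive", true);
prepParams.set("onlyIfLeader", true);
prepParams.set("onlyIfLeaderActive", true);

try (HttpSolrClient client = new HttpSolrClient.Builder("http://127.0.0.1:8983/solr").build()) {
  // Blocks on the leader until it sees the replica in RECOVERING (or the wait times out).
  client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/cores", prepParams));
}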
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
The class RequestSyncShardOp, method execute:
@Override
public void execute(CallInfo it) throws Exception {
  final SolrParams params = it.req.getParams();

  log.info("I have been requested to sync up my shard");
  ZkController zkController = it.handler.coreContainer.getZkController();
  if (zkController == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Only valid for SolrCloud");
  }

  String cname = params.get(CoreAdminParams.CORE);
  if (cname == null) {
    throw new IllegalArgumentException(CoreAdminParams.CORE + " is required");
  }

  SyncStrategy syncStrategy = null;
  try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
    if (core != null) {
      syncStrategy = new SyncStrategy(core.getCoreContainer());

      Map<String, Object> props = new HashMap<>();
      props.put(ZkStateReader.BASE_URL_PROP, zkController.getBaseUrl());
      props.put(ZkStateReader.CORE_NAME_PROP, cname);
      props.put(ZkStateReader.NODE_NAME_PROP, zkController.getNodeName());

      boolean success = syncStrategy.sync(zkController, core, new ZkNodeProps(props), true).isSuccess();

      // solrcloud_debug
      if (log.isDebugEnabled()) {
        try {
          RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
          SolrIndexSearcher searcher = searchHolder.get();
          try {
            log.debug(core.getCoreContainer().getZkController().getNodeName() + " synched "
                + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
          } finally {
            searchHolder.decref();
          }
        } catch (Exception e) {
          log.debug("Error in solrcloud_debug block", e);
        }
      }
      if (!success) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Sync Failed");
      }
    } else {
      SolrException.log(log, "Could not find core to call sync:" + cname);
    }
  } finally {
    // no recoveryStrat close for now
    if (syncStrategy != null) {
      syncStrategy.close();
    }
  }
}
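As with PREPRECOVERY above, this handler is reached through the CoreAdmin API. A minimal, hypothetical sketch of the call (placeholder host and core name, not a call site from the project):

// Hedged sketch: ask a core to sync up its shard via REQUESTSYNCSHARD.
ModifiableSolrParams syncParams = new ModifiableSolrParams();
syncParams.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTSYNCSHARD.toString());
syncParams.set(CoreAdminParams.CORE, "collection1_shard1_replica1");  // hypothetical core name

try (HttpSolrClient client = new HttpSolrClient.Builder("http://127.0.0.1:8983/solr").build()) {
  client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/cores", syncParams));
}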
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
The class HttpCacheHeaderUtil, method calcLastModified:
/**
 * Calculate the appropriate last-modified time for Solr relative to the current request.
 *
 * @return the timestamp to use as a last modified time.
 */
public static long calcLastModified(final SolrQueryRequest solrReq) {
  final SolrCore core = solrReq.getCore();
  final SolrIndexSearcher searcher = solrReq.getSearcher();

  final LastModFrom lastModFrom = core.getSolrConfig().getHttpCachingConfig().getLastModFrom();

  long lastMod;
  try {
    // assume default, change if needed (getOpenTime() should be fast)
    lastMod = LastModFrom.DIRLASTMOD == lastModFrom
        ? IndexDeletionPolicyWrapper.getCommitTimestamp(searcher.getIndexReader().getIndexCommit())
        : searcher.getOpenTimeStamp().getTime();
  } catch (IOException e) {
    // we're pretty freaking screwed if this happens
    throw new SolrException(ErrorCode.SERVER_ERROR, e);
  }
  // Truncate to second granularity, since HTTP date headers carry whole seconds only.
  return lastMod - (lastMod % 1000L);
}
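For context, a hedged sketch of the conditional-GET check this value typically feeds into. Only calcLastModified comes from the class above; the servlet request/response variables are assumptions for illustration.

// Hedged sketch: honor If-Modified-Since using the second-granularity timestamp.
long lastMod = HttpCacheHeaderUtil.calcLastModified(solrReq);
long ifModifiedSince = httpRequest.getDateHeader("If-Modified-Since"); // -1 when the header is absent
if (ifModifiedSince != -1 && lastMod <= ifModifiedSince) {
  // The client's cached copy is still current; skip sending the body.
  httpResponse.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
} else {
  httpResponse.setDateHeader("Last-Modified", lastMod);
}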
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
The class DocValuesTest, method testDocValues:
@Test
public void testDocValues() throws IOException {
  assertU(adoc("id", "1"));
  assertU(commit());
  try (SolrCore core = h.getCoreInc()) {
    final RefCounted<SolrIndexSearcher> searcherRef = core.openNewSearcher(true, true);
    final SolrIndexSearcher searcher = searcherRef.get();
    try {
      final LeafReader reader = searcher.getSlowAtomicReader();
      assertEquals(1, reader.numDocs());

      final FieldInfos infos = reader.getFieldInfos();
      assertEquals(DocValuesType.NUMERIC, infos.fieldInfo("floatdv").getDocValuesType());
      assertEquals(DocValuesType.NUMERIC, infos.fieldInfo("intdv").getDocValuesType());
      assertEquals(DocValuesType.NUMERIC, infos.fieldInfo("doubledv").getDocValuesType());
      assertEquals(DocValuesType.NUMERIC, infos.fieldInfo("longdv").getDocValuesType());
      assertEquals(DocValuesType.SORTED, infos.fieldInfo("stringdv").getDocValuesType());
      assertEquals(DocValuesType.SORTED, infos.fieldInfo("booldv").getDocValuesType());

      NumericDocValues dvs = reader.getNumericDocValues("floatdv");
      assertEquals(0, dvs.nextDoc());
      assertEquals((long) Float.floatToIntBits(1), dvs.longValue());
      dvs = reader.getNumericDocValues("intdv");
      assertEquals(0, dvs.nextDoc());
      assertEquals(2L, dvs.longValue());
      dvs = reader.getNumericDocValues("doubledv");
      assertEquals(0, dvs.nextDoc());
      assertEquals(Double.doubleToLongBits(3), dvs.longValue());
      dvs = reader.getNumericDocValues("longdv");
      assertEquals(0, dvs.nextDoc());
      assertEquals(4L, dvs.longValue());

      SortedDocValues sdv = reader.getSortedDocValues("stringdv");
      assertEquals(0, sdv.nextDoc());
      assertEquals("solr", sdv.binaryValue().utf8ToString());
      sdv = reader.getSortedDocValues("booldv");
      assertEquals(0, sdv.nextDoc());
      assertEquals("T", sdv.binaryValue().utf8ToString());

      final IndexSchema schema = core.getLatestSchema();
      final SchemaField floatDv = schema.getField("floatdv");
      final SchemaField intDv = schema.getField("intdv");
      final SchemaField doubleDv = schema.getField("doubledv");
      final SchemaField longDv = schema.getField("longdv");
      final SchemaField boolDv = schema.getField("booldv");

      FunctionValues values = floatDv.getType().getValueSource(floatDv, null)
          .getValues(null, searcher.getSlowAtomicReader().leaves().get(0));
      assertEquals(1f, values.floatVal(0), 0f);
      assertEquals(1f, values.objectVal(0));
      values = intDv.getType().getValueSource(intDv, null)
          .getValues(null, searcher.getSlowAtomicReader().leaves().get(0));
      assertEquals(2, values.intVal(0));
      assertEquals(2, values.objectVal(0));
      values = doubleDv.getType().getValueSource(doubleDv, null)
          .getValues(null, searcher.getSlowAtomicReader().leaves().get(0));
      assertEquals(3d, values.doubleVal(0), 0d);
      assertEquals(3d, values.objectVal(0));
      values = longDv.getType().getValueSource(longDv, null)
          .getValues(null, searcher.getSlowAtomicReader().leaves().get(0));
      assertEquals(4L, values.longVal(0));
      assertEquals(4L, values.objectVal(0));
      values = boolDv.getType().getValueSource(boolDv, null)
          .getValues(null, searcher.getSlowAtomicReader().leaves().get(0));
      assertEquals("true", values.strVal(0));
      assertEquals(true, values.objectVal(0));

      // check reversibility of created fields
      tstToObj(schema.getField("floatdv"), -1.5f);
      tstToObj(schema.getField("floatdvs"), -1.5f);
      tstToObj(schema.getField("doubledv"), -1.5d);
      tstToObj(schema.getField("doubledvs"), -1.5d);
      tstToObj(schema.getField("intdv"), -7);
      tstToObj(schema.getField("intdvs"), -7);
      tstToObj(schema.getField("longdv"), -11L);
      tstToObj(schema.getField("longdvs"), -11L);
      tstToObj(schema.getField("datedv"), new Date(1000));
      tstToObj(schema.getField("datedvs"), new Date(1000));
      tstToObj(schema.getField("stringdv"), "foo");
      tstToObj(schema.getField("stringdvs"), "foo");
      tstToObj(schema.getField("booldv"), true);
      tstToObj(schema.getField("booldvs"), true);
    } finally {
      searcherRef.decref();
    }
  }
}
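The nextDoc()/longValue() calls above reflect Lucene's iterator-style doc values API (Lucene 7+), where values are consumed by advancing the iterator rather than by random access. A minimal sketch of that idiom over a whole segment; the field name is taken from the test, the loop itself is illustrative:

// Iterate every document that has a value for "longdv" in this leaf reader;
// values must be read in increasing docID order via the iterator.
NumericDocValues longs = reader.getNumericDocValues("longdv");
for (int doc = longs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = longs.nextDoc()) {
  long value = longs.longValue(); // value for the doc the iterator is positioned on
  System.out.println("doc " + doc + " -> " + value);
}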
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
The class AddBlockUpdateTest, method testXML:
// This is the same as testSolrJXML above but uses the XMLLoader
// to illustrate the structure of the XML documents
@Test
public void testXML() throws IOException, XMLStreamException {
  UpdateRequest req = new UpdateRequest();

  List<SolrInputDocument> docs = new ArrayList<>();

  String xml_doc1 = "<doc >"
      + " <field name=\"id\">1</field>"
      + " <field name=\"parent_s\">X</field>"
      + "<doc> "
      + " <field name=\"id\" >2</field>"
      + " <field name=\"child_s\">y</field>"
      + "</doc>"
      + "<doc> "
      + " <field name=\"id\" >3</field>"
      + " <field name=\"child_s\">z</field>"
      + "</doc>"
      + "</doc>";

  String xml_doc2 = "<doc >"
      + " <field name=\"id\">4</field>"
      + " <field name=\"parent_s\">A</field>"
      + "<doc> "
      + " <field name=\"id\" >5</field>"
      + " <field name=\"child_s\">b</field>"
      + "</doc>"
      + "<doc> "
      + " <field name=\"id\" >6</field>"
      + " <field name=\"child_s\">c</field>"
      + "</doc>"
      + "</doc>";

  XMLStreamReader parser = inputFactory.createXMLStreamReader(new StringReader(xml_doc1));
  // read the START document...
  parser.next();
  // null for the processor is all right here
  XMLLoader loader = new XMLLoader();
  SolrInputDocument document1 = loader.readDoc(parser);

  XMLStreamReader parser2 = inputFactory.createXMLStreamReader(new StringReader(xml_doc2));
  // read the START document...
  parser2.next();
  // null for the processor is all right here
  //XMLLoader loader = new XMLLoader();
  SolrInputDocument document2 = loader.readDoc(parser2);

  docs.add(document1);
  docs.add(document2);

  Collections.shuffle(docs, random());
  req.add(docs);

  RequestWriter requestWriter = new RequestWriter();
  OutputStream os = new ByteArrayOutputStream();
  requestWriter.write(req, os);
  assertBlockU(os.toString());
  assertU(commit());

  final SolrIndexSearcher searcher = getSearcher();
  assertSingleParentOf(searcher, one("yz"), "X");
  assertSingleParentOf(searcher, one("bc"), "A");
}
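For context, one hedged way to query the parent/child blocks this test indexes is Solr's block-join parent query parser. The assertion below is a sketch, not part of the test; the which filter assumes that only parent documents carry parent_s, as in the XML above.

// Hypothetical follow-up query: find the parent whose child block contains
// child_s:y; with the documents above this should match the parent parent_s=X.
assertQ(req("q", "{!parent which='parent_s:[* TO *]'}child_s:y"),
    "//*[@numFound='1']");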