Use of org.apache.solr.core.SolrCore in project lucene-solr by apache.
The class SolrIndexSplitter, method split:
public void split() throws IOException {
  List<LeafReaderContext> leaves = searcher.getRawReader().leaves();
  List<FixedBitSet[]> segmentDocSets = new ArrayList<>(leaves.size());
  log.info("SolrIndexSplitter: partitions=" + numPieces + " segments=" + leaves.size());
  for (LeafReaderContext readerContext : leaves) {
    // make sure we're going in order
    assert readerContext.ordInParent == segmentDocSets.size();
    FixedBitSet[] docSets = split(readerContext);
    segmentDocSets.add(docSets);
  }
  for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++) {
    log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces
        + (ranges != null ? " range=" + ranges.get(partitionNumber) : ""));
    boolean success = false;
    RefCounted<IndexWriter> iwRef = null;
    IndexWriter iw = null;
    if (cores != null) {
      SolrCore subCore = cores.get(partitionNumber);
      iwRef = subCore.getUpdateHandler().getSolrCoreState().getIndexWriter(subCore);
      iw = iwRef.get();
    } else {
      SolrCore core = searcher.getCore();
      String path = paths.get(partitionNumber);
      iw = SolrIndexWriter.create(core,
          "SplittingIndexWriter" + partitionNumber + (ranges != null ? " " + ranges.get(partitionNumber) : ""),
          path, core.getDirectoryFactory(), true, core.getLatestSchema(),
          core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
    }
    try {
      // This removes deletions, but an optimize might still be needed because
      // sub-shards will have the same number of segments as the parent shard.
      for (int segmentNumber = 0; segmentNumber < leaves.size(); segmentNumber++) {
        log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces
            + (ranges != null ? " range=" + ranges.get(partitionNumber) : "")
            + " segment #" + segmentNumber + " segmentCount=" + leaves.size());
        CodecReader subReader = SlowCodecReaderWrapper.wrap(leaves.get(segmentNumber).reader());
        iw.addIndexes(new LiveDocsReader(subReader, segmentDocSets.get(segmentNumber)[partitionNumber]));
      }
      // We commit explicitly instead of sending a CommitUpdateCommand through the
      // processor chain, because the sub-shard cores would just ignore such a commit:
      // the update log is not in an active state at this time.
      //TODO no commitUpdateCommand
      SolrIndexWriter.setCommitData(iw, -1);
      iw.commit();
      success = true;
    } finally {
      if (iwRef != null) {
        iwRef.decref();
      } else {
        if (success) {
          iw.close();
        } else {
          IOUtils.closeWhileHandlingException(iw);
        }
      }
    }
  }
}
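A note on the cleanup in the finally block: Solr's RefCounted idiom means a writer borrowed from SolrCoreState must be returned with decref() rather than closed, while a writer created via SolrIndexWriter.create is owned (and closed) by the caller. A minimal sketch of the borrow/return half of that contract, assuming core is an open SolrCore:

// Borrow the core's shared IndexWriter; getIndexWriter() increments its reference count.
RefCounted<IndexWriter> iwRef = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
try {
  IndexWriter iw = iwRef.get();
  // ... use iw, but never close it: other threads may still hold a reference ...
} finally {
  // Return the reference; the writer is only released once all holders have decref'd.
  iwRef.decref();
}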
Use of org.apache.solr.core.SolrCore in project lucene-solr by apache.
The class BasicZkTest, method testBasic:
@Test
public void testBasic() throws Exception {
  // test using ZooKeeper
  assertTrue("Not using ZooKeeper", h.getCoreContainer().isZooKeeperAware());
  // for the really slow/busy computer, we wait to make sure we have a leader before starting
  h.getCoreContainer().getZkController().getZkStateReader().getLeaderUrl("collection1", "shard1", 30000);
  ZkController zkController = h.getCoreContainer().getZkController();
  SolrCore core = h.getCore();
  // test that we got the expected config, not just hardcoded defaults
  assertNotNull(core.getRequestHandler("mock"));
  lrf.args.put(CommonParams.VERSION, "2.2");
  assertQ("test query on empty index", request("qlkciyopsbgzyvkylsjhchghjrdf"), "//result[@numFound='0']");
  // test escaping of ";"
  assertU("deleting 42 for no reason at all", delI("42"));
  assertU("adding doc#42", adoc("id", "42", "val_s", "aa;bb"));
  assertU("does commit work?", commit());
  assertQ("backslash escaping semicolon", request("id:42 AND val_s:aa\\;bb"), "//*[@numFound='1']", "//int[@name='id'][.='42']");
  assertQ("quote escaping semicolon", request("id:42 AND val_s:\"aa;bb\""), "//*[@numFound='1']", "//int[@name='id'][.='42']");
  assertQ("no escaping semicolon", request("id:42 AND val_s:aa"), "//*[@numFound='0']");
  assertU(delI("42"));
  assertU(commit());
  assertQ(request("id:42"), "//*[@numFound='0']");
  // test overwrite default of true
  assertU(adoc("id", "42", "val_s", "AAA"));
  assertU(adoc("id", "42", "val_s", "BBB"));
  assertU(commit());
  assertQ(request("id:42"), "//*[@numFound='1']", "//str[.='BBB']");
  assertU(adoc("id", "42", "val_s", "CCC"));
  assertU(adoc("id", "42", "val_s", "DDD"));
  assertU(commit());
  assertQ(request("id:42"), "//*[@numFound='1']", "//str[.='DDD']");
  // test deletes
  String[] adds = new String[] {
      add(doc("id", "101"), "overwrite", "true"),
      add(doc("id", "101"), "overwrite", "true"),
      add(doc("id", "105"), "overwrite", "false"),
      add(doc("id", "102"), "overwrite", "true"),
      add(doc("id", "103"), "overwrite", "false"),
      add(doc("id", "101"), "overwrite", "true")
  };
  for (String a : adds) {
    assertU(a, a);
  }
  assertU(commit());
  int zkPort = zkServer.getPort();
  zkServer.shutdown();
  // document indexing shouldn't stop immediately after a ZK disconnect
  assertU(adoc("id", "201"));
  Thread.sleep(300);
  // try a reconnect from disconnect
  zkServer = new ZkTestServer(zkDir, zkPort);
  zkServer.run();
  Thread.sleep(300);
  // ensure zk still thinks node is up
  assertTrue(zkController.getClusterState().getLiveNodes().toString(),
      zkController.getClusterState().liveNodesContain(zkController.getNodeName()));
  // test maxint
  assertQ(request("q", "id:[100 TO 110]", "rows", "2147483647"), "//*[@numFound='4']");
  // test big limit
  assertQ(request("q", "id:[100 TO 111]", "rows", "1147483647"), "//*[@numFound='4']");
  assertQ(request("id:[100 TO 110]"), "//*[@numFound='4']");
  assertU(delI("102"));
  assertU(commit());
  assertQ(request("id:[100 TO 110]"), "//*[@numFound='3']");
  assertU(delI("105"));
  assertU(commit());
  assertQ(request("id:[100 TO 110]"), "//*[@numFound='2']");
  assertU(delQ("id:[100 TO 110]"));
  assertU(commit());
  assertQ(request("id:[100 TO 110]"), "//*[@numFound='0']");
  // SOLR-2651: test that reload still gets config files from zookeeper
  zkController.getZkClient().setData("/configs/conf1/solrconfig.xml", new byte[0], true);
  // we set the solrconfig to nothing, so this reload should fail
  try {
    ignoreException("solrconfig.xml");
    h.getCoreContainer().reload(h.getCore().getName());
    fail("The reloaded SolrCore did not pick up configs from zookeeper");
  } catch (SolrException e) {
    resetExceptionIgnores();
    assertTrue(e.getMessage().contains("Unable to reload core [collection1]"));
    assertTrue(e.getCause().getMessage().contains("Error loading solr config from solrconfig.xml"));
  }
  // test stats call
  Map<String, Metric> metrics = h.getCore().getCoreMetricManager().getRegistry().getMetrics();
  assertEquals("collection1", ((Gauge) metrics.get("CORE.coreName")).getValue());
  assertEquals("collection1", ((Gauge) metrics.get("CORE.collection")).getValue());
  assertEquals("shard1", ((Gauge) metrics.get("CORE.shard")).getValue());
  assertTrue(metrics.get("CORE.refCount") != null);
  //zkController.getZkClient().printLayoutToStdOut();
}
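The adds array above exercises the overwrite flag: with overwrite=true a repeated id replaces the earlier document (id 101 is added three times but counts once), so the four distinct ids 101, 102, 103, and 105 yield numFound=4; overwrite=false simply skips the delete-by-id step. A rough SolrJ sketch of a non-overwriting add (the client URL is hypothetical, and this assumes the UpdateRequest.add(doc, overwrite) overload):

// Sketch: add a document without replacing an existing one that has the same id.
SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build();
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "105");
UpdateRequest req = new UpdateRequest();
req.add(doc, /* overwrite= */ false);
req.process(client);
client.commit();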
Use of org.apache.solr.core.SolrCore in project lucene-solr by apache.
The class EntityProcessorWrapper, method checkIfTrusted:
private void checkIfTrusted(String trans) {
  if (docBuilder != null) {
    SolrCore core = docBuilder.dataImporter.getCore();
    boolean trusted = (core != null) ? core.getCoreDescriptor().isConfigSetTrusted() : true;
    if (!trusted) {
      Exception ex = new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded "
          + "without any authentication in place,"
          + " and this transformer is not available for collections with untrusted configsets. To use this transformer,"
          + " re-upload the configset after enabling authentication and authorization.");
      String msg = "Transformer: " + trans + ". " + ex.getMessage();
      log.error(msg);
      wrapAndThrow(SEVERE, ex, msg);
    }
  }
}
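The same guard is useful in any plugin that must not run under an untrusted configset. A minimal sketch with the check factored into a helper (requireTrustedConfigSet is illustrative, not a Solr API):

// Hypothetical helper mirroring the check above; a null core (e.g. in some test
// setups) is treated as trusted, matching the original's behavior.
private static void requireTrustedConfigSet(SolrCore core, String featureName) {
  boolean trusted = (core != null) ? core.getCoreDescriptor().isConfigSetTrusted() : true;
  if (!trusted) {
    throw new SolrException(ErrorCode.UNAUTHORIZED, featureName
        + " is not available for collections with untrusted configsets."
        + " Re-upload the configset after enabling authentication and authorization.");
  }
}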
Use of org.apache.solr.core.SolrCore in project lucene-solr by apache.
The class ShardLeaderElectionContext, method cancelElection:
@Override
public void cancelElection() throws InterruptedException, KeeperException {
  String coreName = leaderProps.getStr(ZkStateReader.CORE_NAME_PROP);
  try (SolrCore core = cc.getCore(coreName)) {
    if (core != null) {
      core.getCoreDescriptor().getCloudDescriptor().setLeader(false);
    }
  }
  super.cancelElection();
}
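The try-with-resources works because CoreContainer.getCore() returns the core with its reference count already incremented, and SolrCore.close() decrements that count rather than shutting the core down, so the core is released even if setLeader() throws. The same borrow pattern in isolation (hypothetical core name):

// Borrow a core for a short-lived operation; close() just releases the reference.
try (SolrCore core = cc.getCore("collection1")) {
  if (core != null) {
    log.info("Borrowed core: {}", core.getName());
  }
}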
Use of org.apache.solr.core.SolrCore in project lucene-solr by apache.
The class ShardLeaderElectionContext, method startLeaderInitiatedRecoveryOnReplicas:
private void startLeaderInitiatedRecoveryOnReplicas(String coreName) throws Exception {
  try (SolrCore core = cc.getCore(coreName)) {
    CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor();
    String coll = cloudDesc.getCollectionName();
    String shardId = cloudDesc.getShardId();
    String coreNodeName = cloudDesc.getCoreNodeName();
    if (coll == null || shardId == null) {
      log.error("Cannot start leader-initiated recovery on new leader (core=" + coreName
          + ",coreNodeName=" + coreNodeName + ") because collection and/or shard is null!");
      return;
    }
    String znodePath = zkController.getLeaderInitiatedRecoveryZnodePath(coll, shardId);
    List<String> replicas = null;
    try {
      replicas = zkClient.getChildren(znodePath, null, false);
    } catch (NoNodeException nne) {
      // this can be ignored
    }
    if (replicas != null && replicas.size() > 0) {
      for (String replicaCoreNodeName : replicas) {
        if (coreNodeName.equals(replicaCoreNodeName))
          // added safe-guard so we don't mark this core as down
          continue;
        final Replica.State lirState = zkController.getLeaderInitiatedRecoveryState(coll, shardId, replicaCoreNodeName);
        if (lirState == Replica.State.DOWN || lirState == Replica.State.RECOVERY_FAILED) {
          log.info("After core={} coreNodeName={} was elected leader, a replica coreNodeName={} was found in state: "
              + lirState.toString() + " and needing recovery.", coreName, coreNodeName, replicaCoreNodeName);
          List<ZkCoreNodeProps> replicaProps =
              zkController.getZkStateReader().getReplicaProps(collection, shardId, coreNodeName);
          if (replicaProps != null && replicaProps.size() > 0) {
            ZkCoreNodeProps coreNodeProps = null;
            for (ZkCoreNodeProps p : replicaProps) {
              if (((Replica) p.getNodeProps()).getName().equals(replicaCoreNodeName)) {
                coreNodeProps = p;
                break;
              }
            }
            zkController.ensureReplicaInLeaderInitiatedRecovery(cc, collection, shardId, coreNodeProps,
                core.getCoreDescriptor(), false);
          }
        }
      }
    }
  }
  // core gets closed automagically
}
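Swallowing NoNodeException above is the usual SolrZkClient pattern for "list the children if the znode exists at all". The same read in isolation, assuming a connected SolrZkClient and a hypothetical LIR znode path:

// List children of a znode that may not have been created yet; a missing node
// simply means there are no entries, not an error.
List<String> entries = null;
try {
  entries = zkClient.getChildren("/collections/collection1/leader_initiated_recovery/shard1", null, false);
} catch (KeeperException.NoNodeException nne) {
  // znode absent: no replicas are marked for leader-initiated recovery
}
if (entries != null && !entries.isEmpty()) {
  log.info("LIR entries: {}", entries);
}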