Use of org.apache.solr.schema.IndexSchema in the project lucene-solr by Apache.
The class TestInPlaceUpdatesStandalone, method beforeClass.
@BeforeClass
public static void beforeClass() throws Exception {
// Randomize numeric field implementations (Trie* vs. *Point) so the suite
// exercises both encodings across runs.
System.setProperty("solr.tests.intClassName", random().nextBoolean() ? "TrieIntField" : "IntPointField");
System.setProperty("solr.tests.longClassName", random().nextBoolean() ? "TrieLongField" : "LongPointField");
System.setProperty("solr.tests.floatClassName", random().nextBoolean() ? "TrieFloatField" : "FloatPointField");
System.setProperty("solr.tests.doubleClassName", random().nextBoolean() ? "TrieDoubleField" : "DoublePointField");
// we need consistent segments that aren't re-ordered on merge because we're
// asserting inplace updates happen by checking the internal [docid]
systemSetPropertySolrTestsMergePolicyFactory(NoMergePolicyFactory.class.getName());
// All system properties above must be set BEFORE the core is initialized.
initCore("solrconfig-tlog.xml", "schema-inplace-updates.xml");
// sanity check that autocommits are disabled
// (the triple-m "Commmit" spelling is the actual field name in Solr's UpdateHandlerInfo)
assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoCommmitMaxTime);
assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoSoftCommmitMaxTime);
assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoCommmitMaxDocs);
assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoSoftCommmitMaxDocs);
// assert that NoMergePolicy was chosen
RefCounted<IndexWriter> iw = h.getCore().getSolrCoreState().getIndexWriter(h.getCore());
try {
IndexWriter writer = iw.get();
assertTrue("Actual merge policy is: " + writer.getConfig().getMergePolicy(), writer.getConfig().getMergePolicy() instanceof NoMergePolicy);
} finally {
// Always release the ref-counted writer, even if the assertion above fails.
iw.decref();
}
// validate that the schema was not changed to an unexpected state
IndexSchema schema = h.getCore().getLatestSchema();
for (String fieldName : Arrays.asList("_version_", "inplace_l_dvo", "inplace_updatable_float", "inplace_updatable_int", "inplace_updatable_float_with_default", "inplace_updatable_int_with_default")) {
// these fields must only be using docValues to support inplace updates
SchemaField field = schema.getField(fieldName);
assertTrue(field.toString(), field.hasDocValues() && !field.indexed() && !field.stored());
}
for (String fieldName : Arrays.asList("title_s", "regular_l", "stored_i")) {
// these fields must support atomic updates, but not inplace updates (ie: stored)
SchemaField field = schema.getField(fieldName);
assertTrue(field.toString(), field.stored());
}
// Don't close this client, it would shutdown the CoreContainer
client = new EmbeddedSolrServer(h.getCoreContainer(), h.coreName);
}
Use of org.apache.solr.schema.IndexSchema in the project lucene-solr by Apache.
The class SolrIndexConfigTest, method testFailingSolrIndexConfigCreation.
@Test
public void testFailingSolrIndexConfigCreation() {
try {
SolrConfig solrConfig = new SolrConfig("bad-mpf-solrconfig.xml");
SolrIndexConfig solrIndexConfig = new SolrIndexConfig(solrConfig, null, null);
IndexSchema indexSchema = IndexSchemaFactory.buildIndexSchema(schemaFileName, solrConfig);
h.getCore().setLatestSchema(indexSchema);
solrIndexConfig.toIndexWriterConfig(h.getCore());
fail("a mergePolicy should have an empty constructor in order to be instantiated in Solr thus this should fail ");
} catch (Exception e) {
// it failed as expected
}
}
Use of org.apache.solr.schema.IndexSchema in the project lucene-solr by Apache.
The class TestXIncludeConfig, method testXInclude.
/**
 * Verifies that both XInclude directives and XML entity includes in the
 * config/schema are resolved: included request handlers, update processors,
 * field types, and fields must all be visible on the loaded core.
 */
public void testXInclude() throws Exception {
  SolrCore core = h.getCore();
  assertNotNull("includedHandler is null", core.getRequestHandler("includedHandler"));
  UpdateRequestProcessorChain chain = core.getUpdateProcessingChain("special-include");
  assertNotNull("chain is missing included processor", chain);
  // Fixed typo in the assertion message: "inclued" -> "included"
  assertEquals("chain with included processor is wrong size", 1, chain.getProcessors().size());
  assertEquals("chain has wrong included processor", RegexReplaceProcessorFactory.class, chain.getProcessors().get(0).getClass());
  IndexSchema schema = core.getLatestSchema();
  // xinclude
  assertNotNull("ft-included is null", schema.getFieldTypeByName("ft-included"));
  assertNotNull("field-included is null", schema.getFieldOrNull("field-included"));
  // entity include
  assertNotNull("ft-entity-include1 is null", schema.getFieldTypeByName("ft-entity-include1"));
  assertNotNull("ft-entity-include2 is null", schema.getFieldTypeByName("ft-entity-include2"));
  // sanity check: this type does not exist anywhere, so it must NOT resolve
  assertNull("ft-entity-include3 is not null", schema.getFieldTypeByName("ft-entity-include3"));
}
Use of org.apache.solr.schema.IndexSchema in the project lucene-solr by Apache.
The class ClusteringComponent, method docListToSolrDocumentList.
/**
 * Convert a DocList to a SolrDocumentList
 *
 * The optional param "ids" is populated with the lucene document id
 * for each SolrDocument.
 *
 * @param docs The {@link org.apache.solr.search.DocList} to convert
 * @param searcher The {@link org.apache.solr.search.SolrIndexSearcher} to use to load the docs from the Lucene index
 * @param fields The names of the Fields to load; {@code null} means load all stored fields
 * @param ids A map to store the ids of the docs; may be {@code null} if the caller does not need them
 * @return The new {@link SolrDocumentList} containing all the loaded docs
 * @throws IOException if there was a problem loading the docs
 * @since solr 1.4
 */
public static SolrDocumentList docListToSolrDocumentList(DocList docs, SolrIndexSearcher searcher, Set<String> fields, Map<SolrDocument, Integer> ids) throws IOException {
  IndexSchema schema = searcher.getSchema();
  SolrDocumentList list = new SolrDocumentList();
  list.setNumFound(docs.matches());
  list.setMaxScore(docs.maxScore());
  list.setStart(docs.offset());
  DocIterator dit = docs.iterator();
  while (dit.hasNext()) {
    int docid = dit.nextDoc();
    Document luceneDoc = searcher.doc(docid, fields);
    SolrDocument doc = new SolrDocument();
    for (IndexableField field : luceneDoc) {
      // Only copy fields the caller asked for (null == all stored fields).
      if (null == fields || fields.contains(field.name())) {
        SchemaField sf = schema.getField(field.name());
        // Convert the raw Lucene field into the external (schema-typed) value.
        doc.addField(field.name(), sf.getType().toObject(field));
      }
    }
    if (docs.hasScores() && (null == fields || fields.contains("score"))) {
      doc.addField("score", dit.score());
    }
    list.add(doc);
    if (ids != null) {
      // Use Integer.valueOf instead of the deprecated `new Integer(...)`
      // boxing constructor (allows the JDK's Integer cache to be used).
      ids.put(doc, Integer.valueOf(docid));
    }
  }
  return list;
}
Use of org.apache.solr.schema.IndexSchema in the project lucene-solr by Apache.
The class SolrStopwordsCarrot2LexicalDataFactory, method getSolrStopWordsForField.
/**
 * Obtains stop words for a field from the associated
 * {@link StopFilterFactory}, if any.
 *
 * Results are cached per field name; the cache is guarded by synchronizing
 * on the map itself so concurrent callers see a consistent view.
 *
 * @param fieldName the schema field whose analyzer chain is inspected
 * @return the cached list of stop/common word sets for the field (possibly empty)
 */
private List<CharArraySet> getSolrStopWordsForField(String fieldName) {
  // NOTE(review): the original comment here was truncated ("... of this class
  // are not used by multiple threads at a time."); presumably it explained why
  // synchronization is (or is not) needed — confirm against the full class.
  synchronized (solrStopWords) {
    // Single lookup instead of repeated containsKey/get calls on the map.
    List<CharArraySet> wordSets = solrStopWords.get(fieldName);
    if (wordSets == null) {
      wordSets = new ArrayList<>();
      solrStopWords.put(fieldName, wordSets);
      IndexSchema schema = core.getLatestSchema();
      final Analyzer fieldAnalyzer = schema.getFieldType(fieldName).getIndexAnalyzer();
      if (fieldAnalyzer instanceof TokenizerChain) {
        final TokenFilterFactory[] filterFactories = ((TokenizerChain) fieldAnalyzer).getTokenFilterFactories();
        for (TokenFilterFactory factory : filterFactories) {
          if (factory instanceof StopFilterFactory) {
            // StopFilterFactory holds the stop words in a CharArraySet
            CharArraySet stopWords = ((StopFilterFactory) factory).getStopWords();
            wordSets.add(stopWords);
          }
          if (factory instanceof CommonGramsFilterFactory) {
            // Common-gram words behave like stop words for clustering purposes.
            CharArraySet commonWords = ((CommonGramsFilterFactory) factory).getCommonWords();
            wordSets.add(commonWords);
          }
        }
      }
    }
    return wordSets;
  }
}
Aggregations