Use of org.apache.solr.common.util.NamedList in the lucene-solr project by Apache.
From the class TextResponseWriter, method writeVal:
/**
 * Writes a single named value to the response, dispatching on the value's
 * runtime type to the matching type-specific write method.
 * <p>
 * The instanceof chain is ordered roughly from the most common to the least
 * common value type. Ordering matters for correctness as well as speed: a
 * value is handled by the FIRST branch it matches, so e.g. an object that
 * implements both {@code Map} and {@code MapWriter} takes the {@code Map}
 * branch.
 *
 * @param name the key/label under which the value is written
 * @param val  the value to write; {@code null} produces an explicit null entry
 * @throws IOException if the underlying writer fails
 */
public final void writeVal(String name, Object val) throws IOException {
  // go in order of most common to least common
  if (val == null) {
    writeNull(name);
  } else if (val instanceof String) {
    writeStr(name, val.toString(), true);
    // micro-optimization... using toString() avoids a cast first
  } else if (val instanceof IndexableField) {
    // Raw Lucene field: prefer the schema field type's own writer when the
    // field is known to the schema; otherwise fall back to its string value.
    IndexableField f = (IndexableField) val;
    SchemaField sf = schema.getFieldOrNull(f.name());
    if (sf != null) {
      sf.getType().write(this, name, f);
    } else {
      writeStr(name, f.stringValue(), true);
    }
  } else if (val instanceof Number) {
    writeNumber(name, (Number) val);
  } else if (val instanceof Boolean) {
    writeBool(name, (Boolean) val);
  } else if (val instanceof Date) {
    writeDate(name, (Date) val);
  } else if (val instanceof Document) {
    // Lucene Document: convert to a SolrDocument first so it goes through
    // the same write path as other documents.
    SolrDocument doc = DocsStreamer.convertLuceneDocToSolrDoc((Document) val, schema);
    writeSolrDocument(name, doc, returnFields, 0);
  } else if (val instanceof SolrDocument) {
    writeSolrDocument(name, (SolrDocument) val, returnFields, 0);
  } else if (val instanceof ResultContext) {
    // requires access to IndexReader
    writeDocuments(name, (ResultContext) val);
  } else if (val instanceof DocList) {
    // Should not happen normally: wrap the bare DocList in a minimal
    // result context so it can still be written as documents.
    ResultContext ctx = new BasicResultContext((DocList) val, returnFields, null, null, req);
    writeDocuments(name, ctx);
    // }
    // else if (val instanceof DocSet) {
    // how do we know what fields to read?
    // todo: have a DocList/DocSet wrapper that
    // restricts the fields to write...?
  } else if (val instanceof SolrDocumentList) {
    writeSolrDocumentList(name, (SolrDocumentList) val, returnFields);
  } else if (val instanceof Map) {
    writeMap(name, (Map) val, false, true);
  } else if (val instanceof NamedList) {
    writeNamedList(name, (NamedList) val);
  } else if (val instanceof Path) {
    writeStr(name, ((Path) val).toAbsolutePath().toString(), true);
  } else if (val instanceof IteratorWriter) {
    writeIterator((IteratorWriter) val);
  } else if (val instanceof Iterable) {
    writeArray(name, ((Iterable) val).iterator());
  } else if (val instanceof Object[]) {
    writeArray(name, (Object[]) val);
  } else if (val instanceof Iterator) {
    writeArray(name, (Iterator) val);
  } else if (val instanceof byte[]) {
    byte[] arr = (byte[]) val;
    writeByteArr(name, arr, 0, arr.length);
  } else if (val instanceof BytesRef) {
    // Write only the live slice (offset..offset+length) of the backing array.
    BytesRef arr = (BytesRef) val;
    writeByteArr(name, arr.bytes, arr.offset, arr.length);
  } else if (val instanceof EnumFieldValue) {
    writeStr(name, val.toString(), true);
  } else if (val instanceof WriteableValue) {
    ((WriteableValue) val).write(name, this);
  } else if (val instanceof MapWriter) {
    // NOTE(review): reached only for MapWriters that are not also
    // Map/NamedList, since those branches are tested earlier in the chain.
    writeMap((MapWriter) val);
  } else if (val instanceof MapSerializable) {
    //todo find a better way to reuse the map more efficiently
    writeMap(name, ((MapSerializable) val).toMap(new LinkedHashMap<>()), false, true);
  } else {
    // default... for debugging only: emit the class name plus toString()
    writeStr(name, val.getClass().getName() + ':' + val.toString(), true);
  }
}
Use of org.apache.solr.common.util.NamedList in the lucene-solr project by Apache.
From the class ManagedSynonymGraphFilterFactory, method onManagedResourceInitialized:
/**
 * Called once, during core initialization, to initialize any analysis components
 * that depend on the data managed by this resource. Initialization must happen
 * exactly once per core so that text analysis stays consistent, particularly in
 * a distributed environment: one server must not apply a different synonym set
 * than its peers.
 */
@SuppressWarnings("unchecked")
@Override
public void onManagedResourceInitialized(NamedList<?> initArgs, final ManagedResource res) throws SolrException {
  // Seed the delegate factory's arguments with the managed-resource defaults.
  NamedList<Object> factoryArgs = (NamedList<Object>) initArgs;
  factoryArgs.add("synonyms", getResourceId());
  factoryArgs.add("expand", "false");
  factoryArgs.add("format", "solr");

  // SynonymGraphFilterFactory expects plain String arguments, so stringify all values.
  Map<String, String> stringArgs = new HashMap<>();
  for (Map.Entry<String, ?> arg : factoryArgs) {
    stringArgs.put(arg.getKey(), arg.getValue().toString());
  }

  // Build the actual filter factory; it pulls the synonym mappings from
  // synonymMappings through a custom parser implementation rather than a file.
  delegate = new SynonymGraphFilterFactory(stringArgs) {
    @Override
    protected SynonymMap loadSynonyms(ResourceLoader loader, String cname, boolean dedup, Analyzer analyzer) throws IOException, ParseException {
      ManagedSynonymParser parser = new ManagedSynonymParser((SynonymManager) res, dedup, analyzer);
      // null is safe here because no actual parsing is done against an input Reader
      parser.parse(null);
      return parser.build();
    }
  };

  try {
    delegate.inform(res.getResourceLoader());
  } catch (IOException e) {
    throw new SolrException(ErrorCode.SERVER_ERROR, e);
  }
}
Use of org.apache.solr.common.util.NamedList in the lucene-solr project by Apache.
From the class BaseSolrResource, method handleException:
/**
 * If there is an exception on the SolrResponse:
 * <ul>
 * <li>error info is added to the SolrResponse;</li>
 * <li>the response status code is set to the error code from the exception; and</li>
 * <li>the exception message is added to the list of things to be logged.</li>
 * </ul>
 *
 * @param log logger passed through to {@code ResponseUtils.getErrorInfo} for
 *            recording the error details
 */
protected void handleException(Logger log) {
  Exception exception = getSolrResponse().getException();
  if (null != exception) {
    // Typed instead of raw NamedList; SimpleOrderedMap keeps the ordered,
    // JSON-friendly response shape for the "error" section.
    NamedList<Object> info = new SimpleOrderedMap<>();
    int code = ResponseUtils.getErrorInfo(exception, info, log);
    setStatus(Status.valueOf(code));
    getSolrResponse().add("error", info);
    // Surface a non-blank error message in the request log as well.
    String message = (String) info.get("msg");
    if (null != message) {
      String trimmed = message.trim(); // trim once instead of twice
      if (!trimmed.isEmpty()) {
        getSolrResponse().getToLog().add("msg", "{" + trimmed + "}");
      }
    }
  }
}
Use of org.apache.solr.common.util.NamedList in the lucene-solr project by Apache.
From the class SolrReturnFields, method parseFieldList:
/**
 * Parses the requested field list ({@code fl} parameter values) into the
 * return-field state of this object: the set of acceptable field names,
 * the rename map, and any document transformers (augmenters).
 * <p>
 * An absent or empty {@code fl} means "return all fields".
 *
 * @param fl  raw fl parameter values, possibly null or empty
 * @param req the current request, passed through to {@code add} for
 *            transformer resolution
 */
private void parseFieldList(String[] fl, SolrQueryRequest req) {
  _wantsScore = false;
  _wantsAllFields = false;
  // No fl, or a single empty fl value, selects all fields.
  if (fl == null || fl.length == 0 || fl.length == 1 && fl[0].length() == 0) {
    _wantsAllFields = true;
    return;
  }
  // rename holds (from -> to) pairs in request order; add() fills it,
  // along with okFieldNames/reqFieldNames/globs side effects.
  NamedList<String> rename = new NamedList<>();
  DocTransformers augmenters = new DocTransformers();
  for (String fieldList : fl) {
    add(fieldList, rename, augmenters, req);
  }
  // Turn each rename pair into a RenameFieldTransformer, chaining renames:
  // if a later entry renames the same source field again, redirect it to
  // the current target so the chain stays consistent.
  for (int i = 0; i < rename.size(); i++) {
    String from = rename.getName(i);
    String to = rename.getVal(i);
    okFieldNames.add(to);
    // copy==true when the source field was itself explicitly requested,
    // so the transformer copies rather than moves the value.
    boolean copy = (reqFieldNames != null && reqFieldNames.contains(from));
    if (!copy) {
      // Check that subsequent copy/rename requests have the field they need to copy
      for (int j = i + 1; j < rename.size(); j++) {
        if (from.equals(rename.getName(j))) {
          // copy from the current target
          rename.setName(j, to);
          if (reqFieldNames == null) {
            reqFieldNames = new LinkedHashSet<>();
          }
          // don't rename our current target
          reqFieldNames.add(to);
        }
      }
    }
    augmenters.addTransformer(new RenameFieldTransformer(from, to, copy));
  }
  if (rename.size() > 0) {
    renameFields = rename.asShallowMap();
  }
  if (!_wantsAllFields && !globs.isEmpty()) {
    // TODO??? need to fill up the fields with matching field names in the index
    // and add them to okFieldNames?
    // maybe just get all fields?
    // this would disable field selection optimization... i think that is OK
    // this will get all fields, and use wantsField to limit
    fields.clear();
  }
  // Collapse the transformer list: a single augmenter is used directly,
  // multiple augmenters are applied via the DocTransformers composite.
  if (augmenters.size() == 1) {
    transformer = augmenters.getTransformer(0);
  } else if (augmenters.size() > 1) {
    transformer = augmenters;
  }
}
Use of org.apache.solr.common.util.NamedList in the lucene-solr project by Apache.
From the class UpdateRequestProcessorChain, method init:
/**
 * Initializes the chain using the factories specified by the <code>PluginInfo</code>.
 * If the chain includes the <code>RunUpdateProcessorFactory</code>, but
 * does not include an implementation of the
 * <code>DistributingUpdateProcessorFactory</code> interface, then an
 * instance of <code>DistributedUpdateProcessorFactory</code> will be
 * injected immediately prior to the <code>RunUpdateProcessorFactory</code>.
 *
 * @see DistributingUpdateProcessorFactory
 * @see RunUpdateProcessorFactory
 * @see DistributedUpdateProcessorFactory
 */
@Override
public void init(PluginInfo info) {
  final String infomsg = "updateRequestProcessorChain \"" + (null != info.name ? info.name : "") + "\"" + (info.isDefault() ? " (default)" : "");
  // Parameterized logging: the message is only assembled when debug is enabled.
  log.debug("creating {}", infomsg);
  // wrap in an ArrayList so we know we can do fast index lookups
  // and that add(int,Object) is supported
  List<UpdateRequestProcessorFactory> list = new ArrayList<>(solrCore.initPlugins(info.getChildren("processor"), UpdateRequestProcessorFactory.class, null));
  if (list.isEmpty()) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, infomsg + " require at least one processor");
  }
  // Count distributing factories and remember where the run factory sits.
  int numDistrib = 0;
  int runIndex = -1;
  // (no idea why someone might use multiple run instances, but just in case)
  for (int i = list.size() - 1; 0 <= i; i--) {
    UpdateRequestProcessorFactory factory = list.get(i);
    if (factory instanceof DistributingUpdateProcessorFactory) {
      numDistrib++;
    }
    if (factory instanceof RunUpdateProcessorFactory) {
      runIndex = i;
    }
  }
  if (1 < numDistrib) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, infomsg + " may not contain more then one " + "instance of DistributingUpdateProcessorFactory");
  }
  if (0 <= runIndex && 0 == numDistrib) {
    // by default, add distrib processor immediately before run
    DistributedUpdateProcessorFactory distrib = new DistributedUpdateProcessorFactory();
    distrib.init(new NamedList<>()); // typed, empty args (was a raw NamedList)
    list.add(runIndex, distrib);
    log.debug("inserting DistributedUpdateProcessorFactory into {}", infomsg);
  }
  chain = list;
  // If chain-level attributes are present, rebuild the chain from them;
  // otherwise the list assembled above stands.
  ProcessorInfo processorInfo = new ProcessorInfo(new MapSolrParams(info.attributes));
  if (processorInfo.isEmpty()) {
    return;
  }
  UpdateRequestProcessorChain newChain = constructChain(this, processorInfo, solrCore);
  chain = newChain.chain;
}
Aggregations