Use of org.apache.solr.update.processor.UpdateRequestProcessor in project lucene-solr by apache.
The class UIMAUpdateRequestProcessorTest, method testProcessorConfiguration, looks up a named chain on the core, pulls the first factory out of it, and asks that factory for a processor instance:
@Test
public void testProcessorConfiguration() {
  SolrCore core = h.getCore();
  UpdateRequestProcessorChain chained = core.getUpdateProcessingChain(UIMA_CHAIN);
  assertNotNull(chained);
  UIMAUpdateRequestProcessorFactory factory = (UIMAUpdateRequestProcessorFactory) chained.getProcessors().get(0);
  assertNotNull(factory);
  UpdateRequestProcessor processor = factory.getInstance(req(), null, null);
  assertTrue(processor instanceof UIMAUpdateRequestProcessor);
}
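The factory/processor pair that the test retrieves follows the standard Solr extension pattern: a factory subclass hands out processors that each wrap the next processor in the chain. A minimal sketch of that pattern (the class name EchoFieldUpdateProcessorFactory and the field processed_b are made up for illustration; a real factory would also be registered in an updateRequestProcessorChain in solrconfig.xml):

import java.io.IOException;

import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.update.AddUpdateCommand;
import org.apache.solr.update.processor.UpdateRequestProcessor;
import org.apache.solr.update.processor.UpdateRequestProcessorFactory;

public class EchoFieldUpdateProcessorFactory extends UpdateRequestProcessorFactory {
  @Override
  public UpdateRequestProcessor getInstance(SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) {
    return new UpdateRequestProcessor(next) {
      @Override
      public void processAdd(AddUpdateCommand cmd) throws IOException {
        SolrInputDocument doc = cmd.getSolrInputDocument();
        doc.setField("processed_b", true); // hypothetical marker field
        super.processAdd(cmd); // delegate so the rest of the chain still runs
      }
    };
  }
}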
Use of org.apache.solr.update.processor.UpdateRequestProcessor in project lucene-solr by apache.
The class DataImportHandler, method handleRequestBody, resolves the update chain from the request parameters and hands the resulting processor to the import writer:
@Override
@SuppressWarnings("unchecked")
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
  rsp.setHttpCaching(false);
  //TODO: figure out why just the first one is OK...
  ContentStream contentStream = null;
  Iterable<ContentStream> streams = req.getContentStreams();
  if (streams != null) {
    for (ContentStream stream : streams) {
      contentStream = stream;
      break;
    }
  }
  SolrParams params = req.getParams();
  NamedList defaultParams = (NamedList) initArgs.get("defaults");
  RequestInfo requestParams = new RequestInfo(req, getParamsMap(params), contentStream);
  String command = requestParams.getCommand();
  if (DataImporter.SHOW_CONF_CMD.equals(command)) {
    String dataConfigFile = params.get("config");
    String dataConfig = params.get("dataConfig");
    if (dataConfigFile != null) {
      dataConfig = SolrWriter.getResourceAsString(req.getCore().getResourceLoader().openResource(dataConfigFile));
    }
    if (dataConfig == null) {
      rsp.add("status", DataImporter.MSG.NO_CONFIG_FOUND);
    } else {
      // Modify incoming request params to add wt=raw
      ModifiableSolrParams rawParams = new ModifiableSolrParams(req.getParams());
      rawParams.set(CommonParams.WT, "raw");
      req.setParams(rawParams);
      ContentStreamBase content = new ContentStreamBase.StringStream(dataConfig);
      rsp.add(RawResponseWriter.CONTENT, content);
    }
    return;
  }
  rsp.add("initArgs", initArgs);
  String message = "";
  if (command != null) {
    rsp.add("command", command);
  }
  // If importer is still null
  if (importer == null) {
    rsp.add("status", DataImporter.MSG.NO_INIT);
    return;
  }
  if (command != null && DataImporter.ABORT_CMD.equals(command)) {
    importer.runCmd(requestParams, null);
  } else if (importer.isBusy()) {
    message = DataImporter.MSG.CMD_RUNNING;
  } else if (command != null) {
    if (DataImporter.FULL_IMPORT_CMD.equals(command) || DataImporter.DELTA_IMPORT_CMD.equals(command) || IMPORT_CMD.equals(command)) {
      importer.maybeReloadConfiguration(requestParams, defaultParams);
      UpdateRequestProcessorChain processorChain = req.getCore().getUpdateProcessorChain(params);
      UpdateRequestProcessor processor = processorChain.createProcessor(req, rsp);
      SolrResourceLoader loader = req.getCore().getResourceLoader();
      DIHWriter sw = getSolrWriter(processor, loader, requestParams, req);
      if (requestParams.isDebug()) {
        if (debugEnabled) {
          // Synchronous request for the debug mode
          importer.runCmd(requestParams, sw);
          rsp.add("mode", "debug");
          rsp.add("documents", requestParams.getDebugInfo().debugDocuments);
          if (requestParams.getDebugInfo().debugVerboseOutput != null) {
            rsp.add("verbose-output", requestParams.getDebugInfo().debugVerboseOutput);
          }
        } else {
          message = DataImporter.MSG.DEBUG_NOT_ENABLED;
        }
      } else {
        // Asynchronous request for normal mode
        if (requestParams.getContentStream() == null && !requestParams.isSyncMode()) {
          importer.runAsync(requestParams, sw);
        } else {
          importer.runCmd(requestParams, sw);
        }
      }
    } else if (DataImporter.RELOAD_CONF_CMD.equals(command)) {
      if (importer.maybeReloadConfiguration(requestParams, defaultParams)) {
        message = DataImporter.MSG.CONFIG_RELOADED;
      } else {
        message = DataImporter.MSG.CONFIG_NOT_RELOADED;
      }
    }
  }
  rsp.add("status", importer.isBusy() ? "busy" : "idle");
  rsp.add("importResponse", message);
  rsp.add("statusMessages", importer.getStatusMessages());
}
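The two lines worth noting are getUpdateProcessorChain(params), which resolves the chain named by the update.chain request parameter (falling back to the core's default chain), and createProcessor, which links the chain's factories into a single processor. A minimal sketch of that pattern in isolation, assuming you already hold a live request and response (the class and method names here are hypothetical):

import java.io.IOException;

import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.update.AddUpdateCommand;
import org.apache.solr.update.processor.UpdateRequestProcessor;
import org.apache.solr.update.processor.UpdateRequestProcessorChain;

class ChainIndexer {
  // Push one document through whatever chain the request selects.
  void indexThroughChain(SolrQueryRequest req, SolrQueryResponse rsp, SolrInputDocument doc) throws IOException {
    // Resolves the "update.chain" parameter, falling back to the default chain.
    UpdateRequestProcessorChain chain = req.getCore().getUpdateProcessorChain(req.getParams());
    UpdateRequestProcessor processor = chain.createProcessor(req, rsp);
    try {
      AddUpdateCommand cmd = new AddUpdateCommand(req);
      cmd.solrDoc = doc;
      processor.processAdd(cmd);
      processor.finish();
    } finally {
      processor.close();
    }
  }
}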
Use of org.apache.solr.update.processor.UpdateRequestProcessor in project lucene-solr by apache.
The class MergeIndexesOp, method execute, gathers readers from source cores or index directories, then sends a merge-indexes command through a processor created from the configured chain:
@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
  SolrParams params = it.req.getParams();
  String cname = params.required().get(CoreAdminParams.CORE);
  SolrCore core = it.handler.coreContainer.getCore(cname);
  SolrQueryRequest wrappedReq = null;
  List<SolrCore> sourceCores = Lists.newArrayList();
  List<RefCounted<SolrIndexSearcher>> searchers = Lists.newArrayList();
  // stores readers created from indexDir param values
  List<DirectoryReader> readersToBeClosed = Lists.newArrayList();
  Map<Directory, Boolean> dirsToBeReleased = new HashMap<>();
  if (core != null) {
    try {
      String[] dirNames = params.getParams(CoreAdminParams.INDEX_DIR);
      if (dirNames == null || dirNames.length == 0) {
        String[] sources = params.getParams("srcCore");
        if (sources == null || sources.length == 0)
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "At least one indexDir or srcCore must be specified");
        for (int i = 0; i < sources.length; i++) {
          String source = sources[i];
          SolrCore srcCore = it.handler.coreContainer.getCore(source);
          if (srcCore == null)
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core: " + source + " does not exist");
          sourceCores.add(srcCore);
        }
      } else {
        DirectoryFactory dirFactory = core.getDirectoryFactory();
        for (int i = 0; i < dirNames.length; i++) {
          boolean markAsDone = false;
          if (dirFactory instanceof CachingDirectoryFactory) {
            if (!((CachingDirectoryFactory) dirFactory).getLivePaths().contains(dirNames[i])) {
              markAsDone = true;
            }
          }
          Directory dir = dirFactory.get(dirNames[i], DirectoryFactory.DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
          dirsToBeReleased.put(dir, markAsDone);
          // TODO: why doesn't this use the IR factory? what is going on here?
          readersToBeClosed.add(DirectoryReader.open(dir));
        }
      }
      List<DirectoryReader> readers = null;
      if (readersToBeClosed.size() > 0) {
        readers = readersToBeClosed;
      } else {
        readers = Lists.newArrayList();
        for (SolrCore solrCore : sourceCores) {
          // record the searchers so that we can decref
          RefCounted<SolrIndexSearcher> searcher = solrCore.getSearcher();
          searchers.add(searcher);
          readers.add(searcher.get().getIndexReader());
        }
      }
      UpdateRequestProcessorChain processorChain = core.getUpdateProcessingChain(params.get(UpdateParams.UPDATE_CHAIN));
      wrappedReq = new LocalSolrQueryRequest(core, it.req.getParams());
      UpdateRequestProcessor processor = processorChain.createProcessor(wrappedReq, it.rsp);
      processor.processMergeIndexes(new MergeIndexesCommand(readers, it.req));
    } catch (Exception e) {
      // log and rethrow so that if the finally fails we don't lose the original problem
      log.error("ERROR executing merge:", e);
      throw e;
    } finally {
      for (RefCounted<SolrIndexSearcher> searcher : searchers) {
        if (searcher != null)
          searcher.decref();
      }
      for (SolrCore solrCore : sourceCores) {
        if (solrCore != null)
          solrCore.close();
      }
      IOUtils.closeWhileHandlingException(readersToBeClosed);
      Set<Map.Entry<Directory, Boolean>> entries = dirsToBeReleased.entrySet();
      for (Map.Entry<Directory, Boolean> entry : entries) {
        DirectoryFactory dirFactory = core.getDirectoryFactory();
        Directory dir = entry.getKey();
        boolean markAsDone = entry.getValue();
        if (markAsDone) {
          dirFactory.doneWithDirectory(dir);
        }
        dirFactory.release(dir);
      }
      if (wrappedReq != null)
        wrappedReq.close();
      core.close();
    }
  }
}
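One way to reach this operation from a client is the CoreAdminRequest.mergeIndexes helper in SolrJ rather than hand-building core-admin parameters. A rough sketch (the base URL and core names are hypothetical; pass an empty array for whichever of indexDirs/srcCores you are not using, so the handler takes the branch you intend):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;

public class MergeIndexesExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // Merge the index of "sourceCore" into "destinationCore". No indexDir
      // values are given, so execute() above takes the srcCore branch.
      CoreAdminRequest.mergeIndexes("destinationCore", new String[0], new String[] {"sourceCore"}, client);
    }
  }
}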
Use of org.apache.solr.update.processor.UpdateRequestProcessor in project lucene-solr by apache.
The class TestSchemalessBufferedUpdates, method processAdd, builds a partial chain ending at DistributedUpdateProcessorFactory and runs a single document through it:
private SolrInputDocument processAdd(final SolrInputDocument docIn) throws IOException {
  UpdateRequestProcessorChain processorChain = h.getCore().getUpdateProcessingChain(UPDATE_CHAIN);
  assertNotNull("Undefined URP chain '" + UPDATE_CHAIN + "'", processorChain);
  List<UpdateRequestProcessorFactory> factoriesUpToDUP = new ArrayList<>();
  for (UpdateRequestProcessorFactory urpFactory : processorChain.getProcessors()) {
    factoriesUpToDUP.add(urpFactory);
    if (urpFactory.getClass().equals(DistributedUpdateProcessorFactory.class))
      break;
  }
  UpdateRequestProcessorChain chainUpToDUP = new UpdateRequestProcessorChain(factoriesUpToDUP, h.getCore());
  assertNotNull("URP chain '" + UPDATE_CHAIN + "'", chainUpToDUP);
  SolrQueryResponse rsp = new SolrQueryResponse();
  SolrQueryRequest req = req();
  try {
    SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
    AddUpdateCommand cmd = new AddUpdateCommand(req);
    cmd.solrDoc = docIn;
    UpdateRequestProcessor processor = chainUpToDUP.createProcessor(req, rsp);
    processor.processAdd(cmd);
    if (cmd.solrDoc.get("f_dt").getValue() instanceof Date) {
      // Non-JSON types (Date in this case) aren't handled properly in noggit-0.6. Although this is fixed in
      // https://github.com/yonik/noggit/commit/ec3e732af7c9425e8f40297463cbe294154682b1 to call obj.toString(),
      // Date::toString produces a Date representation that Solr doesn't like, so we convert using Instant::toString
      cmd.solrDoc.get("f_dt").setValue(((Date) cmd.solrDoc.get("f_dt").getValue()).toInstant().toString());
    }
    return cmd.solrDoc;
  } finally {
    SolrRequestInfo.clearRequestInfo();
    req.close();
  }
}
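Truncating the chain this way keeps the schema-guessing processors in play while stopping before the RunUpdateProcessorFactory that would actually write to the index. The same two-argument UpdateRequestProcessorChain constructor can assemble any ad-hoc chain in code; a sketch, assuming you already hold a SolrCore (the choice of factories here is just an example):

import java.util.Arrays;

import org.apache.solr.core.SolrCore;
import org.apache.solr.update.processor.LogUpdateProcessorFactory;
import org.apache.solr.update.processor.RunUpdateProcessorFactory;
import org.apache.solr.update.processor.UpdateRequestProcessorChain;

class AdHocChains {
  // An ad-hoc chain that logs each update and then runs it against the
  // index, bypassing whatever solrconfig.xml defines.
  static UpdateRequestProcessorChain adHocChain(SolrCore core) {
    return new UpdateRequestProcessorChain(
        Arrays.asList(new LogUpdateProcessorFactory(), new RunUpdateProcessorFactory()),
        core);
  }
}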
Use of org.apache.solr.update.processor.UpdateRequestProcessor in project lucene-solr by apache.
The class TestSearchPerf, method createIndex2, streams generated documents through the core's default chain:
// Skip encoding for updating the index
void createIndex2(int nDocs, String... fields) throws IOException {
  Set<String> fieldSet = new HashSet<>(Arrays.asList(fields));
  SolrQueryRequest req = lrf.makeRequest();
  SolrQueryResponse rsp = new SolrQueryResponse();
  UpdateRequestProcessorChain processorChain = req.getCore().getUpdateProcessingChain(null);
  UpdateRequestProcessor processor = processorChain.createProcessor(req, rsp);
  boolean foomany_s = fieldSet.contains("foomany_s");
  boolean foo1_s = fieldSet.contains("foo1_s");
  boolean foo2_s = fieldSet.contains("foo2_s");
  boolean foo4_s = fieldSet.contains("foo4_s");
  boolean foo8_s = fieldSet.contains("foo8_s");
  boolean t10_100_ws = fieldSet.contains("t10_100_ws");
  for (int i = 0; i < nDocs; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", Float.toString(i));
    if (foomany_s) {
      doc.addField("foomany_s", t(r.nextInt(nDocs * 10)));
    }
    if (foo1_s) {
      doc.addField("foo1_s", t(0));
    }
    if (foo2_s) {
      doc.addField("foo2_s", r.nextInt(2));
    }
    if (foo4_s) {
      doc.addField("foo4_s", r.nextInt(4));
    }
    if (foo8_s) {
      doc.addField("foo8_s", r.nextInt(8));
    }
    if (t10_100_ws) {
      StringBuilder sb = new StringBuilder(9 * 100);
      for (int j = 0; j < 100; j++) {
        sb.append(' ');
        sb.append(t(r.nextInt(10)));
      }
      doc.addField("t10_100_ws", sb.toString());
    }
    AddUpdateCommand cmd = new AddUpdateCommand(req);
    cmd.solrDoc = doc;
    processor.processAdd(cmd);
  }
  processor.finish();
  processor.close();
  req.close();
  assertU(commit());
  req = lrf.makeRequest();
  assertEquals(nDocs, req.getSearcher().maxDoc());
  req.close();
}
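The finish()/close() calls at the end are part of the UpdateRequestProcessor contract: finish() flushes whatever the chain has buffered, close() releases resources down the chain. The commit here goes through the test harness (assertU(commit())), but it could equally be sent through the processor itself. A sketch of that variant (the helper name commitThroughChain is made up):

import java.io.IOException;

import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.update.CommitUpdateCommand;
import org.apache.solr.update.processor.UpdateRequestProcessor;

class CommitHelper {
  // Issue the commit through the chain instead of the test harness.
  static void commitThroughChain(SolrQueryRequest req, UpdateRequestProcessor processor) throws IOException {
    // Second constructor argument selects commit (false) vs. optimize (true).
    CommitUpdateCommand cmd = new CommitUpdateCommand(req, false);
    processor.processCommit(cmd);
  }
}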