Example usage of org.apache.solr.common.SolrException in the Apache lucene-solr project, taken from class TestTolerantUpdateProcessorCloud, method testVariousAdds:
protected static void testVariousAdds(SolrClient client) throws Exception {
assertNotNull("client not initialized", client);
UpdateResponse rsp = null;
// 2 docs that are both on shard1, the first one should fail
for (int maxErrors : new int[] { -1, 2, 47, 10 }) {
// regardless of which of these maxErrors values we use, behavior should be the same...
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "" + maxErrors, "commit", "true"), doc(f("id", S_ONE_PRE + "42"), f("foo_i", "bogus_value")), doc(f("id", S_ONE_PRE + "666"), f("foo_i", "1976"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("single shard, 1st doc should fail", rsp, S_ONE_PRE + "42");
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, S_ONE_PRE + "42");
assertQueryDocIds(client, true, S_ONE_PRE + "666");
// ...only diff should be that we get an accurate report of the effective maxErrors
assertEquals(maxErrors, rsp.getResponseHeader().get("maxErrors"));
}
// 2 docs that are both on shard1, the second one should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-not-set", "commit", "true"), doc(f("id", S_ONE_PRE + "55"), f("foo_i", "1976")), doc(f("id", S_ONE_PRE + "77"), f("foo_i", "bogus_val"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("single shard, 2nd doc should fail", rsp, S_ONE_PRE + "77");
assertQueryDocIds(client, false, S_ONE_PRE + "77");
assertQueryDocIds(client, true, S_ONE_PRE + "666", S_ONE_PRE + "55");
// since maxErrors is unset, we should get an "unlimited" value back
assertEquals(-1, rsp.getResponseHeader().get("maxErrors"));
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// 2 docs on 2 diff shards, first of which should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "42"), f("foo_i", "bogus_value")), doc(f("id", S_TWO_PRE + "666"), f("foo_i", "1976"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("two shards, 1st doc should fail", rsp, S_ONE_PRE + "42");
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, S_ONE_PRE + "42");
assertQueryDocIds(client, true, S_TWO_PRE + "666");
// 2 docs on 2 diff shards, second of which should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "55"), f("foo_i", "1976")), doc(f("id", S_TWO_PRE + "77"), f("foo_i", "bogus_val"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("two shards, 2nd doc should fail", rsp, S_TWO_PRE + "77");
assertQueryDocIds(client, false, S_TWO_PRE + "77");
assertQueryDocIds(client, true, S_TWO_PRE + "666", S_ONE_PRE + "55");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, 1 from each shard should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23")), doc(f("id", S_ONE_PRE + "14")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16")), doc(f("id", S_TWO_PRE + "26"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("many docs, 1 from each shard should fail", rsp, S_ONE_PRE + "15", S_TWO_PRE + "22");
assertQueryDocIds(client, false, S_TWO_PRE + "22", S_ONE_PRE + "15");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_TWO_PRE + "21", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "23", S_ONE_PRE + "14", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "16", S_TWO_PRE + "26");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, 1 from each shard should fail and 1 w/o uniqueKey
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23")), // no "id"
doc(f("foo_i", "42")), doc(f("id", S_ONE_PRE + "14")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16")), doc(f("id", S_TWO_PRE + "26"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("many docs, 1 from each shard (+ no id) should fail", rsp, S_ONE_PRE + "15", "(unknown)", S_TWO_PRE + "22");
assertQueryDocIds(client, false, S_TWO_PRE + "22", S_ONE_PRE + "15");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_TWO_PRE + "21", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "23", S_ONE_PRE + "14", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "16", S_TWO_PRE + "26");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
try {
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "14"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "26"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "17")), doc(f("id", S_TWO_PRE + "27")), doc(f("id", S_ONE_PRE + "18"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "28"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "19"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "29"), f("foo_i", "bogus_val")), // may be skipped, more then 10 fails
doc(f("id", S_ONE_PRE + "10")), // may be skipped, more then 10 fails
doc(f("id", S_TWO_PRE + "20"))).process(client);
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
int actualKnownErrsCount = 0;
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
actualKnownErrs.add(err);
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
for (ToleratedUpdateError err : actualKnownErrs) {
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed err msg didn't match expected value: " + err, err.getMessage().contains("bogus_val"));
}
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, // explicitly failed
S_TWO_PRE + "21", S_TWO_PRE + "22", S_TWO_PRE + "23", S_ONE_PRE + "14", S_ONE_PRE + "15", S_ONE_PRE + "16", S_TWO_PRE + "26", S_ONE_PRE + "18", S_TWO_PRE + "28", S_ONE_PRE + "19", S_TWO_PRE + "29");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "17", S_TWO_PRE + "27");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
try {
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
docs.add(doc(f("id", S_ONE_PRE + i)));
docs.add(doc(f("id", S_TWO_PRE + i), f("foo_i", "bogus_val")));
}
// may be skipped, more then 10 fails
docs.add(doc(f("id", S_ONE_PRE + "x")));
// may be skipped, more then 10 fails
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
int actualKnownErrsCount = 0;
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
actualKnownErrs.add(err);
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
for (ToleratedUpdateError err : actualKnownErrs) {
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed id had unexpected prefix: " + err, err.getId().startsWith(S_TWO_PRE));
assertTrue("failed err msg didn't match expected value: " + err, err.getMessage().contains("bogus_val"));
}
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y", //
S_ONE_PRE + "0", S_ONE_PRE + "1", S_ONE_PRE + "2", S_ONE_PRE + "3", S_ONE_PRE + "4", S_ONE_PRE + "5", S_ONE_PRE + "6", S_ONE_PRE + "7", S_ONE_PRE + "8", S_ONE_PRE + "9");
assertQueryDocIds(client, false, // explicitly failed
S_TWO_PRE + "0", S_TWO_PRE + "1", S_TWO_PRE + "2", S_TWO_PRE + "3", S_TWO_PRE + "4", S_TWO_PRE + "5", S_TWO_PRE + "6", S_TWO_PRE + "7", S_TWO_PRE + "8", S_TWO_PRE + "9");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
try {
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
// no "id" field
docs.add(doc(f("foo_i", "" + i)));
}
// may be skipped, more then 10 fails
docs.add(doc(f("id", S_ONE_PRE + "x")));
// may be skipped, more then 10 fails
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
fail("did not get a top level exception when more then 10 docs mising uniqueKey: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
int actualKnownErrsCount = 0;
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed id didn't match 'unknown': " + err, err.getId().contains("unknown"));
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, more then 10 from a single shard (two) should fail but
// request should still succeed because of maxErrors=-1 param
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
ArrayList<ExpectedErr> expectedErrs = new ArrayList<ExpectedErr>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
docs.add(doc(f("id", S_ONE_PRE + i)));
docs.add(doc(f("id", S_TWO_PRE + i), f("foo_i", "bogus_val")));
expectedErrs.add(addErr(S_TWO_PRE + i));
}
docs.add(doc(f("id", S_ONE_PRE + "x")));
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "-1", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
assertUpdateTolerantErrors("many docs from shard2 fail, but req should succeed", rsp, expectedErrs.toArray(new ExpectedErr[expectedErrs.size()]));
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y", // later
S_ONE_PRE + "x", // later
S_TWO_PRE + "x");
}
Example usage of org.apache.solr.common.SolrException in the Apache lucene-solr project, taken from class TestPullReplica, method waitForDeletion:
private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
TimeOut t = new TimeOut(10, TimeUnit.SECONDS);
while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
LOG.info("Collection not yet deleted");
try {
Thread.sleep(100);
if (t.hasTimedOut()) {
fail("Timed out waiting for collection " + collection + " to be deleted.");
}
cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
} catch (SolrException e) {
return;
}
}
}
Example usage of org.apache.solr.common.SolrException in the Apache lucene-solr project, taken from class TestPullReplicaErrorHandling, method waitForDeletion:
private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
TimeOut t = new TimeOut(10, TimeUnit.SECONDS);
while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
LOG.info("Collection not yet deleted");
try {
Thread.sleep(100);
if (t.hasTimedOut()) {
fail("Timed out waiting for collection " + collection + " to be deleted.");
}
cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
} catch (SolrException e) {
return;
}
}
}
Example usage of org.apache.solr.common.SolrException in the Apache lucene-solr project, taken from class FieldAnalysisRequestHandlerTest, method testResolveAnalysisRequest:
/**
* Tests the {@link FieldAnalysisRequestHandler#resolveAnalysisRequest(org.apache.solr.request.SolrQueryRequest)}
*/
@Test
public void testResolveAnalysisRequest() throws Exception {
ModifiableSolrParams params = new ModifiableSolrParams();
params.add(AnalysisParams.FIELD_NAME, "text,nametext");
params.add(AnalysisParams.FIELD_TYPE, "whitetok,keywordtok");
params.add(AnalysisParams.FIELD_VALUE, "the quick red fox jumped over the lazy brown dogs");
params.add(CommonParams.Q, "fox brown");
SolrQueryRequest req = new LocalSolrQueryRequest(h.getCore(), params);
FieldAnalysisRequest request = handler.resolveAnalysisRequest(req);
List<String> fieldNames = request.getFieldNames();
assertEquals("Expecting 2 field names", 2, fieldNames.size());
assertEquals("text", fieldNames.get(0));
assertEquals("nametext", fieldNames.get(1));
List<String> fieldTypes = request.getFieldTypes();
assertEquals("Expecting 2 field types", 2, fieldTypes.size());
assertEquals("whitetok", fieldTypes.get(0));
assertEquals("keywordtok", fieldTypes.get(1));
assertEquals("the quick red fox jumped over the lazy brown dogs", request.getFieldValue());
assertEquals("fox brown", request.getQuery());
assertFalse(request.isShowMatch());
req.close();
// testing overide of query value using analysis.query param
params.add(AnalysisParams.QUERY, "quick lazy");
req = new LocalSolrQueryRequest(h.getCore(), params);
request = handler.resolveAnalysisRequest(req);
assertEquals("quick lazy", request.getQuery());
req.close();
// testing analysis.showmatch param
params.add(AnalysisParams.SHOW_MATCH, "false");
req = new LocalSolrQueryRequest(h.getCore(), params);
request = handler.resolveAnalysisRequest(req);
assertFalse(request.isShowMatch());
req.close();
params.set(AnalysisParams.SHOW_MATCH, "true");
req = new LocalSolrQueryRequest(h.getCore(), params);
request = handler.resolveAnalysisRequest(req);
assertTrue(request.isShowMatch());
req.close();
// testing absence of query value
params.remove(CommonParams.Q);
params.remove(AnalysisParams.QUERY);
req = new LocalSolrQueryRequest(h.getCore(), params);
request = handler.resolveAnalysisRequest(req);
assertNull(request.getQuery());
req.close();
// test absence of index-time value and presence of q
params.remove(AnalysisParams.FIELD_VALUE);
params.add(CommonParams.Q, "quick lazy");
request = handler.resolveAnalysisRequest(req);
assertEquals("quick lazy", request.getQuery());
req.close();
// test absence of index-time value and presence of query
params.remove(CommonParams.Q);
params.add(AnalysisParams.QUERY, "quick lazy");
request = handler.resolveAnalysisRequest(req);
assertEquals("quick lazy", request.getQuery());
req.close();
// must fail if all of q, analysis.query or analysis.value are absent
params.remove(CommonParams.Q);
params.remove(AnalysisParams.QUERY);
params.remove(AnalysisParams.FIELD_VALUE);
try {
request = handler.resolveAnalysisRequest(req);
fail("Analysis request must fail if all of q, analysis.query or analysis.value are absent");
} catch (SolrException e) {
if (e.code() != SolrException.ErrorCode.BAD_REQUEST.code) {
fail("Unexpected exception");
}
} catch (Exception e) {
fail("Unexpected exception");
}
req.close();
}
Example usage of org.apache.solr.common.SolrException in the Apache lucene-solr project, taken from class TestManagedSchema, method testAddedFieldIndexableAndQueryable:
public void testAddedFieldIndexableAndQueryable() throws Exception {
assertSchemaResource(collection, "managed-schema");
deleteCore();
File managedSchemaFile = new File(tmpConfDir, "managed-schema");
// Delete managed-schema so it won't block parsing a new schema
Files.delete(managedSchemaFile.toPath());
System.setProperty("managed.schema.mutable", "true");
initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath());
assertTrue(managedSchemaFile.exists());
String managedSchemaContents = FileUtils.readFileToString(managedSchemaFile, "UTF-8");
assertFalse(managedSchemaContents.contains("\"new_field\""));
clearIndex();
String errString = "unknown field 'new_field'";
ignoreException(Pattern.quote(errString));
try {
assertU(adoc("new_field", "thing1 thing2", "str", "X"));
fail();
} catch (Exception e) {
for (Throwable t = e; t != null; t = t.getCause()) {
// short circuit out if we found what we expected
if (t.getMessage() != null && -1 != t.getMessage().indexOf(errString))
return;
}
// otherwise, rethrow it, possibly completely unrelated
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unexpected error, expected error matching: " + errString, e);
} finally {
resetExceptionIgnores();
}
assertU(commit());
assertQ(req("new_field:thing1"), "//*[@numFound='0']");
Map<String, Object> options = new HashMap<>();
options.put("stored", "false");
IndexSchema oldSchema = h.getCore().getLatestSchema();
String fieldName = "new_field";
String fieldType = "text";
SchemaField newField = oldSchema.newField(fieldName, fieldType, options);
IndexSchema newSchema = oldSchema.addField(newField);
h.getCore().setLatestSchema(newSchema);
assertU(adoc("new_field", "thing1 thing2", "str", "X"));
assertU(commit());
assertQ(req("new_field:thing1"), "//*[@numFound='1']");
}
End of aggregated SolrException usage examples.