Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
The class DocumentAnalysisRequestHandlerTest, method testCharsetOutsideDocument.
// This test should also test charset detection in UpdateRequestHandler,
// but the DocumentAnalysisRequestHandler is simpler to use/check.
@Test
public void testCharsetOutsideDocument() throws Exception {
  final byte[] xmlBytes = ("<docs>\r\n" +
      " <doc>\r\n" +
      " <field name=\"id\">Müller</field>\r\n" +
      " </doc>" +
      "</docs>").getBytes(StandardCharsets.ISO_8859_1);
  // we declare a content stream with charset:
  final ContentStream cs = new ByteStream(xmlBytes, "application/xml; charset=ISO-8859-1");
  ModifiableSolrParams params = new ModifiableSolrParams();
  SolrQueryRequest req = new SolrQueryRequestBase(h.getCore(), params) {

    @Override
    public Iterable<ContentStream> getContentStreams() {
      return Collections.singleton(cs);
    }
  };
  DocumentAnalysisRequest request = handler.resolveAnalysisRequest(req);
  assertNotNull(request);
  final List<SolrInputDocument> documents = request.getDocuments();
  assertNotNull(documents);
  assertEquals(1, documents.size());
  SolrInputDocument doc = documents.get(0);
  assertEquals("Müller", doc.getField("id").getValue());
}
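The ByteStream helper used above is not shown in the snippet; it is a small inner class of the test. A minimal sketch, assuming it extends org.apache.solr.common.util.ContentStreamBase and uses that class's protected contentType and size fields:

// Sketch of the ByteStream helper: a ContentStreamBase over a raw byte[].
// Field names follow ContentStreamBase; treat this as an illustration,
// not the exact code from the test class.
private static class ByteStream extends ContentStreamBase {

  private final byte[] bytes;

  public ByteStream(byte[] bytes, String contentType) {
    this.bytes = bytes;
    this.contentType = contentType; // carries the declared charset, if any
    this.size = (long) bytes.length;
  }

  @Override
  public InputStream getStream() throws IOException {
    return new ByteArrayInputStream(bytes);
  }
}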
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
The class DocumentAnalysisRequestHandlerTest, method testCharsetInDocument.
// This test should also test charset detection in UpdateRequestHandler,
// but the DocumentAnalysisRequestHandler is simpler to use/check.
@Test
public void testCharsetInDocument() throws Exception {
  final byte[] xmlBytes = ("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\r\n" +
      "<docs>\r\n" +
      " <doc>\r\n" +
      " <field name=\"id\">Müller</field>\r\n" +
      " </doc>" +
      "</docs>").getBytes(StandardCharsets.ISO_8859_1);
  // we declare a content stream without charset:
  final ContentStream cs = new ByteStream(xmlBytes, "application/xml");
  ModifiableSolrParams params = new ModifiableSolrParams();
  SolrQueryRequest req = new SolrQueryRequestBase(h.getCore(), params) {

    @Override
    public Iterable<ContentStream> getContentStreams() {
      return Collections.singleton(cs);
    }
  };
  DocumentAnalysisRequest request = handler.resolveAnalysisRequest(req);
  assertNotNull(request);
  final List<SolrInputDocument> documents = request.getDocuments();
  assertNotNull(documents);
  assertEquals(1, documents.size());
  SolrInputDocument doc = documents.get(0);
  assertEquals("Müller", doc.getField("id").getValue());
}
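The two tests differ only in where the charset is declared: in the Content-Type header versus in the XML declaration. The distinction matters because 'ü' encodes differently in ISO-8859-1 and UTF-8, so picking the wrong decoder corrupts the value. A standalone illustration (CharsetDemo is a hypothetical class, not part of the test suite):

import java.nio.charset.StandardCharsets;

// Shows why charset detection matters for the assertions above.
public class CharsetDemo {
  public static void main(String[] args) {
    byte[] latin1 = "Müller".getBytes(StandardCharsets.ISO_8859_1); // 6 bytes, 'ü' -> 0xFC
    byte[] utf8 = "Müller".getBytes(StandardCharsets.UTF_8);        // 7 bytes, 'ü' -> 0xC3 0xBC
    // Decoding the ISO-8859-1 bytes as UTF-8 would mangle the umlaut,
    // which is exactly what the "Müller" assertions guard against.
    System.out.println(latin1.length + " vs " + utf8.length); // prints "6 vs 7"
  }
}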
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
The class TestSolrCoreSnapshots, method testIndexOptimization.
@Test
public void testIndexOptimization() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "SolrCoreSnapshots_IndexOptimization";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  create.process(solrClient);
  int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(1, collectionState.getActiveSlices().size());
  Slice shard = collectionState.getActiveSlices().iterator().next();
  assertEquals(1, shard.getReplicas().size());
  Replica replica = shard.getReplicas().iterator().next();
  String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
  String commitName = TestUtil.randomSimpleString(random(), 1, 5);
  try (SolrClient adminClient = getHttpSolrClient(cluster.getJettySolrRunners().get(0).getBaseUrl().toString());
       SolrClient masterClient = getHttpSolrClient(replica.getCoreUrl())) {
    SnapshotMetaData metaData = createSnapshot(adminClient, coreName, commitName);
    int numTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
    for (int attempt = 0; attempt < numTests; attempt++) {
      // Modify the existing index before we call optimize.
      if (nDocs > 0) {
        // Delete a few docs
        int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
        for (int i = 0; i < numDeletes; i++) {
          masterClient.deleteByQuery("id:" + i);
        }
        // Add a few more
        int moreAdds = TestUtil.nextInt(random(), 1, 100);
        for (int i = 0; i < moreAdds; i++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", i + nDocs);
          doc.addField("name", "name = " + (i + nDocs));
          masterClient.add(doc);
        }
        masterClient.commit();
      }
    }
    // Before invoking the optimize command, verify that the index directory contains multiple commits (including the one we snapshotted earlier).
    {
      Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
      // Verify that at least one index commit is stored in this directory.
      assertTrue(commits.size() > 0);
      // Verify that the snapshot commit is present in this directory.
      assertTrue(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
    }
    // Optimize the index.
    masterClient.optimize(true, true, 1);
    // After invoking the optimize command, verify that the index directory contains multiple commits (including the one we snapshotted earlier).
    {
      List<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
      // Verify that multiple index commits are stored in this directory.
      assertTrue(commits.size() > 1);
      // Verify that the snapshot commit is present in this directory.
      assertTrue(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
    }
    // Delete the snapshot
    deleteSnapshot(adminClient, coreName, metaData.getName());
    // Add a few documents. Without this, the optimize command below does not take effect.
    {
      int moreAdds = TestUtil.nextInt(random(), 1, 100);
      for (int i = 0; i < moreAdds; i++) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", i + nDocs);
        doc.addField("name", "name = " + (i + nDocs));
        masterClient.add(doc);
      }
      masterClient.commit();
    }
    // Optimize the index.
    masterClient.optimize(true, true, 1);
    // Verify that the index directory contains only 1 index commit (which is not the same as the snapshotted commit).
    Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
    assertTrue(commits.size() == 1);
    assertFalse(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
  }
}
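The listCommits helper is not included in the snippet. A plausible sketch built on Lucene's DirectoryReader.listCommits(Directory); the directory handling and the empty-list fallback are assumptions about the test's needs:

// Hedged sketch of a listCommits helper over a filesystem index directory.
private static List<IndexCommit> listCommits(String directoryPath) throws Exception {
  try (Directory dir = new SimpleFSDirectory(Paths.get(directoryPath))) {
    // DirectoryReader.listCommits reads the commit points eagerly, so the
    // returned IndexCommit metadata (e.g. getGeneration()) stays usable.
    return DirectoryReader.listCommits(dir);
  } catch (IndexNotFoundException ex) {
    // No commit present in the directory yet.
    return Collections.emptyList();
  }
}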
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
The class NumericFieldsTest, method getDoc.
public static SolrInputDocument getDoc(String id, Integer number, String date) {
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", id);
  for (String t : types) {
    if ("date".equals(t)) {
      doc.addField(t, date);
      doc.addField(t + "_last", date);
      doc.addField(t + "_first", date);
    } else {
      doc.addField(t, number);
      doc.addField(t + "_last", number);
      doc.addField(t + "_first", number);
    }
  }
  return doc;
}
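Here types is a static field of NumericFieldsTest listing the field types under test; its exact contents are not shown. Assuming something like {"int", "long", "float", "double", "date"}, a call yields one field triple per type:

// Hypothetical usage; the types array contents are a guess.
SolrInputDocument d = getDoc("42", 42, "2015-01-01T00:00:00Z");
// d then contains: id=42, int=42, int_last=42, int_first=42, ...,
// and date=2015-01-01T00:00:00Z, date_last=..., date_first=...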
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
The class TestManagedSchemaAPI, method testReloadAndAddSimple.
private void testReloadAndAddSimple(String collection) throws IOException, SolrServerException {
  CloudSolrClient cloudClient = cluster.getSolrClient();
  String fieldName = "myNewField";
  addStringField(fieldName, collection, cloudClient);
  CollectionAdminRequest.Reload reloadRequest = CollectionAdminRequest.reloadCollection(collection);
  CollectionAdminResponse response = reloadRequest.process(cloudClient);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "1");
  doc.addField(fieldName, "val");
  UpdateRequest ureq = new UpdateRequest().add(doc);
  cloudClient.request(ureq, collection);
}
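The addStringField helper is not shown. A minimal sketch using SolrJ's Schema API (SchemaRequest.AddField); the specific field attributes chosen here are assumptions:

// Hedged sketch of addStringField via the managed schema API.
private void addStringField(String fieldName, String collection, CloudSolrClient cloudClient)
    throws IOException, SolrServerException {
  Map<String, Object> fieldAttributes = new LinkedHashMap<>();
  fieldAttributes.put("name", fieldName);
  fieldAttributes.put("type", "string"); // assumed field type
  SchemaRequest.AddField addFieldRequest = new SchemaRequest.AddField(fieldAttributes);
  SchemaResponse.UpdateResponse addFieldResponse = addFieldRequest.process(cloudClient, collection);
  assertEquals(0, addFieldResponse.getStatus());
}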