Example usage of org.apache.solr.common.SolrInputDocument in the Apache lucene-solr project:
class TlogReplayBufferedWhileIndexingTest, method indexr.
// Deliberately bypass the random-field variant - the random fields can deadlock...
@Override
protected void indexr(Object... fields) throws Exception {
    // Build the document from the caller-supplied fields plus the fixed "rnd_b" marker.
    SolrInputDocument document = new SolrInputDocument();
    addFields(document, fields);
    addFields(document, "rnd_b", true);
    indexDoc(document);
}
Example usage of org.apache.solr.common.SolrInputDocument in the Apache lucene-solr project:
class TestHdfsBackupRestoreCore, method test.
// Integration test: back up a 1-shard/1-replica collection to an HDFS repository and
// restore it one or more times, verifying document counts and backup-dir permissions.
// NOTE(review): relies on class-level fixtures (cluster, fs, docsSeed, log) set up elsewhere.
@Test
public void test() throws Exception {
CloudSolrClient solrClient = cluster.getSolrClient();
String collectionName = "HdfsBackupRestore";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
create.process(solrClient);
// Seed the collection; nDocs may be 0, which the restore loop below must tolerate.
int nDocs = BackupRestoreUtils.indexDocs(solrClient, collectionName, docsSeed);
DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
// Exactly one shard with one replica, so a single core-level backup covers all the data.
assertEquals(1, collectionState.getActiveSlices().size());
Slice shard = collectionState.getActiveSlices().iterator().next();
assertEquals(1, shard.getReplicas().size());
Replica replica = shard.getReplicas().iterator().next();
String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
String backupName = TestUtil.randomSimpleString(random(), 1, 5);
// Randomly exercise both entry points: ReplicationHandler commands vs the CoreAdmin API.
boolean testViaReplicationHandler = random().nextBoolean();
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString();
try (SolrClient masterClient = getHttpSolrClient(replicaBaseUrl)) {
// Create a backup.
if (testViaReplicationHandler) {
log.info("Running Backup via replication handler");
BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName);
CheckBackupStatus checkBackupStatus = new CheckBackupStatus((HttpSolrClient) masterClient, coreName, null);
// NOTE(review): no upper bound on this poll loop - a backup that never succeeds
// hangs the test until a framework-level timeout fires.
while (!checkBackupStatus.success) {
checkBackupStatus.fetchStatus();
Thread.sleep(1000);
}
} else {
log.info("Running Backup via core admin api");
Map<String, String> params = new HashMap<>();
params.put("name", backupName);
params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params);
}
// Restore several times (at least once) to verify the snapshot can be re-applied repeatedly.
int numRestoreTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
for (int attempts = 0; attempts < numRestoreTests; attempts++) {
//Modify existing index before we call restore.
if (nDocs > 0) {
//Delete a few docs
int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
for (int i = 0; i < numDeletes; i++) {
masterClient.deleteByQuery(collectionName, "id:" + i);
}
masterClient.commit(collectionName);
//Add a few more
int moreAdds = TestUtil.nextInt(random(), 1, 100);
for (int i = 0; i < moreAdds; i++) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", i + nDocs);
doc.addField("name", "name = " + (i + nDocs));
masterClient.add(collectionName, doc);
}
//Purposely not calling commit once in a while. There can be some docs which are not committed
if (usually()) {
masterClient.commit(collectionName);
}
}
// Restore from the backup created above; each iteration re-applies the same snapshot.
if (testViaReplicationHandler) {
log.info("Running Restore via replication handler");
// Snapshooter prefixes "snapshot." to the backup name.
BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_RESTORE, "hdfs", backupName);
// NOTE(review): unbounded poll - same caveat as the backup status loop above.
while (!TestRestoreCore.fetchRestoreStatus(baseUrl, coreName)) {
Thread.sleep(1000);
}
} else {
log.info("Running Restore via core admin api");
Map<String, String> params = new HashMap<>();
// Snapshooter prefixes "snapshot." to the backup name, so match it here.
params.put("name", "snapshot." + backupName);
params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params);
}
//See if restore was successful by checking if all the docs are present again
BackupRestoreUtils.verifyDocs(nDocs, masterClient, coreName);
// Verify the permissions for the backup folder.
// NOTE(review): assumes the HDFS backup repository root is "/backup" - TODO confirm against the test config.
FileStatus status = fs.getFileStatus(new org.apache.hadoop.fs.Path("/backup/snapshot." + backupName));
FsPermission perm = status.getPermission();
assertEquals(FsAction.ALL, perm.getUserAction());
assertEquals(FsAction.ALL, perm.getGroupAction());
assertEquals(FsAction.ALL, perm.getOtherAction());
}
}
}
Example usage of org.apache.solr.common.SolrInputDocument in the Apache lucene-solr project:
class JsonLoaderTest, method testSimpleFormat.
/**
 * Loads a two-document JSON array and verifies that the request-level
 * {@code commitWithin} and {@code overwrite} parameters are propagated onto
 * each buffered {@code AddUpdateCommand}.
 */
public void testSimpleFormat() throws Exception {
    // Single-quoted source is easier to read; convert to valid JSON.
    String str = "[{'id':'1'},{'id':'2'}]".replace('\'', '"');
    SolrQueryRequest req = req("commitWithin", "100", "overwrite", "false");
    try {
        SolrQueryResponse rsp = new SolrQueryResponse();
        BufferingRequestProcessor p = new BufferingRequestProcessor(null);
        JsonLoader loader = new JsonLoader();
        loader.load(req, rsp, new ContentStreamBase.StringStream(str), p);

        assertEquals(2, p.addCommands.size());
        // Both commands carry the same request-level settings; only the id differs.
        for (int i = 0; i < 2; i++) {
            AddUpdateCommand add = p.addCommands.get(i);
            SolrInputField f = add.solrDoc.getField("id");
            assertEquals(String.valueOf(i + 1), f.getValue());
            // Fixed: JUnit expects the EXPECTED value as the first argument.
            assertEquals(100, add.commitWithin);
            assertFalse(add.overwrite);
        }
    } finally {
        req.close(); // close even when an assertion fails, to avoid leaking the request
    }
}
Example usage of org.apache.solr.common.SolrInputDocument in the Apache lucene-solr project:
class JsonLoaderTest, method testExtendedFieldValues.
/**
 * Loads a document using atomic-update ("extended") field syntax, where a field
 * value is a modifier map (here {@code {"add":"foo"}}), and verifies the loader
 * passes the raw map through on the {@code AddUpdateCommand} unmodified.
 */
public void testExtendedFieldValues() throws Exception {
    String str = "[{'id':'1', 'val_s':{'add':'foo'}}]".replace('\'', '"');
    SolrQueryRequest req = req();
    try {
        SolrQueryResponse rsp = new SolrQueryResponse();
        BufferingRequestProcessor p = new BufferingRequestProcessor(null);
        JsonLoader loader = new JsonLoader();
        loader.load(req, rsp, new ContentStreamBase.StringStream(str), p);

        assertEquals(1, p.addCommands.size());
        AddUpdateCommand add = p.addCommands.get(0);
        // No request params were supplied, so the command keeps its defaults.
        // Fixed: JUnit expects the EXPECTED value as the first argument.
        assertEquals(-1, add.commitWithin);
        assertTrue(add.overwrite);

        SolrInputDocument d = add.solrDoc;
        SolrInputField f = d.getField("id");
        assertEquals("1", f.getValue());
        f = d.getField("val_s");
        // The loader stores the raw modifier map as the field value.
        @SuppressWarnings("unchecked")
        Map<String, Object> map = (Map<String, Object>) f.getValue();
        assertEquals("foo", map.get("add"));
    } finally {
        req.close(); // close even when an assertion fails, to avoid leaking the request
    }
}
Example usage of org.apache.solr.common.SolrInputDocument in the Apache lucene-solr project:
class JsonLoaderTest, method testGrandChildDocs.
/**
 * Verifies three levels of document nesting survive the JSON loader: a root
 * (id=1) with two children (id=2, id=3), where the first child itself carries
 * a nested grandchild (id=4).
 */
@Test
public void testGrandChildDocs() throws Exception {
    String str = "{\n" + " \"add\": {\n" + " \"doc\": {\n" + " \"id\": \"1\",\n" + " \"_childDocuments_\": [\n" + " {\n" + " \"id\": \"2\",\n" + " \"_childDocuments_\": [\n" + " {\n" + " \"id\": \"4\",\n" + " \"foo_s\": \"Baz\"\n" + " }\n" + " ],\n" + " \"foo_s\": \"Yaz\"\n" + " },\n" + " {\n" + " \"id\": \"3\",\n" + " \"foo_s\": \"Bar\"\n" + " }\n" + " ]\n" + " }\n" + " }\n" + "}";
    SolrQueryRequest req = req("commit", "true");
    SolrQueryResponse rsp = new SolrQueryResponse();
    BufferingRequestProcessor p = new BufferingRequestProcessor(null);
    JsonLoader loader = new JsonLoader();
    loader.load(req, rsp, new ContentStreamBase.StringStream(str), p);
    assertEquals(1, p.addCommands.size());

    // Root document.
    SolrInputDocument root = p.addCommands.get(0).solrDoc;
    assertEquals("1", root.getFieldValue("id"));

    // First child (id=2) and its nested grandchild (id=4).
    SolrInputDocument firstChild = root.getChildDocuments().get(0);
    assertEquals("2", firstChild.getFieldValue("id"));
    assertEquals("Yaz", firstChild.getFieldValue("foo_s"));

    SolrInputDocument grandChild = firstChild.getChildDocuments().get(0);
    assertEquals("4", grandChild.getFieldValue("id"));
    assertEquals("Baz", grandChild.getFieldValue("foo_s"));

    // Second child (id=3) has no further nesting.
    SolrInputDocument secondChild = root.getChildDocuments().get(1);
    assertEquals("3", secondChild.getFieldValue("id"));
    assertEquals("Bar", secondChild.getFieldValue("foo_s"));
    req.close();
}
Aggregations