use of org.apache.solr.core.CoreContainer in project lucene-solr by apache.
the class CoreAdminHandlerTest method testCreateWithSysVars.
@Test
public void testCreateWithSysVars() throws Exception {
// I require FS-based indexes for this test.
useFactory(null);
final File workDir = createTempDir(getCoreName()).toFile();
String coreName = "with_sys_vars";
File instDir = new File(workDir, coreName);
File subHome = new File(instDir, "conf");
assertTrue("Failed to make subdirectory ", subHome.mkdirs());
// Be sure we pick up sysvars when we create this
String srcDir = SolrTestCaseJ4.TEST_HOME() + "/collection1/conf";
FileUtils.copyFile(new File(srcDir, "schema-tiny.xml"), new File(subHome, "schema_ren.xml"));
FileUtils.copyFile(new File(srcDir, "solrconfig-minimal.xml"), new File(subHome, "solrconfig_ren.xml"));
FileUtils.copyFile(new File(srcDir, "solrconfig.snippet.randomindexconfig.xml"), new File(subHome, "solrconfig.snippet.randomindexconfig.xml"));
final CoreContainer cores = h.getCoreContainer();
final CoreAdminHandler admin = new CoreAdminHandler(cores);
// create a new core (using CoreAdminHandler) w/ properties
System.setProperty("INSTDIR_TEST", instDir.getAbsolutePath());
System.setProperty("CONFIG_TEST", "solrconfig_ren.xml");
System.setProperty("SCHEMA_TEST", "schema_ren.xml");
File dataDir = new File(workDir.getAbsolutePath(), "data_diff");
System.setProperty("DATA_TEST", dataDir.getAbsolutePath());
SolrQueryResponse resp = new SolrQueryResponse();
admin.handleRequestBody(req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString(),
    CoreAdminParams.NAME, getCoreName(),
    CoreAdminParams.INSTANCE_DIR, "${INSTDIR_TEST}",
    CoreAdminParams.CONFIG, "${CONFIG_TEST}",
    CoreAdminParams.SCHEMA, "${SCHEMA_TEST}",
    CoreAdminParams.DATA_DIR, "${DATA_TEST}"), resp);
assertNull("Exception on create", resp.getException());
// Now assert that certain values are properly dereferenced in the process of creating the core, see
// SOLR-4982.
// Should NOT be a datadir named ${DATA_TEST} (literal). This is the bug after all
File badDir = new File(instDir, "${DATA_TEST}");
assertFalse("Should have substituted the sys var, found file " + badDir.getAbsolutePath(), badDir.exists());
// For the other 3 vars, we couldn't have gotten past creating the core if dereferencing hadn't worked correctly.
// Should have segments in the directory pointed to by the ${DATA_TEST}.
File test = new File(dataDir, "index");
assertTrue("Should have found index dir at " + test.getAbsolutePath(), test.exists());
}
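What this test really checks (see SOLR-4982) is that ${VAR} placeholders in the CREATE parameters are dereferenced against JVM system properties before the core descriptor is built, so no literal "${DATA_TEST}" directory ever appears on disk. A minimal, self-contained sketch of that substitution contract follows; it is not Solr's actual implementation, just an illustration of the behaviour the assertions above rely on.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SysVarSubstitutionSketch {

  private static final Pattern PLACEHOLDER = Pattern.compile("\\$\\{([^}]+)\\}");

  /** Replace ${NAME} tokens with the value of the matching system property, if set. */
  static String substituteSysProps(String value) {
    Matcher m = PLACEHOLDER.matcher(value);
    StringBuffer sb = new StringBuffer();
    while (m.find()) {
      // Fall back to the literal token if the property is missing.
      String replacement = System.getProperty(m.group(1), m.group(0));
      m.appendReplacement(sb, Matcher.quoteReplacement(replacement));
    }
    m.appendTail(sb);
    return sb.toString();
  }

  public static void main(String[] args) {
    System.setProperty("DATA_TEST", "/tmp/workdir/data_diff");
    // Prints "/tmp/workdir/data_diff" -- the literal "${DATA_TEST}" must never
    // survive into the core's dataDir, which is exactly what the test asserts.
    System.out.println(substituteSysProps("${DATA_TEST}"));
  }
}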
use of org.apache.solr.core.CoreContainer in project lucene-solr by apache.
the class CoreAdminRequestStatusTest method testCoreAdminRequestStatus.
@Test
public void testCoreAdminRequestStatus() throws Exception {
final File workDir = createTempDir().toFile();
final CoreContainer cores = h.getCoreContainer();
final CoreAdminHandler admin = new CoreAdminHandler(cores);
Path instDir;
try (SolrCore template = cores.getCore("collection1")) {
assertNotNull(template);
instDir = template.getCoreDescriptor().getInstanceDir();
}
assertTrue("instDir doesn't exist: " + instDir, Files.exists(instDir));
final File instPropFile = new File(workDir, "instProp");
FileUtils.copyDirectory(instDir.toFile(), instPropFile);
// create a new core (using CoreAdminHandler) w/ properties
SolrQueryResponse resp = new SolrQueryResponse();
admin.handleRequestBody(req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString(),
    CoreAdminParams.INSTANCE_DIR, instPropFile.getAbsolutePath(),
    CoreAdminParams.NAME, "dummycore",
    CommonAdminParams.ASYNC, "42"), resp);
assertNull("Exception on create", resp.getException());
int maxRetries = 10;
while (maxRetries-- > 0) {
resp = new SolrQueryResponse();
admin.handleRequestBody(req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTSTATUS.toString(), CoreAdminParams.REQUESTID, "42"), resp);
if (resp.getValues().get("STATUS") != null && resp.getValues().get("STATUS").equals("completed"))
break;
Thread.sleep(1000);
}
assertEquals("The status of request was expected to be completed", "completed", resp.getValues().get("STATUS"));
resp = new SolrQueryResponse();
admin.handleRequestBody(req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTSTATUS.toString(), CoreAdminParams.REQUESTID, "9999999"), resp);
assertEquals("Was expecting it to be invalid but found a task with the id.", "notfound", resp.getValues().get("STATUS"));
admin.shutdown();
}
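The retry loop above is the standard pattern for asynchronous core-admin calls: submit CREATE with an ASYNC id, then poll REQUESTSTATUS until STATUS reports "completed". A hedged sketch of how that loop could be factored into a helper (hypothetical method name; req() is the SolrTestCaseJ4 request factory used throughout these tests):

// Hypothetical helper consolidating the REQUESTSTATUS polling loop above.
// Returns the last STATUS value seen (e.g. "completed", or "notfound" for an unknown id).
private String waitForAsyncStatus(CoreAdminHandler admin, String requestId,
                                  int maxRetries, long sleepMillis) throws Exception {
  String status = null;
  while (maxRetries-- > 0) {
    SolrQueryResponse resp = new SolrQueryResponse();
    admin.handleRequestBody(
        req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTSTATUS.toString(),
            CoreAdminParams.REQUESTID, requestId),
        resp);
    status = (String) resp.getValues().get("STATUS");
    if ("completed".equals(status)) {
      break; // same terminal check as the loop in the test above
    }
    Thread.sleep(sleepMillis);
  }
  return status;
}

With such a helper, the two assertions reduce to assertEquals("completed", waitForAsyncStatus(admin, "42", 10, 1000)) and assertEquals("notfound", waitForAsyncStatus(admin, "9999999", 1, 0)).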
use of org.apache.solr.core.CoreContainer in project lucene-solr by apache.
the class InfoHandlerTest method testCoreAdminHandler.
@Test
public void testCoreAdminHandler() throws Exception {
final CoreContainer cores = h.getCoreContainer();
InfoHandler infoHandler = cores.getInfoHandler();
SolrQueryResponse rsp = handleRequest(infoHandler, "properties");
assertNotNull(rsp.getValues().get("system.properties"));
rsp = handleRequest(infoHandler, "threads");
assertNotNull(rsp.getValues().get("system"));
rsp = handleRequest(infoHandler, "logging");
assertNotNull(rsp.getValues().get("watcher"));
try {
rsp = handleRequest(infoHandler, "info");
fail("Should have failed with not found");
} catch (SolrException e) {
assertEquals(404, e.code());
}
try {
rsp = handleRequest(infoHandler, "");
fail("Should have failed with not found");
} catch (SolrException e) {
assertEquals(404, e.code());
}
}
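The handleRequest helper is not shown in this excerpt. A plausible sketch, assuming InfoHandler routes sub-handlers on the "path" entry of the request context (the usual mechanism for container-level handlers) and that req() is the SolrTestCaseJ4 request factory:

// Hedged sketch of the helper used above; the real test's implementation may differ.
private SolrQueryResponse handleRequest(InfoHandler infoHandler, String path, String... params)
    throws Exception {
  SolrQueryResponse rsp = new SolrQueryResponse();
  SolrQueryRequest req = req(params);
  req.getContext().put("path", path);  // InfoHandler is assumed to dispatch on this context entry
  infoHandler.handleRequestBody(req, rsp);
  return rsp;
}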
use of org.apache.solr.core.CoreContainer in project lucene-solr by apache.
the class VersionInfoTest method testMaxVersionLogic.
protected void testMaxVersionLogic(SolrQueryRequest req) throws Exception {
UpdateHandler uhandler = req.getCore().getUpdateHandler();
UpdateLog ulog = uhandler.getUpdateLog();
ulog.init(uhandler, req.getCore());
clearIndex();
assertU(commit());
// index the first doc
String docId = Integer.toString(1);
assertU(adoc("id", docId));
assertU(commit());
// max from index should not be 0 or null
Long maxVersionFromIndex = ulog.getMaxVersionFromIndex();
assertNotNull(maxVersionFromIndex);
assertTrue(maxVersionFromIndex != 0L);
// version from index should be less than or equal to the version of the first doc indexed
VersionInfo vInfo = ulog.getVersionInfo();
Long version = vInfo.getVersionFromIndex(new BytesRef(docId));
assertNotNull("version info should not be null for test doc: " + docId, version);
assertTrue("max version from index should be less than or equal to the version of first doc added, diff: " + (version - maxVersionFromIndex), maxVersionFromIndex <= version);
BytesRef idBytes = new BytesRef(docId);
int bucketHash = Hash.murmurhash3_x86_32(idBytes.bytes, idBytes.offset, idBytes.length, 0);
VersionBucket bucket = vInfo.bucket(bucketHash);
assertTrue(bucket.highest == version.longValue());
// send 2nd doc ...
docId = Integer.toString(2);
assertU(adoc("id", docId));
assertU(commit());
maxVersionFromIndex = ulog.getMaxVersionFromIndex();
assertNotNull(maxVersionFromIndex);
assertTrue(maxVersionFromIndex != 0L);
vInfo = ulog.getVersionInfo();
version = vInfo.getVersionFromIndex(new BytesRef(docId));
assertNotNull("version info should not be null for test doc: " + docId, version);
assertTrue("max version from index should be less than version of last doc added, diff: " + (version - maxVersionFromIndex), maxVersionFromIndex < version);
idBytes = new BytesRef(docId);
bucketHash = Hash.murmurhash3_x86_32(idBytes.bytes, idBytes.offset, idBytes.length, 0);
bucket = vInfo.bucket(bucketHash);
assertTrue(bucket.highest == version.longValue());
Long versionFromTLog = ulog.lookupVersion(idBytes);
Long versionFromIndex = vInfo.getVersionFromIndex(idBytes);
assertEquals("version from tlog and version from index should be the same", versionFromTLog, versionFromIndex);
// reload the core, which should reset the max
CoreContainer coreContainer = req.getCore().getCoreContainer();
coreContainer.reload(req.getCore().getName());
maxVersionFromIndex = ulog.getMaxVersionFromIndex();
assertEquals("max version from index should be equal to version of last doc added after reload", maxVersionFromIndex, version);
// one more doc after reload
docId = Integer.toString(3);
assertU(adoc("id", docId));
assertU(commit());
maxVersionFromIndex = ulog.getMaxVersionFromIndex();
assertNotNull(maxVersionFromIndex);
assertTrue(maxVersionFromIndex != 0L);
vInfo = ulog.getVersionInfo();
version = vInfo.getVersionFromIndex(new BytesRef(docId));
assertNotNull("version info should not be null for test doc: " + docId, version);
assertTrue("max version from index should be less than version of last doc added, diff: " + (version - maxVersionFromIndex), maxVersionFromIndex < version);
idBytes = new BytesRef(docId);
bucketHash = Hash.murmurhash3_x86_32(idBytes.bytes, idBytes.offset, idBytes.length, 0);
bucket = vInfo.bucket(bucketHash);
assertTrue(bucket.highest == version.longValue());
}
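The same three-step check — hash the id, look up its VersionBucket, and compare the bucket's highest version against the indexed version — is repeated for every document above. A hedged sketch of a helper that captures that invariant (hypothetical name; it only uses the APIs already exercised by the test, applied right after the doc in question is committed):

// Hypothetical consolidation of the per-document checks in the test above.
private void assertVersionTracking(UpdateLog ulog, VersionInfo vInfo, String docId) throws Exception {
  BytesRef idBytes = new BytesRef(docId);

  // The index must have recorded a version for this doc.
  Long versionFromIndex = vInfo.getVersionFromIndex(idBytes);
  assertNotNull("version should not be null for doc: " + docId, versionFromIndex);

  // The doc's version bucket (selected by murmurhash3 of the id bytes) should
  // have its 'highest' watermark at exactly that version.
  int bucketHash = Hash.murmurhash3_x86_32(idBytes.bytes, idBytes.offset, idBytes.length, 0);
  VersionBucket bucket = vInfo.bucket(bucketHash);
  assertEquals(versionFromIndex.longValue(), bucket.highest);

  // And the transaction log must agree with the index.
  assertEquals("version from tlog and version from index should be the same",
      versionFromIndex, ulog.lookupVersion(idBytes));
}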
use of org.apache.solr.core.CoreContainer in project lucene-solr by apache.
the class TestRandomRequestDistribution method testQueryAgainstDownReplica.
/**
* Asserts that requests against a collection are only served by an 'active' local replica
*/
private void testQueryAgainstDownReplica() throws Exception {
log.info("Creating collection 'football' with 1 shard and 2 replicas");
CollectionAdminRequest.createCollection("football", 1, 2).setCreateNodeSet(nodeNames.get(0) + ',' + nodeNames.get(1)).process(cloudClient);
waitForRecoveriesToFinish("football", true);
cloudClient.getZkStateReader().forceUpdateCollection("football");
Replica leader = null;
Replica notLeader = null;
Collection<Replica> replicas = cloudClient.getZkStateReader().getClusterState().getSlice("football", "shard1").getReplicas();
for (Replica replica : replicas) {
if (replica.getStr(ZkStateReader.LEADER_PROP) != null) {
leader = replica;
} else {
notLeader = replica;
}
}
//Simulate a replica being in down state.
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
    ZkStateReader.BASE_URL_PROP, notLeader.getStr(ZkStateReader.BASE_URL_PROP),
    ZkStateReader.NODE_NAME_PROP, notLeader.getStr(ZkStateReader.NODE_NAME_PROP),
    ZkStateReader.COLLECTION_PROP, "football",
    ZkStateReader.SHARD_ID_PROP, "shard1",
    ZkStateReader.CORE_NAME_PROP, notLeader.getStr(ZkStateReader.CORE_NAME_PROP),
    ZkStateReader.ROLES_PROP, "",
    ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
log.info("Forcing {} to go into 'down' state", notLeader.getStr(ZkStateReader.CORE_NAME_PROP));
DistributedQueue q = Overseer.getStateUpdateQueue(cloudClient.getZkStateReader().getZkClient());
q.offer(Utils.toJSON(m));
verifyReplicaStatus(cloudClient.getZkStateReader(), "football", "shard1", notLeader.getName(), Replica.State.DOWN);
//Query against the node which hosts the down replica
String baseUrl = notLeader.getStr(ZkStateReader.BASE_URL_PROP);
if (!baseUrl.endsWith("/"))
baseUrl += "/";
String path = baseUrl + "football";
log.info("Firing queries against path=" + path);
try (HttpSolrClient client = getHttpSolrClient(path)) {
client.setSoTimeout(5000);
client.setConnectionTimeout(2000);
SolrCore leaderCore = null;
for (JettySolrRunner jetty : jettys) {
CoreContainer container = jetty.getCoreContainer();
for (SolrCore core : container.getCores()) {
if (core.getName().equals(leader.getStr(ZkStateReader.CORE_NAME_PROP))) {
leaderCore = core;
break;
}
}
}
assertNotNull(leaderCore);
SolrMetricManager leaderMetricManager = leaderCore.getCoreContainer().getMetricManager();
String leaderRegistry = leaderCore.getCoreMetricManager().getRegistryName();
Counter cnt = leaderMetricManager.counter(null, leaderRegistry, "requests", "QUERY.standard");
// All queries should be served by the active replica
// To make sure that's true we keep querying the down replica
// If queries are getting processed by the down replica then the cluster state hasn't updated for that replica locally
// So we keep trying till it has updated and then verify if ALL queries go to the active replica
long count = 0;
while (true) {
count++;
client.query(new SolrQuery("*:*"));
long c = cnt.getCount();
if (c == 1) {
// cluster state has been updated locally
break;
} else {
Thread.sleep(100);
}
if (count > 10000) {
fail("After 10k queries we still see all requests being processed by the down replica");
}
}
// Now we fire a few additional queries and make sure ALL of them
// are served by the active replica
int moreQueries = TestUtil.nextInt(random(), 4, 10);
// Since 1 query has already hit the leader
count = 1;
for (int i = 0; i < moreQueries; i++) {
client.query(new SolrQuery("*:*"));
count++;
long c = cnt.getCount();
assertEquals("Query wasn't served by leader", count, c);
}
}
}
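The down-state simulation above works by enqueueing a state-change message on the Overseer's state update queue rather than stopping the node, so the replica's JVM keeps running while its cluster-state entry reports 'down'. A hedged sketch of that step factored into a helper (hypothetical name; it simply repackages the ZkNodeProps construction used in the test):

// Hypothetical helper: publish an overseer state message marking a replica as DOWN.
private void publishReplicaDown(CloudSolrClient cloudClient, String collection,
                                String shard, Replica replica) throws Exception {
  ZkNodeProps m = new ZkNodeProps(
      Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
      ZkStateReader.BASE_URL_PROP, replica.getStr(ZkStateReader.BASE_URL_PROP),
      ZkStateReader.NODE_NAME_PROP, replica.getStr(ZkStateReader.NODE_NAME_PROP),
      ZkStateReader.COLLECTION_PROP, collection,
      ZkStateReader.SHARD_ID_PROP, shard,
      ZkStateReader.CORE_NAME_PROP, replica.getStr(ZkStateReader.CORE_NAME_PROP),
      ZkStateReader.ROLES_PROP, "",
      ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
  DistributedQueue q = Overseer.getStateUpdateQueue(cloudClient.getZkStateReader().getZkClient());
  q.offer(Utils.toJSON(m));  // the overseer applies this and publishes the new cluster state
}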