Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class TestTlogReplica, the method testOutOfOrderDBQWithInPlaceUpdates:
public void testOutOfOrderDBQWithInPlaceUpdates() throws Exception {
  createAndWaitForCollection(1, 0, 2, 0);
  assertFalse(getSolrCore(true).get(0).getLatestSchema().getField("inplace_updatable_int").indexed());
  assertFalse(getSolrCore(true).get(0).getLatestSchema().getField("inplace_updatable_int").stored());
  assertTrue(getSolrCore(true).get(0).getLatestSchema().getField("inplace_updatable_int").hasDocValues());
  List<UpdateRequest> updates = new ArrayList<>();
  // full update (version 1), then a delete-by-query carrying version 3, then an in-place
  // update with version 2, so the DBQ arrives before the update it logically follows
  updates.add(simulatedUpdateRequest(null, "id", 1, "title_s", "title0_new", "inplace_updatable_int", 5, "_version_", 1L));
  updates.add(simulatedDBQ("inplace_updatable_int:5", 3L));
  updates.add(simulatedUpdateRequest(1L, "id", 1, "inplace_updatable_int", 6, "_version_", 2L));
  // replay the out-of-order stream directly against each non-leader node
  for (JettySolrRunner solrRunner : getSolrRunner(false)) {
    try (SolrClient client = solrRunner.newClient()) {
      for (UpdateRequest up : updates) {
        up.process(client, collectionName);
      }
    }
  }
  // kill the current leader, wait for it to drop out of the active set, then restart it
  JettySolrRunner oldLeaderJetty = getSolrRunner(true).get(0);
  ChaosMonkey.kill(oldLeaderJetty);
  waitForState("Replica not removed", collectionName, activeReplicaCount(0, 1, 0));
  ChaosMonkey.start(oldLeaderJetty);
  waitForState("Replica not added", collectionName, activeReplicaCount(0, 2, 0));
  // the document must still be retrievable via real-time get and a normal lookup
  checkRTG(1, 1, cluster.getJettySolrRunners());
  SolrDocument doc = cluster.getSolrClient().getById(collectionName, "1");
  assertNotNull(doc.get("title_s"));
}
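The simulatedUpdateRequest and simulatedDBQ calls above are private helpers of TestTlogReplica: they forge requests that a replica accepts as if they had been forwarded from the shard leader, which is what lets the test replay them out of order. The following is only a rough sketch of that idea; the parameter names mirror Solr's distributed-update path and getBaseUrl() stands in for a helper returning the leader's URL, so treat both as illustrative assumptions rather than the verbatim helpers.

  // Sketch only: build an update that a replica will process as if it came from its leader.
  private UpdateRequest simulatedUpdateRequest(Long prevVersion, Object... fields) {
    SolrInputDocument doc = new SolrInputDocument();
    for (int i = 0; i < fields.length; i += 2) {
      doc.addField((String) fields[i], fields[i + 1]);
    }
    UpdateRequest ur = new UpdateRequest();
    ur.add(doc);
    ur.setParam("update.distrib", "FROMLEADER");   // pretend the leader forwarded this update
    ur.setParam("distrib.from", getBaseUrl());     // hypothetical helper returning the leader's URL
    if (prevVersion != null) {                     // mark it as an in-place update on top of prevVersion
      ur.setParam("distrib.inplace.prevversion", String.valueOf(prevVersion));
      ur.setParam("distrib.inplace.update", "true");
    }
    return ur;
  }

  // Sketch only: a delete-by-query carrying an explicit (negated) version, as forwarded DBQs do.
  private UpdateRequest simulatedDBQ(String query, long version) {
    UpdateRequest ur = new UpdateRequest();
    ur.deleteByQuery(query);
    ur.setParam("_version_", Long.toString(-version));
    ur.setParam("update.distrib", "FROMLEADER");
    ur.setParam("distrib.from", getBaseUrl());
    return ur;
  }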
Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class TestSolrCloudWithKerberosAlt, the method testCollectionCreateSearchDelete:
protected void testCollectionCreateSearchDelete() throws Exception {
  String collectionName = "testkerberoscollection";
  MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), JettyConfig.builder().setContext("/solr").build());
  CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
  cloudSolrClient.setDefaultCollection(collectionName);
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(NUM_SERVERS, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    // create collection
    String configName = "solrCloudCollectionConfig";
    miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);
    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR);
    Properties properties = new Properties();
    properties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
    properties.put("solr.tests.maxBufferedDocs", "100000");
    properties.put("solr.tests.ramBufferSizeMB", "100");
    // use non-test classes so RandomizedRunner isn't necessary
    properties.put(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
    properties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
    properties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
    createRequest.setProperties(properties);
    createRequest.process(cloudSolrClient);
    try (SolrZkClient zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
         ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
      zkStateReader.createClusterStateWatchersAndUpdate();
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
      // modify/query collection
      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "1");
      cloudSolrClient.add(doc);
      cloudSolrClient.commit();
      SolrQuery query = new SolrQuery();
      query.setQuery("*:*");
      QueryResponse rsp = cloudSolrClient.query(query);
      assertEquals(1, rsp.getResults().getNumFound());
      // delete the collection we created earlier
      CollectionAdminRequest.deleteCollection(collectionName).process(cloudSolrClient);
      AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330);
    }
  } finally {
    cloudSolrClient.close();
    miniCluster.shutdown();
  }
}
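One detail worth noting is that this example opens its own SolrZkClient/ZkStateReader pair just to wait for recoveries. If only the reader is needed, the CloudSolrClient already holds one, so the wait can be done without a second ZooKeeper connection; a minimal sketch, assuming the same 330-second timeout as above:

  // Sketch: reuse the ZkStateReader held by the CloudSolrClient instead of opening a new SolrZkClient.
  ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
  AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);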
Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class TestSolrCloudWithSecureImpersonation, the method testForwarding:
@Test
public void testForwarding() throws Exception {
  String collectionName = "forwardingCollection";
  miniCluster.uploadConfigSet(TEST_PATH().resolve("collection1/conf"), "conf1");
  create1ShardCollection(collectionName, "conf1", miniCluster);
  // try a command to each node, one of them must be forwarded
  for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
    HttpSolrClient client = new HttpSolrClient.Builder(jetty.getBaseUrl().toString() + "/" + collectionName).build();
    try {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("q", "*:*");
      params.set(USER_PARAM, "user");
      client.query(params);
    } finally {
      client.close();
    }
  }
}
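HttpSolrClient is Closeable, so the explicit try/finally above can also be written with try-with-resources. A small sketch of the same loop, keeping USER_PARAM as the constant defined by the surrounding test class:

  // Sketch: identical per-node query, but the client is closed by try-with-resources.
  for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
    try (HttpSolrClient client =
             new HttpSolrClient.Builder(jetty.getBaseUrl().toString() + "/" + collectionName).build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("q", "*:*");
      params.set(USER_PARAM, "user");   // USER_PARAM comes from the enclosing test class
      client.query(params);
    }
  }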
Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class TestSolrCloudWithDelegationTokens, the method startup:
@BeforeClass
public static void startup() throws Exception {
  // enable the delegation-token-aware authentication plugin before the cluster starts
  System.setProperty("authenticationPlugin", HttpParamDelegationTokenPlugin.class.getName());
  System.setProperty(KerberosPlugin.DELEGATION_TOKEN_ENABLED, "true");
  System.setProperty("solr.kerberos.cookie.domain", "127.0.0.1");
  miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), buildJettyConfig("/solr"));
  // one HttpSolrClient per node, so a token obtained from one node can be exercised against the other
  JettySolrRunner runnerPrimary = miniCluster.getJettySolrRunners().get(0);
  solrClientPrimary = new HttpSolrClient.Builder(runnerPrimary.getBaseUrl().toString()).build();
  JettySolrRunner runnerSecondary = miniCluster.getJettySolrRunners().get(1);
  solrClientSecondary = new HttpSolrClient.Builder(runnerSecondary.getBaseUrl().toString()).build();
}
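A @BeforeClass like this normally has a matching @AfterClass that closes the two clients, shuts the cluster down, and clears the system properties it set. The actual teardown in TestSolrCloudWithDelegationTokens may differ in detail; a minimal sketch:

  @AfterClass
  public static void shutdown() throws Exception {
    // close the per-node clients before tearing the cluster down
    if (solrClientPrimary != null) solrClientPrimary.close();
    if (solrClientSecondary != null) solrClientSecondary.close();
    if (miniCluster != null) miniCluster.shutdown();
    // undo the system properties set in startup()
    System.clearProperty("authenticationPlugin");
    System.clearProperty(KerberosPlugin.DELEGATION_TOKEN_ENABLED);
    System.clearProperty("solr.kerberos.cookie.domain");
  }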
Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class TestPullReplicaErrorHandling, the method testPullReplicaDisconnectsFromZooKeeper:
public void testPullReplicaDisconnectsFromZooKeeper() throws Exception {
  int numShards = 1;
  CollectionAdminRequest.createCollection(collectionName, "conf", numShards, 1, 0, 1).setMaxShardsPerNode(1).process(cluster.getSolrClient());
  addDocs(10);
  DocCollection docCollection = assertNumberOfReplicas(numShards, 0, numShards, false, true);
  Slice s = docCollection.getSlices().iterator().next();
  try (HttpSolrClient pullReplicaClient = getHttpSolrClient(s.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getCoreUrl())) {
    assertNumDocs(10, pullReplicaClient);
  }
  addDocs(20);
  // expire the ZooKeeper session of the node hosting the PULL replica
  JettySolrRunner jetty = getJettyForReplica(s.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0));
  cluster.expireZkSession(jetty);
  addDocs(30);
  waitForState("Expecting node to be disconnected", collectionName, activeReplicaCount(1, 0, 0));
  addDocs(40);
  // once the session is re-established, the PULL replica becomes active again and catches up
  waitForState("Expecting node to be disconnected", collectionName, activeReplicaCount(1, 0, 1));
  try (HttpSolrClient pullReplicaClient = getHttpSolrClient(s.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getCoreUrl())) {
    assertNumDocs(40, pullReplicaClient);
  }
}
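assertNumDocs is another helper of the test class rather than a SolrJ API. Because PULL replicas copy the index from the leader asynchronously, such a helper has to poll until the expected count appears rather than assert once. A sketch of the idea, with the retry count and sleep interval chosen arbitrarily here:

  // Sketch: poll the replica until it reports the expected document count or give up.
  private static void assertNumDocs(int expected, SolrClient client) throws Exception {
    long found = -1;
    for (int attempt = 0; attempt < 100; attempt++) {   // roughly 10 seconds at 100 ms per attempt
      found = client.query(new SolrQuery("*:*")).getResults().getNumFound();
      if (found == expected) return;
      Thread.sleep(100);
    }
    fail("Expected " + expected + " docs but found " + found);
  }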