Usage of org.apache.solr.client.solrj.SolrQuery in the Apache lucene-solr project:
class SegmentTerminateEarlyTestState, method queryTimestampAscendingSegmentTerminateEarlyYes.
/**
 * Queries with segmentTerminateEarly=true but an ascending-timestamp sort that is
 * NOT compatible with the index merge sort order, and verifies that Solr reports
 * segmentTerminatedEarly=false (i.e. early termination did not take place).
 *
 * @param cloudSolrClient client used to issue the query against the test cluster
 * @throws Exception on any query failure
 */
void queryTimestampAscendingSegmentTerminateEarlyYes(CloudSolrClient cloudSolrClient) throws Exception {
    TestMiniSolrCloudCluster.assertFalse(minTimestampDocKeys.isEmpty());
    TestMiniSolrCloudCluster.assertTrue("numDocs=" + numDocs + " is not even", (numDocs % 2) == 0);
    // Long.valueOf replaces the deprecated Long(long) constructor (deprecated since Java 9).
    final Long oddFieldValue = Long.valueOf(minTimestampDocKeys.iterator().next().intValue() % 2);
    final SolrQuery query = new SolrQuery(ODD_FIELD + ":" + oddFieldValue);
    // a sort order that is _not_ compatible with the merge sort order
    query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.asc);
    query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
    query.setRows(1);
    query.set(CommonParams.SEGMENT_TERMINATE_EARLY, true);
    final QueryResponse rsp = cloudSolrClient.query(query);
    // check correctness of the results count
    TestMiniSolrCloudCluster.assertEquals("numFound", numDocs / 2, rsp.getResults().getNumFound());
    // check correctness of the first result
    if (rsp.getResults().getNumFound() > 0) {
        final SolrDocument solrDocument0 = rsp.getResults().get(0);
        TestMiniSolrCloudCluster.assertTrue(KEY_FIELD + " of (" + solrDocument0 + ") is not in minTimestampDocKeys(" + minTimestampDocKeys + ")", minTimestampDocKeys.contains(solrDocument0.getFieldValue(KEY_FIELD)));
        TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
    }
    // check segmentTerminatedEarly flag: the header key must be present ...
    TestMiniSolrCloudCluster.assertNotNull("responseHeader.segmentTerminatedEarly missing in " + rsp.getResponseHeader(), rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
    // ... and must be FALSE: segmentTerminateEarly cannot be used with incompatible sort orders
    TestMiniSolrCloudCluster.assertTrue("responseHeader.segmentTerminatedEarly missing/true in " + rsp.getResponseHeader(), Boolean.FALSE.equals(rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY)));
}
Usage of org.apache.solr.client.solrj.SolrQuery in the Apache lucene-solr project:
class SegmentTerminateEarlyTestState, method queryTimestampDescending.
/**
 * Queries with a descending-timestamp sort and WITHOUT the segmentTerminateEarly
 * parameter, and verifies that the segmentTerminatedEarly flag is absent from the
 * response header (early termination was never requested).
 *
 * @param cloudSolrClient client used to issue the query against the test cluster
 * @throws Exception on any query failure
 */
void queryTimestampDescending(CloudSolrClient cloudSolrClient) throws Exception {
    TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
    TestMiniSolrCloudCluster.assertTrue("numDocs=" + numDocs + " is not even", (numDocs % 2) == 0);
    // Long.valueOf replaces the deprecated Long(long) constructor (deprecated since Java 9).
    final Long oddFieldValue = Long.valueOf(maxTimestampDocKeys.iterator().next().intValue() % 2);
    final SolrQuery query = new SolrQuery(ODD_FIELD + ":" + oddFieldValue);
    query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
    query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
    query.setRows(1);
    // CommonParams.SEGMENT_TERMINATE_EARLY parameter intentionally absent
    final QueryResponse rsp = cloudSolrClient.query(query);
    // check correctness of the results count
    TestMiniSolrCloudCluster.assertEquals("numFound", numDocs / 2, rsp.getResults().getNumFound());
    // check correctness of the first result
    if (rsp.getResults().getNumFound() > 0) {
        final SolrDocument solrDocument0 = rsp.getResults().get(0);
        TestMiniSolrCloudCluster.assertTrue(KEY_FIELD + " of (" + solrDocument0 + ") is not in maxTimestampDocKeys(" + maxTimestampDocKeys + ")", maxTimestampDocKeys.contains(solrDocument0.getFieldValue(KEY_FIELD)));
        TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
    }
    // check segmentTerminatedEarly flag: must not appear when the parameter was not sent
    TestMiniSolrCloudCluster.assertNull("responseHeader.segmentTerminatedEarly present in " + rsp.getResponseHeader(), rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
}
Usage of org.apache.solr.client.solrj.SolrQuery in the Apache lucene-solr project:
class HdfsWriteToMultipleCollectionsTest, method test.
@Test
public void test() throws Exception {
    // Create a random number of collections, index into all of them concurrently,
    // then verify doc counts and that each core uses the HDFS block directory with
    // read caching enabled and write caching disabled (SOLR-6424).
    int docCount = random().nextInt(1313) + 1;
    int cnt = random().nextInt(4) + 1;
    for (int i = 0; i < cnt; i++) {
        createCollection(ACOLLECTION + i, 2, 2, 9);
    }
    for (int i = 0; i < cnt; i++) {
        waitForRecoveriesToFinish(ACOLLECTION + i, false);
    }
    List<CloudSolrClient> cloudClients = new ArrayList<>();
    List<StoppableIndexingThread> threads = new ArrayList<>();
    for (int i = 0; i < cnt; i++) {
        CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress());
        client.setDefaultCollection(ACOLLECTION + i);
        cloudClients.add(client);
        StoppableIndexingThread indexThread = new StoppableIndexingThread(null, client, "1", true, docCount, 1, true);
        threads.add(indexThread);
        indexThread.start();
    }
    // Wait for all indexing threads and tally the expected net document count.
    int addCnt = 0;
    for (StoppableIndexingThread thread : threads) {
        thread.join();
        addCnt += thread.getNumAdds() - thread.getNumDeletes();
    }
    long collectionsCount = 0;
    for (CloudSolrClient client : cloudClients) {
        client.commit();
        collectionsCount += client.query(new SolrQuery("*:*")).getResults().getNumFound();
    }
    IOUtils.close(cloudClients);
    assertEquals(addCnt, collectionsCount);
    BlockCache lastBlockCache = null;
    // assert that we are using the block directory and that write and read caching are being used
    for (JettySolrRunner jetty : jettys) {
        CoreContainer cores = jetty.getCoreContainer();
        Collection<SolrCore> solrCores = cores.getCores();
        for (SolrCore core : solrCores) {
            if (core.getCoreDescriptor().getCloudDescriptor().getCollectionName().startsWith(ACOLLECTION)) {
                DirectoryFactory factory = core.getDirectoryFactory();
                assertTrue("Found: " + core.getDirectoryFactory().getClass().getName(), factory instanceof HdfsDirectoryFactory);
                Directory dir = factory.get(core.getDataDir(), null, null);
                try {
                    long dataDirSize = factory.size(dir);
                    // try-with-resources: the FileSystem instance was previously leaked (never closed)
                    try (FileSystem fileSystem = FileSystem.newInstance(new Path(core.getDataDir()).toUri(), new Configuration())) {
                        long size = fileSystem.getContentSummary(new Path(core.getDataDir())).getLength();
                        assertEquals(size, dataDirSize);
                    }
                } finally {
                    core.getDirectoryFactory().release(dir);
                }
                RefCounted<IndexWriter> iwRef = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
                try {
                    IndexWriter iw = iwRef.get();
                    NRTCachingDirectory directory = (NRTCachingDirectory) iw.getDirectory();
                    BlockDirectory blockDirectory = (BlockDirectory) directory.getDelegate();
                    assertTrue(blockDirectory.isBlockCacheReadEnabled());
                    // see SOLR-6424
                    assertFalse(blockDirectory.isBlockCacheWriteEnabled());
                    Cache cache = blockDirectory.getCache();
                    // we know it's a BlockDirectoryCache, but future proof
                    assertTrue(cache instanceof BlockDirectoryCache);
                    BlockCache blockCache = ((BlockDirectoryCache) cache).getBlockCache();
                    if (lastBlockCache != null) {
                        // with the global block cache enabled all cores must share one instance
                        if (Boolean.getBoolean(SOLR_HDFS_BLOCKCACHE_GLOBAL)) {
                            assertEquals(lastBlockCache, blockCache);
                        } else {
                            assertNotSame(lastBlockCache, blockCache);
                        }
                    }
                    lastBlockCache = blockCache;
                } finally {
                    iwRef.decref();
                }
            }
        }
    }
}
Usage of org.apache.solr.client.solrj.SolrQuery in the Apache lucene-solr project:
class TestSolrCloudWithHadoopAuthPlugin, method testCollectionCreateSearchDelete.
/**
 * End-to-end smoke test against a Kerberos-secured cluster: create a collection,
 * index a single document, query it back, then delete the collection and wait
 * for it to disappear from ZooKeeper.
 *
 * @throws Exception if any cluster operation fails
 */
protected void testCollectionCreateSearchDelete() throws Exception {
    final CloudSolrClient solrClient = cluster.getSolrClient();
    final String collectionName = "testkerberoscollection";

    // create collection
    CollectionAdminRequest
        .createCollection(collectionName, "conf1", NUM_SHARDS, REPLICATION_FACTOR)
        .process(solrClient);

    // index one document and make it visible
    final SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", "1");
    solrClient.add(collectionName, doc);
    solrClient.commit(collectionName);

    // the match-all query must find exactly the one document we added
    final SolrQuery matchAll = new SolrQuery();
    matchAll.setQuery("*:*");
    final QueryResponse response = solrClient.query(collectionName, matchAll);
    assertEquals(1, response.getResults().getNumFound());

    // clean up: delete the collection and block until ZK reflects the removal
    CollectionAdminRequest.deleteCollection(collectionName).process(solrClient);
    AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, solrClient.getZkStateReader(), true, true, 330);
}
Usage of org.apache.solr.client.solrj.SolrQuery in the Apache lucene-solr project:
class AbstractFullDistribZkTestBase, method assertDocCounts.
/**
 * Asserts that the control collection and the cloud collection agree on the total
 * document count, optionally printing per-shard diagnostics along the way.
 *
 * @param verbose when true, prints control/client doc counts and shard details to stderr
 * @throws Exception on query or ZooKeeper failures
 */
protected void assertDocCounts(boolean verbose) throws Exception {
    // and node/client to shard?
    if (verbose)
        System.err.println("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
    long controlCount = controlClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    // do some really inefficient mapping...
    Map<String, Slice> slices = null;
    ClusterState clusterState;
    try (ZkStateReader zk = new ZkStateReader(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT)) {
        zk.createClusterStateWatchersAndUpdate();
        clusterState = zk.getClusterState();
        slices = clusterState.getSlicesMap(DEFAULT_COLLECTION);
    }
    if (slices == null) {
        throw new RuntimeException("Could not find collection " + DEFAULT_COLLECTION + " in " + clusterState.getCollectionsMap().keySet());
    }
    for (CloudJettyRunner cjetty : cloudJettys) {
        CloudSolrServerClient client = cjetty.client;
        // hoisted out of the replica loop: the shard-name suffix depends only on this
        // client's port, not on the slice/replica being inspected
        String shardName = new URI(((HttpSolrClient) client.solrClient).getBaseURL()).getPort() + "_solr_";
        for (Map.Entry<String, Slice> slice : slices.entrySet()) {
            Map<String, Replica> theShards = slice.getValue().getReplicasMap();
            for (Map.Entry<String, Replica> shard : theShards.entrySet()) {
                if (verbose && shard.getKey().endsWith(shardName)) {
                    System.err.println("shard:" + slice.getKey());
                    System.err.println(shard.getValue());
                }
            }
        }
        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
        long count = 0;
        final Replica.State currentState = Replica.State.getState(cjetty.info.getStr(ZkStateReader.STATE_PROP));
        // only query replicas that are ACTIVE and whose node is live
        if (currentState == Replica.State.ACTIVE && zkStateReader.getClusterState().liveNodesContain(cjetty.info.getStr(ZkStateReader.NODE_NAME_PROP))) {
            SolrQuery query = new SolrQuery("*:*");
            // non-distributed query: count only this replica's local docs
            query.set("distrib", false);
            count = client.solrClient.query(query).getResults().getNumFound();
        }
        if (verbose)
            System.err.println("client docs:" + count + "\n\n");
    }
    if (verbose)
        System.err.println("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
    SolrQuery query = new SolrQuery("*:*");
    assertEquals("Doc Counts do not add up", controlCount, cloudClient.query(query).getResults().getNumFound());
}
Aggregations