use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class FieldProps, method testGroupingSorting.
// We should be able to sort things with missing first/last and that are _NOT_ present at all on one server.
@Test
public void testGroupingSorting() throws IOException, SolrServerException {
  CloudSolrClient client = cluster.getSolrClient();
  // The point of these is to have at least one shard w/o the value.
  // While getting values for each of these fields starts _out_ random, each successive
  // _value_ increases.
  List<SolrInputDocument> docs = new ArrayList<>(3);
  docs.add(makeGSDoc(2, fieldsToTestGroupSortFirst, fieldsToTestGroupSortLast));
  docs.add(makeGSDoc(1, fieldsToTestGroupSortFirst, fieldsToTestGroupSortLast));
  docs.add(makeGSDoc(3, fieldsToTestGroupSortFirst, fieldsToTestGroupSortLast));
  // A fourth document with only an id, so the fields under test are entirely missing from it.
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", 4);
  docs.add(doc);
  new UpdateRequest().add(docs).commit(client, COLLECTION);
  checkSortOrder(client, fieldsToTestGroupSortFirst, "asc", new String[] { "4", "2", "1", "3" }, new String[] { "4", "1", "2", "3" });
  checkSortOrder(client, fieldsToTestGroupSortFirst, "desc", new String[] { "3", "1", "2", "4" }, new String[] { "2", "3", "1", "4" });
  checkSortOrder(client, fieldsToTestGroupSortLast, "asc", new String[] { "4", "2", "1", "3" }, new String[] { "4", "1", "2", "3" });
  checkSortOrder(client, fieldsToTestGroupSortLast, "desc", new String[] { "3", "1", "2", "4" }, new String[] { "2", "3", "1", "4" });
}
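The helpers makeGSDoc and checkSortOrder are defined elsewhere in the test class. For orientation, a plain sorted query and a grouped query against the same collection might look like the following sketch; the field name "intGSF" is an assumption for illustration, not necessarily one of the test's actual fields.

// Minimal sketch of the kind of queries checkSortOrder could issue (field name is hypothetical).
SolrQuery sorted = new SolrQuery("*:*");
sorted.setRows(100);
sorted.setSort("intGSF", SolrQuery.ORDER.asc);
QueryResponse plainRsp = client.query(COLLECTION, sorted);

SolrQuery grouped = new SolrQuery("*:*");
grouped.set("group", true);
grouped.set("group.field", "intGSF");
grouped.setSort("intGSF", SolrQuery.ORDER.asc);
QueryResponse groupedRsp = client.query(COLLECTION, grouped);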
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class HdfsCollectionsAPIDistributedZkTest, method moveReplicaTest.
@Test
public void moveReplicaTest() throws Exception {
  cluster.waitForAllNodes(5000);
  String coll = "movereplicatest_coll";
  CloudSolrClient cloudClient = cluster.getSolrClient();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
  create.setMaxShardsPerNode(2);
  cloudClient.request(create);
  for (int i = 0; i < 10; i++) {
    cloudClient.add(coll, sdoc("id", String.valueOf(i)));
    cloudClient.commit(coll);
  }
  // Pick a random slice and a non-leader replica from it to move.
  List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
  Collections.shuffle(slices, random());
  Slice slice = null;
  Replica replica = null;
  for (Slice s : slices) {
    slice = s;
    for (Replica r : s.getReplicas()) {
      if (s.getLeader() != r) {
        replica = r;
      }
    }
  }
  String dataDir = getDataDir(replica);
  // Choose a target node that is not the replica's current node.
  Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
  ArrayList<String> l = new ArrayList<>(liveNodes);
  Collections.shuffle(l, random());
  String targetNode = null;
  for (String node : l) {
    if (!replica.getNodeName().equals(node)) {
      targetNode = node;
      break;
    }
  }
  assertNotNull(targetNode);
  CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
  moveReplica.process(cloudClient);
  checkNumOfCores(cloudClient, replica.getNodeName(), 0);
  checkNumOfCores(cloudClient, targetNode, 2);
  waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));
  // The moved replica should keep the same HDFS data dir.
  slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
  boolean found = false;
  for (Replica newReplica : slice.getReplicas()) {
    if (getDataDir(newReplica).equals(dataDir)) {
      found = true;
    }
  }
  assertTrue(found);
  // data dir is reused so replication will be skipped
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
    List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
    for (String registry : registryNames) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
      if (counter != null) {
        assertEquals(0, counter.getCount());
      }
    }
  }
}
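checkNumOfCores and getDataDir are helpers from the test class and are not shown above. A rough sketch of what a core-count check could look like via the CoreAdmin STATUS API follows; the method shape and the getHttpSolrClient helper are assumptions borrowed from the test framework, not the class's actual code.

// Hypothetical sketch of checkNumOfCores: count the cores hosted on a node via CoreAdmin STATUS.
private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws Exception {
  String baseUrl = cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName);
  try (HttpSolrClient coreClient = getHttpSolrClient(baseUrl)) {
    CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreClient);
    assertEquals(nodeName + " has an unexpected number of cores", expectedCores, status.getCoreStatus().size());
  }
}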
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class HdfsWriteToMultipleCollectionsTest, method test.
@Test
public void test() throws Exception {
  int docCount = random().nextInt(1313) + 1;
  int cnt = random().nextInt(4) + 1;
  for (int i = 0; i < cnt; i++) {
    createCollection(ACOLLECTION + i, 2, 2, 9);
  }
  for (int i = 0; i < cnt; i++) {
    waitForRecoveriesToFinish(ACOLLECTION + i, false);
  }
  // Index into each collection from its own thread.
  List<CloudSolrClient> cloudClients = new ArrayList<>();
  List<StoppableIndexingThread> threads = new ArrayList<>();
  for (int i = 0; i < cnt; i++) {
    CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress());
    client.setDefaultCollection(ACOLLECTION + i);
    cloudClients.add(client);
    StoppableIndexingThread indexThread = new StoppableIndexingThread(null, client, "1", true, docCount, 1, true);
    threads.add(indexThread);
    indexThread.start();
  }
  int addCnt = 0;
  for (StoppableIndexingThread thread : threads) {
    thread.join();
    addCnt += thread.getNumAdds() - thread.getNumDeletes();
  }
  long collectionsCount = 0;
  for (CloudSolrClient client : cloudClients) {
    client.commit();
    collectionsCount += client.query(new SolrQuery("*:*")).getResults().getNumFound();
  }
  IOUtils.close(cloudClients);
  assertEquals(addCnt, collectionsCount);
  BlockCache lastBlockCache = null;
  // assert that we are using the block directory and that write and read caching are being used
  for (JettySolrRunner jetty : jettys) {
    CoreContainer cores = jetty.getCoreContainer();
    Collection<SolrCore> solrCores = cores.getCores();
    for (SolrCore core : solrCores) {
      if (core.getCoreDescriptor().getCloudDescriptor().getCollectionName().startsWith(ACOLLECTION)) {
        DirectoryFactory factory = core.getDirectoryFactory();
        assertTrue("Found: " + core.getDirectoryFactory().getClass().getName(), factory instanceof HdfsDirectoryFactory);
        // The directory size reported by the factory should match what HDFS reports for the data dir.
        Directory dir = factory.get(core.getDataDir(), null, null);
        try {
          long dataDirSize = factory.size(dir);
          FileSystem fileSystem = FileSystem.newInstance(new Path(core.getDataDir()).toUri(), new Configuration());
          long size = fileSystem.getContentSummary(new Path(core.getDataDir())).getLength();
          assertEquals(size, dataDirSize);
        } finally {
          core.getDirectoryFactory().release(dir);
        }
        RefCounted<IndexWriter> iwRef = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
        try {
          IndexWriter iw = iwRef.get();
          NRTCachingDirectory directory = (NRTCachingDirectory) iw.getDirectory();
          BlockDirectory blockDirectory = (BlockDirectory) directory.getDelegate();
          assertTrue(blockDirectory.isBlockCacheReadEnabled());
          // see SOLR-6424
          assertFalse(blockDirectory.isBlockCacheWriteEnabled());
          Cache cache = blockDirectory.getCache();
          // we know it's a BlockDirectoryCache, but future proof
          assertTrue(cache instanceof BlockDirectoryCache);
          BlockCache blockCache = ((BlockDirectoryCache) cache).getBlockCache();
          // With a global block cache every core shares one instance; otherwise each core gets its own.
          if (lastBlockCache != null) {
            if (Boolean.getBoolean(SOLR_HDFS_BLOCKCACHE_GLOBAL)) {
              assertEquals(lastBlockCache, blockCache);
            } else {
              assertNotSame(lastBlockCache, blockCache);
            }
          }
          lastBlockCache = blockCache;
        } finally {
          iwRef.decref();
        }
      }
    }
  }
}
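getCloudSolrClient here comes from the Solr test framework. Outside of it, a client pointed at the same ZooKeeper ensemble could be built roughly as below; the Builder calls reflect the SolrJ 6.x/7.x API and are an assumption about the version in use.

// Sketch: build a CloudSolrClient against the test ZooKeeper and target one collection.
try (CloudSolrClient client = new CloudSolrClient.Builder()
    .withZkHost(zkServer.getZkAddress())
    .build()) {
  client.setDefaultCollection(ACOLLECTION + 0);
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "example-1");
  client.add(doc);
  client.commit();
}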
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class BlobRepositoryCloudTest, method setupCluster.
@BeforeClass
public static void setupCluster() throws Exception {
  // only sharing *within* a node
  configureCluster(1).addConfig("configname", TEST_PATH.resolve("resource-sharing")).configure();
  // Thread.sleep(2000);
  HashMap<String, String> params = new HashMap<>();
  CollectionAdminRequest.createCollection(".system", null, 1, 1).process(cluster.getSolrClient());
  // test component will fail if it can't find a blob with this data by this name
  TestBlobHandler.postData(cluster.getSolrClient(), findLiveNodeURI(), "testResource", ByteBuffer.wrap("foo,bar\nbaz,bam".getBytes(StandardCharsets.UTF_8)));
  // Thread.sleep(2000);
  // if these don't load we probably failed to post the blob above
  CollectionAdminRequest.createCollection("col1", "configname", 1, 1).process(cluster.getSolrClient());
  CollectionAdminRequest.createCollection("col2", "configname", 1, 1).process(cluster.getSolrClient());
  // Index one marker document into each collection.
  SolrInputDocument document = new SolrInputDocument();
  document.addField("id", "1");
  document.addField("text", "col1");
  CloudSolrClient solrClient = cluster.getSolrClient();
  solrClient.add("col1", document);
  solrClient.commit("col1");
  document = new SolrInputDocument();
  document.addField("id", "1");
  document.addField("text", "col2");
  solrClient.add("col2", document);
  solrClient.commit("col2");
  Thread.sleep(2000);
}
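A test method following this setup could confirm the per-collection marker documents roughly as below; the assertions only restate what setupCluster indexed, and the query itself is an assumption rather than the actual test body.

// Sketch: verify each collection holds its single marker document.
QueryResponse rsp = cluster.getSolrClient().query("col1", new SolrQuery("text:col1"));
assertEquals(1, rsp.getResults().getNumFound());
rsp = cluster.getSolrClient().query("col2", new SolrQuery("text:col2"));
assertEquals(1, rsp.getResults().getNumFound());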
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class TestHdfsBackupRestoreCore, method test.
@Test
public void test() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "HdfsBackupRestore";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  create.process(solrClient);
  int nDocs = BackupRestoreUtils.indexDocs(solrClient, collectionName, docsSeed);
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(1, collectionState.getActiveSlices().size());
  Slice shard = collectionState.getActiveSlices().iterator().next();
  assertEquals(1, shard.getReplicas().size());
  Replica replica = shard.getReplicas().iterator().next();
  String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
  String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
  String backupName = TestUtil.randomSimpleString(random(), 1, 5);
  boolean testViaReplicationHandler = random().nextBoolean();
  String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString();
  try (SolrClient masterClient = getHttpSolrClient(replicaBaseUrl)) {
    // Create a backup.
    if (testViaReplicationHandler) {
      log.info("Running Backup via replication handler");
      BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName);
      CheckBackupStatus checkBackupStatus = new CheckBackupStatus((HttpSolrClient) masterClient, coreName, null);
      while (!checkBackupStatus.success) {
        checkBackupStatus.fetchStatus();
        Thread.sleep(1000);
      }
    } else {
      log.info("Running Backup via core admin api");
      Map<String, String> params = new HashMap<>();
      params.put("name", backupName);
      params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
      BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params);
    }
    int numRestoreTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
    for (int attempts = 0; attempts < numRestoreTests; attempts++) {
      // Modify existing index before we call restore.
      if (nDocs > 0) {
        // Delete a few docs
        int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
        for (int i = 0; i < numDeletes; i++) {
          masterClient.deleteByQuery(collectionName, "id:" + i);
        }
        masterClient.commit(collectionName);
        // Add a few more
        int moreAdds = TestUtil.nextInt(random(), 1, 100);
        for (int i = 0; i < moreAdds; i++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", i + nDocs);
          doc.addField("name", "name = " + (i + nDocs));
          masterClient.add(collectionName, doc);
        }
        // Purposely not committing every time, so some docs may remain uncommitted.
        if (usually()) {
          masterClient.commit(collectionName);
        }
      }
      if (testViaReplicationHandler) {
        log.info("Running Restore via replication handler");
        // Snapshooter prefixes "snapshot." to the backup name.
        BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_RESTORE, "hdfs", backupName);
        while (!TestRestoreCore.fetchRestoreStatus(baseUrl, coreName)) {
          Thread.sleep(1000);
        }
      } else {
        log.info("Running Restore via core admin api");
        Map<String, String> params = new HashMap<>();
        params.put("name", "snapshot." + backupName);
        params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
        BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params);
      }
      // See if restore was successful by checking if all the docs are present again
      BackupRestoreUtils.verifyDocs(nDocs, masterClient, coreName);
      // Verify the permissions for the backup folder.
      FileStatus status = fs.getFileStatus(new org.apache.hadoop.fs.Path("/backup/snapshot." + backupName));
      FsPermission perm = status.getPermission();
      assertEquals(FsAction.ALL, perm.getUserAction());
      assertEquals(FsAction.ALL, perm.getGroupAction());
      assertEquals(FsAction.ALL, perm.getOtherAction());
    }
  }
}
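BackupRestoreUtils.verifyDocs is a shared test utility. Conceptually it only needs to compare the restored core's document count with nDocs, roughly like this sketch (not the utility's actual code):

// Sketch: confirm the restored core again contains exactly the originally indexed docs.
static void verifyDocs(int expectedDocs, SolrClient client, String coreName) throws Exception {
  QueryResponse response = client.query(coreName, new SolrQuery("*:*"));
  assertEquals(expectedDocs, response.getResults().getNumFound());
}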