use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.
In class OverseerTest, method testReplay:
@Test
public void testReplay() throws Exception {
  String zkDir = createTempDir().toFile().getAbsolutePath() + File.separator + "zookeeper/server1/data";
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient zkClient = null;
  SolrZkClient overseerClient = null;
  ZkStateReader reader = null;
  try {
    server.run();
    zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    ZkController.createClusterZkNodes(zkClient);
    reader = new ZkStateReader(zkClient);
    reader.createClusterStateWatchersAndUpdate();
    // Prepopulate the work queue to emulate a previous overseer that died before persisting state.
    DistributedQueue queue = Overseer.getInternalWorkQueue(zkClient, new Overseer.Stats());
    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
        ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
        ZkStateReader.NODE_NAME_PROP, "node1",
        ZkStateReader.SHARD_ID_PROP, "s1",
        ZkStateReader.COLLECTION_PROP, COLLECTION,
        ZkStateReader.CORE_NAME_PROP, "core1",
        ZkStateReader.ROLES_PROP, "",
        ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
    queue.offer(Utils.toJSON(m));
    m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
        ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
        ZkStateReader.NODE_NAME_PROP, "node1",
        ZkStateReader.SHARD_ID_PROP, "s1",
        ZkStateReader.COLLECTION_PROP, COLLECTION,
        ZkStateReader.CORE_NAME_PROP, "core2",
        ZkStateReader.ROLES_PROP, "",
        ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
    queue.offer(Utils.toJSON(m));
    overseerClient = electNewOverseer(server.getZkAddress());
    // Submit to the proper state-update queue.
    queue = Overseer.getStateUpdateQueue(zkClient);
    m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
        ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
        ZkStateReader.NODE_NAME_PROP, "node1",
        ZkStateReader.SHARD_ID_PROP, "s1",
        ZkStateReader.COLLECTION_PROP, COLLECTION,
        ZkStateReader.CORE_NAME_PROP, "core3",
        ZkStateReader.ROLES_PROP, "",
        ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
    queue.offer(Utils.toJSON(m));
    // Poll for up to ~10 seconds until the slice reports all three replicas.
    for (int i = 0; i < 100; i++) {
      Slice s = reader.getClusterState().getSlice(COLLECTION, "s1");
      if (s != null && s.getReplicasMap().size() == 3)
        break;
      Thread.sleep(100);
    }
    assertNotNull(reader.getClusterState().getSlice(COLLECTION, "s1"));
    assertEquals(3, reader.getClusterState().getSlice(COLLECTION, "s1").getReplicasMap().size());
  } finally {
    close(overseerClient);
    close(zkClient);
    close(reader);
    server.shutdown();
  }
}
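The busy-wait at the end of testReplay (poll the cluster state until the slice reports the expected replica count) recurs throughout these tests. A minimal sketch of the same pattern factored into a helper, using only the ZkStateReader API already shown above; the helper name, timeout parameter, and null-on-timeout contract are illustrative assumptions, not part of the original test:

// Hypothetical helper: polls until the slice has the expected number of
// replicas, returning null on timeout so the caller can assert.
private static Slice waitForReplicaCount(ZkStateReader reader, String collection,
    String sliceName, int expected, long timeoutMs) throws InterruptedException {
  long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
  while (System.nanoTime() < deadline) {
    Slice s = reader.getClusterState().getSlice(collection, sliceName);
    if (s != null && s.getReplicasMap().size() == expected) {
      return s; // slice is fully populated
    }
    Thread.sleep(100);
  }
  return null; // timed out
}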
use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.
In class SSLMigrationTest, method getReplicas:
private List<Replica> getReplicas() {
  List<Replica> replicas = new ArrayList<>();
  DocCollection collection = this.cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
  for (Slice slice : collection.getSlices()) {
    replicas.addAll(slice.getReplicas());
  }
  return replicas;
}
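The same flatten-slices-to-replicas traversal can be written as a single stream pipeline. A sketch equivalent to getReplicas() above, assuming only the DocCollection and Slice accessors already in use; the method name is hypothetical to avoid clashing with the original:

// Stream-based equivalent of getReplicas() above.
private List<Replica> getReplicasViaStreams() {
  return this.cloudClient.getZkStateReader().getClusterState()
      .getCollection(DEFAULT_COLLECTION).getSlices().stream()
      .flatMap(slice -> slice.getReplicas().stream())
      .collect(java.util.stream.Collectors.toList());
}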
use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.
In class HdfsCollectionsAPIDistributedZkTest, method moveReplicaTest:
@Test
public void moveReplicaTest() throws Exception {
  cluster.waitForAllNodes(5000);
  String coll = "movereplicatest_coll";
  CloudSolrClient cloudClient = cluster.getSolrClient();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
  create.setMaxShardsPerNode(2);
  cloudClient.request(create);
  for (int i = 0; i < 10; i++) {
    cloudClient.add(coll, sdoc("id", String.valueOf(i)));
    cloudClient.commit(coll);
  }
  List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
  Collections.shuffle(slices, random());
  // Pick a non-leader replica from a randomly ordered list of slices.
  Slice slice = null;
  Replica replica = null;
  for (Slice s : slices) {
    slice = s;
    for (Replica r : s.getReplicas()) {
      if (s.getLeader() != r) { // same cluster-state snapshot, so identity comparison suffices
        replica = r;
      }
    }
  }
  String dataDir = getDataDir(replica);
  Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
  ArrayList<String> l = new ArrayList<>(liveNodes);
  Collections.shuffle(l, random());
  String targetNode = null;
  // Iterate the shuffled copy so the target node is chosen at random.
  for (String node : l) {
    if (!replica.getNodeName().equals(node)) {
      targetNode = node;
      break;
    }
  }
  assertNotNull(targetNode);
  CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
  moveReplica.process(cloudClient);
  checkNumOfCores(cloudClient, replica.getNodeName(), 0);
  checkNumOfCores(cloudClient, targetNode, 2);
  waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));
  // The moved replica should reuse the original HDFS data dir.
  slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
  boolean found = false;
  for (Replica newReplica : slice.getReplicas()) {
    if (getDataDir(newReplica).equals(dataDir)) {
      found = true;
    }
  }
  assertTrue(found);
  // Because the data dir is reused, replication should have been skipped entirely.
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
    List<String> registryNames = manager.registryNames().stream()
        .filter(s -> s.startsWith("solr.core."))
        .collect(Collectors.toList());
    for (String registry : registryNames) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
      if (counter != null) {
        assertEquals(0, counter.getCount());
      }
    }
  }
}
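The nested selection loop near the top of this test effectively keeps the last non-leader replica it encounters. A sketch of that selection isolated into a helper, assuming Slice.getLeader() and identity comparison against replicas from the same cluster-state snapshot; the helper name is illustrative:

// Hypothetical helper: returns any non-leader replica of the slice, or
// null if the slice has only its leader.
private static Replica pickNonLeader(Slice slice) {
  Replica leader = slice.getLeader();
  for (Replica r : slice.getReplicas()) {
    if (r != leader) { // same snapshot, so identity comparison is enough
      return r;
    }
  }
  return null;
}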
use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.
In class TestHdfsBackupRestoreCore, method test:
@Test
public void test() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "HdfsBackupRestore";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  create.process(solrClient);
  int nDocs = BackupRestoreUtils.indexDocs(solrClient, collectionName, docsSeed);
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(1, collectionState.getActiveSlices().size());
  Slice shard = collectionState.getActiveSlices().iterator().next();
  assertEquals(1, shard.getReplicas().size());
  Replica replica = shard.getReplicas().iterator().next();
  String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
  String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
  String backupName = TestUtil.randomSimpleString(random(), 1, 5);
  boolean testViaReplicationHandler = random().nextBoolean();
  String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString();
  try (SolrClient masterClient = getHttpSolrClient(replicaBaseUrl)) {
    // Create a backup.
    if (testViaReplicationHandler) {
      log.info("Running Backup via replication handler");
      BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName);
      CheckBackupStatus checkBackupStatus = new CheckBackupStatus((HttpSolrClient) masterClient, coreName, null);
      while (!checkBackupStatus.success) {
        checkBackupStatus.fetchStatus();
        Thread.sleep(1000);
      }
    } else {
      log.info("Running Backup via core admin api");
      Map<String, String> params = new HashMap<>();
      params.put("name", backupName);
      params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
      BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params);
    }
    int numRestoreTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
    for (int attempts = 0; attempts < numRestoreTests; attempts++) {
      // Modify the existing index before calling restore.
      if (nDocs > 0) {
        // Delete a few docs.
        int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
        for (int i = 0; i < numDeletes; i++) {
          masterClient.deleteByQuery(collectionName, "id:" + i);
        }
        masterClient.commit(collectionName);
        // Add a few more.
        int moreAdds = TestUtil.nextInt(random(), 1, 100);
        for (int i = 0; i < moreAdds; i++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", i + nDocs);
          doc.addField("name", "name = " + (i + nDocs));
          masterClient.add(collectionName, doc);
        }
        // Purposely skip the commit some of the time, so some docs may remain uncommitted.
        if (usually()) {
          masterClient.commit(collectionName);
        }
      }
      // Snapshooter prefixes "snapshot." to the backup name.
      if (testViaReplicationHandler) {
        log.info("Running Restore via replication handler");
        BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_RESTORE, "hdfs", backupName);
        while (!TestRestoreCore.fetchRestoreStatus(baseUrl, coreName)) {
          Thread.sleep(1000);
        }
      } else {
        log.info("Running Restore via core admin api");
        Map<String, String> params = new HashMap<>();
        params.put("name", "snapshot." + backupName);
        params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
        BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params);
      }
      // Verify the restore succeeded by checking that all docs are present again.
      BackupRestoreUtils.verifyDocs(nDocs, masterClient, coreName);
      // Verify the permissions of the backup folder.
      FileStatus status = fs.getFileStatus(new org.apache.hadoop.fs.Path("/backup/snapshot." + backupName));
      FsPermission perm = status.getPermission();
      assertEquals(FsAction.ALL, perm.getUserAction());
      assertEquals(FsAction.ALL, perm.getGroupAction());
      assertEquals(FsAction.ALL, perm.getOtherAction());
    }
  }
}
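The shard/replica extraction at the start of this test (one active slice, one replica) is a pattern worth naming. A minimal sketch under the same 1x1-collection assumption; the helper name and the fail-fast exceptions are illustrative, not from the original:

// Hypothetical helper: extracts the sole replica of a 1-shard, 1-replica
// collection, failing fast if the shape is unexpected.
private static Replica soleReplica(DocCollection collectionState) {
  Collection<Slice> active = collectionState.getActiveSlices();
  if (active.size() != 1) {
    throw new IllegalStateException("expected exactly one active slice");
  }
  Slice shard = active.iterator().next();
  if (shard.getReplicas().size() != 1) {
    throw new IllegalStateException("expected exactly one replica");
  }
  return shard.getReplicas().iterator().next();
}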
use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.
In class TestConfigReload, method checkConfReload:
private void checkConfReload(SolrZkClient client, String resPath, String name, String uri) throws Exception {
  Stat stat = new Stat();
  byte[] data = null;
  try {
    data = client.getData(resPath, null, stat, true);
  } catch (KeeperException.NoNodeException e) {
    data = "{}".getBytes(StandardCharsets.UTF_8);
    log.info("creating_node {}", resPath);
    client.create(resPath, data, CreateMode.PERSISTENT, true);
  }
  long startTime = System.nanoTime();
  // Touch the resource and the config root so every core sees a new znode version.
  Stat newStat = client.setData(resPath, data, true);
  client.setData("/configs/conf1", new byte[] { 1 }, true);
  assertTrue(newStat.getVersion() > stat.getVersion());
  log.info("new_version {}", newStat.getVersion());
  Integer newVersion = newStat.getVersion();
  long maxTimeoutSeconds = 20;
  DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection("collection1");
  List<String> urls = new ArrayList<>();
  for (Slice slice : coll.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      urls.add("" + replica.get(ZkStateReader.BASE_URL_PROP) + "/" + replica.get(ZkStateReader.CORE_NAME_PROP));
    }
  }
  // Poll every core until all report the new znode version, or the timeout expires.
  HashSet<String> succeeded = new HashSet<>();
  while (TimeUnit.SECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) < maxTimeoutSeconds) {
    Thread.sleep(50);
    for (String url : urls) {
      Map respMap = getAsMap(url + uri + "?wt=json");
      if (String.valueOf(newVersion).equals(String.valueOf(getObjectByPath(respMap, true, asList(name, "znodeVersion"))))) {
        succeeded.add(url);
      }
    }
    if (succeeded.size() == urls.size())
      break;
    succeeded.clear();
  }
  assertEquals(StrUtils.formatString("tried these servers {0}, succeeded only in {1}", urls, succeeded), urls.size(), succeeded.size());
}
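The URL-building loop in checkConfReload is another slice-to-replica flattening; as a stream pipeline it reads as one expression. A sketch assuming the same DocCollection, Slice, and Replica accessors used above:

// Stream-based equivalent of the nested URL-building loop above.
List<String> urls = coll.getSlices().stream()
    .flatMap(slice -> slice.getReplicas().stream())
    .map(r -> r.get(ZkStateReader.BASE_URL_PROP) + "/" + r.get(ZkStateReader.CORE_NAME_PROP))
    .collect(Collectors.toList());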