use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
In class JdbcTest, method testJDBCMethods.
/**
 * Exercises the Solr JDBC driver's Connection, DatabaseMetaData, Statement,
 * PreparedStatement and ResultSet implementations end-to-end.
 *
 * @param collection       collection under test (not referenced directly in this method)
 * @param connectionString JDBC URL handed to {@link DriverManager}
 * @param properties       driver properties for the connection
 * @param sql              query executed via both Statement and PreparedStatement
 * @throws Exception on any unexpected JDBC failure
 */
private void testJDBCMethods(String collection, String connectionString, Properties properties, String sql) throws Exception {
  try (Connection con = DriverManager.getConnection(connectionString, properties)) {
    assertTrue(con.isValid(DEFAULT_CONNECTION_TIMEOUT));

    // Catalog is pinned to the ZooKeeper host; setCatalog must be a no-op.
    assertEquals(zkHost, con.getCatalog());
    con.setCatalog(zkHost);
    assertEquals(zkHost, con.getCatalog());

    // Schemas are not supported: getSchema stays null even after setSchema.
    assertNull(con.getSchema());
    con.setSchema("myschema");
    assertNull(con.getSchema());

    DatabaseMetaData databaseMetaData = con.getMetaData();
    assertNotNull(databaseMetaData);
    assertEquals(con, databaseMetaData.getConnection());
    assertEquals(connectionString, databaseMetaData.getURL());
    assertEquals(4, databaseMetaData.getJDBCMajorVersion());
    assertEquals(0, databaseMetaData.getJDBCMinorVersion());
    assertEquals("Apache Solr", databaseMetaData.getDatabaseProductName());

    // The following tests require package information that is not available when running via Maven
    // assertEquals(this.getClass().getPackage().getSpecificationVersion(), databaseMetaData.getDatabaseProductVersion());
    // assertEquals(0, databaseMetaData.getDatabaseMajorVersion());
    // assertEquals(0, databaseMetaData.getDatabaseMinorVersion());
    // assertEquals(this.getClass().getPackage().getSpecificationTitle(), databaseMetaData.getDriverName());
    // assertEquals(this.getClass().getPackage().getSpecificationVersion(), databaseMetaData.getDriverVersion());
    // assertEquals(0, databaseMetaData.getDriverMajorVersion());
    // assertEquals(0, databaseMetaData.getDriverMinorVersion());

    // getSchemas must return exactly the zkHost schema and the synthetic "metadata"
    // schema; order is not asserted — each row is ticked off the expected set.
    List<String> tableSchemas = new ArrayList<>(Arrays.asList(zkHost, "metadata"));
    try (ResultSet rs = databaseMetaData.getSchemas()) {
      assertTrue(rs.next());
      String firstSchema = rs.getString("tableSchem");
      assertTrue(tableSchemas.contains(firstSchema));
      tableSchemas.remove(firstSchema);
      assertNull(rs.getString("tableCat"));
      assertTrue(rs.next());
      String secondSchema = rs.getString("tableSchem");
      assertTrue(tableSchemas.contains(secondSchema));
      tableSchemas.remove(secondSchema);
      assertNull(rs.getString("tableCat"));
      assertFalse(rs.next());
      assertTrue(tableSchemas.isEmpty());
    }

    // Exactly one catalog row is expected, with a null catalog name.
    try (ResultSet rs = databaseMetaData.getCatalogs()) {
      assertTrue(rs.next());
      assertNull(rs.getString("tableCat"));
      assertFalse(rs.next());
    }

    // Expected tables = all collections plus all aliases; the sorted TreeSet
    // iteration is asserted to match the row order returned by getTables.
    CloudSolrClient solrClient = cluster.getSolrClient();
    solrClient.connect();
    ZkStateReader zkStateReader = solrClient.getZkStateReader();
    SortedSet<String> tables = new TreeSet<>();
    Set<String> collectionsSet = zkStateReader.getClusterState().getCollectionsMap().keySet();
    tables.addAll(collectionsSet);
    Aliases aliases = zkStateReader.getAliases();
    if (aliases != null) {
      Map<String, String> collectionAliasMap = aliases.getCollectionAliasMap();
      if (collectionAliasMap != null) {
        Set<String> aliasesSet = collectionAliasMap.keySet();
        tables.addAll(aliasesSet);
      }
    }
    try (ResultSet rs = databaseMetaData.getTables(null, zkHost, "%", null)) {
      for (String table : tables) {
        assertTrue(rs.next());
        assertNull(rs.getString("tableCat"));
        assertEquals(zkHost, rs.getString("tableSchem"));
        assertEquals(table, rs.getString("tableName"));
        assertEquals("TABLE", rs.getString("tableType"));
        assertNull(rs.getString("remarks"));
      }
      assertFalse(rs.next());
    }

    // Connections are always read-only; setReadOnly(true) must not change that.
    assertTrue(con.isReadOnly());
    con.setReadOnly(true);
    assertTrue(con.isReadOnly());

    assertNull(con.getWarnings());
    con.clearWarnings();
    assertNull(con.getWarnings());

    // Run the query through a plain Statement: first executeQuery, then
    // execute + getResultSet, which must expose the same result contract.
    try (Statement statement = con.createStatement()) {
      checkStatement(con, statement);
      try (ResultSet rs = statement.executeQuery(sql)) {
        assertEquals(statement, rs.getStatement());
        checkResultSetMetadata(rs);
        checkResultSet(rs);
      }
      assertTrue(statement.execute(sql));
      // -1 signals "no update count" for a query-only statement.
      assertEquals(-1, statement.getUpdateCount());
      try (ResultSet rs = statement.getResultSet()) {
        assertEquals(statement, rs.getStatement());
        checkResultSetMetadata(rs);
        checkResultSet(rs);
      }
      assertFalse(statement.getMoreResults());
    }

    // And again through a PreparedStatement with identical expectations.
    try (PreparedStatement statement = con.prepareStatement(sql)) {
      checkStatement(con, statement);
      try (ResultSet rs = statement.executeQuery()) {
        assertEquals(statement, rs.getStatement());
        checkResultSetMetadata(rs);
        checkResultSet(rs);
      }
      assertTrue(statement.execute());
      assertEquals(-1, statement.getUpdateCount());
      try (ResultSet rs = statement.getResultSet()) {
        assertEquals(statement, rs.getStatement());
        checkResultSetMetadata(rs);
        checkResultSet(rs);
      }
      assertFalse(statement.getMoreResults());
    }
  }
}
use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
In class ForceLeaderTest, method setReplicaState.
/**
 * Forces a replica into the given state by posting a state-change message directly
 * onto the Overseer's state update queue, then polls cluster state until the change
 * is observed (up to 10 attempts, 1s apart) or fails with a SolrException.
 */
protected void setReplicaState(String collection, String slice, Replica replica, Replica.State state) throws SolrServerException, IOException, KeeperException, InterruptedException {
  ZkStateReader zkStateReader = cloudClient.getZkStateReader();
  DistributedQueue stateQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
  String baseUrl = zkStateReader.getBaseUrlForNodeName(replica.getNodeName());
  ZkNodeProps message = new ZkNodeProps(
      Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
      ZkStateReader.BASE_URL_PROP, baseUrl,
      ZkStateReader.NODE_NAME_PROP, replica.getNodeName(),
      ZkStateReader.SHARD_ID_PROP, slice,
      ZkStateReader.COLLECTION_PROP, collection,
      ZkStateReader.CORE_NAME_PROP, replica.getStr(CORE_NAME_PROP),
      ZkStateReader.CORE_NODE_NAME_PROP, replica.getName(),
      ZkStateReader.STATE_PROP, state.toString());
  stateQueue.offer(Utils.toJSON(message));

  // Poll until the replica reports the requested state, or give up after 10 tries.
  Replica.State replicaState = null;
  boolean transition = false;
  int attemptsLeft = 10;
  while (attemptsLeft-- > 0) {
    ClusterState clusterState = zkStateReader.getClusterState();
    replicaState = clusterState.getSlice(collection, slice).getReplica(replica.getName()).getState();
    if (replicaState == state) {
      transition = true;
      break;
    }
    Thread.sleep(1000);
  }
  if (!transition) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not set replica [" + replica.getName() + "] as " + state + ". Last known state of the replica: " + replicaState);
  }
}
use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
In class OverseerTest, method testShardLeaderChange.
@Test
public void testShardLeaderChange() throws Exception {
String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
final ZkTestServer server = new ZkTestServer(zkDir);
SolrZkClient controllerClient = null;
ZkStateReader reader = null;
MockZKController mockController = null;
MockZKController mockController2 = null;
OverseerRestarter killer = null;
Thread killerThread = null;
try {
server.run();
controllerClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
ZkController.createClusterZkNodes(controllerClient);
killer = new OverseerRestarter(server.getZkAddress());
killerThread = new Thread(killer);
killerThread.start();
reader = new ZkStateReader(controllerClient);
reader.createClusterStateWatchersAndUpdate();
for (int i = 0; i < atLeast(4); i++) {
//for each round allow 1 kill
killCounter.incrementAndGet();
mockController = new MockZKController(server.getZkAddress(), "node1");
mockController.publishState(COLLECTION, "core1", "node1", Replica.State.ACTIVE, 1);
if (mockController2 != null) {
mockController2.close();
mockController2 = null;
}
mockController.publishState(COLLECTION, "core1", "node1", Replica.State.RECOVERING, 1);
mockController2 = new MockZKController(server.getZkAddress(), "node2");
mockController.publishState(COLLECTION, "core1", "node1", Replica.State.ACTIVE, 1);
verifyShardLeader(reader, COLLECTION, "shard1", "core1");
mockController2.publishState(COLLECTION, "core4", "node2", Replica.State.ACTIVE, 1);
mockController.close();
mockController = null;
verifyShardLeader(reader, COLLECTION, "shard1", "core4");
}
} finally {
if (killer != null) {
killer.run = false;
if (killerThread != null) {
killerThread.join();
}
}
close(mockController);
close(mockController2);
close(controllerClient);
close(reader);
server.shutdown();
}
}
use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
In class OverseerTest, method testShardAssignmentBigger.
@Test
public void testShardAssignmentBigger() throws Exception {
String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
//how many simulated nodes (num of threads)
final int nodeCount = random().nextInt(TEST_NIGHTLY ? 50 : 10) + (TEST_NIGHTLY ? 50 : 10) + 1;
//how many cores to register
final int coreCount = random().nextInt(TEST_NIGHTLY ? 100 : 11) + (TEST_NIGHTLY ? 100 : 11) + 1;
//how many slices
final int sliceCount = random().nextInt(TEST_NIGHTLY ? 20 : 5) + 1;
ZkTestServer server = new ZkTestServer(zkDir);
SolrZkClient zkClient = null;
ZkStateReader reader = null;
SolrZkClient overseerClient = null;
final MockZKController[] controllers = new MockZKController[nodeCount];
final ExecutorService[] nodeExecutors = new ExecutorService[nodeCount];
try {
server.run();
AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
ZkController.createClusterZkNodes(zkClient);
overseerClient = electNewOverseer(server.getZkAddress());
reader = new ZkStateReader(zkClient);
reader.createClusterStateWatchersAndUpdate();
for (int i = 0; i < nodeCount; i++) {
controllers[i] = new MockZKController(server.getZkAddress(), "node" + i);
}
for (int i = 0; i < nodeCount; i++) {
nodeExecutors[i] = ExecutorUtil.newMDCAwareFixedThreadPool(1, new DefaultSolrThreadFactory("testShardAssignment"));
}
final String[] ids = new String[coreCount];
//register total of coreCount cores
for (int i = 0; i < coreCount; i++) {
final int slot = i;
nodeExecutors[i % nodeCount].submit((Runnable) () -> {
final String coreName = "core" + slot;
try {
ids[slot] = controllers[slot % nodeCount].publishState(COLLECTION, coreName, "node" + slot, Replica.State.ACTIVE, sliceCount);
} catch (Throwable e) {
e.printStackTrace();
fail("register threw exception:" + e.getClass());
}
});
}
for (int i = 0; i < nodeCount; i++) {
nodeExecutors[i].shutdown();
}
for (int i = 0; i < nodeCount; i++) {
while (!nodeExecutors[i].awaitTermination(100, TimeUnit.MILLISECONDS)) ;
}
// make sure all cores have been assigned a id in cloudstate
int cloudStateSliceCount = 0;
for (int i = 0; i < 40; i++) {
cloudStateSliceCount = 0;
ClusterState state = reader.getClusterState();
final Map<String, Slice> slices = state.getSlicesMap(COLLECTION);
if (slices != null) {
for (String name : slices.keySet()) {
cloudStateSliceCount += slices.get(name).getReplicasMap().size();
}
if (coreCount == cloudStateSliceCount)
break;
}
Thread.sleep(200);
}
assertEquals("Unable to verify all cores have been assigned an id in cloudstate", coreCount, cloudStateSliceCount);
// make sure all cores have been returned an id
int assignedCount = 0;
for (int i = 0; i < 240; i++) {
assignedCount = 0;
for (int j = 0; j < coreCount; j++) {
if (ids[j] != null) {
assignedCount++;
}
}
if (coreCount == assignedCount) {
break;
}
Thread.sleep(1000);
}
assertEquals("Unable to verify all cores have been returned an id", coreCount, assignedCount);
final HashMap<String, AtomicInteger> counters = new HashMap<>();
for (int i = 1; i < sliceCount + 1; i++) {
counters.put("shard" + i, new AtomicInteger());
}
for (int i = 0; i < coreCount; i++) {
final AtomicInteger ai = counters.get(ids[i]);
assertNotNull("could not find counter for shard:" + ids[i], ai);
ai.incrementAndGet();
}
for (String counter : counters.keySet()) {
int count = counters.get(counter).intValue();
int expectedCount = coreCount / sliceCount;
int min = expectedCount - 1;
int max = expectedCount + 1;
if (count < min || count > max) {
fail("Unevenly assigned shard ids, " + counter + " had " + count + ", expected: " + min + "-" + max);
}
}
//make sure leaders are in cloud state
for (int i = 0; i < sliceCount; i++) {
assertNotNull(reader.getLeaderUrl(COLLECTION, "shard" + (i + 1), 15000));
}
} finally {
close(zkClient);
close(overseerClient);
close(reader);
for (int i = 0; i < controllers.length; i++) if (controllers[i] != null) {
controllers[i].close();
}
server.shutdown();
for (int i = 0; i < nodeCount; i++) {
if (nodeExecutors[i] != null) {
nodeExecutors[i].shutdownNow();
}
}
}
}
use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
In class OverseerTest, method testReplay.
@Test
public void testReplay() throws Exception {
String zkDir = createTempDir().toFile().getAbsolutePath() + File.separator + "zookeeper/server1/data";
ZkTestServer server = new ZkTestServer(zkDir);
SolrZkClient zkClient = null;
SolrZkClient overseerClient = null;
ZkStateReader reader = null;
try {
server.run();
zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
ZkController.createClusterZkNodes(zkClient);
reader = new ZkStateReader(zkClient);
reader.createClusterStateWatchersAndUpdate();
//prepopulate work queue with some items to emulate previous overseer died before persisting state
DistributedQueue queue = Overseer.getInternalWorkQueue(zkClient, new Overseer.Stats());
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(), ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr", ZkStateReader.NODE_NAME_PROP, "node1", ZkStateReader.SHARD_ID_PROP, "s1", ZkStateReader.COLLECTION_PROP, COLLECTION, ZkStateReader.CORE_NAME_PROP, "core1", ZkStateReader.ROLES_PROP, "", ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
queue.offer(Utils.toJSON(m));
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state", ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr", ZkStateReader.NODE_NAME_PROP, "node1", ZkStateReader.SHARD_ID_PROP, "s1", ZkStateReader.COLLECTION_PROP, COLLECTION, ZkStateReader.CORE_NAME_PROP, "core2", ZkStateReader.ROLES_PROP, "", ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
queue.offer(Utils.toJSON(m));
overseerClient = electNewOverseer(server.getZkAddress());
//submit to proper queue
queue = Overseer.getStateUpdateQueue(zkClient);
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(), ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr", ZkStateReader.NODE_NAME_PROP, "node1", ZkStateReader.SHARD_ID_PROP, "s1", ZkStateReader.COLLECTION_PROP, COLLECTION, ZkStateReader.CORE_NAME_PROP, "core3", ZkStateReader.ROLES_PROP, "", ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
queue.offer(Utils.toJSON(m));
for (int i = 0; i < 100; i++) {
Slice s = reader.getClusterState().getSlice(COLLECTION, "s1");
if (s != null && s.getReplicasMap().size() == 3)
break;
Thread.sleep(100);
}
assertNotNull(reader.getClusterState().getSlice(COLLECTION, "s1"));
assertEquals(3, reader.getClusterState().getSlice(COLLECTION, "s1").getReplicasMap().size());
} finally {
close(overseerClient);
close(zkClient);
close(reader);
server.shutdown();
}
}
Aggregations