Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class ZkStateWriterTest, method testExternalModificationToStateFormat2:
public void testExternalModificationToStateFormat2() throws Exception {
  String zkDir = createTempDir("testExternalModificationToStateFormat2").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient zkClient = null;
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);
    try (ZkStateReader reader = new ZkStateReader(zkClient)) {
      reader.createClusterStateWatchersAndUpdate();
      ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
      zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
      zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c2", true);
      ClusterState state = reader.getClusterState();
      // create collection c2 with stateFormat = 2
      ZkWriteCommand c2 = new ZkWriteCommand("c2",
          new DocCollection("c2", new HashMap<String, Slice>(), new HashMap<String, Object>(),
              DocRouter.DEFAULT, 0, ZkStateReader.getCollectionPath("c2")));
      state = writer.enqueueUpdate(reader.getClusterState(), c2, null);
      // the first write is flushed immediately
      assertFalse(writer.hasPendingUpdates());
      int sharedClusterStateVersion = state.getZkClusterStateVersion();
      int stateFormat2Version = state.getCollection("c2").getZNodeVersion();
      // simulate an external modification to /collections/c2/state.json
      byte[] data = zkClient.getData(ZkStateReader.getCollectionPath("c2"), null, null, true);
      zkClient.setData(ZkStateReader.getCollectionPath("c2"), data, true);
      // get the most up-to-date state
      reader.forceUpdateCollection("c2");
      state = reader.getClusterState();
      log.info("Cluster state: {}", state);
      assertTrue(state.hasCollection("c2"));
      assertEquals(sharedClusterStateVersion, (int) state.getZkClusterStateVersion());
      assertEquals(stateFormat2Version + 1, state.getCollection("c2").getZNodeVersion());
      // enqueue another update to the stateFormat=2 collection so that an update is pending
      state = writer.enqueueUpdate(state, c2, null);
      assertTrue(writer.hasPendingUpdates());
      // get the most up-to-date state
      reader.forceUpdateCollection("c2");
      state = reader.getClusterState();
      // enqueue a stateFormat=1 collection; this triggers a flush, which fails
      // because c2's znode was modified externally
      ZkWriteCommand c1 = new ZkWriteCommand("c1",
          new DocCollection("c1", new HashMap<String, Slice>(), new HashMap<String, Object>(),
              DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE));
      try {
        writer.enqueueUpdate(state, c1, null);
        fail("Enqueue should not have succeeded");
      } catch (KeeperException.BadVersionException bve) {
        // expected
      }
      try {
        writer.enqueueUpdate(reader.getClusterState(), c2, null);
        fail("enqueueUpdate after BadVersionException should not have succeeded");
      } catch (IllegalStateException e) {
        // expected
      }
      try {
        writer.writePendingUpdates();
        fail("writePendingUpdates after BadVersionException should not have succeeded");
      } catch (IllegalStateException e) {
        // expected
      }
    }
  } finally {
    IOUtils.close(zkClient);
    server.shutdown();
  }
}
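Taken together, the assertions above pin down ZkStateWriter's batching contract: the first update after construction is flushed straight to ZooKeeper, later ones are buffered until writePendingUpdates() runs. The following is a minimal sketch of just that contract, assuming the same reader, writer and zkClient setup as the test above; the collection name "c3" is hypothetical.

// Minimal sketch of the enqueue/flush contract (assumes the same reader,
// writer and zkClient setup as the test above; "c3" is hypothetical).
zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c3", true);
ZkWriteCommand c3 = new ZkWriteCommand("c3",
    new DocCollection("c3", new HashMap<String, Slice>(), new HashMap<String, Object>(),
        DocRouter.DEFAULT, 0, ZkStateReader.getCollectionPath("c3")));
// The first update after construction is flushed to ZooKeeper immediately...
ClusterState state = writer.enqueueUpdate(reader.getClusterState(), c3, null);
assertFalse(writer.hasPendingUpdates());
// ...while subsequent updates are buffered until writePendingUpdates() is called.
state = writer.enqueueUpdate(state, c3, null);
assertTrue(writer.hasPendingUpdates());
writer.writePendingUpdates();
assertFalse(writer.hasPendingUpdates());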
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class ZkStateWriterTest, method testSingleExternalCollection:
public void testSingleExternalCollection() throws Exception {
  String zkDir = createTempDir("testSingleExternalCollection").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient zkClient = null;
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);
    try (ZkStateReader reader = new ZkStateReader(zkClient)) {
      reader.createClusterStateWatchersAndUpdate();
      ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
      zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
      // create a new collection with stateFormat = 2
      ZkWriteCommand c1 = new ZkWriteCommand("c1",
          new DocCollection("c1", new HashMap<String, Slice>(), new HashMap<String, Object>(),
              DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json"));
      ClusterState clusterState = writer.enqueueUpdate(reader.getClusterState(), c1, null);
      writer.writePendingUpdates();
      // the collection must not appear in the shared /clusterstate.json...
      Map map = (Map) Utils.fromJSON(zkClient.getData("/clusterstate.json", null, null, true));
      assertNull(map.get("c1"));
      // ...but must appear in its own state.json
      map = (Map) Utils.fromJSON(zkClient.getData(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", null, null, true));
      assertNotNull(map.get("c1"));
    }
  } finally {
    IOUtils.close(zkClient);
    server.shutdown();
  }
}
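The two raw znode reads above show where a stateFormat=2 collection lives: absent from the shared /clusterstate.json, present in its own state.json. A short follow-up sketch of checking the same thing through the ZkStateReader API instead, assuming the same reader setup as the test; getStateFormat() is assumed available on this version of DocCollection.

// Sketch: verify the collection through ZkStateReader instead of raw znode reads.
// Assumes the same reader setup as the test above; getStateFormat() is assumed
// present on this version of DocCollection.
reader.forceUpdateCollection("c1");
DocCollection c1State = reader.getClusterState().getCollection("c1");
// stateFormat 2 means a per-collection state.json rather than the shared file
assertEquals(2, c1State.getStateFormat());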
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class ZkStateWriterTest, method testExternalModificationToSharedClusterState:
public void testExternalModificationToSharedClusterState() throws Exception {
  String zkDir = createTempDir("testExternalModification").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient zkClient = null;
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);
    try (ZkStateReader reader = new ZkStateReader(zkClient)) {
      reader.createClusterStateWatchersAndUpdate();
      ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
      zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
      zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c2", true);
      // create collection 1 with stateFormat = 1 (shared /clusterstate.json)
      ZkWriteCommand c1 = new ZkWriteCommand("c1",
          new DocCollection("c1", new HashMap<String, Slice>(), new HashMap<String, Object>(),
              DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE));
      writer.enqueueUpdate(reader.getClusterState(), c1, null);
      writer.writePendingUpdates();
      reader.forceUpdateCollection("c1");
      reader.forceUpdateCollection("c2");
      // keep a reference to the current cluster state object
      ClusterState clusterState = reader.getClusterState();
      assertTrue(clusterState.hasCollection("c1"));
      assertFalse(clusterState.hasCollection("c2"));
      // simulate an external modification to /clusterstate.json
      byte[] data = zkClient.getData("/clusterstate.json", null, null, true);
      zkClient.setData("/clusterstate.json", data, true);
      // enqueue another c1 update so that ZkStateWriter has pending updates
      writer.enqueueUpdate(clusterState, c1, null);
      assertTrue(writer.hasPendingUpdates());
      // create collection 2 with stateFormat = 2 (its own state.json)
      ZkWriteCommand c2 = new ZkWriteCommand("c2",
          new DocCollection("c2", new HashMap<String, Slice>(), new HashMap<String, Object>(),
              DocRouter.DEFAULT, 0, ZkStateReader.getCollectionPath("c2")));
      try {
        // we are passing in the stale cluster state object
        writer.enqueueUpdate(clusterState, c2, null);
        fail("Enqueue should not have succeeded");
      } catch (KeeperException.BadVersionException bve) {
        // expected
      }
      try {
        writer.enqueueUpdate(reader.getClusterState(), c2, null);
        fail("enqueueUpdate after BadVersionException should not have succeeded");
      } catch (IllegalStateException e) {
        // expected
      }
      try {
        writer.writePendingUpdates();
        fail("writePendingUpdates after BadVersionException should not have succeeded");
      } catch (IllegalStateException e) {
        // expected
      }
    }
  } finally {
    IOUtils.close(zkClient);
    server.shutdown();
  }
}
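Both failure tests hinge on ZooKeeper's conditional setData: ZkStateWriter remembers the znode version it last read, and ZooKeeper rejects a write that carries a stale version. The sketch below shows that underlying mechanism in isolation, assuming a connected SolrZkClient as in the tests and the version-taking setData overload; Stat is org.apache.zookeeper.data.Stat.

// Sketch of the optimistic-concurrency failure the two tests provoke.
// Assumes a connected SolrZkClient as in the tests above; the version-taking
// setData overload is assumed available.
Stat stat = new Stat();
byte[] bytes = zkClient.getData("/clusterstate.json", null, stat, true);
int staleVersion = stat.getVersion();
// an "external" writer bumps the znode version
zkClient.setData("/clusterstate.json", bytes, true);
// a write carrying the now-stale version fails, just like ZkStateWriter's internal flush
try {
  zkClient.setData("/clusterstate.json", bytes, staleVersion, true);
  fail("expected BadVersionException");
} catch (KeeperException.BadVersionException expected) {
  // expected
}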
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class TestHdfsBackupRestoreCore, method test:
@Test
public void test() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "HdfsBackupRestore";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  create.process(solrClient);
  int nDocs = BackupRestoreUtils.indexDocs(solrClient, collectionName, docsSeed);
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(1, collectionState.getActiveSlices().size());
  Slice shard = collectionState.getActiveSlices().iterator().next();
  assertEquals(1, shard.getReplicas().size());
  Replica replica = shard.getReplicas().iterator().next();
  String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
  String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
  String backupName = TestUtil.randomSimpleString(random(), 1, 5);
  boolean testViaReplicationHandler = random().nextBoolean();
  String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString();
  try (SolrClient masterClient = getHttpSolrClient(replicaBaseUrl)) {
    // Create a backup.
    if (testViaReplicationHandler) {
      log.info("Running backup via replication handler");
      BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName);
      CheckBackupStatus checkBackupStatus = new CheckBackupStatus((HttpSolrClient) masterClient, coreName, null);
      while (!checkBackupStatus.success) {
        checkBackupStatus.fetchStatus();
        Thread.sleep(1000);
      }
    } else {
      log.info("Running backup via core admin API");
      Map<String, String> params = new HashMap<>();
      params.put("name", backupName);
      params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
      BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params);
    }
    int numRestoreTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
    for (int attempts = 0; attempts < numRestoreTests; attempts++) {
      // Modify the existing index before we call restore.
      if (nDocs > 0) {
        // Delete a few docs
        int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
        for (int i = 0; i < numDeletes; i++) {
          masterClient.deleteByQuery(collectionName, "id:" + i);
        }
        masterClient.commit(collectionName);
        // Add a few more
        int moreAdds = TestUtil.nextInt(random(), 1, 100);
        for (int i = 0; i < moreAdds; i++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", i + nDocs);
          doc.addField("name", "name = " + (i + nDocs));
          masterClient.add(collectionName, doc);
        }
        // Purposely skip the commit sometimes, so some docs may be uncommitted when restore runs.
        if (usually()) {
          masterClient.commit(collectionName);
        }
      }
      if (testViaReplicationHandler) {
        log.info("Running restore via replication handler");
        // Snapshooter prefixes "snapshot." to the backup name.
        BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_RESTORE, "hdfs", backupName);
        while (!TestRestoreCore.fetchRestoreStatus(baseUrl, coreName)) {
          Thread.sleep(1000);
        }
      } else {
        log.info("Running restore via core admin API");
        Map<String, String> params = new HashMap<>();
        params.put("name", "snapshot." + backupName);
        params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
        BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params);
      }
      // See if the restore was successful by checking that all the docs are present again
      BackupRestoreUtils.verifyDocs(nDocs, masterClient, coreName);
      // Verify the permissions of the backup folder.
      FileStatus status = fs.getFileStatus(new org.apache.hadoop.fs.Path("/backup/snapshot." + backupName));
      FsPermission perm = status.getPermission();
      assertEquals(FsAction.ALL, perm.getUserAction());
      assertEquals(FsAction.ALL, perm.getGroupAction());
      assertEquals(FsAction.ALL, perm.getOtherAction());
    }
  }
}
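BackupRestoreUtils.runReplicationHandlerCommand is a thin wrapper over an HTTP call to the core's /replication endpoint. Below is a hedged sketch of the equivalent raw request using SolrJ; the parameter names follow the documented ReplicationHandler API, but the helper's actual implementation may differ in details.

// Sketch: trigger a backup with a plain request to the core's /replication
// handler, approximating what BackupRestoreUtils does for the test above.
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("command", ReplicationHandler.CMD_BACKUP);
params.set("repository", "hdfs");
params.set("name", backupName);
QueryRequest req = new QueryRequest(params);
req.setPath("/replication"); // route to the replication handler instead of /select
try (SolrClient core = getHttpSolrClient(baseUrl + "/" + coreName)) {
  core.request(req);
}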
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class TestBlobHandler, method doBlobHandlerTest:
@Test
public void doBlobHandlerTest() throws Exception {
  try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
    CollectionAdminResponse response1;
    CollectionAdminRequest.Create createCollectionRequest = CollectionAdminRequest.createCollection(".system", 1, 2);
    response1 = createCollectionRequest.process(client);
    assertEquals(0, response1.getStatus());
    assertTrue(response1.isSuccess());
    DocCollection sysColl = cloudClient.getZkStateReader().getClusterState().getCollection(".system");
    Replica replica = sysColl.getActiveSlicesMap().values().iterator().next().getLeader();
    String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
    // the /blob request handler must be registered in the .system collection's config...
    String url = baseUrl + "/.system/config/requestHandler";
    Map map = TestSolrConfigHandlerConcurrent.getAsMap(url, cloudClient);
    assertNotNull(map);
    assertEquals("solr.BlobHandler", getObjectByPath(map, true, Arrays.asList("config", "requestHandler", "/blob", "class")));
    // ...and the schema must define the "blob" field with the "bytes" type
    map = TestSolrConfigHandlerConcurrent.getAsMap(baseUrl + "/.system/schema/fields/blob", cloudClient);
    assertNotNull(map);
    assertEquals("blob", getObjectByPath(map, true, Arrays.asList("field", "name")));
    assertEquals("bytes", getObjectByPath(map, true, Arrays.asList("field", "type")));
    checkBlobPost(baseUrl, cloudClient);
  }
}
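checkBlobPost exercises the upload side of the Blob Store API whose registration the assertions above verify. A hedged sketch of such an upload with Apache HttpClient follows; the blob name "test" and the payload are illustrative, and the real helper may differ.

// Sketch: upload a blob to the .system collection's /blob handler.
// The blob name "test" and the payload bytes are illustrative.
byte[] bytes = new byte[]{1, 2, 3, 4, 5};
HttpPost post = new HttpPost(baseUrl + "/.system/blob/test");
post.setHeader("Content-Type", "application/octet-stream");
post.setEntity(new ByteArrayEntity(bytes));
try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
  HttpResponse rsp = httpClient.execute(post);
  assertEquals(200, rsp.getStatusLine().getStatusCode());
}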