Use of org.apache.geode.test.dunit.rules.MemberVM in project geode by Apache.
From the class ClusterConfigWithSecurityDUnitTest, method testSecurityPropsInheritance:
@Test
@Ignore("GEODE-2315")
public void testSecurityPropsInheritance() throws Exception {
  locatorProps.clear();
  locatorProps.setProperty(LOCATORS, "localhost[" + locator0.getPort() + "]");
  locatorProps.setProperty("security-username", "cluster");
  locatorProps.setProperty("security-password", "cluster");
  MemberVM locator1 = lsRule.startLocatorVM(1, locatorProps);

  // the second locator should inherit the first locator's security props
  locator1.invoke(() -> {
    InternalLocator locator = LocatorServerStartupRule.locatorStarter.getLocator();
    ClusterConfigurationService sc = locator.getSharedConfiguration();
    Properties clusterConfigProps = sc.getConfiguration("cluster").getGemfireProperties();
    assertThat(clusterConfigProps.getProperty(SECURITY_MANAGER))
        .isEqualTo(SimpleTestSecurityManager.class.getName());
    assertThat(locator.getConfig().getSecurityManager()).isNotEmpty();
  });
}
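All of the examples on this page share the same wiring: a LocatorServerStartupRule (the lsRule field) hands out MemberVM handles for locators and servers, and assertions run inside each member via invoke(). A minimal sketch of that pattern, assuming lsRule is declared as a JUnit @Rule of type LocatorServerStartupRule with its default constructor:

@Rule
public LocatorServerStartupRule lsRule = new LocatorServerStartupRule();

@Test
public void minimalMemberVMUsage() throws Exception {
  // start a locator in dunit VM 0 and a server in VM 1 that joins it
  MemberVM locator = lsRule.startLocatorVM(0, new Properties());
  MemberVM server = lsRule.startServerVM(1, new Properties(), locator.getPort());

  // run assertions inside the server's VM
  server.invoke(() -> {
    Cache cache = LocatorServerStartupRule.serverStarter.getCache();
    assertNotNull(cache);
  });
}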
Use of org.apache.geode.test.dunit.rules.MemberVM in project geode by Apache.
From the class ClusterConfig, method verifyServer:
public void verifyServer(MemberVM<Server> serverVM) {
  // verify files exist in filesystem
  Set<String> expectedJarNames = this.getJarNames().stream().collect(toSet());
  String[] actualJarFiles =
      serverVM.getWorkingDir().list((dir, filename) -> filename.contains(".jar"));
  Set<String> actualJarNames = Stream.of(actualJarFiles)
      .map(jar -> jar.replaceAll("\\.v\\d+\\.jar", ".jar")).collect(toSet());

  // We will end up with extra jars on disk if they are deployed and then undeployed
  assertThat(expectedJarNames).isSubsetOf(actualJarNames);

  // verify config exists in memory
  serverVM.invoke(() -> {
    Cache cache = GemFireCacheImpl.getInstance();

    // TODO: set compare to fail if there are extra regions
    for (String region : this.getRegions()) {
      assertThat(cache.getRegion(region)).isNotNull();
    }

    if (StringUtils.isNotBlank(this.getMaxLogFileSize())) {
      Properties props = cache.getDistributedSystem().getProperties();
      assertThat(props.getProperty(LOG_FILE_SIZE_LIMIT)).isEqualTo(this.getMaxLogFileSize());
    }

    for (String jar : this.getJarNames()) {
      DeployedJar deployedJar =
          ClassPathLoader.getLatest().getJarDeployer().findDeployedJar(jar);
      assertThat(deployedJar).isNotNull();
      assertThat(Class.forName(nameOfClassContainedInJar(jar), true,
          new URLClassLoader(new URL[] {deployedJar.getFileURL()}))).isNotNull();
    }

    // If we have extra jars on disk left over from undeploy, make sure they aren't used
    Set<String> undeployedJarNames = new HashSet<>(actualJarNames);
    undeployedJarNames.removeAll(expectedJarNames);
    for (String jar : undeployedJarNames) {
      System.out.println("Verifying undeployed jar: " + jar);
      DeployedJar undeployedJar =
          ClassPathLoader.getLatest().getJarDeployer().findDeployedJar(jar);
      assertThat(undeployedJar).isNull();
    }
  });
}
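The replaceAll call above normalizes the on-disk file names: the jar deployer writes versioned file names of the form name.v<N>.jar, and stripping the suffix makes them comparable with the logical jar names tracked by the config groups. A small self-contained sketch of that normalization (the file name is hypothetical):

@Test
public void versionSuffixIsStrippedFromDeployedJarNames() {
  // hypothetical versioned file name as the jar deployer would write it to disk
  String onDisk = "cluster.v2.jar";
  String logical = onDisk.replaceAll("\\.v\\d+\\.jar", ".jar");
  // the normalized name matches the logical name used in ConfigGroup.addJar(...)
  assertThat(logical).isEqualTo("cluster.jar");
}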
Use of org.apache.geode.test.dunit.rules.MemberVM in project geode by Apache.
From the class ClusterConfigDeployJarDUnitTest, method testUndeploy:
@Test
public void testUndeploy() throws Exception {
  // set up the locator/servers
  MemberVM locator = lsRule.startLocatorVM(0, locatorProps);
  serverProps.setProperty(GROUPS, "group1");
  MemberVM server1 = lsRule.startServerVM(1, serverProps, locator.getPort());
  serverProps.setProperty(GROUPS, "group2");
  MemberVM server2 = lsRule.startServerVM(2, serverProps, locator.getPort());
  serverProps.setProperty(GROUPS, "group1,group2");
  serverProps.setProperty(LOG_LEVEL, "info");
  MemberVM server3 = lsRule.startServerVM(3, serverProps, locator.getPort());

  ConfigGroup cluster = new ConfigGroup("cluster");
  ConfigGroup group1 = new ConfigGroup("group1");
  ConfigGroup group2 = new ConfigGroup("group2");
  ClusterConfig expectedClusterConfig = new ClusterConfig(cluster);
  ClusterConfig server1Config = new ClusterConfig(cluster, group1);
  ClusterConfig server2Config = new ClusterConfig(cluster, group2);
  ClusterConfig server3Config = new ClusterConfig(cluster, group1, group2);

  gfshConnector.connect(locator);
  assertThat(gfshConnector.isConnected()).isTrue();

  // deploy cluster.jar to the cluster
  gfshConnector.executeAndVerifyCommand("deploy --jar=" + clusterJar);
  cluster.addJar("cluster.jar");
  expectedClusterConfig.verify(locator);
  expectedClusterConfig.verify(server1);
  expectedClusterConfig.verify(server2);
  expectedClusterConfig.verify(server3);

  // deploy group1.jar to both group1 and group2
  gfshConnector.executeAndVerifyCommand("deploy --jar=" + group1Jar + " --group=group1,group2");
  group1.addJar("group1.jar");
  group2.addJar("group1.jar");
  server3Config.verify(locator);
  server1Config.verify(server1);
  server2Config.verify(server2);
  server3Config.verify(server3);

  // test undeploy from the cluster
  gfshConnector.executeAndVerifyCommand("undeploy --jar=cluster.jar");
  cluster = cluster.removeJar("cluster.jar");
  server3Config.verify(locator);
  server1Config.verify(server1);
  server2Config.verify(server2);
  server3Config.verify(server3);

  gfshConnector.executeAndVerifyCommand("undeploy --jar=group1.jar --group=group1");
  group1 = group1.removeJar("group1.jar");
  /*
   * TODO: This is the current (weird) behavior. If you started server4 with group1,group2 after
   * this undeploy command, it would have group1.jar (brought from
   * cluster_config/group2/group1.jar on the locator), whereas server3 (also in group1,group2)
   * does not have this jar.
   */
  ClusterConfig weirdServer3Config =
      new ClusterConfig(cluster, group1, new ConfigGroup(group2).removeJar("group1.jar"));
  server3Config.verify(locator);
  server1Config.verify(server1);
  server2Config.verify(server2);
  weirdServer3Config.verify(server3);
}
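The expected state in this test is modeled with ConfigGroup and ClusterConfig: a group accumulates jar names via addJar, removeJar returns the updated group (hence the reassignment above), and ClusterConfig.verify(MemberVM) checks a running member against the combined groups. A minimal sketch reusing only calls that appear in the test; the server1 variable stands in for a MemberVM started via lsRule:

ConfigGroup cluster = new ConfigGroup("cluster");
ConfigGroup group1 = new ConfigGroup("group1");
cluster.addJar("cluster.jar");
group1.addJar("group1.jar");

// expected view of a server that belongs to group1
ClusterConfig expectedOnServer1 = new ClusterConfig(cluster, group1);
expectedOnServer1.verify(server1);

// after "undeploy --jar=cluster.jar", drop the jar from the expected cluster group
cluster = cluster.removeJar("cluster.jar");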
Use of org.apache.geode.test.dunit.rules.MemberVM in project geode by Apache.
From the class ClusterConfigDistributionDUnitTest, method testIndexAndAsyncEventQueueCommands:
@Test
public void testIndexAndAsyncEventQueueCommands() throws Exception {
  final String DESTROY_REGION = "regionToBeDestroyed";

  gfshConnector.executeAndVerifyCommand(
      "create region --name=" + REPLICATE_REGION + " --type=REPLICATE");
  gfshConnector.executeAndVerifyCommand(
      "create region --name=" + PARTITION_REGION + " --type=PARTITION");
  gfshConnector.executeAndVerifyCommand(
      "create region --name=" + DESTROY_REGION + " --type=REPLICATE");
  gfshConnector.executeAndVerifyCommand(
      "create index --name=" + INDEX1 + " --expression=AAPL --region=" + REPLICATE_REGION);
  gfshConnector.executeAndVerifyCommand(
      "create index --name=" + INDEX2 + " --expression=VMW --region=" + PARTITION_REGION);

  String asyncEventQueueJarPath = createAsyncEventQueueJar();
  gfshConnector.executeAndVerifyCommand("deploy --jar=" + asyncEventQueueJarPath);

  CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_ASYNC_EVENT_QUEUE);
  csb.addOptionWithValueCheck(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ID, AsyncEventQueue1);
  csb.addOptionWithValueCheck(CliStrings.CREATE_ASYNC_EVENT_QUEUE__LISTENER,
      "com.qcdunit.QueueCommandsDUnitTestListener");
  csb.addOptionWithValueCheck(CliStrings.CREATE_ASYNC_EVENT_QUEUE__DISK_STORE, null);
  csb.addOptionWithValueCheck(CliStrings.CREATE_ASYNC_EVENT_QUEUE__BATCH_SIZE, "1000");
  csb.addOptionWithValueCheck(CliStrings.CREATE_ASYNC_EVENT_QUEUE__GROUP, null);
  csb.addOptionWithValueCheck(CliStrings.CREATE_ASYNC_EVENT_QUEUE__PERSISTENT, "false");
  csb.addOptionWithValueCheck(CliStrings.CREATE_ASYNC_EVENT_QUEUE__MAXIMUM_QUEUE_MEMORY, "1000");
  gfshConnector.executeAndVerifyCommand(csb.getCommandString());

  gfshConnector.executeAndVerifyCommand("destroy region --name=" + DESTROY_REGION);
  gfshConnector.executeAndVerifyCommand(
      "destroy index --name=" + INDEX2 + " --region=" + PARTITION_REGION);
  gfshConnector.executeAndVerifyCommand("alter runtime --copy-on-read=true");

  // Start a new member which receives the shared configuration
  // and verify the config creation on this member
  MemberVM server = lsRule.startServerVM(2, new Properties(), locator.getPort());
  server.invoke(() -> {
    Cache cache = LocatorServerStartupRule.serverStarter.getCache();
    assertNotNull(cache);
    assertTrue(cache.getCopyOnRead());

    Region region1 = cache.getRegion(REPLICATE_REGION);
    assertNotNull(region1);
    Region region2 = cache.getRegion(PARTITION_REGION);
    assertNotNull(region2);
    Region region3 = cache.getRegion(DESTROY_REGION);
    assertNull(region3);

    // Index verification
    Index index1 = cache.getQueryService().getIndex(region1, INDEX1);
    assertNotNull(index1);
    assertNull(cache.getQueryService().getIndex(region2, INDEX2));

    // ASYNC-EVENT-QUEUE verification
    AsyncEventQueue aeq = cache.getAsyncEventQueue(AsyncEventQueue1);
    assertNotNull(aeq);
    assertFalse(aeq.isPersistent());
    assertTrue(aeq.getBatchSize() == 1000);
    assertTrue(aeq.getMaximumQueueMemory() == 1000);
  });
}
Use of org.apache.geode.test.dunit.rules.MemberVM in project geode by Apache.
From the class ClusterConfigImportDUnitTest, method testImportWithRunningServer:
@Test
public void testImportWithRunningServer() throws Exception {
  MemberVM server1 = lsRule.startServerVM(1, serverProps, locatorVM.getPort());
  serverProps.setProperty("groups", "group2");
  MemberVM server2 = lsRule.startServerVM(2, serverProps, locatorVM.getPort());

  // even though we have a region recreated, we can still import since there is no data
  // in the region
  CommandResult result = gfshConnector.executeCommand(
      "import cluster-configuration --zip-file-name=" + clusterConfigZipPath);
  assertThat(result.getContent().toString())
      .contains("Successfully applied the imported cluster configuration on server-1");
  assertThat(result.getContent().toString())
      .contains("Successfully applied the imported cluster configuration on server-2");

  new ClusterConfig(CLUSTER).verify(server1);
  new ClusterConfig(CLUSTER, GROUP2).verify(server2);
}