Example usage of com.emc.storageos.coordinator.client.service.CoordinatorClient in the coprhd-controller project (CoprHD): class TokenManagerTests, method testMultiNodesCacheUpdates.
/**
 * Tests out-of-sync cache behavior with multiple nodes.
 * <p>
 * Two independent token managers / key generators share a single
 * TestCoordinator, simulating two cluster nodes whose local key caches can
 * drift apart while the zookeeper-backed data stays shared.
 *
 * @throws Exception on setup or token-manager failure
 */
@Test
public void testMultiNodesCacheUpdates() throws Exception {
    // For this test, we need our custom setup, with several
    // tokenManagers sharing a common TestCoordinator. This will
    // simulate shared zookeeper data on the cluster. And the different
    // tokenManagers/KeyGenerators will simulate the different nodes with
    // out of sync caches.
    final long ROTATION_INTERVAL_MSECS = 5000;
    DbClient dbClient = getDbClient();
    CoordinatorClient coordinator = new TestCoordinator();
    // Node 1.  The rotation interval means that once a token is created,
    // if the next token being requested happens 5 seconds later or more,
    // the keys will rotate.  This is to test the built-in logic that
    // triggers rotation.
    Base64TokenEncoder encoder1 = new Base64TokenEncoder();
    TokenKeyGenerator tokenKeyGenerator1 = new TokenKeyGenerator();
    CassandraTokenManager tokenManager1 =
            buildNode(dbClient, coordinator, encoder1, tokenKeyGenerator1, ROTATION_INTERVAL_MSECS);
    // Node 2, wired identically against the same coordinator.
    Base64TokenEncoder encoder2 = new Base64TokenEncoder();
    TokenKeyGenerator tokenKeyGenerator2 = new TokenKeyGenerator();
    CassandraTokenManager tokenManager2 =
            buildNode(dbClient, coordinator, encoder2, tokenKeyGenerator2, ROTATION_INTERVAL_MSECS);
    // We do not need to use multi threads for these tests. We are using
    // a determined sequence of events to cause caches to be out of sync and
    // see how the keyGenerators react.
    // SCENARIO 1 -----------------------------------------------------------------
    // Cause a rotation on node1, then go with that token to node 2 to validate the
    // token. Node2 should update the cache automatically to find the new key and
    // validate the token successfully.
    resetCoordinatorData(coordinator, tokenManager1, tokenManager2, encoder1, encoder2, tokenKeyGenerator1, tokenKeyGenerator2);
    // cause the rotation
    Thread.sleep((ROTATION_INTERVAL_MSECS) + 1000);
    StorageOSUserDAO userDAO = new StorageOSUserDAO();
    userDAO.setUserName("user1");
    // get a new token from node 1 (it will be encoded with a new key)
    final String token3 = tokenManager1.getToken(userDAO);
    Assert.assertNotNull(token3);
    // validate it on node 2
    StorageOSUserDAO gotUser = tokenManager2.validateToken(token3);
    Assert.assertNotNull(gotUser);
    // SCENARIO 2 -----------------------------------------------------------------
    // Create a token with the current key on node 1. Cause 2 rotations on node1, then go with that
    // token to node 2 to validate. At that point, node 2 still has the token's key in cache. But
    // that key is now 2 rotations old and should not be accepted. We want to test that node 2
    // appropriately updates its cache, then refuses the key, rejects the token.
    // reset coordinator data, start from scratch with fresh keys.
    resetCoordinatorData(coordinator, tokenManager1, tokenManager2, encoder1, encoder2, tokenKeyGenerator1, tokenKeyGenerator2);
    final String token4 = tokenManager1.getToken(userDAO);
    Assert.assertNotNull(token4);
    Thread.sleep((ROTATION_INTERVAL_MSECS + 1000));
    final String token5 = tokenManager1.getToken(userDAO);
    Assert.assertNotNull(token5);
    Thread.sleep((ROTATION_INTERVAL_MSECS + 1000));
    final String token6 = tokenManager1.getToken(userDAO);
    Assert.assertNotNull(token6);
    try {
        gotUser = tokenManager2.validateToken(token4);
        Assert.fail("The token validation should fail because of the token rotation.");
    } catch (UnauthorizedException ex) {
        // This exception is an expected one.
        Assert.assertTrue(true);
    }
    // SCENARIO 3 -----------------------------------------------------------------
    // Cause a rotation on node 1. Then go to node 2 to get a new token. Node 2 should realize
    // that the key it is about to use for signing is not the latest and refresh its cache. It should
    // not however cause a rotation, because it already just happened.
    resetCoordinatorData(coordinator, tokenManager1, tokenManager2, encoder1, encoder2, tokenKeyGenerator1, tokenKeyGenerator2);
    // cause a rotation
    Thread.sleep((ROTATION_INTERVAL_MSECS + 1000));
    final String token7 = tokenManager1.getToken(userDAO);
    Assert.assertNotNull(token7);
    TokenOnWire tw7 = encoder1.decode(token7);
    String key7 = tw7.getEncryptionKeyId();
    final String token8 = tokenManager2.getToken(userDAO);
    Assert.assertNotNull(token8);
    TokenOnWire tw8 = encoder1.decode(token8);
    String key8 = tw8.getEncryptionKeyId();
    // see that the key id that was used to encode both tokens are the same.
    Assert.assertEquals(key7, key8);
}

/**
 * Wires one simulated cluster node: creates a token manager and life-values
 * holder, connects the supplied encoder and key generator to the shared
 * coordinator/db client, and initializes the encoder.
 *
 * @param dbClient shared db client
 * @param coordinator shared coordinator (simulated zookeeper data)
 * @param encoder this node's token encoder (initialized here)
 * @param tokenKeyGenerator this node's key generator
 * @param rotationIntervalMsecs key rotation interval for this node
 * @return the fully wired token manager for the node
 * @throws Exception if encoder initialization fails
 */
private CassandraTokenManager buildNode(DbClient dbClient, CoordinatorClient coordinator,
        Base64TokenEncoder encoder, TokenKeyGenerator tokenKeyGenerator,
        long rotationIntervalMsecs) throws Exception {
    CassandraTokenManager tokenManager = new CassandraTokenManager();
    TokenMaxLifeValuesHolder holder = new TokenMaxLifeValuesHolder();
    holder.setKeyRotationIntervalInMSecs(rotationIntervalMsecs);
    tokenManager.setTokenMaxLifeValuesHolder(holder);
    tokenManager.setDbClient(dbClient);
    tokenManager.setCoordinator(coordinator);
    encoder.setCoordinator(coordinator);
    tokenKeyGenerator.setTokenMaxLifeValuesHolder(holder);
    encoder.setTokenKeyGenerator(tokenKeyGenerator);
    encoder.managerInit();
    tokenManager.setTokenEncoder(encoder);
    return tokenManager;
}
Example usage of com.emc.storageos.coordinator.client.service.CoordinatorClient in the coprhd-controller project (CoprHD): class VersionCheckerTest, method setup.
@BeforeClass
public static void setup() {
    // Build a mocked CoordinatorClient that always serves a fixed set of
    // firmware/OS version properties for the version checker under test.
    CoordinatorClient coordinator = createMock(CoordinatorClient.class);
    String[][] versionEntries = {
            { "controller_brocade_firmware_version", "11.2.1" },
            { "controller_mds_firmware_version", "5.0(1a)" },
            { "controller_rp_firmware_version", "4.1" },
            { "controller_vmax_firmware_version", "5876.221" },
            { "controller_vnxblock_firmware_version", "05.32" },
            { "controller_vnxfile_firmware_version", "7.1.71" },
            { "controller_isilon_firmware_version", "7.0.2.0" },
            { "controller_netapp_firmware_version", "8.1.1" },
            { "controller_vplex_firmware_version", "5.2" },
            { "controller_smis_provider_version", "4.6.1.1" },
            { "compute_windows_version", "6.0.6002" },
            { "compute_suse_linux_version", "11" },
            { "compute_redhat_linux_version", "5.9" },
            { "compute_hpux_version", "11.31" }
    };
    Map<String, String> versionProps = new HashMap<String, String>();
    for (String[] entry : versionEntries) {
        versionProps.put(entry[0], entry[1]);
    }
    PropertyInfo propertyInfo = new PropertyInfo(versionProps);
    // Any number of getPropertyInfo() calls returns the same fixture.
    EasyMock.expect(coordinator.getPropertyInfo()).andReturn(propertyInfo).anyTimes();
    EasyMock.replay(coordinator);
    new VersionChecker().setCoordinator(coordinator);
}
Example usage of com.emc.storageos.coordinator.client.service.CoordinatorClient in the coprhd-controller project (CoprHD): class GeoDbSvcStartupTest, method checkDbConfig.
@Test
public void checkDbConfig() throws Exception {
    // Verify the three geodbsvc configuration records written during startup:
    // dbconfig, dbconfig/global, and the versioned dbconfig.
    CoordinatorClient coordinator = runner.getCoordinator();

    // dbconfig: the service must have joined the cluster.
    String configKind = coordinator.getDbConfigPath(Constants.GEODBSVC_NAME);
    Configuration cfg = coordinator.queryConfiguration(coordinator.getSiteId(), configKind, DbSvcRunner.GEOSVC_ID);
    Assert.assertNotNull("No dbconfig found", cfg);
    Assert.assertTrue("dbconfig joined flag is false",
            "true".equalsIgnoreCase(cfg.getConfig(DbConfigConstants.JOINED)));

    // dbconfig/global: the recorded schema version must match the runner's.
    cfg = coordinator.queryConfiguration(coordinator.getSiteId(), configKind, Constants.GLOBAL_ID);
    Assert.assertNotNull("No dbconfig/global found", cfg);
    Assert.assertTrue("Unexpected dbconfig/global schemaversion",
            DbSvcRunner.SVC_VERSION.equalsIgnoreCase(cfg.getConfig(Constants.SCHEMA_VERSION)));

    // versioned dbconfig: initialization must have completed.
    configKind = coordinator.getVersionedDbConfigPath(Constants.GEODBSVC_NAME, DbSvcRunner.SVC_VERSION);
    cfg = coordinator.queryConfiguration(coordinator.getSiteId(), configKind, DbSvcRunner.GEOSVC_ID);
    Assert.assertNotNull("No versioned dbconfig found", cfg);
    Assert.assertTrue("Unexpected versioned dbconfig initdone",
            "true".equalsIgnoreCase(cfg.getConfig(DbConfigConstants.INIT_DONE)));
    log.info("Check db config OK");
}
Example usage of com.emc.storageos.coordinator.client.service.CoordinatorClient in the coprhd-controller project (CoprHD): class Maintenance, method fail.
public static void fail(String targetUrl) {
    // Pull the recorded upgrade-failure details out of coordinator runtime
    // state and hand them to the failure page template.
    CoordinatorClient coordinator = StorageOsPlugin.getInstance().getCoordinatorClient();
    UpgradeFailureInfo failure = coordinator.queryRuntimeState(Constants.UPGRADE_FAILURE_INFO, UpgradeFailureInfo.class);
    Logger.info("UpgradeFailureInfo=%s", failure);
    render(failure);
}
Example usage of com.emc.storageos.coordinator.client.service.CoordinatorClient in the coprhd-controller project (CoprHD): class Hosts, method list.
public static void list() {
    // Standard tenant-scoped listing page setup.
    TenantSelector.addRenderArgs();
    renderArgs.put("dataTable", new HostDataTable());
    // Expose the per-tenant host resource limit (a system property) to the view.
    PropertyInfo props = StorageOsPlugin.getInstance().getCoordinatorClient().getPropertyInfo();
    renderArgs.put(Constants.RESOURCE_LIMIT_TENANT_HOSTS, props.getProperty(Constants.RESOURCE_LIMIT_TENANT_HOSTS));
    render();
}
Aggregations