use of com.emc.storageos.coordinator.client.service.CoordinatorClient in project coprhd-controller by CoprHD.
In the class TokenManagerTests, the method testLegitimateTokenKeyIdRange.
@Test
public void testLegitimateTokenKeyIdRange() throws Exception {
CoordinatorClient coordinator = new TestCoordinator();
TokenMaxLifeValuesHolder holder = new TokenMaxLifeValuesHolder();
TokenKeyGenerator tokenKeyGenerator1 = new TokenKeyGenerator();
tokenKeyGenerator1.setTokenMaxLifeValuesHolder(holder);
Base64TokenEncoder encoder1 = new Base64TokenEncoder();
encoder1.setCoordinator(coordinator);
encoder1.setTokenKeyGenerator(tokenKeyGenerator1);
encoder1.managerInit();
TokenKeysBundle bundle = tokenKeyGenerator1.readBundle();
InterVDCTokenCacheHelper cacheHelper = new InterVDCTokenCacheHelper();
cacheHelper.setCoordinator(coordinator);
cacheHelper.setMaxLifeValuesHolder(holder);
// assumption, there should be one key to start with:
Assert.assertEquals(1, bundle.getKeyEntries().size());
// POS try with the exact key that is in the bundle
Assert.assertTrue(cacheHelper.sanitizeRequestedKeyIds(bundle, bundle.getKeyEntries().get(0)));
// POS try with a key that is a little in the future
String inputKeyStr = bundle.getCurrentKeyEntry();
Long inputKey = Long.parseLong(inputKeyStr);
inputKey += 3000;
Assert.assertTrue(cacheHelper.sanitizeRequestedKeyIds(bundle, inputKey.toString()));
// NEG try with a key that is more than a rotation +20 minutes in the future
inputKeyStr = bundle.getCurrentKeyEntry();
inputKey = Long.parseLong(inputKeyStr);
inputKey += tokenKeyGenerator1.getKeyRotationIntervalInMSecs() + CassandraTokenValidator.FOREIGN_TOKEN_KEYS_BUNDLE_REFRESH_RATE_IN_MINS * 60 * 1000 + 3000;
Assert.assertFalse(cacheHelper.sanitizeRequestedKeyIds(bundle, inputKey.toString()));
// NEG try with a key that is in the past
inputKeyStr = bundle.getCurrentKeyEntry();
inputKey = Long.parseLong(inputKeyStr);
inputKey -= 1000;
Assert.assertFalse(cacheHelper.sanitizeRequestedKeyIds(bundle, inputKey.toString()));
// rotate the keys
tokenKeyGenerator1.rotateKeys();
bundle = tokenKeyGenerator1.readBundle();
// assumption, there should be 2 keys now:
Assert.assertEquals(2, bundle.getKeyEntries().size());
// POS try with the previous and current key
Assert.assertTrue(cacheHelper.sanitizeRequestedKeyIds(bundle, bundle.getKeyEntries().get(0)));
Assert.assertTrue(cacheHelper.sanitizeRequestedKeyIds(bundle, bundle.getKeyEntries().get(1)));
// POS try with a key that is a little bit in the future of the current key
inputKeyStr = bundle.getCurrentKeyEntry();
inputKey = Long.parseLong(inputKeyStr);
inputKey += 3000;
Assert.assertTrue(cacheHelper.sanitizeRequestedKeyIds(bundle, inputKey.toString()));
// NEG try with a key that is more than a rotation + 20 minutes in the future
inputKeyStr = bundle.getCurrentKeyEntry();
inputKey = Long.parseLong(inputKeyStr);
inputKey += tokenKeyGenerator1.getKeyRotationIntervalInMSecs() + CassandraTokenValidator.FOREIGN_TOKEN_KEYS_BUNDLE_REFRESH_RATE_IN_MINS * 60 * 1000 + 3000;
Assert.assertFalse(cacheHelper.sanitizeRequestedKeyIds(bundle, inputKey.toString()));
// NEG try with a key that is in the recent past of the current key,
// so it lies between the low and high bounds without matching any known key
inputKeyStr = bundle.getCurrentKeyEntry();
inputKey = Long.parseLong(inputKeyStr);
inputKey -= 1000;
Assert.assertFalse(cacheHelper.sanitizeRequestedKeyIds(bundle, inputKey.toString()));
// POS try with a key that is one rotation in the future of the current key
inputKeyStr = bundle.getCurrentKeyEntry();
inputKey = Long.parseLong(inputKeyStr);
inputKey += tokenKeyGenerator1.getKeyRotationIntervalInMSecs();
Assert.assertTrue(cacheHelper.sanitizeRequestedKeyIds(bundle, inputKey.toString()));
// NEG try with a key that is in the past of previous key
inputKeyStr = bundle.getKeyEntries().get(0);
inputKey = Long.parseLong(inputKeyStr);
inputKey -= 1000;
Assert.assertFalse(cacheHelper.sanitizeRequestedKeyIds(bundle, inputKey.toString()));
}
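Taken together, the assertions above outline the acceptance window that sanitizeRequestedKeyIds enforces: a requested key id is accepted if it exactly matches a key already in the bundle, or if it lies no further ahead of the current key than one rotation interval plus the foreign-bundle refresh slack; anything older or further in the future is rejected. A minimal sketch of that window check, with assumed names and signature rather than the actual InterVDCTokenCacheHelper code, might look like this:
// Illustrative sketch only; key ids are treated as millisecond timestamps,
// as the arithmetic in the test suggests. Not the CoprHD implementation.
static boolean isAcceptableKeyId(java.util.List<String> knownKeyIds, long currentKeyId,
        long rotationIntervalMs, long refreshSlackMs, long requestedKeyId) {
    if (knownKeyIds.contains(Long.toString(requestedKeyId))) {
        // exact match with a key this bundle already holds (current or previous)
        return true;
    }
    // a peer VDC may have rotated before we refreshed our copy of its bundle,
    // so accept key ids slightly in the future of the current key, bounded by
    // one rotation interval plus the refresh slack
    long upperBound = currentKeyId + rotationIntervalMs + refreshSlackMs;
    return requestedKeyId > currentKeyId && requestedKeyId <= upperBound;
}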
use of com.emc.storageos.coordinator.client.service.CoordinatorClient in project coprhd-controller by CoprHD.
In the class TokenManagerTests, the method testMultiNodesGlobalInits.
/**
* Test that when 15 nodes launch their globalInit concurrently, they end up
* agreeing on one unique current key id. This tests the locking in KeyGenerator.globalInit()
*
* @throws Exception
*/
@Test
public void testMultiNodesGlobalInits() throws Exception {
// For this test, we need a custom setup: several
// tokenManagers sharing a common TestCoordinator. This
// simulates shared zookeeper data on the cluster, and the different
// tokenManagers/KeyGenerators simulate the different nodes.
DbClient dbClient = getDbClient();
CoordinatorClient coordinator = new TestCoordinator();
int numThreads = 15;
ExecutorService executor = Executors.newFixedThreadPool(numThreads);
final CountDownLatch waiter = new CountDownLatch(numThreads);
final class InitTester implements Callable {
CoordinatorClient _coordinator = null;
DbClient _client = null;
KeyIdsHolder _holder = null;
public InitTester(CoordinatorClient coord, DbClient client, KeyIdsHolder holder) {
_coordinator = coord;
_client = client;
_holder = holder;
}
@Override
public Object call() throws Exception {
// create node artifacts
CassandraTokenManager tokenManager1 = new CassandraTokenManager();
Base64TokenEncoder encoder1 = new Base64TokenEncoder();
TokenKeyGenerator tokenKeyGenerator1 = new TokenKeyGenerator();
tokenManager1.setDbClient(_client);
tokenManager1.setCoordinator(_coordinator);
TokenMaxLifeValuesHolder holder = new TokenMaxLifeValuesHolder();
tokenManager1.setTokenMaxLifeValuesHolder(holder);
encoder1.setCoordinator(_coordinator);
tokenKeyGenerator1.setTokenMaxLifeValuesHolder(holder);
encoder1.setTokenKeyGenerator(tokenKeyGenerator1);
tokenManager1.setTokenEncoder(encoder1);
// synchronize all threads
waiter.countDown();
waiter.await();
// every thread calls init at the same time
encoder1.managerInit();
// then get a token and save the key for later
StorageOSUserDAO userDAO = new StorageOSUserDAO();
userDAO.setUserName("user1");
final String token = tokenManager1.getToken(userDAO);
Assert.assertNotNull(token);
TokenOnWire tw = encoder1.decode(token);
_holder.addToSet(tw.getEncryptionKeyId());
return null;
}
}
KeyIdsHolder holder = new KeyIdsHolder();
for (int i = 0; i < numThreads; i++) {
executor.submit(new InitTester(coordinator, dbClient, holder));
}
executor.shutdown();
Assert.assertTrue(executor.awaitTermination(60, TimeUnit.SECONDS));
// after all is said and done, all tokens created in all 15 threads, should have been
// created with the same key id.
Assert.assertEquals(1, holder.getSetSize());
}
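KeyIdsHolder is not shown in this excerpt; from the way the test uses it (addToSet called from 15 threads, then getSetSize), a thread-safe set wrapper is sufficient. A hypothetical minimal version, not necessarily the real class, could be:
// Hypothetical helper matching only the calls made in this test.
final class KeyIdsHolder {
    private final java.util.Set<String> keyIds =
            java.util.concurrent.ConcurrentHashMap.newKeySet();

    public void addToSet(String keyId) {
        keyIds.add(keyId);
    }

    public int getSetSize() {
        return keyIds.size();
    }
}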
use of com.emc.storageos.coordinator.client.service.CoordinatorClient in project coprhd-controller by CoprHD.
In the class TokenManagerTests, the method testBasicTokenKeysRotation.
/**
* Basic rotation functionality is tested here using overridden rotation interval values
*
* @throws Exception
*/
@Test
public void testBasicTokenKeysRotation() throws Exception {
TokenMaxLifeValuesHolder holder = new TokenMaxLifeValuesHolder();
holder.setMaxTokenIdleTimeInMins(2);
holder.setMaxTokenLifeTimeInMins(4);
holder.setTokenIdleTimeGraceInMins(1);
holder.setKeyRotationIntervalInMSecs(5000);
CassandraTokenManager tokenManager = new CassandraTokenManager();
Base64TokenEncoder encoder = new Base64TokenEncoder();
TokenKeyGenerator tokenKeyGenerator = new TokenKeyGenerator();
DbClient dbClient = getDbClient();
CoordinatorClient coordinator = new TestCoordinator();
tokenManager.setTokenMaxLifeValuesHolder(holder);
tokenManager.setDbClient(dbClient);
tokenManager.setCoordinator(coordinator);
encoder.setCoordinator(coordinator);
tokenKeyGenerator.setTokenMaxLifeValuesHolder(holder);
encoder.setTokenKeyGenerator(tokenKeyGenerator);
encoder.managerInit();
tokenManager.setTokenEncoder(encoder);
StorageOSUserDAO userDAO = new StorageOSUserDAO();
userDAO.setUserName("user1");
userDAO.setIsLocal(true);
// get a regular token
final String token = tokenManager.getToken(userDAO);
Assert.assertNotNull(token);
TokenOnWire tw1 = encoder.decode(token);
Token tokenObj = dbClient.queryObject(Token.class, tw1.getTokenId());
Assert.assertNotNull(tokenObj);
// verify token
StorageOSUserDAO gotUser = tokenManager.validateToken(token);
Assert.assertNotNull(gotUser);
// get a proxy token
final String proxyToken = tokenManager.getProxyToken(gotUser);
Assert.assertNotNull(proxyToken);
// wait 6 seconds; this next token request will trigger a rotation
Thread.sleep(6000);
final String token2 = tokenManager.getToken(userDAO);
Assert.assertNotNull(token2);
// at this point, the first token should still be usable
gotUser = tokenManager.validateToken(token);
Assert.assertNotNull(gotUser);
// wait another 6 seconds, trigger another rotation.
Thread.sleep(6000);
final String token3 = tokenManager.getToken(userDAO);
Assert.assertNotNull(token3);
// the first token's key has been rotated out of the current, then the previous slot. It is gone.
try {
gotUser = tokenManager.validateToken(token);
Assert.fail("The token should not be usable.");
} catch (UnauthorizedException ex) {
// this exception is an expected one.
Assert.assertTrue(true);
}
// after several rotations, proxy token should be unaffected
gotUser = tokenManager.validateToken(proxyToken);
Assert.assertNotNull(gotUser);
}
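The timing in this test follows from the overridden 5-second rotation interval: the first token is encoded with the initial key; the request after the first 6-second sleep rotates that key into the previous slot (so the token still validates); the request after the second sleep rotates again and evicts it entirely. A compact restatement of that two-slot window, with illustrative names only:
// Illustrative only: a token validates while the key that encoded it is still
// the current or the previous key.
static boolean isValidatable(String encodingKey, String currentKey, String previousKey) {
    return encodingKey.equals(currentKey) || encodingKey.equals(previousKey);
}
// after one rotation:  isValidatable("K0", "K1", "K0") -> true   (first token still accepted)
// after two rotations: isValidatable("K0", "K2", "K1") -> false  (first token rejected)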
use of com.emc.storageos.coordinator.client.service.CoordinatorClient in project coprhd-controller by CoprHD.
In the class TokenManagerTests, the method concurrentTokenKeyBundleMapUpdatesSingleCache.
/**
* Here, we test that in one node of a VDC (one cache), multiple threads
* can add token key bundles from 10 other VDCs at the same time
* and that the result is a consistent 10 entries in the cache
*
* @throws Exception
*/
@Test
public void concurrentTokenKeyBundleMapUpdatesSingleCache() throws Exception {
// Create 10 distinct bundles (creating a new TestCoordinator each time
// to simulate 10 vdcs)
final HashMap<String, TokenKeysBundle> verifyingMap = new HashMap<String, TokenKeysBundle>();
for (int i = 0; i < 10; i++) {
CoordinatorClient coordinator = new TestCoordinator();
TokenMaxLifeValuesHolder holder = new TokenMaxLifeValuesHolder();
TokenKeyGenerator tokenKeyGenerator1 = new TokenKeyGenerator();
tokenKeyGenerator1.setTokenMaxLifeValuesHolder(holder);
Base64TokenEncoder encoder1 = new Base64TokenEncoder();
encoder1.setCoordinator(coordinator);
encoder1.setTokenKeyGenerator(tokenKeyGenerator1);
encoder1.managerInit();
TokenKeysBundle bundle = tokenKeyGenerator1.readBundle();
verifyingMap.put(String.format("vdc%d", i), bundle);
}
// 1 db, 1 coordinator, 1 cache. Shared across 10 threads
// We are simulating the various services of a node all wanting to
// cache the same stuff at the same time
final DbClient sharedDbClient = getDbClient();
final CoordinatorClient sharedCoordinator = new TestCoordinator();
final InterVDCTokenCacheHelper sharedCacheHelper = new InterVDCTokenCacheHelper();
sharedCacheHelper.setCoordinator(sharedCoordinator);
sharedCacheHelper.setDbClient(sharedDbClient);
TokenMaxLifeValuesHolder holder = new TokenMaxLifeValuesHolder();
sharedCacheHelper.setMaxLifeValuesHolder(holder);
int numThreads = 10;
ExecutorService executor = Executors.newFixedThreadPool(numThreads);
final CountDownLatch waiter = new CountDownLatch(numThreads);
final class InitTester implements Callable {
@Override
public Object call() throws Exception {
// synchronize all threads
waiter.countDown();
waiter.await();
for (int i = 0; i < verifyingMap.size(); i++) {
String vdc = String.format("vdc%d", i);
TokenResponseArtifacts rspArtifacts = new TokenResponseArtifacts(null, null, verifyingMap.get(vdc));
sharedCacheHelper.cacheForeignTokenAndKeys(rspArtifacts, vdc);
}
return null;
}
}
for (int i = 0; i < numThreads; i++) {
executor.submit(new InitTester());
}
executor.shutdown();
Assert.assertTrue(executor.awaitTermination(30, TimeUnit.SECONDS));
if (verifyingMap.size() != sharedCacheHelper.getAllCachedBundles().size()) {
log.error("Mismatched cache and verifying map size: ");
for (Entry<String, TokenKeysBundle> e : sharedCacheHelper.getAllCachedBundles().entrySet()) {
log.error("vdc entry: {}", e.getKey());
}
}
Assert.assertEquals(verifyingMap.size(), sharedCacheHelper.getAllCachedBundles().size());
for (int i = 0; i < verifyingMap.size(); i++) {
String vdc = String.format("vdc%d", i);
TokenKeysBundle fromCache = sharedCacheHelper.getTokenKeysBundle(vdc);
Assert.assertNotNull(fromCache);
Assert.assertTrue(fromCache.getKeyEntries().size() == verifyingMap.get(vdc).getKeyEntries().size() && fromCache.getKeyEntries().get(0).equals(verifyingMap.get(vdc).getKeyEntries().get(0)));
}
}
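The test only requires that concurrent, identical insertions collapse to one entry per VDC. Purely as an illustration of why that end state is stable under a map keyed by VDC short id, and not as the actual InterVDCTokenCacheHelper code, a concurrent map gives the same result regardless of thread interleaving:
// Illustration only: last-writer-wins per VDC key; 10 threads writing the same
// 10 entries still leave exactly 10 entries behind. TokenKeysBundle is the
// project type used elsewhere in these tests.
final class BundleCacheSketch {
    private final java.util.concurrent.ConcurrentHashMap<String, TokenKeysBundle> cache =
            new java.util.concurrent.ConcurrentHashMap<>();

    public void cacheBundle(String vdcShortId, TokenKeysBundle bundle) {
        cache.put(vdcShortId, bundle);
    }

    public int size() {
        return cache.size();
    }
}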
use of com.emc.storageos.coordinator.client.service.CoordinatorClient in project coprhd-controller by CoprHD.
In the class TokenManagerTests, the method testCrossVDCTokenValidation.
/**
* testCrossVDCTokenValidation
* Tests that tokens from VDC2 and VDC3 can both be validated in VDC1,
* given that VDC1's cache has these tokens and keys available.
*
* @throws Exception
*/
@Test
public void testCrossVDCTokenValidation() throws Exception {
commonDefaultSetupForSingleNodeTests();
TokenMaxLifeValuesHolder holder = new TokenMaxLifeValuesHolder();
// VDC1 (validator)
CoordinatorClient coordinatorVDC1 = new TestCoordinator();
InterVDCTokenCacheHelper cacheHelperVDC1 = new InterVDCTokenCacheHelper();
cacheHelperVDC1.setCoordinator(coordinatorVDC1);
cacheHelperVDC1.setDbClient(_dbClient);
cacheHelperVDC1.setMaxLifeValuesHolder(holder);
TokenKeyGenerator tokenKeyGeneratorVDC1 = new TokenKeyGenerator();
tokenKeyGeneratorVDC1.setTokenMaxLifeValuesHolder(holder);
Base64TokenEncoder encoderVDC1 = new Base64TokenEncoder();
encoderVDC1.setCoordinator(coordinatorVDC1);
encoderVDC1.setInterVDCTokenCacheHelper(cacheHelperVDC1);
encoderVDC1.setTokenKeyGenerator(tokenKeyGeneratorVDC1);
encoderVDC1.managerInit();
CassandraTokenManager tokenManagerVDC1 = new CassandraTokenManager();
tokenManagerVDC1.setDbClient(_dbClient);
tokenManagerVDC1.setCoordinator(coordinatorVDC1);
tokenManagerVDC1.setInterVDCTokenCacheHelper(cacheHelperVDC1);
tokenManagerVDC1.setTokenEncoder(encoderVDC1);
tokenManagerVDC1.setTokenMaxLifeValuesHolder(holder);
// VDC2 (creator of token)
CoordinatorClient coordinatorVDC2 = new TestCoordinator();
TokenKeyGenerator tokenKeyGeneratorVDC2 = new TokenKeyGenerator();
tokenKeyGeneratorVDC2.setTokenMaxLifeValuesHolder(holder);
Base64TokenEncoder encoderVDC2 = new Base64TokenEncoder();
encoderVDC2.setCoordinator(coordinatorVDC2);
encoderVDC2.setTokenKeyGenerator(tokenKeyGeneratorVDC2);
encoderVDC2.managerInit();
CassandraTokenManager tokenManagerVDC2 = new CassandraTokenManager();
tokenManagerVDC2.setDbClient(_dbClient);
tokenManagerVDC2.setCoordinator(coordinatorVDC2);
tokenManagerVDC2.setTokenEncoder(encoderVDC2);
tokenManagerVDC2.setTokenMaxLifeValuesHolder(holder);
// VDC3 (creator of token)
CoordinatorClient coordinatorVDC3 = new TestCoordinator();
TokenKeyGenerator tokenKeyGeneratorVDC3 = new TokenKeyGenerator();
tokenKeyGeneratorVDC3.setTokenMaxLifeValuesHolder(holder);
Base64TokenEncoder encoderVDC3 = new Base64TokenEncoder();
encoderVDC3.setCoordinator(coordinatorVDC3);
encoderVDC3.setTokenKeyGenerator(tokenKeyGeneratorVDC3);
encoderVDC3.managerInit();
CassandraTokenManager tokenManagerVDC3 = new CassandraTokenManager();
tokenManagerVDC3.setDbClient(_dbClient);
tokenManagerVDC3.setCoordinator(coordinatorVDC3);
tokenManagerVDC3.setTokenEncoder(encoderVDC3);
tokenManagerVDC3.setTokenMaxLifeValuesHolder(holder);
// VDC2 creates a token
// set VdcUtil's local vdc id to vdc2 so the resulting token is identified as such
VirtualDataCenter localVdc = VdcUtil.getLocalVdc();
localVdc.setShortId("vdc2");
_dbClient.persistObject(localVdc);
VdcUtil.invalidateVdcUrnCache();
StorageOSUserDAO userDAOVDC2 = new StorageOSUserDAO();
userDAOVDC2.setUserName("user1@domain.com");
userDAOVDC2.setIsLocal(false);
String tokenVDC2 = tokenManagerVDC2.getToken(userDAOVDC2);
Assert.assertNotNull(tokenVDC2);
TokenOnWire twVDC2 = encoderVDC2.decode(tokenVDC2);
final Token tokenObjVDC2 = _dbClient.queryObject(Token.class, twVDC2.getTokenId());
Assert.assertNotNull(tokenObjVDC2);
URI userIdVDC2 = tokenObjVDC2.getUserId();
Assert.assertNotNull(userIdVDC2);
final StorageOSUserDAO gotUserVDC2 = tokenManagerVDC2.validateToken(tokenVDC2);
Assert.assertNotNull(gotUserVDC2);
// because we are running this on the same "db" as opposed to 2 different VDCs,
// there will be a conflict when caching the token, since the original is already there
// with the same id. So we are changing the token id and user record id for this
// purpose.
tokenObjVDC2.setId(URIUtil.createId(Token.class));
gotUserVDC2.setId(URIUtil.createId(StorageOSUserDAO.class));
tokenObjVDC2.setUserId(gotUserVDC2.getId());
TokenOnWire tokenToBeCachedVDC2 = TokenOnWire.createTokenOnWire(tokenObjVDC2);
// this re-encoded alternate token is the token that will be cached and validated
// from cache.
final String newEncodedVDC2 = encoderVDC2.encode(tokenToBeCachedVDC2);
// VDC3 creates a token
// set VdcUtil's local vdc id to vdc3 so the resulting token is identified as such
localVdc.setShortId("vdc3");
_dbClient.persistObject(localVdc);
VdcUtil.invalidateVdcUrnCache();
StorageOSUserDAO userDAOVDC3 = new StorageOSUserDAO();
userDAOVDC3.setUserName("user2@domain.com");
userDAOVDC3.setIsLocal(false);
String tokenVDC3 = tokenManagerVDC3.getToken(userDAOVDC3);
Assert.assertNotNull(tokenVDC3);
TokenOnWire twVDC3 = encoderVDC3.decode(tokenVDC3);
final Token tokenObjVDC3 = _dbClient.queryObject(Token.class, twVDC3.getTokenId());
Assert.assertNotNull(tokenObjVDC3);
URI userIdVDC3 = tokenObjVDC3.getUserId();
Assert.assertNotNull(userIdVDC3);
final StorageOSUserDAO gotUserVDC3 = tokenManagerVDC3.validateToken(tokenVDC3);
Assert.assertNotNull(gotUserVDC3);
tokenObjVDC3.setId(URIUtil.createId(Token.class));
gotUserVDC3.setId(URIUtil.createId(StorageOSUserDAO.class));
tokenObjVDC3.setUserId(gotUserVDC3.getId());
TokenOnWire tokenToBeCachedVDC3 = TokenOnWire.createTokenOnWire(tokenObjVDC3);
// this re-encoded alternate token is the token that will be cached and validated
// from cache.
final String newEncodedVDC3 = encoderVDC3.encode(tokenToBeCachedVDC3);
// Cache VDC2 &3's tokens and keys in VDC1.cache
TokenKeysBundle bundleVDC2 = tokenKeyGeneratorVDC2.readBundle();
TokenKeysBundle bundleVDC3 = tokenKeyGeneratorVDC3.readBundle();
TokenResponseArtifacts artifactsVDC2 = new TokenResponseArtifacts(gotUserVDC2, tokenObjVDC2, bundleVDC2);
TokenResponseArtifacts artifactsVDC3 = new TokenResponseArtifacts(gotUserVDC3, tokenObjVDC3, bundleVDC3);
cacheHelperVDC1.cacheForeignTokenAndKeys(artifactsVDC2, "vdc2");
cacheHelperVDC1.cacheForeignTokenAndKeys(artifactsVDC3, "vdc3");
Assert.assertEquals(2, cacheHelperVDC1.getAllCachedBundles().size());
// Validate both tokens using VDC1
// set VdcUtil's local vdc id to vdc1 so validation now runs as vdc1
localVdc.setShortId("vdc1");
_dbClient.persistObject(localVdc);
VdcUtil.invalidateVdcUrnCache();
StorageOSUserDAO userValidate = tokenManagerVDC1.validateToken(newEncodedVDC2);
Assert.assertNotNull(userValidate);
Assert.assertEquals(userValidate.getUserName(), userDAOVDC2.getUserName());
StorageOSUserDAO userValidate2 = tokenManagerVDC1.validateToken(newEncodedVDC3);
Assert.assertNotNull(userValidate2);
Assert.assertEquals(userValidate2.getUserName(), userDAOVDC3.getUserName());
}
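The flow this test exercises, stated compactly: VDC1 decodes an incoming token, notices the key id belongs to another VDC, and validates against the foreign bundle previously cached by cacheForeignTokenAndKeys rather than its own keys. A hedged sketch of that dispatch follows; everything in it is illustrative, and only InterVDCTokenCacheHelper.getTokenKeysBundle(vdc) is a call taken from the tests above.
// Hedged sketch of the cross-VDC dispatch implied by this test; the abstract
// hooks are hypothetical helpers, not CoprHD APIs.
abstract class CrossVdcValidatorSketch {
    private final String localVdcShortId;
    private final InterVDCTokenCacheHelper cacheHelper;

    CrossVdcValidatorSketch(String localVdcShortId, InterVDCTokenCacheHelper cacheHelper) {
        this.localVdcShortId = localVdcShortId;
        this.cacheHelper = cacheHelper;
    }

    StorageOSUserDAO validate(String encodedToken) {
        String mintingVdc = mintingVdcOf(encodedToken);
        if (mintingVdc.equals(localVdcShortId)) {
            return validateWithLocalKeys(encodedToken);
        }
        TokenKeysBundle foreign = cacheHelper.getTokenKeysBundle(mintingVdc);
        if (foreign == null) {
            // the real code would have to fetch the keys from the remote VDC first
            throw new IllegalStateException("no cached token keys for " + mintingVdc);
        }
        return validateWithForeignKeys(encodedToken, foreign);
    }

    // hypothetical hooks, left abstract on purpose
    abstract String mintingVdcOf(String encodedToken);
    abstract StorageOSUserDAO validateWithLocalKeys(String encodedToken);
    abstract StorageOSUserDAO validateWithForeignKeys(String encodedToken, TokenKeysBundle keys);
}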