Usage of org.voltdb.compiler.VoltProjectBuilder.UserInfo in the VoltDB project.
From the class TestSecuritySuite, method suite():
/**
 * Build a list of the tests that will be run when TestSecuritySuite gets run by JUnit.
 * Use helper classes that are part of the RegressionSuite framework.
 * All tests here run against a single one-site/one-partition local JNI backend
 * configuration (no hsql or multi-partition configurations are added).
 *
 * @return The TestSuite containing all the tests to be run.
 */
public static Test suite() {
    // the suite made here will all be using the tests from this class
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestSecuritySuite.class);

    // build up a project builder for the workload
    TPCCProjectBuilder project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();

    // PROCEDURES[0]: callable by no role; PROCEDURES[1]: group1 only;
    // PROCEDURES[2]: group1 and group2.
    ArrayList<ProcedureInfo> procedures = new ArrayList<>();
    procedures.add(new ProcedureInfo(new String[0], PROCEDURES[0]));
    procedures.add(new ProcedureInfo(new String[] { "group1" }, PROCEDURES[1]));
    procedures.add(new ProcedureInfo(new String[] { "group1", "group2" }, PROCEDURES[2]));
    project.addProcedures(procedures);

    // NOTE(review): the role names referenced here deliberately differ in case from
    // the roles declared below (e.g. "grouP1" vs "Group1") — presumably role lookup
    // is case-insensitive; confirm before relying on that.
    UserInfo[] users = new UserInfo[] {
        new UserInfo("user1", "password", new String[] { "grouP1" }),
        new UserInfo("user2", "password", new String[] { "grouP2" }),
        new UserInfo("user3", "password", new String[] { "grouP3" }),
        new UserInfo("user4", "password", new String[] { "AdMINISTRATOR" }),
        new UserInfo("userWithDefaultUserPerm", "password", new String[] { "User" }),
        new UserInfo("userWithAllProc", "password", new String[] { "GroupWithAllProcPerm" }),
        new UserInfo("userWithDefaultProcPerm", "password", new String[] { "groupWithDefaultProcPerm" }),
        new UserInfo("userWithoutDefaultProcPerm", "password", new String[] { "groupWiThoutDefaultProcPerm" }),
        new UserInfo("userWithDefaultProcReadPerm", "password", new String[] { "groupWiThDefaultProcReadPerm" })
    };
    project.addUsers(users);

    RoleInfo[] groups = new RoleInfo[] {
        new RoleInfo("Group1", false, false, false, false, false, false),
        new RoleInfo("Group2", true, false, false, false, false, false),
        new RoleInfo("Group3", true, false, false, false, false, false),
        new RoleInfo("GroupWithDefaultUserPerm", true, false, false, false, false, true),
        new RoleInfo("GroupWithAllProcPerm", false, false, false, false, false, true),
        new RoleInfo("GroupWithDefaultProcPerm", false, false, false, true, false, false),
        new RoleInfo("GroupWithoutDefaultProcPerm", false, false, false, false, false, false),
        new RoleInfo("GroupWithDefaultProcReadPerm", false, false, false, false, true, false)
    };
    project.addRoles(groups);

    // suite defines its own ADMINISTRATOR user
    project.setSecurityEnabled(true, false);

    // export disabled in community
    if (MiscUtils.isPro()) {
        project.addExport(true);
    }

    /////////////////////////////////////////////////////////////
    // CONFIG #1: 1 Local Site/Partitions running on JNI backend
    /////////////////////////////////////////////////////////////

    // get a server config for the native backend with one sites/partitions
    VoltServerConfig config =
            new LocalCluster("security-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI);
    // build the jarfile; abort the whole suite if the catalog fails to compile
    if (!config.compile(project)) {
        fail();
    }
    // add this config to the set of tests to run
    builder.addServerConfig(config, false);

    return builder;
}
Usage of org.voltdb.compiler.VoltProjectBuilder.UserInfo in the VoltDB project.
From the class TestVoltDB, method testCompileDeploymentAddUserToNonExistentGroup():
/**
 * ENG-7088: Validate that deployment file users that want to belong to roles which
 * don't yet exist don't render the deployment file invalid.
 */
@Test
public void testCompileDeploymentAddUserToNonExistentGroup() throws IOException {
    TPCCProjectBuilder builder = new TPCCProjectBuilder();
    builder.addDefaultSchema();
    builder.addDefaultPartitioning();
    builder.addDefaultProcedures();
    builder.setSecurityEnabled(true, true);

    // Only "foo" and "blah" are declared as roles ...
    RoleInfo[] declaredRoles = {
        new RoleInfo("foo", false, false, false, false, false, false),
        new RoleInfo("blah", false, false, false, false, false, false)
    };
    builder.addRoles(declaredRoles);

    // ... while two of the users reference the undeclared role "bar".
    UserInfo[] deploymentUsers = {
        new UserInfo("john", "hugg", new String[] { "foo" }),
        new UserInfo("ryan", "betts", new String[] { "foo", "bar" }),
        new UserInfo("ariel", "weisberg", new String[] { "bar" })
    };
    builder.addUsers(deploymentUsers);

    // Compile the project into a jar inside the build directory.
    String catalogJar = BuildDirectoryUtils.getBuildDirectoryPath()
            + File.separator + "compile-deployment.jar";
    assertTrue("Project failed to compile", builder.compile(catalogJar));

    // Round-trip the catalog through its serialized form.
    byte[] catalogBytes = MiscUtils.fileToBytes(new File(catalogJar));
    String serializedCatalog = CatalogUtil.getSerializedCatalogStringFromJar(
            CatalogUtil.loadAndUpgradeCatalogFromJar(catalogBytes, false).getFirst());
    assertNotNull("Error loading catalog from jar", serializedCatalog);

    Catalog catalog = new Catalog();
    catalog.execute(serializedCatalog);

    // this should succeed even though group "bar" does not exist
    String deploymentError =
            CatalogUtil.compileDeployment(catalog, builder.getPathToDeployment(), true);
    assertTrue("Deployment file should have been able to validate", deploymentError == null);
}
Usage of org.voltdb.compiler.VoltProjectBuilder.UserInfo in the VoltDB project.
From the class TestCatalogDiffs, method testChangeSecurityProvider():
/**
 * Verify that changing only the security provider (hash -> kerberos) produces
 * a valid, non-restart catalog diff.
 */
public void testChangeSecurityProvider() throws IOException {
    // Two roles with identical broad permissions, one user in each.
    RoleInfo[] roles = {
        new RoleInfo("group1", true, true, true, true, false, false),
        new RoleInfo("group2", true, true, true, true, false, false)
    };
    UserInfo[] users = {
        new UserInfo("user1", "password", new String[] { "group1" }),
        new UserInfo("user2", "password", new String[] { "group2" })
    };

    String originalJar = compileWithGroups(true, "hash", roles, users, "base", BASEPROCS);
    Catalog catOriginal = catalogForJar(originalJar);

    // Security stays enabled; only the provider switches from "hash" to "kerberos".
    String updatedJar = compileWithGroups(true, "kerberos", roles, users, "base", BASEPROCS);
    Catalog catUpdated = catalogForJar(updatedJar);

    verifyDiff(catOriginal, catUpdated, false);
}
Usage of org.voltdb.compiler.VoltProjectBuilder.UserInfo in the VoltDB project.
From the class TestCatalogUpdateSuite, method suite():
/**
 * Build a list of the tests that will be run when TestCatalogUpdateSuite gets run
 * by JUnit. Use helper classes that are part of the RegressionSuite framework.
 * One base server configuration is registered; the remaining compiles below
 * produce the "delta" catalog jars/deployments that the update tests switch to.
 *
 * @return The TestSuite containing all the tests to be run.
 * @throws Exception
 */
public static Test suite() throws Exception {
    TheHashinator.initialize(TheHashinator.getConfiguredHashinatorClass(), TheHashinator.getConfigureBytes(2));

    // the suite made here will all be using the tests from this class
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestCatalogUpdateSuite.class);

    /////////////////////////////////////////////////////////////
    // CONFIG #1: 1 Local Site/Partitions running on JNI backend
    /////////////////////////////////////////////////////////////

    // get a server config for the native backend with one sites/partitions
    VoltServerConfig config = new LocalCluster("catalogupdate-cluster-base.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    // Catalog upgrade test(s) sporadically fail if there's a local server because
    // a file pipe isn't available for grepping local server output.
    ((LocalCluster) config).setHasLocalServer(true);

    // build up a project builder for the workload
    TPCCProjectBuilder project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(BASEPROCS);
    // build the jarfile
    boolean basecompile = config.compile(project);
    assertTrue(basecompile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-base.xml"));

    // add this config to the set of tests to run
    builder.addServerConfig(config, false);

    /////////////////////////////////////////////////////////////
    // DELTA CATALOGS FOR TESTING
    /////////////////////////////////////////////////////////////

    // As catalogupdate-cluster-base but with security enabled. This requires users and groups..
    // We piggy-back the heartbeat change here.
    RoleInfo[] groups = new RoleInfo[] { new RoleInfo("group1", false, false, true, false, false, false) };
    UserInfo[] users = new UserInfo[] { new UserInfo("user1", "userpass1", new String[] { "group1" }) };
    ProcedureInfo procInfo = new ProcedureInfo(new String[] { "group1" }, InsertNewOrder.class);
    config = new LocalCluster("catalogupdate-cluster-base-secure.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addUsers(users);
    project.addRoles(groups);
    project.addProcedures(procInfo);
    project.setSecurityEnabled(true, true);
    project.setDeadHostTimeout(6000);
    boolean compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-base-secure.xml"));

    // Adds tables (plus elastic settings) on top of the base schema.
    config = new LocalCluster("catalogupdate-cluster-addtables.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addSchema(TestCatalogUpdateSuite.class.getResource("testorderby-ddl.sql").getPath());
    project.addDefaultPartitioning();
    project.addProcedures(BASEPROCS_OPROCS);
    project.setElasticDuration(100);
    project.setElasticThroughput(50);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-addtables.xml"));

    // as above but also with a materialized view added to O1
    try {
        config = new LocalCluster("catalogupdate-cluster-addtableswithmatview.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
        project = new TPCCProjectBuilder();
        project.addDefaultSchema();
        project.addSchema(TestCatalogUpdateSuite.class.getResource("testorderby-ddl.sql").getPath());
        project.addLiteralSchema("CREATE VIEW MATVIEW_O1(C1, C2, NUM) AS SELECT A_INT, PKEY, COUNT(*) FROM O1 GROUP BY A_INT, PKEY;");
        project.addDefaultPartitioning();
        project.addProcedures(BASEPROCS_OPROCS);
        compile = config.compile(project);
        assertTrue(compile);
        MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-addtableswithmatview.xml"));
    } catch (IOException e) {
        // Preserve the cause in the failure message instead of failing silently.
        fail("Building catalogupdate-cluster-addtableswithmatview failed: " + e);
    }

    // Adds plain and unique indexes.
    config = new LocalCluster("catalogupdate-cluster-addindex.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addLiteralSchema("CREATE INDEX NEWINDEX ON NEW_ORDER (NO_O_ID);");
    // history is good because this new index is the only one (no pkey)
    project.addLiteralSchema("CREATE INDEX NEWINDEX2 ON HISTORY (H_C_ID);");
    // unique index
    project.addLiteralSchema("CREATE UNIQUE INDEX NEWINDEX3 ON STOCK (S_I_ID, S_W_ID, S_QUANTITY);");
    project.addDefaultPartitioning();
    project.addProcedures(BASEPROCS);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-addindex.xml"));

    // Adds expression-based indexes.
    config = new LocalCluster("catalogupdate-cluster-addexpressindex.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addLiteralSchema("CREATE INDEX NEWEXPRESSINDEX ON NEW_ORDER ((NO_O_ID+NO_O_ID)-NO_O_ID);");
    // history is good because this new index is the only one (no pkey)
    project.addLiteralSchema("CREATE INDEX NEWEXPRESSINDEX2 ON HISTORY ((H_C_ID+H_C_ID)-H_C_ID);");
    // unique index
    // This needs to wait until the test for unique index coverage for indexed expressions can parse out any simple column expressions
    // and discover a unique index on some subset.
    //TODO: project.addLiteralSchema("CREATE UNIQUE INDEX NEWEXPRESSINDEX3 ON STOCK (S_I_ID, S_W_ID, S_QUANTITY+S_QUANTITY-S_QUANTITY);");
    project.addDefaultPartitioning();
    project.addProcedures(BASEPROCS);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-addexpressindex.xml"));

    // Expanded procedure set.
    config = new LocalCluster("catalogupdate-cluster-expanded.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(EXPANDEDPROCS);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-expanded.xml"));

    // Adds an ad-hoc statement procedure.
    config = new LocalCluster("catalogupdate-cluster-adhocproc.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addStmtProcedure("adhocproc1", "SELECT * from WAREHOUSE");
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-adhocproc.xml"));

    // Adds a table via literal DDL.
    config = new LocalCluster("catalogupdate-cluster-adhocschema.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addLiteralSchema("CREATE TABLE CATALOG_MODE_DDL_TEST (fld1 INTEGER NOT NULL);");
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-adhocschema.xml"));

    // Conflicting procedure set.
    config = new LocalCluster("catalogupdate-cluster-conflict.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(CONFLICTPROCS);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-conflict.xml"));

    // Large procedure set.
    config = new LocalCluster("catalogupdate-cluster-many.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(SOMANYPROCS);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-many.xml"));

    // A catalog change that enables snapshots
    config = new LocalCluster("catalogupdate-cluster-enable_snapshot.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(BASEPROCS);
    project.setSnapshotSettings("1s", 3, "/tmp/snapshotdir1", "foo1");
    // build the jarfile
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-enable_snapshot.xml"));

    //Another catalog change to modify the schedule
    config = new LocalCluster("catalogupdate-cluster-change_snapshot.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(BASEPROCS);
    project.setSnapshotSettings("1s", 3, "/tmp/snapshotdir2", "foo2");
    // build the jarfile
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-change_snapshot.xml"));

    //Another catalog change to modify the schedule, pointing at a directory that does not exist
    config = new LocalCluster("catalogupdate-cluster-change_snapshot_dir_not_exist.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(BASEPROCS);
    project.setSnapshotSettings("1s", 3, "/tmp/snapshotdirasda2", "foo2");
    // build the jarfile
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-change_snapshot_dir_not_exist.xml"));

    //A huge catalog update to test size limits
    config = new LocalCluster("catalogupdate-cluster-huge.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    long t = System.currentTimeMillis();
    String hugeSchemaURL = generateRandomDDL("catalogupdate-cluster-huge", HUGE_TABLES, HUGE_COLUMNS, HUGE_NAME_SIZE);
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addSchema(hugeSchemaURL);
    project.addProcedures(BASEPROCS);
    compile = config.compile(project);
    assertTrue(compile);
    hugeCompileElapsed = (System.currentTimeMillis() - t) / 1000.0;
    hugeCatalogXMLPath = Configuration.getPathToCatalogForTest("catalogupdate-cluster-huge.xml");
    hugeCatalogJarPath = Configuration.getPathToCatalogForTest("catalogupdate-cluster-huge.jar");
    MiscUtils.copyFile(project.getPathToDeployment(), hugeCatalogXMLPath);

    // (A second, byte-identical compile of catalogupdate-cluster-change_snapshot_dir_not_exist
    // used to follow here; it rebuilt the same jar and re-copied the same deployment XML,
    // so it has been removed as redundant.)

    // Catalogs with different system settings on query time out
    config = new LocalCluster("catalogupdate-cluster-timeout-1000.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.setQueryTimeout(1000);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-timeout-1000.xml"));

    config = new LocalCluster("catalogupdate-cluster-timeout-5000.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.setQueryTimeout(5000);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-timeout-5000.xml"));

    config = new LocalCluster("catalogupdate-cluster-timeout-600.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.setQueryTimeout(600);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-timeout-600.xml"));

    // elastic duration and throughput catalog update tests
    config = new LocalCluster("catalogupdate-cluster-elastic-100-5.jar", SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
    project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    // build the jarfile
    project.setElasticDuration(100);
    project.setElasticThroughput(5);
    compile = config.compile(project);
    assertTrue(compile);
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest("catalogupdate-cluster-elastic-100-5.xml"));

    return builder;
}
Usage of org.voltdb.compiler.VoltProjectBuilder.UserInfo in the VoltDB project.
From the class TestCatalogDiffs, method testDeleteGroupAndUser():
/**
 * Verify that removing all groups and users from the catalog produces
 * a valid, non-restart catalog diff.
 */
public void testDeleteGroupAndUser() throws IOException {
    // One role with broad permissions and a single user belonging to it.
    RoleInfo[] roles = { new RoleInfo("group1", true, true, true, true, false, false) };
    UserInfo[] userList = { new UserInfo("user1", "password", new String[] { "group1" }) };

    String originalJar = compileWithGroups(false, null, roles, userList, "base", BASEPROCS);
    Catalog catOriginal = catalogForJar(originalJar);

    // no groups or users this time
    String updatedJar = compileWithGroups(false, null, null, null, "base", BASEPROCS);
    Catalog catUpdated = catalogForJar(updatedJar);

    verifyDiff(catOriginal, catUpdated, false);
}
Aggregations