Search in sources :

Example 6 with TPCCProjectBuilder

use of org.voltdb.benchmark.tpcc.TPCCProjectBuilder in project voltdb by VoltDB.

the class TestDDLCompiler method testNullAnnotation.

public void testNullAnnotation() throws IOException {
    // Compile the canonical TPC-C schema and verify that every table in the
    // resulting catalog kept the DDL text it was generated from.
    Catalog catalog = new TPCCProjectBuilder().createTPCCSchemaCatalog();
    Database database = catalog.getClusters().get("cluster").getDatabases().get("database");
    for (Table table : database.getTables()) {
        TableAnnotation annotation = (TableAnnotation) table.getAnnotation();
        assertNotNull(annotation.ddl);
    }
}
Also used : Table(org.voltdb.catalog.Table) Database(org.voltdb.catalog.Database) TPCCProjectBuilder(org.voltdb.benchmark.tpcc.TPCCProjectBuilder) Catalog(org.voltdb.catalog.Catalog)

Example 7 with TPCCProjectBuilder

use of org.voltdb.benchmark.tpcc.TPCCProjectBuilder in project voltdb by VoltDB.

the class TestMaliciousClientSuite method suite.

//
//    /*
//     * Expect a variety of failure conditions like OOMm out of file descriptors etc.
//     * In reality this test doesn't do a good job of checking that resources are being freed on the server. The main
//     * goal is to crash the server process one way or another via this behavior.
//     *
//     * This test can only be run to around 500 harassments. After that the server OOMs, but not because it isn't
//     * removing the data associated with old clients correctly. After a certain amount of pressure
//     * it stops detecting that clients have disconnected and isn't removing them as fast as they connect.
//     */
//    @org.junit.Test(timeout=600000)
//    public void testManyClientsQueuingAndLeaving() throws Exception {
//        System.gc();
//        System.out.println("Have " + (Runtime.getRuntime().freeMemory() / 1024) + "kb available");
//        final ThreadLocal<ConnectionUtil.ExecutorPair> m_executors = new ThreadLocal<ConnectionUtil.ExecutorPair>() {
//            @Override
//            protected ConnectionUtil.ExecutorPair initialValue() {
//                return new ConnectionUtil.ExecutorPair();
//            }
//        };
//        final ExecutorService executor = Executors.newFixedThreadPool( 4, new ThreadFactory() {
//            private int harasserCount = 0;
//            @Override
//            public Thread newThread(Runnable r) {
//                return new Thread(Thread.currentThread().getThreadGroup(), r, "Harasser " + harasserCount++, 131072);
//            }
//        });
//
//        int numHarassments = 2000;
//        final int numRequests = 4000;
//        ArrayList<Future<Object>> harassments = new ArrayList<Future<Object>>();
//        for (int ii = 0; ii < numHarassments; ii++) {
//            harassments.add(executor.submit(new Callable<Object>() {
//
//                @Override
//                public Object call() throws Exception {
//                    final SocketChannel sc = getClientChannel(true);
//                    final ArrayList<Future<Long>> requests = new ArrayList<Future<Long>>();
//                    final ExecutorService m_executor = m_executors.get().m_writeExecutor;
//                    for (int ii = 0; ii < numRequests; ii++) {
//                        requests.add(ConnectionUtil.sendInvocation( m_executor, sc, "GoSleep", 0, 1, null));
//                    }
//                    for (Future<Long> request : requests) {
//                        request.get();
//                    }
//                    sc.close();
//                    return null;
//                }
//            }));
//        }
//
//        int harassmentsComplete = 0;
//        for (Future<Object> harassment : harassments) {
//            harassmentsComplete++;
//            if (harassmentsComplete % 100 == 0) {
//                System.out.println("Completed " + harassmentsComplete + " harassments with "
//                        + (Runtime.getRuntime().freeMemory() /1024) + " kb free memory");
//            }
//            harassment.get();
//        }
//
//        executor.shutdown();
//        executor.awaitTermination( 1, TimeUnit.DAYS);
//    }
//
//    /**
//     * Test for backpressure generated by the DTXN because there are too many transactions in flight
//     * @throws Exception
//     */
//    public void testDTXNBackPressure() throws Exception {
//        System.gc();
//        System.out.println("Start the test with " + Runtime.getRuntime().freeMemory() + " free");
//        byte junkData[] = new byte[2048];
//        long numRequests = 40000;
//        long sleepTime = 20000;
//        final ArrayDeque<Future<Long>> pendingRequests = new ArrayDeque<Future<Long>>();
//        final ArrayDeque<Future<ClientResponse>> pendingResponses = new ArrayDeque<Future<ClientResponse>>();
//        final ArrayDeque<SocketChannel> connections = new ArrayDeque<SocketChannel>();
//        for (int ii = 0; ii < 4; ii++) {
//            final SocketChannel channel = getClientChannel();
//            connections.add(channel);
//        }
//
//        /**
//         * Queue a request and the read for the response.
//         * The parameter to GoSleep will cause the first invocation to not return
//         * for a while.
//         * Most of these invocations should never make it into the server due
//         * to DTXN backpressure
//         */
//        for (int ii = 0; ii < numRequests; ii++) {
//            final SocketChannel channel = connections.poll();
//            pendingRequests.offer(ConnectionUtil.sendInvocation(channel, "GoSleep", ii == 0 ? sleepTime : 0, 0, junkData));
//            pendingResponses.offer(ConnectionUtil.readResponse(channel));
//            connections.offer(channel);
//        }
//        System.out.println("Sent " + numRequests + " requests with the first requesting a sleep of " +
//                (sleepTime / 1000) + " seconds");
//
//        /**
//         * Give the TCP stack time to transfer as many invocations as the server will accept
//         */
//        Thread.sleep(10000);
//
//        System.out.println("Slept 10 seconds so the server could transfer invocations");
//
//        /**
//         * Count the number of requests that didn't make it onto the wire due to backpressure
//         */
//        long pendingRequestCount = 0;
//        Future<Long> f = null;
//        while ( (f = pendingRequests.poll()) != null) {
//            if (!f.isDone()) {
//                pendingRequestCount++;
//            } else {
//                f.get();
//            }
//        }
//        pendingRequests.clear();
//
//        System.out.println("Counted " + pendingRequestCount + " requests that didn't make it on the wire");
//
//        /**
//         * The number should be quite large
//         */
//        assertTrue(pendingRequestCount > 30000);
//
//        /**
//         * Now ensure that the backpressure condition can end by waiting for all the responses.
//         */
//        long responseCount = 0;
//        int lastPercentComplete = 0;
//        Future<ClientResponse> response = null;
//        while ( (response = pendingResponses.poll()) != null) {
//            response.get();
//            responseCount++;
//            int percentComplete = (int)Math.floor((responseCount / (double)numRequests) * 100);
//            if (percentComplete > lastPercentComplete) {
//                lastPercentComplete = percentComplete;
//                if (lastPercentComplete % 5 == 0) {
//                    System.out.println(lastPercentComplete + "% complete reading responses with " +  Runtime.getRuntime().freeMemory() + " free");
//                }
//            }
//        }
//
//        System.out.println("Read all the responses for the transactions that couldn't previously make it on the wire");
//
//        assertEquals(responseCount, numRequests);
//
//        /**
//         * Now queue and read another round just to prove it still works
//         */
//        for (final SocketChannel channel : connections) {
//            ConnectionUtil.sendInvocation(channel, "GoSleep", 0).get();
//        }
//
//        for (final SocketChannel channel : connections) {
//            ConnectionUtil.readResponse(channel).get();
//        }
//
//        System.out.println("Was able to queue and read across the other 4 connections");
//    }
//
//    /**
//     * Can't get this to pass reliably
//     */
////    /**
////     * Test for backpressure because a client is not reading his responses. This is difficult because
////     * the server will boot connections that don't read. Only the individual rude client should be blocked.
////     * A polite client should still be able to work.
////     * @throws Exception
////     */
////    public void testIndividualClientBackPressure() throws Exception {
////        System.gc();
////        final ArrayDeque<Future<Long>> pendingRudeRequests = new ArrayDeque<Future<Long>>();
////        final ArrayDeque<Future<Long>> pendingPoliteRequests = new ArrayDeque<Future<Long>>();
////        final ArrayDeque<Future<ClientResponse>> pendingRudeResponses = new ArrayDeque<Future<ClientResponse>>();
////        final ArrayDeque<Future<ClientResponse>> pendingPoliteResponses = new ArrayDeque<Future<ClientResponse>>();
////        final SocketChannel rudeChannel = getClientChannel();
////        /**
////         * Makes it easier to control when data is pulled from the remote side. This value would be
////         * tuned very large otherwise.
////         */
////        rudeChannel.socket().setReceiveBufferSize(16384);
////        System.out.println("Rude channel is called " + rudeChannel.socket().getLocalSocketAddress());
////        final SocketChannel politeChannel = getClientChannel();
////        System.out.println("Polite channel is called " + politeChannel.socket().getLocalSocketAddress());
////        final int numRequests = 15000;
////        final int sleepTime = 0;
////        int rudeReadsSent = 0;
////
////        /**
////         * Send a ton of invocations on the rude channel that will complete immediately and return a relatively large
////         * result table to help fill I/O buffers.
////         */
////        for (int ii = 0; ii < numRequests; ii++) {
////            pendingRudeRequests.add(ConnectionUtil.sendInvocation(rudeChannel, "GoSleep", sleepTime, 1, null));
////            if (ii % 600 == 0) {
////                pendingRudeResponses.add(ConnectionUtil.readResponse(rudeChannel));
////                rudeReadsSent++;
////            }
////        }
////
////        System.out.println("Sent " + numRequests + " requests with the first requesting a sleep of " +
////                (sleepTime / 1000) + " seconds and " + rudeReadsSent + " reads sent to avoid getting booted");
////
////        /**
////         * Give the server time to finish processing the previous requests.
////         */
////        for(int ii = 0; ii < 100; ii++) {
////            Thread.sleep(100);
////            for (int zz = 0; zz < 10; zz++) {
////                pendingRudeResponses.add(ConnectionUtil.readResponse(rudeChannel));
////                rudeReadsSent++;
////            }
////        }
////
////        System.out.println("Slept 10 seconds so the server could transfer invocations and sent " + rudeReadsSent +
////                " reads to avoid getting booted");
////
////        /**
////         * Count the number of requests that didn't make it onto the wire due to backpressure
////         */
////        long pendingRequestCount = 0;
////        for (Future<Long> f : pendingRudeRequests) {
////            if (!f.isDone()) {
////                pendingRequestCount++;
////            } else {
////                f.get();
////            }
////        }
////
////        System.out.println("Counted " + pendingRequestCount + " requests that didn't make it on the wire");
////
////        /**
////         * The number should be quite large
////         */
////        assertTrue(pendingRequestCount > 0);
////
////        System.out.println("Using a  polite channel to send " + numRequests);
////        /**
////         * Now use the polite channel to send requests. These should have no trouble going through the system since
////         * this is also queuing the reads for the responses.
////         */
////        for (int ii = 0; ii < numRequests; ii++) {
////            pendingPoliteRequests.add(ConnectionUtil.sendInvocation(politeChannel, "GoSleep", sleepTime, 0, null));
////            pendingPoliteResponses.add(ConnectionUtil.readResponse(politeChannel));
////            if (ii % 600 == 0) {
////                pendingRudeResponses.add(ConnectionUtil.readResponse(rudeChannel));
////                rudeReadsSent++;
////                Thread.yield();
////            }
////        }
////
////        int numPoliteResponses = 0;
////        int lastPercentPoliteResponses = 0;
////        int rudeReadsSentHere = 0;
////        long startTime = System.currentTimeMillis() - 100;
////        System.out.println("Waiting for all polite requests and responses to make it on the wire");
////        Future<Long> request = null;
////        while ((request = pendingPoliteRequests.poll()) != null) {
////            request.get();
////            pendingPoliteResponses.poll().get();
////            numPoliteResponses++;
////            int percentComplete = (int)Math.floor((numPoliteResponses / (double)numRequests) * 100);
////            if (percentComplete > lastPercentPoliteResponses) {
////                lastPercentPoliteResponses = percentComplete;
////                if (lastPercentPoliteResponses % 10 == 0) {
////                    System.out.println(lastPercentPoliteResponses + "% complete reading polite responses");
////                    System.out.println("Free memory " + Runtime.getRuntime().freeMemory());
////                }
////            }
////
////            final long now = System.currentTimeMillis();
////            if (now - startTime > 100) {
////                //System.out.println("Sending rude reads " + now);
////                startTime = now;
////                for (int zz = 0; zz < 10; zz++) {
////                    pendingRudeResponses.add(ConnectionUtil.readResponse(rudeChannel));
////                    rudeReadsSentHere++;
////                }
////            }
////        }
////
////        rudeReadsSent += rudeReadsSentHere;
////        System.out.println("All polite requests and responses made it onto the wire and had to send " + rudeReadsSentHere +
////                " rude reads to avoid getting booted");
////
////        System.out.println("Queuing reads for all rude requests");
////        /**
////         * Now make sure that if the rude channel becomes polite it can get everything through
////         */
////        for (; rudeReadsSent < numRequests; rudeReadsSent++) {
////            pendingRudeResponses.add(ConnectionUtil.readResponse(rudeChannel));
////        }
////
////        int numRudeRequests = 0;
////        int lastPercentRudeRequests = 0;
////        while ((request = pendingRudeRequests.poll()) != null) {
////            request.get();
////            pendingRudeResponses.poll().get();
////            numRudeRequests++;
////            int percentComplete = (int)Math.floor((numRudeRequests / (double)numRequests) * 100);
////            if (percentComplete > lastPercentRudeRequests) {
////                lastPercentRudeRequests = percentComplete;
////                if (lastPercentRudeRequests % 10 == 0) {
////                    System.out.println(lastPercentRudeRequests + "% complete sending rude requests and receiving rude responses");
////                    System.out.println("Free memory " + Runtime.getRuntime().freeMemory());
////                }
////            }
////        }
////        pendingRudeRequests.clear();
////    }
//
//    /**
//     * Check that the server enforces a limit on the maximum number of connections
//     * @throws Exception
//     */
//    public void testMaxNumConnections() throws Exception {
//        final ExecutorService executor = Executors.newFixedThreadPool( 8, new ThreadFactory() {
//            private int harasserCount = 0;
//            @Override
//            public Thread newThread(Runnable r) {
//                return new Thread(Thread.currentThread().getThreadGroup(), r, "Harasser " + harasserCount++, 131072);
//            }
//        });
//
//        ArrayList<Future<SocketChannel>> attempts = new ArrayList<Future<SocketChannel>>();
//
//        final int connectionsToAttempt = 20000;
//        for (int ii = 0; ii < connectionsToAttempt; ii++) {
//            attempts.add(executor.submit(new Callable<SocketChannel>() {
//                @Override
//                public SocketChannel call() throws Exception {
//                    return getClientChannel(true);
//                }
//            }));
//        }
//
//        for (Future<SocketChannel> attempt : attempts) {
//            try {
//                attempt.get();
//            } catch (Exception e) {
//
//            }
//        }
//
//        int successfulAttempts = 0;
//        for (Future<SocketChannel> attempt : attempts) {
//            try {
//                final SocketChannel sc = attempt.get();
//                successfulAttempts++;
//                sc.close();
//            } catch (Exception e) {
//
//            }
//        }
//
//        executor.shutdown();
//        executor.awaitTermination(1, TimeUnit.DAYS);
//        assertTrue(successfulAttempts < 10000);
//        System.out.println("Had " + successfulAttempts + " successful connection attempts");
//    }
/**
     * Build a list of the tests that will be run when TestMaliciousClientSuite gets run by JUnit.
     * Use helper classes that are part of the RegressionSuite framework.
     * This particular class runs all tests on the local JNI backend with both
     * one and two partition configurations, as well as on the hsql backend.
     *
     * @return The TestSuite containing all the tests to be run.
     */
public static Test suite() {
    // Every server configuration registered below runs the tests declared
    // in this class.
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestMaliciousClientSuite.class);

    // Describe the TPC-C workload the servers will host.
    TPCCProjectBuilder project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(PROCEDURES);

    /////////////////////////////////////////////////////////////
    // CONFIG #1: 1 Local Site/Partitions running on JNI backend
    /////////////////////////////////////////////////////////////
    VoltServerConfig config = new LocalCluster("malicious-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI);
    boolean compiled = config.compile(project);
    assertTrue(compiled);
    builder.addServerConfig(config);

    /////////////////////////////////////////////////////////////
    // CONFIG #2: Local Cluster (of processes)
    /////////////////////////////////////////////////////////////
    config = new LocalCluster("malicious-cluster.jar", 2, 3, 1, BackendTarget.NATIVE_EE_JNI);
    compiled = config.compile(project);
    assertTrue(compiled);
    builder.addServerConfig(config);

    return builder;
}
Also used : TPCCProjectBuilder(org.voltdb.benchmark.tpcc.TPCCProjectBuilder)

Example 8 with TPCCProjectBuilder

use of org.voltdb.benchmark.tpcc.TPCCProjectBuilder in project voltdb by VoltDB.

the class TestCatalogDiffs method compileWithGroups.

protected String compileWithGroups(boolean securityEnabled, String securityProvider, RoleInfo[] gi, UserInfo[] ui, String name, Class<?>... procList) {
    // Assemble a TPC-C project carrying the requested procedures, roles and
    // users, with security toggled as asked.
    // NOTE: securityProvider is accepted but not consulted in this method.
    TPCCProjectBuilder projectBuilder = new TPCCProjectBuilder();
    projectBuilder.addDefaultSchema();
    projectBuilder.addDefaultPartitioning();
    projectBuilder.addProcedures(procList);
    projectBuilder.setSecurityEnabled(securityEnabled, true);
    if (gi != null && gi.length > 0) {
        projectBuilder.addRoles(gi);
    }
    if (ui != null && ui.length > 0) {
        projectBuilder.addUsers(ui);
    }
    // Compile into a uniquely named jar in the build directory and return
    // its path so callers can diff the resulting catalogs.
    String jarPath = BuildDirectoryUtils.getBuildDirectoryPath() + File.separator + "tpcc-catalogcheck-" + name + ".jar";
    assertTrue("Failed to compile schema", projectBuilder.compile(jarPath));
    return jarPath;
}
Also used : TPCCProjectBuilder(org.voltdb.benchmark.tpcc.TPCCProjectBuilder)

Example 9 with TPCCProjectBuilder

use of org.voltdb.benchmark.tpcc.TPCCProjectBuilder in project voltdb by VoltDB.

the class TestSecuritySuite method suite.

/**
     * Build a list of the tests that will be run when TestSecuritySuite gets run by JUnit.
     * Use helper classes that are part of the RegressionSuite framework.
     * This particular class runs all tests on the local JNI backend with both
     * one and two partition configurations, as well as on the hsql backend.
     *
     * @return The TestSuite containing all the tests to be run.
     */
public static Test suite() {
    // Every configuration registered below runs the tests declared in this class.
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestSecuritySuite.class);

    // TPC-C workload with three procedures gated by different role lists.
    TPCCProjectBuilder project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    ArrayList<ProcedureInfo> procedures = new ArrayList<>();
    procedures.add(new ProcedureInfo(new String[0], PROCEDURES[0]));
    procedures.add(new ProcedureInfo(new String[] { "group1" }, PROCEDURES[1]));
    procedures.add(new ProcedureInfo(new String[] { "group1", "group2" }, PROCEDURES[2]));
    project.addProcedures(procedures);

    // Users deliberately spell their role names with mixed case relative to
    // the RoleInfo declarations below.
    UserInfo[] users = new UserInfo[] {
        new UserInfo("user1", "password", new String[] { "grouP1" }),
        new UserInfo("user2", "password", new String[] { "grouP2" }),
        new UserInfo("user3", "password", new String[] { "grouP3" }),
        new UserInfo("user4", "password", new String[] { "AdMINISTRATOR" }),
        new UserInfo("userWithDefaultUserPerm", "password", new String[] { "User" }),
        new UserInfo("userWithAllProc", "password", new String[] { "GroupWithAllProcPerm" }),
        new UserInfo("userWithDefaultProcPerm", "password", new String[] { "groupWithDefaultProcPerm" }),
        new UserInfo("userWithoutDefaultProcPerm", "password", new String[] { "groupWiThoutDefaultProcPerm" }),
        new UserInfo("userWithDefaultProcReadPerm", "password", new String[] { "groupWiThDefaultProcReadPerm" }) };
    project.addUsers(users);

    RoleInfo[] groups = new RoleInfo[] {
        new RoleInfo("Group1", false, false, false, false, false, false),
        new RoleInfo("Group2", true, false, false, false, false, false),
        new RoleInfo("Group3", true, false, false, false, false, false),
        new RoleInfo("GroupWithDefaultUserPerm", true, false, false, false, false, true),
        new RoleInfo("GroupWithAllProcPerm", false, false, false, false, false, true),
        new RoleInfo("GroupWithDefaultProcPerm", false, false, false, true, false, false),
        new RoleInfo("GroupWithoutDefaultProcPerm", false, false, false, false, false, false),
        new RoleInfo("GroupWithDefaultProcReadPerm", false, false, false, false, true, false) };
    project.addRoles(groups);

    // suite defines its own ADMINISTRATOR user
    project.setSecurityEnabled(true, false);
    // export disabled in community
    if (MiscUtils.isPro()) {
        project.addExport(true);
    }

    /////////////////////////////////////////////////////////////
    // CONFIG #1: 1 Local Site/Partitions running on JNI backend
    /////////////////////////////////////////////////////////////
    VoltServerConfig config = new LocalCluster("security-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI);
    if (!config.compile(project))
        fail();
    builder.addServerConfig(config, false);

    return builder;
}
Also used : RoleInfo(org.voltdb.compiler.VoltProjectBuilder.RoleInfo) ProcedureInfo(org.voltdb.compiler.VoltProjectBuilder.ProcedureInfo) ArrayList(java.util.ArrayList) UserInfo(org.voltdb.compiler.VoltProjectBuilder.UserInfo) TPCCProjectBuilder(org.voltdb.benchmark.tpcc.TPCCProjectBuilder)

Example 10 with TPCCProjectBuilder

use of org.voltdb.benchmark.tpcc.TPCCProjectBuilder in project voltdb by VoltDB.

the class TestUpdateDeployment method suite.

/**
     * Build a list of the tests that will be run when TestUpdateDeployment gets run by JUnit.
     * Use helper classes that are part of the RegressionSuite framework.
     * This particular class runs all tests on the local JNI backend with both
     * one and two partition configurations, as well as on the hsql backend.
     *
     * @return The TestSuite containing all the tests to be run.
     * @throws Exception
     */
public static Test suite() throws Exception {
    TheHashinator.initialize(TheHashinator.getConfiguredHashinatorClass(), TheHashinator.getConfigureBytes(2));
    // the suite made here will all be using the tests from this class
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestUpdateDeployment.class);

    /////////////////////////////////////////////////////////////
    // CONFIG #1: 1 Local Site/Partitions running on JNI backend
    /////////////////////////////////////////////////////////////
    VoltServerConfig config = newCluster("catalogupdate-cluster-base.jar");
    // NOTE(review): the original note here said catalog-upgrade tests
    // sporadically fail if there's a local server (no file pipe for grepping
    // its output), yet a local server is enabled — confirm the intent.
    ((LocalCluster) config).setHasLocalServer(true);
    TPCCProjectBuilder project = newBaseProject();
    project.addProcedures(BASEPROCS);
    compileAndSaveDeployment(config, project, "catalogupdate-cluster-base.xml");
    // add this config to the set of tests to run
    builder.addServerConfig(config, false);

    /////////////////////////////////////////////////////////////
    // DELTA CATALOGS FOR TESTING
    // Each stanza compiles a jar and stashes its deployment file so the
    // tests can switch between them at runtime.
    /////////////////////////////////////////////////////////////

    // Adds a table, and a deployment that changes the dead-host timeout.
    config = newCluster("catalogupdate-cluster-addtable.jar");
    project = newBaseProject();
    project.addLiteralSchema("CREATE TABLE NEWTABLE (A1 INTEGER, PRIMARY KEY (A1));");
    project.setDeadHostTimeout(6);
    compileAndSaveDeployment(config, project, "catalogupdate-cluster-addtable.xml");

    // A catalog change that enables snapshots.
    config = newCluster("catalogupdate-cluster-enable_snapshot.jar");
    project = newBaseProject();
    project.addProcedures(BASEPROCS);
    project.setSnapshotSettings("1s", 3, "/tmp/snapshotdir1", "foo1");
    compileAndSaveDeployment(config, project, "catalogupdate-cluster-enable_snapshot.xml");

    // Another catalog change, modifying the snapshot schedule/destination.
    config = newCluster("catalogupdate-cluster-change_snapshot.jar");
    project = newBaseProject();
    project.addProcedures(BASEPROCS);
    project.setSnapshotSettings("1s", 3, "/tmp/snapshotdir2", "foo2");
    compileAndSaveDeployment(config, project, "catalogupdate-cluster-change_snapshot.xml");

    // Snapshot schedule pointing at a directory that does not exist.
    config = newCluster("catalogupdate-cluster-change_snapshot_dir_not_exist.jar");
    project = newBaseProject();
    project.addProcedures(BASEPROCS);
    project.setSnapshotSettings("1s", 3, "/tmp/snapshotdirasda2", "foo2");
    compileAndSaveDeployment(config, project, "catalogupdate-cluster-change_snapshot_dir_not_exist.xml");

    // A deployment change that switches the schema change mechanism to DDL.
    config = newCluster("catalogupdate-cluster-change_schema_update.jar");
    project = newBaseProject();
    project.addProcedures(BASEPROCS);
    project.setUseDDLSchema(true);
    compileAndSaveDeployment(config, project, "catalogupdate-cluster-change_schema_update.xml");

    // A deployment change that enables security without defining any users.
    config = newCluster("catalogupdate-security-no-users.jar");
    project = newBaseProject();
    project.addProcedures(BASEPROCS);
    project.setSecurityEnabled(true, false);
    compileAndSaveDeployment(config, project, "catalogupdate-security-no-users.xml");

    // A deployment change with security enabled and the bad-username fixture.
    config = newCluster("catalogupdate-bad-username.jar");
    project = newBaseProject();
    project.addProcedures(BASEPROCS);
    project.setSecurityEnabled(true, true);
    project.addRoles(GROUPS);
    project.addUsers(USERS);
    compileAndSaveDeployment(config, project, "catalogupdate-bad-username.xml");

    // A deployment change that has a bad masked password.
    config = newCluster("catalogupdate-bad-masked-password.jar");
    project = newBaseProject();
    project.addProcedures(BASEPROCS);
    project.setSecurityEnabled(true, true);
    project.addRoles(GROUPS);
    project.addUsers(USERS_BAD_PASSWORD);
    compileAndSaveDeployment(config, project, "catalogupdate-bad-masked-password.xml");

    return builder;
}

/**
 * Creates a LocalCluster for the given jar name using the suite-wide
 * topology constants (SITES_PER_HOST, HOSTS, K) and the native JNI backend.
 */
private static VoltServerConfig newCluster(String jarName) {
    return new LocalCluster(jarName, SITES_PER_HOST, HOSTS, K, BackendTarget.NATIVE_EE_JNI);
}

/**
 * Creates a TPC-C project builder pre-loaded with the default schema and
 * default partitioning, ready for per-config customization.
 */
private static TPCCProjectBuilder newBaseProject() {
    TPCCProjectBuilder project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    return project;
}

/**
 * Compiles the project into the config's jar (failing the suite build on a
 * compile error) and copies its deployment file to the test-catalog path
 * registered under the given file name.
 */
private static void compileAndSaveDeployment(VoltServerConfig config, TPCCProjectBuilder project, String deploymentFileName) throws Exception {
    assertTrue("Failed to compile " + deploymentFileName, config.compile(project));
    MiscUtils.copyFile(project.getPathToDeployment(), Configuration.getPathToCatalogForTest(deploymentFileName));
}
Also used : TPCCProjectBuilder(org.voltdb.benchmark.tpcc.TPCCProjectBuilder)

Aggregations

TPCCProjectBuilder (org.voltdb.benchmark.tpcc.TPCCProjectBuilder)23 File (java.io.File)6 Catalog (org.voltdb.catalog.Catalog)4 UserInfo (org.voltdb.compiler.VoltProjectBuilder.UserInfo)4 IOException (java.io.IOException)3 HashinatorConfig (org.voltdb.TheHashinator.HashinatorConfig)3 RoleInfo (org.voltdb.compiler.VoltProjectBuilder.RoleInfo)3 ArrayList (java.util.ArrayList)2 HashMap (java.util.HashMap)2 ServerThread (org.voltdb.ServerThread)2 Procedure (org.voltdb.catalog.Procedure)2 Client (org.voltdb.client.Client)2 ClientConfig (org.voltdb.client.ClientConfig)2 ProcedureInfo (org.voltdb.compiler.VoltProjectBuilder.ProcedureInfo)2 InMemoryJarfile (org.voltdb.utils.InMemoryJarfile)2 Date (java.util.Date)1 Properties (java.util.Properties)1 Callable (java.util.concurrent.Callable)1 CyclicBarrier (java.util.concurrent.CyclicBarrier)1 ExecutorService (java.util.concurrent.ExecutorService)1