
Example 11 with VoltLogger

Use of org.voltcore.logging.VoltLogger in project voltdb by VoltDB.

The class KafkaImportBenchmark, method main.

/**
     * Main routine creates a benchmark instance and kicks off the run method.
     *
     * @param args Command line arguments.
     * @throws Exception if anything goes wrong.
     */
public static void main(String[] args) throws Exception {
    VoltLogger log = new VoltLogger("Benchmark.main");
    boolean testResult = true;
    // create a configuration from the arguments
    Config config = new Config();
    config.parse(KafkaImportBenchmark.class.getName(), args);
    // connect to one or more servers, method loops until success
    dbconnect(config.servers, config.ratelimit);
    // if no rows are expected, give things a minute to settle, verify zero rows, and end the test
    if (config.expected_rows == 0) {
        testResult = verifyZero();
        endTest(testResult, config);
    }
    // instance handles inserts to Kafka export table and its mirror DB table
    exportProc = new InsertExport(config.alltypes, client, rowsAdded);
    log.info("Starting KafkaImportBenchmark...");
    KafkaImportBenchmark benchmark = new KafkaImportBenchmark(config);
    BenchmarkRunner runner = new BenchmarkRunner(benchmark);
    runner.start();
    // wait for the writers to finish
    runner.join();
    long exportRowCount = 0;
    if (config.useexport) {
        exportRowCount = MatchChecks.getExportRowCount(client);
        log.info("Export phase complete, " + exportRowCount + " rows exported, waiting for import to drain...");
    }
    // wait for the import to drain; otherwise not all the rows got to Kafka,
    // or not all the rows got imported back.
    // importProgress is an array of sampled counts of the importedcounts table,
    // showing import progress; samples are recorded by the checkTimer thread
    do {
        Thread.sleep(END_WAIT * 1000);
    } while (!RUNNING_STATE.equalsIgnoreCase(MatchChecks.getClusterState(client))
            || importProgress.size() < 4
            || importProgress.get(importProgress.size() - 1) > importProgress.get(importProgress.size() - 2)
            || importProgress.get(importProgress.size() - 1) > importProgress.get(importProgress.size() - 3)
            || importProgress.get(importProgress.size() - 1) > importProgress.get(importProgress.size() - 4));
    long[] importStatValues = MatchChecks.getImportValues(client);
    long mirrorRows = 0;
    if (!config.streamtest)
        mirrorRows = MatchChecks.getMirrorTableRowCount(config.alltypes, client);
    long importRows = MatchChecks.getImportTableRowCount(config.alltypes, client);
    long importRowCount = 0;
    if (!config.streamtest)
        importRowCount = MatchChecks.getImportRowCount(client);
    // in case of pause / resume tweak, let it drain longer
    int trial = 3;
    while (!RUNNING_STATE.equalsIgnoreCase(MatchChecks.getClusterState(client))
            || ((--trial > 0) && ((importStatValues[OUTSTANDING_REQUESTS] > 0) || (importRows < config.expected_rows)))) {
        Thread.sleep(PAUSE_WAIT * 1000);
        importStatValues = MatchChecks.getImportValues(client);
        if (!config.streamtest)
            mirrorRows = MatchChecks.getMirrorTableRowCount(config.alltypes, client);
        importRows = MatchChecks.getImportTableRowCount(config.alltypes, client);
    // importRowCount = MatchChecks.getImportRowCount(client);
    }
    // some counts that might help debugging....
    log.info("importer outstanding requests: " + importStatValues[OUTSTANDING_REQUESTS]);
    log.info("importRows: " + importRows);
    if (!config.streamtest) {
        log.info("mirrorRows: " + mirrorRows);
        log.info("importRowCount: " + importRowCount);
    }
    if (config.useexport) {
        log.info("exportRowCount: " + exportRowCount);
    }
    if (config.useexport) {
        log.info("Total rows exported: " + finalInsertCount);
        log.info("Unmatched Rows remaining in the export Mirror Table: " + mirrorRows);
        log.info("Unmatched Rows received from Kafka to Import Table (duplicate rows): " + importRows);
        if (mirrorRows != 0) {
            log.error(mirrorRows + " Rows are missing from the import stream, failing test");
            testResult = false;
        }
    }
    if ((exportRowCount != (importStatValues[SUCCESSES] + importStatValues[FAILURES])) && config.useexport) {
        log.error("Export count '" + exportRowCount + "' does not match import stats count '" + (importStatValues[SUCCESSES] + importStatValues[FAILURES]) + "' test fails.");
        testResult = false;
    }
    if (!config.useexport && !config.streamtest) {
        testResult = MatchChecks.checkPounderResults(config.expected_rows, client);
    }
    endTest(testResult, config);
}
Also used : ClientConfig(org.voltdb.client.ClientConfig) CLIConfig(org.voltdb.CLIConfig) VoltLogger(org.voltcore.logging.VoltLogger)
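
The exit condition of the drain loop above is dense: the benchmark keeps sleeping until the cluster reports the RUNNING state and the newest sampled import count is no longer ahead of any of the previous three samples. Below is a minimal sketch of that plateau test pulled into its own helper, assuming the samples live in a List<Long> filled by the checkTimer thread; the helper class and method names are illustrative, not part of the benchmark.

import java.util.List;

public final class ImportProgressUtil {
    /**
     * Illustrative helper, not part of the benchmark. Returns true once the
     * newest sample is no larger than each of the previous three samples,
     * i.e. the import-progress part of the drain loop's exit condition holds.
     */
    static boolean importHasDrained(List<Long> importProgress) {
        int n = importProgress.size();
        if (n < 4) {
            return false;   // not enough samples yet to call it a plateau
        }
        long newest = importProgress.get(n - 1);
        return newest <= importProgress.get(n - 2)
            && newest <= importProgress.get(n - 3)
            && newest <= importProgress.get(n - 4);
    }
}

With a helper like this, the loop would reduce to sleeping until the cluster state is RUNNING and importHasDrained(importProgress) returns true.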

Example 12 with VoltLogger

Use of org.voltcore.logging.VoltLogger in project voltdb by VoltDB.

The class AsyncBenchmark, method main.

/**
     * Main routine creates a benchmark instance and kicks off the run method.
     *
     * @param args Command line arguments.
     * @throws Exception if anything goes wrong.
     * @see {@link VoterConfig}
     */
public static void main(String[] args) throws Exception {
    VoltLogger log = new VoltLogger("Benchmark.main");
    // 100 seconds, in milliseconds
    final long WAIT_FOR_A_WHILE = 100 * 1000;
    // create a configuration from the arguments
    Config config = new Config();
    config.parse(AsyncBenchmark.class.getName(), args);
    System.out.print(HORIZONTAL_RULE);
    log.info(" Command Line Configuration");
    log.info(HORIZONTAL_RULE);
    log.info(config.getConfigDumpString());
    if (config.latencyreport) {
        log.info("NOTICE: Not implemented in this benchmark client.\n");
    }
    // connect to one or more servers, loop until success
    dbconnect(config.servers);
    log.info("Setting up DDL");
    checkDB = new DataUtils(queue, dqueue, client, config.partitioned);
    checkDB.ddlSetup(config.partitioned);
    connect(config.sockservers);
    CountDownLatch cdl = new CountDownLatch(haplist.size());
    for (HostAndPort hap : haplist.keySet()) {
        AsyncBenchmark benchmark = new AsyncBenchmark(config);
        BenchmarkRunner runner = new BenchmarkRunner(benchmark, cdl, hap);
        runner.start();
    }
    schedulePeriodicStats();
    if (!config.perftest) {
        // start checking the table that's being populated by the socket ingester(s)
        while (queue.size() == 0) {
            try {
                // one second.
                Thread.sleep(1000);
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
        }
        log.info("Starting CheckData methods. Queue size: " + queue.size());
        checkDB.processQueue();
    }
    log.info("-- waiting for socket writers.");
    // this hangs occasionally, so adding a timeout with a margin
    cdl.await(config.duration + config.warmup + 1, TimeUnit.SECONDS);
    // close socket connections...
    for (HostAndPort hap : haplist.keySet()) {
        OutputStream writer = haplist.get(hap);
        writer.flush();
        writer.close();
    }
    // print the summary results
    printResults();
    if (!config.perftest) {
        log.info("...starting timed check looping... " + queue.size());
        final long queueEndTime = System.currentTimeMillis() + WAIT_FOR_A_WHILE;
        log.info("Continue checking for " + (queueEndTime - System.currentTimeMillis()) / 1000 + " seconds.");
        while (queueEndTime > System.currentTimeMillis()) {
            checkDB.processQueue();
        }
    }
    // final exit criteria -- queue of outstanding importer requests goes to zero
    // but with checking for no-progress so we don't get stuck forever.
    long outstandingRequests = UtilQueries.getImportOutstandingRequests(client);
    long prev_outstandingRequests = outstandingRequests;
    // somewhat arbitrary: give up if outstanding requests does not change for this many polling intervals
    int waitloops = 10;
    while (outstandingRequests != 0 && waitloops > 0) {
        log.info("Importer outstanding requests is " + outstandingRequests + ". Waiting for zero.");
        outstandingRequests = UtilQueries.getImportOutstandingRequests(client);
        if (prev_outstandingRequests == outstandingRequests) {
            log.info("Outstanding requests unchanged since last interval.");
            waitloops--;
        }
        prev_outstandingRequests = outstandingRequests;
        Thread.sleep(config.displayinterval * 1000);
    }
    client.drain();
    client.close();
    if (!config.perftest) {
        log.info("Queued tuples remaining: " + queue.size());
        log.info("Rows checked against database: " + rowsChecked.get());
        log.info("Mismatch rows (value imported <> value in DB): " + rowsMismatch.get());
    }
    log.info("Total rows added by Socket Injester: " + (warmupCount.get() + runCount.get()));
    log.info("Socket write count: " + socketWrites.get());
    log.info("Socket write exception count: " + socketWriteExceptions.get());
    System.exit(0);
}
Also used : HostAndPort(com.google_voltpatches.common.net.HostAndPort) CLIConfig(org.voltdb.CLIConfig) VoltLogger(org.voltcore.logging.VoltLogger) OutputStream(java.io.OutputStream) CountDownLatch(java.util.concurrent.CountDownLatch)
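
The final wait loop above combines two exit criteria: the importer's outstanding-request count dropping to zero, and a no-progress guard (waitloops) so the benchmark cannot hang forever if the count stops moving. Here is a small sketch of that pattern on its own, assuming a LongSupplier provides the counter; the class and method names are illustrative, not part of AsyncBenchmark.

import java.util.function.LongSupplier;

public final class DrainWait {
    /**
     * Illustrative sketch. Polls a counter until it reaches zero, or until it
     * has shown no change on maxStalledPolls polls, whichever comes first.
     */
    static void waitForZeroOrStall(LongSupplier counter, int maxStalledPolls,
                                   long pollMillis) throws InterruptedException {
        long previous = counter.getAsLong();
        long current = previous;
        int stalledBudget = maxStalledPolls;
        while (current != 0 && stalledBudget > 0) {
            Thread.sleep(pollMillis);
            current = counter.getAsLong();
            if (current == previous) {
                stalledBudget--;    // no progress during this interval
            }
            previous = current;
        }
    }
}

In the benchmark itself the counter would be UtilQueries.getImportOutstandingRequests(client), the poll interval config.displayinterval * 1000, and waitloops = 10 the stall budget.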

Example 13 with VoltLogger

Use of org.voltcore.logging.VoltLogger in project voltdb by VoltDB.

The class SocketJoiner, method start.

public boolean start(final CountDownLatch externalInitBarrier) {
    boolean retval = false;
    /*
     * probe the coordinator host list for leader candidates that may be operational
     * (i.e. node state is operational)
     */
    m_coordIp = null;
    for (String coordHost : m_acceptor.getCoordinators()) {
        if (m_coordIp != null) {
            break;
        }
        HostAndPort host = HostAndPort.fromString(coordHost).withDefaultPort(org.voltcore.common.Constants.DEFAULT_INTERNAL_PORT);
        InetSocketAddress ip = !host.getHostText().isEmpty() ? new InetSocketAddress(host.getHostText(), host.getPort()) : new InetSocketAddress(host.getPort());
        /*
             * On an operational leader (i.e. node is up) the request to join the cluster
             * may be rejected, e.g. multiple hosts rejoining at the same time. In this case,
             * the code will retry.
             */
        long retryInterval = RETRY_INTERVAL;
        final Random salt = new Random();
        while (true) {
            try {
                connectToPrimary(ip, ConnectStrategy.PROBE);
                break;
            } catch (CoreUtils.RetryException e) {
                LOG.warn(String.format("Request to join cluster mesh is rejected, retrying in %d seconds. %s", retryInterval, e.getMessage()));
                try {
                    Thread.sleep(TimeUnit.SECONDS.toMillis(retryInterval));
                } catch (InterruptedException ignoreIt) {
                }
                // exponential back off with a salt to avoid collision. Max is 5 minutes.
                retryInterval = (Math.min(retryInterval * 2, TimeUnit.MINUTES.toSeconds(5)) + salt.nextInt(RETRY_INTERVAL_SALT));
                // after 5 min + salt, reset the waiting time to avoid over-waiting
                if (retryInterval > TimeUnit.MINUTES.toSeconds(5)) {
                    retryInterval = RETRY_INTERVAL;
                }
            } catch (Exception e) {
                hostLog.error("Failed to establish socket mesh.", e);
                throw new RuntimeException("Failed to establish socket mesh with " + m_coordIp, e);
            }
        }
    }
    boolean haveMeshedLeader = m_coordIp != null;
    /*
         *  if none were found pick the first one in lexicographical order
         */
    if (m_coordIp == null) {
        HostAndPort leader = m_acceptor.getLeader();
        m_coordIp = !leader.getHostText().isEmpty() ? new InetSocketAddress(leader.getHostText(), leader.getPort()) : new InetSocketAddress(leader.getPort());
    }
    if (!haveMeshedLeader && m_coordIp.getPort() == m_internalPort) {
        try {
            hostLog.info("Attempting to bind to leader ip " + m_coordIp);
            ServerSocketChannel listenerSocket = ServerSocketChannel.open();
            listenerSocket.socket().bind(m_coordIp);
            listenerSocket.socket().setPerformancePreferences(0, 2, 1);
            listenerSocket.configureBlocking(false);
            m_listenerSockets.add(listenerSocket);
        } catch (IOException e) {
            if (!m_listenerSockets.isEmpty()) {
                try {
                    m_listenerSockets.get(0).close();
                    m_listenerSockets.clear();
                } catch (IOException ex) {
                    new VoltLogger(SocketJoiner.class.getName()).l7dlog(Level.FATAL, null, ex);
                }
            }
        }
    }
    if (!m_listenerSockets.isEmpty()) {
        // check that the specified internal interface matches one of the forms of the leader address we've bound to.
        if (m_internalInterface != null && !m_internalInterface.equals("")) {
            if (!m_internalInterface.equals(ReverseDNSCache.hostnameOrAddress(m_coordIp.getAddress())) && !m_internalInterface.equals(m_coordIp.getAddress().getCanonicalHostName()) && !m_internalInterface.equals(m_coordIp.getAddress().getHostAddress())) {
                org.voltdb.VoltDB.crashLocalVoltDB(String.format("The provided internal interface (%s) does not match the " + "specified leader address (%s, %s). " + "This will result in either a cluster which fails to start or an unintended network topology. " + "The leader will now exit; correct your specified leader and interface and try restarting.", m_internalInterface, ReverseDNSCache.hostnameOrAddress(m_coordIp.getAddress()), m_coordIp.getAddress().getHostAddress()), false, null);
            }
        }
        retval = true;
        consoleLog.info("Connecting to VoltDB cluster as the leader...");
        /*
             * Need to wait for external initialization to complete before
             * accepting new connections. This is slang for the leader
             * creating an agreement site that agrees with itself
             */
        m_es.submit(new Callable<Object>() {

            @Override
            public Object call() throws Exception {
                externalInitBarrier.await();
                return null;
            }
        });
    } else if (!haveMeshedLeader) {
        consoleLog.info("Connecting to the VoltDB cluster leader " + m_coordIp);
        try {
            connectToPrimary(m_coordIp, ConnectStrategy.CONNECT);
        } catch (Exception e) {
            hostLog.error("Failed to establish socket mesh.", e);
            throw new RuntimeException("Failed to establish socket mesh with " + m_coordIp, e);
        }
    }
    /*
         * Submit a task to start the main run loop,
         * will wait for agreement to be initialized if this
         * is the leader using the previously queued runnable
         */
    m_es.submit(new Runnable() {

        @Override
        public void run() {
            try {
                runPrimary();
            } catch (InterruptedException e) {
            } catch (Throwable e) {
                org.voltdb.VoltDB.crashLocalVoltDB("Error in socket joiner run loop", true, e);
            }
        }
    });
    return retval;
}
Also used : InetSocketAddress(java.net.InetSocketAddress) IOException(java.io.IOException) IOException(java.io.IOException) EOFException(java.io.EOFException) JSONException(org.json_voltpatches.JSONException) ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) ClosedSelectorException(java.nio.channels.ClosedSelectorException) HostAndPort(com.google_voltpatches.common.net.HostAndPort) Random(java.util.Random) VoltLogger(org.voltcore.logging.VoltLogger) JSONObject(org.json_voltpatches.JSONObject) CoreUtils(org.voltcore.utils.CoreUtils) ServerSocketChannel(java.nio.channels.ServerSocketChannel)
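
The retry path in the probe loop above doubles the wait between join attempts, caps it at five minutes, adds a random salt so hosts rejoining at the same time do not collide, and resets to the base interval once the cap has been exceeded. The following is a compact sketch of just that schedule, with assumed values for the two constants; the real RETRY_INTERVAL and RETRY_INTERVAL_SALT live in SocketJoiner.

import java.util.Random;
import java.util.concurrent.TimeUnit;

public final class JoinBackoff {
    static final long RETRY_INTERVAL = 10;       // assumed base interval, seconds
    static final int RETRY_INTERVAL_SALT = 30;   // assumed salt range, seconds

    private final Random salt = new Random();
    private long retryInterval = RETRY_INTERVAL;

    /** Returns the interval to wait now (seconds) and advances the schedule for the next retry. */
    long nextIntervalSeconds() {
        long current = retryInterval;
        // exponential back-off with a salt to avoid collision; max is 5 minutes
        retryInterval = Math.min(retryInterval * 2, TimeUnit.MINUTES.toSeconds(5))
                + salt.nextInt(RETRY_INTERVAL_SALT);
        // after 5 min + salt, reset the waiting time to avoid over-waiting
        if (retryInterval > TimeUnit.MINUTES.toSeconds(5)) {
            retryInterval = RETRY_INTERVAL;
        }
        return current;
    }
}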

Example 14 with VoltLogger

Use of org.voltcore.logging.VoltLogger in project voltdb by VoltDB.

The class ProcedureCompiler, method getValidSQLStmts.

public static Map<String, SQLStmt> getValidSQLStmts(VoltCompiler compiler, String procName, Class<?> procClass, Object procInstance, boolean withPrivate) throws VoltCompilerException {
    Map<String, SQLStmt> retval = new HashMap<>();
    Field[] fields = procClass.getDeclaredFields();
    for (Field f : fields) {
        // skip non SQL fields
        if (f.getType() != SQLStmt.class)
            continue;
        int modifiers = f.getModifiers();
        // skip private fields if asked (usually a superclass)
        if (Modifier.isPrivate(modifiers) && (!withPrivate))
            continue;
        // don't allow non-final SQLStmts
        if (Modifier.isFinal(modifiers) == false) {
            String msg = "Procedure " + procName + " contains a non-final SQLStmt field.";
            if (procClass.getSimpleName().equals(procName) == false) {
                msg = "Superclass " + procClass.getSimpleName() + " of procedure " + procName + " contains a non-final SQLStmt field.";
            }
            if (compiler != null)
                throw compiler.new VoltCompilerException(msg);
            else
                new VoltLogger("HOST").warn(msg);
        }
        f.setAccessible(true);
        SQLStmt stmt = null;
        try {
            stmt = (SQLStmt) f.get(procInstance);
        } catch (Exception e) {
            // it's weird, but seems rather hard to hit
            e.printStackTrace();
            continue;
        }
        retval.put(f.getName(), stmt);
    }
    Class<?> superClass = procClass.getSuperclass();
    if (superClass != null) {
        Map<String, SQLStmt> superStmts = getValidSQLStmts(compiler, procName, superClass, procInstance, false);
        for (Entry<String, SQLStmt> e : superStmts.entrySet()) {
            if (retval.containsKey(e.getKey()) == false)
                retval.put(e.getKey(), e.getValue());
        }
    }
    return retval;
}
Also used : Field(java.lang.reflect.Field) SQLStmt(org.voltdb.SQLStmt) HashMap(java.util.HashMap) VoltLogger(org.voltcore.logging.VoltLogger) VoltCompilerException(org.voltdb.compiler.VoltCompiler.VoltCompilerException) VoltCompilerException(org.voltdb.compiler.VoltCompiler.VoltCompilerException) VoltTypeException(org.voltdb.VoltTypeException)
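
The scan above collects only SQLStmt fields, rejects non-final ones (failing compilation, or merely warning through a VoltLogger when no compiler is available), and then recurses into the superclass for inherited statements. A hypothetical stored procedure that would pass those checks might look like the following; the class, table, and statement are made up for illustration.

import org.voltdb.SQLStmt;
import org.voltdb.VoltProcedure;
import org.voltdb.VoltTable;

public class CountWidgets extends VoltProcedure {
    // public and final, so getValidSQLStmts picks it up without complaint;
    // "widgets" is a hypothetical table used only for this sketch
    public final SQLStmt countStmt = new SQLStmt("SELECT COUNT(*) FROM widgets;");

    public VoltTable[] run() {
        voltQueueSQL(countStmt);
        return voltExecuteSQL(true);
    }
}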

Example 15 with VoltLogger

Use of org.voltcore.logging.VoltLogger in project voltdb by VoltDB.

The class RealVoltDB, method halt.

@Override
public void halt() {
    SnmpTrapSender snmp = getSnmpTrapSender();
    if (snmp != null) {
        try {
            snmp.hostDown(FaultLevel.INFO, m_messenger.getHostId(), "Host is shutting down because of @StopNode");
            snmp.shutdown();
        } catch (Throwable t) {
            VoltLogger log = new VoltLogger("HOST");
            log.warn("failed to issue a crash SNMP trap", t);
        }
    }
    Thread shutdownThread = new Thread() {

        @Override
        public void run() {
            hostLog.warn("VoltDB node shutting down as requested by @StopNode command.");
            System.exit(0);
        }
    };
    shutdownThread.start();
}
Also used : VoltLogger(org.voltcore.logging.VoltLogger) SnmpTrapSender(org.voltdb.snmp.SnmpTrapSender) DummySnmpTrapSender(org.voltdb.snmp.DummySnmpTrapSender)
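
Two details above are worth noting: the SNMP trap is sent inline, with any failure merely logged through a fresh "HOST" VoltLogger, while the actual System.exit runs on a separate thread, presumably so the @StopNode call path has a chance to unwind first. Below is a stripped-down sketch of that deferred-exit idiom; only the VoltLogger usage matches the code above, the rest is illustrative.

import org.voltcore.logging.VoltLogger;

public final class DeferredShutdown {
    private static final VoltLogger hostLog = new VoltLogger("HOST");

    /** Logs the reason, then exits from a freshly started thread so the caller has a chance to return first. */
    static void haltAsync(final String reason) {
        Thread shutdownThread = new Thread() {
            @Override
            public void run() {
                hostLog.warn("Node shutting down as requested: " + reason);
                System.exit(0);
            }
        };
        shutdownThread.start();
    }
}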

Aggregations

VoltLogger (org.voltcore.logging.VoltLogger): 20
File (java.io.File): 3
IOException (java.io.IOException): 3
HostAndPort (com.google_voltpatches.common.net.HostAndPort): 2
PrintWriter (java.io.PrintWriter): 2
InvocationTargetException (java.lang.reflect.InvocationTargetException): 2
ArrayList (java.util.ArrayList): 2
Date (java.util.Date): 2
ThreadFactory (java.util.concurrent.ThreadFactory): 2
Test (org.junit.Test): 2
CLIConfig (org.voltdb.CLIConfig): 2
VoltTable (org.voltdb.VoltTable): 2
SnmpTrapSender (org.voltdb.snmp.SnmpTrapSender): 2
VoltFile (org.voltdb.utils.VoltFile): 2
EOFException (java.io.EOFException): 1
OutputStream (java.io.OutputStream): 1
Field (java.lang.reflect.Field): 1
InetSocketAddress (java.net.InetSocketAddress): 1
ClosedByInterruptException (java.nio.channels.ClosedByInterruptException): 1
ClosedSelectorException (java.nio.channels.ClosedSelectorException): 1