Example 6 with VoltFile

Use of org.voltdb.utils.VoltFile in project voltdb by VoltDB.

From the class LocalCluster, method startOne:

void startOne(int hostId, boolean clearLocalDataDirectories, StartAction startAction, boolean waitForReady, String placementGroup) throws IOException {
    PipeToFile ptf = null;
    CommandLine cmdln = (templateCmdLine.makeCopy());
    cmdln.setJavaProperty(clusterHostIdProperty, String.valueOf(hostId));
    if (isNewCli) {
        cmdln.m_startAction = StartAction.PROBE;
        cmdln.enableAdd(startAction == StartAction.JOIN);
        cmdln.hostCount(m_hostCount);
        String hostIdStr = cmdln.getJavaProperty(clusterHostIdProperty);
        String root = m_hostRoots.get(hostIdStr);
        // For the new CLI, don't pass a deployment file for probe.
        cmdln.voltdbRoot(root);
        cmdln.pathToDeployment(null);
        cmdln.setForceVoltdbCreate(clearLocalDataDirectories);
    }
    if (this.m_additionalProcessEnv != null) {
        for (String name : this.m_additionalProcessEnv.keySet()) {
            cmdln.setJavaProperty(name, this.m_additionalProcessEnv.get(name));
        }
    }
    try {
        cmdln.internalPort(internalPortGenerator.nextInternalPort(hostId));
        cmdln.coordinators(internalPortGenerator.getCoordinators());
        if (m_replicationPort != -1) {
            int index = m_hasLocalServer ? hostId + 1 : hostId;
            cmdln.drAgentStartPort(m_replicationPort + index);
        } else {
            // set the dragent port. it uses the start value and
            // the next two sequential port numbers - so burn those two.
            cmdln.drAgentStartPort(portGenerator.nextReplicationPort());
            portGenerator.next();
            portGenerator.next();
        }
        // add the ipc ports
        if (m_target == BackendTarget.NATIVE_EE_IPC) {
            // set 1 port for the EE process
            cmdln.ipcPort(portGenerator.next());
        }
        if (m_target == BackendTarget.NATIVE_EE_VALGRIND_IPC) {
            EEProcess proc = m_eeProcs.get(hostId);
            assert (proc != null);
            cmdln.m_ipcPort = proc.port();
        }
        cmdln.port(portGenerator.nextClient());
        cmdln.adminPort(portGenerator.nextAdmin());
        if (cmdln.m_httpPort != Constants.HTTP_PORT_DISABLED)
            cmdln.httpPort(portGenerator.nextHttp());
        cmdln.timestampSalt(getRandomTimestampSalt());
        cmdln.setPlacementGroup(placementGroup);
        if (m_debug) {
            cmdln.debugPort(portGenerator.next());
        }
        cmdln.zkport(portGenerator.nextZkPort());
        if (!isNewCli && startAction == StartAction.JOIN) {
            cmdln.startCommand(startAction);
            int portNoToRejoin = m_cmdLines.get(0).internalPort();
            cmdln.leader(":" + portNoToRejoin);
            cmdln.enableAdd(true);
        }
        // If local directories are being cleared,
        // generate a new subroot; otherwise reuse the existing directory.
        File subroot = null;
        if (!isNewCli) {
            if (m_filePrefix != null) {
                subroot = m_filePrefix;
                m_subRoots.add(subroot);
            } else if (clearLocalDataDirectories) {
                subroot = VoltFile.getNewSubroot();
                m_subRoots.add(subroot);
            } else {
                if (m_subRoots.size() <= hostId) {
                    m_subRoots.add(VoltFile.getNewSubroot());
                }
                subroot = m_subRoots.get(hostId);
            }
            cmdln.voltFilePrefix(subroot.getPath());
            cmdln.voltRoot(subroot.getPath() + File.separator + m_voltdbroot);
        }
        if ((m_versionOverrides != null) && (m_versionOverrides.length > hostId)) {
            assert (m_versionOverrides[hostId] != null);
            assert (m_versionCheckRegexOverrides[hostId] != null);
            cmdln.m_versionStringOverrideForTest = m_versionOverrides[hostId];
            cmdln.m_versionCompatibilityRegexOverrideForTest = m_versionCheckRegexOverrides[hostId];
            if ((m_buildStringOverrides != null) && (m_buildStringOverrides.length > hostId)) {
                assert (m_buildStringOverrides[hostId] != null);
                cmdln.m_buildStringOverrideForTest = m_buildStringOverrides[hostId];
            }
        }
        if ((m_modeOverrides != null) && (m_modeOverrides.length > hostId)) {
            assert (m_modeOverrides[hostId] != null);
            cmdln.m_modeOverrideForTest = m_modeOverrides[hostId];
        }
        if ((m_sitesperhostOverrides != null) && (m_sitesperhostOverrides.size() > hostId)) {
            assert (m_sitesperhostOverrides.containsKey(hostId));
            cmdln.m_sitesperhost = m_sitesperhostOverrides.get(hostId);
        }
        cmdln.setMissingHostCount(m_missingHostCount);
        m_cmdLines.add(cmdln);
        m_procBuilder.command().clear();
        List<String> cmdlnList = cmdln.createCommandLine();
        String cmdLineFull = "Start cmd host=" + String.valueOf(hostId) + " :";
        for (String element : cmdlnList) {
            assert (element != null);
            cmdLineFull += " " + element;
        }
        log.info(cmdLineFull);
        System.out.println(cmdLineFull);
        m_procBuilder.command().addAll(cmdlnList);
        // write output to obj/release/testoutput/<test name>-n.txt
        // this may need to be more unique? Also very useful to just
        // set this to a hardcoded path and use "tail -f" to debug.
        String testoutputdir = cmdln.buildDir() + File.separator + "testoutput";
        System.out.println("Process output will be redirected to: " + testoutputdir);
        // make sure the directory exists
        File dir = new File(testoutputdir);
        if (dir.exists()) {
            assert (dir.isDirectory());
        } else {
            boolean status = dir.mkdirs();
            assert (status);
        }
        File dirFile = new VoltFile(testoutputdir);
        if (dirFile.listFiles() != null) {
            for (File f : dirFile.listFiles()) {
                if (f.getName().startsWith(getName() + "-" + hostId)) {
                    f.delete();
                }
            }
        }
        Process proc = m_procBuilder.start();
        m_cluster.add(proc);
        String fileName = testoutputdir + File.separator + "LC-" + getFileName() + "-" + m_clusterId + "-" + hostId + "-" + "idx" + String.valueOf(perLocalClusterExtProcessIndex++) + ".txt";
        System.out.println("Process output can be found in: " + fileName);
        ptf = new PipeToFile(fileName, proc.getInputStream(), PipeToFile.m_initToken, false, proc);
        m_pipes.add(ptf);
        ptf.setName("ClusterPipe:" + String.valueOf(hostId));
        ptf.start();
    } catch (IOException ex) {
        log.error("Failed to start cluster process:" + ex.getMessage(), ex);
        assert (false);
    }
    if (waitForReady && (startAction == StartAction.JOIN || startAction == StartAction.PROBE || startAction == StartAction.REJOIN)) {
        waitOnPTFReady(ptf, true, System.currentTimeMillis(), System.currentTimeMillis(), hostId);
    }
    if (hostId > (m_hostCount - 1)) {
        m_hostCount++;
        //Host count changed, should recompile
        this.m_compiled = false;
    }
}
Also used : CommandLine(org.voltdb.utils.CommandLine) VoltFile(org.voltdb.utils.VoltFile) IOException(java.io.IOException) VoltFile(org.voltdb.utils.VoltFile) File(java.io.File)
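
The subroot logic above (the non-new-CLI branch) is how LocalCluster keeps each simulated host's files isolated on disk. Below is a minimal, hedged sketch of that pattern; it relies only on the VoltFile constructors and the static VoltFile.getNewSubroot() call that appear in this example, while the class name, the "voltdbroot" directory name, and the host count are illustrative rather than taken from the VoltDB sources.

import java.io.File;
import java.io.IOException;

import org.voltdb.utils.VoltFile;

public class SubrootSketch {
    public static void main(String[] args) throws IOException {
        for (int hostId = 0; hostId < 2; hostId++) {
            // Each simulated host gets a fresh subroot, as LocalCluster.startOne does
            // when clearLocalDataDirectories is set.
            File subroot = VoltFile.getNewSubroot();
            // Build the per-host voltdbroot underneath it, mirroring cmdln.voltRoot(...) above.
            File voltdbroot = new VoltFile(subroot, "voltdbroot");
            if (!voltdbroot.exists() && !voltdbroot.mkdirs()) {
                throw new IOException("Could not create " + voltdbroot.getPath());
            }
            System.out.println("host " + hostId + " -> " + voltdbroot.getPath());
        }
    }
}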

Example 7 with VoltFile

Use of org.voltdb.utils.VoltFile in project voltdb by VoltDB.

From the class RealVoltDB, method initialize:

/**
     * Initialize all the global components, then initialize all the m_sites.
     * @param config configuration that gets passed in from the command line.
     */
@Override
public void initialize(Configuration config) {
    hostLog.info("PID of this Volt process is " + CLibrary.getpid());
    ShutdownHooks.enableServerStopLogging();
    synchronized (m_startAndStopLock) {
        exitAfterMessage = false;
        // Handle multiple invocations of the server thread in the same JVM
        // by clearing the static variables/properties that ModuleManager
        // and Settings depend on.
        ConfigFactory.clearProperty(Settings.CONFIG_DIR);
        ModuleManager.resetCacheRoot();
        CipherExecutor.SERVER.shutdown();
        m_isRunningWithOldVerb = config.m_startAction.isLegacy();
        // check that this is a 64 bit VM
        if (System.getProperty("java.vm.name").contains("64") == false) {
            hostLog.fatal("You are running on an unsupported (probably 32 bit) JVM. Exiting.");
            System.exit(-1);
        }
        // Print the ASCII art, determine the edition, and check license availability.
        // None of the above applies to init.
        String edition = "Community Edition";
        if (config.m_startAction != StartAction.INITIALIZE) {
            consoleLog.l7dlog(Level.INFO, LogKeys.host_VoltDB_StartupString.name(), null);
            // load license API
            if (config.m_pathToLicense == null) {
                m_licenseApi = MiscUtils.licenseApiFactory();
                if (m_licenseApi == null) {
                    hostLog.fatal("Unable to open license file in default directories");
                }
            } else {
                m_licenseApi = MiscUtils.licenseApiFactory(config.m_pathToLicense);
                if (m_licenseApi == null) {
                    hostLog.fatal("Unable to open license file in provided path: " + config.m_pathToLicense);
                }
            }
            if (m_licenseApi == null) {
                hostLog.fatal("Please contact sales@voltdb.com to request a license.");
                VoltDB.crashLocalVoltDB("Failed to initialize license verifier. " + "See previous log message for details.", false, null);
            }
            if (config.m_isEnterprise) {
                if (m_licenseApi.isEnterprise())
                    edition = "Enterprise Edition";
                if (m_licenseApi.isPro())
                    edition = "Pro Edition";
                if (m_licenseApi.isEnterpriseTrial())
                    edition = "Enterprise Edition";
                if (m_licenseApi.isProTrial())
                    edition = "Pro Edition";
                if (m_licenseApi.isAWSMarketplace())
                    edition = "AWS Marketplace Pro Edition";
            }
            // this also prints out the license type on the console
            readBuildInfo(edition);
            // print out the licensee on the license
            if (config.m_isEnterprise) {
                String licensee = m_licenseApi.licensee();
                if ((licensee != null) && (licensee.length() > 0)) {
                    consoleLog.info(String.format("Licensed to: %s", licensee));
                }
            }
        }
        // Replay command line args that we can see
        StringBuilder sb = new StringBuilder(2048).append("Command line arguments: ");
        sb.append(System.getProperty("sun.java.command", "[not available]"));
        hostLog.info(sb.toString());
        List<String> iargs = ManagementFactory.getRuntimeMXBean().getInputArguments();
        sb.delete(0, sb.length()).append("Command line JVM arguments:");
        for (String iarg : iargs) sb.append(" ").append(iarg);
        if (iargs.size() > 0)
            hostLog.info(sb.toString());
        else
            hostLog.info("No JVM command line args known.");
        sb.delete(0, sb.length()).append("Command line JVM classpath: ");
        sb.append(System.getProperty("java.class.path", "[not available]"));
        hostLog.info(sb.toString());
        if (config.m_startAction == StartAction.INITIALIZE) {
            if (config.m_forceVoltdbCreate) {
                deleteInitializationMarkers(config);
            }
        }
        // If there's no deployment provide a default and put it under voltdbroot.
        if (config.m_pathToDeployment == null) {
            try {
                config.m_pathToDeployment = setupDefaultDeployment(hostLog, config.m_voltdbRoot);
                config.m_deploymentDefault = true;
            } catch (IOException e) {
                VoltDB.crashLocalVoltDB("Failed to write default deployment.", false, null);
                return;
            }
        }
        ReadDeploymentResults readDepl = readPrimedDeployment(config);
        if (config.m_startAction == StartAction.INITIALIZE) {
            if (config.m_forceVoltdbCreate && m_nodeSettings.clean()) {
                String msg = "Archived previous snapshot directory to " + m_nodeSettings.getSnapshoth() + ".1";
                consoleLog.info(msg);
                hostLog.info(msg);
            }
            stageDeploymentFileForInitialize(config, readDepl.deployment);
            stageSchemaFiles(config);
            stageInitializedMarker(config);
            hostLog.info("Initialized VoltDB root directory " + config.m_voltdbRoot.getPath());
            consoleLog.info("Initialized VoltDB root directory " + config.m_voltdbRoot.getPath());
            VoltDB.exit(0);
        }
        if (config.m_startAction.isLegacy()) {
            consoleLog.warn("The \"" + config.m_startAction.m_verb + "\" command is deprecated, please use \"init\" and \"start\" for your cluster operations.");
        }
        // config UUID is part of the status tracker.
        m_statusTracker = new NodeStateTracker();
        final File stagedCatalogLocation = new VoltFile(RealVoltDB.getStagedCatalogPath(config.m_voltdbRoot.getAbsolutePath()));
        if (config.m_startAction.isLegacy()) {
            File rootFH = CatalogUtil.getVoltDbRoot(readDepl.deployment.getPaths());
            File inzFH = new VoltFile(rootFH, VoltDB.INITIALIZED_MARKER);
            if (inzFH.exists()) {
                VoltDB.crashLocalVoltDB("Cannot use legacy start action " + config.m_startAction + " on voltdbroot " + rootFH + " that was initialized with the init command");
                return;
            }
            // Case where a primed deployment is given with -d: look in ../../ for the initialized marker.
            // Also check that the parent directories are config and voltdbroot.
            File cfile = (new File(config.m_pathToDeployment)).getParentFile();
            if (cfile != null) {
                rootFH = cfile.getParentFile();
                if ("config".equals(cfile.getName()) && VoltDB.DBROOT.equals(rootFH.getName())) {
                    inzFH = new VoltFile(rootFH, VoltDB.INITIALIZED_MARKER);
                    if (inzFH.exists()) {
                        VoltDB.crashLocalVoltDB("Can not use legacy start action " + config.m_startAction + " on voltdbroot " + rootFH + " that was initialized with the init command");
                        return;
                    }
                }
            }
            if (stagedCatalogLocation.isFile()) {
                hostLog.warn("Initialized schema is present, but is being ignored and may be removed.");
            }
        } else {
            assert (config.m_startAction == StartAction.PROBE);
            if (stagedCatalogLocation.isFile()) {
                assert (config.m_pathToCatalog == null) : config.m_pathToCatalog;
                config.m_pathToCatalog = stagedCatalogLocation.getAbsolutePath();
            }
        }
        List<String> failed = m_nodeSettings.ensureDirectoriesExist();
        if (!failed.isEmpty()) {
            String msg = "Unable to access or create the following directories:\n  - " + Joiner.on("\n  - ").join(failed);
            VoltDB.crashLocalVoltDB(msg);
            return;
        }
        if (config.m_hostCount == VoltDB.UNDEFINED) {
            config.m_hostCount = readDepl.deployment.getCluster().getHostcount();
        }
        // set the mode first thing
        m_mode = OperationMode.INITIALIZING;
        m_config = config;
        m_startMode = null;
        // set a bunch of things to null/empty/new for tests
        // which reuse the process
        m_safeMpTxnId = Long.MAX_VALUE;
        m_lastSeenMpTxnId = Long.MIN_VALUE;
        m_clientInterface = null;
        m_adminListener = null;
        m_commandLog = new DummyCommandLog();
        m_snmp = new DummySnmpTrapSender();
        m_messenger = null;
        m_opsRegistrar = new OpsRegistrar();
        m_snapshotCompletionMonitor = null;
        m_catalogContext = null;
        m_partitionCountStats = null;
        m_ioStats = null;
        m_memoryStats = null;
        m_commandLogStats = null;
        m_statsManager = null;
        m_restoreAgent = null;
        m_recoveryStartTime = System.currentTimeMillis();
        m_hostIdWithStartupCatalog = 0;
        m_pathToStartupCatalog = m_config.m_pathToCatalog;
        m_replicationActive = new AtomicBoolean(false);
        m_configLogger = null;
        ActivePlanRepository.clear();
        updateMaxThreadsLimit();
        // set up site structure
        final int computationThreads = Math.max(2, CoreUtils.availableProcessors() / 4);
        m_computationService = CoreUtils.getListeningExecutorService("Computation service thread", computationThreads, m_config.m_computationCoreBindings);
        // Set std-out/err to use the UTF-8 encoding and fail if UTF-8 isn't supported
        try {
            System.setOut(new PrintStream(System.out, true, "UTF-8"));
            System.setErr(new PrintStream(System.err, true, "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            hostLog.fatal("Support for the UTF-8 encoding is required for VoltDB. This means you are likely running an unsupported JVM. Exiting.");
            VoltDB.exit(-1);
        }
        m_snapshotCompletionMonitor = new SnapshotCompletionMonitor();
        // use CLI overrides for testing hotfix version compatibility
        if (m_config.m_versionStringOverrideForTest != null) {
            m_versionString = m_config.m_versionStringOverrideForTest;
        }
        if (m_config.m_versionCompatibilityRegexOverrideForTest != null) {
            m_hotfixableRegexPattern = m_config.m_versionCompatibilityRegexOverrideForTest;
        }
        if (m_config.m_buildStringOverrideForTest != null) {
            m_buildString = m_config.m_buildStringOverrideForTest;
        }
        // Prime cluster settings from configuration parameters
        // evaluate properties with the following sources in terms of priority
        // 1) properties from command line options
        // 2) properties from the cluster.properties files
        // 3) properties from the deployment file
        // this reads the file config/cluster.properties
        ClusterSettings fromPropertyFile = ClusterSettings.create();
        // handle the case where we recover clusters that were elastically expanded
        if (m_config.m_startAction.doesRecover()) {
            m_config.m_hostCount = fromPropertyFile.hostcount();
        }
        Map<String, String> fromCommandLine = m_config.asClusterSettingsMap();
        Map<String, String> fromDeploymentFile = CatalogUtil.asClusterSettingsMap(readDepl.deployment);
        ClusterSettings clusterSettings = ClusterSettings.create(fromCommandLine, fromPropertyFile.asMap(), fromDeploymentFile);
        // persist the merged settings
        clusterSettings.store();
        m_clusterSettings.set(clusterSettings, 1);
        MeshProber.Determination determination = buildClusterMesh(readDepl);
        if (m_config.m_startAction == StartAction.PROBE) {
            String action = "Starting a new database cluster";
            if (determination.startAction.doesRejoin()) {
                action = "Rejoining a running cluster";
            } else if (determination.startAction == StartAction.JOIN) {
                action = "Adding this node to a running cluster";
            } else if (determination.startAction.doesRecover()) {
                action = "Restarting the database cluster from the command logs";
            }
            hostLog.info(action);
            consoleLog.info(action);
        }
        m_config.m_startAction = determination.startAction;
        m_config.m_hostCount = determination.hostCount;
        m_terminusNonce = determination.terminusNonce;
        // determine if this is a rejoining node
        // (used for license check and later the actual rejoin)
        m_rejoining = m_config.m_startAction.doesRejoin();
        m_rejoinDataPending = m_config.m_startAction.doesJoin();
        m_joining = m_config.m_startAction == StartAction.JOIN;
        if (m_rejoining || m_joining) {
            m_statusTracker.setNodeState(NodeState.REJOINING);
        }
        //Register dummy agents immediately
        m_opsRegistrar.registerMailboxes(m_messenger);
        //Start validating the build string in the background
        final Future<?> buildStringValidation = validateBuildString(getBuildString(), m_messenger.getZK());
        // race to create start action nodes and then verify their compatibility.
        m_messenger.getZK().create(VoltZK.start_action, null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, new ZKUtil.StringCallback(), null);
        VoltZK.createStartActionNode(m_messenger.getZK(), m_messenger.getHostId(), m_config.m_startAction);
        validateStartAction();
        // durable means command logging is enabled.
        boolean durable = readDeploymentAndCreateStarterCatalogContext(config);
        if (config.m_isEnterprise && m_config.m_startAction.doesRequireEmptyDirectories() && !config.m_forceVoltdbCreate && durable) {
            managedPathsEmptyCheck(config);
        }
        // If a terminus is present we will recover from the shutdown save, so don't move.
        if (!durable && m_config.m_startAction.doesRecover() && determination.terminusNonce == null) {
            if (m_nodeSettings.clean()) {
                String msg = "Archiving old snapshots to " + m_nodeSettings.getSnapshoth() + ".1 and starting an empty database." + " Use voltadmin restore if you wish to restore an old database instance.";
                consoleLog.info(msg);
                hostLog.info(msg);
            }
        }
        // wait to make sure every host actually *sees* every other host's ZK node state.
        final int numberOfNodes = m_messenger.getLiveHostIds().size();
        Map<Integer, HostInfo> hostInfos = m_messenger.waitForGroupJoin(numberOfNodes);
        Map<Integer, String> hostGroups = Maps.newHashMap();
        Map<Integer, Integer> sitesPerHostMap = Maps.newHashMap();
        hostInfos.forEach((k, v) -> {
            hostGroups.put(k, v.m_group);
            sitesPerHostMap.put(k, v.m_localSitesCount);
        });
        if (m_messenger.isPaused() || m_config.m_isPaused) {
            setStartMode(OperationMode.PAUSED);
        }
        // Create the thread pool here. It's needed by buildClusterMesh()
        m_periodicWorkThread = CoreUtils.getScheduledThreadPoolExecutor("Periodic Work", 1, CoreUtils.SMALL_STACK_SIZE);
        m_periodicPriorityWorkThread = CoreUtils.getScheduledThreadPoolExecutor("Periodic Priority Work", 1, CoreUtils.SMALL_STACK_SIZE);
        Class<?> snapshotIOAgentClass = MiscUtils.loadProClass("org.voltdb.SnapshotIOAgentImpl", "Snapshot", true);
        if (snapshotIOAgentClass != null) {
            try {
                m_snapshotIOAgent = (SnapshotIOAgent) snapshotIOAgentClass.getConstructor(HostMessenger.class, long.class).newInstance(m_messenger, m_messenger.getHSIdForLocalSite(HostMessenger.SNAPSHOT_IO_AGENT_ID));
                m_messenger.createMailbox(m_snapshotIOAgent.getHSId(), m_snapshotIOAgent);
            } catch (Exception e) {
                VoltDB.crashLocalVoltDB("Failed to instantiate snapshot IO agent", true, e);
            }
        }
        try {
            SimpleDateFormat sdf = new SimpleDateFormat("EEE MMM d, yyyy");
            JSONObject jo = new JSONObject();
            jo.put("trial", m_licenseApi.isAnyKindOfTrial());
            jo.put("hostcount", m_licenseApi.maxHostcount());
            jo.put("commandlogging", m_licenseApi.isCommandLoggingAllowed());
            jo.put("wanreplication", m_licenseApi.isDrReplicationAllowed());
            jo.put("expiration", sdf.format(m_licenseApi.expires().getTime()));
            m_licenseInformation = jo.toString();
        } catch (JSONException ex) {
        //Ignore
        }
        // Create the GlobalServiceElector.  Do this here so we can register the MPI with it
        // when we construct it below
        m_globalServiceElector = new GlobalServiceElector(m_messenger.getZK(), m_messenger.getHostId());
        // Start the GlobalServiceElector.  Not sure where this will actually belong.
        try {
            m_globalServiceElector.start();
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB("Unable to start GlobalServiceElector", true, e);
        }
        // Always create a mailbox for elastic join data transfer
        if (m_config.m_isEnterprise) {
            long elasticHSId = m_messenger.getHSIdForLocalSite(HostMessenger.REBALANCE_SITE_ID);
            m_messenger.createMailbox(elasticHSId, new SiteMailbox(m_messenger, elasticHSId));
        }
        if (m_joining) {
            Class<?> elasticJoinCoordClass = MiscUtils.loadProClass("org.voltdb.join.ElasticJoinNodeCoordinator", "Elastic", false);
            try {
                Constructor<?> constructor = elasticJoinCoordClass.getConstructor(HostMessenger.class, String.class);
                m_joinCoordinator = (JoinCoordinator) constructor.newInstance(m_messenger, VoltDB.instance().getVoltDBRootPath());
                m_messenger.registerMailbox(m_joinCoordinator);
                m_joinCoordinator.initialize(m_catalogContext.getDeployment().getCluster().getKfactor());
            } catch (Exception e) {
                VoltDB.crashLocalVoltDB("Failed to instantiate join coordinator", true, e);
            }
        }
        /*
             * Construct all the mailboxes for things that need to be globally addressable so they can be published
             * in one atomic shot.
             *
             * The starting state for partition assignments is statically derived from the host id generated
             * by host messenger and the k-factor/host count/sites per host. This starting state
             * is published to ZK as the topology metadata node.
             *
             * On join and rejoin the node has to inspect the topology meta node to find out what is missing
             * and then update the topology listing itself as the replica for those partitions.
             * Then it does a compare and set of the topology.
             *
             * Ning: topology may not reflect the true partitions in the cluster during join. So if another node
             * is trying to rejoin, it should rely on the cartographer's view to pick the partitions to replace.
             */
        AbstractTopology topo = getTopology(config.m_startAction, hostGroups, sitesPerHostMap, m_joinCoordinator);
        m_partitionsToSitesAtStartupForExportInit = new ArrayList<>();
        try {
            // IV2 mailbox stuff
            m_configuredReplicationFactor = topo.getReplicationFactor();
            m_cartographer = new Cartographer(m_messenger, m_configuredReplicationFactor, m_catalogContext.cluster.getNetworkpartition());
            m_partitionZeroLeader = new Supplier<Boolean>() {

                @Override
                public Boolean get() {
                    return m_cartographer.isPartitionZeroLeader();
                }
            };
            List<Integer> partitions = null;
            if (m_rejoining) {
                m_configuredNumberOfPartitions = m_cartographer.getPartitionCount();
                partitions = recoverPartitions(topo, hostGroups.get(m_messenger.getHostId()));
                if (partitions == null) {
                    partitions = m_cartographer.getIv2PartitionsToReplace(m_configuredReplicationFactor, m_catalogContext.getNodeSettings().getLocalSitesCount(), m_messenger.getHostId(), hostGroups);
                }
                if (partitions.size() == 0) {
                    VoltDB.crashLocalVoltDB("The VoltDB cluster already has enough nodes to satisfy " + "the requested k-safety factor of " + m_configuredReplicationFactor + ".\n" + "No more nodes can join.", false, null);
                }
            } else {
                m_configuredNumberOfPartitions = topo.getPartitionCount();
                partitions = topo.getPartitionIdList(m_messenger.getHostId());
            }
            for (int ii = 0; ii < partitions.size(); ii++) {
                Integer partition = partitions.get(ii);
                m_iv2InitiatorStartingTxnIds.put(partition, TxnEgo.makeZero(partition).getTxnId());
            }
            m_iv2Initiators = createIv2Initiators(partitions, m_config.m_startAction, m_partitionsToSitesAtStartupForExportInit);
            m_iv2InitiatorStartingTxnIds.put(MpInitiator.MP_INIT_PID, TxnEgo.makeZero(MpInitiator.MP_INIT_PID).getTxnId());
            // Pass the local HSIds to the MPI so it can farm out buddy sites
            // to the RO MP site pool
            List<Long> localHSIds = new ArrayList<>();
            for (Initiator ii : m_iv2Initiators.values()) {
                localHSIds.add(ii.getInitiatorHSId());
            }
            m_MPI = new MpInitiator(m_messenger, localHSIds, getStatsAgent());
            m_iv2Initiators.put(MpInitiator.MP_INIT_PID, m_MPI);
            // Make a list of HSIds to join
            Map<Integer, Long> partsToHSIdsToRejoin = new HashMap<>();
            for (Initiator init : m_iv2Initiators.values()) {
                if (init.isRejoinable()) {
                    partsToHSIdsToRejoin.put(init.getPartitionId(), init.getInitiatorHSId());
                }
            }
            OnDemandBinaryLogger.path = VoltDB.instance().getVoltDBRootPath();
            if (m_rejoining) {
                SnapshotSaveAPI.recoveringSiteCount.set(partsToHSIdsToRejoin.size());
                hostLog.info("Set recovering site count to " + partsToHSIdsToRejoin.size());
                m_joinCoordinator = new Iv2RejoinCoordinator(m_messenger, partsToHSIdsToRejoin.values(), VoltDB.instance().getVoltDBRootPath(), m_config.m_startAction == StartAction.LIVE_REJOIN);
                m_joinCoordinator.initialize(m_configuredReplicationFactor);
                m_messenger.registerMailbox(m_joinCoordinator);
                if (m_config.m_startAction == StartAction.LIVE_REJOIN) {
                    hostLog.info("Using live rejoin.");
                } else {
                    hostLog.info("Using blocking rejoin.");
                }
            } else if (m_joining) {
                m_joinCoordinator.setPartitionsToHSIds(partsToHSIdsToRejoin);
            }
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
        }
        // do the many init tasks in the Inits class
        Inits inits = new Inits(m_statusTracker, this, 1, durable);
        inits.doInitializationWork();
        // Need the catalog so that we know how many tables so we can guess at the necessary heap size
        // This is done under Inits.doInitializationWork(), so need to wait until we get here.
        // Current calculation needs pro/community knowledge, number of tables, and the sites/host,
        // which is the number of initiators (minus the possibly idle MPI initiator)
        checkHeapSanity(MiscUtils.isPro(), m_catalogContext.tables.size(), (m_iv2Initiators.size() - 1), m_configuredReplicationFactor);
        if (m_joining && getReplicationRole() == ReplicationRole.REPLICA) {
            VoltDB.crashLocalVoltDB("Elastic join is prohibited on a replica cluster.", false, null);
        }
        collectLocalNetworkMetadata();
        /*
             * Construct an adhoc planner for the initial catalog
             */
        final CatalogSpecificPlanner csp = new CatalogSpecificPlanner(m_catalogContext);
        // Initialize stats
        m_ioStats = new IOStats();
        getStatsAgent().registerStatsSource(StatsSelector.IOSTATS, 0, m_ioStats);
        m_memoryStats = new MemoryStats();
        getStatsAgent().registerStatsSource(StatsSelector.MEMORY, 0, m_memoryStats);
        getStatsAgent().registerStatsSource(StatsSelector.TOPO, 0, m_cartographer);
        m_partitionCountStats = new PartitionCountStats(m_cartographer);
        getStatsAgent().registerStatsSource(StatsSelector.PARTITIONCOUNT, 0, m_partitionCountStats);
        m_initiatorStats = new InitiatorStats(m_myHostId);
        m_liveClientsStats = new LiveClientsStats();
        getStatsAgent().registerStatsSource(StatsSelector.LIVECLIENTS, 0, m_liveClientsStats);
        m_latencyStats = new LatencyStats();
        getStatsAgent().registerStatsSource(StatsSelector.LATENCY, 0, m_latencyStats);
        m_latencyCompressedStats = new LatencyHistogramStats(m_myHostId);
        getStatsAgent().registerStatsSource(StatsSelector.LATENCY_COMPRESSED, 0, m_latencyCompressedStats);
        m_latencyHistogramStats = new LatencyUncompressedHistogramStats(m_myHostId);
        getStatsAgent().registerStatsSource(StatsSelector.LATENCY_HISTOGRAM, 0, m_latencyHistogramStats);
        BalancePartitionsStatistics rebalanceStats = new BalancePartitionsStatistics();
        getStatsAgent().registerStatsSource(StatsSelector.REBALANCE, 0, rebalanceStats);
        KSafetyStats kSafetyStats = new KSafetyStats();
        getStatsAgent().registerStatsSource(StatsSelector.KSAFETY, 0, kSafetyStats);
        m_cpuStats = new CpuStats();
        getStatsAgent().registerStatsSource(StatsSelector.CPU, 0, m_cpuStats);
        m_gcStats = new GcStats();
        getStatsAgent().registerStatsSource(StatsSelector.GC, 0, m_gcStats);
        // ENG-6321
        m_commandLogStats = new CommandLogStats(m_commandLog);
        getStatsAgent().registerStatsSource(StatsSelector.COMMANDLOG, 0, m_commandLogStats);
        // Dummy DRCONSUMER stats
        replaceDRConsumerStatsWithDummy();
        /*
             * Initialize the command log on rejoin and join before configuring the IV2
             * initiators.  This will prevent them from receiving transactions
             * which need logging before the internal file writers are
             * initialized.  Root cause of ENG-4136.
             *
             * If sync command log is on, not initializing the command log before the initiators
             * are up would cause deadlock.
             */
        if ((m_commandLog != null) && (m_commandLog.needsInitialization())) {
            consoleLog.l7dlog(Level.INFO, LogKeys.host_VoltDB_StayTunedForLogging.name(), null);
        } else {
            consoleLog.l7dlog(Level.INFO, LogKeys.host_VoltDB_StayTunedForNoLogging.name(), null);
        }
        if (m_commandLog != null && (m_rejoining || m_joining)) {
            //On rejoin the starting IDs are all 0 so technically it will load any snapshot
            //but the newest snapshot will always be the truncation snapshot taken after rejoin
            //completes at which point the node will mark itself as actually recovered.
            //
            // Use the partition count from the cluster config instead of the cartographer
            // here. Since the initiators are not started yet, the cartographer still doesn't
            // know about the new partitions at this point.
            m_commandLog.initForRejoin(m_catalogContext.cluster.getLogconfig().get("log").getLogsize(), Long.MIN_VALUE, m_configuredNumberOfPartitions, true, m_config.m_commandLogBinding, m_iv2InitiatorStartingTxnIds);
        }
        // Create the client interface
        try {
            InetAddress clientIntf = null;
            InetAddress adminIntf = null;
            if (!m_config.m_externalInterface.trim().equals("")) {
                clientIntf = InetAddress.getByName(m_config.m_externalInterface);
                // client and admin interfaces are the same by default.
                adminIntf = clientIntf;
            }
            // If the user has specified host:port on the command line, override the client and admin interfaces.
            if (m_config.m_clientInterface != null && m_config.m_clientInterface.trim().length() > 0) {
                clientIntf = InetAddress.getByName(m_config.m_clientInterface);
            }
            if (m_config.m_adminInterface != null && m_config.m_adminInterface.trim().length() > 0) {
                adminIntf = InetAddress.getByName(m_config.m_adminInterface);
            }
            m_clientInterface = ClientInterface.create(m_messenger, m_catalogContext, getReplicationRole(), m_cartographer, clientIntf, config.m_port, adminIntf, config.m_adminPort, m_config.m_sslContext);
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
        }
        // DR overflow directory
        if (VoltDB.instance().getLicenseApi().isDrReplicationAllowed()) {
            try {
                Class<?> ndrgwClass = null;
                ndrgwClass = Class.forName("org.voltdb.dr2.DRProducer");
                Constructor<?> ndrgwConstructor = ndrgwClass.getConstructor(File.class, File.class, boolean.class, boolean.class, boolean.class, int.class, int.class);
                m_producerDRGateway = (ProducerDRGateway) ndrgwConstructor.newInstance(new VoltFile(VoltDB.instance().getDROverflowPath()), new VoltFile(VoltDB.instance().getSnapshotPath()), (m_config.m_startAction.doesRecover() && (durable || determination.terminusNonce != null)), m_config.m_startAction.doesRejoin(), m_replicationActive.get(), m_configuredNumberOfPartitions, (m_catalogContext.getClusterSettings().hostcount() - m_config.m_missingHostCount));
            } catch (Exception e) {
                VoltDB.crashLocalVoltDB("Unable to load DR system", true, e);
            }
        } else {
            // set up empty stats for the DR Producer
            getStatsAgent().registerStatsSource(StatsSelector.DRPRODUCERNODE, 0, new DRProducerStatsBase.DRProducerNodeStatsBase());
            getStatsAgent().registerStatsSource(StatsSelector.DRPRODUCERPARTITION, 0, new DRProducerStatsBase.DRProducerPartitionStatsBase());
        }
        m_drRoleStats = new DRRoleStats(this);
        getStatsAgent().registerStatsSource(StatsSelector.DRROLE, 0, m_drRoleStats);
        /*
             * Configure and start all the IV2 sites
             */
        try {
            final String serializedCatalog = m_catalogContext.catalog.serialize();
            for (Initiator iv2init : m_iv2Initiators.values()) {
                iv2init.configure(getBackendTargetType(), m_catalogContext, serializedCatalog, csp, m_configuredNumberOfPartitions, m_config.m_startAction, getStatsAgent(), m_memoryStats, m_commandLog, m_config.m_executionCoreBindings.poll(), shouldInitiatorCreateMPDRGateway(iv2init));
            }
            // LeaderAppointer startup blocks if the initiators are not initialized.
            // So create the LeaderAppointer after the initiators.
            boolean expectSyncSnapshot = getReplicationRole() == ReplicationRole.REPLICA && config.m_startAction == StartAction.CREATE;
            m_leaderAppointer = new LeaderAppointer(m_messenger, m_configuredNumberOfPartitions, m_catalogContext.getDeployment().getCluster().getKfactor(), topo.topologyToJSON(), m_MPI, kSafetyStats, expectSyncSnapshot);
            m_globalServiceElector.registerService(m_leaderAppointer);
        } catch (Exception e) {
            Throwable toLog = e;
            if (e instanceof ExecutionException) {
                toLog = ((ExecutionException) e).getCause();
            }
            VoltDB.crashLocalVoltDB("Error configuring IV2 initiator.", true, toLog);
        }
        // Create the statistics manager and register it to JMX registry
        m_statsManager = null;
        try {
            final Class<?> statsManagerClass = MiscUtils.loadProClass("org.voltdb.management.JMXStatsManager", "JMX", true);
            if (statsManagerClass != null && !DISABLE_JMX) {
                m_statsManager = (StatsManager) statsManagerClass.newInstance();
                m_statsManager.initialize();
            }
        } catch (Exception e) {
        //JMXStatsManager will log and we continue.
        }
        try {
            m_snapshotCompletionMonitor.init(m_messenger.getZK());
        } catch (Exception e) {
            hostLog.fatal("Error initializing snapshot completion monitor", e);
            VoltDB.crashLocalVoltDB("Error initializing snapshot completion monitor", true, e);
        }
        /*
             * Make sure the build string successfully validated
             * before continuing to do operations
             * that might return wrong answers or lose data.
             */
        try {
            buildStringValidation.get();
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB("Failed to validate cluster build string", false, e);
        }
        // Wait for the expected hosts to be ready so that the secondary connections can be created.
        if (m_joining) {
            int expectedHosts = m_configuredReplicationFactor + 1;
            m_messenger.waitForJoiningHostsToBeReady(expectedHosts, this.m_myHostId);
        } else if (!m_rejoining) {
            // initial start or recover
            int expectedHosts = m_catalogContext.getClusterSettings().hostcount() - m_config.m_missingHostCount;
            m_messenger.waitForAllHostsToBeReady(expectedHosts);
        }
        // Create secondary connections within partition group
        createSecondaryConnections(m_rejoining);
        if (!m_joining && (m_cartographer.getPartitionCount()) != m_configuredNumberOfPartitions) {
            for (Map.Entry<Integer, ImmutableList<Long>> entry : getSiteTrackerForSnapshot().m_partitionsToSitesImmutable.entrySet()) {
                hostLog.info(entry.getKey() + " -- " + CoreUtils.hsIdCollectionToString(entry.getValue()));
            }
            VoltDB.crashGlobalVoltDB("Mismatch between configured number of partitions (" + m_configuredNumberOfPartitions + ") and actual (" + m_cartographer.getPartitionCount() + ")", true, null);
        }
        schedulePeriodicWorks();
        m_clientInterface.schedulePeriodicWorks();
        // print out a bunch of useful system info
        logDebuggingInfo(m_config.m_adminPort, m_config.m_httpPort, m_httpPortExtraLogMessage, m_jsonEnabled);
        // warn the user on the console if k=0 or if no command logging
        if (m_configuredReplicationFactor == 0) {
            consoleLog.warn("This is not a highly available cluster. K-Safety is set to 0.");
        }
        boolean usingCommandLog = m_config.m_isEnterprise && (m_catalogContext.cluster.getLogconfig() != null) && (m_catalogContext.cluster.getLogconfig().get("log") != null) && m_catalogContext.cluster.getLogconfig().get("log").getEnabled();
        if (!usingCommandLog) {
            // figure out if using a snapshot schedule
            boolean usingPeriodicSnapshots = false;
            for (SnapshotSchedule ss : m_catalogContext.database.getSnapshotschedule()) {
                if (ss.getEnabled()) {
                    usingPeriodicSnapshots = true;
                }
            }
            // print the right warning depending on durability settings
            if (usingPeriodicSnapshots) {
                consoleLog.warn("Durability is limited to periodic snapshots. Command logging is off.");
            } else {
                consoleLog.warn("Durability is turned off. Command logging is off.");
            }
        }
        // warn if cluster is partitionable, but partition detection is off
        if ((m_catalogContext.cluster.getNetworkpartition() == false) && (m_configuredReplicationFactor > 0)) {
            hostLog.warn("Running a redundant (k-safe) cluster with network " + "partition detection disabled is not recommended for production use.");
        // we decided not to include the stronger language below for the 3.0 version (ENG-4215)
        //hostLog.warn("With partition detection disabled, data may be lost or " +
        //      "corrupted by certain classes of network failures.");
        }
        assert (m_clientInterface != null);
        m_clientInterface.initializeSnapshotDaemon(m_messenger, m_globalServiceElector);
        // Start elastic join service
        try {
            if (m_config.m_isEnterprise && TheHashinator.getCurrentConfig().type == HashinatorType.ELASTIC) {
                Class<?> elasticServiceClass = MiscUtils.loadProClass("org.voltdb.join.ElasticJoinCoordinator", "Elastic join", false);
                if (elasticServiceClass == null) {
                    VoltDB.crashLocalVoltDB("Missing the ElasticJoinCoordinator class file in the enterprise " + "edition", false, null);
                }
                Constructor<?> constructor = elasticServiceClass.getConstructor(HostMessenger.class, ClientInterface.class, Cartographer.class, BalancePartitionsStatistics.class, String.class, int.class, Supplier.class);
                m_elasticJoinService = (ElasticJoinService) constructor.newInstance(m_messenger, m_clientInterface, m_cartographer, rebalanceStats, VoltDB.instance().getCommandLogSnapshotPath(), m_catalogContext.getDeployment().getCluster().getKfactor(), m_clusterSettings);
                m_elasticJoinService.updateConfig(m_catalogContext);
            }
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB("Failed to instantiate elastic join service", false, e);
        }
        // set additional restore agent stuff
        if (m_restoreAgent != null) {
            m_restoreAgent.setInitiator(new Iv2TransactionCreator(m_clientInterface));
        }
        // Start the stats agent at the end, after everything has been constructed
        m_opsRegistrar.setDummyMode(false);
        m_configLogger = new Thread(new ConfigLogging());
        m_configLogger.start();
        scheduleDailyLoggingWorkInNextCheckTime();
    }
}
Also used : ClusterSettings(org.voltdb.settings.ClusterSettings) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ZKUtil(org.voltcore.zk.ZKUtil) InitiatorStats(org.voltdb.dtxn.InitiatorStats) SiteMailbox(org.voltcore.messaging.SiteMailbox) MpInitiator(org.voltdb.iv2.MpInitiator) SpInitiator(org.voltdb.iv2.SpInitiator) Initiator(org.voltdb.iv2.Initiator) BaseInitiator(org.voltdb.iv2.BaseInitiator) HostMessenger(org.voltcore.messaging.HostMessenger) KSafetyStats(org.voltdb.iv2.KSafetyStats) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) LeaderAppointer(org.voltdb.iv2.LeaderAppointer) UnsupportedEncodingException(java.io.UnsupportedEncodingException) MpInitiator(org.voltdb.iv2.MpInitiator) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) DummySnmpTrapSender(org.voltdb.snmp.DummySnmpTrapSender) JSONObject(org.json_voltpatches.JSONObject) Iv2RejoinCoordinator(org.voltdb.rejoin.Iv2RejoinCoordinator) VoltFile(org.voltdb.utils.VoltFile) File(java.io.File) InetAddress(java.net.InetAddress) Map(java.util.Map) CatalogMap(org.voltdb.catalog.CatalogMap) TreeMap(java.util.TreeMap) ImmutableMap(com.google_voltpatches.common.collect.ImmutableMap) SortedMap(java.util.SortedMap) HashMap(java.util.HashMap) ImmutableList(com.google_voltpatches.common.collect.ImmutableList) BalancePartitionsStatistics(org.voltdb.join.BalancePartitionsStatistics) LatencyHistogramStats(org.voltdb.dtxn.LatencyHistogramStats) Cartographer(org.voltdb.iv2.Cartographer) LatencyStats(org.voltdb.dtxn.LatencyStats) SnapshotSchedule(org.voltdb.catalog.SnapshotSchedule) ExecutionException(java.util.concurrent.ExecutionException) PrintStream(java.io.PrintStream) JSONException(org.json_voltpatches.JSONException) IOException(java.io.IOException) MeshProber(org.voltdb.probe.MeshProber) SocketException(java.net.SocketException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) JSONException(org.json_voltpatches.JSONException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) KeeperException(org.apache.zookeeper_voltpatches.KeeperException) SettingsException(org.voltdb.settings.SettingsException) VoltFile(org.voltdb.utils.VoltFile) LatencyUncompressedHistogramStats(org.voltdb.dtxn.LatencyUncompressedHistogramStats) HostInfo(org.voltcore.messaging.HostMessenger.HostInfo) SimpleDateFormat(java.text.SimpleDateFormat)
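
One small pattern worth pulling out of this long method is the legacy-start guard: a legacy action such as create or recover refuses to run on a voltdbroot that was prepared with the init command, which it detects via a marker file. Below is a hedged sketch of just that check; the VoltFile constructor and the VoltDB.INITIALIZED_MARKER constant come from the code above, while the helper class and method names are invented for illustration and the constant is assumed to be visible to the calling package.

import java.io.File;

import org.voltdb.VoltDB;
import org.voltdb.utils.VoltFile;

final class LegacyStartGuard {
    // Returns true if this voltdbroot was prepared by "voltdb init", in which case
    // RealVoltDB.initialize() above rejects legacy start actions such as create or recover.
    static boolean wasInitializedWithInit(File voltdbRoot) {
        File marker = new VoltFile(voltdbRoot, VoltDB.INITIALIZED_MARKER);
        return marker.exists();
    }
}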

Example 8 with VoltFile

Use of org.voltdb.utils.VoltFile in project voltdb by VoltDB.

From the class TestInitStartAction, method testInitStartAction:

/** Tests starting an empty database with the NewCLI commands "init" and "start",
     * plus a few error cases.
     */
@Test
public void testInitStartAction() throws Exception {
    File deplFH = new VoltFile(new VoltFile(new VoltFile(rootDH, "voltdbroot"), "config"), "deployment.xml");
    Configuration c1 = new Configuration(new String[] { "initialize", "voltdbroot", rootDH.getPath(), "force", "deployment", legacyDeploymentFH.getPath() });
    ServerThread server = new ServerThread(c1);
    server.setUncaughtExceptionHandler(handleUncaught);
    server.start();
    server.join();
    expectSimulatedExit(0);
    assertTrue(deplFH.exists() && deplFH.isFile() && deplFH.canRead());
    if (c1.m_isEnterprise) {
        assertTrue(cmdlogDH.exists() && cmdlogDH.isDirectory() && cmdlogDH.canRead() && cmdlogDH.canWrite() && cmdlogDH.canExecute());
        for (int i = 0; i < 10; ++i) {
            new FileOutputStream(new File(cmdlogDH, String.format("dummy-%02d.log", i))).close();
        }
        assertEquals(10, cmdlogDH.list().length);
    }
    serverException.set(null);
    // server thread sets m_forceVoltdbCreate to true by default
    c1 = new Configuration(new String[] { "initialize", "voltdbroot", rootDH.getPath(), "force", "deployment", legacyDeploymentFH.getPath() });
    assertTrue(c1.m_forceVoltdbCreate);
    server = new ServerThread(c1);
    server.setUncaughtExceptionHandler(handleUncaught);
    server.start();
    server.join();
    expectSimulatedExit(0);
    assertTrue(deplFH.exists() && deplFH.isFile() && deplFH.canRead());
    if (c1.m_isEnterprise) {
        assertTrue(cmdlogDH.exists() && cmdlogDH.isDirectory() && cmdlogDH.canRead() && cmdlogDH.canWrite() && cmdlogDH.canExecute());
        assertEquals(0, cmdlogDH.list().length);
    }
    try {
        c1 = new Configuration(new String[] { "initialize", "voltdbroot", rootDH.getPath() });
        fail("did not detect preexisting initialization");
    } catch (VoltDB.SimulatedExitException e) {
        assertEquals(-1, e.getStatus());
    }
    VoltDB.wasCrashCalled = false;
    VoltDB.crashMessage = null;
    serverException.set(null);
    c1 = new Configuration(new String[] { "create", "deployment", legacyDeploymentFH.getPath(), "host", "localhost" });
    server = new ServerThread(c1);
    server.setUncaughtExceptionHandler(handleUncaught);
    server.start();
    server.join();
    assertNotNull(serverException.get());
    assertTrue(serverException.get() instanceof AssertionError);
    assertTrue(VoltDB.wasCrashCalled);
    assertTrue(VoltDB.crashMessage.contains("Cannot use legacy start action"));
    if (!c1.m_isEnterprise)
        return;
    clearCrash();
    c1 = new Configuration(new String[] { "recover", "deployment", legacyDeploymentFH.getPath(), "host", "localhost" });
    server = new ServerThread(c1);
    server.setUncaughtExceptionHandler(handleUncaught);
    server.start();
    server.join();
    assertNotNull(serverException.get());
    assertTrue(serverException.get() instanceof AssertionError);
    assertTrue(VoltDB.wasCrashCalled);
    assertTrue(VoltDB.crashMessage.contains("Cannot use legacy start action"));
    // this tests which actions should be considered legacy
    EnumSet<StartAction> legacyOnes = EnumSet.complementOf(EnumSet.of(StartAction.INITIALIZE, StartAction.PROBE, StartAction.GET));
    assertTrue(legacyOnes.stream().allMatch(StartAction::isLegacy));
}
Also used : Configuration(org.voltdb.VoltDB.Configuration) VoltFile(org.voltdb.utils.VoltFile) FileOutputStream(java.io.FileOutputStream) SimulatedExitException(org.voltdb.VoltDB.SimulatedExitException) VoltFile(org.voltdb.utils.VoltFile) File(java.io.File) Test(org.junit.Test)

Example 9 with VoltFile

Use of org.voltdb.utils.VoltFile in project voltdb by VoltDB.

From the class RealVoltDB, method outputDeployment:

private int outputDeployment(Configuration config) {
    try {
        File configInfoDir = new VoltFile(config.m_voltdbRoot, Constants.CONFIG_DIR);
        File depFH = new VoltFile(configInfoDir, "deployment.xml");
        if (!depFH.isFile() || !depFH.canRead()) {
            consoleLog.fatal("Failed to get configuration or deployment configuration is invalid. " + depFH.getAbsolutePath());
            return -1;
        }
        config.m_pathToDeployment = depFH.getCanonicalPath();
    } catch (IOException e) {
        consoleLog.fatal("Failed to read deployment: " + e.getMessage());
        return -1;
    }
    ReadDeploymentResults readDepl = readPrimedDeployment(config);
    try {
        DeploymentType dt = CatalogUtil.updateRuntimeDeploymentPaths(readDepl.deployment);
        // We don't have a catalog context, so the host count is not there.
        String out;
        if ((out = CatalogUtil.getDeployment(dt, true)) != null) {
            if ((new File(config.m_getOutput)).exists() && !config.m_forceGetCreate) {
                consoleLog.fatal("Failed to save deployment, file already exists: " + config.m_getOutput);
                return -1;
            }
            try (FileOutputStream fos = new FileOutputStream(config.m_getOutput.trim())) {
                fos.write(out.getBytes());
            } catch (IOException e) {
                consoleLog.fatal("Failed to write deployment to " + config.m_getOutput + " : " + e.getMessage());
                return -1;
            }
            consoleLog.info("Deployment configuration saved in " + config.m_getOutput.trim());
        } else {
            consoleLog.fatal("Failed to get configuration or deployment configuration is invalid.");
            return -1;
        }
    } catch (Exception e) {
        consoleLog.fatal("Failed to get configuration or deployment configuration is invalid. " + "Please make sure voltdbroot is a valid directory. " + e.getMessage());
        return -1;
    }
    return 0;
}
Also used : VoltFile(org.voltdb.utils.VoltFile) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) DeploymentType(org.voltdb.compiler.deploymentfile.DeploymentType) VoltFile(org.voltdb.utils.VoltFile) File(java.io.File) SocketException(java.net.SocketException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) JSONException(org.json_voltpatches.JSONException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) KeeperException(org.apache.zookeeper_voltpatches.KeeperException) SettingsException(org.voltdb.settings.SettingsException)

Example 10 with VoltFile

Use of org.voltdb.utils.VoltFile in project voltdb by VoltDB.

From the class RealVoltDB, method setupDefaultDeployment:

/**
     * Create a default deployment.xml file in voltdbroot if the deployment path is null.
     *
     * @return path to the default deployment file
     * @throws IOException
     */
static String setupDefaultDeployment(VoltLogger logger, File voltdbroot) throws IOException {
    File configInfoDir = new VoltFile(voltdbroot, Constants.CONFIG_DIR);
    configInfoDir.mkdirs();
    File depFH = new VoltFile(configInfoDir, "deployment.xml");
    if (!depFH.exists()) {
        logger.info("Generating default deployment file \"" + depFH.getAbsolutePath() + "\"");
        try (BufferedWriter bw = new BufferedWriter(new FileWriter(depFH))) {
            for (String line : defaultDeploymentXML) {
                bw.write(line);
                bw.newLine();
            }
        }
    }
    return depFH.getAbsolutePath();
}
Also used : VoltFile(org.voltdb.utils.VoltFile) FileWriter(java.io.FileWriter) VoltFile(org.voltdb.utils.VoltFile) File(java.io.File) BufferedWriter(java.io.BufferedWriter)
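
For context on how this helper is used, Example 7 calls it when no deployment path was supplied; a condensed, hedged sketch of that call site is shown below. It assumes the hostLog and config fields shown in Example 7 and that the caller lives in the same class or package as setupDefaultDeployment, and it drops the early return that the real code performs after a crash.

// Inside RealVoltDB.initialize(): fall back to a generated deployment.xml under voltdbroot.
if (config.m_pathToDeployment == null) {
    try {
        config.m_pathToDeployment = setupDefaultDeployment(hostLog, config.m_voltdbRoot);
        config.m_deploymentDefault = true;
    } catch (IOException e) {
        VoltDB.crashLocalVoltDB("Failed to write default deployment.", false, null);
    }
}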

Aggregations

File (java.io.File): 19
VoltFile (org.voltdb.utils.VoltFile): 19
IOException (java.io.IOException): 8
FileOutputStream (java.io.FileOutputStream): 5
ByteBuffer (java.nio.ByteBuffer): 3
ArrayList (java.util.ArrayList): 3
Map (java.util.Map): 3
JSONException (org.json_voltpatches.JSONException): 3
FileWriter (java.io.FileWriter): 2
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 2
SocketException (java.net.SocketException): 2
HashMap (java.util.HashMap): 2
TreeMap (java.util.TreeMap): 2
ExecutionException (java.util.concurrent.ExecutionException): 2
KeeperException (org.apache.zookeeper_voltpatches.KeeperException): 2
JSONObject (org.json_voltpatches.JSONObject): 2
CatalogMap (org.voltdb.catalog.CatalogMap): 2
VoltCompiler (org.voltdb.compiler.VoltCompiler): 2
InMemoryJarfile (org.voltdb.utils.InMemoryJarfile): 2
ImmutableList (com.google_voltpatches.common.collect.ImmutableList): 1