
Example 1 with Pair

Use of org.voltcore.utils.Pair in project voltdb by VoltDB.

From the class ExtensibleSnapshotDigestData, method writeExportSequenceNumbersToSnapshot:

private void writeExportSequenceNumbersToSnapshot(JSONStringer stringer) throws IOException {
    try {
        stringer.key("exportSequenceNumbers").array();
        for (Map.Entry<String, Map<Integer, Pair<Long, Long>>> entry : m_exportSequenceNumbers.entrySet()) {
            stringer.object();
            stringer.keySymbolValuePair("exportTableName", entry.getKey());
            stringer.key("sequenceNumberPerPartition").array();
            for (Map.Entry<Integer, Pair<Long, Long>> sequenceNumber : entry.getValue().entrySet()) {
                stringer.object();
                stringer.keySymbolValuePair("partition", sequenceNumber.getKey());
                // First value is the ack offset, which matters for pauseless rejoin but not for persistence
                stringer.keySymbolValuePair("exportSequenceNumber", sequenceNumber.getValue().getSecond());
                stringer.endObject();
            }
            stringer.endArray();
            stringer.endObject();
        }
        stringer.endArray();
    } catch (JSONException e) {
        throw new IOException(e);
    }
}
Also used : JSONException(org.json_voltpatches.JSONException) IOException(java.io.IOException) Map(java.util.Map) HashMap(java.util.HashMap) Pair(org.voltcore.utils.Pair)
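
For reference, here is a minimal, self-contained sketch of the nested map this method walks. The table name, partition ids, and offsets are made up; only the Pair API visible in these examples (Pair.of, getSecond) is assumed.

import java.util.HashMap;
import java.util.Map;

import org.voltcore.utils.Pair;

public class ExportSequenceNumbersSketch {
    public static void main(String[] args) {
        // Table name -> (partition id -> (ack offset, export sequence number)).
        Map<String, Map<Integer, Pair<Long, Long>>> exportSequenceNumbers = new HashMap<>();

        Map<Integer, Pair<Long, Long>> perPartition = new HashMap<>();
        perPartition.put(0, Pair.of(1024L, 4096L));   // illustrative values for partition 0
        perPartition.put(1, Pair.of(2048L, 8192L));   // illustrative values for partition 1
        exportSequenceNumbers.put("ORDERS_EXPORT", perPartition);

        // Like writeExportSequenceNumbersToSnapshot, persist only the second element;
        // the first (ack offset) matters for pauseless rejoin, not for persistence.
        for (Map.Entry<Integer, Pair<Long, Long>> e : perPartition.entrySet()) {
            System.out.println("partition " + e.getKey()
                    + " exportSequenceNumber " + e.getValue().getSecond());
        }
    }
}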

Example 2 with Pair

Use of org.voltcore.utils.Pair in project voltdb by VoltDB.

From the class VoltZK, method updateClusterMetadata:

public static void updateClusterMetadata(Map<Integer, String> clusterMetadata) throws Exception {
    ZooKeeper zk = VoltDB.instance().getHostMessenger().getZK();
    List<String> metadataNodes = zk.getChildren(VoltZK.cluster_metadata, false);
    Set<Integer> hostIds = new HashSet<Integer>();
    for (String hostId : metadataNodes) {
        hostIds.add(Integer.valueOf(hostId));
    }
    /*
     * Remove anything that is no longer part of the cluster
     */
    Set<Integer> keySetCopy = new HashSet<Integer>(clusterMetadata.keySet());
    keySetCopy.removeAll(hostIds);
    for (Integer failedHostId : keySetCopy) {
        clusterMetadata.remove(failedHostId);
    }
    /*
     * Add anything that is new
     */
    Set<Integer> hostIdsCopy = new HashSet<Integer>(hostIds);
    hostIdsCopy.removeAll(clusterMetadata.keySet());
    List<Pair<Integer, ZKUtil.ByteArrayCallback>> callbacks = new ArrayList<Pair<Integer, ZKUtil.ByteArrayCallback>>();
    for (Integer hostId : hostIdsCopy) {
        ZKUtil.ByteArrayCallback cb = new ZKUtil.ByteArrayCallback();
        callbacks.add(Pair.of(hostId, cb));
        zk.getData(VoltZK.cluster_metadata + "/" + hostId, false, cb, null);
    }
    for (Pair<Integer, ZKUtil.ByteArrayCallback> p : callbacks) {
        Integer hostId = p.getFirst();
        ZKUtil.ByteArrayCallback cb = p.getSecond();
        try {
            clusterMetadata.put(hostId, new String(cb.getData(), "UTF-8"));
        } catch (KeeperException.NoNodeException e) {
            // Host disappeared between listing the children and reading its data; skip it.
        }
    }
}
Also used : ArrayList(java.util.ArrayList) ZKUtil(org.voltcore.zk.ZKUtil) ZooKeeper(org.apache.zookeeper_voltpatches.ZooKeeper) KeeperException(org.apache.zookeeper_voltpatches.KeeperException) HashSet(java.util.HashSet) Pair(org.voltcore.utils.Pair)
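
The pattern worth noting here is issuing every asynchronous read first and collecting the results second, with Pair tying each callback to its host id. Below is a minimal sketch of that shape, using CompletableFuture as a stand-in for ZKUtil.ByteArrayCallback (the real code uses ZooKeeper's async getData) and made-up host ids.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

import org.voltcore.utils.Pair;

public class AsyncCollectSketch {
    public static void main(String[] args) throws Exception {
        Map<Integer, String> clusterMetadata = new HashMap<>();
        List<Pair<Integer, CompletableFuture<byte[]>>> callbacks = new ArrayList<>();

        // Phase 1: start every read without blocking, remembering which host it belongs to.
        for (final int hostId : new int[] { 1, 2, 3 }) {
            CompletableFuture<byte[]> cb = CompletableFuture.supplyAsync(
                    () -> ("metadata-for-host-" + hostId).getBytes(StandardCharsets.UTF_8));
            callbacks.add(Pair.of(hostId, cb));
        }

        // Phase 2: block on each result and fill the map, mirroring the loop over
        // Pair<Integer, ZKUtil.ByteArrayCallback> in updateClusterMetadata.
        for (Pair<Integer, CompletableFuture<byte[]>> p : callbacks) {
            clusterMetadata.put(p.getFirst(),
                    new String(p.getSecond().get(), StandardCharsets.UTF_8));
        }
        System.out.println(clusterMetadata);
    }
}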

Example 3 with Pair

Use of org.voltcore.utils.Pair in project voltdb by VoltDB.

From the class ExportManager, method updateProcessorConfig:

private void updateProcessorConfig(final CatalogMap<Connector> connectors) {
    Map<String, Pair<Properties, Set<String>>> config = new HashMap<>();
    // If the export source changes before the previous generation drains
    // then the outstanding exports will go to the new source when export resumes.
    int connCount = 0;
    int tableCount = 0;
    for (Connector conn : connectors) {
        // skip disabled connectors
        if (!conn.getEnabled() || conn.getTableinfo().isEmpty()) {
            continue;
        }
        connCount++;
        Properties properties = new Properties();
        Set<String> tables = new HashSet<>();
        String targetName = conn.getTypeName();
        for (ConnectorTableInfo ti : conn.getTableinfo()) {
            tables.add(ti.getTable().getTypeName());
            tableCount++;
        }
        if (conn.getConfig() != null) {
            Iterator<ConnectorProperty> connPropIt = conn.getConfig().iterator();
            while (connPropIt.hasNext()) {
                ConnectorProperty prop = connPropIt.next();
                if (!prop.getName().toLowerCase().contains("password")) {
                    properties.put(prop.getName(), prop.getValue().trim());
                } else {
                    // Don't trim passwords
                    properties.put(prop.getName(), prop.getValue());
                }
            }
        }
        Pair<Properties, Set<String>> connConfig = new Pair<>(properties, tables);
        config.put(targetName, connConfig);
    }
    m_connCount = connCount;
    m_exportTablesCount = tableCount;
    m_processorConfig = config;
}
Also used : Connector(org.voltdb.catalog.Connector) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) ConnectorProperty(org.voltdb.catalog.ConnectorProperty) Properties(java.util.Properties) ConnectorTableInfo(org.voltdb.catalog.ConnectorTableInfo) Pair(org.voltcore.utils.Pair)
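
Each map value pairs a connector's Properties with the set of table names it exports. A small sketch of what a consumer of that per-target Pair<Properties, Set<String>> sees, with an invented "file" target and property name:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import org.voltcore.utils.Pair;

public class ProcessorConfigSketch {
    public static void main(String[] args) {
        Map<String, Pair<Properties, Set<String>>> config = new HashMap<>();

        Properties properties = new Properties();
        properties.put("nonce", "MyExport");   // illustrative connector property
        Set<String> tables = new HashSet<>();
        tables.add("ORDERS");

        // Same shape as updateProcessorConfig: target name -> (properties, exported tables).
        config.put("file", new Pair<>(properties, tables));

        for (Map.Entry<String, Pair<Properties, Set<String>>> e : config.entrySet()) {
            System.out.println("target " + e.getKey()
                    + " exports " + e.getValue().getSecond()
                    + " with " + e.getValue().getFirst().size() + " properties");
        }
    }
}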

Example 4 with Pair

Use of org.voltcore.utils.Pair in project voltdb by VoltDB.

From the class ExportOnServerVerifier, method checkForMoreFilesRemote:

@SuppressWarnings("unchecked")
private void checkForMoreFilesRemote(Comparator<String> comparator) throws Exception {
    int onDoneRetries = 6;
    long start_time = System.currentTimeMillis();
    while (m_exportFiles.isEmpty()) {
        /*
         * Collect the list of remote files at each node
         * Sort the list from each node
         */
        int activeFound = 0;
        List<Pair<ChannelSftp, List<String>>> pathsFromAllNodes = new ArrayList<Pair<ChannelSftp, List<String>>>();
        for (RemoteHost rh : m_hosts) {
            Vector<LsEntry> files = rh.channel.ls(rh.path);
            List<String> paths = new ArrayList<String>();
            final int trackerModifyTime = rh.channel.stat(rh.path + "/" + TRACKER_FILENAME).getMTime();
            boolean activeInRemote = false;
            boolean filesInRemote = false;
            for (LsEntry entry : files) {
                activeInRemote = activeInRemote || entry.getFilename().trim().toLowerCase().startsWith("active");
                filesInRemote = filesInRemote || entry.getFilename().trim().toLowerCase().startsWith("active");
                if (!entry.getFilename().equals(".") && !entry.getFilename().equals("..") && !entry.getAttrs().isDir()) {
                    final String entryFileName = rh.path + "/" + entry.getFilename();
                    final int entryModifyTime = entry.getAttrs().getMTime();
                    if (!entry.getFilename().contains("active")) {
                        Matcher mtc = EXPORT_FILENAME_REGEXP.matcher(entry.getFilename());
                        if (mtc.matches()) {
                            paths.add(entryFileName);
                            activeInRemote = activeInRemote || entryModifyTime > trackerModifyTime;
                            filesInRemote = true;
                        } else {
                            System.err.println("ERROR: " + entryFileName + " does not match expected export file name pattern");
                        }
                    } else if (entry.getFilename().trim().toLowerCase().startsWith("active-")) {
                        // Treat an "active-" file as abandoned if its mtime lags the tracker file
                        // by more than 120 seconds, and rename it so it gets picked up below.
                        if ((trackerModifyTime - entryModifyTime) > 120) {
                            final String renamed = rh.path + "/" + entry.getFilename().substring("active-".length());
                            rh.channel.rename(entryFileName, renamed);
                            paths.add(renamed);
                        }
                    }
                }
            }
            touchActiveTracker(rh);
            rh.activeSeen = rh.activeSeen || activeInRemote;
            rh.fileSeen = rh.fileSeen || filesInRemote;
            if (activeInRemote)
                activeFound++;
            Collections.sort(paths, comparator);
            if (!paths.isEmpty())
                pathsFromAllNodes.add(Pair.of(rh.channel, paths));
        }
        if (!m_clientComplete.isEmpty()) {
            printExportFileSituation(pathsFromAllNodes, activeFound);
        }
        if (pathsFromAllNodes.isEmpty() && activeFound == 0 && allActiveSeen()) {
            if (--onDoneRetries <= 0)
                return;
            Thread.sleep(5000);
        }
        // add them to m_exportFiles as ordered by the comparator
        TreeMap<String, Pair<ChannelSftp, String>> hadPaths = new TreeMap<String, Pair<ChannelSftp, String>>(comparator);
        for (Pair<ChannelSftp, List<String>> p : pathsFromAllNodes) {
            final ChannelSftp c = p.getFirst();
            for (String path : p.getSecond()) {
                hadPaths.put(path, Pair.of(c, path));
            }
        }
        boolean hadOne = !hadPaths.isEmpty();
        Iterator<Map.Entry<String, Pair<ChannelSftp, String>>> itr = hadPaths.entrySet().iterator();
        while (itr.hasNext()) {
            Map.Entry<String, Pair<ChannelSftp, String>> entry = itr.next();
            m_exportFiles.offer(entry.getValue());
            itr.remove();
        }
        long now = System.currentTimeMillis();
        if ((now - start_time) > FILE_TIMEOUT_MS) {
            throw new ValidationErr("Timed out waiting on new files.\n" + "This indicates a mismatch in the transaction streams between the client logs and the export data or the death of something important.", null, null);
        } else if (!hadOne) {
            Thread.sleep(1200);
        }
    }
}
Also used : Matcher(java.util.regex.Matcher) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) ChannelSftp(com.jcraft.jsch.ChannelSftp) LsEntry(com.jcraft.jsch.ChannelSftp.LsEntry) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) NavigableMap(java.util.NavigableMap) Pair(org.voltcore.utils.Pair)
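
The final stretch of the loop merges per-node file lists into a single comparator-ordered TreeMap so files from all hosts are consumed in one global order, each path carrying its source channel. A sketch of just that merge step, with plain String node names standing in for the real ChannelSftp handles and made-up file names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.voltcore.utils.Pair;

public class MergeOrderedPathsSketch {
    public static void main(String[] args) {
        Comparator<String> comparator = Comparator.naturalOrder();

        // Per-node results: (node, paths found on that node).
        List<Pair<String, List<String>>> pathsFromAllNodes = new ArrayList<>();
        pathsFromAllNodes.add(Pair.of("node-1", Arrays.asList("export-3.csv", "export-1.csv")));
        pathsFromAllNodes.add(Pair.of("node-2", Arrays.asList("export-2.csv")));

        // Flatten into one map keyed by path so entries from all nodes interleave in
        // comparator order, as checkForMoreFilesRemote does before offering them to m_exportFiles.
        TreeMap<String, Pair<String, String>> ordered = new TreeMap<>(comparator);
        for (Pair<String, List<String>> p : pathsFromAllNodes) {
            for (String path : p.getSecond()) {
                ordered.put(path, Pair.of(p.getFirst(), path));
            }
        }
        for (Map.Entry<String, Pair<String, String>> e : ordered.entrySet()) {
            System.out.println(e.getKey() + " from " + e.getValue().getFirst());
        }
    }
}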

Example 5 with Pair

Use of org.voltcore.utils.Pair in project voltdb by VoltDB.

From the class SnapshotCompletionMonitor, method processSnapshotData:

private void processSnapshotData(byte[] data) throws Exception {
    if (data == null) {
        return;
    }
    JSONObject jsonObj = new JSONObject(new String(data, "UTF-8"));
    long txnId = jsonObj.getLong("txnId");
    int hostCount = jsonObj.getInt("hostCount");
    String path = jsonObj.getString(SnapshotUtil.JSON_PATH);
    SnapshotPathType stype = SnapshotPathType.valueOf(jsonObj.getString(SnapshotUtil.JSON_PATH_TYPE));
    String nonce = jsonObj.getString(SnapshotUtil.JSON_NONCE);
    boolean truncation = jsonObj.getBoolean("isTruncation");
    boolean didSucceed = jsonObj.getBoolean("didSucceed");
    // A truncation request ID is not always provided. It's used for
    // snapshots triggered indirectly via ZooKeeper so that the
    // triggerer can recognize the snapshot when it finishes.
    String truncReqId = jsonObj.optString("truncReqId");
    if (hostCount == 0) {
        /*
         * Convert the JSON object containing the export sequence numbers for each
         * table and partition to a regular map
         */
        Map<String, Map<Integer, Pair<Long, Long>>> exportSequenceNumbers = null;
        final JSONObject exportSequenceJSON = jsonObj.getJSONObject("exportSequenceNumbers");
        final ImmutableMap.Builder<String, Map<Integer, Pair<Long, Long>>> builder = ImmutableMap.builder();
        @SuppressWarnings("unchecked") final Iterator<String> tableKeys = exportSequenceJSON.keys();
        while (tableKeys.hasNext()) {
            final String tableName = tableKeys.next();
            final JSONObject tableSequenceNumbers = exportSequenceJSON.getJSONObject(tableName);
            ImmutableMap.Builder<Integer, Pair<Long, Long>> tableBuilder = ImmutableMap.builder();
            @SuppressWarnings("unchecked") final Iterator<String> partitionKeys = tableSequenceNumbers.keys();
            while (partitionKeys.hasNext()) {
                final String partitionString = partitionKeys.next();
                final Integer partitionId = Integer.valueOf(partitionString);
                JSONObject sequenceNumbers = tableSequenceNumbers.getJSONObject(partitionString);
                final Long ackOffset = sequenceNumbers.getLong("ackOffset");
                final Long sequenceNumber = sequenceNumbers.getLong("sequenceNumber");
                tableBuilder.put(partitionId, Pair.of(ackOffset, sequenceNumber));
            }
            builder.put(tableName, tableBuilder.build());
        }
        exportSequenceNumbers = builder.build();
        long clusterCreateTime = jsonObj.optLong("clusterCreateTime", -1);
        Map<Integer, Long> drSequenceNumbers = new HashMap<>();
        JSONObject drTupleStreamJSON = jsonObj.getJSONObject("drTupleStreamStateInfo");
        Iterator<String> partitionKeys = drTupleStreamJSON.keys();
        int drVersion = 0;
        while (partitionKeys.hasNext()) {
            String partitionIdString = partitionKeys.next();
            JSONObject stateInfo = drTupleStreamJSON.getJSONObject(partitionIdString);
            drVersion = (int) stateInfo.getLong("drVersion");
            drSequenceNumbers.put(Integer.valueOf(partitionIdString), stateInfo.getLong("sequenceNumber"));
        }
        Map<Integer, Long> partitionTxnIdsMap = ImmutableMap.of();
        synchronized (m_snapshotTxnIdsToPartitionTxnIds) {
            Map<Integer, Long> partitionTxnIdsList = m_snapshotTxnIdsToPartitionTxnIds.get(txnId);
            if (partitionTxnIdsList != null) {
                partitionTxnIdsMap = ImmutableMap.copyOf(partitionTxnIdsList);
            }
        }
        /*
         * Collect all the last seen ids from the remote data centers so they can
         * be used by live rejoin to initialize a starting state for applying DR
         * data
         */
        Map<Integer, Map<Integer, Map<Integer, DRConsumerDrIdTracker>>> drMixedClusterSizeConsumerState = new HashMap<>();
        JSONObject consumerPartitions = jsonObj.getJSONObject("drMixedClusterSizeConsumerState");
        Iterator<String> cpKeys = consumerPartitions.keys();
        while (cpKeys.hasNext()) {
            final String consumerPartitionIdStr = cpKeys.next();
            final Integer consumerPartitionId = Integer.valueOf(consumerPartitionIdStr);
            JSONObject siteInfo = consumerPartitions.getJSONObject(consumerPartitionIdStr);
            drMixedClusterSizeConsumerState.put(consumerPartitionId, ExtensibleSnapshotDigestData.buildConsumerSiteDrIdTrackersFromJSON(siteInfo));
        }
        Iterator<SnapshotCompletionInterest> iter = m_interests.iterator();
        while (iter.hasNext()) {
            SnapshotCompletionInterest interest = iter.next();
            try {
                interest.snapshotCompleted(new SnapshotCompletionEvent(path, stype, nonce, txnId, partitionTxnIdsMap, truncation, didSucceed, truncReqId, exportSequenceNumbers, Collections.unmodifiableMap(drSequenceNumbers), Collections.unmodifiableMap(drMixedClusterSizeConsumerState), drVersion, clusterCreateTime));
            } catch (Exception e) {
                SNAP_LOG.warn("Exception while executing snapshot completion interest", e);
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) SnapshotCompletionEvent(org.voltdb.SnapshotCompletionInterest.SnapshotCompletionEvent) SnapshotPathType(org.voltdb.sysprocs.saverestore.SnapshotPathType) ImmutableMap(com.google_voltpatches.common.collect.ImmutableMap) NoNodeException(org.apache.zookeeper_voltpatches.KeeperException.NoNodeException) JSONObject(org.json_voltpatches.JSONObject) Map(java.util.Map) Pair(org.voltcore.utils.Pair)
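
This is the read side of Example 1: the per-partition (ackOffset, sequenceNumber) JSON is turned back into Pair<Long, Long> values held in immutable maps. A compact sketch of that conversion with an inline, made-up JSON literal, using only the JSONObject, ImmutableMap, and Pair calls seen above:

import java.util.Iterator;
import java.util.Map;

import org.json_voltpatches.JSONObject;
import org.voltcore.utils.Pair;

import com.google_voltpatches.common.collect.ImmutableMap;

public class ParseExportSequenceNumbersSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws Exception {
        // Illustrative payload in the same shape the snapshot digest writes.
        String json = "{\"ORDERS_EXPORT\":{\"0\":{\"ackOffset\":1024,\"sequenceNumber\":4096}}}";
        JSONObject tables = new JSONObject(json);

        ImmutableMap.Builder<String, Map<Integer, Pair<Long, Long>>> builder = ImmutableMap.builder();
        Iterator<String> tableKeys = tables.keys();
        while (tableKeys.hasNext()) {
            String tableName = tableKeys.next();
            JSONObject partitions = tables.getJSONObject(tableName);
            ImmutableMap.Builder<Integer, Pair<Long, Long>> perTable = ImmutableMap.builder();
            Iterator<String> partitionKeys = partitions.keys();
            while (partitionKeys.hasNext()) {
                String partition = partitionKeys.next();
                JSONObject numbers = partitions.getJSONObject(partition);
                // (ack offset, sequence number) per partition, as in processSnapshotData.
                perTable.put(Integer.valueOf(partition),
                        Pair.of(numbers.getLong("ackOffset"), numbers.getLong("sequenceNumber")));
            }
            builder.put(tableName, perTable.build());
        }
        System.out.println(builder.build());
    }
}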

Aggregations

Pair (org.voltcore.utils.Pair) 15
HashMap (java.util.HashMap) 6
Map (java.util.Map) 5
ArrayList (java.util.ArrayList) 4
HashSet (java.util.HashSet) 3
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 3
IOException (java.io.IOException) 2
ByteBuffer (java.nio.ByteBuffer) 2
List (java.util.List) 2
KeeperException (org.apache.zookeeper_voltpatches.KeeperException) 2
JSONObject (org.json_voltpatches.JSONObject) 2
ZKUtil (org.voltcore.zk.ZKUtil) 2
Table (org.voltdb.catalog.Table) 2
ImmutableList (com.google_voltpatches.common.collect.ImmutableList) 1
ImmutableMap (com.google_voltpatches.common.collect.ImmutableMap) 1
ChannelSftp (com.jcraft.jsch.ChannelSftp) 1
LsEntry (com.jcraft.jsch.ChannelSftp.LsEntry) 1
SecureRandom (java.security.SecureRandom) 1
NavigableMap (java.util.NavigableMap) 1
Properties (java.util.Properties) 1