Usage example of org.json_voltpatches.JSONObject in the VoltDB project:
class DRConsumerDrIdTracker, method toJSON.
/**
 * Serializes this tracker into a {@link JSONObject}: the last SP/MP unique
 * IDs, the producer partition id, and every tracked DR id range.
 *
 * Each range is encoded as a single-entry object mapping the range start
 * (as a decimal string key) to the range end, collected under "drIdRanges".
 *
 * @return the JSON representation of this tracker's state
 * @throws JSONException if a value cannot be encoded
 */
public JSONObject toJSON() throws JSONException {
    JSONObject json = new JSONObject();
    json.put("spUniqueId", m_lastSpUniqueId);
    json.put("mpUniqueId", m_lastMpUniqueId);
    json.put("producerPartitionId", m_producerPartitionId);
    JSONArray ranges = new JSONArray();
    for (Range<Long> r : m_map.asRanges()) {
        // One object per range: { "<start>": end }
        JSONObject encoded = new JSONObject();
        encoded.put(Long.toString(start(r)), end(r));
        ranges.put(encoded);
    }
    json.put("drIdRanges", ranges);
    return json;
}
Usage example of org.json_voltpatches.JSONObject in the VoltDB project:
class ExtensibleSnapshotDigestData, method writeDRStateToSnapshot.
/**
 * Writes the cluster-wide DR state into the snapshot digest being built by
 * {@code stringer}: the cluster create time, the DR protocol version (taken
 * from the first tuple-stream entry, if any), the per-stream state info, and
 * the per-partition mixed-cluster-size consumer trackers.
 *
 * @param stringer the digest writer to append to
 * @throws IOException if JSON serialization fails (wraps JSONException)
 */
private void writeDRStateToSnapshot(JSONStringer stringer) throws IOException {
    try {
        stringer.keySymbolValuePair("clusterCreateTime", VoltDB.instance().getClusterCreateTime());
        // drVersion is taken from the first stream entry only; presumably it is
        // identical across all streams — the map's iteration order picks the one recorded.
        Iterator<Entry<Integer, TupleStreamStateInfo>> streams = m_drTupleStreamInfo.entrySet().iterator();
        if (streams.hasNext()) {
            stringer.keySymbolValuePair("drVersion", streams.next().getValue().drVersion);
        }
        writeDRTupleStreamInfoToSnapshot(stringer);
        stringer.key("drMixedClusterSizeConsumerState");
        stringer.object();
        for (Entry<Integer, JSONObject> consumerEntry : m_drMixedClusterSizeConsumerState.entrySet()) {
            // Key: consumer partition id; value: the trackers reported by that site.
            stringer.key(consumerEntry.getKey().toString());
            stringer.value(consumerEntry.getValue());
        }
        stringer.endObject();
    } catch (JSONException e) {
        throw new IOException(e);
    }
}
Usage example of org.json_voltpatches.JSONObject in the VoltDB project:
class ExtensibleSnapshotDigestData, method mergeExportSequenceNumbersToZK.
/*
 * When recording snapshot completion in ZooKeeper we also record export
 * sequence numbers as JSON. Need to merge our sequence numbers with
 * existing numbers since multiple replicas will submit the sequence number
 */
private void mergeExportSequenceNumbersToZK(JSONObject jsonObj, VoltLogger log) throws JSONException {
    // Fetch (or lazily create) the table-name -> per-partition map.
    JSONObject tableSequenceMap;
    if (jsonObj.has("exportSequenceNumbers")) {
        tableSequenceMap = jsonObj.getJSONObject("exportSequenceNumbers");
    } else {
        tableSequenceMap = new JSONObject();
        jsonObj.put("exportSequenceNumbers", tableSequenceMap);
    }
    for (Map.Entry<String, Map<Integer, Pair<Long, Long>>> tableEntry : m_exportSequenceNumbers.entrySet()) {
        JSONObject sequenceNumbers;
        final String tableName = tableEntry.getKey();
        if (tableSequenceMap.has(tableName)) {
            sequenceNumbers = tableSequenceMap.getJSONObject(tableName);
        } else {
            sequenceNumbers = new JSONObject();
            tableSequenceMap.put(tableName, sequenceNumbers);
        }
        for (Map.Entry<Integer, Pair<Long, Long>> partitionEntry : tableEntry.getValue().entrySet()) {
            final Integer partitionId = partitionEntry.getKey();
            final String partitionIdString = partitionId.toString();
            final Long ackOffset = partitionEntry.getValue().getFirst();
            final Long partitionSequenceNumber = partitionEntry.getValue().getSecond();
            /*
             * Check that the sequence number is the same everywhere and log if it isn't.
             * Not going to crash because we are worried about poison pill transactions.
             */
            if (sequenceNumbers.has(partitionIdString)) {
                JSONObject existingEntry = sequenceNumbers.getJSONObject(partitionIdString);
                Long existingSequenceNumber = existingEntry.getLong("sequenceNumber");
                if (!existingSequenceNumber.equals(partitionSequenceNumber)) {
                    log.debug("Found a mismatch in export sequence numbers of export table " + tableName + " while recording snapshot metadata for partition " + partitionId + ". This is expected only on replicated, write-to-file export streams (remote node reported " + existingSequenceNumber + " and the local node reported " + partitionSequenceNumber + ")");
                }
                // BUG FIX: the merged maximum must be stored under the
                // "sequenceNumber" key (matching the creation path below and
                // the getLong("sequenceNumber") read above), not under the
                // partition-id string, which would leave the old value stale
                // and add a stray key to the per-partition entry.
                existingEntry.put("sequenceNumber", Math.max(existingSequenceNumber, partitionSequenceNumber));
                Long existingAckOffset = existingEntry.getLong("ackOffset");
                // Keep the largest ack offset seen across replicas.
                existingEntry.put("ackOffset", Math.max(ackOffset, existingAckOffset));
            } else {
                // First replica to report this partition: record its values directly.
                JSONObject newObj = new JSONObject();
                newObj.put("sequenceNumber", partitionSequenceNumber);
                newObj.put("ackOffset", ackOffset);
                sequenceNumbers.put(partitionIdString, newObj);
            }
        }
    }
}
Usage example of org.json_voltpatches.JSONObject in the VoltDB project:
class ExtensibleSnapshotDigestData, method mergeDRTupleStreamInfoToZK.
/**
 * Merges this node's DR tuple-stream state into the shared ZooKeeper JSON
 * node. For each partition, the entry with the highest DR sequence number
 * wins; a mismatch between replicas is logged at debug level.
 *
 * @param jsonObj the shared ZK JSON object to merge into
 * @param log     logger for mismatch diagnostics
 * @throws JSONException on JSON encoding/decoding failure
 */
private void mergeDRTupleStreamInfoToZK(JSONObject jsonObj, VoltLogger log) throws JSONException {
    // clusterCreateTime should be same across the cluster
    final long clusterCreateTime = VoltDB.instance().getClusterCreateTime();
    assert (!jsonObj.has("clusterCreateTime") || (clusterCreateTime == jsonObj.getLong("clusterCreateTime")));
    jsonObj.put("clusterCreateTime", clusterCreateTime);
    // Fetch (or lazily create) the partition-id -> state-info map.
    JSONObject stateInfoMap;
    if (jsonObj.has("drTupleStreamStateInfo")) {
        stateInfoMap = jsonObj.getJSONObject("drTupleStreamStateInfo");
    } else {
        stateInfoMap = new JSONObject();
        jsonObj.put("drTupleStreamStateInfo", stateInfoMap);
    }
    for (Map.Entry<Integer, TupleStreamStateInfo> entry : m_drTupleStreamInfo.entrySet()) {
        final String partitionId = entry.getKey().toString();
        // The MP initiator partition carries the replicated-stream info;
        // every other partition carries its own partition-stream info.
        final DRLogSegmentId streamState = (entry.getKey() == MpInitiator.MP_INIT_PID)
                ? entry.getValue().replicatedInfo
                : entry.getValue().partitionInfo;
        final JSONObject existingStateInfo = stateInfoMap.optJSONObject(partitionId);
        boolean addEntry;
        if (existingStateInfo == null) {
            addEntry = true;
        } else {
            final long existingDrId = existingStateInfo.getLong("sequenceNumber");
            // Keep whichever replica reported the higher DR id.
            addEntry = streamState.drId > existingDrId;
            if (streamState.drId != existingDrId) {
                log.debug("Found a mismatch in dr sequence numbers for partition " + partitionId + " the DRId should be the same at all replicas, but one node had " + DRLogSegmentId.getDebugStringFromDRId(existingDrId) + " and the local node reported " + DRLogSegmentId.getDebugStringFromDRId(streamState.drId));
            }
        }
        if (addEntry) {
            JSONObject stateInfo = new JSONObject();
            stateInfo.put("sequenceNumber", streamState.drId);
            stateInfo.put("spUniqueId", streamState.spUniqueId);
            stateInfo.put("mpUniqueId", streamState.mpUniqueId);
            stateInfo.put("drVersion", entry.getValue().drVersion);
            stateInfoMap.put(partitionId, stateInfo);
        }
    }
}
Usage example of org.json_voltpatches.JSONObject in the VoltDB project:
class ExtensibleSnapshotDigestData, method buildConsumerSiteDrIdTrackersFromJSON.
/**
 * Reconstructs the nested consumer tracker map from its JSON form: the outer
 * keys are producer cluster ids, the inner keys are producer partition ids,
 * and each leaf JSON object is deserialized into a {@link DRConsumerDrIdTracker}.
 *
 * @param siteTrackers JSON of cluster id -> (partition id -> tracker JSON)
 * @return mutable map of cluster id -> (partition id -> tracker)
 * @throws JSONException if the JSON structure is malformed
 */
public static Map<Integer, Map<Integer, DRConsumerDrIdTracker>> buildConsumerSiteDrIdTrackersFromJSON(JSONObject siteTrackers) throws JSONException {
    final Map<Integer, Map<Integer, DRConsumerDrIdTracker>> perSiteTrackers = new HashMap<>();
    final Iterator<String> clusterKeys = siteTrackers.keys();
    while (clusterKeys.hasNext()) {
        final String clusterKey = clusterKeys.next();
        final JSONObject partitionsJson = siteTrackers.getJSONObject(clusterKey);
        final Map<Integer, DRConsumerDrIdTracker> trackersByPartition = new HashMap<>();
        final Iterator<String> partitionKeys = partitionsJson.keys();
        while (partitionKeys.hasNext()) {
            final String partitionKey = partitionKeys.next();
            trackersByPartition.put(Integer.valueOf(partitionKey),
                    new DRConsumerDrIdTracker(partitionsJson.getJSONObject(partitionKey)));
        }
        perSiteTrackers.put(Integer.valueOf(clusterKey), trackersByPartition);
    }
    return perSiteTrackers;
}
Aggregations