Use of org.json_voltpatches.JSONObject in project voltdb by VoltDB.
From the class HashRangeExpression, method loadFromJSONObject.
@Override
protected void loadFromJSONObject(JSONObject obj) throws JSONException {
    m_hashColumn = obj.getInt(Members.HASH_COLUMN.name());
    JSONArray array = obj.getJSONArray(Members.RANGES.name());
    ImmutableSortedMap.Builder<Integer, Integer> b = ImmutableSortedMap.naturalOrder();
    for (int ii = 0; ii < array.length(); ii++) {
        JSONObject range = array.getJSONObject(ii);
        b.put(range.getInt(Members.RANGE_START.name()), range.getInt(Members.RANGE_END.name()));
    }
    m_ranges = b.build();
}
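For context, the expected input is a JSON object holding a hash column index and an array of [start, end] ranges. Below is a minimal, self-contained sketch of producing and consuming that shape, written against the stock org.json API on the assumption that org.json_voltpatches mirrors it; the key strings (HASH_COLUMN, RANGES, RANGE_START, RANGE_END) are assumed to match the Members enum names.

import org.json.JSONArray;
import org.json.JSONObject;
import com.google.common.collect.ImmutableSortedMap;

public class HashRangeJsonDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical serialized form: a hash column index plus an array of ranges.
        String json = "{\"HASH_COLUMN\": 0, \"RANGES\": ["
                + "{\"RANGE_START\": 0, \"RANGE_END\": 1073741823},"
                + "{\"RANGE_START\": 1073741824, \"RANGE_END\": 2147483647}]}";
        JSONObject obj = new JSONObject(json);
        JSONArray array = obj.getJSONArray("RANGES");
        // Same pattern as the method above: collect ranges into a sorted immutable map.
        ImmutableSortedMap.Builder<Integer, Integer> b = ImmutableSortedMap.naturalOrder();
        for (int ii = 0; ii < array.length(); ii++) {
            JSONObject range = array.getJSONObject(ii);
            b.put(range.getInt("RANGE_START"), range.getInt("RANGE_END"));
        }
        System.out.println(b.build()); // {0=1073741823, 1073741824=2147483647}
    }
}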
Use of org.json_voltpatches.JSONObject in project voltdb by VoltDB.
From the class SnapshotSaveAPI, method createSnapshotCompletionNode.
/**
 * Create the completion node for the snapshot identified by the txnId. It
 * assumes that all hosts will race to call this, so it doesn't fail if the
 * node already exists.
 *
 * @param path Path where the snapshot is written
 * @param pathType Type of the snapshot path
 * @param nonce Nonce of the snapshot
 * @param txnId Transaction ID of the snapshot; must be positive
 * @param isTruncation Whether or not this is a truncation snapshot
 * @param truncReqId Optional unique ID fed back to the monitor for identification
 * @return a ZKUtil.StringCallback that completes when ZooKeeper acknowledges the create request
 */
public static ZKUtil.StringCallback createSnapshotCompletionNode(String path, String pathType, String nonce,
        long txnId, boolean isTruncation, String truncReqId) {
    if (txnId <= 0) {
        VoltDB.crashGlobalVoltDB("TxnId must be greater than 0", true, null);
    }
    byte[] nodeBytes = null;
    try {
        JSONStringer stringer = new JSONStringer();
        stringer.object();
        stringer.keySymbolValuePair("txnId", txnId);
        stringer.keySymbolValuePair("isTruncation", isTruncation);
        stringer.keySymbolValuePair("didSucceed", false);
        stringer.keySymbolValuePair("hostCount", -1);
        stringer.keySymbolValuePair(SnapshotUtil.JSON_PATH, path);
        stringer.keySymbolValuePair(SnapshotUtil.JSON_PATH_TYPE, pathType);
        stringer.keySymbolValuePair(SnapshotUtil.JSON_NONCE, nonce);
        stringer.keySymbolValuePair("truncReqId", truncReqId);
        stringer.key("exportSequenceNumbers").object().endObject();
        stringer.endObject();
        JSONObject jsonObj = new JSONObject(stringer.toString());
        nodeBytes = jsonObj.toString(4).getBytes(Charsets.UTF_8);
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Error serializing snapshot completion node JSON", true, e);
    }
    ZKUtil.StringCallback cb = new ZKUtil.StringCallback();
    final String snapshotPath = VoltZK.completed_snapshots + "/" + txnId;
    VoltDB.instance().getHostMessenger().getZK().create(snapshotPath, nodeBytes, Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT, cb, null);
    return cb;
}
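What ends up in ZooKeeper is a pretty-printed JSON document. The following illustrative sketch assembles an equivalent payload with the stock org.json API (assumed to mirror org.json_voltpatches); the literal key names standing in for SnapshotUtil.JSON_PATH, JSON_PATH_TYPE, and JSON_NONCE are assumptions, since the real string constants live in SnapshotUtil.

import org.json.JSONObject;
import java.nio.charset.StandardCharsets;

public class CompletionNodeDemo {
    public static void main(String[] args) throws Exception {
        JSONObject obj = new JSONObject();
        obj.put("txnId", 12345L);
        obj.put("isTruncation", false);
        obj.put("didSucceed", false);   // updated later, once the snapshot finishes
        obj.put("hostCount", -1);       // placeholder until hosts report in
        obj.put("path", "/tmp/snapshots");   // assumed key for SnapshotUtil.JSON_PATH
        obj.put("pathType", "SNAP_PATH");    // assumed key for SnapshotUtil.JSON_PATH_TYPE
        obj.put("nonce", "mySnap");          // assumed key for SnapshotUtil.JSON_NONCE
        obj.put("truncReqId", "");
        obj.put("exportSequenceNumbers", new JSONObject());
        // Same serialization step as above: pretty-print with indent 4, encode as UTF-8.
        byte[] nodeBytes = obj.toString(4).getBytes(StandardCharsets.UTF_8);
        System.out.println(new String(nodeBytes, StandardCharsets.UTF_8));
    }
}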
Use of org.json_voltpatches.JSONObject in project voltdb by VoltDB.
From the class SnapshotSaveAPI, method startSnapshotting.
/**
 * The only public method: do all the work to start a snapshot.
 * Assumes that a snapshot is feasible, that the caller has validated it can
 * be accomplished, and that the caller knows this is a consistent or useful
 * transaction point at which to snapshot.
 *
 * @param file_path Directory where the snapshot files will be written
 * @param pathType Type of the snapshot path
 * @param file_nonce Nonce identifying this snapshot
 * @param format Format of the snapshot
 * @param block Non-zero if the caller wants to block until the snapshot completes
 * @param multiPartTxnId Multi-partition transaction ID of the snapshot
 * @param partitionTxnId Last seen single-partition transaction ID at this site
 * @param legacyPerPartitionTxnIds Per-partition transaction IDs carried forward for partitions that may no longer be active
 * @param data Optional JSON string with additional snapshot parameters
 * @param context Execution context of the running system procedure
 * @param hostname Name of the local host
 * @param hashinatorData Serialized hashinator configuration to store with the snapshot
 * @param timestamp Timestamp at which the snapshot was requested
 * @return VoltTable describing the results of the snapshot attempt
 */
public VoltTable startSnapshotting(final String file_path, final String pathType, final String file_nonce,
        final SnapshotFormat format, final byte block, final long multiPartTxnId, final long partitionTxnId,
        final long[] legacyPerPartitionTxnIds, final String data, final SystemProcedureExecutionContext context,
        final String hostname, final HashinatorSnapshotData hashinatorData, final long timestamp) {
    TRACE_LOG.trace("Creating snapshot target and handing to EEs");
    final VoltTable result = SnapshotUtil.constructNodeResultsTable();
    JSONObject jsData = null;
    if (data != null && !data.isEmpty()) {
        try {
            jsData = new JSONObject(data);
        } catch (JSONException e) {
            SNAP_LOG.error(String.format("JSON exception on snapshot data \"%s\".", data), e);
        }
    }
    final JSONObject finalJsData = jsData;
    JSONObject perSiteRemoteDataCenterDrIds;
    try {
        perSiteRemoteDataCenterDrIds = ExtensibleSnapshotDigestData.serializeSiteConsumerDrIdTrackersToJSON(
                context.getDrAppliedTrackers());
    } catch (JSONException e) {
        SNAP_LOG.warn("Failed to serialize the remote data center's last applied DR IDs");
        perSiteRemoteDataCenterDrIds = new JSONObject();
    }
    /*
     * One site wins the race to create the snapshot targets, populating
     * m_taskListsForHSIds for the other sites and creating an appropriate
     * number of snapshot permits.
     */
    synchronized (SnapshotSiteProcessor.m_snapshotCreateLock) {
        SnapshotSiteProcessor.m_snapshotCreateSetupBarrierActualAction.set(new Runnable() {
            @Override
            public void run() {
                Map<Integer, Long> partitionTransactionIds = m_partitionLastSeenTransactionIds;
                SNAP_LOG.debug("Last seen partition transaction ids " + partitionTransactionIds);
                m_partitionLastSeenTransactionIds = new HashMap<Integer, Long>();
                partitionTransactionIds.put(TxnEgo.getPartitionId(multiPartTxnId), multiPartTxnId);
                Map<Integer, JSONObject> remoteDataCenterLastIds = m_remoteDataCenterLastIds;
                m_remoteDataCenterLastIds = new HashMap<Integer, JSONObject>();
                /*
                 * Do a quick sanity check that the provided IDs
                 * don't conflict with currently active partitions. If they do,
                 * it isn't fatal; we can just skip them.
                 */
                for (long txnId : legacyPerPartitionTxnIds) {
                    final int legacyPartition = TxnEgo.getPartitionId(txnId);
                    if (partitionTransactionIds.containsKey(legacyPartition)) {
                        SNAP_LOG.warn("While saving a snapshot and propagating legacy transaction ids, "
                                + "found an id that matches a currently active partition: "
                                + partitionTransactionIds.get(legacyPartition));
                    } else {
                        partitionTransactionIds.put(legacyPartition, txnId);
                    }
                }
                m_allLocalSiteSnapshotDigestData = new ExtensibleSnapshotDigestData(
                        SnapshotSiteProcessor.getExportSequenceNumbers(),
                        SnapshotSiteProcessor.getDRTupleStreamStateInfo(),
                        remoteDataCenterLastIds, finalJsData);
                createSetupIv2(file_path, pathType, file_nonce, format, multiPartTxnId,
                        partitionTransactionIds, finalJsData, context, result,
                        m_allLocalSiteSnapshotDigestData, context.getSiteTrackerForSnapshot(),
                        hashinatorData, timestamp);
            }
        });
        // Create a barrier to use with the current number of sites to wait for,
        // or if the barrier is already set up, check if it is broken and reset if necessary.
        final int numLocalSites = context.getLocalSitesCount();
        SnapshotSiteProcessor.readySnapshotSetupBarriers(numLocalSites);
        // From within this EE, record the sequence numbers as of the start of the snapshot (now)
        // so that the info can be put in the digest.
        SnapshotSiteProcessor.populateSequenceNumbersForExecutionSite(context);
        Integer partitionId = TxnEgo.getPartitionId(partitionTxnId);
        SNAP_LOG.debug("Registering transaction id " + partitionTxnId + " for "
                + TxnEgo.getPartitionId(partitionTxnId));
        m_partitionLastSeenTransactionIds.put(partitionId, partitionTxnId);
        m_remoteDataCenterLastIds.put(partitionId, perSiteRemoteDataCenterDrIds);
    }
    boolean runPostTasks = false;
    VoltTable earlyResultTable = null;
    try {
        SnapshotSiteProcessor.m_snapshotCreateSetupBarrier.await();
        try {
            synchronized (m_createLock) {
                SNAP_LOG.debug("Found tasks for HSIds: "
                        + CoreUtils.hsIdCollectionToString(m_taskListsForHSIds.keySet()));
                SNAP_LOG.debug("Looking for local HSID: " + CoreUtils.hsIdToString(context.getSiteId()));
                Deque<SnapshotTableTask> taskList = m_taskListsForHSIds.remove(context.getSiteId());
                // Figure out which flavor of empty SnapshotSave result table to return.
                if (!m_createSuccess.get()) {
                    // There shouldn't be any work for any site if we failed.
                    assert (m_taskListsForHSIds.isEmpty());
                    VoltTable finalresult = m_createResult.get();
                    if (finalresult != null) {
                        m_createResult.set(null);
                        earlyResultTable = finalresult;
                    } else {
                        // We returned a non-empty NodeResultsTable with the failures in it,
                        // so every other site needs to return a NodeResultsTable as well.
                        earlyResultTable = SnapshotUtil.constructNodeResultsTable();
                    }
                } else if (taskList == null) {
                    SNAP_LOG.debug("No task for this site, block " + block);
                    // Send back an appropriate empty table based on the block flag.
                    if (block != 0) {
                        runPostTasks = true;
                        earlyResultTable = SnapshotUtil.constructPartitionResultsTable();
                        earlyResultTable.addRow(context.getHostId(), hostname,
                                CoreUtils.getSiteIdFromHSId(context.getSiteId()), "SUCCESS", "");
                    } else {
                        // If doing a snapshot of only replicated table(s), earlyResultTable here
                        // may not be empty even if the taskList of this site is null.
                        // In that case, the snapshot result is preserved by earlyResultTable.
                        earlyResultTable = result;
                    }
                } else {
                    context.getSiteSnapshotConnection().initiateSnapshots(format, taskList, multiPartTxnId,
                            m_allLocalSiteSnapshotDigestData);
                }
                if (m_deferredSetupFuture != null && taskList != null) {
                    // Add a listener to the deferred setup so that it can kick off the snapshot
                    // task once the setup is done.
                    m_deferredSetupFuture.addListener(new Runnable() {
                        @Override
                        public void run() {
                            DeferredSnapshotSetup deferredSnapshotSetup = null;
                            try {
                                deferredSnapshotSetup = m_deferredSetupFuture.get();
                            } catch (Exception e) {
                                // it doesn't throw
                            }
                            assert deferredSnapshotSetup != null;
                            context.getSiteSnapshotConnection().startSnapshotWithTargets(
                                    deferredSnapshotSetup.getPlan().getSnapshotDataTargets());
                        }
                    }, CoreUtils.SAMETHREADEXECUTOR);
                }
            }
        } finally {
            SnapshotSiteProcessor.m_snapshotCreateFinishBarrier.await(120, TimeUnit.SECONDS);
        }
    } catch (TimeoutException e) {
        VoltDB.crashLocalVoltDB("Timed out waiting 120 seconds for all threads to arrive and start snapshot",
                true, null);
    } catch (InterruptedException | BrokenBarrierException | IllegalArgumentException e) {
        result.addRow(context.getHostId(), hostname, "", "FAILURE", CoreUtils.throwableToString(e));
        earlyResultTable = result;
    }
    // If earlyResultTable is set, return here.
    if (earlyResultTable != null) {
        if (runPostTasks) {
            // Need to run post-snapshot tasks before finishing.
            SnapshotSiteProcessor.runPostSnapshotTasks(context);
        }
        return earlyResultTable;
    }
    if (block != 0) {
        HashSet<Exception> failures = Sets.newHashSet();
        String status = "SUCCESS";
        String err = "";
        try {
            // For a blocking snapshot, propagate the error from deferred setup back to the client.
            final DeferredSnapshotSetup deferredSnapshotSetup = m_deferredSetupFuture.get();
            if (deferredSnapshotSetup != null && deferredSnapshotSetup.getError() != null) {
                status = "FAILURE";
                err = deferredSnapshotSetup.getError().toString();
                failures.add(deferredSnapshotSetup.getError());
            }
            failures.addAll(context.getSiteSnapshotConnection().completeSnapshotWork());
            SnapshotSiteProcessor.runPostSnapshotTasks(context);
        } catch (Exception e) {
            status = "FAILURE";
            err = e.toString();
            failures.add(e);
        }
        final VoltTable blockingResult = SnapshotUtil.constructPartitionResultsTable();
        if (!failures.isEmpty()) {
            // Report failure, with the message of the last failure encountered.
            status = "FAILURE";
            for (Exception e : failures) {
                err = e.toString();
            }
        }
        blockingResult.addRow(context.getHostId(), hostname,
                CoreUtils.getSiteIdFromHSId(context.getSiteId()), status, err);
        return blockingResult;
    }
    return result;
}
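The coordination above hinges on a pair of barriers: every local site thread meets at a setup barrier whose action plans the snapshot exactly once, then each site kicks off its share of the work and meets again at a finish barrier. The following sketch reproduces that pattern with plain java.util.concurrent primitives instead of VoltDB's SnapshotSiteProcessor, purely as an illustration.

import java.util.concurrent.CyclicBarrier;

public class DoubleBarrierDemo {
    public static void main(String[] args) {
        final int numLocalSites = 4;
        // The barrier action runs in exactly one thread once all parties arrive,
        // analogous to m_snapshotCreateSetupBarrierActualAction above.
        CyclicBarrier setupBarrier = new CyclicBarrier(numLocalSites,
                () -> System.out.println("one site plans the snapshot tasks"));
        CyclicBarrier finishBarrier = new CyclicBarrier(numLocalSites,
                () -> System.out.println("all sites have started their snapshot work"));
        for (int site = 0; site < numLocalSites; site++) {
            final int id = site;
            new Thread(() -> {
                try {
                    setupBarrier.await();   // wait for the shared setup to complete
                    System.out.println("site " + id + " picks up its task list");
                    finishBarrier.await();  // don't proceed until every site has started
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }).start();
        }
    }
}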
Use of org.json_voltpatches.JSONObject in project voltdb by VoltDB.
From the class SnapshotScanAgent, method collectStatsImpl.
@Override
protected void collectStatsImpl(Connection c, long clientHandle, OpsSelector selector, ParameterSet params)
        throws Exception {
    JSONObject obj = new JSONObject();
    obj.put("selector", "SNAPSHOTSCAN");
    String err = null;
    if (selector == OpsSelector.SNAPSHOTSCAN) {
        err = parseParams(params, obj);
    } else {
        err = "SnapshotScanAgent received non-SNAPSHOTSCAN selector: " + selector.name();
    }
    if (err != null) {
        // Maintain old @SnapshotScan behavior.
        ColumnInfo[] result_columns = new ColumnInfo[1];
        result_columns[0] = new ColumnInfo("ERR_MSG", VoltType.STRING);
        VoltTable[] results = new VoltTable[] { new VoltTable(result_columns) };
        results[0].addRow(err);
        ClientResponseImpl errorResponse = new ClientResponseImpl(ClientResponse.SUCCESS,
                ClientResponse.UNINITIALIZED_APP_STATUS_CODE, null, results, err);
        errorResponse.setClientHandle(clientHandle);
        ByteBuffer buf = ByteBuffer.allocate(errorResponse.getSerializedSize() + 4);
        buf.putInt(buf.capacity() - 4);
        errorResponse.flattenToBuffer(buf).flip();
        c.writeStream().enqueue(buf);
        return;
    }
    String subselector = obj.getString("subselector");
    PendingOpsRequest psr = new PendingOpsRequest(selector, subselector, c, clientHandle,
            System.currentTimeMillis(), obj);
    distributeOpsWork(psr, obj);
}
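The error path writes the response back using a simple length-prefixed frame: a 4-byte int holding the size of the serialized response, followed by the response bytes. Here is a minimal sketch of that framing with plain java.nio and no VoltDB classes; the payload string is a stand-in for the flattened ClientResponseImpl.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class LengthPrefixFramingDemo {
    public static void main(String[] args) {
        byte[] payload = "serialized response bytes".getBytes(StandardCharsets.UTF_8);
        // Allocate the payload size plus 4 bytes for the length prefix, as the agent does.
        ByteBuffer buf = ByteBuffer.allocate(payload.length + 4);
        buf.putInt(buf.capacity() - 4); // length of everything after the prefix
        buf.put(payload);
        buf.flip();                     // ready the buffer for writing to the wire
        System.out.println("frame length = " + buf.getInt() + " bytes");
    }
}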
Use of org.json_voltpatches.JSONObject in project voltdb by VoltDB.
From the class RestoreAgent, method checkSnapshotIsComplete.
private SnapshotInfo checkSnapshotIsComplete(Long key, Snapshot s) {
    int partitionCount = -1;
    for (TableFiles tf : s.m_tableFiles.values()) {
        // Check if the snapshot is complete.
        if (tf.m_completed.stream().anyMatch(b -> !b)) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                    .append(" because it was not completed.");
            return null;
        }
        // Replicated tables don't check the partition count.
        if (tf.m_isReplicated) {
            continue;
        }
        // Everyone has to agree on the total partition count.
        for (int count : tf.m_totalPartitionCounts) {
            if (partitionCount == -1) {
                partitionCount = count;
            } else if (count != partitionCount) {
                m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                        .append(" because it had the wrong partition count ").append(count)
                        .append(", expecting ").append(partitionCount);
                return null;
            }
        }
    }
    if (s.m_digests.isEmpty()) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                .append(" because it had no valid digest file.");
        return null;
    }
    File digest = s.m_digests.get(0);
    Long catalog_crc = null;
    Map<Integer, Long> pidToTxnMap = new TreeMap<Integer, Long>();
    Set<String> digestTableNames = new HashSet<String>();
    // Create a valid but meaningless InstanceId to support pre-instanceId checking versions.
    InstanceId instanceId = new InstanceId(0, 0);
    int newPartitionCount = -1;
    try {
        JSONObject digest_detail = SnapshotUtil.CRCCheck(digest, LOG);
        if (digest_detail == null) {
            throw new IOException();
        }
        catalog_crc = digest_detail.getLong("catalogCRC");
        if (digest_detail.has("partitionTransactionIds")) {
            JSONObject pidToTxnId = digest_detail.getJSONObject("partitionTransactionIds");
            Iterator<String> it = pidToTxnId.keys();
            while (it.hasNext()) {
                String pidkey = it.next();
                Long txnidval = pidToTxnId.getLong(pidkey);
                pidToTxnMap.put(Integer.valueOf(pidkey), txnidval);
            }
        }
        if (digest_detail.has("instanceId")) {
            instanceId = new InstanceId(digest_detail.getJSONObject("instanceId"));
        }
        if (digest_detail.has("newPartitionCount")) {
            newPartitionCount = digest_detail.getInt("newPartitionCount");
        }
        if (digest_detail.has("tables")) {
            JSONArray tableObj = digest_detail.getJSONArray("tables");
            for (int i = 0; i < tableObj.length(); i++) {
                digestTableNames.add(tableObj.getString(i));
            }
        }
    } catch (IOException ioe) {
        m_snapshotErrLogStr.append("\nUnable to read digest file: ").append(digest.getAbsolutePath())
                .append(" due to: ").append(ioe.getMessage());
        return null;
    } catch (JSONException je) {
        m_snapshotErrLogStr.append("\nUnable to extract catalog CRC from digest: ")
                .append(digest.getAbsolutePath()).append(" due to: ").append(je.getMessage());
        return null;
    }
    if (s.m_catalogFile == null) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                .append(" because it had no catalog.");
        return null;
    }
    try {
        byte[] bytes = MiscUtils.fileToBytes(s.m_catalogFile);
        InMemoryJarfile jarfile = CatalogUtil.loadInMemoryJarFile(bytes);
        if (jarfile.getCRC() != catalog_crc) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                    .append(" because the catalog CRC did not match the digest.");
            return null;
        }
        // Make sure this is not a partial snapshot.
        // Compare digestTableNames with all normal table names in the catalog file.
        // A normal table is one that's NOT a materialized view, nor an export table.
        Set<String> catalogNormalTableNames = CatalogUtil.getNormalTableNamesFromInMemoryJar(jarfile);
        if (!catalogNormalTableNames.equals(digestTableNames)) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                    .append(" because this is a partial snapshot.");
            return null;
        }
    } catch (IOException ioe) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                .append(" because the catalog file could not be validated.");
        return null;
    }
    SnapshotInfo info = new SnapshotInfo(key, digest.getParent(),
            SnapshotUtil.parseNonceFromDigestFilename(digest.getName()), partitionCount, newPartitionCount,
            catalog_crc, m_hostId, instanceId, digestTableNames, s.m_stype);
    // Populate the table-to-partition map.
    for (Entry<String, TableFiles> te : s.m_tableFiles.entrySet()) {
        TableFiles tableFile = te.getValue();
        HashSet<Integer> ids = new HashSet<Integer>();
        for (Set<Integer> idSet : tableFile.m_validPartitionIds) {
            ids.addAll(idSet);
        }
        if (!tableFile.m_isReplicated) {
            info.partitions.put(te.getKey(), ids);
        }
        // Keep track of tables for which we've seen files while we're here.
        info.fileTables.add(te.getKey());
    }
    info.setPidToTxnIdMap(pidToTxnMap);
    return info;
}
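The digest consumed above is itself JSON. As a self-contained sketch, the fragment below reads the same fields with the stock org.json API (assumed to mirror org.json_voltpatches); the digest content here is made up for illustration.

import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
import org.json.JSONObject;

public class DigestParseDemo {
    public static void main(String[] args) throws Exception {
        String digestJson = "{\"catalogCRC\": 123456789,"
                + "\"partitionTransactionIds\": {\"0\": 1001, \"1\": 1002},"
                + "\"newPartitionCount\": 2,"
                + "\"tables\": [\"CUSTOMERS\", \"ORDERS\"]}";
        JSONObject digest = new JSONObject(digestJson);
        long catalogCrc = digest.getLong("catalogCRC");
        Map<Integer, Long> pidToTxnMap = new TreeMap<>();
        if (digest.has("partitionTransactionIds")) {
            // Same key-iteration pattern as checkSnapshotIsComplete above.
            JSONObject pidToTxnId = digest.getJSONObject("partitionTransactionIds");
            Iterator<String> it = pidToTxnId.keys();
            while (it.hasNext()) {
                String pid = it.next();
                pidToTxnMap.put(Integer.valueOf(pid), pidToTxnId.getLong(pid));
            }
        }
        System.out.println("catalogCRC=" + catalogCrc + " pidToTxn=" + pidToTxnMap);
    }
}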