Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
The class SnapshotDaemon, method initiateSnapshotSave:
private void initiateSnapshotSave(final long handle, final Object[] params, boolean blocking) {
    boolean success = true;
    VoltTable checkResult = SnapshotUtil.constructNodeResultsTable();
    final String jsString = String.class.cast(params[0]);
    if (m_lastInitiationTs != null) {
        final long elapsedMs = System.currentTimeMillis() - m_lastInitiationTs.getFirst();
        // Blocking snapshot may take a long time to finish, don't time it out if it's blocking
        if (!m_lastInitiationTs.getSecond() && elapsedMs > INITIATION_RESPONSE_TIMEOUT_MS) {
            SNAP_LOG.warn(String.format("A snapshot was initiated %d minutes ago and hasn't received a response yet.", TimeUnit.MILLISECONDS.toMinutes(elapsedMs)));
            m_lastInitiationTs = null;
        } else {
            checkResult.addRow(CoreUtils.getHostIdFromHSId(m_mb.getHSId()), CoreUtils.getHostnameOrAddress(), null, "FAILURE", "SNAPSHOT IN PROGRESS");
            success = false;
        }
    }
    if (success) {
        try {
            final JSONObject jsObj = new JSONObject(jsString);
            boolean initiateSnapshot;
            // Do scan work on all known live hosts
            VoltMessage msg = new SnapshotCheckRequestMessage(jsString);
            SnapshotPathType pathType = SnapshotPathType.valueOf(jsObj.getString(SnapshotUtil.JSON_PATH_TYPE));
            Set<Integer> liveHosts = VoltDB.instance().getHostMessenger().getLiveHostIds();
            for (int hostId : liveHosts) {
                m_mb.send(CoreUtils.getHSIdFromHostAndSite(hostId, HostMessenger.SNAPSHOT_IO_AGENT_ID), msg);
            }
            // Wait for responses from all hosts for a certain amount of time
            Map<Integer, VoltTable> responses = Maps.newHashMap();
            // 10s timeout
            final long timeoutMs = 10 * 1000;
            final long endTime = System.currentTimeMillis() + timeoutMs;
            SnapshotCheckResponseMessage response;
            while ((response = (SnapshotCheckResponseMessage) m_mb.recvBlocking(timeoutMs)) != null) {
                final String nonce = jsObj.getString(SnapshotUtil.JSON_NONCE);
                boolean nonceFound = false;
                if (pathType == SnapshotPathType.SNAP_PATH) {
                    // If the request was explicitly PATH, check the path too.
                    if (nonce.equals(response.getNonce()) && response.getPath().equals(jsObj.getString(SnapshotUtil.JSON_PATH))) {
                        nonceFound = true;
                    }
                } else {
                    // If the request used a type other than PATH, just check the type.
                    if (nonce.equals(response.getNonce()) && response.getSnapshotPathType() == pathType) {
                        nonceFound = true;
                    }
                }
                if (nonceFound) {
                    responses.put(CoreUtils.getHostIdFromHSId(response.m_sourceHSId), response.getResponse());
                }
                if (responses.size() == liveHosts.size() || System.currentTimeMillis() > endTime) {
                    break;
                }
            }
            if (responses.size() != liveHosts.size()) {
                checkResult.addRow(CoreUtils.getHostIdFromHSId(m_mb.getHSId()), CoreUtils.getHostnameOrAddress(), null, "FAILURE", "TIMED OUT CHECKING SNAPSHOT FEASIBILITY");
                success = false;
            }
            if (success) {
                // TRAIL [TruncSnap:12] all participating nodes have initiated successfully
                // Call @SnapshotSave if check passed, return the failure otherwise
                checkResult = VoltTableUtil.unionTables(responses.values());
                initiateSnapshot = SnapshotUtil.didSnapshotRequestSucceed(new VoltTable[] { checkResult });
                if (initiateSnapshot) {
                    m_lastInitiationTs = Pair.of(System.currentTimeMillis(), blocking);
                    m_initiator.initiateSnapshotDaemonWork("@SnapshotSave", handle, params);
                } else {
                    success = false;
                }
            }
        } catch (JSONException e) {
            success = false;
            checkResult.addRow(CoreUtils.getHostIdFromHSId(m_mb.getHSId()), CoreUtils.getHostnameOrAddress(), null, "FAILURE", "ERROR PARSING JSON");
            SNAP_LOG.warn("Error parsing JSON string: " + jsString, e);
        }
    }
    if (!success) {
        final ClientResponseImpl failureResponse = new ClientResponseImpl(ClientResponseImpl.SUCCESS, new VoltTable[] { checkResult }, null);
        failureResponse.setClientHandle(handle);
        processClientResponse(Callables.returning(failureResponse));
    }
}
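The pattern above boils down to: parse the request JSON, fail soft on malformed input, and report failures through the result table rather than by throwing. A minimal standalone sketch of that parse step (hypothetical class and method names; the literal "nonce" key stands in for SnapshotUtil.JSON_NONCE; only the JSONObject calls that appear above are assumed):

import org.json_voltpatches.JSONException;
import org.json_voltpatches.JSONObject;

public class SnapshotRequestParseSketch {
    // Returns the request nonce, or null if the JSON is malformed or the key is missing.
    static String tryParseNonce(String jsString) {
        try {
            JSONObject jsObj = new JSONObject(jsString); // throws JSONException on malformed input
            return jsObj.getString("nonce");             // throws JSONException if the key is absent
        } catch (JSONException e) {
            return null; // like the method above, convert the checked exception into a failure value
        }
    }

    public static void main(String[] args) {
        System.out.println(tryParseNonce("{\"nonce\":\"mysnap\"}")); // prints mysnap
        System.out.println(tryParseNonce("not json"));               // prints null
    }
}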
Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
The class RestoreAgent, method generatePlans:
/**
 * Generate restore and replay plans.
 *
 * @return The snapshot to restore, or null if there is none.
 * @throws Exception
 *             If any exception is thrown, plan generation has failed and the
 *             caller should crash the cluster.
 */
SnapshotInfo generatePlans() throws Exception {
    Map<String, Snapshot> snapshots = new HashMap<String, SnapshotUtil.Snapshot>();
    // Only scan if startup might require a snapshot restore.
    if (m_action.doesRecover()) {
        snapshots = getSnapshots();
    }
    final Long maxLastSeenTxn = m_replayAgent.getMaxLastSeenTxn();
    Set<SnapshotInfo> snapshotInfos = new HashSet<SnapshotInfo>();
    for (Snapshot e : snapshots.values()) {
        SnapshotInfo info = checkSnapshotIsComplete(e.getTxnId(), e);
        // if the cluster instance IDs in the snapshot and command log don't match, just move along
        if (m_replayAgent.getInstanceId() != null && info != null && !m_replayAgent.getInstanceId().equals(info.instanceId)) {
            // Exceptions are not well tolerated here, so don't throw over something
            // as trivial as error message formatting.
            String agentIdString;
            String infoIdString;
            try {
                agentIdString = m_replayAgent.getInstanceId().serializeToJSONObject().toString();
            } catch (JSONException e1) {
                agentIdString = "<failed to serialize id>";
            }
            try {
                infoIdString = info.instanceId.serializeToJSONObject().toString();
            } catch (JSONException e1) {
                infoIdString = "<failed to serialize id>";
            }
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(info.nonce).append(" due to mismatching instance IDs.").append(" Command log ID: ").append(agentIdString).append(" Snapshot ID: ").append(infoIdString);
            continue;
        }
        if (info != null) {
            final Map<Integer, Long> cmdlogmap = m_replayAgent.getMaxLastSeenTxnByPartition();
            final Map<Integer, Long> snapmap = info.partitionToTxnId;
            // If cmdlogmap is null there is no command log data, so don't do any TXN ID consistency checking between command log and snapshot
            if (cmdlogmap != null) {
                for (Integer cmdpart : cmdlogmap.keySet()) {
                    Long snaptxnId = snapmap.get(cmdpart);
                    if (snaptxnId == null) {
                        m_snapshotErrLogStr.append("\nRejected snapshot ").append(info.nonce).append(" due to missing partition: ").append(cmdpart);
                        info = null;
                        break;
                    } else if (snaptxnId < cmdlogmap.get(cmdpart)) {
                        m_snapshotErrLogStr.append("\nRejected snapshot ").append(info.nonce).append(" because it does not overlap the command log").append(" for partition: ").append(cmdpart).append(" command log txn ID: ").append(cmdlogmap.get(cmdpart)).append(" snapshot txn ID: ").append(snaptxnId);
                        info = null;
                        break;
                    }
                }
            }
        }
        if (info != null) {
            snapshotInfos.add(info);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Gathered " + snapshotInfos.size() + " snapshot information");
    }
    sendLocalRestoreInformation(maxLastSeenTxn, snapshotInfos);
    // Negotiate with other hosts about which snapshot to restore
    SnapshotInfo infoWithMinHostId = getRestorePlan();
    if (infoWithMinHostId != null && infoWithMinHostId.nonce.equals(m_terminusNonce)) {
        m_replayAgent.returnAllSegments();
        initialize(StartAction.CREATE);
        m_planned = true;
        return infoWithMinHostId;
    }
    /*
     * Generate the replay plan here so that we don't have to wait until the
     * snapshot restore finishes.
     */
    if (m_action.doesRecover()) {
        if (infoWithMinHostId != null) {
            // The expected partition count can be determined from the new partition count recorded
            // in the truncation snapshot: a truncation snapshot taken at the end of the join process
            // records the new partition count in the digest.
            m_replayAgent.generateReplayPlan(infoWithMinHostId.instanceId.getTimestamp(), infoWithMinHostId.txnId, infoWithMinHostId.newPartitionCount, m_isLeader);
        }
    }
    m_planned = true;
    return infoWithMinHostId;
}
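The core acceptance rule in the loop above, that every command-log partition must appear in the snapshot digest with a txn ID no older than the log's, can be sketched in isolation (hypothetical class and method names; the maps mirror cmdlogmap and snapmap from the method):

import java.util.Map;

public class SnapshotOverlapSketch {
    // A snapshot is viable only if every partition in the command log is present
    // in the snapshot with a txn ID >= the command log's last seen txn ID.
    static boolean snapshotCoversCommandLog(Map<Integer, Long> cmdlogmap, Map<Integer, Long> snapmap) {
        for (Map.Entry<Integer, Long> e : cmdlogmap.entrySet()) {
            Long snapTxnId = snapmap.get(e.getKey());
            if (snapTxnId == null || snapTxnId < e.getValue()) {
                return false; // missing partition, or snapshot predates the command log
            }
        }
        return true;
    }
}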
Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
The class RestoreAgent, method serializeRestoreInformation:
/**
 * @param max the maximum last-seen txn ID from the command log, or null if there is none
 * @param snapshots the candidate snapshots to include
 * @return the restore information serialized as a JSON string
 */
private String serializeRestoreInformation(Long max, Set<SnapshotInfo> snapshots) {
    try {
        JSONStringer stringer = new JSONStringer();
        stringer.object();
        // optional max value.
        if (max != null) {
            stringer.keySymbolValuePair("max", max);
        }
        // 1 means recover, 0 means create a new DB
        stringer.keySymbolValuePair("action", m_action.ordinal());
        stringer.key("snapInfos").array();
        for (SnapshotInfo snapshot : snapshots) {
            stringer.value(snapshot.toJSONObject());
        }
        stringer.endArray();
        stringer.endObject();
        return stringer.toString();
    } catch (JSONException je) {
        VoltDB.crashLocalVoltDB("Error exchanging snapshot info", true, je);
    }
    throw new RuntimeException("impossible codepath.");
}
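The JSONStringer calls follow the usual builder sequence: object(), key/value pairs, a nested array, then endArray() and endObject(). A minimal sketch of the same shape (hypothetical class, parameters, and values; keySymbolValuePair is the voltpatches convenience method used above):

import org.json_voltpatches.JSONException;
import org.json_voltpatches.JSONStringer;

public class RestoreInfoJsonSketch {
    static String serialize(long max, int action) throws JSONException {
        JSONStringer stringer = new JSONStringer();
        stringer.object();                             // {
        stringer.keySymbolValuePair("max", max);       //   "max": ...,
        stringer.keySymbolValuePair("action", action); //   "action": ...,
        stringer.key("snapInfos").array();             //   "snapInfos": [
        stringer.endArray();                           //   ]
        stringer.endObject();                          // }
        return stringer.toString();
    }
}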
Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
The class RestoreAgent, method fetchSnapshotTxnId:
/**
 * Get the txnId of the snapshot the cluster is restoring from, from ZK.
 * NOTE that the barrier for this is now completely contained
 * in run() in the restorePlanner thread; nobody gets out of there until
 * someone wins the leader election and successfully writes the VoltZK.restore_snapshot_id
 * node, so we just need to read it here.
 */
private void fetchSnapshotTxnId() {
    try {
        byte[] data = m_zk.getData(VoltZK.restore_snapshot_id, false, null);
        String jsonData = new String(data, Constants.UTF8ENCODING);
        if (!jsonData.equals("{}")) {
            m_hasRestored = true;
            JSONObject jo = new JSONObject(jsonData);
            SnapshotInfo info = new SnapshotInfo(jo);
            m_replayAgent.setSnapshotTxnId(info);
        } else {
            m_hasRestored = false;
            m_replayAgent.setSnapshotTxnId(null);
        }
    } catch (KeeperException e2) {
        VoltDB.crashGlobalVoltDB(e2.getMessage(), false, e2);
    } catch (InterruptedException e2) {
    } catch (JSONException je) {
        VoltDB.crashLocalVoltDB(je.getMessage(), true, je);
    }
}
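The read side is a plain ZooKeeper getData followed by a JSON parse, with "{}" serving as the "nothing restored" sentinel. Sketched standalone (hypothetical class and method names; the znode path is passed in; the three-argument getData with no watch and no Stat matches the call above):

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.json_voltpatches.JSONException;
import org.json_voltpatches.JSONObject;

public class ZkJsonFetchSketch {
    // Returns the parsed node content, or null if the node holds the "{}" sentinel.
    static JSONObject fetchSnapshotInfo(ZooKeeper zk, String path)
            throws KeeperException, InterruptedException, JSONException {
        byte[] data = zk.getData(path, false, null);
        String jsonData = new String(data, StandardCharsets.UTF_8);
        return jsonData.equals("{}") ? null : new JSONObject(jsonData);
    }
}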
Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
The class DDLCompiler, method addIndexToCatalog:
private static void addIndexToCatalog(Database db, Table table, VoltXMLElement node, Map<String, String> indexReplacementMap, HashMap<String, Index> indexMap, HashMap<String, Column> columnMap, VoltCompiler compiler) throws VoltCompilerException {
    assert node.name.equals("index");
    String name = node.attributes.get("name");
    boolean unique = Boolean.parseBoolean(node.attributes.get("unique"));
    boolean assumeUnique = Boolean.parseBoolean(node.attributes.get("assumeunique"));
    AbstractParsedStmt dummy = new ParsedSelectStmt(null, db);
    dummy.setDDLIndexedTable(table);
    StringBuffer msg = new StringBuffer(String.format("Index \"%s\" ", name));
    // "parse" the expression trees for an expression-based index (vs. a simple column value index)
    List<AbstractExpression> exprs = null;
    // "parse" the WHERE expression for partial index if any
    AbstractExpression predicate = null;
    // Some expressions require special validation for indexes; not every expression
    // can be indexed. First scan the result types and block those that can't be
    // indexed (boolean, geo, ...). The rest are gathered into the checkExpressions
    // list and checked all at once.
    List<AbstractExpression> checkExpressions = new ArrayList<>();
    for (VoltXMLElement subNode : node.children) {
        if (subNode.name.equals("exprs")) {
            exprs = new ArrayList<>();
            for (VoltXMLElement exprNode : subNode.children) {
                AbstractExpression expr = dummy.parseExpressionTree(exprNode);
                expr.resolveForTable(table);
                expr.finalizeValueTypes();
                // string will be populated with an expression's details when
                // its value type is not indexable
                StringBuffer exprMsg = new StringBuffer();
                if (!expr.isValueTypeIndexable(exprMsg)) {
                    // indexing on expression with boolean result is not supported.
                    throw compiler.new VoltCompilerException("Cannot create index \"" + name + "\" because it contains " + exprMsg + ", which is not supported.");
                }
                if ((unique || assumeUnique) && !expr.isValueTypeUniqueIndexable(exprMsg)) {
                    // unique indexing on an expression with this result type is not supported.
throw compiler.new VoltCompilerException("Cannot create unique index \"" + name + "\" because it contains " + exprMsg + ", which is not supported.");
}
// rest of the validity guards will be evaluated after collecting all the expressions.
checkExpressions.add(expr);
exprs.add(expr);
}
} else if (subNode.name.equals("predicate")) {
assert (subNode.children.size() == 1);
VoltXMLElement predicateXML = subNode.children.get(0);
assert (predicateXML != null);
predicate = buildPartialIndexPredicate(dummy, name, predicateXML, table, compiler);
}
}
    // Check all the subexpressions we gathered up.
    if (!AbstractExpression.validateExprsForIndexesAndMVs(checkExpressions, msg)) {
        // The error message will be in the StringBuffer msg.
        throw compiler.new VoltCompilerException(msg.toString());
    }
    String colList = node.attributes.get("columns");
    String[] colNames = colList.split(",");
    Column[] columns = new Column[colNames.length];
    boolean has_nonint_col = false;
    boolean has_geo_col = false;
    String nonint_col_name = null;
    for (int i = 0; i < colNames.length; i++) {
        columns[i] = columnMap.get(colNames[i]);
        if (columns[i] == null) {
            return;
        }
    }
    UnsafeOperatorsForDDL unsafeOps = new UnsafeOperatorsForDDL();
    if (exprs == null) {
        for (int i = 0; i < colNames.length; i++) {
            VoltType colType = VoltType.get((byte) columns[i].getType());
            if (!colType.isIndexable()) {
                String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values are not currently supported as index keys: \"" + colNames[i] + "\"";
                throw compiler.new VoltCompilerException(emsg);
            }
            if ((unique || assumeUnique) && !colType.isUniqueIndexable()) {
                String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values are not currently supported as unique index keys: \"" + colNames[i] + "\"";
                throw compiler.new VoltCompilerException(emsg);
            }
            if (!colType.isBackendIntegerType()) {
                has_nonint_col = true;
                nonint_col_name = colNames[i];
                has_geo_col = colType.equals(VoltType.GEOGRAPHY);
                if (has_geo_col && colNames.length > 1) {
                    String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values must be the only component of an index key: \"" + nonint_col_name + "\"";
                    throw compiler.new VoltCompilerException(emsg);
                }
            }
        }
    } else {
        for (AbstractExpression expression : exprs) {
            VoltType colType = expression.getValueType();
            if (!colType.isIndexable()) {
                String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " valued expressions are not currently supported as index keys.";
                throw compiler.new VoltCompilerException(emsg);
            }
            if ((unique || assumeUnique) && !colType.isUniqueIndexable()) {
                String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " valued expressions are not currently supported as unique index keys.";
                throw compiler.new VoltCompilerException(emsg);
            }
            if (!colType.isBackendIntegerType()) {
                has_nonint_col = true;
                nonint_col_name = "<expression>";
                has_geo_col = colType.equals(VoltType.GEOGRAPHY);
                if (has_geo_col) {
                    if (exprs.size() > 1) {
                        String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " values must be the only component of an index key.";
                        throw compiler.new VoltCompilerException(emsg);
                    } else if (!(expression instanceof TupleValueExpression)) {
                        String emsg = "Cannot create index \"" + name + "\" because " + colType.getName() + " expressions must be simple column expressions.";
                        throw compiler.new VoltCompilerException(emsg);
                    }
                }
            }
            expression.findUnsafeOperatorsForDDL(unsafeOps);
        }
    }
    Index index = table.getIndexes().add(name);
    index.setCountable(false);
    index.setIssafewithnonemptysources(!unsafeOps.isUnsafe());
    // Set the index type. It will be one of:
    // - Covering cell index (geo index for CONTAINS predicates)
    // - HASH index (set in HSQL because "hash" is in the name of the
    //   constraint or the index)
    // - TREE index, which is the default
    boolean isHashIndex = node.attributes.get("ishashindex").equals("true");
    if (has_geo_col) {
        index.setType(IndexType.COVERING_CELL_INDEX.getValue());
    } else if (isHashIndex) {
        // warn user that hash index will be deprecated
        compiler.addWarn("Hash indexes are deprecated. In a future release, VoltDB will only support tree indexes, even if the index name contains the string \"hash\"");
        // make the index a hash.
        if (has_nonint_col) {
            String emsg = "Index " + name + " in table " + table.getTypeName() + " uses a non-hashable column " + nonint_col_name;
            throw compiler.new VoltCompilerException(emsg);
        }
        index.setType(IndexType.HASH_TABLE.getValue());
    } else {
        index.setType(IndexType.BALANCED_TREE.getValue());
        index.setCountable(true);
    }
    // For an expression index these column refs are not the index keys themselves, but they still represent the columns that will trigger an index update when their values change.
    for (int i = 0; i < columns.length; i++) {
        ColumnRef cref = index.getColumns().add(columns[i].getTypeName());
        cref.setColumn(columns[i]);
        cref.setIndex(i);
    }
    if (exprs != null) {
        try {
            index.setExpressionsjson(convertToJSONArray(exprs));
        } catch (JSONException e) {
            throw compiler.new VoltCompilerException("Unexpected error serializing non-column expressions for index '" + name + "' on type '" + table.getTypeName() + "': " + e.toString());
        }
    }
    index.setUnique(unique);
    if (assumeUnique) {
        index.setUnique(true);
    }
    index.setAssumeunique(assumeUnique);
    if (predicate != null) {
        try {
            index.setPredicatejson(convertToJSONObject(predicate));
        } catch (JSONException e) {
            throw compiler.new VoltCompilerException("Unexpected error serializing predicate for partial index '" + name + "' on type '" + table.getTypeName() + "': " + e.toString());
        }
    }
    // Any difference in the attributes set above will make two indexes different, so now check for and drop duplicates.
    for (Index existingIndex : table.getIndexes()) {
        // skip itself
        if (existingIndex == index) {
            continue;
        }
        if (indexesAreDups(existingIndex, index)) {
            // replace any constraints using one index with the other
            //for () TODO
            // get ready for replacements from constraints created later
            indexReplacementMap.put(index.getTypeName(), existingIndex.getTypeName());
            // if the index is a user-named index...
            if (index.getTypeName().startsWith(HSQLInterface.AUTO_GEN_PREFIX) == false) {
                // on dup-detection, add a warning but don't fail
                String emsg = String.format("Dropping index %s on table %s because it duplicates index %s.", index.getTypeName(), table.getTypeName(), existingIndex.getTypeName());
                compiler.addWarn(emsg);
            }
            // drop the index and GTFO
            table.getIndexes().delete(index.getTypeName());
            return;
        }
    }
    String smsg = "Created index: " + name + " on table: " + table.getTypeName() + " of type: " + IndexType.get(index.getType()).name();
    compiler.addInfo(smsg);
    indexMap.put(name, index);
}
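Both serialization steps above (the expressions and the predicate) rewrap the checked JSONException as a VoltCompilerException so callers see a single failure type. The convention, reduced to a minimal sketch (hypothetical class names; CompilerException stands in for VoltCompilerException, and the empty array stands in for the real expression serialization):

import org.json_voltpatches.JSONException;
import org.json_voltpatches.JSONStringer;

public class CatalogJsonSketch {
    // Hypothetical domain exception standing in for VoltCompilerException.
    static class CompilerException extends Exception {
        CompilerException(String msg) { super(msg); }
    }

    // Serialize to JSON, converting the checked JSONException into the domain error.
    static String serializeOrFail(String indexName) throws CompilerException {
        try {
            JSONStringer stringer = new JSONStringer();
            stringer.array();
            stringer.endArray();
            return stringer.toString();
        } catch (JSONException e) {
            throw new CompilerException("Unexpected error serializing expressions for index '"
                    + indexName + "': " + e.toString());
        }
    }
}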