Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
Class StreamSnapshotRequestConfig, method parseStreamPairs.
private static Multimap<Long, Long> parseStreamPairs(JSONObject jsData) {
    ArrayListMultimap<Long, Long> streamPairs = ArrayListMultimap.create();
    if (jsData != null) {
        try {
            JSONObject sp = jsData.getJSONObject("streamPairs");
            Iterator<String> it = sp.keys();
            while (it.hasNext()) {
                String key = it.next();
                long sourceHSId = Long.valueOf(key);
                JSONArray destJSONArray = sp.getJSONArray(key);
                for (int i = 0; i < destJSONArray.length(); i++) {
                    long destHSId = destJSONArray.getLong(i);
                    streamPairs.put(sourceHSId, destHSId);
                }
            }
        } catch (JSONException e) {
            SNAP_LOG.warn("Failed to parse stream pair information", e);
        }
    }
    return streamPairs;
}
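For context, here is a minimal sketch of the JSON shape this parser expects (my own illustration, not taken from the VoltDB source): the "streamPairs" object maps each source HSId, encoded as a string key, to an array of destination HSIds. The HSId values below are invented.

import org.json_voltpatches.JSONArray;
import org.json_voltpatches.JSONException;
import org.json_voltpatches.JSONObject;

public class StreamPairsSketch {
    // Builds a jsData object that parseStreamPairs() would turn into {12345 -> [67890, 67891]}.
    public static JSONObject buildStreamPairs() throws JSONException {
        JSONArray destinations = new JSONArray();
        destinations.put(67890L);                 // destination HSIds for one source site
        destinations.put(67891L);
        JSONObject streamPairs = new JSONObject();
        streamPairs.put("12345", destinations);   // key is the source HSId as a string
        JSONObject jsData = new JSONObject();
        jsData.put("streamPairs", streamPairs);
        return jsData;
    }
}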
Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
Class SnapshotRequestConfig, method getTablesToInclude.
private static Table[] getTablesToInclude(JSONObject jsData, Database catalogDatabase) {
    final List<Table> tables = SnapshotUtil.getTablesToSave(catalogDatabase);
    Set<String> tableNamesToInclude = null;
    Set<String> tableNamesToExclude = null;
    if (jsData != null) {
        JSONArray tableNames = jsData.optJSONArray("tables");
        if (tableNames != null) {
            tableNamesToInclude = Sets.newHashSet();
            for (int i = 0; i < tableNames.length(); i++) {
                try {
                    final String s = tableNames.getString(i).trim().toUpperCase();
                    if (!s.isEmpty()) {
                        tableNamesToInclude.add(s);
                    }
                } catch (JSONException e) {
                    SNAP_LOG.warn("Unable to parse tables to include for snapshot", e);
                }
            }
        }
        JSONArray excludeTableNames = jsData.optJSONArray("skiptables");
        if (excludeTableNames != null) {
            tableNamesToExclude = Sets.newHashSet();
            for (int i = 0; i < excludeTableNames.length(); i++) {
                try {
                    final String s = excludeTableNames.getString(i).trim().toUpperCase();
                    if (!s.isEmpty()) {
                        tableNamesToExclude.add(s);
                    }
                } catch (JSONException e) {
                    SNAP_LOG.warn("Unable to parse tables to exclude for snapshot", e);
                }
            }
        }
    }
    if (tableNamesToInclude != null && tableNamesToInclude.isEmpty()) {
        // A stream snapshot may specify an empty snapshot sometimes.
        tables.clear();
    } else {
        ListIterator<Table> iter = tables.listIterator();
        while (iter.hasNext()) {
            Table table = iter.next();
            if ((tableNamesToInclude != null && !tableNamesToInclude.remove(table.getTypeName()))
                    || (tableNamesToExclude != null && tableNamesToExclude.remove(table.getTypeName()))) {
                // If the table is not in the list to include or
                // is in the list to exclude, remove it.
                iter.remove();
            }
        }
    }
    if (tableNamesToInclude != null && !tableNamesToInclude.isEmpty()) {
        throw new IllegalArgumentException(
                "The following tables were specified to include in the snapshot, " +
                "but are not present in the database: " + Joiner.on(", ").join(tableNamesToInclude));
    }
    if (tableNamesToExclude != null && !tableNamesToExclude.isEmpty()) {
        throw new IllegalArgumentException(
                "The following tables were specified to exclude from the snapshot, " +
                "but are not present in the database: " + Joiner.on(", ").join(tableNamesToExclude));
    }
    return tables.toArray(new Table[0]);
}
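As a rough illustration (the table names are invented, the field names match the parser above), the request JSON can carry a "tables" array of tables to include and a "skiptables" array of tables to exclude; names are trimmed and upper-cased before matching.

import org.json_voltpatches.JSONArray;
import org.json_voltpatches.JSONException;
import org.json_voltpatches.JSONObject;

public class SnapshotTableFilterSketch {
    public static JSONObject buildRequest() throws JSONException {
        JSONArray include = new JSONArray();
        include.put("ORDERS");
        include.put("customers");            // getTablesToInclude() upper-cases this to CUSTOMERS
        JSONArray exclude = new JSONArray();
        exclude.put("AUDIT_LOG");
        JSONObject jsData = new JSONObject();
        jsData.put("tables", include);       // tables to include in the snapshot
        jsData.put("skiptables", exclude);   // tables to exclude from the snapshot
        return jsData;
    }
}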
Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
Class TopologyZKUtils, method updateTopologyToZK.
public static void updateTopologyToZK(ZooKeeper zk, AbstractTopology topology) {
    Stat stat = new Stat();
    try {
        zk.getData(VoltZK.topology, false, stat);
        byte[] payload = topology.topologyToJSON().toString().getBytes(Charsets.UTF_8);
        zk.setData(VoltZK.topology, payload, stat.getVersion());
    } catch (KeeperException | InterruptedException | JSONException e) {
        VoltDB.crashLocalVoltDB("Unable to update topology to ZK, dying", true, e);
    }
}
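The method above reads the znode's current version and then writes with that version, so the write fails if another node changed the topology in between. Below is a generic ZooKeeper sketch of the same conditional-update pattern with a retry on version conflict; the path, class, and method names are my own, not VoltDB's.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZkConditionalUpdateSketch {
    public static void updateWithRetry(ZooKeeper zk, String path, byte[] payload)
            throws KeeperException, InterruptedException {
        while (true) {
            Stat stat = new Stat();
            zk.getData(path, false, stat);                     // read the current version
            try {
                zk.setData(path, payload, stat.getVersion());  // write only if still unchanged
                return;
            } catch (KeeperException.BadVersionException e) {
                // Another node updated the znode first; re-read and try again.
            }
        }
    }
}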
Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
Class AbstractParsedStmt, method orderByColumnsCoverUniqueKeys.
/**
 * ORDER BY columns or expressions have to operate on the display columns or expressions.
 * @return true if the ORDER BY expressions cover a unique index of every table in the
 *         query, which makes the result order deterministic.
 */
protected boolean orderByColumnsCoverUniqueKeys() {
    // In theory, if EVERY table in the query has a uniqueness constraint
    // (primary key or other unique index) on columns that are all listed in the ORDER BY values,
    // the result is deterministic.
    // This holds regardless of whether the associated index is actually used in the selected plan,
    // so this check is plan-independent.
    //
    // baseTableAliases associates table aliases with the order by
    // expressions which reference them. Presumably by using
    // table aliases we will map table scans to expressions rather
    // than tables to expressions, and not confuse ourselves with
    // different instances of the same table in self joins.
    HashMap<String, List<AbstractExpression>> baseTableAliases = new HashMap<>();
    for (ParsedColInfo col : orderByColumns()) {
        AbstractExpression expr = col.expression;
        //
        // Compute the set of tables mentioned in the expression.
        // 1. Search out all the TVEs.
        // 2. Throw the aliases of the tables of each of these into a HashSet.
        //    The table must have an alias. It might not have a name.
        // 3. If the HashSet has size > 1 we can't use this expression.
        //
        List<TupleValueExpression> baseTVEExpressions = expr.findAllTupleValueSubexpressions();
        Set<String> baseTableNames = new HashSet<>();
        for (TupleValueExpression tve : baseTVEExpressions) {
            String tableAlias = tve.getTableAlias();
            assert (tableAlias != null);
            baseTableNames.add(tableAlias);
        }
        if (baseTableNames.size() != 1) {
            // Expressions that span multiple tables are not helpful here.
            // Neither are (nonsense) constant (table-less) expressions.
            continue;
        }
        // Everything in the baseTVEExpressions table is a column
        // in the same table and has the same alias. So just grab the first one.
        // All we really want is the alias.
        AbstractExpression baseTVE = baseTVEExpressions.get(0);
        String nextTableAlias = ((TupleValueExpression) baseTVE).getTableAlias();
        // Every TVE should carry a table alias at this point.
        assert (nextTableAlias != null);
        List<AbstractExpression> perTable = baseTableAliases.get(nextTableAlias);
        if (perTable == null) {
            perTable = new ArrayList<>();
            baseTableAliases.put(nextTableAlias, perTable);
        }
        perTable.add(expr);
    }
    if (m_tableAliasMap.size() > baseTableAliases.size()) {
        // At least one table in the query has no ORDER BY expression referencing it.
        // Certain join properties (like a unique-index nested loop join, etc.) could
        // still make the result deterministic, but that is not checked here.
        return false;
    }
    boolean allScansAreDeterministic = true;
    for (Entry<String, List<AbstractExpression>> orderedAlias : baseTableAliases.entrySet()) {
        List<AbstractExpression> orderedAliasExprs = orderedAlias.getValue();
        StmtTableScan tableScan = getStmtTableScanByAlias(orderedAlias.getKey());
        if (tableScan == null) {
            assert (false);
            return false;
        }
        if (tableScan instanceof StmtSubqueryScan) {
            // We don't yet handle FROM clause subqueries here.
            return false;
        }
        Table table = ((StmtTargetTableScan) tableScan).getTargetTable();
        // This table's scans need to be proven deterministic.
        allScansAreDeterministic = false;
        // Search indexes for one that makes the order by deterministic
        for (Index index : table.getIndexes()) {
            // skip non-unique indexes
            if (!index.getUnique()) {
                continue;
            }
            // get the list of expressions for the index
            List<AbstractExpression> indexExpressions = new ArrayList<>();
            String jsonExpr = index.getExpressionsjson();
            // if this is a pure-column index...
            if (jsonExpr.isEmpty()) {
                for (ColumnRef cref : index.getColumns()) {
                    Column col = cref.getColumn();
                    TupleValueExpression tve = new TupleValueExpression(table.getTypeName(),
                            orderedAlias.getKey(), col.getName(), col.getName(), col.getIndex());
                    indexExpressions.add(tve);
                }
            } else {
                // if this is a fancy expression-based index...
                try {
                    indexExpressions = AbstractExpression.fromJSONArrayString(jsonExpr, tableScan);
                } catch (JSONException e) {
                    e.printStackTrace();
                    assert (false);
                    continue;
                }
            }
            // The index's columns/expressions must all appear among this table's ORDER BY
            // expressions, e.g. a unique index on (unique_id) is covered by
            // ORDER BY A.unique_id, A.b_id.
            if (orderedAliasExprs.containsAll(indexExpressions)) {
                allScansAreDeterministic = true;
                break;
            }
        }
        // ALL tables' scans need to be proven deterministic.
        if (!allScansAreDeterministic) {
            return false;
        }
    }
    return true;
}
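The key test is the containsAll() call: for each table, the ORDER BY expressions referencing that table must cover every column or expression of at least one unique index. Below is a stand-alone illustration of that check with invented column names; it is only an analogy to the real expression comparison.

import java.util.Arrays;
import java.util.List;

public class OrderByCoverageSketch {
    public static void main(String[] args) {
        List<String> uniqueIndexColumns = Arrays.asList("ID");            // unique index on T(ID)
        List<String> orderByOnId = Arrays.asList("ID", "B");              // ORDER BY T.ID, T.B
        List<String> orderByOnB = Arrays.asList("B");                     // ORDER BY T.B
        System.out.println(orderByOnId.containsAll(uniqueIndexColumns));  // true  -> deterministic order
        System.out.println(orderByOnB.containsAll(uniqueIndexColumns));   // false -> not proven deterministic
    }
}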
Use of org.json_voltpatches.JSONException in project voltdb by VoltDB.
Class plannerTester, method loadPlanFromFile.
public static AbstractPlanNode loadPlanFromFile(String path, ArrayList<String> getsql) {
    BufferedReader reader;
    try {
        reader = new BufferedReader(new FileReader(path));
    } catch (FileNotFoundException e1) {
        e1.printStackTrace();
        String message = "ERROR: Plan file " + path + " doesn't exist.\n" +
                "Use -s (the Compile/Save option) or 'ant plannertestrefresh'" +
                " to generate plans to the baseline directory.\n";
        System.err.print(message);
        try {
            m_reportWriter.write(message);
        } catch (IOException e2) {
            e2.printStackTrace();
        }
        return null;
    }
    try {
        String json = "";
        try {
            String line = reader.readLine();
            getsql.add(line);
            while ((line = reader.readLine()) != null) {
                json += line;
            }
        } catch (IOException e2) {
            e2.printStackTrace();
            return null;
        }
        try {
            PlanNodeTree pnt = new PlanNodeTree();
            JSONObject jobj = new JSONObject(json);
            Database db = s_singleton.getDatabase();
            pnt.loadFromJSONPlan(jobj, db);
            return pnt.getRootPlanNode();
        } catch (JSONException e3) {
            e3.printStackTrace();
            System.out.println("Failed on input from file: " + path + " with JSON text: \n'" + json + "'");
            return null;
        }
    } finally {
        try {
            reader.close();
        } catch (IOException e2) {
            e2.printStackTrace();
        }
    }
}
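A hedged usage sketch follows (the file path is hypothetical, and plannerTester must already have been configured elsewhere, since loadPlanFromFile() relies on internal state such as s_singleton and m_reportWriter): the first line of a saved plan file is the SQL statement, which is returned through the getsql list, and the remaining lines are the JSON plan.

import java.util.ArrayList;

public class LoadPlanSketch {
    // Assumes VoltDB's AbstractPlanNode and plannerTester classes are on the classpath (imports omitted).
    public static void main(String[] args) {
        ArrayList<String> sql = new ArrayList<>();
        // Hypothetical baseline plan file written earlier by the -s (Compile/Save) option.
        AbstractPlanNode root = plannerTester.loadPlanFromFile("baseline/query1.plan", sql);
        if (root != null) {
            System.out.println("SQL from plan file: " + sql.get(0));
            System.out.println("Plan tree loaded, root node: " + root);
        }
    }
}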