Use of org.json_voltpatches.JSONException in the voltdb project by VoltDB:
class SnapshotUtil, method transformRestoreParamsToJSON.
/*
 * Parameter checking and normalization for the pre-JSON (legacy two-argument)
 * version of @SnapshotRestore. Converts legacy (path, nonce) parameters into
 * the JSON document form used internally, or validates/normalizes an already
 * JSON-encoded single parameter.
 *
 * Returns null on success (task's params are rewritten in place), or a
 * GRACEFUL_FAILURE ClientResponseImpl describing the parameter error.
 */
public static ClientResponseImpl transformRestoreParamsToJSON(StoredProcedureInvocation task) {
    Object[] params = task.getParams().toArray();
    if (params.length == 1) {
        // Single parameter: already a JSON document. Only normalize it so the
        // duplicates path defaults to the snapshot path when not supplied.
        try {
            JSONObject jsObj = new JSONObject((String) params[0]);
            String path = jsObj.optString(JSON_PATH);
            String dupPath = jsObj.optString(JSON_DUPLICATES_PATH);
            if (!path.isEmpty() && dupPath.isEmpty()) {
                jsObj.put(JSON_DUPLICATES_PATH, path);
            }
            task.setParams(jsObj.toString());
        } catch (JSONException e) {
            // Parsing caller-supplied JSON can legitimately fail; surface as
            // unchecked (replaces deprecated Guava Throwables.propagate).
            throw new RuntimeException(e);
        }
        return null;
    } else if (params.length == 2) {
        // Legacy form: (path, nonce). Validate both before building the JSON doc.
        if (params[0] == null) {
            return new ClientResponseImpl(ClientResponseImpl.GRACEFUL_FAILURE, new VoltTable[0], "@SnapshotRestore parameter 0 was null", task.getClientHandle());
        }
        if (params[1] == null) {
            return new ClientResponseImpl(ClientResponseImpl.GRACEFUL_FAILURE, new VoltTable[0], "@SnapshotRestore parameter 1 was null", task.getClientHandle());
        }
        if (!(params[0] instanceof String)) {
            return new ClientResponseImpl(ClientResponseImpl.GRACEFUL_FAILURE, new VoltTable[0], "@SnapshotRestore param 0 (path) needs to be a string, but was type " + params[0].getClass().getSimpleName(), task.getClientHandle());
        }
        if (!(params[1] instanceof String)) {
            return new ClientResponseImpl(ClientResponseImpl.GRACEFUL_FAILURE, new VoltTable[0], "@SnapshotRestore param 1 (nonce) needs to be a string, but was type " + params[1].getClass().getSimpleName(), task.getClientHandle());
        }
        JSONObject jsObj = new JSONObject();
        try {
            jsObj.put(SnapshotUtil.JSON_PATH, params[0]);
            if (VoltDB.instance().isRunningWithOldVerbs()) {
                jsObj.put(SnapshotUtil.JSON_PATH_TYPE, SnapshotPathType.SNAP_PATH);
            }
            jsObj.put(SnapshotUtil.JSON_NONCE, params[1]);
            // Legacy restores look for duplicates alongside the snapshot itself.
            jsObj.put(SnapshotUtil.JSON_DUPLICATES_PATH, params[0]);
        } catch (JSONException e) {
            // Keys here are constants and values are validated strings, so this
            // should be unreachable; fail loudly rather than continue.
            throw new RuntimeException(e);
        }
        task.setParams(jsObj.toString());
        return null;
    } else {
        return new ClientResponseImpl(ClientResponseImpl.GRACEFUL_FAILURE, new VoltTable[0], "@SnapshotRestore supports a single json document parameter or two parameters (path, nonce), " + params.length + " parameters provided", task.getClientHandle());
    }
}
Use of org.json_voltpatches.JSONException in the voltdb project by VoltDB:
class SnapshotUtil, method writeSnapshotDigest.
/**
 * Create a digest for a snapshot. The digest file is a 4-byte CRC32 of the
 * JSON payload followed by the UTF-8 JSON bytes. On any failure the partial
 * digest file is deleted and the stream is closed.
 *
 * @param txnId transaction ID when snapshot was initiated
 * @param catalogCRC CRC of the catalog in effect for this snapshot
 * @param path path to which snapshot files will be written
 * @param pathType snapshot path type (currently unused here — TODO confirm intent)
 * @param nonce nonce used to distinguish this snapshot
 * @param tables List of tables present in this snapshot
 * @param hostId Host ID where this is happening
 * @param partitionTransactionIds per-partition last-seen transaction IDs
 * @param extraSnapshotData persisted export, DR, etc state
 * @param instanceId cluster instance identity recorded in the digest
 * @param timestamp wall-clock time of the snapshot
 * @param newPartitionCount partition count recorded for elastic changes
 * @param clusterId cluster ID recorded in the digest
 * @return a Runnable that forces the digest to disk and closes the stream
 * @throws IOException on any write or serialization failure
 */
public static Runnable writeSnapshotDigest(long txnId, long catalogCRC, String path, String pathType, String nonce, List<Table> tables, int hostId, Map<Integer, Long> partitionTransactionIds, ExtensibleSnapshotDigestData extraSnapshotData, InstanceId instanceId, long timestamp, int newPartitionCount, int clusterId) throws IOException {
    final File f = new VoltFile(path, constructDigestFilenameForNonce(nonce, hostId));
    if (f.exists() && !f.delete()) {
        throw new IOException("Unable to write table list file " + f);
    }
    boolean success = false;
    try {
        final FileOutputStream fos = new FileOutputStream(f);
        try {
            JSONStringer stringer = new JSONStringer();
            try {
                stringer.object();
                stringer.keySymbolValuePair("version", 1);
                stringer.keySymbolValuePair("clusterid", clusterId);
                stringer.keySymbolValuePair("txnId", txnId);
                stringer.keySymbolValuePair("timestamp", timestamp);
                stringer.keySymbolValuePair("timestampString", SnapshotUtil.formatHumanReadableDate(timestamp));
                stringer.keySymbolValuePair("newPartitionCount", newPartitionCount);
                stringer.key("tables").array();
                for (int ii = 0; ii < tables.size(); ii++) {
                    stringer.value(tables.get(ii).getTypeName());
                }
                stringer.endArray();
                stringer.key("partitionTransactionIds").object();
                for (Map.Entry<Integer, Long> entry : partitionTransactionIds.entrySet()) {
                    stringer.key(entry.getKey().toString()).value(entry.getValue());
                }
                stringer.endObject();
                stringer.keySymbolValuePair("catalogCRC", catalogCRC);
                stringer.key("instanceId").value(instanceId.serializeToJSONObject());
                extraSnapshotData.writeToSnapshotDigest(stringer);
                stringer.endObject();
            } catch (JSONException e) {
                throw new IOException(e);
            }
            // File layout: CRC32 of the JSON bytes, then the JSON bytes themselves.
            final byte[] tableListBytes = stringer.toString().getBytes(StandardCharsets.UTF_8);
            final PureJavaCrc32 crc = new PureJavaCrc32();
            crc.update(tableListBytes);
            ByteBuffer fileBuffer = ByteBuffer.allocate(tableListBytes.length + 4);
            fileBuffer.putInt((int) crc.getValue());
            fileBuffer.put(tableListBytes);
            fileBuffer.flip();
            fos.getChannel().write(fileBuffer);
            success = true;
            // The caller runs this later to fsync the digest and release the stream.
            return new Runnable() {
                @Override
                public void run() {
                    try {
                        fos.getChannel().force(true);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    } finally {
                        try {
                            fos.close();
                        } catch (IOException e) {
                            throw new RuntimeException(e);
                        }
                    }
                }
            };
        } finally {
            if (!success) {
                // Failure path: the Runnable will never run, so close the
                // stream here (the original leaked it).
                try {
                    fos.close();
                } catch (IOException ignored) {
                    // Best-effort close on an already-failing path.
                }
            }
        }
    } finally {
        if (!success) {
            // Remove the partially-written digest so it is never mistaken
            // for a valid one during restore.
            f.delete();
        }
    }
}
Use of org.json_voltpatches.JSONException in the voltdb project by VoltDB:
class SnapshotUtil, method retrieveSnapshotFilesInternal.
/**
 * Scan a directory tree for snapshot artifacts (.digest files, .jar catalog
 * files, hashinator config files, and table save files) and fold what is
 * found into namedSnapshots, keyed by the nonce parsed from each filename.
 * Unreadable files/directories are reported to stderr and skipped.
 * Recursion depth is capped at 32 to guard against filesystem cycles.
 */
private static void retrieveSnapshotFilesInternal(File directory, NamedSnapshots namedSnapshots, FileFilter filter, boolean validate, SnapshotPathType stype, VoltLogger logger, int recursion) {
    if (recursion == 32) {
        return;
    }
    if (!directory.exists()) {
        System.err.println("Error: Directory " + directory.getPath() + " doesn't exist");
        return;
    }
    if (!directory.canRead()) {
        System.err.println("Error: Directory " + directory.getPath() + " is not readable");
        return;
    }
    if (!directory.canExecute()) {
        System.err.println("Error: Directory " + directory.getPath() + " is not executable");
        return;
    }
    for (File f : directory.listFiles(filter)) {
        if (f.isDirectory()) {
            if (!f.canRead() || !f.canExecute()) {
                System.err.println("Warning: Skipping directory " + f.getPath() + " due to lack of read permission");
            } else {
                // BUG FIX: was "recursion++", which passes the un-incremented
                // value and so defeated the depth limit entirely.
                retrieveSnapshotFilesInternal(f, namedSnapshots, filter, validate, stype, logger, recursion + 1);
            }
            continue;
        }
        if (!f.canRead()) {
            System.err.println("Warning: " + f.getPath() + " is not readable");
            continue;
        }
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(f);
        } catch (FileNotFoundException e1) {
            System.err.println(e1.getMessage());
            continue;
        }
        try {
            if (f.getName().endsWith(".digest")) {
                // Digest: carries the snapshot txnId, instance id, and table list.
                JSONObject digest = CRCCheck(f, logger);
                if (digest == null)
                    continue;
                Long snapshotTxnId = digest.getLong("txnId");
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                named_s.setTxnId(snapshotTxnId);
                // Older digests lack an instanceId; default to (0, 0).
                InstanceId iid = new InstanceId(0, 0);
                if (digest.has("instanceId")) {
                    iid = new InstanceId(digest.getJSONObject("instanceId"));
                }
                named_s.setInstanceId(iid);
                TreeSet<String> tableSet = new TreeSet<String>();
                JSONArray tables = digest.getJSONArray("tables");
                for (int ii = 0; ii < tables.length(); ii++) {
                    tableSet.add(tables.getString(ii));
                }
                named_s.m_digestTables.add(tableSet);
                named_s.m_digests.add(f);
            } else if (f.getName().endsWith(".jar")) {
                // Catalog jar captured alongside the snapshot.
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                named_s.m_catalogFile = f;
            } else if (f.getName().endsWith(HASH_EXTENSION)) {
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                if (validate) {
                    try {
                        // Retrieve hashinator config data for validation only.
                        // Throws IOException when the CRC check fails.
                        HashinatorSnapshotData hashData = new HashinatorSnapshotData();
                        hashData.restoreFromFile(f);
                        named_s.m_hashConfig = f;
                    } catch (IOException e) {
                        logger.warn(String.format("Skipping bad hashinator snapshot file '%s'", f.getPath()));
                        // Skip bad hashinator files.
                        continue;
                    }
                }
            } else {
                // Anything else matching the filter is a table save file.
                HashSet<Integer> partitionIds = new HashSet<Integer>();
                TableSaveFile saveFile = new TableSaveFile(fis, 1, null, true);
                try {
                    for (Integer partitionId : saveFile.getPartitionIds()) {
                        partitionIds.add(partitionId);
                    }
                    if (validate && saveFile.getCompleted()) {
                        // Drain all chunks so checksum validation runs end to end.
                        while (saveFile.hasMoreChunks()) {
                            BBContainer cont = saveFile.getNextChunk();
                            if (cont != null) {
                                cont.discard();
                            }
                        }
                    }
                    partitionIds.removeAll(saveFile.getCorruptedPartitionIds());
                    String nonce = parseNonceFromSnapshotFilename(f.getName());
                    Snapshot named_s = namedSnapshots.get(nonce);
                    named_s.setTxnId(saveFile.getTxnId());
                    TableFiles namedTableFiles = named_s.m_tableFiles.get(saveFile.getTableName());
                    if (namedTableFiles == null) {
                        namedTableFiles = new TableFiles(saveFile.isReplicated());
                        named_s.m_tableFiles.put(saveFile.getTableName(), namedTableFiles);
                    }
                    namedTableFiles.m_files.add(f);
                    namedTableFiles.m_completed.add(saveFile.getCompleted());
                    namedTableFiles.m_validPartitionIds.add(partitionIds);
                    namedTableFiles.m_corruptParititionIds.add(saveFile.getCorruptedPartitionIds());
                    namedTableFiles.m_totalPartitionCounts.add(saveFile.getTotalPartitions());
                } finally {
                    saveFile.close();
                }
            }
        } catch (IOException | JSONException e) {
            // Both are per-file problems: report and move on to the next file.
            System.err.println(e.getMessage());
            System.err.println("Error: Unable to process " + f.getPath());
        } finally {
            try {
                if (fis != null) {
                    fis.close();
                }
            } catch (IOException ignored) {
                // Best-effort close; nothing useful to do on failure.
            }
        }
    }
}
Use of org.json_voltpatches.JSONException in the voltdb project by VoltDB:
class StreamSnapshotRequestConfig, method parseStreamPairs.
/**
 * Parse the "streamPairs" object from the request config JSON into a
 * multimap of source HSId to destination HSIds. Each key of "streamPairs"
 * is a source HSId (as a decimal string) mapping to a JSON array of
 * destination HSIds.
 *
 * Returns an empty multimap when jsData is null or parsing fails (a
 * warning is logged in the failure case).
 */
private static Multimap<Long, Long> parseStreamPairs(JSONObject jsData) {
    ArrayListMultimap<Long, Long> streamPairs = ArrayListMultimap.create();
    if (jsData != null) {
        try {
            JSONObject sp = jsData.getJSONObject("streamPairs");
            Iterator<String> it = sp.keys();
            while (it.hasNext()) {
                String key = it.next();
                // parseLong avoids the needless box/unbox of Long.valueOf.
                long sourceHSId = Long.parseLong(key);
                JSONArray destJSONArray = sp.getJSONArray(key);
                for (int i = 0; i < destJSONArray.length(); i++) {
                    long destHSId = destJSONArray.getLong(i);
                    streamPairs.put(sourceHSId, destHSId);
                }
            }
        } catch (JSONException e) {
            SNAP_LOG.warn("Failed to parse stream pair information", e);
        }
    }
    return streamPairs;
}
Use of org.json_voltpatches.JSONException in the voltdb project by VoltDB:
class SnapshotRequestConfig, method getTablesToInclude.
/**
 * Compute the tables to snapshot, honoring the optional "tables"
 * (include-only) and "skiptables" (exclude) name arrays in jsData.
 * Names are matched case-insensitively (upper-cased). Throws
 * IllegalArgumentException if any requested name does not exist in the
 * database, so user typos fail loudly instead of silently snapshotting
 * the wrong set.
 */
private static Table[] getTablesToInclude(JSONObject jsData, Database catalogDatabase) {
    final List<Table> tables = SnapshotUtil.getTablesToSave(catalogDatabase);
    Set<String> tableNamesToInclude = null;
    Set<String> tableNamesToExclude = null;
    if (jsData != null) {
        // null means "no constraint"; an empty set means "include nothing".
        tableNamesToInclude = parseTableNames(jsData.optJSONArray("tables"), "include");
        tableNamesToExclude = parseTableNames(jsData.optJSONArray("skiptables"), "exclude");
    }
    if (tableNamesToInclude != null && tableNamesToInclude.isEmpty()) {
        // Stream snapshot may specify empty snapshot sometimes.
        tables.clear();
    } else {
        ListIterator<Table> iter = tables.listIterator();
        while (iter.hasNext()) {
            Table table = iter.next();
            // remove() doubles as the membership test; leftover names are
            // reported as unknown tables below.
            if ((tableNamesToInclude != null && !tableNamesToInclude.remove(table.getTypeName())) || (tableNamesToExclude != null && tableNamesToExclude.remove(table.getTypeName()))) {
                // If the table index is not in the list to include or
                // is in the list to exclude, remove it
                iter.remove();
            }
        }
    }
    if (tableNamesToInclude != null && !tableNamesToInclude.isEmpty()) {
        throw new IllegalArgumentException("The following tables were specified to include in the snapshot, but are not present in the database: " + Joiner.on(", ").join(tableNamesToInclude));
    }
    if (tableNamesToExclude != null && !tableNamesToExclude.isEmpty()) {
        throw new IllegalArgumentException("The following tables were specified to exclude from the snapshot, but are not present in the database: " + Joiner.on(", ").join(tableNamesToExclude));
    }
    return tables.toArray(new Table[0]);
}

/**
 * Parse a JSON array of table names into an upper-cased, trimmed set,
 * skipping blanks. Unparseable entries are logged and skipped. Returns
 * null when the array itself is absent (meaning "no constraint").
 */
private static Set<String> parseTableNames(JSONArray tableNames, String purpose) {
    if (tableNames == null) {
        return null;
    }
    Set<String> names = Sets.newHashSet();
    for (int i = 0; i < tableNames.length(); i++) {
        try {
            final String s = tableNames.getString(i).trim().toUpperCase();
            if (!s.isEmpty()) {
                names.add(s);
            }
        } catch (JSONException e) {
            SNAP_LOG.warn("Unable to parse tables to " + purpose + " for snapshot", e);
        }
    }
    return names;
}
Aggregations