Example use of org.json_voltpatches.JSONArray in the VoltDB project: class SnapshotScanAgent, method getSnapshotDigestScanResults.
/**
 * Scans {@code path} for snapshot digest files and returns one result row per
 * readable digest file found.
 *
 * <p>Rows contain: host id, path, file name, comma-separated table names from
 * the digest, a status ("SUCCESS"/"FAILURE"), and an error string. If the file
 * listing itself fails ({@code retrieveRelevantFiles} returns null) a single
 * FAILURE row carrying {@code m_errorString} is emitted instead.
 *
 * @param path directory to scan for snapshot digests
 * @return a VoltTable of per-digest scan results (never null)
 */
private VoltTable getSnapshotDigestScanResults(String path) {
    VoltTable results = constructDigestResultsTable();
    List<File> relevantFiles = retrieveRelevantFiles(path);
    if (relevantFiles == null) {
        // retrieveRelevantFiles reports its failure cause via m_errorString
        results.addRow(m_messenger.getHostId(), "", "", "", "FAILURE", m_errorString);
    } else {
        for (final File f : relevantFiles) {
            // .vpt files are table data, not digests -- skip them
            if (f.getName().endsWith(".vpt")) {
                continue;
            }
            // Unreadable files are silently skipped, matching prior behavior
            if (!f.canRead()) {
                continue;
            }
            try {
                JSONObject digest = SnapshotUtil.CRCCheck(f, SNAP_LOG);
                if (digest == null) {
                    // CRC mismatch / corrupt digest; CRCCheck already logged it
                    continue;
                }
                // Collect the unique table names recorded in the digest
                Set<String> tableNames = new HashSet<String>();
                JSONArray tables = digest.getJSONArray("tables");
                for (int ii = 0; ii < tables.length(); ii++) {
                    tableNames.add(tables.getString(ii));
                }
                // String.join replaces the original hand-rolled
                // StringWriter/index-counter join; output is identical.
                results.addRow(m_messenger.getHostId(), path, f.getName(),
                        String.join(",", tableNames), "SUCCESS", "");
            } catch (Exception e) {
                // Best-effort scan: log and move on to the next file
                SNAP_LOG.warn(e);
            }
        }
    }
    return results;
}
Example use of org.json_voltpatches.JSONArray in the VoltDB project: class LeaderAppointer, method assignLeader.
/**
 * Chooses and records the leader HSId for a partition.
 *
 * <p>At cluster start the master host id is looked up in the topology JSON;
 * otherwise (or on lookup failure) {@code masterHostId} stays -1, which makes
 * the loop below fall through to the first replica in {@code children}.
 *
 * @param partitionId the partition needing a leader
 * @param children    candidate replica HSIds; must be non-empty
 * @return the HSId appointed as leader
 */
private long assignLeader(int partitionId, List<Long> children) {
    // We used masterHostId = -1 as a way to force the leader choice to be
    // the first replica in the list, if we don't have some other mechanism
    // which has successfully overridden it.
    int masterHostId = -1;
    if (m_state.get() == AppointerState.CLUSTER_START) {
        try {
            // find master in topo
            JSONArray parts = m_topo.getJSONArray(AbstractTopology.TOPO_PARTITIONS);
            for (int p = 0; p < parts.length(); p++) {
                JSONObject aPartition = parts.getJSONObject(p);
                int pid = aPartition.getInt(AbstractTopology.TOPO_PARTITION_ID);
                if (pid == partitionId) {
                    masterHostId = aPartition.getInt(AbstractTopology.TOPO_MASTER);
                    break;
                }
            }
        } catch (JSONException jse) {
            // Log the throwable through the logger instead of the original
            // printStackTrace() so the stack trace lands in the server log.
            tmLog.error("Failed to find master for partition " + partitionId + ", defaulting to 0", jse);
            // stupid default
            masterHostId = -1;
        }
    } else {
        // For now, if we're appointing a new leader as a result of a
        // failure, just pick the first replica in the children list.
        // Could eventually do something more complex here to try to keep a
        // semi-balance, but it's unclear that this has much utility until
        // we add rebalancing on rejoin as well.
        masterHostId = -1;
    }
    // Default to the first child; override if a child lives on the chosen host.
    long masterHSId = children.get(0);
    for (Long child : children) {
        if (CoreUtils.getHostIdFromHSId(child) == masterHostId) {
            masterHSId = child;
            break;
        }
    }
    tmLog.info("Appointing HSId " + CoreUtils.hsIdToString(masterHSId) + " as leader for partition " + partitionId);
    try {
        m_iv2appointees.put(partitionId, masterHSId);
    } catch (Exception e) {
        // Appointee publication is required for cluster progress; die loudly.
        VoltDB.crashLocalVoltDB("Unable to appoint new master for partition " + partitionId, true, e);
    }
    return masterHSId;
}
Example use of org.json_voltpatches.JSONArray in the VoltDB project: class AbstractPlanNode, method loadStringListMemberFromJSON.
/**
 * Reads an optional JSON array member as a list of strings.
 *
 * @param jobj the JSON object to read from
 * @param key  the member name expected to hold an array of strings
 * @return a list of the array's elements in order, or {@code null} if the
 *         member is absent or JSON null
 * @throws JSONException if the member exists but is not an array of strings
 */
List<String> loadStringListMemberFromJSON(JSONObject jobj, String key) throws JSONException {
    // isNull covers both a missing key and an explicit JSON null
    if (jobj.isNull(key)) {
        return null;
    }
    JSONArray jarray = jobj.getJSONArray(key);
    int numElems = jarray.length();
    List<String> result = new ArrayList<>(numElems);
    for (int ii = 0; ii < numElems; ++ii) {
        result.add(jarray.getString(ii));
    }
    return result;
}
Example use of org.json_voltpatches.JSONArray in the VoltDB project: class BalancePartitionsRequest, method parseRanges.
/**
 * Decodes the "partitionPairs" member of the request JSON into an immutable
 * list of {@link PartitionPair} objects, preserving array order.
 *
 * @param jsObj request JSON containing a "partitionPairs" array
 * @return the decoded pairs as an immutable list
 * @throws JSONException if the array or any expected member is missing
 */
private List<PartitionPair> parseRanges(JSONObject jsObj) throws JSONException {
    JSONArray pairs = jsObj.getJSONArray("partitionPairs");
    ImmutableList.Builder<PartitionPair> builder = ImmutableList.builder();
    for (int idx = 0; idx < pairs.length(); idx++) {
        JSONObject pair = pairs.getJSONObject(idx);
        int src = pair.getInt("srcPartition");
        int dest = pair.getInt("destPartition");
        int start = pair.getInt("rangeStart");
        int end = pair.getInt("rangeEnd");
        builder.add(new PartitionPair(src, dest, start, end));
    }
    return builder.build();
}
Example use of org.json_voltpatches.JSONArray in the VoltDB project: class SnapshotUtil, method retrieveSnapshotFilesInternal.
/**
 * Recursively walks {@code directory}, classifying each file that passes
 * {@code filter} into the {@code namedSnapshots} collection: .digest files
 * populate the snapshot's table set and txn id, .jar files become the catalog,
 * hashinator files are (optionally) CRC-validated, and everything else is
 * treated as a table save file.
 *
 * <p>Bug fix: the recursive call previously passed {@code recursion++}, which
 * hands the callee the OLD value, so the depth counter never advanced and the
 * depth-32 guard could never trip. It now passes {@code recursion + 1}.
 *
 * @param directory      directory to scan
 * @param namedSnapshots accumulator keyed by snapshot nonce
 * @param filter         file filter applied to directory entries
 * @param validate       if true, fully read save files / hashinator configs to
 *                       verify their integrity
 * @param stype          snapshot path type (currently unused here, kept for
 *                       interface compatibility)
 * @param logger         logger for CRC/validation warnings
 * @param recursion      current recursion depth; scanning stops at 32
 */
private static void retrieveSnapshotFilesInternal(File directory, NamedSnapshots namedSnapshots, FileFilter filter, boolean validate, SnapshotPathType stype, VoltLogger logger, int recursion) {
    if (recursion == 32) {
        return; // depth limit: guard against symlink cycles / runaway trees
    }
    if (!directory.exists()) {
        System.err.println("Error: Directory " + directory.getPath() + " doesn't exist");
        return;
    }
    if (!directory.canRead()) {
        System.err.println("Error: Directory " + directory.getPath() + " is not readable");
        return;
    }
    if (!directory.canExecute()) {
        System.err.println("Error: Directory " + directory.getPath() + " is not executable");
        return;
    }
    for (File f : directory.listFiles(filter)) {
        if (f.isDirectory()) {
            if (!f.canRead() || !f.canExecute()) {
                System.err.println("Warning: Skipping directory " + f.getPath() + " due to lack of read permission");
            } else {
                // FIX: pass recursion + 1 (was recursion++, which passed the
                // unincremented value and defeated the depth limit above).
                retrieveSnapshotFilesInternal(f, namedSnapshots, filter, validate, stype, logger, recursion + 1);
            }
            continue;
        }
        if (!f.canRead()) {
            System.err.println("Warning: " + f.getPath() + " is not readable");
            continue;
        }
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(f);
        } catch (FileNotFoundException e1) {
            System.err.println(e1.getMessage());
            continue;
        }
        try {
            if (f.getName().endsWith(".digest")) {
                // Digest file: holds the snapshot's txn id, instance id, and
                // the set of tables it covers. CRCCheck returns null on a
                // corrupt digest (and logs the problem itself).
                JSONObject digest = CRCCheck(f, logger);
                if (digest == null)
                    continue;
                Long snapshotTxnId = digest.getLong("txnId");
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                named_s.setTxnId(snapshotTxnId);
                // Older digests lack an instanceId; default to (0, 0).
                InstanceId iid = new InstanceId(0, 0);
                if (digest.has("instanceId")) {
                    iid = new InstanceId(digest.getJSONObject("instanceId"));
                }
                named_s.setInstanceId(iid);
                TreeSet<String> tableSet = new TreeSet<String>();
                JSONArray tables = digest.getJSONArray("tables");
                for (int ii = 0; ii < tables.length(); ii++) {
                    tableSet.add(tables.getString(ii));
                }
                named_s.m_digestTables.add(tableSet);
                named_s.m_digests.add(f);
            } else if (f.getName().endsWith(".jar")) {
                // Catalog jar for the snapshot.
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                named_s.m_catalogFile = f;
            } else if (f.getName().endsWith(HASH_EXTENSION)) {
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                if (validate) {
                    try {
                        // Retrieve hashinator config data for validation only.
                        // Throws IOException when the CRC check fails.
                        HashinatorSnapshotData hashData = new HashinatorSnapshotData();
                        hashData.restoreFromFile(f);
                        named_s.m_hashConfig = f;
                    } catch (IOException e) {
                        logger.warn(String.format("Skipping bad hashinator snapshot file '%s'", f.getPath()));
                        // Skip bad hashinator files.
                        continue;
                    }
                }
            } else {
                // Anything else is a table save file; record which partitions
                // it holds and (optionally) stream its chunks to validate it.
                HashSet<Integer> partitionIds = new HashSet<Integer>();
                TableSaveFile saveFile = new TableSaveFile(fis, 1, null, true);
                try {
                    for (Integer partitionId : saveFile.getPartitionIds()) {
                        partitionIds.add(partitionId);
                    }
                    if (validate && saveFile.getCompleted()) {
                        // Drain every chunk; getNextChunk performs the CRC work.
                        while (saveFile.hasMoreChunks()) {
                            BBContainer cont = saveFile.getNextChunk();
                            if (cont != null) {
                                cont.discard();
                            }
                        }
                    }
                    // Only partitions that survived validation count as valid.
                    partitionIds.removeAll(saveFile.getCorruptedPartitionIds());
                    String nonce = parseNonceFromSnapshotFilename(f.getName());
                    Snapshot named_s = namedSnapshots.get(nonce);
                    named_s.setTxnId(saveFile.getTxnId());
                    TableFiles namedTableFiles = named_s.m_tableFiles.get(saveFile.getTableName());
                    if (namedTableFiles == null) {
                        namedTableFiles = new TableFiles(saveFile.isReplicated());
                        named_s.m_tableFiles.put(saveFile.getTableName(), namedTableFiles);
                    }
                    namedTableFiles.m_files.add(f);
                    namedTableFiles.m_completed.add(saveFile.getCompleted());
                    namedTableFiles.m_validPartitionIds.add(partitionIds);
                    namedTableFiles.m_corruptParititionIds.add(saveFile.getCorruptedPartitionIds());
                    namedTableFiles.m_totalPartitionCounts.add(saveFile.getTotalPartitions());
                } finally {
                    saveFile.close();
                }
            }
        } catch (IOException | JSONException e) {
            // Merged the two identical catch blocks into one multi-catch.
            System.err.println(e.getMessage());
            System.err.println("Error: Unable to process " + f.getPath());
        } finally {
            try {
                if (fis != null) {
                    fis.close();
                }
            } catch (IOException ignored) {
                // best-effort close; nothing useful to do on failure
            }
        }
    }
}
Aggregations