Use of org.voltdb.sysprocs.saverestore.SnapshotUtil.TableFiles in project voltdb by VoltDB.
From the class SnapshotConverter, method main.
/**
 * Command-line entry point: converts the named snapshot's table data to CSV or TSV files.
 *
 * @param args command-line arguments
 */
public static void main(String[] args) {
    String snapshotName = null;
    ArrayList<File> directories = new ArrayList<File>();
    ArrayList<String> tables = new ArrayList<String>();
    File outdir = null;
    String type = null;
    char delimiter = '\0';
    // Walk the argument list once; each flag consumes the token that follows it,
    // and any bare token is taken as the snapshot name.
    for (int ii = 0; ii < args.length; ii++) {
        String arg = args[ii];
        if (arg.equals("--help")) {
            printHelpAndQuit(0);
        } else if (arg.equals("--dir")) {
            if (args.length < ii + 2) {
                System.err.println("Error: Not enough args following --dir");
                printHelpAndQuit(-1);
            }
            boolean invalidDir = false;
            String dir = args[ii + 1];
            ii++;
            File f = new File(dir);
            if (!f.exists()) {
                System.err.println("Error: " + dir + " does not exist");
                invalidDir = true;
            }
            if (!f.canRead()) {
                System.err.println("Error: " + dir + " does not have read permission set");
                invalidDir = true;
            }
            if (!f.canExecute()) {
                System.err.println("Error: " + dir + " does not have execute permission set");
                invalidDir = true;
            }
            if (invalidDir) {
                System.exit(-1);
            }
            directories.add(f);
        } else if (arg.equals("--timezone")) {
            if (args.length < ii + 2) {
                System.err.println("Error: Not enough args following --timezone");
                printHelpAndQuit(-1);
            }
            String tzId = args[ii + 1];
            ii++;
            VoltTableUtil.tz = TimeZone.getTimeZone(tzId);
        } else if (arg.equals("--table")) {
            if (args.length < ii + 2) {
                System.err.println("Error: Not enough args following --table");
                printHelpAndQuit(-1);
            }
            tables.add(args[ii + 1].toUpperCase());
            ii++;
        } else if (arg.equals("--outdir")) {
            if (args.length < ii + 2) {
                System.err.println("Error: Not enough args following --outdir");
                printHelpAndQuit(-1);
            }
            boolean invalidDir = false;
            outdir = new File(args[ii + 1]);
            if (!outdir.exists()) {
                System.err.println("Error: " + outdir.getPath() + " does not exist");
                invalidDir = true;
            }
            if (!outdir.canRead()) {
                System.err.println("Error: " + outdir.getPath() + " does not have read permission set");
                invalidDir = true;
            }
            if (!outdir.canExecute()) {
                System.err.println("Error: " + outdir.getPath() + " does not have execute permission set");
                invalidDir = true;
            }
            if (!outdir.canWrite()) {
                System.err.println("Error: " + outdir.getPath() + " does not have write permission set");
                invalidDir = true;
            }
            if (invalidDir) {
                System.exit(-1);
            }
            ii++;
        } else if (arg.equals("--type")) {
            if (args.length < ii + 2) {
                System.err.println("Error: Not enough args following --type");
                printHelpAndQuit(-1);
            }
            type = args[ii + 1];
            if (type.equalsIgnoreCase("csv")) {
                delimiter = ',';
            } else if (type.equalsIgnoreCase("tsv")) {
                delimiter = '\t';
            } else {
                System.err.println("Error: --type must be one of CSV or TSV");
                printHelpAndQuit(-1);
            }
            ii++;
        } else {
            if (snapshotName != null) {
                System.err.println("Error: Multiple snapshots specified for conversion. First - "
                        + snapshotName + " second " + args[ii]);
                printHelpAndQuit(-1);
            }
            snapshotName = args[ii];
        }
    }
    // Validate required options; directory and output directory default to ".".
    boolean fail = false;
    if (snapshotName == null) {
        System.err.println("Error: No snapshot name specified");
        fail = true;
    }
    if (directories.isEmpty()) {
        directories.add(new File("."));
    }
    if (tables.isEmpty()) {
        System.err.println("Error: No --table specified");
        fail = true;
    }
    if (outdir == null) {
        outdir = new File(".");
    }
    if (type == null) {
        System.err.println("Error: No --type specified");
        fail = true;
    }
    if (fail) {
        printHelpAndQuit(-1);
    }
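    // Find snapshot files in each supplied directory, keeping only those
    // whose nonce matches the requested snapshot name.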
    Map<String, Snapshot> snapshots = new TreeMap<String, Snapshot>();
    HashSet<String> snapshotNames = new HashSet<String>();
    snapshotNames.add(snapshotName);
    SpecificSnapshotFilter filter = new SpecificSnapshotFilter(snapshotNames);
    for (File directory : directories) {
        SnapshotUtil.retrieveSnapshotFiles(directory, snapshots, filter, false,
                SnapshotPathType.SNAP_PATH, CONSOLE_LOG);
    }
    if (snapshots.size() > 1) {
        System.err.println("Error: Found " + snapshots.size() + " snapshots with specified name");
        int ii = 0;
        for (Snapshot entry : snapshots.values()) {
            System.err.println("Snapshot " + ii + " taken "
                    + new Date(entry.getInstanceId().getTimestamp()));
            System.err.println("Files: ");
            for (File digest : entry.m_digests) {
                System.err.println("\t" + digest.getPath());
            }
            for (Map.Entry<String, TableFiles> e2 : entry.m_tableFiles.entrySet()) {
                System.err.println("\t" + e2.getKey());
                for (File tableFile : e2.getValue().m_files) {
                    System.err.println("\t\t" + tableFile.getPath());
                }
            }
            ii++;
        }
        System.exit(-1);
    }
    if (snapshots.size() < 1) {
        System.err.println("Error: Did not find any snapshots with the specified name");
        System.exit(-1);
    }
    /*
     * Build a plan for which partitions to pull from which save file.
     */
    final Snapshot snapshot = snapshots.values().iterator().next();
    Map<String, Map<File, Set<Integer>>> tableToFilesWithPartitions =
            new TreeMap<String, Map<File, Set<Integer>>>();
    for (String tableName : tables) {
        if (!snapshot.m_tableFiles.containsKey(tableName)) {
            System.err.println("Error: Snapshot does not contain table " + tableName);
            System.exit(-1);
        }
        TableFiles tableFiles = snapshot.m_tableFiles.get(tableName);
        if (!tableFiles.m_isReplicated) {
            // Each partition is taken from the first file that contains it;
            // later files contribute only partitions not yet claimed.
            TreeSet<Integer> partitionIds = new TreeSet<Integer>();
            Map<File, Set<Integer>> partitionsFromFile = new TreeMap<File, Set<Integer>>();
            for (int ii = 0; ii < tableFiles.m_files.size(); ii++) {
                Set<Integer> validPartitions = tableFiles.m_validPartitionIds.get(ii);
                TreeSet<Integer> partitionsToTake = new TreeSet<Integer>(validPartitions);
                partitionsToTake.removeAll(partitionIds);
                partitionIds.addAll(validPartitions);
                if (!partitionsToTake.isEmpty()) {
                    partitionsFromFile.put(tableFiles.m_files.get(ii), partitionsToTake);
                }
            }
            // The claimed partitions must form the full range 0..totalPartitionCount-1.
            int totalPartitionCount = tableFiles.m_totalPartitionCounts.get(0);
            if (!((partitionIds.size() == totalPartitionCount)
                    && (partitionIds.first() == 0)
                    && (partitionIds.last() == totalPartitionCount - 1))) {
                System.err.println("Error: Not all partitions present for table " + tableName);
                fail = true;
            } else {
                tableToFilesWithPartitions.put(tableName, partitionsFromFile);
            }
        } else {
            // Replicated tables need only one copy; a null partition set
            // means "take every row in the file".
            Map<File, Set<Integer>> partitionsFromFile = new TreeMap<File, Set<Integer>>();
            partitionsFromFile.put(tableFiles.m_files.get(0), null);
            tableToFilesWithPartitions.put(tableName, partitionsFromFile);
        }
    }
    if (fail) {
        System.exit(-1);
    }
    /*
     * For every output file that will be created, attempt to create it, and
     * print an error if the file already exists or couldn't be created.
     */
    for (Map.Entry<String, Map<File, Set<Integer>>> entry : tableToFilesWithPartitions.entrySet()) {
        String tableName = entry.getKey();
        File outfile = new File(outdir.getPath() + File.separator + tableName + "." + type.toLowerCase());
        try {
            if (!outfile.createNewFile()) {
                System.err.println("Error: Failed to create output file " + outfile.getPath()
                        + " for table " + tableName + "\n File already exists");
                fail = true;
            }
        } catch (IOException e) {
            System.err.println(e.getMessage());
            System.err.println("Error: Failed to create output file " + outfile.getPath()
                    + " for table " + tableName);
            fail = true;
        }
    }
    if (fail) {
        System.exit(-1);
    }
    /*
     * Actually convert the tables and write the data to the appropriate destination.
     */
    for (Map.Entry<String, Map<File, Set<Integer>>> entry : tableToFilesWithPartitions.entrySet()) {
        String tableName = entry.getKey();
        File outfile = new File(outdir.getPath() + File.separator + tableName + "." + type.toLowerCase());
        Map<File, Set<Integer>> partitionsFromFile = entry.getValue();
        for (Map.Entry<File, Set<Integer>> e2 : partitionsFromFile.entrySet()) {
            File infile = e2.getKey();
            Set<Integer> partitionSet = e2.getValue();
            // A null partition set (replicated table) selects all rows.
            Integer[] partitions = null;
            if (partitionSet != null) {
                partitions = new Integer[partitionSet.size()];
                int ii = 0;
                for (Integer partition : partitionSet) {
                    partitions[ii++] = partition;
                }
            }
            try {
                CSVTableSaveFile.convertTableSaveFile(delimiter, partitions, outfile, infile);
            } catch (Exception e) {
                System.err.println(e.getMessage());
                System.err.println("Error: Failed to convert " + infile.getPath()
                        + " to " + outfile.getPath());
                // Record the failure so the exit status below reflects it.
                fail = true;
            }
        }
    }
    if (fail) {
        System.exit(-1);
    }
}
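For reference, a hypothetical invocation of this converter. The jar path, snapshot directory, table name, and nonce below are placeholders, and the fully qualified class name assumes the converter lives in org.voltdb.utils:

java -cp voltdb.jar org.voltdb.utils.SnapshotConverter \
    --dir /path/to/snapshots \
    --table ORDERS \
    --type csv \
    --outdir /tmp/export \
    my_snapshot_nonce

With --type csv this writes one file per requested table (here /tmp/export/ORDERS.csv), and the plan built above guarantees each partition is read from exactly one save file.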
Use of org.voltdb.sysprocs.saverestore.SnapshotUtil.TableFiles in project voltdb by VoltDB.
From the class RestoreAgent, method checkSnapshotIsComplete.
private SnapshotInfo checkSnapshotIsComplete(Long key, Snapshot s) {
    int partitionCount = -1;
    for (TableFiles tf : s.m_tableFiles.values()) {
        // Check that every save file for the table completed
        if (tf.m_completed.stream().anyMatch(b -> !b)) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                    .append(" because it was not completed.");
            return null;
        }
        // Replicated tables don't participate in the partition count check
        if (tf.m_isReplicated) {
            continue;
        }
        // Every save file has to agree on the total partition count
        for (int count : tf.m_totalPartitionCounts) {
            if (partitionCount == -1) {
                partitionCount = count;
            } else if (count != partitionCount) {
                m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                        .append(" because it had the wrong partition count ").append(count)
                        .append(", expecting ").append(partitionCount);
                return null;
            }
        }
    }
    if (s.m_digests.isEmpty()) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                .append(" because it had no valid digest file.");
        return null;
    }
    File digest = s.m_digests.get(0);
    Long catalog_crc = null;
    Map<Integer, Long> pidToTxnMap = new TreeMap<Integer, Long>();
    Set<String> digestTableNames = new HashSet<String>();
    // Create a valid but meaningless InstanceId to support snapshot versions
    // that predate instanceId checking
    InstanceId instanceId = new InstanceId(0, 0);
    int newPartitionCount = -1;
    try {
        JSONObject digest_detail = SnapshotUtil.CRCCheck(digest, LOG);
        if (digest_detail == null) {
            // A bare IOException would log "due to: null" below, so supply a message.
            throw new IOException("digest CRC check failed");
        }
        catalog_crc = digest_detail.getLong("catalogCRC");
        if (digest_detail.has("partitionTransactionIds")) {
            JSONObject pidToTxnId = digest_detail.getJSONObject("partitionTransactionIds");
            Iterator<String> it = pidToTxnId.keys();
            while (it.hasNext()) {
                String pidkey = it.next();
                Long txnidval = pidToTxnId.getLong(pidkey);
                pidToTxnMap.put(Integer.valueOf(pidkey), txnidval);
            }
        }
        if (digest_detail.has("instanceId")) {
            instanceId = new InstanceId(digest_detail.getJSONObject("instanceId"));
        }
        if (digest_detail.has("newPartitionCount")) {
            newPartitionCount = digest_detail.getInt("newPartitionCount");
        }
        if (digest_detail.has("tables")) {
            JSONArray tableObj = digest_detail.getJSONArray("tables");
            for (int i = 0; i < tableObj.length(); i++) {
                digestTableNames.add(tableObj.getString(i));
            }
        }
    } catch (IOException ioe) {
        m_snapshotErrLogStr.append("\nUnable to read digest file: ")
                .append(digest.getAbsolutePath())
                .append(" due to: ").append(ioe.getMessage());
        return null;
    } catch (JSONException je) {
        m_snapshotErrLogStr.append("\nUnable to extract catalog CRC from digest: ")
                .append(digest.getAbsolutePath())
                .append(" due to: ").append(je.getMessage());
        return null;
    }
    if (s.m_catalogFile == null) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                .append(" because it had no catalog.");
        return null;
    }
    try {
        byte[] bytes = MiscUtils.fileToBytes(s.m_catalogFile);
        InMemoryJarfile jarfile = CatalogUtil.loadInMemoryJarFile(bytes);
        if (jarfile.getCRC() != catalog_crc) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                    .append(" because the catalog CRC did not match the digest.");
            return null;
        }
        // Make sure this is not a partial snapshot by comparing digestTableNames
        // with all normal table names in the catalog file. A normal table is one
        // that is neither a materialized view nor an export table.
        Set<String> catalogNormalTableNames = CatalogUtil.getNormalTableNamesFromInMemoryJar(jarfile);
        if (!catalogNormalTableNames.equals(digestTableNames)) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                    .append(" because it is a partial snapshot.");
            return null;
        }
    } catch (IOException ioe) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce())
                .append(" because the catalog file could not be validated.");
        return null;
    }
    SnapshotInfo info = new SnapshotInfo(key, digest.getParent(),
            SnapshotUtil.parseNonceFromDigestFilename(digest.getName()),
            partitionCount, newPartitionCount, catalog_crc, m_hostId,
            instanceId, digestTableNames, s.m_stype);
    // Populate the table-to-partition map.
    for (Entry<String, TableFiles> te : s.m_tableFiles.entrySet()) {
        TableFiles tableFile = te.getValue();
        HashSet<Integer> ids = new HashSet<Integer>();
        for (Set<Integer> idSet : tableFile.m_validPartitionIds) {
            ids.addAll(idSet);
        }
        if (!tableFile.m_isReplicated) {
            info.partitions.put(te.getKey(), ids);
        }
        // Keep track of tables for which we've seen files while we're here
        info.fileTables.add(te.getKey());
    }
    info.setPidToTxnIdMap(pidToTxnMap);
    return info;
}
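To summarize the per-table validation above, here is a minimal sketch of the same checks as a standalone helper. The method name tableFilesLookComplete and the expectedPartitionCount parameter are illustrative assumptions, not part of the VoltDB API:

// Illustrative restatement of the per-table checks in checkSnapshotIsComplete.
static boolean tableFilesLookComplete(SnapshotUtil.TableFiles tf, int expectedPartitionCount) {
    // Every save file for the table must be flagged completed.
    for (boolean completed : tf.m_completed) {
        if (!completed) {
            return false;
        }
    }
    // Replicated tables carry no per-partition bookkeeping to verify.
    if (tf.m_isReplicated) {
        return true;
    }
    // Every save file must agree on the snapshot's total partition count.
    for (int count : tf.m_totalPartitionCounts) {
        if (count != expectedPartitionCount) {
            return false;
        }
    }
    return true;
}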