Use of java.io.CharArrayWriter in project robovm (by robovm): class InvocationTargetExceptionTest, method test_printStackTraceLjava_io_PrintWriter.
/**
 * java.lang.reflect.InvocationTargetException#printStackTrace(java.io.PrintWriter)
 */
public void test_printStackTraceLjava_io_PrintWriter() {
// java.lang.reflect.InvocationTargetException.printStackTrace(java.io.PrintWriter)
try {
// Case 1: PrintWriter backed by an in-memory CharArrayWriter.
CharArrayWriter caw = new CharArrayWriter();
PrintWriter pw = new PrintWriter(caw);
InvocationTargetException ite = new InvocationTargetException(new InvocationTargetException(null));
ite.printStackTrace(pw);
String s = caw.toString();
// Assert non-null BEFORE building a message that calls s.length().
// The original code evaluated "..." + s.length() eagerly, so a null s
// would have thrown NPE before its s != null check could ever matter.
assertNotNull("printStackTrace produced null output", s);
assertTrue("printStackTrace failed." + s.length(), s.length() > 400);
pw.close();
// Case 2: PrintWriter backed by a ByteArrayOutputStream.
ByteArrayOutputStream bao = new ByteArrayOutputStream();
pw = new PrintWriter(bao);
ite = new InvocationTargetException(new InvocationTargetException(null));
ite.printStackTrace(pw);
// Test will fail if this line removed: PrintWriter buffers, so the
// bytes only reach bao after an explicit flush.
pw.flush();
s = bao.toString();
assertNotNull("printStackTrace produced null output", s);
assertTrue("printStackTrace failed." + s.length(), s.length() > 400);
} catch (Exception e) {
fail("Exception during test : " + e.getMessage());
}
}
Use of java.io.CharArrayWriter in project voltdb (by VoltDB): class SnapshotUtil, method generateSnapshotReport.
/**
 * Generates a detailed human-readable report for a snapshot and a boolean
 * indicating whether the snapshot can be successfully loaded.
 * The implementation supports disabling the hashinator check, e.g. for old snapshots in tests.
 * @param snapshotTxnId transaction id identifying the snapshot being examined
 * @param snapshot collected digest and table-file metadata for the snapshot
 * @param expectHashinator whether a hash configuration must be present for the snapshot to be valid
 * @return Pair of (consistent?, report text); the report starts with a one-line valid/corrupted summary
 */
public static Pair<Boolean, String> generateSnapshotReport(Long snapshotTxnId, Snapshot snapshot, boolean expectHashinator) {
// The whole report is accumulated in memory and returned as a string.
CharArrayWriter caw = new CharArrayWriter();
PrintWriter pw = new PrintWriter(caw);
boolean snapshotConsistent = true;
// indentString tracks the current nesting depth of the report: one tab
// character per level, pushed/popped via concatenation and substring().
String indentString = "";
pw.println(indentString + "TxnId: " + snapshotTxnId);
pw.println(indentString + "Date: " + new Date(org.voltcore.TransactionIdManager.getTimestampFromTransactionId(snapshotTxnId)));
pw.println(indentString + "Digests:");
indentString = "\t";
// Union of all table names mentioned by any digest; compared against the
// tables that actually have uncorrupted files at the end of the method.
TreeSet<String> digestTablesSeen = new TreeSet<String>();
if (snapshot.m_digests.isEmpty()) {
// Without any digest the snapshot cannot be validated or restored.
pw.println(indentString + "No digests found.");
snapshotConsistent = false;
} else {
boolean inconsistent = false;
/*
 * Iterate over the digests and ensure that they all contain the same list of tables
 */
// Maps digest index -> list of digest indices whose table set disagrees with it.
Map<Integer, List<Integer>> inconsistentDigests = new HashMap<Integer, List<Integer>>();
for (int ii = 0; ii < snapshot.m_digests.size(); ii++) {
inconsistentDigests.put(ii, new ArrayList<Integer>());
Set<String> tables = snapshot.m_digestTables.get(ii);
// O(n^2) pairwise comparison; digest counts are small in practice.
for (int zz = 0; zz < snapshot.m_digests.size(); zz++) {
if (zz == ii) {
continue;
}
if (!tables.equals(snapshot.m_digestTables.get(zz))) {
snapshotConsistent = false;
inconsistent = true;
inconsistentDigests.get(ii).add(zz);
}
}
}
/*
 * Summarize what was inconsistent/consistent
 */
if (!inconsistent) {
for (int ii = 0; ii < snapshot.m_digests.size(); ii++) {
pw.println(indentString + snapshot.m_digests.get(ii).getPath());
}
} else {
pw.println(indentString + "Not all digests are consistent");
indentString = indentString + "\t";
for (Map.Entry<Integer, List<Integer>> entry : inconsistentDigests.entrySet()) {
File left = snapshot.m_digests.get(entry.getKey());
pw.println(indentString + left.getPath() + " is inconsistent with:");
indentString = indentString + "\t";
for (Integer id : entry.getValue()) {
File right = snapshot.m_digests.get(id);
pw.println(indentString + right.getPath());
}
indentString = indentString.substring(1);
}
}
/*
 * Print the list of tables found in the digests
 */
indentString = indentString.substring(1);
pw.print(indentString + "Tables: ");
// ii doubles as a "first element printed?" flag for comma separation.
int ii = 0;
for (int jj = 0; jj < snapshot.m_digestTables.size(); jj++) {
for (String table : snapshot.m_digestTables.get(jj)) {
digestTablesSeen.add(table);
}
}
for (String table : digestTablesSeen) {
if (ii != 0) {
pw.print(", ");
}
ii++;
pw.print(table);
}
// NOTE(review): print("\n") rather than println() — presumably intentional
// to emit exactly '\n' regardless of the platform line separator; confirm.
pw.print("\n");
}
/*
 * Check the hash data (if expected).
 */
if (expectHashinator) {
pw.print(indentString + "Hash configuration: ");
if (snapshot.m_hashConfig != null) {
pw.println(indentString + "present");
} else {
pw.println(indentString + "not present");
snapshotConsistent = false;
}
}
/*
 * Check that the total partition count is the same in every table file
 */
// Stays null if every table is replicated (or has no partition counts).
Integer totalPartitionCount = null;
indentString = indentString + "\t";
for (Map.Entry<String, TableFiles> entry : snapshot.m_tableFiles.entrySet()) {
if (entry.getValue().m_isReplicated) {
// Replicated tables have no per-partition count to compare.
continue;
}
for (Integer partitionCount : entry.getValue().m_totalPartitionCounts) {
if (totalPartitionCount == null) {
// First count seen becomes the reference value.
totalPartitionCount = partitionCount;
} else if (!totalPartitionCount.equals(partitionCount)) {
snapshotConsistent = false;
pw.println(indentString + "Partition count is not consistent throughout snapshot files for " + entry.getKey() + ". Saw " + partitionCount + " and " + totalPartitionCount);
}
}
}
/*
 * Now check that each individual table has enough information to be restored.
 * It is possible for a valid partition set to be available and still have a restore
 * fail because the restore plan loads a save file with a corrupt partition.
 */
TreeSet<String> consistentTablesSeen = new TreeSet<String>();
for (Map.Entry<String, TableFiles> entry : snapshot.m_tableFiles.entrySet()) {
TableFiles tableFiles = entry.getValue();
/*
 * Calculate the set of visible partitions not corrupted partitions
 */
TreeSet<Integer> partitionsAvailable = new TreeSet<Integer>();
// kk walks m_completed in lockstep with m_validPartitionIds.
int kk = 0;
for (Set<Integer> validPartitionIds : tableFiles.m_validPartitionIds) {
// Only completed files contribute usable partitions.
if (tableFiles.m_completed.get(kk++)) {
partitionsAvailable.addAll(validPartitionIds);
}
}
/*
 * Ensure the correct range of partition ids is present
 */
boolean partitionsPresent = false;
// NOTE(review): partitionsAvailable.first()/last() throw NoSuchElementException
// on an empty set, and unboxing totalPartitionCount NPEs if it is still null
// for a non-replicated table — presumably unreachable for well-formed
// snapshots, but worth confirming against callers.
if ((partitionsAvailable.size() == (tableFiles.m_isReplicated ? 1 : totalPartitionCount)) && (partitionsAvailable.first() == 0) && (partitionsAvailable.last() == (tableFiles.m_isReplicated ? 1 : totalPartitionCount) - 1)) {
partitionsPresent = true;
}
/*
 * Report if any of the files have corrupt partitions
 */
boolean hasCorruptPartitions = false;
for (Set<Integer> corruptIds : tableFiles.m_corruptParititionIds) {
if (!corruptIds.isEmpty()) {
hasCorruptPartitions = true;
snapshotConsistent = false;
}
}
pw.println(indentString + "Table name: " + entry.getKey());
indentString = indentString + "\t";
pw.println(indentString + "Replicated: " + entry.getValue().m_isReplicated);
pw.println(indentString + "Valid partition set available: " + partitionsPresent);
pw.println(indentString + "Corrupt partitions present: " + hasCorruptPartitions);
/*
 * Print information about individual files such as the partitions present and whether
 * they are corrupted
 */
pw.println(indentString + "Files: ");
indentString = indentString + "\t";
for (int ii = 0; ii < tableFiles.m_files.size(); ii++) {
// Build comma-separated id lists for this file's corrupt and valid partitions.
String corruptPartitionIdString = "";
int zz = 0;
for (Integer partitionId : tableFiles.m_corruptParititionIds.get(ii)) {
if (zz != 0) {
corruptPartitionIdString = corruptPartitionIdString + ", ";
}
zz++;
corruptPartitionIdString = corruptPartitionIdString + partitionId;
}
String validPartitionIdString = "";
zz = 0;
for (Integer partitionId : tableFiles.m_validPartitionIds.get(ii)) {
if (zz != 0) {
validPartitionIdString = validPartitionIdString + ", ";
}
zz++;
validPartitionIdString = validPartitionIdString + partitionId;
}
if (corruptPartitionIdString.isEmpty()) {
// Table is only counted as restorable if NO file for it has corruption.
consistentTablesSeen.add(entry.getKey());
pw.println(indentString + tableFiles.m_files.get(ii).getPath() + " Completed: " + tableFiles.m_completed.get(ii) + " Partitions: " + validPartitionIdString);
} else {
pw.println(indentString + tableFiles.m_files.get(ii).getPath() + " Completed: " + tableFiles.m_completed.get(ii) + " Valid Partitions: " + validPartitionIdString + " Corrupt Partitions: " + corruptPartitionIdString);
}
}
// Pop the two levels pushed for this table (after "Table name:" and "Files:").
indentString = indentString.substring(2);
}
indentString = indentString.substring(1);
// Collect the names of digest-listed tables that lack clean files.
StringBuilder missingTables = new StringBuilder(8192);
if (!consistentTablesSeen.containsAll(digestTablesSeen)) {
snapshotConsistent = false;
missingTables.append("Missing tables: ");
Set<String> missingTablesSet = new TreeSet<String>(digestTablesSeen);
missingTablesSet.removeAll(consistentTablesSeen);
int hh = 0;
for (String tableName : missingTablesSet) {
if (hh > 0) {
missingTables.append(", ");
}
missingTables.append(tableName);
hh++;
}
missingTables.append('\n');
}
/*
 * Tack on a summary at the beginning to indicate whether a restore is guaranteed to succeed
 * with this file set.
 */
if (snapshotConsistent) {
return Pair.of(true, "Snapshot valid\n" + caw.toString());
} else {
StringBuilder sb = new StringBuilder(8192);
sb.append("Snapshot corrupted\n").append(missingTables).append(caw.toCharArray());
return Pair.of(false, sb.toString());
}
}
Use of java.io.CharArrayWriter in project voltdb (by VoltDB): class SnapshotUtil, method CRCCheck.
/**
 * Loads a snapshot digest file, verifies its embedded CRC, and returns the
 * contents as a JSON object (so this is more than just a CRC check).
 * Note that this only checks that the CRC at the beginning of the digest
 * file matches the CRC computed over the rest of the digest file.
 *
 * @param f The snapshot digest file object
 * @param logger logger used to report malformed or mismatching digests
 * @return the digest contents as a JSONObject, or null if the file is
 *         truncated, fails the CRC check, or cannot be parsed
 * @throws IOException declared for callers; read errors are currently
 *         caught, logged, and reported as a null return
 */
public static JSONObject CRCCheck(File f, VoltLogger logger) throws IOException {
final FileInputStream fis = new FileInputStream(f);
try {
final BufferedInputStream bis = new BufferedInputStream(fis);
ByteBuffer crcBuffer = ByteBuffer.allocate(4);
// A single read() may legally return fewer than 4 bytes even before
// EOF, so loop until the 4-byte CRC header is fully read. The original
// single-read version could spuriously report EOF on a short read.
int headerBytes = 0;
while (headerBytes < 4) {
int n = bis.read(crcBuffer.array(), headerBytes, 4 - headerBytes);
if (n == -1) {
break;
}
headerBytes += n;
}
if (headerBytes != 4) {
logger.warn("EOF while attempting to read CRC from snapshot digest " + f + " on host " + CoreUtils.getHostnameOrAddress());
return null;
}
final int crc = crcBuffer.getInt();
final InputStreamReader isr = new InputStreamReader(bis, StandardCharsets.UTF_8);
// Accumulate the first line (old format) or the whole JSON payload
// (new format, which contains no bare '\n' before its end).
CharArrayWriter caw = new CharArrayWriter();
while (true) {
int nextChar = isr.read();
if (nextChar == -1) {
break;
}
// Old-format digests terminate the table list with a newline.
if (nextChar == '\n') {
break;
}
caw.write(nextChar);
}
/*
 * Try and parse the contents as a JSON object. If it succeeds then assume
 * it is the new version of the digest file. It is unlikely the old version
 * will successfully parse as JSON because it starts with a number
 * instead of an open brace.
 */
JSONObject obj = null;
try {
obj = new JSONObject(caw.toString());
} catch (JSONException e) {
// Assume it is the old format and fall through to the conversion below.
}
/*
 * Convert the old style file to a JSONObject so it can be presented
 * via a consistent interface.
 */
if (obj == null) {
String tableList = caw.toString();
byte[] tableListBytes = tableList.getBytes(StandardCharsets.UTF_8);
PureJavaCrc32 tableListCRC = new PureJavaCrc32();
tableListCRC.update(tableListBytes);
// The old format includes the trailing newline in its CRC.
tableListCRC.update("\n".getBytes(StandardCharsets.UTF_8));
final int calculatedValue = (int) tableListCRC.getValue();
if (crc != calculatedValue) {
logger.warn("CRC of snapshot digest " + f + " did not match digest contents");
return null;
}
// Old format: "<txnId>,table1,table2,..." on a single line.
String[] tableNames = tableList.split(",");
long txnId = Long.valueOf(tableNames[0]);
obj = new JSONObject();
try {
obj.put("version", 0);
obj.put("txnId", txnId);
for (int ii = 1; ii < tableNames.length; ii++) {
obj.append("tables", tableNames[ii]);
}
} catch (JSONException e) {
logger.warn("Exception parsing JSON of digest " + f, e);
return null;
}
return obj;
} else {
/*
 * Verify the CRC and then return the data as a JSON object.
 */
String tableList = caw.toString();
byte[] tableListBytes = tableList.getBytes(StandardCharsets.UTF_8);
PureJavaCrc32 tableListCRC = new PureJavaCrc32();
tableListCRC.update(tableListBytes);
final int calculatedValue = (int) tableListCRC.getValue();
if (crc != calculatedValue) {
logger.warn("CRC of snapshot digest " + f + " did not match digest contents");
return null;
}
return obj;
}
} catch (Exception e) {
// Any malformed digest is reported as null rather than propagated.
logger.warn("Exception while parsing snapshot digest " + f, e);
return null;
} finally {
try {
// Closing the underlying stream also releases bis/isr wrappers.
fis.close();
} catch (IOException e) {
// Best-effort close; nothing useful to do on failure.
}
}
}
Use of java.io.CharArrayWriter in project jdk8u_jdk (by JetBrains): class ClassDump, method traceFixedWidthInt.
/**
 * Print an integer so that it takes 'length' characters in
 * the output (right-aligned, padded with spaces on the left).
 * Temporary until formatting code is stable.
 *
 * @param x the value to print
 * @param length the minimum field width; values wider than this are not truncated
 */
private void traceFixedWidthInt(int x, int length) {
if (verbose) {
// String.valueOf replaces the original CharArrayWriter + PrintWriter
// round-trip, which allocated two stream objects just to stringify an int.
String str = String.valueOf(x);
for (int cnt = length - str.length(); cnt > 0; --cnt) {
trace(" ");
}
trace(str);
}
}
Use of java.io.CharArrayWriter in project jena (by apache): class TestRDFWriter, method rdfwriter_6.
@SuppressWarnings("deprecation")
@Test
public void rdfwriter_6() {
// Serialize the graph as N-Triples into an in-memory character sink.
CharArrayWriter sink = new CharArrayWriter();
RDFWriter.create().source(graph).lang(Lang.NT).build().output(sink);
// The output must mention the subject IRI from the test graph.
String serialized = sink.toString();
assertTrue(serialized.contains("example/s"));
}
Aggregations