Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32 in project voltdb by VoltDB.
The class VoltTable, method varbinaryToPrintableString.
/**
 * Make a printable, short string for a varbinary.
 * The string includes a CRC and the contents of the varbinary in hex.
 * Contents longer than 13 chars are truncated and ellipsized.
 * Yes, "ellipsized" is totally a word.
 *
 * Example: "bin[crc:1298399436,value:0xABCDEF1234...]"
 *
 * @param bin The bytes to print out.
 * @return A string representation that is printable and short.
 */
public static String varbinaryToPrintableString(byte[] bin) {
    PureJavaCrc32 crc = new PureJavaCrc32();
    StringBuilder sb = new StringBuilder();
    sb.append("bin[crc:");
    crc.update(bin);
    sb.append(crc.getValue());
    sb.append(",value:0x");
    String hex = Encoder.hexEncode(bin);
    if (hex.length() > 13) {
        sb.append(hex.substring(0, 10));
        sb.append("...");
    } else {
        sb.append(hex);
    }
    sb.append("]");
    return sb.toString();
}
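For readers without the voltdb jar on hand, here is a minimal standalone sketch of the same formatting logic. It substitutes the JDK's java.util.zip.CRC32 for PureJavaCrc32 (Hadoop documents PureJavaCrc32 as a pure-Java implementation of the same CRC-32, and both implement java.util.zip.Checksum) and a hand-rolled hex loop for VoltDB's Encoder.hexEncode; the class name VarbinaryPrinterSketch is made up for the example.

import java.util.zip.CRC32;

public class VarbinaryPrinterSketch {
    public static String varbinaryToPrintableString(byte[] bin) {
        // CRC32 stands in for PureJavaCrc32; both implement java.util.zip.Checksum.
        CRC32 crc = new CRC32();
        crc.update(bin);
        // Stand-in for Encoder.hexEncode: upper-case hex, two chars per byte.
        StringBuilder hex = new StringBuilder();
        for (byte b : bin) {
            hex.append(String.format("%02X", b & 0xFF));
        }
        StringBuilder sb = new StringBuilder("bin[crc:");
        sb.append(crc.getValue()).append(",value:0x");
        if (hex.length() > 13) {
            sb.append(hex, 0, 10).append("...");
        } else {
            sb.append(hex);
        }
        return sb.append(']').toString();
    }

    public static void main(String[] args) {
        byte[] bin = {(byte) 0xAB, (byte) 0xCD, (byte) 0xEF, 0x12, 0x34, 0x56, 0x78, (byte) 0x9A};
        // Prints: bin[crc:<crc>,value:0xABCDEF1234...]
        System.out.println(varbinaryToPrintableString(bin));
    }
}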
Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32 in project voltdb by VoltDB.
The class CatalogUtil, method calculateDrTableSignatureAndCrc.
/**
* Deterministically serializes all DR table signatures into a string and calculates the CRC checksum.
* @param catalog The catalog
* @return A Pair of the CRC checksum and the serialized signature string.
*/
public static Pair<Long, String> calculateDrTableSignatureAndCrc(Database catalog) {
    SortedSet<Table> tables = Sets.newTreeSet();
    tables.addAll(getNormalTables(catalog, true));
    tables.addAll(getNormalTables(catalog, false));
    final PureJavaCrc32 crc = new PureJavaCrc32();
    final StringBuilder sb = new StringBuilder();
    String delimiter = "";
    for (Table t : tables) {
        if (t.getIsdred()) {
            crc.update(t.getSignature().getBytes(Charsets.UTF_8));
            sb.append(delimiter).append(t.getSignature());
            delimiter = SIGNATURE_DELIMITER;
        }
    }
    return Pair.of(crc.getValue(), sb.toString());
}
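A small sketch of why the TreeSet above matters: the CRC is folded over the signatures in sorted order, so the result is independent of the order in which the tables were collected, which is what lets two nodes compare checksums. The signature strings below are invented, and CRC32 again stands in for PureJavaCrc32.

import java.nio.charset.StandardCharsets;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.zip.CRC32;

public class DrSignatureCrcSketch {
    // Fold a CRC over strings in the order the iterable yields them.
    static long crcOf(Iterable<String> signatures) {
        CRC32 crc = new CRC32();
        for (String s : signatures) {
            crc.update(s.getBytes(StandardCharsets.UTF_8));
        }
        return crc.getValue();
    }

    public static void main(String[] args) {
        // Same hypothetical signatures inserted in different orders.
        SortedSet<String> a = new TreeSet<>();
        a.add("ORDERS|bigint,varchar");
        a.add("CUSTOMERS|integer,varchar");
        SortedSet<String> b = new TreeSet<>();
        b.add("CUSTOMERS|integer,varchar");
        b.add("ORDERS|bigint,varchar");
        // Both sets iterate in sorted order, so the CRCs agree.
        System.out.println(crcOf(a) == crcOf(b)); // true
    }
}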
Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32 in project voltdb by VoltDB.
The class SnapshotUtil, method writeSnapshotDigest.
/**
 * Create a digest for a snapshot.
 * @param txnId transaction ID when the snapshot was initiated
 * @param catalogCRC CRC of the catalog in effect when the snapshot was taken
 * @param path path to which snapshot files will be written
 * @param pathType type of the snapshot path
 * @param nonce nonce used to distinguish this snapshot
 * @param tables list of tables present in this snapshot
 * @param hostId host ID where this is happening
 * @param partitionTransactionIds map from partition ID to last seen transaction ID
 * @param extraSnapshotData persisted export, DR, etc. state
 * @param instanceId ID of the cluster instance taking the snapshot
 * @param timestamp time at which the snapshot was initiated
 * @param newPartitionCount partition count recorded in the digest
 * @param clusterId ID of the cluster taking the snapshot
 * @throws IOException if the digest file cannot be written
 */
public static Runnable writeSnapshotDigest(
        long txnId, long catalogCRC, String path, String pathType, String nonce,
        List<Table> tables, int hostId, Map<Integer, Long> partitionTransactionIds,
        ExtensibleSnapshotDigestData extraSnapshotData, InstanceId instanceId,
        long timestamp, int newPartitionCount, int clusterId) throws IOException {
    final File f = new VoltFile(path, constructDigestFilenameForNonce(nonce, hostId));
    if (f.exists()) {
        if (!f.delete()) {
            throw new IOException("Unable to write table list file " + f);
        }
    }
    boolean success = false;
    try {
        final FileOutputStream fos = new FileOutputStream(f);
        StringWriter sw = new StringWriter();
        JSONStringer stringer = new JSONStringer();
        try {
            stringer.object();
            stringer.keySymbolValuePair("version", 1);
            stringer.keySymbolValuePair("clusterid", clusterId);
            stringer.keySymbolValuePair("txnId", txnId);
            stringer.keySymbolValuePair("timestamp", timestamp);
            stringer.keySymbolValuePair("timestampString", SnapshotUtil.formatHumanReadableDate(timestamp));
            stringer.keySymbolValuePair("newPartitionCount", newPartitionCount);
            stringer.key("tables").array();
            for (int ii = 0; ii < tables.size(); ii++) {
                stringer.value(tables.get(ii).getTypeName());
            }
            stringer.endArray();
            stringer.key("partitionTransactionIds").object();
            for (Map.Entry<Integer, Long> entry : partitionTransactionIds.entrySet()) {
                stringer.key(entry.getKey().toString()).value(entry.getValue());
            }
            stringer.endObject();
            stringer.keySymbolValuePair("catalogCRC", catalogCRC);
            stringer.key("instanceId").value(instanceId.serializeToJSONObject());
            extraSnapshotData.writeToSnapshotDigest(stringer);
            stringer.endObject();
        } catch (JSONException e) {
            throw new IOException(e);
        }
        sw.append(stringer.toString());
        final byte[] tableListBytes = sw.getBuffer().toString().getBytes(StandardCharsets.UTF_8);
        final PureJavaCrc32 crc = new PureJavaCrc32();
        crc.update(tableListBytes);
        ByteBuffer fileBuffer = ByteBuffer.allocate(tableListBytes.length + 4);
        fileBuffer.putInt((int) crc.getValue());
        fileBuffer.put(tableListBytes);
        fileBuffer.flip();
        fos.getChannel().write(fileBuffer);
        success = true;
        return new Runnable() {
            @Override
            public void run() {
                try {
                    fos.getChannel().force(true);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    try {
                        fos.close();
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        };
    } finally {
        if (!success) {
            f.delete();
        }
    }
}
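The digest file written above has a simple layout: a 4-byte checksum (the low 32 bits of Checksum.getValue(), big-endian via ByteBuffer.putInt) followed by the UTF-8 JSON body. Below is a sketch of the matching reader under that assumed layout; readDigest is a hypothetical helper, not VoltDB API, and CRC32 substitutes for PureJavaCrc32.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.zip.CRC32;

public class DigestReaderSketch {
    // Read back a digest file: verify the leading CRC, then return the JSON text.
    static String readDigest(Path digestFile) throws IOException {
        ByteBuffer buf = ByteBuffer.wrap(Files.readAllBytes(digestFile));
        int storedCrc = buf.getInt();            // big-endian, matching putInt
        byte[] body = new byte[buf.remaining()];
        buf.get(body);
        CRC32 crc = new CRC32();
        crc.update(body);
        if (storedCrc != (int) crc.getValue()) { // compare low 32 bits
            throw new IOException("Snapshot digest CRC mismatch in " + digestFile);
        }
        return new String(body, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) throws IOException {
        // Round trip against a temp file written in the same layout.
        byte[] body = "{\"version\":1}".getBytes(StandardCharsets.UTF_8);
        CRC32 crc = new CRC32();
        crc.update(body);
        ByteBuffer out = ByteBuffer.allocate(4 + body.length);
        out.putInt((int) crc.getValue()).put(body);
        Path tmp = Files.createTempFile("digest", ".json");
        Files.write(tmp, out.array());
        System.out.println(readDigest(tmp)); // {"version":1}
    }
}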
Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32 in project voltdb by VoltDB.
The class HashinatorSnapshotData, method restoreFromBuffer.
/**
 * Restore and check hashinator config data.
 * @param buf input buffer
 * @return instance ID read from the buffer
 * @throws IOException on failure
 */
public InstanceId restoreFromBuffer(ByteBuffer buf) throws IOException {
    buf.rewind();
    // Assumes config data is the last field.
    int dataSize = buf.remaining() - OFFSET_DATA;
    if (dataSize <= 0) {
        throw new IOException("Hashinator snapshot data is too small.");
    }
    // Get the CRC, zero out its buffer field, and compare to calculated CRC.
    long crcHeader = buf.getLong(OFFSET_CRC);
    buf.putLong(OFFSET_CRC, 0);
    final PureJavaCrc32 crcBuffer = new PureJavaCrc32();
    assert (buf.hasArray());
    crcBuffer.update(buf.array());
    if (crcHeader != crcBuffer.getValue()) {
        throw new IOException("Hashinator snapshot data CRC mismatch.");
    }
    // Slurp the data.
    int coord = buf.getInt(OFFSET_INSTID_COORD);
    long timestamp = buf.getLong(OFFSET_INSTID_TIMESTAMP);
    InstanceId instId = new InstanceId(coord, timestamp);
    m_version = buf.getLong(OFFSET_VERSION);
    m_serData = new byte[dataSize];
    buf.position(OFFSET_DATA);
    buf.get(m_serData);
    return instId;
}
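One subtlety in restoreFromBuffer: Checksum.update(byte[]) hashes the entire backing array and ignores the buffer's position and limit. That is only correct here because saveToBuffer (next listing) hashes the whole array the same way. A sketch of the difference, with CRC32 standing in for PureJavaCrc32:

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class WholeArrayVsRemaining {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putLong(0, 0L);                  // zeroed CRC slot
        buf.putLong(8, 0x1234567890ABCDEFL); // payload

        CRC32 whole = new CRC32();
        whole.update(buf.array());           // all 16 bytes; position is ignored

        buf.position(8);
        CRC32 remaining = new CRC32();
        remaining.update(buf.array(), buf.position(), buf.remaining()); // last 8 only

        // false: `whole` also covers the eight leading header bytes.
        System.out.println(whole.getValue() == remaining.getValue());
    }
}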
Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32 in project voltdb by VoltDB.
The class HashinatorSnapshotData, method saveToBuffer.
/**
 * Save to an output buffer, including header and config data.
 * @param instId instance ID to embed in the header
 * @return byte buffer ready to write to a file
 * @throws IOException on failure
 */
public ByteBuffer saveToBuffer(InstanceId instId) throws IOException {
    if (instId == null) {
        throw new IOException("Null instance ID.");
    }
    if (m_serData == null) {
        throw new IOException("Uninitialized hashinator snapshot data.");
    }
    // Assume config data is the last field.
    ByteBuffer buf = ByteBuffer.allocate(m_serData.length + OFFSET_DATA);
    // Make sure the CRC starts at zero since those bytes figure into the CRC calculation.
    buf.putLong(OFFSET_CRC, 0);
    buf.putInt(OFFSET_INSTID_COORD, instId.getCoord());
    buf.putLong(OFFSET_INSTID_TIMESTAMP, instId.getTimestamp());
    buf.putLong(OFFSET_VERSION, m_version);
    buf.position(OFFSET_DATA);
    buf.put(m_serData);
    // Finalize the CRC based on the entire buffer and reset the current position.
    final PureJavaCrc32 crc = new PureJavaCrc32();
    crc.update(buf.array());
    buf.putLong(OFFSET_CRC, crc.getValue());
    buf.rewind();
    return buf;
}
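The save/restore pair uses a common trick worth isolating: the CRC slot is held at zero while the whole buffer is hashed, the result is patched in, and the reader zeroes the same slot before recomputing. Below is a self-contained round trip of that pattern under assumed offsets; the OFFSET_* values and class name are illustrative, not VoltDB's, and CRC32 once more replaces PureJavaCrc32.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class CrcSlotRoundTrip {
    static final int OFFSET_CRC = 0;   // assumed layout: 8-byte CRC slot first
    static final int OFFSET_DATA = 8;  // payload follows the header

    static ByteBuffer save(byte[] payload) {
        ByteBuffer buf = ByteBuffer.allocate(OFFSET_DATA + payload.length);
        buf.putLong(OFFSET_CRC, 0L);             // slot must be zero while hashing
        buf.position(OFFSET_DATA);
        buf.put(payload);
        CRC32 crc = new CRC32();
        crc.update(buf.array());
        buf.putLong(OFFSET_CRC, crc.getValue()); // patch the final CRC in
        buf.rewind();
        return buf;
    }

    static boolean verify(ByteBuffer buf) {
        long stored = buf.getLong(OFFSET_CRC);
        buf.putLong(OFFSET_CRC, 0L);             // restore the state the writer hashed
        CRC32 crc = new CRC32();
        crc.update(buf.array());
        buf.putLong(OFFSET_CRC, stored);         // put the header back
        return stored == crc.getValue();
    }

    public static void main(String[] args) {
        ByteBuffer buf = save("hashinator config".getBytes(StandardCharsets.UTF_8));
        System.out.println(verify(buf));         // true
        buf.put(OFFSET_DATA, (byte) 'X');        // corrupt one payload byte
        System.out.println(verify(buf));         // false
    }
}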