Use of org.apache.cassandra.io.util.DataOutputBuffer in project eiger by wlloyd.
In class SuperColumnSerializer, the method updateDigest:
@Override
public void updateDigest(MessageDigest digest)
{
    assert name != null;
    // Feed the (shared) name buffer to the digest without disturbing its position.
    digest.update(name.duplicate());
    // Serialize the deletion timestamp through a DataOutputBuffer so the digest
    // sees the same bytes the serialized form would contain.
    DataOutputBuffer buffer = new DataOutputBuffer();
    try
    {
        buffer.writeLong(getMarkedForDeleteAt());
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    digest.update(buffer.getData(), 0, buffer.getLength());
    // Recurse so the digest covers every subcolumn of the super column.
    for (IColumn column : getSubColumns())
        column.updateDigest(digest);
}
Use of org.apache.cassandra.io.util.DataOutputBuffer in project eiger by wlloyd.
In class ColumnIndexer, the method writeBloomFilter:
/**
 * Writes a Bloom filter, length-prefixed, to the given output.
 *
 * @param dos output to serialize the Bloom filter to
 * @param bf  the Bloom filter to write
 *
 * @throws IOException on any I/O error
 */
private static void writeBloomFilter(DataOutput dos, BloomFilter bf) throws IOException
{
    // Serialize into memory first so the length can be written ahead of the data.
    DataOutputBuffer bufOut = new DataOutputBuffer();
    BloomFilter.serializer().serialize(bf, bufOut);
    dos.writeInt(bufOut.getLength());
    dos.write(bufOut.getData(), 0, bufOut.getLength());
    bufOut.flush();
}
Use of org.apache.cassandra.io.util.DataOutputBuffer in project eiger by wlloyd.
In class LazilyCompactedRow, the method update:
public void update(MessageDigest digest)
{
    assert !closed;
    // No special case for rows.size() == 1: we are actually skipping some bytes
    // here, so blindly updating the digest with everything would not be correct.
    DataOutputBuffer out = new DataOutputBuffer();
    try
    {
        ColumnFamily.serializer().serializeCFInfo(emptyColumnFamily, out);
        out.writeInt(columnCount);
        digest.update(out.getData(), 0, out.getLength());
    }
    catch (IOException e)
    {
        throw new IOError(e);
    }
    // Digest each column in iteration order.
    Iterator<IColumn> iter = iterator();
    while (iter.hasNext())
        iter.next().updateDigest(digest);
    close();
}
Use of org.apache.cassandra.io.util.DataOutputBuffer in project eiger by wlloyd.
In class LazilyCompactedRow, the method write:
public long write(DataOutput out) throws IOException
{
    assert !closed;
    // Serialize the column family info into memory so its length is known
    // before the row's total data size is written.
    DataOutputBuffer clockOut = new DataOutputBuffer();
    ColumnFamily.serializer().serializeCFInfo(emptyColumnFamily, clockOut);
    long dataSize = headerBuffer.getLength() + clockOut.getLength() + columnSerializedSize;
    if (logger.isDebugEnabled())
        logger.debug(String.format("header / clock / column sizes are %s / %s / %s",
                                   headerBuffer.getLength(), clockOut.getLength(), columnSerializedSize));
    assert dataSize > 0;
    // Layout: total size, header buffer, CF info, column count, columns.
    out.writeLong(dataSize);
    out.write(headerBuffer.getData(), 0, headerBuffer.getLength());
    out.write(clockOut.getData(), 0, clockOut.getLength());
    out.writeInt(columnCount);
    Iterator<IColumn> iter = iterator();
    while (iter.hasNext())
    {
        IColumn column = iter.next();
        emptyColumnFamily.getColumnSerializer().serialize(column, out);
    }
    // The columns are serialized lazily in two passes; verify the second pass
    // produced exactly the size the first pass predicted.
    long secondPassColumnSize = reducer == null ? 0 : reducer.serializedSize;
    assert secondPassColumnSize == columnSerializedSize
        : "originally calculated column size of " + columnSerializedSize + " but now it is " + secondPassColumnSize;
    close();
    return dataSize;
}
Use of org.apache.cassandra.io.util.DataOutputBuffer in project eiger by wlloyd.
In class MessagingService, the method constructStreamHeader:
public ByteBuffer constructStreamHeader(StreamHeader streamHeader, boolean compress, int version)
{
    /*
     Set up the protocol header. It is 4 bytes long, represented as an int.
     The first 2 bits indicate the serializer type. The 3rd bit indicates
     whether compression is on (off by default). The 4th bit indicates
     whether we are in streaming mode (off by default). The following 4 bits
     are reserved for future use. The next 8 bits carry the version number.
     The remaining 16 bits are currently unused.
    */
    int header = 0;
    // serializer type in the low 2 bits
    header |= serializerType_.ordinal();
    // compression bit
    if (compress)
        header |= 4;
    // streaming bit
    header |= 8;
    // version in bits 8-15
    header |= (version << 8);
    /* Protocol header setup finished. */

    /* Add the StreamHeader, which carries the session id along with the
     * pending-file info for the stream:
     * | Session Id | Pending File Size | Pending File | Bool more files |
     * | No. of Pending files | Pending Files ... |
     */
    byte[] bytes;
    try
    {
        DataOutputBuffer buffer = new DataOutputBuffer();
        StreamHeader.serializer().serialize(streamHeader, buffer, version);
        // Note: getData() returns the backing array, which may be longer than
        // getLength(); any slack bytes are included in the frame below.
        bytes = buffer.getData();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    assert bytes.length > 0;

    // Frame: magic, protocol header, payload length, payload.
    ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + 4 + bytes.length);
    buffer.putInt(PROTOCOL_MAGIC);
    buffer.putInt(header);
    buffer.putInt(bytes.length);
    buffer.put(bytes);
    buffer.flip();
    return buffer;
}