Use of org.apache.cassandra.io.util.FileDataInput in the project eiger by wlloyd.
From the class SSTableReaderTest, method testSpannedIndexPositions.
@Test
public void testSpannedIndexPositions() throws IOException, ExecutionException, InterruptedException {
    // Force tiny mmap segments: each index entry is ~11 bytes, so a 40-byte cap
    // generates many segments and exercises the spanned-index code paths.
    // NOTE(review): this mutates global state and is never restored, so tests
    // running later in the same JVM see the reduced segment size -- confirm
    // whether that is intentional for this suite.
    MmappedSegmentedFile.MAX_SEGMENT_SIZE = 40;
    Table table = Table.open("Keyspace1");
    ColumnFamilyStore store = table.getColumnFamilyStore("Standard1");

    // Insert a bunch of data (even keys 0..98), then flush and compact down
    // to a single sstable so every key lives in one file.
    CompactionManager.instance.disableAutoCompaction();
    for (int j = 0; j < 100; j += 2) {
        ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
        RowMutation rm = new RowMutation("Keyspace1", key);
        rm.add(new QueryPath("Standard1", null, ByteBufferUtil.bytes("0")), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
        rm.apply();
    }
    store.forceBlockingFlush();
    CompactionManager.instance.performMaximal(store);

    // Check that all inserted keys are found correctly.
    SSTableReader sstable = store.getSSTables().iterator().next();
    for (int j = 0; j < 100; j += 2) {
        DecoratedKey dk = Util.dk(String.valueOf(j));
        FileDataInput file = sstable.getFileDataInput(dk, DatabaseDescriptor.getIndexedReadBufferSizeInKB() * 1024);
        try {
            DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, ByteBufferUtil.readWithShortLength(file));
            assert keyInDisk.equals(dk) : String.format("%s != %s in %s", keyInDisk, dk, file.getPath());
        } finally {
            // BUG FIX: the FileDataInput was previously leaked on every iteration.
            file.close();
        }
    }

    // Check no false positives: odd keys were never inserted, so lookups
    // must miss (getPosition returns -1).
    for (int j = 1; j < 110; j += 2) {
        DecoratedKey dk = Util.dk(String.valueOf(j));
        assert sstable.getPosition(dk, SSTableReader.Operator.EQ) == -1;
    }
}
Use of org.apache.cassandra.io.util.FileDataInput in the project cassandra by apache.
From the class CommitLogDescriptorTest, method testDescriptorPersistence.
// migrated from CommitLogTest
/**
 * Round-trips a CommitLogDescriptor through writeHeader/readHeader and checks
 * both that the descriptor survives intact and that readHeader consumes
 * exactly the header bytes and nothing more.
 */
private void testDescriptorPersistence(CommitLogDescriptor desc) throws IOException {
    ByteBuffer serialized = ByteBuffer.allocate(1024);
    CommitLogDescriptor.writeHeader(serialized, desc);
    long headerLength = serialized.position();

    // Append trailing noise so the reader has extra data it must not consume.
    serialized.putDouble(0.1);
    serialized.flip();

    FileDataInput in = new FileSegmentInputStream(serialized, "input", 0);
    CommitLogDescriptor roundTripped = CommitLogDescriptor.readHeader(in, neverEnabledEncryption);

    Assert.assertEquals("Descriptor length", headerLength, in.getFilePointer());
    Assert.assertEquals("Descriptors", desc, roundTripped);
}
Use of org.apache.cassandra.io.util.FileDataInput in the project eiger by wlloyd.
From the class ColumnSerializer, method deserialize.
/**
 * Deserializes a single column from {@code dis}.
 *
 * The leading flags byte selects the concrete column kind (counter, expiring,
 * or a plain/deleted/counter-update/pending-transaction column). All branches
 * share the same trailing wire layout: four nullable longs (null encoded as
 * Long.MIN_VALUE), a previous-versions set (length -1 encodes null), and an
 * optional transaction-coordinator key -- factored into private helpers below.
 *
 * @param dis          source stream, positioned at the start of a column
 * @param flag         deserialization context flag passed through to CounterColumn.create
 * @param expireBefore passed through to ExpiringColumn.create for expiration handling
 * @throws IOException            on stream errors
 * @throws CorruptColumnException if the column name has a non-positive length
 */
@Override
public Column deserialize(DataInput dis, IColumnSerializer.Flag flag, int expireBefore) throws IOException {
    ByteBuffer name = ByteBufferUtil.readWithShortLength(dis);
    if (name.remaining() <= 0) {
        String format = "invalid column name length %d%s";
        String details = "";
        if (dis instanceof FileDataInput) {
            // Enrich the error with file position info when available.
            FileDataInput fdis = (FileDataInput) dis;
            details = String.format(" (%s, %d bytes remaining)", fdis.getPath(), fdis.bytesRemaining());
        }
        throw new CorruptColumnException(String.format(format, name.remaining(), details));
    }

    int b = dis.readUnsignedByte();
    if ((b & COUNTER_MASK) != 0) {
        long timestampOfLastDelete = dis.readLong();
        long ts = dis.readLong();
        ByteBuffer value = ByteBufferUtil.readWithLength(dis);
        Long lastAccessTime = readNullableLong(dis);
        Long previousVersionLastAccessTime = readNullableLong(dis);
        Long earliestValidTime = readNullableLong(dis);
        Long latestValidTime = readNullableLong(dis);
        NavigableSet<IColumn> previousVersions = readPreviousVersions(dis);
        // Consume the coordinator key to keep the stream positioned correctly;
        // CounterColumn.create does not accept it.
        readTransactionCoordinatorKey(dis);
        return CounterColumn.create(name, value, ts, timestampOfLastDelete, flag, lastAccessTime, previousVersionLastAccessTime, earliestValidTime, latestValidTime, previousVersions);
    } else if ((b & EXPIRATION_MASK) != 0) {
        int ttl = dis.readInt();
        int expiration = dis.readInt();
        long ts = dis.readLong();
        ByteBuffer value = ByteBufferUtil.readWithLength(dis);
        // These trailing fields are present on the wire but ExpiringColumn.create
        // does not take them; they are read only to advance the stream.
        readNullableLong(dis);
        readNullableLong(dis);
        readNullableLong(dis);
        readNullableLong(dis);
        readPreviousVersions(dis);
        readTransactionCoordinatorKey(dis);
        return ExpiringColumn.create(name, value, ts, ttl, expiration, expireBefore, flag);
    } else {
        long ts = dis.readLong();
        ByteBuffer value = ByteBufferUtil.readWithLength(dis);
        Long lastAccessTime = readNullableLong(dis);
        Long previousVersionLastAccessTime = readNullableLong(dis);
        Long earliestValidTime = readNullableLong(dis);
        Long latestValidTime = readNullableLong(dis);
        NavigableSet<IColumn> previousVersions = readPreviousVersions(dis);
        // BUG FIX: the original read the coordinator key bytes but discarded
        // them, so every column below was constructed with a null key even
        // when one was serialized.
        ByteBuffer transactionCoordinatorKey = readTransactionCoordinatorKey(dis);

        if ((b & COUNTER_UPDATE_MASK) != 0)
            return new CounterUpdateColumn(name, value, ts, lastAccessTime, previousVersionLastAccessTime, earliestValidTime, latestValidTime, previousVersions, transactionCoordinatorKey);
        if ((b & DELETION_MASK) != 0)
            return new DeletedColumn(name, value, ts, lastAccessTime, previousVersionLastAccessTime, earliestValidTime, latestValidTime, previousVersions, transactionCoordinatorKey);
        if ((b & PENDING_TRANSACTION_MASK) != 0)
            return new PendingTransactionColumn(name, value, ts, lastAccessTime, previousVersionLastAccessTime, earliestValidTime, latestValidTime, previousVersions, transactionCoordinatorKey);
        return new Column(name, value, ts, lastAccessTime, previousVersionLastAccessTime, earliestValidTime, latestValidTime, previousVersions, transactionCoordinatorKey);
    }
}

/** Reads one long from the stream; Long.MIN_VALUE encodes null on the wire. */
private static Long readNullableLong(DataInput dis) throws IOException {
    long v = dis.readLong();
    return v == Long.MIN_VALUE ? null : Long.valueOf(v);
}

/** Reads the previous-versions set, sorted by EVTComparator; a length of -1 encodes null. */
private NavigableSet<IColumn> readPreviousVersions(DataInput dis) throws IOException {
    int length = dis.readInt();
    if (length == -1)
        return null;
    NavigableSet<IColumn> previousVersions = new TreeSet<IColumn>(new EVTComparator());
    for (int i = 0; i < length; ++i)
        previousVersions.add(deserialize(dis));
    return previousVersions;
}

/**
 * Reads the optional transaction-coordinator key: an int length marker, then
 * (when the marker is positive) a length-prefixed byte buffer. Returns null
 * when no key was serialized.
 */
private ByteBuffer readTransactionCoordinatorKey(DataInput dis) throws IOException {
    int length = dis.readInt();
    return length > 0 ? ByteBufferUtil.readWithLength(dis) : null;
}
Aggregations