Usage of org.apache.jackrabbit.oak.segment.RecordNumbers.Entry in the project jackrabbit-oak (by Apache).
From the class MutableRecordNumbersTest, method iteratingShouldBeCorrect.
@Test
public void iteratingShouldBeCorrect() {
    // Populate a fresh table, remembering every record number -> offset pair
    // handed out by addRecord().
    MutableRecordNumbers table = new MutableRecordNumbers();
    Map<Integer, Integer> expected = new HashMap<>();
    for (int offset = 0; offset < 100000; offset++) {
        expected.put(table.addRecord(RecordType.VALUE, offset), offset);
    }
    // Iterating the table must yield exactly the same mapping.
    Map<Integer, Integer> actual = new HashMap<>();
    for (Entry e : table) {
        actual.put(e.getRecordNumber(), e.getOffset());
    }
    assertEquals(expected, actual);
}
Usage of org.apache.jackrabbit.oak.segment.RecordNumbers.Entry in the project jackrabbit-oak (by Apache).
From the class ImmutableRecordNumbersTest, method iteratingShouldBeCorrect.
@Test
public void iteratingShouldBeCorrect() {
    // Seed three record number -> offset mappings: (1,2), (3,4), (5,6).
    Map<Integer, Integer> entries = new HashMap<>();
    for (int recordNumber = 1; recordNumber <= 5; recordNumber += 2) {
        entries.put(recordNumber, recordNumber + 1);
    }
    ImmutableRecordNumbers table = new ImmutableRecordNumbers(offsets(entries), types(entries));
    // Iterating the table must reproduce the seeded mapping exactly.
    Map<Integer, Integer> iterated = new HashMap<>();
    for (Entry e : table) {
        iterated.put(e.getRecordNumber(), e.getOffset());
    }
    assertEquals(entries, iterated);
}
Usage of org.apache.jackrabbit.oak.segment.RecordNumbers.Entry in the project jackrabbit-oak (by Apache).
From the class Segment, method toString.
// ------------------------------------------------------------< Object >--
@Override
public String toString() {
    StringWriter dump = new StringWriter();
    try (PrintWriter out = new PrintWriter(dump)) {
        // Header line: segment id and size.
        out.format("Segment %s (%d bytes)%n", id, data.size());
        String segmentInfo = getSegmentInfo();
        if (segmentInfo != null) {
            out.format("Info: %s, Generation: %s%n", segmentInfo, getGcGeneration());
        }
        // Only data segments carry reference and record tables worth printing.
        if (id.isDataSegmentId()) {
            out.println("--------------------------------------------------------------------------");
            int referenceIndex = 1;
            for (SegmentId reference : segmentReferences) {
                out.format("reference %02x: %s%n", referenceIndex++, reference);
            }
            for (Entry entry : recordNumbers) {
                int offset = entry.getOffset();
                // Offsets are relative to a full-size segment; translate to an
                // address within the possibly smaller actual buffer.
                int address = data.size() - (MAX_SEGMENT_SIZE - offset);
                out.format("%10s record %08x: %08x @ %08x%n", entry.getType(), entry.getRecordNumber(), offset, address);
            }
        }
        out.println("--------------------------------------------------------------------------");
        try {
            data.hexDump(new WriterOutputStream(out, Charsets.UTF_8));
        } catch (IOException e) {
            // toString() cannot throw a checked exception; surface the failure.
            throw new IllegalStateException(e);
        }
        out.println("--------------------------------------------------------------------------");
    }
    return dump.toString();
}
Usage of org.apache.jackrabbit.oak.segment.RecordNumbers.Entry in the project jackrabbit-oak (by Apache).
From the class SegmentBufferWriter, method flush.
/**
 * Adds a segment header to the buffer and writes a segment to the segment
 * store. This is done automatically (called from prepare) when there is not
 * enough space for a record. It can also be called explicitly.
 *
 * @param store the segment store the current segment is persisted to.
 * @throws IOException if writing the segment to {@code store} fails.
 */
@Override
public void flush(@Nonnull SegmentStore store) throws IOException {
    // No-op unless records were added since the last flush.
    if (dirty) {
        // Patch the final reference and record counts into the header.
        int referencedSegmentIdCount = segmentReferences.size();
        BinaryUtils.writeInt(buffer, Segment.REFERENCED_SEGMENT_ID_COUNT_OFFSET, referencedSegmentIdCount);
        statistics.segmentIdCount = referencedSegmentIdCount;
        int recordNumberCount = recordNumbers.size();
        BinaryUtils.writeInt(buffer, Segment.RECORD_NUMBER_COUNT_OFFSET, recordNumberCount);
        // Total segment size: header + reference table + record table +
        // record data, aligned to a 16-byte boundary.
        int totalLength = align(HEADER_SIZE + referencedSegmentIdCount * SEGMENT_REFERENCE_SIZE + recordNumberCount * RECORD_SIZE + length, 16);
        if (totalLength > buffer.length) {
            throw new IllegalStateException(String.format("Too much data for a segment %s (referencedSegmentIdCount=%d, recordNumberCount=%d, length=%d, totalLength=%d)", segment.getSegmentId(), referencedSegmentIdCount, recordNumberCount, length, totalLength));
        }
        statistics.size = length = totalLength;
        // The header was assembled at the front of the buffer, but the
        // segment is written from the tail; relocate it if there is room.
        int pos = HEADER_SIZE;
        if (pos + length <= buffer.length) {
            // the whole segment fits to the space *after* the referenced
            // segment identifiers we've already written, so we can safely
            // copy those bits ahead even if concurrent code is still
            // reading from that part of the buffer
            arraycopy(buffer, 0, buffer, buffer.length - length, pos);
            pos += buffer.length - length;
        } else {
            // this might leave some empty space between the header and
            // the record data, but this case only occurs when the
            // segment is >252kB in size and the maximum overhead is <<4kB,
            // which is acceptable
            length = buffer.length;
        }
        // Serialize the referenced segment ids (two longs = 16 bytes each) ...
        for (SegmentId segmentId : segmentReferences) {
            pos = BinaryUtils.writeLong(buffer, pos, segmentId.getMostSignificantBits());
            pos = BinaryUtils.writeLong(buffer, pos, segmentId.getLeastSignificantBits());
        }
        // ... followed by the record table: record number, type ordinal and
        // offset for each entry.
        for (Entry entry : recordNumbers) {
            pos = BinaryUtils.writeInt(buffer, pos, entry.getRecordNumber());
            pos = BinaryUtils.writeByte(buffer, pos, (byte) entry.getType().ordinal());
            pos = BinaryUtils.writeInt(buffer, pos, entry.getOffset());
        }
        SegmentId segmentId = segment.getSegmentId();
        LOG.debug("Writing data segment: {} ", statistics);
        // Persist the assembled tail of the buffer and start a fresh segment.
        store.writeSegment(segmentId, buffer, buffer.length - length, length);
        newSegment(store);
    }
}
Aggregations