Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache — class TokenTreeTest, method generateTree:
/**
 * Builds a TokenTree containing one entry per token in [minToken, maxToken],
 * where each token maps to a single offset equal to the token value itself.
 *
 * @param minToken first (inclusive) token to insert
 * @param maxToken last (inclusive) token to insert
 * @param isStatic build with StaticTokenTreeBuilder (via FakeCombinedTerm) when true,
 *                 DynamicTokenTreeBuilder otherwise
 * @return a TokenTree read back from a freshly written temporary file
 * @throws IOException if writing or reading the temporary file fails
 */
private static TokenTree generateTree(final long minToken, final long maxToken, boolean isStatic) throws IOException {
    // Plain population loop instead of double-brace initialization: the anonymous
    // TreeMap subclass idiom creates an extra class and pins the enclosing scope.
    final SortedMap<Long, LongSet> toks = new TreeMap<>();
    for (long i = minToken; i <= maxToken; i++) {
        LongSet offsetSet = new LongOpenHashSet();
        offsetSet.add(i);
        toks.put(i, offsetSet);
    }
    final TokenTreeBuilder builder = isStatic ? new StaticTokenTreeBuilder(new FakeCombinedTerm(toks)) : new DynamicTokenTreeBuilder(toks);
    builder.finish();
    final File treeFile = File.createTempFile("token-tree-get-test", "tt");
    treeFile.deleteOnExit();
    try (SequentialWriter writer = new SequentialWriter(treeFile, DEFAULT_OPT)) {
        builder.write(writer);
        writer.sync();
    }
    RandomAccessReader reader = null;
    try {
        reader = RandomAccessReader.open(treeFile);
        // MappedBuffer maps the file contents, so the reader can be closed
        // immediately after construction (hence the finally below).
        return new TokenTree(new MappedBuffer(reader));
    } finally {
        FileUtils.closeQuietly(reader);
    }
}
Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache — class TokenTreeTest, method buildTree:
/**
 * Finishes the given builder, serializes it to a temporary file, and reads
 * the result back as a TokenTree.
 *
 * @param builder token tree builder to finish and serialize
 * @return a TokenTree read back from the written temporary file
 * @throws Exception if finishing, writing, or reading fails
 */
private static TokenTree buildTree(TokenTreeBuilder builder) throws Exception {
    builder.finish();
    final File treeFile = File.createTempFile("token-tree-", "db");
    treeFile.deleteOnExit();
    try (SequentialWriter writer = new SequentialWriter(treeFile, DEFAULT_OPT)) {
        builder.write(writer);
        writer.sync();
    }
    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    try {
        // MappedBuffer maps the file contents, so the reader can be closed once
        // the TokenTree is constructed; the original leaked it (generateTree in
        // this same test class closes it the same way).
        return new TokenTree(new MappedBuffer(reader));
    } finally {
        FileUtils.closeQuietly(reader);
    }
}
Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache — class CommitLogReader, method readCommitLogSegment:
/**
* Reads mutations from file, handing them off to handler
* @param handler Handler that will take action based on deserialized Mutations
* @param file CommitLogSegment file to read
* @param minPosition Optional minimum CommitLogPosition - all segments with id larger or matching w/greater position will be read
* @param mutationLimit Optional limit on # of mutations to replay. Local ALL_MUTATIONS serves as marker to play all.
* @param tolerateTruncation Whether or not we should allow truncation of this file or throw if EOF found
*
* @throws IOException
*/
/**
 * Reads mutations from file, handing them off to handler.
 *
 * @param handler Handler that will take action based on deserialized Mutations
 * @param file CommitLogSegment file to read
 * @param minPosition Optional minimum CommitLogPosition - all segments with id larger or matching w/greater position will be read
 * @param mutationLimit Optional limit on # of mutations to replay. Local ALL_MUTATIONS serves as marker to play all.
 * @param tolerateTruncation Whether or not we should allow truncation of this file or throw if EOF found
 *
 * @throws IOException if an unrecoverable read error occurs while replaying the segment
 */
public void readCommitLogSegment(CommitLogReadHandler handler, File file, CommitLogPosition minPosition, int mutationLimit, boolean tolerateTruncation) throws IOException {
    // just transform from the file name (no reading of headers) to determine version
    CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.getName());
    try (RandomAccessReader reader = RandomAccessReader.open(file)) {
        final long segmentIdFromFilename = desc.id;
        try {
            // The following call can either throw or legitimately return null. For either case, we need to check
            // desc outside this block and set it to null in the exception case.
            desc = CommitLogDescriptor.readHeader(reader, DatabaseDescriptor.getEncryptionContext());
        } catch (Exception e) {
            desc = null;
        }
        if (desc == null) {
            // don't care about whether or not the handler thinks we can continue. We can't w/out descriptor.
            handler.handleUnrecoverableError(new CommitLogReadException(String.format("Could not read commit log descriptor in file %s", file), CommitLogReadErrorReason.UNRECOVERABLE_DESCRIPTOR_ERROR, false));
            return;
        }
        // Sanity check: the id embedded in the header should match the one encoded in the filename.
        if (segmentIdFromFilename != desc.id) {
            if (handler.shouldSkipSegmentOnError(new CommitLogReadException(String.format("Segment id mismatch (filename %d, descriptor %d) in file %s", segmentIdFromFilename, desc.id, file), CommitLogReadErrorReason.RECOVERABLE_DESCRIPTOR_ERROR, false))) {
                return;
            }
        }
        if (shouldSkipSegmentId(file, desc, minPosition))
            return;
        CommitLogSegmentReader segmentReader;
        try {
            segmentReader = new CommitLogSegmentReader(handler, desc, reader, tolerateTruncation);
        } catch (Exception e) {
            handler.handleUnrecoverableError(new CommitLogReadException(String.format("Unable to create segment reader for commit log file: %s", e), CommitLogReadErrorReason.UNRECOVERABLE_UNKNOWN_ERROR, tolerateTruncation));
            return;
        }
        try {
            ReadStatusTracker statusTracker = new ReadStatusTracker(mutationLimit, tolerateTruncation);
            for (CommitLogSegmentReader.SyncSegment syncSegment : segmentReader) {
                // Only tolerate truncation if we allow in both global and segment.
                // (short-circuit && instead of bitwise & — equivalent for these
                // side-effect-free booleans, but idiomatic)
                statusTracker.tolerateErrorsInSection = tolerateTruncation && syncSegment.toleratesErrorsInSection;
                // Skip segments that are completely behind the desired minPosition
                if (desc.id == minPosition.segmentId && syncSegment.endPosition < minPosition.position)
                    continue;
                statusTracker.errorContext = String.format("Next section at %d in %s", syncSegment.fileStartPosition, desc.fileName());
                readSection(handler, syncSegment.input, minPosition, syncSegment.endPosition, statusTracker, desc);
                if (!statusTracker.shouldContinue())
                    break;
            }
        }
        // The segment iterator may surface IOExceptions wrapped in RuntimeExceptions;
        // unwrap and rethrow the underlying IOException when that is the case.
        catch (RuntimeException re) {
            if (re.getCause() instanceof IOException)
                throw (IOException) re.getCause();
            throw re;
        }
        logger.debug("Finished reading {}", file);
    }
}
Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache — class MetadataSerializer, method deserialize:
/**
 * Loads the requested metadata components for the sstable identified by the descriptor.
 * When the STATS file is missing on disk, falls back to a map containing only the
 * default stats metadata; otherwise the components are deserialized from the file.
 *
 * @param descriptor sstable descriptor whose metadata should be loaded
 * @param types metadata component types to deserialize
 * @return map of metadata type to deserialized component
 * @throws IOException if reading the stats file fails
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException {
    logger.trace("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (statsFile.exists()) {
        try (RandomAccessReader r = RandomAccessReader.open(statsFile)) {
            return deserialize(descriptor, r, types);
        }
    }
    // No stats file on disk — fall back to default stats metadata only.
    logger.trace("No sstable stats for {}", descriptor);
    Map<MetadataType, MetadataComponent> defaults = new EnumMap<>(MetadataType.class);
    defaults.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    return defaults;
}
Use of org.apache.cassandra.io.util.RandomAccessReader in project eiger by wlloyd — class SSTableIdentityIterator, method reset:
/**
 * Rewinds this iterator by seeking the underlying reader back to columnPosition
 * and resetting the byte tracker to the header size.
 * Only supported when the underlying input is a RandomAccessReader; otherwise
 * throws UnsupportedOperationException.
 */
public void reset() {
    if (input instanceof RandomAccessReader) {
        RandomAccessReader file = (RandomAccessReader) input;
        try {
            file.seek(columnPosition);
        } catch (IOException e) {
            // Surface the seek failure the same way other I/O errors are reported here.
            throw new IOError(e);
        }
        inputWithTracker.reset(headerSize());
    } else {
        throw new UnsupportedOperationException();
    }
}
Aggregations