Use of org.apache.cassandra.io.util.FileHandle in project cassandra by apache.
The class LogTransactionTest, method sstable:
private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException {
    Descriptor descriptor = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
    Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    // create every component file on disk and pad it to the requested size
    for (Component component : components) {
        File file = new File(descriptor.filenameFor(component));
        if (!file.exists())
            assertTrue(file.createFileIfNotExists());
        Util.setFileLength(file, size);
    }
    // plain buffered handles for the data and primary index components
    FileHandle dFile = new FileHandle.Builder(descriptor.filenameFor(Component.DATA)).complete();
    FileHandle iFile = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX)).complete();
    SerializationHeader header = SerializationHeader.make(cfs.metadata(), Collections.emptyList());
    StatsMetadata metadata = (StatsMetadata) new MetadataCollector(cfs.metadata().comparator)
                                             .finalizeMetadata(cfs.metadata().partitioner.getClass().getCanonicalName(), 0.01f, -1, null, false, header)
                                             .get(MetadataType.STATS);
    SSTableReader reader = SSTableReader.internalOpen(descriptor, components, cfs.metadata, iFile, dFile,
                                                      MockSchema.indexSummary.sharedCopy(), new AlwaysPresentFilter(),
                                                      1L, metadata, SSTableReader.OpenReason.NORMAL, header);
    reader.first = reader.last = MockSchema.readerBounds(generation);
    return reader;
}
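For context, a minimal sketch of the FileHandle lifecycle this test relies on, assuming the same builder API as above; the file path is illustrative, not taken from the test:

// open a buffered handle on an existing file and read through it; FileHandle and
// RandomAccessReader are both closeable, so try-with-resources releases the channel
try (FileHandle handle = new FileHandle.Builder("/tmp/ks-tbl-Data.db").bufferSize(4096).complete();
     RandomAccessReader reader = handle.createReader()) {
    byte[] prefix = new byte[(int) Math.min(16, reader.length())];
    reader.readFully(prefix);
}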
Use of org.apache.cassandra.io.util.FileHandle in project cassandra by apache.
The class MockSchema, method sstable:
public static SSTableReader sstable(int generation, int size, boolean keepRef, long firstToken, long lastToken, int level, ColumnFamilyStore cfs) {
    Descriptor descriptor = new Descriptor(cfs.getDirectories().getDirectoryForNewSSTables(), cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
    Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    // every component file must exist on disk, even if empty
    for (Component component : components) {
        File file = new File(descriptor.filenameFor(component));
        file.createFileIfNotExists();
    }
    // .complete() with size to make sstable.onDiskLength work
    try (FileHandle.Builder builder = new FileHandle.Builder(new ChannelProxy(tempFile)).bufferSize(size);
         FileHandle fileHandle = builder.complete(size)) {
        if (size > 0) {
            try {
                File file = new File(descriptor.filenameFor(Component.DATA));
                Util.setFileLength(file, size);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
        SerializationHeader header = SerializationHeader.make(cfs.metadata(), Collections.emptyList());
        StatsMetadata metadata = (StatsMetadata) new MetadataCollector(cfs.metadata().comparator)
                                                 .sstableLevel(level)
                                                 .finalizeMetadata(cfs.metadata().partitioner.getClass().getCanonicalName(), 0.01f, UNREPAIRED_SSTABLE, null, false, header)
                                                 .get(MetadataType.STATS);
        // shared copies of the same handle stand in for both the data and the primary index files
        SSTableReader reader = SSTableReader.internalOpen(descriptor, components, cfs.metadata,
                                                          fileHandle.sharedCopy(), fileHandle.sharedCopy(), indexSummary.sharedCopy(),
                                                          new AlwaysPresentFilter(), 1L, metadata, SSTableReader.OpenReason.NORMAL, header);
        reader.first = readerBounds(firstToken);
        reader.last = readerBounds(lastToken);
        if (!keepRef)
            reader.selfRef().release();
        return reader;
    }
}
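The details worth calling out are the complete(size) length override and the shared copies. A rough sketch of the intended semantics, under the assumption that sharedCopy() simply adds a reference to the same underlying channel; the scratch file path below is hypothetical:

// build a handle over a scratch file and hand out independent copies of it
File scratch = new File("/tmp/filehandle-scratch");   // hypothetical path
scratch.createFileIfNotExists();
try (FileHandle.Builder builder = new FileHandle.Builder(new ChannelProxy(scratch)).bufferSize(1024);
     FileHandle original = builder.complete(1024)) {   // length override, as in the mock above
    FileHandle dataCopy = original.sharedCopy();
    FileHandle indexCopy = original.sharedCopy();
    // each copy holds its own reference; the channel stays open until all are closed
    dataCopy.close();
    indexCopy.close();
}   // closing the original releases the last reference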
Use of org.apache.cassandra.io.util.FileHandle in project cassandra by apache.
The class BigTableZeroCopyWriterTest, method getSSTableComponentData:
private Pair<DataInputPlus, Long> getSSTableComponentData(SSTableReader sstable, Component component, Function<ByteBuffer, DataInputPlus> bufferMapper) {
    FileHandle componentFile = new FileHandle.Builder(sstable.descriptor.filenameFor(component)).bufferSize(1024).complete();
    // read the whole component into a heap buffer via the handle's channel
    ByteBuffer buffer = ByteBuffer.allocate((int) componentFile.channel.size());
    componentFile.channel.read(buffer, 0);
    buffer.flip();
    DataInputPlus inputPlus = bufferMapper.apply(buffer);
    return Pair.create(inputPlus, componentFile.channel.size());
}
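One plausible bufferMapper to pass into this helper (a sketch, not necessarily the test's own choice) wraps the buffer in a DataInputBuffer, which implements DataInputPlus:

// wrap the heap buffer directly; 'false' means no defensive duplicate is made
Function<ByteBuffer, DataInputPlus> bufferMapper = buf -> new DataInputBuffer(buf, false);
Pair<DataInputPlus, Long> data = getSSTableComponentData(sstable, Component.DATA, bufferMapper);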