Use of org.apache.cassandra.db.SerializationHeader in project cassandra by apache.
The class LogTransactionTest, method sstable.
private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException {
    Descriptor descriptor = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
    // Create (and size) the on-disk components the reader expects to find.
    Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    for (Component component : components) {
        File file = new File(descriptor.filenameFor(component));
        if (!file.exists())
            assertTrue(file.createFileIfNotExists());
        Util.setFileLength(file, size);
    }
    FileHandle dFile = new FileHandle.Builder(descriptor.filenameFor(Component.DATA)).complete();
    FileHandle iFile = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX)).complete();
    // Build a header and stats metadata for an sstable with no source data, then open a mock reader around them.
    SerializationHeader header = SerializationHeader.make(cfs.metadata(), Collections.emptyList());
    StatsMetadata metadata = (StatsMetadata) new MetadataCollector(cfs.metadata().comparator)
                                             .finalizeMetadata(cfs.metadata().partitioner.getClass().getCanonicalName(), 0.01f, -1, null, false, header)
                                             .get(MetadataType.STATS);
    SSTableReader reader = SSTableReader.internalOpen(descriptor, components, cfs.metadata, iFile, dFile,
                                                      MockSchema.indexSummary.sharedCopy(), new AlwaysPresentFilter(),
                                                      1L, metadata, SSTableReader.OpenReason.NORMAL, header);
    reader.first = reader.last = MockSchema.readerBounds(generation);
    return reader;
}
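The SerializationHeader piece of this example is the SerializationHeader.make call: given table metadata and a collection of source sstables, it derives the column set and encoding stats that the new sstable's header records. A minimal sketch of that call in isolation, assuming only a TableMetadata is at hand (the helper name is hypothetical):

import java.util.Collections;
import org.apache.cassandra.db.SerializationHeader;
import org.apache.cassandra.schema.TableMetadata;

// Hypothetical helper: with no source sstables, make() falls back to the table's
// full regular/static column set and default encoding stats.
static SerializationHeader emptyHeader(TableMetadata table) {
    return SerializationHeader.make(table, Collections.emptyList());
}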
Use of org.apache.cassandra.db.SerializationHeader in project cassandra by apache.
The class CassandraStreamHeaderTest, method header.
private CassandraStreamHeader header(boolean entireSSTable, boolean compressed) {
    List<Range<Token>> requestedRanges = Collections.singletonList(new Range<>(store.getPartitioner().getMinimumToken(), sstable.last.getToken()));
    requestedRanges = Range.normalize(requestedRanges);
    List<SSTableReader.PartitionPositionBounds> sections = sstable.getPositionsForRanges(requestedRanges);
    CompressionInfo compressionInfo = compressed ? CompressionInfo.newLazyInstance(sstable.getCompressionMetadata(), sections) : null;
    TableMetadata metadata = store.metadata();
    SerializationHeader.Component serializationHeader = SerializationHeader.makeWithoutStats(metadata).toComponent();
    ComponentManifest componentManifest = entireSSTable ? ComponentManifest.create(sstable.descriptor) : null;
    DecoratedKey firstKey = entireSSTable ? sstable.first : null;
    return CassandraStreamHeader.builder()
                                .withSSTableFormat(SSTableFormat.Type.BIG)
                                .withSSTableVersion(BigFormat.latestVersion)
                                .withSSTableLevel(0).withEstimatedKeys(10)
                                .withCompressionInfo(compressionInfo).withSections(sections)
                                .isEntireSSTable(entireSSTable).withComponentManifest(componentManifest)
                                .withFirstKey(firstKey).withSerializationHeader(serializationHeader)
                                .withTableId(metadata.id)
                                .build();
}
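Here the relevant call is SerializationHeader.makeWithoutStats: a stream header only needs the column layout, not per-sstable encoding statistics, so the component is built straight from the schema. A minimal sketch, assuming metadata is any TableMetadata such as store.metadata() above (the helper name is hypothetical):

import org.apache.cassandra.db.SerializationHeader;
import org.apache.cassandra.schema.TableMetadata;

// Hypothetical helper: schema-only header component for a stream header.
// makeWithoutStats uses default encoding stats, so no sstable has to be read.
static SerializationHeader.Component streamHeaderComponent(TableMetadata metadata) {
    return SerializationHeader.makeWithoutStats(metadata).toComponent();
}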
Use of org.apache.cassandra.db.SerializationHeader in project cassandra by apache.
The class SSTableFlushObserverTest, method testFlushObserver.
@Test
public void testFlushObserver() {
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME)
                                     .addPartitionKeyColumn("id", UTF8Type.instance)
                                     .addRegularColumn("first_name", UTF8Type.instance)
                                     .addRegularColumn("age", Int32Type.instance)
                                     .addRegularColumn("height", LongType.instance)
                                     .build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();
    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    File directory = new File(sstableDirectory + File.pathSeparator() + KS_NAME + File.pathSeparator() + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.tryCreateDirectories())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.absolutePath());
    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat),
                                               10L, 0L, null, false,
                                               TableMetadataRef.forOfflineTools(cfm),
                                               new MetadataCollector(cfm.comparator).sstableLevel(0),
                                               new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS),
                                               Collections.singletonList(observer),
                                               transaction);
    SSTableReader reader = null;
    Multimap<ByteBuffer, Cell<?>> expected = ArrayListMultimap.create();
    // Write three partitions through the writer so the FlushObserver sees each appended row.
    try {
        final long now = System.currentTimeMillis();
        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }
    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());
    // Every observed row must be findable at its reported index position and match the cells we wrote.
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
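Unlike the previous examples, this test builds its SerializationHeader directly with the (isForSSTable, metadata, columns, stats) constructor, which suits writing a brand-new sstable offline where no prior stats exist. A minimal sketch of just that wiring, assuming cfm is a TableMetadata like the one built above (the helper name is hypothetical):

import org.apache.cassandra.db.EncodingStats;
import org.apache.cassandra.db.SerializationHeader;
import org.apache.cassandra.schema.TableMetadata;

// Hypothetical helper: a header covering every regular/static column of the table,
// with EncodingStats.NO_STATS because nothing has been written yet.
static SerializationHeader writerHeader(TableMetadata cfm) {
    return new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS);
}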
Use of org.apache.cassandra.db.SerializationHeader in project cassandra by apache.
The class SSTableRewriterTest, method writeFiles.
public static Set<SSTableReader> writeFiles(ColumnFamilyStore cfs, int fileCount, int partitionCount, int cellCount) {
    int i = 0;
    Set<SSTableReader> result = new LinkedHashSet<>();
    for (int f = 0; f < fileCount; f++) {
        File dir = cfs.getDirectories().getDirectoryForNewSSTables();
        Descriptor desc = cfs.newSSTableDescriptor(dir);
        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, null, false,
                                                               new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS))) {
            int end = f == fileCount - 1 ? partitionCount : ((f + 1) * partitionCount) / fileCount;
            for (; i < end; i++) {
                UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), ByteBufferUtil.bytes(i));
                for (int j = 0; j < cellCount; j++)
                    builder.newRow(Integer.toString(i)).add("val", random(0, 1000));
                writer.append(builder.build().unfilteredIterator());
            }
            result.addAll(writer.finish(true));
        }
    }
    return result;
}
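As a usage note, writeFiles is how the rewriter tests seed a table with several sstables before exercising rewrites. A hedged sketch of a call site, assuming cfs comes from the surrounding test fixture:

// Hypothetical call site: four sstables covering 400 partitions of 10 cells each.
Set<SSTableReader> seeded = SSTableRewriterTest.writeFiles(cfs, 4, 400, 10);
assertEquals(4, seeded.size());
cfs.addSSTables(seeded);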