Usage of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project:
class SSTableTxnWriter, method create.
// The log and the writer are both closed during doPostCleanup.
@SuppressWarnings("resource")
public static SSTableTxnWriter create(TableMetadataRef metadata, Descriptor descriptor, long keyCount, long repairedAt, UUID pendingRepair, int sstableLevel, SerializationHeader header, Collection<Index> indexes) {
    // No backing column family store exists, so fall back to a default SSTableMultiWriter
    // driven by an offline write transaction.
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.WRITE);
    MetadataCollector metadataCollector = new MetadataCollector(metadata.get().comparator).sstableLevel(sstableLevel);
    SSTableMultiWriter multiWriter = SimpleSSTableMultiWriter.create(descriptor, keyCount, repairedAt, pendingRepair, metadata, metadataCollector, header, indexes, transaction);
    return new SSTableTxnWriter(transaction, multiWriter);
}
Usage of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project:
class MaxSSTableSizeWriter, method switchCompactionLocation.
@Override
public void switchCompactionLocation(Directories.DataDirectory location) {
    sstableDirectory = location;
    // Build the pieces of the new writer as named intermediates for readability.
    Descriptor descriptor = cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(sstableDirectory));
    MetadataCollector collector = new MetadataCollector(allSSTables, cfs.metadata().comparator, level);
    SerializationHeader header = SerializationHeader.make(cfs.metadata(), nonExpiredSSTables);
    @SuppressWarnings("resource") // ownership is handed off to sstableWriter below
    SSTableWriter nextWriter = SSTableWriter.create(descriptor, estimatedTotalKeys / estimatedSSTables, minRepairedAt, pendingRepair, cfs.metadata, collector, header, cfs.indexManager.listIndexes(), txn);
    sstableWriter.switchWriter(nextWriter);
}
Usage of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project:
class ScrubTest, method createTestWriter.
// Builds a test SSTableMultiWriter: a level-0 MetadataCollector plus a plain
// SerializationHeader covering all regular and static columns, wrapped in the
// test writer pair.
private static SSTableMultiWriter createTestWriter(Descriptor descriptor, long keyCount, TableMetadataRef metadata, LifecycleTransaction txn) {
    MetadataCollector metadataCollector = new MetadataCollector(metadata.get().comparator).sstableLevel(0);
    SerializationHeader serializationHeader =
            new SerializationHeader(true, metadata.get(), metadata.get().regularAndStaticColumns(), EncodingStats.NO_STATS);
    TestWriter testWriter = new TestWriter(descriptor, keyCount, 0, null, metadata, metadataCollector, serializationHeader, txn);
    return new TestMultiWriter(testWriter, txn);
}
Usage of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project:
class CompressedRandomAccessReaderTest, method testResetAndTruncate.
/**
 * Writes text through a (possibly compressed) SequentialWriter, uses mark()/resetAndTruncate()
 * to rewind past junk that crosses a chunk boundary, then reads the file back and verifies
 * the corrected content. Cleans up the data and metadata files afterwards.
 */
private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize, double minCompressRatio) throws IOException {
    final String filename = f.getAbsolutePath();
    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try (SequentialWriter writer = compressed
            ? new CompressedSequentialWriter(f, filename + ".metadata", null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(), sstableMetadataCollector)
            : new SequentialWriter(f)) {
        // FIX: use an explicit charset; bare getBytes()/new String(byte[]) depend on the
        // platform default charset (pre-Java-18) and are not portable.
        writer.write("The quick ".getBytes(java.nio.charset.StandardCharsets.UTF_8));
        DataPosition mark = writer.mark();
        writer.write("blue fox jumps over the lazy dog".getBytes(java.nio.charset.StandardCharsets.UTF_8));
        // Write enough junk to be sure the writer has moved on to another chunk.
        for (int i = 0; i < junkSize; ++i) {
            writer.write((byte) 1);
        }
        // Rewind to the mark, discarding the junk and the "blue..." text, then
        // overwrite with the corrected sentence.
        writer.resetAndTruncate(mark);
        writer.write("brown fox jumps over the lazy dog".getBytes(java.nio.charset.StandardCharsets.UTF_8));
        writer.finish();
    }
    assert f.exists();
    // NOTE(review): this CompressionMetadata is never explicitly closed — presumably the
    // FileHandle.Builder/FileHandle takes ownership; confirm before relying on it.
    CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length(), true) : null;
    try (FileHandle.Builder builder = new FileHandle.Builder(filename).mmapped(usemmap).withCompressionMetadata(compressionMetadata);
         FileHandle fh = builder.complete();
         RandomAccessReader reader = fh.createReader()) {
        String expected = "The quick brown fox jumps over the lazy dog";
        assertEquals(expected.length(), reader.length());
        byte[] b = new byte[expected.length()];
        reader.readFully(b);
        assert new String(b, java.nio.charset.StandardCharsets.UTF_8).equals(expected)
                : "Expecting '" + expected + "', got '" + new String(b, java.nio.charset.StandardCharsets.UTF_8) + '\'';
    } finally {
        // Always remove the data file and, when compressed, its metadata sidecar.
        if (f.exists())
            assertTrue(f.delete());
        File metadata = new File(filename + ".metadata");
        if (compressed && metadata.exists())
            metadata.delete();
    }
}
Usage of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project:
class CompressedSequentialWriterTest, method testWrite.
/**
 * Round-trips deterministic random data through a CompressedSequentialWriter,
 * exercising mark()/resetAndTruncate() across a full chunk of garbage, then reads
 * the file back and verifies the decompressed bytes match the two payloads exactly.
 * Cleans up the data and metadata files afterwards.
 */
private void testWrite(File f, int bytesToTest, boolean useMemmap) throws IOException {
    final String filename = f.getAbsolutePath();
    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Collections.singletonList(BytesType.instance)));
    byte[] dataPre = new byte[bytesToTest];
    byte[] rawPost = new byte[bytesToTest];
    try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", null, SequentialWriterOption.DEFAULT, compressionParameters, sstableMetadataCollector)) {
        Random r = new Random(42); // fixed seed keeps the test deterministic
        // Exercise both write(byte[]) and write(ByteBuffer) paths.
        r.nextBytes(dataPre);
        r.nextBytes(rawPost);
        ByteBuffer dataPost = makeBB(bytesToTest);
        dataPost.put(rawPost);
        dataPost.flip();
        writer.write(dataPre);
        DataPosition mark = writer.mark();
        // Write a full chunk's worth of garbage to force a chunk transition...
        for (int i = 0; i < CompressionParams.DEFAULT_CHUNK_LENGTH; i++) {
            writer.write((byte) i);
        }
        // ...then rewind to the mark and overwrite the garbage with the real payload.
        writer.resetAndTruncate(mark);
        writer.write(dataPost);
        writer.finish();
    }
    assert f.exists();
    try (FileHandle.Builder builder = new FileHandle.Builder(filename).withCompressionMetadata(new CompressionMetadata(filename + ".metadata", f.length(), true));
         FileHandle fh = builder.complete();
         RandomAccessReader reader = fh.createReader()) {
        assertEquals(dataPre.length + rawPost.length, reader.length());
        byte[] result = new byte[(int) reader.length()];
        reader.readFully(result);
        assert (reader.isEOF());
        // FIX: removed the redundant explicit reader.close() — the reader is managed by
        // try-with-resources, and the explicit call caused a double close.
        byte[] fullInput = new byte[bytesToTest * 2];
        System.arraycopy(dataPre, 0, fullInput, 0, dataPre.length);
        System.arraycopy(rawPost, 0, fullInput, bytesToTest, rawPost.length);
        assert Arrays.equals(result, fullInput);
    } finally {
        if (f.exists())
            f.delete();
        // FIX: build the metadata path from filename (the absolute path used by the
        // writer) instead of f.toString(), for consistency.
        File metadata = new File(filename + ".metadata");
        if (metadata.exists())
            metadata.delete();
    }
}
Aggregations