Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache.
The class SASIIndexBuilder, method build.
public void build()
{
    AbstractType<?> keyValidator = cfs.metadata().partitionKeyType;
    for (Map.Entry<SSTableReader, Map<ColumnMetadata, ColumnIndex>> e : sstables.entrySet())
    {
        SSTableReader sstable = e.getKey();
        Map<ColumnMetadata, ColumnIndex> indexes = e.getValue();

        try (RandomAccessReader dataFile = sstable.openDataReader())
        {
            PerSSTableIndexWriter indexWriter = SASIIndex.newWriter(keyValidator, sstable.descriptor, indexes, OperationType.COMPACTION);
            long previousKeyPosition = 0;

            try (KeyIterator keys = new KeyIterator(sstable.descriptor, cfs.metadata()))
            {
                while (keys.hasNext())
                {
                    if (isStopRequested())
                        throw new CompactionInterruptedException(getCompactionInfo());

                    final DecoratedKey key = keys.next();
                    final long keyPosition = keys.getKeyPosition();

                    indexWriter.startPartition(key, keyPosition);

                    try
                    {
                        RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
                        dataFile.seek(indexEntry.position);

                        // skip the serialized partition key
                        ByteBufferUtil.readWithShortLength(dataFile);

                        try (SSTableIdentityIterator partition = SSTableIdentityIterator.create(sstable, dataFile, key))
                        {
                            // if the row has statics attached, it has to be indexed separately
                            if (cfs.metadata().hasStaticColumns())
                                indexWriter.nextUnfilteredCluster(partition.staticRow());

                            while (partition.hasNext())
                                indexWriter.nextUnfilteredCluster(partition.next());
                        }
                    }
                    catch (IOException ex)
                    {
                        throw new FSReadError(ex, sstable.getFilename());
                    }

                    bytesProcessed += keyPosition - previousKeyPosition;
                    previousKeyPosition = keyPosition;
                }

                completeSSTable(indexWriter, sstable, indexes.values());
            }
        }
    }
}
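The loop above relies on a seek-then-skip pattern: the shared data reader is positioned at the partition's offset taken from the index, the short-length-prefixed partition key is consumed, and only then are the partition's rows fed to the index writer. A minimal sketch of just that pattern, using only calls that appear above (the class and method names are ours, and the import paths assume the same Cassandra source tree as the snippet):

import java.io.IOException;

import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.io.util.RandomAccessReader;
import org.apache.cassandra.utils.ByteBufferUtil;

final class SeekToPartitionSketch
{
    // Positions a fresh data reader at the start of a partition's row data.
    static void positionAtPartition(SSTableReader sstable, RowIndexEntry indexEntry) throws IOException
    {
        try (RandomAccessReader dataFile = sstable.openDataReader())
        {
            dataFile.seek(indexEntry.position);            // jump to the partition's offset in the data file
            ByteBufferUtil.readWithShortLength(dataFile);  // consume the serialized partition key
            // the reader now points at the partition's unfiltered rows
        }
    }
}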
Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache.
The class StreamWriter, method write.
/**
 * Streams the specified sections of the SSTable data file to the given channel.
 *
 * StreamWriter applies LZF compression on the wire to reduce the amount of data transferred.
 *
 * @param output where this writes data to
 * @throws IOException on any I/O error
 */
public void write(DataOutputStreamPlus output) throws IOException
{
    long totalSize = totalSize();
    logger.debug("[Stream #{}] Start streaming file {} to {}, repairedAt = {}, totalSize = {}",
                 session.planId(), sstable.getFilename(), session.peer,
                 sstable.getSSTableMetadata().repairedAt, totalSize);

    try (RandomAccessReader file = sstable.openDataReader();
         ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
                                       ? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
                                       : null)
    {
        transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];

        // setting up data compression stream
        compressedOutput = new LZFOutputStream(output);
        long progress = 0L;

        // stream each of the required sections of the file
        for (Pair<Long, Long> section : sections)
        {
            // when validating, reads start on a CRC chunk boundary
            long start = validator == null ? section.left : validator.chunkStart(section.left);
            // number of bytes to skip when the section does not begin on a chunk boundary
            int readOffset = (int) (section.left - start);

            // seek to the beginning of the section
            file.seek(start);
            if (validator != null)
                validator.seek(start);

            // length of the section to read
            long length = section.right - start;
            // tracks write progress
            long bytesRead = 0;
            while (bytesRead < length)
            {
                long lastBytesRead = write(file, validator, readOffset, length, bytesRead);
                bytesRead += lastBytesRead;
                progress += (lastBytesRead - readOffset);
                session.progress(sstable.descriptor.filenameFor(Component.DATA), ProgressInfo.Direction.OUT, progress, totalSize);
                readOffset = 0;
            }

            // make sure that current section is sent
            compressedOutput.flush();
        }

        logger.debug("[Stream #{}] Finished streaming file {} to {}, bytesTransferred = {}, totalSize = {}",
                     session.planId(), sstable.getFilename(), session.peer,
                     FBUtilities.prettyPrintMemory(progress), FBUtilities.prettyPrintMemory(totalSize));
    }
}
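Two lines in the loop above carry the chunk-alignment logic: when a checksum validator is present, reads must start on a CRC chunk boundary (validator.chunkStart), and the first readOffset bytes of that chunk are excluded from the progress accounting. A small self-contained sketch of that arithmetic, with the rounding behaviour of chunkStart stated as an assumption (the class, method, and example numbers are ours):

public class ChunkAlignmentSketch
{
    // Assumed semantics of chunkStart: round a file offset down to the enclosing CRC chunk.
    static long chunkStart(long offset, int chunkSize)
    {
        return (offset / chunkSize) * chunkSize;
    }

    public static void main(String[] args)
    {
        long sectionStart = 70_000;                    // where the requested section begins
        int chunkSize = 65_536;                        // e.g. validator.chunkSize
        long start = chunkStart(sectionStart, chunkSize);
        int readOffset = (int) (sectionStart - start); // bytes to skip before the section proper
        System.out.println(start + " " + readOffset);  // prints: 65536 4464
    }
}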
Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache.
The class CrcCheckChanceTest, method testChangingCrcCheckChance.
public void testChangingCrcCheckChance(boolean newFormat) throws Throwable
{
    // Start with crc_check_chance of 99%
    if (newFormat)
        createTable("CREATE TABLE %s (p text, c text, v text, s text static, PRIMARY KEY (p, c)) WITH compression = {'sstable_compression': 'LZ4Compressor'} AND crc_check_chance = 0.99;");
    else
        createTable("CREATE TABLE %s (p text, c text, v text, s text static, PRIMARY KEY (p, c)) WITH compression = {'sstable_compression': 'LZ4Compressor', 'crc_check_chance' : 0.99}");

    execute("CREATE INDEX foo ON %s(v)");

    execute("INSERT INTO %s(p, c, v, s) values (?, ?, ?, ?)", "p1", "k1", "v1", "sv1");
    execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
    execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");

    ColumnFamilyStore cfs = Keyspace.open(CQLTester.KEYSPACE).getColumnFamilyStore(currentTable());
    ColumnFamilyStore indexCfs = cfs.indexManager.getAllIndexColumnFamilyStores().iterator().next();
    cfs.forceBlockingFlush();

    Assert.assertEquals(0.99, cfs.getCrcCheckChance());
    Assert.assertEquals(0.99, cfs.getLiveSSTables().iterator().next().getCrcCheckChance());
    Assert.assertEquals(0.99, indexCfs.getCrcCheckChance());
    Assert.assertEquals(0.99, indexCfs.getLiveSSTables().iterator().next().getCrcCheckChance());

    // Test for stack overflow
    if (newFormat)
        alterTable("ALTER TABLE %s WITH crc_check_chance = 0.99");
    else
        alterTable("ALTER TABLE %s WITH compression = {'sstable_compression': 'LZ4Compressor', 'crc_check_chance': 0.99}");

    assertRows(execute("SELECT * FROM %s WHERE p=?", "p1"),
               row("p1", "k1", "sv1", "v1"),
               row("p1", "k2", "sv1", "v2"));
    assertRows(execute("SELECT * FROM %s WHERE v=?", "v1"),
               row("p1", "k1", "sv1", "v1"));

    // Write a few SSTables, then compact
    execute("INSERT INTO %s(p, c, v, s) values (?, ?, ?, ?)", "p1", "k1", "v1", "sv1");
    execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
    execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
    cfs.forceBlockingFlush();

    execute("INSERT INTO %s(p, c, v, s) values (?, ?, ?, ?)", "p1", "k1", "v1", "sv1");
    execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
    execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
    cfs.forceBlockingFlush();

    execute("INSERT INTO %s(p, c, v, s) values (?, ?, ?, ?)", "p1", "k1", "v1", "sv1");
    execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
    execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
    cfs.forceBlockingFlush();

    cfs.forceMajorCompaction();

    // Now change the value via JMX
    cfs.setCrcCheckChance(0.01);

    Assert.assertEquals(0.01, cfs.getCrcCheckChance());
    Assert.assertEquals(0.01, cfs.getLiveSSTables().iterator().next().getCrcCheckChance());
    Assert.assertEquals(0.01, indexCfs.getCrcCheckChance());
    Assert.assertEquals(0.01, indexCfs.getLiveSSTables().iterator().next().getCrcCheckChance());

    assertRows(execute("SELECT * FROM %s WHERE p=?", "p1"),
               row("p1", "k1", "sv1", "v1"),
               row("p1", "k2", "sv1", "v2"));
    assertRows(execute("SELECT * FROM %s WHERE v=?", "v1"),
               row("p1", "k1", "sv1", "v1"));

    // Alter again via the schema
    if (newFormat)
        alterTable("ALTER TABLE %s WITH crc_check_chance = 0.5");
    else
        alterTable("ALTER TABLE %s WITH compression = {'sstable_compression': 'LZ4Compressor', 'crc_check_chance': 0.5}");

    // We should be able to read the new value directly from the schema metadata
    Assert.assertEquals(0.5, cfs.metadata().params.crcCheckChance);
    // but the previously JMX-set value persists until the next restart
    Assert.assertEquals(0.01, cfs.getLiveSSTables().iterator().next().getCrcCheckChance());
    Assert.assertEquals(0.01, indexCfs.getCrcCheckChance());
    Assert.assertEquals(0.01, indexCfs.getLiveSSTables().iterator().next().getCrcCheckChance());

    // Verify that the call used by JMX still works
    cfs.setCrcCheckChance(0.03);
    Assert.assertEquals(0.03, cfs.getCrcCheckChance());
    Assert.assertEquals(0.03, cfs.getLiveSSTables().iterator().next().getCrcCheckChance());
    Assert.assertEquals(0.03, indexCfs.getCrcCheckChance());
    Assert.assertEquals(0.03, indexCfs.getLiveSSTables().iterator().next().getCrcCheckChance());

    // Also check that any open readers use the updated value
    // note: only compressed files currently perform crc checks, so only the dfile reader is relevant here
    SSTableReader baseSSTable = cfs.getLiveSSTables().iterator().next();
    SSTableReader idxSSTable = indexCfs.getLiveSSTables().iterator().next();
    try (RandomAccessReader baseDataReader = baseSSTable.openDataReader();
         RandomAccessReader idxDataReader = idxSSTable.openDataReader())
    {
        Assert.assertEquals(0.03, baseDataReader.getCrcCheckChance());
        Assert.assertEquals(0.03, idxDataReader.getCrcCheckChance());

        cfs.setCrcCheckChance(0.31);
        Assert.assertEquals(0.31, baseDataReader.getCrcCheckChance());
        Assert.assertEquals(0.31, idxDataReader.getCrcCheckChance());
    }
}
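The comments in the test spell out the precedence it verifies: ALTER TABLE changes only the schema metadata, while a value set through the JMX path keeps winning on live readers until the node restarts. A purely illustrative sketch of that rule (the class and helper are ours, not a Cassandra API):

final class CrcCheckChancePrecedenceSketch
{
    // Illustrative only: schemaValue comes from the table params, jmxOverride from the runtime setter.
    static double effectiveCrcCheckChance(Double jmxOverride, double schemaValue)
    {
        // a runtime override, when present, wins until the node restarts
        return jmxOverride != null ? jmxOverride : schemaValue;
    }
    // effectiveCrcCheckChance(0.01, 0.5) == 0.01  (matches the assertions after the ALTER to 0.5)
    // effectiveCrcCheckChance(null, 0.5) == 0.5   (after a restart, only the schema value applies)
}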
Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache.
The class SegmentReaderTest, method compressedSegmenter.
private void compressedSegmenter(ICompressor compressor) throws IOException
{
    int rawSize = (1 << 15) - 137;
    ByteBuffer plainTextBuffer = compressor.preferredBufferType().allocate(rawSize);
    byte[] b = new byte[rawSize];
    random.nextBytes(b);
    plainTextBuffer.put(b);
    plainTextBuffer.flip();

    // need to add in the plain text size to the block we write out
    int uncompressedHeaderSize = 4;
    int length = compressor.initialCompressedBufferLength(rawSize);
    ByteBuffer compBuffer = ByteBufferUtil.ensureCapacity(null, length + uncompressedHeaderSize, true, compressor.preferredBufferType());
    compBuffer.putInt(rawSize);
    compressor.compress(plainTextBuffer, compBuffer);
    compBuffer.flip();

    File compressedFile = File.createTempFile("compressed-segment-", ".log");
    compressedFile.deleteOnExit();
    FileOutputStream fos = new FileOutputStream(compressedFile);
    fos.getChannel().write(compBuffer);
    fos.close();

    try (RandomAccessReader reader = RandomAccessReader.open(compressedFile))
    {
        CompressedSegmenter segmenter = new CompressedSegmenter(compressor, reader);
        int fileLength = (int) compressedFile.length();
        SyncSegment syncSegment = segmenter.nextSegment(0, fileLength);
        FileDataInput fileDataInput = syncSegment.input;
        ByteBuffer fileBuffer = readBytes(fileDataInput, rawSize);

        plainTextBuffer.flip();
        Assert.assertEquals(plainTextBuffer, fileBuffer);

        // CompressedSegmenter includes the Sync header length in the syncSegment.endPosition (value)
        Assert.assertEquals(rawSize, syncSegment.endPosition - CommitLogSegment.SYNC_MARKER_SIZE);
    }
}
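The block the test writes to the temporary file has a simple layout: a 4-byte uncompressed-size header followed by the payload produced by ICompressor.compress. A minimal sketch of building such a block, reusing only the compressor and buffer calls that appear above (the class and method names are ours):

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.cassandra.io.compress.ICompressor;
import org.apache.cassandra.utils.ByteBufferUtil;

final class CompressedBlockSketch
{
    // Builds the test's block layout: [int uncompressedSize][compressed payload].
    static ByteBuffer compressedBlock(ICompressor compressor, ByteBuffer plainText) throws IOException
    {
        int rawSize = plainText.remaining();
        int uncompressedHeaderSize = 4;
        ByteBuffer block = ByteBufferUtil.ensureCapacity(null,
                                                         compressor.initialCompressedBufferLength(rawSize) + uncompressedHeaderSize,
                                                         true,
                                                         compressor.preferredBufferType());
        block.putInt(rawSize);                  // uncompressed-size header
        compressor.compress(plainText, block);  // compressed payload follows the header
        block.flip();
        return block;
    }
}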
Use of org.apache.cassandra.io.util.RandomAccessReader in project cassandra by apache.
The class SegmentReaderTest, method underlyingEncryptedSegmenterTest.
public void underlyingEncryptedSegmenterTest(BiFunction<FileDataInput, Integer, ByteBuffer> readFun) throws IOException
{
    EncryptionContext context = EncryptionContextGenerator.createContext(true);
    CipherFactory cipherFactory = new CipherFactory(context.getTransparentDataEncryptionOptions());

    int plainTextLength = (1 << 13) - 137;
    ByteBuffer plainTextBuffer = ByteBuffer.allocate(plainTextLength);
    random.nextBytes(plainTextBuffer.array());

    ByteBuffer compressedBuffer = EncryptionUtils.compress(plainTextBuffer, null, true, context.getCompressor());
    Cipher cipher = cipherFactory.getEncryptor(context.getTransparentDataEncryptionOptions().cipher, context.getTransparentDataEncryptionOptions().key_alias);

    File encryptedFile = File.createTempFile("encrypted-segment-", ".log");
    encryptedFile.deleteOnExit();
    FileChannel channel = new RandomAccessFile(encryptedFile, "rw").getChannel();
    channel.write(ByteBufferUtil.bytes(plainTextLength));
    EncryptionUtils.encryptAndWrite(compressedBuffer, channel, true, cipher);
    channel.close();

    try (RandomAccessReader reader = RandomAccessReader.open(encryptedFile))
    {
        context = EncryptionContextGenerator.createContext(cipher.getIV(), true);
        EncryptedSegmenter segmenter = new EncryptedSegmenter(reader, context);
        SyncSegment syncSegment = segmenter.nextSegment(0, (int) reader.length());

        // EncryptedSegmenter includes the Sync header length in the syncSegment.endPosition (value)
        Assert.assertEquals(plainTextLength, syncSegment.endPosition - CommitLogSegment.SYNC_MARKER_SIZE);

        ByteBuffer fileBuffer = readFun.apply(syncSegment.input, plainTextLength);
        plainTextBuffer.position(0);
        Assert.assertEquals(plainTextBuffer, fileBuffer);
    }
}
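The encrypted segment file follows a similar shape: a 4-byte plain-text length, then the compressed payload encrypted and written through EncryptionUtils. A hedged sketch of just the file-writing step, using only calls that appear above (the class and method names are ours):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

import javax.crypto.Cipher;

import org.apache.cassandra.security.EncryptionContext;
import org.apache.cassandra.security.EncryptionUtils;
import org.apache.cassandra.utils.ByteBufferUtil;

final class EncryptedSegmentSketch
{
    // Writes [int plainTextLength][encrypted(compressed payload)] to the given file.
    static void writeEncryptedSegment(File file, ByteBuffer plainText, EncryptionContext context, Cipher cipher) throws IOException
    {
        try (FileChannel channel = new RandomAccessFile(file, "rw").getChannel())
        {
            channel.write(ByteBufferUtil.bytes(plainText.remaining()));                     // plain-text length header
            ByteBuffer compressed = EncryptionUtils.compress(plainText, null, true, context.getCompressor());
            EncryptionUtils.encryptAndWrite(compressed, channel, true, cipher);             // encrypted payload
        }
    }
}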