Example use of org.apache.cassandra.db.ClusteringComparator in the Apache Cassandra project:
class GroupMakerTest, method testIsNewGroupWithStaticClusteringColumns.
@Test
public void testIsNewGroupWithStaticClusteringColumns() {
    ClusteringComparator comparator = newComparator(false, false, false);
    // Group on the first 2 clustering columns.
    GroupMaker maker = GroupMaker.newInstance(comparator, 2);

    // The first row seen always opens a new group.
    assertTrue(maker.isNewGroup(partitionKey(1), clustering(1, 1, 1)));
    // Same partition and same 2-column clustering prefix: still the same group.
    assertFalse(maker.isNewGroup(partitionKey(1), clustering(1, 1, 2)));

    // A static clustering in a previously unseen partition starts a new group...
    assertTrue(maker.isNewGroup(partitionKey(2), Clustering.STATIC_CLUSTERING));
    assertTrue(maker.isNewGroup(partitionKey(3), Clustering.STATIC_CLUSTERING));

    // ...and so does a regular row in yet another partition.
    assertTrue(maker.isNewGroup(partitionKey(4), clustering(1, 1, 2)));
}
Example use of org.apache.cassandra.db.ClusteringComparator in the Apache Cassandra project:
class GroupMakerTest, method testIsNewGroupWithOneReversedClusteringColumns.
@Test
public void testIsNewGroupWithOneReversedClusteringColumns() {
    // First clustering column is reversed; grouping is on the first 2 columns.
    ClusteringComparator comparator = newComparator(true, false, false);
    GroupMaker maker = GroupMaker.newInstance(comparator, 2);

    // First row always starts a group.
    assertTrue(maker.isNewGroup(partitionKey(1), clustering(1, 3, 1)));
    // Same (1, 3) prefix: same group.
    assertFalse(maker.isNewGroup(partitionKey(1), clustering(1, 3, 2)));

    // Descending second column (reversed order is irrelevant to grouping):
    // each new prefix value opens a group.
    assertTrue(maker.isNewGroup(partitionKey(1), clustering(1, 2, 1)));
    assertTrue(maker.isNewGroup(partitionKey(1), clustering(1, 1, 1)));
    assertFalse(maker.isNewGroup(partitionKey(1), clustering(1, 1, 2)));
    assertFalse(maker.isNewGroup(partitionKey(1), clustering(1, 1, 3)));

    // New value in the first (reversed) column: new group.
    assertTrue(maker.isNewGroup(partitionKey(1), clustering(2, 1, 1)));
    assertFalse(maker.isNewGroup(partitionKey(1), clustering(2, 1, 2)));

    // A new partition always opens a new group.
    assertTrue(maker.isNewGroup(partitionKey(2), clustering(2, 2, 1)));
}
Example use of org.apache.cassandra.db.ClusteringComparator in the Apache Cassandra project:
class GroupMakerTest, method testIsNewGroupWithOnlyPartitionKeyComponents.
@Test
public void testIsNewGroupWithOnlyPartitionKeyComponents() {
    ClusteringComparator comparator = newComparator(false, false, false);
    // Fixed: local was misspelled "goupMaker"; renamed to match the sibling tests.
    GroupMaker groupMaker = GroupMaker.newInstance(comparator, 2);

    // Composite partition key: a change in ANY key component starts a new group.
    assertTrue(groupMaker.isNewGroup(partitionKey(1, 1), clustering(1, 1, 1)));
    // Same partition, same 2-column clustering prefix: same group.
    assertFalse(groupMaker.isNewGroup(partitionKey(1, 1), clustering(1, 1, 2)));
    // Second key component changed -> new group, even with an equal clustering.
    assertTrue(groupMaker.isNewGroup(partitionKey(1, 2), clustering(1, 1, 2)));
    assertTrue(groupMaker.isNewGroup(partitionKey(1, 2), clustering(2, 2, 2)));
    // First key component changed -> new group.
    assertTrue(groupMaker.isNewGroup(partitionKey(2, 2), clustering(1, 1, 2)));
}
Example use of org.apache.cassandra.db.ClusteringComparator in the Apache Cassandra project:
class CompressedRandomAccessReaderTest, method testDataCorruptionDetection.
/**
 * If the data read out doesn't match the checksum, an exception should be thrown.
 *
 * Writes a compressed file, corrupts the per-chunk checksum on disk, and verifies
 * that reading surfaces a {@link CorruptSSTableException} caused by a
 * {@link CorruptBlockException}; then restores the checksum and verifies the data
 * is readable again.
 */
@Test
public void testDataCorruptionDetection() throws IOException {
    String CONTENT = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam vitae.";
    File file = new File("testDataCorruptionDetection");
    file.deleteOnExit();
    File metadata = new File(file.getPath() + ".meta");
    metadata.deleteOnExit();
    assertTrue(file.createNewFile());
    assertTrue(metadata.createNewFile());
    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(), sstableMetadataCollector)) {
        writer.write(CONTENT.getBytes());
        writer.finish();
    }
    // open compression metadata and get chunk information
    CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), true);
    CompressionMetadata.Chunk chunk = meta.chunkFor(0);
    try (FileHandle.Builder builder = new FileHandle.Builder(file.getPath()).withCompressionMetadata(meta);
    FileHandle fh = builder.complete();
    RandomAccessReader reader = fh.createReader()) {
        // read and verify compressed data before any tampering
        assertEquals(CONTENT, reader.readLine());
        Random random = new Random();
        try (RandomAccessFile checksumModifier = new RandomAccessFile(file, "rw")) {
            byte[] checksum = new byte[4];
            // the 4-byte checksum is stored immediately after the compressed chunk
            checksumModifier.seek(chunk.length);
            // Fixed: read() may return fewer bytes than requested and its result was
            // ignored; readFully() guarantees all 4 checksum bytes are read.
            checksumModifier.readFully(checksum);
            byte[] corruptChecksum = new byte[4];
            // pick random bytes, retrying in the (unlikely) case they equal the original
            do {
                random.nextBytes(corruptChecksum);
            } while (Arrays.equals(corruptChecksum, checksum));
            updateChecksum(checksumModifier, chunk.length, corruptChecksum);
            try (final RandomAccessReader r = fh.createReader()) {
                Throwable exception = null;
                try {
                    r.readLine();
                } catch (Throwable t) {
                    exception = t;
                }
                // Fixed: JUnit's assertSame takes (expected, actual); the original had
                // the arguments reversed, producing misleading failure messages.
                assertNotNull(exception);
                assertSame(CorruptSSTableException.class, exception.getClass());
                assertSame(CorruptBlockException.class, exception.getCause().getClass());
            }
            // restore the original checksum and check the data reads back cleanly
            updateChecksum(checksumModifier, chunk.length, checksum);
            try (RandomAccessReader cr = fh.createReader()) {
                assertEquals(CONTENT, cr.readLine());
            }
        }
    }
}
Example use of org.apache.cassandra.db.ClusteringComparator in the Apache Cassandra project:
class CompressedRandomAccessReaderTest, method test6791.
/**
 * Regression test for CASSANDRA-6791: verifies that {@code resetAndTruncate} on a
 * {@link CompressedSequentialWriter} correctly rewinds past chunk boundaries, so
 * data written after the reset replaces the discarded bytes exactly.
 */
@Test
public void test6791() throws IOException, ConfigurationException {
    File f = File.createTempFile("compressed6791_", "3");
    String filename = f.getAbsolutePath();
    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(32), sstableMetadataCollector)) {
        // 20 'x' bytes, then mark the position we will rewind to
        for (int i = 0; i < 20; i++) writer.write("x".getBytes());
        DataPosition mark = writer.mark();
        // write enough garbage to create new chunks (chunk size is 32 bytes):
        for (int i = 0; i < 40; ++i) writer.write("y".getBytes());
        // rewind over the garbage, then write 20 more 'x' bytes in its place
        writer.resetAndTruncate(mark);
        for (int i = 0; i < 20; i++) writer.write("x".getBytes());
        writer.finish();
    }
    try (FileHandle.Builder builder = new FileHandle.Builder(filename).withCompressionMetadata(new CompressionMetadata(filename + ".metadata", f.length(), true));
    FileHandle fh = builder.complete();
    RandomAccessReader reader = fh.createReader()) {
        String res = reader.readLine();
        // Fixed: assertEquals takes (expected, actual); the original had the
        // arguments reversed, producing misleading failure messages.
        assertEquals("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", res);
        assertEquals(40, res.length());
    } finally {
        // best-effort cleanup of the data and metadata files
        if (f.exists())
            assertTrue(f.delete());
        File metadata = new File(filename + ".metadata");
        if (metadata.exists())
            metadata.delete();
    }
}
End of aggregated usage examples.