Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.
Class LeveledCompactionStrategyTest, method testGrouperLevels.
/**
 * Ensure that the grouping operation preserves the levels of grouped tables
 */
@Test
public void testGrouperLevels() throws Exception {
    // 100 KB value, make it easy to have multiple files
    ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
    // Need entropy to prevent compression, so the size is predictable with compression enabled or disabled
    new Random().nextBytes(value.array());
    // Enough data to have a level 1 and 2
    int rows = 40;
    int columns = 20;
    // Add enough data to trigger multiple sstables per level
    for (int r = 0; r < rows; r++) {
        UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), String.valueOf(r));
        for (int c = 0; c < columns; c++)
            update.newRow("column" + c).add("val", value);
        update.applyUnsafe();
        cfs.forceBlockingFlush();
    }
    waitForLeveling(cfs);
    CompactionStrategyManager strategyManager = cfs.getCompactionStrategyManager();
    // Checking we're not completely bad at math
    int l1Count = strategyManager.getSSTableCountPerLevel()[1];
    int l2Count = strategyManager.getSSTableCountPerLevel()[2];
    if (l1Count == 0 || l2Count == 0) {
        logger.error("L1 or L2 has 0 sstables. Expected > 0 on both.");
        logger.error("L1: " + l1Count);
        logger.error("L2: " + l2Count);
        Assert.fail();
    }
    Collection<Collection<SSTableReader>> groupedSSTables = cfs.getCompactionStrategyManager().groupSSTablesForAntiCompaction(cfs.getLiveSSTables());
    for (Collection<SSTableReader> sstableGroup : groupedSSTables) {
        int groupLevel = -1;
        Iterator<SSTableReader> it = sstableGroup.iterator();
        while (it.hasNext()) {
            SSTableReader sstable = it.next();
            int tableLevel = sstable.getSSTableLevel();
            if (groupLevel == -1)
                groupLevel = tableLevel;
            // Every sstable in a group must share the level of the group's first sstable
            assert groupLevel == tableLevel;
        }
    }
}
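The flush-per-partition loop above is the idiom all of these tests use to drive data into multiple sstables. A minimal sketch of the core UpdateBuilder pattern in isolation, assuming the same test fixture (cfs bound to a table with one clustering column and a "val" column, as above); this is an illustration, not code from the Cassandra source:

// Sketch: build one partition update, apply it directly, flush to a new sstable.
UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), "key0");   // partition key
update.newRow("column0").add("val", ByteBufferUtil.bytes("payload"));  // one clustering row
update.applyUnsafe();       // test-only apply, bypassing the normal write path
cfs.forceBlockingFlush();   // one memtable flush -> one sstable on disk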
Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.
Class LeveledCompactionStrategyTest, method testValidationMultipleSSTablePerLevel.
/*
 * This exercises in particular the code of #4142
 */
@Test
public void testValidationMultipleSSTablePerLevel() throws Exception {
    byte[] b = new byte[100 * 1024];
    new Random().nextBytes(b);
    // 100 KB value, make it easy to have multiple files
    ByteBuffer value = ByteBuffer.wrap(b);
    // Enough data to have a level 1 and 2
    int rows = 40;
    int columns = 20;
    // Add enough data to trigger multiple sstables per level
    for (int r = 0; r < rows; r++) {
        UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), String.valueOf(r));
        for (int c = 0; c < columns; c++)
            update.newRow("column" + c).add("val", value);
        update.applyUnsafe();
        cfs.forceBlockingFlush();
    }
    waitForLeveling(cfs);
    CompactionStrategyManager strategyManager = cfs.getCompactionStrategyManager();
    // Checking we're not completely bad at math
    assertTrue(strategyManager.getSSTableCountPerLevel()[1] > 0);
    assertTrue(strategyManager.getSSTableCountPerLevel()[2] > 0);
    Range<Token> range = new Range<>(Util.token(""), Util.token(""));
    int gcBefore = keyspace.getColumnFamilyStore(CF_STANDARDDLEVELED).gcBefore(FBUtilities.nowInSeconds());
    UUID parentRepSession = UUID.randomUUID();
    ActiveRepairService.instance.registerParentRepairSession(parentRepSession, FBUtilities.getBroadcastAddress(), Arrays.asList(cfs), Arrays.asList(range), false, ActiveRepairService.UNREPAIRED_SSTABLE, true);
    RepairJobDesc desc = new RepairJobDesc(parentRepSession, UUID.randomUUID(), KEYSPACE1, CF_STANDARDDLEVELED, Arrays.asList(range));
    Validator validator = new Validator(desc, FBUtilities.getBroadcastAddress(), gcBefore);
    // get() blocks until validation completes and rethrows any failure from the task
    CompactionManager.instance.submitValidation(cfs, validator).get();
}
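A note on the range under validation: Util.token("") resolves to a minimal token, and in Cassandra a Range whose two bounds are equal wraps all the way around the ring, so the Validator sees every partition. A small sketch of that semantics, under that assumption and using the same Util helpers; it is an illustration, not part of the test:

Range<Token> fullRing = new Range<>(Util.token(""), Util.token(""));
// Equal bounds wrap around the ring, so any token is contained:
assert fullRing.contains(Util.dk("42").getToken());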
Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.
Class LeveledCompactionStrategyTest, method testTokenRangeCompaction.
@Test
public void testTokenRangeCompaction() throws Exception {
    // Remove any existing data so we can start out clean with a predictable number of sstables
    cfs.truncateBlocking();
    // Disable auto compaction so cassandra does not compact
    CompactionManager.instance.disableAutoCompaction();
    // 100 KB value, make it easy to have multiple files
    ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
    DecoratedKey key1 = Util.dk(String.valueOf(1));
    DecoratedKey key2 = Util.dk(String.valueOf(2));
    List<DecoratedKey> keys = new ArrayList<>(Arrays.asList(key1, key2));
    int numIterations = 10;
    int columns = 2;
    // Create 10 sstables that contain data for both key1 and key2
    for (int i = 0; i < numIterations; i++) {
        for (DecoratedKey key : keys) {
            UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key);
            for (int c = 0; c < columns; c++)
                update.newRow("column" + c).add("val", value);
            update.applyUnsafe();
        }
        cfs.forceBlockingFlush();
    }
    // Create 20 more sstables: 10 containing data for key1 and the other 10 containing data for key2
    for (int i = 0; i < numIterations; i++) {
        for (DecoratedKey key : keys) {
            UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key);
            for (int c = 0; c < columns; c++)
                update.newRow("column" + c).add("val", value);
            update.applyUnsafe();
            cfs.forceBlockingFlush();
        }
    }
    // We should have a total of 30 sstables by now
    assertEquals(30, cfs.getLiveSSTables().size());
    // Compact just the tables with key2
    // Bit hackish to use key1's token as the prior (exclusive) bound, but it works with BytesToken
    Range<Token> tokenRange = new Range<>(key1.getToken(), key2.getToken());
    Collection<Range<Token>> tokenRanges = new ArrayList<>(Arrays.asList(tokenRange));
    cfs.forceCompactionForTokenRange(tokenRanges);
    while (CompactionManager.instance.isCompacting(Arrays.asList(cfs))) {
        Thread.sleep(100);
    }
    // The 20 tables that contain key2 should have been compacted into 1 table, leaving 11 (30 - 20 + 1)
    assertEquals(11, cfs.getLiveSSTables().size());
    // Compact just the tables with key1. At this point all 11 tables should have key1
    // The wraparound range (key2, key1] selects key1
    Range<Token> tokenRange2 = new Range<>(key2.getToken(), key1.getToken());
    Collection<Range<Token>> tokenRanges2 = new ArrayList<>(Arrays.asList(tokenRange2));
    cfs.forceCompactionForTokenRange(tokenRanges2);
    while (CompactionManager.instance.isCompacting(Arrays.asList(cfs))) {
        Thread.sleep(100);
    }
    // The 11 tables containing key1 should all compact into 1 table
    assertEquals(1, cfs.getLiveSSTables().size());
}
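Both forced compactions rely on Range<Token> being left-exclusive and right-inclusive, with a left bound that sorts at or above the right bound wrapping around the ring. A sketch of that behavior under the BytesToken ordering used here (illustrative, not taken from the test):

Token t1 = Util.dk("1").getToken();
Token t2 = Util.dk("2").getToken();
Range<Token> selects2 = new Range<>(t1, t2); // ("1", "2"]: contains t2, not t1
Range<Token> selects1 = new Range<>(t2, t1); // wraps around the ring: contains t1, not t2
assert selects2.contains(t2) && !selects2.contains(t1);
assert selects1.contains(t1) && !selects1.contains(t2);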
Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.
Class AntiCompactionTest, method writeFile.
private SSTableReader writeFile(ColumnFamilyStore cfs, int count) {
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor desc = cfs.newSSTableDescriptor(dir);
    try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, NO_PENDING_REPAIR, new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS))) {
        for (int i = 0; i < count; i++) {
            UpdateBuilder builder = UpdateBuilder.create(metadata, ByteBufferUtil.bytes(i));
            for (int j = 0; j < count * 5; j++)
                builder.newRow("c" + j).add("val", "value1");
            writer.append(builder.build().unfilteredIterator());
        }
        Collection<SSTableReader> sstables = writer.finish(true);
        assertNotNull(sstables);
        assertEquals(1, sstables.size());
        return sstables.iterator().next();
    }
}
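A hedged usage note: anti-compaction tests like this one typically register the returned reader with the store before exercising compaction. A sketch, where the count of 1000 is illustrative and the store is assumed to start empty:

SSTableReader reader = writeFile(cfs, 1000);
cfs.addSSTable(reader); // make the freshly written sstable live in the store
assertEquals(1, cfs.getLiveSSTables().size());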
Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.
Class SSTableRewriterTest, method writeFiles.
public static Set<SSTableReader> writeFiles(ColumnFamilyStore cfs, int fileCount, int partitionCount, int cellCount, int cellSize) {
    int i = 0;
    Set<SSTableReader> result = new LinkedHashSet<>();
    for (int f = 0; f < fileCount; f++) {
        File dir = cfs.getDirectories().getDirectoryForNewSSTables();
        Descriptor desc = cfs.newSSTableDescriptor(dir);
        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, null, new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS))) {
            // The last file takes whatever partitions remain; earlier files take an even share
            int end = f == fileCount - 1 ? partitionCount : ((f + 1) * partitionCount) / fileCount;
            for (; i < end; i++) {
                UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), ByteBufferUtil.bytes(i));
                for (int j = 0; j < cellCount; j++)
                    builder.newRow(Integer.toString(i)).add("val", random(0, 1000));
                writer.append(builder.build().unfilteredIterator());
            }
            result.addAll(writer.finish(true));
        }
    }
    return result;
}
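A usage sketch with illustrative argument values: each pass of the outer loop writes one sstable and finish(true) opens its reader, so the returned set should contain exactly fileCount readers covering partitionCount partitions between them:

Set<SSTableReader> sstables = writeFiles(cfs, 8, 400, 10, 1000);
assertEquals(8, sstables.size());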