Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class StandaloneSplitter, method main.
public static void main(String[] args) {
    Options options = Options.parseArgs(args);
    Util.initDatabaseDescriptor();
    try {
        // load keyspace descriptions.
        Schema.instance.loadFromDisk(false);
        String ksName = null;
        String cfName = null;
        Map<Descriptor, Set<Component>> parsedFilenames = new HashMap<Descriptor, Set<Component>>();
        for (String filename : options.filenames) {
            File file = new File(filename);
            if (!file.exists()) {
                System.out.println("Skipping nonexistent file " + file);
                continue;
            }
            Descriptor desc = SSTable.tryDescriptorFromFilename(file);
            if (desc == null) {
                System.out.println("Skipping non-sstable file " + file);
                continue;
            }
            if (ksName == null)
                ksName = desc.ksname;
            else if (!ksName.equals(desc.ksname))
                throw new IllegalArgumentException("All sstables must be part of the same keyspace");
            if (cfName == null)
                cfName = desc.cfname;
            else if (!cfName.equals(desc.cfname))
                throw new IllegalArgumentException("All sstables must be part of the same table");
            Set<Component> components = new HashSet<Component>(Arrays.asList(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.COMPRESSION_INFO, Component.STATS));
            Iterator<Component> iter = components.iterator();
            while (iter.hasNext()) {
                Component component = iter.next();
                if (!(new File(desc.filenameFor(component)).exists()))
                    iter.remove();
            }
            parsedFilenames.put(desc, components);
        }
        if (ksName == null || cfName == null) {
            System.err.println("No valid sstables to split");
            System.exit(1);
        }
        // Do not load sstables since they might be broken
        Keyspace keyspace = Keyspace.openWithoutSSTables(ksName);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
        String snapshotName = "pre-split-" + System.currentTimeMillis();
        List<SSTableReader> sstables = new ArrayList<>();
        for (Map.Entry<Descriptor, Set<Component>> fn : parsedFilenames.entrySet()) {
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(fn.getKey(), fn.getValue(), cfs);
                if (!isSSTableLargerEnough(sstable, options.sizeInMB)) {
                    System.out.println(String.format("Skipping %s: its size (%.3f MB) is less than the split size (%d MB)", sstable.getFilename(), ((sstable.onDiskLength() * 1.0d) / 1024L) / 1024L, options.sizeInMB));
                    continue;
                }
                sstables.add(sstable);
                if (options.snapshot) {
                    File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName);
                    sstable.createLinks(snapshotDirectory.getPath());
                }
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error loading %s: %s", fn.getKey(), e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
            }
        }
        if (sstables.isEmpty()) {
            System.out.println("No sstables needed splitting.");
            System.exit(0);
        }
        if (options.snapshot)
            System.out.println(String.format("Pre-split sstables snapshotted into snapshot %s", snapshotName));
        for (SSTableReader sstable : sstables) {
            try (LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.UNKNOWN, sstable)) {
                new SSTableSplitter(cfs, transaction, options.sizeInMB).split();
            } catch (Exception e) {
                System.err.println(String.format("Error splitting %s: %s", sstable, e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
                sstable.selfRef().release();
            }
        }
        CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
        LifecycleTransaction.waitForDeletions();
        // We need this to stop non-daemonized threads
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
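The pattern to note above is the offline transaction: a standalone tool runs outside a live Tracker, so each sstable is wrapped in LifecycleTransaction.offline(...) and rewritten inside a try-with-resources block, which aborts the transaction and cleans up if the split fails before a commit. A minimal sketch of that idiom, assuming an SSTableReader named reader obtained elsewhere (e.g. via SSTableReader.openNoValidation as above); the class and method names are illustrative, not Cassandra API:

import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.io.sstable.format.SSTableReader;

final class OfflineRewriteSketch {
    // Rewrite a single sstable outside of any Tracker. The try body is a
    // placeholder; StandaloneSplitter puts its SSTableSplitter call here.
    static void rewriteOffline(SSTableReader reader) {
        try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.UNKNOWN, reader)) {
            // ... hand txn to a writer/rewriter; closing the transaction
            // without committing aborts it and releases its references ...
        }
    }
}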
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class LongCompactionsTest, method testCompaction.
protected void testCompaction(int sstableCount, int partitionsPerSSTable, int rowsPerPartition) throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
    ArrayList<SSTableReader> sstables = new ArrayList<>();
    for (int k = 0; k < sstableCount; k++) {
        SortedMap<String, PartitionUpdate> rows = new TreeMap<>();
        for (int j = 0; j < partitionsPerSSTable; j++) {
            String key = String.valueOf(j);
            // the last sstable has the highest timestamps
            UpdateBuilder builder = UpdateBuilder.create(store.metadata(), String.valueOf(j)).withTimestamp(k);
            for (int i = 0; i < rowsPerPartition; i++)
                builder.newRow(String.valueOf(i)).add("val", String.valueOf(i));
            rows.put(key, builder.build());
        }
        Collection<SSTableReader> readers = SSTableUtils.prepare().write(rows);
        sstables.addAll(readers);
        store.addSSTables(readers);
    }
    // give garbage collection a bit of time to catch up
    Thread.sleep(1000);
    long start = System.nanoTime();
    final int gcBefore = (int) (System.currentTimeMillis() / 1000) - Schema.instance.getTableMetadata(KEYSPACE1, "Standard1").params.gcGraceSeconds;
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.COMPACTION)) {
        assert txn != null : "Cannot markCompacting all sstables";
        new CompactionTask(store, txn, gcBefore).execute(null);
    }
    System.out.println(String.format("%s: sstables=%d rowsper=%d colsper=%d: %d ms", this.getClass().getName(), sstableCount, partitionsPerSSTable, rowsPerPartition, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)));
}
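The key call here is Tracker.tryModify: it atomically marks the chosen sstables as compacting and returns null if any of them is already claimed by another operation, which is why the test asserts txn != null before proceeding. A hedged sketch of that idiom; compactAll and its gcBefore parameter are illustrative names, not Cassandra API, while the calls inside mirror the snippet above:

import java.util.Collection;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.CompactionTask;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.io.sstable.format.SSTableReader;

final class TryModifySketch {
    // Returns false instead of asserting when the sstables could not all be
    // marked compacting; try-with-resources tolerates a null resource.
    static boolean compactAll(ColumnFamilyStore store, Collection<SSTableReader> sstables, int gcBefore) {
        try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.COMPACTION)) {
            if (txn == null)
                return false; // another operation owns at least one sstable
            new CompactionTask(store, txn, gcBefore).execute(null);
            return true;
        }
    }
}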
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class AntiCompactionTest, method antiCompactOne.
private void antiCompactOne(long repairedAt, UUID pendingRepair) throws Exception {
    assert repairedAt != UNREPAIRED_SSTABLE || pendingRepair != null;
    ColumnFamilyStore store = prepareColumnFamilyStore();
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    Range<Token> range = new Range<Token>(new BytesToken("0".getBytes()), new BytesToken("4".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    int repairedKeys = 0;
    int pendingKeys = 0;
    int nonRepairedKeys = 0;
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        if (txn == null)
            throw new IllegalStateException();
        UUID parentRepairSession = UUID.randomUUID();
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, repairedAt, pendingRepair, parentRepairSession);
    }
    assertEquals(2, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables()) {
        try (ISSTableScanner scanner = sstable.getScanner()) {
            while (scanner.hasNext()) {
                UnfilteredRowIterator row = scanner.next();
                if (sstable.isRepaired() || sstable.isPendingRepair()) {
                    assertTrue(range.contains(row.partitionKey().getToken()));
                    repairedKeys += sstable.isRepaired() ? 1 : 0;
                    pendingKeys += sstable.isPendingRepair() ? 1 : 0;
                } else {
                    assertFalse(range.contains(row.partitionKey().getToken()));
                    nonRepairedKeys++;
                }
            }
        }
    }
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertFalse(sstable.isMarkedCompacted());
        assertEquals(1, sstable.selfRef().globalCount());
    }
    assertEquals(0, store.getTracker().getCompacting().size());
    assertEquals(repairedKeys, repairedAt != UNREPAIRED_SSTABLE ? 4 : 0);
    assertEquals(pendingKeys, pendingRepair != NO_PENDING_REPAIR ? 4 : 0);
    assertEquals(nonRepairedKeys, 6);
}
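Worth highlighting in this test is the double resource acquisition: the LifecycleTransaction marks the sstables as compacting while Refs.ref takes reference counts on the same readers, and both are declared in a single try-with-resources so they are released in reverse order whether or not anticompaction throws. A minimal sketch of that shape (the class and method names are illustrative; the performAnticompaction call mirrors the test above):

import java.util.Collection;
import java.util.List;
import java.util.UUID;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.utils.concurrent.Refs;

final class AnticompactSketch {
    static void anticompact(ColumnFamilyStore store, Collection<SSTableReader> sstables,
                            List<Range<Token>> ranges, long repairedAt, UUID pendingRepair) throws Exception {
        // Both resources cover the same sstables: txn guards the lifecycle
        // state, refs keeps the readers alive for the duration of the call.
        try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
             Refs<SSTableReader> refs = Refs.ref(sstables)) {
            if (txn == null)
                throw new IllegalStateException("sstables already involved in another operation");
            CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, repairedAt, pendingRepair, UUID.randomUUID());
        }
    }
}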
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class AntiCompactionTest, method shouldSkipAntiCompactionForNonIntersectingRange.
@Test
public void shouldSkipAntiCompactionForNonIntersectingRange() throws InterruptedException, IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.disableAutoCompaction();
    for (int table = 0; table < 10; table++) {
        generateSStable(store, Integer.toString(table));
    }
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    Range<Token> range = new Range<Token>(new BytesToken("-1".getBytes()), new BytesToken("-10".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    UUID parentRepairSession = UUID.randomUUID();
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, 1, NO_PENDING_REPAIR, parentRepairSession);
    }
    assertThat(store.getLiveSSTables().size(), is(10));
    assertThat(Iterables.get(store.getLiveSSTables(), 0).isRepaired(), is(false));
}
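The behaviour under test is the intersection check: anticompaction only rewrites an sstable when the requested range actually overlaps the sstable's token span, so a range built from the "-1"/"-10" byte tokens (which sort below the generated "0".."9" keys) touches nothing, and all ten sstables remain untouched and unrepaired. A rough sketch of such a check, assuming the public first/last keys on SSTableReader and Range.intersects; the helper name is hypothetical, and modelling the span as a Range slightly under-approximates its left-inclusive bound:

import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.sstable.format.SSTableReader;

final class IntersectionSketch {
    // Approximate "does this sstable overlap the range?" test; Range is
    // (left, right], so an sstable whose only key sits exactly on its first
    // token would be missed by this approximation.
    static boolean overlaps(SSTableReader sstable, Range<Token> range) {
        Range<Token> span = new Range<>(sstable.first.getToken(), sstable.last.getToken());
        return span.intersects(range);
    }
}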
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class CompactionAwareWriterTest, method testSplittingSizeTieredCompactionWriter.
@Test
public void testSplittingSizeTieredCompactionWriter() throws Throwable {
    ColumnFamilyStore cfs = getColumnFamilyStore();
    cfs.disableAutoCompaction();
    int rowCount = 10000;
    populate(rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    CompactionAwareWriter writer = new SplittingSizeTieredCompactionWriter(cfs, cfs.getDirectories(), txn, txn.originals(), 0);
    int rows = compact(cfs, txn, writer);
    long expectedSize = beforeSize / 2;
    List<SSTableReader> sortedSSTables = new ArrayList<>(cfs.getLiveSSTables());
    Collections.sort(sortedSSTables, new Comparator<SSTableReader>() {
        @Override
        public int compare(SSTableReader o1, SSTableReader o2) {
            return Longs.compare(o2.onDiskLength(), o1.onDiskLength());
        }
    });
    for (SSTableReader sstable : sortedSSTables) {
        // we don't create files smaller than this; everything ends up in the last file
        if (expectedSize > SplittingSizeTieredCompactionWriter.DEFAULT_SMALLEST_SSTABLE_BYTES)
            // allow a 1% difference between estimated and actual size
            assertEquals(expectedSize, sstable.onDiskLength(), expectedSize / 100);
        expectedSize /= 2;
    }
    assertEquals(rowCount, rows);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
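The assertion loop encodes the writer's size schedule: each successive output sstable is expected to be half the size of the previous one, stopping once the projected size drops below DEFAULT_SMALLEST_SSTABLE_BYTES, at which point the remainder lands in the final file. A toy standalone illustration of that halving schedule; the 100 MB input and 10 MB floor are made-up numbers for the example, not Cassandra's defaults:

final class HalvingScheduleSketch {
    public static void main(String[] args) {
        long beforeSize = 100L * 1024 * 1024;   // pretend input: a single 100 MB sstable
        long smallestBytes = 10L * 1024 * 1024; // stand-in for DEFAULT_SMALLEST_SSTABLE_BYTES
        long expected = beforeSize / 2;
        // prints ~50 MB, ~25 MB, ~12.5 MB; the rest goes into one final sstable
        while (expected > smallestBytes) {
            System.out.printf("expect an sstable of roughly %d bytes%n", expected);
            expected /= 2;
        }
        System.out.println("remaining data is written into one final sstable");
    }
}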