Use of org.apache.cassandra.schema.TableMetadata in the Apache Cassandra project.
Example: the main method of the SSTableExpiredBlockers class.
/**
 * Entry point for the sstableexpiredblockers offline tool.
 *
 * Loads the schema from disk, opens every non-temporary sstable of the given
 * keyspace/table, and reports which sstables block fully expired sstables from
 * being dropped by compaction (based on the table's gc_grace_seconds).
 *
 * Expects the keyspace and table name as the last two command-line arguments;
 * exits with status 1 on bad usage or when no sstables are found.
 */
public static void main(String[] args) {
    PrintStream output = System.out;
    if (args.length < 2) {
        output.println("Usage: sstableexpiredblockers <keyspace> <table>");
        System.exit(1);
    }
    Util.initDatabaseDescriptor();

    // The last two arguments name the table to inspect.
    String keyspaceName = args[args.length - 2];
    String tableName = args[args.length - 1];

    Schema.instance.loadFromDisk(false);
    TableMetadata metadata = Schema.instance.validateTable(keyspaceName, tableName);

    Keyspace keyspaceInstance = Keyspace.openWithoutSSTables(keyspaceName);
    ColumnFamilyStore store = keyspaceInstance.getColumnFamilyStore(tableName);

    Directories.SSTableLister lister =
            store.getDirectories().sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);

    // Open every listed sstable; unreadable ones are reported but skipped.
    Set<SSTableReader> readers = new HashSet<>();
    for (Map.Entry<Descriptor, Set<Component>> listed : lister.list().entrySet()) {
        Descriptor descriptor = listed.getKey();
        if (descriptor == null)
            continue;
        try {
            readers.add(SSTableReader.open(descriptor));
        } catch (Throwable t) {
            output.println("Couldn't open sstable: " + descriptor.filenameFor(Component.DATA) + " (" + t.getMessage() + ")");
        }
    }

    if (readers.isEmpty()) {
        output.println("No sstables for " + keyspaceName + "." + tableName);
        System.exit(1);
    }

    // Anything whose local deletion time is older than this is fully expired.
    int gcBefore = (int) (System.currentTimeMillis() / 1000) - metadata.params.gcGraceSeconds;
    Multimap<SSTableReader, SSTableReader> blockers = checkForExpiredSSTableBlockers(readers, gcBefore);
    for (SSTableReader blocker : blockers.keySet()) {
        String line = String.format("%s blocks %d expired sstables from getting dropped: %s%n",
                formatForExpiryTracing(Collections.singleton(blocker)),
                blockers.get(blocker).size(),
                formatForExpiryTracing(blockers.get(blocker)));
        output.println(line);
    }
    System.exit(0);
}
Use of org.apache.cassandra.schema.TableMetadata in the Apache Cassandra project.
Example: the testTruncatedReplay method of the BatchlogManagerTest class.
/**
 * Verifies that batchlog replay honors truncation records: mutations whose
 * batch timestamp predates a table's truncation time must not be replayed
 * to that table, while other tables in the same batch are unaffected.
 */
@Test
public void testTruncatedReplay() throws InterruptedException, ExecutionException {
    TableMetadata cf2 = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD2);
    TableMetadata cf3 = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD3);
    // In the middle of the process, 'truncate' Standard2.
    for (int i = 0; i < 1000; i++) {
        Mutation mutation1 = new RowUpdateBuilder(cf2, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i)).clustering("name" + i).add("val", "val" + i).build();
        Mutation mutation2 = new RowUpdateBuilder(cf3, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i)).clustering("name" + i).add("val", "val" + i).build();
        List<Mutation> mutations = Lists.newArrayList(mutation1, mutation2);
        // Make sure it's ready to be replayed, so adjust the timestamp.
        long timestamp = System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout();
        // Record a truncation of Standard2 at the midpoint; batches at or after
        // this point get timestamps strictly greater than the truncation time,
        // earlier batches strictly smaller (see the +/- adjustment below).
        if (i == 500)
            SystemKeyspace.saveTruncationRecord(Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD2), timestamp, CommitLogPosition.NONE);
        // Adjust the timestamp (slightly) to make the test deterministic.
        if (i >= 500)
            timestamp++;
        else
            timestamp--;
        BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), FBUtilities.timestampMicros(), mutations));
    }
    // Flush the batchlog to disk (see CASSANDRA-6822).
    Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
    // Force batchlog replay and wait for it to complete.
    BatchlogManager.instance.startBatchlogReplay().get();
    // We should see half of Standard2-targeted mutations written after the replay and all of Standard3 mutations applied.
    for (int i = 0; i < 1000; i++) {
        UntypedResultSet result = executeInternal(String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD2, i));
        assertNotNull(result);
        if (i >= 500) {
            // Capture the single row once instead of re-fetching it per assertion.
            UntypedResultSet.Row row = result.one();
            assertEquals(ByteBufferUtil.bytes(i), row.getBytes("key"));
            assertEquals("name" + i, row.getString("name"));
            assertEquals("val" + i, row.getString("val"));
        } else {
            // Batches older than the truncation record must have been skipped.
            assertTrue(result.isEmpty());
        }
    }
    // Standard3 was never truncated, so every mutation must be present.
    for (int i = 0; i < 1000; i++) {
        UntypedResultSet result = executeInternal(String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD3, i));
        assertNotNull(result);
        UntypedResultSet.Row row = result.one();
        assertEquals(ByteBufferUtil.bytes(i), row.getBytes("key"));
        assertEquals("name" + i, row.getString("name"));
        assertEquals("val" + i, row.getString("val"));
    }
}
Use of org.apache.cassandra.schema.TableMetadata in the Apache Cassandra project.
Example: the testKeys method of the CacheProviderTest class.
/**
 * Checks RowCacheKey equality/hashCode semantics: keys built from equal table
 * ids and equal key bytes are equal; keys differing in bytes or index name are
 * not. Also checks sameTable() against a matching TableMetadata.
 */
@Test
public void testKeys() {
    TableId id1 = TableId.generate();
    byte[] b1 = { 1, 2, 3, 4 };
    RowCacheKey key1 = new RowCacheKey(id1, null, ByteBuffer.wrap(b1));
    // A round-tripped id and a separate-but-equal byte array must yield an equal key.
    TableId id2 = TableId.fromString(id1.toString());
    byte[] b2 = { 1, 2, 3, 4 };
    RowCacheKey key2 = new RowCacheKey(id2, null, ByteBuffer.wrap(b2));
    assertEquals(key1, key2);
    assertEquals(key1.hashCode(), key2.hashCode());
    TableMetadata tm = TableMetadata.builder("ks", "tab", id1).addPartitionKeyColumn("pk", UTF8Type.instance).build();
    assertTrue(key1.sameTable(tm));
    byte[] b3 = { 1, 2, 3, 5 };
    RowCacheKey key3 = new RowCacheKey(id1, null, ByteBuffer.wrap(b3));
    // BUG FIX: assertNotSame compares references; on distinct objects (and on
    // autoboxed hashCode ints) it was vacuously true and tested nothing.
    // Assert actual value inequality instead.
    assertTrue(!key1.equals(key3));
    assertTrue(key1.hashCode() != key3.hashCode());
    // with index name
    key1 = new RowCacheKey(id1, "indexFoo", ByteBuffer.wrap(b1));
    // An indexed key must not equal the non-indexed key for the same bytes.
    assertTrue(!key1.equals(key2));
    assertTrue(key1.hashCode() != key2.hashCode());
    key2 = new RowCacheKey(id2, "indexFoo", ByteBuffer.wrap(b2));
    assertEquals(key1, key2);
    assertEquals(key1.hashCode(), key2.hashCode());
    tm = TableMetadata.builder("ks", "tab.indexFoo", id1).addPartitionKeyColumn("pk", UTF8Type.instance).indexes(Indexes.of(IndexMetadata.fromSchemaMetadata("indexFoo", IndexMetadata.Kind.KEYS, Collections.emptyMap()))).build();
    assertTrue(key1.sameTable(tm));
    key3 = new RowCacheKey(id1, "indexFoo", ByteBuffer.wrap(b3));
    assertTrue(!key1.equals(key3));
    assertTrue(key1.hashCode() != key3.hashCode());
}
Use of org.apache.cassandra.schema.TableMetadata in the Apache Cassandra project.
Example: the testReplay method of the BatchlogManagerTest class.
@Test
@SuppressWarnings("deprecation")
// Verifies that batchlog replay applies exactly the batches that have aged past
// the batchlog timeout: 50 of 100 stored batches are made old enough to replay,
// and only their mutations must appear in the table afterwards.
public void testReplay() throws Exception {
// Snapshot counters so assertions are relative (other tests may have run batches).
long initialAllBatches = BatchlogManager.instance.countAllBatches();
long initialReplayedBatches = BatchlogManager.instance.getTotalBatchesReplayed();
TableMetadata cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata();
// Half batches (50) ready to be replayed, half not.
for (int i = 0; i < 100; i++) {
// Each batch carries 10 mutations for the same partition key i, one per clustering.
List<Mutation> mutations = new ArrayList<>(10);
for (int j = 0; j < 10; j++) {
mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i)).clustering("name" + j).add("val", "val" + j).build());
}
// Batches 0..49 get timestamps older than the batchlog timeout (eligible for
// replay); 50..99 get future timestamps (not yet eligible).
long timestamp = i < 50 ? (System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout()) : (System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
// timestamp * 1000 converts millis to the micros granularity Batch expects.
BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), timestamp * 1000, mutations));
}
// Flush the batchlog to disk (see CASSANDRA-6822).
Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
assertEquals(100, BatchlogManager.instance.countAllBatches() - initialAllBatches);
assertEquals(0, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
// Force batchlog replay and wait for it to complete.
BatchlogManager.instance.startBatchlogReplay().get();
// Ensure that the first half, and only the first half, got replayed.
assertEquals(50, BatchlogManager.instance.countAllBatches() - initialAllBatches);
assertEquals(50, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
for (int i = 0; i < 100; i++) {
String query = String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD1, i);
UntypedResultSet result = executeInternal(query);
assertNotNull(result);
if (i < 50) {
// Replayed batch: all 10 rows for this partition must be present, in order.
Iterator<UntypedResultSet.Row> it = result.iterator();
assertNotNull(it);
for (int j = 0; j < 10; j++) {
assertTrue(it.hasNext());
UntypedResultSet.Row row = it.next();
assertEquals(ByteBufferUtil.bytes(i), row.getBytes("key"));
assertEquals("name" + j, row.getString("name"));
assertEquals("val" + j, row.getString("val"));
}
assertFalse(it.hasNext());
} else {
// Not-yet-eligible batch: nothing should have been applied.
assertTrue(result.isEmpty());
}
}
// Ensure that no stray mutations got somehow applied.
UntypedResultSet result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", KEYSPACE1, CF_STANDARD1));
assertNotNull(result);
// 50 replayed batches x 10 mutations each.
assertEquals(500, result.one().getLong("count"));
}
Use of org.apache.cassandra.schema.TableMetadata in the Apache Cassandra project.
Example: the testDelete method of the BatchlogManagerTest class.
/**
 * Writes a single row, confirms it is readable, then applies a full partition
 * deletion and asserts the partition reads back empty.
 */
@Test
public void testDelete() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    TableMetadata cfm = cfs.metadata();
    // Insert one row for partition key "1234".
    new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("1234")).clustering("c").add("val", "val" + 1234).build().applyUnsafe();
    DecoratedKey dk = cfs.decorateKey(ByteBufferUtil.bytes("1234"));
    ImmutableBTreePartition results = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, dk).build());
    Iterator<Row> iter = results.iterator();
    // BUG FIX: a bare Java 'assert' only runs with -ea enabled; use a JUnit
    // assertion so the check always executes.
    assertTrue(iter.hasNext());
    // Delete the whole partition and verify it is gone.
    Mutation mutation = new Mutation(PartitionUpdate.fullPartitionDelete(cfm, dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds()));
    mutation.applyUnsafe();
    Util.assertEmpty(Util.cmd(cfs, dk).build());
}
Aggregations