Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
From class SASIIndexTest, method update (list-of-cells overload).
private static void update(Mutation rm, List<Cell> cells) {
    TableMetadata metadata = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME).metadata();
    rm.add(PartitionUpdate.singleRowUpdate(metadata, rm.key(), buildRow(cells)));
}
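A minimal sketch of how a test might drive this helper. The Mutation constructor, the key decomposition via metadata.partitioner, and the column name are assumptions about the surrounding test harness, not part of the snippet above; only buildCell, KS_NAME and CF_NAME are taken from it.
// Hypothetical usage: assumes a Mutation(String, DecoratedKey) constructor and the
// buildCell(TableMetadata, ByteBuffer, ByteBuffer, long) helper referenced by the test class.
TableMetadata metadata = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME).metadata();
Mutation rm = new Mutation(KS_NAME, metadata.partitioner.decorateKey(UTF8Type.instance.fromString("key1")));
List<Cell> cells = new ArrayList<>();
cells.add(buildCell(metadata, UTF8Type.instance.fromString("first_name"),
                    UTF8Type.instance.fromString("pavel"), System.currentTimeMillis()));
update(rm, cells);
rm.apply();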
Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
From class SASIIndexTest, method update (single-cell overload).
private static void update(Mutation rm, ByteBuffer name, ByteBuffer value, long timestamp) {
    TableMetadata metadata = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME).metadata();
    rm.add(PartitionUpdate.singleRowUpdate(metadata, rm.key(),
                                           buildRow(buildCell(metadata, name, value, timestamp))));
}
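For comparison, a hedged sketch of calling the single-cell overload directly; it reuses the rm and metadata set up in the previous sketch, and the column name and value are illustrative only. The overload is shorthand for wrapping a single buildCell result in a list.
// Hypothetical usage of the single-cell overload (rm and metadata as in the sketch above).
ByteBuffer name = UTF8Type.instance.fromString("age");
ByteBuffer value = Int32Type.instance.decompose(27);
long ts = System.currentTimeMillis();
update(rm, name, value, ts);
// ...which is equivalent to the list form:
update(rm, Collections.singletonList(buildCell(metadata, name, value, ts)));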
Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
From class LengthPartitioner, method describeOwnership.
public Map<Token, Float> describeOwnership(List<Token> sortedTokens) {
    // allTokens holds the counts and is returned; sortedRanges is shorthand for token<->token math.
    Map<Token, Float> allTokens = new HashMap<Token, Float>();
    List<Range<Token>> sortedRanges = new ArrayList<Range<Token>>();
    // Initialize the counts to 0 and calculate the ranges in order.
    Token lastToken = sortedTokens.get(sortedTokens.size() - 1);
    for (Token node : sortedTokens) {
        allTokens.put(node, new Float(0.0));
        sortedRanges.add(new Range<Token>(lastToken, node));
        lastToken = node;
    }
    for (String ks : Schema.instance.getKeyspaces()) {
        for (TableMetadata cfmd : Schema.instance.getTablesAndViews(ks)) {
            for (Range<Token> r : sortedRanges) {
                // Loop over every KS:CF:Range, get the splits size and add it to the count.
                allTokens.put(r.right,
                              allTokens.get(r.right) + StorageService.instance.getSplits(ks, cfmd.name, r, 1).size());
            }
        }
    }
    // Sum every count and divide count/total for the fractional ownership.
    Float total = new Float(0.0);
    for (Float f : allTokens.values())
        total += f;
    for (Map.Entry<Token, Float> row : allTokens.entrySet())
        allTokens.put(row.getKey(), row.getValue() / total);
    return allTokens;
}
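Because the final loop normalizes each per-token count by the grand total, the returned fractions should sum to roughly 1.0. A hedged sanity check, assuming a partitioner instance and token list from the caller's context:
// Hypothetical check: fractional ownership over all tokens should add up to ~1.0.
Map<Token, Float> ownership = partitioner.describeOwnership(sortedTokens);
float sum = 0f;
for (Float fraction : ownership.values())
    sum += fraction;
assert Math.abs(sum - 1.0f) < 0.001f : "ownership fractions should sum to 1.0, got " + sum;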
Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
From class SSTableFlushObserverTest, method testFlushObserver.
@Test
public void testFlushObserver() {
    // Table with a text partition key and three regular columns.
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME)
                                     .addPartitionKeyColumn("id", UTF8Type.instance)
                                     .addRegularColumn("first_name", UTF8Type.instance)
                                     .addRegularColumn("age", Int32Type.instance)
                                     .addRegularColumn("height", LongType.instance)
                                     .build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();
    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    File directory = new File(sstableDirectory + File.separator + KS_NAME + File.separator + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.mkdirs())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat),
                                               10L, 0L, null,
                                               TableMetadataRef.forOfflineTools(cfm),
                                               new MetadataCollector(cfm.comparator).sstableLevel(0),
                                               new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS),
                                               Collections.singletonList(observer),
                                               transaction);
    SSTableReader reader = null;
    Multimap<ByteBuffer, Cell> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();
        // Append three partitions, recording the expected cells for each key.
        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }
    // The observer must have seen every appended cell, and each recorded index position
    // must point back at the matching partition key in the index file.
    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
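The FlushObserver the test inspects is an inner test class not shown above. A plausible sketch of it, assuming the SSTableFlushObserver callbacks (begin, startPartition, nextUnfilteredCluster, complete) and the bookkeeping implied by the assertions: rows keyed by partition key plus index position, and an isComplete flag flipped when the flush finishes.
// Sketch of the observer used above; the callback names and fields are inferred from how
// the test reads observer.rows and observer.isComplete, so treat this as illustrative.
private static class FlushObserver implements SSTableFlushObserver {
    final Multimap<Pair<ByteBuffer, Long>, Cell> rows = ArrayListMultimap.create();
    private Pair<ByteBuffer, Long> currentKey;
    boolean isComplete;

    public void begin() {}

    public void startPartition(DecoratedKey key, long indexPosition) {
        // Remember which partition (and index offset) the following cells belong to.
        currentKey = Pair.create(key.getKey(), indexPosition);
    }

    public void nextUnfilteredCluster(Unfiltered unfiltered) {
        // Collect every cell of each row under the current partition key.
        if (unfiltered.isRow())
            ((Row) unfiltered).forEach(c -> rows.put(currentKey, (Cell) c));
    }

    public void complete() {
        isComplete = true;
    }
}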
Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
From class MetadataSerializerTest, method constructMetadata.
public Map<MetadataType, MetadataComponent> constructMetadata() {
    // Commit log upper (club) and lower (cllb) bound positions for the covered interval.
    CommitLogPosition club = new CommitLogPosition(11L, 12);
    CommitLogPosition cllb = new CommitLogPosition(9L, 12);
    TableMetadata cfm = SchemaLoader.standardCFMD("ks1", "cf1").build();
    MetadataCollector collector = new MetadataCollector(cfm.comparator).commitLogIntervals(new IntervalSet<>(cllb, club));
    String partitioner = RandomPartitioner.class.getCanonicalName();
    double bfFpChance = 0.1;
    Map<MetadataType, MetadataComponent> originalMetadata =
        collector.finalizeMetadata(partitioner, bfFpChance, 0, null, SerializationHeader.make(cfm, Collections.emptyList()));
    return originalMetadata;
}
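A hedged usage sketch: the finalized map is keyed by MetadataType, so a caller (the serializer test, for instance) can pull out individual components before serializing them. The assertion-style checks below are illustrative assumptions about which component types the collector emits, not part of the original test.
// Hypothetical follow-up: the finalized map is expected to carry one component per type.
Map<MetadataType, MetadataComponent> components = constructMetadata();
Assert.assertTrue(components.containsKey(MetadataType.VALIDATION));
Assert.assertTrue(components.containsKey(MetadataType.STATS));
Assert.assertTrue(components.containsKey(MetadataType.COMPACTION));
Assert.assertTrue(components.containsKey(MetadataType.HEADER));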