Example use of org.apache.cassandra.cql3.ColumnIdentifier in the apache/cassandra project: class DirectoriesTest, method testSecondaryIndexDirectories.
// Verifies that a secondary index table keeps its own SSTable directories,
// nested under its parent table's, and that snapshot and backup directories,
// existence checks, creation times and true sizes are tracked per table.
@Test
public void testSecondaryIndexDirectories() {
// Parent table "cf" with partition key "thekey" and clustering column "col".
TableMetadata.Builder builder = TableMetadata.builder(KS, "cf").addPartitionKeyColumn("thekey", UTF8Type.instance).addClusteringColumn("col", UTF8Type.instance);
ColumnIdentifier col = ColumnIdentifier.getInterned("col", true);
// KEYS index "idx" on the value of "col", attached to the parent's metadata.
IndexMetadata indexDef = IndexMetadata.fromIndexTargets(Collections.singletonList(new IndexTarget(col, IndexTarget.Type.VALUES)), "idx", IndexMetadata.Kind.KEYS, Collections.emptyMap());
builder.indexes(Indexes.of(indexDef));
TableMetadata PARENT_CFM = builder.build();
// Metadata of the hidden table that backs the secondary index.
TableMetadata INDEX_CFM = CassandraIndex.indexCfsMetadata(PARENT_CFM, indexDef);
Directories parentDirectories = new Directories(PARENT_CFM);
Directories indexDirectories = new Directories(INDEX_CFM);
// secondary index has its own directory
for (File dir : indexDirectories.getCFDirectories()) {
assertEquals(cfDir(INDEX_CFM), dir);
}
Descriptor parentDesc = new Descriptor(parentDirectories.getDirectoryForNewSSTables(), KS, PARENT_CFM.name, 0, SSTableFormat.Type.BIG);
Descriptor indexDesc = new Descriptor(indexDirectories.getDirectoryForNewSSTables(), KS, INDEX_CFM.name, 0, SSTableFormat.Type.BIG);
// snapshot dir should be created under its parent's
File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, "test");
File indexSnapshotDirectory = Directories.getSnapshotDirectory(indexDesc, "test");
assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.getParentFile());
// check if snapshot directory exists
// NOTE(review): only the parent's snapshot dir is created here, yet the
// index's snapshotExists also passes — presumably snapshotExists resolves
// through the parent's snapshot tree; confirm against Directories.
parentSnapshotDirectory.mkdirs();
assertTrue(parentDirectories.snapshotExists("test"));
assertTrue(indexDirectories.snapshotExists("test"));
// check their creation time
assertEquals(parentDirectories.snapshotCreationTime("test"), indexDirectories.snapshotCreationTime("test"));
// check true snapshot size
// Write one DATA component into each snapshot dir (sizes 30 and 40); each
// table must then report only its own snapshot bytes.
Descriptor parentSnapshot = new Descriptor(parentSnapshotDirectory, KS, PARENT_CFM.name, 0, SSTableFormat.Type.BIG);
createFile(parentSnapshot.filenameFor(Component.DATA), 30);
Descriptor indexSnapshot = new Descriptor(indexSnapshotDirectory, KS, INDEX_CFM.name, 0, SSTableFormat.Type.BIG);
createFile(indexSnapshot.filenameFor(Component.DATA), 40);
assertEquals(30, parentDirectories.trueSnapshotsSize());
assertEquals(40, indexDirectories.trueSnapshotsSize());
// check snapshot details
// Details map: snapshot tag -> Pair of sizes; the right element matches the
// per-table true size asserted above.
Map<String, Pair<Long, Long>> parentSnapshotDetail = parentDirectories.getSnapshotDetails();
assertTrue(parentSnapshotDetail.containsKey("test"));
assertEquals(30L, parentSnapshotDetail.get("test").right.longValue());
Map<String, Pair<Long, Long>> indexSnapshotDetail = indexDirectories.getSnapshotDetails();
assertTrue(indexSnapshotDetail.containsKey("test"));
assertEquals(40L, indexSnapshotDetail.get("test").right.longValue());
// check backup directory
// Backups nest the same way: the index's backups dir sits under the parent's.
File parentBackupDirectory = Directories.getBackupsDirectory(parentDesc);
File indexBackupDirectory = Directories.getBackupsDirectory(indexDesc);
assertEquals(parentBackupDirectory, indexBackupDirectory.getParentFile());
}
Example use of org.apache.cassandra.cql3.ColumnIdentifier in the apache/cassandra project: class CommitLogReaderTest, method confirmReadOrder.
/**
 * Since we have both table and non mixed into the CL, we ignore updates that aren't for the table the test handler
 * is configured to check.
 *
 * Walks the mutations captured by the handler in replay order and asserts that
 * the "data" cell of each row for the table under test carries the expected
 * sequential value (index + offset). Fails the test on the first mismatch.
 *
 * @param handler handler holding the mutations seen during commit log replay
 * @param offset integer offset of count we expect to see in record
 */
private void confirmReadOrder(TestCLRHandler handler, int offset) {
    ColumnMetadata cd = currentTableMetadata().getColumn(new ColumnIdentifier("data", false));
    // i counts mutations that target the table under test; j counts skipped
    // mutations belonging to other tables. i + j walks the full seen list.
    int i = 0;
    int j = 0;
    while (i + j < handler.seenMutationCount()) {
        PartitionUpdate pu = handler.seenMutations.get(i + j).get(currentTableMetadata());
        if (pu == null) {
            // Mutation is for a different table; skip without advancing i.
            j++;
            continue;
        }
        for (Row r : pu) {
            String expected = Integer.toString(i + offset);
            // Decode explicitly as UTF-8: new String(byte[]) uses the platform
            // default charset, which is not guaranteed to match the bytes
            // written by the test.
            // NOTE(review): value().array() assumes an array-backed buffer with
            // zero arrayOffset and full capacity in use — confirm, or prefer
            // ByteBufferUtil.string(...) which honors position/limit.
            String seen = new String(r.getCell(cd).value().array(), java.nio.charset.StandardCharsets.UTF_8);
            if (!expected.equals(seen))
                Assert.fail("Mismatch at index: " + i + ". Offset: " + offset + " Expected: " + expected + " Seen: " + seen);
        }
        i++;
    }
}
Example use of org.apache.cassandra.cql3.ColumnIdentifier in the apache/cassandra project: class RowTest, method testHashCode.
// Exercises the Row equals/hashCode contract: a built row used as a HashMap
// key must be retrievable with the same reference.
@Test
public void testHashCode() {
    ColumnMetadata colA = metadata.getColumn(new ColumnIdentifier("a", true));
    ColumnMetadata colB = metadata.getColumn(new ColumnIdentifier("b", true));
    // Assemble one row at clustering "c1": two writes to column a (the later
    // timestamp should win) and one write to column b.
    Row.Builder rowBuilder = BTreeRow.unsortedBuilder(nowInSeconds);
    rowBuilder.newRow(metadata.comparator.make("c1"));
    writeSimpleCellValue(rowBuilder, colA, "a1", 0);
    writeSimpleCellValue(rowBuilder, colA, "a2", 1);
    writeSimpleCellValue(rowBuilder, colB, "b1", 1);
    Row built = rowBuilder.build();
    // Round-trip through a HashMap; lookup succeeds only if hashCode/equals agree.
    Map<Row, Integer> lookup = new HashMap<>();
    lookup.put(built, 1);
    assertEquals(Integer.valueOf(1), lookup.get(built));
}
Example use of org.apache.cassandra.cql3.ColumnIdentifier in the apache/cassandra project: class RowTest, method testResolve.
// Writes two values to the same cell and checks that, after wrapping the row
// in a PartitionUpdate, resolution keeps the later write and both columns.
@Test
public void testResolve() {
    ColumnMetadata colA = metadata.getColumn(new ColumnIdentifier("a", true));
    ColumnMetadata colB = metadata.getColumn(new ColumnIdentifier("b", true));
    // Row at clustering "c1": "a1" then "a2" on column a (later timestamp),
    // plus "b1" on column b.
    Row.Builder rowBuilder = BTreeRow.unsortedBuilder(nowInSeconds);
    rowBuilder.newRow(metadata.comparator.make("c1"));
    writeSimpleCellValue(rowBuilder, colA, "a1", 0);
    writeSimpleCellValue(rowBuilder, colA, "a2", 1);
    writeSimpleCellValue(rowBuilder, colB, "b1", 1);
    Row built = rowBuilder.build();
    PartitionUpdate update = PartitionUpdate.singleRowUpdate(metadata, dk, built);
    // The single update should surface as one ROW unfiltered.
    Unfiltered first = update.unfilteredIterator().next();
    assertTrue(first.kind() == Unfiltered.Kind.ROW);
    Row resolved = (Row) first;
    // Later write ("a2") wins for column a; column b keeps its sole value.
    assertEquals("a2", colA.cellValueType().getString(resolved.getCell(colA).value()));
    assertEquals("b1", colB.cellValueType().getString(resolved.getCell(colB).value()));
    assertEquals(2, resolved.columns().size());
}
Example use of org.apache.cassandra.cql3.ColumnIdentifier in the apache/cassandra project: class PartitionImplementationTest, method makeRow.
// Builds a single live row at the given clustering, storing colValue in the
// table's "col" column with the fixed TIMESTAMP.
Row makeRow(Clustering clustering, String colValue) {
    ColumnMetadata valueColumn = metadata.getColumn(new ColumnIdentifier("col", true));
    Row.Builder rowBuilder = BTreeRow.unsortedBuilder(TIMESTAMP);
    rowBuilder.newRow(clustering);
    rowBuilder.addCell(BufferCell.live(valueColumn, TIMESTAMP, ByteBufferUtil.bytes(colValue)));
    return rowBuilder.build();
}
Aggregations