Use of org.apache.cassandra.config.CFMetaData in the project eiger by wlloyd: class SSTableExport, method export.
/**
 * Export an SSTable and write the resulting JSON to a PrintStream.
 *
 * @param ssTableFile the SSTable to export
 * @param outs PrintStream to write the output to
 * @param excludes keys to exclude from export
 *
 * @throws IOException on failure to read/write input/output
 * @throws IllegalArgumentException if the keyspace or column family named by
 *         the sstable filename is not known to the local schema
 */
public static void export(String ssTableFile, PrintStream outs, String[] excludes) throws IOException {
    Descriptor descriptor = Descriptor.fromFilename(ssTableFile);
    CFMetaData metadata;
    if (descriptor.cfname.contains(".")) {
        // Secondary-index sstables are named "<parent>.<index>"; the index
        // metadata must be derived from the parent column family's definition.
        int i = descriptor.cfname.indexOf(".");
        String parentName = descriptor.cfname.substring(0, i);
        CFMetaData parent = Schema.instance.getCFMetaData(descriptor.ksname, parentName);
        if (parent == null)
            throw new IllegalArgumentException(String.format("Unknown keyspace/column family %s.%s", descriptor.ksname, parentName));
        ColumnDefinition def = parent.getColumnDefinitionForIndex(descriptor.cfname.substring(i + 1));
        metadata = CFMetaData.newIndexMetadata(parent, def, KeysIndex.indexComparator());
    } else {
        metadata = Schema.instance.getCFMetaData(descriptor.ksname, descriptor.cfname);
        // Fail fast with a clear message instead of an NPE inside SSTableReader.open
        if (metadata == null)
            throw new IllegalArgumentException(String.format("Unknown keyspace/column family %s.%s", descriptor.ksname, descriptor.cfname));
    }
    export(SSTableReader.open(descriptor, metadata), outs, excludes);
}
Use of org.apache.cassandra.config.CFMetaData in the project eiger by wlloyd: class SSTableExport, method serializeRow.
/**
 * Serialize one row as a JSON fragment: the decorated key followed by either
 * the column list (standard CF) or the super-column map (super CF).
 *
 * @param row SSTableIdentityIterator positioned at the row to serialize
 * @param key decorated key of the row
 * @param out stream the JSON fragment is written to
 */
private static void serializeRow(SSTableIdentityIterator row, DecoratedKey key, PrintStream out) {
    ColumnFamily cf = row.getColumnFamily();
    CFMetaData meta = cf.metadata();
    AbstractType nameComparator = cf.getComparator();
    writeKey(out, bytesToHex(key.key));
    if (!cf.isSuper()) {
        // Standard CF: a flat JSON array of columns.
        out.print("[");
        serializeColumns(row, out, nameComparator, meta);
        out.print("]");
        return;
    }
    // Super CF: a JSON object mapping each super column name to its
    // deletion timestamp and sub-column array.
    out.print("{");
    while (row.hasNext()) {
        IColumn superColumn = row.next();
        writeKey(out, nameComparator.getString(superColumn.name()));
        out.print("{");
        writeKey(out, "deletedAt");
        out.print(superColumn.getMarkedForDeleteAt());
        out.print(", ");
        writeKey(out, "subColumns");
        out.print("[");
        serializeColumns(superColumn.getSubColumns().iterator(), out, cf.getSubComparator(), meta);
        out.print("]");
        out.print("}");
        if (row.hasNext())
            out.print(", ");
    }
    out.print("}");
}
Use of org.apache.cassandra.config.CFMetaData in the project eiger by wlloyd: class SSTableImport, method addToSuperCF.
/**
 * Populate a super column family from the parsed JSON representation of a row.
 *
 * @param row map of super column name to its data map (expects a "subColumns" list)
 * @param cfamily the column family the parsed sub-columns are added to
 */
private static void addToSuperCF(Map<?, ?> row, ColumnFamily cfamily) {
    CFMetaData cfm = cfamily.metadata();
    assert cfm != null;
    AbstractType nameComparator = cfm.comparator;
    for (Map.Entry<?, ?> superColumnEntry : row.entrySet()) {
        Map<?, ?> superColumnData = (Map<?, ?>) superColumnEntry.getValue();
        addColumnsToCF((List<?>) superColumnData.get("subColumns"), stringAsType((String) superColumnEntry.getKey(), nameComparator), cfamily);
        // NOTE: the "deletedAt" field of the JSON is intentionally ignored;
        // markForDeleteAt was deprecated on the Cassandra side, so super
        // column deletion times are not restored on import.
    }
}
Use of org.apache.cassandra.config.CFMetaData in the project eiger by wlloyd: class MultiDcCops2Test, method setup.
/**
 * One-time setup for the multi-datacenter tests: creates "Keyspace1" with a
 * NetworkTopologyStrategy replication factor of 1 per datacenter, points the
 * base Cops2Test at the local and per-DC server lists, and blocks until the
 * schema has propagated to every node.
 *
 * Requires the system properties cassandra.multiDcTest.numDatacenters and
 * cassandra.multiDcTest.nodesPerDatacenter to be set.
 */
@BeforeClass
public static void setup() throws IOException, InterruptedException, ConfigurationException, InvalidRequestException, SchemaDisagreementException, TException {
    Integer numDatacenters = Integer.getInteger("cassandra.multiDcTest.numDatacenters");
    assert numDatacenters != null : "You must set the numDatacenters to run the multiDc Tests";
    Integer nodesPerDatacenter = Integer.getInteger("cassandra.multiDcTest.nodesPerDatacenter");
    assert nodesPerDatacenter != null : "You must set nodesPerDatacenter to run the multiDc Tests";
    // Create a keyspace with a replication factor of 1 for each datacenter
    TTransport tr = new TFramedTransport(new TSocket("127.0.0.1", DEFAULT_THRIFT_PORT));
    TProtocol proto = new TBinaryProtocol(tr);
    Cassandra.Client client = new Cassandra.Client(proto);
    tr.open();
    // Set the replication factor to 1 for each datacenter (DC0, DC1, ...)
    Map<String, String> ntsOptions = new HashMap<String, String>();
    assert numDatacenters > 0;
    for (int i = 0; i < numDatacenters; ++i) {
        ntsOptions.put("DC" + i, "1");
    }
    // We'll use the same set of columns as is used in the EmbeddedCassandraService
    // and we'll set the index type to KEYS so thrift doesn't complain
    Map<String, CFMetaData> cfDefs = schemaDefinition().iterator().next().cfMetaData();
    for (Entry<String, CFMetaData> cfEntry : cfDefs.entrySet()) {
        // FIX: was `==`, which compares String references and silently passes
        // even when the invariant does not hold; compare content instead.
        assert cfEntry.getKey().equals(cfEntry.getValue().cfName);
        for (ColumnDefinition colDef : cfEntry.getValue().getColumn_metadata().values()) {
            colDef.setIndexType(IndexType.KEYS, null);
        }
        cfEntry.getValue().readRepairChance(0);
    }
    KSMetaData keyspace1 = KSMetaData.testMetadataNotDurable("Keyspace1", NetworkTopologyStrategy.class, ntsOptions, cfDefs.values());
    client.system_add_keyspace(keyspace1.toThrift());
    // Set up the normal (local-DC) test endpoints
    HashMap<String, Integer> localServerIPAndPorts = new HashMap<String, Integer>();
    for (int i = 1; i <= nodesPerDatacenter; ++i) {
        localServerIPAndPorts.put("127.0.0." + i, DEFAULT_THRIFT_PORT);
    }
    // FIX: was a raw `new ArrayList()`; parameterize to avoid unchecked warnings.
    List<Map<String, Integer>> dcToServerIPAndPorts = new ArrayList<Map<String, Integer>>();
    for (int dc = 0; dc < numDatacenters; ++dc) {
        HashMap<String, Integer> serverIPAndPorts = new HashMap<String, Integer>();
        for (int i = 0; i < nodesPerDatacenter; ++i) {
            // Node IPs are allocated consecutively: 127.0.0.1, 127.0.0.2, ...
            int ipIndex = 1 + dc * nodesPerDatacenter + i;
            serverIPAndPorts.put("127.0.0." + ipIndex, DEFAULT_THRIFT_PORT);
        }
        dcToServerIPAndPorts.add(serverIPAndPorts);
    }
    Cops2Test.setLocalServerIPAndPorts(localServerIPAndPorts);
    Cops2Test.setDcToServerIPAndPorts(dcToServerIPAndPorts);
    Cops2Test.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
    // Wait for the keyspace to show up at all nodes before any test runs
    HashMap<String, Integer> allServerIPAndPorts = new HashMap<String, Integer>();
    for (int i = 1; i <= numDatacenters * nodesPerDatacenter; ++i) {
        allServerIPAndPorts.put("127.0.0." + i, DEFAULT_THRIFT_PORT);
    }
    waitForKeyspacePropagation(allServerIPAndPorts, "Keyspace1");
}
Use of org.apache.cassandra.config.CFMetaData in the project eiger by wlloyd: class MeteredFlusherTest, method testManyMemtables.
/**
 * Exercise the MeteredFlusher: register many column families, push enough
 * data through them to exceed the global memtable budget, and verify that at
 * least one automatic flush (memtable switch) occurred.
 */
@Test
public void testManyMemtables() throws IOException, ConfigurationException {
    Table table = Table.open("Keyspace1");
    // Register 100 fresh standard column families named "_CF0".."_CF99".
    for (int cfIdx = 0; cfIdx < 100; cfIdx++) {
        CFMetaData metadata = new CFMetaData(table.name, "_CF" + cfIdx, ColumnFamilyType.Standard, UTF8Type.instance, null);
        new AddColumnFamily(metadata).apply();
    }
    ByteBuffer columnName = ByteBufferUtil.bytes("c");
    for (int keyIdx = 0; keyIdx < 200; keyIdx++) {
        for (int cfIdx = 0; cfIdx < 100; cfIdx++) {
            RowMutation mutation = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key" + keyIdx));
            ColumnFamily cf = ColumnFamily.create("Keyspace1", "_CF" + cfIdx);
            // The value buffer is allocated inside the loop on purpose:
            // sharing one buffer would defeat the point of deliberately
            // consuming lots of memory so the flusher has work to do.
            ByteBuffer value = ByteBuffer.allocate(100000);
            cf.addColumn(new Column(columnName, value));
            mutation.add(cf);
            mutation.applyUnsafe();
        }
    }
    int switchCount = 0;
    for (ColumnFamilyStore store : ColumnFamilyStore.all()) {
        if (store.getColumnFamilyName().startsWith("_CF"))
            switchCount += store.getMemtableSwitchCount();
    }
    assert switchCount > 0;
}
Aggregations