use of org.apache.cassandra.cql3.QueryProcessor.executeInternal in project cassandra by apache.
the class LegacySSTableTest method readClusteringTable.
private static void readClusteringTable(String legacyVersion, String compactSuffix, int ck, String ckValue, String pkValue) {
    logger.debug("Read legacy_{}_clust{}", legacyVersion, compactSuffix);
    UntypedResultSet rs;
    // point lookup on a single clustering key
    rs = QueryProcessor.executeInternal(String.format("SELECT val FROM legacy_tables.legacy_%s_clust%s WHERE pk=? AND ck=?", legacyVersion, compactSuffix), pkValue, ckValue);
    assertLegacyClustRows(1, rs);
    // two further clustering values known to exist in the legacy test data
    String ckValue2 = Integer.toString(ck < 10 ? 40 : ck - 1) + longString;
    String ckValue3 = Integer.toString(ck > 39 ? 10 : ck + 1) + longString;
    // IN query across three clustering keys
    rs = QueryProcessor.executeInternal(String.format("SELECT val FROM legacy_tables.legacy_%s_clust%s WHERE pk=? AND ck IN (?, ?, ?)", legacyVersion, compactSuffix), pkValue, ckValue, ckValue2, ckValue3);
    assertLegacyClustRows(3, rs);
}
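All of the snippets on this page share the same call shape: executeInternal takes a CQL string (optionally with ? bind markers) followed by the bind values as varargs, and returns an UntypedResultSet that can be sized or iterated. A minimal sketch of that pattern, using made-up keyspace, table, and column names, might look like this:

// Hypothetical sketch of the shared executeInternal pattern; some_ks.some_table
// and its columns are assumptions, not taken from the examples on this page.
private static void readOneRow(String pkValue, String ckValue) {
    UntypedResultSet rs = QueryProcessor.executeInternal(
        "SELECT val FROM some_ks.some_table WHERE pk = ? AND ck = ?",
        pkValue, ckValue);
    for (UntypedResultSet.Row row : rs)
        System.out.println(row.getString("val"));
}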
use of org.apache.cassandra.cql3.QueryProcessor.executeInternal in project cassandra by apache.
the class MigrationManagerTest method createEmptyKsAddNewCf.
@Test
public void createEmptyKsAddNewCf() throws ConfigurationException {
    assertNull(Schema.instance.getKeyspaceMetadata(EMPTY_KEYSPACE));
    KeyspaceMetadata newKs = KeyspaceMetadata.create(EMPTY_KEYSPACE, KeyspaceParams.simple(5));
    MigrationManager.announceNewKeyspace(newKs);
    assertNotNull(Schema.instance.getKeyspaceMetadata(EMPTY_KEYSPACE));
    String tableName = "added_later";
    TableMetadata newCf = addTestTable(EMPTY_KEYSPACE, tableName, "A new CF to add to an empty KS");
    // should not exist until applied
    assertFalse(Schema.instance.getKeyspaceMetadata(newKs.name).tables.get(newCf.name).isPresent());
    // add the new CF to the empty keyspace
    MigrationManager.announceNewTable(newCf);
    assertTrue(Schema.instance.getKeyspaceMetadata(newKs.name).tables.get(newCf.name).isPresent());
    assertEquals(Schema.instance.getKeyspaceMetadata(newKs.name).tables.get(newCf.name).get(), newCf);
    // now read and write to it
    QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (key, col, val) VALUES (?, ?, ?)", EMPTY_KEYSPACE, tableName), "key0", "col0", "val0");
    ColumnFamilyStore cfs = Keyspace.open(newKs.name).getColumnFamilyStore(newCf.name);
    assertNotNull(cfs);
    cfs.forceBlockingFlush();
    UntypedResultSet rows = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s", EMPTY_KEYSPACE, tableName));
    assertRows(rows, row("key0", "col0", "val0"));
}
use of org.apache.cassandra.cql3.QueryProcessor.executeInternal in project cassandra by apache.
the class MigrationManagerTest method addNewKS.
@Test
public void addNewKS() throws ConfigurationException {
    TableMetadata cfm = addTestTable("newkeyspace1", "newstandard1", "A new cf for a new ks");
    KeyspaceMetadata newKs = KeyspaceMetadata.create(cfm.keyspace, KeyspaceParams.simple(5), Tables.of(cfm));
    MigrationManager.announceNewKeyspace(newKs);
    assertNotNull(Schema.instance.getKeyspaceMetadata(cfm.keyspace));
    assertEquals(Schema.instance.getKeyspaceMetadata(cfm.keyspace), newKs);
    // test reads and writes
    QueryProcessor.executeInternal("INSERT INTO newkeyspace1.newstandard1 (key, col, val) VALUES (?, ?, ?)", "key0", "col0", "val0");
    ColumnFamilyStore store = Keyspace.open(cfm.keyspace).getColumnFamilyStore(cfm.name);
    assertNotNull(store);
    store.forceBlockingFlush();
    UntypedResultSet rows = QueryProcessor.executeInternal("SELECT * FROM newkeyspace1.newstandard1");
    assertRows(rows, row("key0", "col0", "val0"));
}
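Both MigrationManagerTest methods rely on an addTestTable helper that is not shown on this page. A plausible sketch of such a helper, assuming the TableMetadata.builder API and the key/col/val text columns implied by the INSERT statements (table-comment handling omitted), could be:

// Hypothetical reconstruction of a helper like addTestTable; the builder calls
// used here exist in the TableMetadata API, but the original body is not shown
// on this page, and the comment argument is ignored in this sketch.
private static TableMetadata addTestTable(String ks, String cf, String comment) {
    return TableMetadata.builder(ks, cf)
                        .addPartitionKeyColumn("key", UTF8Type.instance)
                        .addClusteringColumn("col", UTF8Type.instance)
                        .addRegularColumn("val", UTF8Type.instance)
                        .build();
}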
use of org.apache.cassandra.cql3.QueryProcessor.executeInternal in project cassandra by apache.
the class StreamingTransferTest method doTransferTable.
private void doTransferTable(boolean transferSSTables) throws Exception {
    final Keyspace keyspace = Keyspace.open(KEYSPACE1);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_INDEX);
    List<String> keys = createAndTransfer(cfs, new Mutator() {
        public void mutate(String key, String col, long timestamp) throws Exception {
            long val = key.hashCode();
            RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), timestamp, key);
            builder.clustering(col).add("birthdate", ByteBufferUtil.bytes(val));
            builder.build().applyUnsafe();
        }
    }, transferSSTables);
    // confirm that the secondary index was recovered
    for (String key : keys) {
        long val = key.hashCode();
        // test we can search:
        UntypedResultSet result = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".\"%s\" WHERE birthdate = %d", cfs.metadata.keyspace, cfs.metadata.name, val));
        assertEquals(1, result.size());
        assert result.iterator().next().getBytes("key").equals(ByteBufferUtil.bytes(key));
    }
}
use of org.apache.cassandra.cql3.QueryProcessor.executeInternal in project cassandra by apache.
the class LongStreamingTest method testCompressedStream.
@Test
public void testCompressedStream() throws InvalidRequestException, IOException, ExecutionException, InterruptedException {
    String KS = "cql_keyspace";
    String TABLE = "table1";
    File tempdir = Files.createTempDir();
    File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE);
    assert dataDir.mkdirs();
    String schema = "CREATE TABLE cql_keyspace.table1 (" +
                    "  k int PRIMARY KEY," +
                    "  v1 text," +
                    "  v2 int" +
                    ");"; // with compression = {};
    String insert = "INSERT INTO cql_keyspace.table1 (k, v1, v2) VALUES (?, ?, ?)";
    CQLSSTableWriter writer = CQLSSTableWriter.builder().sorted().inDirectory(dataDir).forTable(schema).using(insert).build();
    long start = System.nanoTime();
    for (int i = 0; i < 10_000_000; i++)
        writer.addRow(i, "test1", 24);
    writer.close();
    System.err.println(String.format("Writer finished after %d seconds....", TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start)));
    File[] dataFiles = dataDir.listFiles((dir, name) -> name.endsWith("-Data.db"));
    long dataSize = 0L;
    for (File file : dataFiles) {
        System.err.println("File : " + file.getAbsolutePath());
        dataSize += file.length();
    }
    SSTableLoader loader = new SSTableLoader(dataDir, new SSTableLoader.Client() {
        private String ks;

        public void init(String keyspace) {
            for (Range<Token> range : StorageService.instance.getLocalRanges("cql_keyspace"))
                addRangeForEndpoint(range, FBUtilities.getBroadcastAddress());
            this.ks = keyspace;
        }

        public TableMetadataRef getTableMetadata(String cfName) {
            return Schema.instance.getTableMetadataRef(ks, cfName);
        }
    }, new OutputHandler.SystemOutput(false, false));
    start = System.nanoTime();
    loader.stream().get();
    long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f Mb/sec", millis / 1000d, (dataSize / (1 << 20) / (millis / 1000d)) * 8));
    // Stream again
    loader = new SSTableLoader(dataDir, new SSTableLoader.Client() {
        private String ks;

        public void init(String keyspace) {
            for (Range<Token> range : StorageService.instance.getLocalRanges("cql_keyspace"))
                addRangeForEndpoint(range, FBUtilities.getBroadcastAddress());
            this.ks = keyspace;
        }

        public TableMetadataRef getTableMetadata(String cfName) {
            return Schema.instance.getTableMetadataRef(ks, cfName);
        }
    }, new OutputHandler.SystemOutput(false, false));
    start = System.nanoTime();
    loader.stream().get();
    millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f Mb/sec", millis / 1000d, (dataSize / (1 << 20) / (millis / 1000d)) * 8));
    // Compact them both
    start = System.nanoTime();
    Keyspace.open(KS).getColumnFamilyStore(TABLE).forceMajorCompaction();
    millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    System.err.println(String.format("Finished Compacting in %.2f seconds: %.2f Mb/sec", millis / 1000d, (dataSize * 2 / (1 << 20) / (millis / 1000d)) * 8));
    UntypedResultSet rs = QueryProcessor.executeInternal("SELECT * FROM cql_keyspace.table1 limit 100;");
    assertEquals(100, rs.size());
}
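The final assertion only samples the first 100 rows. Because the same 10 million primary keys are streamed twice and then compacted, no duplicate rows should remain; a fuller (hypothetical) check could count all rows, assuming a full-table aggregate is acceptable in this long-running test:

// Hypothetical follow-up check: both streaming passes wrote the same 10M
// primary keys, so the compacted table should still contain exactly 10M rows.
UntypedResultSet countRs = QueryProcessor.executeInternal("SELECT count(*) FROM cql_keyspace.table1");
assertEquals(10_000_000L, countRs.one().getLong("count"));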