Use of org.apache.cassandra.cql3.UntypedResultSet in project cassandra by apache.
In class SystemKeyspace, method getPreferredIP.
/**
 * Get preferred IP for given endpoint if it is known. Otherwise this returns given endpoint itself.
 *
 * @param ep endpoint address to check
 * @return Preferred IP for given endpoint if present, otherwise returns given ep
 */
public static InetAddress getPreferredIP(InetAddress ep) {
    String req = "SELECT preferred_ip FROM system.%s WHERE peer=?";
    UntypedResultSet result = executeInternal(format(req, PEERS), ep);
    if (!result.isEmpty()) {
        // Fetch the first row once instead of calling one() twice
        // (once for has(), once for getInetAddress()).
        UntypedResultSet.Row row = result.one();
        if (row.has("preferred_ip"))
            return row.getInetAddress("preferred_ip");
    }
    // No peer row or no preferred_ip column set: fall back to the endpoint itself.
    return ep;
}
Use of org.apache.cassandra.cql3.UntypedResultSet in project cassandra by apache.
In class CassandraAuthorizer, method convertLegacyData.
/**
* Copy legacy authz data from the system_auth.permissions table to the new system_auth.role_permissions table and
* also insert entries into the reverse lookup table.
* In theory, we could simply rename the existing table as the schema is structurally the same, but this would
* break mixed clusters during a rolling upgrade.
* This setup is not performed if AllowAllAuthenticator is configured (see Auth#setup).
*/
private void convertLegacyData() {
try {
// Only attempt conversion when the legacy table actually exists in the schema.
if (Schema.instance.getTableMetadata("system_auth", "permissions") != null) {
logger.info("Converting legacy permissions data");
// Prepared insert into the new role_permissions table: (role, resource, permissions).
CQLStatement insertStatement = QueryProcessor.getStatement(String.format("INSERT INTO %s.%s (role, resource, permissions) " + "VALUES (?, ?, ?)", SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_PERMISSIONS), ClientState.forInternalCalls()).statement;
// Prepared insert into the reverse lookup table: (resource, role).
CQLStatement indexStatement = QueryProcessor.getStatement(String.format("INSERT INTO %s.%s (resource, role) VALUES (?,?)", SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.RESOURCE_ROLE_INDEX), ClientState.forInternalCalls()).statement;
UntypedResultSet permissions = process("SELECT * FROM system_auth.permissions");
for (UntypedResultSet.Row row : permissions) {
final IResource resource = Resources.fromName(row.getString("resource"));
// Keep only permissions that are applicable to this resource type;
// legacy rows may carry permissions that no longer apply.
Predicate<String> isApplicable = new Predicate<String>() {
public boolean apply(String s) {
return resource.applicablePermissions().contains(Permission.valueOf(s));
}
};
SetSerializer<String> serializer = SetSerializer.getInstance(UTF8Serializer.instance, UTF8Type.instance);
// The legacy permissions column is a serialized set<text>; round-trip it
// through the serializer so the filtered set is re-encoded identically.
Set<String> originalPerms = serializer.deserialize(row.getBytes("permissions"));
Set<String> filteredPerms = ImmutableSet.copyOf(Iterables.filter(originalPerms, isApplicable));
// Write both the forward (role -> resource) and reverse (resource -> role)
// entries at CL.ONE; this is best-effort during a rolling upgrade.
insertStatement.execute(QueryState.forInternalCalls(), QueryOptions.forInternalCalls(ConsistencyLevel.ONE, Lists.newArrayList(row.getBytes("username"), row.getBytes("resource"), serializer.serialize(filteredPerms))), System.nanoTime());
indexStatement.execute(QueryState.forInternalCalls(), QueryOptions.forInternalCalls(ConsistencyLevel.ONE, Lists.newArrayList(row.getBytes("resource"), row.getBytes("username"))), System.nanoTime());
}
logger.info("Completed conversion of legacy permissions");
}
} catch (Exception e) {
// Deliberate best-effort: during a rolling upgrade some nodes may not yet
// have the new tables, so log at INFO and keep the stack trace at TRACE
// rather than failing startup.
logger.info("Unable to complete conversion of legacy permissions data (perhaps not enough nodes are upgraded yet). " + "Conversion should not be considered complete");
logger.trace("Conversion error", e);
}
}
Use of org.apache.cassandra.cql3.UntypedResultSet in project cassandra by apache.
In class SystemDistributedKeyspace, method viewStatus.
/**
 * Returns the materialized-view build status reported by each host for the
 * given keyspace/view pair, keyed by host id.
 *
 * @param keyspace keyspace name the view belongs to
 * @param view     view name to look up
 * @return map of host id to status string; empty if the query fails
 */
public static Map<UUID, String> viewStatus(String keyspace, String view) {
    String cql = "SELECT host_id, status FROM %s.%s WHERE keyspace_name = ? AND view_name = ?";
    UntypedResultSet rows;
    try {
        rows = QueryProcessor.execute(format(cql, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS), ConsistencyLevel.ONE, keyspace, view);
    } catch (Exception e) {
        // Best-effort read: if the distributed table is unavailable, report nothing.
        return Collections.emptyMap();
    }
    Map<UUID, String> statusByHost = new HashMap<>();
    for (UntypedResultSet.Row row : rows)
        statusByHost.put(row.getUUID("host_id"), row.getString("status"));
    return statusByHost;
}
Use of org.apache.cassandra.cql3.UntypedResultSet in project cassandra by apache.
In class LegacySSTableTest, method verifyReads.
/**
 * Reads back every table flavor written for the given legacy sstable version
 * and verifies the contents via the read* helpers.
 *
 * Iterates both compact-storage variants (suffix 0 and 1), 50 clustering
 * values and 5 partition keys; the simple (non-clustering) tables only need
 * to be checked once per partition key, so they are read only when ck == 0.
 *
 * @param legacyVersion sstable format version under test
 */
private static void verifyReads(String legacyVersion) {
    for (int compact = 0; compact <= 1; compact++) {
        for (int ck = 0; ck < 50; ck++) {
            String ckValue = Integer.toString(ck) + longString;
            for (int pk = 0; pk < 5; pk++) {
                logger.debug("for pk={} ck={}", pk, ck);
                String pkValue = Integer.toString(pk);
                // Removed an unused local (UntypedResultSet rs) that was never assigned or read.
                if (ck == 0) {
                    readSimpleTable(legacyVersion, getCompactNameSuffix(compact), pkValue);
                    readSimpleCounterTable(legacyVersion, getCompactNameSuffix(compact), pkValue);
                }
                readClusteringTable(legacyVersion, getCompactNameSuffix(compact), ck, ckValue, pkValue);
                readClusteringCounterTable(legacyVersion, getCompactNameSuffix(compact), ckValue, pkValue);
            }
        }
    }
}
Use of org.apache.cassandra.cql3.UntypedResultSet in project cassandra by apache.
In class LongStreamingTest, method testCompressedStream.
/**
 * Writes 10M rows with CQLSSTableWriter, streams the resulting sstables into
 * the node twice, compacts, and verifies the data is readable. Timings are
 * printed to stderr for manual inspection; only the final row count is asserted.
 */
@Test
public void testCompressedStream() throws InvalidRequestException, IOException, ExecutionException, InterruptedException {
    String KS = "cql_keyspace";
    String TABLE = "table1";
    File tempdir = Files.createTempDir();
    File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE);
    assert dataDir.mkdirs();
    String schema = "CREATE TABLE cql_keyspace.table1 (" + " k int PRIMARY KEY," + " v1 text," + " v2 int" + // with compression = {};";
    ");";
    String insert = "INSERT INTO cql_keyspace.table1 (k, v1, v2) VALUES (?, ?, ?)";
    CQLSSTableWriter writer = CQLSSTableWriter.builder().sorted().inDirectory(dataDir).forTable(schema).using(insert).build();
    long start = System.nanoTime();
    for (int i = 0; i < 10_000_000; i++)
        writer.addRow(i, "test1", 24);
    writer.close();
    System.err.println(String.format("Writer finished after %d seconds....", TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start)));
    // Sum the on-disk size of the data components so we can report a rate.
    File[] dataFiles = dataDir.listFiles((dir, name) -> name.endsWith("-Data.db"));
    long dataSize = 0L; // was 0l — uppercase suffix avoids the l/1 confusion
    for (File file : dataFiles) {
        System.err.println("File : " + file.getAbsolutePath());
        dataSize += file.length();
    }
    // First streaming pass.
    SSTableLoader loader = newLoader(dataDir);
    start = System.nanoTime();
    loader.stream().get();
    long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f Mb/sec", millis / 1000d, (dataSize / (1 << 20) / (millis / 1000d)) * 8));
    // Stream again with a fresh loader (SSTableLoader is single-use).
    loader = newLoader(dataDir);
    start = System.nanoTime();
    loader.stream().get();
    millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f Mb/sec", millis / 1000d, (dataSize / (1 << 20) / (millis / 1000d)) * 8));
    // Compact both streamed copies into one.
    start = System.nanoTime();
    Keyspace.open(KS).getColumnFamilyStore(TABLE).forceMajorCompaction();
    millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    System.err.println(String.format("Finished Compacting in %.2f seconds: %.2f Mb/sec", millis / 1000d, (dataSize * 2 / (1 << 20) / (millis / 1000d)) * 8));
    UntypedResultSet rs = QueryProcessor.executeInternal("SELECT * FROM cql_keyspace.table1 limit 100;");
    assertEquals(100, rs.size());
}

/**
 * Builds an SSTableLoader whose client streams all local ranges of the
 * cql_keyspace keyspace to the local broadcast address. Extracted because the
 * test streams twice with two identical, independently-constructed loaders.
 *
 * @param dataDir directory holding the sstables to stream
 * @return a fresh loader ready for a single stream() call
 */
private static SSTableLoader newLoader(File dataDir) {
    return new SSTableLoader(dataDir, new SSTableLoader.Client() {

        private String ks;

        public void init(String keyspace) {
            for (Range<Token> range : StorageService.instance.getLocalRanges("cql_keyspace"))
                addRangeForEndpoint(range, FBUtilities.getBroadcastAddress());
            this.ks = keyspace;
        }

        public TableMetadataRef getTableMetadata(String cfName) {
            return Schema.instance.getTableMetadataRef(ks, cfName);
        }
    }, new OutputHandler.SystemOutput(false, false));
}
Aggregations