Example usage of org.apache.cassandra.cql3.UntypedResultSet in the Apache Cassandra project, taken from the class SystemKeyspace, method getSSTableReadMeter.
/**
 * Builds a {@link RestorableMeter} for the given sstable, seeded from the last persisted
 * read rates in system.sstable_activity when a matching row exists; otherwise a fresh,
 * zeroed meter is returned.
 * @param keyspace the keyspace the sstable belongs to
 * @param table the table the sstable belongs to
 * @param generation the generation number for the sstable
 */
public static RestorableMeter getSSTableReadMeter(String keyspace, String table, int generation) {
    String cql = "SELECT * FROM system.%s WHERE keyspace_name=? and columnfamily_name=? and generation=?";
    UntypedResultSet activity = executeInternal(format(cql, SSTABLE_ACTIVITY), keyspace, table, generation);
    if (!activity.isEmpty()) {
        // Restore the meter from the persisted 15-minute and 120-minute rates.
        UntypedResultSet.Row persisted = activity.one();
        return new RestorableMeter(persisted.getDouble("rate_15m"), persisted.getDouble("rate_120m"));
    }
    // No persisted activity for this sstable: start from a clean meter.
    return new RestorableMeter();
}
Example usage of org.apache.cassandra.cql3.UntypedResultSet in the Apache Cassandra project, taken from the class SystemKeyspaceMigrator40, method migratePeers.
/**
 * Copies rows from the legacy peers table into the 4.0 peers_v2 table, binding the locally
 * configured storage/native ports for the port columns that the legacy schema lacks.
 * Runs only when peers_v2 is empty; a failed partial migration must be truncated to retry.
 */
private static void migratePeers() {
    ColumnFamilyStore peersV2 = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.PEERS_V2);
    if (!peersV2.isEmpty())
        return;
    logger.info("{} table was empty, migrating legacy {}, if this fails you should fix the issue and then truncate {} to have it try again.", peersName, legacyPeersName, peersName);
    String select = String.format("SELECT * FROM %s", legacyPeersName);
    String insert = String.format("INSERT INTO %s ( peer, peer_port, data_center, host_id, preferred_ip, preferred_port, rack, release_version, native_address, native_port, schema_version, tokens)  values ( ?, ?, ? , ? , ?, ?, ?, ?, ?, ?, ?, ?)", peersName);
    int migrated = 0;
    logger.info("Migrating rows from legacy {} to {}", legacyPeersName, peersName);
    for (UntypedResultSet.Row legacy : QueryProcessor.executeInternalWithPaging(select, 1000)) {
        logger.debug("Transferring row {}", migrated);
        // Columns absent from the legacy row are bound as NULL; legacy rpc_address maps to
        // native_address, and port columns take the node's configured values.
        QueryProcessor.executeInternal(insert,
                                       legacy.has("peer") ? legacy.getInetAddress("peer") : null,
                                       DatabaseDescriptor.getStoragePort(),
                                       legacy.has("data_center") ? legacy.getString("data_center") : null,
                                       legacy.has("host_id") ? legacy.getUUID("host_id") : null,
                                       legacy.has("preferred_ip") ? legacy.getInetAddress("preferred_ip") : null,
                                       DatabaseDescriptor.getStoragePort(),
                                       legacy.has("rack") ? legacy.getString("rack") : null,
                                       legacy.has("release_version") ? legacy.getString("release_version") : null,
                                       legacy.has("rpc_address") ? legacy.getInetAddress("rpc_address") : null,
                                       DatabaseDescriptor.getNativeTransportPort(),
                                       legacy.has("schema_version") ? legacy.getUUID("schema_version") : null,
                                       legacy.has("tokens") ? legacy.getSet("tokens", UTF8Type.instance) : null);
        migrated++;
    }
    logger.info("Migrated {} rows from legacy {} to {}", migrated, legacyPeersName, peersName);
}
Example usage of org.apache.cassandra.cql3.UntypedResultSet in the Apache Cassandra project, taken from the class SystemKeyspaceMigrator40, method migratePeerEvents.
/**
 * Copies rows from the legacy peer_events table into the 4.0 peer_events_v2 table.
 * The legacy schema has no peer_port column, so the locally configured storage port
 * is bound for every migrated row. Runs only when peer_events_v2 is empty.
 */
private static void migratePeerEvents() {
    ColumnFamilyStore peerEventsV2 = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.PEER_EVENTS_V2);
    if (!peerEventsV2.isEmpty())
        return;
    logger.info("{} table was empty, migrating legacy {} to {}", peerEventsName, legacyPeerEventsName, peerEventsName);
    String select = String.format("SELECT * FROM %s", legacyPeerEventsName);
    String insert = String.format("INSERT INTO %s ( peer, peer_port, hints_dropped)  values ( ?, ?, ? )", peerEventsName);
    int migrated = 0;
    for (UntypedResultSet.Row legacy : QueryProcessor.executeInternalWithPaging(select, 1000)) {
        logger.debug("Transferring row {}", migrated);
        // Absent legacy columns are bound as NULL.
        QueryProcessor.executeInternal(insert,
                                       legacy.has("peer") ? legacy.getInetAddress("peer") : null,
                                       DatabaseDescriptor.getStoragePort(),
                                       legacy.has("hints_dropped") ? legacy.getMap("hints_dropped", UUIDType.instance, Int32Type.instance) : null);
        migrated++;
    }
    logger.info("Migrated {} rows from legacy {} to {}", migrated, legacyPeerEventsName, peerEventsName);
}
Example usage of org.apache.cassandra.cql3.UntypedResultSet in the Apache Cassandra project, taken from the class SystemKeyspaceMigrator40, method migrateAvailableRanges.
/**
 * Copies rows from the legacy available_ranges table into the 4.0 available_ranges_v2 table.
 * Legacy rows carry no transience information, so every migrated range is recorded as a
 * full range and transient_ranges is left empty. Runs only when the target table is empty.
 */
static void migrateAvailableRanges() {
    ColumnFamilyStore availableRangesV2 = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.AVAILABLE_RANGES_V2);
    if (!availableRangesV2.isEmpty())
        return;
    logger.info("{} table was empty, migrating legacy {} to {}", availableRangesName, legacyAvailableRangesName, availableRangesName);
    String select = String.format("SELECT * FROM %s", legacyAvailableRangesName);
    String insert = String.format("INSERT INTO %s (keyspace_name, full_ranges, transient_ranges)  values ( ?, ?, ? )", availableRangesName);
    int migrated = 0;
    for (UntypedResultSet.Row legacy : QueryProcessor.executeInternalWithPaging(select, 1000)) {
        logger.debug("Transferring row {}", migrated);
        String keyspaceName = legacy.getString("keyspace_name");
        // A missing/null legacy set is normalized to an empty set before binding.
        Set<ByteBuffer> legacyRanges = Optional.ofNullable(legacy.getSet("ranges", BytesType.instance)).orElse(Collections.emptySet());
        QueryProcessor.executeInternal(insert, keyspaceName, legacyRanges, Collections.emptySet());
        migrated++;
    }
    logger.info("Migrated {} rows from legacy {} to {}", migrated, legacyAvailableRangesName, availableRangesName);
}
Example usage of org.apache.cassandra.cql3.UntypedResultSet in the Apache Cassandra project, taken from the class PartitionDenylist, method getDenylistForAllTablesFromCQL.
/**
 * This method relies on {@link #getDenylistForTableFromCQL(TableId, int)} to pull a limited amount of keys
 * on a per-table basis from CQL to load into the cache. We need to navigate both respecting the max cache size limit
 * as well as respecting the per-table limit. Once the global key budget is exhausted, remaining
 * tables are mapped to an empty {@link DenylistEntry} and a loud error is logged.
 * @return non-null mapping of TableId to DenylistEntry; empty on read failure or when nothing is denylisted
 */
private Map<TableId, DenylistEntry> getDenylistForAllTablesFromCQL() {
    // While we warn the user in this case, we continue with the reload anyway.
    checkDenylistNodeAvailability();
    final String allDeniedTables = String.format("SELECT DISTINCT ks_name, table_name FROM %s.%s",
                                                 SchemaConstants.DISTRIBUTED_KEYSPACE_NAME,
                                                 SystemDistributedKeyspace.PARTITION_DENYLIST_TABLE);
    try {
        final UntypedResultSet deniedTableResults = process(allDeniedTables, DatabaseDescriptor.getDenylistConsistencyLevel());
        if (deniedTableResults == null || deniedTableResults.isEmpty())
            return Collections.emptyMap();
        int totalProcessed = 0;
        final Map<TableId, DenylistEntry> results = new HashMap<>();
        for (final UntypedResultSet.Row row : deniedTableResults) {
            final String ks = row.getString("ks_name");
            final String table = row.getString("table_name");
            // NOTE(review): getTableId may plausibly return null for a dropped/unknown table,
            // which would insert a null key here — confirm its contract.
            final TableId tid = getTableId(ks, table);
            if (DatabaseDescriptor.getDenylistMaxKeysTotal() - totalProcessed <= 0) {
                logger.error("Hit limit on allowable denylisted keys in total. Processed {} total entries. Not adding all entries to denylist for {}/{}." + " Remove denylist entries in system_distributed.{} or increase your denylist_max_keys_total param in cassandra.yaml.", totalProcessed, ks, table, SystemDistributedKeyspace.PARTITION_DENYLIST_TABLE);
                results.put(tid, new DenylistEntry());
            } else {
                // Determine whether we can get up to table max or we need a subset at edge condition of max overflow.
                int allowedTableRecords = Math.min(DatabaseDescriptor.getDenylistMaxKeysPerTable(), DatabaseDescriptor.getDenylistMaxKeysTotal() - totalProcessed);
                DenylistEntry tableDenylist = getDenylistForTableFromCQL(tid, allowedTableRecords);
                if (tableDenylist != null)
                    totalProcessed += tableDenylist.keys.size();
                results.put(tid, tableDenylist);
            }
        }
        return results;
    } catch (final RequestExecutionException e) {
        // Pass the exception as the trailing argument so SLF4J records the full stack trace;
        // the previous string concatenation of 'e' discarded it.
        logger.error("Error reading full partition denylist from {}.{}. Partition Denylisting will be compromised.",
                     SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, SystemDistributedKeyspace.PARTITION_DENYLIST_TABLE, e);
        return Collections.emptyMap();
    }
}
Aggregations