Use of org.apache.cassandra.cql3.UntypedResultSet.Row in project cassandra by apache.
Class CassandraAuthorizer, method convertLegacyData.
/**
* Copy legacy authz data from the system_auth.permissions table to the new system_auth.role_permissions table and
* also insert entries into the reverse lookup table.
* In theory, we could simply rename the existing table as the schema is structurally the same, but this would
* break mixed clusters during a rolling upgrade.
* This setup is not performed if AllowAllAuthenticator is configured (see Auth#setup).
*/
private void convertLegacyData()
{
    try
    {
        if (Schema.instance.getTableMetadata("system_auth", "permissions") != null)
        {
            logger.info("Converting legacy permissions data");
            CQLStatement insertStatement =
                QueryProcessor.getStatement(String.format("INSERT INTO %s.%s (role, resource, permissions) VALUES (?, ?, ?)",
                                                          SchemaConstants.AUTH_KEYSPACE_NAME,
                                                          AuthKeyspace.ROLE_PERMISSIONS),
                                            ClientState.forInternalCalls()).statement;
            CQLStatement indexStatement =
                QueryProcessor.getStatement(String.format("INSERT INTO %s.%s (resource, role) VALUES (?, ?)",
                                                          SchemaConstants.AUTH_KEYSPACE_NAME,
                                                          AuthKeyspace.RESOURCE_ROLE_INDEX),
                                            ClientState.forInternalCalls()).statement;

            UntypedResultSet permissions = process("SELECT * FROM system_auth.permissions");
            for (UntypedResultSet.Row row : permissions)
            {
                final IResource resource = Resources.fromName(row.getString("resource"));
                Predicate<String> isApplicable = new Predicate<String>()
                {
                    public boolean apply(String s)
                    {
                        return resource.applicablePermissions().contains(Permission.valueOf(s));
                    }
                };
                SetSerializer<String> serializer = SetSerializer.getInstance(UTF8Serializer.instance, UTF8Type.instance);
                Set<String> originalPerms = serializer.deserialize(row.getBytes("permissions"));
                Set<String> filteredPerms = ImmutableSet.copyOf(Iterables.filter(originalPerms, isApplicable));
                insertStatement.execute(QueryState.forInternalCalls(),
                                        QueryOptions.forInternalCalls(ConsistencyLevel.ONE,
                                                                      Lists.newArrayList(row.getBytes("username"),
                                                                                         row.getBytes("resource"),
                                                                                         serializer.serialize(filteredPerms))),
                                        System.nanoTime());
                indexStatement.execute(QueryState.forInternalCalls(),
                                       QueryOptions.forInternalCalls(ConsistencyLevel.ONE,
                                                                     Lists.newArrayList(row.getBytes("resource"),
                                                                                        row.getBytes("username"))),
                                       System.nanoTime());
            }
            logger.info("Completed conversion of legacy permissions");
        }
    }
    catch (Exception e)
    {
        logger.info("Unable to complete conversion of legacy permissions data (perhaps not enough nodes are upgraded yet). " +
                    "Conversion should not be considered complete");
        logger.trace("Conversion error", e);
    }
}
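The core of the conversion is filtering each legacy permission set down to the permissions applicable to its resource. Below is a minimal, self-contained sketch of that Guava filter pattern; the permission names are hypothetical stand-ins for resource.applicablePermissions() and the legacy table contents.

import java.util.Set;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;

public class PermissionFilterSketch
{
    public static void main(String[] args)
    {
        // Hypothetical stand-ins: what the resource allows, and what the legacy table stored
        final Set<String> applicable = ImmutableSet.of("SELECT", "MODIFY", "ALTER");
        Set<String> legacyPerms = ImmutableSet.of("SELECT", "MODIFY", "CREATE");

        Predicate<String> isApplicable = new Predicate<String>()
        {
            public boolean apply(String s)
            {
                return applicable.contains(s);
            }
        };

        // Same shape as convertLegacyData: drop permissions the resource doesn't support
        Set<String> filtered = ImmutableSet.copyOf(Iterables.filter(legacyPerms, isApplicable));
        System.out.println(filtered); // prints [SELECT, MODIFY]
    }
}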
Use of org.apache.cassandra.cql3.UntypedResultSet.Row in project cassandra by apache.
Class LocalSessions, method loadUnsafe.
/**
* Loads a session directly from the table. Should be used for testing only
*/
@VisibleForTesting
LocalSession loadUnsafe(UUID sessionId)
{
    String query = "SELECT * FROM %s.%s WHERE parent_id=?";
    UntypedResultSet result = QueryProcessor.executeInternal(String.format(query, keyspace, table), sessionId);
    if (result.isEmpty())
        return null;

    UntypedResultSet.Row row = result.one();
    return load(row);
}
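The isEmpty()/one() sequence above is the usual pattern for an optional single-row internal read. As a hedged illustration of the same pattern, here is a sketch that reads this node's host id from the system.local table; the surrounding class is illustrative only and assumes it runs in-process on a started Cassandra node.

import java.util.UUID;
import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.cql3.UntypedResultSet;

public class SingleRowReadSketch
{
    // Sketch only: executeInternal requires a running node's local state
    static UUID localHostId()
    {
        UntypedResultSet result = QueryProcessor.executeInternal("SELECT host_id FROM system.local");
        if (result.isEmpty())
            return null;

        UntypedResultSet.Row row = result.one();
        return row.getUUID("host_id");
    }
}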
Use of org.apache.cassandra.cql3.UntypedResultSet.Row in project cassandra by apache.
Class SystemDistributedKeyspace, method viewStatus.
public static Map<UUID, String> viewStatus(String keyspace, String view)
{
    String query = "SELECT host_id, status FROM %s.%s WHERE keyspace_name = ? AND view_name = ?";
    UntypedResultSet results;
    try
    {
        results = QueryProcessor.execute(format(query, SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, VIEW_BUILD_STATUS),
                                         ConsistencyLevel.ONE,
                                         keyspace,
                                         view);
    }
    catch (Exception e)
    {
        return Collections.emptyMap();
    }

    Map<UUID, String> status = new HashMap<>();
    for (UntypedResultSet.Row row : results)
    {
        status.put(row.getUUID("host_id"), row.getString("status"));
    }
    return status;
}
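A short usage sketch follows; the keyspace and view names are hypothetical, and the call assumes it runs inside a node process where the distributed system keyspace is reachable.

import java.util.Map;
import java.util.UUID;
// import of SystemDistributedKeyspace omitted: its package varies across Cassandra versions

public class ViewStatusSketch
{
    static void printViewBuildStatus()
    {
        // "ks" and "mv_by_user" are illustrative names, not part of the codebase
        Map<UUID, String> statuses = SystemDistributedKeyspace.viewStatus("ks", "mv_by_user");
        for (Map.Entry<UUID, String> e : statuses.entrySet())
            System.out.printf("host %s: %s%n", e.getKey(), e.getValue());
    }
}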
Use of org.apache.cassandra.cql3.UntypedResultSet.Row in project cassandra by apache.
Class StressCQLSSTableWriter, method rawAddRow.
/**
 * Adds a new row to the writer given already serialized values.
 * <p>
 * The varargs overload {@code rawAddRow(ByteBuffer... values)} delegates to this method
 * via {@code rawAddRow(Arrays.asList(values))}.
 *
 * @param values the row values (corresponding to the bind variables of the
 * insertion statement used when creating this writer) as binary.
 * @return this writer.
 */
public StressCQLSSTableWriter rawAddRow(List<ByteBuffer> values) throws InvalidRequestException, IOException
{
    if (values.size() != boundNames.size())
        throw new InvalidRequestException(String.format("Invalid number of arguments, expecting %d values but got %d",
                                                        boundNames.size(), values.size()));

    QueryOptions options = QueryOptions.forInternalCalls(null, values);
    List<ByteBuffer> keys = insert.buildPartitionKeyNames(options);
    SortedSet<Clustering> clusterings = insert.createClustering(options);

    // timestamp in microseconds
    long now = System.currentTimeMillis() * 1000;
    // Note that we ask indexes not to validate values, because validation triggers a call to
    // Keyspace.open, which forces a lot of initialization that we don't want.
    UpdateParameters params = new UpdateParameters(insert.metadata(),
                                                   insert.updatedColumns(),
                                                   options,
                                                   insert.getTimestamp(now, options),
                                                   insert.getTimeToLive(options),
                                                   Collections.<DecoratedKey, Partition>emptyMap());
    try
    {
        for (ByteBuffer key : keys)
        {
            for (Clustering clustering : clusterings)
                insert.addUpdateForKey(writer.getUpdateFor(key), clustering, params);
        }
        return this;
    }
    catch (SSTableSimpleUnsortedWriter.SyncException e)
    {
        // If a BufferedWriter hit a problem writing to disk, the IOException has been
        // wrapped in a SyncException (see BufferedWriter below). Extract and rethrow it.
        throw (IOException) e.getCause();
    }
}
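rawAddRow expects each bind value already serialized to its CQL binary form; Cassandra's AbstractType.decompose is one way to produce those buffers. A sketch under the assumption of a writer prepared for a hypothetical INSERT INTO ks.t (k, v) VALUES (?, ?) where k is text and v is int:

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import org.apache.cassandra.db.marshal.Int32Type;
import org.apache.cassandra.db.marshal.UTF8Type;
// import of StressCQLSSTableWriter omitted: it lives in the stress tool's source tree

public class RawAddRowSketch
{
    // writer is assumed prepared for: INSERT INTO ks.t (k, v) VALUES (?, ?)
    static void addOneRow(StressCQLSSTableWriter writer) throws Exception
    {
        List<ByteBuffer> values = Arrays.asList(UTF8Type.instance.decompose("key-1"), // text k
                                                Int32Type.instance.decompose(42));    // int v
        writer.rawAddRow(values);
    }
}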
Use of org.apache.cassandra.cql3.UntypedResultSet.Row in project cassandra by apache.
Class BatchlogManager, method processBatchlogEntries.
private void processBatchlogEntries(UntypedResultSet batches, int pageSize, RateLimiter rateLimiter)
{
    int positionInPage = 0;
    ArrayList<ReplayingBatch> unfinishedBatches = new ArrayList<>(pageSize);

    Set<InetAddress> hintedNodes = new HashSet<>();
    Set<UUID> replayedBatches = new HashSet<>();

    // Send out batches for replay without waiting for them, so that one stuck batch doesn't affect others
    for (UntypedResultSet.Row row : batches)
    {
        UUID id = row.getUUID("id");
        int version = row.getInt("version");
        try
        {
            ReplayingBatch batch = new ReplayingBatch(id, version, row.getList("mutations", BytesType.instance));
            if (batch.replay(rateLimiter, hintedNodes) > 0)
            {
                unfinishedBatches.add(batch);
            }
            else
            {
                // no write mutations were sent (either expired or all CFs involved truncated)
                remove(id);
                ++totalBatchesReplayed;
            }
        }
        catch (IOException e)
        {
            logger.warn("Skipped batch replay of {} due to {}", id, e);
            remove(id);
        }

        if (++positionInPage == pageSize)
        {
            // We have reached the end of a page. To avoid keeping more than a page of mutations
            // in memory, finish processing the page before requesting the next row.
            finishAndClearBatches(unfinishedBatches, hintedNodes, replayedBatches);
            positionInPage = 0;
        }
    }

    finishAndClearBatches(unfinishedBatches, hintedNodes, replayedBatches);

    // to preserve batch guarantees, we must ensure that hints (if any) have made it to disk
    // before deleting the batches
    HintsService.instance.flushAndFsyncBlockingly(transform(hintedNodes, StorageService.instance::getHostIdForEndpoint));

    // once all generated hints are fsynced, actually delete the batches
    replayedBatches.forEach(BatchlogManager::remove);
}
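Independent of the batchlog specifics, the control flow above is a chunked drain: kick off work per row, and at every pageSize boundary wait for the in-flight work before reading further, so at most one page of mutations is ever held in memory. A generic, self-contained sketch of that pattern (all names are illustrative, not Cassandra API):

import java.util.ArrayList;
import java.util.List;

public class PagedDrainSketch
{
    static void drain(Iterable<String> rows, int pageSize)
    {
        List<String> inFlight = new ArrayList<>(pageSize);
        int positionInPage = 0;

        for (String row : rows)
        {
            inFlight.add(row); // in the real code this kicks off an asynchronous replay
            if (++positionInPage == pageSize)
            {
                finish(inFlight); // page boundary: wait for the page's work, then clear it
                positionInPage = 0;
            }
        }
        finish(inFlight); // drain the final, possibly partial page
    }

    // stand-in for finishAndClearBatches: wait for completion, then clear the list
    static void finish(List<String> inFlight)
    {
        inFlight.clear();
    }
}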