Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
From the class AbstractReadExecutor, method getReadExecutor.
/**
* @return an executor appropriate for the configured speculative read policy
*/
public static AbstractReadExecutor getReadExecutor(SinglePartitionReadCommand command, ConsistencyLevel consistencyLevel, long queryStartNanoTime) throws UnavailableException {
    Keyspace keyspace = Keyspace.open(command.metadata().keyspace);
    List<InetAddress> allReplicas = StorageProxy.getLiveSortedEndpoints(keyspace, command.partitionKey());
    // 11980: Excluding EACH_QUORUM reads from potential RR, so that we do not miscount DC responses
    ReadRepairDecision repairDecision = consistencyLevel == ConsistencyLevel.EACH_QUORUM ? ReadRepairDecision.NONE : newReadRepairDecision(command.metadata());
    List<InetAddress> targetReplicas = consistencyLevel.filterForQuery(keyspace, allReplicas, repairDecision);
    // Throw UAE early if we don't have enough replicas.
    consistencyLevel.assureSufficientLiveNodes(keyspace, targetReplicas);
    if (repairDecision != ReadRepairDecision.NONE) {
        Tracing.trace("Read-repair {}", repairDecision);
        ReadRepairMetrics.attempted.mark();
    }
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(command.metadata().id);
    SpeculativeRetryParam retry = cfs.metadata().params.speculativeRetry;
    // 11980: Disable speculative retry if using EACH_QUORUM in order to prevent miscounting DC responses
    if (retry.equals(SpeculativeRetryParam.NONE) || consistencyLevel == ConsistencyLevel.EACH_QUORUM || consistencyLevel.blockFor(keyspace) == allReplicas.size())
        return new NeverSpeculatingReadExecutor(keyspace, command, consistencyLevel, targetReplicas, queryStartNanoTime);
    if (targetReplicas.size() == allReplicas.size()) {
        // We are going to contact every live replica anyway, so ask for two full data requests instead of one full and one digest
        // (same amount of requests in total, but we turn 1 digest request into a full blown data request).
        return new AlwaysSpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas, queryStartNanoTime);
    }
    // RRD.NONE or RRD.DC_LOCAL w/ multiple DCs.
    InetAddress extraReplica = allReplicas.get(targetReplicas.size());
    // With repair decision DC_LOCAL, allReplicas and targetReplicas may be in different order, so
    // we might have to find a replacement that's not already in targetReplicas.
    if (repairDecision == ReadRepairDecision.DC_LOCAL && targetReplicas.contains(extraReplica)) {
        for (InetAddress address : allReplicas) {
            if (!targetReplicas.contains(address)) {
                extraReplica = address;
                break;
            }
        }
    }
    targetReplicas.add(extraReplica);
    if (retry.equals(SpeculativeRetryParam.ALWAYS))
        return new AlwaysSpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas, queryStartNanoTime);
    else
        // PERCENTILE or CUSTOM.
        return new SpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas, queryStartNanoTime);
}
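The method is effectively a factory: a NONE speculative-retry setting, an EACH_QUORUM read, or a block-for count equal to the replica count yields a NeverSpeculatingReadExecutor; a target set that already covers every live replica yields an AlwaysSpeculatingReadExecutor; otherwise one extra replica is appended and the speculative-retry parameter picks the executor. For context, a minimal sketch of how a caller might drive the returned executor; the method names follow the 3.x AbstractReadExecutor API, but the wiring here is illustrative, not the verbatim StorageProxy read path.

AbstractReadExecutor executor = AbstractReadExecutor.getReadExecutor(command, ConsistencyLevel.QUORUM, System.nanoTime());
// Send the initial data and digest requests to the target replicas.
executor.executeAsync();
// A no-op for NeverSpeculatingReadExecutor; may send one extra data request otherwise.
executor.maybeTryAdditionalReplicas();
// Block until blockFor(keyspace) responses have arrived (or the read times out).
PartitionIterator result = executor.get();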
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
From the class Schema, method getColumnFamilyStoreInstance.
public ColumnFamilyStore getColumnFamilyStoreInstance(TableId id) {
    TableMetadata metadata = getTableMetadata(id);
    if (metadata == null)
        return null;
    Keyspace instance = getKeyspaceInstance(metadata.keyspace);
    if (instance == null)
        return null;
    return instance.hasColumnFamilyStore(metadata.id) ? instance.getColumnFamilyStore(metadata.id) : null;
}
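Both lookups above can return null (an unknown table id, or a keyspace that has not been opened locally), so callers must null-check rather than assume the store exists. A hypothetical caller sketch, where the logger is an assumed field:

ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(id);
if (cfs == null) {
    // The table is unknown or its keyspace is not initialized on this node; skip it.
    logger.debug("No ColumnFamilyStore instance for table id {}", id);
    return;
}
cfs.forceBlockingFlush(); // or any other per-table operation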
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
From the class ViewTest, method complexTimestampUpdateTest.
public void complexTimestampUpdateTest(boolean flush) throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY (a, b))");
    execute("USE " + keyspace());
    executeNet(protocolVersion, "USE " + keyspace());
    Keyspace ks = Keyspace.open(keyspace());
    createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, a, b)");
    ks.getColumnFamilyStore("mv").disableAutoCompaction();
    // Set initial values at TS=0, leaving e null, and verify the view
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, d) VALUES (0, 0, 1, 0) USING TIMESTAMP 0");
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());
    // Update c's timestamp to TS=2
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());
    // Change c's value at TS=3: tombstones the c=1 view row and adds a c=0 record
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? and b = ? ", 0, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0));
    if (flush) {
        ks.getColumnFamilyStore("mv").forceMajorCompaction();
        FBUtilities.waitOnFutures(ks.flush());
    }
    // Change c's value back to 1 at TS=4, check we can see d
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 4 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
    if (flush) {
        ks.getColumnFamilyStore("mv").forceMajorCompaction();
        FBUtilities.waitOnFutures(ks.flush());
    }
    assertRows(execute("SELECT d,e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, null));
    // Add e value at TS=1
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 1 SET e = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d,e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, 1));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());
    // Change d value at TS=2
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET d = ? WHERE a = ? and b = ? ", 2, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(2));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());
    // Change d value at TS=3
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(1));
    // Tombstone the row
    executeNet(protocolVersion, "DELETE FROM %s WHERE a = ? and b = ?", 0, 0);
    assertRows(execute("SELECT d from mv"));
    // Add the row back without d
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c) VALUES (0, 0, 1)");
    // Make sure d doesn't pop back in
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row((Object) null));
    // New partition:
    // insert a row with timestamp 0
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?) USING TIMESTAMP 0", 1, 0, 0, 0, 0);
    // overwrite pk and e with timestamp 1, but don't overwrite d
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, e) VALUES (?, ?, ?, ?) USING TIMESTAMP 1", 1, 0, 0, 0);
    // delete with timestamp 0 (which should only delete d)
    executeNet(protocolVersion, "DELETE FROM %s USING TIMESTAMP 0 WHERE a = ? AND b = ?", 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 0, 1, 0), row(1, 0, 0, null, 0));
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? AND b = ?", 1, 1, 0);
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? AND b = ?", 0, 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 0, 1, 0), row(1, 0, 0, null, 0));
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? AND b = ?", 0, 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 0, 1, 0), row(1, 0, 0, 0, 0));
}
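The scenario takes a flush flag because view timestamp reconciliation must produce identical results whether the data still sits in memtables or has been flushed to sstables. The CQLTester-style @Test wrappers that invoke it presumably look like this (wrapper names assumed):

@Test
public void complexTimestampUpdateTestWithFlush() throws Throwable {
    complexTimestampUpdateTest(true);
}

@Test
public void complexTimestampUpdateTestWithoutFlush() throws Throwable {
    complexTimestampUpdateTest(false);
}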
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
From the class SSTableMetadataTest, method testMaxMinComposites.
@Test
public void testMaxMinComposites() throws CharacterCodingException, ExecutionException, InterruptedException {
    /*
    Creates two sstables, with columns like this:
    ---------------------
    k  |a0:9|a1:8|..|a9:0
    ---------------------
    and
    ---------------------
    k2 |b0:9|b1:8|..|b9:0
    ---------------------
    meaning the max clustering values are b9 and 9, and the min are a0 and 0.
    */
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("StandardComposite2");
    for (int i = 0; i < 10; i++) {
        new RowUpdateBuilder(cfs.metadata(), 0, "k").clustering("a" + (9 - i), getBytes(i)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    cfs.forceBlockingFlush();
    for (int i = 0; i < 10; i++) {
        new RowUpdateBuilder(cfs.metadata(), 0, "k2").clustering("b" + (9 - i), getBytes(i)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    assertEquals(1, cfs.getLiveSSTables().size());
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals("b9", ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)));
        assertEquals(9, ByteBufferUtil.toInt(sstable.getSSTableMetadata().maxClusteringValues.get(1)));
        assertEquals("a0", ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)));
        assertEquals(0, ByteBufferUtil.toInt(sstable.getSSTableMetadata().minClusteringValues.get(1)));
    }
}
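The min/max clustering values asserted here are what lets the read path skip sstables whose clustering range cannot intersect a queried slice. A sketch of that idea using only the metadata accessors shown above; mayContainSlice is a hypothetical helper that compares just the first clustering component, assuming string clusterings as in this test:

static boolean mayContainSlice(SSTableReader sstable, String sliceStart, String sliceEnd) throws CharacterCodingException {
    StatsMetadata stats = sstable.getSSTableMetadata();
    String min = ByteBufferUtil.string(stats.minClusteringValues.get(0));
    String max = ByteBufferUtil.string(stats.maxClusteringValues.get(0));
    // The sstable can be skipped when the requested slice lies entirely outside [min, max].
    return sliceStart.compareTo(max) <= 0 && sliceEnd.compareTo(min) >= 0;
}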
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
From the class AlterTableStatement, method announceMigration.
public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws RequestValidationException {
    TableMetadata current = Schema.instance.validateTable(keyspace(), columnFamily());
    if (current.isView())
        throw new InvalidRequestException("Cannot use ALTER TABLE on Materialized View");
    TableMetadata.Builder builder = current.unbuild();
    ColumnIdentifier columnName = null;
    ColumnMetadata def = null;
    CQL3Type.Raw dataType = null;
    boolean isStatic = false;
    CQL3Type validator = null;
    List<ViewMetadata> viewUpdates = new ArrayList<>();
    Iterable<ViewMetadata> views = View.findAll(keyspace(), columnFamily());
    switch (oType) {
        case ALTER:
            throw new InvalidRequestException("Altering of types is not allowed");
        case ADD:
            if (current.isDense())
                throw new InvalidRequestException("Cannot add new column to a COMPACT STORAGE table");
            for (AlterTableStatementColumn colData : colNameList) {
                columnName = colData.getColumnName().getIdentifier(current);
                def = builder.getColumn(columnName);
                dataType = colData.getColumnType();
                assert dataType != null;
                isStatic = colData.getStaticType();
                validator = dataType.prepare(keyspace());
                if (isStatic) {
                    if (!current.isCompound())
                        throw new InvalidRequestException("Static columns are not allowed in COMPACT STORAGE tables");
                    if (current.clusteringColumns().isEmpty())
                        throw new InvalidRequestException("Static columns are only useful (and thus allowed) if the table has at least one clustering column");
                }
                if (def != null) {
                    switch (def.kind) {
                        case PARTITION_KEY:
                        case CLUSTERING:
                            throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with a PRIMARY KEY part", columnName));
                        default:
                            throw new InvalidRequestException(String.format("Invalid column name %s because it conflicts with an existing column", columnName));
                    }
                }
                // Cannot re-add a dropped counter column. See #7831.
                if (current.isCounter() && current.getDroppedColumn(columnName.bytes) != null)
                    throw new InvalidRequestException(String.format("Cannot re-add previously dropped counter column %s", columnName));
                AbstractType<?> type = validator.getType();
                if (type.isCollection() && type.isMultiCell()) {
                    if (!current.isCompound())
                        throw new InvalidRequestException("Cannot use non-frozen collections in COMPACT STORAGE tables");
                    if (current.isSuper())
                        throw new InvalidRequestException("Cannot use non-frozen collections with super column families");
                    // If there used to be a non-frozen collection column with the same name (that has been dropped),
                    // we could still have some data using the old type, and so we can't allow adding a collection
                    // with the same name unless the types are compatible (see #6276).
                    DroppedColumn dropped = current.droppedColumns.get(columnName.bytes);
                    if (dropped != null && dropped.column.type instanceof CollectionType && dropped.column.type.isMultiCell() && !type.isCompatibleWith(dropped.column.type)) {
                        String message = String.format("Cannot add a collection with the name %s because a collection with the same name" + " and a different type (%s) has already been used in the past", columnName, dropped.column.type.asCQL3Type());
                        throw new InvalidRequestException(message);
                    }
                }
                builder.addColumn(isStatic ? ColumnMetadata.staticColumn(current, columnName.bytes, type) : ColumnMetadata.regularColumn(current, columnName.bytes, type));
                // A column added to the base table must be added to any view that includes all
                // of the base table's columns as well.
                if (!isStatic)
                    for (ViewMetadata view : views)
                        if (view.includeAllColumns)
                            viewUpdates.add(view.withAddedRegularColumn(ColumnMetadata.regularColumn(view.metadata, columnName.bytes, type)));
            }
            break;
        case DROP:
            if (!current.isCQLTable())
                throw new InvalidRequestException("Cannot drop columns from a non-CQL3 table");
            for (AlterTableStatementColumn colData : colNameList) {
                columnName = colData.getColumnName().getIdentifier(current);
                def = builder.getColumn(columnName);
                if (def == null)
                    throw new InvalidRequestException(String.format("Column %s was not found in table %s", columnName, columnFamily()));
                switch (def.kind) {
                    case PARTITION_KEY:
                    case CLUSTERING:
                        throw new InvalidRequestException(String.format("Cannot drop PRIMARY KEY part %s", columnName));
                    case REGULAR:
                    case STATIC:
                        builder.removeRegularOrStaticColumn(def.name);
                        builder.recordColumnDrop(def, deleteTimestamp == null ? queryState.getTimestamp() : deleteTimestamp);
                        break;
                }
                // If the dropped column is required by any secondary indexes
                // we reject the operation, as the indexes must be dropped first.
                Indexes allIndexes = current.indexes;
                if (!allIndexes.isEmpty()) {
                    ColumnFamilyStore store = Keyspace.openAndGetStore(current);
                    Set<IndexMetadata> dependentIndexes = store.indexManager.getDependentIndexes(def);
                    if (!dependentIndexes.isEmpty()) {
                        throw new InvalidRequestException(String.format("Cannot drop column %s because it has " + "dependent secondary indexes (%s)", def, dependentIndexes.stream().map(i -> i.name).collect(Collectors.joining(","))));
                    }
                }
                // If a column is dropped which is included in a view, we don't allow the drop to take place.
                boolean rejectAlter = false;
                StringBuilder viewNames = new StringBuilder();
                for (ViewMetadata view : views) {
                    if (!view.includes(columnName))
                        continue;
                    if (rejectAlter)
                        viewNames.append(',');
                    rejectAlter = true;
                    viewNames.append(view.name);
                }
                if (rejectAlter)
                    throw new InvalidRequestException(String.format("Cannot drop column %s, depended on by materialized views (%s.{%s})", columnName.toString(), keyspace(), viewNames.toString()));
            }
            break;
        case OPTS:
            if (attrs == null)
                throw new InvalidRequestException("ALTER TABLE WITH invoked, but no parameters found");
            attrs.validate();
            TableParams params = attrs.asAlteredTableParams(current.params);
            if (!Iterables.isEmpty(views) && params.gcGraceSeconds == 0) {
                throw new InvalidRequestException("Cannot alter gc_grace_seconds of the base table of a " + "materialized view to 0, since this value is used to TTL " + "undelivered updates. Setting gc_grace_seconds too low might " + "cause undelivered updates to expire " + "before being replayed.");
            }
            if (current.isCounter() && params.defaultTimeToLive > 0)
                throw new InvalidRequestException("Cannot set default_time_to_live on a table with counters");
            builder.params(params);
            break;
        case RENAME:
            for (Map.Entry<ColumnMetadata.Raw, ColumnMetadata.Raw> entry : renames.entrySet()) {
                ColumnIdentifier from = entry.getKey().getIdentifier(current);
                ColumnIdentifier to = entry.getValue().getIdentifier(current);
                def = current.getColumn(from);
                if (def == null)
                    throw new InvalidRequestException(String.format("Cannot rename unknown column %s in table %s", from, current.name));
                if (current.getColumn(to) != null)
                    throw new InvalidRequestException(String.format("Cannot rename column %s to %s in table %s; another column of that name already exists", from, to, current.name));
                if (!def.isPrimaryKeyColumn())
                    throw new InvalidRequestException(String.format("Cannot rename non PRIMARY KEY part %s", from));
                if (!current.indexes.isEmpty()) {
                    ColumnFamilyStore store = Keyspace.openAndGetStore(current);
                    Set<IndexMetadata> dependentIndexes = store.indexManager.getDependentIndexes(def);
                    if (!dependentIndexes.isEmpty())
                        throw new InvalidRequestException(String.format("Cannot rename column %s because it has " + "dependent secondary indexes (%s)", from, dependentIndexes.stream().map(i -> i.name).collect(Collectors.joining(","))));
                }
                builder.renamePrimaryKeyColumn(from, to);
                // If the view includes a renamed column, it must be renamed in the view table and the definition.
                for (ViewMetadata view : views) {
                    if (!view.includes(from))
                        continue;
                    ColumnIdentifier viewFrom = entry.getKey().getIdentifier(view.metadata);
                    ColumnIdentifier viewTo = entry.getValue().getIdentifier(view.metadata);
                    viewUpdates.add(view.renamePrimaryKeyColumn(viewFrom, viewTo));
                }
            }
            break;
    }
    // FIXME: Should really be a single announce for the table and views.
    MigrationManager.announceTableUpdate(builder.build(), isLocalOnly);
    for (ViewMetadata viewUpdate : viewUpdates)
        MigrationManager.announceViewUpdate(viewUpdate, isLocalOnly);
    return new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
}
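As a quick map from CQL to the branches of the switch above (illustrative statement forms only, against a hypothetical table t):

// ALTER  : ALTER TABLE t ALTER c TYPE int;                 -- always rejected here
// ADD    : ALTER TABLE t ADD c int;                        -- may also update include-all views
// DROP   : ALTER TABLE t DROP c;                           -- rejected if indexes or views depend on c
// OPTS   : ALTER TABLE t WITH gc_grace_seconds = 864000;   -- alters validated table params
// RENAME : ALTER TABLE t RENAME pk1 TO pk2;                -- primary key columns only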