Example usage of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: the rawAddRow(Map) method of the CQLSSTableWriter class.
/**
 * Adds a new row to the writer given already serialized values.
 * <p>
 * This is equivalent to the other rawAddRow methods, but takes a map whose
 * keys are the names of the columns to add instead of taking a list of the
 * values in the order of the insert statement used during construction of
 * this writer.
 *
 * @param values a map of column name to column values representing the new
 * row to add. Note that if a column is not part of the map, its value will
 * be {@code null}. If the map contains keys that do not correspond to one
 * of the columns of the insert statement used when creating this writer,
 * the corresponding value is ignored.
 * @return this writer.
 */
public CQLSSTableWriter rawAddRow(Map<String, ByteBuffer> values) throws InvalidRequestException, IOException {
    // Build the value list in bind-marker order over ALL bound columns, not
    // min(values.size(), boundNames.size()): with the min-based bound, a map
    // missing some columns produced a short list and the List overload threw
    // InvalidRequestException, contradicting the documented null behavior.
    int size = boundNames.size();
    List<ByteBuffer> rawValues = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
        ColumnSpecification spec = boundNames.get(i);
        // Map.get returns null for absent columns, which is exactly the
        // documented contract; extra map keys are simply never read.
        rawValues.add(values.get(spec.name.toString()));
    }
    return rawAddRow(rawValues);
}
Example usage of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: the rawAddRow(List) method of the CQLSSTableWriter class.
/**
 * Adds a new row to the writer given already serialized values.
 * <p>
 * This is a shortcut for {@code rawAddRow(Arrays.asList(values))}.
 *
 * @param values the row values (corresponding to the bind variables of the
 * insertion statement used when creating by this writer) as binary.
 * @return this writer.
 */
public CQLSSTableWriter rawAddRow(List<ByteBuffer> values) throws InvalidRequestException, IOException {
    if (values.size() != boundNames.size())
        throw new InvalidRequestException(String.format("Invalid number of arguments, expecting %d values but got %d", boundNames.size(), values.size()));

    QueryOptions queryOptions = QueryOptions.forInternalCalls(null, values);
    List<ByteBuffer> partitionKeys = insert.buildPartitionKeyNames(queryOptions);
    SortedSet<Clustering> clusteringSet = insert.createClustering(queryOptions);

    // Current wall-clock time scaled to microseconds, the granularity CQL timestamps use.
    long nowMicros = System.currentTimeMillis() * 1000;

    UpdateParameters updateParams = new UpdateParameters(insert.metadata,
                                                         insert.updatedColumns(),
                                                         queryOptions,
                                                         insert.getTimestamp(nowMicros, queryOptions),
                                                         insert.getTimeToLive(queryOptions),
                                                         Collections.<DecoratedKey, Partition>emptyMap());
    try {
        // Apply the update for every (partition key, clustering) combination.
        for (ByteBuffer partitionKey : partitionKeys) {
            for (Clustering clustering : clusteringSet) {
                insert.addUpdateForKey(writer.getUpdateFor(partitionKey), clustering, updateParams);
            }
        }
        return this;
    } catch (SSTableSimpleUnsortedWriter.SyncException e) {
        // The buffered writer wraps IOExceptions in a SyncException; unwrap and rethrow the original IOE.
        throw (IOException) e.getCause();
    }
}
Example usage of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: the createDefinitionFromRow method of the NativeSSTableLoaderClient class.
// Builds a ColumnMetadata for one column from its system-schema row,
// resolving the CQL type (reversed for DESC clustering order), the
// interned identifier, position, and column kind.
private static ColumnMetadata createDefinitionFromRow(Row row, String keyspace, String table, Types types) {
    ClusteringOrder clusteringOrder = ClusteringOrder.valueOf(row.getString("clustering_order").toUpperCase());
    AbstractType<?> columnType = CQLTypeParser.parse(keyspace, row.getString("type"), types);
    // DESC clustering columns are represented with a reversed comparator type.
    if (clusteringOrder == ClusteringOrder.DESC)
        columnType = ReversedType.getInstance(columnType);
    ColumnIdentifier identifier = ColumnIdentifier.getInterned(columnType, row.getBytes("column_name_bytes"), row.getString("column_name"));
    ColumnMetadata.Kind columnKind = ColumnMetadata.Kind.valueOf(row.getString("kind").toUpperCase());
    return new ColumnMetadata(keyspace, table, identifier, columnType, row.getInt("position"), columnKind);
}
Example usage of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: the testPrecedenceAndParentheses method of the OperationFctsTest class.
// Verifies CQL arithmetic operator precedence and parenthesized grouping,
// both in selection clauses (including column-name rendering and aliases)
// and in WHERE-clause expressions. Fixture row: a=2, b=5, c=25, d=4.
@Test
public void testPrecedenceAndParentheses() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY(a, b))");
    // Was "%S": Java's uppercase-string conversion, which uppercases the
    // formatted keyspace.table name. It only worked because unquoted CQL
    // identifiers are case-insensitive; "%s" matches every other query here.
    execute("INSERT INTO %s (a, b, c, d) VALUES (2, 5, 25, 4)");
    // Division binds tighter than addition/subtraction: 2 - 25/5 + 4 = 1.
    UntypedResultSet rs = execute("SELECT a - c / b + d FROM %s");
    assertColumnNames(rs, "a - c / b + d");
    assertRows(rs, row(1));
    rs = execute("SELECT (c - b) / a + d FROM %s");
    assertColumnNames(rs, "(c - b) / a + d");
    assertRows(rs, row(14));
    // Left-associative division: (25 / 2) / 5 = 2 with integer truncation.
    rs = execute("SELECT c / a / b FROM %s");
    assertColumnNames(rs, "c / a / b");
    assertRows(rs, row(2));
    rs = execute("SELECT c / b / d FROM %s");
    assertColumnNames(rs, "c / b / d");
    assertRows(rs, row(1));
    // "%%" is String.format escaping for a literal '%' (modulo) in the query.
    rs = execute("SELECT (c - a) %% d / a FROM %s");
    assertColumnNames(rs, "(c - a) % d / a");
    assertRows(rs, row(1));
    rs = execute("SELECT (c - a) %% d / a + d FROM %s");
    assertColumnNames(rs, "(c - a) % d / a + d");
    assertRows(rs, row(5));
    rs = execute("SELECT -(c - a) %% d / a + d FROM %s");
    assertColumnNames(rs, "-(c - a) % d / a + d");
    assertRows(rs, row(3));
    rs = execute("SELECT (-c - a) %% d / a + d FROM %s");
    assertColumnNames(rs, "(-c - a) % d / a + d");
    assertRows(rs, row(3));
    // Unparenthesized: modulo/division bind before the subtraction and addition.
    rs = execute("SELECT c - a %% d / a + d FROM %s");
    assertColumnNames(rs, "c - a % d / a + d");
    assertRows(rs, row(28));
    // Cast applies to the fully parenthesized expression.
    rs = execute("SELECT (int)((c - a) %% d / (a + d)) FROM %s");
    assertColumnNames(rs, "(int)((c - a) % d / (a + d))");
    assertRows(rs, row(0));
    // test with aliases
    rs = execute("SELECT (int)((c - a) %% d / (a + d)) as result FROM %s");
    assertColumnNames(rs, "result");
    assertRows(rs, row(0));
    rs = execute("SELECT c / a / b as divisions FROM %s");
    assertColumnNames(rs, "divisions");
    assertRows(rs, row(2));
    // Arithmetic on bind markers in the WHERE clause: 20/2 - 5 = 5 and 20/(2+2) = 5.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND b = (int) ? / 2 - 5", 2, 20), row(2, 5, 25, 4));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND b = (int) ? / (2 + 2)", 2, 20), row(2, 5, 25, 4));
}
Example usage of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: the testExpiringColumnExpiration method of the RowTest class.
// Writes a single cell with a 1-second TTL, then checks that a read dated
// before expiry returns the value while a read dated after expiry filters
// the row out entirely.
@Test
public void testExpiringColumnExpiration() throws IOException {
    int ttlSeconds = 1;
    ColumnMetadata column = metadata.getColumn(new ColumnIdentifier("a", true));
    ByteBuffer serialized = ((AbstractType) column.cellValueType()).decompose("a1");
    Cell expiringCell = BufferCell.expiring(column, 0, ttlSeconds, nowInSeconds, serialized);
    Row writtenRow = BTreeRow.singleCellRow(metadata.comparator.make("c1"), expiringCell);
    new Mutation(PartitionUpdate.singleRowUpdate(metadata, dk, writtenRow)).applyUnsafe();

    // Read with a nowInSeconds before expiry: the PartitionIterator still
    // includes the row we just wrote, with its original value.
    Row fetched = Util.getOnlyRow(Util.cmd(cfs, dk).includeRow("c1").withNowInSeconds(nowInSeconds).build());
    assertEquals("a1", ByteBufferUtil.string(fetched.getCell(column).value()));

    // Read with a nowInSeconds past expiry: the cell is dead, so the row is
    // filtered and the PartitionIterator is empty.
    Util.assertEmpty(Util.cmd(cfs, dk).includeRow("c1").withNowInSeconds(nowInSeconds + ttlSeconds + 1).build());
}
Aggregations