use of com.datastax.driver.core.RegularStatement in project newts by OpenNMS.
In class CassandraIndexer, the method definitelyRemoveMetricName:
private void definitelyRemoveMetricName(List<RegularStatement> statement, Context context, Resource resource, ConsistencyLevel writeConsistency) {
    RegularStatement delete = QueryBuilder.delete()
            .from(Constants.Schema.T_METRICS)
            .where(QueryBuilder.eq(Constants.Schema.C_METRICS_CONTEXT, context.getId()))
            .and(QueryBuilder.eq(Constants.Schema.C_METRICS_RESOURCE, resource.getId()));
    delete.setConsistencyLevel(writeConsistency);
    statement.add(delete);
}
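For reference, here is a minimal standalone sketch of the same delete-building pattern, assuming the DataStax Java driver 3.x QueryBuilder API; the table name, column names and consistency level are placeholders, not the Newts schema constants used above.

import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class DeleteStatementSketch {

    // Build a DELETE restricted by two equality predicates and pin its consistency level,
    // mirroring the pattern in definitelyRemoveMetricName() above. Table and column names
    // are placeholders.
    public static RegularStatement buildDelete(String contextId, String resourceId) {
        RegularStatement delete = QueryBuilder.delete()
                .from("metrics")
                .where(QueryBuilder.eq("context", contextId))
                .and(QueryBuilder.eq("resource", resourceId));
        delete.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
        return delete;
    }
}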
use of com.datastax.driver.core.RegularStatement in project newts by OpenNMS.
In class StatementUtils, the method getStatements:
public static List<Statement> getStatements(ContextConfigurations contextConfigurations, int maxBatchSize, Set<StatementGenerator> generators) {
    List<Statement> statementsToExecute = Lists.newArrayList();
    Map<String, List<Statement>> statementsByKey = Maps.newHashMap();
    for (StatementGenerator generator : generators) {
        Statement statement = generator.toStatement().setConsistencyLevel(contextConfigurations.getWriteConsistency(generator.getContext()));
        String key = generator.getKey();
        if (key == null) {
            // Don't try batching these
            statementsToExecute.add(statement);
            continue;
        }
        // Group these by key
        List<Statement> statementsForKey = statementsByKey.get(key);
        if (statementsForKey == null) {
            statementsForKey = Lists.newArrayList();
            statementsByKey.put(key, statementsForKey);
        }
        statementsForKey.add(statement);
    }
    // Consolidate the grouped statements into batches
    for (List<Statement> statementsForKey : statementsByKey.values()) {
        for (List<Statement> partition : Lists.partition(statementsForKey, maxBatchSize)) {
            statementsToExecute.add(unloggedBatch(partition.toArray(new RegularStatement[partition.size()])));
        }
    }
    return statementsToExecute;
}
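The consolidation step above leans on QueryBuilder.unloggedBatch and Guava's Lists.partition. Below is a hedged sketch of that step in isolation, assuming the DataStax Java driver 3.x and Guava; the class and method names are illustrative, not part of Newts.

import java.util.List;

import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.google.common.collect.Lists;

public class BatchingSketch {

    // Split the statements into chunks of at most maxBatchSize and wrap each chunk in an
    // UNLOGGED batch, mirroring the consolidation loop in getStatements() above.
    public static List<Statement> toUnloggedBatches(List<RegularStatement> statements, int maxBatchSize) {
        List<Statement> batches = Lists.newArrayList();
        for (List<RegularStatement> partition : Lists.partition(statements, maxBatchSize)) {
            batches.add(QueryBuilder.unloggedBatch(partition.toArray(new RegularStatement[0])));
        }
        return batches;
    }
}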
use of com.datastax.driver.core.RegularStatement in project pinpoint by naver.
In class CassandraStatementExecuteQueryInterceptor, the method before:
@Override
public void before(Object target, Object[] args) {
    if (isDebug) {
        logger.beforeInterceptor(target, args);
    }
    Trace trace = traceContext.currentTraceObject();
    if (trace == null) {
        return;
    }
    SpanEventRecorder recorder = trace.traceBlockBegin();
    try {
        DatabaseInfo databaseInfo = (target instanceof DatabaseInfoAccessor) ? ((DatabaseInfoAccessor) target)._$PINPOINT$_getDatabaseInfo() : null;
        if (databaseInfo == null) {
            databaseInfo = UnKnownDatabaseInfo.INSTANCE;
        }
        recorder.recordServiceType(databaseInfo.getExecuteQueryType());
        recorder.recordEndPoint(databaseInfo.getMultipleHost());
        recorder.recordDestinationId(databaseInfo.getDatabaseId());
        String sql;
        if (args[0] instanceof BoundStatement) {
            sql = ((BoundStatement) args[0]).preparedStatement().getQueryString();
        } else if (args[0] instanceof RegularStatement) {
            sql = ((RegularStatement) args[0]).getQueryString();
        } else {
            // otherwise the argument is a plain query string
            sql = (String) args[0];
        }
        ParsingResult parsingResult = traceContext.parseSql(sql);
        if (parsingResult != null) {
            ((ParsingResultAccessor) target)._$PINPOINT$_setParsingResult(parsingResult);
        } else {
            if (logger.isErrorEnabled()) {
                logger.error("sqlParsing fail. parsingResult is null sql:{}", sql);
            }
        }
        Map<Integer, String> bindValue = ((BindValueAccessor) target)._$PINPOINT$_getBindValue();
        // Extracting bind variables from an already-serialized statement is too risky
        if (bindValue != null && !bindValue.isEmpty()) {
            String bindString = toBindVariable(bindValue);
            recorder.recordSqlParsingResult(parsingResult, bindString);
        } else {
            recorder.recordSqlParsingResult(parsingResult);
        }
        recorder.recordApi(descriptor);
        clean(target);
    } catch (Exception e) {
        if (logger.isWarnEnabled()) {
            logger.warn(e.getMessage(), e);
        }
    }
}
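The type checks above decide where the CQL text lives. As a rough standalone illustration (not Pinpoint code), a helper along these lines could extract the query string from the intercepted argument, assuming the same DataStax Java driver 3.x types:

import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.RegularStatement;

public class QueryStringSketch {

    public static String extractQueryString(Object arg) {
        if (arg instanceof BoundStatement) {
            // A BoundStatement carries no CQL itself; read it from its PreparedStatement.
            return ((BoundStatement) arg).preparedStatement().getQueryString();
        } else if (arg instanceof RegularStatement) {
            // SimpleStatement and built statements expose their CQL directly.
            return ((RegularStatement) arg).getQueryString();
        }
        // Otherwise the argument is expected to be a raw CQL string.
        return (String) arg;
    }
}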
use of com.datastax.driver.core.RegularStatement in project cassandra-driver-mapping by valchkou.
In class SchemaSync, the method alterTableStatements:
/**
 * Compare TableMetadata against Entity metadata and generate alter statements if necessary.
 * <p>
 * Cannot alter clustered and primary key columns.
 *
 * @param keyspace the keyspace containing the table
 * @param session an active session, used to read the live TableMetadata
 * @param entityMetadata metadata for the entity class to generate statements for
 * @param syncOptions options controlling which columns may be added or dropped
 * @return a new {@code List<RegularStatement>}.
 */
private static <T> List<RegularStatement> alterTableStatements(String keyspace, Session session, EntityTypeMetadata entityMetadata, SyncOptions syncOptions) {
    boolean doNotAddCols = false;
    boolean doDropCols = true;
    if (syncOptions != null) {
        List<SyncOptionTypes> opts = syncOptions.getOptions(entityMetadata.getEntityClass());
        doNotAddCols = opts.contains(SyncOptionTypes.DoNotAddColumns);
        doDropCols = !opts.contains(SyncOptionTypes.DoNotDropColumns);
    }
    List<RegularStatement> statements = new ArrayList<RegularStatement>();
    // get EntityTypeMetadata
    String table = entityMetadata.getTableName();
    // get TableMetadata - requires a connection to Cassandra
    Cluster cluster = session.getCluster();
    KeyspaceMetadata keyspaceMetadata = cluster.getMetadata().getKeyspace(keyspace);
    TableMetadata tableMetadata = keyspaceMetadata.getTable(table);
    // build statements for new columns or columns with a changed datatype
    for (EntityFieldMetaData field : entityMetadata.getFields()) {
        String column = field.getColumnName();
        String fieldType = field.getDataType().name();
        ColumnMetadata columnMetadata = tableMetadata.getColumn(column);
        String colIndex = null;
        /*if (columnMetadata != null && columnMetadata.getIndex() != null) {
            colIndex = columnMetadata.getIndex().getName();
        }*/
        String fieldIndex = null;
        if (entityMetadata.getIndex(column) != null) {
            fieldIndex = entityMetadata.getIndex(column);
        }
        if (columnMetadata == null) {
            if (doNotAddCols)
                continue;
            // if the column does not exist in Cassandra, build an add-column statement
            String colType = fieldType;
            if (field.isGenericType()) {
                colType = field.getGenericDef();
            }
            AlterTable statement = new AlterTable.Builder().addColumn(keyspace, table, column, colType);
            statements.add(statement);
            if (fieldIndex != null) {
                statements.add(new DropIndex(column, fieldIndex));
                statements.add(new CreateIndex(keyspace, table, column, fieldIndex));
            }
        } else if (colIndex != null || fieldIndex != null) {
            if (colIndex == null) {
                statements.add(new CreateIndex(keyspace, table, column, fieldIndex));
            } else if (fieldIndex == null) {
                statements.add(new DropIndex(column, colIndex));
            } else if (!"".equals(fieldIndex) && !fieldIndex.equals(colIndex)) {
                statements.add(new DropIndex(column, colIndex));
                statements.add(new CreateIndex(keyspace, table, column, fieldIndex));
            }
        } else if (!fieldType.equals(columnMetadata.getType().getName().name())) {
            // can't change the datatype of clustering columns
            if (tableMetadata.getClusteringColumns().contains(columnMetadata)) {
                continue;
            }
            // can't change the datatype of primary key columns
            if (tableMetadata.getPrimaryKey().contains(columnMetadata)) {
                continue;
            }
            // drop index if any
            /*if (columnMetadata.getIndex() != null) {
                statements.add(new DropIndex(column, columnMetadata.getIndex().getName()));
            }*/
            // alter column datatype
            statements.add(new AlterTable.Builder().alterColumn(keyspace, table, column, fieldType));
            // create index if any
            if (entityMetadata.getIndex(column) != null) {
                statements.add(new CreateIndex(keyspace, table, column, entityMetadata.getIndex(column)));
            }
        }
    }
    // drop columns that exist in Cassandra but are no longer defined on the entity
    if (doDropCols) {
        for (ColumnMetadata colmeta : tableMetadata.getColumns()) {
            boolean exists = false;
            for (EntityFieldMetaData field : entityMetadata.getFields()) {
                if (colmeta.getName().equalsIgnoreCase(field.getColumnName())) {
                    exists = true;
                    break;
                }
            }
            if (!exists) {
                AlterTable statement = new AlterTable.Builder().dropColumn(keyspace, table, colmeta.getName());
                statements.add(statement);
            }
        }
    }
    return statements;
}
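The comparison above is driven entirely by the driver's schema metadata. A minimal sketch of that lookup, assuming the DataStax Java driver 3.x metadata API (keyspace and table names are supplied by the caller), might look like this:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ColumnMetadata;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TableMetadata;

public class TableMetadataSketch {

    // Print each live column's name and CQL type; these are the values that
    // alterTableStatements() compares against the entity's field metadata.
    public static void printColumns(Session session, String keyspace, String table) {
        Cluster cluster = session.getCluster();
        TableMetadata tableMetadata = cluster.getMetadata().getKeyspace(keyspace).getTable(table);
        for (ColumnMetadata column : tableMetadata.getColumns()) {
            System.out.println(column.getName() + " : " + column.getType().getName());
        }
    }
}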
use of com.datastax.driver.core.RegularStatement in project cassandra-driver-mapping by valchkou.
In class SchemaSync, the method buildSyncStatements:
/**
 * Generate alter, drop or create statements for the given entity.
 *
 * @param keyspace the keyspace containing the table
 * @param session an active session, used to read the live schema
 * @param entityMetadata metadata for the entity class
 * @param syncOptions options controlling how the schema is synchronized
 * @return a list of RegularStatements
 */
public static List<RegularStatement> buildSyncStatements(String keyspace, Session session, EntityTypeMetadata entityMetadata, SyncOptions syncOptions) {
    String table = entityMetadata.getTableName();
    session.execute("USE " + keyspace);
    Cluster cluster = session.getCluster();
    KeyspaceMetadata keyspaceMetadata = cluster.getMetadata().getKeyspace(keyspace);
    TableMetadata tableMetadata = keyspaceMetadata.getTable(table);
    List<RegularStatement> statements = new ArrayList<RegularStatement>();
    if (tableMetadata == null) {
        statements = createTableStatements(keyspace, entityMetadata);
    } else {
        statements = alterTableStatements(keyspace, session, entityMetadata, syncOptions);
    }
    return statements;
}
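A hedged usage sketch: once buildSyncStatements has produced the list, the statements can be executed one at a time on an open session. How EntityTypeMetadata and SyncOptions are constructed is specific to the mapping library and is not shown here.

import java.util.List;

import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Session;

public class SyncUsageSketch {

    // Apply the generated schema statements sequentially; schema-altering
    // statements are executed individually rather than batched.
    public static void applySync(Session session, List<RegularStatement> statements) {
        for (RegularStatement statement : statements) {
            session.execute(statement);
        }
    }
}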