Use of com.datastax.driver.core.PreparedStatement in project smscgateway by RestComm.
The class NN_DBOper, method doGetStatementCollection:
private synchronized PreparedStatementCollection2 doGetStatementCollection(Date dt, String tName)
        throws PersistenceException {
    PreparedStatementCollection2 psc = dataTableRead.get(tName);
    if (psc != null)
        return psc;
    try {
        try {
            // checking if a data table exists
            String s1 = "SELECT * FROM \"" + Schema.FAMILY_DATA + tName + "\";";
            PreparedStatement ps = session.prepare(s1);
        } catch (InvalidQueryException e) {
            // data table does not exist

            // DATA
            StringBuilder sb = new StringBuilder();
            sb.append("CREATE TABLE \"" + Schema.FAMILY_DATA);
            sb.append(tName);
            sb.append("\" (");
            appendField(sb, Schema.COLUMN_ADDR_DST_DIGITS, "ascii");
            appendField(sb, Schema.COLUMN_ADDR_DST_TON, "int");
            appendField(sb, Schema.COLUMN_ADDR_DST_NPI, "int");
            appendField(sb, Schema.COLUMN_ID, "uuid");
            appendField(sb, Schema.COLUMN_TARGET_ID, "ascii");
            appendField(sb, Schema.COLUMN_DUE_SLOT, "bigint");
            appendField(sb, Schema.COLUMN_IN_SYSTEM_SLOT, "bigint");
            appendField(sb, Schema.COLUMN_ADDR_SRC_DIGITS, "ascii");
            appendField(sb, Schema.COLUMN_ADDR_SRC_TON, "int");
            appendField(sb, Schema.COLUMN_ADDR_SRC_NPI, "int");
            appendField(sb, Schema.COLUMN_DUE_DELAY, "int");
            appendField(sb, Schema.COLUMN_ALERTING_SUPPORTED, "boolean");
            appendField(sb, Schema.COLUMN_MESSAGE_ID, "bigint");
            appendField(sb, Schema.COLUMN_MO_MESSAGE_REF, "int");
            appendField(sb, Schema.COLUMN_ORIG_ESME_NAME, "text");
            appendField(sb, Schema.COLUMN_ORIG_SYSTEM_ID, "text");
            appendField(sb, Schema.COLUMN_DEST_CLUSTER_NAME, "text");
            appendField(sb, Schema.COLUMN_DEST_ESME_NAME, "text");
            appendField(sb, Schema.COLUMN_DEST_SYSTEM_ID, "text");
            appendField(sb, Schema.COLUMN_SUBMIT_DATE, "timestamp");
            appendField(sb, Schema.COLUMN_DELIVERY_DATE, "timestamp");
            appendField(sb, Schema.COLUMN_SERVICE_TYPE, "text");
            appendField(sb, Schema.COLUMN_ESM_CLASS, "int");
            appendField(sb, Schema.COLUMN_PROTOCOL_ID, "int");
            appendField(sb, Schema.COLUMN_PRIORITY, "int");
            appendField(sb, Schema.COLUMN_REGISTERED_DELIVERY, "int");
            appendField(sb, Schema.COLUMN_REPLACE, "int");
            appendField(sb, Schema.COLUMN_DATA_CODING, "int");
            appendField(sb, Schema.COLUMN_DEFAULT_MSG_ID, "int");
            appendField(sb, Schema.COLUMN_MESSAGE, "blob");
            appendField(sb, Schema.COLUMN_OPTIONAL_PARAMETERS, "text");
            appendField(sb, Schema.COLUMN_SCHEDULE_DELIVERY_TIME, "timestamp");
            appendField(sb, Schema.COLUMN_VALIDITY_PERIOD, "timestamp");
            appendField(sb, Schema.COLUMN_IMSI, "ascii");
            appendField(sb, Schema.COLUMN_NNN_DIGITS, "ascii");
            appendField(sb, Schema.COLUMN_NNN_AN, "int");
            appendField(sb, Schema.COLUMN_NNN_NP, "int");
            appendField(sb, Schema.COLUMN_SM_STATUS, "int");
            appendField(sb, Schema.COLUMN_SM_TYPE, "int");
            appendField(sb, Schema.COLUMN_DELIVERY_COUNT, "int");
            sb.append("PRIMARY KEY (\"");
            sb.append(Schema.COLUMN_TARGET_ID);
            sb.append("\", \"");
            sb.append(Schema.COLUMN_ID);
            sb.append("\"");
            sb.append("));");
            String s2 = sb.toString();
            PreparedStatement ps = session.prepare(s2);
            BoundStatement boundStatement = new BoundStatement(ps);
            ResultSet res = session.execute(boundStatement);
            // appendIndex("DATA" + tName, Schema.COLUMN_TARGET_ID);
            // appendIndex(tName, Schema.COLUMN_DUE_SLOT);
            // appendIndex("DATA" + tName, Schema.COLUMN_IN_SYSTEM_SLOT);

            // SLOTS
            sb = new StringBuilder();
            sb.append("CREATE TABLE \"" + Schema.FAMILY_SLOTS);
            sb.append(tName);
            sb.append("\" (");
            appendField(sb, Schema.COLUMN_DUE_SLOT, "bigint");
            appendField(sb, Schema.COLUMN_TARGET_ID, "ascii");
            // !!!! temporary - delete it
            appendField(sb, "PROCESSED", "boolean");
            // !!!! temporary - delete it
            sb.append("PRIMARY KEY (\"");
            sb.append(Schema.COLUMN_DUE_SLOT);
            sb.append("\", \"");
            sb.append(Schema.COLUMN_TARGET_ID);
            sb.append("\"");
            sb.append("));");
            s2 = sb.toString();
            ps = session.prepare(s2);
            boundStatement = new BoundStatement(ps);
            res = session.execute(boundStatement);

            // DESTS
            sb = new StringBuilder();
            sb.append("CREATE TABLE \"" + Schema.FAMILY_DESTS);
            sb.append(tName);
            sb.append("\" (");
            appendField(sb, Schema.COLUMN_TARGET_ID, "ascii");
            appendField(sb, Schema.COLUMN_ID, "uuid");
            appendField(sb, Schema.COLUMN_SENT, "boolean");
            sb.append("PRIMARY KEY (\"");
            sb.append(Schema.COLUMN_TARGET_ID);
            sb.append("\", \"");
            sb.append(Schema.COLUMN_ID);
            sb.append("\"");
            sb.append("));");
            s2 = sb.toString();
            ps = session.prepare(s2);
            boundStatement = new BoundStatement(ps);
            res = session.execute(boundStatement);
        }
    } catch (Exception e1) {
        String msg = "Failed to access or create table " + tName + "!";
        throw new PersistenceException(msg, e1);
    }
    psc = new PreparedStatementCollection2(tName);
    dataTableRead.putEntry(tName, psc);
    return psc;
}
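
The pattern above is: prepare a probe SELECT against the table, treat InvalidQueryException as "table missing", create the DATA, SLOTS and DESTS tables, and only then cache a PreparedStatementCollection2 for reuse. A minimal sketch of the probe-and-create core, assuming a connected DataStax 3.x Session; the class name TableBootstrap and the method ensureTable are illustrative, not part of the smscgateway source:

import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.InvalidQueryException;

// Minimal sketch of the probe-and-create pattern, assuming a connected Session.
final class TableBootstrap {
    private final Session session;

    TableBootstrap(Session session) {
        this.session = session;
    }

    /** Returns true if the table already existed, false if it had to be created. */
    boolean ensureTable(String tableName, String createCql) {
        try {
            // Preparing a SELECT against a missing table fails fast with
            // InvalidQueryException, which doubles as an existence check.
            session.prepare("SELECT * FROM \"" + tableName + "\";");
            return true;
        } catch (InvalidQueryException e) {
            // Table is missing: run the caller-supplied CREATE TABLE statement.
            session.execute(createCql);
            return false;
        }
    }
}

One quirk worth noting: the original prepares each CREATE TABLE string and runs it through a BoundStatement. Schema statements carry no bind variables, so a plain session.execute(createCql), as in the sketch, is sufficient for one-off DDL.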
Use of com.datastax.driver.core.PreparedStatement in project data-transfer-project by google.
The class CosmosStore, method create:
private void create(UUID id, Object instance, String query) {
    PreparedStatement statement = session.prepare(query);
    BoundStatement boundStatement = new BoundStatement(statement);
    try {
        boundStatement.setUUID(0, id);
        boundStatement.setString(1, mapper.writeValueAsString(instance));
        session.execute(boundStatement);
    } catch (JsonProcessingException e) {
        throw new MicrosoftStorageException("Error creating data: " + id, e);
    }
}
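
For context, a hedged usage sketch: create expects a CQL INSERT whose positional bind markers line up with setUUID(0, ...) and setString(1, ...), and it serializes the payload to JSON via the Jackson mapper. The table and column names below are illustrative only, not taken from the data-transfer-project source, and the snippet is meant to run inside CosmosStore where create is visible:

import java.util.Collections;
import java.util.Map;
import java.util.UUID;

// Hypothetical schema: CREATE TABLE transfer.job_data (job_id uuid PRIMARY KEY, job_json text);
String insertCql = "INSERT INTO transfer.job_data (job_id, job_json) VALUES (?, ?)";
UUID id = UUID.randomUUID();
Map<String, String> instance = Collections.singletonMap("state", "NEW"); // stand-in payload
create(id, instance, insertCql); // binds id at index 0 and the JSON string at index 1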
Use of com.datastax.driver.core.PreparedStatement in project atlasdb by palantir.
The class CqlKeyValueService, method loadWithTs:
private void loadWithTs(final TableReference tableRef,
        final Set<Cell> cells,
        final long startTs,
        boolean loadAllTs,
        final Visitor<Multimap<Cell, Value>> visitor,
        final ConsistencyLevel consistency) throws Exception {
    List<ResultSetFuture> resultSetFutures = Lists.newArrayListWithCapacity(cells.size());
    SortedSetMultimap<byte[], Cell> cellsByCol =
            TreeMultimap.create(UnsignedBytes.lexicographicalComparator(), Ordering.natural());
    for (Cell cell : cells) {
        cellsByCol.put(cell.getColumnName(), cell);
    }
    for (Entry<byte[], SortedSet<Cell>> entry : Multimaps.asMap(cellsByCol).entrySet()) {
        if (entry.getValue().size() > config.fetchBatchCount()) {
            log.warn("A call to {} is performing a multiget of {} cells; this may indicate "
                    + "overly-large batching on a higher level.\n{}",
                    tableRef,
                    entry.getValue().size(),
                    CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
        }
        for (List<Cell> batch : Iterables.partition(entry.getValue(), config.fetchBatchCount())) {
            String rowBinds = Joiner.on(",").join(Iterables.limit(Iterables.cycle('?'), batch.size()));
            final String loadWithTsQuery = "SELECT * FROM " + getFullTableName(tableRef)
                    + " WHERE " + fieldNameProvider.row() + " IN (" + rowBinds + ")"
                    + " AND " + fieldNameProvider.column() + " = ?"
                    + " AND " + fieldNameProvider.timestamp() + " > ?"
                    + (!loadAllTs ? " LIMIT 1" : "");
            final PreparedStatement preparedStatement =
                    getPreparedStatement(tableRef, loadWithTsQuery, session).setConsistencyLevel(consistency);
            Object[] args = new Object[batch.size() + 2];
            for (int i = 0; i < batch.size(); i++) {
                args[i] = ByteBuffer.wrap(batch.get(i).getRowName());
            }
            args[batch.size()] = ByteBuffer.wrap(entry.getKey());
            args[batch.size() + 1] = ~startTs;
            ResultSetFuture resultSetFuture = session.executeAsync(preparedStatement.bind(args));
            resultSetFutures.add(resultSetFuture);
        }
    }
    String loggedLoadWithTsQuery = "SELECT * FROM " + getFullTableName(tableRef)
            + " WHERE " + fieldNameProvider.row() + " IN (?, ...)"
            + " AND " + fieldNameProvider.column() + " = ?"
            + " AND " + fieldNameProvider.timestamp() + " > ?"
            + (!loadAllTs ? " LIMIT 1" : "");
    for (ResultSetFuture rsf : resultSetFutures) {
        visitResults(rsf.getUninterruptibly(), visitor, loggedLoadWithTsQuery, loadAllTs);
    }
}
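
One detail worth calling out: the query binds ~startTs, the bitwise complement of the logical start timestamp. AtlasDB stores the timestamp column inverted, so ascending clustering order on the stored value is descending order on the logical timestamp; that is what makes LIMIT 1 return the newest eligible version when loadAllTs is false, and why the stored-space condition ts > ~startTs corresponds to the logical condition ts < startTs. The delete method below binds ~ts the same way. A short illustration (a sketch; AtlasDB's real helpers live elsewhere in the codebase):

// Inverted-timestamp encoding in miniature.
long startTs = 1_000L;
long stored = ~startTs;       // -1001: complementing flips the sign and the ordering
assert ~stored == startTs;    // the encoding is self-inverse
assert ~999L > ~startTs;      // logical 999 < 1000  =>  stored order is reversed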
Use of com.datastax.driver.core.PreparedStatement in project atlasdb by palantir.
The class CqlKeyValueService, method delete:
@Override
public void delete(final TableReference tableRef, final Multimap<Cell, Long> keys) {
    int cellCount = 0;
    String deleteQuery = "DELETE FROM " + getFullTableName(tableRef)
            + " WHERE " + fieldNameProvider.row() + " = ?"
            + " AND " + fieldNameProvider.column() + " = ?"
            + " AND " + fieldNameProvider.timestamp() + " = ?";
    int fetchBatchCount = config.fetchBatchCount();
    for (final List<Cell> batch : Iterables.partition(keys.keySet(), fetchBatchCount)) {
        cellCount += batch.size();
        PreparedStatement deleteStatement =
                getPreparedStatement(tableRef, deleteQuery, longRunningQuerySession)
                        .setConsistencyLevel(deleteConsistency);
        List<ResultSetFuture> resultSetFutures = Lists.newArrayList();
        for (Cell key : batch) {
            for (long ts : Ordering.natural().immutableSortedCopy(keys.get(key))) {
                BoundStatement boundStatement = deleteStatement.bind(
                        ByteBuffer.wrap(key.getRowName()),
                        ByteBuffer.wrap(key.getColumnName()),
                        ~ts);
                resultSetFutures.add(longRunningQuerySession.executeAsync(boundStatement));
            }
        }
        for (ResultSetFuture resultSetFuture : resultSetFutures) {
            ResultSet resultSet;
            try {
                resultSet = resultSetFuture.getUninterruptibly();
                resultSet.all();
            } catch (Throwable t) {
                throw Throwables.throwUncheckedException(t);
            }
            cqlKeyValueServices.logTracedQuery(deleteQuery, resultSet, session, cqlStatementCache.normalQuery);
        }
    }
    if (cellCount > fetchBatchCount) {
        log.warn("Rebatched in delete a call to {} that attempted to delete {} cells; "
                + "this may indicate overly-large batching on a higher level.\n{}",
                tableRef,
                cellCount,
                CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
    }
}
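
The fan-out pattern in delete generalizes: submit one executeAsync per bound statement, collect the ResultSetFutures, then drain them with getUninterruptibly so any failure surfaces once per write. A minimal sketch assuming an open Session; the table demo.events and its id column are assumptions, not from the AtlasDB source:

import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import java.util.ArrayList;
import java.util.List;

// Sketch of the fire-then-drain async write pattern used by delete() above.
final class AsyncDeleter {
    static void deleteAll(Session session, Iterable<Long> ids) {
        PreparedStatement ps = session.prepare("DELETE FROM demo.events WHERE id = ?");
        List<ResultSetFuture> futures = new ArrayList<>();
        for (long id : ids) {
            BoundStatement bound = ps.bind(id);
            futures.add(session.executeAsync(bound)); // submit without blocking
        }
        for (ResultSetFuture future : futures) {
            future.getUninterruptibly();              // block once per write; rethrows failures
        }
    }
}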
Use of com.datastax.driver.core.PreparedStatement in project atlasdb by palantir.
The class CqlKeyValueService, method getRowsAllColsInternal:
private Map<Cell, Value> getRowsAllColsInternal(final TableReference tableRef,
        final Iterable<byte[]> rows,
        final long startTs) throws Exception {
    int rowCount = 0;
    Map<Cell, Value> result = Maps.newHashMap();
    int fetchBatchCount = config.fetchBatchCount();
    List<ResultSetFuture> resultSetFutures = Lists.newArrayListWithExpectedSize(rowCount);
    for (final List<byte[]> batch : Iterables.partition(rows, fetchBatchCount)) {
        rowCount += batch.size();
        String getRowsQuery = String.format(
                "SELECT * FROM %s WHERE %s IN (%s)",
                getFullTableName(tableRef),
                fieldNameProvider.row(),
                Joiner.on(",").join(Iterables.limit(Iterables.cycle("?"), batch.size())));
        PreparedStatement preparedStatement = getPreparedStatement(tableRef, getRowsQuery, session);
        Object[] args = batch.stream().map(ByteBuffer::wrap).toArray();
        resultSetFutures.add(session.executeAsync(preparedStatement.bind(args)));
        for (ResultSetFuture resultSetFuture : resultSetFutures) {
            ResultSet resultSet;
            try {
                resultSet = resultSetFuture.getUninterruptibly();
            } catch (Throwable t) {
                throw Throwables.throwUncheckedException(t);
            }
            for (Row row : resultSet.all()) {
                Cell cell = Cell.create(getRowName(row), getColName(row));
                if ((getTs(row) < startTs)
                        && (!result.containsKey(cell) || (result.get(cell).getTimestamp() < getTs(row)))) {
                    result.put(Cell.create(getRowName(row), getColName(row)),
                            Value.create(getValue(row), getTs(row)));
                }
            }
            cqlKeyValueServices.logTracedQuery(getRowsQuery, resultSet, session, cqlStatementCache.normalQuery);
        }
    }
    if (rowCount > fetchBatchCount) {
        log.warn("Rebatched in getRows a call to {} that attempted to multiget {} rows; "
                + "this may indicate overly-large batching on a higher level.\n{}",
                tableRef.getQualifiedName(),
                rowCount,
                CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
    }
    return result;
}
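
Both AtlasDB snippets build their IN clause the same way: cycle a "?" placeholder, limit it to the batch size, and join with commas. A self-contained illustration of that idiom (the table and column names are placeholders, not AtlasDB's):

import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;

// Generates "?,?,?" for a batch of three and splices it into the query text.
final class InClauseDemo {
    public static void main(String[] args) {
        int batchSize = 3;
        String binds = Joiner.on(",").join(Iterables.limit(Iterables.cycle("?"), batchSize));
        String query = String.format("SELECT * FROM demo.kv WHERE row_key IN (%s)", binds);
        System.out.println(query); // SELECT * FROM demo.kv WHERE row_key IN (?,?,?)
    }
}

Because the placeholder count is baked into the query text, every distinct batch size produces a distinct statement to prepare; capping batches with Iterables.partition(rows, fetchBatchCount) keeps that set small, and getPreparedStatement presumably caches statements by query string.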