use of com.datastax.driver.core.querybuilder.Batch in project java-driver by datastax.
the class LargeDataTest method testWideBatchRows.
/**
 * Tests a batch that writes a wide row of 4,000 entries under a single
 * partition key.
 *
 * @param key the partition key that will receive the data
 * @throws Throwable if the write or the verification fails
 */
private void testWideBatchRows(int key) throws Throwable {
    // Write data
    Batch q = batch();
    for (int i = 0; i < 4000; ++i) {
        q = q.add(insertInto("wide_batch_rows").value("k", key).value("i", i));
    }
    session().execute(q.setConsistencyLevel(ConsistencyLevel.QUORUM));
    // Read data
    ResultSet rs = session().execute(select("i").from("wide_batch_rows").where(eq("k", key)));
    // Verify data
    int i = 0;
    for (Row row : rs) {
        assertTrue(row.getInt("i") == i++);
    }
}
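The unqualified batch(), insertInto(), select() and eq() calls are static imports from com.datastax.driver.core.querybuilder.QueryBuilder. The verification loop can only assert that i comes back in ascending order if i is a clustering column, so the test implies a table shaped roughly as below. A minimal sketch under that assumption; the DDL is not part of this excerpt and the helper name is illustrative:

import com.datastax.driver.core.Session;

// Hypothetical table setup inferred from the accessors in the test above:
// "k" is the partition key and "i" a clustering column, so selecting by k
// returns rows in ascending clustering order, which the i++ assertion relies on.
static void createWideBatchRowsTable(Session session) {
    session.execute("CREATE TABLE IF NOT EXISTS wide_batch_rows ("
            + "k int, i int, PRIMARY KEY (k, i))");
}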
use of com.datastax.driver.core.querybuilder.Batch in project newts by OpenNMS.
the class CassandraSampleRepository method insert.
@Override
public void insert(Collection<Sample> samples, boolean calculateTimeToLive) {
    Timer.Context timer = m_insertTimer.time();
    Timestamp now = Timestamp.now();
    Batch batch = unloggedBatch();
    for (Sample m : samples) {
        int ttl = m_ttl;
        if (calculateTimeToLive) {
            ttl -= (int) (now.asSeconds() - m.getTimestamp().asSeconds());
            if (ttl <= 0) {
                LOG.debug("Skipping expired sample: {}", m);
                continue;
            }
        }
        Duration resourceShard = m_contextConfigurations.getResourceShard(m.getContext());
        Insert insert = insertInto(SchemaConstants.T_SAMPLES)
                .value(SchemaConstants.F_CONTEXT, m.getContext().getId())
                .value(SchemaConstants.F_PARTITION, m.getTimestamp().stepFloor(resourceShard).asSeconds())
                .value(SchemaConstants.F_RESOURCE, m.getResource().getId())
                .value(SchemaConstants.F_COLLECTED, m.getTimestamp().asMillis())
                .value(SchemaConstants.F_METRIC_NAME, m.getName())
                .value(SchemaConstants.F_VALUE, ValueType.decompose(m.getValue()));
        // Attributes are optional; only set the column for samples that specify them.
        if (m.getAttributes() != null) {
            insert.value(SchemaConstants.F_ATTRIBUTES, m.getAttributes());
        }
        // Use the context-specific consistency level
        insert.setConsistencyLevel(m_contextConfigurations.getWriteConsistency(m.getContext()));
        batch.add(insert.using(ttl(ttl)));
    }
    try {
        m_session.execute(batch);
        if (m_processorService != null) {
            m_processorService.submit(samples);
        }
        m_samplesInserted.mark(samples.size());
    } finally {
        timer.stop();
    }
}
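The notable step above is the age-adjusted TTL: when calculateTimeToLive is set, each sample's time-to-live is reduced by the sample's age, so a backfilled sample expires at the same wall-clock moment it would have if written on time, and anything whose adjusted TTL has already reached zero is skipped rather than batched. A standalone sketch of that arithmetic (the names remainingTtl, baseTtl and sampleSeconds are illustrative, not from the Newts codebase):

// Age-adjusted TTL rule used in the loop above: subtract the sample's age
// from the configured TTL; a non-positive result means the sample has
// already expired and should not be written at all.
static int remainingTtl(int baseTtl, long nowSeconds, long sampleSeconds) {
    return baseTtl - (int) (nowSeconds - sampleSeconds);
}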
use of com.datastax.driver.core.querybuilder.Batch in project pentaho-cassandra-plugin by pentaho.
the class DriverCQLRowHandler method batchInsert.
public void batchInsert(RowMetaInterface inputMeta, Iterable<Object[]> rows, ITableMetaData tableMeta,
        String consistencyLevel, boolean insertFieldsNotInMetadata, LogChannelInterface log) throws Exception {
    String[] columnNames = getColumnNames(inputMeta);
    Batch batch = unloggedBatch ? QueryBuilder.unloggedBatch() : QueryBuilder.batch();
    if (!Utils.isEmpty(consistencyLevel)) {
        try {
            batch.setConsistencyLevel(ConsistencyLevel.valueOf(consistencyLevel));
        } catch (Exception e) {
            log.logError(e.getLocalizedMessage(), e);
        }
    }
    List<Integer> toRemove = new ArrayList<>();
    if (!insertFieldsNotInMetadata) {
        for (int i = 0; i < columnNames.length; i++) {
            if (!tableMeta.columnExistsInSchema(columnNames[i])) {
                toRemove.add(i);
            }
        }
        if (!toRemove.isEmpty()) {
            columnNames = copyExcluding(columnNames, new String[columnNames.length - toRemove.size()], toRemove);
        }
    }
    for (Object[] row : rows) {
        Object[] values = toRemove.isEmpty()
                ? Arrays.copyOf(row, columnNames.length)
                : copyExcluding(row, new Object[columnNames.length], toRemove);
        Insert insert = QueryBuilder.insertInto(keyspace.getName(), tableMeta.getTableName());
        insert = ttlSec > 0
                ? insert.using(QueryBuilder.ttl(ttlSec)).values(columnNames, values)
                : insert.values(columnNames, values);
        batch.add(insert);
    }
    if (batchInsertTimeout > 0) {
        try {
            getSession().executeAsync(batch).getUninterruptibly(batchInsertTimeout, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
            log.logError(BaseMessages.getString(DriverCQLRowHandler.class, "DriverCQLRowHandler.Error.TimeoutReached"));
        }
    } else {
        getSession().execute(batch);
    }
}
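The handler leans on a copyExcluding helper that is not shown in this excerpt. A plausible reconstruction, inferred purely from its two call sites above (copy source into the pre-sized target array, skipping the indices collected in toRemove); the actual Pentaho implementation may differ:

import java.util.List;

// Hypothetical reconstruction of copyExcluding: copies source into the
// pre-sized target array, dropping every index listed in toRemove.
static <T> T[] copyExcluding(T[] source, T[] target, List<Integer> toRemove) {
    int j = 0;
    for (int i = 0; i < source.length; i++) {
        if (!toRemove.contains(i)) {
            target[j++] = source[i];
        }
    }
    return target;
}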