use of com.datastax.driver.core.Metadata in project smscgateway by RestComm.
the class NN_DBOper method start.
public void start(String ip, int port, String keyspace, int dataTableDaysTimeArea, int slotSecondsTimeArea) throws Exception {
    if (this.started) {
        throw new Exception("DBOperations already started");
    }
    this.dataTableDaysTimeArea = dataTableDaysTimeArea;
    this.slotSecondsTimeArea = slotSecondsTimeArea;
    Builder builder = Cluster.builder();
    builder.withPort(port);
    builder.addContactPoint(ip);
    this.cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    logger.info(String.format("Connected to cluster: %s\n", metadata.getClusterName()));
    for (Host host : metadata.getAllHosts()) {
        logger.info(String.format("Datacenter: %s; Host: %s; Rack: %s\n", host.getDatacenter(), host.getAddress(), host.getRack()));
    }
    session = cluster.connect();
    session.execute("USE \"" + keyspace + "\"");
    // only for #2
    // this.checkNextDueSlot();
    //
    // String sa = "SELECT \"" + Schema.COLUMN_NEXT_SLOT + "\" FROM \"" + Schema.FAMILY_CURRENT_SLOT_TABLE + "\" where \"" + Schema.COLUMN_ID + "\"=0;";
    // selectCurrentSlotTable = session.prepare(sa);
    // sa = "INSERT INTO \"" + Schema.FAMILY_CURRENT_SLOT_TABLE + "\" (\"" + Schema.COLUMN_ID + "\", \"" + Schema.COLUMN_NEXT_SLOT + "\") VALUES (?, ?);";
    // updateCurrentSlotTable = session.prepare(sa);
    this.started = true;
}
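For comparison, a minimal standalone sketch of the same connect-and-log pattern using the driver's Metadata API. The contact point and port below are placeholder assumptions, not values taken from the project.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.Session;

public class MetadataLogSketch {
    public static void main(String[] args) {
        // Placeholder contact point and port; adjust for a real cluster.
        Cluster cluster = Cluster.builder()
                .addContactPoint("127.0.0.1")
                .withPort(9042)
                .build();
        try {
            // getMetadata() triggers initialization, so cluster topology is known here.
            Metadata metadata = cluster.getMetadata();
            System.out.printf("Connected to cluster: %s%n", metadata.getClusterName());
            for (Host host : metadata.getAllHosts()) {
                System.out.printf("Datacenter: %s; Host: %s; Rack: %s%n",
                        host.getDatacenter(), host.getAddress(), host.getRack());
            }
            Session session = cluster.connect();
            session.close();
        } finally {
            cluster.close();
        }
    }
}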
use of com.datastax.driver.core.Metadata in project smscgateway by RestComm.
the class DBOperations method start.
public void start(String hosts, int port, String keyspace, String user, String password, int secondsForwardStoring, int reviseSecondsOnSmscStart, int processingSmsSetTimeout, long minMessageId, long maxMessageId) throws Exception {
    if (this.started) {
        throw new IllegalStateException("DBOperations already started");
    }
    if (secondsForwardStoring < 3)
        secondsForwardStoring = 3;
    this.dueSlotForwardStoring = secondsForwardStoring * 1000 / slotMSecondsTimeArea;
    this.dueSlotReviseOnSmscStart = reviseSecondsOnSmscStart * 1000 / slotMSecondsTimeArea;
    this.processingSmsSetTimeout = processingSmsSetTimeout;
    this.minMessageId = minMessageId;
    this.maxMessageId = maxMessageId;
    this.pcsDate = null;
    currentSessionUUID = UUID.randomUUID();
    Builder builder = Cluster.builder();
    try {
        String[] cassHostsArray = hosts.split(",");
        builder.addContactPoints(cassHostsArray);
        builder.withPort(port);
        builder.withCredentials(user, password);
        this.cluster = builder.build().init();
    } catch (Exception e) {
logger.error(String.format("Failure of connecting to cassandra database. : host=%s, port=%d. SMSC GW will work without database support\n", hosts, port), e);
        this.started = true;
        return;
    }
    ProtocolVersion protVersion = DBOperations.getProtocolVersion(cluster);
    if (protVersion == ProtocolVersion.V1) {
        // Cassandra 1.2 (native protocol V1) is no longer supported
        logger.error("Cassandra 1.2 is no longer supported. You need to install Cassandra 2.0, 2.1 or 3.0");
        this.started = true;
        return;
    }
    databaseAvailable = true;
    Metadata metadata = cluster.getMetadata();
    logger.info(String.format("Connected to cluster: %s\n", metadata.getClusterName()));
    for (Host host : metadata.getAllHosts()) {
        logger.info(String.format("Datacenter: %s; Host: %s; Rack: %s\n", host.getDatacenter(), host.getAddress(), host.getRack()));
    }
    session = cluster.connect();
    session.execute("USE \"" + keyspace + "\"");
    this.checkCurrentSlotTableExists();
    // let's update tables structure if needed
    // try {
    //     addFieldsPacket_1(keyspace);
    // } catch (PersistenceException e) {
    //     // structure update failure - we can not continue
    //     logger.error("Can not update database version !!!!, we can not use it more", e);
    //     databaseAvailable = false;
    //     this.started = true;
    //     return;
    // }
    String sa = "SELECT \"" + Schema.COLUMN_NEXT_SLOT + "\" FROM \"" + Schema.FAMILY_CURRENT_SLOT_TABLE + "\" where \"" + Schema.COLUMN_ID + "\"=?;";
    selectCurrentSlotTable = session.prepare(sa);
    sa = "INSERT INTO \"" + Schema.FAMILY_CURRENT_SLOT_TABLE + "\" (\"" + Schema.COLUMN_ID + "\", \"" + Schema.COLUMN_NEXT_SLOT + "\") VALUES (?, ?);";
    updateCurrentSlotTable = session.prepare(sa);
    getSmppSmsRoutingRule = session.prepare("select * from \"" + Schema.FAMILY_SMPP_SMS_ROUTING_RULE + "\" where \"" + Schema.COLUMN_ADDRESS + "\"=? and \"" + Schema.COLUMN_NETWORK_ID + "\"=?;");
    getSipSmsRoutingRule = session.prepare("select * from \"" + Schema.FAMILY_SIP_SMS_ROUTING_RULE + "\" where \"" + Schema.COLUMN_ADDRESS + "\"=? and \"" + Schema.COLUMN_NETWORK_ID + "\"=?;");
    updateSmppSmsRoutingRule = session.prepare("INSERT INTO \"" + Schema.FAMILY_SMPP_SMS_ROUTING_RULE + "\" (\"" + Schema.COLUMN_ADDRESS + "\", \"" + Schema.COLUMN_NETWORK_ID + "\", \"" + Schema.COLUMN_CLUSTER_NAME + "\") VALUES (?, ?, ?);");
    updateSipSmsRoutingRule = session.prepare("INSERT INTO \"" + Schema.FAMILY_SIP_SMS_ROUTING_RULE + "\" (\"" + Schema.COLUMN_ADDRESS + "\", \"" + Schema.COLUMN_NETWORK_ID + "\", \"" + Schema.COLUMN_CLUSTER_NAME + "\") VALUES (?, ?, ?);");
    deleteSmppSmsRoutingRule = session.prepare("delete from \"" + Schema.FAMILY_SMPP_SMS_ROUTING_RULE + "\" where \"" + Schema.COLUMN_ADDRESS + "\"=? and \"" + Schema.COLUMN_NETWORK_ID + "\"=?;");
    deleteSipSmsRoutingRule = session.prepare("delete from \"" + Schema.FAMILY_SIP_SMS_ROUTING_RULE + "\" where \"" + Schema.COLUMN_ADDRESS + "\"=? and \"" + Schema.COLUMN_NETWORK_ID + "\"=?;");
    int row_count = 100;
    getSmppSmsRoutingRulesRange = session.prepare("select * from \"" + Schema.FAMILY_SMPP_SMS_ROUTING_RULE + "\" where token(\"" + Schema.COLUMN_ADDRESS + "\") >= token(?) LIMIT " + row_count + ";");
    getSmppSmsRoutingRulesRange2 = session.prepare("select * from \"" + Schema.FAMILY_SMPP_SMS_ROUTING_RULE + "\" LIMIT " + row_count + ";");
    getSipSmsRoutingRulesRange = session.prepare("select * from \"" + Schema.FAMILY_SIP_SMS_ROUTING_RULE + "\" where token(\"" + Schema.COLUMN_ADDRESS + "\") >= token(?) LIMIT " + row_count + ";");
    getSipSmsRoutingRulesRange2 = session.prepare("select * from \"" + Schema.FAMILY_SIP_SMS_ROUTING_RULE + "\" LIMIT " + row_count + ";");
    getStoredMessagesCounter = session.prepare("SELECT * FROM \"" + Schema.FAMILY_PENDING_MESSAGES + "\" WHERE \"" + Schema.COLUMN_DAY + "\" = ?");
    getSentMessagesCounter = session.prepare("SELECT * FROM \"" + Schema.FAMILY_PENDING_MESSAGES + "\" WHERE \"" + Schema.COLUMN_DAY + "\" = ?");
    try {
        currentDueSlot = c2_getCurrentSlotTable(CURRENT_DUE_SLOT);
        if (currentDueSlot == 0) {
            // not yet set
            long l1 = this.c2_getDueSlotForTime(new Date());
            this.c2_setCurrentDueSlot(l1);
        } else {
            this.c2_setCurrentDueSlot(currentDueSlot - dueSlotReviseOnSmscStart);
        }
        messageId = c2_getCurrentSlotTable(NEXT_MESSAGE_ID);
        messageId += MESSAGE_ID_LAG;
        c2_setCurrentSlotTable(NEXT_MESSAGE_ID, messageId);
    } catch (Exception e1) {
        String msg = "Failed reading a currentDueSlot !";
        throw new PersistenceException(msg, e1);
    }
    this.started = true;
}
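A hedged sketch of how a prepared statement such as selectCurrentSlotTable might later be bound and executed. The table and column names are simplified placeholders rather than the project's Schema constants.

import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class CurrentSlotReadSketch {
    // Reads the "nextSlot" value for a given key; names are illustrative only.
    static long readNextSlot(Session session, int key) {
        PreparedStatement ps = session.prepare(
                "SELECT \"nextSlot\" FROM \"CURRENT_SLOT_TABLE\" WHERE \"id\" = ?;");
        BoundStatement bound = ps.bind(key);
        ResultSet rs = session.execute(bound);
        Row row = rs.one();
        // A missing row means the slot counter has not been initialized yet.
        return row == null ? 0L : row.getLong("nextSlot");
    }
}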
use of com.datastax.driver.core.Metadata in project atlasdb by palantir.
the class CqlKeyValueService method performInitialSetup.
protected void performInitialSetup() {
    Metadata metadata = cluster.getMetadata();
    CassandraVerifier.validatePartitioner(metadata.getPartitioner(), config);
    Set<Peer> peers = CqlKeyValueServices.getPeers(session);
    boolean allNodesHaveSaneNumberOfVnodes = Iterables.all(peers, peer -> peer.tokens.size() > CassandraConstants.ABSOLUTE_MINIMUM_NUMBER_OF_TOKENS_PER_NODE);
    // node we're querying doesn't count itself as a peer
    if (peers.size() > 0 && !allNodesHaveSaneNumberOfVnodes) {
        throw new IllegalStateException("All nodes in cluster must have sane number of vnodes" + " (or cluster must consist of a single node).");
    }
    Set<String> dcsInCluster = Sets.newHashSet();
    for (Peer peer : peers) {
        dcsInCluster.add(peer.dataCenter);
        if (peer.dataCenter == null) {
            throw new IllegalStateException("Cluster should not mix datacenter-aware" + " and non-datacenter-aware nodes.");
        }
    }
    dcsInCluster.add(getLocalDataCenter());
    if (metadata.getKeyspace(config.getKeyspaceOrThrow()) == null) {
        // keyspace previously didn't exist; we need to set it up
        createKeyspace(config.getKeyspaceOrThrow(), dcsInCluster);
        return;
    }
    createTables(ImmutableMap.of(AtlasDbConstants.DEFAULT_METADATA_TABLE, AtlasDbConstants.EMPTY_TABLE_METADATA));
}
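A minimal sketch of the keyspace-existence check that performInitialSetup does through Metadata.getKeyspace. The replication settings below are an assumption for illustration; the AtlasDB code builds them from the datacenters collected above.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.Session;

public class KeyspaceCheckSketch {
    // Creates the keyspace only when cluster metadata reports it does not exist yet.
    static void ensureKeyspace(Cluster cluster, Session session, String keyspace) {
        Metadata metadata = cluster.getMetadata();
        if (metadata.getKeyspace(keyspace) == null) {
            // SimpleStrategy with RF=1 is a placeholder; multi-datacenter clusters
            // would use NetworkTopologyStrategy with per-datacenter factors.
            session.execute("CREATE KEYSPACE \"" + keyspace + "\" WITH replication = "
                    + "{'class': 'SimpleStrategy', 'replication_factor': 1}");
        }
    }
}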
use of com.datastax.driver.core.Metadata in project teiid by teiid.
the class TestCassandraConnectionImpl method testKeyspaceQuoting.
@Test
public void testKeyspaceQuoting() throws Exception {
    CassandraManagedConnectionFactory config = new CassandraManagedConnectionFactory();
    config.setKeyspace("\"x\"");
    Metadata metadata = Mockito.mock(Metadata.class);
    CassandraConnectionImpl cci = new CassandraConnectionImpl(config, metadata);
    KeyspaceMetadata key_metadata = Mockito.mock(KeyspaceMetadata.class);
    Mockito.stub(metadata.getKeyspace("x")).toReturn(key_metadata);
    assertNotNull(cci.keyspaceInfo());
}
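The test relies on the connection implementation stripping the surrounding quotes before calling Metadata.getKeyspace. For reference, the driver also offers the static Metadata.quote helper for the opposite direction; a small sketch with an arbitrary identifier:

import com.datastax.driver.core.Metadata;

public class QuotingSketch {
    public static void main(String[] args) {
        // Metadata.quote wraps an identifier in double quotes so the driver
        // treats it as a case-sensitive name, e.g. x becomes "x".
        String quoted = Metadata.quote("x");
        System.out.println(quoted);
    }
}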
use of com.datastax.driver.core.Metadata in project YCSB by brianfrankcooper.
the class CassandraCQLClient method init.
/**
 * Initialize any state for this DB. Called once per DB instance; there is one
 * DB instance per client thread.
 */
@Override
public void init() throws DBException {
    // Keep track of number of calls to init (for later cleanup)
    INIT_COUNT.incrementAndGet();
    // Synchronized so that we only have a single cluster/session instance for all the threads.
    synchronized (INIT_COUNT) {
        // Check if the cluster has already been initialized
        if (cluster != null) {
            return;
        }
        try {
            debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false"));
            trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT));
            String host = getProperties().getProperty(HOSTS_PROPERTY);
            if (host == null) {
                throw new DBException(String.format("Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY));
            }
            String[] hosts = host.split(",");
            String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT);
            String username = getProperties().getProperty(USERNAME_PROPERTY);
            String password = getProperties().getProperty(PASSWORD_PROPERTY);
            String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT);
            readConsistencyLevel = ConsistencyLevel.valueOf(getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT));
            writeConsistencyLevel = ConsistencyLevel.valueOf(getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT));
            Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION));
            if ((username != null) && !username.isEmpty()) {
                Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password).withPort(Integer.valueOf(port)).addContactPoints(hosts);
                if (useSSL) {
                    clusterBuilder = clusterBuilder.withSSL();
                }
                cluster = clusterBuilder.build();
            } else {
                cluster = Cluster.builder().withPort(Integer.valueOf(port)).addContactPoints(hosts).build();
            }
            String maxConnections = getProperties().getProperty(MAX_CONNECTIONS_PROPERTY);
            if (maxConnections != null) {
                cluster.getConfiguration().getPoolingOptions().setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections));
            }
            String coreConnections = getProperties().getProperty(CORE_CONNECTIONS_PROPERTY);
            if (coreConnections != null) {
                cluster.getConfiguration().getPoolingOptions().setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections));
            }
            String connectTimoutMillis = getProperties().getProperty(CONNECT_TIMEOUT_MILLIS_PROPERTY);
            if (connectTimoutMillis != null) {
                cluster.getConfiguration().getSocketOptions().setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis));
            }
            String readTimoutMillis = getProperties().getProperty(READ_TIMEOUT_MILLIS_PROPERTY);
            if (readTimoutMillis != null) {
                cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(Integer.valueOf(readTimoutMillis));
            }
            Metadata metadata = cluster.getMetadata();
            logger.info("Connected to cluster: {}\n", metadata.getClusterName());
            for (Host discoveredHost : metadata.getAllHosts()) {
                logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack());
            }
            session = cluster.connect(keyspace);
        } catch (Exception e) {
            throw new DBException(e);
        }
    } // synchronized
}
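The INIT_COUNT comment above hints at reference-counted cleanup. A hedged sketch of what such a counterpart could look like; the real YCSB client has its own cleanup method, so the class and field names here are illustrative assumptions only.

import java.util.concurrent.atomic.AtomicInteger;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class SharedClusterHolderSketch {
    private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
    private static Cluster cluster;
    private static Session session;

    // Closes the shared Session and Cluster once the last client thread releases them.
    public static void release() {
        synchronized (INIT_COUNT) {
            if (INIT_COUNT.decrementAndGet() <= 0 && cluster != null) {
                if (session != null) {
                    session.close();
                }
                cluster.close();
                cluster = null;
                session = null;
            }
        }
    }
}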