use of org.apache.cassandra.thrift.Cassandra in project janusgraph by JanusGraph.
the class CTConnectionFactory method makeRawConnection.
/**
 * Create a Cassandra-Thrift connection, but do not attempt to
 * set a keyspace on the connection.
 *
 * @return A CTConnection ready to talk to a Cassandra cluster
 * @throws TTransportException on any Thrift transport failure
 */
public CTConnection makeRawConnection() throws TTransportException {
    final Config cfg = cfgRef.get();
    String hostname = cfg.getRandomHost();
    log.debug("Creating TSocket({}, {}, {}, {}, {})", hostname, cfg.port, cfg.username, cfg.password, cfg.timeoutMS);
    TSocket socket;
    if (null != cfg.sslTruststoreLocation && !cfg.sslTruststoreLocation.isEmpty()) {
        TSSLTransportFactory.TSSLTransportParameters params = new TSSLTransportFactory.TSSLTransportParameters() {
            {
                setTrustStore(cfg.sslTruststoreLocation, cfg.sslTruststorePassword);
            }
        };
        socket = TSSLTransportFactory.getClientSocket(hostname, cfg.port, cfg.timeoutMS, params);
    } else {
        socket = new TSocket(hostname, cfg.port, cfg.timeoutMS);
    }
    TTransport transport = new TFramedTransport(socket, cfg.frameSize);
    log.trace("Created transport {}", transport);
    TBinaryProtocol protocol = new TBinaryProtocol(transport);
    Cassandra.Client client = new Cassandra.Client(protocol);
    if (!transport.isOpen()) {
        transport.open();
    }
    if (cfg.username != null) {
        Map<String, String> credentials = new HashMap<String, String>() {
            {
                put(IAuthenticator.USERNAME_KEY, cfg.username);
                put(IAuthenticator.PASSWORD_KEY, cfg.password);
            }
        };
        try {
            client.login(new AuthenticationRequest(credentials));
        } catch (Exception e) {
            // TTransportException will propagate authentication/authorization failure
            throw new TTransportException(e);
        }
    }
    return new CTConnection(transport, client, cfg);
}
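Because makeRawConnection() deliberately leaves the keyspace unset, a caller normally binds the returned client to one before issuing requests. A minimal sketch of that follow-up step, assuming a CTConnectionFactory instance named factory and the keyspace name "janusgraph" (both chosen here purely for illustration):

// Sketch only: obtain a raw connection, then scope its client to a keyspace.
CTConnection conn = factory.makeRawConnection();
Cassandra.Client client = conn.getClient();
try {
    client.set_keyspace("janusgraph");
} catch (Exception e) {
    // Mirror makeRawConnection(): report keyspace selection failures as a transport-level problem.
    throw new TTransportException(e);
}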
use of org.apache.cassandra.thrift.Cassandra in project titan by thinkaurelius.
the class CassandraThriftStoreManager method clearStorage.
/**
 * Connect to Cassandra via Thrift on the specified host and port and attempt to truncate the named keyspace.
 * <p/>
 * This is a utility method intended mainly for testing. It is
 * equivalent to issuing 'truncate <cf>' for each of the column families in the keyspace using
 * the cassandra-cli tool.
 * <p/>
 * Using truncate is preferable for a number of reasons, most significantly because it doesn't
 * involve schema modifications, which can take time to propagate across the cluster, can leave
 * nodes in an inconsistent state, and could result in read/write failures.
 * Schema modifications are discouraged while there is any traffic to the keyspace or its column families.
 *
 * @throws StorageException if any checked Thrift exception or UnknownHostException is thrown in the body of this method
 */
public void clearStorage() throws StorageException {
    openStores.clear();
    // "log prefix"
    final String lp = "ClearStorage: ";
    /*
     * log4j is capable of automatically writing the name of a method that
     * generated a log message, but the docs warn that "generating caller
     * location information is extremely slow and should be avoided unless
     * execution speed is not an issue."
     */
    CTConnection conn = null;
    try {
        conn = pool.borrowObject(SYSTEM_KS);
        Cassandra.Client client = conn.getClient();
        KsDef ksDef;
        try {
            client.set_keyspace(keySpaceName);
            ksDef = client.describe_keyspace(keySpaceName);
        } catch (NotFoundException e) {
            log.debug(lp + "Keyspace {} does not exist, not attempting to truncate.", keySpaceName);
            return;
        } catch (InvalidRequestException e) {
            log.debug(lp + "InvalidRequestException when attempting to describe keyspace {}, not attempting to truncate.", keySpaceName);
            return;
        }
        if (null == ksDef) {
            log.debug(lp + "Received null KsDef for keyspace {}; not truncating its CFs", keySpaceName);
            return;
        }
        List<CfDef> cfDefs = ksDef.getCf_defs();
        if (null == cfDefs) {
            log.debug(lp + "Received null CfDef list for keyspace {}; not truncating CFs", keySpaceName);
            return;
        }
        for (CfDef cfDef : ksDef.getCf_defs()) {
            client.truncate(cfDef.name);
            log.info(lp + "Truncated CF {} in keyspace {}", cfDef.name, keySpaceName);
        }
        /*
         * Clearing the CTConnectionPool is unnecessary. This method
         * removes no keyspaces. All open Cassandra connections will
         * remain valid.
         */
    } catch (Exception e) {
        throw new TemporaryStorageException(e);
    } finally {
        if (conn != null && conn.getClient() != null) {
            try {
                conn.getClient().set_keyspace(SYSTEM_KS);
            } catch (InvalidRequestException e) {
                log.warn("Failed to reset keyspace", e);
            } catch (TException e) {
                log.warn("Failed to reset keyspace", e);
            }
        }
        pool.returnObjectUnsafe(SYSTEM_KS, conn);
    }
}
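The heart of clearStorage() is the describe_keyspace()/truncate() pair. A stripped-down sketch of just those Thrift calls against an already-connected client (the keyspace name "titan" is an assumption here, and the checked Thrift exceptions are left for the caller to handle):

// Sketch: truncate every column family in one keyspace using only raw Thrift calls.
client.set_keyspace("titan");
KsDef ksDef = client.describe_keyspace("titan");
for (CfDef cfDef : ksDef.getCf_defs()) {
    // truncate() removes all rows from the column family without changing its schema.
    client.truncate(cfDef.name);
}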
use of org.apache.cassandra.thrift.Cassandra in project titan by thinkaurelius.
the class CassandraThriftKeyColumnValueStore method getNamesSlice.
public Map<ByteBuffer, List<Entry>> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws StorageException {
    Preconditions.checkArgument(query.getLimit() >= 0);
    if (0 == query.getLimit())
        return Collections.emptyMap();
    ColumnParent parent = new ColumnParent(columnFamily);
    /*
     * Cassandra cannot handle columnStart = columnEnd.
     * Cassandra's Thrift getSlice() throws InvalidRequestException
     * if columnStart = columnEnd.
     */
    if (ByteBufferUtil.compare(query.getSliceStart(), query.getSliceEnd()) >= 0) {
        // Check for invalid arguments where columnEnd < columnStart
        if (ByteBufferUtil.isSmallerThan(query.getSliceEnd(), query.getSliceStart())) {
            throw new PermanentStorageException("columnStart=" + query.getSliceStart() + " is greater than columnEnd=" + query.getSliceEnd() + ". " + "columnStart must be less than or equal to columnEnd");
        }
        if (0 != query.getSliceStart().length() && 0 != query.getSliceEnd().length()) {
            logger.debug("Return empty list due to columnEnd==columnStart and neither empty");
            return Collections.emptyMap();
        }
    }
    // At this point, columnStart < columnEnd.
    ConsistencyLevel consistency = getTx(txh).getReadConsistencyLevel().getThriftConsistency();
    SlicePredicate predicate = new SlicePredicate();
    SliceRange range = new SliceRange();
    range.setCount(query.getLimit());
    range.setStart(query.getSliceStart().asByteBuffer());
    range.setFinish(query.getSliceEnd().asByteBuffer());
    predicate.setSlice_range(range);
    CTConnection conn = null;
    try {
        conn = pool.borrowObject(keyspace);
        Cassandra.Client client = conn.getClient();
        List<ByteBuffer> requestKeys = new ArrayList<ByteBuffer>(keys.size());
        {
            for (StaticBuffer key : keys) {
                requestKeys.add(key.asByteBuffer());
            }
        }
        Map<ByteBuffer, List<ColumnOrSuperColumn>> rows = client.multiget_slice(requestKeys, parent, predicate, consistency);
        /*
         * Each value list in "results" is at most as long as the corresponding
         * row in "rows"; excludeLastColumn() can drop the trailing column when
         * it matches the exclusive slice end.
         */
        Map<ByteBuffer, List<Entry>> results = new HashMap<ByteBuffer, List<Entry>>();
        ByteBuffer sliceEndBB = query.getSliceEnd().asByteBuffer();
        for (ByteBuffer key : rows.keySet()) {
            results.put(key, excludeLastColumn(rows.get(key), sliceEndBB));
        }
        return results;
    } catch (Exception e) {
        throw convertException(e);
    } finally {
        pool.returnObjectUnsafe(keyspace, conn);
    }
}
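Thrift slice ranges are inclusive on both ends, which is why the method above special-cases columnStart == columnEnd and trims the trailing column afterwards. A reduced sketch of the same predicate construction for a single row via get_slice(), where the column family name "edgestore", the byte[] row key rowKey, the column bounds, and the consistency level are all illustrative assumptions (checked Thrift exceptions omitted):

// Sketch: read up to 100 columns of one row with an explicit slice range.
SliceRange range = new SliceRange();
range.setStart(ByteBuffer.allocate(0));  // empty start = begin at the row's first column
range.setFinish(ByteBuffer.allocate(0)); // empty finish = continue through the last column
range.setCount(100);
SlicePredicate predicate = new SlicePredicate();
predicate.setSlice_range(range);
ColumnParent parent = new ColumnParent("edgestore");
List<ColumnOrSuperColumn> row = client.get_slice(ByteBuffer.wrap(rowKey), parent, predicate, ConsistencyLevel.QUORUM);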
use of org.apache.cassandra.thrift.Cassandra in project titan by thinkaurelius.
the class CassandraThriftStoreManager method mutateMany.
@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws StorageException {
    Preconditions.checkNotNull(mutations);
    final Timestamp timestamp = getTimestamp(txh);
    ConsistencyLevel consistency = getTx(txh).getWriteConsistencyLevel().getThriftConsistency();
    // Generate Thrift-compatible batch_mutate() datastructure
    // key -> cf -> cassmutation
    int size = 0;
    for (Map<StaticBuffer, KCVMutation> mutation : mutations.values()) size += mutation.size();
    Map<ByteBuffer, Map<String, List<org.apache.cassandra.thrift.Mutation>>> batch = new HashMap<ByteBuffer, Map<String, List<org.apache.cassandra.thrift.Mutation>>>(size);
    for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> keyMutation : mutations.entrySet()) {
        String columnFamily = keyMutation.getKey();
        for (Map.Entry<StaticBuffer, KCVMutation> mutEntry : keyMutation.getValue().entrySet()) {
            StaticBuffer key = mutEntry.getKey();
            ByteBuffer keyBB = key.asByteBuffer();
            // Get or create the single Cassandra Mutation object responsible for this key
            Map<String, List<org.apache.cassandra.thrift.Mutation>> cfmutation = batch.get(keyBB);
            if (cfmutation == null) {
                // TODO where did the magic number 3 come from?
                cfmutation = new HashMap<String, List<org.apache.cassandra.thrift.Mutation>>(3);
                batch.put(keyBB, cfmutation);
            }
            KCVMutation mutation = mutEntry.getValue();
            List<org.apache.cassandra.thrift.Mutation> thriftMutation = new ArrayList<org.apache.cassandra.thrift.Mutation>(mutations.size());
            if (mutation.hasDeletions()) {
                for (StaticBuffer buf : mutation.getDeletions()) {
                    Deletion d = new Deletion();
                    SlicePredicate sp = new SlicePredicate();
                    sp.addToColumn_names(buf.asByteBuffer());
                    d.setPredicate(sp);
                    d.setTimestamp(timestamp.deletionTime);
                    org.apache.cassandra.thrift.Mutation m = new org.apache.cassandra.thrift.Mutation();
                    m.setDeletion(d);
                    thriftMutation.add(m);
                }
            }
            if (mutation.hasAdditions()) {
                for (Entry ent : mutation.getAdditions()) {
                    ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
                    Column column = new Column(ent.getColumn().asByteBuffer());
                    column.setValue(ent.getValue().asByteBuffer());
                    column.setTimestamp(timestamp.additionTime);
                    cosc.setColumn(column);
                    org.apache.cassandra.thrift.Mutation m = new org.apache.cassandra.thrift.Mutation();
                    m.setColumn_or_supercolumn(cosc);
                    thriftMutation.add(m);
                }
            }
            cfmutation.put(columnFamily, thriftMutation);
        }
    }
    CTConnection conn = null;
    try {
        conn = pool.borrowObject(keySpaceName);
        Cassandra.Client client = conn.getClient();
        client.batch_mutate(batch, consistency);
    } catch (Exception ex) {
        throw CassandraThriftKeyColumnValueStore.convertException(ex);
    } finally {
        pool.returnObjectUnsafe(keySpaceName, conn);
    }
}
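batch_mutate() takes a nested map keyed first by row key and then by column family name, with a Mutation list per column family. A minimal sketch that inserts a single column into one row, where the byte[] row key rowKey, the column family name "edgestore", the column name/value, and the consistency level are illustrative assumptions (checked Thrift exceptions omitted):

// Sketch: one addition expressed as the nested batch_mutate() structure.
Column column = new Column(ByteBuffer.wrap("name".getBytes()));
column.setValue(ByteBuffer.wrap("value".getBytes()));
column.setTimestamp(System.currentTimeMillis() * 1000); // microseconds, per the usual Cassandra convention
ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
cosc.setColumn(column);
org.apache.cassandra.thrift.Mutation m = new org.apache.cassandra.thrift.Mutation();
m.setColumn_or_supercolumn(cosc);
Map<String, List<org.apache.cassandra.thrift.Mutation>> cfMutations = new HashMap<String, List<org.apache.cassandra.thrift.Mutation>>();
cfMutations.put("edgestore", Arrays.asList(m));
Map<ByteBuffer, Map<String, List<org.apache.cassandra.thrift.Mutation>>> batch = new HashMap<ByteBuffer, Map<String, List<org.apache.cassandra.thrift.Mutation>>>();
batch.put(ByteBuffer.wrap(rowKey), cfMutations);
client.batch_mutate(batch, ConsistencyLevel.QUORUM);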
use of org.apache.cassandra.thrift.Cassandra in project logprocessing by cloudian.
the class CassandraClient method open.
public void open() throws IOException {
    try {
        this.currentServer = this.serverSet.get();
    } catch (ServerSet.NoServersAvailableException e) {
        throw new IOException("No Cassandra servers available.");
    }
    int splitIndex = this.currentServer.indexOf(':');
    if (splitIndex == -1) {
        throw new IOException("Bad host:port pair: " + this.currentServer);
    }
    String host = this.currentServer.substring(0, splitIndex);
    int port = Integer.parseInt(this.currentServer.substring(splitIndex + 1));
    TSocket sock = new TSocket(host, port);
    this.transport = new TFramedTransport(sock);
    TProtocol protocol = new TBinaryProtocol(transport);
    this.client = new Cassandra.Client(protocol);
    try {
        this.transport.open();
        this.client.set_keyspace(this.keyspace);
    } catch (InvalidRequestException exc) {
        throw new IOException(exc.getMessage());
    } catch (TException texc) {
        throw new IOException(texc.getMessage());
    }
}
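The same transport stack (a TSocket wrapped in TFramedTransport, a TBinaryProtocol, then set_keyspace()) can be assembled directly when no server-set indirection is needed. A minimal sketch, where the host 127.0.0.1, the classic Thrift port 9160, and the keyspace name "logs" are illustrative assumptions:

// Sketch: open a framed Thrift connection to one Cassandra node and scope it to a keyspace.
TTransport transport = new TFramedTransport(new TSocket("127.0.0.1", 9160));
Cassandra.Client client = new Cassandra.Client(new TBinaryProtocol(transport));
try {
    transport.open();
    client.set_keyspace("logs");
} catch (InvalidRequestException exc) {
    throw new IOException("Keyspace selection failed: " + exc.getMessage());
} catch (TException texc) {
    throw new IOException("Could not connect to Cassandra: " + texc.getMessage());
}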