Usage example of com.thinkaurelius.titan.diskstorage.StorageException in the project titan by thinkaurelius.
Class CassandraThriftStoreManager, method createColumnFamily:
/**
 * Creates a new column family in the given keyspace via the Thrift system API.
 *
 * @param client     open Thrift client used to issue the schema change
 * @param ksName     keyspace in which to create the column family
 * @param cfName     name of the column family to create
 * @param comparator Cassandra comparator type for the new column family
 * @throws TemporaryStorageException on schema disagreement (retryable)
 * @throws PermanentStorageException on any other Thrift failure
 */
private void createColumnFamily(Cassandra.Client client, String ksName, String cfName, String comparator) throws StorageException {
    final CfDef cfDef = new CfDef();
    cfDef.setName(cfName);
    cfDef.setKeyspace(ksName);
    cfDef.setComparator_type(comparator);
    // Compression options are always set (possibly empty) so the CfDef is fully specified.
    final ImmutableMap.Builder<String, String> compression = ImmutableMap.builder();
    if (compressionEnabled) {
        compression.put("sstable_compression", compressionClass);
        compression.put("chunk_length_kb", Integer.toString(compressionChunkSizeKB));
    }
    cfDef.setCompression_options(compression.build());
    // Hard-coded caching settings: edge store caches keys, vertex index caches rows.
    if (cfName.startsWith(Backend.EDGESTORE_NAME)) {
        cfDef.setCaching("keys_only");
    } else if (cfName.startsWith(Backend.VERTEXINDEX_STORE_NAME)) {
        cfDef.setCaching("rows_only");
    }
    log.debug("Adding column family {} to keyspace {}...", cfName, ksName);
    try {
        client.system_add_column_family(cfDef);
    } catch (SchemaDisagreementException e) {
        // Schema disagreement may resolve itself; surface as a retryable failure.
        throw new TemporaryStorageException("Error in setting up column family", e);
    } catch (Exception e) {
        throw new PermanentStorageException(e);
    }
    log.debug("Added column family {} to keyspace {}.", cfName, ksName);
}
Usage example of com.thinkaurelius.titan.diskstorage.StorageException in the project titan by thinkaurelius.
Class CassandraThriftStoreManager, method mutateMany:
@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws StorageException {
Preconditions.checkNotNull(mutations);
final Timestamp timestamp = getTimestamp(txh);
ConsistencyLevel consistency = getTx(txh).getWriteConsistencyLevel().getThriftConsistency();
// Generate Thrift-compatible batch_mutate() datastructure
// key -> cf -> cassmutation
int size = 0;
for (Map<StaticBuffer, KCVMutation> mutation : mutations.values()) size += mutation.size();
Map<ByteBuffer, Map<String, List<org.apache.cassandra.thrift.Mutation>>> batch = new HashMap<ByteBuffer, Map<String, List<org.apache.cassandra.thrift.Mutation>>>(size);
for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> keyMutation : mutations.entrySet()) {
String columnFamily = keyMutation.getKey();
for (Map.Entry<StaticBuffer, KCVMutation> mutEntry : keyMutation.getValue().entrySet()) {
StaticBuffer key = mutEntry.getKey();
ByteBuffer keyBB = key.asByteBuffer();
// Get or create the single Cassandra Mutation object responsible for this key
Map<String, List<org.apache.cassandra.thrift.Mutation>> cfmutation = batch.get(keyBB);
if (cfmutation == null) {
// TODO where did the magic number 3 come from?
cfmutation = new HashMap<String, List<org.apache.cassandra.thrift.Mutation>>(3);
batch.put(keyBB, cfmutation);
}
KCVMutation mutation = mutEntry.getValue();
List<org.apache.cassandra.thrift.Mutation> thriftMutation = new ArrayList<org.apache.cassandra.thrift.Mutation>(mutations.size());
if (mutation.hasDeletions()) {
for (StaticBuffer buf : mutation.getDeletions()) {
Deletion d = new Deletion();
SlicePredicate sp = new SlicePredicate();
sp.addToColumn_names(buf.asByteBuffer());
d.setPredicate(sp);
d.setTimestamp(timestamp.deletionTime);
org.apache.cassandra.thrift.Mutation m = new org.apache.cassandra.thrift.Mutation();
m.setDeletion(d);
thriftMutation.add(m);
}
}
if (mutation.hasAdditions()) {
for (Entry ent : mutation.getAdditions()) {
ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
Column column = new Column(ent.getColumn().asByteBuffer());
column.setValue(ent.getValue().asByteBuffer());
column.setTimestamp(timestamp.additionTime);
cosc.setColumn(column);
org.apache.cassandra.thrift.Mutation m = new org.apache.cassandra.thrift.Mutation();
m.setColumn_or_supercolumn(cosc);
thriftMutation.add(m);
}
}
cfmutation.put(columnFamily, thriftMutation);
}
}
CTConnection conn = null;
try {
conn = pool.borrowObject(keySpaceName);
Cassandra.Client client = conn.getClient();
client.batch_mutate(batch, consistency);
} catch (Exception ex) {
throw CassandraThriftKeyColumnValueStore.convertException(ex);
} finally {
pool.returnObjectUnsafe(keySpaceName, conn);
}
}
Usage example of com.thinkaurelius.titan.diskstorage.StorageException in the project titan by thinkaurelius.
Class CassandraThriftStoreManager, method getCompressionOptions:
@Override
public Map<String, String> getCompressionOptions(String cf) throws StorageException {
CTConnection conn = null;
Map<String, String> result = null;
try {
conn = pool.borrowObject(keySpaceName);
Cassandra.Client client = conn.getClient();
KsDef ksDef = client.describe_keyspace(keySpaceName);
for (CfDef cfDef : ksDef.getCf_defs()) {
if (null != cfDef && cfDef.getName().equals(cf)) {
result = cfDef.getCompression_options();
break;
}
}
return result;
} catch (InvalidRequestException e) {
log.debug("Keyspace {} does not exist", keySpaceName);
return null;
} catch (Exception e) {
throw new TemporaryStorageException(e);
} finally {
pool.returnObjectUnsafe(keySpaceName, conn);
}
}
Usage example of com.thinkaurelius.titan.diskstorage.StorageException in the project titan by thinkaurelius.
Class CTConnectionFactory, method waitForClusterSize:
/**
 * Polls the cluster until at least {@code minSize} nodes agree on a single
 * schema version, or a ~10 s deadline expires.
 *
 * @param thriftClient open Thrift client used to query schema versions
 * @param minSize      minimum number of nodes required on the agreed version
 * @throws InterruptedException      if interrupted while sleeping between polls
 * @throws PermanentStorageException if the query fails or the size cannot be
 *                                   verified before the deadline
 */
public static void waitForClusterSize(Cassandra.Client thriftClient, int minSize) throws InterruptedException, StorageException {
    log.debug("Checking Cassandra cluster size" + " (want at least {} nodes)...", minSize);
    Map<String, List<String>> versions = null;
    final long STARTUP_WAIT_MAX = 10000L;
    final long STARTUP_WAIT_INCREMENT = 100L;
    long start = System.currentTimeMillis();
    long lastTry = 0;
    long limit = start + STARTUP_WAIT_MAX;
    long minSleep = STARTUP_WAIT_INCREMENT;
    Integer curSize = null;
    while (limit - System.currentTimeMillis() >= 0) {
        // Block for a little while if we're looping too fast
        long sinceLast = System.currentTimeMillis() - lastTry;
        long willSleep = minSleep - sinceLast;
        if (0 < willSleep) {
            Thread.sleep(willSleep);
        }
        // Issue thrift query
        try {
            lastTry = System.currentTimeMillis();
            versions = thriftClient.describe_schema_versions();
            // More than one schema version means the cluster hasn't settled yet.
            if (1 != versions.size())
                continue;
            String version = Iterators.getOnlyElement(versions.keySet().iterator());
            curSize = versions.get(version).size();
            if (curSize >= minSize) {
                log.debug("Cassandra cluster verified at size {} (schema version {}) in about {} ms", new Object[] { curSize, version, System.currentTimeMillis() - start });
                return;
            }
        } catch (Exception e) {
            // BUGFIX: pass e as the cause so the original stack trace is preserved
            // (previously only the message was kept).
            throw new PermanentStorageException("Failed to fetch Cassandra Thrift schema versions: " + ((e instanceof InvalidRequestException) ? ((InvalidRequestException) e).getWhy() : e.getMessage()), e);
        }
    }
    throw new PermanentStorageException("Could not verify Cassandra cluster size");
}
Usage example of com.thinkaurelius.titan.diskstorage.StorageException in the project titan by thinkaurelius.
Class CTConnectionFactory, method validateSchemaIsSettled:
/* This method was adapted from cassandra 0.7.5 cli/CliClient.java */
/**
 * Blocks until every reachable node reports a schema version at least as new
 * as {@code currentVersionId} (compared as time-UUIDs), or the
 * {@code SCHEMA_WAIT_MAX} deadline expires.
 *
 * @param thriftClient     open Thrift client used to query schema versions
 * @param currentVersionId time-UUID string of the schema version to wait for
 * @throws InterruptedException      if interrupted while sleeping between polls
 * @throws PermanentStorageException if the schema-version query itself fails
 * @throws TemporaryStorageException if no node responded, or the schema has not
 *                                   settled before the deadline (retryable)
 */
public static void validateSchemaIsSettled(Cassandra.Client thriftClient, String currentVersionId) throws InterruptedException, StorageException {
    log.debug("Waiting for Cassandra schema propagation...");
    Map<String, List<String>> versions = null;
    final TimeUUIDType ti = TimeUUIDType.instance;
    final long start = System.currentTimeMillis();
    long lastTry = 0;
    final long limit = start + SCHEMA_WAIT_MAX;
    final long minSleep = SCHEMA_WAIT_INCREMENT;
    boolean inAgreement = false;
    // The labeled "continue outer" below restarts the whole poll when any node
    // still reports a version older than the benchmark.
    outer: while (limit - System.currentTimeMillis() >= 0 && !inAgreement) {
        // Block for a little while if we're looping too fast
        final long now = System.currentTimeMillis();
        long sinceLast = now - lastTry;
        long willSleep = minSleep - sinceLast;
        if (0 < willSleep) {
            log.debug("Schema not yet propagated; " + "rechecking in {} ms", willSleep);
            Thread.sleep(willSleep);
        }
        // Issue thrift query
        try {
            lastTry = System.currentTimeMillis();
            // getting schema version for nodes of the ring
            versions = thriftClient.describe_schema_versions();
        } catch (Exception e) {
            // BUGFIX: pass e as the cause so the original stack trace is preserved
            // (previously only the message was kept).
            throw new PermanentStorageException("Failed to fetch Cassandra Thrift schema versions: " + ((e instanceof InvalidRequestException) ? ((InvalidRequestException) e).getWhy() : e.getMessage()), e);
        }
        int nodeCount = 0;
        // Check schema version
        UUID benchmark = UUID.fromString(currentVersionId);
        ByteBuffer benchmarkBB = ti.decompose(benchmark);
        for (String version : versions.keySet()) {
            if (version.equals(StorageProxy.UNREACHABLE)) {
                // Unreachable nodes are tolerated; count them and move on.
                nodeCount += versions.get(version).size();
                continue;
            }
            UUID uuid = UUID.fromString(version);
            ByteBuffer uuidBB = ti.decompose(uuid);
            if (-1 < ti.compare(uuidBB, benchmarkBB)) {
                log.debug("Version {} equals or comes after required version {}", uuid, benchmark);
                nodeCount += versions.get(version).size();
                continue;
            }
            // At least one node is behind the benchmark; re-poll from the top.
            continue outer;
        }
        // NOTE(review): nodeCount here actually counts unreachable PLUS
        // up-to-date nodes, so this message appears misleading — confirm
        // intended semantics before changing it.
        log.debug("Found {} unreachable or out-of-date Cassandra nodes", nodeCount);
        inAgreement = true;
    }
    if (null == versions) {
        throw new TemporaryStorageException("Couldn't contact Cassandra nodes before timeout");
    }
    if (versions.containsKey(StorageProxy.UNREACHABLE))
        log.warn("Warning: unreachable nodes: {}", Joiner.on(", ").join(versions.get(StorageProxy.UNREACHABLE)));
    if (!inAgreement) {
        throw new TemporaryStorageException("The schema has not settled in " + SCHEMA_WAIT_MAX + " ms. Wanted version " + currentVersionId + "; Versions are " + FBUtilities.toString(versions));
    } else {
        log.debug("Cassandra schema version {} propagated in about {} ms; Versions are {}", new Object[] { currentVersionId, System.currentTimeMillis() - start, FBUtilities.toString(versions) });
    }
}
Aggregations