Use of org.apache.cassandra.dht.IPartitioner in project cassandra by apache.
From the class StreamingTransferTest, the method transferRanges:
private void transferRanges(ColumnFamilyStore cfs) throws Exception {
    IPartitioner p = cfs.getPartitioner();
    List<Range<Token>> ranges = new ArrayList<>();
    // wrapped range
    ranges.add(new Range<Token>(p.getToken(ByteBufferUtil.bytes("key1")), p.getToken(ByteBufferUtil.bytes("key0"))));
    StreamPlan streamPlan = new StreamPlan("StreamingTransferTest").transferRanges(LOCAL, cfs.keyspace.getName(), ranges, cfs.getTableName());
    streamPlan.execute().get();
    verifyConnectionsAreClosed();

    // cannot add ranges after stream session is finished
    try {
        streamPlan.transferRanges(LOCAL, cfs.keyspace.getName(), ranges, cfs.getTableName());
        fail("Should have thrown exception");
    } catch (RuntimeException e) {
        // do nothing
    }
}
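The detail worth calling out in the test above is the "wrapped range": the token for "key1" sorts after the token for "key0", so the range wraps around the ring. Below is a minimal standalone sketch of building and detecting such a range directly from a partitioner, assuming the cassandra-all classes used above are on the classpath; the key values and the class name WrappedRangeSketch are illustrative only.

import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.utils.ByteBufferUtil;

public class WrappedRangeSketch {
    public static void main(String[] args) {
        IPartitioner p = Murmur3Partitioner.instance;
        Token left = p.getToken(ByteBufferUtil.bytes("key1"));
        Token right = p.getToken(ByteBufferUtil.bytes("key0"));
        // A range whose left endpoint sorts at or after its right endpoint wraps around the ring.
        Range<Token> range = new Range<>(left, right);
        boolean wraps = left.compareTo(right) >= 0;
        System.out.println("wraps around the ring: " + wraps);
    }
}

A left endpoint that does not sort strictly before the right endpoint is the convention for a wrap-around (or full-ring) range in this codebase.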
Use of org.apache.cassandra.dht.IPartitioner in project cassandra by apache.
From the class BloomFilterTest, the method testMurmur3FilterHash:
@Test
public void testMurmur3FilterHash() {
    IPartitioner partitioner = new Murmur3Partitioner();
    Iterator<ByteBuffer> gen = new KeyGenerator.RandomStringGenerator(new Random().nextInt(), FilterTestHelper.ELEMENTS);
    long[] expected = new long[2];
    long[] actual = new long[2];
    while (gen.hasNext()) {
        expected[0] = 1;
        expected[1] = 2;
        actual[0] = 3;
        actual[1] = 4;
        ByteBuffer key = gen.next();
        FilterKey expectedKey = FilterTestHelper.wrap(key);
        FilterKey actualKey = partitioner.decorateKey(key);
        actualKey.filterHash(actual);
        expectedKey.filterHash(expected);
        Assert.assertArrayEquals(expected, actual);
    }
}
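The assertion above holds because the decorated key produced by Murmur3Partitioner is itself a FilterKey: its filterHash fills the same two 64-bit words that FilterTestHelper.wrap computes from the raw key bytes, and those words are what the bloom filter consumes. A minimal sketch of obtaining that hash pair for a single key, assuming the same cassandra-all classes; the literal key and the class name are illustrative.

import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.utils.ByteBufferUtil;

public class FilterHashSketch {
    public static void main(String[] args) {
        IPartitioner partitioner = new Murmur3Partitioner();
        ByteBuffer key = ByteBufferUtil.bytes("some-key");
        long[] hash = new long[2];
        // The decorated key implements FilterKey; filterHash fills both 64-bit halves of the 128-bit hash.
        partitioner.decorateKey(key).filterHash(hash);
        System.out.println(Arrays.toString(hash));
    }
}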
Use of org.apache.cassandra.dht.IPartitioner in project cassandra by apache.
From the class CompactionStress, the method generateTokens:
/**
 * Populate tokenMetadata consistently across runs.
 *
 * We need consistency to write and compact the same data offline
 * in the case of a range aware sstable writer.
 */
private void generateTokens(String seed, TokenMetadata tokenMetadata, Integer numTokens) {
    Random random = new Random(seed.hashCode());
    IPartitioner p = tokenMetadata.partitioner;
    tokenMetadata.clearUnsafe();
    for (int i = 1; i <= numTokens; i++) {
        InetAddress addr = FBUtilities.getBroadcastAddress();
        List<Token> tokens = Lists.newArrayListWithCapacity(numTokens);
        for (int j = 0; j < numTokens; ++j)
            tokens.add(p.getRandomToken(random));
        tokenMetadata.updateNormalTokens(tokens, addr);
    }
}
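The reason the Random is seeded from the string rather than created fresh is determinism: every run that passes the same seed sees the same sequence from getRandomToken, so the populated TokenMetadata is identical across offline runs. A small sketch of that property, assuming Murmur3Partitioner; the seed string and class name are illustrative.

import java.util.Random;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.dht.Token;

public class DeterministicTokenSketch {
    public static void main(String[] args) {
        IPartitioner p = Murmur3Partitioner.instance;
        // Same seed string -> same Random sequence -> same tokens on every run.
        Random r1 = new Random("stress-seed".hashCode());
        Random r2 = new Random("stress-seed".hashCode());
        Token t1 = p.getRandomToken(r1);
        Token t2 = p.getRandomToken(r2);
        System.out.println(t1.equals(t2)); // true
    }
}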
Use of org.apache.cassandra.dht.IPartitioner in project titan by thinkaurelius.
From the class MapReduceIndexManagement, the method updateIndex:
/**
 * Updates the provided index according to the given {@link SchemaAction}.
 * Only {@link SchemaAction#REINDEX} and {@link SchemaAction#REMOVE_INDEX} are supported.
 *
 * @param index the index to process
 * @param updateAction either {@code REINDEX} or {@code REMOVE_INDEX}
 * @return a future that returns immediately;
 *         this method blocks until the Hadoop MapReduce job completes
 */
// TODO make this future actually async and update javadoc @return accordingly
public TitanManagement.IndexJobFuture updateIndex(TitanIndex index, SchemaAction updateAction) throws BackendException {
    Preconditions.checkNotNull(index, "Index parameter must not be null");
    Preconditions.checkNotNull(updateAction, "%s parameter must not be null", SchemaAction.class.getSimpleName());
    Preconditions.checkArgument(SUPPORTED_ACTIONS.contains(updateAction), "Only these %s parameters are supported: %s (was given %s)", SchemaAction.class.getSimpleName(), SUPPORTED_ACTIONS_STRING, updateAction);
    Preconditions.checkArgument(RelationTypeIndex.class.isAssignableFrom(index.getClass()) || TitanGraphIndex.class.isAssignableFrom(index.getClass()), "Index %s has class %s: must be a %s or %s (or subtype)", index, index.getClass(), RelationTypeIndex.class.getSimpleName(), TitanGraphIndex.class.getSimpleName());
    org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
    ModifiableHadoopConfiguration titanmrConf = ModifiableHadoopConfiguration.of(TitanHadoopConfiguration.MAPRED_NS, hadoopConf);

    // The job we'll execute to either REINDEX or REMOVE_INDEX
    final Class<? extends IndexUpdateJob> indexJobClass;
    final Class<? extends Mapper> mapperClass;

    // The class of the IndexUpdateJob and the Mapper that will be used to run it (VertexScanJob vs ScanJob)
    if (updateAction.equals(SchemaAction.REINDEX)) {
        indexJobClass = IndexRepairJob.class;
        mapperClass = HadoopVertexScanMapper.class;
    } else if (updateAction.equals(SchemaAction.REMOVE_INDEX)) {
        indexJobClass = IndexRemoveJob.class;
        mapperClass = HadoopScanMapper.class;
    } else {
        // Shouldn't get here -- if this exception is ever thrown, update SUPPORTED_ACTIONS
        throw new IllegalStateException("Unrecognized " + SchemaAction.class.getSimpleName() + ": " + updateAction);
    }

    // The column family that serves as input to the IndexUpdateJob
    final String readCF;
    if (RelationTypeIndex.class.isAssignableFrom(index.getClass())) {
        readCF = Backend.EDGESTORE_NAME;
    } else {
        TitanGraphIndex gindex = (TitanGraphIndex) index;
        if (gindex.isMixedIndex() && !updateAction.equals(SchemaAction.REINDEX))
            throw new UnsupportedOperationException("External mixed indexes must be removed in the indexing system directly.");
        Preconditions.checkState(TitanGraphIndex.class.isAssignableFrom(index.getClass()));
        if (updateAction.equals(SchemaAction.REMOVE_INDEX))
            readCF = Backend.INDEXSTORE_NAME;
        else
            readCF = Backend.EDGESTORE_NAME;
    }
    titanmrConf.set(TitanHadoopConfiguration.COLUMN_FAMILY_NAME, readCF);

    // The MapReduce InputFormat class based on the open graph's store manager
    final Class<? extends InputFormat> inputFormat;
    final Class<? extends KeyColumnValueStoreManager> storeManagerClass = graph.getBackend().getStoreManagerClass();
    if (CASSANDRA_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
        inputFormat = CassandraBinaryInputFormat.class;
        // Set the partitioner
        IPartitioner part = ((AbstractCassandraStoreManager) graph.getBackend().getStoreManager()).getCassandraPartitioner();
        hadoopConf.set("cassandra.input.partitioner.class", part.getClass().getName());
    } else if (HBASE_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
        inputFormat = HBaseBinaryInputFormat.class;
    } else {
        throw new IllegalArgumentException("Store manager class " + storeManagerClass + " is not supported");
    }

    // The index name and relation type name (if the latter is applicable)
    final String indexName = index.name();
    final String relationTypeName = RelationTypeIndex.class.isAssignableFrom(index.getClass()) ? ((RelationTypeIndex) index).getType().name() : "";
    Preconditions.checkNotNull(indexName);
    // Set the class of the IndexUpdateJob
    titanmrConf.set(TitanHadoopConfiguration.SCAN_JOB_CLASS, indexJobClass.getName());
    // Set the configuration of the IndexUpdateJob
    copyIndexJobKeys(hadoopConf, indexName, relationTypeName);
    titanmrConf.set(TitanHadoopConfiguration.SCAN_JOB_CONFIG_ROOT, GraphDatabaseConfiguration.class.getName() + "#JOB_NS");
    // Copy the StandardTitanGraph configuration under TitanHadoopConfiguration.GRAPH_CONFIG_KEYS
    org.apache.commons.configuration.Configuration localbc = graph.getConfiguration().getLocalConfiguration();
    localbc.clearProperty(Graph.GRAPH);
    copyInputKeys(hadoopConf, localbc);
    String jobName = HadoopScanMapper.class.getSimpleName() + "[" + indexJobClass.getSimpleName() + "]";
    try {
        return new CompletedJobFuture(HadoopScanRunner.runJob(hadoopConf, inputFormat, jobName, mapperClass));
    } catch (Exception e) {
        return new FailedJobFuture(e);
    }
}
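For context, this method is normally driven from the Titan management API: open the graph, look up the index, and pass it in together with the desired SchemaAction. A sketch of that call pattern, assuming a Cassandra-backed graph; the properties file path and the index name "byName" are illustrative, and per the javadoc above the returned future is already complete because updateIndex blocks until the MapReduce job finishes.

import com.thinkaurelius.titan.core.TitanFactory;
import com.thinkaurelius.titan.core.TitanGraph;
import com.thinkaurelius.titan.core.schema.SchemaAction;
import com.thinkaurelius.titan.core.schema.TitanGraphIndex;
import com.thinkaurelius.titan.core.schema.TitanManagement;
import com.thinkaurelius.titan.hadoop.MapReduceIndexManagement;

public class ReindexSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative configuration file; it must point at the Cassandra-backed graph to reindex.
        TitanGraph graph = TitanFactory.open("conf/titan-cassandra.properties");
        MapReduceIndexManagement mr = new MapReduceIndexManagement(graph);
        TitanManagement mgmt = graph.openManagement();
        TitanGraphIndex index = mgmt.getGraphIndex("byName"); // illustrative index name
        mr.updateIndex(index, SchemaAction.REINDEX).get();    // blocks until the MapReduce job completes
        mgmt.commit();
        graph.close();
    }
}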
Use of org.apache.cassandra.dht.IPartitioner in project eiger by wlloyd.
From the class HintedHandOffManager, the method scheduleAllDeliveries:
/**
 * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for
 * nodes which are never officially down/failed.
 */
private void scheduleAllDeliveries() {
    if (logger_.isDebugEnabled())
        logger_.debug("Started scheduleAllDeliveries");
    ColumnFamilyStore hintStore = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(HINTS_CF);
    IPartitioner p = StorageService.getPartitioner();
    RowPosition minPos = p.getMinimumToken().minKeyBound();
    Range<RowPosition> range = new Range<RowPosition>(minPos, minPos, p);
    IFilter filter = new NamesQueryFilter(ImmutableSortedSet.<ByteBuffer>of());
    List<Row> rows = hintStore.getRangeSlice(null, range, Integer.MAX_VALUE, filter, null);
    for (Row row : rows) {
        Token<?> token = StorageService.getPartitioner().getTokenFactory().fromByteArray(row.key.key);
        InetAddress target = StorageService.instance.getTokenMetadata().getEndpoint(token);
        scheduleHintDelivery(target);
    }
    if (logger_.isDebugEnabled())
        logger_.debug("Finished scheduleAllDeliveries");
}
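The idiom to note above is the full-ring range: a Range whose left and right endpoints are both the minimum key bound of the partitioner's minimum token is treated as covering the entire ring, which is what lets getRangeSlice visit every hint row. A minimal sketch of just that construction, assuming the same classes used in the method above; the helper class and method names are illustrative.

import org.apache.cassandra.db.RowPosition;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;

public class FullRingRangeSketch {
    // A Range with identical endpoints is the convention for "the whole ring" in this codebase.
    static Range<RowPosition> fullRing(IPartitioner p) {
        RowPosition min = p.getMinimumToken().minKeyBound();
        return new Range<RowPosition>(min, min, p);
    }
}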