Use of org.apache.hadoop.util.bloom.Key in the project Gaffer (by gchq):
the class AccumuloIDWithinSetRetrieverTest, method shouldDealWithFalsePositives.
/**
 * Checks that a Bloom-filter false positive does not leak spurious elements into the
 * query results. Builds a Bloom filter with the same parameters the retriever would use,
 * finds a value the filter wrongly reports as present, then runs the query and asserts
 * only the genuine elements come back.
 *
 * @param loadIntoMemory whether the retriever loads the seed set into memory
 * @param store          the Accumulo store under test
 * @throws StoreException                        if the store query fails
 * @throws AccumuloElementConversionException    if vertex serialisation fails
 */
private void shouldDealWithFalsePositives(final boolean loadIntoMemory, final AccumuloStore store) throws StoreException, AccumuloElementConversionException {
    // Query for all edges in set {A0, A23}
    final Set<EntitySeed> seeds = new HashSet<>();
    seeds.add(AccumuloTestData.SEED_A0);
    seeds.add(AccumuloTestData.SEED_A23);
    // Add some extra dummy seeds so the number of items (and hence the filter sizing
    // below) is sensible and a false positive is realistically findable.
    for (int i = 0; i < 10; i++) {
        seeds.add(new EntitySeed("abc" + i));
    }
    // Need to make sure that the Bloom filter we create has the same size and the same number of hashes as the
    // one that GraphElementsWithStatisticsWithinSetRetriever creates.
    final int numItemsToBeAdded = loadIntoMemory ? seeds.size() : 20;
    if (!loadIntoMemory) {
        store.getProperties().setMaxEntriesForBatchScanner("20");
    }
    // Find something that will give a false positive
    // Need to repeat the logic used in the getGraphElementsWithStatisticsWithinSet() method.
    // Calculate sensible size of filter, aiming for false positive rate of 1 in 10000, with a maximum size of
    // maxBloomFilterToPassToAnIterator bytes. NOTE: this arithmetic must stay identical
    // to the production code so the test filter matches the retriever's filter exactly.
    int size = (int) (-numItemsToBeAdded * Math.log(0.0001) / (Math.pow(Math.log(2.0), 2.0)));
    size = Math.min(size, store.getProperties().getMaxBloomFilterToPassToAnIterator());
    // Work out optimal number of hashes to use in Bloom filter based on size of set - optimal number of hashes is
    // (m/n)ln 2 where m is the size of the filter in bits and n is the number of items that will be added to the set.
    final int numHashes = Math.max(1, (int) ((size / numItemsToBeAdded) * Math.log(2)));
    // Create Bloom filter and add seeds to it
    final BloomFilter filter = new BloomFilter(size, numHashes, Hash.MURMUR_HASH);
    for (final EntitySeed seed : seeds) {
        filter.add(new Key(store.getKeyPackage().getKeyConverter().serialiseVertex(seed.getVertex())));
    }
    // Test random items against it - should only need ~(filter size / 2) attempts on average before finding a
    // false positive (but impose an arbitrary limit to avoid an infinite loop if there's a problem).
    // Use an explicit flag rather than comparing count to the limit afterwards: the
    // original check (count == maxNumberOfTries => fail) wrongly reported failure when
    // the false positive was found on the very last attempt.
    boolean foundFalsePositive = false;
    int count = 0;
    final int maxNumberOfTries = 50 * store.getProperties().getMaxBloomFilterToPassToAnIterator();
    while (count < maxNumberOfTries) {
        count++;
        if (filter.membershipTest(new Key(("" + count).getBytes()))) {
            foundFalsePositive = true;
            break;
        }
    }
    if (!foundFalsePositive) {
        fail("Didn't find a false positive");
    }
    // False positive is "" + count so create an edge from seeds to that
    final GetElements<EntitySeed, ?> op = new GetElements<>(defaultView, seeds);
    // Now query for all edges in set - shouldn't get the false positive
    final Set<Element> results = returnElementsFromOperation(store, op, new User(), loadIntoMemory);
    // Check results are as expected
    assertThat(results, IsCollectionContaining.hasItems(AccumuloTestData.EDGE_A0_A23, AccumuloTestData.A0_ENTITY, AccumuloTestData.A23_ENTITY));
}
Use of org.apache.hadoop.util.bloom.Key in the project common-crawl (by matpalm):
the class ReadNgram, method main.
/**
 * Reads a serialised {@link BloomFilter} from the first record of a Hadoop SequenceFile
 * and probes it with a few example n-grams, printing each n-gram with its membership result.
 *
 * @param s unused command-line arguments
 * @throws IOException if the SequenceFile cannot be opened or read
 */
public static void main(String[] s) throws IOException {
    Configuration conf = new Configuration();
    // Hard-coded path to the reducer output holding the serialised filter.
    String filename = "bfngrams/out/part-00000";
    FileSystem fs = FileSystem.get(URI.create(filename), conf);
    Path path = new Path(filename);
    BloomFilter bloomFilter = new BloomFilter();
    // try-with-resources: the original leaked the reader if next() threw before close().
    try (SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf)) {
        NullWritable nullKey = NullWritable.get();
        // The file is keyed by NullWritable; the first value is the whole filter.
        reader.next(nullKey, bloomFilter);
    }
    System.out.println(bloomFilter.toString());
    String[] egs = { "activities other", "membership organizations", "organizations elsewhere", "4 0", "elsewhere classified", "other membership", "0 activities", "20091128093155 4" };
    for (String eg : egs) {
        // NOTE(review): getBytes() uses the platform charset; harmless here since all
        // probes are ASCII, but the filter was presumably built the same way — confirm.
        Key k = new Key(eg.getBytes());
        System.out.println(eg + "\t" + bloomFilter.membershipTest(k));
    }
}
Aggregations