use of org.apache.ignite.IgniteCache in project ignite by apache.
the class IgniteDbPutGetAbstractTest method testPutGetLarge.
/**
 * @throws Exception If failed.
 */
public void testPutGetLarge() throws Exception {
    IgniteEx ig = grid(0);

    IgniteCache<Integer, byte[]> cache = ig.cache(DEFAULT_CACHE_NAME);

    // A large binary value must survive a put/get round trip.
    final byte[] val = new byte[2048];

    ThreadLocalRandom.current().nextBytes(val);

    cache.put(0, val);

    Assert.assertArrayEquals(val, cache.get(0));

    // The same must hold for a large object value stored in the "large" cache.
    final IgniteCache<Integer, LargeDbValue> cache1 = ig.cache("large");

    final LargeDbValue large = new LargeDbValue("str1", "str2", randomInts(1024));

    cache1.put(1, large);

    assertEquals(large, cache1.get(1));

    if (indexingEnabled()) {
        final List<Cache.Entry<Integer, LargeDbValue>> all = cache1.query(
            new SqlQuery<Integer, LargeDbValue>(LargeDbValue.class, "str1='str1'")).getAll();

        assertEquals(1, all.size());

        final Cache.Entry<Integer, LargeDbValue> entry = all.get(0);

        assertEquals(1, entry.getKey().intValue());
        assertEquals(large, entry.getValue());
    }

    cache.remove(0);
    cache1.remove(1);

    assertNull(cache.get(0));
    assertNull(cache1.get(1));
}
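The test relies on a LargeDbValue class and a randomInts(int) helper that are not shown in this snippet. Below is a minimal sketch of what they might look like, assuming str1 and str2 are annotated with @QuerySqlField so the SQL filter "str1='str1'" above can match, and assuming value-based equals/hashCode so the assertEquals calls pass. The field names and method bodies are inferred from how the test uses them, not copied from the real class; it assumes imports of java.util.Objects, java.util.Arrays, java.util.concurrent.ThreadLocalRandom and org.apache.ignite.cache.query.annotations.QuerySqlField.

// Hypothetical sketch of the LargeDbValue helper used by the test above.
static class LargeDbValue {
    /** Queryable string field (assumed, based on the SQL query in the test). */
    @QuerySqlField
    private final String str1;

    /** Second string field. */
    @QuerySqlField
    private final String str2;

    /** Large payload array. */
    private final int[] arr;

    LargeDbValue(String str1, String str2, int[] arr) {
        this.str1 = str1;
        this.str2 = str2;
        this.arr = arr;
    }

    /** Value-based equality so assertEquals(large, cache1.get(1)) can pass. */
    @Override public boolean equals(Object o) {
        if (this == o)
            return true;

        if (o == null || getClass() != o.getClass())
            return false;

        LargeDbValue that = (LargeDbValue)o;

        return Objects.equals(str1, that.str1) && Objects.equals(str2, that.str2)
            && Arrays.equals(arr, that.arr);
    }

    @Override public int hashCode() {
        return 31 * Objects.hash(str1, str2) + Arrays.hashCode(arr);
    }
}

// Hypothetical helper matching the randomInts(1024) call above: an array of 'cnt' random ints.
static int[] randomInts(int cnt) {
    int[] arr = new int[cnt];

    for (int i = 0; i < cnt; i++)
        arr[i] = ThreadLocalRandom.current().nextInt();

    return arr;
}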
use of org.apache.ignite.IgniteCache in project ignite by apache.
the class StreamTransformerExample method main.
public static void main(String[] args) throws Exception {
    // Mark this cluster member as client.
    Ignition.setClientMode(true);

    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        if (!ExamplesUtils.hasServerNodes(ignite))
            return;

        CacheConfiguration<Integer, Long> cfg = new CacheConfiguration<>(CACHE_NAME);

        // Index key and value.
        cfg.setIndexedTypes(Integer.class, Long.class);

        // Auto-close cache at the end of the example.
        try (IgniteCache<Integer, Long> stmCache = ignite.getOrCreateCache(cfg)) {
            try (IgniteDataStreamer<Integer, Long> stmr = ignite.dataStreamer(stmCache.getName())) {
                // Allow data updates.
                stmr.allowOverwrite(true);

                // Configure data transformation to count random numbers added to the stream.
                stmr.receiver(StreamTransformer.from((e, arg) -> {
                    // Get current count.
                    Long val = e.getValue();

                    // Increment count by 1.
                    e.setValue(val == null ? 1L : val + 1);

                    return null;
                }));

                // Stream 10 million random numbers into the streamer cache.
                for (int i = 1; i <= 10_000_000; i++) {
                    stmr.addData(RAND.nextInt(RANGE), 1L);

                    if (i % 500_000 == 0)
                        System.out.println("Number of tuples streamed into Ignite: " + i);
                }
            }

            // Query the top 10 most popular numbers.
            SqlFieldsQuery top10Qry = new SqlFieldsQuery("select _key, _val from Long order by _val desc limit 10");

            // Execute the query.
            List<List<?>> top10 = stmCache.query(top10Qry).getAll();

            System.out.println("Top 10 most popular numbers:");

            // Print the top 10 numbers.
            ExamplesUtils.printQueryResults(top10);
        }
        finally {
            // A distributed cache can be removed from the cluster only by an explicit destroyCache() call.
            ignite.destroyCache(CACHE_NAME);
        }
    }
}
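For context, StreamTransformer.from(...) wraps per-entry update logic so the streamer can apply it to batched cache updates. The same "count occurrences" update can be expressed directly against the cache with IgniteCache.invoke() and an entry processor. The sketch below is an illustration of that equivalence under the example's own names (CACHE_NAME, RAND, RANGE), not how the streamer batches updates internally:

// Sketch: the same counting update as a direct entry-processor invocation.
// Assumes 'ignite', CACHE_NAME, RAND and RANGE from the example above.
IgniteCache<Integer, Long> cache = ignite.cache(CACHE_NAME);

int key = RAND.nextInt(RANGE);

cache.invoke(key, (entry, args) -> {
    Long cnt = entry.getValue();

    // First occurrence starts the count at 1; later occurrences increment it.
    entry.setValue(cnt == null ? 1L : cnt + 1);

    return null;
});

The entry processor runs on the node that owns the key, so the read-modify-write is atomic per entry; the streamer version above gains throughput by shipping such updates in batches.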
use of org.apache.ignite.IgniteCache in project ignite by apache.
the class StreamVisitorExample method main.
public static void main(String[] args) throws Exception {
    // Mark this cluster member as client.
    Ignition.setClientMode(true);

    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        if (!ExamplesUtils.hasServerNodes(ignite))
            return;

        // Market data cache with default configuration.
        CacheConfiguration<String, Double> mktDataCfg = new CacheConfiguration<>("marketTicks");

        // Financial instrument cache configuration.
        CacheConfiguration<String, Instrument> instCfg = new CacheConfiguration<>("instCache");

        // Index key and value for querying financial instruments.
        // Note that the Instrument class has @QuerySqlField annotations for secondary field indexing.
        instCfg.setIndexedTypes(String.class, Instrument.class);

        // Auto-close caches at the end of the example.
        try (IgniteCache<String, Double> mktCache = ignite.getOrCreateCache(mktDataCfg);
             IgniteCache<String, Instrument> instCache = ignite.getOrCreateCache(instCfg)) {
            try (IgniteDataStreamer<String, Double> mktStmr = ignite.dataStreamer(mktCache.getName())) {
                // Note that we receive market data, but do not populate 'mktCache' (it remains empty).
                // Instead we update the instruments in 'instCache'.
                // Since both 'instCache' and 'mktCache' use the same key, updates are collocated.
                mktStmr.receiver(StreamVisitor.from((cache, e) -> {
                    String symbol = e.getKey();
                    Double tick = e.getValue();

                    Instrument inst = instCache.get(symbol);

                    if (inst == null)
                        inst = new Instrument(symbol);

                    // Don't populate the market cache, as we don't use it for querying.
                    // Update the cached instrument based on the latest market tick.
                    inst.update(tick);

                    instCache.put(symbol, inst);
                }));

                // Stream 10 million market data ticks into the system.
                for (int i = 1; i <= 10_000_000; i++) {
                    int idx = RAND.nextInt(INSTRUMENTS.length);

                    // Use a Gaussian distribution so that price deltas closer to 0 are more probable.
                    double price = round2(INITIAL_PRICES[idx] + RAND.nextGaussian());

                    mktStmr.addData(INSTRUMENTS[idx], price);

                    if (i % 500_000 == 0)
                        System.out.println("Number of tuples streamed into Ignite: " + i);
                }
            }

            // Select the top 3 best performing instruments.
            SqlFieldsQuery top3qry = new SqlFieldsQuery(
                "select symbol, (latest - open) from Instrument order by (latest - open) desc limit 3");

            // Execute the query.
            List<List<?>> top3 = instCache.query(top3qry).getAll();

            System.out.println("Top performing financial instruments: ");

            // Print the top 3 instruments.
            ExamplesUtils.printQueryResults(top3);
        }
        finally {
            // A distributed cache can be removed from the cluster only by an explicit destroyCache() call.
            ignite.destroyCache(mktDataCfg.getName());
            ignite.destroyCache(instCfg.getName());
        }
    }
}
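The Instrument class itself is not shown in this snippet. A minimal sketch consistent with how the example uses it follows, assuming 'open' and 'latest' are the @QuerySqlField-annotated fields backing the "select symbol, (latest - open) from Instrument" query; the field names are inferred from that SQL, and the update() logic (first tick sets the open price, every tick sets the latest) is an assumption:

// Hypothetical sketch of Instrument; field names are inferred from the SQL query above
// ('symbol', 'latest', 'open') and may differ from the real example class.
static class Instrument implements Serializable {
    /** Instrument symbol. */
    @QuerySqlField(index = true)
    private final String symbol;

    /** Open price: assumed to be the first tick seen. */
    @QuerySqlField(index = true)
    private double open;

    /** Latest price: the most recent tick. */
    @QuerySqlField(index = true)
    private double latest;

    Instrument(String symbol) {
        this.symbol = symbol;
    }

    /** Updates the instrument with a new market tick. */
    public void update(double price) {
        if (open == 0)
            open = price;

        latest = price;
    }
}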
use of org.apache.ignite.IgniteCache in project ignite by apache.
the class AlgorithmSpecificDatasetExample method main.
/**
 * Run example.
 */
public static void main(String[] args) throws Exception {
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        System.out.println(">>> Algorithm Specific Dataset example started.");

        IgniteCache<Integer, Person> persons = createCache(ignite);

        // Create an algorithm-specific dataset for linear regression. Here we define how features and
        // labels are extracted, and how partition data and context are created.
        try (AlgorithmSpecificDataset dataset = DatasetFactory.create(
            ignite,
            persons,
            (upstream, upstreamSize) -> new AlgorithmSpecificPartitionContext(),
            new SimpleLabeledDatasetDataBuilder<Integer, Person, AlgorithmSpecificPartitionContext>(
                (k, v) -> new double[] { v.getAge() },
                (k, v) -> v.getSalary(),
                1
            ).andThen((data, ctx) -> {
                double[] features = data.getFeatures();
                int rows = data.getRows();

                // Make a copy of the features, prepended with a column of 1.0 values (the intercept term).
                double[] a = new double[features.length + rows];

                for (int i = 0; i < rows; i++)
                    a[i] = 1.0;

                System.arraycopy(features, 0, a, rows, features.length);

                return new SimpleLabeledDatasetData(a, rows, data.getCols() + 1, data.getLabels());
            })
        ).wrap(AlgorithmSpecificDataset::new)) {
            // Train a linear regression model using gradient descent.
            double[] linearRegressionMdl = new double[2];

            for (int i = 0; i < 1000; i++) {
                double[] gradient = dataset.gradient(linearRegressionMdl);

                // Stop when the gradient norm indicates convergence.
                if (BLAS.getInstance().dnrm2(gradient.length, gradient, 1) < 1e-4)
                    break;

                for (int j = 0; j < gradient.length; j++)
                    linearRegressionMdl[j] -= 0.1 / persons.size() * gradient[j];
            }

            System.out.println("Linear Regression Model: " + Arrays.toString(linearRegressionMdl));
        }

        System.out.println(">>> Algorithm Specific Dataset example completed.");
    }
}
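The dataset.gradient(...) call hides the actual math. For the squared-error loss with feature matrix A, labels y and weights w, the per-partition gradient contribution is A'(Aw - y); the division by the sample count happens in the update step above (0.1 / persons.size()). The sketch below shows that computation over the column-major layout built above (column 0 is the all-ones intercept column). It is an illustration of the formula, not the actual AlgorithmSpecificDataset implementation:

// Sketch: squared-error gradient A'(Aw - y) for one partition, assuming the
// column-major feature layout built above; element (i, j) of A is a[j * rows + i].
static double[] gradient(double[] a, double[] y, double[] w, int rows, int cols) {
    // Residual r = Aw - y.
    double[] r = new double[rows];

    for (int i = 0; i < rows; i++) {
        double pred = 0;

        for (int j = 0; j < cols; j++)
            pred += a[j * rows + i] * w[j];

        r[i] = pred - y[i];
    }

    // Gradient g = A'r.
    double[] g = new double[cols];

    for (int j = 0; j < cols; j++)
        for (int i = 0; i < rows; i++)
            g[j] += a[j * rows + i] * r[i];

    return g;
}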
use of org.apache.ignite.IgniteCache in project ignite by apache.
the class CacheContinuousQueryExample method main.
/**
 * Executes example.
 *
 * @param args Command line arguments, none required.
 * @throws Exception If example execution failed.
 */
public static void main(String[] args) throws Exception {
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        System.out.println();
        System.out.println(">>> Cache continuous query example started.");

        // Auto-close cache at the end of the example.
        try (IgniteCache<Integer, String> cache = ignite.getOrCreateCache(CACHE_NAME)) {
            int keyCnt = 20;

            // These entries will be queried by the initial predicate.
            for (int i = 0; i < keyCnt; i++)
                cache.put(i, Integer.toString(i));

            // Create a new continuous query.
            ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();

            qry.setInitialQuery(new ScanQuery<>(new IgniteBiPredicate<Integer, String>() {
                @Override public boolean apply(Integer key, String val) {
                    return key > 10;
                }
            }));

            // Callback that is called locally when update notifications are received.
            qry.setLocalListener(new CacheEntryUpdatedListener<Integer, String>() {
                @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends String>> evts) {
                    for (CacheEntryEvent<? extends Integer, ? extends String> e : evts)
                        System.out.println("Updated entry [key=" + e.getKey() + ", val=" + e.getValue() + ']');
                }
            });

            // This filter will be evaluated remotely on all nodes.
            // Entries that pass this filter will be sent to the caller.
            qry.setRemoteFilterFactory(new Factory<CacheEntryEventFilter<Integer, String>>() {
                @Override public CacheEntryEventFilter<Integer, String> create() {
                    return new CacheEntryEventFilter<Integer, String>() {
                        @Override public boolean evaluate(CacheEntryEvent<? extends Integer, ? extends String> e) {
                            return e.getKey() > 10;
                        }
                    };
                }
            });

            // Execute the query.
            try (QueryCursor<Cache.Entry<Integer, String>> cur = cache.query(qry)) {
                // Iterate through existing data.
                for (Cache.Entry<Integer, String> e : cur)
                    System.out.println("Queried existing entry [key=" + e.getKey() + ", val=" + e.getValue() + ']');

                // Add a few more keys and watch for more query notifications.
                for (int i = keyCnt; i < keyCnt + 10; i++)
                    cache.put(i, Integer.toString(i));

                // Wait a while for the callback to be notified about the remaining puts.
                Thread.sleep(2000);
            }
        }
        finally {
            // A distributed cache can be removed from the cluster only by an explicit destroyCache() call.
            ignite.destroyCache(CACHE_NAME);
        }
    }
}
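Since IgniteBiPredicate, CacheEntryUpdatedListener, CacheEntryEventFilter and Factory are all single-method interfaces, the anonymous classes above can be collapsed into lambdas. A sketch of the same query in that style follows (behavior unchanged; note that in a real deployment the remote filter must be deployable and serializable on the server nodes):

// Sketch: the same continuous query written with lambdas instead of anonymous classes.
ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();

// Initial query: entries already in the cache with key > 10.
qry.setInitialQuery(new ScanQuery<Integer, String>((key, val) -> key > 10));

// Local listener: runs on this node for each batch of update events.
qry.setLocalListener(evts -> {
    for (CacheEntryEvent<? extends Integer, ? extends String> e : evts)
        System.out.println("Updated entry [key=" + e.getKey() + ", val=" + e.getValue() + ']');
});

// Remote filter factory: the filter is evaluated on the node that owns each entry.
qry.setRemoteFilterFactory(() -> e -> e.getKey() > 10);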