Use of org.apache.ignite.IgniteDataStreamer in the Apache Ignite project.
From the class DataStreamerImplSelfTest, method testAllOperationFinishedBeforeFutureCompletion.
/**
 * Verifies that every {@code addData()} operation is fully applied (all entries
 * readable from the cache) before the future returned by {@code addData()} completes.
 *
 * @throws Exception If failed.
 */
public void testAllOperationFinishedBeforeFutureCompletion() throws Exception {
    cnt = 0;

    Ignite ignite = startGrids(MAX_CACHE_COUNT);

    // Parameterized types instead of the raw IgniteCache used originally.
    final IgniteCache<Integer, String> cache = ignite.cache(DEFAULT_CACHE_NAME);

    // Released by the listener once it has verified the streamed entries.
    final CountDownLatch latch = new CountDownLatch(1);

    // Captures any assertion failure thrown on the listener thread so it can be
    // re-thrown on the test thread below.
    final AtomicReference<Throwable> ex = new AtomicReference<>();

    Collection<Map.Entry<Integer, String>> entries = new ArrayList<>(100);

    for (int i = 0; i < 100; i++)
        entries.add(new IgniteBiTuple<>(i, "" + i));

    // try-with-resources: the original never closed the streamer (resource leak).
    try (IgniteDataStreamer<Integer, String> ldr = ignite.dataStreamer(DEFAULT_CACHE_NAME)) {
        ldr.addData(entries).listen(new IgniteInClosure<IgniteFuture<?>>() {
            @Override public void apply(IgniteFuture<?> future) {
                try {
                    future.get();

                    // By the time the future completes, every entry must be visible.
                    for (int i = 0; i < 100; i++)
                        assertEquals("" + i, cache.get(i));
                }
                catch (Throwable e) {
                    ex.set(e);
                }

                latch.countDown();
            }
        });

        ldr.tryFlush();

        assertTrue(latch.await(5, TimeUnit.SECONDS));
    }

    // Re-throw any failure captured on the listener thread, preserving its type.
    Throwable e = ex.get();

    if (e != null) {
        if (e instanceof Error)
            throw (Error)e;

        if (e instanceof RuntimeException)
            throw (RuntimeException)e;

        throw new RuntimeException(e);
    }
}
Use of org.apache.ignite.IgniteDataStreamer in the Apache Ignite project.
From the class DataStreamerMultinodeCreateCacheTest, method testCreateCacheAndStream.
/**
 * Concurrently creates, streams into, and destroys caches from several nodes.
 *
 * @throws Exception If failed.
 */
public void testCreateCacheAndStream() throws Exception {
    fail("https://issues.apache.org/jira/browse/IGNITE-1603");

    final int THREADS = 5;

    startGrids(THREADS);

    final AtomicInteger idx = new AtomicInteger();

    IgniteInternalFuture<?> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
        @Override public Object call() throws Exception {
            // Each worker operates on its own grid node and cache-name prefix.
            int node = idx.getAndIncrement();

            long deadline = System.currentTimeMillis() + 60_000;

            Ignite ignite = grid(node);

            // Cycle through 10 cache names: create, stream 1000 entries, destroy.
            for (int iter = 0; System.currentTimeMillis() < deadline; iter++) {
                String name = "cache-" + node + "-" + (iter % 10);

                IgniteCache<Integer, String> cache = ignite.getOrCreateCache(name);

                try (IgniteDataStreamer<Object, Object> stmr = ignite.dataStreamer(name)) {
                    // Fail fast instead of remapping entries on topology changes.
                    ((DataStreamerImpl<Object, Object>)stmr).maxRemapCount(0);

                    for (int key = 0; key < 1000; key++)
                        stmr.addData(key, key);
                }

                cache.destroy();
            }

            return null;
        }
    }, THREADS, "create-cache");

    fut.get(2 * 60_000);
}
Use of org.apache.ignite.IgniteDataStreamer in the Apache Ignite project.
From the class DataStreamerTimeoutTest, method timeoutOnAddData.
/**
 * Streams entries through a receiver that stalls, expecting the configured
 * streamer timeout to fire.
 *
 * @return Number of entries successfully handed to {@code addData} before the failure.
 * @throws Exception If failed.
 */
private int timeoutOnAddData() throws Exception {
    boolean thrown = false;
    int processed = 0;

    try {
        Ignite ignite = startGrid(1);

        // Parameterized types instead of the raw IgniteDataStreamer used originally.
        try (IgniteDataStreamer<Object, Object> ldr = ignite.dataStreamer(CACHE_NAME)) {
            ldr.timeout(TIMEOUT);
            ldr.receiver(new TestDataReceiver());

            // Minimal buffering and parallelism so the timeout is hit quickly.
            ldr.perNodeBufferSize(1);
            ldr.perNodeParallelOperations(1);

            // Fail fast instead of remapping entries.
            ((DataStreamerImpl<Object, Object>)ldr).maxRemapCount(0);

            try {
                for (int i = 0; i < ENTRY_AMOUNT; i++) {
                    ldr.addData(i, i);

                    processed++;
                }
            }
            catch (IllegalStateException ignored) {
                // Streamer has already failed; stop feeding it.
            }
        }
        catch (CacheException | IgniteDataStreamerTimeoutException ignored) {
            // Expected failure path: the timeout (or resulting cache error) fired.
            thrown = true;
        }
    }
    finally {
        stopAllGrids();
    }

    assertTrue(thrown);

    return processed;
}
Use of org.apache.ignite.IgniteDataStreamer in the Apache Ignite project.
From the class StreamTransformerExample, method main.
/**
 * Streams random numbers into a cache, counting occurrences per key via a
 * stream transformer, then queries the ten most frequent numbers.
 *
 * @param args Command line arguments (ignored).
 * @throws Exception If the example fails.
 */
public static void main(String[] args) throws Exception {
    // Start this node in client mode: it streams data but stores none.
    Ignition.setClientMode(true);

    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        if (!ExamplesUtils.hasServerNodes(ignite))
            return;

        CacheConfiguration<Integer, Long> cacheCfg = new CacheConfiguration<>(CACHE_NAME);

        // Make both key and value queryable via SQL.
        cacheCfg.setIndexedTypes(Integer.class, Long.class);

        // Cache is auto-closed when the example finishes.
        try (IgniteCache<Integer, Long> stmCache = ignite.getOrCreateCache(cacheCfg)) {
            try (IgniteDataStreamer<Integer, Long> stmr = ignite.dataStreamer(stmCache.getName())) {
                // Updates to already-present keys must be allowed.
                stmr.allowOverwrite(true);

                // Transformer: each streamed tuple bumps the count stored under its key.
                stmr.receiver(StreamTransformer.from((entry, arg) -> {
                    Long cnt = entry.getValue();

                    entry.setValue(cnt == null ? 1L : cnt + 1);

                    return null;
                }));

                // Push 10 million random numbers, logging progress every 500k.
                for (int i = 1; i <= 10_000_000; i++) {
                    stmr.addData(RAND.nextInt(RANGE), 1L);

                    if (i % 500_000 == 0)
                        System.out.println("Number of tuples streamed into Ignite: " + i);
                }
            }

            // SQL query for the ten keys with the highest counts.
            SqlFieldsQuery top10Qry = new SqlFieldsQuery("select _key, _val from Long order by _val desc limit 10");

            List<List<?>> top10 = stmCache.query(top10Qry).getAll();

            System.out.println("Top 10 most popular numbers:");

            ExamplesUtils.printQueryResults(top10);
        }
        finally {
            // Distributed cache could be removed from cluster only by #destroyCache() call.
            ignite.destroyCache(CACHE_NAME);
        }
    }
}
Use of org.apache.ignite.IgniteDataStreamer in the Apache Ignite project.
From the class JdbcConnection, method prepareStatement.
/**
 * {@inheritDoc}
 */
@Override public PreparedStatement prepareStatement(String sql, int resSetType, int resSetConcurrency,
    int resSetHoldability) throws SQLException {
    ensureNotClosed();

    // Reject result set options Ignite JDBC does not support.
    if (resSetType != TYPE_FORWARD_ONLY)
        throw new SQLFeatureNotSupportedException("Invalid result set type (only forward is supported.)");

    if (resSetConcurrency != CONCUR_READ_ONLY)
        throw new SQLFeatureNotSupportedException("Invalid concurrency (updates are not supported).");

    if (!txAllowed && resSetHoldability != HOLD_CURSORS_OVER_COMMIT)
        throw new SQLFeatureNotSupportedException("Invalid holdability (transactions are not supported).");

    JdbcPreparedStatement stmt;

    if (stream) {
        // Streaming mode: validate the statement is streamable, then back it
        // with a configured data streamer.
        GridQueryIndexing idx = ignite().context().query().getIndexing();

        PreparedStatement nativeStmt = prepareNativeStatement(sql);

        try {
            idx.checkStatementStreamable(nativeStmt);
        }
        catch (IgniteSQLException e) {
            throw e.toJdbcException();
        }

        IgniteDataStreamer streamer = ignite().dataStreamer(cacheName);

        streamer.autoFlushFrequency(streamFlushTimeout);
        streamer.allowOverwrite(streamAllowOverwrite);

        // Apply optional per-node tuning only when explicitly configured.
        if (streamNodeBufSize > 0)
            streamer.perNodeBufferSize(streamNodeBufSize);

        if (streamNodeParOps > 0)
            streamer.perNodeParallelOperations(streamNodeParOps);

        stmt = new JdbcStreamedPreparedStatement(this, sql, streamer, nativeStmt);
    }
    else
        stmt = new JdbcPreparedStatement(this, sql);

    // Track the statement so the connection can close it later.
    statements.add(stmt);

    return stmt;
}
Aggregations