use of org.apache.ignite.IgniteDataStreamer in project ignite by apache.
the class IgniteCacheClearDuringRebalanceTest method populate.
/**
* @param node Ignite node.
* @throws Exception If failed.
*/
private void populate(final Ignite node) throws Exception {
    final AtomicInteger id = new AtomicInteger();

    final int tCnt = Runtime.getRuntime().availableProcessors();

    final byte[] data = new byte[1024];

    ThreadLocalRandom.current().nextBytes(data);

    GridTestUtils.runMultiThreaded(new Runnable() {
        @Override
        public void run() {
            try (IgniteDataStreamer<Object, Object> str = node.dataStreamer(CACHE_NAME)) {
                int idx = id.getAndIncrement();

                str.autoFlushFrequency(0);

                for (int i = idx; i < 500_000; i += tCnt) {
                    str.addData(i, data);

                    if (i % (100 * tCnt) == idx)
                        str.flush();
                }

                str.flush();
            }
        }
    }, tCnt, "ldr");

    assertEquals(500_000, node.cache(CACHE_NAME).size());
}
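The loader above disables time-based auto-flush and flushes manually every few batches. A minimal standalone sketch of the same pattern, assuming a locally started node and an illustrative cache name "data" (the class name and constants below are not taken from the test):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class ManualFlushSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("data"); // Illustrative cache name.

            try (IgniteDataStreamer<Integer, byte[]> str = ignite.dataStreamer("data")) {
                // 0 disables time-based auto-flush: buffered entries are sent
                // only on an explicit flush() or when the streamer is closed.
                str.autoFlushFrequency(0);

                byte[] payload = new byte[1024];

                for (int i = 0; i < 10_000; i++) {
                    str.addData(i, payload);

                    if (i % 1_000 == 0)
                        str.flush(); // Push buffered batches to the cluster.
                }
            } // close() performs a final flush.
        }
    }
}

Closing the streamer performs a final flush, which is why the test can assert the cache size right after runMultiThreaded returns.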
use of org.apache.ignite.IgniteDataStreamer in project ignite by apache.
the class WalRebalanceRestartTest method restartRebalance.
/**
 * Hangs rebalance on one node, invokes the given trigger and checks its effect.
 *
 * @param retrigger Rebalance trigger.
 * @param retriggerAsHistorical {@code true} if the rebalance is expected to restart as historical, {@code false} if as full.
 * @throws Exception If failed.
 */
private void restartRebalance(RebalanceRetrigger retrigger, boolean retriggerAsHistorical) throws Exception {
    IgniteEx ignite0 = startGrids(4);

    ignite0.cluster().active(true);

    try (IgniteDataStreamer<Integer, String> streamer = ignite0.dataStreamer(DEFAULT_CACHE_NAME)) {
        streamer.allowOverwrite(true);

        for (int i = 0; i < 1000; i++)
            streamer.addData(i, String.valueOf(i));
    }

    awaitPartitionMapExchange();

    forceCheckpoint();

    ignite(2).close();

    try (IgniteDataStreamer<Integer, String> streamer = ignite0.dataStreamer(DEFAULT_CACHE_NAME)) {
        streamer.allowOverwrite(true);

        for (int i = 1000; i < 2000; i++)
            streamer.addData(i, String.valueOf(i));
    }

    awaitPartitionMapExchange();

    forceCheckpoint();

    IgniteConfiguration cfg = getConfiguration(getTestIgniteInstanceName(2));

    TestRecordingCommunicationSpi spi2 = (TestRecordingCommunicationSpi)cfg.getCommunicationSpi();

    AtomicBoolean hasFullRebalance = new AtomicBoolean();

    spi2.record((node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage demandMsg = (GridDhtPartitionDemandMessage)msg;

            if (CU.cacheId(DEFAULT_CACHE_NAME) == demandMsg.groupId()) {
                if (rebTopVer == null || rebTopVer.before(demandMsg.topologyVersion()))
                    rebTopVer = demandMsg.topologyVersion();

                if (!F.isEmpty(demandMsg.partitions().fullSet()))
                    hasFullRebalance.compareAndSet(false, true);
            }
        }

        return false;
    });

    spi2.blockMessages((node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage demandMsg = (GridDhtPartitionDemandMessage)msg;

            if (CU.cacheId(DEFAULT_CACHE_NAME) == demandMsg.groupId())
                return true;
        }

        return false;
    });

    IgniteEx ignite2 = startGrid(optimize(cfg));

    spi2.waitForBlocked();

    assertFalse(hasFullRebalance.get());

    retrigger.trigger(ignite2);

    spi2.stopBlock();

    awaitPartitionMapExchange();

    if (retriggerAsHistorical)
        assertFalse(hasFullRebalance.get());
    else
        assertTrue(hasFullRebalance.get());
}
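Both data loads in this test enable allowOverwrite before streaming. A hedged sketch of the flag in isolation, with an illustrative helper class, method, and cache name (none of them from the test):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;

final class OverwriteSketch {
    /** Streams [from, to) into the given cache, updating keys that already exist. */
    static void reload(Ignite ignite, String cacheName, int from, int to) {
        try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer(cacheName)) {
            // allowOverwrite is false by default, which skips existing keys
            // and is faster for initial loads; enabling it makes addData()
            // update entries that are already in the cache.
            streamer.allowOverwrite(true);

            for (int i = from; i < to; i++)
                streamer.addData(i, String.valueOf(i));
        } // close() flushes any remaining buffered entries.
    }
}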
use of org.apache.ignite.IgniteDataStreamer in project ignite by apache.
the class GridIndexRebuildTest method testFullIndexRebuild.
/**
 * We start several nodes, populate caches, then start replacing values. After that one node is stopped and its
 * index.bin files are removed. Finally, we restart the node; index rebuild starts after recovery, and we verify
 * the indexes with the "validate indexes" task.
 */
@Test
public void testFullIndexRebuild() throws Exception {
    long start = System.currentTimeMillis();

    IgniteEx grid1 = startGrids(4);

    grid1.cluster().active(true);

    final int accountCnt = 2048;

    try (IgniteDataStreamer<Long, Account> streamer = grid1.dataStreamer(FIRST_CACHE)) {
        for (long i = 0; i < accountCnt; i++)
            streamer.addData(i, new Account(i));

        streamer.flush();
    }

    try (IgniteDataStreamer<Long, Account> streamer = grid1.dataStreamer(SECOND_CACHE)) {
        for (long i = 0; i < accountCnt; i++)
            streamer.addData(i, new Account(i));

        streamer.flush();
    }

    AtomicBoolean stop = new AtomicBoolean();

    IgniteCache<Object, Object> cache1 = grid1.cache(FIRST_CACHE);
    IgniteCache<Object, Object> cache2 = grid1.cache(SECOND_CACHE);

    new Thread(new Runnable() {
        @Override
        public void run() {
            long i = 0;

            while (!stop.get()) {
                try {
                    cache1.put(i, new Account(i));

                    if (i % 13 == 7)
                        cache2.put(i, new Account2(i));
                    else
                        cache2.put(i, new Account(i));

                    i++;
                }
                catch (Throwable e) {
                    e.printStackTrace();
                }
            }
        }
    }).start();

    File workDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false);

    long diff = System.currentTimeMillis() - start;

    U.sleep(7500 - (diff % 5000));

    stopGrid(3);

    stop.set(true);

    for (File grp : new File(workDir, U.maskForFileName(getTestIgniteInstanceName(3))).listFiles())
        new File(grp, "index.bin").delete();

    startGrid(3);

    awaitPartitionMapExchange();

    U.sleep(3_000);

    ImmutableSet<UUID> nodes = ImmutableSet.of(grid(2).localNode().id(), grid(3).localNode().id());

    VisorValidateIndexesTaskArg arg = new VisorValidateIndexesTaskArg(null, null, 10000, 1, true, true);

    VisorTaskArgument<VisorValidateIndexesTaskArg> visorTaskArg = new VisorTaskArgument<>(nodes, arg, true);

    ComputeTaskInternalFuture<VisorValidateIndexesTaskResult> exec =
        grid1.context().task().execute(new VisorValidateIndexesTask(), visorTaskArg);

    VisorValidateIndexesTaskResult res = exec.get();

    Map<UUID, VisorValidateIndexesJobResult> results = res.results();

    boolean hasIssue = false;

    for (VisorValidateIndexesJobResult jobResult : results.values()) {
        System.err.println(jobResult);

        hasIssue |= jobResult.hasIssues();
    }

    assertFalse(hasIssue);
}
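The test streams Account values keyed by a long id, then calls flush() so the caches are fully populated before the background updater starts. A sketch of that typed-streamer pattern, assuming a simplified stand-in Account class (the real test class is not shown on this page):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;

final class AccountLoadSketch {
    /** Stand-in for the test's Account class; only its shape matters here. */
    static class Account {
        final long id;

        Account(long id) { this.id = id; }
    }

    /** Streams cnt Account values keyed by id, then forces a synchronous flush. */
    static void load(Ignite ignite, String cacheName, long cnt) {
        try (IgniteDataStreamer<Long, Account> streamer = ignite.dataStreamer(cacheName)) {
            for (long i = 0; i < cnt; i++)
                streamer.addData(i, new Account(i));

            // flush() blocks until buffered batches are delivered, so the
            // cache is fully populated before the caller proceeds.
            streamer.flush();
        }
    }
}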
use of org.apache.ignite.IgniteDataStreamer in project ignite by apache.
the class GridIndexRebuildTest method testPartialIndexRebuild.
/**
 * We start several nodes, populate caches, then start replacing values. After that one node is stopped and a new
 * index is created. Finally, we restart the node; index rebuild starts after recovery, and we verify the indexes
 * with the "validate indexes" task.
 */
@SuppressWarnings("unchecked")
@Test
public void testPartialIndexRebuild() throws Exception {
    LogListener lsnr = LogListener.matches("B+Tree is corrupted").build();

    listeningLog.registerListener(lsnr);

    long start = System.currentTimeMillis();

    IgniteEx grid1 = startGrids(4);

    grid1.cluster().active(true);

    final int accountCnt = 2048;

    try (IgniteDataStreamer<Long, Account> streamer = grid1.dataStreamer(SECOND_CACHE)) {
        for (long i = 0; i < accountCnt; i++)
            streamer.addData(i, new Account(i));

        streamer.flush();
    }

    AtomicBoolean stop = new AtomicBoolean();

    IgniteCache<Object, Object> cache2 = grid1.cache(SECOND_CACHE);

    new Thread(new Runnable() {
        @Override
        public void run() {
            long i = 0;

            while (!stop.get()) {
                try {
                    if (i % 13 == 7)
                        cache2.put(i, new Account2(i));
                    else
                        cache2.put(i, new Account(i));

                    i++;
                }
                catch (Throwable e) {
                    e.printStackTrace();
                }
            }
        }
    }).start();

    long diff = System.currentTimeMillis() - start;

    U.sleep(7500 - (diff % 5000));

    stopGrid(3);

    stop.set(true);

    cache2.query(new SqlFieldsQuery(
        "CREATE INDEX idx" + UUID.randomUUID().toString().replaceAll("-", "_") + " on Account (amount)")).getAll();

    startGrid(3);

    awaitPartitionMapExchange();

    U.sleep(3_000);

    ImmutableSet<UUID> nodes = ImmutableSet.of(grid(2).localNode().id(), grid(3).localNode().id());

    VisorValidateIndexesTaskArg arg = new VisorValidateIndexesTaskArg(null, null, 10000, 1, true, true);

    VisorTaskArgument<VisorValidateIndexesTaskArg> visorTaskArg = new VisorTaskArgument<>(nodes, arg, true);

    ComputeTaskInternalFuture<VisorValidateIndexesTaskResult> execute =
        grid1.context().task().execute(new VisorValidateIndexesTask(), visorTaskArg);

    VisorValidateIndexesTaskResult res = execute.get();

    Map<UUID, VisorValidateIndexesJobResult> results = res.results();

    boolean hasIssue = false;

    for (VisorValidateIndexesJobResult jobResult : results.values()) {
        System.err.println(jobResult);

        hasIssue |= jobResult.hasIssues();
    }

    assertFalse(hasIssue);

    assertFalse("B+Tree is corrupted.", lsnr.check());
}
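The new index is created through the SQL API while node 3 is down, which is what forces the partial rebuild on restart. A hedged sketch of the same DDL call, using an illustrative fixed index name in place of the test's randomized one:

import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

final class CreateIndexSketch {
    /** Adds an index over Account.amount through the SQL API. */
    static void createIndex(IgniteCache<?, ?> cache) {
        // getAll() forces execution of the DDL statement.
        cache.query(new SqlFieldsQuery(
            "CREATE INDEX IF NOT EXISTS account_amount_idx ON Account (amount)")).getAll();
    }
}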
use of org.apache.ignite.IgniteDataStreamer in project ignite by apache.
the class JdbcThinStreamingSelfTest method testSimultaneousStreaming.
/**
* @throws Exception If failed.
*/
public void testSimultaneousStreaming() throws Exception {
    try (Connection anotherConn = createOrdinaryConnection()) {
        execute(anotherConn, "CREATE TABLE PUBLIC.T(x int primary key, y int) WITH " +
            "\"cache_name=T,wrap_value=false\"");
    }

    // Timeout to let connection close be handled on server side.
    U.sleep(500);

    try (Connection conn = createStreamedConnection(false, 10000)) {
        assertStreamingState(true);

        PreparedStatement firstStmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)");

        PreparedStatement secondStmt = conn.prepareStatement("insert into PUBLIC.T(x, y) values (?, ?)");

        try {
            for (int i = 1; i <= 10; i++) {
                firstStmt.setInt(1, i);
                firstStmt.setString(2, nameForId(i));

                firstStmt.executeUpdate();
            }

            for (int i = 51; i <= 67; i++) {
                secondStmt.setInt(1, i);
                secondStmt.setInt(2, i);

                secondStmt.executeUpdate();
            }

            for (int i = 11; i <= 50; i++) {
                firstStmt.setInt(1, i);
                firstStmt.setString(2, nameForId(i));

                firstStmt.executeUpdate();
            }

            for (int i = 68; i <= 100; i++) {
                secondStmt.setInt(1, i);
                secondStmt.setInt(2, i);

                secondStmt.executeUpdate();
            }

            assertCacheEmpty();

            SqlClientContext cliCtx = sqlClientContext();

            HashMap<String, IgniteDataStreamer<?, ?>> streamers = U.field(cliCtx, "streamers");

            assertEquals(2, streamers.size());

            assertEqualsCollections(new HashSet<>(Arrays.asList("person", "T")), streamers.keySet());
        }
        finally {
            U.closeQuiet(firstStmt);
            U.closeQuiet(secondStmt);
        }
    }

    // Let's wait a little so that all data arrives at its destination - we can't intercept the streamers'
    // flush on connection close in any way.
    U.sleep(1000);

    // Now let's check it's all there.
    for (int i = 1; i <= 50; i++)
        assertEquals(nameForId(i), nameForIdInCache(i));

    for (int i = 51; i <= 100; i++)
        assertEquals(i, grid(0).cache("T").get(i));
}