Use of org.apache.ignite.internal.processors.query.SqlClientContext in the Apache Ignite project.
Class JdbcThinStreamingSelfTest, method testStreamingReEnabled.
/**
 * Verifies that re-issuing SET STREAMING 1 with new parameters updates the
 * connection/server state and flushes the data buffered so far.
 *
 * @throws Exception if failed.
 */
public void testStreamingReEnabled() throws Exception {
    try (Connection conn = createStreamedConnection(false, 10000)) {
        assertStreamingState(true);

        try (PreparedStatement ins = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)")) {
            for (int id = 1; id <= 100; id++) {
                ins.setInt(1, id);
                ins.setString(2, nameForId(id));
                ins.executeUpdate();
            }
        }

        // Streamed rows must not be visible before a flush.
        assertCacheEmpty();

        execute(conn, "set streaming 1 batch_size 111 allow_overwrite 0 per_node_buffer_size 512 "
            + "per_node_parallel_operations 4 flush_frequency 5000");

        U.sleep(500);

        // Client-side batch size should reflect the re-issued command.
        assertEquals((Integer)111, U.field(conn, "streamBatchSize"));

        SqlClientContext ctx = sqlClientContext();

        // Server-side streaming parameters should all have been updated.
        assertTrue(ctx.isStream());
        assertFalse(U.field(ctx, "streamAllowOverwrite"));
        assertEquals((Integer)512, U.field(ctx, "streamNodeBufSize"));
        assertEquals((Long)5000L, U.field(ctx, "streamFlushTimeout"));
        assertEquals((Integer)4, U.field(ctx, "streamNodeParOps"));

        // Now let's check it's all there - SET STREAMING 1 repeated call must also have caused flush.
        for (int id = 1; id <= 100; id++)
            assertEquals(nameForId(id), nameForIdInCache(id));
    }
}
Use of org.apache.ignite.internal.processors.query.SqlClientContext in the Apache Ignite project.
Class JdbcThinStreamingAbstractSelfTest, method testSimultaneousStreaming.
/**
 * Verifies that two targets (cache Person and table T) can be streamed into
 * over a single connection, each via its own data streamer, and that all
 * rows end up in the correct destination.
 *
 * @throws Exception if failed.
 */
@Test
public void testSimultaneousStreaming() throws Exception {
    try (Connection anotherConn = createOrdinaryConnection()) {
        execute(anotherConn, "CREATE TABLE PUBLIC.T(x int primary key, y int) WITH "
            + "\"cache_name=T,wrap_value=false\"");
    }

    // Timeout to let connection close be handled on server side.
    U.sleep(500);

    try (Connection conn = createStreamedConnection(false, 10000)) {
        assertStreamingState(true);

        PreparedStatement personStmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)");
        PreparedStatement tblStmt = conn.prepareStatement("insert into PUBLIC.T(x, y) values (?, ?)");

        try {
            // Interleave inserts into the two targets so both streamers are active at once.
            for (int id = 1; id <= 10; id++) {
                personStmt.setInt(1, id);
                personStmt.setString(2, nameForId(id));
                personStmt.executeUpdate();
            }

            for (int key = 51; key <= 67; key++) {
                tblStmt.setInt(1, key);
                tblStmt.setInt(2, key);
                tblStmt.executeUpdate();
            }

            for (int id = 11; id <= 50; id++) {
                personStmt.setInt(1, id);
                personStmt.setString(2, nameForId(id));
                personStmt.executeUpdate();
            }

            for (int key = 68; key <= 100; key++) {
                tblStmt.setInt(1, key);
                tblStmt.setInt(2, key);
                tblStmt.executeUpdate();
            }

            // Streamed rows must not be visible before a flush.
            assertCacheEmpty();

            SqlClientContext cliCtx = sqlClientContext();

            final HashMap<String, IgniteDataStreamer<?, ?>> streamers = U.field(cliCtx, "streamers");

            // Wait when node process requests (because client send batch requests async).
            GridTestUtils.waitForCondition(() -> streamers.size() == 2, 1000);

            assertEquals(2, streamers.size());

            assertEqualsCollections(new HashSet<>(Arrays.asList("person", "T")), streamers.keySet());
        }
        finally {
            U.closeQuiet(personStmt);
            U.closeQuiet(tblStmt);
        }
    }

    // Let's wait a little so that all data arrives to destination - we can't intercept streamers' flush
    // on connection close in any way.
    U.sleep(1000);

    // Now let's check it's all there.
    for (int id = 1; id <= 50; id++)
        assertEquals(nameForId(id), nameForIdInCache(id));

    for (int key = 51; key <= 100; key++)
        assertEquals(key, grid(0).cache("T").get(key));
}
Use of org.apache.ignite.internal.processors.query.SqlClientContext in the Apache Ignite project.
Class JdbcThinStreamingAbstractSelfTest, method testStreamingReEnabled.
/**
 * Verifies that re-issuing SET STREAMING 1 with new parameters updates the
 * connection/server state and flushes the data buffered so far.
 *
 * @throws Exception if failed.
 */
@Test
public void testStreamingReEnabled() throws Exception {
    try (Connection conn = createStreamedConnection(false, 10000)) {
        assertStreamingState(true);

        try (PreparedStatement ins = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)")) {
            for (int id = 1; id <= 100; id++) {
                ins.setInt(1, id);
                ins.setString(2, nameForId(id));
                ins.executeUpdate();
            }
        }

        // Streamed rows must not be visible before a flush.
        assertCacheEmpty();

        execute(conn, "set streaming 1 batch_size 111 allow_overwrite 0 per_node_buffer_size 512 "
            + "per_node_parallel_operations 4 flush_frequency 5000");

        U.sleep(500);

        // Batch size is kept in the connection's stream state holder.
        assertEquals((Integer)111, U.field((Object)U.field(conn, "streamState"), "streamBatchSize"));

        SqlClientContext ctx = sqlClientContext();

        // Server-side streaming parameters should all have been updated.
        assertTrue(ctx.isStream());
        assertFalse(U.field(ctx, "streamAllowOverwrite"));
        assertEquals((Integer)512, U.field(ctx, "streamNodeBufSize"));
        assertEquals((Long)5000L, U.field(ctx, "streamFlushTimeout"));
        assertEquals((Integer)4, U.field(ctx, "streamNodeParOps"));

        // Now let's check it's all there - SET STREAMING 1 repeated call must also have caused flush.
        for (int id = 1; id <= 100; id++)
            assertEquals(nameForId(id), nameForIdInCache(id));
    }
}
Use of org.apache.ignite.internal.processors.query.SqlClientContext in the Apache Ignite project.
Class JdbcThinStreamingSelfTest, method assertStreamingState.
/**
 * Check that streaming state on target node is as expected.
 *
 * @param on Expected streaming state.
 */
private void assertStreamingState(boolean on) {
    assertEquals(on, sqlClientContext().isStream());
}
Use of org.apache.ignite.internal.processors.query.SqlClientContext in the Apache Ignite project.
Class JdbcThinStreamingSelfTest, method testSimultaneousStreaming.
/**
 * Verifies that two targets (cache Person and table T) can be streamed into
 * over a single connection, each via its own data streamer, and that all
 * rows end up in the correct destination.
 *
 * @throws Exception if failed.
 */
public void testSimultaneousStreaming() throws Exception {
    try (Connection anotherConn = createOrdinaryConnection()) {
        execute(anotherConn, "CREATE TABLE PUBLIC.T(x int primary key, y int) WITH "
            + "\"cache_name=T,wrap_value=false\"");
    }

    // Timeout to let connection close be handled on server side.
    U.sleep(500);

    try (Connection conn = createStreamedConnection(false, 10000)) {
        assertStreamingState(true);

        PreparedStatement firstStmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)");
        PreparedStatement secondStmt = conn.prepareStatement("insert into PUBLIC.T(x, y) values (?, ?)");

        try {
            // Interleave inserts into the two targets so both streamers are active at once.
            for (int i = 1; i <= 10; i++) {
                firstStmt.setInt(1, i);
                firstStmt.setString(2, nameForId(i));
                firstStmt.executeUpdate();
            }

            for (int i = 51; i <= 67; i++) {
                secondStmt.setInt(1, i);
                secondStmt.setInt(2, i);
                secondStmt.executeUpdate();
            }

            for (int i = 11; i <= 50; i++) {
                firstStmt.setInt(1, i);
                firstStmt.setString(2, nameForId(i));
                firstStmt.executeUpdate();
            }

            for (int i = 68; i <= 100; i++) {
                secondStmt.setInt(1, i);
                secondStmt.setInt(2, i);
                secondStmt.executeUpdate();
            }

            // Streamed rows must not be visible before a flush.
            assertCacheEmpty();

            SqlClientContext cliCtx = sqlClientContext();

            final HashMap<String, IgniteDataStreamer<?, ?>> streamers = U.field(cliCtx, "streamers");

            // The client sends batch requests asynchronously, so asserting the streamers
            // map size immediately is racy - wait for the server to process the requests.
            GridTestUtils.waitForCondition(() -> streamers.size() == 2, 1000);

            assertEquals(2, streamers.size());

            assertEqualsCollections(new HashSet<>(Arrays.asList("person", "T")), streamers.keySet());
        }
        finally {
            U.closeQuiet(firstStmt);
            U.closeQuiet(secondStmt);
        }
    }

    // Let's wait a little so that all data arrives to destination - we can't intercept streamers' flush
    // on connection close in any way.
    U.sleep(1000);

    // Now let's check it's all there.
    for (int i = 1; i <= 50; i++)
        assertEquals(nameForId(i), nameForIdInCache(i));

    for (int i = 51; i <= 100; i++)
        assertEquals(i, grid(0).cache("T").get(i));
}
Aggregations