Use of com.datastax.oss.driver.api.core.CqlSession in project zeppelin by apache.
The class CassandraInterpreterTest, method setUp:
@BeforeClass
public static synchronized void setUp() throws IOException, InterruptedException {
  System.setProperty("cassandra.skip_wait_for_gossip_to_settle", "0");
  System.setProperty("cassandra.load_ring_state", "false");
  System.setProperty("cassandra.initial_token", "0");
  System.setProperty("cassandra.num_tokens", "nil");
  System.setProperty("cassandra.allocate_tokens_for_local_replication_factor", "nil");
  EmbeddedCassandraServerHelper.startEmbeddedCassandra();
  CqlSession session = EmbeddedCassandraServerHelper.getSession();
  new CQLDataLoader(session).load(new ClassPathCQLDataSet("prepare_all.cql", "zeppelin"));
  Properties properties = new Properties();
  properties.setProperty(CASSANDRA_CLUSTER_NAME, EmbeddedCassandraServerHelper.getClusterName());
  properties.setProperty(CASSANDRA_COMPRESSION_PROTOCOL, "NONE");
  properties.setProperty(CASSANDRA_CREDENTIALS_USERNAME, "none");
  properties.setProperty(CASSANDRA_CREDENTIALS_PASSWORD, "none");
  properties.setProperty(CASSANDRA_LOAD_BALANCING_POLICY, "DEFAULT");
  properties.setProperty(CASSANDRA_RETRY_POLICY, "DEFAULT");
  properties.setProperty(CASSANDRA_RECONNECTION_POLICY, "DEFAULT");
  properties.setProperty(CASSANDRA_SPECULATIVE_EXECUTION_POLICY, "DEFAULT");
  properties.setProperty(CASSANDRA_POOLING_CONNECTION_PER_HOST_LOCAL, "2");
  properties.setProperty(CASSANDRA_POOLING_CONNECTION_PER_HOST_REMOTE, "1");
  properties.setProperty(CASSANDRA_POOLING_MAX_REQUESTS_PER_CONNECTION, "1024");
  properties.setProperty(CASSANDRA_POOLING_POOL_TIMEOUT_MILLIS, "5000");
  properties.setProperty(CASSANDRA_POOLING_HEARTBEAT_INTERVAL_SECONDS, "30");
  properties.setProperty(CASSANDRA_QUERY_DEFAULT_CONSISTENCY, "ONE");
  properties.setProperty(CASSANDRA_QUERY_DEFAULT_SERIAL_CONSISTENCY, "SERIAL");
  properties.setProperty(CASSANDRA_QUERY_DEFAULT_FETCH_SIZE, "5000");
  properties.setProperty(CASSANDRA_SOCKET_CONNECTION_TIMEOUT_MILLIS, "5000");
  properties.setProperty(CASSANDRA_SOCKET_READ_TIMEOUT_MILLIS, "12000");
  properties.setProperty(CASSANDRA_SOCKET_TCP_NO_DELAY, "true");
  properties.setProperty(CASSANDRA_HOSTS, EmbeddedCassandraServerHelper.getHost());
  properties.setProperty(CASSANDRA_PORT,
      Integer.toString(EmbeddedCassandraServerHelper.getNativeTransportPort()));
  properties.setProperty("datastax-java-driver.advanced.connection.pool.local.size", "1");
  interpreter = new CassandraInterpreter(properties);
  interpreter.open();
}
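As a usage note, here is a minimal sketch (not part of the original test) of how a test method might exercise the interpreter configured above; intrContext stands in for an InterpreterContext assumed to be built elsewhere in the suite, and zeppelin.artists is a hypothetical table assumed to be loaded by prepare_all.cql.

// Hedged sketch: intrContext and the queried table are assumptions, not from the original.
InterpreterResult result = interpreter.interpret(
    "SELECT * FROM zeppelin.artists LIMIT 10;", intrContext);
assertEquals(InterpreterResult.Code.SUCCESS, result.code());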
Use of com.datastax.oss.driver.api.core.CqlSession in project cdc-apache-cassandra by datastax.
The class PulsarCassandraSourceTests, method testBatchInsert:
public void testBatchInsert(String ksName,
                            Class<? extends Converter> keyConverter,
                            Class<? extends Converter> valueConverter)
    throws InterruptedException, IOException {
  try {
    try (CqlSession cqlSession = cassandraContainer1.getCqlSession()) {
      cqlSession.execute("CREATE KEYSPACE IF NOT EXISTS " + ksName
          + " WITH replication = {'class':'SimpleStrategy','replication_factor':'2'};");
      cqlSession.execute("CREATE TABLE IF NOT EXISTS " + ksName
          + ".table1 (id text, a int, b int, PRIMARY KEY (id, a)) WITH cdc=true");
    }
    deployConnector(ksName, "table1", keyConverter, valueConverter);
    // run batch insert in parallel
    Executors.newSingleThreadExecutor().submit(() -> {
      try (CqlSession cqlSession = cassandraContainer1.getCqlSession()) {
        PreparedStatement statement =
            cqlSession.prepare("INSERT INTO " + ksName + ".table1 (id, a, b) VALUES (?,?,?)");
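        // 10 unlogged batches of 1,000 rows each: the consumer below expects 10,000 mutations.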
        for (int batch = 0; batch < 10; batch++) {
          BatchStatementBuilder batchBuilder = BatchStatement.builder(BatchType.UNLOGGED);
          for (int i = 0; i < 1000; i++) {
            batchBuilder.addStatement(statement.bind("a" + batch, i, i));
          }
          cqlSession.execute(batchBuilder.build());
        }
        // no drain of the commit logs; the test relies on near-real-time (NRT) CDC
      } catch (Exception e) {
        log.error("error:", e);
      }
    });
    try (PulsarClient pulsarClient =
             PulsarClient.builder().serviceUrl(pulsarContainer.getPulsarBrokerUrl()).build()) {
      try (Consumer<GenericRecord> consumer = pulsarClient
          .newConsumer(org.apache.pulsar.client.api.Schema.AUTO_CONSUME())
          .topic(String.format(Locale.ROOT, "data-%s.table1", ksName))
          .subscriptionName("sub1")
          .subscriptionType(SubscriptionType.Key_Shared)
          .subscriptionMode(SubscriptionMode.Durable)
          .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
          .subscribe()) {
        Message<GenericRecord> msg;
        int msgCount = 0;
        while ((msg = consumer.receive(90, TimeUnit.SECONDS)) != null && msgCount < 10000) {
          msgCount++;
          GenericObject genericObject = msg.getValue();
          assertEquals(SchemaType.KEY_VALUE, genericObject.getSchemaType());
          KeyValue<GenericRecord, GenericRecord> kv =
              (KeyValue<GenericRecord, GenericRecord>) genericObject.getNativeObject();
          GenericRecord key = kv.getKey();
          Assert.assertTrue(((String) key.getField("id")).startsWith("a"));
          consumer.acknowledge(msg);
        }
        assertEquals(10000, msgCount);
      }
    }
  } finally {
    dumpFunctionLogs("cassandra-source-" + ksName + "-table1");
    undeployConnector(ksName, "table1");
  }
}
Use of com.datastax.oss.driver.api.core.CqlSession in project cdc-apache-cassandra by datastax.
The class PulsarCassandraSourceTests, method testReadTimeout:
@Test
public void testReadTimeout() throws InterruptedException, IOException {
  final String ksName = "ksx";
  try (ChaosNetworkContainer<?> chaosContainer =
           new ChaosNetworkContainer<>(cassandraContainer2.getContainerName(), "100s")) {
    try (CqlSession cqlSession = cassandraContainer1.getCqlSession()) {
      cqlSession.execute("CREATE KEYSPACE IF NOT EXISTS " + ksName
          + " WITH replication = {'class':'SimpleStrategy','replication_factor':'2'};");
      cqlSession.execute("CREATE TABLE IF NOT EXISTS " + ksName
          + ".table1 (id text PRIMARY KEY, a int) WITH cdc=true");
      cqlSession.execute("INSERT INTO " + ksName + ".table1 (id, a) VALUES('1',1)");
      deployConnector(ksName, "table1", NativeAvroConverter.class, NativeAvroConverter.class);
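      // Starting the chaos container triggers the network fault configured above against
      // cassandraContainer2 for "100s"; the next two inserts are written during the disruption.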
      chaosContainer.start();
      cqlSession.execute("INSERT INTO " + ksName + ".table1 (id, a) VALUES('2',1)");
      cqlSession.execute("INSERT INTO " + ksName + ".table1 (id, a) VALUES('3',1)");
    }
    try (PulsarClient pulsarClient =
             PulsarClient.builder().serviceUrl(pulsarContainer.getPulsarBrokerUrl()).build()) {
      try (Consumer<GenericRecord> consumer = pulsarClient
          .newConsumer(org.apache.pulsar.client.api.Schema.AUTO_CONSUME())
          .topic(String.format(Locale.ROOT, "data-%s.table1", ksName))
          .subscriptionName("sub1")
          .subscriptionType(SubscriptionType.Key_Shared)
          .subscriptionMode(SubscriptionMode.Durable)
          .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
          .subscribe()) {
        Message<GenericRecord> msg;
        int numMessage = 0;
        while ((msg = consumer.receive(180, TimeUnit.SECONDS)) != null && numMessage < 3) {
          numMessage++;
          consumer.acknowledge(msg);
        }
        assertEquals(3, numMessage);
        assertEquals(0, connectorStatus(ksName, "table1"));
      }
    }
  } finally {
    dumpFunctionLogs("cassandra-source-" + ksName + "-table1");
    undeployConnector(ksName, "table1");
  }
}
Use of com.datastax.oss.driver.api.core.CqlSession in project cdc-apache-cassandra by datastax.
The class PulsarCassandraSourceTests, method testSchema:
// docker exec -it pulsar cat /pulsar/logs/functions/public/default/cassandra-source-ks1-table3/cassandra-source-ks1-table3-0.log
public void testSchema(String ksName,
                       Class<? extends Converter> keyConverter,
                       Class<? extends Converter> valueConverter)
    throws InterruptedException, IOException {
  try {
    try (CqlSession cqlSession = cassandraContainer1.getCqlSession()) {
      cqlSession.execute("CREATE KEYSPACE IF NOT EXISTS " + ksName
          + " WITH replication = {'class':'SimpleStrategy','replication_factor':'1'};");
      cqlSession.execute("CREATE TYPE IF NOT EXISTS " + ksName + ".zudt ("
          + "ztext text, zascii ascii, zboolean boolean, zblob blob, ztimestamp timestamp, ztime time, zdate date, zuuid uuid, ztimeuuid timeuuid, "
          + "ztinyint tinyint, zsmallint smallint, zint int, zbigint bigint, zvarint varint, zdecimal decimal, zduration duration, zdouble double, "
          + "zfloat float, zinet4 inet, zinet6 inet, zlist frozen<list<text>>, zset frozen<set<int>>, zmap frozen<map<text, double>>"
          + ");");
      UserDefinedType zudt = cqlSession.getMetadata().getKeyspace(ksName)
          .flatMap(ks -> ks.getUserDefinedType("zudt"))
          .orElseThrow(() -> new IllegalArgumentException("Missing UDT zudt definition"));
      // populate one value for every field of the zudt type, in declaration order
      UdtValue zudtValue = zudt.newValue(
          dataSpecMap.get("text").cqlValue, dataSpecMap.get("ascii").cqlValue,
          dataSpecMap.get("boolean").cqlValue, dataSpecMap.get("blob").cqlValue,
          dataSpecMap.get("timestamp").cqlValue, dataSpecMap.get("time").cqlValue,
          dataSpecMap.get("date").cqlValue, dataSpecMap.get("uuid").cqlValue,
          dataSpecMap.get("timeuuid").cqlValue, dataSpecMap.get("tinyint").cqlValue,
          dataSpecMap.get("smallint").cqlValue, dataSpecMap.get("int").cqlValue,
          dataSpecMap.get("bigint").cqlValue, dataSpecMap.get("varint").cqlValue,
          dataSpecMap.get("decimal").cqlValue, dataSpecMap.get("duration").cqlValue,
          dataSpecMap.get("double").cqlValue, dataSpecMap.get("float").cqlValue,
          dataSpecMap.get("inet4").cqlValue, dataSpecMap.get("inet6").cqlValue,
          dataSpecMap.get("list").cqlValue, dataSpecMap.get("set").cqlValue,
          dataSpecMap.get("map").cqlValue);
      cqlSession.execute("CREATE TABLE IF NOT EXISTS " + ksName + ".table3 ("
          + "xtext text, xascii ascii, xboolean boolean, xblob blob, xtimestamp timestamp, xtime time, xdate date, xuuid uuid, xtimeuuid timeuuid, xtinyint tinyint, xsmallint smallint, xint int, xbigint bigint, xvarint varint, xdecimal decimal, xdouble double, xfloat float, xinet4 inet, xinet6 inet, "
          + "ytext text, yascii ascii, yboolean boolean, yblob blob, ytimestamp timestamp, ytime time, ydate date, yuuid uuid, ytimeuuid timeuuid, ytinyint tinyint, ysmallint smallint, yint int, ybigint bigint, yvarint varint, ydecimal decimal, ydouble double, yfloat float, yinet4 inet, yinet6 inet, yduration duration, yudt zudt, ylist list<text>, yset set<int>, ymap map<text, double>, ylistofmap list<frozen<map<text,double>>>, ysetofudt set<frozen<zudt>>,"
          + "primary key (xtext, xascii, xboolean, xblob, xtimestamp, xtime, xdate, xuuid, xtimeuuid, xtinyint, xsmallint, xint, xbigint, xvarint, xdecimal, xdouble, xfloat, xinet4, xinet6)) "
          + "WITH CLUSTERING ORDER BY (xascii ASC, xboolean DESC, xblob ASC, xtimestamp DESC, xtime DESC, xdate ASC, xuuid DESC, xtimeuuid ASC, xtinyint DESC, xsmallint ASC, xint DESC, xbigint ASC, xvarint DESC, xdecimal ASC, xdouble DESC, xfloat ASC, xinet4 ASC, xinet6 DESC) AND cdc=true");
      cqlSession.execute("INSERT INTO " + ksName + ".table3 ("
          + "xtext, xascii, xboolean, xblob, xtimestamp, xtime, xdate, xuuid, xtimeuuid, xtinyint, xsmallint, xint, xbigint, xvarint, xdecimal, xdouble, xfloat, xinet4, xinet6, "
          + "ytext, yascii, yboolean, yblob, ytimestamp, ytime, ydate, yuuid, ytimeuuid, ytinyint, ysmallint, yint, ybigint, yvarint, ydecimal, ydouble, yfloat, yinet4, yinet6, yduration, yudt, ylist, yset, ymap, ylistofmap, ysetofudt"
          + ") VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?, ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?, ?,?, ?,?,?,?,?)",
          // 19 primary-key (x*) values
          dataSpecMap.get("text").cqlValue, dataSpecMap.get("ascii").cqlValue,
          dataSpecMap.get("boolean").cqlValue, dataSpecMap.get("blob").cqlValue,
          dataSpecMap.get("timestamp").cqlValue, dataSpecMap.get("time").cqlValue,
          dataSpecMap.get("date").cqlValue, dataSpecMap.get("uuid").cqlValue,
          dataSpecMap.get("timeuuid").cqlValue, dataSpecMap.get("tinyint").cqlValue,
          dataSpecMap.get("smallint").cqlValue, dataSpecMap.get("int").cqlValue,
          dataSpecMap.get("bigint").cqlValue, dataSpecMap.get("varint").cqlValue,
          dataSpecMap.get("decimal").cqlValue, dataSpecMap.get("double").cqlValue,
          dataSpecMap.get("float").cqlValue, dataSpecMap.get("inet4").cqlValue,
          dataSpecMap.get("inet6").cqlValue,
          // 19 regular-column (y*) values of the same types
          dataSpecMap.get("text").cqlValue, dataSpecMap.get("ascii").cqlValue,
          dataSpecMap.get("boolean").cqlValue, dataSpecMap.get("blob").cqlValue,
          dataSpecMap.get("timestamp").cqlValue, dataSpecMap.get("time").cqlValue,
          dataSpecMap.get("date").cqlValue, dataSpecMap.get("uuid").cqlValue,
          dataSpecMap.get("timeuuid").cqlValue, dataSpecMap.get("tinyint").cqlValue,
          dataSpecMap.get("smallint").cqlValue, dataSpecMap.get("int").cqlValue,
          dataSpecMap.get("bigint").cqlValue, dataSpecMap.get("varint").cqlValue,
          dataSpecMap.get("decimal").cqlValue, dataSpecMap.get("double").cqlValue,
          dataSpecMap.get("float").cqlValue, dataSpecMap.get("inet4").cqlValue,
          dataSpecMap.get("inet6").cqlValue,
          // duration, UDT, and collection columns
          dataSpecMap.get("duration").cqlValue, zudtValue,
          dataSpecMap.get("list").cqlValue, dataSpecMap.get("set").cqlValue,
          dataSpecMap.get("map").cqlValue, dataSpecMap.get("listofmap").cqlValue,
          ImmutableSet.of(zudtValue, zudtValue));
    }
    deployConnector(ksName, "table3", keyConverter, valueConverter);
    try (PulsarClient pulsarClient =
             PulsarClient.builder().serviceUrl(pulsarContainer.getPulsarBrokerUrl()).build()) {
      try (Consumer<GenericRecord> consumer = pulsarClient
          .newConsumer(org.apache.pulsar.client.api.Schema.AUTO_CONSUME())
          .topic(String.format(Locale.ROOT, "data-%s.table3", ksName))
          .subscriptionName("sub1")
          .subscriptionType(SubscriptionType.Key_Shared)
          .subscriptionMode(SubscriptionMode.Durable)
          .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
          .subscribe()) {
        int mutationTable3Count = 0;
        Message<GenericRecord> msg;
        while ((msg = consumer.receive(120, TimeUnit.SECONDS)) != null && mutationTable3Count < 1) {
          GenericObject genericObject = msg.getValue();
          mutationTable3Count++;
          assertEquals(SchemaType.KEY_VALUE, genericObject.getSchemaType());
          KeyValue<GenericRecord, GenericRecord> kv =
              (KeyValue<GenericRecord, GenericRecord>) genericObject.getNativeObject();
          GenericRecord key = kv.getKey();
          GenericRecord value = kv.getValue();
          // check primary key fields
          Map<String, Object> keyMap = genericRecordToMap(key);
          for (Field field : key.getFields()) {
            assertField(field.getName(), keyMap.get(field.getName()));
          }
          // check regular columns.
          Map<String, Object> valueMap = genericRecordToMap(value);
          for (Field field : value.getFields()) {
            assertField(field.getName(), valueMap.get(field.getName()));
          }
          consumer.acknowledge(msg);
        }
        assertEquals(1, mutationTable3Count);
      }
    }
  } finally {
    dumpFunctionLogs("cassandra-source-" + ksName + "-table3");
    undeployConnector(ksName, "table3");
  }
}
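The genericRecordToMap helper used above is not shown on this page. A minimal sketch of what it plausibly does, assuming it only flattens the top-level fields of a Pulsar GenericRecord (the project's real implementation may differ):

// Hypothetical reconstruction of genericRecordToMap: flattens top-level fields
// of a Pulsar GenericRecord into a Map for field-by-field assertions.
private static Map<String, Object> genericRecordToMap(GenericRecord record) {
  Map<String, Object> map = new HashMap<>();
  for (Field field : record.getFields()) {
    map.put(field.getName(), record.getField(field));
  }
  return map;
}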
Use of com.datastax.oss.driver.api.core.CqlSession in project cdc-apache-cassandra by datastax.
The class PulsarDualNodeTests, method testProducer:
@Test
public void testProducer() throws InterruptedException, IOException {
  String pulsarServiceUrl = "pulsar://pulsar:" + pulsarContainer.BROKER_PORT;
  try (CassandraContainer<?> cassandraContainer1 = createCassandraContainer(1, pulsarServiceUrl, testNetwork);
       CassandraContainer<?> cassandraContainer2 = createCassandraContainer(2, pulsarServiceUrl, testNetwork)) {
    cassandraContainer1.start();
    cassandraContainer2.start();
    try (CqlSession cqlSession = cassandraContainer1.getCqlSession()) {
      cqlSession.execute("CREATE KEYSPACE IF NOT EXISTS ks1 WITH replication = {'class':'SimpleStrategy','replication_factor':'2'};");
      cqlSession.execute("CREATE TABLE IF NOT EXISTS ks1.table1 (id text PRIMARY KEY, a int) WITH cdc=true");
      cqlSession.execute("INSERT INTO ks1.table1 (id, a) VALUES('1',1)");
      cqlSession.execute("INSERT INTO ks1.table1 (id, a) VALUES('2',1)");
      cqlSession.execute("INSERT INTO ks1.table1 (id, a) VALUES('3',1)");
      cqlSession.execute("CREATE TABLE IF NOT EXISTS ks1.table2 (a text, b int, c int, PRIMARY KEY(a,b)) WITH cdc=true");
      cqlSession.execute("INSERT INTO ks1.table2 (a,b,c) VALUES('1',1,1)");
      cqlSession.execute("INSERT INTO ks1.table2 (a,b,c) VALUES('2',1,1)");
      cqlSession.execute("INSERT INTO ks1.table2 (a,b,c) VALUES('3',1,1)");
    }
    drain(cassandraContainer1, cassandraContainer2);
    Map<String, List<UUID>> nodesTable1 = new HashMap<>();
    Map<String, List<UUID>> nodesTable2 = new HashMap<>();
    Map<String, List<String>> digestsTable1 = new HashMap<>();
    Map<String, List<String>> digestsTable2 = new HashMap<>();
    try (PulsarClient pulsarClient =
             PulsarClient.builder().serviceUrl(pulsarContainer.getPulsarBrokerUrl()).build()) {
      RecordSchemaBuilder recordSchemaBuilder1 = SchemaBuilder.record("ks1.table1");
      recordSchemaBuilder1.field("id").type(SchemaType.STRING).required();
      SchemaInfo keySchemaInfo1 = recordSchemaBuilder1.build(SchemaType.AVRO);
      Schema<GenericRecord> keySchema1 = Schema.generic(keySchemaInfo1);
      Schema<KeyValue<GenericRecord, MutationValue>> schema1 =
          Schema.KeyValue(keySchema1, Schema.AVRO(MutationValue.class), KeyValueEncodingType.SEPARATED);
      // pulsar-admin topics peek-messages persistent://public/default/events-ks1.table1-partition-0 --count 3 --subscription sub1
      try (Consumer<KeyValue<GenericRecord, MutationValue>> consumer = pulsarClient
          .newConsumer(schema1)
          .topic("events-ks1.table1")
          .subscriptionName("sub1")
          .subscriptionType(SubscriptionType.Key_Shared)
          .subscriptionMode(SubscriptionMode.Durable)
          .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
          .subscribe()) {
        Message<KeyValue<GenericRecord, MutationValue>> msg;
        while ((msg = consumer.receive(30, TimeUnit.SECONDS)) != null
            && nodesTable1.values().stream().mapToInt(List::size).sum() < 6) {
          KeyValue<GenericRecord, MutationValue> kv = msg.getValue();
          GenericRecord key = kv.getKey();
          MutationValue val = kv.getValue();
          System.out.println("Consumer Record: topicName=" + msg.getTopicName()
              + " key=" + AgentTestUtil.genericRecordToString(key) + " value=" + val);
          List<UUID> nodes = nodesTable1.computeIfAbsent((String) key.getField("id"), k -> new ArrayList<>());
          nodes.add(val.getNodeId());
          List<String> digests = digestsTable1.computeIfAbsent((String) key.getField("id"), k -> new ArrayList<>());
          digests.add(val.getMd5Digest());
          consumer.acknowledgeAsync(msg);
        }
      }
      // check we have exactly one mutation per node for each key.
      for (int i = 1; i < 4; i++) {
        Assert.assertNotNull(nodesTable1.get(Integer.toString(i)));
        assertEquals(2, nodesTable1.get(Integer.toString(i)).size());
        assertEquals(2, nodesTable1.get(Integer.toString(i)).stream().collect(Collectors.toSet()).size());
      }
      // check we have exactly 2 identical digests.
      for (int i = 1; i < 4; i++) {
        Assert.assertNotNull(digestsTable1.get(Integer.toString(i)));
        assertEquals(2, digestsTable1.get(Integer.toString(i)).size());
        assertEquals(1, digestsTable1.get(Integer.toString(i)).stream().collect(Collectors.toSet()).size());
      }
      // pulsar-admin schemas get "persistent://public/default/events-ks1.table2"
      // pulsar-admin topics peek-messages persistent://public/default/events-ks1.table2-partition-0 --count 3 --subscription sub1
      RecordSchemaBuilder recordSchemaBuilder2 = SchemaBuilder.record("ks1.table2");
      recordSchemaBuilder2.field("a").type(SchemaType.STRING).required();
      recordSchemaBuilder2.field("b").type(SchemaType.INT32).optional().defaultValue(null);
      SchemaInfo keySchemaInfo2 = recordSchemaBuilder2.build(SchemaType.AVRO);
      Schema<GenericRecord> keySchema2 = Schema.generic(keySchemaInfo2);
      Schema<KeyValue<GenericRecord, MutationValue>> schema2 =
          Schema.KeyValue(keySchema2, Schema.AVRO(MutationValue.class), KeyValueEncodingType.SEPARATED);
      try (Consumer<KeyValue<GenericRecord, MutationValue>> consumer = pulsarClient
          .newConsumer(schema2)
          .topic("events-ks1.table2")
          .subscriptionName("sub1")
          .subscriptionType(SubscriptionType.Key_Shared)
          .subscriptionMode(SubscriptionMode.Durable)
          .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
          .subscribe()) {
        Message<KeyValue<GenericRecord, MutationValue>> msg;
        while ((msg = consumer.receive(30, TimeUnit.SECONDS)) != null
            && nodesTable2.values().stream().mapToInt(List::size).sum() < 6) {
          KeyValue<GenericRecord, MutationValue> kv = msg.getValue();
          GenericRecord key = kv.getKey();
          MutationValue val = kv.getValue();
          System.out.println("Consumer Record: topicName=" + msg.getTopicName()
              + " key=" + AgentTestUtil.genericRecordToString(key) + " value=" + val);
          assertEquals(1, key.getField("b"));
          List<UUID> nodes = nodesTable2.computeIfAbsent((String) key.getField("a"), k -> new ArrayList<>());
          nodes.add(val.getNodeId());
          List<String> digests = digestsTable2.computeIfAbsent((String) key.getField("a"), k -> new ArrayList<>());
          digests.add(val.getMd5Digest());
          consumer.acknowledgeAsync(msg);
        }
      }
      // check we have exactly one mutation per node for each key.
      for (int i = 1; i < 4; i++) {
        assertEquals(2, nodesTable2.get(Integer.toString(i)).size());
        assertEquals(2, nodesTable2.get(Integer.toString(i)).stream().collect(Collectors.toSet()).size());
      }
      // check we have exactly 2 identical digests.
      for (int i = 1; i < 4; i++) {
        assertEquals(2, digestsTable2.get(Integer.toString(i)).size());
        assertEquals(1, digestsTable2.get(Integer.toString(i)).stream().collect(Collectors.toSet()).size());
      }
    }
  }
}
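The four assertion loops in testProducer repeat one pattern; a hedged refactoring sketch (not in the original test) that names it, assuming JUnit's assertEquals is statically imported:

// Hypothetical helper: with replication_factor 2 and an agent on each of the two
// nodes, every primary key should yield one mutation per node (two distinct node
// ids) whose MD5 digests agree across the two emissions.
private static void assertOneMutationPerNode(List<UUID> nodes, List<String> digests) {
  assertEquals(2, nodes.size());
  assertEquals(2, new HashSet<>(nodes).size());    // two distinct source nodes
  assertEquals(2, digests.size());
  assertEquals(1, new HashSet<>(digests).size());  // digests agree across nodes
}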