Use of org.apache.cassandra.service.reads.repair.TestableReadRepair in project cassandra by apache.
From the class DataResolverTest, method testResolveDeletedCollection.
@Test
public void testResolveDeletedCollection() {
    EndpointsForRange replicas = makeReplicas(2);
    ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
    TestableReadRepair readRepair = new TestableReadRepair(cmd);
    DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
    long[] ts = { 100, 200 };

    // peer1 returns an older complex deletion plus one surviving map cell
    Row.Builder builder = BTreeRow.unsortedBuilder();
    builder.newRow(Clustering.EMPTY);
    builder.addComplexDeletion(m, new DeletionTime(ts[0] - 1, nowInSec));
    builder.addCell(mapCell(0, 0, ts[0]));
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    resolver.preprocess(response(cmd, peer1, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));

    // peer2 returns a newer complex deletion with no cells, wiping the collection
    builder.newRow(Clustering.EMPTY);
    DeletionTime expectedCmplxDelete = new DeletionTime(ts[1] - 1, nowInSec);
    builder.addComplexDeletion(m, expectedCmplxDelete);
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    resolver.preprocess(response(cmd, peer2, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));

    // the resolved result is empty: the newer deletion shadows peer1's cell
    try (PartitionIterator data = resolver.resolve()) {
        assertFalse(data.hasNext());
    }

    // peer1 should be repaired with the newer complex deletion; peer2 needs nothing
    Mutation mutation = readRepair.getForEndpoint(peer1);
    Iterator<Row> rowIter = mutation.getPartitionUpdate(cfm2).iterator();
    assertTrue(rowIter.hasNext());
    Row row = rowIter.next();
    assertFalse(rowIter.hasNext());
    ComplexColumnData cd = row.getComplexColumnData(m);
    assertEquals(Collections.emptySet(), Sets.newHashSet(cd));
    assertEquals(expectedCmplxDelete, cd.complexDeletion());
    Assert.assertNull(readRepair.sent.get(peer2));
}
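
This test (and the other map-column tests below) relies on small helpers defined elsewhere in DataResolverTest and not shown on this page. A minimal sketch of what they might look like, assuming m is the ColumnMetadata of the map column and that the exact bodies match how the cells are asserted above:

    private static ByteBuffer bb(int b) {
        // int key or value encoded for the map column
        return ByteBufferUtil.bytes(b);
    }

    private Cell<?> mapCell(int k, int v, long ts) {
        // a live cell of the complex column m: entry k -> v written at timestamp ts
        return BufferCell.live(m, ts, bb(v), CellPath.create(bb(k)));
    }

Under that reading, mapCell(0, 0, ts[0]) above is a map entry 0 -> 0 written at timestamp 100, which the newer complex deletion from peer2 then shadows.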
Use of org.apache.cassandra.service.reads.repair.TestableReadRepair in project cassandra by apache.
From the class DataResolverTest, method makeReplicas.
private EndpointsForRange makeReplicas(int num) {
    StorageService.instance.getTokenMetadata().clearUnsafe();
    switch (num) {
        case 2:
            ks = AbstractReadResponseTest.ks;
            cfs = AbstractReadResponseTest.cfs;
            break;
        case 4:
            ks = AbstractReadResponseTest.ks3;
            cfs = AbstractReadResponseTest.cfs3;
            break;
        default:
            throw new IllegalStateException("This test needs refactoring to cleanly support different replication factors");
    }
    command = Util.cmd(cfs, dk).withNowInSeconds(nowInSec).build();
    readRepair = new TestableReadRepair(command);
    Token token = Murmur3Partitioner.instance.getMinimumToken();
    EndpointsForRange.Builder replicas = EndpointsForRange.builder(ReplicaUtils.FULL_RANGE, num);
    for (int i = 0; i < num; i++) {
        try {
            // register full replicas on 127.0.0.1 .. 127.0.0.num and announce them via token metadata and gossip
            InetAddressAndPort endpoint = InetAddressAndPort.getByAddress(new byte[] { 127, 0, 0, (byte) (i + 1) });
            replicas.add(ReplicaUtils.full(endpoint));
            StorageService.instance.getTokenMetadata().updateNormalToken(token = token.increaseSlightly(), endpoint);
            Gossiper.instance.initializeNodeUnsafe(endpoint, UUID.randomUUID(), 1);
        } catch (UnknownHostException e) {
            throw new AssertionError(e);
        }
    }
    return replicas.build();
}
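
Besides building the replica set, makeReplicas(num) also resets the shared command and readRepair fields used by the simpler tests. A hypothetical check of its behaviour (not part of DataResolverTest; it assumes the builder preserves insertion order) could look like:

    @Test
    public void checkTwoReplicasOnLoopback() throws UnknownHostException {
        EndpointsForRange replicas = makeReplicas(2);
        assertEquals(2, replicas.size());
        // endpoints are created in loop order: 127.0.0.1 first, then 127.0.0.2
        assertEquals(InetAddressAndPort.getByAddress(new byte[] { 127, 0, 0, 1 }), replicas.get(0).endpoint());
        assertEquals(InetAddressAndPort.getByAddress(new byte[] { 127, 0, 0, 2 }), replicas.get(1).endpoint());
    }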
Use of org.apache.cassandra.service.reads.repair.TestableReadRepair in project cassandra by apache.
From the class DataResolverTest, method testResolveComplexDelete.
@Test
public void testResolveComplexDelete() {
    EndpointsForRange replicas = makeReplicas(2);
    ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
    TestableReadRepair readRepair = new TestableReadRepair(cmd);
    DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
    long[] ts = { 100, 200 };

    // peer1: older complex deletion plus the map entry 0 -> 0
    Row.Builder builder = BTreeRow.unsortedBuilder();
    builder.newRow(Clustering.EMPTY);
    builder.addComplexDeletion(m, new DeletionTime(ts[0] - 1, nowInSec));
    builder.addCell(mapCell(0, 0, ts[0]));
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    resolver.preprocess(response(cmd, peer1, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));

    // peer2: newer complex deletion plus the map entry 1 -> 1
    builder.newRow(Clustering.EMPTY);
    DeletionTime expectedCmplxDelete = new DeletionTime(ts[1] - 1, nowInSec);
    builder.addComplexDeletion(m, expectedCmplxDelete);
    Cell<?> expectedCell = mapCell(1, 1, ts[1]);
    builder.addCell(expectedCell);
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    resolver.preprocess(response(cmd, peer2, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));

    // the resolved row keeps only peer2's entry: the newer deletion shadows peer1's cell
    try (PartitionIterator data = resolver.resolve()) {
        try (RowIterator rows = Iterators.getOnlyElement(data)) {
            Row row = Iterators.getOnlyElement(rows);
            assertColumns(row, "m");
            Assert.assertNull(row.getCell(m, CellPath.create(bb(0))));
            Assert.assertNotNull(row.getCell(m, CellPath.create(bb(1))));
        }
    }

    // peer1 is repaired with the newer deletion and the surviving cell; peer2 needs nothing
    Mutation mutation = readRepair.getForEndpoint(peer1);
    Iterator<Row> rowIter = mutation.getPartitionUpdate(cfm2).iterator();
    assertTrue(rowIter.hasNext());
    Row row = rowIter.next();
    assertFalse(rowIter.hasNext());
    ComplexColumnData cd = row.getComplexColumnData(m);
    assertEquals(Collections.singleton(expectedCell), Sets.newHashSet(cd));
    assertEquals(expectedCmplxDelete, cd.complexDeletion());
    Assert.assertNull(readRepair.sent.get(peer2));
}
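
The timestamps explain the expected result: peer2's complex deletion is marked at ts[1] - 1 = 199, so it shadows peer1's cell written at 100 but not peer2's own cell written at 200. As an illustrative sketch only, assuming DeletionTime exposes a deletes(long timestamp) check with those semantics:

    DeletionTime complexDeletion = new DeletionTime(ts[1] - 1, nowInSec); // markedForDeleteAt = 199
    assertTrue(complexDeletion.deletes(ts[0]));   // peer1's cell at 100 is shadowed
    assertFalse(complexDeletion.deletes(ts[1]));  // peer2's cell at 200 survives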
Use of org.apache.cassandra.service.reads.repair.TestableReadRepair in project cassandra by apache.
From the class DataResolverTest, method testResolveWithBothEmpty.
@Test
public void testResolveWithBothEmpty() {
    EndpointsForRange replicas = makeReplicas(2);
    TestableReadRepair readRepair = new TestableReadRepair(command);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    resolver.preprocess(response(command, replicas.get(0).endpoint(), EmptyIterators.unfilteredPartition(cfm)));
    resolver.preprocess(response(command, replicas.get(1).endpoint(), EmptyIterators.unfilteredPartition(cfm)));
    try (PartitionIterator data = resolver.resolve()) {
        assertFalse(data.hasNext());
    }
    assertTrue(readRepair.sent.isEmpty());
}
Use of org.apache.cassandra.service.reads.repair.TestableReadRepair in project cassandra by apache.
From the class DataResolverTest, method testResolveNewCollectionOverwritingDeleted.
@Test
public void testResolveNewCollectionOverwritingDeleted() {
    EndpointsForRange replicas = makeReplicas(2);
    ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
    TestableReadRepair readRepair = new TestableReadRepair(cmd);
    DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
    long[] ts = { 100, 200 };

    // cleared map column
    Row.Builder builder = BTreeRow.unsortedBuilder();
    builder.newRow(Clustering.EMPTY);
    builder.addComplexDeletion(m, new DeletionTime(ts[0] - 1, nowInSec));
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    resolver.preprocess(response(cmd, peer1, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));

    // newer, overwritten map column
    builder.newRow(Clustering.EMPTY);
    DeletionTime expectedCmplxDelete = new DeletionTime(ts[1] - 1, nowInSec);
    builder.addComplexDeletion(m, expectedCmplxDelete);
    Cell<?> expectedCell = mapCell(1, 1, ts[1]);
    builder.addCell(expectedCell);
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    resolver.preprocess(response(cmd, peer2, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));

    try (PartitionIterator data = resolver.resolve()) {
        try (RowIterator rows = Iterators.getOnlyElement(data)) {
            Row row = Iterators.getOnlyElement(rows);
            assertColumns(row, "m");
            ComplexColumnData cd = row.getComplexColumnData(m);
            assertEquals(Collections.singleton(expectedCell), Sets.newHashSet(cd));
        }
    }

    Row row = Iterators.getOnlyElement(readRepair.getForEndpoint(peer1).getPartitionUpdate(cfm2).iterator());
    ComplexColumnData cd = row.getComplexColumnData(m);
    assertEquals(Collections.singleton(expectedCell), Sets.newHashSet(cd));
    assertEquals(expectedCmplxDelete, cd.complexDeletion());
    Assert.assertNull(readRepair.sent.get(peer2));
}