Use of org.sonar.duplications.block.Block in project sonarqube by SonarSource.
From the class IntegrateCrossProjectDuplicationsTest, method add_duplications_from_a_single_block.
@Test
public void add_duplications_from_a_single_block() {
  settings.setProperty("sonar.cpd.xoo.minimumTokens", 10);
  // This block contains 11 tokens -> a duplication will be created
  Collection<Block> originBlocks = singletonList(new Block.Builder().setResourceId(ORIGIN_FILE_KEY)
    .setBlockHash(new ByteArray("a8998353e96320ec")).setIndexInFile(0).setLines(30, 45).setUnit(0, 10).build());
  Collection<Block> duplicatedBlocks = singletonList(new Block.Builder().setResourceId(OTHER_FILE_KEY)
    .setBlockHash(new ByteArray("a8998353e96320ec")).setIndexInFile(0).setLines(40, 55).build());

  underTest.computeCpd(ORIGIN_FILE, originBlocks, duplicatedBlocks);

  assertThat(duplicationRepository.getDuplications(ORIGIN_FILE))
    .containsExactly(crossProjectDuplication(new TextBlock(30, 45), OTHER_FILE_KEY, new TextBlock(40, 55)));
}
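The arithmetic behind the comment: unit indexes are inclusive, so setUnit(0, 10) spans 11 tokens, which clears the configured minimum of 10. A minimal sketch of that threshold check, assuming Block's getStartUnit()/getEndUnit() accessors; the hasEnoughTokens helper is hypothetical, and the real check lives inside IntegrateCrossProjectDuplications:

// Hypothetical helper illustrating the minimum-token filter this test exercises.
// Unit indexes are inclusive: setUnit(0, 10) -> 10 - 0 + 1 = 11 tokens.
static boolean hasEnoughTokens(Block block, int minimumTokens) {
  int tokenCount = block.getEndUnit() - block.getStartUnit() + 1;
  return tokenCount >= minimumTokens;
}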
Use of org.sonar.duplications.block.Block in project sonarqube by SonarSource.
From the class IntegrateCrossProjectDuplicationsTest, method default_minimum_tokens_is_one_hundred.
@Test
public void default_minimum_tokens_is_one_hundred() {
  settings.setProperty("sonar.cpd.xoo.minimumTokens", (Integer) null);
  Collection<Block> originBlocks = singletonList(new Block.Builder().setResourceId(ORIGIN_FILE_KEY)
    .setBlockHash(new ByteArray("a8998353e96320ec")).setIndexInFile(0).setLines(30, 45).setUnit(0, 100).build());
  Collection<Block> duplicatedBlocks = singletonList(new Block.Builder().setResourceId(OTHER_FILE_KEY)
    .setBlockHash(new ByteArray("a8998353e96320ec")).setIndexInFile(0).setLines(40, 55).build());

  underTest.computeCpd(ORIGIN_FILE, originBlocks, duplicatedBlocks);

  assertThat(duplicationRepository.getDuplications(ORIGIN_FILE))
    .containsExactly(crossProjectDuplication(new TextBlock(30, 45), OTHER_FILE_KEY, new TextBlock(40, 55)));
}
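What this test pins down is the fallback: with the property explicitly set to null, the engine behaves as if minimumTokens were 100, and the origin block above spans 101 tokens (units 0..100), so a duplication is still registered. A hedged sketch of that fallback; the minimumTokens helper is hypothetical, and the real lookup goes through the scanner settings API:

// Hypothetical helper: a null/unset property falls back to the default of 100,
// which is the behavior this test documents.
static int minimumTokens(Integer configuredValue) {
  return configuredValue == null ? 100 : configuredValue;
}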
Use of org.sonar.duplications.block.Block in project sonarqube by SonarSource.
From the class IntegrateCrossProjectDuplicationsTest, method do_not_compute_more_than_one_hundred_duplications_when_too_many_duplications.
@Test
public void do_not_compute_more_than_one_hundred_duplications_when_too_many_duplications() throws Exception {
  Collection<Block> originBlocks = new ArrayList<>();
  Collection<Block> duplicatedBlocks = new ArrayList<>();
  Block.Builder blockBuilder = new Block.Builder().setIndexInFile(0).setLines(30, 45).setUnit(0, 100);
  // Generate more than 100 duplications on different files
  for (int i = 0; i < 110; i++) {
    String hash = padStart("hash" + i, 16, 'a');
    originBlocks.add(blockBuilder.setResourceId(ORIGIN_FILE_KEY).setBlockHash(new ByteArray(hash)).build());
    duplicatedBlocks.add(blockBuilder.setResourceId("resource" + i).setBlockHash(new ByteArray(hash)).build());
  }

  underTest.computeCpd(ORIGIN_FILE, originBlocks, duplicatedBlocks);

  assertThat(duplicationRepository.getDuplications(ORIGIN_FILE)).hasSize(100);
  assertThat(logTester.logs(LoggerLevel.WARN))
    .containsOnly("Too many duplication groups on file " + ORIGIN_FILE_KEY + ". Keeping only the first 100 groups.");
}
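The test fixes both the cap (100 duplication groups per file) and the exact warning text. A minimal sketch of that guard; the capGroups helper and the MAX_CLONE_GROUP_PER_FILE name are used here for illustration and may not match the real implementation in IntegrateCrossProjectDuplications:

// Hypothetical guard: keep only the first 100 duplication groups per file and warn once.
static final int MAX_CLONE_GROUP_PER_FILE = 100;

static <T> List<T> capGroups(String fileKey, List<T> groups, org.sonar.api.utils.log.Logger logger) {
  if (groups.size() <= MAX_CLONE_GROUP_PER_FILE) {
    return groups;
  }
  logger.warn("Too many duplication groups on file " + fileKey
    + ". Keeping only the first " + MAX_CLONE_GROUP_PER_FILE + " groups.");
  return groups.subList(0, MAX_CLONE_GROUP_PER_FILE);
}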
Use of org.sonar.duplications.block.Block in project sonarqube by SonarSource.
From the class IntegrateCrossProjectDuplicationsTest, method add_no_duplication_when_not_enough_tokens.
@Test
public void add_no_duplication_when_not_enough_tokens() {
  settings.setProperty("sonar.cpd.xoo.minimumTokens", 10);
  // This block contains 5 tokens -> not enough to be considered a duplication
  Collection<Block> originBlocks = singletonList(new Block.Builder().setResourceId(ORIGIN_FILE_KEY)
    .setBlockHash(new ByteArray("a8998353e96320ec")).setIndexInFile(0).setLines(30, 45).setUnit(0, 4).build());
  Collection<Block> duplicatedBlocks = singletonList(new Block.Builder().setResourceId(OTHER_FILE_KEY)
    .setBlockHash(new ByteArray("a8998353e96320ec")).setIndexInFile(0).setLines(40, 55).build());

  underTest.computeCpd(ORIGIN_FILE, originBlocks, duplicatedBlocks);

  assertNoDuplicationAdded(ORIGIN_FILE);
}
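assertNoDuplicationAdded is a test helper not shown in this excerpt. A plausible reconstruction, assuming ORIGIN_FILE is a Component and that duplicationRepository.getDuplications returns an empty collection when nothing was recorded; the real helper may differ:

// Hypothetical reconstruction of the helper used above.
private void assertNoDuplicationAdded(Component file) {
  assertThat(duplicationRepository.getDuplications(file)).isEmpty();
}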
Use of org.sonar.duplications.block.Block in project sonarqube by SonarSource.
From the class LoadCrossProjectDuplicationsRepositoryStepTest, method call_compute_cpd_on_many_duplication.
@Test
public void call_compute_cpd_on_many_duplication() throws Exception {
  when(crossProjectDuplicationStatusHolder.isEnabled()).thenReturn(true);
  analysisMetadataHolder.setBaseAnalysis(baseProjectAnalysis);
  ComponentDto otherProject = createProject("OTHER_PROJECT_KEY");
  SnapshotDto otherProjectSnapshot = createProjectSnapshot(otherProject);
  ComponentDto otherFile = createFile("OTHER_FILE_KEY", otherProject);

  ScannerReport.CpdTextBlock originBlock1 = ScannerReport.CpdTextBlock.newBuilder()
    .setHash("a8998353e96320ec").setStartLine(30).setEndLine(45).setStartTokenIndex(0).setEndTokenIndex(10).build();
  ScannerReport.CpdTextBlock originBlock2 = ScannerReport.CpdTextBlock.newBuilder()
    .setHash("b1234353e96320ff").setStartLine(10).setEndLine(25).setStartTokenIndex(5).setEndTokenIndex(15).build();
  batchReportReader.putDuplicationBlocks(FILE_REF, asList(originBlock1, originBlock2));

  DuplicationUnitDto duplicate1 = new DuplicationUnitDto().setHash(originBlock1.getHash())
    .setStartLine(40).setEndLine(55).setIndexInFile(0)
    .setAnalysisUuid(otherProjectSnapshot.getUuid()).setComponentUuid(otherFile.uuid());
  DuplicationUnitDto duplicate2 = new DuplicationUnitDto().setHash(originBlock2.getHash())
    .setStartLine(20).setEndLine(35).setIndexInFile(1)
    .setAnalysisUuid(otherProjectSnapshot.getUuid()).setComponentUuid(otherFile.uuid());
  dbClient.duplicationDao().insert(dbSession, duplicate1);
  dbClient.duplicationDao().insert(dbSession, duplicate2);
  dbSession.commit();

  underTest.execute();

  // Unchecked cast so the Mockito captors can be typed as ArrayList<Block>
  Class<ArrayList<Block>> listClass = (Class<ArrayList<Block>>) (Class) ArrayList.class;
  ArgumentCaptor<ArrayList<Block>> originBlocks = ArgumentCaptor.forClass(listClass);
  ArgumentCaptor<ArrayList<Block>> duplicationBlocks = ArgumentCaptor.forClass(listClass);
  verify(integrateCrossProjectDuplications).computeCpd(eq(CURRENT_FILE), originBlocks.capture(), duplicationBlocks.capture());

  Map<Integer, Block> originBlocksByIndex = blocksByIndexInFile(originBlocks.getValue());
  assertThat(originBlocksByIndex.get(0)).isEqualTo(
    new Block.Builder().setResourceId(CURRENT_FILE_KEY).setBlockHash(new ByteArray(originBlock1.getHash()))
      .setIndexInFile(0).setLines(originBlock1.getStartLine(), originBlock1.getEndLine())
      .setUnit(originBlock1.getStartTokenIndex(), originBlock1.getEndTokenIndex()).build());
  assertThat(originBlocksByIndex.get(1)).isEqualTo(
    new Block.Builder().setResourceId(CURRENT_FILE_KEY).setBlockHash(new ByteArray(originBlock2.getHash()))
      .setIndexInFile(1).setLines(originBlock2.getStartLine(), originBlock2.getEndLine())
      .setUnit(originBlock2.getStartTokenIndex(), originBlock2.getEndTokenIndex()).build());

  Map<Integer, Block> duplicationBlocksByIndex = blocksByIndexInFile(duplicationBlocks.getValue());
  assertThat(duplicationBlocksByIndex.get(0)).isEqualTo(
    new Block.Builder().setResourceId(otherFile.getKey()).setBlockHash(new ByteArray(originBlock1.getHash()))
      .setIndexInFile(duplicate1.getIndexInFile()).setLines(duplicate1.getStartLine(), duplicate1.getEndLine()).build());
  assertThat(duplicationBlocksByIndex.get(1)).isEqualTo(
    new Block.Builder().setResourceId(otherFile.getKey()).setBlockHash(new ByteArray(originBlock2.getHash()))
      .setIndexInFile(duplicate2.getIndexInFile()).setLines(duplicate2.getStartLine(), duplicate2.getEndLine()).build());
}
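blocksByIndexInFile is another helper omitted from the excerpt. A minimal sketch, assuming Block exposes getIndexInFile(); the real helper may differ:

// Hypothetical reconstruction: index captured blocks by their position in the file
// so the assertions above can address block 0 and block 1 directly.
private static Map<Integer, Block> blocksByIndexInFile(Collection<Block> blocks) {
  Map<Integer, Block> blocksByIndex = new HashMap<>();
  for (Block block : blocks) {
    blocksByIndex.put(block.getIndexInFile(), block);
  }
  return blocksByIndex;
}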