Use of com.github.lindenb.jvarkit.pedigree.Pedigree in project jvarkit by lindenb:
the class IndexCovToVcf, method doWork.
@Override
public int doWork(final List<String> args) {
    // Converts the tabular output of goleft/indexcov ("#chrom start end sample1 sample2 ...",
    // one normalized-depth value per sample) into a structural-variant VCF with one
    // <DEL>/<DUP> record per input line.
    final IndexCovUtils indexCovUtils = new IndexCovUtils(this.treshold);
    final CharSplitter tab = CharSplitter.TAB;
    BufferedReader r = null;
    VariantContextWriter vcw = null;
    try {
        // Optional reference dictionary: used to validate contig names and lengths.
        final SAMSequenceDictionary dict;
        if (this.refFile == null) {
            dict = null;
        } else {
            dict = SequenceDictionaryUtils.extractRequired(this.refFile);
        }
        r = super.openBufferedReader(oneFileOrNull(args));
        String line = r.readLine();
        if (line == null) {
            LOG.error("Cannot read first line of input");
            return -1;
        }
        // The header line must start with the literal indexcov columns "#chrom start end".
        String[] tokens = tab.split(line);
        if (tokens.length < 4 || !tokens[0].equals("#chrom") || !tokens[1].equals("start") || !tokens[2].equals("end")) {
            LOG.error("bad first line " + line);
            return -1;
        }
        // Build the VCF header: standard GT/GQ + END, plus the custom FORMAT/INFO/FILTER lines below.
        final Set<VCFHeaderLine> metaData = new HashSet<>();
        VCFStandardHeaderLines.addStandardFormatLines(metaData, true, VCFConstants.GENOTYPE_KEY, VCFConstants.GENOTYPE_QUALITY_KEY);
        VCFStandardHeaderLines.addStandardInfoLines(metaData, true, VCFConstants.END_KEY);
        /** FORMAT/F carries the raw indexcov fold value (0.5 del, 1.0 ref, 2.0 dup). */
        final VCFFormatHeaderLine foldHeader = new VCFFormatHeaderLine("F", 1, VCFHeaderLineType.Float, "Relative number of copy: 0.5 deletion 1 normal 2.0 duplication");
        metaData.add(foldHeader);
        final VCFFilterHeaderLine filterAllDel = new VCFFilterHeaderLine("ALL_DEL", "number of samples greater than 1 and all are deletions");
        metaData.add(filterAllDel);
        final VCFFilterHeaderLine filterAllDup = new VCFFilterHeaderLine("ALL_DUP", "number of samples greater than 1 and all are duplication");
        metaData.add(filterAllDup);
        final VCFFilterHeaderLine filterNoSV = new VCFFilterHeaderLine("NO_SV", "There is no DUP or DEL in this variant");
        metaData.add(filterNoSV);
        final VCFFilterHeaderLine filterHomDel = new VCFFilterHeaderLine("HOM_DEL", "There is one Homozygous deletion.");
        metaData.add(filterHomDel);
        final VCFFilterHeaderLine filterHomDup = new VCFFilterHeaderLine("HOM_DUP", "There is one Homozygous duplication.");
        metaData.add(filterHomDup);
        final VCFInfoHeaderLine infoNumDup = new VCFInfoHeaderLine("NDUP", 1, VCFHeaderLineType.Integer, "Number of samples being duplicated");
        metaData.add(infoNumDup);
        final VCFInfoHeaderLine infoNumDel = new VCFInfoHeaderLine("NDEL", 1, VCFHeaderLineType.Integer, "Number of samples being deleted");
        metaData.add(infoNumDel);
        final VCFInfoHeaderLine infoSingleton = new VCFInfoHeaderLine("SINGLETON", 1, VCFHeaderLineType.Flag, "Singleton candidate");
        metaData.add(infoSingleton);
        final VCFInfoHeaderLine infoAllAffected = new VCFInfoHeaderLine("ALL_CASES", 1, VCFHeaderLineType.Flag, "All cases are affected");
        metaData.add(infoAllAffected);
        // Sample names are everything after the first three BED-like columns.
        final List<String> samples = Arrays.asList(tokens).subList(3, tokens.length);
        // Optional pedigree: count how many affected samples are actually present in the input.
        final Pedigree pedigree;
        final int count_cases_in_pedigree;
        if (this.pedFile == null) {
            pedigree = PedigreeParser.empty();
            count_cases_in_pedigree = 0;
        } else {
            pedigree = new PedigreeParser().parse(this.pedFile);
            final Set<String> set = new HashSet<>(samples);
            count_cases_in_pedigree = (int) pedigree.getAffectedSamples().stream().filter(S -> set.contains(S.getId())).count();
        }
        final VCFHeader vcfHeader = new VCFHeader(metaData, samples);
        JVarkitVersion.getInstance().addMetaData(this, vcfHeader);
        if (dict != null) {
            vcfHeader.setSequenceDictionary(dict);
        }
        vcw = this.writingDelegate.dictionary(dict).open(outputFile);
        vcw.writeHeader(vcfHeader);
        final Allele DUP_ALLELE = Allele.create("<DUP>", false);
        final Allele DEL_ALLELE = Allele.create("<DEL>", false);
        final Allele REF_ALLELE = Allele.create("N", true);
        // One variant per data line.
        while ((line = r.readLine()) != null) {
            if (StringUtil.isBlank(line))
                continue;
            tokens = tab.split(line);
            if (tokens.length != 3 + samples.size()) {
                // No need to close r/vcw here: the finally block below closes both.
                throw new JvarkitException.TokenErrors("expected " + (samples.size() + 3) + "columns.", tokens);
            }
            final Set<Allele> alleles = new HashSet<>();
            alleles.add(REF_ALLELE);
            final VariantContextBuilder vcb = new VariantContextBuilder();
            vcb.chr(tokens[0]);
            vcb.start(Integer.parseInt(tokens[1]));
            final int chromEnd = Integer.parseInt(tokens[2]);
            vcb.stop(chromEnd);
            vcb.attribute(VCFConstants.END_KEY, chromEnd);
            if (dict != null) {
                final SAMSequenceRecord ssr = dict.getSequence(tokens[0]);
                if (ssr == null) {
                    LOG.error(JvarkitException.ContigNotFoundInDictionary.getMessage(tokens[0], dict));
                    return -1;
                }
                if (chromEnd > ssr.getSequenceLength()) {
                    LOG.warn("WARNING sequence length in " + line + " is greater than in dictionary ");
                }
            }
            int count_dup = 0;
            int count_del = 0;
            // Parse each sample's fold value; invalid values are reported but still kept
            // (original behavior preserved — IndexCovUtils classifies them as no-call later).
            final Map<String, Float> sample2fold = new HashMap<>(samples.size());
            for (int i = 3; i < tokens.length; i++) {
                final String sampleName = samples.get(i - 3);
                final float f = Float.parseFloat(tokens[i]);
                // Float.isFinite is false for NaN, so no separate isNaN test is needed.
                if (f < 0 || !Float.isFinite(f)) {
                    LOG.error("Bad fold " + f + " for sample " + sampleName + " in " + line);
                }
                sample2fold.put(sampleName, f);
            }
            final List<Genotype> genotypes = new ArrayList<>(samples.size());
            int count_sv_cases = 0;
            int count_sv_controls = 0;
            int count_ref_cases = 0;
            int count_ref_controls = 0;
            boolean got_sv = false;
            // Classify each sample and build its genotype.
            for (final Map.Entry<String, Float> entry : sample2fold.entrySet()) {
                final String sampleName = entry.getKey();
                final float normDepth = entry.getValue();
                final IndexCovUtils.SvType type = indexCovUtils.getType(normDepth);
                final GenotypeBuilder gb;
                switch(type) {
                    case REF:
                    {
                        gb = new GenotypeBuilder(sampleName, Arrays.asList(REF_ALLELE, REF_ALLELE));
                        break;
                    }
                    case HET_DEL:
                    {
                        gb = new GenotypeBuilder(sampleName, Arrays.asList(REF_ALLELE, DEL_ALLELE));
                        alleles.add(DEL_ALLELE);
                        count_del++;
                        break;
                    }
                    case HOM_DEL:
                    {
                        gb = new GenotypeBuilder(sampleName, Arrays.asList(DEL_ALLELE, DEL_ALLELE));
                        alleles.add(DEL_ALLELE);
                        count_del++;
                        vcb.filter(filterHomDel.getID());
                        break;
                    }
                    case HET_DUP:
                    {
                        gb = new GenotypeBuilder(sampleName, Arrays.asList(REF_ALLELE, DUP_ALLELE));
                        alleles.add(DUP_ALLELE);
                        count_dup++;
                        break;
                    }
                    case HOM_DUP:
                    {
                        gb = new GenotypeBuilder(sampleName, Arrays.asList(DUP_ALLELE, DUP_ALLELE));
                        alleles.add(DUP_ALLELE);
                        vcb.filter(filterHomDup.getID());
                        count_dup++;
                        break;
                    }
                    default:
                    {
                        // Ambiguous fold value: emit a no-call genotype.
                        gb = new GenotypeBuilder(sampleName, Arrays.asList(Allele.NO_CALL, Allele.NO_CALL));
                        break;
                    }
                }
                if (type.isVariant())
                    got_sv = true;
                gb.attribute(foldHeader.getID(), normDepth);
                gb.GQ(type.getGenotypeQuality(normDepth));
                // Tally case/control counts for the SINGLETON / ALL_CASES flags below.
                final Sample sn = pedigree.getSampleById(sampleName);
                if (sn != null) {
                    if (type.isVariant()) {
                        if (sn.isAffected()) {
                            count_sv_cases++;
                        } else if (sn.isUnaffected()) {
                            count_sv_controls++;
                        }
                    } else {
                        if (sn.isAffected()) {
                            count_ref_cases++;
                        } else if (sn.isUnaffected()) {
                            count_ref_controls++;
                        }
                    }
                }
                genotypes.add(gb.make());
            }
            vcb.alleles(alleles);
            // SINGLETON: exactly one affected carrier, no control carriers.
            if (!pedigree.isEmpty() && count_sv_cases == 1 && count_ref_cases > 0 && count_sv_controls == 0 && count_ref_controls > 0) {
                vcb.attribute(infoSingleton.getID(), Boolean.TRUE);
            } else if (!pedigree.isEmpty() && count_sv_cases > 0 && count_sv_cases == count_cases_in_pedigree && count_ref_cases == 0 && count_sv_controls == 0 && count_ref_controls > 0) {
                // ALL_CASES: every affected sample in the pedigree carries the SV, no control does.
                vcb.attribute(infoAllAffected.getID(), Boolean.TRUE);
            }
            vcb.genotypes(genotypes);
            // Whole-cohort filters (only meaningful with more than one sample).
            if (count_dup == samples.size() && samples.size() != 1) {
                vcb.filter(filterAllDup.getID());
            }
            if (count_del == samples.size() && samples.size() != 1) {
                vcb.filter(filterAllDel.getID());
            }
            if (!got_sv) {
                vcb.filter(filterNoSV.getID());
            }
            vcb.attribute(infoNumDel.getID(), count_del);
            vcb.attribute(infoNumDup.getID(), count_dup);
            vcw.add(vcb.make());
        }
        vcw.close();
        vcw = null;
        r.close();
        r = null;
        return 0;
    } catch (final Throwable err) {
        LOG.error(err);
        return -1;
    } finally {
        // Safe no-ops when already closed/nulled above.
        CloserUtil.close(r);
        CloserUtil.close(vcw);
    }
}
Use of com.github.lindenb.jvarkit.pedigree.Pedigree in project jvarkit by lindenb:
the class VCFComposite, method doVcfToVcf.
protected int doVcfToVcf(final String inputName, final VCFIterator iterin, final VariantContextWriter out) {
    // Detects candidate composite (compound-heterozygous) variants: groups variants by gene,
    // scans each gene for qualifying pairs, then re-emits all variants in sorted order with
    // an INFO tag (or FILTER) marking the composite candidates.
    final VCFHeader header = iterin.getHeader();
    if (!header.hasGenotypingData()) {
        LOG.error("No genotypes in " + inputName);
        return -1;
    }
    final GeneExtractorFactory geneExtractorFactory = new GeneExtractorFactory(header);
    final List<GeneExtractorFactory.GeneExtractor> extractors = geneExtractorFactory.parse(this.extractorsNames);
    if (extractors.isEmpty()) {
        LOG.error("no gene extractor found/defined.");
        return -1;
    }
    // Load the pedigree and keep only the affected/unaffected samples present in the VCF.
    final Pedigree pedigree;
    try {
        final Set<String> sampleNames = new HashSet<>(header.getSampleNamesInOrder());
        final PedigreeParser pedParser = new PedigreeParser();
        pedigree = pedParser.parse(this.pedigreeFile);
        if (pedigree == null || pedigree.isEmpty()) {
            LOG.error("pedigree missing/empty");
            return -1;
        }
        this.affectedSamples.addAll(pedigree.getAffectedSamples());
        this.affectedSamples.removeIf(S -> !sampleNames.contains(S.getId()));
        if (this.affectedSamples.isEmpty()) {
            LOG.error("No Affected sample in pedigree. " + this.pedigreeFile + "/" + inputName);
            return -1;
        }
        this.unaffectedSamples.addAll(pedigree.getUnaffectedSamples());
        this.unaffectedSamples.removeIf(S -> !sampleNames.contains(S.getId()));
        // FIX: test the filtered set (samples present in the VCF), mirroring the affected-samples
        // check above; previously this tested the unfiltered pedigree list, so a pedigree whose
        // unaffected members were all absent from the VCF proceeded with zero controls.
        if (this.unaffectedSamples.isEmpty()) {
            LOG.error("No Unaffected sample in " + this.pedigreeFile + "/" + inputName);
            return -1;
        }
    } catch (final IOException err) {
        throw new RuntimeIOException(err);
    }
    header.addMetaDataLine(new VCFInfoHeaderLine(INFO_TAG, VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String, "Variant of VCFComposite"));
    if (!StringUtils.isBlank(this.filterNonCompositeTag)) {
        // FIX: typo "fir" -> "for" in the filter description.
        header.addMetaDataLine(new VCFFilterHeaderLine(this.filterNonCompositeTag, "Not a Variant for VCFComposite"));
    }
    // Contig order: dictionary order when available, else lexicographic.
    final SAMSequenceDictionary dict = header.getSequenceDictionary();
    final Comparator<String> contigCmp;
    if (dict == null || dict.isEmpty()) {
        contigCmp = (A, B) -> A.compareTo(B);
    } else {
        contigCmp = new ContigDictComparator(dict);
    }
    final Comparator<VariantContext> ctxComparator = (V1, V2) -> {
        int i = contigCmp.compare(V1.getContig(), V2.getContig());
        if (i != 0)
            return i;
        i = Integer.compare(V1.getStart(), V2.getStart());
        if (i != 0)
            return i;
        return V1.getReference().compareTo(V2.getReference());
    };
    // Tie-break equal positions with the original input order (id) to keep the sort stable.
    final Comparator<VariantLine> variantLineComparator = (V1, V2) -> {
        final int i = ctxComparator.compare(V1.ctx, V2.ctx);
        if (i != 0)
            return i;
        return Long.compare(V1.id, V2.id);
    };
    long ID_GENERATOR = 0L;
    this.vcfDecoder = VCFUtils.createDefaultVCFCodec();
    this.vcfDecoder.setVCFHeader(header, VCFHeaderVersion.VCF4_2);
    this.vcfEncoder = new VCFEncoder(header, false, true);
    SortingCollection<GeneAndVariant> sorting = null;
    SortingCollection<VariantLine> outputSorter = null;
    try {
        LOG.info("reading variants and genes");
        /* Gene-and-variant sorter: groups variants of the same gene together. */
        sorting = SortingCollection.newInstance(GeneAndVariant.class, new GeneAndVariantCodec(), GeneAndVariant::compareGeneThenIndex, this.writingSortingCollection.getMaxRecordsInRam(), this.writingSortingCollection.getTmpPaths());
        sorting.setDestructiveIteration(true);
        /* Variant sorter: reorders everything for the final output. */
        outputSorter = SortingCollection.newInstance(VariantLine.class, new VariantLineCodec(), variantLineComparator, this.writingSortingCollection.getMaxRecordsInRam(), this.writingSortingCollection.getTmpPaths());
        outputSorter.setDestructiveIteration(true);
        /* Read the input; variants failing the JEXL/accept filters or mapping to no gene
         * go straight to the output sorter; the rest are fanned out per gene. */
        while (iterin.hasNext()) {
            final VariantContext ctx = iterin.next();
            final VariantLine variantLine = new VariantLine(++ID_GENERATOR, ctx);
            if (!this.variantJexl.test(ctx)) {
                outputSorter.add(variantLine);
                continue;
            }
            if (!acceptVariant(ctx)) {
                outputSorter.add(variantLine);
                continue;
            }
            final Set<GeneIdentifier> geneKeys = new HashSet<>();
            extractors.stream().map(EX -> EX.apply(ctx)).flatMap(H -> H.keySet().stream()).forEach(KG -> {
                geneKeys.add(new GeneIdentifier(KG.getKey(), KG.getGene(), KG.getMethod().replace('/', '_')));
            });
            if (geneKeys.isEmpty()) {
                outputSorter.add(variantLine);
                continue;
            }
            for (final GeneIdentifier gk : geneKeys) {
                final GeneAndVariant gav = new GeneAndVariant(gk, variantLine);
                gav.gene.contig = ctx.getContig();
                sorting.add(gav);
            }
        }
        sorting.doneAdding();
        LOG.info("compile per gene");
        // Optional per-gene report (null path -> discard).
        this.reportGeneWriter = (this.geneReportPath == null ? new PrintWriter(new NullOuputStream()) : IOUtils.openPathForPrintWriter(this.geneReportPath));
        this.reportGeneWriter.print("#CHROM");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("bed.start");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("bed.end");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("gene.key");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("gene.label");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("gene.source");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("count.variants");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("affected.counts");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("affected.total");
        this.reportGeneWriter.print('\t');
        this.reportGeneWriter.print("affected.samples");
        this.reportGeneWriter.println();
        // Optional per-variant-pair report: two column groups, one per variant of a pair.
        this.reportWriter = (this.reportPath == null ? new PrintWriter(new NullOuputStream()) : IOUtils.openPathForPrintWriter(this.reportPath));
        this.reportWriter.print("#CHROM");
        this.reportWriter.print('\t');
        this.reportWriter.print("bed.start");
        this.reportWriter.print('\t');
        this.reportWriter.print("bed.end");
        this.reportWriter.print('\t');
        this.reportWriter.print("gene.index");
        this.reportWriter.print('\t');
        this.reportWriter.print("gene.key");
        this.reportWriter.print('\t');
        this.reportWriter.print("gene.label");
        this.reportWriter.print('\t');
        this.reportWriter.print("gene.source");
        for (int side = 0; side < 2; ++side) {
            this.reportWriter.print('\t');
            final String prefix = "variant" + (side + 1) + ".";
            this.reportWriter.print(prefix + "start");
            this.reportWriter.print('\t');
            this.reportWriter.print(prefix + "end");
            this.reportWriter.print('\t');
            this.reportWriter.print(prefix + "ref");
            this.reportWriter.print('\t');
            this.reportWriter.print(prefix + "alt");
            this.reportWriter.print('\t');
            this.reportWriter.print(prefix + "info");
            for (final Sample sn : this.affectedSamples) {
                this.reportWriter.print('\t');
                this.reportWriter.print(prefix + "gt[" + sn.getId() + "].affected");
            }
            for (final Sample sn : this.unaffectedSamples) {
                this.reportWriter.print('\t');
                this.reportWriter.print(prefix + "gt[" + sn.getId() + "].unaffected");
            }
        }
        this.reportWriter.println();
        // Compile data: scan each gene's variant group, then forward its variants to the output sorter.
        CloseableIterator<GeneAndVariant> iter2 = sorting.iterator();
        EqualRangeIterator<GeneAndVariant> eqiter = new EqualRangeIterator<>(iter2, (A, B) -> A.gene.compareTo(B.gene));
        while (eqiter.hasNext()) {
            final List<GeneAndVariant> variants = eqiter.next();
            scan(variants.get(0).gene, variants.stream().map(L -> L.variant).collect(Collectors.toList()));
            for (final GeneAndVariant ga : variants) outputSorter.add(ga.variant);
        }
        eqiter.close();
        iter2.close();
        sorting.cleanup();
        this.reportWriter.flush();
        this.reportWriter.close();
        this.reportGeneWriter.flush();
        this.reportGeneWriter.close();
        LOG.info("write variants");
        // Write the final VCF: duplicates of the same variant (one per gene) are collapsed
        // by the equal-range iterator; their annotations are merged.
        CloseableIterator<VariantLine> iter1 = outputSorter.iterator();
        EqualRangeIterator<VariantLine> eqiter1 = new EqualRangeIterator<>(iter1, variantLineComparator);
        out.writeHeader(header);
        while (eqiter1.hasNext()) {
            final List<VariantLine> array = eqiter1.next();
            final VariantContext firstCtx = array.get(0).ctx;
            final Set<String> set = getAnnotationsForVariant(firstCtx);
            final VariantContext outCtx;
            final VariantContextBuilder vcb = new VariantContextBuilder(firstCtx);
            for (int y = 1; y < array.size(); ++y) {
                set.addAll(getAnnotationsForVariant(array.get(y).ctx));
            }
            if (set.isEmpty()) {
                if (StringUtils.isBlank(this.filterNonCompositeTag)) {
                    // No composite annotation and no filter tag configured: drop the variant.
                    continue;
                } else {
                    vcb.filter(this.filterNonCompositeTag);
                }
            } else {
                if (!firstCtx.isFiltered()) {
                    vcb.passFilters();
                }
                vcb.attribute(INFO_TAG, new ArrayList<>(set));
            }
            outCtx = vcb.make();
            out.add(outCtx);
        }
        outputSorter.cleanup();
        eqiter1.close();
        iter1.close();
        return 0;
    } catch (final Exception err) {
        LOG.error(err);
        return -1;
    }
}
Use of com.github.lindenb.jvarkit.pedigree.Pedigree in project jvarkit by lindenb:
the class GroupByGene, method read.
/**
 * Reads one VCF (file path or stdin when {@code input} is null), groups its variants
 * by gene (as reported by the configured gene extractors), and writes one summary
 * row per gene to {@code this.outFile} (or stdout): position range, carrier counts,
 * optional pedigree case/control/sex breakdowns, optional Fisher test, and a
 * per-sample variant count.
 *
 * @param input path of the VCF to read, or null for stdin
 * @throws IOException on read/write failure
 */
private void read(final String input) throws IOException {
    SortingCollection<Call> sortingCollection = null;
    VCFIterator vcfIterator = null;
    try {
        final BcfIteratorBuilder iterbuilder = new BcfIteratorBuilder();
        vcfIterator = (input == null ? iterbuilder.open(stdin()) : iterbuilder.open(input));
        final VCFHeader header = vcfIterator.getHeader();
        // A sequence dictionary is required: it defines the contig order used by Call comparison.
        this.contigDictComparator = new ContigDictComparator(SequenceDictionaryUtils.extractRequired(header));
        sortingCollection = SortingCollection.newInstance(Call.class, new CallCodec(header), (C1, C2) -> C1.compare2(C2), this.writingSortingCollection.getMaxRecordsInRam(), this.writingSortingCollection.getTmpPaths());
        sortingCollection.setDestructiveIteration(true);
        final GeneExtractorFactory geneExtractorFactory = new GeneExtractorFactory(header);
        final List<GeneExtractorFactory.GeneExtractor> geneExtractors = geneExtractorFactory.parse(this.extractorsNames);
        final List<String> sampleNames;
        if (header.getSampleNamesInOrder() != null) {
            sampleNames = header.getSampleNamesInOrder();
        } else {
            sampleNames = Collections.emptyList();
        }
        // Optional pedigree; empty pedigree disables the case/control/sex columns below.
        final Pedigree pedigree;
        if (this.pedigreePath != null) {
            final PedigreeParser pedParser = new PedigreeParser();
            pedigree = pedParser.parse(this.pedigreePath);
        } else {
            pedigree = PedigreeParser.empty();
        }
        final ProgressFactory.Watcher<VariantContext> progress = ProgressFactory.newInstance().dictionary(header).logger(LOG).build();
        // Phase 1: fill the sorting collection with one Call per (variant, gene) pair.
        while (vcfIterator.hasNext()) {
            final VariantContext ctx = progress.apply(vcfIterator.next());
            if (!ctx.isVariant())
                continue;
            if (ignore_filtered && ctx.isFiltered())
                continue;
            // Strip ID/QUAL/FILTER/INFO to keep the serialized Call small;
            // note gene extraction below still uses the original ctx (full annotations).
            final VariantContextBuilder vcb = new VariantContextBuilder(ctx);
            vcb.noID();
            vcb.log10PError(VariantContext.NO_LOG10_PERROR);
            vcb.unfiltered();
            vcb.attributes(Collections.emptyMap());
            final VariantContext ctx2 = vcb.make();
            // effectively-final alias for use inside the lambda
            final SortingCollection<Call> finalSorter = sortingCollection;
            geneExtractors.stream().flatMap(EX -> EX.apply(ctx).keySet().stream()).forEach(KG -> {
                final Call c = new Call();
                c.ctx = ctx2;
                c.gene = new GeneName(KG.getKey(), KG.getGene(), KG.getMethod());
                finalSorter.add(c);
            });
        }
        CloserUtil.close(vcfIterator);
        vcfIterator = null;
        sortingCollection.doneAdding();
        progress.close();
        /**
         * Phase 2: dump, one row per gene.
         */
        // Pedigree subsets restricted to samples actually present in the VCF.
        final Set<String> casesSamples = pedigree.getAffectedSamples().stream().map(P -> P.getId()).filter(ID -> sampleNames.contains(ID)).collect(Collectors.toSet());
        final Set<String> controlsSamples = pedigree.getUnaffectedSamples().stream().map(P -> P.getId()).filter(ID -> sampleNames.contains(ID)).collect(Collectors.toSet());
        final Set<String> maleSamples = pedigree.getSamples().stream().filter(P -> P.isMale()).map(P -> P.getId()).filter(ID -> sampleNames.contains(ID)).collect(Collectors.toSet());
        final Set<String> femaleSamples = pedigree.getSamples().stream().filter(P -> P.isFemale()).map(P -> P.getId()).filter(ID -> sampleNames.contains(ID)).collect(Collectors.toSet());
        // A genotype "carries a mutation" if it is a called, non-hom-ref, (optionally) unfiltered genotype.
        final Predicate<Genotype> genotypeFilter = genotype -> {
            if (!genotype.isAvailable())
                return false;
            if (!genotype.isCalled())
                return false;
            if (genotype.isNoCall())
                return false;
            if (genotype.isHomRef())
                return false;
            if (this.ignore_filtered_genotype && genotype.isFiltered())
                return false;
            return true;
        };
        PrintStream pw = openPathOrStdoutAsPrintStream(this.outFile);
        // Header row; the pedigree/fisher columns are only emitted when the matching subset is non-empty.
        pw.print("#chrom");
        pw.print('\t');
        pw.print("min.POS");
        pw.print('\t');
        pw.print("max.POS");
        pw.print('\t');
        pw.print("gene.name");
        pw.print('\t');
        pw.print("gene.label");
        pw.print('\t');
        pw.print("gene.type");
        pw.print('\t');
        pw.print("samples.affected");
        pw.print('\t');
        pw.print("count.variations");
        if (this.print_positions) {
            pw.print('\t');
            pw.print("positions");
        }
        if (!casesSamples.isEmpty()) {
            pw.print('\t');
            pw.print("pedigree.cases");
        }
        if (!controlsSamples.isEmpty()) {
            pw.print('\t');
            pw.print("pedigree.controls");
        }
        if (!maleSamples.isEmpty()) {
            pw.print('\t');
            pw.print("pedigree.males");
        }
        if (!femaleSamples.isEmpty()) {
            pw.print('\t');
            pw.print("pedigree.females");
        }
        if (this.print_fisher && !controlsSamples.isEmpty() && !casesSamples.isEmpty()) {
            pw.print('\t');
            pw.print("fisher");
        }
        for (final String sample : sampleNames) {
            pw.print('\t');
            pw.print(sample);
        }
        pw.println();
        // Iterate the sorted calls gene by gene (equal-range = same gene).
        final CloseableIterator<Call> iter = sortingCollection.iterator();
        final EqualRangeIterator<Call> eqiter = new EqualRangeIterator<>(iter, (C1, C2) -> C1.compareTo(C2));
        while (eqiter.hasNext()) {
            final List<Call> row = eqiter.next();
            final Call first = row.get(0);
            final List<VariantContext> variantList = row.stream().map(R -> R.ctx).collect(Collectors.toList());
            final int minPos = variantList.stream().mapToInt(R -> R.getStart()).min().getAsInt();
            final int maxPos = variantList.stream().mapToInt(R -> R.getEnd()).max().getAsInt();
            // Tally carrier samples and per-category counts for this gene.
            final Set<String> sampleCarryingMut = new HashSet<String>();
            final Counter<String> pedCasesCarryingMut = new Counter<String>();
            final Counter<String> pedCtrlsCarryingMut = new Counter<String>();
            final Counter<String> malesCarryingMut = new Counter<String>();
            final Counter<String> femalesCarryingMut = new Counter<String>();
            final Counter<String> sample2count = new Counter<String>();
            for (final VariantContext ctx : variantList) {
                for (final Genotype genotype : ctx.getGenotypes()) {
                    if (!genotypeFilter.test(genotype))
                        continue;
                    final String sampleName = genotype.getSampleName();
                    sample2count.incr(sampleName);
                    sampleCarryingMut.add(sampleName);
                    if (casesSamples.contains(sampleName)) {
                        pedCasesCarryingMut.incr(sampleName);
                    }
                    if (controlsSamples.contains(sampleName)) {
                        pedCtrlsCarryingMut.incr(sampleName);
                    }
                    if (maleSamples.contains(sampleName)) {
                        malesCarryingMut.incr(sampleName);
                    }
                    if (femaleSamples.contains(sampleName)) {
                        femalesCarryingMut.incr(sampleName);
                    }
                }
            }
            pw.print(first.getContig());
            pw.print('\t');
            // convert to bed (0-based start)
            pw.print(minPos - 1);
            pw.print('\t');
            pw.print(maxPos);
            pw.print('\t');
            pw.print(first.gene.name);
            pw.print('\t');
            pw.print(first.gene.label);
            pw.print('\t');
            pw.print(first.gene.type);
            pw.print('\t');
            pw.print(sampleCarryingMut.size());
            pw.print('\t');
            pw.print(variantList.size());
            if (this.print_positions) {
                pw.print('\t');
                pw.print(variantList.stream().map(CTX -> String.valueOf(CTX.getStart()) + ":" + CTX.getAlleles().stream().map(A -> A.getDisplayString()).collect(Collectors.joining("/"))).collect(Collectors.joining(";")));
            }
            if (!casesSamples.isEmpty()) {
                pw.print('\t');
                pw.print(pedCasesCarryingMut.getCountCategories());
            }
            if (!controlsSamples.isEmpty()) {
                pw.print('\t');
                pw.print(pedCtrlsCarryingMut.getCountCategories());
            }
            if (!maleSamples.isEmpty()) {
                pw.print('\t');
                pw.print(malesCarryingMut.getCountCategories());
            }
            if (!femaleSamples.isEmpty()) {
                pw.print('\t');
                pw.print(femalesCarryingMut.getCountCategories());
            }
            if (this.print_fisher && !controlsSamples.isEmpty() && !casesSamples.isEmpty()) {
                // 2x2 contingency table: case/control vs has-mutation/wild-type, per sample.
                int count_case_mut = 0;
                int count_ctrl_mut = 0;
                int count_case_wild = 0;
                int count_ctrl_wild = 0;
                for (final String sampleName : header.getSampleNamesInOrder()) {
                    final boolean has_mutation = variantList.stream().map(V -> V.getGenotype(sampleName)).anyMatch(G -> G != null && genotypeFilter.test(G));
                    if (controlsSamples.contains(sampleName)) {
                        if (has_mutation) {
                            count_ctrl_mut++;
                        } else {
                            count_ctrl_wild++;
                        }
                    } else if (casesSamples.contains(sampleName)) {
                        if (has_mutation) {
                            count_case_mut++;
                        } else {
                            count_case_wild++;
                        }
                    }
                }
                final FisherExactTest fisher = FisherExactTest.compute(count_case_mut, count_case_wild, count_ctrl_mut, count_ctrl_wild);
                pw.print('\t');
                pw.print(fisher.getAsDouble());
            }
            for (final String sample : sampleNames) {
                pw.print('\t');
                pw.print(sample2count.count(sample));
            }
            pw.println();
            // Stop early if the output stream is broken (e.g. downstream pipe closed).
            if (pw.checkError())
                break;
        }
        eqiter.close();
        iter.close();
        pw.flush();
        // Only close when writing to a file; stdout is left open for the caller.
        if (this.outFile != null)
            pw.close();
    } finally {
        CloserUtil.close(vcfIterator);
        if (sortingCollection != null)
            sortingCollection.cleanup();
    }
}
Use of com.github.lindenb.jvarkit.pedigree.Pedigree in project jvarkit by lindenb:
the class BamToMNV, method doWork.
@Override
public int doWork(final List<String> args) {
    // Scans BAM reads to phase pairs of nearby biallelic SNVs (potential MNVs):
    // for each candidate pair and each sample, reads spanning both positions vote
    // cis/trans; results are written as a table, optionally with pedigree-based
    // case/control counts and a Fisher exact test.
    try {
        final List<Path> bams = IOUtils.unrollPaths(this.input_bams);
        if (bams.isEmpty()) {
            LOG.error("No bam was defined");
            return -1;
        }
        // Optional pedigree enables the case/control columns in the output.
        final Pedigree pedigree;
        if (this.pedigreePath != null) {
            pedigree = new PedigreeParser().parse(this.pedigreePath);
            pedigree.checkUniqIds();
        } else {
            pedigree = null;
        }
        // Load all biallelic SNVs from the single input VCF, genotypes stripped,
        // verifying they come in dictionary order.
        try (VCFReader reader = VCFReaderFactory.makeDefault().open(oneAndOnlyOneFile(args), false)) {
            final VCFHeader header = reader.getHeader();
            final OrderChecker<VariantContext> order = new OrderChecker<>(header.getSequenceDictionary(), false);
            try (CloseableIterator<VariantContext> r = reader.iterator()) {
                this.all_variants.addAll(r.stream().filter(V -> V.isBiallelic() && V.isSNP()).map(V -> new VariantContextBuilder(V).noGenotypes().make()).map(order).collect(Collectors.toList()));
            }
        }
        // Build candidate pairs: variants within min_distance_mnv of each other,
        // at distinct positions. The inner loop breaks early because variants are sorted.
        final List<Mnv> all_mnvs = new ArrayList<>();
        for (int i = 0; i + 1 < this.all_variants.size(); i++) {
            final VariantContext v1 = this.all_variants.get(i);
            for (int j = i + 1; j < this.all_variants.size(); j++) {
                final VariantContext v2 = this.all_variants.get(j);
                if (!v1.withinDistanceOf(v2, min_distance_mnv))
                    break;
                if (v1.getStart() == v2.getStart())
                    continue;
                all_mnvs.add(new Mnv(i, j));
            }
        }
        // Phase each candidate pair in each BAM (one sample per BAM).
        final Set<String> samples = new TreeSet<>();
        final SamReaderFactory srf = super.createSamReaderFactory().referenceSequence(this.faidx);
        for (final Path bam : bams) {
            LOG.info(String.valueOf(bam));
            try (SamReader sr = srf.open(bam)) {
                final SAMFileHeader header = sr.getFileHeader();
                final SAMSequenceDictionary dict = SequenceDictionaryUtils.extractRequired(header);
                // Sample name: first non-blank read-group sample, else the file name.
                final String sample = header.getReadGroups().stream().map(R -> R.getSample()).filter(S -> !StringUtils.isBlank(S)).findFirst().orElse(IOUtils.getFilenameWithoutCommonSuffixes(bam));
                if (samples.contains(sample)) {
                    LOG.error("duplicate sample " + sample);
                    return -1;
                }
                samples.add(sample);
                if (pedigree != null && pedigree.getSampleById(sample) == null) {
                    LOG.warn("sample " + sample + " from " + bam + " is not in pedigree.");
                }
                if (all_mnvs.isEmpty())
                    continue;
                // Collect acceptable reads overlapping any variant position.
                final QueryInterval[] intervals = QueryInterval.optimizeIntervals(this.all_variants.stream().map(V -> new QueryInterval(dict.getSequenceIndex(V.getContig()), V.getStart(), V.getEnd())).toArray(X -> new QueryInterval[X]));
                final List<SAMRecord> sam_reads = new ArrayList<>();
                try (CloseableIterator<SAMRecord> iter = sr.query(intervals, false)) {
                    while (iter.hasNext()) {
                        final SAMRecord record = iter.next();
                        if (!SAMRecordDefaultFilter.accept(record, this.minmapq))
                            continue;
                        if (record.getReadBases() == SAMRecord.NULL_SEQUENCE)
                            continue;
                        sam_reads.add(record);
                    }
                }
                // sort on query name (mates adjacent — presumably so getPhase can pair them; TODO confirm)
                Collections.sort(sam_reads, (A, B) -> A.getReadName().compareTo(B.getReadName()));
                for (final Mnv mnv : all_mnvs) {
                    final Phase phase = mnv.getPhase(sam_reads);
                    if (phase.equals(Phase.ambigous))
                        continue;
                    mnv.sample2phase.put(sample, phase);
                }
            }
        }
        // Write the report: one row per pair that was phased in at least one sample.
        try (PrintWriter pw = super.openPathOrStdoutAsPrintWriter(this.outputFile)) {
            pw.print("#CHROM1\tPOS1\tREF1\tALT1");
            pw.print("\tCHROM2\tPOS2\tREF2\tALT2");
            pw.print("\tdistance");
            for (final String sn : samples) pw.print("\t" + sn);
            if (pedigree != null) {
                pw.print("\tcase-cis\tcase-trans\tcontrol-cis\tcontrol-trans\tfisher");
            }
            pw.println();
            for (final Mnv mnv : all_mnvs) {
                // Skip pairs with no informative (cis/trans) sample at all.
                if (mnv.sample2phase.values().stream().allMatch(V -> V.equals(Phase.ambigous) || V.equals(Phase.ref)))
                    continue;
                // Columns for both variants of the pair.
                for (int side = 0; side < 2; ++side) {
                    final VariantContext ctx = mnv.get(side);
                    if (side > 0)
                        pw.print("\t");
                    pw.print(ctx.getContig());
                    pw.print("\t");
                    pw.print(ctx.getStart());
                    pw.print("\t");
                    pw.print(ctx.getReference().getDisplayString());
                    pw.print("\t");
                    // biallelic (filtered above), so index 1 is the single ALT allele
                    pw.print(ctx.getAlleles().get(1).getDisplayString());
                }
                pw.print("\t");
                pw.print(CoordMath.getLength(mnv.get(0).getStart(), mnv.get(1).getEnd()));
                // Per-sample phase column + case/control tallies for the Fisher test.
                int case_cis = 0;
                int case_trans = 0;
                int ctrl_cis = 0;
                int ctrl_trans = 0;
                for (final String sn : samples) {
                    pw.print("\t");
                    final Phase phase = mnv.sample2phase.get(sn);
                    if (phase == null) {
                        pw.print(".");
                        continue;
                    }
                    pw.print(phase.name());
                    if (pedigree != null) {
                        final Sample sample = pedigree.getSampleById(sn);
                        if (sample == null) {
                            // sample absent from pedigree: counted in neither group
                        } else if (sample.isAffected()) {
                            if (phase.equals(Phase.cis)) {
                                case_cis++;
                            } else if (phase.equals(Phase.trans)) {
                                case_trans++;
                            }
                        } else if (sample.isUnaffected()) {
                            if (phase.equals(Phase.cis)) {
                                ctrl_cis++;
                            } else if (phase.equals(Phase.trans)) {
                                ctrl_trans++;
                            }
                        }
                    }
                }
                if (pedigree != null) {
                    pw.print("\t");
                    pw.print(case_cis);
                    pw.print("\t");
                    pw.print(case_trans);
                    pw.print("\t");
                    pw.print(ctrl_cis);
                    pw.print("\t");
                    pw.print(ctrl_trans);
                    pw.print("\t");
                    final FisherExactTest fisher = FisherExactTest.compute(case_cis, case_trans, ctrl_cis, ctrl_trans);
                    pw.print(fisher.getAsDouble());
                }
                pw.println();
            }
            pw.flush();
        }
        return 0;
    } catch (final Throwable err) {
        LOG.error(err);
        return -1;
    }
}
Aggregations