Use of com.github.lindenb.jvarkit.util.vcf.predictions.SnpEffPredictionParserFactory in project jvarkit by lindenb.
The class VCFComparePredictions, method doWork.
@Override
public int doWork(List<String> args) {
    PrintWriter out = null;
    SortingCollection<LineAndFile> variants = null;
    try {
        if (args.isEmpty()) {
            LOG.error("Illegal number of arguments");
            return -1;
        }
        out = super.openFileOrStdoutAsPrintWriter(super.outputFile);
        variants = SortingCollection.newInstance(LineAndFile.class,
                new AbstractVCFCompareBase.LineAndFileCodec(),
                new AbstractVCFCompareBase.LineAndFileComparator(),
                super.sortingCollectionArgs.getMaxRecordsInRam(),
                super.sortingCollectionArgs.getTmpPaths());
        variants.setDestructiveIteration(true);
        for (final String filename : args) {
            LOG.info("Reading from " + filename);
            Input input = super.put(variants, filename);
            LOG.info("end reading " + input.filename);
        }
        List<PredictionTuple> predictionTuples = new ArrayList<PredictionTuple>(super.inputs.size());
        for (AbstractVCFCompareBase.Input input : this.inputs) {
            PredictionTuple predictionTuple = new PredictionTuple();
            predictionTuple.snpEffPredictionParser = new SnpEffPredictionParserFactory(input.codecAndHeader.header).get();
            predictionTuple.vepPredictionParser = new VepPredictionParserFactory(input.codecAndHeader.header).get();
            predictionTuples.add(predictionTuple);
        }
        List<AbstractVCFCompareBase.LineAndFile> row = new ArrayList<LineAndFile>(super.inputs.size());
        CloseableIterator<LineAndFile> iter = variants.iterator();
        final Comparator<LineAndFile> posCompare = (A, B) -> A.getContigPosRef().compareTo(B.getContigPosRef());
        for (;;) {
            LineAndFile rec = null;
            if (iter.hasNext()) {
                rec = iter.next();
            }
            if (rec == null || (!row.isEmpty() && posCompare.compare(row.get(0), rec) != 0)) {
                if (!row.isEmpty()) {
                    boolean printed = false;
                    VariantContext ctx = row.get(0).getContext();
                    if (row.size() != this.inputs.size()) {
                        startLine(out, ctx);
                        out.println("\tDiscordant number of variants");
                        printed = true;
                    }
                    for (int i = 0; i + 1 < row.size(); ++i) {
                        Input input1 = this.inputs.get(row.get(i).fileIdx);
                        VariantContext ctx1 = row.get(i).getContext();
                        PredictionTuple predtuple1 = predictionTuples.get(row.get(i).fileIdx);
                        List<VepPrediction> vepPredictions1 = predtuple1.vepPredictionParser.getPredictions(ctx1);
                        List<SnpEffPrediction> snpEffPredictions1 = predtuple1.snpEffPredictionParser.getPredictions(ctx1);
                        Set<SequenceOntologyTree.Term> so_vep_1 = getVepSoTerms(predtuple1.vepPredictionParser, ctx1);
                        Set<SequenceOntologyTree.Term> so_snpeff_1 = getSnpEffSoTerms(predtuple1.snpEffPredictionParser, ctx1);
                        for (int j = i + 1; j < row.size(); ++j) {
                            Input input2 = this.inputs.get(row.get(j).fileIdx);
                            VariantContext ctx2 = row.get(j).getContext();
                            PredictionTuple predtuple2 = predictionTuples.get(row.get(j).fileIdx);
                            List<VepPrediction> vepPredictions2 = predtuple2.vepPredictionParser.getPredictions(ctx2);
                            List<SnpEffPrediction> snpEffPredictions2 = predtuple2.snpEffPredictionParser.getPredictions(ctx2);
                            Set<SequenceOntologyTree.Term> so_vep_2 = getVepSoTerms(predtuple2.vepPredictionParser, ctx2);
                            Set<SequenceOntologyTree.Term> so_snpeff_2 = getSnpEffSoTerms(predtuple2.snpEffPredictionParser, ctx2);
                            if (vepPredictions1.size() != vepPredictions2.size()) {
                                startLine(out, ctx);
                                out.print("\tVEP discordant transcripts count");
                                out.print("\t" + input1.filename + ":" + vepPredictions1.size());
                                out.print("\t" + input2.filename + ":" + vepPredictions2.size());
                                out.println();
                                printed = true;
                            }
                            if (snpEffPredictions1.size() != snpEffPredictions2.size()) {
                                startLine(out, ctx);
                                out.print("\tSNPEFF discordant transcripts count");
                                out.print("\t" + input1.filename + ":" + snpEffPredictions1.size());
                                out.print("\t" + input2.filename + ":" + snpEffPredictions2.size());
                                out.println();
                                printed = true;
                            }
                            if (!unshared(so_vep_1, so_vep_2).isEmpty()) {
                                startLine(out, ctx);
                                out.print("\tVEP discordant SO:terms");
                                printDiscordantSO(out, input1, so_vep_1, input2, so_vep_2);
                                printed = true;
                            }
                            if (!unshared(so_snpeff_1, so_snpeff_2).isEmpty()) {
                                startLine(out, ctx);
                                out.print("\tSNPEFF discordant SO:terms");
                                printDiscordantSO(out, input1, so_snpeff_1, input2, so_snpeff_2);
                                printed = true;
                            }
                        }
                    }
                    if (!printed) {
                        startLine(out, ctx);
                        out.println("\tPASS");
                    }
                    row.clear();
                }
                if (rec == null)
                    break;
            }
            row.add(rec);
        }
        iter.close();
        out.flush();
        out.close();
        out = null;
        return 0;
    } catch (Exception err) {
        LOG.error(err);
        return -1;
    } finally {
        CloserUtil.close(out);
        try {
            if (variants != null)
                variants.cleanup();
        } catch (Exception err) {
        }
    }
}
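Note: the unshared(...) helper called above is not included in this excerpt. Judging from how its result is used (a non-empty result means the two files disagree on SO terms), it plausibly computes the symmetric difference of the two term sets. A minimal sketch under that assumption, not the project's actual code:

import java.util.HashSet;
import java.util.Set;

final class SetUtils {
    /** elements present in exactly one of the two sets (symmetric difference); a guess at what unshared(...) does */
    static <T> Set<T> unshared(final Set<T> set1, final Set<T> set2) {
        final Set<T> result = new HashSet<>(set1);
        result.addAll(set2);          // union of both sets
        final Set<T> common = new HashSet<>(set1);
        common.retainAll(set2);       // intersection
        result.removeAll(common);     // union minus intersection
        return result;
    }
}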
Use of com.github.lindenb.jvarkit.util.vcf.predictions.SnpEffPredictionParserFactory in project jvarkit by lindenb.
The class VcfTools, method init.
public void init(final VCFHeader header) {
    this.header = header;
    this.snpEffPredictionParser = new SnpEffPredictionParserFactory().header(header).get();
    this.vepPredictionParser = new VepPredictionParserFactory().header(header).get();
    this.annPredictionParser = new AnnPredictionParserFactory(header).get();
}
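This snippet shows the fluent initialization style (factory().header(h).get()) alongside the constructor style used elsewhere on this page. A minimal consumption sketch, assuming only jvarkit and htsjdk on the classpath and using only calls that appear in these excerpts (the class name SnpEffParserSketch is ours):

import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.vcf.VCFHeader;
import com.github.lindenb.jvarkit.util.vcf.predictions.SnpEffPredictionParser;
import com.github.lindenb.jvarkit.util.vcf.predictions.SnpEffPredictionParserFactory;

public class SnpEffParserSketch {
    /** print every SnpEff gene name annotated on one variant */
    public static void printGenes(final VCFHeader header, final VariantContext ctx) {
        // fluent initialization, as in VcfTools.init above
        final SnpEffPredictionParser parser = new SnpEffPredictionParserFactory().header(header).get();
        // one prediction per annotated transcript
        for (final SnpEffPredictionParser.SnpEffPrediction pred : parser.getPredictions(ctx)) {
            final String gene = pred.getGeneName(); // may be null or empty, see VcfGeneOntology below
            if (gene != null && !gene.isEmpty()) {
                System.out.println(gene);
            }
        }
    }
}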
Use of com.github.lindenb.jvarkit.util.vcf.predictions.SnpEffPredictionParserFactory in project jvarkit by lindenb.
The class VCFCompare, method doWork.
@Override
public int doWork(final List<String> args) {
    if (args.isEmpty()) {
        LOG.error("VCFs missing.");
        return -1;
    }
    if (args.size() != 2) {
        System.err.println("Illegal number of arguments. Expected two VCFs.");
        return -1;
    }
    PrintWriter pw = null;
    XMLStreamWriter w = null;
    InputStream in = null;
    SortingCollection<LineAndFile> variants = null;
    try {
        LineAndFileComparator varcmp = new LineAndFileComparator();
        variants = SortingCollection.newInstance(LineAndFile.class, new LineAndFileCodec(), varcmp,
                this.writingSortingCollection.getMaxRecordsInRam(), this.writingSortingCollection.getTmpPaths());
        variants.setDestructiveIteration(true);
        for (int i = 0; i < 2; ++i) {
            this.inputs[i] = new Input();
            this.inputs[i].codec = VCFUtils.createDefaultVCFCodec();
            this.inputs[i].filename = args.get(i);
            LOG.info("Opening " + this.inputs[i].filename);
            in = IOUtils.openURIForReading(this.inputs[i].filename);
            final LineReader lr = new SynchronousLineReader(in);
            final LineIterator li = new LineIteratorImpl(lr);
            this.inputs[i].header = (VCFHeader) this.inputs[i].codec.readActualHeader(li);
            this.inputs[i].vepPredictionParser = new VepPredictionParserFactory(this.inputs[i].header).get();
            this.inputs[i].snpEffPredictionParser = new SnpEffPredictionParserFactory(this.inputs[i].header).get();
            this.inputs[i].annPredictionParser = new AnnPredictionParserFactory(this.inputs[i].header).get();
            while (li.hasNext()) {
                LineAndFile laf = new LineAndFile();
                laf.fileIdx = i;
                laf.line = li.next();
                variants.add(laf);
            }
            LOG.info("Done Reading " + this.inputs[i].filename);
            CloserUtil.close(li);
            CloserUtil.close(lr);
            CloserUtil.close(in);
        }
        variants.doneAdding();
        LOG.info("Done Adding");
        Set<String> commonSamples = new TreeSet<String>(this.inputs[0].header.getSampleNamesInOrder());
        commonSamples.retainAll(this.inputs[1].header.getSampleNamesInOrder());
        List<Venn0> venn1List = new ArrayList<VCFCompare.Venn0>();
        venn1List.add(new Venn1("ALL"));
        venn1List.add(new Venn1("having ID") {
            @Override
            public VariantContext filter(VariantContext ctx, int fileIndex) {
                return ctx == null || !ctx.hasID() ? null : ctx;
            }
        });
        venn1List.add(new Venn1("QUAL greater 30") {
            @Override
            public VariantContext filter(VariantContext ctx, int fileIndex) {
                return ctx == null || !ctx.hasLog10PError() || ctx.getPhredScaledQual() < 30.0 ? null : ctx;
            }
        });
        for (VariantContext.Type t : VariantContext.Type.values()) {
            venn1List.add(new VennType(t));
        }
        for (SequenceOntologyTree.Term term : SequenceOntologyTree.getInstance().getTerms()) {
            venn1List.add(new VennPred("vep", term) {
                @Override
                Set<Term> terms(VariantContext ctx, int file_id) {
                    Set<Term> tt = new HashSet<SequenceOntologyTree.Term>();
                    for (VepPredictionParser.VepPrediction pred : VCFCompare.this.inputs[file_id].vepPredictionParser.getPredictions(ctx)) {
                        tt.addAll(pred.getSOTerms());
                    }
                    return tt;
                }
            });
            venn1List.add(new VennPred("SnpEff", term) {
                @Override
                Set<Term> terms(VariantContext ctx, int file_id) {
                    Set<Term> tt = new HashSet<SequenceOntologyTree.Term>();
                    for (SnpEffPredictionParser.SnpEffPrediction pred : VCFCompare.this.inputs[file_id].snpEffPredictionParser.getPredictions(ctx)) {
                        tt.addAll(pred.getSOTerms());
                    }
                    return tt;
                }
            });
            venn1List.add(new VennPred("ANN", term) {
                @Override
                Set<Term> terms(VariantContext ctx, int file_id) {
                    Set<Term> tt = new HashSet<SequenceOntologyTree.Term>();
                    for (AnnPredictionParser.AnnPrediction pred : VCFCompare.this.inputs[file_id].annPredictionParser.getPredictions(ctx)) {
                        tt.addAll(pred.getSOTerms());
                    }
                    return tt;
                }
            });
        }
        for (String s : commonSamples) {
            venn1List.add(new VennGType(s));
        }
        /* START : digest results ====================== */
        Counter<String> diff = new Counter<String>();
        List<LineAndFile> row = new ArrayList<LineAndFile>();
        CloseableIterator<LineAndFile> iter = variants.iterator();
        for (;;) {
            LineAndFile rec = null;
            if (iter.hasNext()) {
                rec = iter.next();
            }
            if (rec == null || (!row.isEmpty() && varcmp.compare(row.get(0), rec) != 0)) {
                if (!row.isEmpty()) {
                    diff.incr("count.variations");
                    VariantContext[] contexes_init = new VariantContext[] { null, null };
                    for (LineAndFile var : row) {
                        if (contexes_init[var.fileIdx] != null) {
                            LOG.error("Duplicate context in " + inputs[var.fileIdx].filename + " : " + var.line);
                            continue;
                        }
                        contexes_init[var.fileIdx] = var.getContext();
                    }
                    for (Venn0 venn : venn1List) {
                        venn.visit(contexes_init);
                    }
                    row.clear();
                }
                if (rec == null)
                    break;
            }
            row.add(rec);
        }
        iter.close();
        /* END : digest results ====================== */
        pw = super.openFileOrStdoutAsPrintWriter(outputFile);
        XMLOutputFactory xmlfactory = XMLOutputFactory.newInstance();
        w = xmlfactory.createXMLStreamWriter(pw);
        w.writeStartElement("html");
        w.writeStartElement("body");
        /* specific samples */
        w.writeStartElement("div");
        w.writeStartElement("dl");
        for (int i = 0; i < 3; ++i) {
            String title;
            Set<String> samples;
            switch (i) {
                case 0:
                case 1:
                    title = "Sample(s) for " + this.inputs[i].filename + ".";
                    samples = new TreeSet<String>(this.inputs[i].header.getSampleNamesInOrder());
                    samples.removeAll(commonSamples);
                    break;
                default:
                    title = "Common Sample(s).";
                    samples = new TreeSet<String>(commonSamples);
                    break;
            }
            w.writeStartElement("dt");
            w.writeCharacters(title);
            w.writeEndElement();
            w.writeStartElement("dd");
            w.writeStartElement("ol");
            for (String s : samples) {
                w.writeStartElement("li");
                w.writeCharacters(s);
                w.writeEndElement();
            }
            w.writeEndElement();
            w.writeEndElement();
        }
        // dl
        w.writeEndElement();
        // div
        w.writeEndElement();
        for (Venn0 v : venn1List) {
            v.write(w);
        }
        // body
        w.writeEndElement();
        // html
        w.writeEndElement();
        w.writeEndDocument();
        w.close();
        w = null;
        pw.flush();
        pw.close();
        pw = null;
    } catch (Exception err) {
        LOG.error(err);
        return -1;
    } finally {
        CloserUtil.close(w);
        CloserUtil.close(pw);
        if (variants != null)
            variants.cleanup();
    }
    return 0;
}
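The Venn0/Venn1 hierarchy itself is not part of this excerpt. From the anonymous subclasses above (a filter(VariantContext, int) hook that returns null to exclude a variant) and the venn.visit(contexes_init) call, each entry acts as a two-way Venn counter over the variant pair. A hedged sketch of that idea; the class and field names here are ours, not jvarkit's:

import htsjdk.variant.variantcontext.VariantContext;

class VennCounterSketch {
    private final String title;
    private long onlyFile1 = 0L, onlyFile2 = 0L, both = 0L;

    VennCounterSketch(final String title) {
        this.title = title;
    }

    /** override to restrict the count, e.g. to variants having an ID; returning null excludes the variant */
    protected VariantContext filter(final VariantContext ctx, final int fileIndex) {
        return ctx;
    }

    /** contexts[i] is null when file i has no variant at the current position */
    void visit(final VariantContext[] contexts) {
        final VariantContext c1 = filter(contexts[0], 0);
        final VariantContext c2 = filter(contexts[1], 1);
        if (c1 != null && c2 != null) ++both;
        else if (c1 != null) ++onlyFile1;
        else if (c2 != null) ++onlyFile2;
    }

    @Override
    public String toString() {
        return title + ": file1-only=" + onlyFile1 + " file2-only=" + onlyFile2 + " both=" + both;
    }
}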
Use of com.github.lindenb.jvarkit.util.vcf.predictions.SnpEffPredictionParserFactory in project jvarkit by lindenb.
The class VcfGeneOntology, method filterVcfIterator.
private void filterVcfIterator(final VcfIterator in) throws IOException {
    VariantContextWriter w = null;
    try {
        VCFHeader header = in.getHeader();
        VCFHeader h2 = new VCFHeader(header);
        h2.addMetaDataLine(new VCFInfoHeaderLine(TAG, VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String,
                "GO terms from GO " + GO + " and GOA=" + GOA));
        h2.addMetaDataLine(new VCFHeaderLine(getClass().getSimpleName() + "CmdLine", String.valueOf(getProgramCommandLine())));
        h2.addMetaDataLine(new VCFHeaderLine(getClass().getSimpleName() + "Version", String.valueOf(getVersion())));
        h2.addMetaDataLine(new VCFHeaderLine(getClass().getSimpleName() + "HtsJdkVersion", HtsjdkVersion.getVersion()));
        h2.addMetaDataLine(new VCFHeaderLine(getClass().getSimpleName() + "HtsJdkHome", HtsjdkVersion.getHome()));
        if (filterName != null) {
            h2.addMetaDataLine(new VCFFilterHeaderLine(filterName,
                    "Flag GO terms " + (inverse_filter ? " not descendant of " : "") + " the provided GO terms"));
        }
        w = super.openVariantContextWriter(outputFile);
        w.writeHeader(h2);
        final SAMSequenceDictionaryProgress progess = new SAMSequenceDictionaryProgress(header.getSequenceDictionary());
        final SnpEffPredictionParser snpEffPredictionParser = new SnpEffPredictionParserFactory().header(header).get();
        final VepPredictionParser vepPredictionParser = new VepPredictionParserFactory().header(header).get();
        while (in.hasNext()) {
            if (System.out.checkError())
                break;
            VariantContext ctx = progess.watch(in.next());
            /* symbols for this variant */
            Set<String> symbols = new HashSet<String>();
            /* scan SNPEFF gene */
            for (SnpEffPrediction pred : snpEffPredictionParser.getPredictions(ctx)) {
                String genName = pred.getGeneName();
                if (genName == null || genName.isEmpty())
                    continue;
                symbols.add(genName);
            }
            /* scan VEP gene */
            for (VepPrediction pred : vepPredictionParser.getPredictions(ctx)) {
                String genName = pred.getGeneName();
                if (!(genName == null || genName.isEmpty())) {
                    symbols.add(genName);
                }
                genName = pred.getGene();
                if (!(genName == null || genName.isEmpty())) {
                    symbols.add(genName);
                }
                genName = pred.getHGNC();
                if (!(genName == null || genName.isEmpty())) {
                    symbols.add(genName);
                }
            }
            /* only keep known GENES from GOA */
            symbols.retainAll(this.name2go.keySet());
            boolean found_child_of_filter = false;
            /* ATTS */
            List<String> atts = new ArrayList<String>();
            /* loop over symbols */
            for (String symbol : symbols) {
                /* go terms associated to this symbol */
                Set<GoTree.Term> t2 = this.name2go.get(symbol);
                if (t2 == null || t2.isEmpty())
                    continue;
                StringBuilder sb = new StringBuilder(symbol);
                sb.append("|");
                boolean first = true;
                for (GoTree.Term gt : t2) {
                    /* user gave terms to filter */
                    if (!found_child_of_filter && this.goTermToFilter != null) {
                        for (GoTree.Term userTerm : this.goTermToFilter) {
                            if (userTerm.hasDescendant(gt.getAcn())) {
                                found_child_of_filter = true;
                                break;
                            }
                        }
                    }
                    if (!first)
                        sb.append("&");
                    sb.append(gt.getAcn());
                    first = false;
                }
                atts.add(sb.toString());
            }
            /* no go term was found */
            if (atts.isEmpty()) {
                if (!removeIfNoGo) {
                    w.add(ctx);
                }
                continue;
            }
            VariantContextBuilder vcb = new VariantContextBuilder(ctx);
            /* check children of user's terms */
            if (this.goTermToFilter != null) {
                /* keep if found children */
                if ((this.inverse_filter && found_child_of_filter) || (!this.inverse_filter && !found_child_of_filter)) {
                    /* don't remove, but set filter */
                    if (this.filterName != null) {
                        Set<String> filters = new HashSet<String>(ctx.getFilters());
                        filters.add(this.filterName);
                        vcb.filters(filters);
                    } else {
                        continue;
                    }
                }
            }
            /* add go terms */
            vcb.attribute(this.TAG, atts);
            w.add(vcb.make());
        }
        progess.finish();
        w.close();
        w = null;
    } finally {
        CloserUtil.close(w);
        w = null;
    }
}
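For reference, the StringBuilder loop above emits one INFO entry per gene symbol, formatted as SYMBOL|ACN1&ACN2&.... A compact equivalent of that formatting step; this is a sketch with hypothetical example values, not the project's code:

import java.util.Arrays;
import java.util.List;

final class GoAttributeSketch {
    /** same SYMBOL|ACN1&ACN2 layout as the loop in filterVcfIterator above */
    static String format(final String symbol, final List<String> accessions) {
        return symbol + "|" + String.join("&", accessions);
    }

    public static void main(final String[] args) {
        // hypothetical gene symbol and GO accessions, for illustration only
        System.out.println(format("BRCA1", Arrays.asList("GO:0006281", "GO:0005634")));
        // prints: BRCA1|GO:0006281&GO:0005634
    }
}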