Use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
The class VcfConcat, method fromFiles.
private int fromFiles(final VariantContextWriter out) throws IOException {
List<VcfIterator> inputs = new ArrayList<VcfIterator>(this.inputFiles.size());
List<String> inputFiles = new ArrayList<>(this.inputFiles.size());
List<String> samples = new ArrayList<>();
SAMSequenceDictionary dict = null;
try {
Set<VCFHeaderLine> metaData = new HashSet<VCFHeaderLine>();
/* open each vcf file */
for (String vcfFile : this.inputFiles) {
LOG.info("Opening " + vcfFile);
VcfIterator r = VCFUtils.createVcfIterator(vcfFile);
/* check VCF dict */
VCFHeader header = r.getHeader();
if (dict == null && inputs.isEmpty()) {
dict = header.getSequenceDictionary();
} else if (!inputs.isEmpty() && ((dict == null && header.getSequenceDictionary() != null) || (dict != null && header.getSequenceDictionary() == null))) {
LOG.error("not.the.same.sequence.dictionaries");
return -1;
} else if (!inputs.isEmpty() && dict != null && !SequenceUtil.areSequenceDictionariesEqual(dict, header.getSequenceDictionary())) {
LOG.error("not.the.same.sequence.dictionaries");
return -1;
}
/* check samples */
if (inputs.isEmpty()) {
samples = header.getSampleNamesInOrder();
} else if (!header.getSampleNamesInOrder().equals(samples)) {
LOG.error("No same samples");
return -1;
}
metaData.addAll(header.getMetaDataInInputOrder());
inputs.add(r);
inputFiles.add(VCFUtils.escapeInfoField(vcfFile));
}
/* create comparator according to dict*/
final Comparator<VariantContext> comparator = (dict == null ? VCFUtils.createChromPosRefComparator() : VCFUtils.createTidPosRefComparator(dict));
metaData.add(new VCFHeaderLine(getClass().getSimpleName() + "CmdLine", String.valueOf(getProgramCommandLine())));
metaData.add(new VCFHeaderLine(getClass().getSimpleName() + "Version", String.valueOf(getVersion())));
metaData.add(new VCFHeaderLine(getClass().getSimpleName() + "HtsJdkVersion", HtsjdkVersion.getVersion()));
metaData.add(new VCFHeaderLine(getClass().getSimpleName() + "HtsJdkHome", HtsjdkVersion.getHome()));
metaData.add(new VCFInfoHeaderLine(VARIANTSOURCE, 1, VCFHeaderLineType.String, "Origin File of Variant"));
VCFHeader h2 = new VCFHeader(metaData, samples);
out.writeHeader(h2);
SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(dict);
for (; ; ) {
/* get 'smallest' variant */
VariantContext smallest = null;
int idx = 0;
int best_idx = -1;
while (idx < inputs.size()) {
VcfIterator in = inputs.get(idx);
if (!in.hasNext()) {
CloserUtil.close(in);
inputs.remove(idx);
inputFiles.remove(idx);
} else {
VariantContext ctx = in.peek();
if (smallest == null || comparator.compare(smallest, ctx) > 0) {
smallest = ctx;
best_idx = idx;
}
++idx;
}
}
if (smallest == null)
break;
final VariantContext ctx = progress.watch(inputs.get(best_idx).next());
final VariantContextBuilder vcb = new VariantContextBuilder(ctx);
vcb.attribute(VARIANTSOURCE, inputFiles.get(best_idx));
out.add(vcb.make());
}
progress.finish();
return 0;
} catch (Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(inputs);
}
}
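For reference, the annotation pattern above reduces to a few htsjdk calls: declare a Number=1 String INFO line, write the merged header, then stamp each variant with its file of origin. A minimal sketch assuming only htsjdk; the helper name writeWithSource and the SRCFILE tag are illustrative, not the jvarkit constants:

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.variantcontext.VariantContextBuilder;
import htsjdk.variant.variantcontext.writer.VariantContextWriter;
import htsjdk.variant.vcf.VCFHeader;
import htsjdk.variant.vcf.VCFHeaderLine;
import htsjdk.variant.vcf.VCFHeaderLineType;
import htsjdk.variant.vcf.VCFInfoHeaderLine;

static void writeWithSource(final VCFHeader header, final Iterator<VariantContext> variants, final String sourceFile, final VariantContextWriter out) {
    final String TAG = "SRCFILE"; // illustrative tag name
    final Set<VCFHeaderLine> meta = new HashSet<>(header.getMetaDataInInputOrder());
    // Number=1, Type=String: exactly one origin file per record
    meta.add(new VCFInfoHeaderLine(TAG, 1, VCFHeaderLineType.String, "Origin file of the variant"));
    out.writeHeader(new VCFHeader(meta, header.getSampleNamesInOrder()));
    while (variants.hasNext()) {
        out.add(new VariantContextBuilder(variants.next()).attribute(TAG, sourceFile).make());
    }
}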
Use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
The class LumpySort, method doWork.
@Override
public int doWork(final List<String> args) {
VariantContextWriter vcw = null;
LineIterator vcfIn = null;
Environment environment = null;
Database variantsDb1 = null;
final List<File> inputs = IOUtil.unrollFiles(args.stream().map(S -> new File(S)).collect(Collectors.toList()), ".vcf", ".vcf.gz");
if (inputs.isEmpty()) {
LOG.error("empty vcf list");
return -1;
}
try {
IOUtil.assertDirectoryIsWritable(this.bdbHomeDir);
final Set<VCFHeaderLine> metaData = new HashSet<>();
final Set<String> sampleNames = new TreeSet<>();
final IntervalTreeMap<Boolean> intervalTreeMapBed;
if (this.bedFile != null) {
intervalTreeMapBed = new IntervalTreeMap<>();
final BedLineCodec bedLineCodec = new BedLineCodec();
final BufferedReader br = IOUtils.openFileForBufferedReading(this.bedFile);
br.lines().map(L -> bedLineCodec.decode(L)).filter(L -> L != null).forEach(B -> intervalTreeMapBed.put(B.toInterval(), true));
br.close();
} else {
intervalTreeMapBed = null;
}
for (int idx = 0; idx < inputs.size(); ++idx) {
final File vcfFile = inputs.get(idx);
LOG.info("Read header " + (idx + 1) + "/" + inputs.size());
final VCFFileReader r = new VCFFileReader(vcfFile, false);
final VCFHeader header = r.getFileHeader();
if (!LumpyConstants.isLumpyHeader(header)) {
LOG.error("doesn't look like a Lumpy-SV vcf header " + vcfFile);
r.close();
return -1;
}
if (!header.hasGenotypingData()) {
LOG.error("No sample in " + vcfFile);
r.close();
return -1;
}
for (final String sampleName : header.getSampleNamesInOrder()) {
if (sampleNames.contains(sampleName)) {
LOG.error("Sample found twice " + sampleName + " in " + vcfFile);
r.close();
return -1;
}
sampleNames.add(sampleName);
}
metaData.addAll(header.getMetaDataInInputOrder().stream().filter(H -> !H.getKey().equals("fileDate")).collect(Collectors.toSet()));
r.close();
}
final VCFInfoHeaderLine nSampleInfoHeaderLine = new VCFInfoHeaderLine("NSAMPLES", 1, VCFHeaderLineType.Integer, "Number of affected samples.");
metaData.add(nSampleInfoHeaderLine);
final VCFFormatHeaderLine chromStartFormatHeaderLine = new VCFFormatHeaderLine("CB", 1, VCFHeaderLineType.Integer, "Original Variant POS");
metaData.add(chromStartFormatHeaderLine);
final VCFFormatHeaderLine chromEndFormatHeaderLine = new VCFFormatHeaderLine("CE", 1, VCFHeaderLineType.Integer, "Original Variant END");
metaData.add(chromEndFormatHeaderLine);
final VCFHeader outHeader = new VCFHeader(metaData, sampleNames);
final VCFHeaderVersion[] versions = VCFHeaderVersion.values();
this.vcfEncoder = new VCFEncoder(outHeader, false, true);
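// VCFHeaderVersion.values() is declared oldest-to-newest, so the last element is the most recent supported header version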
this.vcfCodec.setVCFHeader(outHeader, versions[versions.length - 1]);
/* open BDB env */
final Transaction txn = null;
environment = new Environment(this.bdbHomeDir, new EnvironmentConfig().setAllowCreate(true).setReadOnly(false));
variantsDb1 = environment.openDatabase(txn, "variants1", new DatabaseConfig().setBtreeComparator(KeySorterComparator.class).setAllowCreate(true).setReadOnly(false).setTemporary(true));
long total_variants = 0L;
final LumpyVarBinding lumpVarBinding = new LumpyVarBinding();
final KeySorterBinding keySorterBinding = new KeySorterBinding();
for (int idx = 0; idx < inputs.size(); ++idx) {
final long millisecstart = System.currentTimeMillis();
final File vcfFile = inputs.get(idx);
int nVariant = 0;
final VCFFileReader r = new VCFFileReader(vcfFile, false);
final List<Genotype> missing = new ArrayList<>(sampleNames.size());
for (final String sn : sampleNames) {
if (r.getFileHeader().getSampleNamesInOrder().contains(sn))
continue;
missing.add(GenotypeBuilder.createMissing(sn, 2));
}
final CloseableIterator<VariantContext> iter = r.iterator();
while (iter.hasNext()) {
VariantContext ctx = iter.next();
if (!this.keep_secondary) {
if (ctx.hasAttribute("SECONDARY"))
continue;
}
if (!this.variantFilter.test(ctx))
continue;
if (intervalTreeMapBed != null && !intervalTreeMapBed.containsOverlapping(ctx))
continue;
final List<Genotype> gtList = new ArrayList<>(ctx.getGenotypes());
for (int gi = 0; gi < gtList.size(); gi++) {
Genotype g = gtList.get(gi);
final GenotypeBuilder gb;
if (this.do_genotype && isAvailableGenotype(g)) {
gb = new GenotypeBuilder(g.getSampleName(), ctx.getAlternateAlleles());
gb.attributes(g.getExtendedAttributes());
} else {
gb = new GenotypeBuilder(g);
}
gb.attribute(chromStartFormatHeaderLine.getID(), ctx.getStart());
gb.attribute(chromEndFormatHeaderLine.getID(), ctx.getEnd());
gtList.set(gi, gb.make());
}
gtList.addAll(missing);
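// LUMPY stores a breakpoint-probability curve in INFO/PRPOS; it is dropped here before the record is serialized into the sorting database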
ctx = new VariantContextBuilder(ctx).genotypes(gtList).rmAttribute("PRPOS").make();
final LumpyVar lvar = new LumpyVar(ctx, total_variants);
final DatabaseEntry key = new DatabaseEntry();
final DatabaseEntry data = new DatabaseEntry();
lumpVarBinding.objectToEntry(lvar, data);
keySorterBinding.objectToEntry(lvar.getSortKey(), key);
if (variantsDb1.put(txn, key, data) != OperationStatus.SUCCESS) {
r.close();
LOG.error("insertion failed");
return -1;
}
nVariant++;
total_variants++;
}
iter.close();
r.close();
LOG.info("Read " + (idx + 1) + "/" + inputs.size() + " variants of " + vcfFile + " N=" + nVariant + " Total:" + total_variants + " That took: " + Duration.ofMillis(System.currentTimeMillis() - millisecstart));
System.gc();
}
if (intervalTreeMapBed != null)
intervalTreeMapBed.clear();
System.gc();
LOG.info("Writing output");
final List<Allele> ALLELES_NO_CALLS = this.do_genotype ? Collections.singletonList(Allele.NO_CALL) : Arrays.asList(Allele.NO_CALL, Allele.NO_CALL);
final Cursor cursor = variantsDb1.openCursor(txn, null);
vcw = super.openVariantContextWriter(this.outputFile);
vcw.writeHeader(outHeader);
for (; ; ) {
final DatabaseEntry key = new DatabaseEntry();
final DatabaseEntry data = new DatabaseEntry();
OperationStatus status = cursor.getNext(key, data, LockMode.DEFAULT);
if (!status.equals(OperationStatus.SUCCESS))
break;
final LumpyVar first = lumpVarBinding.entryToObject(data);
if (this.do_not_merge_ctx) {
vcw.add(first.ctx);
continue;
}
final KeySorter keySorter1 = keySorterBinding.entryToObject(key);
final List<LumpyVar> buffer = new ArrayList<>();
buffer.add(first);
final DatabaseEntry key2 = new DatabaseEntry();
final DatabaseEntry data2 = new DatabaseEntry();
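// scan ahead with a duplicated cursor so mergeable neighbors can be read (and deleted) without moving the outer cursor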
final Cursor cursor2 = cursor.dup(true);
for (; ; ) {
status = cursor2.getNext(key2, data2, LockMode.DEFAULT);
if (!status.equals(OperationStatus.SUCCESS))
break;
final KeySorter keySorter2 = keySorterBinding.entryToObject(key2);
if (keySorter1.compare1(keySorter2) != 0) {
break;
}
final LumpyVar lv = lumpVarBinding.entryToObject(data2);
if (lv.ctx.getStart() > first.ctx.getEnd()) {
break;
}
if (first.canMerge(lv)) {
buffer.add(lv);
cursor2.delete();
}
}
cursor2.close();
// delete 'first'
cursor.delete();
final int variantStartA = buffer.stream().mapToInt(V -> V.ctx.getStart()).min().getAsInt();
final int variantStartB = (int) buffer.stream().mapToInt(V -> V.ctx.getStart()).average().getAsDouble();
final int variantStartC = buffer.stream().mapToInt(V -> V.ctx.getStart()).max().getAsInt();
final int variantEndA = buffer.stream().mapToInt(V -> V.ctx.getEnd()).min().getAsInt();
final int variantEndB = (int) buffer.stream().mapToInt(V -> V.ctx.getEnd()).average().getAsDouble();
final int variantEndC = buffer.stream().mapToInt(V -> V.ctx.getEnd()).max().getAsInt();
final VariantContextBuilder vcb = new VariantContextBuilder("lumpymerge", first.ctx.getContig(), variantStartB, variantEndB, first.ctx.getAlleles());
vcb.attribute("END", variantEndB);
vcb.attribute("SVTYPE", first.ctx.getAttribute("SVTYPE"));
vcb.attribute("SVLEN", (int) Percentile.median().evaluate(buffer.stream().mapToInt(V -> V.ctx.getEnd() - V.ctx.getStart())));
vcb.attribute("CIPOS", Arrays.asList(variantStartB - variantStartA, variantStartC - variantStartB));
vcb.attribute("CIEND", Arrays.asList(variantEndB - variantEndA, variantEndC - variantEndB));
vcb.attribute("SU", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("SU", 0)).sum());
vcb.attribute("SR", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("SR", 0)).sum());
vcb.attribute("PE", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("PE", 0)).sum());
final Map<String, Genotype> sample2genotype = new HashMap<>(sampleNames.size());
buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).filter(G -> isAvailableGenotype(G)).forEach(G -> {
sample2genotype.put(G.getSampleName(), G);
});
vcb.attribute(nSampleInfoHeaderLine.getID(), sample2genotype.size());
for (final String sn : sampleNames) {
if (!sample2genotype.containsKey(sn)) {
sample2genotype.put(sn, new GenotypeBuilder(sn, ALLELES_NO_CALLS).attribute("SU", 0).attribute("SR", 0).attribute("PE", 0).make());
}
}
vcb.genotypes(sample2genotype.values());
vcw.add(vcb.make());
}
cursor.close();
vcw.close();
vcw = null;
variantsDb1.close();
variantsDb1 = null;
environment.close();
environment = null;
return 0;
} catch (final Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(vcfIn);
CloserUtil.close(vcw);
CloserUtil.close(variantsDb1);
CloserUtil.close(environment);
}
}
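The confidence-interval arithmetic above is easy to misread, so here it is in isolation. A minimal sketch of how CIPOS is derived from the buffered starts; the helper name ciAroundMean is illustrative:

import java.util.stream.IntStream;

// spread of the buffered starts below and above the reported mean position,
// matching (variantStartB - variantStartA, variantStartC - variantStartB) above
static int[] ciAroundMean(final int[] starts) {
    final int min = IntStream.of(starts).min().getAsInt();
    final int mean = (int) IntStream.of(starts).average().getAsDouble();
    final int max = IntStream.of(starts).max().getAsInt();
    return new int[] { mean - min, max - mean };
}

The same three statistics drive CIEND, with the mean end doubling as the merged record's END attribute.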
Use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
The class FixVCF, method doWork.
private int doWork(String filenameIn, InputStream vcfStream, VariantContextWriter w) throws IOException {
final AbstractVCFCodec vcfCodec = VCFUtils.createDefaultVCFCodec();
LineIterator r = new LineIteratorImpl(new SynchronousLineReader(vcfStream));
final VCFHeader header = (VCFHeader) vcfCodec.readActualHeader(r);
// sample names have been changed by the picard API and reordered
// re-create the original order
List<String> sampleNamesInSameOrder = new ArrayList<String>(header.getSampleNamesInOrder().size());
for (int col = 0; col < header.getSampleNamesInOrder().size(); ++col) {
for (String sample : header.getSampleNameToOffset().keySet()) {
if (header.getSampleNameToOffset().get(sample) == col) {
sampleNamesInSameOrder.add(sample);
break;
}
}
}
if (sampleNamesInSameOrder.size() != header.getSampleNamesInOrder().size()) {
throw new IllegalStateException();
}
VCFHeader h2 = new VCFHeader(header.getMetaDataInInputOrder(), sampleNamesInSameOrder);
File tmp = IOUtil.newTempFile("tmp", ".vcf.gz", new File[] { tmpDir });
tmp.deleteOnExit();
PrintWriter pw = new PrintWriter(new GZIPOutputStream(new FileOutputStream(tmp)));
while (r.hasNext()) {
String line = r.next();
pw.println(line);
VariantContext ctx = null;
try {
ctx = vcfCodec.decode(line);
} catch (Exception err) {
pw.close();
LOG.error(line);
LOG.error(err);
return -1;
}
for (String f : ctx.getFilters()) {
if (h2.getFilterHeaderLine(f) != null)
continue;
// if(f.equals(VCFConstants.PASSES_FILTERS_v4)) continue; hum...
if (f.isEmpty() || f.equals(VCFConstants.UNFILTERED))
continue;
LOG.info("Fixing missing Filter:" + f);
h2.addMetaDataLine(new VCFFilterHeaderLine(f));
}
for (String tag : ctx.getAttributes().keySet()) {
if (h2.getInfoHeaderLine(tag) != null)
continue;
LOG.info("Fixing missing INFO:" + tag);
h2.addMetaDataLine(new VCFInfoHeaderLine(tag, VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String, "undefined. Saved by " + getClass()));
}
}
pw.flush();
pw.close();
pw = null;
LOG.info("re-reading VCF frm tmpFile:" + tmp);
h2.addMetaDataLine(new VCFHeaderLine(getClass().getSimpleName(), "Saved VCF FILTER AND INFO from " + filenameIn));
// save header in memory
ByteArrayOutputStream baos = new ByteArrayOutputStream();
VariantContextWriter w2 = VCFUtils.createVariantContextWriterToOutputStream(baos);
w2.writeHeader(h2);
w2.close();
baos.close();
// reopen tmp file
@SuppressWarnings("resource") VcfIterator in = new VcfIteratorImpl(new SequenceInputStream(new ByteArrayInputStream(baos.toByteArray()), new GZIPInputStream(new FileInputStream(tmp))));
w.writeHeader(h2);
while (in.hasNext()) {
w.add(in.next());
}
in.close();
tmp.delete();
return 0;
}
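The repair idiom above is self-contained: any FILTER or INFO key seen in a data line but missing from the header gets a permissive declaration so the file parses. A minimal sketch assuming only htsjdk; declareMissing is a hypothetical helper name:

import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.vcf.VCFConstants;
import htsjdk.variant.vcf.VCFFilterHeaderLine;
import htsjdk.variant.vcf.VCFHeader;
import htsjdk.variant.vcf.VCFHeaderLineCount;
import htsjdk.variant.vcf.VCFHeaderLineType;
import htsjdk.variant.vcf.VCFInfoHeaderLine;

static void declareMissing(final VCFHeader header, final VariantContext ctx) {
    for (final String f : ctx.getFilters()) {
        if (f.isEmpty() || VCFConstants.UNFILTERED.equals(f) || header.getFilterHeaderLine(f) != null) continue;
        header.addMetaDataLine(new VCFFilterHeaderLine(f));
    }
    for (final String tag : ctx.getAttributes().keySet()) {
        if (header.getInfoHeaderLine(tag) != null) continue;
        // Number=. and Type=String accept whatever value the record carries
        header.addMetaDataLine(new VCFInfoHeaderLine(tag, VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String, "undefined; restored from a data line"));
    }
}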
Use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
The class VCFAnnoBam, method doVcfToVcf.
@Override
protected int doVcfToVcf(final String inputName, final VcfIterator r, final VariantContextWriter w) {
BufferedReader bedIn = null;
List<SamReader> samReaders = new ArrayList<SamReader>();
IntervalTreeMap<Rgn> capture = new IntervalTreeMap<Rgn>();
try {
SAMFileHeader firstHeader = null;
for (final File samFile : new HashSet<File>(BAMFILE)) {
LOG.info("open bam " + samFile);
final SamReader samReader = super.openSamReader(samFile.getPath());
final SAMFileHeader samHeader = samReader.getFileHeader();
samReaders.add(samReader);
if (firstHeader == null) {
firstHeader = samHeader;
} else if (!SequenceUtil.areSequenceDictionariesEqual(firstHeader.getSequenceDictionary(), samHeader.getSequenceDictionary())) {
throw new JvarkitException.DictionariesAreNotTheSame(firstHeader.getSequenceDictionary(), samHeader.getSequenceDictionary());
}
}
IntervalList intervalList = new IntervalList(firstHeader);
LOG.info("read bed " + BEDILE);
bedIn = IOUtils.openFileForBufferedReading(BEDILE);
String line;
final BedLineCodec bedCodec = new BedLineCodec();
while ((line = bedIn.readLine()) != null) {
if (line.isEmpty() || line.startsWith("#"))
continue;
final BedLine bed = bedCodec.decode(line);
if (bed == null)
continue;
if (firstHeader.getSequenceDictionary().getSequence(bed.getContig()) == null) {
LOG.error("error in BED +" + BEDILE + " : " + line + " chromosome is not in sequence dict of " + BAMFILE);
continue;
}
intervalList.add(bed.toInterval());
}
bedIn.close();
bedIn = null;
intervalList = intervalList.sorted();
for (final Interval interval : intervalList.uniqued()) {
final Rgn rgn = new Rgn();
rgn.interval = interval;
capture.put(rgn.interval, rgn);
}
intervalList = null;
VCFHeader header = r.getHeader();
VCFHeader h2 = new VCFHeader(header.getMetaDataInInputOrder(), header.getSampleNamesInOrder());
h2.addMetaDataLine(new VCFInfoHeaderLine(this.capture_tag, 1, VCFHeaderLineType.String, "Capture stats: Format is (start|end|mean|min|max|length|not_covered|percent_covered) BAM files: " + BAMFILE + " CAPTURE:" + BEDILE));
w.writeHeader(h2);
while (r.hasNext()) {
final VariantContext ctx = r.next();
Interval interval = new Interval(ctx.getContig(), ctx.getStart(), ctx.getEnd());
Collection<Rgn> rgns = capture.getOverlapping(interval);
Iterator<Rgn> it = rgns.iterator();
if (!it.hasNext()) {
w.add(ctx);
continue;
}
final Rgn rgn = it.next();
if (!rgn.processed) {
// LOG.info("processing "+rgn.interval);
process(rgn, samReaders);
}
final VariantContextBuilder b = new VariantContextBuilder(ctx);
b.attribute(this.capture_tag, rgn.toString());
w.add(b.make());
}
return 0;
} catch (final Exception err) {
LOG.error(err);
return -1;
} finally {
for (final SamReader samReader : samReaders) CloserUtil.close(samReader);
}
}
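The capture lookup relies on htsjdk's IntervalTreeMap, which keys values by genomic interval and answers overlap queries efficiently. A minimal sketch of the put/getOverlapping round trip used above; the contig and coordinates are illustrative:

import htsjdk.samtools.util.Interval;
import htsjdk.samtools.util.IntervalTreeMap;

final IntervalTreeMap<String> capture = new IntervalTreeMap<>();
capture.put(new Interval("chr1", 100, 200), "regionA"); // illustrative capture region
capture.put(new Interval("chr1", 500, 600), "regionB");
// returns every value whose interval overlaps the query; empty if the variant is off-target
for (final String hit : capture.getOverlapping(new Interval("chr1", 150, 150))) {
    System.out.println(hit); // prints "regionA"
}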
Use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
The class VCFCombineTwoSnvs, method doVcfToVcf.
@Override
protected int doVcfToVcf(final String inputName, File saveAs) {
BufferedReader bufferedReader = null;
htsjdk.variant.variantcontext.writer.VariantContextWriter w = null;
SortingCollection<CombinedMutation> mutations = null;
CloseableIterator<Variant> varIter = null;
CloseableIterator<CombinedMutation> mutIter = null;
Map<String, SamReader> sample2samReader = new HashMap<>();
try {
bufferedReader = inputName == null ? IOUtils.openStreamForBufferedReader(stdin()) : IOUtils.openURIForBufferedReading(inputName);
final VCFUtils.CodecAndHeader cah = VCFUtils.parseHeader(bufferedReader);
/* get VCF header */
final VCFHeader header = cah.header;
final Set<String> sampleNamesInOrder = new HashSet<>(header.getSampleNamesInOrder());
LOG.info("opening REF:" + referenceFile);
this.indexedFastaSequenceFile = new IndexedFastaSequenceFile(this.referenceFile);
final SAMSequenceDictionary dict = this.indexedFastaSequenceFile.getSequenceDictionary();
if (dict == null)
throw new IOException("dictionary missing");
if (this.bamIn != null) {
/**
* unroll and open bam file
*/
for (final File bamFile : IOUtils.unrollFileCollection(Collections.singletonList(this.bamIn))) {
LOG.info("opening BAM :" + this.bamIn);
final SamReader samReader = SamReaderFactory.makeDefault().referenceSequence(this.referenceFile).validationStringency(ValidationStringency.LENIENT).open(this.bamIn);
if (!samReader.hasIndex()) {
throw new IOException("Sam file is NOT indexed: " + bamFile);
}
final SAMFileHeader samHeader = samReader.getFileHeader();
if (samHeader.getSequenceDictionary() == null || !SequenceUtil.areSequenceDictionariesEqual(dict, samHeader.getSequenceDictionary())) {
throw new IOException(bamFile + " and REF don't have the same Sequence Dictionary.");
}
/* get sample name */
String sampleName = null;
for (final SAMReadGroupRecord rg : samHeader.getReadGroups()) {
if (rg.getSample() == null)
continue;
if (sampleName != null && !sampleName.equals(rg.getSample())) {
samReader.close();
throw new IOException(bamFile + " Contains two samples " + sampleName + " " + rg.getSample());
}
sampleName = rg.getSample();
}
if (sampleName == null) {
samReader.close();
LOG.warn("no sample in " + bamFile);
continue;
}
if (!sampleNamesInOrder.contains(sampleName)) {
samReader.close();
LOG.warn("no sample " + sampleName + " in vcf");
continue;
}
sample2samReader.put(sampleName, samReader);
}
}
loadKnownGenesFromUri();
this.variants = SortingCollection.newInstance(Variant.class, new VariantCodec(), new VariantComparator(dict), this.writingSortingCollection.getMaxRecordsInRam(), this.writingSortingCollection.getTmpPaths());
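// destructive iteration lets the SortingCollection discard its temporary data as records are read back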
this.variants.setDestructiveIteration(true);
SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(header);
String vcfLine = null;
while ((vcfLine = bufferedReader.readLine()) != null) {
final VariantContext ctx = progress.watch(cah.codec.decode(vcfLine));
/* discard non SNV variant */
if (!ctx.isVariant() || ctx.isIndel()) {
continue;
}
/* find the overlapping genes : extend the interval of the variant to include the stop codon */
final Collection<KnownGene> genes = new ArrayList<>();
for (List<KnownGene> lkg : this.knownGenes.getOverlapping(new Interval(ctx.getContig(), Math.max(1, ctx.getStart() - 3), ctx.getEnd() + 3))) {
genes.addAll(lkg);
}
final List<Allele> alternateAlleles = ctx.getAlternateAlleles();
/* loop over overlapping genes */
for (final KnownGene kg : genes) {
/* loop over available alleles */
for (int allele_idx = 0; allele_idx < alternateAlleles.size(); ++allele_idx) {
final Allele alt = alternateAlleles.get(allele_idx);
challenge(ctx, alt, kg, vcfLine);
}
}
}
progress.finish();
this.variants.doneAdding();
mutations = SortingCollection.newInstance(CombinedMutation.class, new MutationCodec(), new MutationComparator(dict), this.writingSortingCollection.getMaxRecordsInRam(), this.writingSortingCollection.getTmpPaths());
mutations.setDestructiveIteration(true);
final VCFFilterHeaderLine vcfFilterHeaderLine = new VCFFilterHeaderLine("TwoHaplotypes", "(number of reads carrying both mutations) < (reads carrying variant 1 + reads carrying variant 2)");
varIter = this.variants.iterator();
progress = new SAMSequenceDictionaryProgress(header);
final ArrayList<Variant> buffer = new ArrayList<>();
for (; ; ) {
Variant variant = null;
if (varIter.hasNext()) {
variant = varIter.next();
progress.watch(variant.contig, variant.genomicPosition1);
}
if (variant == null || !(!buffer.isEmpty() && buffer.get(0).contig.equals(variant.contig) && buffer.get(0).transcriptName.equals(variant.transcriptName))) {
if (!buffer.isEmpty()) {
for (int i = 0; i < buffer.size(); ++i) {
final Variant v1 = buffer.get(i);
for (int j = i + 1; j < buffer.size(); ++j) {
final Variant v2 = buffer.get(j);
if (v1.codonStart() != v2.codonStart())
continue;
if (v1.positionInCodon() == v2.positionInCodon())
continue;
if (!v1.wildCodon.equals(v2.wildCodon)) {
throw new IllegalStateException();
}
final StringBuilder combinedCodon = new StringBuilder(v1.wildCodon);
combinedCodon.setCharAt(v1.positionInCodon(), v1.mutCodon.charAt(v1.positionInCodon()));
combinedCodon.setCharAt(v2.positionInCodon(), v2.mutCodon.charAt(v2.positionInCodon()));
final String pwild = new ProteinCharSequence(v1.wildCodon).getString();
final String p1 = new ProteinCharSequence(v1.mutCodon).getString();
final String p2 = new ProteinCharSequence(v2.mutCodon).getString();
final String pCombined = new ProteinCharSequence(combinedCodon).getString();
final String combinedSO;
final String combinedType;
/* both AA are synonymous, while combined is not */
if (!pCombined.equals(pwild) && p1.equals(pwild) && p2.equals(pwild)) {
combinedType = "combined_is_nonsynonymous";
if (pCombined.equals("*")) {
/* http://www.sequenceontology.org/browser/current_svn/term/SO:0001587 */
combinedSO = "stop_gained";
} else if (pwild.equals("*")) {
/* http://www.sequenceontology.org/browser/current_svn/term/SO:0002012 */
combinedSO = "stop_lost";
} else {
/* http://www.sequenceontology.org/miso/current_svn/term/SO:0001992 */
combinedSO = "nonsynonymous_variant";
}
} else if (!pCombined.equals(p1) && !pCombined.equals(p2) && !pCombined.equals(pwild)) {
combinedType = "combined_is_new";
if (pCombined.equals("*")) {
/* http://www.sequenceontology.org/browser/current_svn/term/SO:0001587 */
combinedSO = "stop_gained";
} else {
/* http://www.sequenceontology.org/miso/current_svn/term/SO:0001992 */
combinedSO = "nonsynonymous_variant";
}
} else {
combinedType = null;
combinedSO = null;
}
/**
* ok, there is something interesting here:
* create two new Mutations carrying the two variants
*/
if (combinedSO != null) {
/**
* grantham score is the maximum found comparing combined vs (p1/p2/wild)
*/
int grantham_score = GranthamScore.score(pCombined.charAt(0), pwild.charAt(0));
grantham_score = Math.max(grantham_score, GranthamScore.score(pCombined.charAt(0), p1.charAt(0)));
grantham_score = Math.max(grantham_score, GranthamScore.score(pCombined.charAt(0), p2.charAt(0)));
/**
* info that will be displayed in the vcf
*/
final Map<String, Object> info1 = v1.getInfo(v2);
final Map<String, Object> info2 = v2.getInfo(v1);
// filter for this combined variant: by default it fails the filter
String filter = vcfFilterHeaderLine.getID();
final Map<String, Object> combinedMap = new LinkedHashMap<>();
combinedMap.put("CombinedCodon", combinedCodon);
combinedMap.put("CombinedAA", pCombined);
combinedMap.put("CombinedSO", combinedSO);
combinedMap.put("CombinedType", combinedType);
combinedMap.put("GranthamScore", grantham_score);
info1.putAll(combinedMap);
info2.putAll(combinedMap);
final Map<String, CoverageInfo> sample2coverageInfo = new HashMap<>(sample2samReader.size());
final int chromStart = Math.min(v1.genomicPosition1, v2.genomicPosition1);
final int chromEnd = Math.max(v1.genomicPosition1, v2.genomicPosition1);
/* get phasing info for each sample*/
for (final String sampleName : sample2samReader.keySet()) {
final SamReader samReader = sample2samReader.get(sampleName);
final CoverageInfo covInfo = new CoverageInfo();
sample2coverageInfo.put(sampleName, covInfo);
SAMRecordIterator iter = null;
try {
iter = samReader.query(v1.contig, chromStart, chromEnd, false);
while (iter.hasNext()) {
final SAMRecord rec = iter.next();
if (rec.getReadUnmappedFlag())
continue;
if (rec.isSecondaryOrSupplementary())
continue;
if (rec.getDuplicateReadFlag())
continue;
if (rec.getReadFailsVendorQualityCheckFlag())
continue;
// get DEPTH for variant 1
if (rec.getAlignmentStart() <= v1.genomicPosition1 && v1.genomicPosition1 <= rec.getAlignmentEnd()) {
covInfo.depth1++;
}
// get DEPTH for variant 2
if (rec.getAlignmentStart() <= v2.genomicPosition1 && v2.genomicPosition1 <= rec.getAlignmentEnd()) {
covInfo.depth2++;
}
if (rec.getAlignmentEnd() < chromEnd)
continue;
if (rec.getAlignmentStart() > chromStart)
continue;
final Cigar cigar = rec.getCigar();
if (cigar == null)
continue;
final byte[] bases = rec.getReadBases();
if (bases == null)
continue;
int refpos1 = rec.getAlignmentStart();
int readpos = 0;
boolean found_variant1_on_this_read = false;
boolean found_variant2_on_this_read = false;
/**
* loop over cigar
*/
for (final CigarElement ce : cigar.getCigarElements()) {
final CigarOperator op = ce.getOperator();
switch(op) {
case P:
continue;
case S:
case I:
readpos += ce.getLength();
break;
case D:
case N:
refpos1 += ce.getLength();
break;
case H:
continue;
case EQ:
case M:
case X:
for (int x = 0; x < ce.getLength(); ++x) {
if (refpos1 == v1.genomicPosition1 && same(bases[readpos], v1.altAllele)) {
found_variant1_on_this_read = true;
} else if (refpos1 == v2.genomicPosition1 && same(bases[readpos], v2.altAllele)) {
found_variant2_on_this_read = true;
}
refpos1++;
readpos++;
}
break;
default:
throw new IllegalStateException(op.name());
}
/* skip remaining bases after last variant */
if (refpos1 > chromEnd)
break;
}
/* sum-up what we found */
if (found_variant1_on_this_read && found_variant2_on_this_read) {
covInfo.count_reads_having_both_variants++;
} else if (!found_variant1_on_this_read && !found_variant2_on_this_read) {
covInfo.count_reads_having_no_variants++;
} else if (found_variant1_on_this_read) {
covInfo.count_reads_having_variant1++;
} else if (found_variant2_on_this_read) {
covInfo.count_reads_having_variant2++;
}
}
/* end of loop over reads */
} finally {
iter.close();
iter = null;
}
info1.put("N_READS_BOTH_VARIANTS_" + sampleName, covInfo.count_reads_having_both_variants);
info2.put("N_READS_BOTH_VARIANTS_" + sampleName, covInfo.count_reads_having_both_variants);
info1.put("N_READS_NO_VARIANTS_" + sampleName, covInfo.count_reads_having_no_variants);
info2.put("N_READS_NO_VARIANTS_" + sampleName, covInfo.count_reads_having_no_variants);
info1.put("N_READS_TOTAL_" + sampleName, covInfo.count_reads_having_both_variants + covInfo.count_reads_having_no_variants + covInfo.count_reads_having_variant1 + covInfo.count_reads_having_variant2);
info2.put("N_READS_TOTAL_" + sampleName, covInfo.count_reads_having_both_variants + covInfo.count_reads_having_no_variants + covInfo.count_reads_having_variant1 + covInfo.count_reads_having_variant2);
// count for variant 1
info1.put("N_READS_ONLY_1_" + sampleName, covInfo.count_reads_having_variant1);
info1.put("N_READS_ONLY_2_" + sampleName, covInfo.count_reads_having_variant2);
info1.put("DEPTH_1_" + sampleName, covInfo.depth1);
// inverse previous count
info2.put("N_READS_ONLY_1_" + sampleName, covInfo.count_reads_having_variant2);
info2.put("N_READS_ONLY_2_" + sampleName, covInfo.count_reads_having_variant1);
info2.put("DEPTH_2_" + sampleName, covInfo.depth2);
/* twice the number of reads carrying both variants exceeds the
* number of reads carrying only one variant: reset the filter
*/
if (2 * covInfo.count_reads_having_both_variants > (covInfo.count_reads_having_variant1 + covInfo.count_reads_having_variant2)) {
/* reset filter */
filter = VCFConstants.UNFILTERED;
info1.put("FILTER_1_" + sampleName, ".");
info2.put("FILTER_2_" + sampleName, ".");
} else {
info1.put("FILTER_1_" + sampleName, vcfFilterHeaderLine.getID());
info2.put("FILTER_2_" + sampleName, vcfFilterHeaderLine.getID());
}
}
/* end of loop over bams */
final CombinedMutation m1 = new CombinedMutation();
m1.contig = v1.contig;
m1.genomicPosition1 = v1.genomicPosition1;
m1.id = v1.id;
m1.refAllele = v1.refAllele;
m1.altAllele = v1.altAllele;
m1.vcfLine = v1.vcfLine;
m1.info = mapToString(info1);
m1.filter = filter;
m1.grantham_score = grantham_score;
m1.sorting_id = ID_GENERATOR++;
mutations.add(m1);
final CombinedMutation m2 = new CombinedMutation();
m2.contig = v2.contig;
m2.genomicPosition1 = v2.genomicPosition1;
m2.id = v2.id;
m2.refAllele = v2.refAllele;
m2.altAllele = v2.altAllele;
m2.vcfLine = v2.vcfLine;
m2.info = mapToString(info2);
m2.filter = filter;
m2.grantham_score = grantham_score;
m2.sorting_id = ID_GENERATOR++;
mutations.add(m2);
}
}
}
}
buffer.clear();
if (variant == null)
break;
}
buffer.add(variant);
}
progress.finish();
mutations.doneAdding();
varIter.close();
varIter = null;
variants.cleanup();
variants = null;
final ArrayList<CombinedMutation> mBuffer = new ArrayList<>();
final VCFHeader header2 = new VCFHeader(header);
header2.addMetaDataLine(new VCFHeaderLine(getProgramName() + "AboutQUAL", "QUAL is filled with Grantham Score http://www.ncbi.nlm.nih.gov/pubmed/4843792"));
final StringBuilder infoDesc = new StringBuilder("Variant affected by two distinct mutations. Format is defined in the INFO column. ");
final VCFInfoHeaderLine infoHeaderLine = new VCFInfoHeaderLine("CodonVariant", VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String, infoDesc.toString());
super.addMetaData(header2);
header2.addMetaDataLine(infoHeaderLine);
if (!sample2samReader.isEmpty()) {
header2.addMetaDataLine(vcfFilterHeaderLine);
}
w = super.openVariantContextWriter(saveAs);
w.writeHeader(header2);
progress = new SAMSequenceDictionaryProgress(header);
mutIter = mutations.iterator();
for (; ; ) {
CombinedMutation mutation = null;
if (mutIter.hasNext()) {
mutation = mutIter.next();
progress.watch(mutation.contig, mutation.genomicPosition1);
}
if (mutation == null || !(!mBuffer.isEmpty() && mBuffer.get(0).contig.equals(mutation.contig) && mBuffer.get(0).genomicPosition1 == mutation.genomicPosition1 && mBuffer.get(0).refAllele.equals(mutation.refAllele))) {
if (!mBuffer.isEmpty()) {
// default grantham score used in QUAL
int grantham_score = -1;
// default filter fails
String filter = vcfFilterHeaderLine.getID();
final CombinedMutation first = mBuffer.get(0);
final Set<String> info = new HashSet<>();
final VariantContext ctx = cah.codec.decode(first.vcfLine);
final VariantContextBuilder vcb = new VariantContextBuilder(ctx);
vcb.chr(first.contig);
vcb.start(first.genomicPosition1);
vcb.stop(first.genomicPosition1 + first.refAllele.length() - 1);
if (!first.id.equals(VCFConstants.EMPTY_ID_FIELD))
vcb.id(first.id);
for (final CombinedMutation m : mBuffer) {
info.add(m.info);
grantham_score = Math.max(grantham_score, m.grantham_score);
if (VCFConstants.UNFILTERED.equals(m.filter)) {
// at least one SNP is ok on this line
filter = null;
}
}
vcb.unfiltered();
if (filter != null && !sample2samReader.isEmpty()) {
vcb.filter(filter);
} else {
vcb.passFilters();
}
vcb.attribute(infoHeaderLine.getID(), new ArrayList<String>(info));
if (grantham_score > 0) {
vcb.log10PError(grantham_score / -10.0);
} else {
vcb.log10PError(VariantContext.NO_LOG10_PERROR);
}
w.add(vcb.make());
}
mBuffer.clear();
if (mutation == null)
break;
}
mBuffer.add(mutation);
}
progress.finish();
mutIter.close();
mutations.cleanup();
mutations = null;
return RETURN_OK;
} catch (Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(this.indexedFastaSequenceFile);
CloserUtil.close(mutIter);
CloserUtil.close(varIter);
if (this.variants != null)
this.variants.cleanup();
if (mutations != null)
mutations.cleanup();
this.variants = null;
for (SamReader r : sample2samReader.values()) CloserUtil.close(r);
CloserUtil.close(w);
CloserUtil.close(bufferedReader);
}
}
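One detail above deserves a note: QUAL is repurposed to carry the Grantham score, and htsjdk stores QUAL internally as log10PError = -QUAL/10, hence the division by -10.0 in the code. A minimal sketch of the conversion; withGranthamQual is a hypothetical helper name:

import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.variantcontext.VariantContextBuilder;

static VariantContextBuilder withGranthamQual(final VariantContextBuilder vcb, final int granthamScore) {
    if (granthamScore > 0) {
        vcb.log10PError(granthamScore / -10.0); // QUAL column will print granthamScore
    } else {
        vcb.log10PError(VariantContext.NO_LOG10_PERROR); // '.' in the QUAL column
    }
    return vcb;
}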