use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
the class VCFTabixml method doVcfToVcf.
@Override
protected int doVcfToVcf(String inputName, VcfIterator r, VariantContextWriter w) {
TabixReader tabixReader = null;
try {
LOG.info("opening BED" + BEDFILE);
tabixReader = new TabixReader(this.BEDFILE);
Pattern tab = Pattern.compile("[\t]");
LOG.info("loading xslt " + STYLESHEET);
this.stylesheet = TransformerFactory.newInstance().newTemplates(new StreamSource(STYLESHEET));
Transformer transformer = this.stylesheet.newTransformer();
transformer.setOutputProperty(OutputKeys.METHOD, "xml");
final VCFHeader header = r.getHeader();
final VCFHeader h2 = new VCFHeader(header);
LOG.info("reading Tags " + TAGS);
BufferedReader rT = IOUtils.openFileForBufferedReading(TAGS);
String line;
while ((line = rT.readLine()) != null) {
if (!line.startsWith(VCFHeader.METADATA_INDICATOR)) {
throw new RuntimeException("should start with " + VCFHeader.METADATA_INDICATOR + ":" + line);
}
if (!line.startsWith(VCFConstants.INFO_HEADER_START)) {
throw new RuntimeException("should start with " + VCFConstants.INFO_HEADER_START + ":" + line);
}
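// "##INFO=".length() == 7: strip the prefix before parsing the body as a VCFInfoHeaderLine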
VCFInfoHeaderLine hi = new VCFInfoHeaderLine(line.substring(7), VCFHeaderVersion.VCF4_1);
if (hi.getCount() != 1) {
throw new IllegalArgumentException("VCFHeaderLineCount not supported : " + hi);
}
switch(hi.getType()) {
case String:
break;
default:
throw new IllegalArgumentException("VCFHeaderLineTyoe not supported : " + hi);
}
LOG.info(hi.toString());
h2.addMetaDataLine(hi);
}
rT.close();
LOG.info("writing header");
w.writeHeader(h2);
JAXBContext jaxbCtx = JAXBContext.newInstance(Properties.class, Property.class);
Unmarshaller unmarshaller = jaxbCtx.createUnmarshaller();
while (r.hasNext()) {
VariantContext ctx = r.next();
HashMap<String, Set<String>> insert = new LinkedHashMap<String, Set<String>>();
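// build a 1-based tabix query window over the variant (the end is widened by one base)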
int[] array = tabixReader.parseReg(ctx.getContig() + ":" + (ctx.getStart()) + "-" + (ctx.getEnd() + 1));
TabixReader.Iterator iter = null;
if (array != null && array.length == 3 && array[0] != -1 && array[1] >= 0 && array[2] >= 0) {
iter = tabixReader.query(array[0], array[1], array[2]);
} else {
LOG.info("Cannot get " + ctx.getContig() + ":" + (ctx.getStart()) + "-" + (ctx.getEnd() + 1));
}
String line2 = null;
while (iter != null && (line2 = iter.next()) != null) {
String[] tokens2 = tab.split(line2, 5);
if (tokens2.length < 4) {
LOG.error("[VCFTabixml] VCF. Error not enough columns in tabix.line " + line2);
return -1;
}
int chromStart = Integer.parseInt(tokens2[1]);
int chromEnd = Integer.parseInt(tokens2[2]);
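// the tabix-indexed BED is expected to hold single-base intervals (end == start + 1)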
if (chromStart + 1 != chromEnd) {
LOG.error("Error in " + this.BEDFILE + " extected start+1=end int " + tokens2[0] + ":" + tokens2[1] + "-" + tokens2[2]);
continue;
}
if (ctx.getStart() - 1 != chromStart)
continue;
transformer.setParameter("vcfchrom", ctx.getContig());
transformer.setParameter("vcfpos", ctx.getStart());
transformer.setParameter("vcfref", ctx.getReference().getBaseString());
transformer.setParameter("vcfalt", ctx.getAltAlleleWithHighestAlleleCount().getBaseString());
try {
StringWriter sw = new StringWriter();
StreamSource src = new StreamSource(new StringReader(tokens2[3]));
StreamResult rez = new StreamResult(sw);
transformer.transform(src, rez);
Properties props = unmarshaller.unmarshal(new StreamSource(new StringReader(sw.toString())), Properties.class).getValue();
for (Property p : props.getProperty()) {
if (p.key.isEmpty())
continue;
if (h2.getInfoHeaderLine(p.key) == null) {
LOG.info("ignoring key " + p.key + " you could set it to:\n" + "##INFO=<ID=" + p.key + ",Number=1,Type=String,Description=\"" + p.key + " from " + BEDFILE + "\">");
continue;
}
Set<String> x = insert.get(p.key);
if (x == null) {
x = new LinkedHashSet<String>();
insert.put(p.key, x);
}
x.add(p.value);
}
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException("error", e);
}
}
if (insert.isEmpty()) {
w.add(ctx);
continue;
}
VariantContextBuilder b = new VariantContextBuilder(ctx);
for (String key : insert.keySet()) {
for (String s2 : insert.get(key)) {
b.attribute(key, s2);
// limit: keep only the first value for this key
break;
}
}
w.add(b.make());
}
return 0;
} catch (IOException err) {
err.printStackTrace();
return -1;
} catch (Throwable err) {
err.printStackTrace();
return -1;
}
}
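The TAGS loop above turns each raw "##INFO=<...>" metadata line into a VCFInfoHeaderLine. A minimal standalone sketch of that parsing step (the MYANN tag and the main wrapper are illustrative, not part of jvarkit):

import htsjdk.variant.vcf.VCFHeaderVersion;
import htsjdk.variant.vcf.VCFInfoHeaderLine;

public class InfoLineDemo {
    public static void main(final String[] args) {
        // an illustrative raw metadata line, as read from the TAGS file
        final String raw = "##INFO=<ID=MYANN,Number=1,Type=String,Description=\"demo annotation\">";
        // "##INFO=".length() == 7, hence the substring(7) in doVcfToVcf
        final VCFInfoHeaderLine hi = new VCFInfoHeaderLine(raw.substring(7), VCFHeaderVersion.VCF4_1);
        System.out.println(hi.getID() + " count=" + hi.getCount() + " type=" + hi.getType());
    }
}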
use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
the class XContaminations method doWork.
@Override
public int doWork(final List<String> args) {
long last_save_ms = System.currentTimeMillis();
if (this.output_as_vcf && !this.use_only_sample_name) {
LOG.error("cannot write vcf if --sample is not set");
return -1;
}
if (args.size() < 2) {
LOG.error("Illegal Number of args");
return -1;
}
final Set<File> bamFiles = IOUtils.unrollFiles(args.subList(1, args.size())).stream().map(S -> new File(S)).collect(Collectors.toSet());
if (bamFiles.isEmpty()) {
LOG.error("Undefined BAM file(s)");
return -1;
}
SAMRecordIterator iter = null;
VcfIterator in = null;
Map<String, SamReader> sample2samReader = new HashMap<>();
VariantContextWriter vcfw = null;
try {
final SamReaderFactory srf = super.createSamReaderFactory();
if (args.get(0).equals("-")) {
in = super.openVcfIterator(null);
} else {
in = super.openVcfIterator(args.get(0));
}
VCFHeader vcfHeader = in.getHeader();
final SAMSequenceDictionary dict1 = vcfHeader.getSequenceDictionary();
if (dict1 == null) {
LOG.error(JvarkitException.VcfDictionaryMissing.getMessage(args.get(0)));
return -1;
}
final Set<String> sampleNames = new HashSet<>(vcfHeader.getSampleNamesInOrder());
if (sampleNames.isEmpty()) {
LOG.error("VCF contains no sample");
return -1;
}
for (final File bamFile : bamFiles) {
LOG.info("Opening " + bamFile);
final SamReader samReader = srf.open(bamFile);
final SAMFileHeader samHeader = samReader.getFileHeader();
final SAMSequenceDictionary dict2 = samHeader.getSequenceDictionary();
if (dict2 == null) {
samReader.close();
LOG.error(JvarkitException.BamDictionaryMissing.getMessage(bamFile.getPath()));
return -1;
}
if (!SequenceUtil.areSequenceDictionariesEqual(dict1, dict2)) {
samReader.close();
LOG.error(JvarkitException.DictionariesAreNotTheSame.getMessage(dict1, dict2));
return -1;
}
if (!samReader.hasIndex()) {
samReader.close();
LOG.error("sam is not indexed : " + bamFile);
return -1;
}
String sampleName = null;
for (final SAMReadGroupRecord rgr : samHeader.getReadGroups()) {
final String s = rgr.getSample();
if (StringUtil.isBlank(s))
continue;
if (sampleName == null) {
sampleName = s;
} else if (!sampleName.equals(s)) {
samReader.close();
LOG.error("Cannot handle more than one sample/bam " + bamFile + " " + sampleName);
return -1;
}
}
if (sampleName == null) {
samReader.close();
LOG.error("No sample in " + bamFile);
// skip this bam
continue;
}
if (!sampleNames.contains(sampleName)) {
samReader.close();
LOG.error("Not in VCF header: sample " + sampleName + " " + bamFile);
// skip this bam
continue;
}
if (sample2samReader.containsKey(sampleName)) {
samReader.close();
LOG.error("Cannot handle more than one bam/sample: " + bamFile + " " + sampleName);
return -1;
}
sample2samReader.put(sampleName, samReader);
}
if (sample2samReader.size() < 2) {
LOG.error("Not engough BAM/samples. Expected at least two valid BAMs");
return -1;
}
sampleNames.retainAll(sample2samReader.keySet());
/* create a VCF header if VCF output was asked */
final List<SamplePair> sampleListForVcf;
if (this.output_as_vcf) {
vcfw = super.openVariantContextWriter(outputFile);
final Set<VCFHeaderLine> metaData = new HashSet<>();
metaData.add(new VCFFormatHeaderLine("S1S1", 1, VCFHeaderLineType.Integer, "reads sample 1 supporting sample 1"));
metaData.add(new VCFFormatHeaderLine("S1S2", 1, VCFHeaderLineType.Integer, "reads sample 1 supporting sample 2"));
metaData.add(new VCFFormatHeaderLine("S1SO", 1, VCFHeaderLineType.Integer, "reads sample 1 supporting others"));
metaData.add(new VCFFormatHeaderLine("S2S1", 1, VCFHeaderLineType.Integer, "reads sample 2 supporting sample 1"));
metaData.add(new VCFFormatHeaderLine("S2S2", 1, VCFHeaderLineType.Integer, "reads sample 2 supporting sample 2"));
metaData.add(new VCFFormatHeaderLine("S2SO", 1, VCFHeaderLineType.Integer, "reads sample 2 supporting others"));
metaData.add(new VCFFormatHeaderLine("FR", 1, VCFHeaderLineType.Float, "Fraction. '-1' for unavailable."));
metaData.add(new VCFFormatHeaderLine("S1A", 1, VCFHeaderLineType.Character, "sample 1 allele"));
metaData.add(new VCFFormatHeaderLine("S2A", 1, VCFHeaderLineType.Character, "sample 2 allele"));
metaData.add(new VCFFilterHeaderLine("XCONTAMINATION", "Fraction test is > " + fraction_treshold));
metaData.add(new VCFFilterHeaderLine("BADSAMPLES", "At least one pair of genotype fails the 'LE' test"));
metaData.add(new VCFInfoHeaderLine("LE", 1, VCFHeaderLineType.Integer, "number of pair of genotypes having (S1S1<=S1S2 or S2S2<=S2S1)."));
metaData.add(new VCFInfoHeaderLine("BADSAMPLES", VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String, "Samples founds failing the 'LE' test"));
sampleListForVcf = new ArrayList<>();
final List<String> sampleList = new ArrayList<>(sampleNames);
for (int x = 0; x + 1 < sampleList.size(); ++x) {
for (int y = x + 1; y < sampleList.size(); ++y) {
sampleListForVcf.add(new SamplePair(new SimpleSampleIdenfifier(sampleList.get(x)), new SimpleSampleIdenfifier(sampleList.get(y))));
}
}
final VCFHeader header2 = new VCFHeader(metaData, sampleListForVcf.stream().map(V -> V.getLabel()).sorted().collect(Collectors.toList()));
header2.setSequenceDictionary(dict1);
vcfw.writeHeader(header2);
} else {
vcfw = null;
sampleListForVcf = null;
}
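// pairwise allele-support counters, keyed by (sample1, sample2) pairs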
final Map<SamplePair, SampleAlleles> contaminationTable = new HashMap<>();
final SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(dict1).logger(LOG);
while (in.hasNext()) {
final VariantContext ctx = progress.watch(in.next());
if (!ctx.isSNP() || ctx.isFiltered() || !ctx.isBiallelic() || ctx.isSymbolic() || !this.variantFilter.test(ctx)) {
continue;
}
int count_homref = 0;
int count_homvar = 0;
int count_het = 0;
final Map<String, Genotype> sample2gt = new HashMap<>();
for (int gidx = 0; gidx < ctx.getNSamples(); ++gidx) {
final Genotype G = ctx.getGenotype(gidx);
if (!G.isCalled())
continue;
if (G.isHet()) {
// in singleton mode, any het genotype disqualifies the variant
count_het++;
if (this.use_singleton && count_het > 0)
break;
} else if (G.isHomVar()) {
// here because in use_singleton we must be sure that there is only one hom_var
count_homvar++;
if (this.use_singleton && count_homvar > 1)
break;
}
if (G.isFiltered())
continue;
if (!sample2samReader.containsKey(G.getSampleName()))
continue;
if (!sampleNames.contains(G.getSampleName()))
continue;
if (!this.genotypeFilter.test(ctx, G))
continue;
sample2gt.put(G.getSampleName(), G);
}
if (this.use_singleton && count_het > 0)
continue;
if (this.use_singleton && count_homvar > 1)
continue;
if (sample2gt.size() < 2)
continue;
// reset and recount
count_homref = 0;
count_homvar = 0;
count_het = 0;
for (final String sampleName : sample2gt.keySet()) {
final Genotype G = ctx.getGenotype(sampleName);
switch(G.getType()) {
case HOM_REF:
count_homref++;
break;
case HOM_VAR:
count_homvar++;
break;
case HET:
count_het++;
break;
default:
break;
}
}
// singleton check
if (this.use_singleton && (count_het > 0 || count_homvar != 1)) {
continue;
}
// at least one HOM_REF and one HOM_VAR
if (count_homref == 0)
continue;
if (count_homvar == 0)
continue;
final Map<SampleIdentifier, Counter<Character>> sample_identifier_2allelesCount = new HashMap<>();
/* scan Reads for those Genotype/Samples */
for (final String sampleName : sample2gt.keySet()) {
if (!sample2samReader.containsKey(sampleName))
continue;
// sample name is not in vcf header
final SamReader samReader = sample2samReader.get(sampleName);
if (samReader == null)
continue;
final Genotype genotype = sample2gt.get(sampleName);
if (genotype == null)
continue;
iter = samReader.query(ctx.getContig(), ctx.getStart(), ctx.getEnd(), false);
while (iter.hasNext()) {
final SAMRecord record = iter.next();
if (record.getEnd() < ctx.getStart())
continue;
if (ctx.getEnd() < record.getStart())
continue;
if (record.getReadUnmappedFlag())
continue;
if (this.filter.filterOut(record))
continue;
final SAMReadGroupRecord srgr = record.getReadGroup();
// no read group: the read cannot be assigned to a sample
if (srgr == null)
continue;
// skip reads belonging to another sample
if (!sampleName.equals(srgr.getSample()))
continue;
final Cigar cigar = record.getCigar();
if (cigar == null || cigar.isEmpty())
continue;
byte[] readSeq = record.getReadBases();
if (readSeq == null || readSeq.length == 0)
continue;
int readPos = record.getReadPositionAtReferencePosition(ctx.getStart());
if (readPos < 1)
continue;
readPos--;
if (readPos >= readSeq.length)
continue;
final char base = Character.toUpperCase((char) readSeq[readPos]);
if (base == 'N')
continue;
final SampleIdentifier sampleIdentifier;
if (this.use_only_sample_name) {
sampleIdentifier = new SimpleSampleIdenfifier(sampleName);
} else {
final ShortReadName readName = ShortReadName.parse(record);
if (!readName.isValid()) {
LOG.info("No a valid read name " + record.getReadName());
continue;
}
sampleIdentifier = new SequencerFlowCellRunLaneSample(readName, sampleName);
}
Counter<Character> sampleAlleles = sample_identifier_2allelesCount.get(sampleIdentifier);
if (sampleAlleles == null) {
sampleAlleles = new Counter<Character>();
sample_identifier_2allelesCount.put(sampleIdentifier, sampleAlleles);
}
sampleAlleles.incr(base);
}
iter.close();
iter = null;
}
/* end of read scan for all samples */
/* sum-up data for this SNP */
final VariantContextBuilder vcb;
final List<Genotype> genotypeList;
if (this.output_as_vcf) {
vcb = new VariantContextBuilder(args.get(0), ctx.getContig(), ctx.getStart(), ctx.getEnd(), ctx.getAlleles());
if (ctx.hasID())
vcb.id(ctx.getID());
genotypeList = new ArrayList<>();
} else {
vcb = null;
genotypeList = null;
}
for (final String sample1 : sample2gt.keySet()) {
final Genotype g1 = sample2gt.get(sample1);
final char a1 = g1.getAllele(0).getBaseString().charAt(0);
for (final String sample2 : sample2gt.keySet()) {
if (sample1.compareTo(sample2) >= 0)
continue;
final Genotype g2 = sample2gt.get(sample2);
if (g2.sameGenotype(g1))
continue;
final char a2 = g2.getAllele(0).getBaseString().charAt(0);
for (final SampleIdentifier sfcr1 : sample_identifier_2allelesCount.keySet()) {
if (!sfcr1.getSampleName().equals(sample1))
continue;
final Counter<Character> counter1 = sample_identifier_2allelesCount.get(sfcr1);
if (counter1 == null)
continue;
for (final SampleIdentifier sfcr2 : sample_identifier_2allelesCount.keySet()) {
if (!sfcr2.getSampleName().equals(sample2))
continue;
final SamplePair samplePair = new SamplePair(sfcr1, sfcr2);
final Counter<Character> counter2 = sample_identifier_2allelesCount.get(sfcr2);
if (counter2 == null)
continue;
SampleAlleles sampleAlleles = contaminationTable.get(samplePair);
if (sampleAlleles == null) {
sampleAlleles = new SampleAlleles();
contaminationTable.put(samplePair, sampleAlleles);
if (!this.output_as_vcf && contaminationTable.size() % 10000 == 0)
LOG.info("n(pairs)=" + contaminationTable.size());
}
sampleAlleles.number_of_comparaisons++;
for (final Character allele : counter1.keySet()) {
final long n = counter1.count(allele);
if (allele.equals(a1)) {
sampleAlleles.reads_sample1_supporting_sample1 += n;
} else if (allele.equals(a2)) {
sampleAlleles.reads_sample1_supporting_sample2 += n;
} else {
sampleAlleles.reads_sample1_supporting_other += n;
}
}
for (final Character allele : counter2.keySet()) {
final long n = counter2.count(allele);
if (allele.equals(a2)) {
sampleAlleles.reads_sample2_supporting_sample2 += n;
} else if (allele.equals(a1)) {
sampleAlleles.reads_sample2_supporting_sample1 += n;
} else {
sampleAlleles.reads_sample2_supporting_other += n;
}
}
}
}
}
}
if (this.output_as_vcf) {
final Set<String> bad_samples = new TreeSet<>();
boolean fraction_flag = false;
int num_lt = 0;
for (final SamplePair samplepair : sampleListForVcf) {
final GenotypeBuilder gb = new GenotypeBuilder(samplepair.getLabel());
final SampleAlleles sampleAlleles = contaminationTable.get(samplepair);
if (sampleAlleles != null) {
gb.attribute("S1S1", sampleAlleles.reads_sample1_supporting_sample1);
gb.attribute("S1S2", sampleAlleles.reads_sample1_supporting_sample2);
gb.attribute("S1SO", sampleAlleles.reads_sample1_supporting_other);
gb.attribute("S2S1", sampleAlleles.reads_sample2_supporting_sample1);
gb.attribute("S2S2", sampleAlleles.reads_sample2_supporting_sample2);
gb.attribute("S2SO", sampleAlleles.reads_sample2_supporting_other);
gb.attribute("S1A", sample2gt.get(samplepair.sample1.getSampleName()).getAllele(0).getDisplayString().charAt(0));
gb.attribute("S2A", sample2gt.get(samplepair.sample2.getSampleName()).getAllele(0).getDisplayString().charAt(0));
final double fraction = sampleAlleles.getFraction();
gb.attribute("FR", fraction);
if (!this.passFractionTreshold.test(fraction)) {
fraction_flag = true;
}
boolean bad_lt_flag = false;
if (sampleAlleles.reads_sample1_supporting_sample1 <= this.fail_factor * sampleAlleles.reads_sample1_supporting_sample2) {
bad_samples.add(samplepair.sample1.getSampleName());
bad_lt_flag = true;
}
if (sampleAlleles.reads_sample2_supporting_sample2 <= this.fail_factor * sampleAlleles.reads_sample2_supporting_sample1) {
bad_samples.add(samplepair.sample2.getSampleName());
bad_lt_flag = true;
}
if (bad_lt_flag) {
num_lt++;
}
} else {
gb.attribute("S1S1", -1);
gb.attribute("S1S2", -1);
gb.attribute("S1SO", -1);
gb.attribute("S2S1", -1);
gb.attribute("S2S2", -1);
gb.attribute("S2SO", -1);
gb.attribute("S1A", '.');
gb.attribute("S2A", '.');
gb.attribute("FR", -1f);
}
genotypeList.add(gb.make());
}
if (!bad_samples.isEmpty()) {
vcb.attribute("BADSAMPLES", new ArrayList<>(bad_samples));
}
vcb.attribute("LE", num_lt);
if (fraction_flag || !bad_samples.isEmpty()) {
if (fraction_flag)
vcb.filter("XCONTAMINATION");
if (!bad_samples.isEmpty())
vcb.filter("BADSAMPLES");
} else {
vcb.passFilters();
}
vcb.genotypes(genotypeList);
vcfw.add(vcb.make());
contaminationTable.clear();
} else {
final long now = System.currentTimeMillis();
if (this.outputFile != null && this.save_every_sec > -1L && last_save_ms + (this.save_every_sec * 1000L) <= now) {
saveToFile(contaminationTable);
last_save_ms = now;
}
}
}
progress.finish();
if (this.output_as_vcf) {
vcfw.close();
vcfw = null;
} else {
saveToFile(contaminationTable);
}
return 0;
} catch (final Exception e) {
LOG.error(e);
return -1;
} finally {
CloserUtil.close(vcfw);
CloserUtil.close(in);
CloserUtil.close(iter);
for (SamReader samReader : sample2samReader.values()) CloserUtil.close(samReader);
sample2samReader.clear();
}
}
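The VCF branch of doWork assembles its header from VCFFormatHeaderLine, VCFFilterHeaderLine and VCFInfoHeaderLine entries, one genotype column per sample pair. A minimal sketch of that assembly, with a hypothetical "S1xS2" pair label and a reduced set of tags:

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import htsjdk.variant.vcf.VCFFilterHeaderLine;
import htsjdk.variant.vcf.VCFFormatHeaderLine;
import htsjdk.variant.vcf.VCFHeader;
import htsjdk.variant.vcf.VCFHeaderLine;
import htsjdk.variant.vcf.VCFHeaderLineType;
import htsjdk.variant.vcf.VCFInfoHeaderLine;

public class PairHeaderDemo {
    public static void main(final String[] args) {
        final Set<VCFHeaderLine> metaData = new HashSet<>();
        metaData.add(new VCFFormatHeaderLine("S1S1", 1, VCFHeaderLineType.Integer, "reads sample 1 supporting sample 1"));
        metaData.add(new VCFFormatHeaderLine("FR", 1, VCFHeaderLineType.Float, "Fraction. '-1' for unavailable."));
        metaData.add(new VCFFilterHeaderLine("XCONTAMINATION", "fraction test failed"));
        metaData.add(new VCFInfoHeaderLine("LE", 1, VCFHeaderLineType.Integer, "pairs of genotypes failing the 'LE' test"));
        // one genotype column per sample pair
        final VCFHeader header = new VCFHeader(metaData, Collections.singletonList("S1xS2"));
        System.out.println(header.getInfoHeaderLine("LE").getDescription());
    }
}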
use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
the class Biostar78285 method doWork.
@Override
public int doWork(final List<String> args) {
if (this.gc_percent_window < 1) {
LOG.error("Bad GC% window size:" + this.gc_percent_window);
return -1;
}
final List<File> bamFiles = IOUtil.unrollFiles(args.stream().map(F -> new File(F)).collect(Collectors.toCollection(HashSet::new)), ".bam");
SAMSequenceDictionary dict = null;
final List<SamReader> samReaders = new ArrayList<>();
final List<CloseableIterator<SAMRecord>> samIterators = new ArrayList<>();
final TreeSet<String> samples = new TreeSet<>();
final String DEFAULT_PARTITION = "UNDEFINED_PARTITION";
IndexedFastaSequenceFile indexedFastaSequenceFile = null;
VariantContextWriter out = null;
try {
final SamReaderFactory samReaderFactory = SamReaderFactory.makeDefault().validationStringency(ValidationStringency.LENIENT);
for (final File bamFile : bamFiles) {
LOG.info("Opening " + bamFile);
final SamReader samReader = samReaderFactory.open(bamFile);
samReaders.add(samReader);
final SAMFileHeader header = samReader.getFileHeader();
if (header == null) {
LOG.error("No header in " + bamFile);
return -1;
}
if (header.getSortOrder() != SortOrder.coordinate) {
LOG.error("Sam file " + bamFile + " is not sorted on coordinate :" + header.getSortOrder());
return -1;
}
samples.addAll(header.getReadGroups().stream().map(RG -> this.partition.apply(RG, DEFAULT_PARTITION)).collect(Collectors.toSet()));
final SAMSequenceDictionary currDict = header.getSequenceDictionary();
if (currDict == null) {
LOG.error("SamFile doesn't contain a SAMSequenceDictionary : " + bamFile);
return -1;
}
if (dict == null) {
dict = currDict;
} else if (!SequenceUtil.areSequenceDictionariesEqual(dict, currDict)) {
LOG.error(JvarkitException.DictionariesAreNotTheSame.getMessage(dict, currDict));
return -1;
}
}
if (samReaders.isEmpty()) {
LOG.error("no bam");
return -1;
}
if (dict == null) {
LOG.error("no dictionary");
return -1;
}
final QueryInterval[] intervals;
if (this.captureBed != null) {
LOG.info("Opening " + this.captureBed);
ContigNameConverter.setDefaultAliases(dict);
final List<QueryInterval> L = new ArrayList<>();
final BedLineCodec codec = new BedLineCodec();
final LineIterator li = IOUtils.openFileForLineIterator(this.captureBed);
while (li.hasNext()) {
final BedLine bed = codec.decode(li.next());
if (bed == null)
continue;
final QueryInterval q = bed.toQueryInterval(dict);
L.add(q);
}
CloserUtil.close(li);
intervals = QueryInterval.optimizeIntervals(L.toArray(new QueryInterval[L.size()]));
} else {
intervals = null;
}
for (final SamReader samReader : samReaders) {
LOG.info("querying " + samReader.getResourceDescription());
final CloseableIterator<SAMRecord> iter;
if (intervals == null) {
iter = samReader.iterator();
} else {
iter = samReader.queryOverlapping(intervals);
}
samIterators.add(new FilterIterator<SAMRecord>(iter, R -> !R.getReadUnmappedFlag() && !filter.filterOut(R)));
}
if (this.refFile != null) {
LOG.info("opening " + refFile);
indexedFastaSequenceFile = new IndexedFastaSequenceFile(this.refFile);
final SAMSequenceDictionary refdict = indexedFastaSequenceFile.getSequenceDictionary();
if (refdict == null) {
throw new JvarkitException.FastaDictionaryMissing(this.refFile);
}
ContigNameConverter.setDefaultAliases(refdict);
if (!SequenceUtil.areSequenceDictionariesEqual(dict, refdict)) {
LOG.error(JvarkitException.DictionariesAreNotTheSame.getMessage(dict, refdict));
return -1;
}
}
out = openVariantContextWriter(this.outputFile);
final Set<VCFHeaderLine> metaData = new HashSet<>();
VCFStandardHeaderLines.addStandardFormatLines(metaData, true, VCFConstants.DEPTH_KEY, VCFConstants.GENOTYPE_KEY);
VCFStandardHeaderLines.addStandardInfoLines(metaData, true, VCFConstants.DEPTH_KEY);
metaData.add(new VCFFormatHeaderLine("DF", 1, VCFHeaderLineType.Integer, "Number of Reads on plus strand"));
metaData.add(new VCFFormatHeaderLine("DR", 1, VCFHeaderLineType.Integer, "Number of Reads on minus strand"));
metaData.add(new VCFInfoHeaderLine("AVG_DP", 1, VCFHeaderLineType.Float, "Mean depth"));
metaData.add(new VCFInfoHeaderLine("MEDIAN_DP", 1, VCFHeaderLineType.Float, "Median depth"));
metaData.add(new VCFInfoHeaderLine("MIN_DP", 1, VCFHeaderLineType.Integer, "Min depth"));
metaData.add(new VCFInfoHeaderLine("MAX_DP", 1, VCFHeaderLineType.Integer, "Max depth"));
metaData.add(new VCFHeaderLine(Biostar78285.class.getSimpleName() + ".SamFilter", this.filter.toString()));
for (final Integer treshold : this.minDepthTresholds) {
metaData.add(new VCFFilterHeaderLine("DP_LT_" + treshold, "All genotypes have DP< " + treshold));
metaData.add(new VCFInfoHeaderLine("NUM_DP_LT_" + treshold, 1, VCFHeaderLineType.Integer, "Number of genotypes having DP< " + treshold));
metaData.add(new VCFInfoHeaderLine("FRACT_DP_LT_" + treshold, 1, VCFHeaderLineType.Float, "Fraction of genotypes having DP< " + treshold));
}
if (indexedFastaSequenceFile != null) {
metaData.add(new VCFInfoHeaderLine("GC_PERCENT", 1, VCFHeaderLineType.Integer, "GC% window_size:" + this.gc_percent_window));
}
final List<Allele> refAlleles = Collections.singletonList(Allele.create("N", true));
final List<Allele> NO_CALLS = Arrays.asList(Allele.NO_CALL, Allele.NO_CALL);
final VCFHeader vcfHeader = new VCFHeader(metaData, samples);
vcfHeader.setSequenceDictionary(dict);
out.writeHeader(vcfHeader);
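// merge the per-BAM iterators into a single coordinate-sorted stream of reads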
final SAMRecordCoordinateComparator samRecordCoordinateComparator = new SAMRecordCoordinateComparator();
final PeekableIterator<SAMRecord> peekIter = new PeekableIterator<>(new MergingIterator<>((R1, R2) -> samRecordCoordinateComparator.fileOrderCompare(R1, R2), samIterators));
final SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(dict);
for (final SAMSequenceRecord ssr : dict.getSequences()) {
final IntervalTree<Boolean> capturePos;
if (intervals != null) {
if (!Arrays.stream(intervals).anyMatch(I -> I.referenceIndex == ssr.getSequenceIndex())) {
continue;
}
capturePos = new IntervalTree<>();
Arrays.stream(intervals).filter(I -> I.referenceIndex == ssr.getSequenceIndex()).forEach(I -> capturePos.put(I.start, I.end, true));
} else {
capturePos = null;
}
final GenomicSequence genomicSequence;
if (indexedFastaSequenceFile != null && indexedFastaSequenceFile.getSequenceDictionary().getSequence(ssr.getSequenceName()) != null) {
genomicSequence = new GenomicSequence(indexedFastaSequenceFile, ssr.getSequenceName());
} else {
genomicSequence = null;
}
final List<SAMRecord> buffer = new ArrayList<>();
for (int ssr_pos = 1; ssr_pos <= ssr.getSequenceLength(); ++ssr_pos) {
if (capturePos != null && !capturePos.overlappers(ssr_pos, ssr_pos).hasNext())
continue;
progress.watch(ssr.getSequenceName(), ssr_pos);
while (peekIter.hasNext()) {
final SAMRecord rec = peekIter.peek();
if (rec.getReadUnmappedFlag()) {
// consume
peekIter.next();
continue;
}
if (this.filter.filterOut(rec)) {
// consume
peekIter.next();
continue;
}
if (rec.getReferenceIndex() < ssr.getSequenceIndex()) {
throw new IllegalStateException("should not happen");
}
if (rec.getReferenceIndex() > ssr.getSequenceIndex()) {
break;
}
if (rec.getAlignmentEnd() < ssr_pos) {
throw new IllegalStateException("should not happen");
}
if (rec.getAlignmentStart() > ssr_pos) {
break;
}
buffer.add(peekIter.next());
}
int x = 0;
while (x < buffer.size()) {
final SAMRecord R = buffer.get(x);
if (R.getReferenceIndex() != ssr.getSequenceIndex() || R.getAlignmentEnd() < ssr_pos) {
buffer.remove(x);
} else {
x++;
}
}
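// one PosInfo (depth and minus-strand count) per sample partition at the current reference position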
final Map<String, PosInfo> count = samples.stream().map(S -> new PosInfo(S)).collect(Collectors.toMap(P -> P.sample, Function.identity()));
for (final SAMRecord rec : buffer) {
if (rec.getReferenceIndex() != ssr.getSequenceIndex())
throw new IllegalStateException("should not happen");
if (rec.getAlignmentEnd() < ssr_pos)
continue;
if (rec.getAlignmentStart() > ssr_pos)
continue;
final Cigar cigar = rec.getCigar();
if (cigar == null)
continue;
int refpos = rec.getAlignmentStart();
final String sample = this.partition.getPartion(rec, DEFAULT_PARTITION);
for (final CigarElement ce : cigar.getCigarElements()) {
if (refpos > ssr_pos)
break;
final CigarOperator op = ce.getOperator();
if (op.consumesReferenceBases()) {
if (op.consumesReadBases()) {
if (refpos <= ssr_pos && ssr_pos <= refpos + ce.getLength()) {
final PosInfo posInfo = count.get(sample);
if (posInfo != null) {
posInfo.dp++;
if (rec.getReadNegativeStrandFlag()) {
posInfo.negative_strand++;
}
}
break;
}
}
refpos += ce.getLength();
}
}
}
final VariantContextBuilder vcb = new VariantContextBuilder();
final Set<String> filters = new HashSet<>();
vcb.chr(ssr.getSequenceName());
vcb.start(ssr_pos);
vcb.stop(ssr_pos);
if (genomicSequence == null) {
vcb.alleles(refAlleles);
} else {
vcb.alleles(Collections.singletonList(Allele.create((byte) genomicSequence.charAt(ssr_pos - 1), true)));
final GenomicSequence.GCPercent gcp = genomicSequence.getGCPercent(Math.max((ssr_pos - 1) - this.gc_percent_window, 0), Math.min(ssr_pos + this.gc_percent_window, ssr.getSequenceLength()));
if (!gcp.isEmpty()) {
vcb.attribute("GC_PERCENT", gcp.getGCPercentAsInteger());
}
}
vcb.attribute(VCFConstants.DEPTH_KEY, (int) count.values().stream().mapToInt(S -> S.dp).sum());
vcb.genotypes(count.values().stream().map(C -> new GenotypeBuilder(C.sample, NO_CALLS).DP((int) C.dp).attribute("DR", C.negative_strand).attribute("DF", C.dp - C.negative_strand).make()).collect(Collectors.toList()));
for (final Integer treshold : this.minDepthTresholds) {
final int count_lt = (int) count.values().stream().filter(S -> S.dp < treshold).count();
if (count_lt == samples.size()) {
filters.add("DP_LT_" + treshold);
}
vcb.attribute("NUM_DP_LT_" + treshold, count_lt);
if (!samples.isEmpty()) {
vcb.attribute("FRACT_DP_LT_" + treshold, count_lt / (float) samples.size());
}
}
if (!samples.isEmpty()) {
final int[] array = count.values().stream().mapToInt(S -> S.dp).toArray();
vcb.attribute("AVG_DP", Percentile.average().evaluate(array));
vcb.attribute("MEDIAN_DP", Percentile.median().evaluate(array));
vcb.attribute("MIN_DP", (int) Percentile.min().evaluate(array));
vcb.attribute("MAX_DP", (int) Percentile.max().evaluate(array));
}
if (filters.isEmpty()) {
vcb.passFilters();
} else {
vcb.filters(filters);
}
out.add(vcb.make());
}
}
progress.finish();
peekIter.close();
out.close();
out = null;
return 0;
} catch (final Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(out);
CloserUtil.close(samIterators);
CloserUtil.close(samReaders);
CloserUtil.close(indexedFastaSequenceFile);
}
}
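Each emitted position above is a no-call "coverage" record: a single reference allele plus per-sample DP/DR/DF attributes. A self-contained sketch of one such record (contig, position and counts are made up):

import java.util.Arrays;
import java.util.Collections;
import htsjdk.variant.variantcontext.Allele;
import htsjdk.variant.variantcontext.Genotype;
import htsjdk.variant.variantcontext.GenotypeBuilder;
import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.variantcontext.VariantContextBuilder;
import htsjdk.variant.vcf.VCFConstants;

public class CoverageRecordDemo {
    public static void main(final String[] args) {
        // a no-call genotype carrying depth counts, like those built in doWork
        final Genotype g = new GenotypeBuilder("sample1", Arrays.asList(Allele.NO_CALL, Allele.NO_CALL))
            .DP(42).attribute("DR", 10).attribute("DF", 32).make();
        final VariantContext ctx = new VariantContextBuilder()
            .source("demo")
            .chr("chr1").start(100).stop(100)
            .alleles(Collections.singletonList(Allele.create("N", true)))
            .attribute(VCFConstants.DEPTH_KEY, 42)
            .genotypes(g)
            .make();
        System.out.println(ctx.getContig() + ":" + ctx.getStart() + " DP=" + ctx.getAttribute("DP"));
    }
}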
use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
the class BimToVcf method doWork.
@Override
public int doWork(List<String> args) {
VariantContextWriter w = null;
BufferedReader r = null;
IndexedFastaSequenceFile faidx = null;
GenomicSequence genomic = null;
try {
if (this.REF == null) {
LOG.error("Reference -R missing.");
return -1;
}
faidx = new IndexedFastaSequenceFile(this.REF);
final SAMSequenceDictionary dict = faidx.getSequenceDictionary();
if (dict == null) {
LOG.error("No dictionary in " + this.REF);
return -1;
}
r = super.openBufferedReader(oneFileOrNull(args));
final Set<VCFHeaderLine> headerLines = new HashSet<>();
final VCFInfoHeaderLine morgan = new VCFInfoHeaderLine("MORGAN", 1, VCFHeaderLineType.Float, "Centimorgan");
final VCFInfoHeaderLine svtype = new VCFInfoHeaderLine("SVTYPE", 1, VCFHeaderLineType.String, "Variation type");
VCFStandardHeaderLines.addStandardInfoLines(headerLines, false, "");
// super.addMetaData(headerLines);
headerLines.add(morgan);
headerLines.add(svtype);
final List<String> genotypeSampleNames = Collections.emptyList();
final VCFHeader header = new VCFHeader(headerLines, genotypeSampleNames);
header.setSequenceDictionary(dict);
w = super.openVariantContextWriter(this.outputFile);
w.writeHeader(header);
final Pattern tab = Pattern.compile("[\t]");
String line;
final Pattern iupacATGC = Pattern.compile("[atgcATGC]");
while ((line = r.readLine()) != null) {
String[] tokens = tab.split(line);
if (tokens.length != 6) {
LOG.error("expected 6 column in " + line);
return -1;
}
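// PLINK .bim columns: chromosome, variant id, position in centimorgans, bp position, allele 1, allele 2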
Allele a1 = null;
Allele a2 = null;
Allele ref = null;
String contig = tokens[0];
SAMSequenceRecord ssr = null;
ssr = dict.getSequence(contig);
// ugly below !!
if (ssr == null && contig.equals("23")) {
ssr = dict.getSequence("X");
}
if (ssr == null && contig.equals("23")) {
ssr = dict.getSequence("chrX");
}
if (ssr == null && contig.equals("24")) {
ssr = dict.getSequence("Y");
}
if (ssr == null && contig.equals("24")) {
ssr = dict.getSequence("chrY");
}
if (ssr == null && contig.equals("26")) {
ssr = dict.getSequence("chrM");
}
if (ssr == null && contig.equals("26")) {
ssr = dict.getSequence("MT");
}
if (ssr == null && contig.equals("25")) {
LOG.warn("ignoring " + line);
continue;
}
if (ssr == null) {
LOG.error("unknown chrom in " + line);
return -1;
}
if (genomic == null || !ssr.getSequenceName().equals(genomic.getChrom())) {
genomic = new GenomicSequence(faidx, ssr.getSequenceName());
}
int pos1 = Integer.parseInt(tokens[3]);
if (tokens[4].equals("0"))
tokens[4] = tokens[5];
if (tokens[5].equals("0"))
tokens[5] = tokens[4];
final VariantContextBuilder vcb = new VariantContextBuilder();
vcb.chr(ssr.getSequenceName());
vcb.attribute(morgan.getID(), Float.parseFloat(tokens[2]));
if (iupacATGC.matcher(tokens[4]).matches() && iupacATGC.matcher(tokens[5]).matches()) {
String refBase = String.valueOf(genomic.charAt(pos1 - 1));
ref = Allele.create(refBase, true);
a1 = refBase.equalsIgnoreCase(tokens[4]) ? ref : Allele.create(tokens[4], false);
a2 = refBase.equalsIgnoreCase(tokens[5]) ? ref : Allele.create(tokens[5], false);
vcb.attribute(svtype.getID(), a1.isReference() && a2.isReference() ? "NOVARIATION" : "SNV");
} else if ((tokens[4].equals("-") && iupacATGC.matcher(tokens[5]).matches()) || (tokens[5].equals("-") && iupacATGC.matcher(tokens[4]).matches())) {
// shift left
pos1--;
String refBase = String.valueOf(genomic.charAt(pos1 - 1));
a1 = Allele.create(refBase, false);
ref = Allele.create(refBase + tokens[tokens[4].equals("-") ? 5 : 4], true);
a2 = a1;
vcb.attribute(svtype.getID(), "DEL");
} else if (tokens[4].equals("-") && tokens[5].equals("-")) {
// shift left
pos1--;
String refBase = String.valueOf(genomic.charAt(pos1 - 1));
a1 = Allele.create(refBase, false);
ref = Allele.create(refBase + genomic.charAt(pos1), true);
a2 = a1;
vcb.attribute(svtype.getID(), "DEL");
} else {
LOG.error("not handled: " + line);
return -1;
}
final Set<Allele> alleles = new HashSet<>();
alleles.add(ref);
alleles.add(a1);
alleles.add(a2);
vcb.start(pos1);
vcb.stop(pos1 + ref.length() - 1);
if (!tokens[1].isEmpty())
vcb.id(tokens[1]);
vcb.alleles(alleles);
w.add(vcb.make());
}
r.close();
r = null;
w.close();
w = null;
return RETURN_OK;
} catch (final Exception e) {
LOG.error(e);
return -1;
} finally {
CloserUtil.close(faidx);
CloserUtil.close(w);
CloserUtil.close(r);
}
}
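The chain of dict.getSequence(...) calls above maps PLINK numeric chromosome codes (23=X, 24=Y, 26=MT) onto whatever contig names the dictionary uses. A hypothetical helper expressing the same lookup as a table, shown only as a sketch:

import java.util.Arrays;
import java.util.List;
import htsjdk.samtools.SAMSequenceDictionary;
import htsjdk.samtools.SAMSequenceRecord;

public class PlinkContigResolver {
    // returns null when the contig cannot be resolved (the caller decides whether to warn or fail)
    static SAMSequenceRecord resolve(final SAMSequenceDictionary dict, final String contig) {
        SAMSequenceRecord ssr = dict.getSequence(contig);
        if (ssr != null) return ssr;
        final List<String> aliases;
        switch (contig) {
            case "23": aliases = Arrays.asList("X", "chrX"); break;
            case "24": aliases = Arrays.asList("Y", "chrY"); break;
            case "26": aliases = Arrays.asList("chrM", "MT"); break;
            default: return null;
        }
        for (final String alias : aliases) {
            ssr = dict.getSequence(alias);
            if (ssr != null) return ssr;
        }
        return null;
    }
}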
use of htsjdk.variant.vcf.VCFInfoHeaderLine in project jvarkit by lindenb.
the class VcfTools method initSnpEffParser.
public void initSnpEffParser(final String definition) {
failIf(definition == null || definition.trim().isEmpty(), "SnpEff definition is empty");
final VCFHeader header = new VCFHeader();
final VCFInfoHeaderLine info = new VCFInfoHeaderLine(VepPredictionParser.getDefaultTag(), VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String, definition);
header.addMetaDataLine(info);
this.vepPredictionParser = new VepPredictionParserFactory(header).get();
}
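For comparison, a minimal sketch of the same kind of unbounded, String-typed INFO line, keyed by an illustrative "CSQ" tag rather than the parser's default:

import htsjdk.variant.vcf.VCFHeader;
import htsjdk.variant.vcf.VCFHeaderLineCount;
import htsjdk.variant.vcf.VCFHeaderLineType;
import htsjdk.variant.vcf.VCFInfoHeaderLine;

public class CsqHeaderDemo {
    public static void main(final String[] args) {
        final VCFInfoHeaderLine info = new VCFInfoHeaderLine("CSQ",
            VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String,
            "Consequence annotations. Format: Allele|Consequence|IMPACT");
        final VCFHeader header = new VCFHeader();
        header.addMetaDataLine(info);
        System.out.println(header.getInfoHeaderLine("CSQ").getCountType());
    }
}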