Use of com.github.lindenb.jvarkit.util.bio.bed.BedLine in project jvarkit by lindenb.
The class BedLiftOver, method scan(): decodes each BED line, lifts the interval over to the target assembly, and writes records that fail to lift to a separate stream.
private void scan(BufferedReader r, PrintWriter out, PrintWriter failed) throws IOException {
    String line;
    final BedLineCodec bedCodec = new BedLineCodec();
    while ((line = r.readLine()) != null) {
        if (line.startsWith("#") || line.trim().isEmpty())
            continue;
        final BedLine bedLine = bedCodec.decode(line);
        if (bedLine == null)
            continue;
        final Interval srcInterval = bedLine.toInterval();
        Interval dest = this.liftOver.liftOver(srcInterval);
        if (dest != null) {
            out.print(dest.getContig());
            out.print('\t');
            out.print(dest.getStart() - 1);
            out.print('\t');
            out.print(dest.getEnd());
            for (int i = 3; i < bedLine.getColumnCount(); ++i) {
                out.print('\t');
                out.print(bedLine.get(i));
            }
            out.println();
        } else if (failed != null) {
            failed.println(line);
        }
    }
}
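The dest.getStart() - 1 above is the usual coordinate-system conversion: htsjdk's Interval is 1-based with inclusive ends, while BED is 0-based with an exclusive end, so only the start coordinate shifts when writing BED. A minimal self-contained sketch of the conversion, assuming only htsjdk on the classpath; the class name and coordinates are illustrative:

// Minimal sketch: converting an htsjdk Interval (1-based, closed) to a
// BED record (0-based start, exclusive end). Only the start shifts.
import htsjdk.samtools.util.Interval;

public class BedCoordinatesDemo {
    public static void main(final String[] args) {
        final Interval interval = new Interval("chr1", 1001, 2000);
        final String bed = interval.getContig() + "\t"
                + (interval.getStart() - 1) + "\t"
                + interval.getEnd();
        System.out.println(bed); // prints "chr1", 1000, 2000, tab-separated
    }
}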
Use of com.github.lindenb.jvarkit.util.bio.bed.BedLine in project jvarkit by lindenb.
The class VCFAnnoBam, method doVcfToVcf(): loads capture regions from a BED file into an IntervalTreeMap and annotates each variant with coverage statistics computed from the BAM files.
@Override
protected int doVcfToVcf(final String inputName, final VcfIterator r, final VariantContextWriter w) {
    BufferedReader bedIn = null;
    List<SamReader> samReaders = new ArrayList<SamReader>();
    IntervalTreeMap<Rgn> capture = new IntervalTreeMap<Rgn>();
    try {
        SAMFileHeader firstHeader = null;
        for (final File samFile : new HashSet<File>(BAMFILE)) {
            LOG.info("open bam " + samFile);
            final SamReader samReader = super.openSamReader(samFile.getPath());
            final SAMFileHeader samHeader = samReader.getFileHeader();
            samReaders.add(samReader);
            if (firstHeader == null) {
                firstHeader = samHeader;
            } else if (!SequenceUtil.areSequenceDictionariesEqual(firstHeader.getSequenceDictionary(), samHeader.getSequenceDictionary())) {
                throw new JvarkitException.DictionariesAreNotTheSame(firstHeader.getSequenceDictionary(), samHeader.getSequenceDictionary());
            }
        }
        IntervalList intervalList = new IntervalList(firstHeader);
        LOG.info("read bed " + BEDILE);
        bedIn = IOUtils.openFileForBufferedReading(BEDILE);
        String line;
        final BedLineCodec bedCodec = new BedLineCodec();
        while ((line = bedIn.readLine()) != null) {
            if (line.isEmpty() || line.startsWith("#"))
                continue;
            final BedLine bed = bedCodec.decode(line);
            if (bed == null)
                continue;
            if (firstHeader.getSequenceDictionary().getSequence(bed.getContig()) == null) {
                LOG.error("error in BED " + BEDILE + " : " + line + " chromosome is not in sequence dict of " + BAMFILE);
                continue;
            }
            intervalList.add(bed.toInterval());
        }
        bedIn.close();
        bedIn = null;
        intervalList = intervalList.sorted();
        for (final Interval interval : intervalList.uniqued()) {
            final Rgn rgn = new Rgn();
            rgn.interval = interval;
            capture.put(rgn.interval, rgn);
        }
        intervalList = null;
        VCFHeader header = r.getHeader();
        VCFHeader h2 = new VCFHeader(header.getMetaDataInInputOrder(), header.getSampleNamesInOrder());
        h2.addMetaDataLine(new VCFInfoHeaderLine(this.capture_tag, 1, VCFHeaderLineType.String, "Capture stats: Format is (start|end|mean|min|max|length|not_covered|percent_covered) BAM files: " + BAMFILE + " CAPTURE:" + BEDILE));
        w.writeHeader(h2);
        while (r.hasNext()) {
            final VariantContext ctx = r.next();
            Interval interval = new Interval(ctx.getContig(), ctx.getStart(), ctx.getEnd());
            Collection<Rgn> rgns = capture.getOverlapping(interval);
            Iterator<Rgn> it = rgns.iterator();
            if (!it.hasNext()) {
                w.add(ctx);
                continue;
            }
            final Rgn rgn = it.next();
            if (!rgn.processed) {
                // LOG.info("processing "+rgn.interval);
                process(rgn, samReaders);
            }
            final VariantContextBuilder b = new VariantContextBuilder(ctx);
            b.attribute(this.capture_tag, rgn.toString());
            w.add(b.make());
        }
        return 0;
    } catch (final Exception err) {
        LOG.error(err);
        return -1;
    } finally {
        for (final SamReader samReader : samReaders) CloserUtil.close(samReader);
    }
}
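The IntervalTreeMap pattern above, keying each capture region by its Interval and calling getOverlapping() once per variant, is the core of the lookup. A minimal self-contained sketch, assuming only htsjdk on the classpath; the region names and coordinates are illustrative:

// Minimal sketch: an IntervalTreeMap keyed by Interval answers
// "which capture regions overlap this locus?" in O(log n).
import htsjdk.samtools.util.Interval;
import htsjdk.samtools.util.IntervalTreeMap;

public class CaptureLookupDemo {
    public static void main(final String[] args) {
        final IntervalTreeMap<String> capture = new IntervalTreeMap<>();
        capture.put(new Interval("chr1", 100, 200), "exon1");
        capture.put(new Interval("chr1", 300, 400), "exon2");
        // a variant at chr1:150 overlaps exon1 only
        for (final String hit : capture.getOverlapping(new Interval("chr1", 150, 150))) {
            System.out.println(hit); // exon1
        }
    }
}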
Use of com.github.lindenb.jvarkit.util.bio.bed.BedLine in project jvarkit by lindenb.
The class VCFBedSetFilter, method doVcfToVcf(): flags or discards variants depending on whether they overlap a BED region, using either an in-memory IntervalTreeMap or a tabix-indexed BED reader.
@Override
protected int doVcfToVcf(String inputName, VcfIterator r, VariantContextWriter w) {
    try {
        final VCFHeader h2 = new VCFHeader(r.getHeader());
        addMetaData(h2);
        final VCFFilterHeaderLine filter = new VCFFilterHeaderLine(this.filterName, "Filtered with " + getProgramName() + ", " + (this.inverse ? " NOT " : "") + "overlapping " + (this.tabixFile == null ? this.treeMapFile : this.tabixFile));
        if (!this.discardFlag) {
            h2.addMetaDataLine(filter);
        }
        final SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(h2);
        w.writeHeader(h2);
        while (r.hasNext()) {
            final VariantContext ctx = progress.watch(r.next());
            boolean set_filter = true;
            if (this.intervalTreeMap != null) {
                if (this.intervalTreeMap.containsOverlapping(new Interval(ctx.getContig(), ctx.getStart(), ctx.getEnd()))) {
                    set_filter = false;
                }
            } else {
                final CloseableIterator<BedLine> iter = this.bedReader.iterator(ctx.getContig(), ctx.getStart() - 1, ctx.getEnd() + 1);
                while (iter.hasNext()) {
                    final BedLine bed = iter.next();
                    if (!ctx.getContig().equals(bed.getContig()))
                        continue;
                    if (ctx.getStart() > bed.getEnd())
                        continue;
                    if (ctx.getEnd() < bed.getStart())
                        continue;
                    set_filter = false;
                    break;
                }
                CloserUtil.close(iter);
            }
            if (this.inverse)
                set_filter = !set_filter;
            if (!set_filter) {
                w.add(ctx);
                continue;
            }
            if (!this.discardFlag) {
                final VariantContextBuilder vcb = new VariantContextBuilder(ctx);
                vcb.filter(filter.getID());
                w.add(vcb.make());
            }
            if (w.checkError())
                break;
        }
        progress.finish();
        return RETURN_OK;
    } catch (Exception err) {
        LOG.error(err);
        return -1;
    }
}
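Because the tabix query above is padded by one base on each side (getStart() - 1, getEnd() + 1), the loop re-checks the exact overlap before clearing the filter. That re-check reduces to the standard closed-interval overlap test, sketched here with illustrative names:

// Minimal sketch: two 1-based closed ranges overlap iff each one starts
// no later than the other ends (and they share a contig).
public class OverlapDemo {
    static boolean overlaps(final String ctgA, final int startA, final int endA,
                            final String ctgB, final int startB, final int endB) {
        return ctgA.equals(ctgB) && startA <= endB && startB <= endA;
    }

    public static void main(final String[] args) {
        System.out.println(overlaps("chr1", 100, 200, "chr1", 150, 300)); // true
        System.out.println(overlaps("chr1", 100, 200, "chr1", 201, 300)); // false
    }
}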
Use of com.github.lindenb.jvarkit.util.bio.bed.BedLine in project jvarkit by lindenb.
The class Biostar178713, method readBed(): decodes every BED line from a LineIterator into a collection of BedLine records.
private final void readBed(Collection<BedLine> bed, final LineIterator in) {
    final BedLineCodec codec = new BedLineCodec();
    codec.readActualHeader(in);
    while (in.hasNext()) {
        final BedLine line = codec.decode(in);
        if (line == null)
            continue;
        bed.add(line);
    }
    CloserUtil.close(in);
}
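A minimal sketch of driving the same codec loop from an in-memory BED source, assuming jvarkit's BedLineCodec and htsjdk's tribble readers behave as they do in the snippet above; the class name and BED text are illustrative:

// Minimal sketch: wrap any Reader in a tribble LineIterator and decode
// BED records with jvarkit's BedLineCodec, mirroring readBed() above.
import com.github.lindenb.jvarkit.util.bio.bed.BedLine;
import com.github.lindenb.jvarkit.util.bio.bed.BedLineCodec;
import htsjdk.tribble.readers.LineIterator;
import htsjdk.tribble.readers.LineIteratorImpl;
import htsjdk.tribble.readers.SynchronousLineReader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class ReadBedDemo {
    public static void main(final String[] args) {
        final String bedText = "chr1\t100\t200\nchr1\t300\t400\n";
        final LineIterator in = new LineIteratorImpl(new SynchronousLineReader(new StringReader(bedText)));
        final BedLineCodec codec = new BedLineCodec();
        codec.readActualHeader(in);
        final List<BedLine> lines = new ArrayList<>();
        while (in.hasNext()) {
            final BedLine line = codec.decode(in);
            if (line != null) lines.add(line);
        }
        System.out.println(lines.size()); // 2
    }
}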
Use of com.github.lindenb.jvarkit.util.bio.bed.BedLine in project jvarkit by lindenb.
The class Biostar78285, method doWork(): computes per-base depth over optional BED capture regions across one or more coordinate-sorted BAM files and emits the result as VCF records.
@Override
public int doWork(final List<String> args) {
    if (this.gc_percent_window < 1) {
        LOG.error("Bad GC% window size:" + this.gc_percent_window);
        return -1;
    }
    final List<File> bamFiles = IOUtil.unrollFiles(args.stream().map(F -> new File(F)).collect(Collectors.toCollection(HashSet::new)), ".bam");
    SAMSequenceDictionary dict = null;
    final List<SamReader> samReaders = new ArrayList<>();
    final List<CloseableIterator<SAMRecord>> samIterators = new ArrayList<>();
    final TreeSet<String> samples = new TreeSet<>();
    final String DEFAULT_PARTITION = "UNDEFINED_PARTITION";
    IndexedFastaSequenceFile indexedFastaSequenceFile = null;
    VariantContextWriter out = null;
    try {
        final SamReaderFactory samReaderFactory = SamReaderFactory.makeDefault().validationStringency(ValidationStringency.LENIENT);
        for (final File bamFile : bamFiles) {
            LOG.info("Opening " + bamFile);
            final SamReader samReader = samReaderFactory.open(bamFile);
            samReaders.add(samReader);
            final SAMFileHeader header = samReader.getFileHeader();
            if (header == null) {
                LOG.error("No header in " + bamFile);
                return -1;
            }
            if (header.getSortOrder() != SortOrder.coordinate) {
                LOG.error("Sam file " + bamFile + " is not sorted on coordinate: " + header.getSortOrder());
                return -1;
            }
            samples.addAll(header.getReadGroups().stream().map(RG -> this.partition.apply(RG, DEFAULT_PARTITION)).collect(Collectors.toSet()));
            final SAMSequenceDictionary currDict = header.getSequenceDictionary();
            if (currDict == null) {
                LOG.error("SamFile doesn't contain a SAMSequenceDictionary : " + bamFile);
                return -1;
            }
            if (dict == null) {
                dict = currDict;
            } else if (!SequenceUtil.areSequenceDictionariesEqual(dict, currDict)) {
                LOG.error(JvarkitException.DictionariesAreNotTheSame.getMessage(dict, currDict));
                return -1;
            }
        }
        if (samReaders.isEmpty()) {
            LOG.error("no bam");
            return -1;
        }
        if (dict == null) {
            LOG.error("no dictionary");
            return -1;
        }
        final QueryInterval[] intervals;
        if (this.captureBed != null) {
            LOG.info("Opening " + this.captureBed);
            ContigNameConverter.setDefaultAliases(dict);
            final List<QueryInterval> L = new ArrayList<>();
            final BedLineCodec codec = new BedLineCodec();
            final LineIterator li = IOUtils.openFileForLineIterator(this.captureBed);
            while (li.hasNext()) {
                final BedLine bed = codec.decode(li.next());
                if (bed == null)
                    continue;
                final QueryInterval q = bed.toQueryInterval(dict);
                L.add(q);
            }
            CloserUtil.close(li);
            intervals = QueryInterval.optimizeIntervals(L.toArray(new QueryInterval[L.size()]));
        } else {
            intervals = null;
        }
        for (final SamReader samReader : samReaders) {
            LOG.info("querying " + samReader.getResourceDescription());
            final CloseableIterator<SAMRecord> iter;
            if (intervals == null) {
                iter = samReader.iterator();
            } else {
                iter = samReader.queryOverlapping(intervals);
            }
            samIterators.add(new FilterIterator<SAMRecord>(iter, R -> !R.getReadUnmappedFlag() && !filter.filterOut(R)));
        }
        if (this.refFile != null) {
            LOG.info("opening " + refFile);
            indexedFastaSequenceFile = new IndexedFastaSequenceFile(this.refFile);
            final SAMSequenceDictionary refdict = indexedFastaSequenceFile.getSequenceDictionary();
            if (refdict == null) {
                throw new JvarkitException.FastaDictionaryMissing(this.refFile);
            }
            ContigNameConverter.setDefaultAliases(refdict);
            if (!SequenceUtil.areSequenceDictionariesEqual(dict, refdict)) {
                LOG.error(JvarkitException.DictionariesAreNotTheSame.getMessage(dict, refdict));
                return -1;
            }
        }
        out = openVariantContextWriter(this.outputFile);
        final Set<VCFHeaderLine> metaData = new HashSet<>();
        VCFStandardHeaderLines.addStandardFormatLines(metaData, true, VCFConstants.DEPTH_KEY, VCFConstants.GENOTYPE_KEY);
        VCFStandardHeaderLines.addStandardInfoLines(metaData, true, VCFConstants.DEPTH_KEY);
        metaData.add(new VCFFormatHeaderLine("DF", 1, VCFHeaderLineType.Integer, "Number of Reads on plus strand"));
        metaData.add(new VCFFormatHeaderLine("DR", 1, VCFHeaderLineType.Integer, "Number of Reads on minus strand"));
        metaData.add(new VCFInfoHeaderLine("AVG_DP", 1, VCFHeaderLineType.Float, "Mean depth"));
        metaData.add(new VCFInfoHeaderLine("MEDIAN_DP", 1, VCFHeaderLineType.Float, "Median depth"));
        metaData.add(new VCFInfoHeaderLine("MIN_DP", 1, VCFHeaderLineType.Integer, "Min depth"));
        metaData.add(new VCFInfoHeaderLine("MAX_DP", 1, VCFHeaderLineType.Integer, "Max depth"));
        metaData.add(new VCFHeaderLine(Biostar78285.class.getSimpleName() + ".SamFilter", this.filter.toString()));
        for (final Integer treshold : this.minDepthTresholds) {
            metaData.add(new VCFFilterHeaderLine("DP_LT_" + treshold, "All genotypes have DP< " + treshold));
            metaData.add(new VCFInfoHeaderLine("NUM_DP_LT_" + treshold, 1, VCFHeaderLineType.Integer, "Number of genotypes having DP< " + treshold));
            metaData.add(new VCFInfoHeaderLine("FRACT_DP_LT_" + treshold, 1, VCFHeaderLineType.Float, "Fraction of genotypes having DP< " + treshold));
        }
        if (indexedFastaSequenceFile != null) {
            metaData.add(new VCFInfoHeaderLine("GC_PERCENT", 1, VCFHeaderLineType.Integer, "GC% window_size:" + this.gc_percent_window));
        }
        final List<Allele> refAlleles = Collections.singletonList(Allele.create("N", true));
        final List<Allele> NO_CALLS = Arrays.asList(Allele.NO_CALL, Allele.NO_CALL);
        final VCFHeader vcfHeader = new VCFHeader(metaData, samples);
        vcfHeader.setSequenceDictionary(dict);
        out.writeHeader(vcfHeader);
        final SAMRecordCoordinateComparator samRecordCoordinateComparator = new SAMRecordCoordinateComparator();
        final PeekableIterator<SAMRecord> peekIter = new PeekableIterator<>(new MergingIterator<>((R1, R2) -> samRecordCoordinateComparator.fileOrderCompare(R1, R2), samIterators));
        final SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(dict);
        for (final SAMSequenceRecord ssr : dict.getSequences()) {
            final IntervalTree<Boolean> capturePos;
            if (intervals != null) {
                if (Arrays.stream(intervals).noneMatch(I -> I.referenceIndex == ssr.getSequenceIndex())) {
                    continue;
                }
                capturePos = new IntervalTree<>();
                Arrays.stream(intervals).filter(I -> I.referenceIndex == ssr.getSequenceIndex()).forEach(I -> capturePos.put(I.start, I.end, true));
            } else {
                capturePos = null;
            }
            final GenomicSequence genomicSequence;
            if (indexedFastaSequenceFile != null && indexedFastaSequenceFile.getSequenceDictionary().getSequence(ssr.getSequenceName()) != null) {
                genomicSequence = new GenomicSequence(indexedFastaSequenceFile, ssr.getSequenceName());
            } else {
                genomicSequence = null;
            }
            final List<SAMRecord> buffer = new ArrayList<>();
            for (int ssr_pos = 1; ssr_pos <= ssr.getSequenceLength(); ++ssr_pos) {
                if (capturePos != null && !capturePos.overlappers(ssr_pos, ssr_pos).hasNext())
                    continue;
                progress.watch(ssr.getSequenceName(), ssr_pos);
                while (peekIter.hasNext()) {
                    final SAMRecord rec = peekIter.peek();
                    if (rec.getReadUnmappedFlag()) {
                        // consume
                        peekIter.next();
                        continue;
                    }
                    if (this.filter.filterOut(rec)) {
                        // consume
                        peekIter.next();
                        continue;
                    }
                    if (rec.getReferenceIndex() < ssr.getSequenceIndex()) {
                        throw new IllegalStateException("should not happen");
                    }
                    if (rec.getReferenceIndex() > ssr.getSequenceIndex()) {
                        break;
                    }
                    if (rec.getAlignmentEnd() < ssr_pos) {
                        throw new IllegalStateException("should not happen");
                    }
                    if (rec.getAlignmentStart() > ssr_pos) {
                        break;
                    }
                    buffer.add(peekIter.next());
                }
                int x = 0;
                while (x < buffer.size()) {
                    final SAMRecord R = buffer.get(x);
                    if (R.getReferenceIndex() != ssr.getSequenceIndex() || R.getAlignmentEnd() < ssr_pos) {
                        buffer.remove(x);
                    } else {
                        x++;
                    }
                }
                final Map<String, PosInfo> count = samples.stream().map(S -> new PosInfo(S)).collect(Collectors.toMap(P -> P.sample, Function.identity()));
                for (final SAMRecord rec : buffer) {
                    if (rec.getReferenceIndex() != ssr.getSequenceIndex())
                        throw new IllegalStateException("should not happen");
                    if (rec.getAlignmentEnd() < ssr_pos)
                        continue;
                    if (rec.getAlignmentStart() > ssr_pos)
                        continue;
                    final Cigar cigar = rec.getCigar();
                    if (cigar == null)
                        continue;
                    int refpos = rec.getAlignmentStart();
                    final String sample = this.partition.getPartion(rec, DEFAULT_PARTITION);
                    for (final CigarElement ce : cigar.getCigarElements()) {
                        if (refpos > ssr_pos)
                            break;
                        final CigarOperator op = ce.getOperator();
                        if (op.consumesReferenceBases()) {
                            if (op.consumesReadBases()) {
                                if (refpos <= ssr_pos && ssr_pos <= refpos + ce.getLength()) {
                                    final PosInfo posInfo = count.get(sample);
                                    if (posInfo != null) {
                                        posInfo.dp++;
                                        if (rec.getReadNegativeStrandFlag()) {
                                            posInfo.negative_strand++;
                                        }
                                    }
                                    break;
                                }
                            }
                            refpos += ce.getLength();
                        }
                    }
                }
                final VariantContextBuilder vcb = new VariantContextBuilder();
                final Set<String> filters = new HashSet<>();
                vcb.chr(ssr.getSequenceName());
                vcb.start(ssr_pos);
                vcb.stop(ssr_pos);
                if (genomicSequence == null) {
                    vcb.alleles(refAlleles);
                } else {
                    vcb.alleles(Collections.singletonList(Allele.create((byte) genomicSequence.charAt(ssr_pos - 1), true)));
                    final GenomicSequence.GCPercent gcp = genomicSequence.getGCPercent(Math.max((ssr_pos - 1) - this.gc_percent_window, 0), Math.min(ssr_pos + this.gc_percent_window, ssr.getSequenceLength()));
                    if (!gcp.isEmpty()) {
                        vcb.attribute("GC_PERCENT", gcp.getGCPercentAsInteger());
                    }
                }
                vcb.attribute(VCFConstants.DEPTH_KEY, (int) count.values().stream().mapToInt(S -> S.dp).sum());
                vcb.genotypes(count.values().stream().map(C -> new GenotypeBuilder(C.sample, NO_CALLS).DP((int) C.dp).attribute("DR", C.negative_strand).attribute("DF", C.dp - C.negative_strand).make()).collect(Collectors.toList()));
                for (final Integer treshold : this.minDepthTresholds) {
                    final int count_lt = (int) count.values().stream().filter(S -> S.dp < treshold).count();
                    if (count_lt == samples.size()) {
                        filters.add("DP_LT_" + treshold);
                    }
                    vcb.attribute("NUM_DP_LT_" + treshold, count_lt);
                    if (!samples.isEmpty()) {
                        vcb.attribute("FRACT_DP_LT_" + treshold, count_lt / (float) samples.size());
                    }
                }
                if (!samples.isEmpty()) {
                    final int[] array = count.values().stream().mapToInt(S -> S.dp).toArray();
                    vcb.attribute("AVG_DP", Percentile.average().evaluate(array));
                    vcb.attribute("MEDIAN_DP", Percentile.median().evaluate(array));
                    vcb.attribute("MIN_DP", (int) Percentile.min().evaluate(array));
                    vcb.attribute("MAX_DP", (int) Percentile.max().evaluate(array));
                }
                if (filters.isEmpty()) {
                    vcb.passFilters();
                } else {
                    vcb.filters(filters);
                }
                out.add(vcb.make());
            }
        }
        progress.finish();
        peekIter.close();
        out.close();
        out = null;
        return 0;
    } catch (final Exception err) {
        LOG.error(err);
        return -1;
    } finally {
        CloserUtil.close(out);
        CloserUtil.close(samIterators);
        CloserUtil.close(samReaders);
        CloserUtil.close(indexedFastaSequenceFile);
    }
}
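One htsjdk detail in doWork() is worth isolating: QueryInterval.optimizeIntervals() sorts the BED-derived intervals and merges overlapping or abutting runs, which SamReader.queryOverlapping() requires. A minimal self-contained sketch, assuming only htsjdk on the classpath; the coordinates are illustrative, and reference index 0 stands for the first contig in the dictionary:

// Minimal sketch: QueryInterval.optimizeIntervals sorts by reference
// index/start and merges overlapping intervals into one query per run.
import htsjdk.samtools.QueryInterval;

public class OptimizeIntervalsDemo {
    public static void main(final String[] args) {
        final QueryInterval[] raw = new QueryInterval[] {
            new QueryInterval(0, 300, 400),
            new QueryInterval(0, 100, 250),
            new QueryInterval(0, 200, 350) // overlaps both neighbours
        };
        final QueryInterval[] merged = QueryInterval.optimizeIntervals(raw);
        for (final QueryInterval q : merged) {
            System.out.println(q.referenceIndex + ":" + q.start + "-" + q.end); // 0:100-400
        }
    }
}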