Use of htsjdk.samtools.util.IntervalTreeMap in project jvarkit by lindenb.
The class LumpySort, method doWork:
@Override
public int doWork(final List<String> args) {
VariantContextWriter vcw = null;
LineIterator vcfIn = null;
Environment environment = null;
Database variantsDb1 = null;
final List<File> inputs = IOUtil.unrollFiles(args.stream().map(S -> new File(S)).collect(Collectors.toList()), ".vcf", ".vcf.gz");
if (inputs.isEmpty()) {
LOG.error("empty vcf list");
return -1;
}
try {
IOUtil.assertDirectoryIsWritable(this.bdbHomeDir);
final Set<VCFHeaderLine> metaData = new HashSet<>();
final Set<String> sampleNames = new TreeSet<>();
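// optional BED filter: each BED interval becomes a key of an IntervalTreeMap (the Boolean value is unused, always TRUE)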
final IntervalTreeMap<Boolean> intervalTreeMapBed;
if (this.bedFile != null) {
intervalTreeMapBed = new IntervalTreeMap<>();
final BedLineCodec bedLineCodec = new BedLineCodec();
final BufferedReader br = IOUtils.openFileForBufferedReading(this.bedFile);
br.lines().map(L -> bedLineCodec.decode(L)).filter(L -> L != null).forEach(B -> intervalTreeMapBed.put(B.toInterval(), true));
br.close();
} else {
intervalTreeMapBed = null;
}
for (int idx = 0; idx < inputs.size(); ++idx) {
final File vcfFile = inputs.get(idx);
LOG.info("Read header " + (idx + 1) + "/" + inputs.size());
final VCFFileReader r = new VCFFileReader(vcfFile, false);
final VCFHeader header = r.getFileHeader();
if (!LumpyConstants.isLumpyHeader(header)) {
LOG.error("doesn't look like a Lumpy-SV vcf header " + vcfFile);
r.close();
return -1;
}
if (!header.hasGenotypingData()) {
LOG.error("No sample in " + vcfFile);
r.close();
return -1;
}
for (final String sampleName : header.getSampleNamesInOrder()) {
if (sampleNames.contains(sampleName)) {
LOG.error("Sample found twice " + sampleName + " in " + vcfFile);
r.close();
return -1;
}
sampleNames.add(sampleName);
}
metaData.addAll(header.getMetaDataInInputOrder().stream().filter(H -> !H.getKey().equals("fileDate")).collect(Collectors.toSet()));
r.close();
}
final VCFInfoHeaderLine nSampleInfoHeaderLine = new VCFInfoHeaderLine("NSAMPLES", 1, VCFHeaderLineType.Integer, "Number of affected samples.");
metaData.add(nSampleInfoHeaderLine);
final VCFFormatHeaderLine chromStartFormatHeaderLine = new VCFFormatHeaderLine("CB", 1, VCFHeaderLineType.Integer, "Original Variant POS");
metaData.add(chromStartFormatHeaderLine);
final VCFFormatHeaderLine chromEndFormatHeaderLine = new VCFFormatHeaderLine("CE", 1, VCFHeaderLineType.Integer, "Original Variant END");
metaData.add(chromEndFormatHeaderLine);
final VCFHeader outHeader = new VCFHeader(metaData, sampleNames);
final VCFHeaderVersion[] versions = VCFHeaderVersion.values();
this.vcfEncoder = new VCFEncoder(outHeader, false, true);
this.vcfCodec.setVCFHeader(outHeader, versions[versions.length - 1]);
/* open BDB env */
final Transaction txn = null;
environment = new Environment(this.bdbHomeDir, new EnvironmentConfig().setAllowCreate(true).setReadOnly(false));
variantsDb1 = environment.openDatabase(txn, "variants1", new DatabaseConfig().setBtreeComparator(KeySorterComparator.class).setAllowCreate(true).setReadOnly(false).setTemporary(true));
long total_variants = 0L;
final LumpyVarBinding lumpVarBinding = new LumpyVarBinding();
final KeySorterBinding keySorterBinding = new KeySorterBinding();
for (int idx = 0; idx < inputs.size(); ++idx) {
final long millisecstart = System.currentTimeMillis();
final File vcfFile = inputs.get(idx);
int nVariant = 0;
final VCFFileReader r = new VCFFileReader(vcfFile, false);
final List<Genotype> missing = new ArrayList<>(sampleNames.size());
for (final String sn : sampleNames) {
if (r.getFileHeader().getSampleNamesInOrder().contains(sn))
continue;
missing.add(GenotypeBuilder.createMissing(sn, 2));
}
final CloseableIterator<VariantContext> iter = r.iterator();
while (iter.hasNext()) {
VariantContext ctx = iter.next();
if (!this.keep_secondary) {
if (ctx.hasAttribute("SECONDARY"))
continue;
}
if (!this.variantFilter.test(ctx))
continue;
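// skip variants that do not overlap at least one BED interval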
if (intervalTreeMapBed != null && !intervalTreeMapBed.containsOverlapping(ctx))
continue;
final List<Genotype> gtList = new ArrayList<>(ctx.getGenotypes());
for (int gi = 0; gi < gtList.size(); gi++) {
Genotype g = gtList.get(gi);
final GenotypeBuilder gb;
if (this.do_genotype && isAvailableGenotype(g)) {
gb = new GenotypeBuilder(g.getSampleName(), ctx.getAlternateAlleles());
gb.attributes(g.getExtendedAttributes());
} else {
gb = new GenotypeBuilder(g);
}
gb.attribute(chromStartFormatHeaderLine.getID(), ctx.getStart());
gb.attribute(chromEndFormatHeaderLine.getID(), ctx.getEnd());
gtList.set(gi, gb.make());
}
gtList.addAll(missing);
ctx = new VariantContextBuilder(ctx).genotypes(gtList).rmAttribute("PRPOS").make();
final LumpyVar lvar = new LumpyVar(ctx, total_variants);
final DatabaseEntry key = new DatabaseEntry();
final DatabaseEntry data = new DatabaseEntry();
lumpVarBinding.objectToEntry(lvar, data);
keySorterBinding.objectToEntry(lvar.getSortKey(), key);
if (variantsDb1.put(txn, key, data) != OperationStatus.SUCCESS) {
r.close();
LOG.error("insertion failed");
return -1;
}
nVariant++;
total_variants++;
}
iter.close();
r.close();
LOG.info("Read " + (idx + 1) + "/" + inputs.size() + " variants of " + vcfFile + " N=" + nVariant + " Total:" + total_variants + " That took: " + Duration.ofMillis(System.currentTimeMillis() - millisecstart));
System.gc();
}
if (intervalTreeMapBed != null)
intervalTreeMapBed.clear();
System.gc();
LOG.info("Writing output");
final List<Allele> ALLELES_NO_CALLS = this.do_genotype ? Collections.singletonList(Allele.NO_CALL) : Arrays.asList(Allele.NO_CALL, Allele.NO_CALL);
final Cursor cursor = variantsDb1.openCursor(txn, null);
vcw = super.openVariantContextWriter(this.outputFile);
vcw.writeHeader(outHeader);
for (; ; ) {
final DatabaseEntry key = new DatabaseEntry();
final DatabaseEntry data = new DatabaseEntry();
OperationStatus status = cursor.getNext(key, data, LockMode.DEFAULT);
if (!status.equals(OperationStatus.SUCCESS))
break;
final LumpyVar first = lumpVarBinding.entryToObject(data);
if (this.do_not_merge_ctx) {
vcw.add(first.ctx);
continue;
}
final KeySorter keySorter1 = keySorterBinding.entryToObject(key);
final List<LumpyVar> buffer = new ArrayList<>();
buffer.add(first);
final DatabaseEntry key2 = new DatabaseEntry();
final DatabaseEntry data2 = new DatabaseEntry();
final Cursor cursor2 = cursor.dup(true);
for (; ; ) {
status = cursor2.getNext(key2, data2, LockMode.DEFAULT);
if (!status.equals(OperationStatus.SUCCESS))
break;
final KeySorter keySorter2 = keySorterBinding.entryToObject(key2);
if (keySorter1.compare1(keySorter2) != 0) {
break;
}
final LumpyVar lv = lumpVarBinding.entryToObject(data2);
if (lv.ctx.getStart() > first.ctx.getEnd()) {
break;
}
if (first.canMerge(lv)) {
buffer.add(lv);
cursor2.delete();
}
}
cursor2.close();
// delete 'first'
cursor.delete();
final int variantStartA = buffer.stream().mapToInt(V -> V.ctx.getStart()).min().getAsInt();
final int variantStartB = (int) buffer.stream().mapToInt(V -> V.ctx.getStart()).average().getAsDouble();
final int variantStartC = buffer.stream().mapToInt(V -> V.ctx.getStart()).max().getAsInt();
final int variantEndA = buffer.stream().mapToInt(V -> V.ctx.getEnd()).min().getAsInt();
final int variantEndB = (int) buffer.stream().mapToInt(V -> V.ctx.getEnd()).average().getAsDouble();
final int variantEndC = buffer.stream().mapToInt(V -> V.ctx.getEnd()).max().getAsInt();
final VariantContextBuilder vcb = new VariantContextBuilder("lumpymerge", first.ctx.getContig(), variantStartB, variantEndB, first.ctx.getAlleles());
vcb.attribute("END", variantEndB);
vcb.attribute("SVTYPE", first.ctx.getAttribute("SVTYPE"));
vcb.attribute("SVLEN", (int) Percentile.median().evaluate(buffer.stream().mapToInt(V -> V.ctx.getEnd() - V.ctx.getStart())));
vcb.attribute("CIPOS", Arrays.asList(variantStartB - variantStartA, variantStartC - variantStartB));
vcb.attribute("CIEND", Arrays.asList(variantEndB - variantEndA, variantEndC - variantEndB));
vcb.attribute("SU", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("SU", 0)).sum());
vcb.attribute("SR", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("SR", 0)).sum());
vcb.attribute("PE", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("PE", 0)).sum());
final Map<String, Genotype> sample2genotype = new HashMap<>(sampleNames.size());
buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).filter(G -> isAvailableGenotype(G)).forEach(G -> {
sample2genotype.put(G.getSampleName(), G);
});
vcb.attribute(nSampleInfoHeaderLine.getID(), sample2genotype.size());
for (final String sn : sampleNames) {
if (!sample2genotype.containsKey(sn)) {
sample2genotype.put(sn, new GenotypeBuilder(sn, ALLELES_NO_CALLS).attribute("SU", 0).attribute("SR", 0).attribute("PE", 0).make());
}
}
vcb.genotypes(sample2genotype.values());
vcw.add(vcb.make());
}
cursor.close();
vcw.close();
vcw = null;
variantsDb1.close();
variantsDb1 = null;
environment.close();
environment = null;
return 0;
} catch (final Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(vcfIn);
CloserUtil.close(vcw);
CloserUtil.close(variantsDb1);
CloserUtil.close(environment);
}
}
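The method above uses IntervalTreeMap<Boolean> purely as an interval filter: each decoded BED line is inserted as a key with a TRUE value, and containsOverlapping() later decides whether a variant is kept. A minimal, self-contained sketch of that idiom, assuming only htsjdk on the classpath (the class name, contigs and coordinates are invented for illustration):

import htsjdk.samtools.util.Interval;
import htsjdk.samtools.util.IntervalTreeMap;

public class OverlapFilterSketch {
    public static void main(final String[] args) {
        final IntervalTreeMap<Boolean> filter = new IntervalTreeMap<>();
        // hypothetical intervals standing in for decoded BED lines
        filter.put(new Interval("chr1", 100, 200), Boolean.TRUE);
        filter.put(new Interval("chr2", 500, 600), Boolean.TRUE);
        final Interval variant = new Interval("chr1", 150, 160);
        // a record is kept only if it overlaps at least one BED interval
        if (filter.containsOverlapping(variant)) {
            System.out.println(variant + " overlaps the BED filter");
        }
    }
}

Only the keys matter in this pattern; Boolean.TRUE is simply the cheapest placeholder value, since htsjdk ships no interval-set counterpart to IntervalTreeMap.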
Use of htsjdk.samtools.util.IntervalTreeMap in project jvarkit by lindenb.
The class VCFAnnoBam, method doVcfToVcf:
@Override
protected int doVcfToVcf(final String inputName, final VcfIterator r, final VariantContextWriter w) {
BufferedReader bedIn = null;
List<SamReader> samReaders = new ArrayList<SamReader>();
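// capture regions from the BED file, indexed by interval for fast overlap lookups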
IntervalTreeMap<Rgn> capture = new IntervalTreeMap<Rgn>();
try {
SAMFileHeader firstHeader = null;
for (final File samFile : new HashSet<File>(BAMFILE)) {
LOG.info("open bam " + samFile);
final SamReader samReader = super.openSamReader(samFile.getPath());
final SAMFileHeader samHeader = samReader.getFileHeader();
samReaders.add(samReader);
if (firstHeader == null) {
firstHeader = samHeader;
} else if (!SequenceUtil.areSequenceDictionariesEqual(firstHeader.getSequenceDictionary(), samHeader.getSequenceDictionary())) {
throw new JvarkitException.DictionariesAreNotTheSame(firstHeader.getSequenceDictionary(), samHeader.getSequenceDictionary());
}
}
IntervalList intervalList = new IntervalList(firstHeader);
LOG.info("read bed " + BEDILE);
bedIn = IOUtils.openFileForBufferedReading(BEDILE);
String line;
final BedLineCodec bedCodec = new BedLineCodec();
while ((line = bedIn.readLine()) != null) {
if (line.isEmpty() || line.startsWith("#"))
continue;
final BedLine bed = bedCodec.decode(line);
if (bed == null)
continue;
if (firstHeader.getSequenceDictionary().getSequence(bed.getContig()) == null) {
LOG.error("error in BED +" + BEDILE + " : " + line + " chromosome is not in sequence dict of " + BAMFILE);
continue;
}
intervalList.add(bed.toInterval());
}
bedIn.close();
bedIn = null;
intervalList = intervalList.sorted();
for (final Interval interval : intervalList.uniqued()) {
final Rgn rgn = new Rgn();
rgn.interval = interval;
capture.put(rgn.interval, rgn);
}
intervalList = null;
VCFHeader header = r.getHeader();
VCFHeader h2 = new VCFHeader(header.getMetaDataInInputOrder(), header.getSampleNamesInOrder());
h2.addMetaDataLine(new VCFInfoHeaderLine(this.capture_tag, 1, VCFHeaderLineType.String, "Capture stats: Format is (start|end|mean|min|max|length|not_covered|percent_covered) BAM files: " + BAMFILE + " CAPTURE:" + BEDILE));
w.writeHeader(h2);
while (r.hasNext()) {
final VariantContext ctx = r.next();
Interval interval = new Interval(ctx.getContig(), ctx.getStart(), ctx.getEnd());
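// fetch every capture region overlapping this variant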
Collection<Rgn> rgns = capture.getOverlapping(interval);
Iterator<Rgn> it = rgns.iterator();
if (!it.hasNext()) {
w.add(ctx);
continue;
}
final Rgn rgn = it.next();
if (!rgn.processed) {
// LOG.info("processing "+rgn.interval);
process(rgn, samReaders);
}
final VariantContextBuilder b = new VariantContextBuilder(ctx);
b.attribute(this.capture_tag, rgn.toString());
w.add(b.make());
}
return 0;
} catch (final Exception err) {
LOG.error(err);
return -1;
} finally {
for (final SamReader samReader : samReaders) CloserUtil.close(samReader);
}
}
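Here the map's values do the work: getOverlapping() returns every Rgn whose capture interval intersects the variant. A hedged sketch of that query, with plain Strings standing in for the project's Rgn records and invented coordinates:

import htsjdk.samtools.util.Interval;
import htsjdk.samtools.util.IntervalTreeMap;

public class GetOverlappingSketch {
    public static void main(final String[] args) {
        final IntervalTreeMap<String> capture = new IntervalTreeMap<>();
        capture.put(new Interval("chr1", 100, 500), "exon1");
        capture.put(new Interval("chr1", 400, 900), "exon2");
        // getOverlapping returns the values of every key interval intersecting the query
        for (final String rgn : capture.getOverlapping(new Interval("chr1", 450, 460))) {
            System.out.println(rgn); // prints exon1 and exon2 (order not guaranteed)
        }
    }
}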
Use of htsjdk.samtools.util.IntervalTreeMap in project jvarkit by lindenb.
The class SamScanSplitReads, method saveAsVcf:
private void saveAsVcf(Set<String> sampleNames, SAMSequenceDictionary dict) throws IOException {
final Function<String, Integer> contig2tid = S -> {
int i = dict.getSequenceIndex(S);
if (i == -1)
throw new IllegalArgumentException("cannot find contig " + S + " in dictionary");
return i;
};
final Comparator<Interval> intervalComparator = new Comparator<Interval>() {
@Override
public int compare(final Interval r1, final Interval r2) {
final int cmp = contig2tid.apply(r1.getContig()) - contig2tid.apply(r2.getContig());
if (cmp != 0)
return cmp;
return r1.getStart() - r2.getStart();
}
};
final Allele REF = Allele.create("N", true);
final Set<VCFHeaderLine> meta = new HashSet<>();
VCFStandardHeaderLines.addStandardFormatLines(meta, false, VCFConstants.GENOTYPE_KEY, VCFConstants.DEPTH_KEY);
sampleNames.addAll(this.sample2database.keySet());
if (sampleNames.isEmpty())
sampleNames.add(this.defaultSampleName);
VCFHeader header = new VCFHeader(meta, sampleNames);
header.setSequenceDictionary(dict);
VariantContextWriter vcw = super.openVariantContextWriter(outputFile);
vcw.writeHeader(header);
final List<Arc> all_arcs = new ArrayList<>();
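// flatten each per-sample IntervalTreeMap<Set<Arc>> into a single sortable list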
for (final String sample : this.sample2database.keySet()) {
final IntervalTreeMap<Set<Arc>> database = this.sample2database.get(sample);
for (final Interval interval : database.keySet()) {
for (final Arc arc : database.get(interval)) {
all_arcs.add(arc);
}
}
}
Collections.sort(all_arcs, (A1, A2) -> intervalComparator.compare(A1.intervalFrom, A2.intervalFrom));
for (final Arc row : all_arcs) {
final List<Genotype> genotypes = new ArrayList<>();
final Set<Allele> alleles = new HashSet<>();
for (final String sample : this.sample2database.keySet()) {
final IntervalTreeMap<Set<Arc>> database = this.sample2database.get(sample);
for (final Arc arc : database.get(row.intervalFrom)) {
if (!arc.equals(row))
continue;
final Allele alt = Allele.create(new StringBuilder().append("<").append(arc.intervalFrom.getContig()).append(":").append(arc.intervalFrom.getStart()).append("-").append(arc.intervalFrom.getEnd()).append("|").append(arc.intervalTo.getContig()).append(":").append(arc.intervalTo.getStart()).append("-").append(arc.intervalTo.getEnd()).append(">").toString(), false);
alleles.add(alt);
final Genotype g = new GenotypeBuilder(sample).alleles(Collections.singletonList(alt)).DP(arc.countSupportingReads).make();
genotypes.add(g);
}
}
alleles.add(REF);
VariantContextBuilder vcb = new VariantContextBuilder().chr(row.intervalFrom.getContig()).start(row.intervalFrom.getStart()).stop(row.intervalFrom.getStart()).alleles(alleles).genotypes(genotypes);
vcw.add(vcb.make());
}
vcw.close();
}
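IntervalTreeMap stores a single value per key interval, so this class parks a Set<Arc> at each 'from' interval to collect several arcs under one key. A minimal sketch of that multimap idiom (Strings replace Arc, and the coordinates are made up); since IntervalTreeMap extends AbstractMap, the Map default method computeIfAbsent is available for the accumulation step:

import java.util.HashSet;
import java.util.Set;
import htsjdk.samtools.util.Interval;
import htsjdk.samtools.util.IntervalTreeMap;

public class IntervalMultimapSketch {
    public static void main(final String[] args) {
        // IntervalTreeMap keeps one value per key, so a Set collects
        // several records under the same interval, as Set<Arc> does above
        final IntervalTreeMap<Set<String>> db = new IntervalTreeMap<>();
        final Interval from = new Interval("chr1", 1000, 1100);
        db.computeIfAbsent(from, K -> new HashSet<>()).add("arc-1");
        db.computeIfAbsent(from, K -> new HashSet<>()).add("arc-2");
        // walk keys, then values, as saveAsVcf does over sample2database
        for (final Interval interval : db.keySet()) {
            for (final String arc : db.get(interval)) {
                System.out.println(interval + " -> " + arc);
            }
        }
    }
}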
Use of htsjdk.samtools.util.IntervalTreeMap in project jvarkit by lindenb.
The class Biostar214299, method doWork:
@Override
public int doWork(final List<String> args) {
if (this.positionFile == null) {
LOG.error("position File is not defined.");
return -1;
}
final String UNAFFECTED_SAMPLE = "UNAFFECTED";
final String AMBIGOUS_SAMPLE = "AMBIGOUS";
final String UNMAPPED = "UNMAPPED";
SamReader sfr = null;
SAMFileWriter sfw = null;
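// one-base intervals -> Position records (the expected base per sample at that reference position)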
final IntervalTreeMap<Position> positionsTreeMap = new IntervalTreeMap<>();
final Set<String> samples = new HashSet<>();
try {
sfr = openSamReader(oneFileOrNull(args));
final SAMFileHeader header = sfr.getFileHeader();
final SAMSequenceDictionary dict = header.getSequenceDictionary();
if (dict == null) {
LOG.error("Dictionary missing in input sam");
return -1;
}
try (BufferedReader br = IOUtils.openFileForBufferedReading(this.positionFile)) {
String line;
while ((line = br.readLine()) != null) {
if (line.trim().isEmpty() || line.startsWith("#"))
continue;
final String[] tokens = line.split("[\t]");
if (tokens.length < 4) {
LOG.error("Not enough columns in " + line);
return -1;
}
final String contig = tokens[0];
if (dict.getSequence(contig) == null) {
LOG.error("No such contig in input's sam dictionary: " + contig);
return -1;
}
final int refpos = Integer.parseInt(tokens[1]);
final Interval interval = new Interval(contig, refpos, refpos);
Position position = positionsTreeMap.get(interval);
if (position == null) {
position = new Position();
// position.contig = contig;
position.refpos = refpos;
positionsTreeMap.put(interval, position);
}
final String bases = tokens[2].toUpperCase();
if (bases.length() != 1 || !bases.matches("[ATGC]")) {
LOG.error("in " + line + " bases should be one letter an ATGC");
return -1;
}
if (position.base2sample.containsKey(bases.charAt(0))) {
LOG.error("in " + line + " bases already defined for this position");
return -1;
}
final String sampleName = tokens[3].trim();
if (sampleName.isEmpty()) {
LOG.error("sample name cannot be empty");
return -1;
}
samples.add(sampleName);
position.base2sample.put(bases.charAt(0), sampleName);
}
} catch (final IOException err) {
LOG.error(err);
return -1;
}
if (samples.contains(UNAFFECTED_SAMPLE)) {
LOG.error("Sample cannot be named " + UNAFFECTED_SAMPLE);
return -1;
}
if (samples.contains(AMBIGOUS_SAMPLE)) {
LOG.error("Sample cannot be named " + AMBIGOUS_SAMPLE);
return -1;
}
if (samples.contains(UNMAPPED)) {
LOG.error("Sample cannot be named " + UNMAPPED);
return -1;
}
samples.add(UNAFFECTED_SAMPLE);
samples.add(AMBIGOUS_SAMPLE);
samples.add(UNMAPPED);
final SAMFileHeader newHeader = new SAMFileHeader();
newHeader.setSortOrder(header.getSortOrder());
newHeader.setSequenceDictionary(dict);
newHeader.addComment("generated with " + getProgramName() + " " + getVersion() + " Pierre Lindenbaum : " + getProgramCommandLine());
/* create groups */
for (final String sample : samples) {
final SAMReadGroupRecord rg = new SAMReadGroupRecord(sample);
rg.setSample(sample);
rg.setLibrary(sample);
newHeader.addReadGroup(rg);
}
sfw = this.writingBamArgs.openSAMFileWriter(this.outputFile, newHeader, true);
final SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(header).logger(LOG);
final SAMRecordIterator iter = sfr.iterator();
while (iter.hasNext()) {
final SAMRecord rec = progress.watch(iter.next());
rec.setAttribute("RG", null);
if (rec.getReadUnmappedFlag()) {
rec.setAttribute("RG", UNMAPPED);
sfw.addAlignment(rec);
continue;
}
final Cigar cigar = rec.getCigar();
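// only positions whose one-base intervals lie entirely within the read's unclipped span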
final Collection<Position> snps = positionsTreeMap.getContained(new Interval(rec.getContig(), rec.getUnclippedStart(), rec.getUnclippedEnd()));
if (snps == null || snps.isEmpty()) {
rec.setAttribute("RG", UNAFFECTED_SAMPLE);
sfw.addAlignment(rec);
continue;
}
final Map<Integer, Position> index2pos = snps.stream().collect(Collectors.toMap(P -> P.refpos, P -> P));
final Set<String> selectedSamples = new HashSet<>();
final byte[] bases = rec.getReadBases();
if (bases == null || bases.equals(SAMRecord.NULL_SEQUENCE)) {
LOG.error("Bases missing in read " + rec);
return -1;
}
int refPos1 = rec.getUnclippedStart();
int readPos0 = 0;
for (final CigarElement ce : cigar.getCigarElements()) {
final CigarOperator op = ce.getOperator();
final boolean consummeReadBaseOrSoftClip = op.consumesReadBases() || op.equals(CigarOperator.S);
if (op.consumesReferenceBases() && consummeReadBaseOrSoftClip) {
for (int i = 0; i < ce.getLength(); ++i) {
final int nowRefPos1 = (refPos1 + i);
final int nowReadPos0 = (readPos0 + i);
final Position position = index2pos.get(nowRefPos1);
if (position == null)
continue;
if (nowReadPos0 >= bases.length)
continue;
final char base = (char) Character.toUpperCase(bases[nowReadPos0]);
final String sample = position.base2sample.get(base);
if (sample == null)
continue;
selectedSamples.add(sample);
index2pos.remove(nowRefPos1);
if (index2pos.isEmpty())
break;
}
}
if (op.consumesReferenceBases())
refPos1 += ce.getLength();
if (consummeReadBaseOrSoftClip || op.equals(CigarOperator.H)) {
readPos0 += ce.getLength();
}
}
if (selectedSamples.isEmpty()) {
rec.setAttribute("RG", UNAFFECTED_SAMPLE);
} else if (selectedSamples.size() == 1) {
rec.setAttribute("RG", selectedSamples.iterator().next());
} else {
rec.setAttribute("RG", AMBIGOUS_SAMPLE);
}
sfw.addAlignment(rec);
}
progress.finish();
LOG.info("done");
return RETURN_OK;
} catch (final Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(sfr);
CloserUtil.close(sfw);
}
}
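Unlike the overlap queries in the previous examples, this method calls getContained(), which returns only the entries whose intervals lie wholly inside the query span; because the keys here are one-base intervals, containment and overlap coincide whenever the position falls within the read's unclipped extent. A minimal sketch, again with invented coordinates and Strings in place of the Position records:

import htsjdk.samtools.util.Interval;
import htsjdk.samtools.util.IntervalTreeMap;

public class GetContainedSketch {
    public static void main(final String[] args) {
        // one-base intervals keyed by reference position, as with Position above
        final IntervalTreeMap<String> positions = new IntervalTreeMap<>();
        positions.put(new Interval("chr1", 150, 150), "pos:150");
        positions.put(new Interval("chr1", 400, 400), "pos:400");
        // getContained returns only entries lying entirely inside the query span,
        // here the unclipped extent of a hypothetical read
        for (final String p : positions.getContained(new Interval("chr1", 100, 200))) {
            System.out.println(p); // prints pos:150 only
        }
    }
}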