Use of htsjdk.samtools.reference.IndexedFastaSequenceFile in project jvarkit by lindenb.
The class TView, method initialize().
public int initialize() throws IOException {
if (this.referenceFile != null) {
this.indexedFastaSequenceFile = new IndexedFastaSequenceFile(this.referenceFile);
}
if (this.samRecordFilter == null) {
this.samRecordFilter = SamFilterParser.ACCEPT_ALL;
}
final SamReaderFactory srf = SamReaderFactory.makeDefault().referenceSequence(this.referenceFile).validationStringency(ValidationStringency.LENIENT);
for (final SamInputResource sir : this.samInputResources) {
final SamReader samReader = srf.open(sir);
this.samReaders.add(samReader);
}
for (final File vcfFile : IOUtils.unrollFile(this.variantFiles)) {
final VcfSource vcfSource = new VcfSource();
LOG.debug("OPEN " + vcfFile);
vcfSource.vcfFile = vcfFile;
vcfSource.vcfFileReader = new VCFFileReader(vcfFile, true);
this.vcfReaders.add(vcfSource);
}
// open the known-genes tabix reader when a URI was supplied
if (this.knownGeneUri != null) {
this.tabixKnownGene = new TabixKnownGeneFileReader(this.knownGeneUri);
}
return 0;
}
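For context, IndexedFastaSequenceFile gives random access to a FASTA file through its .fai index (e.g. built with samtools faidx). A minimal, self-contained sketch, with placeholder file and contig names:

import java.io.File;
import htsjdk.samtools.SAMSequenceDictionary;
import htsjdk.samtools.reference.IndexedFastaSequenceFile;
import htsjdk.samtools.reference.ReferenceSequence;

public class FastaRandomAccess {
    public static void main(final String[] args) throws Exception {
        // assumes hg19.fa.fai sits next to the FASTA (placeholder path)
        try (IndexedFastaSequenceFile fasta = new IndexedFastaSequenceFile(new File("hg19.fa"))) {
            // the dictionary is only present when a companion .dict file exists
            final SAMSequenceDictionary dict = fasta.getSequenceDictionary();
            if (dict == null) System.err.println("no .dict file found");
            // getSubsequenceAt uses a 1-based, closed interval
            final ReferenceSequence seq = fasta.getSubsequenceAt("chr1", 100000, 100010);
            System.out.println(new String(seq.getBases()));
        }
    }
}

Newer htsjdk releases deprecate these public constructors in favor of ReferenceSequenceFileFactory.getReferenceSequenceFile(...), which returns the same indexed access when a .fai is present.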
Use of htsjdk.samtools.reference.IndexedFastaSequenceFile in project jvarkit by lindenb.
The class VCFCombineTwoSnvs, method doVcfToVcf().
@Override
protected int doVcfToVcf(final String inputName, File saveAs) {
BufferedReader bufferedReader = null;
htsjdk.variant.variantcontext.writer.VariantContextWriter w = null;
SortingCollection<CombinedMutation> mutations = null;
CloseableIterator<Variant> varIter = null;
CloseableIterator<CombinedMutation> mutIter = null;
Map<String, SamReader> sample2samReader = new HashMap<>();
try {
bufferedReader = inputName == null ? IOUtils.openStreamForBufferedReader(stdin()) : IOUtils.openURIForBufferedReading(inputName);
final VCFUtils.CodecAndHeader cah = VCFUtils.parseHeader(bufferedReader);
/* get VCF header */
final VCFHeader header = cah.header;
final Set<String> sampleNamesInOrder = new HashSet<>(header.getSampleNamesInOrder());
LOG.info("opening REF:" + referenceFile);
this.indexedFastaSequenceFile = new IndexedFastaSequenceFile(this.referenceFile);
final SAMSequenceDictionary dict = this.indexedFastaSequenceFile.getSequenceDictionary();
if (dict == null)
throw new IOException("dictionary missing");
if (this.bamIn != null) {
/**
* unroll and open bam file
*/
for (final File bamFile : IOUtils.unrollFileCollection(Collections.singletonList(this.bamIn))) {
LOG.info("opening BAM :" + bamFile);
// open the current bamFile of the unrolled list, not this.bamIn
final SamReader samReader = SamReaderFactory.makeDefault().referenceSequence(this.referenceFile).validationStringency(ValidationStringency.LENIENT).open(bamFile);
if (!samReader.hasIndex()) {
throw new IOException("Sam file is NOT indexed: " + bamFile);
}
final SAMFileHeader samHeader = samReader.getFileHeader();
if (samHeader.getSequenceDictionary() == null || !SequenceUtil.areSequenceDictionariesEqual(dict, samReader.getFileHeader().getSequenceDictionary())) {
throw new IOException(bamFile + " and REF don't have the same Sequence Dictionary.");
}
/* get sample name */
String sampleName = null;
for (final SAMReadGroupRecord rg : samHeader.getReadGroups()) {
if (rg.getSample() == null)
continue;
if (sampleName != null && !sampleName.equals(rg.getSample())) {
samReader.close();
throw new IOException(bamFile + " Contains two samples " + sampleName + " " + rg.getSample());
}
sampleName = rg.getSample();
}
if (sampleName == null) {
samReader.close();
LOG.warn("no sample in " + bamFile);
continue;
}
if (!sampleNamesInOrder.contains(sampleName)) {
samReader.close();
LOG.warn("no sample " + sampleName + " in vcf");
continue;
}
sample2samReader.put(sampleName, samReader);
}
}
loadKnownGenesFromUri();
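/* First pass: collect candidate SNVs in a disk-backed SortingCollection, ordered so that variants on the same contig/transcript become adjacent (see the buffer flush condition below). */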
this.variants = SortingCollection.newInstance(Variant.class, new VariantCodec(), new VariantComparator(dict), this.writingSortingCollection.getMaxRecordsInRam(), this.writingSortingCollection.getTmpPaths());
this.variants.setDestructiveIteration(true);
SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(header);
String vcfLine = null;
while ((vcfLine = bufferedReader.readLine()) != null) {
final VariantContext ctx = progress.watch(cah.codec.decode(vcfLine));
/* discard non-SNV variants */
if (!ctx.isVariant() || ctx.isIndel()) {
continue;
}
/* find the overlapping genes : extend the interval of the variant to include the stop codon */
final Collection<KnownGene> genes = new ArrayList<>();
for (List<KnownGene> lkg : this.knownGenes.getOverlapping(new Interval(ctx.getContig(), Math.max(1, ctx.getStart() - 3), ctx.getEnd() + 3))) {
genes.addAll(lkg);
}
final List<Allele> alternateAlleles = ctx.getAlternateAlleles();
/* loop over overlapping genes */
for (final KnownGene kg : genes) {
/* loop over available alleles */
for (int allele_idx = 0; allele_idx < alternateAlleles.size(); ++allele_idx) {
final Allele alt = alternateAlleles.get(allele_idx);
challenge(ctx, alt, kg, vcfLine);
}
}
}
progress.finish();
this.variants.doneAdding();
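/* Second pass: variants hitting the same codon of a transcript are paired up, and the resulting CombinedMutation records are sorted by genomic position for output. */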
mutations = SortingCollection.newInstance(CombinedMutation.class, new MutationCodec(), new MutationComparator(dict), this.writingSortingCollection.getMaxRecordsInRam(), this.writingSortingCollection.getTmpPaths());
mutations.setDestructiveIteration(true);
final VCFFilterHeaderLine vcfFilterHeaderLine = new VCFFilterHeaderLine("TwoHaplotypes", "(number of reads carrying both mutations) < (reads carrying variant 1 + reads carrying variant 2)");
varIter = this.variants.iterator();
progress = new SAMSequenceDictionaryProgress(header);
final ArrayList<Variant> buffer = new ArrayList<>();
for (; ; ) {
Variant variant = null;
if (varIter.hasNext()) {
variant = varIter.next();
progress.watch(variant.contig, variant.genomicPosition1);
}
if (variant == null || !(!buffer.isEmpty() && buffer.get(0).contig.equals(variant.contig) && buffer.get(0).transcriptName.equals(variant.transcriptName))) {
if (!buffer.isEmpty()) {
for (int i = 0; i < buffer.size(); ++i) {
final Variant v1 = buffer.get(i);
for (int j = i + 1; j < buffer.size(); ++j) {
final Variant v2 = buffer.get(j);
if (v1.codonStart() != v2.codonStart())
continue;
if (v1.positionInCodon() == v2.positionInCodon())
continue;
if (!v1.wildCodon.equals(v2.wildCodon)) {
throw new IllegalStateException();
}
final StringBuilder combinedCodon = new StringBuilder(v1.wildCodon);
combinedCodon.setCharAt(v1.positionInCodon(), v1.mutCodon.charAt(v1.positionInCodon()));
combinedCodon.setCharAt(v2.positionInCodon(), v2.mutCodon.charAt(v2.positionInCodon()));
final String pwild = new ProteinCharSequence(v1.wildCodon).getString();
final String p1 = new ProteinCharSequence(v1.mutCodon).getString();
final String p2 = new ProteinCharSequence(v2.mutCodon).getString();
final String pCombined = new ProteinCharSequence(combinedCodon).getString();
final String combinedSO;
final String combinedType;
/* both AA are synonymous, while combined is not */
if (!pCombined.equals(pwild) && p1.equals(pwild) && p2.equals(pwild)) {
combinedType = "combined_is_nonsynonymous";
if (pCombined.equals("*")) {
/* http://www.sequenceontology.org/browser/current_svn/term/SO:0001587 */
combinedSO = "stop_gained";
} else if (pwild.equals("*")) {
/* http://www.sequenceontology.org/browser/current_svn/term/SO:0002012 */
combinedSO = "stop_lost";
} else {
/* http://www.sequenceontology.org/miso/current_svn/term/SO:0001992 */
combinedSO = "nonsynonymous_variant";
}
} else if (!pCombined.equals(p1) && !pCombined.equals(p2) && !pCombined.equals(pwild)) {
combinedType = "combined_is_new";
if (pCombined.equals("*")) {
/* http://www.sequenceontology.org/browser/current_svn/term/SO:0001587 */
combinedSO = "stop_gained";
} else {
/* http://www.sequenceontology.org/miso/current_svn/term/SO:0001992 */
combinedSO = "nonsynonymous_variant";
}
} else {
combinedType = null;
combinedSO = null;
}
/**
* ok, there is something interesting here:
* create two new Mutations carrying the
* two variants
*/
if (combinedSO != null) {
/**
* the grantham score is the maximum of combined vs (p1/p2/wild)
*/
int grantham_score = GranthamScore.score(pCombined.charAt(0), pwild.charAt(0));
grantham_score = Math.max(grantham_score, GranthamScore.score(pCombined.charAt(0), p1.charAt(0)));
grantham_score = Math.max(grantham_score, GranthamScore.score(pCombined.charAt(0), p2.charAt(0)));
/**
* info that will be displayed in the vcf
*/
final Map<String, Object> info1 = v1.getInfo(v2);
final Map<String, Object> info2 = v2.getInfo(v1);
// FILTER for this combined mutation: by default it fails the filter
String filter = vcfFilterHeaderLine.getID();
final Map<String, Object> combinedMap = new LinkedHashMap<>();
combinedMap.put("CombinedCodon", combinedCodon);
combinedMap.put("CombinedAA", pCombined);
combinedMap.put("CombinedSO", combinedSO);
combinedMap.put("CombinedType", combinedType);
combinedMap.put("GranthamScore", grantham_score);
info1.putAll(combinedMap);
info2.putAll(combinedMap);
final Map<String, CoverageInfo> sample2coverageInfo = new HashMap<>(sample2samReader.size());
final int chromStart = Math.min(v1.genomicPosition1, v2.genomicPosition1);
final int chromEnd = Math.max(v1.genomicPosition1, v2.genomicPosition1);
/* get phasing info for each sample*/
for (final String sampleName : sample2samReader.keySet()) {
final SamReader samReader = sample2samReader.get(sampleName);
final CoverageInfo covInfo = new CoverageInfo();
sample2coverageInfo.put(sampleName, covInfo);
SAMRecordIterator iter = null;
try {
iter = samReader.query(v1.contig, chromStart, chromEnd, false);
while (iter.hasNext()) {
final SAMRecord rec = iter.next();
if (rec.getReadUnmappedFlag())
continue;
if (rec.isSecondaryOrSupplementary())
continue;
if (rec.getDuplicateReadFlag())
continue;
if (rec.getReadFailsVendorQualityCheckFlag())
continue;
// get DEPTH for variant 1
if (rec.getAlignmentStart() <= v1.genomicPosition1 && v1.genomicPosition1 <= rec.getAlignmentEnd()) {
covInfo.depth1++;
}
// get DEPTH for variant 2
if (rec.getAlignmentStart() <= v2.genomicPosition1 && v2.genomicPosition1 <= rec.getAlignmentEnd()) {
covInfo.depth2++;
}
if (rec.getAlignmentEnd() < chromEnd)
continue;
if (rec.getAlignmentStart() > chromStart)
continue;
final Cigar cigar = rec.getCigar();
if (cigar == null)
continue;
final byte[] bases = rec.getReadBases();
if (bases == null)
continue;
int refpos1 = rec.getAlignmentStart();
int readpos = 0;
boolean found_variant1_on_this_read = false;
boolean found_variant2_on_this_read = false;
/**
* loop over cigar
*/
for (final CigarElement ce : cigar.getCigarElements()) {
final CigarOperator op = ce.getOperator();
switch(op) {
case P:
continue;
case S:
case I:
readpos += ce.getLength();
break;
case D:
case N:
refpos1 += ce.getLength();
break;
case H:
continue;
case EQ:
case M:
case X:
for (int x = 0; x < ce.getLength(); ++x) {
if (refpos1 == v1.genomicPosition1 && same(bases[readpos], v1.altAllele)) {
found_variant1_on_this_read = true;
} else if (refpos1 == v2.genomicPosition1 && same(bases[readpos], v2.altAllele)) {
found_variant2_on_this_read = true;
}
refpos1++;
readpos++;
}
break;
default:
throw new IllegalStateException(op.name());
}
/* skip remaining bases after last variant */
if (refpos1 > chromEnd)
break;
}
/* sum-up what we found */
if (found_variant1_on_this_read && found_variant2_on_this_read) {
covInfo.count_reads_having_both_variants++;
} else if (!found_variant1_on_this_read && !found_variant2_on_this_read) {
covInfo.count_reads_having_no_variants++;
} else if (found_variant1_on_this_read) {
covInfo.count_reads_having_variant1++;
} else if (found_variant2_on_this_read) {
covInfo.count_reads_having_variant2++;
}
}
/* end of loop over reads */
} finally {
iter.close();
iter = null;
}
info1.put("N_READS_BOTH_VARIANTS_" + sampleName, covInfo.count_reads_having_both_variants);
info2.put("N_READS_BOTH_VARIANTS_" + sampleName, covInfo.count_reads_having_both_variants);
info1.put("N_READS_NO_VARIANTS_" + sampleName, covInfo.count_reads_having_no_variants);
info2.put("N_READS_NO_VARIANTS_" + sampleName, covInfo.count_reads_having_no_variants);
info1.put("N_READS_TOTAL_" + sampleName, covInfo.count_reads_having_both_variants + covInfo.count_reads_having_no_variants + covInfo.count_reads_having_variant1 + covInfo.count_reads_having_variant2);
info2.put("N_READS_TOTAL_" + sampleName, covInfo.count_reads_having_both_variants + covInfo.count_reads_having_no_variants + covInfo.count_reads_having_variant1 + covInfo.count_reads_having_variant2);
// count for variant 1
info1.put("N_READS_ONLY_1_" + sampleName, covInfo.count_reads_having_variant1);
info1.put("N_READS_ONLY_2_" + sampleName, covInfo.count_reads_having_variant2);
info1.put("DEPTH_1_" + sampleName, covInfo.depth1);
// same counts, swapped for the second variant
info2.put("N_READS_ONLY_1_" + sampleName, covInfo.count_reads_having_variant2);
info2.put("N_READS_ONLY_2_" + sampleName, covInfo.count_reads_having_variant1);
info2.put("DEPTH_2_" + sampleName, covInfo.depth2);
/* twice the number of reads carrying both variants exceeds the
* number of reads carrying only one: clear the filter
*/
if (2 * covInfo.count_reads_having_both_variants > (covInfo.count_reads_having_variant1 + covInfo.count_reads_having_variant2)) {
/* reset filter */
filter = VCFConstants.UNFILTERED;
info1.put("FILTER_1_" + sampleName, ".");
info2.put("FILTER_2_" + sampleName, ".");
} else {
info1.put("FILTER_1_" + sampleName, vcfFilterHeaderLine.getID());
info2.put("FILTER_2_" + sampleName, vcfFilterHeaderLine.getID());
}
}
/* end of loop over samples */
final CombinedMutation m1 = new CombinedMutation();
m1.contig = v1.contig;
m1.genomicPosition1 = v1.genomicPosition1;
m1.id = v1.id;
m1.refAllele = v1.refAllele;
m1.altAllele = v1.altAllele;
m1.vcfLine = v1.vcfLine;
m1.info = mapToString(info1);
m1.filter = filter;
m1.grantham_score = grantham_score;
m1.sorting_id = ID_GENERATOR++;
mutations.add(m1);
final CombinedMutation m2 = new CombinedMutation();
m2.contig = v2.contig;
m2.genomicPosition1 = v2.genomicPosition1;
m2.id = v2.id;
m2.refAllele = v2.refAllele;
m2.altAllele = v2.altAllele;
m2.vcfLine = v2.vcfLine;
m2.info = mapToString(info2);
m2.filter = filter;
m2.grantham_score = grantham_score;
m2.sorting_id = ID_GENERATOR++;
mutations.add(m2);
}
}
}
}
buffer.clear();
if (variant == null)
break;
}
buffer.add(variant);
}
progress.finish();
mutations.doneAdding();
varIter.close();
varIter = null;
variants.cleanup();
variants = null;
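/* Output pass: mutations sharing contig/position/REF are merged into one VCF record; QUAL is derived from the best Grantham score found. */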
final ArrayList<CombinedMutation> mBuffer = new ArrayList<>();
final VCFHeader header2 = new VCFHeader(header);
header2.addMetaDataLine(new VCFHeaderLine(getProgramName() + "AboutQUAL", "QUAL is filled with Grantham Score http://www.ncbi.nlm.nih.gov/pubmed/4843792"));
final StringBuilder infoDesc = new StringBuilder("Variant affected by two distinct mutations. Format is defined in the INFO column. ");
final VCFInfoHeaderLine infoHeaderLine = new VCFInfoHeaderLine("CodonVariant", VCFHeaderLineCount.UNBOUNDED, VCFHeaderLineType.String, infoDesc.toString());
super.addMetaData(header2);
header2.addMetaDataLine(infoHeaderLine);
if (!sample2samReader.isEmpty()) {
header2.addMetaDataLine(vcfFilterHeaderLine);
}
w = super.openVariantContextWriter(saveAs);
w.writeHeader(header2);
progress = new SAMSequenceDictionaryProgress(header);
mutIter = mutations.iterator();
for (; ; ) {
CombinedMutation mutation = null;
if (mutIter.hasNext()) {
mutation = mutIter.next();
progress.watch(mutation.contig, mutation.genomicPosition1);
}
if (mutation == null || !(!mBuffer.isEmpty() && mBuffer.get(0).contig.equals(mutation.contig) && mBuffer.get(0).genomicPosition1 == mutation.genomicPosition1 && mBuffer.get(0).refAllele.equals(mutation.refAllele))) {
if (!mBuffer.isEmpty()) {
// default grantham score used in QUAL
int grantham_score = -1;
// default filter fails
String filter = vcfFilterHeaderLine.getID();
final CombinedMutation first = mBuffer.get(0);
final Set<String> info = new HashSet<>();
final VariantContext ctx = cah.codec.decode(first.vcfLine);
final VariantContextBuilder vcb = new VariantContextBuilder(ctx);
vcb.chr(first.contig);
vcb.start(first.genomicPosition1);
vcb.stop(first.genomicPosition1 + first.refAllele.length() - 1);
if (!first.id.equals(VCFConstants.EMPTY_ID_FIELD))
vcb.id(first.id);
for (final CombinedMutation m : mBuffer) {
info.add(m.info);
grantham_score = Math.max(grantham_score, m.grantham_score);
if (VCFConstants.UNFILTERED.equals(m.filter)) {
// at least one SNP is ok on this line
filter = null;
}
}
vcb.unfiltered();
if (filter != null && !sample2samReader.isEmpty()) {
vcb.filter(filter);
} else {
vcb.passFilters();
}
vcb.attribute(infoHeaderLine.getID(), new ArrayList<String>(info));
if (grantham_score > 0) {
vcb.log10PError(grantham_score / -10.0);
} else {
vcb.log10PError(VariantContext.NO_LOG10_PERROR);
}
w.add(vcb.make());
}
mBuffer.clear();
if (mutation == null)
break;
}
mBuffer.add(mutation);
}
progress.finish();
mutIter.close();
mutations.cleanup();
mutations = null;
return RETURN_OK;
} catch (Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(this.indexedFastaSequenceFile);
CloserUtil.close(mutIter);
CloserUtil.close(varIter);
if (this.variants != null)
this.variants.cleanup();
if (mutations != null)
mutations.cleanup();
this.variants = null;
for (SamReader r : sample2samReader.values()) CloserUtil.close(r);
CloserUtil.close(w);
CloserUtil.close(bufferedReader);
}
}
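The manual CIGAR walk above, which sets found_variant1_on_this_read and found_variant2_on_this_read, can be cross-checked with htsjdk's built-in coordinate mapping. A minimal sketch assuming single-base SNV alleles (the helper name and char parameter are mine):

import htsjdk.samtools.SAMRecord;

final class PhasingCheck {
    /** true if 'rec' has 'altBase' aligned over 1-based reference position 'genomicPos'. */
    static boolean carriesAlt(final SAMRecord rec, final int genomicPos, final char altBase) {
        // 1-based offset in the read, or 0 when the position falls in a
        // deletion/intron or outside the aligned part of the read
        final int readPos1 = rec.getReadPositionAtReferencePosition(genomicPos);
        if (readPos1 < 1) return false;
        final byte[] bases = rec.getReadBases();
        if (bases == null || bases.length < readPos1) return false;
        return Character.toUpperCase((char) bases[readPos1 - 1]) == Character.toUpperCase(altBase);
    }
}

A read then supports the haplotype carrying both SNVs exactly when carriesAlt(...) is true at both genomic positions, which is what feeds count_reads_having_both_variants.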
Use of htsjdk.samtools.reference.IndexedFastaSequenceFile in project jvarkit by lindenb.
The class Biostar170742, method doWork().
@Override
public int doWork(final List<String> args) {
if (this.faidx == null) {
LOG.error("Reference sequence was not defined");
return -1;
}
PrintStream out = null;
SamReader sfr = null;
SAMRecordIterator iter = null;
GenomicSequence genomicSequence = null;
IndexedFastaSequenceFile indexedFastaSequenceFile = null;
try {
indexedFastaSequenceFile = new IndexedFastaSequenceFile(this.faidx);
long align_id = 0;
sfr = openSamReader(oneFileOrNull(args));
out = super.openFileOrStdoutAsPrintStream(this.outputFile);
final StringBuilder refseq = new StringBuilder();
final StringBuilder readseq = new StringBuilder();
final SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(sfr.getFileHeader());
iter = sfr.iterator();
while (iter.hasNext()) {
final SAMRecord rec = progress.watch(iter.next());
if (rec.getReadUnmappedFlag())
continue;
final Cigar cigar = rec.getCigar();
if (cigar == null)
continue;
final byte[] readbases = rec.getReadBases();
if (readbases == null)
continue;
if (genomicSequence == null || !rec.getReferenceName().equals(genomicSequence.getChrom())) {
genomicSequence = new GenomicSequence(indexedFastaSequenceFile, rec.getReferenceName());
}
int refpos1 = rec.getAlignmentStart();
int readpos = 0;
refseq.setLength(0);
readseq.setLength(0);
for (final CigarElement ce : cigar.getCigarElements()) {
final CigarOperator op = ce.getOperator();
if (op.equals(CigarOperator.S)) {
readpos += ce.getLength();
continue;
}
if (op.equals(CigarOperator.H)) {
continue;
}
for (int i = 0; i < ce.getLength(); ++i) {
if (op.consumesReferenceBases() && op.consumesReadBases()) {
refseq.append(genomicSequence.charAt(refpos1 - 1));
readseq.append((char) readbases[readpos]);
readpos++;
refpos1++;
} else if (op.consumesReferenceBases()) {
refseq.append(genomicSequence.charAt(refpos1 - 1));
readseq.append('-');
refpos1++;
} else if (op.consumesReadBases()) {
refseq.append('-');
readseq.append((char) readbases[readpos]);
readpos++;
}
}
}
out.print(align_id);
out.print(' ');
out.print(rec.getReferenceName());
out.print(' ');
out.print(rec.getAlignmentStart());
out.print(' ');
out.print(rec.getAlignmentEnd());
out.print(' ');
out.print(rec.getReadName());
if (rec.getReadPairedFlag()) {
if (rec.getFirstOfPairFlag()) {
out.print("/1");
} else if (rec.getSecondOfPairFlag()) {
out.print("/2");
}
}
out.print(' ');
out.print(1 + rec.getAlignmentStart() - rec.getUnclippedStart());
out.print(' ');
out.print(rec.getReadLength() - (rec.getUnclippedEnd() - rec.getAlignmentEnd()));
out.print(' ');
out.print(rec.getReadNegativeStrandFlag() ? "-" : "+");
out.print(' ');
out.print(rec.getMappingQuality());
out.println();
out.println(refseq);
out.println(readseq);
out.println();
++align_id;
}
progress.finish();
iter.close();
out.flush();
LOG.info("done");
return RETURN_OK;
} catch (Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(out);
CloserUtil.close(iter);
CloserUtil.close(sfr);
CloserUtil.close(indexedFastaSequenceFile);
}
}
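The gapped reconstruction above is driven entirely by CigarOperator.consumesReadBases()/consumesReferenceBases(). A quick self-contained sketch that prints how each operator advances readpos and refpos1:

import htsjdk.samtools.CigarOperator;

public class CigarOpTable {
    public static void main(final String[] args) {
        for (final CigarOperator op : CigarOperator.values()) {
            // read:true -> the operator advances readpos
            // ref:true  -> the operator advances refpos1
            System.out.printf("%s read:%b ref:%b%n", op.name(), op.consumesReadBases(), op.consumesReferenceBases());
        }
    }
}

M, EQ and X consume both (aligned columns); I consumes only read bases (the snippet prints '-' on the reference side), and S also consumes read bases but is skipped outright; D and N consume only reference bases ('-' on the read side); H and P consume neither, which is why the loop can skip them.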
Use of htsjdk.samtools.reference.IndexedFastaSequenceFile in project jvarkit by lindenb.
The class Biostar78285, method doWork().
@Override
public int doWork(final List<String> args) {
if (this.gc_percent_window < 1) {
LOG.error("Bad GC% window size:" + this.gc_percent_window);
return -1;
}
final List<File> bamFiles = IOUtil.unrollFiles(args.stream().map(F -> new File(F)).collect(Collectors.toCollection(HashSet::new)), ".bam");
SAMSequenceDictionary dict = null;
final List<SamReader> samReaders = new ArrayList<>();
final List<CloseableIterator<SAMRecord>> samIterators = new ArrayList<>();
final TreeSet<String> samples = new TreeSet<>();
final String DEFAULT_PARTITION = "UNDEFINED_PARTITION";
IndexedFastaSequenceFile indexedFastaSequenceFile = null;
VariantContextWriter out = null;
try {
final SamReaderFactory samReaderFactory = SamReaderFactory.makeDefault().validationStringency(ValidationStringency.LENIENT);
for (final File bamFile : bamFiles) {
LOG.info("Opening " + bamFile);
final SamReader samReader = samReaderFactory.open(bamFile);
samReaders.add(samReader);
final SAMFileHeader header = samReader.getFileHeader();
if (header == null) {
LOG.error("No header in " + bamFile);
return -1;
}
if (header.getSortOrder() != SortOrder.coordinate) {
LOG.error("Sam file " + bamFile + " is not sorted on coordinate :" + header.getSortOrder());
return -1;
}
samples.addAll(header.getReadGroups().stream().map(RG -> this.partition.apply(RG, DEFAULT_PARTITION)).collect(Collectors.toSet()));
final SAMSequenceDictionary currDict = header.getSequenceDictionary();
if (currDict == null) {
LOG.error("SamFile doesn't contain a SAMSequenceDictionary : " + bamFile);
return -1;
}
if (dict == null) {
dict = currDict;
} else if (!SequenceUtil.areSequenceDictionariesEqual(dict, currDict)) {
LOG.error(JvarkitException.DictionariesAreNotTheSame.getMessage(dict, currDict));
return -1;
}
}
if (samReaders.isEmpty()) {
LOG.error("no bam");
return -1;
}
if (dict == null) {
LOG.error("no dictionary");
return -1;
}
final QueryInterval[] intervals;
if (this.captureBed != null) {
LOG.info("Opening " + this.captureBed);
ContigNameConverter.setDefaultAliases(dict);
final List<QueryInterval> L = new ArrayList<>();
final BedLineCodec codec = new BedLineCodec();
final LineIterator li = IOUtils.openFileForLineIterator(this.captureBed);
while (li.hasNext()) {
final BedLine bed = codec.decode(li.next());
if (bed == null)
continue;
final QueryInterval q = bed.toQueryInterval(dict);
L.add(q);
}
CloserUtil.close(li);
intervals = QueryInterval.optimizeIntervals(L.toArray(new QueryInterval[L.size()]));
} else {
intervals = null;
}
for (final SamReader samReader : samReaders) {
LOG.info("querying " + samReader.getResourceDescription());
final CloseableIterator<SAMRecord> iter;
if (intervals == null) {
iter = samReader.iterator();
} else {
iter = samReader.queryOverlapping(intervals);
}
samIterators.add(new FilterIterator<SAMRecord>(iter, R -> !R.getReadUnmappedFlag() && !filter.filterOut(R)));
}
if (this.refFile != null) {
LOG.info("opening " + refFile);
indexedFastaSequenceFile = new IndexedFastaSequenceFile(this.refFile);
final SAMSequenceDictionary refdict = indexedFastaSequenceFile.getSequenceDictionary();
if (refdict == null) {
throw new JvarkitException.FastaDictionaryMissing(this.refFile);
}
// register contig aliases only once the dictionary is known to exist
ContigNameConverter.setDefaultAliases(refdict);
if (!SequenceUtil.areSequenceDictionariesEqual(dict, refdict)) {
LOG.error(JvarkitException.DictionariesAreNotTheSame.getMessage(dict, refdict));
return -1;
}
}
out = openVariantContextWriter(this.outputFile);
final Set<VCFHeaderLine> metaData = new HashSet<>();
VCFStandardHeaderLines.addStandardFormatLines(metaData, true, VCFConstants.DEPTH_KEY, VCFConstants.GENOTYPE_KEY);
VCFStandardHeaderLines.addStandardInfoLines(metaData, true, VCFConstants.DEPTH_KEY);
metaData.add(new VCFFormatHeaderLine("DF", 1, VCFHeaderLineType.Integer, "Number of Reads on plus strand"));
metaData.add(new VCFFormatHeaderLine("DR", 1, VCFHeaderLineType.Integer, "Number of Reads on minus strand"));
metaData.add(new VCFInfoHeaderLine("AVG_DP", 1, VCFHeaderLineType.Float, "Mean depth"));
metaData.add(new VCFInfoHeaderLine("MEDIAN_DP", 1, VCFHeaderLineType.Float, "Median depth"));
metaData.add(new VCFInfoHeaderLine("MIN_DP", 1, VCFHeaderLineType.Integer, "Min depth"));
metaData.add(new VCFInfoHeaderLine("MAX_DP", 1, VCFHeaderLineType.Integer, "Max depth"));
metaData.add(new VCFHeaderLine(Biostar78285.class.getSimpleName() + ".SamFilter", this.filter.toString()));
for (final Integer treshold : this.minDepthTresholds) {
metaData.add(new VCFFilterHeaderLine("DP_LT_" + treshold, "All genotypes have DP< " + treshold));
metaData.add(new VCFInfoHeaderLine("NUM_DP_LT_" + treshold, 1, VCFHeaderLineType.Integer, "Number of genotypes having DP< " + treshold));
metaData.add(new VCFInfoHeaderLine("FRACT_DP_LT_" + treshold, 1, VCFHeaderLineType.Float, "Fraction of genotypes having DP< " + treshold));
}
if (indexedFastaSequenceFile != null) {
metaData.add(new VCFInfoHeaderLine("GC_PERCENT", 1, VCFHeaderLineType.Integer, "GC% window_size:" + this.gc_percent_window));
}
final List<Allele> refAlleles = Collections.singletonList(Allele.create("N", true));
final List<Allele> NO_CALLS = Arrays.asList(Allele.NO_CALL, Allele.NO_CALL);
final VCFHeader vcfHeader = new VCFHeader(metaData, samples);
vcfHeader.setSequenceDictionary(dict);
out.writeHeader(vcfHeader);
final SAMRecordCoordinateComparator samRecordCoordinateComparator = new SAMRecordCoordinateComparator();
final PeekableIterator<SAMRecord> peekIter = new PeekableIterator<>(new MergingIterator<>((R1, R2) -> samRecordCoordinateComparator.fileOrderCompare(R1, R2), samIterators));
final SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(dict);
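// For each reference contig, sweep every position; 'buffer' accumulates the coordinate-sorted reads overlapping the current ssr_pos.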
for (final SAMSequenceRecord ssr : dict.getSequences()) {
final IntervalTree<Boolean> capturePos;
if (intervals != null) {
if (!Arrays.stream(intervals).anyMatch(I -> I.referenceIndex == ssr.getSequenceIndex())) {
continue;
}
capturePos = new IntervalTree<>();
Arrays.stream(intervals).filter(I -> I.referenceIndex == ssr.getSequenceIndex()).forEach(I -> capturePos.put(I.start, I.end, true));
} else {
capturePos = null;
}
final GenomicSequence genomicSequence;
if (indexedFastaSequenceFile != null && indexedFastaSequenceFile.getSequenceDictionary().getSequence(ssr.getSequenceName()) != null) {
genomicSequence = new GenomicSequence(indexedFastaSequenceFile, ssr.getSequenceName());
} else {
genomicSequence = null;
}
final List<SAMRecord> buffer = new ArrayList<>();
for (int ssr_pos = 1; ssr_pos <= ssr.getSequenceLength(); ++ssr_pos) {
if (capturePos != null && !capturePos.overlappers(ssr_pos, ssr_pos).hasNext())
continue;
progress.watch(ssr.getSequenceName(), ssr_pos);
while (peekIter.hasNext()) {
final SAMRecord rec = peekIter.peek();
if (rec.getReadUnmappedFlag()) {
// consume
peekIter.next();
continue;
}
if (this.filter.filterOut(rec)) {
// consume
peekIter.next();
continue;
}
if (rec.getReferenceIndex() < ssr.getSequenceIndex()) {
throw new IllegalStateException("should not happen");
}
if (rec.getReferenceIndex() > ssr.getSequenceIndex()) {
break;
}
if (rec.getAlignmentEnd() < ssr_pos) {
throw new IllegalStateException("should not happen");
}
if (rec.getAlignmentStart() > ssr_pos) {
break;
}
buffer.add(peekIter.next());
}
int x = 0;
while (x < buffer.size()) {
final SAMRecord R = buffer.get(x);
if (R.getReferenceIndex() != ssr.getSequenceIndex() || R.getAlignmentEnd() < ssr_pos) {
buffer.remove(x);
} else {
x++;
}
}
final Map<String, PosInfo> count = samples.stream().map(S -> new PosInfo(S)).collect(Collectors.toMap(P -> P.sample, Function.identity()));
for (final SAMRecord rec : buffer) {
if (rec.getReferenceIndex() != ssr.getSequenceIndex())
throw new IllegalStateException("should not happen");
if (rec.getAlignmentEnd() < ssr_pos)
continue;
if (rec.getAlignmentStart() > ssr_pos)
continue;
final Cigar cigar = rec.getCigar();
if (cigar == null)
continue;
int refpos = rec.getAlignmentStart();
final String sample = this.partition.getPartion(rec, DEFAULT_PARTITION);
for (final CigarElement ce : cigar.getCigarElements()) {
if (refpos > ssr_pos)
break;
final CigarOperator op = ce.getOperator();
if (op.consumesReferenceBases()) {
if (op.consumesReadBases()) {
if (refpos <= ssr_pos && ssr_pos <= refpos + ce.getLength()) {
final PosInfo posInfo = count.get(sample);
if (posInfo != null) {
posInfo.dp++;
if (rec.getReadNegativeStrandFlag()) {
posInfo.negative_strand++;
}
}
break;
}
}
refpos += ce.getLength();
}
}
}
final VariantContextBuilder vcb = new VariantContextBuilder();
final Set<String> filters = new HashSet<>();
vcb.chr(ssr.getSequenceName());
vcb.start(ssr_pos);
vcb.stop(ssr_pos);
if (genomicSequence == null) {
vcb.alleles(refAlleles);
} else {
vcb.alleles(Collections.singletonList(Allele.create((byte) genomicSequence.charAt(ssr_pos - 1), true)));
final GenomicSequence.GCPercent gcp = genomicSequence.getGCPercent(Math.max((ssr_pos - 1) - this.gc_percent_window, 0), Math.min(ssr_pos + this.gc_percent_window, ssr.getSequenceLength()));
if (!gcp.isEmpty()) {
vcb.attribute("GC_PERCENT", gcp.getGCPercentAsInteger());
}
}
vcb.attribute(VCFConstants.DEPTH_KEY, (int) count.values().stream().mapToInt(S -> S.dp).sum());
vcb.genotypes(count.values().stream().map(C -> new GenotypeBuilder(C.sample, NO_CALLS).DP((int) C.dp).attribute("DR", C.negative_strand).attribute("DF", C.dp - C.negative_strand).make()).collect(Collectors.toList()));
for (final Integer treshold : this.minDepthTresholds) {
final int count_lt = (int) count.values().stream().filter(S -> S.dp < treshold).count();
if (count_lt == samples.size()) {
filters.add("DP_LT_" + treshold);
}
vcb.attribute("NUM_DP_LT_" + treshold, count_lt);
if (!samples.isEmpty()) {
vcb.attribute("FRACT_DP_LT_" + treshold, count_lt / (float) samples.size());
}
}
if (!samples.isEmpty()) {
final int[] array = count.values().stream().mapToInt(S -> S.dp).toArray();
vcb.attribute("AVG_DP", Percentile.average().evaluate(array));
vcb.attribute("MEDIAN_DP", Percentile.median().evaluate(array));
vcb.attribute("MIN_DP", (int) Percentile.min().evaluate(array));
vcb.attribute("MAX_DP", (int) Percentile.max().evaluate(array));
}
if (filters.isEmpty()) {
vcb.passFilters();
} else {
vcb.filters(filters);
}
out.add(vcb.make());
}
}
progress.finish();
peekIter.close();
out.close();
out = null;
return 0;
} catch (final Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(out);
CloserUtil.close(samIterators);
CloserUtil.close(samReaders);
CloserUtil.close(indexedFastaSequenceFile);
}
}
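For reference, the per-position records above use the standard htsjdk builder pattern. A minimal, self-contained sketch of one symbolic depth site with a strand-split genotype, all names and numbers invented:

import java.util.Arrays;
import java.util.Collections;
import htsjdk.variant.variantcontext.Allele;
import htsjdk.variant.variantcontext.Genotype;
import htsjdk.variant.variantcontext.GenotypeBuilder;
import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.variantcontext.VariantContextBuilder;
import htsjdk.variant.vcf.VCFConstants;

public class DepthSite {
    public static void main(final String[] args) {
        final Allele ref = Allele.create("N", true);
        final Genotype g = new GenotypeBuilder("sample1", Arrays.asList(Allele.NO_CALL, Allele.NO_CALL))
            .DP(42)              // total depth for the sample
            .attribute("DF", 30) // custom FORMAT field: forward-strand reads
            .attribute("DR", 12) // custom FORMAT field: reverse-strand reads
            .make();
        final VariantContext ctx = new VariantContextBuilder()
            .chr("chr1").start(1000).stop(1000)
            .alleles(Collections.singletonList(ref))
            .attribute(VCFConstants.DEPTH_KEY, 42)
            .genotypes(g)
            .make();
        System.out.println(ctx);
    }
}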
Use of htsjdk.samtools.reference.IndexedFastaSequenceFile in project jvarkit by lindenb.
The class BimToVcf, method doWork().
@Override
public int doWork(List<String> args) {
VariantContextWriter w = null;
BufferedReader r = null;
IndexedFastaSequenceFile faidx = null;
GenomicSequence genomic = null;
try {
if (this.REF == null) {
LOG.error("Reference -R missing.");
return -1;
}
faidx = new IndexedFastaSequenceFile(this.REF);
final SAMSequenceDictionary dict = faidx.getSequenceDictionary();
if (dict == null) {
LOG.error("No dictionary in " + this.REF);
return -1;
}
r = super.openBufferedReader(oneFileOrNull(args));
final Set<VCFHeaderLine> headerLines = new HashSet<>();
final VCFInfoHeaderLine morgan = new VCFInfoHeaderLine("MORGAN", 1, VCFHeaderLineType.Float, "Centimorgan");
final VCFInfoHeaderLine svtype = new VCFInfoHeaderLine("SVTYPE", 1, VCFHeaderLineType.String, "Variation type");
VCFStandardHeaderLines.addStandardInfoLines(headerLines, false, "");
// super.addMetaData(headerLines);
headerLines.add(morgan);
headerLines.add(svtype);
final List<String> genotypeSampleNames = Collections.emptyList();
final VCFHeader header = new VCFHeader(headerLines, genotypeSampleNames);
header.setSequenceDictionary(dict);
w = super.openVariantContextWriter(this.outputFile);
w.writeHeader(header);
final Pattern tab = Pattern.compile("[\t]");
String line;
final Pattern iupacATGC = Pattern.compile("[atgcATGC]");
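// PLINK .bim columns: chrom, variant id, genetic distance (centimorgans), 1-based bp position, allele1, allele2 ("0" = missing allele).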
while ((line = r.readLine()) != null) {
String[] tokens = tab.split(line);
if (tokens.length != 6) {
LOG.error("expected 6 column in " + line);
return -1;
}
Allele a1 = null;
Allele a2 = null;
Allele ref = null;
String contig = tokens[0];
SAMSequenceRecord ssr = null;
ssr = dict.getSequence(contig);
// ugly below !!
if (ssr == null && contig.equals("23")) {
ssr = dict.getSequence("X");
}
if (ssr == null && contig.equals("23")) {
ssr = dict.getSequence("chrX");
}
if (ssr == null && contig.equals("24")) {
ssr = dict.getSequence("Y");
}
if (ssr == null && contig.equals("24")) {
ssr = dict.getSequence("chrY");
}
if (ssr == null && contig.equals("26")) {
ssr = dict.getSequence("chrM");
}
if (ssr == null && contig.equals("26")) {
ssr = dict.getSequence("MT");
}
if (ssr == null && contig.equals("25")) {
LOG.warn("ignoring " + line);
continue;
}
if (ssr == null) {
LOG.error("unknown chrom in " + line);
return -1;
}
if (genomic == null || !ssr.getSequenceName().equals(genomic.getChrom())) {
genomic = new GenomicSequence(faidx, ssr.getSequenceName());
}
int pos1 = Integer.parseInt(tokens[3]);
if (tokens[4].equals("0"))
tokens[4] = tokens[5];
if (tokens[5].equals("0"))
tokens[5] = tokens[4];
final VariantContextBuilder vcb = new VariantContextBuilder();
vcb.chr(ssr.getSequenceName());
vcb.attribute(morgan.getID(), Float.parseFloat(tokens[2]));
if (iupacATGC.matcher(tokens[4]).matches() && iupacATGC.matcher(tokens[5]).matches()) {
String refBase = String.valueOf(genomic.charAt(pos1 - 1));
ref = Allele.create(refBase, true);
a1 = refBase.equalsIgnoreCase(tokens[4]) ? ref : Allele.create(tokens[4], false);
a2 = refBase.equalsIgnoreCase(tokens[5]) ? ref : Allele.create(tokens[5], false);
vcb.attribute(svtype.getID(), a1.isReference() && a2.isReference() ? "NOVARIATION" : "SNV");
} else if ((tokens[4].equals("-") && iupacATGC.matcher(tokens[5]).matches()) || (tokens[5].equals("-") && iupacATGC.matcher(tokens[4]).matches())) {
// shift left
pos1--;
String refBase = String.valueOf(genomic.charAt(pos1 - 1));
a1 = Allele.create(refBase, false);
ref = Allele.create(refBase + tokens[tokens[4].equals("-") ? 5 : 4], true);
a2 = a1;
vcb.attribute(svtype.getID(), "DEL");
} else if (tokens[4].equals("-") && tokens[5].equals("-")) {
// shift left
pos1--;
String refBase = String.valueOf(genomic.charAt(pos1 - 1));
a1 = Allele.create(refBase, false);
ref = Allele.create(refBase + genomic.charAt(pos1), true);
a2 = a1;
vcb.attribute(svtype.getID(), "DEL");
} else {
LOG.error("not handled: " + line);
return -1;
}
final Set<Allele> alleles = new HashSet<>();
alleles.add(ref);
alleles.add(a1);
alleles.add(a2);
vcb.start(pos1);
vcb.stop(pos1 + ref.length() - 1);
if (!tokens[1].isEmpty())
vcb.id(tokens[1]);
vcb.alleles(alleles);
w.add(vcb.make());
}
r.close();
r = null;
w.close();
w = null;
return RETURN_OK;
} catch (final Exception e) {
LOG.error(e);
return -1;
} finally {
CloserUtil.close(faidx);
CloserUtil.close(w);
CloserUtil.close(r);
}
}
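For reference, the six tab-separated columns consumed above follow the standard PLINK .bim layout. A standalone parsing sketch (the class is mine; "0" marks a missing allele, as handled above):

// One PLINK .bim line: chrom, variant id, genetic distance (centimorgans),
// 1-based bp position, allele1, allele2.
final class BimRecord {
    final String contig, id, allele1, allele2;
    final float centimorgan;
    final int pos1;
    BimRecord(final String line) {
        final String[] t = line.split("[\t]");
        if (t.length != 6) throw new IllegalArgumentException("expected 6 columns in " + line);
        this.contig = t[0];
        this.id = t[1];
        this.centimorgan = Float.parseFloat(t[2]);
        this.pos1 = Integer.parseInt(t[3]);
        this.allele1 = t[4];
        this.allele2 = t[5];
    }
}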