Use of htsjdk.samtools.util.CloseableIterator in project jvarkit by lindenb.
The ImpactOfDuplicates class, method doWork.
@Override
public int doWork(final List<String> args) {
CloseableIterator<Duplicate> dupIter = null;
final List<File> INPUT = args.stream().map(S -> new File(S)).collect(Collectors.toList());
try {
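// disk-backed sort of Duplicate records; spills to temporary files once maxRecordsInRam is exceeded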
this.duplicates = SortingCollection.newInstance(Duplicate.class, new DuplicateCodec(), Duplicate::compareTo, this.sortingCollectionArgs.getMaxRecordsInRam(), this.sortingCollectionArgs.getTmpPaths());
for (this.bamIndex = 0; this.bamIndex < INPUT.size(); this.bamIndex++) {
int prev_tid = -1;
int prev_pos = -1;
long nLines = 0L;
File inFile = INPUT.get(this.bamIndex);
LOG.info("Processing " + inFile);
IOUtil.assertFileIsReadable(inFile);
SamReader samReader = null;
CloseableIterator<SAMRecord> iter = null;
try {
samReader = SamReaderFactory.make().validationStringency(ValidationStringency.LENIENT).open(inFile);
final SAMFileHeader header = samReader.getFileHeader();
this.samFileDicts.add(header.getSequenceDictionary());
if (BEDFILE == null) {
iter = samReader.iterator();
} else {
IntervalList intervalList = new IntervalList(header);
try (BufferedReader in = new BufferedReader(new FileReader(BEDFILE))) {
String line = null;
while ((line = in.readLine()) != null) {
if (line.isEmpty() || line.startsWith("#"))
continue;
final String[] tokens = line.split("[\t]");
// convert the BED 0-based start to the 1-based coordinate expected by Interval
final Interval interval = new Interval(tokens[0], 1 + Integer.parseInt(tokens[1]), Integer.parseInt(tokens[2]));
intervalList.add(interval);
}
}
intervalList = intervalList.sorted();
List<Interval> uniqueIntervals = IntervalList.getUniqueIntervals(intervalList, false);
SamRecordIntervalIteratorFactory sriif = new SamRecordIntervalIteratorFactory();
iter = sriif.makeSamRecordIntervalIterator(samReader, uniqueIntervals, false);
}
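// keep only mapped, properly-paired, first-of-pair reads whose mate is on the same contig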
while (iter.hasNext()) {
SAMRecord rec = iter.next();
if (rec.getReadUnmappedFlag())
continue;
if (!rec.getReadPairedFlag())
continue;
if (!rec.getReferenceIndex().equals(rec.getMateReferenceIndex()))
continue;
if (!rec.getProperPairFlag())
continue;
if (!rec.getFirstOfPairFlag())
continue;
if (prev_tid != -1) {
if (prev_tid > rec.getReferenceIndex()) {
throw new IOException("Bad sort order from " + rec);
} else if (prev_tid == rec.getReferenceIndex() && prev_pos > rec.getAlignmentStart()) {
throw new IOException("Bad sort order from " + rec);
}
}
// remember the last seen position so unsorted input is detected
prev_tid = rec.getReferenceIndex();
prev_pos = rec.getAlignmentStart();
if ((++nLines) % 1000000 == 0) {
LOG.info("In " + inFile + " N=" + nLines);
}
Duplicate dup = new Duplicate();
dup.bamIndex = this.bamIndex;
dup.pos = Math.min(rec.getAlignmentStart(), rec.getMateAlignmentStart());
dup.tid = rec.getReferenceIndex();
dup.size = Math.abs(rec.getInferredInsertSize());
this.duplicates.add(dup);
}
} finally {
if (iter != null)
iter.close();
if (samReader != null)
samReader.close();
}
LOG.info("done " + inFile);
}
/**
* loop done, now scan the duplicates
*/
LOG.info("doneAdding");
this.duplicates.doneAdding();
this.out = super.openFileOrStdoutAsPrintStream(outputFile);
out.print("#INTERVAL\tMAX\tMEAN");
for (int i = 0; i < INPUT.size(); ++i) {
out.print('\t');
out.print(INPUT.get(i));
}
out.println();
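// the collection is sorted, so records sharing (tid, pos, size) are adjacent: buffer each group, then dump it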
dupIter = this.duplicates.iterator();
while (dupIter.hasNext()) {
Duplicate dup = dupIter.next();
if (this.duplicatesBuffer.isEmpty() || dup.compareChromPosSize(this.duplicatesBuffer.get(0)) == 0) {
this.duplicatesBuffer.add(dup);
} else {
dumpDuplicatesBuffer(INPUT);
this.duplicatesBuffer.add(dup);
}
}
dumpDuplicatesBuffer(INPUT);
LOG.info("end iterator");
out.flush();
out.close();
} catch (Exception e) {
LOG.error(e);
return -1;
} finally {
if (dupIter != null)
dupIter.close();
LOG.info("cleaning duplicates");
this.duplicates.cleanup();
}
return 0;
}
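Since CloseableIterator extends java.io.Closeable, the iterate-then-close bookkeeping above can also be written with try-with-resources; a minimal sketch, assuming an already-populated SortingCollection<Duplicate> named sorted:

try (final CloseableIterator<Duplicate> it = sorted.iterator()) {
    while (it.hasNext()) {
        final Duplicate dup = it.next();
        // consume dup here, e.g. buffer it as doWork does
    }
} // it.close() runs automatically, even if the loop throws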
Use of htsjdk.samtools.util.CloseableIterator in project jvarkit by lindenb.
The LumpySort class, method doWork.
@Override
public int doWork(final List<String> args) {
VariantContextWriter vcw = null;
Environment environment = null;
Database variantsDb1 = null;
final List<File> inputs = IOUtil.unrollFiles(args.stream().map(S -> new File(S)).collect(Collectors.toList()), ".vcf", ".vcf.gz");
if (inputs.isEmpty()) {
LOG.error("empty vcf list");
return -1;
}
try {
IOUtil.assertDirectoryIsWritable(this.bdbHomeDir);
final Set<VCFHeaderLine> metaData = new HashSet<>();
final Set<String> sampleNames = new TreeSet<>();
final IntervalTreeMap<Boolean> intervalTreeMapBed;
if (this.bedFile != null) {
intervalTreeMapBed = new IntervalTreeMap<>();
final BedLineCodec bedLineCodec = new BedLineCodec();
final BufferedReader br = IOUtils.openFileForBufferedReading(this.bedFile);
br.lines().map(L -> bedLineCodec.decode(L)).filter(L -> L != null).forEach(B -> intervalTreeMapBed.put(B.toInterval(), true));
br.close();
} else {
intervalTreeMapBed = null;
}
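// first pass: check that every input is a Lumpy-SV VCF with unique sample names, and collect header meta-data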
for (int idx = 0; idx < inputs.size(); ++idx) {
final File vcfFile = inputs.get(idx);
LOG.info("Read header " + (idx + 1) + "/" + inputs.size());
final VCFReader r = VCFReaderFactory.makeDefault().open(vcfFile.toPath(), false);
final VCFHeader header = r.getHeader();
if (!LumpyConstants.isLumpyHeader(header)) {
LOG.error("doesn't look like a Lumpy-SV vcf header " + vcfFile);
r.close();
return -1;
}
if (!header.hasGenotypingData()) {
LOG.error("No sample in " + vcfFile);
r.close();
return -1;
}
for (final String sampleName : header.getSampleNamesInOrder()) {
if (sampleNames.contains(sampleName)) {
LOG.error("Sample found twice " + sampleName + " in " + vcfFile);
r.close();
return -1;
}
sampleNames.add(sampleName);
}
metaData.addAll(header.getMetaDataInInputOrder().stream().filter(H -> !H.getKey().equals("fileDate")).collect(Collectors.toSet()));
r.close();
}
final VCFInfoHeaderLine nSampleInfoHeaderLine = new VCFInfoHeaderLine("NSAMPLES", 1, VCFHeaderLineType.Integer, "Number of affected samples.");
metaData.add(nSampleInfoHeaderLine);
final VCFFormatHeaderLine chromStartFormatHeaderLine = new VCFFormatHeaderLine("CB", 1, VCFHeaderLineType.Integer, "Original Variant POS");
metaData.add(chromStartFormatHeaderLine);
final VCFFormatHeaderLine chromEndFormatHeaderLine = new VCFFormatHeaderLine("CE", 1, VCFHeaderLineType.Integer, "Original Variant END");
metaData.add(chromEndFormatHeaderLine);
final VCFHeader outHeader = new VCFHeader(metaData, sampleNames);
final VCFHeaderVersion[] versions = VCFHeaderVersion.values();
this.vcfEncoder = new VCFEncoder(outHeader, false, true);
this.vcfCodec.setVCFHeader(outHeader, versions[versions.length - 1]);
/* open BDB env */
final Transaction txn = null;
environment = new Environment(this.bdbHomeDir, new EnvironmentConfig().setAllowCreate(true).setReadOnly(false));
variantsDb1 = environment.openDatabase(txn, "variants1", new DatabaseConfig().setBtreeComparator(KeySorterComparator.class).setAllowCreate(true).setReadOnly(false).setTemporary(true));
long total_variants = 0L;
final LumpyVarBinding lumpVarBinding = new LumpyVarBinding();
final KeySorterBinding keySorterBinding = new KeySorterBinding();
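// second pass: normalize genotypes and store every variant in the temporary Berkeley-DB database, keyed for sorting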
for (int idx = 0; idx < inputs.size(); ++idx) {
final long millisecstart = System.currentTimeMillis();
final File vcfFile = inputs.get(idx);
int nVariant = 0;
final VCFReader r = VCFReaderFactory.makeDefault().open(vcfFile.toPath(), false);
final List<Genotype> missing = new ArrayList<>(sampleNames.size());
for (final String sn : sampleNames) {
if (r.getHeader().getSampleNamesInOrder().contains(sn))
continue;
missing.add(GenotypeBuilder.createMissing(sn, 2));
}
final CloseableIterator<VariantContext> iter = r.iterator();
while (iter.hasNext()) {
VariantContext ctx = iter.next();
if (!this.keep_secondary) {
if (ctx.hasAttribute("SECONDARY"))
continue;
}
if (!this.variantFilter.test(ctx))
continue;
if (intervalTreeMapBed != null && !intervalTreeMapBed.containsOverlapping(ctx))
continue;
final List<Genotype> gtList = new ArrayList<>(ctx.getGenotypes());
for (int gi = 0; gi < gtList.size(); gi++) {
Genotype g = gtList.get(gi);
final GenotypeBuilder gb;
if (this.do_genotype && isAvailableGenotype(g)) {
gb = new GenotypeBuilder(g.getSampleName(), ctx.getAlternateAlleles());
gb.attributes(g.getExtendedAttributes());
} else {
gb = new GenotypeBuilder(g);
}
gb.attribute(chromStartFormatHeaderLine.getID(), ctx.getStart());
gb.attribute(chromEndFormatHeaderLine.getID(), ctx.getEnd());
gtList.set(gi, gb.make());
}
gtList.addAll(missing);
ctx = new VariantContextBuilder(ctx).genotypes(gtList).rmAttribute("PRPOS").make();
final LumpyVar lvar = new LumpyVar(ctx, total_variants);
final DatabaseEntry key = new DatabaseEntry();
final DatabaseEntry data = new DatabaseEntry();
lumpVarBinding.objectToEntry(lvar, data);
keySorterBinding.objectToEntry(lvar.getSortKey(), key);
if (variantsDb1.put(txn, key, data) != OperationStatus.SUCCESS) {
r.close();
LOG.error("insertion failed");
return -1;
}
nVariant++;
total_variants++;
}
iter.close();
r.close();
LOG.info("Read " + (idx + 1) + "/" + inputs.size() + " variants of " + vcfFile + " N=" + nVariant + " Total:" + total_variants + " That took: " + Duration.ofMillis(System.currentTimeMillis() - millisecstart));
System.gc();
}
if (intervalTreeMapBed != null)
intervalTreeMapBed.clear();
System.gc();
LOG.info("Writing output");
final List<Allele> ALLELES_NO_CALLS = this.do_genotype ? Collections.singletonList(Allele.NO_CALL) : Arrays.asList(Allele.NO_CALL, Allele.NO_CALL);
final Cursor cursor = variantsDb1.openCursor(txn, null);
vcw = this.writingVariantsDelegate.open(this.outputFile);
vcw.writeHeader(outHeader);
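// merge pass: walk the sorted database; a duplicate cursor gathers overlapping variants mergeable with the current one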
for (; ; ) {
final DatabaseEntry key = new DatabaseEntry();
final DatabaseEntry data = new DatabaseEntry();
OperationStatus status = cursor.getNext(key, data, LockMode.DEFAULT);
if (!status.equals(OperationStatus.SUCCESS))
break;
final LumpyVar first = lumpVarBinding.entryToObject(data);
if (this.do_not_merge_ctx) {
vcw.add(first.ctx);
continue;
}
final KeySorter keySorter1 = keySorterBinding.entryToObject(key);
final List<LumpyVar> buffer = new ArrayList<>();
buffer.add(first);
final DatabaseEntry key2 = new DatabaseEntry();
final DatabaseEntry data2 = new DatabaseEntry();
final Cursor cursor2 = cursor.dup(true);
for (; ; ) {
status = cursor2.getNext(key2, data2, LockMode.DEFAULT);
if (!status.equals(OperationStatus.SUCCESS))
break;
final KeySorter keySorter2 = keySorterBinding.entryToObject(key2);
if (keySorter1.compare1(keySorter2) != 0) {
break;
}
final LumpyVar lv = lumpVarBinding.entryToObject(data2);
if (lv.ctx.getStart() > first.ctx.getEnd()) {
break;
}
if (first.canMerge(lv)) {
buffer.add(lv);
cursor2.delete();
}
}
cursor2.close();
// delete 'first'
cursor.delete();
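// min/mean/max of the merged starts and ends define POS, END and the CIPOS/CIEND confidence intervals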
final int variantStartA = buffer.stream().mapToInt(V -> V.ctx.getStart()).min().getAsInt();
final int variantStartB = (int) buffer.stream().mapToInt(V -> V.ctx.getStart()).average().getAsDouble();
final int variantStartC = buffer.stream().mapToInt(V -> V.ctx.getStart()).max().getAsInt();
final int variantEndA = buffer.stream().mapToInt(V -> V.ctx.getEnd()).min().getAsInt();
final int variantEndB = (int) buffer.stream().mapToInt(V -> V.ctx.getEnd()).average().getAsDouble();
final int variantEndC = buffer.stream().mapToInt(V -> V.ctx.getEnd()).max().getAsInt();
final VariantContextBuilder vcb = new VariantContextBuilder("lumpymerge", first.ctx.getContig(), variantStartB, variantEndB, first.ctx.getAlleles());
vcb.attribute("END", variantEndB);
vcb.attribute("SVTYPE", first.ctx.getAttribute("SVTYPE"));
vcb.attribute("SVLEN", (int) Percentile.median().evaluate(buffer.stream().mapToInt(V -> V.ctx.getEnd() - V.ctx.getStart())).getAsDouble());
vcb.attribute("CIPOS", Arrays.asList(variantStartB - variantStartA, variantStartC - variantStartB));
vcb.attribute("CIEND", Arrays.asList(variantEndB - variantEndA, variantEndC - variantEndB));
vcb.attribute("SU", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("SU", 0)).sum());
vcb.attribute("SR", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("SR", 0)).sum());
vcb.attribute("PE", buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).mapToInt(G -> G.getAttributeAsInt("PE", 0)).sum());
final Map<String, Genotype> sample2genotype = new HashMap<>(sampleNames.size());
buffer.stream().flatMap(V -> V.ctx.getGenotypes().stream()).filter(G -> isAvailableGenotype(G)).forEach(G -> {
sample2genotype.put(G.getSampleName(), G);
});
vcb.attribute(nSampleInfoHeaderLine.getID(), sample2genotype.size());
for (final String sn : sampleNames) {
if (!sample2genotype.containsKey(sn)) {
sample2genotype.put(sn, new GenotypeBuilder(sn, ALLELES_NO_CALLS).attribute("SU", 0).attribute("SR", 0).attribute("PE", 0).make());
}
}
vcb.genotypes(sample2genotype.values());
vcw.add(vcb.make());
}
cursor.close();
vcw.close();
vcw = null;
variantsDb1.close();
variantsDb1 = null;
environment.close();
environment = null;
return 0;
} catch (final Exception err) {
LOG.error(err);
return -1;
} finally {
CloserUtil.close(vcw);
CloserUtil.close(variantsDb1);
CloserUtil.close(environment);
}
}
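The read loop used twice above reduces to this sketch; it swaps jvarkit's VCFReaderFactory for htsjdk's plain VCFFileReader, and the file name is a placeholder:

try (final VCFFileReader reader = new VCFFileReader(new File("input.vcf.gz"), false);
     final CloseableIterator<VariantContext> it = reader.iterator()) {
    while (it.hasNext()) {
        final VariantContext ctx = it.next();
        // filter and rewrite ctx, then persist it (above: the Berkeley-DB put)
    }
}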
Use of htsjdk.samtools.util.CloseableIterator in project jvarkit by lindenb.
The IjgvdToVcf class, method doWork.
@Override
public int doWork(List<String> args) {
try {
final SAMSequenceDictionary dict = SequenceDictionaryUtils.extractRequired(this.fai);
this.ctgNameConverter = ContigNameConverter.fromOneDictionary(dict);
List<Path> zipPaths = IOUtils.unrollPaths(args);
List<CloseableIterator<VariantContext>> iterators = new ArrayList<>(zipPaths.size() * 2);
for (final Path zipPath : zipPaths) {
if (zipPath.getFileName().toString().endsWith(".tsv")) {
String fname = zipPath.getFileName().toString();
if (fname.endsWith("filtered.tsv") && skip_filtered) {
continue;
}
if (fname.endsWith("_multiallelic.tsv") && skip_multiallelic) {
continue;
}
final InputStream in = Files.newInputStream(zipPath);
iterators.add(new ZipIterator(fname, in));
continue;
}
for (int i = 0; i < 2; i++) {
final ZipInputStream zin = new ZipInputStream(Files.newInputStream(zipPath));
ZipEntry entry = null;
ZipIterator zipIter = null;
while ((entry = zin.getNextEntry()) != null) {
if (entry.getName().endsWith("filtered.tsv") && skip_filtered) {
zin.closeEntry();
continue;
}
if (entry.getName().endsWith("_multiallelic.tsv") && skip_multiallelic) {
zin.closeEntry();
continue;
}
if (i == 0 && !(entry.getName().endsWith("passed.tsv") || entry.getName().endsWith("multiallelic.tsv"))) {
zin.closeEntry();
continue;
}
if (i == 1 && !entry.getName().endsWith("filtered.tsv")) {
zin.closeEntry();
continue;
}
zipIter = new ZipIterator(entry.getName(), zin);
break;
}
if (zipIter == null) {
zin.close();
} else {
iterators.add(zipIter);
}
}
}
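// each ZipIterator emits variants sorted by (contig, start, ref); merge them into one globally sorted stream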
final ContigDictComparator contigDictComparator = new ContigDictComparator(dict);
final Comparator<VariantContext> comparator = (A, B) -> {
int i = contigDictComparator.compare(A.getContig(), B.getContig());
if (i != 0)
return i;
i = Integer.compare(A.getStart(), B.getStart());
if (i != 0)
return i;
return A.getReference().compareTo(B.getReference());
};
final MergingIterator<VariantContext> iter = new MergingIterator<>(comparator, iterators);
final VariantContextWriter vcw = writingVariantsDelegate.dictionary(dict).open(out);
final Set<VCFHeaderLine> metaData = new HashSet<>();
VCFStandardHeaderLines.addStandardInfoLines(metaData, true, VCFConstants.ALLELE_COUNT_KEY, VCFConstants.ALLELE_FREQUENCY_KEY, VCFConstants.ALLELE_NUMBER_KEY);
metaData.add(new VCFInfoHeaderLine(REF_ALLELE_FREQ, 1, VCFHeaderLineType.Float, "Ref Allele Freq."));
metaData.add(new VCFInfoHeaderLine(ALT_ALLELE_FREQ, 1, VCFHeaderLineType.Float, "Alt Allele Freq."));
metaData.add(new VCFInfoHeaderLine(REF_ALLELE_COUNT, 1, VCFHeaderLineType.Integer, "Ref Allele count"));
metaData.add(new VCFInfoHeaderLine(ALT_ALLELE_COUNT, 1, VCFHeaderLineType.Integer, "Alt Allele count."));
metaData.add(new VCFInfoHeaderLine(TOTAL_ALLELES_COUNT, 1, VCFHeaderLineType.Integer, "Total allele count"));
metaData.add(new VCFInfoHeaderLine(N_SAMPLES, 1, VCFHeaderLineType.Integer, "N samples"));
metaData.add(new VCFFilterHeaderLine(MULTIALLELIC, "multiallelic"));
metaData.add(new VCFFilterHeaderLine(FILTER1, "filtered in input"));
VCFHeader header = new VCFHeader(metaData);
header.setSequenceDictionary(dict);
JVarkitVersion.getInstance().addMetaData(this, header);
vcw.writeHeader(header);
ProgressFactory.Watcher<VariantContext> progress = ProgressFactory.newInstance().dictionary(dict).logger(LOG).build();
while (iter.hasNext()) {
final VariantContext ctx = progress.apply(iter.next());
vcw.add(ctx);
}
vcw.close();
iter.close();
progress.close();
return 0;
} catch (Exception e) {
LOG.error(e);
return -1;
}
}
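MergingIterator performs a k-way merge over iterators that are each already sorted by the supplied comparator; a sketch assuming iterators and a VariantContextWriter named writer as above, with a simplified comparator (the real code orders contigs by dictionary index, not lexicographically):

final Comparator<VariantContext> cmp = Comparator
        .comparing(VariantContext::getContig)
        .thenComparingInt(VariantContext::getStart);
final MergingIterator<VariantContext> merged = new MergingIterator<>(cmp, iterators);
try {
    while (merged.hasNext()) {
        writer.add(merged.next()); // variants emerge in global sort order
    }
} finally {
    merged.close(); // also closes every underlying iterator
}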
Use of htsjdk.samtools.util.CloseableIterator in project jvarkit by lindenb.
The GcPercentAndDepth class, method doWork.
@Override
public int doWork(final List<String> args) {
if (this.windowSize <= 0) {
LOG.error("Bad window size.");
return -1;
}
if (this.windowStep <= 0) {
LOG.error("Bad window step.");
return -1;
}
if (this.refFile == null) {
LOG.error("Undefined REF File");
return -1;
}
if (args.isEmpty()) {
LOG.error("Illegal Number of arguments.");
return -1;
}
ReferenceSequenceFile indexedFastaSequenceFile = null;
final List<SamReader> readers = new ArrayList<>();
PrintWriter out = null;
try {
LOG.info("Loading " + this.refFile);
indexedFastaSequenceFile = ReferenceSequenceFileFactory.getReferenceSequenceFile(this.refFile);
this.samSequenceDictionary = SequenceDictionaryUtils.extractRequired(indexedFastaSequenceFile);
if (this.samSequenceDictionary == null) {
LOG.error("Cannot get sequence dictionary for " + this.refFile);
return -1;
}
out = super.openPathOrStdoutAsPrintWriter(outPutFile);
final Set<String> all_samples = new TreeSet<>();
/* create input, collect sample names */
for (int optind = 0; optind < args.size(); ++optind) {
LOG.info("Opening " + args.get(optind));
final SamReader samFileReaderScan = super.openSamReader(args.get(optind));
readers.add(samFileReaderScan);
final SAMFileHeader header = samFileReaderScan.getFileHeader();
if (!SequenceUtil.areSequenceDictionariesEqual(this.samSequenceDictionary, header.getSequenceDictionary())) {
LOG.error(JvarkitException.DictionariesAreNotTheSame.getMessage(this.samSequenceDictionary, header.getSequenceDictionary()));
return -1;
}
for (final SAMReadGroupRecord g : header.getReadGroups()) {
final String sample = this.partition.apply(g);
if (StringUtil.isBlank(sample)) {
LOG.warning("Read group " + g.getId() + " has no sample in merged dictionary");
continue;
}
all_samples.add(sample);
}
}
LOG.info("N " + this.partition.name() + "=" + all_samples.size());
/* print header */
out.print("#");
if (!this.hide_genomic_index) {
out.print("id");
out.print("\t");
}
out.print("chrom");
out.print("\t");
out.print("start");
out.print("\t");
out.print("end");
out.print("\t");
out.print("GCPercent");
for (final String sample : all_samples) {
out.print("\t");
out.print(sample);
}
out.println();
final List<RegionCaptured> regionsCaptured = new ArrayList<>();
if (bedFile != null) {
LOG.info("Reading BED:" + bedFile);
try (BedLineReader r = new BedLineReader(bedFile)) {
r.stream().filter(B -> B != null).forEach(B -> {
final SAMSequenceRecord ssr = this.samSequenceDictionary.getSequence(B.getContig());
if (ssr == null) {
LOG.warning("Cannot resolve " + B.getContig());
return;
}
final RegionCaptured roi = new RegionCaptured(ssr, B.getStart() - 1, B.getEnd());
regionsCaptured.add(roi);
});
}
LOG.info("end Reading BED:" + bedFile);
Collections.sort(regionsCaptured);
} else {
LOG.info("No capture, peeking everything");
for (final SAMSequenceRecord ssr : this.samSequenceDictionary.getSequences()) {
final RegionCaptured roi = new RegionCaptured(ssr, 0, ssr.getSequenceLength());
regionsCaptured.add(roi);
}
}
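// for each captured region: build one depth array per sample, then slide windows computing GC% and mean depth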
final SAMSequenceDictionaryProgress progress = new SAMSequenceDictionaryProgress(this.samSequenceDictionary).logger(LOG);
GenomicSequence genomicSequence = null;
for (final RegionCaptured roi : regionsCaptured) {
if (genomicSequence == null || !genomicSequence.getChrom().equals(roi.getContig())) {
genomicSequence = new GenomicSequence(indexedFastaSequenceFile, roi.getContig());
}
final Map<String, int[]> sample2depth = new HashMap<>();
final Map<String, Double> sample2meanDepth = new HashMap<>();
for (final String sample : all_samples) {
int[] depth = new int[roi.length()];
Arrays.fill(depth, 0);
sample2depth.put(sample, depth);
}
final List<CloseableIterator<SAMRecord>> iterators = new ArrayList<>();
for (final SamReader r : readers) {
iterators.add(r.query(roi.getContig(), roi.getStart(), roi.getEnd(), false));
}
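// merge the per-BAM overlap queries into a single coordinate-sorted record stream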
final MergingIterator<SAMRecord> merginIter = new MergingIterator<>(new SAMRecordCoordinateComparator(), iterators);
while (merginIter.hasNext()) {
final SAMRecord rec = merginIter.next();
if (rec.getReadUnmappedFlag())
continue;
if (this.filter.filterOut(rec))
continue;
final String sample = this.partition.getPartion(rec, null);
if (sample == null)
continue;
final int[] depth = sample2depth.get(sample);
if (depth == null)
continue;
final Cigar cigar = rec.getCigar();
if (cigar == null)
continue;
int refpos1 = rec.getAlignmentStart();
for (final CigarElement ce : cigar.getCigarElements()) {
final CigarOperator op = ce.getOperator();
if (!op.consumesReferenceBases())
continue;
if (op.consumesReadBases()) {
for (int i = 0; i < ce.getLength(); ++i) {
if (refpos1 + i < roi.getStart())
continue;
if (refpos1 + i > roi.getEnd())
break;
depth[refpos1 + i - roi.getStart()]++;
}
}
refpos1 += ce.getLength();
}
}
merginIter.close();
for (final RegionCaptured.SlidingWindow win : roi) {
double total = 0f;
int countN = 0;
for (int pos1 = win.getStart(); pos1 <= win.getEnd(); ++pos1) {
switch(genomicSequence.charAt(pos1 - 1)) {
case 'c':
case 'C':
case 'g':
case 'G':
case 's':
case 'S':
{
total++;
break;
}
case 'n':
case 'N':
countN++;
break;
default:
break;
}
}
if (skip_if_contains_N && countN > 0)
continue;
double GCPercent = total / (double) win.length();
int max_depth_for_win = 0;
sample2meanDepth.clear();
for (final String sample : all_samples) {
int[] depth = sample2depth.get(sample);
double sum = 0;
for (int pos = win.getStart(); pos <= win.getEnd() && (pos - roi.getStart()) < depth.length; ++pos) {
sum += depth[pos - roi.getStart()];
}
// average depth over the window itself, not over the whole captured region
final double mean = sum / (double) win.length();
max_depth_for_win = Math.max(max_depth_for_win, (int) mean);
sample2meanDepth.put(sample, mean);
}
if (max_depth_for_win < this.min_depth)
continue;
if (!this.hide_genomic_index) {
out.print(win.getGenomicIndex());
out.print("\t");
}
out.print(win.getContig());
out.print("\t");
out.print(win.getStart() - 1);
out.print("\t");
out.print(win.getEnd());
out.print("\t");
out.printf("%.2f", GCPercent);
for (String sample : all_samples) {
out.print("\t");
out.printf("%.2f", (double) sample2meanDepth.get(sample));
}
out.println();
}
}
progress.finish();
out.flush();
return 0;
} catch (Exception err) {
LOG.error(err);
return -1;
} finally {
for (SamReader r : readers) CloserUtil.close(r);
CloserUtil.close(indexedFastaSequenceFile);
CloserUtil.close(out);
}
}
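The overlap queries opened at the top of the region loop follow this pattern; a minimal sketch with placeholder file and coordinates (the BAM must be indexed for random access; contained=false returns any read overlapping the interval):

try (final SamReader reader = SamReaderFactory.makeDefault().open(new File("sample.bam"));
     final CloseableIterator<SAMRecord> it = reader.query("chr1", 10_000, 20_000, false)) {
    while (it.hasNext()) {
        final SAMRecord rec = it.next();
        // walk rec.getCigar() and accumulate coverage, as the depth loop above does
    }
}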
Use of htsjdk.samtools.util.CloseableIterator in project jvarkit by lindenb.
The SamReadLengthDistribution class, method scan.
private void scan(final SamReader in, Path pathName) throws IOException {
final String defName = (pathName == null ? "STDIN" : pathName.toString()) + "#" + this.partition.name();
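// pre-register a DiscreteMedian for every read-group sample up front, so each appears even if no read is retained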
in.getFileHeader().getReadGroups().stream().map(RG -> this.partition.apply(RG)).map(S -> StringUtils.isBlank(S) ? defName : S).filter(S -> !this.sample2discreteMedian.containsKey(S)).forEach(S -> this.sample2discreteMedian.put(S, new DiscreteMedian<Integer>()));
final CloseableIterator<SAMRecord> iter = openSamIterator(in);
while (iter.hasNext()) {
final SAMRecord rec = iter.next();
if (rec.getReadFailsVendorQualityCheckFlag())
continue;
if (rec.getDuplicateReadFlag())
continue;
if (rec.isSecondaryOrSupplementary())
continue;
if (!rec.getReadUnmappedFlag() && rec.getMappingQuality() < this.mapq)
continue;
final String sampleName = this.partition.getPartion(rec, defName);
DiscreteMedian<Integer> counter = this.sample2discreteMedian.get(sampleName);
if (counter == null) {
counter = new DiscreteMedian<>();
this.sample2discreteMedian.put(sampleName, counter);
}
final int len;
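// compute the per-read length according to the selected statistic: read length, reference span, padded span, or insert size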
switch(this.method) {
case SEQ_LENGTH:
len = rec.getReadLength();
break;
case CIGAR_REF_LENGTH:
{
if (rec.getReadUnmappedFlag())
continue;
final Cigar c = rec.getCigar();
if (c == null || c.isEmpty())
continue;
len = c.getReferenceLength();
break;
}
case CIGAR_PADDED_REF_LENGTH:
{
if (rec.getReadUnmappedFlag())
continue;
final Cigar c = rec.getCigar();
if (c == null || c.isEmpty())
continue;
len = c.getPaddedReferenceLength();
break;
}
case INSERT_LENGTH:
{
if (rec.getReadUnmappedFlag())
continue;
if (!rec.getReadPairedFlag())
continue;
if (rec.getMateUnmappedFlag())
continue;
if (!rec.getContig().equals(rec.getMateReferenceName()))
continue;
// ignore 2nd
if (!rec.getFirstOfPairFlag())
continue;
len = Math.abs(rec.getInferredInsertSize());
break;
}
default:
throw new IllegalStateException("unsupported method " + this.method);
}
counter.add(len);
}
iter.close();
}
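Stripped to its core, scan iterates, filters, and collects one statistic per sample. A sketch of the simple SEQ_LENGTH case, using plain JDK collections instead of jvarkit's DiscreteMedian; the fallback sample name is hypothetical:

final Map<String, List<Integer>> lengthsBySample = new HashMap<>();
try (final CloseableIterator<SAMRecord> it = in.iterator()) {
    while (it.hasNext()) {
        final SAMRecord rec = it.next();
        if (rec.getReadFailsVendorQualityCheckFlag()
                || rec.getDuplicateReadFlag()
                || rec.isSecondaryOrSupplementary()) {
            continue; // same vendor-fail / duplicate / secondary filters as above
        }
        final String sample = rec.getReadGroup() == null
                ? "UNDEFINED" // hypothetical fallback when a read has no read group
                : rec.getReadGroup().getSample();
        lengthsBySample.computeIfAbsent(sample, K -> new ArrayList<>())
                .add(rec.getReadLength());
    }
}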