Search in sources :

Example 6 with StructureDataInterface

use of org.rcsb.mmtf.api.StructureDataInterface in project mm-dev by sbl-sdsc.

From the class MergeMmtf, the method MergeStructures:

/**
 * Merges several single-model structures into one combined structure.
 *
 * @param structureId identifier assigned to the merged structure
 * @param structures the structures to combine; each must contain exactly one model
 * @return the merged, finalized structure
 * @throws IllegalArgumentException if any input structure has more than one model
 */
public static StructureDataInterface MergeStructures(String structureId, StructureDataInterface... structures) {
    // Merging is only defined for single-model structures; reject anything else up front.
    for (StructureDataInterface structure : structures) {
        if (structure.getNumModels() != 1) {
            throw new IllegalArgumentException("ERROR: Cannot merge structures with more than one model");
        }
    }
    // Build the combined structure: set up header/global data, copy entity
    // records, then append each input structure in turn.
    AdapterToStructureData merged = new AdapterToStructureData();
    initStructure(structureId, structures, merged);
    addEntityInfo(structures, merged);
    for (StructureDataInterface input : structures) {
        addStructure(input, merged);
    }
    merged.finalizeStructure();
    return merged;
}
Also used : AdapterToStructureData(org.rcsb.mmtf.encoder.AdapterToStructureData) StructureDataInterface(org.rcsb.mmtf.api.StructureDataInterface)

Example 7 with StructureDataInterface

use of org.rcsb.mmtf.api.StructureDataInterface in project mm-dev by sbl-sdsc.

From the class D3RLigandProteinMerger, the method main:

/**
 * Joins ligand and protein structures found under a hard-coded directory by
 * file name (without extension), merges each matched pair into a single
 * complex, prints chain info for every complex, and reports the elapsed time.
 *
 * @param args not used
 * @throws IOException if reading the input files fails
 */
public static void main(String[] args) throws IOException {
    long start = System.nanoTime();

    // Set up a local Spark context.
    SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("D3RLigandProteinMerger");
    JavaSparkContext sc = new JavaSparkContext(conf);

    // String path = "/Users/peter/Downloads/Pose_prediction/417-1-hciq4/";
    String path = "/Users/peter/Downloads/Pose_prediction/";

    // Key both datasets by file name without extension so matching
    // ligand/protein entries share a join key.
    JavaPairRDD<String, StructureDataInterface> ligands =
            Molmporter.importMolFiles(path, sc)
                    .mapToPair(t -> new Tuple2<String, StructureDataInterface>(removeExtension(t._1), t._2));
    JavaPairRDD<String, StructureDataInterface> proteins =
            MmtfImporter.importPdbFiles(path, sc)
                    .mapToPair(t -> new Tuple2<String, StructureDataInterface>(removeExtension(t._1), t._2));

    // Join protein/ligand pairs on the shared key and merge each pair into one complex.
    JavaPairRDD<String, Tuple2<StructureDataInterface, StructureDataInterface>> joined = proteins.join(ligands);
    JavaPairRDD<String, StructureDataInterface> complexes = joined.mapToPair(
            t -> new Tuple2<String, StructureDataInterface>(t._1, MergeMmtf.MergeStructures(t._1, t._2._1, t._2._2)));

    complexes.foreach(t -> TraverseStructureHierarchy.printChainInfo(t._2));
    // System.out.println("Complexes: " + complexes.count());
    // complexes.keys().foreach(k -> System.out.println(k));
    // TraverseStructureHierarchy.printChainInfo(complexes.first()._2);

    sc.close();
    long end = System.nanoTime();
    System.out.println("Time: " + (end - start) / 1E9 + " sec.");
}
Also used : MmtfImporter(edu.sdsc.mmtf.spark.io.MmtfImporter) Arrays(java.util.Arrays) SparkConf(org.apache.spark.SparkConf) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) Set(java.util.Set) IOException(java.io.IOException) Tuple2(scala.Tuple2) JavaPairRDD(org.apache.spark.api.java.JavaPairRDD) File(java.io.File) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) TraverseStructureHierarchy(edu.sdsc.mmtf.spark.io.demos.TraverseStructureHierarchy) List(java.util.List) MergeMmtf(edu.sdsc.mm.dev.io.MergeMmtf) Molmporter(edu.sdsc.mm.dev.io.Molmporter) StructureDataInterface(org.rcsb.mmtf.api.StructureDataInterface) SynchronizedSortedBag(org.apache.commons.collections.bag.SynchronizedSortedBag) Path(java.nio.file.Path) Files(org.spark_project.guava.io.Files) JavaRDD(org.apache.spark.api.java.JavaRDD) Tuple2(scala.Tuple2) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) StructureDataInterface(org.rcsb.mmtf.api.StructureDataInterface) SparkConf(org.apache.spark.SparkConf)

Example 8 with StructureDataInterface

use of org.rcsb.mmtf.api.StructureDataInterface in project mm-dev by sbl-sdsc.

From the class DownloadSwissModelFiles, the method main:

/**
 * Downloads SWISS-MODEL structures for a hard-coded list of UniProt IDs and
 * prints the structure data of each downloaded model.
 *
 * <p>Command-line arguments are ignored; the UniProt IDs are hard-coded below.
 *
 * @param args not used
 *
 * @throws FileNotFoundException
 */
public static void main(String[] args) throws FileNotFoundException {
    // instantiate Spark
    SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("DownloadSwissProtModelFiles");
    JavaSparkContext sc = new JavaSparkContext(conf);
    List<String> uniProtIds = Arrays.asList("P22629", "Q9H2C2", "Q8WXK3");
    // List<String> uniProtIds = Arrays.asList("P07900");
    // download the SWISS-MODEL structures for the UniProt IDs above
    // TODO: Empty structure record for Q8WXK3
    JavaPairRDD<String, StructureDataInterface> structures = MmtfImporter.downloadSwissModelsByUniProtIds(uniProtIds, sc);
    structures.foreach(t -> TraverseStructureHierarchy.printStructureData(t._2));
    // save as an MMTF-Hadoop Sequence File
    // MmtfWriter.writeSequenceFile(mmtfPath, sc, structures);
    // close Spark
    sc.close();
}
Also used : JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) StructureDataInterface(org.rcsb.mmtf.api.StructureDataInterface) SparkConf(org.apache.spark.SparkConf)

Example 9 with StructureDataInterface

use of org.rcsb.mmtf.api.StructureDataInterface in project mm-dev by sbl-sdsc.

From the class PdbToMmtfFull, the method main:

/**
 * Converts a directory containing PDB files into an MMTF-Hadoop Sequence file.
 * The input directory is traversed recursively to find PDB files.
 *
 * @param args args[0] path to the PDB file directory, args[1] path for the
 *             output MMTF-Hadoop Sequence file
 *
 * @throws FileNotFoundException
 */
public static void main(String[] args) throws FileNotFoundException {
    if (args.length != 2) {
        // Fixed: exit on bad usage — previously execution fell through and
        // args[0] below threw ArrayIndexOutOfBoundsException. The usage
        // string also named the wrong program (RosettaToMmtfFull).
        System.out.println("Usage: PdbToMmtfFull <path-to-pdb_files> <path-to-mmtf-hadoop-file>");
        System.exit(1);
    }
    // path to input directory
    String pdbPath = args[0];
    // path to output directory
    String mmtfPath = args[1];
    // instantiate Spark
    SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("RosettaToMmtfFull");
    JavaSparkContext sc = new JavaSparkContext(conf);
    // read PDB files recursively starting the specified directory
    JavaPairRDD<String, StructureDataInterface> structures = MmtfImporter.importPdbFiles(pdbPath, sc);
    structures.foreach(t -> TraverseStructureHierarchy.printStructureData(t._2));
    // save as an MMTF-Hadoop Sequence File
    MmtfWriter.writeSequenceFile(mmtfPath, sc, structures);
    // close Spark
    sc.close();
}
Also used : JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) StructureDataInterface(org.rcsb.mmtf.api.StructureDataInterface) SparkConf(org.apache.spark.SparkConf)

Example 10 with StructureDataInterface

use of org.rcsb.mmtf.api.StructureDataInterface in project mm-dev by sbl-sdsc.

From the class RosettaToMmtfFull, the method main:

/**
 * Converts a directory containing Rosetta-style PDB files into an MMTF-Hadoop Sequence file.
 * The input directory is traversed recursively to find PDB files.
 *
 * <p> Example files from Gremlin website:
 * https://gremlin2.bakerlab.org/meta/aah4043_final.zip
 *
 * @param args args[0] path to the PDB file directory, args[1] path for the
 *             output MMTF-Hadoop Sequence file
 *
 * @throws FileNotFoundException
 */
public static void main(String[] args) throws FileNotFoundException {
    if (args.length != 2) {
        // Fixed: exit on bad usage — previously execution fell through and
        // args[0] below threw ArrayIndexOutOfBoundsException.
        System.out.println("Usage: RosettaToMmtfFull <path-to-pdb_files> <path-to-mmtf-hadoop-file>");
        System.exit(1);
    }
    // path to input directory
    String pdbPath = args[0];
    // path to output directory
    String mmtfPath = args[1];
    // instantiate Spark
    SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("RosettaToMmtfFull");
    JavaSparkContext sc = new JavaSparkContext(conf);
    // read PDB files recursively starting the specified directory
    JavaPairRDD<String, StructureDataInterface> structures = MmtfImporter.importPdbFiles(pdbPath, sc);
    // save as an MMTF-Hadoop Sequence File
    MmtfWriter.writeSequenceFile(mmtfPath, sc, structures);
    // close Spark
    sc.close();
}
Also used : JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) StructureDataInterface(org.rcsb.mmtf.api.StructureDataInterface) SparkConf(org.apache.spark.SparkConf)

Aggregations

StructureDataInterface (org.rcsb.mmtf.api.StructureDataInterface)102 JavaSparkContext (org.apache.spark.api.java.JavaSparkContext)60 SparkConf (org.apache.spark.SparkConf)58 Row (org.apache.spark.sql.Row)27 StructureToPolymerChains (edu.sdsc.mmtf.spark.mappers.StructureToPolymerChains)22 Test (org.junit.Test)20 Pisces (edu.sdsc.mmtf.spark.webfilters.Pisces)19 ArrayList (java.util.ArrayList)12 ProteinSequenceEncoder (edu.sdsc.mmtf.spark.ml.ProteinSequenceEncoder)10 ColumnarStructure (edu.sdsc.mmtf.spark.utils.ColumnarStructure)10 Tuple2 (scala.Tuple2)9 Path (java.nio.file.Path)7 HashSet (java.util.HashSet)7 AdapterToStructureData (org.rcsb.mmtf.encoder.AdapterToStructureData)7 JavaPairRDD (org.apache.spark.api.java.JavaPairRDD)6 ContainsLProteinChain (edu.sdsc.mmtf.spark.filters.ContainsLProteinChain)5 List (java.util.List)5 Resolution (edu.sdsc.mmtf.spark.filters.Resolution)4 MmtfReader (edu.sdsc.mmtf.spark.io.MmtfReader)4 File (java.io.File)4