Search in sources:

Example 1 with IndexAccumulation

Use of org.nd4j.linalg.api.ops.IndexAccumulation in project nd4j by deeplearning4j.

From the class OpExecutionerTests, method testIMin.

@Test
public void testIMin() {
    // Ascending values 1..10: the minimum sits at index 0.
    INDArray values = Nd4j.linspace(1, 10, 10);
    IndexAccumulation result = (IndexAccumulation) Nd4j.getExecutioner().exec(new IMin(values));
    int firstMinIdx = result.getFinalResult();
    assertEquals(0, firstMinIdx);
    // Negating in place reverses the ordering, so the old maximum
    // (index 9) becomes the new minimum.
    values.muli(-1);
    result = (IndexAccumulation) Nd4j.getExecutioner().exec(new IMin(values));
    int secondMinIdx = result.getFinalResult();
    assertEquals(9, secondMinIdx);
}
Also used : INDArray(org.nd4j.linalg.api.ndarray.INDArray) IMin(org.nd4j.linalg.api.ops.impl.indexaccum.IMin) IndexAccumulation(org.nd4j.linalg.api.ops.IndexAccumulation) BaseNd4jTest(org.nd4j.linalg.BaseNd4jTest) Test(org.junit.Test)

Example 2 with IndexAccumulation

Use of org.nd4j.linalg.api.ops.IndexAccumulation in project nd4j by deeplearning4j.

From the class CudaIndexReduceTests, method testPinnedIMin.

@Test
public void testPinnedIMin() throws Exception {
    // Guard clause: bail out unless the CUDA (Jcublas) backend is active.
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());
    INDArray input = Nd4j.create(new float[] { 1.0f, 0.1f, 2.0f, 3.0f, 4.0f, 5.0f });
    // The smallest value (0.1f) lives at index 1.
    IndexAccumulation accumulation = (IndexAccumulation) Nd4j.getExecutioner().exec(new IMin(input));
    int minIndex = accumulation.getFinalResult();
    System.out.println("Array1: " + input);
    assertEquals(1, minIndex);
}
Also used : INDArray(org.nd4j.linalg.api.ndarray.INDArray) IMin(org.nd4j.linalg.api.ops.impl.indexaccum.IMin) IndexAccumulation(org.nd4j.linalg.api.ops.IndexAccumulation) Test(org.junit.Test)

Example 3 with IndexAccumulation

Use of org.nd4j.linalg.api.ops.IndexAccumulation in project nd4j by deeplearning4j.

From the class CudaIndexReduceTests, method testPinnedIMax.

@Test
public void testPinnedIMax() throws Exception {
    // Guard clause: bail out unless the CUDA (Jcublas) backend is active.
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());
    INDArray input = Nd4j.create(new float[] { 1.0f, 0.1f, 2.0f, 3.0f, 4.0f, 5.0f });
    // The largest value (5.0f) lives at the last index, 5.
    IndexAccumulation accumulation = (IndexAccumulation) Nd4j.getExecutioner().exec(new IMax(input));
    int maxIndex = accumulation.getFinalResult();
    System.out.println("Array1: " + input);
    assertEquals(5, maxIndex);
}
Also used : INDArray(org.nd4j.linalg.api.ndarray.INDArray) IMax(org.nd4j.linalg.api.ops.impl.indexaccum.IMax) IndexAccumulation(org.nd4j.linalg.api.ops.IndexAccumulation) Test(org.junit.Test)

Example 4 with IndexAccumulation

Use of org.nd4j.linalg.api.ops.IndexAccumulation in project nd4j by deeplearning4j.

From the class CudaIndexReduceTests, method testPinnedIMax3.

@Test
public void testPinnedIMax3() throws Exception {
    // Guard clause: bail out unless the CUDA (Jcublas) backend is active.
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());
    INDArray input = Nd4j.create(new float[] { 6.0f, 0.1f, 2.0f, 3.0f, 7.0f, 9.0f });
    // Unlike testPinnedIMax, the data is not monotonic; the maximum
    // (9.0f) still happens to sit at the last index, 5.
    IndexAccumulation accumulation = (IndexAccumulation) Nd4j.getExecutioner().exec(new IMax(input));
    int maxIndex = accumulation.getFinalResult();
    System.out.println("Array1: " + input);
    assertEquals(5, maxIndex);
}
Also used : INDArray(org.nd4j.linalg.api.ndarray.INDArray) IMax(org.nd4j.linalg.api.ops.impl.indexaccum.IMax) IndexAccumulation(org.nd4j.linalg.api.ops.IndexAccumulation) Test(org.junit.Test)

Example 5 with IndexAccumulation

Use of org.nd4j.linalg.api.ops.IndexAccumulation in project deeplearning4j by deeplearning4j.

From the class BagOfWordsVectorizerTest, method testBagOfWordsVectorizer.

@Test
public void testBagOfWordsVectorizer() throws Exception {
    // Build a vectorizer over a two-label corpus bundled as a test resource.
    File rootDir = new ClassPathResource("rootdir").getFile();
    LabelAwareSentenceIterator iter = new LabelAwareFileSentenceIterator(rootDir);
    List<String> labels = Arrays.asList("label1", "label2");
    TokenizerFactory tokenizerFactory = new DefaultTokenizerFactory();
    // minWordFrequency=1 keeps every token; parallel tokenization disabled
    // so counts are deterministic for the assertions below.
    BagOfWordsVectorizer vectorizer = new BagOfWordsVectorizer.Builder().setMinWordFrequency(1).setStopWords(new ArrayList<String>()).setTokenizerFactory(tokenizerFactory).setIterator(iter).allowParallelTokenization(false).build();
    vectorizer.fit();
    // "file." should appear in both documents of the corpus.
    VocabWord word = vectorizer.getVocabCache().wordFor("file.");
    assumeNotNull(word);
    assertEquals(word, vectorizer.getVocabCache().tokenFor("file."));
    assertEquals(2, vectorizer.getVocabCache().totalNumberOfDocs());
    assertEquals(2, word.getSequencesCount());
    assertEquals(2, word.getElementFrequency(), 0.1);
    // "1" appears in only one of the two documents.
    VocabWord word1 = vectorizer.getVocabCache().wordFor("1");
    assertEquals(1, word1.getSequencesCount());
    assertEquals(1, word1.getElementFrequency(), 0.1);
    log.info("Labels used: " + vectorizer.getLabelsSource().getLabels());
    assertEquals(2, vectorizer.getLabelsSource().getNumberOfLabelsUsed());
    ///////////////////
    // transform() produces a bag-of-words count vector; because the corpus
    // contributes counts too, repeated corpus words score 2 here.
    INDArray array = vectorizer.transform("This is 2 file.");
    log.info("Transformed array: " + array);
    assertEquals(5, array.columns());
    VocabCache<VocabWord> vocabCache = vectorizer.getVocabCache();
    assertEquals(2, array.getDouble(vocabCache.tokenFor("This").getIndex()), 0.1);
    assertEquals(2, array.getDouble(vocabCache.tokenFor("is").getIndex()), 0.1);
    assertEquals(2, array.getDouble(vocabCache.tokenFor("file.").getIndex()), 0.1);
    assertEquals(0, array.getDouble(vocabCache.tokenFor("1").getIndex()), 0.1);
    assertEquals(1, array.getDouble(vocabCache.tokenFor("2").getIndex()), 0.1);
    // vectorize() pairs the feature vector with a one-hot label row.
    DataSet dataSet = vectorizer.vectorize("This is 2 file.", "label2");
    assertEquals(array, dataSet.getFeatureMatrix());
    INDArray labelz = dataSet.getLabels();
    log.info("Labels array: " + labelz);
    // IMax over the one-hot label row recovers the label's index.
    int idx2 = ((IndexAccumulation) Nd4j.getExecutioner().exec(new IMax(labelz))).getFinalResult();
    //        assertEquals(1.0, dataSet.getLabels().getDouble(0), 0.1);
    //        assertEquals(0.0, dataSet.getLabels().getDouble(1), 0.1);
    // Vectorize the other label; feature counts for "1"/"2" swap accordingly.
    dataSet = vectorizer.vectorize("This is 1 file.", "label1");
    assertEquals(2, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("This").getIndex()), 0.1);
    assertEquals(2, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("is").getIndex()), 0.1);
    assertEquals(2, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("file.").getIndex()), 0.1);
    assertEquals(1, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("1").getIndex()), 0.1);
    assertEquals(0, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("2").getIndex()), 0.1);
    int idx1 = ((IndexAccumulation) Nd4j.getExecutioner().exec(new IMax(dataSet.getLabels()))).getFinalResult();
    //assertEquals(0.0, dataSet.getLabels().getDouble(0), 0.1);
    //assertEquals(1.0, dataSet.getLabels().getDouble(1), 0.1);
    // Different labels must map to different one-hot positions.
    assertNotEquals(idx2, idx1);
    // Serialization check
    // Round-trip through Java serialization; the tokenizer factory is
    // transient, so it must be re-attached before reuse.
    File tempFile = File.createTempFile("fdsf", "fdfsdf");
    tempFile.deleteOnExit();
    SerializationUtils.saveObject(vectorizer, tempFile);
    BagOfWordsVectorizer vectorizer2 = SerializationUtils.readObject(tempFile);
    vectorizer2.setTokenizerFactory(tokenizerFactory);
    dataSet = vectorizer2.vectorize("This is 2 file.", "label2");
    assertEquals(array, dataSet.getFeatureMatrix());
}
Also used : TokenizerFactory(org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory) DefaultTokenizerFactory(org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory) DataSet(org.nd4j.linalg.dataset.DataSet) VocabWord(org.deeplearning4j.models.word2vec.VocabWord) ClassPathResource(org.datavec.api.util.ClassPathResource) LabelAwareFileSentenceIterator(org.deeplearning4j.text.sentenceiterator.labelaware.LabelAwareFileSentenceIterator) DefaultTokenizerFactory(org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory) INDArray(org.nd4j.linalg.api.ndarray.INDArray) IMax(org.nd4j.linalg.api.ops.impl.indexaccum.IMax) LabelAwareSentenceIterator(org.deeplearning4j.text.sentenceiterator.labelaware.LabelAwareSentenceIterator) File(java.io.File) IndexAccumulation(org.nd4j.linalg.api.ops.IndexAccumulation) Test(org.junit.Test)

Aggregations

Test (org.junit.Test)14 INDArray (org.nd4j.linalg.api.ndarray.INDArray)14 IndexAccumulation (org.nd4j.linalg.api.ops.IndexAccumulation)14 IMax (org.nd4j.linalg.api.ops.impl.indexaccum.IMax)10 BaseNd4jTest (org.nd4j.linalg.BaseNd4jTest)4 IMin (org.nd4j.linalg.api.ops.impl.indexaccum.IMin)4 NDArrayIndex.point (org.nd4j.linalg.indexing.NDArrayIndex.point)2 File (java.io.File)1 ClassPathResource (org.datavec.api.util.ClassPathResource)1 VocabWord (org.deeplearning4j.models.word2vec.VocabWord)1 LabelAwareFileSentenceIterator (org.deeplearning4j.text.sentenceiterator.labelaware.LabelAwareFileSentenceIterator)1 LabelAwareSentenceIterator (org.deeplearning4j.text.sentenceiterator.labelaware.LabelAwareSentenceIterator)1 DefaultTokenizerFactory (org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory)1 TokenizerFactory (org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory)1 DataSet (org.nd4j.linalg.dataset.DataSet)1