
Example 21 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testOutOfOrder:

@Test(expected = IllegalArgumentException.class)
public void testOutOfOrder() throws Exception {
    // test that exception declared in API is thrown
    Key k1 = new Key("r1", "f1", "q1");
    Value v1 = new Value("1".getBytes());
    Key k2 = new Key("r2", "f1", "q1");
    Value v2 = new Value("2".getBytes());
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    String testFile = createTmpTestFile();
    try (RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).build()) {
        writer.append(k2, v2);
        writer.append(k1, v1);
    }
}
Also used: SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration), NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration), Configuration(org.apache.hadoop.conf.Configuration), SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration), DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration), LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem), Value(org.apache.accumulo.core.data.Value), Key(org.apache.accumulo.core.data.Key), Test(org.junit.Test)
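
For contrast, here is a minimal sketch of the correct usage: the same writer accepts the keys when they are appended in sorted order. This sketch is not part of the test class; the class name, the temp-path construction via java.nio.file.Files, and the org.apache.accumulo.core.client.rfile package for RFile and RFileWriter are assumptions, since the builder-API imports are not shown above.

import java.nio.file.Files;

import org.apache.accumulo.core.client.rfile.RFile; // assumed package for the builder API
import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;

public class InOrderAppendSketch {
    public static void main(String[] args) throws Exception {
        LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
        // use a path that does not exist yet; the writer creates the file
        String testFile = Files.createTempDirectory("rfile-sketch").resolve("test.rf").toString();
        Key k1 = new Key("r1", "f1", "q1");
        Key k2 = new Key("r2", "f1", "q1");
        try (RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).build()) {
            // appending in ascending key order succeeds; reversing the two appends is what
            // testOutOfOrder above relies on to trigger IllegalArgumentException
            writer.append(k1, new Value("1".getBytes()));
            writer.append(k2, new Value("2".getBytes()));
        }
    }
}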

Example 22 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testDoubleStart:

@Test(expected = IllegalStateException.class)
public void testDoubleStart() throws Exception {
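    // a second call to startDefaultLocalityGroup() on the same writer is expected to throw IllegalStateException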
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    String testFile = createTmpTestFile();
    try (RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).build()) {
        writer.startDefaultLocalityGroup();
        writer.startDefaultLocalityGroup();
    }
}
Also used: SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration), NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration), Configuration(org.apache.hadoop.conf.Configuration), SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration), DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration), LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem), Test(org.junit.Test)
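
For comparison, a sketch of the intended call pattern: startDefaultLocalityGroup() is invoked at most once per writer, before appending. As with the previous sketch, the class name, the temp-path handling, and the org.apache.accumulo.core.client.rfile package are assumptions.

import java.nio.file.Files;

import org.apache.accumulo.core.client.rfile.RFile; // assumed package for the builder API
import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;

public class DefaultLocalityGroupSketch {
    public static void main(String[] args) throws Exception {
        LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
        String testFile = Files.createTempDirectory("rfile-sketch").resolve("lg.rf").toString();
        try (RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).build()) {
            // starting the default locality group a single time is fine; a second call is what
            // testDoubleStart above expects to fail with IllegalStateException
            writer.startDefaultLocalityGroup();
            writer.append(new Key("r1", "f1", "q1"), new Value("v1".getBytes()));
        }
    }
}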

Example 23 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testMultipleSources:

@Test
public void testMultipleSources() throws Exception {
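    // keys from both source files should appear merged into one sorted view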
    SortedMap<Key, Value> testData1 = createTestData(10, 10, 10);
    SortedMap<Key, Value> testData2 = createTestData(0, 10, 0, 10, 10);
    String testFile1 = createRFile(testData1);
    String testFile2 = createRFile(testData2);
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    Scanner scanner = RFile.newScanner().from(testFile1, testFile2).withFileSystem(localFs).build();
    TreeMap<Key, Value> expected = new TreeMap<>(testData1);
    expected.putAll(testData2);
    Assert.assertEquals(expected, toMap(scanner));
    Range range = new Range(rowStr(3), true, rowStr(14), true);
    scanner.setRange(range);
    Assert.assertEquals(expected.subMap(range.getStartKey(), range.getEndKey()), toMap(scanner));
    scanner.close();
}
Also used: Scanner(org.apache.accumulo.core.client.Scanner), SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration), NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration), Configuration(org.apache.hadoop.conf.Configuration), SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration), DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration), LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem), Value(org.apache.accumulo.core.data.Value), TreeMap(java.util.TreeMap), Range(org.apache.accumulo.core.data.Range), Key(org.apache.accumulo.core.data.Key), Test(org.junit.Test)
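
The createTestData and createRFile helpers belong to the test class and are not shown here, so their exact behavior is unknown. Below is a plausible sketch of a createRFile-style helper built only from calls that appear in these examples (newWriter, to, withFileSystem, append of an entry set); the class and method names are hypothetical.

import java.util.SortedMap;

import org.apache.accumulo.core.client.rfile.RFile; // assumed package for the builder API
import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.fs.LocalFileSystem;

public class RFileWriteSketch {
    // hypothetical stand-in for the test's createRFile(testData) helper
    static String writeRFile(LocalFileSystem localFs, String path, SortedMap<Key, Value> data) throws Exception {
        try (RFileWriter writer = RFile.newWriter().to(path).withFileSystem(localFs).build()) {
            // a SortedMap's entrySet() iterates in key order, which is the order append requires
            writer.append(data.entrySet());
        }
        return path;
    }
}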

Example 24 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testIndependance:

@Test
public void testIndependance() throws Exception {
    // test to ensure two iterators allocated from same RFile scanner are independent.
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    SortedMap<Key, Value> testData = createTestData(10, 10, 10);
    String testFile = createRFile(testData);
    Scanner scanner = RFile.newScanner().from(testFile).withFileSystem(localFs).build();
    Range range1 = Range.exact(rowStr(5));
    scanner.setRange(range1);
    Iterator<Entry<Key, Value>> scnIter1 = scanner.iterator();
    Iterator<Entry<Key, Value>> mapIter1 = testData.subMap(range1.getStartKey(), range1.getEndKey()).entrySet().iterator();
    Range range2 = new Range(rowStr(3), true, rowStr(4), true);
    scanner.setRange(range2);
    Iterator<Entry<Key, Value>> scnIter2 = scanner.iterator();
    Iterator<Entry<Key, Value>> mapIter2 = testData.subMap(range2.getStartKey(), range2.getEndKey()).entrySet().iterator();
    while (scnIter1.hasNext() || scnIter2.hasNext()) {
        if (scnIter1.hasNext()) {
            Assert.assertTrue(mapIter1.hasNext());
            Assert.assertEquals(scnIter1.next(), mapIter1.next());
        } else {
            Assert.assertFalse(mapIter1.hasNext());
        }
        if (scnIter2.hasNext()) {
            Assert.assertTrue(mapIter2.hasNext());
            Assert.assertEquals(scnIter2.next(), mapIter2.next());
        } else {
            Assert.assertFalse(mapIter2.hasNext());
        }
    }
    Assert.assertFalse(mapIter1.hasNext());
    Assert.assertFalse(mapIter2.hasNext());
    scanner.close();
}
Also used: Scanner(org.apache.accumulo.core.client.Scanner), Entry(java.util.Map.Entry), SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration), NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration), Configuration(org.apache.hadoop.conf.Configuration), SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration), DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration), LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem), Value(org.apache.accumulo.core.data.Value), Range(org.apache.accumulo.core.data.Range), Key(org.apache.accumulo.core.data.Key), Test(org.junit.Test)
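
A side note on cleanup: the examples call scanner.close() explicitly. Assuming the Scanner returned by RFile.newScanner() is AutoCloseable, as in recent Accumulo releases, the same read can be written with try-with-resources; the helper below is a hypothetical sketch that scans one exact row.

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.rfile.RFile; // assumed package for the builder API
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.fs.LocalFileSystem;

public class RFileScanSketch {
    // hypothetical helper: print all entries of one exact row from an existing RFile
    static void printRow(LocalFileSystem localFs, String rfile, String row) throws Exception {
        try (Scanner scanner = RFile.newScanner().from(rfile).withFileSystem(localFs).build()) {
            scanner.setRange(Range.exact(row));
            for (Entry<Key, Value> entry : scanner) {
                System.out.println(entry.getKey() + " -> " + entry.getValue());
            }
        }
    }
}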

Example 25 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testWriterTableProperties:

@Test
public void testWriterTableProperties() throws Exception {
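    // small data and index block sizes should force the writer to create many index entries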
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    String testFile = createTmpTestFile();
    Map<String, String> props = new HashMap<>();
    props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
    props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), "1K");
    RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).withTableProperties(props).build();
    SortedMap<Key, Value> testData1 = createTestData(10, 10, 10);
    writer.append(testData1.entrySet());
    writer.close();
    Reader reader = getReader(localFs, testFile);
    FileSKVIterator iiter = reader.getIndex();
    int count = 0;
    while (iiter.hasTop()) {
        count++;
        iiter.next();
    }
    // if settings are used then should create multiple index entries
    Assert.assertTrue(count > 10);
    reader.close();
    Scanner scanner = RFile.newScanner().from(testFile).withFileSystem(localFs).build();
    Assert.assertEquals(testData1, toMap(scanner));
    scanner.close();
}
Also used: FileSKVIterator(org.apache.accumulo.core.file.FileSKVIterator), Scanner(org.apache.accumulo.core.client.Scanner), SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration), NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration), Configuration(org.apache.hadoop.conf.Configuration), SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration), DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration), HashMap(java.util.HashMap), Reader(org.apache.accumulo.core.file.rfile.RFile.Reader), LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem), Value(org.apache.accumulo.core.data.Value), Key(org.apache.accumulo.core.data.Key), Test(org.junit.Test)
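
Note that the index check above goes through Accumulo's internal RFile.Reader via the test's getReader helper, which is not shown here. For the write side alone, below is a sketch that passes the same two table properties as literal strings; the literal key names are assumptions about what TABLE_FILE_COMPRESSED_BLOCK_SIZE and TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX resolve to, so verify them against your Accumulo version.

import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;

import org.apache.accumulo.core.client.rfile.RFile; // assumed package for the builder API
import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;

public class SmallBlockRFileSketch {
    public static void main(String[] args) throws Exception {
        LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
        String testFile = Files.createTempDirectory("rfile-sketch").resolve("props.rf").toString();
        Map<String, String> props = new HashMap<>();
        // assumed literal keys for TABLE_FILE_COMPRESSED_BLOCK_SIZE and TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX
        props.put("table.file.compress.blocksize", "1K");
        props.put("table.file.compress.blocksize.index", "1K");
        try (RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs)
                .withTableProperties(props).build()) {
            for (int r = 0; r < 100; r++) {
                // zero-padded row ids keep the appended keys in sorted order
                writer.append(new Key(String.format("row%04d", r), "f1", "q1"),
                        new Value(("val" + r).getBytes()));
            }
        }
    }
}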

Aggregations

LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem): 121
Path (org.apache.hadoop.fs.Path): 77
Test (org.junit.Test): 64
Configuration (org.apache.hadoop.conf.Configuration): 57
FileSystem (org.apache.hadoop.fs.FileSystem): 35
IOException (java.io.IOException): 33
File (java.io.File): 23
NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration): 23
SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration): 23
SummarizerConfiguration (org.apache.accumulo.core.client.summary.SummarizerConfiguration): 23
DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration): 23
Key (org.apache.accumulo.core.data.Key): 22
Value (org.apache.accumulo.core.data.Value): 22
ArrayList (java.util.ArrayList): 19
ExecutorService (java.util.concurrent.ExecutorService): 15
Future (java.util.concurrent.Future): 15
Scanner (org.apache.accumulo.core.client.Scanner): 14
DataSegment (org.apache.druid.timeline.DataSegment): 13
DataSegmentPusher (org.apache.druid.segment.loading.DataSegmentPusher): 8
HdfsDataSegmentPusher (org.apache.druid.storage.hdfs.HdfsDataSegmentPusher): 8