Search in sources :

Example 26 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testBadVisIterable.

@Test(expected = IllegalArgumentException.class)
public void testBadVisIterable() throws Exception {
    // The Iterable overload of append must reject a key whose visibility
    // expression is malformed ("(A&(B" has an unbalanced parenthesis).
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    String path = createTmpTestFile();
    try (RFileWriter out = RFile.newWriter().to(path).withFileSystem(fs).build()) {
        out.startDefaultLocalityGroup();
        Key badVisKey = new Key("r1", "f1", "q1", "(A&(B");
        Value emptyValue = new Value("".getBytes());
        Entry<Key, Value> badEntry = new AbstractMap.SimpleEntry<>(badVisKey, emptyValue);
        out.append(Collections.singletonList(badEntry));
    }
}
Also used : SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Configuration(org.apache.hadoop.conf.Configuration) SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 27 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testWrongGroup.

@Test(expected = IllegalArgumentException.class)
public void testWrongGroup() throws Exception {
    // Once a column family is bound to a named locality group, appending it
    // to the default locality group must be rejected.
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    String path = createTmpTestFile();
    try (RFileWriter out = RFile.newWriter().to(path).withFileSystem(fs).build()) {
        out.startNewLocalityGroup("lg1", "fam1");
        out.append(new Key("r1", "fam1", "q1"), new Value("".getBytes()));
        out.startDefaultLocalityGroup();
        // fam1 already belongs to group "lg1", so this append must throw
        out.append(new Key("r1", "fam1", "q2"), new Value("".getBytes()));
    }
}
Also used : SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Configuration(org.apache.hadoop.conf.Configuration) SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 28 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testIllegalColumn.

@Test(expected = IllegalArgumentException.class)
public void testIllegalColumn() throws Exception {
    // Locality group "lg1" only admits family "fam1"; a key carrying
    // family "f1" must be rejected on append.
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    String path = createTmpTestFile();
    try (RFileWriter out = RFile.newWriter().to(path).withFileSystem(fs).build()) {
        out.startNewLocalityGroup("lg1", "fam1");
        // family "f1" is outside the group's declared families
        out.append(new Key("r1", "f1", "q1"), new Value("".getBytes()));
    }
}
Also used : SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Configuration(org.apache.hadoop.conf.Configuration) SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 29 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testLocalityGroups.

@Test
public void testLocalityGroups() throws Exception {
    // Writes two named locality groups plus the default group, then verifies
    // (1) each group can be scanned in isolation, (2) an unfiltered scan
    // merges back to the full data set, and (3) the file metadata records
    // the named groups and their column families.
    SortedMap<Key, Value> testData1 = createTestData(0, 10, 0, 2, 10);
    SortedMap<Key, Value> testData2 = createTestData(0, 10, 2, 1, 10);
    SortedMap<Key, Value> defaultData = createTestData(0, 10, 3, 7, 10);
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    String testFile = createTmpTestFile();
    // try-with-resources: the original leaked the writer if an append threw;
    // sibling tests in this file already use this pattern.
    try (RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).build()) {
        writer.startNewLocalityGroup("z", colStr(0), colStr(1));
        writer.append(testData1.entrySet());
        writer.startNewLocalityGroup("h", colStr(2));
        writer.append(testData2.entrySet());
        writer.startDefaultLocalityGroup();
        writer.append(defaultData.entrySet());
    }
    Scanner scanner = RFile.newScanner().from(testFile).withFileSystem(localFs).build();
    try {
        // Group "z": families 0 and 1.
        scanner.fetchColumnFamily(new Text(colStr(0)));
        scanner.fetchColumnFamily(new Text(colStr(1)));
        Assert.assertEquals(testData1, toMap(scanner));
        scanner.clearColumns();
        // Group "h": family 2.
        scanner.fetchColumnFamily(new Text(colStr(2)));
        Assert.assertEquals(testData2, toMap(scanner));
        scanner.clearColumns();
        // Default group: families 3 through 9.
        for (int i = 3; i < 10; i++) {
            scanner.fetchColumnFamily(new Text(colStr(i)));
        }
        Assert.assertEquals(defaultData, toMap(scanner));
        scanner.clearColumns();
        // No column filter: all groups merge into the complete data set.
        Assert.assertEquals(createTestData(10, 10, 10), toMap(scanner));
    } finally {
        // original leaked the scanner when an assertion failed
        scanner.close();
    }
    Reader reader = getReader(localFs, testFile);
    try {
        Map<String, ArrayList<ByteSequence>> lGroups = reader.getLocalityGroupCF();
        Assert.assertTrue(lGroups.containsKey("z"));
        // assertEquals gives an informative failure message, unlike
        // the original assertTrue(size() == 2)
        Assert.assertEquals(2, lGroups.get("z").size());
        Assert.assertTrue(lGroups.get("z").contains(new ArrayByteSequence(colStr(0))));
        Assert.assertTrue(lGroups.get("z").contains(new ArrayByteSequence(colStr(1))));
        Assert.assertTrue(lGroups.containsKey("h"));
        Assert.assertEquals(Arrays.asList(new ArrayByteSequence(colStr(2))), lGroups.get("h"));
    } finally {
        // original leaked the reader when an assertion failed
        reader.close();
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Configuration(org.apache.hadoop.conf.Configuration) SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) ArrayList(java.util.ArrayList) Reader(org.apache.accumulo.core.file.rfile.RFile.Reader) Text(org.apache.hadoop.io.Text) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) Value(org.apache.accumulo.core.data.Value) ArrayByteSequence(org.apache.accumulo.core.data.ArrayByteSequence) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 30 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project accumulo by apache.

From the class RFileTest, method testMultipleFilesAndCache.

@Test
public void testMultipleFilesAndCache() throws Exception {
    // Partition one data set across three RFiles, then read them back as a
    // single merged scan with index and data caching enabled.
    SortedMap<Key, Value> testData = createTestData(100, 10, 10);
    List<String> files = Arrays.asList(createTmpTestFile(), createTmpTestFile(), createTmpTestFile());
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    int numFiles = files.size();
    for (int i = 0; i < numFiles; i++) {
        try (RFileWriter writer = RFile.newWriter().to(files.get(i)).withFileSystem(localFs).build()) {
            // Route each entry to exactly one file based on its key's hash.
            for (Entry<Key, Value> entry : testData.entrySet()) {
                if (entry.getKey().hashCode() % numFiles == i) {
                    writer.append(entry.getKey(), entry.getValue());
                }
            }
        }
    }
    String[] sources = files.toArray(new String[0]);
    Scanner scanner = RFile.newScanner().from(sources).withFileSystem(localFs).withIndexCache(1000000).withDataCache(10000000).build();
    // The merged scan over all three files must reproduce the original data.
    Assert.assertEquals(testData, toMap(scanner));
    scanner.close();
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Configuration(org.apache.hadoop.conf.Configuration) SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Aggregations

LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem)121 Path (org.apache.hadoop.fs.Path)77 Test (org.junit.Test)64 Configuration (org.apache.hadoop.conf.Configuration)57 FileSystem (org.apache.hadoop.fs.FileSystem)35 IOException (java.io.IOException)33 File (java.io.File)23 NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)23 SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration)23 SummarizerConfiguration (org.apache.accumulo.core.client.summary.SummarizerConfiguration)23 DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration)23 Key (org.apache.accumulo.core.data.Key)22 Value (org.apache.accumulo.core.data.Value)22 ArrayList (java.util.ArrayList)19 ExecutorService (java.util.concurrent.ExecutorService)15 Future (java.util.concurrent.Future)15 Scanner (org.apache.accumulo.core.client.Scanner)14 DataSegment (org.apache.druid.timeline.DataSegment)13 DataSegmentPusher (org.apache.druid.segment.loading.DataSegmentPusher)8 HdfsDataSegmentPusher (org.apache.druid.storage.hdfs.HdfsDataSegmentPusher)8