Example 1 with VolumeImpl

Use of org.apache.accumulo.core.volume.VolumeImpl in project accumulo by apache.

From the class VolumeManagerImpl, method getLocalForTesting.

// for testing only
public static VolumeManager getLocalForTesting(String localBasePath) throws IOException {
    AccumuloConfiguration accConf = DefaultConfiguration.getInstance();
    Configuration hadoopConf = new Configuration();
    FileSystem localFS = FileSystem.getLocal(hadoopConf);
    Volume defaultLocalVolume = new VolumeImpl(localFS, localBasePath);
    return new VolumeManagerImpl(Collections.singletonMap("", defaultLocalVolume), accConf, hadoopConf);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), VolumeConfiguration (org.apache.accumulo.core.volume.VolumeConfiguration), Volume (org.apache.accumulo.core.volume.Volume), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), VolumeImpl (org.apache.accumulo.core.volume.VolumeImpl)
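
A minimal usage sketch of getLocalForTesting (the temp-directory base path and the exists() probe are illustrative assumptions, not part of the example above):

// Hypothetical caller: build a VolumeManager backed by the local
// filesystem for a unit test. The base path is an illustrative assumption.
VolumeManager vm = VolumeManagerImpl.getLocalForTesting(System.getProperty("java.io.tmpdir"));
// Path operations now resolve against the local filesystem volume.
boolean rootExists = vm.exists(new Path("file:///"));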

Example 2 with VolumeImpl

Use of org.apache.accumulo.core.volume.VolumeImpl in project accumulo by apache.

From the class TabletServerSyncCheckTest, method testFailureOnExplicitAppendFalseConf.

@Test
public void testFailureOnExplicitAppendFalseConf() {
    Configuration conf = new Configuration();
    conf.set(DFS_SUPPORT_APPEND, "false");
    FileSystem fs = new TestFileSystem(conf);
    assertThrows(RuntimeException.class, () -> new TestVolumeManagerImpl(Map.of("foo", new VolumeImpl(fs, "/"))));
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), VolumeImpl (org.apache.accumulo.core.volume.VolumeImpl), Test (org.junit.Test)
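
The constructor failure comes from a durability guard on append/sync support; below is a simplified sketch of that kind of check (the method name, exception message, and instanceof test are illustrative assumptions, not the actual Accumulo source):

// Illustrative guard: if append support is explicitly disabled, write-ahead
// log sync cannot be guaranteed, so refuse to build the VolumeManager.
// Method name and message are assumptions, not Accumulo's actual code.
static void ensureAppendEnabled(FileSystem fs) {
    if (fs instanceof DistributedFileSystem
        && !fs.getConf().getBoolean("dfs.support.append", true)) {
        throw new RuntimeException("append/sync not supported: dfs.support.append is false");
    }
}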

Example 3 with VolumeImpl

Use of org.apache.accumulo.core.volume.VolumeImpl in project accumulo by apache.

From the class VolumeManagerImpl, method get.

public static VolumeManager get(AccumuloConfiguration conf, final Configuration hadoopConf) throws IOException {
    final Map<String, Volume> volumes = new HashMap<>();
    // getVolumeUris supplies the configured volumes, or the "default"
    // Volume for Accumulo in case no volumes are specified
    Set<String> volumeStrings = VolumeConfiguration.getVolumeUris(conf);
    for (String volumeUriOrDir : volumeStrings) {
        if (volumeUriOrDir.isBlank())
            throw new IllegalArgumentException("Empty volume specified in configuration");
        if (volumeUriOrDir.startsWith("viewfs"))
            throw new IllegalArgumentException("Cannot use viewfs as a volume");
        // We require a URI here, fail if it doesn't look like one
        if (volumeUriOrDir.contains(":")) {
            volumes.put(volumeUriOrDir, new VolumeImpl(new Path(volumeUriOrDir), hadoopConf));
        } else {
            throw new IllegalArgumentException("Expected fully qualified URI for " + Property.INSTANCE_VOLUMES.getKey() + " got " + volumeUriOrDir);
        }
    }
    return new VolumeManagerImpl(volumes, conf, hadoopConf);
}
Also used: Path (org.apache.hadoop.fs.Path), Volume (org.apache.accumulo.core.volume.Volume), HashMap (java.util.HashMap), VolumeImpl (org.apache.accumulo.core.volume.VolumeImpl)
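
A minimal usage sketch of get (the HDFS namenode URI is an illustrative assumption; the ConfigurationCopy pattern mirrors Example 4 below):

// Build a VolumeManager from an explicit instance.volumes setting.
// The namenode URI is an illustrative assumption.
ConfigurationCopy conf = new ConfigurationCopy();
conf.set(Property.INSTANCE_VOLUMES, "hdfs://namenode:8020/accumulo");
VolumeManager vm = VolumeManagerImpl.get(conf, new Configuration());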

Example 4 with VolumeImpl

Use of org.apache.accumulo.core.volume.VolumeImpl in project accumulo by apache.

From the class Upgrader9to10Test, method testDropSortedMapWALs.

@Test
public void testDropSortedMapWALs() throws IOException {
    Configuration hadoopConf = new Configuration();
    ConfigurationCopy conf = new ConfigurationCopy();
    FileSystem fs = new Path("file:///").getFileSystem(hadoopConf);
    List<String> volumes = Arrays.asList("/vol1/", "/vol2/");
    Collection<Volume> vols = volumes.stream().map(s -> new VolumeImpl(fs, s)).collect(Collectors.toList());
    Set<String> fullyQualifiedVols = Set.of("file://vol1/", "file://vol2/");
    Set<String> recoveryDirs = Set.of("file://vol1/accumulo/recovery", "file://vol2/accumulo/recovery");
    conf.set(Property.INSTANCE_VOLUMES, String.join(",", fullyQualifiedVols));
    ServerContext context = createMock(ServerContext.class);
    Path recoveryDir1 = new Path("file://vol1/accumulo/recovery");
    Path recoveryDir2 = new Path("file://vol2/accumulo/recovery");
    VolumeManager volumeManager = createMock(VolumeManager.class);
    FileStatus[] dirs = new FileStatus[2];
    dirs[0] = createMock(FileStatus.class);
    Path dir0 = new Path("file://vol1/accumulo/recovery/A123456789");
    FileStatus[] dir0Files = new FileStatus[1];
    dir0Files[0] = createMock(FileStatus.class);
    dirs[1] = createMock(FileStatus.class);
    Path dir1 = new Path("file://vol1/accumulo/recovery/B123456789");
    FileStatus[] dir1Files = new FileStatus[1];
    dir1Files[0] = createMock(FileStatus.class);
    Path part1Dir = new Path("file://vol1/accumulo/recovery/B123456789/part-r-0000");
    expect(context.getVolumeManager()).andReturn(volumeManager).once();
    expect(context.getConfiguration()).andReturn(conf).once();
    expect(context.getHadoopConf()).andReturn(hadoopConf).once();
    expect(context.getRecoveryDirs()).andReturn(recoveryDirs).once();
    expect(volumeManager.getVolumes()).andReturn(vols).once();
    expect(volumeManager.exists(recoveryDir1)).andReturn(true).once();
    expect(volumeManager.exists(recoveryDir2)).andReturn(false).once();
    expect(volumeManager.listStatus(recoveryDir1)).andReturn(dirs).once();
    expect(dirs[0].getPath()).andReturn(dir0).once();
    expect(volumeManager.listStatus(dir0)).andReturn(dir0Files).once();
    expect(dir0Files[0].isDirectory()).andReturn(false).once();
    expect(dirs[1].getPath()).andReturn(dir1).once();
    expect(volumeManager.listStatus(dir1)).andReturn(dir1Files).once();
    expect(dir1Files[0].isDirectory()).andReturn(true).once();
    expect(dir1Files[0].getPath()).andReturn(part1Dir).once();
    expect(volumeManager.deleteRecursively(dir1)).andReturn(true).once();
    replay(context, volumeManager, dirs[0], dirs[1], dir0Files[0], dir1Files[0]);
    Upgrader9to10.dropSortedMapWALFiles(context);
}
Also used: Path (org.apache.hadoop.fs.Path), TableId (org.apache.accumulo.core.data.TableId), Arrays (java.util.Arrays), FileSystem (org.apache.hadoop.fs.FileSystem), Assert.assertThrows (org.junit.Assert.assertThrows), LoggerFactory (org.slf4j.LoggerFactory), DataFileColumnFamily (org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily), Text (org.apache.hadoop.io.Text), Mutation (org.apache.accumulo.core.data.Mutation), FileStatus (org.apache.hadoop.fs.FileStatus), ArrayList (java.util.ArrayList), ColumnUpdate (org.apache.accumulo.core.data.ColumnUpdate), Volume (org.apache.accumulo.core.volume.Volume), Key (org.apache.accumulo.core.data.Key), Configuration (org.apache.hadoop.conf.Configuration), EasyMock.replay (org.easymock.EasyMock.replay), Value (org.apache.accumulo.core.data.Value), EasyMock.createMock (org.easymock.EasyMock.createMock), Property (org.apache.accumulo.core.conf.Property), EasyMock.anyObject (org.easymock.EasyMock.anyObject), GcVolumeUtil (org.apache.accumulo.server.gc.GcVolumeUtil), Ample (org.apache.accumulo.core.metadata.schema.Ample), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), Logger (org.slf4j.Logger), Iterator (java.util.Iterator), ServerContext (org.apache.accumulo.server.ServerContext), Collection (java.util.Collection), MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException), Assert.assertTrue (org.junit.Assert.assertTrue), Set (java.util.Set), IOException (java.io.IOException), VolumeImpl (org.apache.accumulo.core.volume.VolumeImpl), Test (org.junit.Test), EasyMock.expect (org.easymock.EasyMock.expect), BULK_PREFIX (org.apache.accumulo.core.Constants.BULK_PREFIX), Authorizations (org.apache.accumulo.core.security.Authorizations), Collectors (java.util.stream.Collectors), AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), EasyMock.expectLastCall (org.easymock.EasyMock.expectLastCall), List (java.util.List), TreeMap (java.util.TreeMap), Assert.assertFalse (org.junit.Assert.assertFalse), ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Assert.assertEquals (org.junit.Assert.assertEquals), SortedMap (java.util.SortedMap), Scanner (org.apache.accumulo.core.client.Scanner)
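
The mock expectations above encode a simple rule; the sketch below restates it (derived from the test, not the actual Upgrader9to10 source):

// A recovery entry whose children include a directory (e.g. part-r-0000)
// is an old MapReduce-sorted WAL and is deleted recursively; an entry
// containing only plain files is the newer RFile form and is kept.
for (FileStatus walDir : volumeManager.listStatus(recoveryDir1)) {
    for (FileStatus child : volumeManager.listStatus(walDir.getPath())) {
        if (child.isDirectory()) {
            volumeManager.deleteRecursively(walDir.getPath());
            break;
        }
    }
}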

Aggregations

VolumeImpl (org.apache.accumulo.core.volume.VolumeImpl): 4
Volume (org.apache.accumulo.core.volume.Volume): 3
Configuration (org.apache.hadoop.conf.Configuration): 3
FileSystem (org.apache.hadoop.fs.FileSystem): 3
Path (org.apache.hadoop.fs.Path): 2
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2
IOException (java.io.IOException): 1
ArrayList (java.util.ArrayList): 1
Arrays (java.util.Arrays): 1
Collection (java.util.Collection): 1
HashMap (java.util.HashMap): 1
Iterator (java.util.Iterator): 1
List (java.util.List): 1
Set (java.util.Set): 1
SortedMap (java.util.SortedMap): 1
TreeMap (java.util.TreeMap): 1
Collectors (java.util.stream.Collectors): 1
BULK_PREFIX (org.apache.accumulo.core.Constants.BULK_PREFIX): 1
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 1
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 1