
Example 1 with Property

Use of org.apache.accumulo.core.conf.Property in project accumulo by apache.

The class IteratorUtil, method parseIterConf:

public static void parseIterConf(IteratorScope scope, List<IterInfo> iters, Map<String, Map<String, String>> allOptions, AccumuloConfiguration conf) {
    final Property scopeProperty = getProperty(scope);
    final String scopePropertyKey = scopeProperty.getKey();
    for (Entry<String, String> entry : conf.getAllPropertiesWithPrefix(scopeProperty).entrySet()) {
        String suffix = entry.getKey().substring(scopePropertyKey.length());
        String[] suffixSplit = suffix.split("\\.", 3);
        if (suffixSplit.length == 1) {
            // "<prefix>.<iterName>" = "<priority>,<className>"
            String[] sa = entry.getValue().split(",");
            int prio = Integer.parseInt(sa[0]);
            String className = sa[1];
            iters.add(new IterInfo(prio, className, suffixSplit[0]));
        } else if (suffixSplit.length == 3 && suffixSplit[1].equals("opt")) {
            // "<prefix>.<iterName>.opt.<optName>" = "<optValue>"
            String iterName = suffixSplit[0];
            String optName = suffixSplit[2];
            Map<String, String> options = allOptions.get(iterName);
            if (options == null) {
                options = new HashMap<>();
                allOptions.put(iterName, options);
            }
            options.put(optName, entry.getValue());
        } else {
            throw new IllegalArgumentException("Invalid iterator format: " + entry.getKey());
        }
    }
    // Order the iterators by priority before returning them to the caller.
    Collections.sort(iters, new IterInfoComparator());
}
Also used: HashMap (java.util.HashMap), Property (org.apache.accumulo.core.conf.Property), IterInfo (org.apache.accumulo.core.data.thrift.IterInfo), Map (java.util.Map), TreeMap (java.util.TreeMap), DefaultKeySizeConstraint (org.apache.accumulo.core.constraints.DefaultKeySizeConstraint)
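As a rough illustration of how parseIterConf behaves, the sketch below starts from an empty ConfigurationCopy, registers a hypothetical scan-scope iterator under the table.iterator.scan. prefix, and hands the configuration to the method. The property-name convention and the call signature come from the code above; the class ParseIterConfSketch, the iterator name myfilter, and the class org.example.MyFilter are made up for the example (the class is never loaded here, only recorded in the IterInfo).

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.data.thrift.IterInfo;
import org.apache.accumulo.core.iterators.IteratorUtil;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;

public class ParseIterConfSketch {
    public static void main(String[] args) {
        // Start from an empty configuration so only the two properties below are parsed.
        ConfigurationCopy conf = new ConfigurationCopy(new HashMap<String, String>());
        // "<prefix>.<iterName>" = "<priority>,<className>"
        conf.set("table.iterator.scan.myfilter", "30,org.example.MyFilter");
        // "<prefix>.<iterName>.opt.<optName>" = "<optValue>"
        conf.set("table.iterator.scan.myfilter.opt.threshold", "42");

        List<IterInfo> iters = new ArrayList<>();
        Map<String, Map<String, String>> allOptions = new HashMap<>();
        IteratorUtil.parseIterConf(IteratorScope.scan, iters, allOptions, conf);

        // Expected: one IterInfo (priority 30, name "myfilter") and one option map {threshold=42}.
        System.out.println(iters);
        System.out.println(allOptions);
    }
}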

Example 2 with Property

Use of org.apache.accumulo.core.conf.Property in project accumulo by apache.

The class Accumulo, method init:

public static void init(VolumeManager fs, Instance instance, ServerConfigurationFactory serverConfig, String application) throws IOException {
    final AccumuloConfiguration conf = serverConfig.getSystemConfiguration();
    log.info("{} starting", application);
    log.info("Instance {}", instance.getInstanceID());
    int dataVersion = Accumulo.getAccumuloPersistentVersion(fs);
    log.info("Data Version {}", dataVersion);
    Accumulo.waitForZookeeperAndHdfs(fs);
    if (!(canUpgradeFromDataVersion(dataVersion))) {
        throw new RuntimeException("This version of accumulo (" + Constants.VERSION + ") is not compatible with files stored using data version " + dataVersion);
    }
    TreeMap<String, String> sortedProps = new TreeMap<>();
    for (Entry<String, String> entry : conf) sortedProps.put(entry.getKey(), entry.getValue());
    for (Entry<String, String> entry : sortedProps.entrySet()) {
        String key = entry.getKey();
        log.info("{} = {}", key, (Property.isSensitive(key) ? "<hidden>" : entry.getValue()));
    }
    monitorSwappiness(conf);
    // Encourage users to configure TLS
    final String SSL = "SSL";
    for (Property sslProtocolProperty : Arrays.asList(Property.RPC_SSL_CLIENT_PROTOCOL, Property.RPC_SSL_ENABLED_PROTOCOLS, Property.MONITOR_SSL_INCLUDE_PROTOCOLS)) {
        String value = conf.get(sslProtocolProperty);
        if (value.contains(SSL)) {
            log.warn("It is recommended that {} only allow TLS", sslProtocolProperty);
        }
    }
}
Also used: TreeMap (java.util.TreeMap), Property (org.apache.accumulo.core.conf.Property), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration)
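The sorted, redacted property dump inside init is a reusable pattern on its own. Below is a minimal sketch of that pattern, assuming only what the method above already shows: AccumuloConfiguration can be iterated as key/value entries and Property.isSensitive marks values that should not be printed. The class name ConfigDumpSketch and the use of DefaultConfiguration as demo input are illustrative.

import java.util.Map.Entry;
import java.util.TreeMap;

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;

public class ConfigDumpSketch {

    // Print a configuration in sorted key order, masking sensitive values,
    // mirroring the logging loop in Accumulo.init above.
    static void dump(AccumuloConfiguration conf) {
        TreeMap<String, String> sorted = new TreeMap<>();
        for (Entry<String, String> entry : conf) {
            sorted.put(entry.getKey(), entry.getValue());
        }
        for (Entry<String, String> entry : sorted.entrySet()) {
            String key = entry.getKey();
            String value = Property.isSensitive(key) ? "<hidden>" : entry.getValue();
            System.out.println(key + " = " + value);
        }
    }

    public static void main(String[] args) {
        dump(DefaultConfiguration.getInstance());
    }
}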

Example 3 with Property

Use of org.apache.accumulo.core.conf.Property in project accumulo by apache.

The class FileUtilTest, method testCleanupIndexOpWithoutCommonParentVolumeWithDepth:

@Test
public void testCleanupIndexOpWithoutCommonParentVolumeWithDepth() throws IOException {
    // Make some directories to simulate multiple volumes
    File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
    assertTrue(v1.mkdirs() || v1.isDirectory());
    assertTrue(v2.mkdirs() || v2.isDirectory());
    // And a "unique" tmp directory for each volume
    // Make sure we can handle nested directories (a single tmpdir with potentially multiple unique dirs)
    File tmp1 = new File(new File(v1, "tmp"), "tmp_1"), tmp2 = new File(new File(v2, "tmp"), "tmp_1");
    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
    assertTrue(tmp2.mkdirs() || tmp2.isDirectory());
    Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
    HashMap<Property, String> testProps = new HashMap<>();
    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
    VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
    FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
    Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
    FileUtil.cleanupIndexOp(tmpPath2, fs, new ArrayList<>());
    Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), HashMap (java.util.HashMap), File (java.io.File), Property (org.apache.accumulo.core.conf.Property), Test (org.junit.Test)
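Outside of JUnit, the same cleanup call can be exercised by hand. The sketch below is hypothetical: it assumes it lives in the org.apache.accumulo.server.util package, next to FileUtil, so cleanupIndexOp is accessible the same way it is for the test above, and it uses a throwaway directory under java.io.tmpdir in place of accumuloDir.

package org.apache.accumulo.server.util;

import java.io.File;
import java.util.ArrayList;

import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
import org.apache.hadoop.fs.Path;

public class CleanupIndexOpSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical stand-in for the accumuloDir used by the test.
        File baseDir = new File(System.getProperty("java.io.tmpdir"), "accumulo-cleanup-sketch");
        File tmpDir = new File(new File(baseDir, "tmp"), "tmp_1");
        if (!tmpDir.mkdirs() && !tmpDir.isDirectory()) {
            throw new IllegalStateException("could not create " + tmpDir);
        }

        // A VolumeManager backed by the local filesystem, as in the test above.
        VolumeManager fs = VolumeManagerImpl.getLocal(baseDir.getAbsolutePath());

        // No open readers to close, so pass an empty list; the tmp directory should be removed.
        FileUtil.cleanupIndexOp(new Path(tmpDir.toURI()), fs, new ArrayList<>());
        System.out.println(tmpDir + " still exists? " + tmpDir.exists());
    }
}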

Example 4 with Property

Use of org.apache.accumulo.core.conf.Property in project accumulo by apache.

The class FileUtilTest, method testCleanupIndexOpWithCommonParentVolumeWithDepth:

@Test
public void testCleanupIndexOpWithCommonParentVolumeWithDepth() throws IOException {
    File volumeDir = new File(accumuloDir, "volumes");
    assertTrue(volumeDir.mkdirs() || volumeDir.isDirectory());
    // Make some directories to simulate multiple volumes
    File v1 = new File(volumeDir, "v1"), v2 = new File(volumeDir, "v2");
    assertTrue(v1.mkdirs() || v1.isDirectory());
    assertTrue(v2.mkdirs() || v2.isDirectory());
    // And a "unique" tmp directory for each volume
    // Make sure we can handle nested directories (a single tmpdir with potentially multiple unique dirs)
    File tmp1 = new File(new File(v1, "tmp"), "tmp_1"), tmp2 = new File(new File(v2, "tmp"), "tmp_1");
    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
    assertTrue(tmp2.mkdirs() || tmp2.isDirectory());
    Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
    HashMap<Property, String> testProps = new HashMap<>();
    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
    VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
    FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
    Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
    FileUtil.cleanupIndexOp(tmpPath2, fs, new ArrayList<>());
    Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), HashMap (java.util.HashMap), File (java.io.File), Property (org.apache.accumulo.core.conf.Property), Test (org.junit.Test)
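In these FileUtil tests, a testProps map keyed by Property is filled with INSTANCE_VOLUMES but is not consumed in the snippets shown. If a test needed an AccumuloConfiguration carrying such overrides, one hypothetical way to build it is a ConfigurationCopy layered over the defaults, as sketched below; the helper name toConfiguration is not part of Accumulo, and the file:// volume URIs are placeholders.

import java.util.HashMap;
import java.util.Map;

import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;

public class TestPropsSketch {

    // Hypothetical helper: overlay a Property-keyed map on top of the default configuration.
    static ConfigurationCopy toConfiguration(Map<Property, String> testProps) {
        ConfigurationCopy conf = new ConfigurationCopy(DefaultConfiguration.getInstance());
        for (Map.Entry<Property, String> entry : testProps.entrySet()) {
            conf.set(entry.getKey(), entry.getValue());
        }
        return conf;
    }

    public static void main(String[] args) {
        Map<Property, String> testProps = new HashMap<>();
        testProps.put(Property.INSTANCE_VOLUMES, "file:///tmp/v1,file:///tmp/v2");
        ConfigurationCopy conf = toConfiguration(testProps);
        System.out.println(conf.get(Property.INSTANCE_VOLUMES));
    }
}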

Example 5 with Property

Use of org.apache.accumulo.core.conf.Property in project accumulo by apache.

The class FileUtilTest, method testCleanupIndexOpWithoutCommonParentVolume:

@Test
public void testCleanupIndexOpWithoutCommonParentVolume() throws IOException {
    // Make some directories to simulate multiple volumes
    File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
    assertTrue(v1.mkdirs() || v1.isDirectory());
    assertTrue(v2.mkdirs() || v2.isDirectory());
    // And a "unique" tmp directory for each volume
    File tmp1 = new File(v1, "tmp"), tmp2 = new File(v2, "tmp");
    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
    assertTrue(tmp2.mkdirs() || tmp2.isDirectory());
    Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
    HashMap<Property, String> testProps = new HashMap<>();
    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
    VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
    FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
    Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
    FileUtil.cleanupIndexOp(tmpPath2, fs, new ArrayList<>());
    Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), HashMap (java.util.HashMap), File (java.io.File), Property (org.apache.accumulo.core.conf.Property), Test (org.junit.Test)
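All three tests reference an accumuloDir field that is set up elsewhere in FileUtilTest and not shown here. A minimal, hypothetical fixture for running snippets like these in isolation could rely on JUnit 4's TemporaryFolder rule, as sketched below; the actual setup in the real test class may differ.

import java.io.File;

import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

public class FileUtilTestFixtureSketch {

    @Rule
    public TemporaryFolder tempFolder = new TemporaryFolder();

    // Stand-in for the accumuloDir field referenced by the tests above.
    private File accumuloDir;

    @Before
    public void createAccumuloDir() throws Exception {
        // Each test gets a fresh directory that JUnit deletes afterwards.
        accumuloDir = tempFolder.newFolder("accumulo");
    }
}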

Aggregations

Property (org.apache.accumulo.core.conf.Property)40 Test (org.junit.Test)19 HashMap (java.util.HashMap)11 AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration)11 File (java.io.File)8 Path (org.apache.hadoop.fs.Path)7 IOException (java.io.IOException)6 Map (java.util.Map)6 Predicate (java.util.function.Predicate)5 ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy)5 VolumeManager (org.apache.accumulo.server.fs.VolumeManager)5 AccumuloException (org.apache.accumulo.core.client.AccumuloException)4 DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration)4 TableConfiguration (org.apache.accumulo.server.conf.TableConfiguration)4 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)3 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)3 AccumuloServerContext (org.apache.accumulo.server.AccumuloServerContext)3 NamespaceConfiguration (org.apache.accumulo.server.conf.NamespaceConfiguration)3 TServerInstance (org.apache.accumulo.server.master.state.TServerInstance)3 ArrayList (java.util.ArrayList)2