Search in sources :

Example 86 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class TServerUtils, method startServer:

/**
 * Start a server on the given port, or on a higher port if that port is not available.
 *
 * @param context
 *          the server context, which supplies the RPC configuration
 * @param hostname
 *          the address to bind the server to
 * @param portHintProperty
 *          the port to attempt to open; can be zero, meaning "any available port"
 * @param processor
 *          the service to be started
 * @param serverName
 *          the name of the class that is providing the service
 * @param threadName
 *          the name to give this service's thread, for easier debugging
 * @param portSearchProperty
 *          a boolean Property controlling whether port search should be used, or null to disable it
 * @param minThreadProperty
 *          a Property to control the minimum number of threads in the pool
 * @param threadTimeOutProperty
 *          a Property to control how long idle threads remain in the pool
 * @param timeBetweenThreadChecksProperty
 *          a Property to control the amount of time between checks to resize the thread pool
 * @param maxMessageSizeProperty
 *          a Property to control the maximum Thrift message size accepted
 * @return the server object created, and the port actually used
 * @throws UnknownHostException
 *           when we don't know our own address
 */
public static ServerAddress startServer(ServerContext context, String hostname, Property portHintProperty, TProcessor processor, String serverName, String threadName, Property portSearchProperty, Property minThreadProperty, Property threadTimeOutProperty, Property timeBetweenThreadChecksProperty, Property maxMessageSizeProperty) throws UnknownHostException {
    final AccumuloConfiguration config = context.getConfiguration();
    final IntStream portHint = config.getPortStream(portHintProperty);
    int minThreads = 2;
    if (minThreadProperty != null) {
        minThreads = config.getCount(minThreadProperty);
    }
    long threadTimeOut = ThreadPools.DEFAULT_TIMEOUT_MILLISECS;
    if (threadTimeOutProperty != null) {
        threadTimeOut = config.getTimeInMillis(threadTimeOutProperty);
    }
    long timeBetweenThreadChecks = 1000;
    if (timeBetweenThreadChecksProperty != null) {
        timeBetweenThreadChecks = config.getTimeInMillis(timeBetweenThreadChecksProperty);
    }
    long maxMessageSize = 10_000_000;
    if (maxMessageSizeProperty != null) {
        maxMessageSize = config.getAsBytes(maxMessageSizeProperty);
    }
    boolean portSearch = false;
    if (portSearchProperty != null) {
        portSearch = config.getBoolean(portSearchProperty);
    }
    final ThriftServerType serverType = context.getThriftServerType();
    if (serverType == ThriftServerType.SASL) {
        processor = updateSaslProcessor(serverType, processor);
    }
    // Create the TimedProcessor outside the port search loop so we don't try to
    // register the same metrics mbean more than once
    TimedProcessor timedProcessor = new TimedProcessor(config, processor, serverName, threadName);
    HostAndPort[] addresses = getHostAndPorts(hostname, portHint);
    try {
        return TServerUtils.startTServer(serverType, timedProcessor, serverName, threadName, minThreads, threadTimeOut, config, timeBetweenThreadChecks, maxMessageSize, context.getServerSslParams(), context.getSaslParams(), context.getClientTimeoutInMillis(), addresses);
    } catch (TTransportException e) {
        if (portSearch) {
            // Build a list of reserved ports, as identified by properties of type PropertyType.PORT
            Map<Integer, Property> reservedPorts = getReservedPorts(config, portHintProperty);
            HostAndPort last = addresses[addresses.length - 1];
            // Search sequentially over the next 1000 ports
            for (int port = last.getPort() + 1; port < last.getPort() + 1001; port++) {
                if (reservedPorts.containsKey(port)) {
                    log.debug("During port search, skipping reserved port {} - property {} ({})", port, reservedPorts.get(port).getKey(), reservedPorts.get(port).getDescription());
                    continue;
                }
                if (PortRange.VALID_RANGE.isBefore(port)) {
                    break;
                }
                try {
                    HostAndPort addr = HostAndPort.fromParts(hostname, port);
                    return TServerUtils.startTServer(serverType, timedProcessor, serverName, threadName, minThreads, threadTimeOut, config, timeBetweenThreadChecks, maxMessageSize, context.getServerSslParams(), context.getSaslParams(), context.getClientTimeoutInMillis(), addr);
                } catch (TTransportException tte) {
                    log.info("Unable to use port {}, retrying. (Thread Name = {})", port, threadName);
                }
            }
        }
        log.error("Unable to start TServer", e);
        throw new UnknownHostException("Unable to find a listen port");
    }
}
Also used : HostAndPort(org.apache.accumulo.core.util.HostAndPort) UnknownHostException(java.net.UnknownHostException) TTransportException(org.apache.thrift.transport.TTransportException) IntStream(java.util.stream.IntStream) Map(java.util.Map) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration)
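
A caller normally passes Property constants straight through to this helper. A minimal sketch of such a call, assuming a ServerContext and a TProcessor built elsewhere; the Property constants shown are illustrative and vary by server type:

// Hypothetical invocation of TServerUtils.startServer; context, hostname,
// processor, and log are assumed to exist, and the Property constants
// are illustrative rather than the definitive ones for any given server.
ServerAddress address = TServerUtils.startServer(context, hostname,
    Property.TSERV_CLIENTPORT,          // portHintProperty
    processor,                          // the Thrift service implementation
    "TabletServer",                     // serverName
    "thrift client handler",            // threadName
    Property.TSERV_PORTSEARCH,          // portSearchProperty
    Property.TSERV_MINTHREADS,          // minThreadProperty
    Property.TSERV_MINTHREADS_TIMEOUT,  // threadTimeOutProperty
    Property.TSERV_THREADCHECK,         // timeBetweenThreadChecksProperty
    Property.GENERAL_MAX_MESSAGE_SIZE); // maxMessageSizeProperty
log.info("Started Thrift service on {}", address.getAddress());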

Example 87 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class InMemoryMap, method delete:

public void delete(long waitTime) {
    synchronized (this) {
        if (deleted)
            throw new IllegalStateException("Double delete");
        deleted = true;
    }
    long t1 = System.currentTimeMillis();
    while (!activeIters.isEmpty() && System.currentTimeMillis() - t1 < waitTime) {
        sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
    }
    if (!activeIters.isEmpty()) {
        // dump memmap exactly as is to a tmp file on disk, and switch scans to that temp file
        try {
            Configuration conf = context.getHadoopConf();
            FileSystem fs = FileSystem.getLocal(conf);
            String tmpFile = memDumpDir + "/memDump" + UUID.randomUUID() + "." + RFile.EXTENSION;
            Configuration newConf = new Configuration(conf);
            newConf.setInt("io.seqfile.compress.blocksize", 100000);
            AccumuloConfiguration aconf = context.getConfiguration();
            if (getOrCreateSampler() != null) {
                aconf = createSampleConfig(aconf);
            }
            FileSKVWriter out = new RFileOperations().newWriterBuilder().forFile(tmpFile, fs, newConf, context.getCryptoService()).withTableConfiguration(aconf).build();
            InterruptibleIterator iter = map.skvIterator(null);
            HashSet<ByteSequence> allfams = new HashSet<>();
            for (Entry<String, Set<ByteSequence>> entry : lggroups.entrySet()) {
                allfams.addAll(entry.getValue());
                out.startNewLocalityGroup(entry.getKey(), entry.getValue());
                iter.seek(new Range(), entry.getValue(), true);
                dumpLocalityGroup(out, iter);
            }
            out.startDefaultLocalityGroup();
            iter.seek(new Range(), allfams, false);
            dumpLocalityGroup(out, iter);
            out.close();
            log.debug("Created mem dump file {}", tmpFile);
            memDumpFile = tmpFile;
            synchronized (activeIters) {
                for (MemoryIterator mi : activeIters) {
                    mi.switchNow();
                }
            }
            // rely on unix behavior that file will be deleted when last
            // reader closes it
            fs.delete(new Path(memDumpFile), true);
        } catch (IOException ioe) {
            log.error("Failed to create mem dump file", ioe);
            while (!activeIters.isEmpty()) {
                sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            }
        }
    }
    SimpleMap tmpMap = map;
    synchronized (this) {
        map = null;
    }
    tmpMap.delete();
}
Also used : Path(org.apache.hadoop.fs.Path) Set(java.util.Set) HashSet(java.util.HashSet) Configuration(org.apache.hadoop.conf.Configuration) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) FileSKVWriter(org.apache.accumulo.core.file.FileSKVWriter) IOException(java.io.IOException) Range(org.apache.accumulo.core.data.Range) InterruptibleIterator(org.apache.accumulo.core.iteratorsImpl.system.InterruptibleIterator) RFileOperations(org.apache.accumulo.core.file.rfile.RFileOperations) FileSystem(org.apache.hadoop.fs.FileSystem) ByteSequence(org.apache.accumulo.core.data.ByteSequence)
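
The cleanup at the end leans on POSIX unlink semantics, as the comment in the code notes: deleting the dump file only removes its directory entry, and readers that already have it open keep seeing the data until they close it. A standalone demonstration of that behavior in plain java.io (POSIX filesystems only; Windows behaves differently):

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;

public class UnlinkWhileOpen {
    public static void main(String[] args) throws IOException {
        File tmp = File.createTempFile("memDump", ".tmp");
        try (FileWriter w = new FileWriter(tmp)) {
            w.write("dumped contents\n");
        }
        try (BufferedReader r = new BufferedReader(new FileReader(tmp))) {
            // Remove the directory entry while the reader still holds the file open.
            if (!tmp.delete()) {
                throw new IOException("delete failed");
            }
            // The open descriptor still sees the data; the blocks are reclaimed
            // only after the last reader closes.
            System.out.println(r.readLine()); // prints "dumped contents"
        }
    }
}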

Example 88 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class InMemoryMap, method createSampleConfig:

private AccumuloConfiguration createSampleConfig(AccumuloConfiguration siteConf) {
    ConfigurationCopy confCopy = new ConfigurationCopy(Iterables.filter(siteConf, input -> !input.getKey().startsWith(Property.TABLE_SAMPLER.getKey())));
    for (Entry<String, String> entry : samplerRef.get().getFirst().toTablePropertiesMap().entrySet()) {
        confCopy.set(entry.getKey(), entry.getValue());
    }
    return confCopy;
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ByteSequence(org.apache.accumulo.core.data.ByteSequence) FileSystem(org.apache.hadoop.fs.FileSystem) LoggerFactory(org.slf4j.LoggerFactory) Sampler(org.apache.accumulo.core.client.sample.Sampler) Partitioner(org.apache.accumulo.core.util.LocalityGroupUtil.Partitioner) Mutation(org.apache.accumulo.core.data.Mutation) LocalityGroupUtil(org.apache.accumulo.core.util.LocalityGroupUtil) DataSource(org.apache.accumulo.core.iteratorsImpl.system.SourceSwitchingIterator.DataSource) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) EmptyIterator(org.apache.accumulo.core.iteratorsImpl.system.EmptyIterator) MutableLong(org.apache.commons.lang3.mutable.MutableLong) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) Path(org.apache.hadoop.fs.Path) Value(org.apache.accumulo.core.data.Value) Property(org.apache.accumulo.core.conf.Property) FileSKVIterator(org.apache.accumulo.core.file.FileSKVIterator) Set(java.util.Set) UUID(java.util.UUID) SourceSwitchingIterator(org.apache.accumulo.core.iteratorsImpl.system.SourceSwitchingIterator) List(java.util.List) LocalityGroup(org.apache.accumulo.core.iteratorsImpl.system.LocalityGroupIterator.LocalityGroup) PreAllocatedArray(org.apache.accumulo.core.util.PreAllocatedArray) Pair(org.apache.accumulo.core.util.Pair) Entry(java.util.Map.Entry) SortedMapIterator(org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) UtilWaitThread.sleepUninterruptibly(org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly) RFileOperations(org.apache.accumulo.core.file.rfile.RFileOperations) SamplerFactory(org.apache.accumulo.core.sample.impl.SamplerFactory) WrappingIterator(org.apache.accumulo.core.iterators.WrappingIterator) Iterables(com.google.common.collect.Iterables) SamplerConfigurationImpl(org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) SortedKeyValueIterator(org.apache.accumulo.core.iterators.SortedKeyValueIterator) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ColumnUpdate(org.apache.accumulo.core.data.ColumnUpdate) FileSKVWriter(org.apache.accumulo.core.file.FileSKVWriter) LocalityGroupIterator(org.apache.accumulo.core.iteratorsImpl.system.LocalityGroupIterator) Key(org.apache.accumulo.core.data.Key) Logger(org.slf4j.Logger) ServerContext(org.apache.accumulo.server.ServerContext) IOException(java.io.IOException) SampleNotPresentException(org.apache.accumulo.core.client.SampleNotPresentException) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) Range(org.apache.accumulo.core.data.Range) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) RFile(org.apache.accumulo.core.file.rfile.RFile) InterruptibleIterator(org.apache.accumulo.core.iteratorsImpl.system.InterruptibleIterator) Collections(java.util.Collections) IteratorEnvironment(org.apache.accumulo.core.iterators.IteratorEnvironment)
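
createSampleConfig builds the sampling configuration by copying the site configuration, dropping every existing table.sampler entry, and overlaying the sampler's own table properties. The same copy, filter, overlay pattern with a plain java.util.Map, as a standalone illustration (the class and helper names are hypothetical):

import java.util.HashMap;
import java.util.Map;

public class SamplerOverlayDemo {
    // Hypothetical stand-in for the ConfigurationCopy filtering above:
    // keep everything except keys under samplerPrefix, then overlay samplerProps.
    static Map<String, String> withSampler(Map<String, String> siteConf,
            Map<String, String> samplerProps, String samplerPrefix) {
        Map<String, String> copy = new HashMap<>();
        siteConf.forEach((k, v) -> {
            if (!k.startsWith(samplerPrefix)) {
                copy.put(k, v);
            }
        });
        copy.putAll(samplerProps);
        return copy;
    }

    public static void main(String[] args) {
        Map<String, String> site = new HashMap<>();
        site.put("table.sampler", "OldSampler");
        site.put("table.file.compress.type", "gz");
        Map<String, String> sampler = Map.of("table.sampler", "RowSampler");
        // Prints the compression setting unchanged and the sampler replaced.
        System.out.println(withSampler(site, sampler, "table.sampler"));
    }
}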

Example 89 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class WebViews, method addExternalResources:

/**
 * Add links to external CSS and JS resources, read from configuration, to the MVC model. See
 * ACCUMULO-4739.
 *
 * @param model
 *          map of the MVC model
 */
private void addExternalResources(Map<String, Object> model) {
    AccumuloConfiguration conf = monitor.getContext().getConfiguration();
    String resourcesProperty = conf.get(Property.MONITOR_RESOURCES_EXTERNAL);
    if (resourcesProperty.isBlank()) {
        return;
    }
    List<String> monitorResources = new ArrayList<>();
    ObjectMapper objectMapper = new ObjectMapper();
    try {
        Collections.addAll(monitorResources, objectMapper.readValue(resourcesProperty, String[].class));
    } catch (IOException e) {
        log.error("Error Monitor Resources config property {}: {}", Property.MONITOR_RESOURCES_EXTERNAL, e);
        return;
    }
    if (!monitorResources.isEmpty()) {
        model.put("externalResources", monitorResources);
    }
}
Also used : ArrayList(java.util.ArrayList) IOException(java.io.IOException) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration)
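
The property value is expected to be a JSON array of URL strings, which Jackson deserializes straight into a String[]. A standalone round trip with placeholder URLs (the real value would be set in the monitor's configuration):

import com.fasterxml.jackson.databind.ObjectMapper;

public class ExternalResourcesDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder value; a real deployment sets this via the
        // monitor.resources.external property.
        String resourcesProperty =
            "[\"https://example.com/custom.css\",\"https://example.com/custom.js\"]";
        String[] resources = new ObjectMapper().readValue(resourcesProperty, String[].class);
        for (String r : resources) {
            System.out.println(r); // each entry is rendered as an external resource link
        }
    }
}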

Example 90 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class AccumuloReplicaSystem, method replicate:

@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "path provided by admin")
@Override
public Status replicate(final Path p, final Status status, final ReplicationTarget target, final ReplicaSystemHelper helper) {
    final AccumuloConfiguration localConf = conf;
    log.debug("Replication RPC timeout is {}", localConf.get(Property.REPLICATION_RPC_TIMEOUT.getKey()));
    final String principal = getPrincipal(localConf, target);
    final File keytab;
    final String password;
    if (localConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
        String keytabPath = getKeytab(localConf, target);
        keytab = new File(keytabPath);
        if (!keytab.exists() || !keytab.isFile()) {
            log.error("{} is not a regular file. Cannot login to replicate", keytabPath);
            return status;
        }
        password = null;
    } else {
        keytab = null;
        password = getPassword(localConf, target);
    }
    if (keytab != null) {
        try {
            final UserGroupInformation accumuloUgi = UserGroupInformation.getCurrentUser();
            // Get a UGI with the principal + keytab
            UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab.getAbsolutePath());
            // Run inside a doAs to avoid nuking the Tserver's user
            return ugi.doAs((PrivilegedAction<Status>) () -> {
                KerberosToken token;
                try {
                    // Do *not* replace the current user
                    token = new KerberosToken(principal, keytab);
                } catch (IOException e) {
                    log.error("Failed to create KerberosToken", e);
                    return status;
                }
                ClientContext peerContext = getContextForPeer(localConf, target, principal, token);
                return _replicate(p, status, target, helper, localConf, peerContext, accumuloUgi);
            });
        } catch (IOException e) {
            // Can't log in, can't replicate
            log.error("Failed to perform local login", e);
            return status;
        }
    } else {
        // Simple case: make a password token, context and then replicate
        PasswordToken token = new PasswordToken(password);
        ClientContext peerContext = getContextForPeer(localConf, target, principal, token);
        return _replicate(p, status, target, helper, localConf, peerContext, null);
    }
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) IOException(java.io.IOException) File(java.io.File) RFile(org.apache.accumulo.core.file.rfile.RFile) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings)
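
The Kerberos branch logs in with a dedicated UGI and runs the replication inside doAs, so the tablet server's process-wide login user is never replaced. The essential shape of that pattern, reduced to a runnable sketch (the principal and keytab path are placeholders):

import java.io.IOException;
import java.security.PrivilegedAction;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static void main(String[] args) throws IOException {
        // Placeholders: substitute a real principal and keytab path.
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            "accumulo/host@EXAMPLE.COM", "/path/to/accumulo.keytab");
        // Code inside doAs runs with the freshly obtained credentials;
        // the process-wide current user elsewhere is unaffected.
        String who = ugi.doAs((PrivilegedAction<String>) ugi::getUserName);
        System.out.println(who);
    }
}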

Aggregations

AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration) - 164 uses
Test (org.junit.Test) - 51 uses
Path (org.apache.hadoop.fs.Path) - 44 uses
IOException (java.io.IOException) - 31 uses
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy) - 31 uses
Configuration (org.apache.hadoop.conf.Configuration) - 27 uses
HashMap (java.util.HashMap) - 24 uses
ArrayList (java.util.ArrayList) - 23 uses
Key (org.apache.accumulo.core.data.Key) - 23 uses
FileSystem (org.apache.hadoop.fs.FileSystem) - 22 uses
Value (org.apache.accumulo.core.data.Value) - 21 uses
ServerContext (org.apache.accumulo.server.ServerContext) - 18 uses
Property (org.apache.accumulo.core.conf.Property) - 16 uses
DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration) - 15 uses
HostAndPort (org.apache.accumulo.core.util.HostAndPort) - 15 uses
Map (java.util.Map) - 12 uses
ByteArrayOutputStream (java.io.ByteArrayOutputStream) - 11 uses
DataInputStream (java.io.DataInputStream) - 11 uses
SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration) - 11 uses
SamplerConfigurationImpl (org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl) - 11 uses