Example 51 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class RecoveryManager, method recoverLogs.

public boolean recoverLogs(KeyExtent extent, Collection<Collection<String>> walogs) throws IOException {
    boolean recoveryNeeded = false;
    for (Collection<String> logs : walogs) {
        for (String walog : logs) {
            Path switchedWalog = VolumeUtil.switchVolume(walog, FileType.WAL, manager.getContext().getVolumeReplacements());
            if (switchedWalog != null) {
                // Replace the volume used for sorting, but do not change the entry in the
                // metadata table. When the tablet loads, it will update the metadata entry. If
                // the tablet has the same replacement config, it will find the sorted log.
                log.info("Volume replaced {} -> {}", walog, switchedWalog);
                walog = switchedWalog.toString();
            }
            String[] parts = walog.split("/");
            String sortId = parts[parts.length - 1];
            String filename = new Path(walog).toString();
            String dest = RecoveryPath.getRecoveryPath(new Path(filename)).toString();
            boolean sortQueued;
            synchronized (this) {
                sortQueued = sortsQueued.contains(sortId);
            }
            if (sortQueued && zooCache.get(manager.getZooKeeperRoot() + Constants.ZRECOVERY + "/" + sortId) == null) {
                synchronized (this) {
                    sortsQueued.remove(sortId);
                }
            }
            if (exists(SortedLogState.getFinishedMarkerPath(dest))) {
                synchronized (this) {
                    closeTasksQueued.remove(sortId);
                    recoveryDelay.remove(sortId);
                    sortsQueued.remove(sortId);
                }
                continue;
            }
            recoveryNeeded = true;
            synchronized (this) {
                if (!closeTasksQueued.contains(sortId) && !sortsQueued.contains(sortId)) {
                    AccumuloConfiguration aconf = manager.getConfiguration();
                    @SuppressWarnings("deprecation")
                    LogCloser closer = Property.createInstanceFromPropertyName(aconf,
                        aconf.resolve(Property.MANAGER_WAL_CLOSER_IMPLEMENTATION,
                            Property.MANAGER_WALOG_CLOSER_IMPLEMETATION),
                        LogCloser.class, new HadoopLogCloser());
                    Long delay = recoveryDelay.get(sortId);
                    if (delay == null) {
                        delay = aconf.getTimeInMillis(Property.MANAGER_RECOVERY_DELAY);
                    } else {
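                        // exponential backoff, capped at five minutes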
                        delay = Math.min(2 * delay, 1000 * 60 * 5L);
                    }
                    log.info("Starting recovery of {} (in : {}s), tablet {} holds a reference", filename, (delay / 1000), extent);
                    executor.schedule(new LogSortTask(closer, filename, dest, sortId), delay, TimeUnit.MILLISECONDS);
                    closeTasksQueued.add(sortId);
                    recoveryDelay.put(sortId, delay);
                }
            }
        }
    }
    return recoveryNeeded;
}
Also used: RecoveryPath (org.apache.accumulo.server.manager.recovery.RecoveryPath), Path (org.apache.hadoop.fs.Path), HadoopLogCloser (org.apache.accumulo.server.manager.recovery.HadoopLogCloser), LogCloser (org.apache.accumulo.server.manager.recovery.LogCloser), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration)
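For reference, a minimal sketch of the configuration pattern used above: resolve() prefers the replacement property and falls back to the deprecated name only when the replacement is unset, and getTimeInMillis parses duration-typed values. The ConfigurationCopy overlay and the 10s value are illustrative, not taken from RecoveryManager.

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;

// Start from the shipped defaults and overlay a single property.
ConfigurationCopy aconf = new ConfigurationCopy(DefaultConfiguration.getInstance());
aconf.set(Property.MANAGER_RECOVERY_DELAY, "10s");

// Duration-typed properties are parsed into milliseconds.
long delayMs = aconf.getTimeInMillis(Property.MANAGER_RECOVERY_DELAY); // 10000

// resolve() picks the deprecated property only if it is explicitly set and the
// replacement is not; referencing the old name needs @SuppressWarnings, as above.
@SuppressWarnings("deprecation")
Property closerProp = aconf.resolve(Property.MANAGER_WAL_CLOSER_IMPLEMENTATION,
    Property.MANAGER_WALOG_CLOSER_IMPLEMETATION);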

Example 52 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class CompactorTest, method testCompactionSucceeds.

@Test
public void testCompactionSucceeds() throws Exception {
    UUID uuid = UUID.randomUUID();
    Supplier<UUID> supplier = () -> uuid;
    ExternalCompactionId eci = ExternalCompactionId.generate(supplier.get());
    PowerMock.resetAll();
    PowerMock.suppress(PowerMock.methods(Halt.class, "halt"));
    PowerMock.suppress(PowerMock.constructor(AbstractServer.class));
    ServerAddress client = PowerMock.createNiceMock(ServerAddress.class);
    HostAndPort address = HostAndPort.fromString("localhost:10240");
    EasyMock.expect(client.getAddress()).andReturn(address);
    TExternalCompactionJob job = PowerMock.createNiceMock(TExternalCompactionJob.class);
    TKeyExtent extent = PowerMock.createNiceMock(TKeyExtent.class);
    EasyMock.expect(job.isSetExternalCompactionId()).andReturn(true).anyTimes();
    EasyMock.expect(job.getExternalCompactionId()).andReturn(eci.toString()).anyTimes();
    EasyMock.expect(job.getExtent()).andReturn(extent).anyTimes();
    EasyMock.expect(extent.getTable()).andReturn("testTable".getBytes()).anyTimes();
    AccumuloConfiguration conf = PowerMock.createNiceMock(AccumuloConfiguration.class);
    EasyMock.expect(conf.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT)).andReturn(86400000L);
    ServerContext context = PowerMock.createNiceMock(ServerContext.class);
    EasyMock.expect(context.getConfiguration()).andReturn(conf);
    ZooReaderWriter zrw = PowerMock.createNiceMock(ZooReaderWriter.class);
    ZooKeeper zk = PowerMock.createNiceMock(ZooKeeper.class);
    EasyMock.expect(context.getZooReaderWriter()).andReturn(zrw).anyTimes();
    EasyMock.expect(zrw.getZooKeeper()).andReturn(zk).anyTimes();
    VolumeManagerImpl vm = PowerMock.createNiceMock(VolumeManagerImpl.class);
    EasyMock.expect(context.getVolumeManager()).andReturn(vm);
    vm.close();
    PowerMock.replayAll();
    SuccessfulCompactor c = new SuccessfulCompactor(supplier, client, job, conf, context, eci);
    c.run();
    PowerMock.verifyAll();
    c.close();
    assertTrue(c.isCompletedCalled());
    assertFalse(c.isFailedCalled());
}
Also used: Halt (org.apache.accumulo.core.util.Halt), ExternalCompactionId (org.apache.accumulo.core.metadata.schema.ExternalCompactionId), ServerAddress (org.apache.accumulo.server.rpc.ServerAddress), ZooReaderWriter (org.apache.accumulo.fate.zookeeper.ZooReaderWriter), VolumeManagerImpl (org.apache.accumulo.server.fs.VolumeManagerImpl), TKeyExtent (org.apache.accumulo.core.dataImpl.thrift.TKeyExtent), HostAndPort (org.apache.accumulo.core.util.HostAndPort), AbstractServer (org.apache.accumulo.server.AbstractServer), ZooKeeper (org.apache.zookeeper.ZooKeeper), ServerContext (org.apache.accumulo.server.ServerContext), UUID (java.util.UUID), TExternalCompactionJob (org.apache.accumulo.core.tabletserver.thrift.TExternalCompactionJob), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
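Where the full PowerMock setup is not required, a plain EasyMock stub of AccumuloConfiguration covers the configuration half of the test above (assuming the stubbed methods are non-final, which class mocking needs). A minimal sketch; the 30 second timeout is illustrative:

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.easymock.EasyMock;

// Nice mocks return default values for anything not explicitly stubbed.
AccumuloConfiguration conf = EasyMock.createNiceMock(AccumuloConfiguration.class);
EasyMock.expect(conf.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT))
    .andReturn(30_000L).anyTimes();
EasyMock.replay(conf);

// Code under test now observes a 30 second ZooKeeper timeout.
long zkTimeout = conf.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT);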

Example 53 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class LoadFiles, method call.

@Override
public Repo<Manager> call(final long tid, final Manager manager) throws Exception {
    manager.updateBulkImportStatus(source, BulkImportState.LOADING);
    ExecutorService executor = getThreadPool(manager);
    final AccumuloConfiguration conf = manager.getConfiguration();
    VolumeManager fs = manager.getVolumeManager();
    List<FileStatus> files = new ArrayList<>();
    Collections.addAll(files, fs.listStatus(new Path(bulk)));
    log.debug(FateTxId.formatTid(tid) + " importing " + files.size() + " files");
    Path writable = new Path(this.errorDir, ".iswritable");
    if (!fs.createNewFile(writable)) {
        // Maybe this is a re-try... clear the flag and try again
        fs.delete(writable);
        if (!fs.createNewFile(writable))
            throw new AcceptableThriftTableOperationException(tableId.canonical(), null,
                TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY,
                "Unable to write to " + this.errorDir);
    }
    fs.delete(writable);
    final Set<String> filesToLoad = Collections.synchronizedSet(new HashSet<>());
    for (FileStatus f : files) filesToLoad.add(f.getPath().toString());
    final int RETRIES = Math.max(1, conf.getCount(Property.MANAGER_BULK_RETRIES));
    for (int attempt = 0; attempt < RETRIES && !filesToLoad.isEmpty(); attempt++) {
        List<Future<Void>> results = new ArrayList<>();
        if (manager.onlineTabletServers().isEmpty())
            log.warn("There are no tablet server to process bulk import, waiting (tid = " + FateTxId.formatTid(tid) + ")");
        while (manager.onlineTabletServers().isEmpty()) {
            sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
        }
        // Use the threadpool to assign files one-at-a-time to the server
        final List<String> loaded = Collections.synchronizedList(new ArrayList<>());
        final TServerInstance[] servers;
        String prop = conf.get(Property.MANAGER_BULK_TSERVER_REGEX);
        if (prop == null || "".equals(prop)) {
            servers = manager.onlineTabletServers().toArray(new TServerInstance[0]);
        } else {
            Pattern regex = Pattern.compile(prop);
            List<TServerInstance> subset = new ArrayList<>();
            manager.onlineTabletServers().forEach(t -> {
                if (regex.matcher(t.getHost()).matches()) {
                    subset.add(t);
                }
            });
            if (subset.isEmpty()) {
                log.warn("There are no tablet servers online that match supplied regex: {}", conf.get(Property.MANAGER_BULK_TSERVER_REGEX));
            }
            servers = subset.toArray(new TServerInstance[0]);
        }
        if (servers.length > 0) {
            for (final String file : filesToLoad) {
                results.add(executor.submit(() -> {
                    ClientService.Client client = null;
                    HostAndPort server = null;
                    try {
                        // get a connection to a random tablet server, do not prefer cached connections
                        // because this is running on the manager and there are lots of connections to tablet
                        // servers serving the metadata tablets
                        long timeInMillis = manager.getConfiguration().getTimeInMillis(Property.MANAGER_BULK_TIMEOUT);
                        server = servers[random.nextInt(servers.length)].getHostAndPort();
                        client = ThriftUtil.getTServerClient(server, manager.getContext(), timeInMillis);
                        List<String> attempt1 = Collections.singletonList(file);
                        log.debug("Asking " + server + " to bulk import " + file);
                        List<String> fail = client.bulkImportFiles(TraceUtil.traceInfo(),
                            manager.getContext().rpcCreds(), tid, tableId.canonical(), attempt1,
                            errorDir, setTime);
                        if (fail.isEmpty()) {
                            loaded.add(file);
                        }
                    } catch (Exception ex) {
                        log.error("rpc failed server:" + server + ", tid:" + FateTxId.formatTid(tid) + " " + ex);
                    } finally {
                        ThriftUtil.returnClient(client, manager.getContext());
                    }
                    return null;
                }));
            }
        }
        for (Future<Void> f : results) {
            f.get();
        }
        filesToLoad.removeAll(loaded);
        if (!filesToLoad.isEmpty()) {
            log.debug(FateTxId.formatTid(tid) + " attempt " + (attempt + 1) + " " + sampleList(filesToLoad, 10) + " failed");
            sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        }
    }
    FSDataOutputStream failFile = fs.overwrite(new Path(errorDir, BulkImport.FAILURES_TXT));
    try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(failFile, UTF_8))) {
        for (String f : filesToLoad) {
            out.write(f);
            out.write("\n");
        }
    }
    // return the next step, which will perform cleanup
    return new CompleteBulkImport(tableId, source, bulk, errorDir);
}
Also used: VolumeManager (org.apache.accumulo.server.fs.VolumeManager), FileStatus (org.apache.hadoop.fs.FileStatus), ArrayList (java.util.ArrayList), AcceptableThriftTableOperationException (org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException), BufferedWriter (java.io.BufferedWriter), HostAndPort (org.apache.accumulo.core.util.HostAndPort), List (java.util.List), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), Path (org.apache.hadoop.fs.Path), Pattern (java.util.regex.Pattern), TServerInstance (org.apache.accumulo.core.metadata.TServerInstance), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), OutputStreamWriter (java.io.OutputStreamWriter)
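The retry and regex handling above reduces to two typed reads of the configuration. A minimal sketch against the shipped defaults; the regex value is illustrative:

import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;

ConfigurationCopy conf = new ConfigurationCopy(DefaultConfiguration.getInstance());
conf.set(Property.MANAGER_BULK_TSERVER_REGEX, "tserver-[0-9]+");

// getCount parses an integer property; guard against a misconfigured zero.
int retries = Math.max(1, conf.getCount(Property.MANAGER_BULK_RETRIES));

// An empty or unset regex means "send bulk imports to every online tablet server".
String prop = conf.get(Property.MANAGER_BULK_TSERVER_REGEX);
boolean filterServers = prop != null && !prop.isEmpty();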

Example 54 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class ShellServerIT, method createRFiles.

private File createRFiles(final Configuration conf, final FileSystem fs, final String postfix) throws IOException {
    File importDir = new File(rootPath, "import_" + postfix);
    assertTrue(importDir.mkdir());
    String even = new File(importDir, "even.rf").toString();
    String odd = new File(importDir, "odd.rf").toString();
    AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
    FileSKVWriter evenWriter = FileOperations.getInstance().newWriterBuilder()
        .forFile(even, fs, conf, CryptoServiceFactory.newDefaultInstance())
        .withTableConfiguration(aconf).build();
    evenWriter.startDefaultLocalityGroup();
    FileSKVWriter oddWriter = FileOperations.getInstance().newWriterBuilder()
        .forFile(odd, fs, conf, CryptoServiceFactory.newDefaultInstance())
        .withTableConfiguration(aconf).build();
    oddWriter.startDefaultLocalityGroup();
    long timestamp = System.currentTimeMillis();
    Text cf = new Text("cf");
    Text cq = new Text("cq");
    Value value = new Value("value");
    for (int i = 0; i < 100; i += 2) {
        Key key = new Key(new Text(String.format("%8d", i)), cf, cq, timestamp);
        evenWriter.append(key, value);
        key = new Key(new Text(String.format("%8d", i + 1)), cf, cq, timestamp);
        oddWriter.append(key, value);
    }
    evenWriter.close();
    oddWriter.close();
    assertEquals(0, ts.shell.getExitCode());
    return importDir;
}
Also used: FileSKVWriter (org.apache.accumulo.core.file.FileSKVWriter), Value (org.apache.accumulo.core.data.Value), Text (org.apache.hadoop.io.Text), File (java.io.File), Key (org.apache.accumulo.core.data.Key), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration)
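The two builder chains above differ only in the target file, so under the same assumptions they factor into a small helper; openWriter is an illustrative name, not part of ShellServerIT:

// A hedged sketch reusing only the builder API shown above.
private static FileSKVWriter openWriter(String file, FileSystem fs, Configuration conf,
        AccumuloConfiguration aconf) throws IOException {
    FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
        .forFile(file, fs, conf, CryptoServiceFactory.newDefaultInstance())
        .withTableConfiguration(aconf).build();
    // A locality group must be started before any append, and keys must be
    // appended in sorted order.
    writer.startDefaultLocalityGroup();
    return writer;
}

With that helper, the method above shrinks to FileSKVWriter evenWriter = openWriter(even, fs, conf, aconf); and likewise for oddWriter.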

Example 55 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From the class ServerContext, method enforceKerberosLogin.

/**
 * A "client-side" assertion for servers to validate that they are logged in as the expected user,
 * per the configuration, before performing any RPC
 */
// Should be private, but package-protected so EasyMock will work
void enforceKerberosLogin() {
    final AccumuloConfiguration conf = getServerConfFactory().getSiteConfiguration();
    // Unwrap _HOST into the FQDN to make the kerberos principal we'll compare against
    final String kerberosPrincipal = SecurityUtil.getServerPrincipal(conf.get(Property.GENERAL_KERBEROS_PRINCIPAL));
    UserGroupInformation loginUser;
    try {
        // The system user should be logged in via keytab when the process is started, not the
        // currentUser() like KerberosToken
        loginUser = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new RuntimeException("Could not get login user", e);
    }
    checkArgument(loginUser.hasKerberosCredentials(), "Server does not have Kerberos credentials");
    checkArgument(kerberosPrincipal.equals(loginUser.getUserName()), "Expected login user to be " + kerberosPrincipal + " but was " + loginUser.getUserName());
}
Also used: IOException (java.io.IOException), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
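The same assertion also works with the configuration injected as a parameter, which makes it exercisable without a full ServerContext. A minimal sketch; assertLoggedInAs is an illustrative name, and SecurityUtil is assumed to be the Accumulo server-side helper used above:

import static com.google.common.base.Preconditions.checkArgument;

import java.io.IOException;
import java.io.UncheckedIOException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.server.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

static void assertLoggedInAs(AccumuloConfiguration conf) {
    // Unwrap _HOST into the FQDN, exactly as the method above does.
    String expected = SecurityUtil.getServerPrincipal(conf.get(Property.GENERAL_KERBEROS_PRINCIPAL));
    UserGroupInformation login;
    try {
        login = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new UncheckedIOException("Could not get login user", e);
    }
    checkArgument(login.hasKerberosCredentials(), "Server does not have Kerberos credentials");
    checkArgument(expected.equals(login.getUserName()),
        "Expected login user to be %s but was %s", expected, login.getUserName());
}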

Aggregations

AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 164
Test (org.junit.Test): 51
Path (org.apache.hadoop.fs.Path): 44
IOException (java.io.IOException): 31
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy): 31
Configuration (org.apache.hadoop.conf.Configuration): 27
HashMap (java.util.HashMap): 24
ArrayList (java.util.ArrayList): 23
Key (org.apache.accumulo.core.data.Key): 23
FileSystem (org.apache.hadoop.fs.FileSystem): 22
Value (org.apache.accumulo.core.data.Value): 21
ServerContext (org.apache.accumulo.server.ServerContext): 18
Property (org.apache.accumulo.core.conf.Property): 16
DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration): 15
HostAndPort (org.apache.accumulo.core.util.HostAndPort): 15
Map (java.util.Map): 12
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 11
DataInputStream (java.io.DataInputStream): 11
SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration): 11
SamplerConfigurationImpl (org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl): 11