
Example 6 with ServerContext

Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

From class Manager, method setupReplication:

@Deprecated
private TServer setupReplication() throws UnknownHostException, KeeperException, InterruptedException {
    ServerContext context = getContext();
    // Start the replication coordinator which assigns tservers to service replication requests
    var impl = new org.apache.accumulo.manager.replication.ManagerReplicationCoordinator(this);
    ReplicationCoordinator.Iface haReplicationProxy = HighlyAvailableServiceWrapper.service(impl, this);
    ReplicationCoordinator.Processor<ReplicationCoordinator.Iface> replicationCoordinatorProcessor =
        new ReplicationCoordinator.Processor<>(TraceUtil.wrapService(haReplicationProxy));
    ServerAddress replAddress = TServerUtils.startServer(context, getHostname(),
        Property.MANAGER_REPLICATION_COORDINATOR_PORT, replicationCoordinatorProcessor,
        "Manager Replication Coordinator", "Replication Coordinator", null,
        Property.MANAGER_REPLICATION_COORDINATOR_MINTHREADS, null,
        Property.MANAGER_REPLICATION_COORDINATOR_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
    log.info("Started replication coordinator service at {}", replAddress.address);
    // Start the daemon to scan the replication table and make units of work
    replicationWorkThread = Threads.createThread("Replication Driver", new org.apache.accumulo.manager.replication.ReplicationDriver(this));
    replicationWorkThread.start();
    // Start the daemon to assign work to tservers to replicate to our peers
    var wd = new org.apache.accumulo.manager.replication.WorkDriver(this);
    replicationAssignerThread = Threads.createThread(wd.getName(), wd);
    replicationAssignerThread.start();
    // Advertise the port we used so peers don't have to be told what it is
    context.getZooReaderWriter().putPersistentData(
        getZooKeeperRoot() + Constants.ZMANAGER_REPLICATION_COORDINATOR_ADDR,
        replAddress.address.toString().getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
    return replAddress.server;
}
Also used: Processor(org.apache.accumulo.core.manager.thrift.ManagerClientService.Processor) ServerAddress(org.apache.accumulo.server.rpc.ServerAddress) ReplicationCoordinator(org.apache.accumulo.core.replication.thrift.ReplicationCoordinator) Iface(org.apache.accumulo.core.manager.thrift.ManagerClientService.Iface) ServerContext(org.apache.accumulo.server.ServerContext)
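
A note on the last step above: the coordinator address is advertised in ZooKeeper so peers can discover it rather than being configured with it. As a rough sketch of the reverse direction, a peer could read that znode back with the plain ZooKeeper client. This helper is illustrative and not taken from the Accumulo source; only the znode path constant comes from the snippet above.

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.accumulo.core.Constants;
import org.apache.zookeeper.ZooKeeper;

// Hypothetical helper: resolves the replication coordinator address that
// setupReplication advertised with putPersistentData(..., OVERWRITE).
public class CoordinatorAddressReader {

    public static String readCoordinatorAddress(ZooKeeper zk, String zroot) throws Exception {
        byte[] data =
            zk.getData(zroot + Constants.ZMANAGER_REPLICATION_COORDINATOR_ADDR, false, null);
        return new String(data, UTF_8);
    }
}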

Example 7 with ServerContext

Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

From class Manager, method run:

@Override
public void run() {
    final ServerContext context = getContext();
    final String zroot = getZooKeeperRoot();
    // ACCUMULO-4424 Put up the Thrift servers before getting the lock as a sign of process
    // health when running as a hot-standby.
    // Start the Manager's Client service
    clientHandler = new ManagerClientServiceHandler(this);
    // Ensure that calls before the manager gets the lock fail
    Iface haProxy = HighlyAvailableServiceWrapper.service(clientHandler, this);
    Iface rpcProxy = TraceUtil.wrapService(haProxy);
    final Processor<Iface> processor;
    if (context.getThriftServerType() == ThriftServerType.SASL) {
        Iface tcredsProxy = TCredentialsUpdatingWrapper.service(rpcProxy, clientHandler.getClass(), getConfiguration());
        processor = new Processor<>(tcredsProxy);
    } else {
        processor = new Processor<>(rpcProxy);
    }
    ServerAddress sa;
    try {
        sa = TServerUtils.startServer(context, getHostname(), Property.MANAGER_CLIENTPORT,
            processor, "Manager", "Manager Client Service Handler", null,
            Property.MANAGER_MINTHREADS, Property.MANAGER_MINTHREADS_TIMEOUT,
            Property.MANAGER_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
    } catch (UnknownHostException e) {
        throw new IllegalStateException("Unable to start server on host " + getHostname(), e);
    }
    clientService = sa.server;
    log.info("Started Manager client service at {}", sa.address);
    // block until we can obtain the ZK lock for the manager
    try {
        getManagerLock(ServiceLock.path(zroot + Constants.ZMANAGER_LOCK));
    } catch (KeeperException | InterruptedException e) {
        throw new IllegalStateException("Exception getting manager lock", e);
    }
    // If the upgrade coordinator has not finished, flag the manager as still upgrading.
    if (upgradeCoordinator.getStatus() != UpgradeCoordinator.UpgradeStatus.COMPLETE) {
        managerUpgrading.set(true);
    }
    try {
        MetricsUtil.initializeMetrics(getContext().getConfiguration(), this.applicationName, sa.getAddress());
        ManagerMetrics.init(getConfiguration(), this);
    } catch (Exception e1) {
        log.error("Error initializing metrics, metrics will not be emitted.", e1);
    }
    recoveryManager = new RecoveryManager(this, TIME_TO_CACHE_RECOVERY_WAL_EXISTENCE);
    context.getTableManager().addObserver(this);
    Thread statusThread = Threads.createThread("Status Thread", new StatusThread());
    statusThread.start();
    Threads.createThread("Migration Cleanup Thread", new MigrationCleanupThread()).start();
    tserverSet.startListeningForTabletServerChanges();
    try {
        blockForTservers();
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
    }
    ZooReaderWriter zReaderWriter = context.getZooReaderWriter();
    try {
        zReaderWriter.getChildren(zroot + Constants.ZRECOVERY, new Watcher() {

            @Override
            public void process(WatchedEvent event) {
                nextEvent.event("Noticed recovery changes %s", event.getType());
                try {
                    // watcher only fires once, add it back
                    zReaderWriter.getChildren(zroot + Constants.ZRECOVERY, this);
                } catch (Exception e) {
                    log.error("Failed to add log recovery watcher back", e);
                }
            }
        });
    } catch (KeeperException | InterruptedException e) {
        throw new IllegalStateException("Unable to read " + zroot + Constants.ZRECOVERY, e);
    }
    watchers.add(new TabletGroupWatcher(this, TabletStateStore.getStoreForLevel(DataLevel.USER, context, this), null) {

        @Override
        boolean canSuspendTablets() {
            // Always allow user data tablets to enter suspended state.
            return true;
        }
    });
    watchers.add(new TabletGroupWatcher(this, TabletStateStore.getStoreForLevel(DataLevel.METADATA, context, this), watchers.get(0)) {

        @Override
        boolean canSuspendTablets() {
            // Allow metadata tablets to be suspended only if enabled by the
            // MANAGER_METADATA_SUSPENDABLE setting.
            return getConfiguration().getBoolean(Property.MANAGER_METADATA_SUSPENDABLE);
        }
    });
    watchers.add(new TabletGroupWatcher(this, TabletStateStore.getStoreForLevel(DataLevel.ROOT, context), watchers.get(1)) {

        @Override
        boolean canSuspendTablets() {
            // Never allow root tablet to enter suspended state.
            return false;
        }
    });
    for (TabletGroupWatcher watcher : watchers) {
        watcher.start();
    }
    // Once we are sure the upgrade is complete, we can safely allow fate use.
    try {
        // wait for metadata upgrade running in background to complete
        if (null != upgradeMetadataFuture) {
            upgradeMetadataFuture.get();
        }
        // Everything is fully upgraded by this point.
        managerUpgrading.set(false);
    } catch (ExecutionException | InterruptedException e) {
        throw new IllegalStateException("Metadata upgrade failed", e);
    }
    try {
        final AgeOffStore<Manager> store = new AgeOffStore<>(
            new org.apache.accumulo.fate.ZooStore<>(getZooKeeperRoot() + Constants.ZFATE,
                context.getZooReaderWriter()),
            TimeUnit.HOURS.toMillis(8), System::currentTimeMillis);
        fate = new Fate<>(this, store, TraceRepo::toLogString);
        fate.startTransactionRunners(getConfiguration());
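        // Runs the FaTE age-off pass roughly once a minute (63,000 ms initial delay and period)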
        context.getScheduledExecutor().scheduleWithFixedDelay(store::ageOff, 63000, 63000, TimeUnit.MILLISECONDS);
    } catch (KeeperException | InterruptedException e) {
        throw new IllegalStateException("Exception setting up FaTE cleanup thread", e);
    }
    initializeZkForReplication(zReaderWriter, zroot);
    // Make sure that we have a secret key (either a new one or an old one from ZK) before we start
    // the manager client service.
    Thread authenticationTokenKeyManagerThread = null;
    if (authenticationTokenKeyManager != null && keyDistributor != null) {
        log.info("Starting delegation-token key manager");
        try {
            keyDistributor.initialize();
        } catch (KeeperException | InterruptedException e) {
            throw new IllegalStateException("Exception setting up delegation-token key manager", e);
        }
        authenticationTokenKeyManagerThread = Threads.createThread("Delegation Token Key Manager", authenticationTokenKeyManager);
        authenticationTokenKeyManagerThread.start();
        boolean logged = false;
        while (!authenticationTokenKeyManager.isInitialized()) {
            // Print out a status message when we start waiting for the key manager to get initialized
            if (!logged) {
                log.info("Waiting for AuthenticationTokenKeyManager to be initialized");
                logged = true;
            }
            sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
        }
        // And log when we are initialized
        log.info("AuthenticationTokenSecretManager is initialized");
    }
    String address = sa.address.toString();
    log.info("Setting manager lock data to {}", address);
    try {
        managerLock.replaceLockData(address.getBytes());
    } catch (KeeperException | InterruptedException e) {
        throw new IllegalStateException("Exception updating manager lock", e);
    }
    while (!clientService.isServing()) {
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
    // if the replication name is ever set, then start replication services
    final AtomicReference<TServer> replServer = new AtomicReference<>();
    context.getScheduledExecutor().scheduleWithFixedDelay(() -> {
        try {
            @SuppressWarnings("deprecation") Property p = Property.REPLICATION_NAME;
            if ((replServer.get() == null) && !getConfiguration().get(p).isEmpty()) {
                log.info("{} was set, starting repl services.", p.getKey());
                replServer.set(setupReplication());
            }
        } catch (UnknownHostException | KeeperException | InterruptedException e) {
            log.error("Error occurred starting replication services. ", e);
        }
    }, 0, 5000, TimeUnit.MILLISECONDS);
    // Check stored user password hashes to see whether any of them use an outdated algorithm
    security.validateStoredUserCreditentials();
    // The manager is fully initialized. Clients are allowed to connect now.
    managerInitialized.set(true);
    while (clientService.isServing()) {
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
    log.info("Shutting down fate.");
    fate.shutdown();
    final long deadline = System.currentTimeMillis() + MAX_CLEANUP_WAIT_TIME;
    try {
        statusThread.join(remaining(deadline));
        if (null != replicationAssignerThread) {
            replicationAssignerThread.join(remaining(deadline));
        }
        if (null != replicationWorkThread) {
            replicationWorkThread.join(remaining(deadline));
        }
    } catch (InterruptedException e) {
        throw new IllegalStateException("Exception stopping replication workers", e);
    }
    var nullableReplServer = replServer.get();
    if (nullableReplServer != null) {
        nullableReplServer.stop();
    }
    // Signal that we want it to stop, and wait for it to do so.
    if (authenticationTokenKeyManager != null) {
        authenticationTokenKeyManager.gracefulStop();
        try {
            if (null != authenticationTokenKeyManagerThread) {
                authenticationTokenKeyManagerThread.join(remaining(deadline));
            }
        } catch (InterruptedException e) {
            throw new IllegalStateException("Exception waiting on delegation-token key manager", e);
        }
    }
    // Join the watchers with a deadline so shutdown can finish even if a watcher doesn't stop
    for (TabletGroupWatcher watcher : watchers) {
        try {
            watcher.join(remaining(deadline));
        } catch (InterruptedException e) {
            throw new IllegalStateException("Exception waiting on watcher", e);
        }
    }
    log.info("exiting");
}
Also used: TServer(org.apache.thrift.server.TServer) ServerAddress(org.apache.accumulo.server.rpc.ServerAddress) Watcher(org.apache.zookeeper.Watcher) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) AuthenticationTokenSecretManager(org.apache.accumulo.server.security.delegation.AuthenticationTokenSecretManager) RecoveryManager(org.apache.accumulo.manager.recovery.RecoveryManager) AuthenticationTokenKeyManager(org.apache.accumulo.server.security.delegation.AuthenticationTokenKeyManager) TableManager(org.apache.accumulo.server.tables.TableManager) WatchedEvent(org.apache.zookeeper.WatchedEvent) Iface(org.apache.accumulo.core.manager.thrift.ManagerClientService.Iface) ExecutionException(java.util.concurrent.ExecutionException) AgeOffStore(org.apache.accumulo.fate.AgeOffStore) Property(org.apache.accumulo.core.conf.Property) UnknownHostException(java.net.UnknownHostException) ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) AtomicReference(java.util.concurrent.atomic.AtomicReference) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) NoAuthException(org.apache.zookeeper.KeeperException.NoAuthException) TException(org.apache.thrift.TException) IOException(java.io.IOException) TTransportException(org.apache.thrift.transport.TTransportException) KeeperException(org.apache.zookeeper.KeeperException) ThriftTableOperationException(org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException) ServerContext(org.apache.accumulo.server.ServerContext)
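
One detail worth calling out in run() is the recovery watcher: ZooKeeper watches fire at most once per registration, so the callback re-registers itself inside process(). A minimal, self-contained sketch of that idiom using the plain ZooKeeper client follows; the class and method names are illustrative, not Accumulo's.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative sketch of a self-rearming children watcher.
public class SelfRearmingWatcher implements Watcher {

    private final ZooKeeper zk;
    private final String path;

    public SelfRearmingWatcher(ZooKeeper zk, String path) {
        this.zk = zk;
        this.path = path;
    }

    public void register() throws Exception {
        // Registers this watcher; ZooKeeper consumes it after one event.
        zk.getChildren(path, this);
    }

    @Override
    public void process(WatchedEvent event) {
        try {
            // Re-arm, since the previous watch was consumed by this event.
            register();
        } catch (Exception e) {
            // Real code should log and decide whether to retry or fail fast,
            // as the Manager does when it fails to re-add its recovery watcher.
        }
    }
}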

Example 8 with ServerContext

Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

From class TabletGroupWatcher, method deleteTablets:

private void deleteTablets(MergeInfo info) throws AccumuloException {
    KeyExtent extent = info.getExtent();
    String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
    Manager.log.debug("Deleting tablets for {}", extent);
    MetadataTime metadataTime = null;
    KeyExtent followingTablet = null;
    if (extent.endRow() != null) {
        Key nextExtent = new Key(extent.endRow()).followingKey(PartialKey.ROW);
        followingTablet = getHighTablet(new KeyExtent(extent.tableId(), nextExtent.getRow(), extent.endRow()));
        Manager.log.debug("Found following tablet {}", followingTablet);
    }
    try {
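        // ServerContext implements the AccumuloClient interface, so the same context
        // serves as the client below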
        AccumuloClient client = manager.getContext();
        ServerContext context = manager.getContext();
        Ample ample = context.getAmple();
        Text start = extent.prevEndRow();
        if (start == null) {
            start = new Text();
        }
        Manager.log.debug("Making file deletion entries for {}", extent);
        Range deleteRange = new Range(TabletsSection.encodeRow(extent.tableId(), start), false,
            TabletsSection.encodeRow(extent.tableId(), extent.endRow()), true);
        Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
        scanner.setRange(deleteRange);
        ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
        ServerColumnFamily.TIME_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        scanner.fetchColumnFamily(CurrentLocationColumnFamily.NAME);
        Set<String> datafiles = new TreeSet<>();
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                datafiles.add(TabletFileUtil.validate(key.getColumnQualifierData().toString()));
                if (datafiles.size() > 1000) {
                    ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
                    datafiles.clear();
                }
            } else if (ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
                metadataTime = MetadataTime.parse(entry.getValue().toString());
            } else if (key.compareColumnFamily(CurrentLocationColumnFamily.NAME) == 0) {
                throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
            } else if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                String path = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(extent.tableId(), entry.getValue().toString());
                datafiles.add(path);
                if (datafiles.size() > 1000) {
                    ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
                    datafiles.clear();
                }
            }
        }
        ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
        try (BatchWriter bw = client.createBatchWriter(targetSystemTable)) {
            deleteTablets(info, deleteRange, bw, client);
        }
        if (followingTablet != null) {
            Manager.log.debug("Updating prevRow of {} to {}", followingTablet, extent.prevEndRow());
            try (BatchWriter bw = client.createBatchWriter(targetSystemTable)) {
                Mutation m = new Mutation(followingTablet.toMetaRow());
                TabletColumnFamily.PREV_ROW_COLUMN.put(m,
                    TabletColumnFamily.encodePrevEndRow(extent.prevEndRow()));
                ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
                bw.addMutation(m);
                bw.flush();
            }
        } else {
            // Recreate the default tablet to hold the end of the table
            MetadataTableUtil.addTablet(new KeyExtent(extent.tableId(), null, extent.prevEndRow()),
                ServerColumnFamily.DEFAULT_TABLET_DIR_NAME, manager.getContext(),
                metadataTime.getType(), manager.managerLock);
        }
    } catch (RuntimeException | TableNotFoundException ex) {
        throw new AccumuloException(ex);
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) ServerContext(org.apache.accumulo.server.ServerContext) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) Ample(org.apache.accumulo.core.metadata.schema.Ample) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MetadataTime(org.apache.accumulo.core.metadata.schema.MetadataTime) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
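
Note how deleteTablets drains GC candidates: it accumulates file and directory entries in a set and hands them to ample.putGcFileAndDirCandidates whenever the set passes 1000 entries, with one final call for the remainder. A generic sketch of that buffer-and-flush pattern is below; the Consumer stands in for the Ample call and the class is purely illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Illustrative buffer-and-flush helper in the style of the batching above.
public class BatchedFlush<T> {

    private final List<T> buffer = new ArrayList<>();
    private final int limit;
    private final Consumer<List<T>> sink;

    public BatchedFlush(int limit, Consumer<List<T>> sink) {
        this.limit = limit;
        this.sink = sink;
    }

    public void add(T item) {
        buffer.add(item);
        if (buffer.size() > limit) {
            flush();
        }
    }

    public void flush() {
        // Flushes whatever has accumulated, including a final partial batch.
        sink.accept(new ArrayList<>(buffer));
        buffer.clear();
    }
}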

Example 9 with ServerContext

Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

From class CompactorTest, method testCompactionSucceeds:

@Test
public void testCompactionSucceeds() throws Exception {
    UUID uuid = UUID.randomUUID();
    Supplier<UUID> supplier = () -> uuid;
    ExternalCompactionId eci = ExternalCompactionId.generate(supplier.get());
    PowerMock.resetAll();
    PowerMock.suppress(PowerMock.methods(Halt.class, "halt"));
    PowerMock.suppress(PowerMock.constructor(AbstractServer.class));
    ServerAddress client = PowerMock.createNiceMock(ServerAddress.class);
    HostAndPort address = HostAndPort.fromString("localhost:10240");
    EasyMock.expect(client.getAddress()).andReturn(address);
    TExternalCompactionJob job = PowerMock.createNiceMock(TExternalCompactionJob.class);
    TKeyExtent extent = PowerMock.createNiceMock(TKeyExtent.class);
    EasyMock.expect(job.isSetExternalCompactionId()).andReturn(true).anyTimes();
    EasyMock.expect(job.getExternalCompactionId()).andReturn(eci.toString()).anyTimes();
    EasyMock.expect(job.getExtent()).andReturn(extent).anyTimes();
    EasyMock.expect(extent.getTable()).andReturn("testTable".getBytes()).anyTimes();
    AccumuloConfiguration conf = PowerMock.createNiceMock(AccumuloConfiguration.class);
    EasyMock.expect(conf.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT)).andReturn(86400000L);
    ServerContext context = PowerMock.createNiceMock(ServerContext.class);
    EasyMock.expect(context.getConfiguration()).andReturn(conf);
    ZooReaderWriter zrw = PowerMock.createNiceMock(ZooReaderWriter.class);
    ZooKeeper zk = PowerMock.createNiceMock(ZooKeeper.class);
    EasyMock.expect(context.getZooReaderWriter()).andReturn(zrw).anyTimes();
    EasyMock.expect(zrw.getZooKeeper()).andReturn(zk).anyTimes();
    VolumeManagerImpl vm = PowerMock.createNiceMock(VolumeManagerImpl.class);
    EasyMock.expect(context.getVolumeManager()).andReturn(vm);
    vm.close();
    PowerMock.replayAll();
    SuccessfulCompactor c = new SuccessfulCompactor(supplier, client, job, conf, context, eci);
    c.run();
    PowerMock.verifyAll();
    c.close();
    assertTrue(c.isCompletedCalled());
    assertFalse(c.isFailedCalled());
}
Also used: Halt(org.apache.accumulo.core.util.Halt) ExternalCompactionId(org.apache.accumulo.core.metadata.schema.ExternalCompactionId) ServerAddress(org.apache.accumulo.server.rpc.ServerAddress) ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) VolumeManagerImpl(org.apache.accumulo.server.fs.VolumeManagerImpl) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) HostAndPort(org.apache.accumulo.core.util.HostAndPort) AbstractServer(org.apache.accumulo.server.AbstractServer) ZooKeeper(org.apache.zookeeper.ZooKeeper) ServerContext(org.apache.accumulo.server.ServerContext) UUID(java.util.UUID) TExternalCompactionJob(org.apache.accumulo.core.tabletserver.thrift.TExternalCompactionJob) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
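
The test above builds its entire ServerContext out of mocks, so it runs without a real ZooKeeper or filesystem; PowerMock is only needed to suppress constructors and static methods. A pared-down sketch of the same mocking idea using EasyMock alone is shown here; the helper class and method names are assumptions, not part of the Accumulo tests.

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.server.ServerContext;
import org.easymock.EasyMock;

// Illustrative helper: a nice-mocked ServerContext whose configuration getter
// returns a canned mock.
public class MockServerContexts {

    public static ServerContext niceContext() {
        AccumuloConfiguration conf = EasyMock.createNiceMock(AccumuloConfiguration.class);
        ServerContext context = EasyMock.createNiceMock(ServerContext.class);
        EasyMock.expect(context.getConfiguration()).andReturn(conf).anyTimes();
        EasyMock.replay(conf, context);
        return context;
    }
}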

Example 10 with ServerContext

Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

From class BulkFailureIT, method runTest:

/**
 * This test verifies two things. First, it ensures that once a bulk imported file has been
 * compacted, subsequent requests to import it are ignored. Second, it ensures that after the
 * bulk import transaction is canceled, import requests fail. The public bulk import API cannot
 * be used for this test; internal (non-public API) RPCs and ZooKeeper state are manipulated
 * directly. This is the only way to interleave compactions with multiple, duplicate import RPC
 * requests.
 */
protected void runTest(String table, long fateTxid, Loader loader) throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        SortedMap<Key, Value> testData = createTestData();
        FileSystem fs = getCluster().getFileSystem();
        String testFile = createTestFile(fateTxid, testData, fs);
        c.tableOperations().create(table);
        String tableId = c.tableOperations().tableIdMap().get(table);
        // Table has no splits, so this extent corresponds to the table's single tablet
        KeyExtent extent = new KeyExtent(TableId.of(tableId), null, null);
        ServerContext asCtx = getServerContext();
        ZooArbitrator.start(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);
        VolumeManager vm = asCtx.getVolumeManager();
        // move the file into a directory for the table and rename the file to something unique
        String bulkDir = BulkImport.prepareBulkImport(asCtx, vm, testFile, TableId.of(tableId), fateTxid);
        // determine the files new name and path
        FileStatus status = fs.listStatus(new Path(bulkDir))[0];
        Path bulkLoadPath = fs.makeQualified(status.getPath());
        // Directly ask the tablet to load the file.
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(Set.of(bulkLoadPath), getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // Compact the bulk imported file. Subsequent requests to load the file should be ignored.
        c.tableOperations().compact(table, new CompactionConfig().setWait(true));
        Set<Path> tabletFiles = getFiles(c, extent);
        assertFalse(tabletFiles.contains(bulkLoadPath));
        assertEquals(1, tabletFiles.size());
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // this request should be ignored by the tablet
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // Take the table offline and back online to ensure the tablet reads the load flags from
        // the metadata table when it loads
        c.tableOperations().offline(table, true);
        c.tableOperations().online(table, true);
        // this request should be ignored by the tablet
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // After this, all load requests should fail.
        ZooArbitrator.stop(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);
        c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
        BatchDeleter bd = c.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 1);
        bd.setRanges(Collections.singleton(extent.toMetaRange()));
        bd.fetchColumnFamily(BulkFileColumnFamily.NAME);
        bd.delete();
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), true);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) BatchDeleter(org.apache.accumulo.core.client.BatchDeleter) FileStatus(org.apache.hadoop.fs.FileStatus) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) ServerContext(org.apache.accumulo.server.ServerContext) FileSystem(org.apache.hadoop.fs.FileSystem) Value(org.apache.accumulo.core.data.Value) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Key(org.apache.accumulo.core.data.Key)
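
The helpers createTestData, createTestFile, getFiles, getLoaded, and readTable are private to BulkFailureIT and not shown above. For orientation, here is a minimal sketch of what createTestData might look like; this is an assumption about the helper's shape, not the actual test code.

import static java.nio.charset.StandardCharsets.UTF_8;

import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

public class TestDataSketch {

    // Assumed shape of the createTestData helper; the real implementation may differ.
    static SortedMap<Key, Value> createTestData() {
        SortedMap<Key, Value> data = new TreeMap<>();
        for (int i = 0; i < 10; i++) {
            // Row, family, and qualifier with small ASCII payloads.
            Key key =
                new Key(new Text(String.format("row%03d", i)), new Text("fam"), new Text("qual"));
            data.put(key, new Value(("val" + i).getBytes(UTF_8)));
        }
        return data;
    }
}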

Aggregations

ServerContext (org.apache.accumulo.server.ServerContext): 87
Test (org.junit.Test): 41
ZooReaderWriter (org.apache.accumulo.fate.zookeeper.ZooReaderWriter): 18
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 15
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 15
TServerInstance (org.apache.accumulo.core.metadata.TServerInstance): 15
HostAndPort (org.apache.accumulo.core.util.HostAndPort): 15
Path (org.apache.hadoop.fs.Path): 15
ArrayList (java.util.ArrayList): 14
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 14
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 13
KeeperException (org.apache.zookeeper.KeeperException): 13
ServerAddress (org.apache.accumulo.server.rpc.ServerAddress): 12
TableId (org.apache.accumulo.core.data.TableId): 11
LiveTServerSet (org.apache.accumulo.server.manager.LiveTServerSet): 11
Value (org.apache.accumulo.core.data.Value): 10
IOException (java.io.IOException): 9
UUID (java.util.UUID): 9
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy): 9
Client (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client): 9