Example 46 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

The class VerifyTabletAssignments, method checkTable.

private static void checkTable(final ClientContext context, final Opts opts, String tableName, HashSet<KeyExtent> check) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, InterruptedException {
    if (check == null)
        System.out.println("Checking table " + tableName);
    else
        System.out.println("Checking table " + tableName + " again, failures " + check.size());
    TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
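    // map the table name to its TableId; the lookup returns null if the table does not exist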
    TableId tableId = context.getTableNameToIdMap().get(tableName);
    MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
    final HashSet<KeyExtent> failures = new HashSet<>();
    Map<HostAndPort, List<KeyExtent>> extentsPerServer = new TreeMap<>();
    for (Entry<KeyExtent, String> entry : tabletLocations.entrySet()) {
        KeyExtent keyExtent = entry.getKey();
        String loc = entry.getValue();
        if (loc == null)
            System.out.println(" Tablet " + keyExtent + " has no location");
        else if (opts.verbose)
            System.out.println(" Tablet " + keyExtent + " is located at " + loc);
        if (loc != null) {
            final HostAndPort parsedLoc = HostAndPort.fromString(loc);
            List<KeyExtent> extentList = extentsPerServer.computeIfAbsent(parsedLoc, k -> new ArrayList<>());
            if (check == null || check.contains(keyExtent))
                extentList.add(keyExtent);
        }
    }
    ExecutorService tp = ThreadPools.createFixedThreadPool(20, "CheckTabletServer", false);
    for (final Entry<HostAndPort, List<KeyExtent>> entry : extentsPerServer.entrySet()) {
        Runnable r = () -> {
            try {
                checkTabletServer(context, entry, failures);
            } catch (Exception e) {
                log.error("Failure on tablet server '" + entry.getKey() + "'.", e);
                failures.addAll(entry.getValue());
            }
        };
        tp.execute(r);
    }
    tp.shutdown();
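    // wait, in one-hour increments, until every queued check has finished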
    while (!tp.awaitTermination(1, TimeUnit.HOURS)) {
    }
    if (!failures.isEmpty())
        checkTable(context, opts, tableName, failures);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) TreeMap(java.util.TreeMap) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) NoSuchScanIDException(org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException) ThriftSecurityException(org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) HostAndPort(org.apache.accumulo.core.util.HostAndPort) ExecutorService(java.util.concurrent.ExecutorService) ArrayList(java.util.ArrayList) List(java.util.List) HashSet(java.util.HashSet)
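
The grouping idiom in checkTable (computeIfAbsent keyed on the parsed server address) is the standard JDK way to bucket tablets per server before handing each bucket to the thread pool. A minimal, self-contained sketch of the same pattern, using plain JDK types; the host names and tablet names are invented for illustration:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class GroupBySketch {
    public static void main(String[] args) {
        // (server, tablet) pairs as they might come out of a metadata scan
        String[][] assignments = { { "host1:9997", "t1" }, { "host2:9997", "t2" }, { "host1:9997", "t3" } };
        Map<String, List<String>> perServer = new TreeMap<>();
        for (String[] a : assignments) {
            // create the bucket on first sight of a server, then append to it
            perServer.computeIfAbsent(a[0], k -> new ArrayList<>()).add(a[1]);
        }
        // prints {host1:9997=[t1, t3], host2:9997=[t2]}
        System.out.println(perServer);
    }
}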

Example 47 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

The class FindOfflineTablets, method findOffline.

static int findOffline(ServerContext context, String tableName) throws TableNotFoundException {
    final AtomicBoolean scanning = new AtomicBoolean(false);
    LiveTServerSet tservers = new LiveTServerSet(context, new Listener() {

        @Override
        public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
            if (!deleted.isEmpty() && scanning.get())
                log.warn("Tablet servers deleted while scanning: {}", deleted);
            if (!added.isEmpty() && scanning.get())
                log.warn("Tablet servers added while scanning: {}", added);
        }
    });
    tservers.startListeningForTabletServerChanges();
    scanning.set(true);
    Iterator<TabletLocationState> zooScanner = TabletStateStore.getStoreForLevel(DataLevel.ROOT, context).iterator();
    int offline = 0;
    System.out.println("Scanning zookeeper");
    if ((offline = checkTablets(context, zooScanner, tservers)) > 0)
        return offline;
    if (RootTable.NAME.equals(tableName))
        return 0;
    System.out.println("Scanning " + RootTable.NAME);
    Iterator<TabletLocationState> rootScanner = new MetaDataTableScanner(context, TabletsSection.getRange(), RootTable.NAME);
    if ((offline = checkTablets(context, rootScanner, tservers)) > 0)
        return offline;
    if (MetadataTable.NAME.equals(tableName))
        return 0;
    System.out.println("Scanning " + MetadataTable.NAME);
    Range range = TabletsSection.getRange();
    if (tableName != null) {
        TableId tableId = context.getTableId(tableName);
        range = new KeyExtent(tableId, null, null).toMetaRange();
    }
    try (MetaDataTableScanner metaScanner = new MetaDataTableScanner(context, range, MetadataTable.NAME)) {
        return checkTablets(context, metaScanner, tservers);
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Listener(org.apache.accumulo.server.manager.LiveTServerSet.Listener) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TServerInstance(org.apache.accumulo.core.metadata.TServerInstance) LiveTServerSet(org.apache.accumulo.server.manager.LiveTServerSet) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) MetaDataTableScanner(org.apache.accumulo.server.manager.state.MetaDataTableScanner) TabletLocationState(org.apache.accumulo.core.metadata.TabletLocationState)
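
In findOffline, narrowing the metadata scan to a single table is done by building a whole-table KeyExtent and converting it to a metadata range. A minimal sketch of just that step, assuming accumulo-core is on the classpath; the table id "2a" is made up:

import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;

public class MetaRangeSketch {
    public static void main(String[] args) {
        TableId tableId = TableId.of("2a");
        // null end row and null prev end row mean "the entire table"
        Range metaRange = new KeyExtent(tableId, null, null).toMetaRange();
        System.out.println(metaRange);
    }
}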

Example 48 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

The class VolumeManagerImplTest, method chooseFromOptions.

// Expected to throw a runtime exception when the WrongVolumeChooser picks an invalid volume.
@Test
public void chooseFromOptions() throws Exception {
    Set<String> volumes = Set.of("file://one/", "file://two/", "file://three/");
    ConfigurationCopy conf = new ConfigurationCopy();
    conf.set(Property.INSTANCE_VOLUMES, String.join(",", volumes));
    conf.set(Property.GENERAL_VOLUME_CHOOSER, WrongVolumeChooser.class.getName());
    try (var vm = VolumeManagerImpl.get(conf, hadoopConf)) {
        org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment() {

            @Override
            public Optional<TableId> getTable() {
                throw new UnsupportedOperationException();
            }

            @Override
            public ServiceEnvironment getServiceEnv() {
                throw new UnsupportedOperationException();
            }

            @Override
            public Text getEndRow() {
                throw new UnsupportedOperationException();
            }

            @Override
            public Scope getChooserScope() {
                throw new UnsupportedOperationException();
            }
        };
        assertThrows(RuntimeException.class, () -> vm.choose(chooserEnv, volumes));
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) VolumeChooserEnvironment(org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment) Test(org.junit.Test)
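
The anonymous VolumeChooserEnvironment above stubs every accessor with UnsupportedOperationException, a common way to satisfy an interface when the test is not expected to exercise those methods. The assertThrows idiom itself is plain JUnit 4.13+; a standalone sketch of the same shape, using toy code unrelated to Accumulo:

import static org.junit.Assert.assertThrows;

import org.junit.Test;

public class AssertThrowsSketch {
    @Test
    public void badNumberThrows() {
        // assertThrows returns the caught exception so it can be inspected further
        NumberFormatException e = assertThrows(NumberFormatException.class, () -> Integer.parseInt("not a number"));
        System.out.println(e.getMessage());
    }
}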

Example 49 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

The class SequentialWorkAssignerTest, method basicZooKeeperCleanup.

@Test
public void basicZooKeeperCleanup() {
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    ZooCache zooCache = createMock(ZooCache.class);
    Map<String, Map<TableId, String>> queuedWork = new TreeMap<>();
    Map<TableId, String> cluster1Work = new TreeMap<>();
    // Two files for cluster1: one for table '1' and another for table '2' that we haven't assigned work for
    cluster1Work.put(TableId.of("1"), DistributedWorkQueueWorkAssignerHelper.getQueueKey("file1", new ReplicationTarget("cluster1", "1", TableId.of("1"))));
    cluster1Work.put(TableId.of("2"), DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2", new ReplicationTarget("cluster1", "2", TableId.of("2"))));
    queuedWork.put("cluster1", cluster1Work);
    assigner.setClient(client);
    assigner.setZooCache(zooCache);
    assigner.setWorkQueue(workQueue);
    assigner.setQueuedWork(queuedWork);
    InstanceOperations opts = createMock(InstanceOperations.class);
    var iid = InstanceId.of("instance");
    expect(opts.getInstanceId()).andReturn(iid);
    expect(client.instanceOperations()).andReturn(opts);
    // file1 replicated
    expect(zooCache.get(ZooUtil.getRoot(iid) + ReplicationConstants.ZOO_WORK_QUEUE + "/" + DistributedWorkQueueWorkAssignerHelper.getQueueKey("file1", new ReplicationTarget("cluster1", "1", TableId.of("1"))))).andReturn(null);
    // file2 still needs to replicate
    expect(zooCache.get(ZooUtil.getRoot(iid) + ReplicationConstants.ZOO_WORK_QUEUE + "/" + DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2", new ReplicationTarget("cluster1", "2", TableId.of("2"))))).andReturn(new byte[0]);
    replay(workQueue, zooCache, opts, client);
    assigner.cleanupFinishedWork();
    verify(workQueue, zooCache, client);
    assertEquals(1, cluster1Work.size());
    assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2", new ReplicationTarget("cluster1", "2", TableId.of("2"))), cluster1Work.get(TableId.of("2")));
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) TreeMap(java.util.TreeMap) ZooCache(org.apache.accumulo.fate.zookeeper.ZooCache) TreeMap(java.util.TreeMap) Map(java.util.Map) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) InstanceOperations(org.apache.accumulo.core.client.admin.InstanceOperations) Test(org.junit.Test)
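
TableId.of produces canonical, value-comparable identifiers, which is what lets the test use them both as TreeMap keys and in equality assertions. A minimal sketch; the id string "2a" is arbitrary:

import org.apache.accumulo.core.data.TableId;

public class TableIdSketch {
    public static void main(String[] args) {
        TableId a = TableId.of("2a");
        TableId b = TableId.of("2a");
        // two ids built from the same string compare equal and sort identically
        System.out.println(a.equals(b)); // true
        System.out.println(a.compareTo(b)); // 0
        System.out.println(a.canonical()); // 2a
    }
}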

Example 50 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

The class CompactionDriverTest, method testCancelId.

@Test
public void testCancelId() throws Exception {
    final InstanceId instance = InstanceId.of(UUID.randomUUID());
    final long compactId = 123;
    final long cancelId = 124;
    final NamespaceId namespaceId = NamespaceId.of("13");
    final TableId tableId = TableId.of("42");
    final byte[] startRow = new byte[0];
    final byte[] endRow = new byte[0];
    Manager manager = EasyMock.createNiceMock(Manager.class);
    ServerContext ctx = EasyMock.createNiceMock(ServerContext.class);
    ZooReaderWriter zrw = EasyMock.createNiceMock(ZooReaderWriter.class);
    EasyMock.expect(manager.getInstanceID()).andReturn(instance).anyTimes();
    EasyMock.expect(manager.getContext()).andReturn(ctx);
    EasyMock.expect(ctx.getZooReaderWriter()).andReturn(zrw);
    final String zCancelID = CompactionDriver.createCompactionCancellationPath(instance, tableId);
    EasyMock.expect(zrw.getData(zCancelID)).andReturn(Long.toString(cancelId).getBytes());
    EasyMock.replay(manager, ctx, zrw);
    final CompactionDriver driver = new CompactionDriver(compactId, namespaceId, tableId, startRow, endRow);
    final long tableIdLong = Long.parseLong(tableId.toString());
    var e = assertThrows(AcceptableThriftTableOperationException.class, () -> driver.isReady(tableIdLong, manager));
    assertEquals(tableId.toString(), e.getTableId());
    assertEquals(TableOperation.COMPACT, e.getOp());
    assertEquals(TableOperationExceptionType.OTHER, e.getType());
    assertEquals(TableOperationsImpl.COMPACTION_CANCELED_MSG, e.getDescription());
    EasyMock.verify(manager, ctx, zrw);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ServerContext(org.apache.accumulo.server.ServerContext) InstanceId(org.apache.accumulo.core.data.InstanceId) ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) NamespaceId(org.apache.accumulo.core.data.NamespaceId) Manager(org.apache.accumulo.manager.Manager) Test(org.junit.Test)
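
The test follows the usual EasyMock record/replay/verify lifecycle: expectations are recorded with expect(...).andReturn(...), replay switches the mocks to playback, the code under test runs, and verify confirms every recorded call happened. A standalone sketch of that lifecycle; the Greeter interface is invented for illustration:

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

public class EasyMockSketch {
    public interface Greeter {
        String greet(String name);
    }

    public static void main(String[] args) {
        Greeter mock = createMock(Greeter.class);
        // record: exactly one call to greet("world"), returning a canned value
        expect(mock.greet("world")).andReturn("hello world");
        replay(mock);
        // exercise the "code under test"
        System.out.println(mock.greet("world"));
        // verify: fails if any recorded expectation was not met
        verify(mock);
    }
}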

Aggregations

TableId (org.apache.accumulo.core.data.TableId) 169
Text (org.apache.hadoop.io.Text) 64
HashMap (java.util.HashMap) 55
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent) 55
ArrayList (java.util.ArrayList) 45
Test (org.junit.Test) 43
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 41
Map (java.util.Map) 37
Key (org.apache.accumulo.core.data.Key) 36
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient) 34
HashSet (java.util.HashSet) 31
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 31
Value (org.apache.accumulo.core.data.Value) 31
IOException (java.io.IOException) 28
Scanner (org.apache.accumulo.core.client.Scanner) 28
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 27
Mutation (org.apache.accumulo.core.data.Mutation) 27
List (java.util.List) 26
Range (org.apache.accumulo.core.data.Range) 24
BatchWriter (org.apache.accumulo.core.client.BatchWriter) 23