Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class VerifyTabletAssignments, method checkTable.
private static void checkTable(final ClientContext context, final Opts opts, String tableName,
    HashSet<KeyExtent> check) throws AccumuloException, AccumuloSecurityException,
    TableNotFoundException, InterruptedException {
  if (check == null)
    System.out.println("Checking table " + tableName);
  else
    System.out.println("Checking table " + tableName + " again, failures " + check.size());

  TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
  TableId tableId = context.getTableNameToIdMap().get(tableName);
  MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);

  final HashSet<KeyExtent> failures = new HashSet<>();

  // Group the extents to verify by the tablet server hosting them.
  Map<HostAndPort, List<KeyExtent>> extentsPerServer = new TreeMap<>();
  for (Entry<KeyExtent, String> entry : tabletLocations.entrySet()) {
    KeyExtent keyExtent = entry.getKey();
    String loc = entry.getValue();
    if (loc == null)
      System.out.println(" Tablet " + keyExtent + " has no location");
    else if (opts.verbose)
      System.out.println(" Tablet " + keyExtent + " is located at " + loc);

    if (loc != null) {
      final HostAndPort parsedLoc = HostAndPort.fromString(loc);
      List<KeyExtent> extentList =
          extentsPerServer.computeIfAbsent(parsedLoc, k -> new ArrayList<>());
      // On a retry pass, only re-check the extents that failed previously.
      if (check == null || check.contains(keyExtent))
        extentList.add(keyExtent);
    }
  }

  ExecutorService tp = ThreadPools.createFixedThreadPool(20, "CheckTabletServer", false);
  for (final Entry<HostAndPort, List<KeyExtent>> entry : extentsPerServer.entrySet()) {
    Runnable r = () -> {
      try {
        checkTabletServer(context, entry, failures);
      } catch (Exception e) {
        log.error("Failure on tablet server '" + entry.getKey() + "'.", e);
        failures.addAll(entry.getValue());
      }
    };
    tp.execute(r);
  }
  tp.shutdown();

  while (!tp.awaitTermination(1, TimeUnit.HOURS)) {}

  if (!failures.isEmpty())
    checkTable(context, opts, tableName, failures);
}
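The recursion at the end is the retry mechanism: the first pass runs with a null check set, so every tablet is verified, and any failures trigger a narrower second pass. A hypothetical call site (the argument shape follows from the method above):

// Hypothetical entry point: 'check' starts as null (verify everything); on
// failures, checkTable calls itself with just the failed extents.
checkTable(context, opts, tableName, null);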
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class FindOfflineTablets, method findOffline.
static int findOffline(ServerContext context, String tableName) throws TableNotFoundException {
  final AtomicBoolean scanning = new AtomicBoolean(false);
  LiveTServerSet tservers = new LiveTServerSet(context, new Listener() {
    @Override
    public void update(LiveTServerSet current, Set<TServerInstance> deleted,
        Set<TServerInstance> added) {
      if (!deleted.isEmpty() && scanning.get())
        log.warn("Tablet servers deleted while scanning: {}", deleted);
      if (!added.isEmpty() && scanning.get())
        log.warn("Tablet servers added while scanning: {}", added);
    }
  });
  tservers.startListeningForTabletServerChanges();
  scanning.set(true);

  Iterator<TabletLocationState> zooScanner =
      TabletStateStore.getStoreForLevel(DataLevel.ROOT, context).iterator();

  int offline = 0;

  System.out.println("Scanning zookeeper");
  if ((offline = checkTablets(context, zooScanner, tservers)) > 0)
    return offline;

  if (RootTable.NAME.equals(tableName))
    return 0;

  System.out.println("Scanning " + RootTable.NAME);
  // Close the scanner when done so its underlying resources are released.
  try (MetaDataTableScanner rootScanner =
      new MetaDataTableScanner(context, TabletsSection.getRange(), RootTable.NAME)) {
    if ((offline = checkTablets(context, rootScanner, tservers)) > 0)
      return offline;
  }

  if (MetadataTable.NAME.equals(tableName))
    return 0;

  System.out.println("Scanning " + MetadataTable.NAME);
  Range range = TabletsSection.getRange();
  if (tableName != null) {
    TableId tableId = context.getTableId(tableName);
    range = new KeyExtent(tableId, null, null).toMetaRange();
  }
  try (MetaDataTableScanner metaScanner =
      new MetaDataTableScanner(context, range, MetadataTable.NAME)) {
    return checkTablets(context, metaScanner, tservers);
  }
}
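A hedged usage sketch: the method returns the offline count from the first level (ZooKeeper root state, then the root table, then the metadata table) that reports any, and a null tableName scans every table.

// Hypothetical call site; assumes an initialized ServerContext.
int offline = FindOfflineTablets.findOffline(context, null); // null = all tables
if (offline > 0)
  System.out.println(offline + " offline tablet(s) found");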
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class VolumeManagerImplTest, method chooseFromOptions.
// Expected to throw a runtime exception when the WrongVolumeChooser picks an invalid volume.
@Test
public void chooseFromOptions() throws Exception {
  Set<String> volumes = Set.of("file://one/", "file://two/", "file://three/");
  ConfigurationCopy conf = new ConfigurationCopy();
  conf.set(Property.INSTANCE_VOLUMES, String.join(",", volumes));
  conf.set(Property.GENERAL_VOLUME_CHOOSER, WrongVolumeChooser.class.getName());
  try (var vm = VolumeManagerImpl.get(conf, hadoopConf)) {
    org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment chooserEnv =
        new VolumeChooserEnvironment() {

          @Override
          public Optional<TableId> getTable() {
            throw new UnsupportedOperationException();
          }

          @Override
          public ServiceEnvironment getServiceEnv() {
            throw new UnsupportedOperationException();
          }

          @Override
          public Text getEndRow() {
            throw new UnsupportedOperationException();
          }

          @Override
          public Scope getChooserScope() {
            throw new UnsupportedOperationException();
          }
        };
    assertThrows(RuntimeException.class, () -> vm.choose(chooserEnv, volumes));
  }
}
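The test references WrongVolumeChooser, which is not shown here. A minimal sketch of what such a chooser might look like, assuming the org.apache.accumulo.core.spi.fs.VolumeChooser SPI; the returned volume is deliberately absent from INSTANCE_VOLUMES, which is what makes vm.choose throw:

// Sketch only: always answers with a volume that was never configured.
public static class WrongVolumeChooser implements VolumeChooser {

  @Override
  public String choose(VolumeChooserEnvironment env, Set<String> options) {
    return "file://not/a/configured/volume";
  }

  @Override
  public Set<String> choosable(VolumeChooserEnvironment env, Set<String> options) {
    return Set.of("file://not/a/configured/volume");
  }
}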
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class SequentialWorkAssignerTest, method basicZooKeeperCleanup.
@Test
public void basicZooKeeperCleanup() {
  DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
  ZooCache zooCache = createMock(ZooCache.class);

  Map<String, Map<TableId, String>> queuedWork = new TreeMap<>();
  Map<TableId, String> cluster1Work = new TreeMap<>();

  // Two files for cluster1: one for table '1' and another for table '2' that we
  // have not yet assigned work for.
  cluster1Work.put(TableId.of("1"), DistributedWorkQueueWorkAssignerHelper.getQueueKey("file1",
      new ReplicationTarget("cluster1", "1", TableId.of("1"))));
  cluster1Work.put(TableId.of("2"), DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2",
      new ReplicationTarget("cluster1", "2", TableId.of("2"))));
  queuedWork.put("cluster1", cluster1Work);

  assigner.setClient(client);
  assigner.setZooCache(zooCache);
  assigner.setWorkQueue(workQueue);
  assigner.setQueuedWork(queuedWork);

  InstanceOperations opts = createMock(InstanceOperations.class);
  var iid = InstanceId.of("instance");
  expect(opts.getInstanceId()).andReturn(iid);
  expect(client.instanceOperations()).andReturn(opts);

  // file1 replicated: its ZooKeeper work node is gone.
  expect(zooCache.get(ZooUtil.getRoot(iid) + ReplicationConstants.ZOO_WORK_QUEUE + "/"
      + DistributedWorkQueueWorkAssignerHelper.getQueueKey("file1",
          new ReplicationTarget("cluster1", "1", TableId.of("1"))))).andReturn(null);
  // file2 still needs to replicate: its work node is still present.
  expect(zooCache.get(ZooUtil.getRoot(iid) + ReplicationConstants.ZOO_WORK_QUEUE + "/"
      + DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2",
          new ReplicationTarget("cluster1", "2", TableId.of("2"))))).andReturn(new byte[0]);

  replay(workQueue, zooCache, opts, client);

  assigner.cleanupFinishedWork();

  verify(workQueue, zooCache, client);
  assertEquals(1, cluster1Work.size());
  assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2",
      new ReplicationTarget("cluster1", "2", TableId.of("2"))), cluster1Work.get(TableId.of("2")));
}
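For reference, the queue keys built above pack the filename together with the fields of the ReplicationTarget. A hypothetical illustration of the shape of such a key (assumption: the fields are joined with a '|' separator; the exact encoding belongs to DistributedWorkQueueWorkAssignerHelper and is not shown in this snippet):

String key = DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2",
    new ReplicationTarget("cluster1", "2", TableId.of("2")));
// key resembles "file2|cluster1|2|2" under the assumed encoding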
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class CompactionDriverTest, method testCancelId.
@Test
public void testCancelId() throws Exception {
  final InstanceId instance = InstanceId.of(UUID.randomUUID());
  final long compactId = 123;
  final long cancelId = 124;
  final NamespaceId namespaceId = NamespaceId.of("13");
  final TableId tableId = TableId.of("42");
  final byte[] startRow = new byte[0];
  final byte[] endRow = new byte[0];

  Manager manager = EasyMock.createNiceMock(Manager.class);
  ServerContext ctx = EasyMock.createNiceMock(ServerContext.class);
  ZooReaderWriter zrw = EasyMock.createNiceMock(ZooReaderWriter.class);

  EasyMock.expect(manager.getInstanceID()).andReturn(instance).anyTimes();
  EasyMock.expect(manager.getContext()).andReturn(ctx);
  EasyMock.expect(ctx.getZooReaderWriter()).andReturn(zrw);

  final String zCancelID = CompactionDriver.createCompactionCancellationPath(instance, tableId);
  EasyMock.expect(zrw.getData(zCancelID)).andReturn(Long.toString(cancelId).getBytes());

  EasyMock.replay(manager, ctx, zrw);

  final CompactionDriver driver =
      new CompactionDriver(compactId, namespaceId, tableId, startRow, endRow);
  final long tableIdLong = Long.parseLong(tableId.toString());

  var e = assertThrows(AcceptableThriftTableOperationException.class,
      () -> driver.isReady(tableIdLong, manager));
  assertEquals(tableId.toString(), e.getTableId());
  assertEquals(TableOperation.COMPACT, e.getOp());
  assertEquals(TableOperationExceptionType.OTHER, e.getType());
  assertEquals(TableOperationsImpl.COMPACTION_CANCELED_MSG, e.getDescription());

  EasyMock.verify(manager, ctx, zrw);
}
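The assertions follow from the cancel id stored in ZooKeeper (124) exceeding the running compaction id (123). A sketch of the check the test exercises, mirroring (not quoting) the logic inside CompactionDriver.isReady:

// Assumption: isReady compares the ZooKeeper cancellation id to the running
// compaction id and aborts the operation once it has been overtaken.
long storedCancelId = Long.parseLong(new String(zrw.getData(zCancelID)));
if (storedCancelId >= compactId) {
  throw new AcceptableThriftTableOperationException(tableId.toString(), null,
      TableOperation.COMPACT, TableOperationExceptionType.OTHER,
      TableOperationsImpl.COMPACTION_CANCELED_MSG);
}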