Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
The class HostRegexTableLoadBalancerTest, method init.
public void init() {
  ServerContext context1 = createMockContext();
  replay(context1);
  final TestServerConfigurationFactory factory = new TestServerConfigurationFactory(context1);
  initFactory(factory);
}
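Both createMockContext() and initFactory(factory) are helpers defined elsewhere in the test hierarchy and are not shown on this page. As a minimal sketch, assuming EasyMock (which the other examples here use, imports elided as elsewhere on this page), createMockContext() could be as simple as the following; the real helper may add expectations for whatever the balancer reads from the context:

protected ServerContext createMockContext() {
  // EasyMock mock with no expectations; init() above calls replay() on it
  // before handing it to TestServerConfigurationFactory.
  return EasyMock.createMock(ServerContext.class);
}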
Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
The class TableConfigurationUpdateIT, method test.
@Test
public void test() throws Exception {
  try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    ServerContext context = getCluster().getServerContext();
    String table = getUniqueNames(1)[0];
    client.tableOperations().create(table);
    final NamespaceConfiguration defaultConf =
        new NamespaceConfiguration(Namespace.DEFAULT.id(), context, DefaultConfiguration.getInstance());
    // Cache invalidates 25% of the time
    int randomMax = 4;
    // Number of threads
    int numThreads = 2;
    // Number of iterations per thread
    int iterations = 100000;
    AccumuloConfiguration tableConf = new TableConfiguration(context, TableId.of(table), defaultConf);
    long start = System.currentTimeMillis();
    ExecutorService svc = Executors.newFixedThreadPool(numThreads);
    CountDownLatch countDown = new CountDownLatch(numThreads);
    ArrayList<Future<Exception>> futures = new ArrayList<>(numThreads);
    for (int i = 0; i < numThreads; i++) {
      futures.add(svc.submit(new TableConfRunner(randomMax, iterations, tableConf, countDown)));
    }
    svc.shutdown();
    assertTrue(svc.awaitTermination(60, TimeUnit.MINUTES));
    for (Future<Exception> fut : futures) {
      Exception e = fut.get();
      if (e != null) {
        fail("Thread failed with exception " + e);
      }
    }
    long end = System.currentTimeMillis();
    log.debug("{} with {} iterations and {} threads and cache invalidates {}% took {} second(s)",
        tableConf, iterations, numThreads, ((1. / randomMax) * 100.), (end - start) / 1000);
  }
}
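TableConfRunner is a Callable<Exception> defined in the same test class; its body is not shown on this page. A plausible sketch is below: the latch lines the worker threads up so they hit the table configuration concurrently, reads dominate, and roughly one operation in randomMax invalidates the cache (matching the 25% comment above). The invalidateCache() hook and the property chosen are assumptions for illustration:

static class TableConfRunner implements Callable<Exception> {
  private final int randomMax, iterations;
  private final AccumuloConfiguration conf;
  private final CountDownLatch countDown;

  TableConfRunner(int randomMax, int iterations, AccumuloConfiguration conf, CountDownLatch countDown) {
    this.randomMax = randomMax;
    this.iterations = iterations;
    this.conf = conf;
    this.countDown = countDown;
  }

  @Override
  public Exception call() {
    try {
      countDown.countDown();
      countDown.await(); // start all workers at roughly the same time
      Random random = new SecureRandom();
      for (int i = 0; i < iterations; i++) {
        if (random.nextInt(randomMax) == 0) {
          ((TableConfiguration) conf).invalidateCache(); // hypothetical invalidation hook
        } else {
          conf.get(Property.TABLE_SPLIT_THRESHOLD); // plain cached read
        }
      }
    } catch (Exception e) {
      return e; // surfaced through Future.get() in the test
    }
    return null;
  }
}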
Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
The class ManagerRepairsDualAssignmentIT, method test.
@Test
public void test() throws Exception {
  // make some tablets, spread 'em around
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    ClientContext context = (ClientContext) c;
    ServerContext serverContext = cluster.getServerContext();
    String table = this.getUniqueNames(1)[0];
    c.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    c.securityOperations().grantTablePermission("root", RootTable.NAME, TablePermission.WRITE);
    SortedSet<Text> partitions = new TreeSet<>();
    for (String part : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
      partitions.add(new Text(part));
    }
    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(partitions);
    c.tableOperations().create(table, ntc);
    // scan the metadata table and get the two table location states
    Set<TServerInstance> states = new HashSet<>();
    Set<TabletLocationState> oldLocations = new HashSet<>();
    TabletStateStore store = TabletStateStore.getStoreForLevel(DataLevel.USER, context);
    while (states.size() < 2) {
      UtilWaitThread.sleep(250);
      oldLocations.clear();
      for (TabletLocationState tls : store) {
        if (tls.current != null) {
          states.add(tls.current);
          oldLocations.add(tls);
        }
      }
    }
    assertEquals(2, states.size());
    // Kill a tablet server... we don't care which one... wait for everything to be reassigned
    cluster.killProcess(ServerType.TABLET_SERVER,
        cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
    Set<TServerInstance> replStates = new HashSet<>();
    @SuppressWarnings("deprecation")
    TableId repTable = org.apache.accumulo.core.replication.ReplicationTable.ID;
    // Find out which tablet server remains
    while (true) {
      UtilWaitThread.sleep(1000);
      states.clear();
      replStates.clear();
      boolean allAssigned = true;
      for (TabletLocationState tls : store) {
        if (tls != null && tls.current != null) {
          states.add(tls.current);
        } else if (tls != null && tls.extent.equals(new KeyExtent(repTable, null, null))) {
          replStates.add(tls.current);
        } else {
          allAssigned = false;
        }
      }
      System.out.println(states + " size " + states.size() + " allAssigned " + allAssigned);
      if (states.size() != 2 && allAssigned)
        break;
    }
    assertEquals(1, replStates.size());
    assertEquals(1, states.size());
    // pick an assigned tablet and assign it to the old (dead) tablet server
    TabletLocationState moved = null;
    for (TabletLocationState old : oldLocations) {
      if (!states.contains(old.current)) {
        moved = old;
      }
    }
    assertNotEquals(null, moved);
    // throw a mutation in as if we were the dying tablet server
    TabletMutator tabletMutator = serverContext.getAmple().mutateTablet(moved.extent);
    tabletMutator.putLocation(moved.current, LocationType.CURRENT);
    tabletMutator.mutate();
    // wait for the manager to fix the problem
    waitForCleanStore(store);
    // now jam up the metadata table
    tabletMutator = serverContext.getAmple().mutateTablet(new KeyExtent(MetadataTable.ID, null, null));
    tabletMutator.putLocation(moved.current, LocationType.CURRENT);
    tabletMutator.mutate();
    waitForCleanStore(TabletStateStore.getStoreForLevel(DataLevel.METADATA, context));
  }
}
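waitForCleanStore(store) is a private helper in the same class. One plausible implementation, sketched under the assumption that a dual assignment surfaces as a RuntimeException while iterating the store, simply retries a full scan until one completes cleanly:

private void waitForCleanStore(TabletStateStore store) {
  while (true) {
    try {
      for (@SuppressWarnings("unused") TabletLocationState tls : store) {
        // drain the iterator; a bad location state aborts the pass
      }
      return;
    } catch (RuntimeException ex) {
      UtilWaitThread.sleep(250); // manager hasn't repaired the entry yet
    }
  }
}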
Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
The class InMemoryMapIT, method getServerContext.
public static ServerContext getServerContext() {
  ServerContext context = EasyMock.createMock(ServerContext.class);
  EasyMock.expect(context.getCryptoService()).andReturn(CryptoServiceFactory.newDefaultInstance()).anyTimes();
  EasyMock.replay(context);
  return context;
}
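Any code under test that only asks its ServerContext for a crypto service can run against this mock; for example (hypothetical usage):

ServerContext context = getServerContext();
CryptoService cs = context.getCryptoService(); // returns the factory's default instance

Because the mock has no other expectations, any other method called on it fails the test, which keeps the test honest about its dependencies.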
Use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
The class Upgrader9to10Test, method testDropSortedMapWALs.
@Test
public void testDropSortedMapWALs() throws IOException {
  Configuration hadoopConf = new Configuration();
  ConfigurationCopy conf = new ConfigurationCopy();
  FileSystem fs = new Path("file:///").getFileSystem(hadoopConf);
  List<String> volumes = Arrays.asList("/vol1/", "/vol2/");
  Collection<Volume> vols = volumes.stream().map(s -> new VolumeImpl(fs, s)).collect(Collectors.toList());
  Set<String> fullyQualifiedVols = Set.of("file://vol1/", "file://vol2/");
  Set<String> recoveryDirs = Set.of("file://vol1/accumulo/recovery", "file://vol2/accumulo/recovery");
  conf.set(Property.INSTANCE_VOLUMES, String.join(",", fullyQualifiedVols));
  ServerContext context = createMock(ServerContext.class);
  Path recoveryDir1 = new Path("file://vol1/accumulo/recovery");
  Path recoveryDir2 = new Path("file://vol2/accumulo/recovery");
  VolumeManager volumeManager = createMock(VolumeManager.class);
  // dir0 (A123456789) contains a plain file and must be left alone; dir1
  // (B123456789) contains a part-r-0000 directory, marking it as a sorted-map
  // WAL that the upgrader should delete.
  FileStatus[] dirs = new FileStatus[2];
  dirs[0] = createMock(FileStatus.class);
  Path dir0 = new Path("file://vol1/accumulo/recovery/A123456789");
  FileStatus[] dir0Files = new FileStatus[1];
  dir0Files[0] = createMock(FileStatus.class);
  dirs[1] = createMock(FileStatus.class);
  Path dir1 = new Path("file://vol1/accumulo/recovery/B123456789");
  FileStatus[] dir1Files = new FileStatus[1];
  dir1Files[0] = createMock(FileStatus.class);
  Path part1Dir = new Path("file://vol1/accumulo/recovery/B123456789/part-r-0000");
  expect(context.getVolumeManager()).andReturn(volumeManager).once();
  expect(context.getConfiguration()).andReturn(conf).once();
  expect(context.getHadoopConf()).andReturn(hadoopConf).once();
  expect(context.getRecoveryDirs()).andReturn(recoveryDirs).once();
  expect(volumeManager.getVolumes()).andReturn(vols).once();
  // only the first volume has a recovery directory
  expect(volumeManager.exists(recoveryDir1)).andReturn(true).once();
  expect(volumeManager.exists(recoveryDir2)).andReturn(false).once();
  expect(volumeManager.listStatus(recoveryDir1)).andReturn(dirs).once();
  expect(dirs[0].getPath()).andReturn(dir0).once();
  expect(volumeManager.listStatus(dir0)).andReturn(dir0Files).once();
  expect(dir0Files[0].isDirectory()).andReturn(false).once();
  expect(dirs[1].getPath()).andReturn(dir1).once();
  expect(volumeManager.listStatus(dir1)).andReturn(dir1Files).once();
  expect(dir1Files[0].isDirectory()).andReturn(true).once();
  expect(dir1Files[0].getPath()).andReturn(part1Dir).once();
  expect(volumeManager.deleteRecursively(dir1)).andReturn(true).once();
  replay(context, volumeManager, dirs[0], dirs[1], dir0Files[0], dir1Files[0]);
  Upgrader9to10.dropSortedMapWALFiles(context);
}
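The expectations above encode a single traversal: visit each recovery directory that exists, inspect each WAL entry's children, and recursively delete entries containing part-r-* subdirectories (the sorted-map WALs left behind by pre-2.0 log recovery). A minimal sketch of that traversal follows; it is not necessarily the exact upstream body, which derives the recovery paths from the ServerContext rather than taking them as a parameter:

static void dropSortedMapWALs(VolumeManager vm, Set<String> recoveryDirs) throws IOException {
  for (String dir : recoveryDirs) {
    Path recovery = new Path(dir);
    if (!vm.exists(recovery)) {
      continue; // vol2 in the test above
    }
    for (FileStatus walDir : vm.listStatus(recovery)) {
      Path walPath = walDir.getPath();
      for (FileStatus child : vm.listStatus(walPath)) {
        // a sorted-map WAL is a directory of part-r-* map files
        if (child.isDirectory() && child.getPath().getName().startsWith("part-r")) {
          vm.deleteRecursively(walPath); // B123456789 in the test
          break;
        }
      }
    }
  }
}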