use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
the class SystemCredentialsIT method main.
public static void main(final String[] args) throws AccumuloException, TableNotFoundException {
  var siteConfig = SiteConfiguration.auto();
  try (ServerContext context = new ServerContext(siteConfig)) {
    Credentials creds;
    InstanceId badInstanceID = InstanceId.of(SystemCredentials.class.getName());
    if (args.length < 2) {
      throw new RuntimeException("Incorrect usage; expected to be run by test only");
    }
    switch (args[0]) {
      case "bad":
        creds = SystemCredentials.get(badInstanceID, siteConfig);
        break;
      case "good":
        creds = SystemCredentials.get(context.getInstanceID(), siteConfig);
        break;
      case "bad_password":
        creds = new SystemCredentials(badInstanceID, "!SYSTEM", new PasswordToken("fake"));
        break;
      default:
        throw new RuntimeException("Incorrect usage; expected to be run by test only");
    }
    try (AccumuloClient client = Accumulo.newClient().from(context.getProperties())
        .as(creds.getPrincipal(), creds.getToken()).build()) {
      client.securityOperations().authenticateUser(creds.getPrincipal(), creds.getToken());
      try (Scanner scan = client.createScanner(RootTable.NAME, Authorizations.EMPTY)) {
        for (Entry<Key, Value> e : scan) {
          e.hashCode();
        }
      } catch (RuntimeException e) {
        e.printStackTrace(System.err);
        System.exit(SCAN_FAILED);
      }
    } catch (AccumuloSecurityException e) {
      e.printStackTrace(System.err);
      System.exit(AUTHENICATION_FAILED);
    }
  }
}
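Stripped of the bad-credential branches, the pattern this test exercises is small. The following is a minimal sketch of just the "good" path (assuming a server-side process with access to the local Accumulo site configuration); it is not part of the test itself:

// Minimal sketch: open a ServerContext from the site configuration, derive the
// instance's system credentials, and verify that they authenticate.
var siteConfig = SiteConfiguration.auto();
try (ServerContext context = new ServerContext(siteConfig)) {
  Credentials creds = SystemCredentials.get(context.getInstanceID(), siteConfig);
  try (AccumuloClient client = Accumulo.newClient().from(context.getProperties())
      .as(creds.getPrincipal(), creds.getToken()).build()) {
    client.securityOperations().authenticateUser(creds.getPrincipal(), creds.getToken());
  }
}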
use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
the class CompactionDriverTest method testTableBeingDeleted.
@Test
public void testTableBeingDeleted() throws Exception {
  final InstanceId instance = InstanceId.of(UUID.randomUUID());
  final long compactId = 123;
  final long cancelId = 122;
  final NamespaceId namespaceId = NamespaceId.of("14");
  final TableId tableId = TableId.of("43");
  final byte[] startRow = new byte[0];
  final byte[] endRow = new byte[0];
  Manager manager = EasyMock.createNiceMock(Manager.class);
  ServerContext ctx = EasyMock.createNiceMock(ServerContext.class);
  ZooReaderWriter zrw = EasyMock.createNiceMock(ZooReaderWriter.class);
  EasyMock.expect(manager.getInstanceID()).andReturn(instance).anyTimes();
  EasyMock.expect(manager.getContext()).andReturn(ctx);
  EasyMock.expect(ctx.getZooReaderWriter()).andReturn(zrw);
  final String zCancelID = CompactionDriver.createCompactionCancellationPath(instance, tableId);
  EasyMock.expect(zrw.getData(zCancelID)).andReturn(Long.toString(cancelId).getBytes());
  String deleteMarkerPath = PreDeleteTable.createDeleteMarkerPath(instance, tableId);
  EasyMock.expect(zrw.exists(deleteMarkerPath)).andReturn(true);
  EasyMock.replay(manager, ctx, zrw);
  final CompactionDriver driver = new CompactionDriver(compactId, namespaceId, tableId, startRow, endRow);
  final long tableIdLong = Long.parseLong(tableId.toString());
  var e = assertThrows(AcceptableThriftTableOperationException.class,
      () -> driver.isReady(tableIdLong, manager));
  assertEquals(tableId.toString(), e.getTableId());
  assertEquals(TableOperation.COMPACT, e.getOp());
  assertEquals(TableOperationExceptionType.OTHER, e.getType());
  assertEquals(TableOperationsImpl.TABLE_DELETED_MSG, e.getDescription());
  EasyMock.verify(manager, ctx, zrw);
}
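The expect/replay/verify lifecycle around the mocked ServerContext is the part most of these tests share. A minimal sketch of just that wiring follows; the helper name, path, and return value are placeholders, not taken from the test:

// Hypothetical helper: stub a ServerContext so code under test can read from
// ZooKeeper without a running instance. Record expectations, then replay; after
// exercising the code under test, call EasyMock.verify(ctx, zrw).
private ServerContext mockContextWithZooKeeper() throws Exception {
  ServerContext ctx = EasyMock.createNiceMock(ServerContext.class);
  ZooReaderWriter zrw = EasyMock.createNiceMock(ZooReaderWriter.class);
  EasyMock.expect(ctx.getZooReaderWriter()).andReturn(zrw).anyTimes();
  EasyMock.expect(zrw.exists("/placeholder/path")).andReturn(true).anyTimes();
  EasyMock.replay(ctx, zrw);
  return ctx;
}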
use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
the class TotalQueuedIT method getSyncs.
private long getSyncs(AccumuloClient c) throws Exception {
  ServerContext context = getServerContext();
  for (String address : c.instanceOperations().getTabletServers()) {
    TabletClientService.Client client = ThriftUtil.getTServerClient(HostAndPort.fromString(address), context);
    TabletServerStatus status = client.getTabletServerStatus(null, context.rpcCreds());
    // Only the first tablet server reported is consulted; its sync count is returned.
    return status.syncs;
  }
  return 0;
}
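A typical way such a helper gets used in a write-ahead-log test is to sample the counter before and after a batch of writes. This is a hypothetical sketch; writeSomeData and the surrounding assertion are assumptions, not code from TotalQueuedIT:

// Sample the tserver's sync counter around a batch of writes and check it grew.
long syncsBefore = getSyncs(client);
writeSomeData(client, tableName, 0, 1000, 0, 10);  // hypothetical write helper
long syncsAfter = getSyncs(client);
assertTrue(syncsAfter > syncsBefore);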
use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
the class UnusedWALIT method test.
@Test
public void test() throws Exception {
  // don't want this bad boy cleaning up walog entries
  getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
  // make two tables
  String[] tableNames = getUniqueNames(2);
  String bigTable = tableNames[0];
  String lilTable = tableNames[1];
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    c.tableOperations().create(bigTable);
    c.tableOperations().create(lilTable);
    ServerContext context = getServerContext();
    ClientInfo info = ClientInfo.from(getClientProperties());
    new ZooReaderWriter(info.getZooKeepers(), info.getZooKeepersSessionTimeOut(), "");
    // put some data in a log that should be replayed for both tables
    writeSomeData(c, bigTable, 0, 10, 0, 10);
    scanSomeData(c, bigTable, 0, 10, 0, 10);
    writeSomeData(c, lilTable, 0, 1, 0, 1);
    scanSomeData(c, lilTable, 0, 1, 0, 1);
    assertEquals(2, getWALCount(context));
    // roll the logs by pushing data into bigTable
    writeSomeData(c, bigTable, 0, 3000, 0, 1000);
    assertEquals(3, getWALCount(context));
    // put some data in the latest log
    writeSomeData(c, lilTable, 1, 10, 0, 10);
    scanSomeData(c, lilTable, 1, 10, 0, 10);
    // bounce the tserver
    getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
    getCluster().getClusterControl().start(ServerType.TABLET_SERVER);
    // wait for the metadata table to be online
    Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
    // check our two sets of data in different logs
    scanSomeData(c, lilTable, 0, 1, 0, 1);
    scanSomeData(c, lilTable, 1, 10, 0, 10);
  }
}
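The writeSomeData and scanSomeData helpers are defined elsewhere in UnusedWALIT. As a rough idea of what the write side involves, here is a hypothetical sketch; the exact signature and row/column formatting are assumptions:

// Hypothetical sketch of a writeSomeData helper: write a small grid of rows and
// columns so the tablet server appends mutations to its current write-ahead log.
private void writeSomeData(AccumuloClient c, String table, int startRow, int rowCount,
    int startCol, int colCount) throws Exception {
  try (BatchWriter bw = c.createBatchWriter(table)) {
    for (int r = startRow; r < startRow + rowCount; r++) {
      Mutation m = new Mutation(String.format("r%06d", r));
      for (int col = startCol; col < startCol + colCount; col++) {
        m.put("cf", String.format("c%04d", col), "value");
      }
      bw.addMutation(m);
    }
  }
}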
use of org.apache.accumulo.server.ServerContext in project accumulo by apache.
the class ReplicationMetricsTest method testAddReplicationQueueTimeMetrics.
@Test
public void testAddReplicationQueueTimeMetrics() throws Exception {
  Manager manager = EasyMock.createMock(Manager.class);
  ServerContext context = EasyMock.createMock(ServerContext.class);
  VolumeManager fileSystem = EasyMock.createMock(VolumeManager.class);
  ReplicationUtil util = EasyMock.createMock(ReplicationUtil.class);
  MeterRegistry meterRegistry = EasyMock.createMock(MeterRegistry.class);
  Timer timer = EasyMock.createMock(Timer.class);
  Path path1 = new Path("hdfs://localhost:9000/accumulo/wal/file1");
  Path path2 = new Path("hdfs://localhost:9000/accumulo/wal/file2");
  // First call will initialize the map of paths to modification time
  EasyMock.expect(manager.getContext()).andReturn(context).anyTimes();
  EasyMock.expect(meterRegistry.timer("replicationQueue")).andReturn(timer).anyTimes();
  EasyMock.expect(meterRegistry.gauge(EasyMock.eq("filesPendingReplication"),
      EasyMock.anyObject(AtomicLong.class))).andReturn(new AtomicLong(0)).anyTimes();
  EasyMock.expect(meterRegistry.gauge(EasyMock.eq("numPeers"),
      EasyMock.anyObject(AtomicInteger.class))).andReturn(new AtomicInteger(0)).anyTimes();
  EasyMock.expect(meterRegistry.gauge(EasyMock.eq("maxReplicationThreads"),
      EasyMock.anyObject(AtomicInteger.class))).andReturn(new AtomicInteger(0)).anyTimes();
  EasyMock.expect(util.getPendingReplicationPaths()).andReturn(Set.of(path1, path2));
  EasyMock.expect(manager.getVolumeManager()).andReturn(fileSystem);
  EasyMock.expect(fileSystem.getFileStatus(path1)).andReturn(createStatus(100));
  EasyMock.expect(manager.getVolumeManager()).andReturn(fileSystem);
  EasyMock.expect(fileSystem.getFileStatus(path2)).andReturn(createStatus(200));
  // Second call will recognize the missing path1 and add the latency stat
  EasyMock.expect(util.getPendingReplicationPaths()).andReturn(Set.of(path2));
  timer.record(EasyMock.isA(Duration.class));
  EasyMock.expectLastCall();
  EasyMock.replay(manager, fileSystem, util, meterRegistry, timer);
  ReplicationMetrics metrics = new ReplicationMetricsTestMetrics(manager);
  // Inject our mock objects
  replaceField(metrics, "replicationUtil", util);
  replaceField(metrics, "replicationQueueTimer", timer);
  // Two calls to this will initialize the map and then add metrics
  metrics.addReplicationQueueTimeMetrics();
  metrics.addReplicationQueueTimeMetrics();
  EasyMock.verify(manager, fileSystem, util, meterRegistry, timer);
}
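The replaceField helper is not shown on this page. A plausible reflection-based sketch follows; it is hypothetical, and the real helper in ReplicationMetricsTest may differ:

// Hypothetical sketch: swap a private field on the metrics object via reflection,
// walking up the class hierarchy until the field is found.
private void replaceField(Object instance, String fieldName, Object newValue)
    throws ReflectiveOperationException {
  Class<?> clazz = instance.getClass();
  while (clazz != null) {
    try {
      Field field = clazz.getDeclaredField(fieldName);
      field.setAccessible(true);
      field.set(instance, newValue);
      return;
    } catch (NoSuchFieldException e) {
      clazz = clazz.getSuperclass();
    }
  }
  throw new NoSuchFieldException(fieldName);
}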