Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class ReadWriteIT, method sunnyDay.
@SuppressFBWarnings(value = {"PATH_TRAVERSAL_IN", "URLCONNECTION_SSRF_FD"},
    justification = "path provided by test; url provided by test")
@Test
public void sunnyDay() throws Exception {
  // Start accumulo, create a table, insert some data, verify we can read it out.
  // Shutdown cleanly.
  log.debug("Starting Monitor");
  cluster.getClusterControl().startAllServers(ServerType.MONITOR);
  try (AccumuloClient accumuloClient = Accumulo.newClient().from(getClientProps()).build()) {
    String tableName = getUniqueNames(1)[0];
    ingest(accumuloClient, getClientInfo(), ROWS, COLS, 50, 0, tableName);
    verify(accumuloClient, getClientInfo(), ROWS, COLS, 50, 0, tableName);
    String monitorLocation = null;
    while (monitorLocation == null) {
      monitorLocation = MonitorUtil.getLocation((ClientContext) accumuloClient);
      if (monitorLocation == null) {
        log.debug("Could not fetch monitor HTTP address from zookeeper");
        Thread.sleep(2000);
      }
    }
    if (getCluster() instanceof StandaloneAccumuloCluster) {
      String monitorSslKeystore =
          getCluster().getSiteConfiguration().get(Property.MONITOR_SSL_KEYSTORE.getKey());
      if (monitorSslKeystore != null && !monitorSslKeystore.isEmpty()) {
        log.info("Using HTTPS since monitor ssl keystore configuration was observed in accumulo configuration");
        SSLContext ctx = SSLContext.getInstance("TLSv1.2");
        TrustManager[] tm = {new TestTrustManager()};
        ctx.init(new KeyManager[0], tm, random);
        SSLContext.setDefault(ctx);
        HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
        HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
      }
    }
    URL url = new URL(monitorLocation);
    log.debug("Fetching web page {}", url);
    String result = FunctionalTestUtils.readWebPage(url).body();
    assertTrue(result.length() > 100);
    log.debug("Stopping accumulo cluster");
    ClusterControl control = cluster.getClusterControl();
    control.adminStopAll();
    ClientInfo info = ClientInfo.from(accumuloClient.properties());
    ZooReader zreader = new ZooReader(info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
    ZooCache zcache = new ZooCache(zreader, null);
    var zLockPath = ServiceLock.path(
        ZooUtil.getRoot(accumuloClient.instanceOperations().getInstanceId()) + Constants.ZMANAGER_LOCK);
    byte[] managerLockData;
    do {
      managerLockData = ServiceLock.getLockData(zcache, zLockPath, null);
      if (managerLockData != null) {
        log.info("Manager lock is still held");
        Thread.sleep(1000);
      }
    } while (managerLockData != null);
    control.stopAllServers(ServerType.MANAGER);
    control.stopAllServers(ServerType.TABLET_SERVER);
    control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
    control.stopAllServers(ServerType.MONITOR);
    log.debug("success!");
    // Restarting everything
    cluster.start();
  }
}
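The ClientInfo call near the end is the pattern this page is about: the client's properties are turned back into ZooKeeper connection details so the test can watch the manager lock directly. Below is a minimal sketch of that polling step in isolation; the helper name waitForManagerLockRelease is ours, not Accumulo's, and it assumes the same test classpath as the snippet above.

// Sketch: poll ZooKeeper until the manager lock is released
// (hypothetical helper extracted from the test above).
static void waitForManagerLockRelease(AccumuloClient client) throws Exception {
  ClientInfo info = ClientInfo.from(client.properties());
  ZooReader zreader = new ZooReader(info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
  ZooCache zcache = new ZooCache(zreader, null);
  var zLockPath = ServiceLock.path(
      ZooUtil.getRoot(client.instanceOperations().getInstanceId()) + Constants.ZMANAGER_LOCK);
  while (ServiceLock.getLockData(zcache, zLockPath, null) != null) {
    // lock still held; wait and re-check
    Thread.sleep(1000);
  }
}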
Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class ServerUtilOpts, method getServerContext.
public synchronized ServerContext getServerContext() {
  if (context == null) {
    if (getClientConfigFile() == null) {
      context = new ServerContext(SiteConfiguration.auto());
    } else {
      ClientInfo info = ClientInfo.from(getClientProps());
      context = ServerContext.override(SiteConfiguration.auto(), info.getInstanceName(),
          info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
    }
  }
  return context;
}
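For context, a minimal sketch of how a command-line utility might obtain a ServerContext through this method. The class ExampleUtil is hypothetical, and we assume the JCommander-style parseArgs helper that Accumulo's option classes provide; getZooKeepers() is inherited from ClientContext.

// Sketch (hypothetical caller): obtain a ServerContext the way server utilities do.
public class ExampleUtil {
  public static void main(String[] args) {
    ServerUtilOpts opts = new ServerUtilOpts();
    // assumed parseArgs from the ClientOpts hierarchy that ServerUtilOpts extends
    opts.parseArgs(ExampleUtil.class.getName(), args);
    ServerContext context = opts.getServerContext();
    System.out.println("ZooKeepers: " + context.getZooKeepers());
  }
}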
Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class KerberosReplicationIT, method dataReplicatedToCorrectTable.
@Test
public void dataReplicatedToCorrectTable() throws Exception {
  // Login as the root user
  final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
      rootUser.getPrincipal(), rootUser.getKeytab().toURI().toString());
  ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
    log.info("testing {}", ugi);
    final KerberosToken token = new KerberosToken();
    try (AccumuloClient primaryclient = primary.createAccumuloClient(rootUser.getPrincipal(), token);
        AccumuloClient peerclient = peer.createAccumuloClient(rootUser.getPrincipal(), token)) {
      ClusterUser replicationUser = kdc.getClientPrincipal(0);
      // Create user for replication to the peer
      peerclient.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);
      primaryclient.instanceOperations().setProperty(
          Property.REPLICATION_PEER_USER.getKey() + PEER_NAME, replicationUser.getPrincipal());
      primaryclient.instanceOperations().setProperty(
          Property.REPLICATION_PEER_KEYTAB.getKey() + PEER_NAME,
          replicationUser.getKeytab().getAbsolutePath());
      // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
      ClientInfo info = ClientInfo.from(peerclient.properties());
      primaryclient.instanceOperations().setProperty(
          Property.REPLICATION_PEERS.getKey() + PEER_NAME,
          ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
              AccumuloReplicaSystem.buildConfiguration(info.getInstanceName(), info.getZooKeepers())));
      String primaryTable1 = "primary", peerTable1 = "peer";
      // Create tables
      peerclient.tableOperations().create(peerTable1);
      String peerTableId1 = peerclient.tableOperations().tableIdMap().get(peerTable1);
      assertNotNull(peerTableId1);
      Map<String, String> props = new HashMap<>();
      props.put(Property.TABLE_REPLICATION.getKey(), "true");
      // Replicate this table to the peerClusterName in a table with the peerTableId table id
      props.put(Property.TABLE_REPLICATION_TARGET.getKey() + PEER_NAME, peerTableId1);
      primaryclient.tableOperations().create(primaryTable1,
          new NewTableConfiguration().setProperties(props));
      String managerTableId1 = primaryclient.tableOperations().tableIdMap().get(primaryTable1);
      assertNotNull(managerTableId1);
      // Grant write permission
      peerclient.securityOperations().grantTablePermission(replicationUser.getPrincipal(),
          peerTable1, TablePermission.WRITE);
      // Write some data to table1
      long managerTable1Records = 0L;
      try (BatchWriter bw = primaryclient.createBatchWriter(primaryTable1)) {
        for (int rows = 0; rows < 2500; rows++) {
          Mutation m = new Mutation(primaryTable1 + rows);
          for (int cols = 0; cols < 100; cols++) {
            String value = Integer.toString(cols);
            m.put(value, "", value);
            managerTable1Records++;
          }
          bw.addMutation(m);
        }
      }
      log.info("Wrote all data to primary cluster");
      Set<String> filesFor1 = primaryclient.replicationOperations().referencedFiles(primaryTable1);
      // Restart the tserver to force a close on the WAL
      for (ProcessReference proc : primary.getProcesses().get(ServerType.TABLET_SERVER)) {
        primary.killProcess(ServerType.TABLET_SERVER, proc);
      }
      primary.exec(TabletServer.class);
      log.info("Restarted the tserver");
      // Read the data -- the tserver is back up and running and tablets are assigned
      Iterators.size(primaryclient.createScanner(primaryTable1, Authorizations.EMPTY).iterator());
      // Wait for the table to be replicated
      log.info("Waiting for {} for {}", filesFor1, primaryTable1);
      primaryclient.replicationOperations().drain(primaryTable1, filesFor1);
      long countTable = 0L;
      try (var scanner = peerclient.createScanner(peerTable1, Authorizations.EMPTY)) {
        for (Entry<Key, Value> entry : scanner) {
          countTable++;
          assertTrue("Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " "
              + entry.getValue(), entry.getKey().getRow().toString().startsWith(primaryTable1));
        }
      }
      log.info("Found {} records in {}", countTable, peerTable1);
      assertEquals(managerTable1Records, countTable);
      return null;
    }
  });
}
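The ClientInfo use here only extracts the peer's instance name and ZooKeeper quorum to build the replication peer configuration. That one step, pulled out of the test for clarity (same identifiers as above):

// Sketch: derive the replication peer configuration from the peer client's properties.
ClientInfo info = ClientInfo.from(peerclient.properties());
String peerConfig = ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
    AccumuloReplicaSystem.buildConfiguration(info.getInstanceName(), info.getZooKeepers()));
// Resulting property value has the form: AccumuloReplicaSystem,instanceName,zookeepers
primaryclient.instanceOperations()
    .setProperty(Property.REPLICATION_PEERS.getKey() + PEER_NAME, peerConfig);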
Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class AccumuloInputFormatIT, method testGetSplits.
/**
* Tests several different paths through the getSplits() method by setting different properties
* and verifying the results.
*/
@Test
public void testGetSplits() throws Exception {
  try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    String table = getUniqueNames(1)[0];
    client.tableOperations().create(table);
    insertData(client, table, currentTimeMillis());
    Job job = Job.getInstance();
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setInputTableName(job, table);
    ClientInfo ci = getClientInfo();
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setZooKeeperInstance(job,
        ci.getInstanceName(), ci.getZooKeepers());
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setConnectorInfo(job,
        ci.getPrincipal(), ci.getAuthenticationToken());
    // split table
    TreeSet<Text> splitsToAdd = new TreeSet<>();
    for (int i = 0; i < 10000; i += 1000) {
      splitsToAdd.add(new Text(String.format("%09d", i)));
    }
    client.tableOperations().addSplits(table, splitsToAdd);
    // wait for splits to be propagated
    sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    // get splits without setting any range
    Collection<Text> actualSplits = client.tableOperations().listSplits(table);
    List<InputSplit> splits = inputFormat.getSplits(job);
    // No ranges set on the job, so it'll start with -inf
    assertEquals(actualSplits.size() + 1, splits.size());
    // set ranges and get splits
    List<Range> ranges = new ArrayList<>();
    for (Text text : actualSplits) {
      ranges.add(new Range(text));
    }
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setRanges(job, ranges);
    splits = inputFormat.getSplits(job);
    assertEquals(actualSplits.size(), splits.size());
    // offline mode
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setOfflineTableScan(job, true);
    assertThrows(IOException.class, () -> inputFormat.getSplits(job));
    client.tableOperations().offline(table, true);
    splits = inputFormat.getSplits(job);
    assertEquals(actualSplits.size(), splits.size());
    // auto adjust ranges
    ranges = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      // overlapping ranges
      ranges.add(new Range(String.format("%09d", i), String.format("%09d", i + 2)));
    }
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setRanges(job, ranges);
    splits = inputFormat.getSplits(job);
    assertEquals(2, splits.size());
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setAutoAdjustRanges(job, false);
    splits = inputFormat.getSplits(job);
    assertEquals(ranges.size(), splits.size());
    // BatchScan not available for offline scans
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setBatchScan(job, true);
    // Reset auto-adjust ranges too
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setAutoAdjustRanges(job, true);
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setOfflineTableScan(job, true);
    assertThrows(IllegalArgumentException.class, () -> inputFormat.getSplits(job));
    client.tableOperations().online(table, true);
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setOfflineTableScan(job, false);
    // test for resumption of success
    splits = inputFormat.getSplits(job);
    assertEquals(2, splits.size());
    // BatchScan not available with isolated iterators
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setScanIsolation(job, true);
    assertThrows(IllegalArgumentException.class, () -> inputFormat.getSplits(job));
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setScanIsolation(job, false);
    // test for resumption of success
    splits = inputFormat.getSplits(job);
    assertEquals(2, splits.size());
    // BatchScan not available with local iterators
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setLocalIterators(job, true);
    assertThrows(IllegalArgumentException.class, () -> inputFormat.getSplits(job));
    org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setLocalIterators(job, false);
    // Check we are getting back the correct type of split
    client.tableOperations().online(table);
    splits = inputFormat.getSplits(job);
    for (InputSplit split : splits) {
      assert (split instanceof org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit);
    }
    // We should divide along the tablet lines similar to when using setAutoAdjustRanges(job, true)
    assertEquals(2, splits.size());
  }
}
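Stripped of the split-related assertions, the ClientInfo portion of this test is just the job setup below; a minimal sketch, assuming a ClientInfo obtained the same way as in the test, with a hypothetical table name "mytable":

// Sketch: configure the deprecated mapreduce AccumuloInputFormat from a ClientInfo.
Job job = Job.getInstance();
ClientInfo ci = getClientInfo();
org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setInputTableName(job, "mytable");
org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setZooKeeperInstance(job,
    ci.getInstanceName(), ci.getZooKeepers());
org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setConnectorInfo(job,
    ci.getPrincipal(), ci.getAuthenticationToken());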
Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class UnusedWALIT, method test.
@Test
public void test() throws Exception {
  // don't want this bad boy cleaning up walog entries
  getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
  // make two tables
  String[] tableNames = getUniqueNames(2);
  String bigTable = tableNames[0];
  String lilTable = tableNames[1];
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    c.tableOperations().create(bigTable);
    c.tableOperations().create(lilTable);
    ServerContext context = getServerContext();
    ClientInfo info = ClientInfo.from(getClientProperties());
    new ZooReaderWriter(info.getZooKeepers(), info.getZooKeepersSessionTimeOut(), "");
    // put some data in a log that should be replayed for both tables
    writeSomeData(c, bigTable, 0, 10, 0, 10);
    scanSomeData(c, bigTable, 0, 10, 0, 10);
    writeSomeData(c, lilTable, 0, 1, 0, 1);
    scanSomeData(c, lilTable, 0, 1, 0, 1);
    assertEquals(2, getWALCount(context));
    // roll the logs by pushing data into bigTable
    writeSomeData(c, bigTable, 0, 3000, 0, 1000);
    assertEquals(3, getWALCount(context));
    // put some data in the latest log
    writeSomeData(c, lilTable, 1, 10, 0, 10);
    scanSomeData(c, lilTable, 1, 10, 0, 10);
    // bounce the tserver
    getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
    getCluster().getClusterControl().start(ServerType.TABLET_SERVER);
    // wait for the metadata table to be online
    Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
    // check our two sets of data in different logs
    scanSomeData(c, lilTable, 0, 1, 0, 1);
    scanSomeData(c, lilTable, 1, 10, 0, 10);
  }
}
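Note that the ZooReaderWriter constructed from ClientInfo above is never assigned to a variable; the test only exercises the constructor. A minimal sketch that keeps the reference, assuming the same three-argument constructor (the empty string appears to be the secret used for ZooKeeper authentication):

// Sketch: build and keep a ZooReaderWriter from client properties.
ClientInfo info = ClientInfo.from(getClientProperties());
ZooReaderWriter zrw =
    new ZooReaderWriter(info.getZooKeepers(), info.getZooKeepersSessionTimeOut(), "");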