use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class DropUserCommandTest method dropUserWithoutForcePrompts.
@Test
public void dropUserWithoutForcePrompts() throws Exception {
  Connector conn = EasyMock.createMock(Connector.class);
  CommandLine cli = EasyMock.createMock(CommandLine.class);
  Shell shellState = EasyMock.createMock(Shell.class);
  ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
  SecurityOperations secOps = EasyMock.createMock(SecurityOperations.class);
  EasyMock.expect(shellState.getConnector()).andReturn(conn);
  // The user we want to remove
  EasyMock.expect(cli.getArgs()).andReturn(new String[] { "user" });
  // We're the root user
  EasyMock.expect(conn.whoami()).andReturn("root");
  // Force option was not provided
  EasyMock.expect(cli.hasOption("f")).andReturn(false);
  EasyMock.expect(shellState.getReader()).andReturn(reader);
  reader.flush();
  EasyMock.expectLastCall().once();
  // Fake a "yes" response
  EasyMock.expect(shellState.getReader()).andReturn(reader);
  EasyMock.expect(reader.readLine(EasyMock.anyObject(String.class))).andReturn("yes");
  EasyMock.expect(shellState.getConnector()).andReturn(conn);
  EasyMock.expect(conn.securityOperations()).andReturn(secOps);
  secOps.dropLocalUser("user");
  EasyMock.expectLastCall();
  EasyMock.replay(conn, cli, shellState, reader, secOps);
  // The raw command string is not parsed here; the command reads its options
  // and arguments from the mocked CommandLine above.
  cmd.execute("dropuser foo -f", cli, shellState);
  EasyMock.verify(conn, cli, shellState, reader, secOps);
}
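The test above follows EasyMock's strict record/replay/verify lifecycle: every call the code under test will make is recorded on the mocks, replay() switches the mocks from recording to checking, and verify() fails the test if any recorded expectation went unsatisfied. A minimal, self-contained sketch of that lifecycle; GreetingService is a hypothetical interface used only for illustration, not an Accumulo type:

import org.easymock.EasyMock;

public class EasyMockLifecycleSketch {
  interface GreetingService {
    String greet(String name);
  }

  public static void main(String[] args) {
    GreetingService svc = EasyMock.createMock(GreetingService.class);
    // Record phase: declare each call the code under test is expected to make
    EasyMock.expect(svc.greet("root")).andReturn("hello root");
    // Replay phase: switch the mock from recording to checking
    EasyMock.replay(svc);
    System.out.println(svc.greet("root"));
    // Verify phase: fail if any recorded expectation was never satisfied
    EasyMock.verify(svc);
  }
}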
use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class MasterClientServiceHandler method waitForFlush.
@Override
public void waitForFlush(TInfo tinfo, TCredentials c, String tableIdStr, ByteBuffer startRow, ByteBuffer endRow, long flushID, long maxLoops) throws ThriftSecurityException, ThriftTableOperationException {
  Table.ID tableId = Table.ID.of(tableIdStr);
  Namespace.ID namespaceId = getNamespaceIdFromTableId(TableOperation.FLUSH, tableId);
  master.security.canFlush(c, tableId, namespaceId);
  if (endRow != null && startRow != null && ByteBufferUtil.toText(startRow).compareTo(ByteBufferUtil.toText(endRow)) >= 0)
    throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");
  Set<TServerInstance> serversToFlush = new HashSet<>(master.tserverSet.getCurrentServers());
  for (long l = 0; l < maxLoops; l++) {
    // Ask every tserver that may still host an unflushed tablet to flush the range
    for (TServerInstance instance : serversToFlush) {
      try {
        final TServerConnection server = master.tserverSet.getConnection(instance);
        if (server != null)
          server.flush(master.masterLock, tableId, ByteBufferUtil.toBytes(startRow), ByteBufferUtil.toBytes(endRow));
      } catch (TException ex) {
        Master.log.error(ex.toString());
      }
    }
    if (l == maxLoops - 1)
      break;
    sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
    serversToFlush.clear();
    try {
      Connector conn = master.getConnector();
      Scanner scanner;
      // Tablets of the metadata table itself are tracked in the root table
      if (tableId.equals(MetadataTable.ID)) {
        scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
        scanner.setRange(MetadataSchema.TabletsSection.getRange());
      } else {
        scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
        Range range = new KeyExtent(tableId, null, ByteBufferUtil.toText(startRow)).toMetadataRange();
        scanner.setRange(range.clip(MetadataSchema.TabletsSection.getRange()));
      }
      TabletsSection.ServerColumnFamily.FLUSH_COLUMN.fetch(scanner);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
      scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
      scanner.fetchColumnFamily(LogColumnFamily.NAME);
      RowIterator ri = new RowIterator(scanner);
      int tabletsToWaitFor = 0;
      int tabletCount = 0;
      Text ert = ByteBufferUtil.toText(endRow);
      while (ri.hasNext()) {
        Iterator<Entry<Key, Value>> row = ri.next();
        long tabletFlushID = -1;
        int logs = 0;
        boolean online = false;
        TServerInstance server = null;
        Entry<Key, Value> entry = null;
        while (row.hasNext()) {
          entry = row.next();
          Key key = entry.getKey();
          if (TabletsSection.ServerColumnFamily.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
            tabletFlushID = Long.parseLong(entry.getValue().toString());
          }
          if (LogColumnFamily.NAME.equals(key.getColumnFamily()))
            logs++;
          if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily())) {
            online = true;
            server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
          }
        }
        // when tablet is not online and has no logs, there is no reason to wait for it
        if ((online || logs > 0) && tabletFlushID < flushID) {
          tabletsToWaitFor++;
          if (server != null)
            serversToFlush.add(server);
        }
        tabletCount++;
        Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
        if (tabletEndRow == null || (ert != null && tabletEndRow.compareTo(ert) >= 0))
          break;
      }
      if (tabletsToWaitFor == 0)
        break;
      if (tabletCount == 0 && !Tables.exists(master.getInstance(), tableId))
        throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.NOTFOUND, null);
    } catch (AccumuloException | TabletDeletedException e) {
      Master.log.debug("Failed to scan {} table to wait for flush {}", MetadataTable.NAME, tableId, e);
    } catch (AccumuloSecurityException e) {
      Master.log.warn("{}", e.getMessage(), e);
      throw new ThriftSecurityException();
    } catch (TableNotFoundException e) {
      Master.log.error("{}", e.getMessage(), e);
      throw new ThriftTableOperationException();
    }
  }
}
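From the client's perspective, this handler is what blocks behind TableOperations.flush when the wait flag is set: the master re-polls the metadata table until every tablet in the range has flushed past the requested flush ID. A hedged sketch of that client call; the instance name, ZooKeeper quorum, table name, and credentials are placeholders:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.io.Text;

public class FlushAndWaitSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder instance name, ZooKeeper quorum, and credentials
    Connector conn = new ZooKeeperInstance("instance", "zk1:2181")
        .getConnector("root", new PasswordToken("secret"));
    // wait=true blocks until the master's waitForFlush loop observes every
    // tablet in [a, z] flushed past the flush ID it handed out
    conn.tableOperations().flush("mytable", new Text("a"), new Text("z"), true);
  }
}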
use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class GarbageCollectWriteAheadLogs method removeReplicationEntries.
protected int removeReplicationEntries(Map<UUID, TServerInstance> candidates) throws IOException, KeeperException, InterruptedException {
  Connector conn;
  try {
    conn = context.getConnector();
    try {
      // First pass: drop any candidate WAL the replication table still references
      final Scanner s = ReplicationTable.getScanner(conn);
      StatusSection.limit(s);
      for (Entry<Key, Value> entry : s) {
        UUID id = path2uuid(new Path(entry.getKey().getRow().toString()));
        candidates.remove(id);
        log.info("Ignore closed log " + id + " because it is being replicated");
      }
    } catch (ReplicationTableOfflineException ex) {
      return candidates.size();
    }
    // Second pass: drop candidates still referenced by the metadata table's replication section
    final Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    scanner.fetchColumnFamily(MetadataSchema.ReplicationSection.COLF);
    scanner.setRange(MetadataSchema.ReplicationSection.getRange());
    for (Entry<Key, Value> entry : scanner) {
      Text file = new Text();
      MetadataSchema.ReplicationSection.getFile(entry.getKey(), file);
      UUID id = path2uuid(new Path(file.toString()));
      candidates.remove(id);
      log.info("Ignore closed log " + id + " because it is being replicated");
    }
    return candidates.size();
  } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
    log.error("Failed to scan metadata table", e);
    throw new IllegalArgumentException(e);
  }
}
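Both passes key off path2uuid, which maps a WAL file path back to the UUID the server named it with. A hypothetical sketch of such a helper, assuming (as the WAL layout suggests) that the final path component is the UUID itself; this is an illustration, not necessarily the project's exact implementation:

import java.util.UUID;
import org.apache.hadoop.fs.Path;

public class WalPathSketch {
  // Hypothetical helper: the last path component of a WAL is its UUID
  static UUID path2uuid(Path path) {
    return UUID.fromString(path.getName());
  }

  public static void main(String[] args) {
    Path wal = new Path("/accumulo/wal/host+9997/" + UUID.randomUUID());
    System.out.println(path2uuid(wal));
  }
}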
use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class CloseWriteAheadLogReferences method run.
@Override
public void run() {
  // As long as we depend on a newer Guava than Hadoop bundles, we have to stay
  // compatible with the older version they ship.
  Stopwatch sw = new Stopwatch();
  Connector conn;
  try {
    conn = context.getConnector();
  } catch (Exception e) {
    log.error("Could not create connector", e);
    throw new RuntimeException(e);
  }
  if (!ReplicationTable.isOnline(conn)) {
    log.debug("Replication table isn't online, not attempting to clean up wals");
    return;
  }
  Span findWalsSpan = Trace.start("findReferencedWals");
  HashSet<String> closed = null;
  try {
    sw.start();
    closed = getClosedLogs(conn);
  } finally {
    sw.stop();
    findWalsSpan.stop();
  }
  log.info("Found {} WALs referenced in metadata in {}", closed.size(), sw.toString());
  sw.reset();
  Span updateReplicationSpan = Trace.start("updateReplicationTable");
  long recordsClosed = 0;
  try {
    sw.start();
    recordsClosed = updateReplicationEntries(conn, closed);
  } finally {
    sw.stop();
    updateReplicationSpan.stop();
  }
  log.info("Closed {} WAL replication references in replication table in {}", recordsClosed, sw.toString());
}
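The method times each phase with one reused Stopwatch: start, stop, log, reset, repeat. A minimal sketch of the same reset-and-reuse pattern using the modern Guava factory method (the code above deliberately keeps the deprecated constructor for Hadoop compatibility); the sleeps are stand-ins for the real work:

import com.google.common.base.Stopwatch;

public class PhaseTimingSketch {
  public static void main(String[] args) throws InterruptedException {
    Stopwatch sw = Stopwatch.createStarted();
    Thread.sleep(100); // stand-in for getClosedLogs(conn)
    sw.stop();
    System.out.println("phase 1 took " + sw);
    // reset() zeroes the elapsed time so the same watch can time the next phase
    sw.reset().start();
    Thread.sleep(50); // stand-in for updateReplicationEntries(conn, closed)
    sw.stop();
    System.out.println("phase 2 took " + sw);
  }
}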
use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class GarbageCollectWriteAheadLogsTest method deleteUnreferenceLogOnDeadServer.
@Test
public void deleteUnreferenceLogOnDeadServer() throws Exception {
AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
VolumeManager fs = EasyMock.createMock(VolumeManager.class);
WalStateManager marker = EasyMock.createMock(WalStateManager.class);
LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
Connector conn = EasyMock.createMock(Connector.class);
Scanner mscanner = EasyMock.createMock(Scanner.class);
Scanner rscanner = EasyMock.createMock(Scanner.class);
GCStatus status = new GCStatus(null, null, null, new GcCycleStats());
EasyMock.expect(tserverSet.getCurrentServers()).andReturn(Collections.singleton(server1));
EasyMock.expect(marker.getAllMarkers()).andReturn(markers2).once();
EasyMock.expect(marker.state(server2, id)).andReturn(new Pair<>(WalState.OPEN, path));
EasyMock.expect(context.getConnector()).andReturn(conn);
EasyMock.expect(conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)).andReturn(rscanner);
rscanner.fetchColumnFamily(ReplicationSchema.StatusSection.NAME);
EasyMock.expectLastCall().once();
EasyMock.expect(rscanner.iterator()).andReturn(emptyKV);
EasyMock.expect(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)).andReturn(mscanner);
mscanner.fetchColumnFamily(MetadataSchema.ReplicationSection.COLF);
EasyMock.expectLastCall().once();
mscanner.setRange(MetadataSchema.ReplicationSection.getRange());
EasyMock.expectLastCall().once();
EasyMock.expect(mscanner.iterator()).andReturn(emptyKV);
EasyMock.expect(fs.deleteRecursively(path)).andReturn(true).once();
marker.removeWalMarker(server2, id);
EasyMock.expectLastCall().once();
marker.forget(server2);
EasyMock.expectLastCall().once();
EasyMock.replay(context, fs, marker, tserverSet, conn, rscanner, mscanner);
GarbageCollectWriteAheadLogs gc = new GarbageCollectWriteAheadLogs(context, fs, false, tserverSet, marker, tabletOnServer1List);
gc.collect(status);
EasyMock.verify(context, fs, marker, tserverSet, conn, rscanner, mscanner);
}
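Both scanner mocks are stubbed to return emptyKV, a shared fixture defined elsewhere in the test class, so the GC sees no replication references at all. A hedged sketch of what such an empty Key/Value iterator can look like; the field name matches the test, but the exact construction is an assumption:

import java.util.Collections;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;

public class EmptyScannerFixture {
  // An iterator over no metadata entries: scanners stubbed with this report
  // zero replication references
  static final Iterator<Entry<Key,Value>> emptyKV = Collections.emptyIterator();
}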