Use of com.google.common.net.HostAndPort in project druid (druid-io) — class KafkaSimpleConsumer, method findLeader:
/**
 * Scans every known replica broker for the metadata of {@code partitionId} of
 * {@code topic}, returning the first matching {@link PartitionMetadata} (its
 * leader field identifies the current partition leader) or {@code null} when
 * no broker could supply it. Errors talking to a broker are logged and the
 * next broker is tried; interruption is propagated via ensureNotInterrupted.
 */
private PartitionMetadata findLeader() throws InterruptedException {
    for (HostAndPort broker : replicaBrokers) {
        SimpleConsumer metadataConsumer = null;
        try {
            log.info("Finding new leader from Kafka brokers, try broker [%s]", broker.toString());
            // Construct inside the try so a failing constructor is handled like any
            // other broker communication error (and the finally below tolerates null).
            metadataConsumer = new SimpleConsumer(
                broker.getHostText(), broker.getPort(), SO_TIMEOUT, BUFFER_SIZE, leaderLookupClientId);
            TopicMetadataResponse response =
                metadataConsumer.send(new TopicMetadataRequest(Collections.singletonList(topic)));
            for (TopicMetadata topicMetadata : response.topicsMetadata()) {
                if (!topic.equals(topicMetadata.topic())) {
                    continue;
                }
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    if (partitionMetadata.partitionId() == partitionId) {
                        return partitionMetadata;
                    }
                }
            }
        } catch (Exception e) {
            // Re-throws InterruptedException-style failures; everything else is a
            // per-broker warning and we move on to the next candidate.
            ensureNotInterrupted(e);
            log.warn(e, "error communicating with Kafka Broker [%s] to find leader for [%s] - [%s]", broker, topic, partitionId);
        } finally {
            if (metadataConsumer != null) {
                metadataConsumer.close();
            }
        }
    }
    return null;
}
Use of com.google.common.net.HostAndPort in project presto (prestodb) — class ClientOptions, method parseServer:
/**
 * Parses a server specification into a URI. A string that already carries an
 * http/https scheme is used as-is (after lower-casing); anything else is
 * treated as {@code host[:port]} and turned into an http URI, defaulting to
 * port 80 when no port was given.
 *
 * @throws IllegalArgumentException when the host/port cannot form a valid URI
 */
public static URI parseServer(String server) {
    String normalized = server.toLowerCase(ENGLISH);
    boolean hasExplicitScheme =
        normalized.startsWith("http://") || normalized.startsWith("https://");
    if (hasExplicitScheme) {
        return URI.create(normalized);
    }
    HostAndPort hostAndPort = HostAndPort.fromString(normalized);
    try {
        return new URI("http", null, hostAndPort.getHostText(), hostAndPort.getPortOrDefault(80), null, null, null);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException(e);
    }
}
Use of com.google.common.net.HostAndPort in project ratpack — class RequestActionSupport, method finalizeHeaders:
/**
 * Fills in the protocol-required headers just before the request is sent:
 * a Host header derived from the connection target (unless the caller set
 * one), Connection: close when connection pooling is disabled, and a
 * Content-Length when there is a non-empty body.
 */
private void finalizeHeaders() {
    // Respect a caller-supplied Host header; otherwise derive it from the channel key.
    if (requestConfig.headers.get(HttpHeaderConstants.HOST) == null) {
        requestConfig.headers.set(
            HttpHeaderConstants.HOST,
            HostAndPort.fromParts(channelKey.host, channelKey.port).toString());
    }
    // A zero-sized pool means connections are never reused, so ask the server to close.
    boolean poolingDisabled = client.getPoolSize() == 0;
    if (poolingDisabled) {
        requestConfig.headers.set(HttpHeaderConstants.CONNECTION, HttpHeaderValues.CLOSE);
    }
    // Only advertise Content-Length when there are bytes to send.
    int bodyByteCount = requestConfig.body.readableBytes();
    if (bodyByteCount > 0) {
        requestConfig.headers.set(HttpHeaderConstants.CONTENT_LENGTH, Integer.toString(bodyByteCount));
    }
}
Use of com.google.common.net.HostAndPort in project torodb — class RecoveryService, method initialSync:
/**
 * Performs a full initial synchronization from a remote sync source: truncates
 * the local oplog, drops and re-clones all databases, then applies the remote
 * oplog in two passes and rebuilds indexes so the local node catches up with
 * operations that happened while the clone was in progress.
 *
 * @return {@code true} when the initial sync completed; {@code false} when the
 *         service was stopped before it could finish
 * @throws TryAgainException   on recoverable failures (no sync source,
 *                             unreachable server, oplog races, rollbacks)
 * @throws FatalErrorException on unrecoverable persistence or user errors
 */
private boolean initialSync() throws TryAgainException, FatalErrorException {
    /*
     * 1. store that data is inconsistent 2. decide a sync source 3. lastRemoteOptime1 = get the
     * last optime of the sync source 4. clone all databases except local 5. lastRemoteOptime2 = get
     * the last optime of the sync source 6. apply remote oplog from lastRemoteOptime1 to
     * lastRemoteOptime2 7. lastRemoteOptime3 = get the last optime of the sync source 8. apply
     * remote oplog from lastRemoteOptime2 to lastRemoteOptime3 9. rebuild indexes 10. store
     * lastRemoteOptime3 as the last applied operation optime 11. store that data is consistent 12.
     * change replication state to SECONDARY
     */
    //TODO: Support fastsync (used to restore a node by copying the data from other up-to-date node)
    LOGGER.info("Starting initial sync");
    // Mark data inconsistent until the whole pipeline has succeeded (step 1).
    callback.setConsistentState(false);
    HostAndPort syncSource;
    try {
        syncSource = syncSourceProvider.newSyncSource();
        LOGGER.info("Using node " + syncSource + " to replicate from");
    } catch (NoSyncSourceFoundException ex) {
        throw new TryAgainException("No sync source");
    }
    MongoClient remoteClient;
    try {
        remoteClient = remoteClientFactory.createClient(syncSource);
    } catch (UnreachableMongoServerException ex) {
        throw new TryAgainException(ex);
    }
    try {
        LOGGER.debug("Remote client obtained");
        MongoConnection remoteConnection = remoteClient.openConnection();
        try (OplogReader reader = oplogReaderProvider.newReader(remoteConnection)) {
            // Capture the remote optime *before* cloning; the gap between this point
            // and the end of the clone is replayed by the oplog applications below.
            OplogOperation lastClonedOp = reader.getLastOp();
            OpTime lastRemoteOptime1 = lastClonedOp.getOpTime();
            try (WriteOplogTransaction oplogTransaction = oplogManager.createWriteTransaction()) {
                // Was logged as "Remote database cloning started", duplicating the
                // message below; this phase only resets the local oplog.
                LOGGER.info("Local oplog truncation started");
                oplogTransaction.truncate();
                LOGGER.info("Local databases dropping started");
                Status<?> status = dropDatabases();
                if (!status.isOk()) {
                    throw new TryAgainException("Error while trying to drop collections: " + status);
                }
                LOGGER.info("Local databases dropping finished");
                if (!isRunning()) {
                    LOGGER.warn("Recovery stopped before it can finish");
                    return false;
                }
                LOGGER.info("Remote database cloning started");
                cloneDatabases(remoteClient);
                LOGGER.info("Remote database cloning finished");
                oplogTransaction.forceNewValue(lastClonedOp.getHash(), lastClonedOp.getOpTime());
            }
            if (!isRunning()) {
                LOGGER.warn("Recovery stopped before it can finish");
                return false;
            }
            TorodServer torodServer = server.getTorodServer();
            try (TorodConnection connection = torodServer.openConnection();
                SharedWriteTorodTransaction trans = connection.openWriteTransaction(false)) {
                // First pass: replay everything that happened during the clone.
                OpTime lastRemoteOptime2 = reader.getLastOp().getOpTime();
                LOGGER.info("First oplog application started");
                applyOplog(reader, lastRemoteOptime1, lastRemoteOptime2);
                trans.commit();
                LOGGER.info("First oplog application finished");
                if (!isRunning()) {
                    LOGGER.warn("Recovery stopped before it can finish");
                    return false;
                }
                // Second pass: replay everything that happened during the first pass.
                OplogOperation lastOperation = reader.getLastOp();
                OpTime lastRemoteOptime3 = lastOperation.getOpTime();
                LOGGER.info("Second oplog application started");
                applyOplog(reader, lastRemoteOptime2, lastRemoteOptime3);
                trans.commit();
                LOGGER.info("Second oplog application finished");
                if (!isRunning()) {
                    LOGGER.warn("Recovery stopped before it can finish");
                    return false;
                }
                LOGGER.info("Index rebuild started");
                rebuildIndexes();
                trans.commit();
                LOGGER.info("Index rebuild finished");
                if (!isRunning()) {
                    LOGGER.warn("Recovery stopped before it can finish");
                    return false;
                }
                trans.commit();
            }
        } catch (OplogStartMissingException | OplogOperationUnsupported
            | MongoException | RollbackException ex) {
            // All four were separate catch blocks doing exactly this: recoverable,
            // so request a full retry of the initial sync.
            throw new TryAgainException(ex);
        } catch (OplogManagerPersistException | UserException ex) {
            // Previously OplogManagerPersistException threw a bare FatalErrorException,
            // discarding the cause; preserve it for diagnosis.
            throw new FatalErrorException(ex);
        }
        callback.setConsistentState(true);
        LOGGER.info("Initial sync finished");
    } finally {
        remoteClient.close();
    }
    return true;
}
Use of com.google.common.net.HostAndPort in project torodb — class ReplSyncFetcher, method runProtected:
// Main fetch loop: repeatedly picks a sync source, fetches oplog operations and
// hands them to the callback, until the service stops or a rollback is needed.
@Override
public void runProtected() {
// Remember the executing thread so the service can interrupt blocking calls on stop.
runThread = Thread.currentThread();
boolean rollbackNeeded = false;
try {
OplogReader oplogReader = null;
while (!rollbackNeeded && isRunning()) {
try {
// Honor pause requests and let the applier drain before fetching more.
if (callback.shouldPause()) {
callback.awaitUntilUnpaused();
}
callback.awaitUntilAllFetchedAreApplied();
HostAndPort syncSource = null;
try {
syncSource = syncSourceProvider.newSyncSource(lastFetchedOpTime);
oplogReader = readerProvider.newReader(syncSource);
} catch (NoSyncSourceFoundException ex) {
// No candidate right now; back off briefly and retry the loop.
LOGGER.warn("There is no source to sync from");
Thread.sleep(1000);
continue;
} catch (UnreachableMongoServerException ex) {
// newReader is what reaches the server, so syncSource was assigned above.
assert syncSource != null;
LOGGER.warn("It was impossible to reach the sync source " + syncSource);
Thread.sleep(1000);
continue;
}
// fetch() returns true when the remote history diverged and a rollback is needed.
rollbackNeeded = fetch(oplogReader);
} catch (InterruptedException ex) {
// NOTE(review): the interrupt flag is swallowed here; presumably stop() also
// flips isRunning() so the loop exits — confirm, otherwise
// Thread.currentThread().interrupt() should be restored to avoid losing it.
LOGGER.info("Interrupted fetch process", ex);
} catch (RestartFetchException ex) {
LOGGER.info("Restarting fetch process", ex);
} catch (Throwable ex) {
// Any unexpected error aborts the fetcher via the StopFetchException path below.
throw new StopFetchException(ex);
} finally {
// Close the reader every iteration; a fresh one is created on the next pass.
if (oplogReader != null) {
oplogReader.close();
}
}
}
if (rollbackNeeded) {
LOGGER.info("Requesting rollback");
// NOTE(review): oplogReader was closed in the finally above — confirm that
// callback.rollback() only needs metadata from it, not an open connection.
callback.rollback(oplogReader);
} else {
LOGGER.info(serviceName() + " ending by external request");
callback.fetchFinished();
}
} catch (StopFetchException ex) {
LOGGER.info(serviceName() + " stopped by self request");
callback.fetchAborted(ex);
}
LOGGER.info(serviceName() + " stopped");
}
Aggregations