Use of io.fabric8.patch.management.Patch in project strimzi by strimzi.
The class AbstractResourceOperatorTest, method createWhenExistsIsAPatch.
public void createWhenExistsIsAPatch(TestContext context, boolean cascade) {
    T resource = resource();
    Resource mockResource = mock(resourceType());
    when(mockResource.get()).thenReturn(resource);
    when(mockResource.cascading(cascade)).thenReturn(mockResource);
    NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class);
    when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource);
    MixedOperation mockCms = mock(MixedOperation.class);
    when(mockCms.inNamespace(matches(resource.getMetadata().getNamespace()))).thenReturn(mockNameable);
    C mockClient = mock(clientType());
    mocker(mockClient, mockCms);
    AbstractResourceOperator<C, T, L, D, R, P> op = createResourceOperations(vertx, mockClient);
    Async async = context.async();
    Future<ReconcileResult<P>> fut = op.createOrUpdate(resource);
    fut.setHandler(ar -> {
        assertTrue(ar.succeeded());
        verify(mockResource).get();
        verify(mockResource).patch(any());
        verify(mockResource, never()).create(any());
        verify(mockResource, never()).createNew();
        verify(mockResource, never()).createOrReplace(any());
        verify(mockCms, never()).createOrReplace(any());
        async.complete();
    });
}
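For orientation, here is a minimal sketch of the behaviour this test pins down: createOrUpdate reads the current resource and patches it when it already exists, creating only on a miss. The helper operation() and the ReconcileResult.patched/created factories are illustrative assumptions, not Strimzi's actual implementation.

// Illustrative sketch only — hypothetical names, not Strimzi's real code.
// The mock above verifies exactly this shape: get() first, then patch(),
// and never any of the create variants.
public Future<ReconcileResult<P>> createOrUpdate(T desired) {
    String namespace = desired.getMetadata().getNamespace();
    String name = desired.getMetadata().getName();
    T current = operation().inNamespace(namespace).withName(name).get();
    if (current != null) {
        // resource exists: patch it in place
        P patched = operation().inNamespace(namespace).withName(name).patch(desired);
        return Future.succeededFuture(ReconcileResult.patched(patched));
    } else {
        // resource missing: create it
        operation().inNamespace(namespace).withName(name).create(desired);
        return Future.succeededFuture(ReconcileResult.created(desired));
    }
}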
Use of io.fabric8.patch.management.Patch in project strimzi by strimzi.
The class KafkaAssemblyOperatorMockIT, method testUpdateKafkaWithChangedDeleteClaim.
/**
 * Test that we can change the deleteClaim flag, and that it's honoured
 */
@Test
public void testUpdateKafkaWithChangedDeleteClaim(TestContext context) {
    if (!Storage.StorageType.PERSISTENT_CLAIM.equals(storageType(kafkaStorage))) {
        LOGGER.info("Skipping claim-based test because using storage type {}", kafkaStorage);
        return;
    }
    Set<String> allPvcs = new HashSet<>();
    Set<String> kafkaPvcs = createPvcs(kafkaStorage, kafkaReplicas,
            podId -> KafkaCluster.getPersistentVolumeClaimName(KafkaCluster.kafkaClusterName(CLUSTER_NAME), podId));
    Set<String> zkPvcs = createPvcs(zkStorage, zkReplicas,
            podId -> ZookeeperCluster.getPersistentVolumeClaimName(ZookeeperCluster.zookeeperClusterName(CLUSTER_NAME), podId));
    allPvcs.addAll(kafkaPvcs);
    allPvcs.addAll(zkPvcs);
    KafkaAssemblyOperator kco = createCluster(context);
    boolean originalKafkaDeleteClaim = deleteClaim(kafkaStorage);
    // assertDeleteClaim(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalKafkaDeleteClaim);
    // Flip the deleteClaim flag in the Kafka storage config
    boolean changedKafkaDeleteClaim = !originalKafkaDeleteClaim;
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_STORAGE,
            new JsonObject(kafkaStorage.toString()).put(Storage.DELETE_CLAIM_FIELD, changedKafkaDeleteClaim).toString());
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
    LOGGER.info("Updating with changed delete claim");
    Async updateAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        updateAsync.complete();
    });
    updateAsync.await();
    LOGGER.info("Reconciling again -> delete");
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).delete();
    Async deleteAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        assertPvcs(context, changedKafkaDeleteClaim
                ? deleteClaim(zkStorage) ? emptySet() : zkPvcs
                : deleteClaim(zkStorage) ? kafkaPvcs : allPvcs);
        deleteAsync.complete();
    });
}
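The nested ternary passed to assertPvcs is dense; the following if/else spelling of the same expectation is an illustrative rewrite only, logically equivalent to the expression above:

// Which PVCs should survive the delete-reconcile, spelled out explicitly.
Set<String> expectedPvcs;
if (changedKafkaDeleteClaim) {
    // Kafka's claims are deleted; ZK's survive unless ZK also deletes claims
    expectedPvcs = deleteClaim(zkStorage) ? emptySet() : zkPvcs;
} else {
    // Kafka's claims survive; ZK's may still be deleted
    expectedPvcs = deleteClaim(zkStorage) ? kafkaPvcs : allPvcs;
}
assertPvcs(context, expectedPvcs);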
Use of io.fabric8.patch.management.Patch in project strimzi by strimzi.
The class KafkaAssemblyOperatorMockIT, method testKafkaScaleUp.
/**
 * Test that a Kafka cluster created from a Kafka Cluster CM can be scaled up
 */
@Test
public void testKafkaScaleUp(TestContext context) {
    KafkaAssemblyOperator kco = createCluster(context);
    Async updateAsync = context.async();
    int newScale = kafkaReplicas + 1;
    String newPod = KafkaCluster.kafkaPodName(CLUSTER_NAME, kafkaReplicas);
    context.assertNull(mockClient.pods().inNamespace(NAMESPACE).withName(newPod).get());
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_REPLICAS, String.valueOf(newScale));
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
    LOGGER.info("Scaling up to {} Kafka pods", newScale);
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        context.assertEquals(newScale, mockClient.apps().statefulSets().inNamespace(NAMESPACE)
                .withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get().getSpec().getReplicas());
        context.assertNotNull(mockClient.pods().inNamespace(NAMESPACE).withName(newPod).get(),
                "Expected pod " + newPod + " to have been created");
        // TODO assert no rolling update
        updateAsync.complete();
    });
    updateAsync.await();
}
Use of io.fabric8.patch.management.Patch in project strimzi by strimzi.
The class KafkaAssemblyOperatorMockIT, method testKafkaScaleDown.
/**
 * Test that a Kafka cluster created from a Kafka Cluster CM can be scaled down
 */
@Test
public void testKafkaScaleDown(TestContext context) {
    if (kafkaReplicas <= 1) {
        LOGGER.info("Skipping scale down test because there's only 1 broker");
        return;
    }
    KafkaAssemblyOperator kco = createCluster(context);
    Async updateAsync = context.async();
    int newScale = kafkaReplicas - 1;
    String deletedPod = KafkaCluster.kafkaPodName(CLUSTER_NAME, newScale);
    context.assertNotNull(mockClient.pods().inNamespace(NAMESPACE).withName(deletedPod).get());
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_REPLICAS, String.valueOf(newScale));
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
    LOGGER.info("Scaling down to {} Kafka pods", newScale);
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        context.assertEquals(newScale, mockClient.apps().statefulSets().inNamespace(NAMESPACE)
                .withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get().getSpec().getReplicas());
        context.assertNull(mockClient.pods().inNamespace(NAMESPACE).withName(deletedPod).get(),
                "Expected pod " + deletedPod + " to have been deleted");
        // TODO assert no rolling update
        updateAsync.complete();
    });
    updateAsync.await();
}
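Both scale tests drive the operator the same way: rewrite KEY_REPLICAS in the cluster ConfigMap, patch it through the mock client, then reconcile and assert on the StatefulSet and pods. A hypothetical helper consolidating that shared step (all names taken from the tests above; not part of the actual test class):

// Hypothetical consolidation of the ConfigMap-patching step shared by
// testKafkaScaleUp and testKafkaScaleDown.
private void patchKafkaReplicas(int newScale) {
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_REPLICAS, String.valueOf(newScale));
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);
}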
Use of io.fabric8.patch.management.Patch in project fabric8 by jboss-fuse.
The class DeploymentAgent, method doUpdate.
public boolean doUpdate(Dictionary<String, ?> props) throws Exception {
    if (props == null || Boolean.parseBoolean((String) props.get("disabled"))) {
        return false;
    }
    final Hashtable<String, String> properties = new Hashtable<>();
    for (Enumeration e = props.keys(); e.hasMoreElements(); ) {
        Object key = e.nextElement();
        Object val = props.get(key);
        if (!"service.pid".equals(key) && !FeatureConfigInstaller.FABRIC_ZOOKEEPER_PID.equals(key)) {
            properties.put(key.toString(), val.toString());
        }
    }
    updateStatus("analyzing", null);
    // Building configuration
    curatorCompleteService.waitForService(TimeUnit.SECONDS.toMillis(30));
    String httpUrl;
    List<URI> mavenRepoURIs;
    // force reading of updated information from ZK
    if (!fabricService.isEmpty()) {
        updateMavenRepositoryConfiguration(fabricService.getService());
    }
    try {
        fabricServiceOperations.lock();
        // no one will change the members now
        httpUrl = this.httpUrl;
        mavenRepoURIs = this.mavenRepoURIs;
    } finally {
        fabricServiceOperations.unlock();
    }
    addMavenProxies(properties, httpUrl, mavenRepoURIs);
    final MavenResolver resolver = MavenResolvers.createMavenResolver(properties, "org.ops4j.pax.url.mvn");
    final DownloadManager manager = DownloadManagers.createDownloadManager(resolver, getDownloadExecutor());
    manager.addListener(new DownloadCallback() {
        @Override
        public void downloaded(StreamProvider provider) throws Exception {
            int pending = manager.pending();
            updateStatus(pending > 0 ? "downloading (" + pending + " pending)" : "downloading", null);
        }
    });
    // Update framework, libs, system and config props
    final Object lock = new Object();
    final AtomicBoolean restart = new AtomicBoolean();
    final Set<String> libsToRemove = new HashSet<>(managedLibs.keySet());
    final Set<String> endorsedLibsToRemove = new HashSet<>(managedEndorsedLibs.keySet());
    final Set<String> extensionLibsToRemove = new HashSet<>(managedExtensionLibs.keySet());
    final Set<String> sysPropsToRemove = new HashSet<>(managedSysProps.keySet());
    final Set<String> configPropsToRemove = new HashSet<>(managedConfigProps.keySet());
    final Set<String> etcsToRemove = new HashSet<>(managedEtcs.keySet());
    final Properties configProps = new Properties(new File(KARAF_BASE + File.separator + "etc" + File.separator + "config.properties"));
    final Properties systemProps = new Properties(new File(KARAF_BASE + File.separator + "etc" + File.separator + "system.properties"));
    Downloader downloader = manager.createDownloader();
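    // The remainder of the update is keyed off property-name prefixes:
    // "framework", "config.", "system.", "lib.", "endorsed.", "extension." and "etc.".
    // Downloads are scheduled asynchronously here and joined by downloader.await() below.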
    for (String key : properties.keySet()) {
        if (key.equals("framework")) {
            String url = properties.get(key);
            if (!url.startsWith("mvn:")) {
                throw new IllegalArgumentException("Framework url must use the mvn: protocol");
            }
            downloader.download(url, new DownloadCallback() {
                @Override
                public void downloaded(StreamProvider provider) throws Exception {
                    File file = provider.getFile();
                    String path = file.getPath();
                    if (path.startsWith(KARAF_HOME)) {
                        path = path.substring(KARAF_HOME.length() + 1);
                    }
                    synchronized (lock) {
                        if (!path.equals(configProps.get("karaf.framework.felix"))) {
                            configProps.put("karaf.framework", "felix");
                            configProps.put("karaf.framework.felix", path);
                            restart.set(true);
                        }
                    }
                }
            });
        } else if (key.startsWith("config.")) {
            String k = key.substring("config.".length());
            String v = properties.get(key);
            synchronized (lock) {
                managedConfigProps.put(k, v);
                configPropsToRemove.remove(k);
                if (!v.equals(configProps.get(k))) {
                    configProps.put(k, v);
                    restart.set(true);
                }
            }
        } else if (key.startsWith("system.")) {
            String k = key.substring("system.".length());
            synchronized (lock) {
                String v = properties.get(key);
                managedSysProps.put(k, v);
                sysPropsToRemove.remove(k);
                if (!v.equals(systemProps.get(k))) {
                    systemProps.put(k, v);
                    restart.set(true);
                }
            }
        } else if (key.startsWith("lib.")) {
            String value = properties.get(key);
            downloader.download(value, new DownloadCallback() {
                @Override
                public void downloaded(StreamProvider provider) throws Exception {
                    File libFile = provider.getFile();
                    String libName = libFile.getName();
                    Long checksum = ChecksumUtils.checksum(libFile);
                    boolean update;
                    synchronized (lock) {
                        managedLibs.put(libName, "true");
                        libsToRemove.remove(libName);
                        update = !Long.toString(checksum).equals(libChecksums.getProperty(libName));
                    }
                    if (update) {
                        Files.copy(libFile, new File(LIB_PATH, libName));
                        restart.set(true);
                    }
                }
            });
        } else if (key.startsWith("endorsed.")) {
            String value = properties.get(key);
            downloader.download(value, new DownloadCallback() {
                @Override
                public void downloaded(StreamProvider provider) throws Exception {
                    File libFile = provider.getFile();
                    String libName = libFile.getName();
                    Long checksum = ChecksumUtils.checksum(new FileInputStream(libFile));
                    boolean update;
                    synchronized (lock) {
                        managedEndorsedLibs.put(libName, "true");
                        endorsedLibsToRemove.remove(libName);
                        update = !Long.toString(checksum).equals(endorsedChecksums.getProperty(libName));
                    }
                    if (update) {
                        Files.copy(libFile, new File(LIB_ENDORSED_PATH, libName));
                        restart.set(true);
                    }
                }
            });
        } else if (key.startsWith("extension.")) {
            String value = properties.get(key);
            downloader.download(value, new DownloadCallback() {
                @Override
                public void downloaded(StreamProvider provider) throws Exception {
                    File libFile = provider.getFile();
                    String libName = libFile.getName();
                    Long checksum = ChecksumUtils.checksum(libFile);
                    boolean update;
                    synchronized (lock) {
                        managedExtensionLibs.put(libName, "true");
                        extensionLibsToRemove.remove(libName);
                        update = !Long.toString(checksum).equals(extensionChecksums.getProperty(libName));
                    }
                    if (update) {
                        Files.copy(libFile, new File(LIB_EXT_PATH, libName));
                        restart.set(true);
                    }
                }
            });
        } else if (key.startsWith("etc.")) {
            String value = properties.get(key);
            downloader.download(value, new DownloadCallback() {
                @Override
                public void downloaded(StreamProvider provider) throws Exception {
                    File etcFile = provider.getFile();
                    String etcName = etcFile.getName();
                    Long checksum = ChecksumUtils.checksum(new FileInputStream(etcFile));
                    boolean update;
                    synchronized (lock) {
                        managedEtcs.put(etcName, "true");
                        etcsToRemove.remove(etcName);
                        update = !Long.toString(checksum).equals(etcChecksums.getProperty(etcName));
                    }
                    if (update) {
                        Files.copy(etcFile, new File(KARAF_ETC, etcName));
                    }
                }
            });
        }
    }
    downloader.await();
    // Remove unused libs, system & config properties
    for (String sysProp : sysPropsToRemove) {
        systemProps.remove(sysProp);
        managedSysProps.remove(sysProp);
        System.clearProperty(sysProp);
        restart.set(true);
    }
    for (String configProp : configPropsToRemove) {
        configProps.remove(configProp);
        managedConfigProps.remove(configProp);
        restart.set(true);
    }
    for (String lib : libsToRemove) {
        File libFile = new File(LIB_PATH, lib);
        libFile.delete();
        libChecksums.remove(lib);
        managedLibs.remove(lib);
        restart.set(true);
    }
    for (String lib : endorsedLibsToRemove) {
        File libFile = new File(LIB_ENDORSED_PATH, lib);
        libFile.delete();
        endorsedChecksums.remove(lib);
        managedEndorsedLibs.remove(lib);
        restart.set(true);
    }
    for (String lib : extensionLibsToRemove) {
        File libFile = new File(LIB_EXT_PATH, lib);
        libFile.delete();
        extensionChecksums.remove(lib);
        managedExtensionLibs.remove(lib);
        restart.set(true);
    }
    for (String etc : etcsToRemove) {
        File etcFile = new File(KARAF_ETC, etc);
        etcFile.delete();
        etcChecksums.remove(etc);
        managedEtcs.remove(etc);
    }
    libChecksums.save();
    endorsedChecksums.save();
    extensionChecksums.save();
    etcChecksums.save();
    managedLibs.save();
    managedEndorsedLibs.save();
    managedExtensionLibs.save();
    managedConfigProps.save();
    managedSysProps.save();
    managedEtcs.save();
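    // Framework, lib, and boot-property changes cannot take effect in the running
    // JVM: persist the edited files and schedule a Karaf restart instead of
    // continuing with provisioning.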
    if (restart.get()) {
        updateStatus("restarting", null);
        configProps.save();
        systemProps.save();
        System.setProperty("karaf.restart", "true");
        bundleContext.getBundle(0).stop();
        return false;
    }
    FeatureConfigInstaller configInstaller = null;
    ServiceReference configAdminServiceReference = bundleContext.getServiceReference(ConfigurationAdmin.class.getName());
    if (configAdminServiceReference != null) {
        ConfigurationAdmin configAdmin = (ConfigurationAdmin) bundleContext.getService(configAdminServiceReference);
        configInstaller = new FeatureConfigInstaller(bundleContext, configAdmin, manager);
    }
    int bundleStartTimeout = Constants.BUNDLE_START_TIMEOUT;
    String overriddenTimeout = properties.get(Constants.BUNDLE_START_TIMEOUT_PID_KEY);
    try {
        if (overriddenTimeout != null) {
            bundleStartTimeout = Integer.parseInt(overriddenTimeout);
        }
    } catch (Exception e) {
        LOGGER.warn("Failed to set {} value: [{}], applying default value: {}",
                Constants.BUNDLE_START_TIMEOUT_PID_KEY, overriddenTimeout, Constants.BUNDLE_START_TIMEOUT);
    }
    Agent agent = new Agent(bundleContext.getBundle(), systemBundleContext, manager, configInstaller, null,
            DEFAULT_FEATURE_RESOLUTION_RANGE, DEFAULT_BUNDLE_UPDATE_RANGE, DEFAULT_UPDATE_SNAPSHOTS,
            bundleContext.getDataFile(STATE_FILE), bundleStartTimeout) {

        @Override
        public void updateStatus(String status) {
            DeploymentAgent.this.updateStatus(status, null, false);
        }

        @Override
        public void updateStatus(String status, boolean force) {
            DeploymentAgent.this.updateStatus(status, null, force);
        }

        @Override
        protected void saveState(State newState) throws IOException {
            super.saveState(newState);
            DeploymentAgent.this.state.replace(newState);
        }

        @Override
        protected void provisionList(Set<Resource> resources) {
            DeploymentAgent.this.provisionList = resources;
        }

        @Override
        protected boolean done(boolean agentStarted, List<String> urls) {
            if (agentStarted) {
                // do the patch-management "last touch" only if a new agent wasn't started
                return true;
            }
            // agent finished provisioning, we can call back to low-level patch management
            ServiceReference<PatchManagement> srPm = systemBundleContext.getServiceReference(PatchManagement.class);
            ServiceReference<FabricService> srFs = systemBundleContext.getServiceReference(FabricService.class);
            if (srPm != null && srFs != null) {
                PatchManagement pm = systemBundleContext.getService(srPm);
                FabricService fs = systemBundleContext.getService(srFs);
                if (pm != null && fs != null) {
                    LOGGER.info("Validating baseline information");
                    this.updateStatus("validating baseline information", true);
                    Profile profile = fs.getCurrentContainer().getOverlayProfile();
                    Map<String, String> versions = profile.getConfiguration("io.fabric8.version");
                    File localRepository = resolver.getLocalRepository();
                    if (pm.alignTo(versions, urls, localRepository, new PatchSynchronization())) {
                        this.updateStatus("requires full restart", true);
                        // let's reuse the same flag
                        restart.set(true);
                        return false;
                    }
                    if (handleRestartJvmFlag(profile, restart)) {
                        return false;
                    }
                }
            }
            return true;
        }
    };
    agent.setDeploymentAgentId(deploymentAgentId);
    agent.provision(getPrefixedProperties(properties, "repository."),
            getPrefixedProperties(properties, "feature."),
            getPrefixedProperties(properties, "bundle."),
            getPrefixedProperties(properties, "req."),
            getPrefixedProperties(properties, "override."),
            getPrefixedProperties(properties, "optional."),
            getMetadata(properties, "metadata#"));
    if (restart.get()) {
        // prevent updating status to "success"
        return false;
    }
    return true;
}
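The lib., endorsed., extension. and etc. branches in doUpdate all repeat one idiom: checksum the downloaded file, record it as managed, and copy it into place only when the checksum differs from the stored one. Below is a distilled sketch of that pattern, assuming the same fabric8 Files and ChecksumUtils utilities used above; the real method additionally maintains the managed-name sets and toRemove bookkeeping under the shared lock because the download callbacks run concurrently.

// Sketch of the checksum-guarded copy used for lib/endorsed/extension/etc files.
// Returns true when the target was (re)written, so the caller can flag a restart
// (lib changes require one; etc file changes do not).
private static boolean copyIfChecksumChanged(File downloaded, File targetDir,
                                             Properties checksums) throws IOException {
    String name = downloaded.getName();
    long checksum = ChecksumUtils.checksum(downloaded);
    boolean changed = !Long.toString(checksum).equals(checksums.getProperty(name));
    if (changed) {
        Files.copy(downloaded, new File(targetDir, name));
        checksums.put(name, Long.toString(checksum));
    }
    return changed;
}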