Use of io.fabric8.patch.management.Patch in project fabric8 by jboss-fuse.
The class PatchApplyTest, method deployment().
@Deployment
@StartLevelAware(autostart = true)
public static Archive<?> deployment() {
    final JavaArchive archive = ShrinkWrap.create(JavaArchive.class, "patch-apply-test.jar");
    archive.addPackage(CommandSupport.class.getPackage());
    archive.addPackage(IOHelpers.class.getPackage());
    archive.setManifest(new Asset() {
        @Override
        public InputStream openStream() {
            OSGiManifestBuilder builder = OSGiManifestBuilder.newInstance();
            builder.addBundleManifestVersion(2);
            builder.addBundleSymbolicName(archive.getName());
            builder.addBundleVersion("1.0.0");
            builder.addImportPackages(ServiceLocator.class, FabricService.class);
            builder.addImportPackages("io.fabric8.git");
            builder.addImportPackages(AbstractCommand.class, Action.class);
            builder.addImportPackage("org.apache.felix.service.command;status=provisional");
            builder.addImportPackages(ConfigurationAdmin.class, ServiceTracker.class, Logger.class);
            return builder.openStream();
        }
    });
    // add the original 'patchable' bundle version and the patch file to the test bundle as an extra resource
    archive.add(createPatchableBundle(ORIGINAL_VERSION), "/bundles", ZipExporter.class);
    archive.add(createPatchZip(), "/patches", ZipExporter.class);
    return archive;
}
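The helpers createPatchableBundle() and createPatchZip() are defined elsewhere in PatchApplyTest and are not part of this excerpt. Purely as a hypothetical sketch (the method name, descriptor file name, keys and version below are assumptions, not taken from the original), a patch archive could be assembled with the same ShrinkWrap API used above:

// Hypothetical sketch only; the real createPatchZip() may differ.
// Assumes a properties-style descriptor that lists the patched bundle.
private static JavaArchive createPatchZipSketch() {
    String descriptor = "id = example-patch\n" + "bundle.0 = mvn:io.fabric8.itests/patchable/1.0.1\n";
    return ShrinkWrap.create(JavaArchive.class, "example-patch.zip")
            .add(new StringAsset(descriptor), "example-patch.patch");
}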
Use of io.fabric8.patch.management.Patch in project fabric8 by jboss-fuse.
The class ShowAction, method doExecute().
@Override
protected void doExecute(Service service) throws Exception {
    Patch patch = patchManagement.loadPatch(new PatchDetailsRequest(patchId, bundles, files, diff));
    if (patch == null) {
        throw new PatchException("Patch '" + patchId + "' not found");
    }
    System.out.println(String.format("Patch ID: %s", patch.getPatchData().getId()));
    if (patch.getManagedPatch() != null) {
        System.out.println(String.format("Patch Commit ID: %s", patch.getManagedPatch().getCommitId()));
    }
    if (bundles) {
        System.out.println(String.format("#### %d Bundles%s", patch.getPatchData().getBundles().size(), patch.getPatchData().getBundles().size() == 0 ? "" : ":"));
        iterate(patch.getPatchData().getBundles());
    }
    if (files) {
        ManagedPatch details = patch.getManagedPatch();
        System.out.println(String.format("#### %d Files added%s", details.getFilesAdded().size(), details.getFilesAdded().size() == 0 ? "" : ":"));
        iterate(details.getFilesAdded());
        System.out.println(String.format("#### %d Files modified%s", details.getFilesModified().size(), details.getFilesModified().size() == 0 ? "" : ":"));
        iterate(details.getFilesModified());
        System.out.println(String.format("#### %d Files removed%s", details.getFilesRemoved().size(), details.getFilesRemoved().size() == 0 ? "" : ":"));
        iterate(details.getFilesRemoved());
    }
    if (diff) {
        System.out.println("#### Patch changes:\n" + patch.getManagedPatch().getUnifiedDiff());
    }
}
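The iterate() helper called above is not included in this excerpt. A minimal sketch of what such a helper might look like, assuming it simply prints each entry on its own line (the signature and formatting are assumptions):

// Hypothetical sketch; the real iterate() in ShowAction may format entries differently.
private void iterate(java.util.Collection<String> items) {
    for (String item : items) {
        System.out.println(" - " + item);
    }
}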
Use of io.fabric8.patch.management.Patch in project fabric8-maven-plugin by fabric8io.
The class OpenshiftBuildServiceTest, method testSuccessfulSecondBuild().
@Test
public void testSuccessfulSecondBuild() throws Exception {
    int nTries = 0;
    boolean bTestComplete = false;
    do {
        try {
            nTries++;
            BuildService.BuildServiceConfig config = defaultConfig.build();
            WebServerEventCollector<OpenShiftMockServer> collector = createMockServer(config, true, 50, true, true);
            OpenShiftMockServer mockServer = collector.getMockServer();
            OpenShiftClient client = mockServer.createOpenShiftClient();
            OpenshiftBuildService service = new OpenshiftBuildService(client, logger, dockerServiceHub, config);
            service.build(image);
            assertTrue(mockServer.getRequestCount() > 8);
            collector.assertEventsRecordedInOrder("build-config-check", "patch-build-config", "pushed");
            collector.assertEventsNotRecorded("new-build-config");
            bTestComplete = true;
        } catch (Fabric8ServiceException exception) {
            Throwable rootCause = getRootCause(exception);
            logger.warn("A problem encountered while running test {}, retrying..", exception.getMessage());
            // Let's wait for a while, and then retry again
            if (rootCause != null && rootCause instanceof IOException) {
                continue;
            }
        }
    } while (nTries < MAX_TIMEOUT_RETRIES && !bTestComplete);
}
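The getRootCause() helper referenced in the catch block is not shown in this excerpt; it may well be a library utility such as Apache Commons Lang's ExceptionUtils.getRootCause. A minimal hand-rolled sketch under the assumption that it simply walks the cause chain:

// Hypothetical sketch: walk the cause chain to the deepest Throwable.
private static Throwable getRootCause(Throwable throwable) {
    Throwable cause = throwable;
    while (cause.getCause() != null && cause.getCause() != cause) {
        cause = cause.getCause();
    }
    return cause;
}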
Use of io.fabric8.patch.management.Patch in project fabric8-maven-plugin by fabric8io.
The class OpenshiftBuildServiceTest, method createMockServer().
protected WebServerEventCollector<OpenShiftMockServer> createMockServer(BuildService.BuildServiceConfig config, boolean success, long buildDelay, boolean buildConfigExists, boolean imageStreamExists) {
    OpenShiftMockServer mockServer = new OpenShiftMockServer(false);
    WebServerEventCollector<OpenShiftMockServer> collector = new WebServerEventCollector<>(mockServer);
    BuildConfig bc = new BuildConfigBuilder().withNewMetadata().withName(projectName + config.getS2iBuildNameSuffix()).endMetadata().withNewSpec().endSpec().build();
    ImageStream imageStream = new ImageStreamBuilder().withNewMetadata().withName(projectName).endMetadata().withStatus(new ImageStreamStatusBuilder().addNewTagLike(new NamedTagEventListBuilder().addNewItem().withImage("abcdef0123456789").endItem().build()).endTag().build()).build();
    KubernetesList builds = new KubernetesListBuilder().withItems(new BuildBuilder().withNewMetadata().withName(projectName).endMetadata().build()).withNewMetadata().withResourceVersion("1").endMetadata().build();
    String buildStatus = success ? "Complete" : "Fail";
    Build build = new BuildBuilder().withNewMetadata().withResourceVersion("2").endMetadata().withNewStatus().withPhase(buildStatus).endStatus().build();
    if (!buildConfigExists) {
        mockServer.expect().get().withPath("/oapi/v1/namespaces/test/buildconfigs/" + projectName + config.getS2iBuildNameSuffix()).andReply(collector.record("build-config-check").andReturn(404, "")).once();
        mockServer.expect().post().withPath("/oapi/v1/namespaces/test/buildconfigs").andReply(collector.record("new-build-config").andReturn(201, bc)).once();
    } else {
        mockServer.expect().patch().withPath("/oapi/v1/namespaces/test/buildconfigs/" + projectName + config.getS2iBuildNameSuffix()).andReply(collector.record("patch-build-config").andReturn(200, bc)).once();
    }
    mockServer.expect().get().withPath("/oapi/v1/namespaces/test/buildconfigs/" + projectName + config.getS2iBuildNameSuffix()).andReply(collector.record("build-config-check").andReturn(200, bc)).always();
    if (!imageStreamExists) {
        mockServer.expect().get().withPath("/oapi/v1/namespaces/test/imagestreams/" + projectName).andReturn(404, "").once();
    }
    mockServer.expect().get().withPath("/oapi/v1/namespaces/test/imagestreams/" + projectName).andReturn(200, imageStream).always();
    mockServer.expect().post().withPath("/oapi/v1/namespaces/test/imagestreams").andReturn(201, imageStream).once();
    mockServer.expect().post().withPath("/oapi/v1/namespaces/test/buildconfigs/" + projectName + config.getS2iBuildNameSuffix() + "/instantiatebinary?commit=").andReply(collector.record("pushed").andReturn(201, imageStream)).once();
    mockServer.expect().get().withPath("/oapi/v1/namespaces/test/builds").andReply(collector.record("check-build").andReturn(200, builds)).always();
    mockServer.expect().get().withPath("/oapi/v1/namespaces/test/builds?labelSelector=openshift.io/build-config.name%3D" + projectName + config.getS2iBuildNameSuffix()).andReturn(200, builds).always();
    mockServer.expect().withPath("/oapi/v1/namespaces/test/builds/" + projectName).andReturn(200, build).always();
    mockServer.expect().withPath("/oapi/v1/namespaces/test/builds?fieldSelector=metadata.name%3D" + projectName + "&watch=true").andUpgradeToWebSocket().open().waitFor(buildDelay).andEmit(new WatchEvent(build, "MODIFIED")).done().always();
    return collector;
}
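For contrast with testSuccessfulSecondBuild above, a first-build variant would pass buildConfigExists = false so the mock expects a POST of a new BuildConfig instead of a PATCH. A hypothetical sketch (the test name and exact assertions are assumptions, not taken from the original):

// Hypothetical sketch of a first-build test against the same mock server.
@Test
public void testSuccessfulFirstBuildSketch() throws Exception {
    BuildService.BuildServiceConfig config = defaultConfig.build();
    // buildConfigExists = false and imageStreamExists = false: the service should create both resources
    WebServerEventCollector<OpenShiftMockServer> collector = createMockServer(config, true, 50, false, false);
    OpenShiftClient client = collector.getMockServer().createOpenShiftClient();
    OpenshiftBuildService service = new OpenshiftBuildService(client, logger, dockerServiceHub, config);
    service.build(image);
    collector.assertEventsRecordedInOrder("build-config-check", "new-build-config", "pushed");
    collector.assertEventsNotRecorded("patch-build-config");
}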
Use of io.fabric8.patch.management.Patch in project strimzi by strimzi.
The class KafkaAssemblyOperatorTest, method updateCluster().
private void updateCluster(TestContext context, ConfigMap originalCm, ConfigMap clusterCm, boolean kafkaRolling, boolean zkRolling) {
    KafkaCluster originalKafkaCluster = KafkaCluster.fromConfigMap(originalCm);
    KafkaCluster updatedKafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
    ZookeeperCluster originalZookeeperCluster = ZookeeperCluster.fromConfigMap(originalCm);
    ZookeeperCluster updatedZookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
    TopicController originalTopicController = TopicController.fromConfigMap(originalCm);
    // create CM, Service, headless service, statefulset and so on
    ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
    ServiceOperator mockServiceOps = mock(ServiceOperator.class);
    ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
    KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
    PvcOperator mockPvcOps = mock(PvcOperator.class);
    DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
    String clusterCmName = clusterCm.getMetadata().getName();
    String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
    // Mock CM get
    when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
    ConfigMap metricsCm = new ConfigMapBuilder().withNewMetadata().withName(KafkaCluster.metricConfigsName(clusterCmName)).withNamespace(clusterCmNamespace).endMetadata().withData(Collections.singletonMap(AbstractModel.METRICS_CONFIG_FILE, METRICS_CONFIG)).build();
    when(mockCmOps.get(clusterCmNamespace, KafkaCluster.metricConfigsName(clusterCmName))).thenReturn(metricsCm);
    ConfigMap zkMetricsCm = new ConfigMapBuilder().withNewMetadata().withName(ZookeeperCluster.zookeeperMetricsName(clusterCmName)).withNamespace(clusterCmNamespace).endMetadata().withData(Collections.singletonMap(AbstractModel.METRICS_CONFIG_FILE, METRICS_CONFIG)).build();
    when(mockCmOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperMetricsName(clusterCmName))).thenReturn(zkMetricsCm);
    // Mock Service gets
    when(mockServiceOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateService());
    when(mockServiceOps.get(clusterCmNamespace, KafkaCluster.headlessName(clusterCmName))).thenReturn(originalKafkaCluster.generateHeadlessService());
    when(mockServiceOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateService());
    when(mockServiceOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperHeadlessName(clusterCmName))).thenReturn(originalZookeeperCluster.generateHeadlessService());
    when(mockServiceOps.endpointReadiness(eq(clusterCmNamespace), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    // Mock StatefulSet get
    when(mockKsOps.get(clusterCmNamespace, KafkaCluster.kafkaClusterName(clusterCmName))).thenReturn(originalKafkaCluster.generateStatefulSet(openShift));
    when(mockZsOps.get(clusterCmNamespace, ZookeeperCluster.zookeeperClusterName(clusterCmName))).thenReturn(originalZookeeperCluster.generateStatefulSet(openShift));
    // Mock Deployment get
    if (originalTopicController != null) {
        when(mockDepOps.get(clusterCmNamespace, TopicController.topicControllerName(clusterCmName))).thenReturn(originalTopicController.generateDeployment());
    }
    // Mock CM patch
    Set<String> metricsCms = set();
    doAnswer(invocation -> {
        metricsCms.add(invocation.getArgument(1));
        return Future.succeededFuture();
    }).when(mockCmOps).reconcile(eq(clusterCmNamespace), anyString(), any());
    // Mock Service patch (both service and headless service)
    ArgumentCaptor<String> patchedServicesCaptor = ArgumentCaptor.forClass(String.class);
    when(mockServiceOps.reconcile(eq(clusterCmNamespace), patchedServicesCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    // Mock StatefulSet patch
    when(mockZsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(zkRolling)));
    when(mockKsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(kafkaRolling)));
    // Mock StatefulSet rollingUpdate
    Set<String> rollingRestarts = set();
    // Mock StatefulSet scaleUp
    ArgumentCaptor<String> scaledUpCaptor = ArgumentCaptor.forClass(String.class);
    when(mockZsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    // Mock StatefulSet scaleDown
    ArgumentCaptor<String> scaledDownCaptor = ArgumentCaptor.forClass(String.class);
    when(mockZsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    when(mockZsOps.rollingUpdate(anyString(), anyString())).thenAnswer(i -> {
        if (!zkRolling) {
            context.fail("Unexpected rolling update");
        }
        return Future.succeededFuture();
    });
    // ArgumentCaptor<String> scaledUpCaptor = ArgumentCaptor.forClass(String.class);
    when(mockKsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    // Mock StatefulSet scaleDown
    // ArgumentCaptor<String> scaledDownCaptor = ArgumentCaptor.forClass(String.class);
    when(mockKsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn(Future.succeededFuture(42));
    when(mockKsOps.rollingUpdate(anyString(), anyString())).thenAnswer(i -> {
        if (!kafkaRolling) {
            context.fail("Unexpected rolling update");
        }
        return Future.succeededFuture();
    });
    // Mock Deployment patch
    ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
    when(mockDepOps.reconcile(anyString(), depCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
    // Now try to update a KafkaCluster based on this CM
    Async async = context.async();
    ops.createOrUpdate(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), clusterCm, createResult -> {
        if (createResult.failed()) {
            createResult.cause().printStackTrace();
        }
        context.assertTrue(createResult.succeeded());
        // rolling restart
        Set<String> expectedRollingRestarts = set();
        if (KafkaSetOperator.needsRollingUpdate(new StatefulSetDiff(originalKafkaCluster.generateStatefulSet(openShift), updatedKafkaCluster.generateStatefulSet(openShift)))) {
            expectedRollingRestarts.add(originalKafkaCluster.getName());
        }
        if (ZookeeperSetOperator.needsRollingUpdate(new StatefulSetDiff(originalZookeeperCluster.generateStatefulSet(openShift), updatedZookeeperCluster.generateStatefulSet(openShift)))) {
            expectedRollingRestarts.add(originalZookeeperCluster.getName());
        }
        // No metrics config => no CMs created
        verify(mockCmOps, never()).createOrUpdate(any());
        verifyNoMoreInteractions(mockPvcOps);
        async.complete();
    });
}
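updateCluster() is a shared fixture rather than a test in itself; the @Test methods that drive it are not part of this excerpt. A hypothetical caller might look like the sketch below (the test name and the getConfigMap() fixture helper are assumptions):

// Hypothetical sketch: an unchanged ConfigMap should trigger no rolling update of either StatefulSet.
@Test
public void testUpdateClusterNoChangeSketch(TestContext context) {
    ConfigMap clusterCm = getConfigMap("my-cluster");
    updateCluster(context, clusterCm, clusterCm, false, false);
}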