diff --git a/addons/telemetry/telemetry_test.go b/addons/telemetry/telemetry_test.go index ad57ba96c7..3b90085155 100644 --- a/addons/telemetry/telemetry_test.go +++ b/addons/telemetry/telemetry_test.go @@ -91,7 +91,7 @@ func TestTelemetryTraitWithValues(t *testing.T) { assert.Equal(t, "${camel.k.telemetry.samplerParentBased}", e.ApplicationProperties["quarkus.opentelemetry.tracer.sampler.parent-based"]) } -func TestTelemetryForSourceless(t *testing.T) { +func TestTelemetryForSelfManagedBuild(t *testing.T) { e := createEnvironment(t, camel.QuarkusCatalog) telemetry := NewTelemetryTrait() tt, _ := telemetry.(*telemetryTrait) diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index c37d506127..475ba51907 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -1,3 +1,4 @@ +* xref:concepts/overview.adoc[Concepts overview] * xref:installation/installation.adoc[Installation] ** xref:installation/integrationplatform.adoc[Configure IntegrationPlatform] ** xref:installation/registry/registry.adoc[Configure Registry] @@ -6,26 +7,22 @@ ** xref:installation/upgrade.adoc[Upgrade] ** xref:installation/uninstalling.adoc[Uninstalling] ** xref:installation/advanced/advanced.adoc[Advanced] +*** xref:installation/advanced/network.adoc[Components topology] *** xref:installation/advanced/build-config.adoc[Build tuning] -*** xref:installation/advanced/network.adoc[Network architecture] *** xref:installation/advanced/resources.adoc[Resource management] *** xref:installation/advanced/multi.adoc[Multiple Operators] *** xref:installation/advanced/http-proxy.adoc[HTTP Proxy] *** xref:installation/advanced/multi-architecture.adoc[Multi Architecture] *** xref:installation/advanced/offline.adoc[Offline] -* xref:cli/cli.adoc[Command Line Interface] -** xref:cli/file-based-config.adoc[File-based Config] -** xref:cli/modeline.adoc[Modeline] +*** xref:installation/advanced/pruning-registry.adoc[Pruning Registry] * xref:running/running.adoc[Run an Integration] 
-** xref:running/dev-mode.adoc[Developer mode] -** xref:running/dry-run.adoc[Dry run] -** xref:running/runtime-version.adoc[Camel version] -** xref:running/quarkus-native.adoc[Quarkus Native] -** xref:running/camel-runtimes.adoc[Camel runtimes] -** xref:running/import.adoc[Import existing Camel apps] -** xref:running/run-from-github.adoc[Run from GitHub] +** xref:running/running-cli.adoc[kamel run CLI] +** xref:running/self-managed.adoc[Self managed Integrations] +** xref:running/synthetic.adoc[Synthetic Integrations] ** xref:running/promoting.adoc[Promote an Integration] -** xref:running/knative-sink.adoc[Knative Sinks] +* xref:pipes/pipes.adoc[Run a Pipe] +** xref:pipes/pipes-cli.adoc[kamel bind CLI] +** xref:pipes/promoting.adoc[Promote a Pipe] * xref:languages/languages.adoc[Languages] ** xref:languages/java.adoc[Java] ** xref:languages/yaml.adoc[YAML] @@ -35,18 +32,14 @@ ** xref:languages/jsh.adoc[JSheel] ** xref:languages/kotlin.adoc[Kotlin] * xref:configuration/configuration.adoc[Configuration] -** xref:configuration/build-time-properties.adoc[Build time properties] -** xref:configuration/components.adoc[Components] ** xref:configuration/dependencies.adoc[Dependencies] -** xref:configuration/maven-profile.adoc[Maven Profile] -** xref:configuration/runtime-properties.adoc[Properties] +** xref:configuration/build-time-properties.adoc[Build time properties] +** xref:configuration/quarkus-native.adoc[Quarkus Native] +** xref:configuration/base-image.adoc[Base image] +** xref:configuration/camel-properties.adoc[Camel Properties] ** xref:configuration/runtime-config.adoc[Runtime configuration] ** xref:configuration/runtime-resources.adoc[Runtime resources] -* xref:kamelets/kamelets.adoc[Kamelets] -** xref:kamelets/kamelets-distribution.adoc[Distribution] -** xref:kamelets/kamelets-user.adoc[User Guide] -** xref:kamelets/kamelets-dev.adoc[Developer Guide] -** xref:kamelets/kameletbindings-error-handler.adoc[Error Handling] +** 
xref:configuration/runtime-version.adoc[Camel version] * xref:traits:traits.adoc[Traits] // Start of autogenerated code - DO NOT EDIT! (trait-nav) ** xref:traits:3scale.adoc[3Scale] @@ -92,8 +85,13 @@ ** xref:traits:telemetry.adoc[Telemetry] ** xref:traits:toleration.adoc[Toleration] // End of autogenerated code - DO NOT EDIT! (trait-nav) +* xref:kamelets/kamelets.adoc[Kamelets] +** xref:kamelets/kamelets-distribution.adoc[Distribution] +** xref:kamelets/kamelets-user.adoc[User Guide] +** xref:kamelets/kamelets-dev.adoc[Developer Guide] +** xref:kamelets/kameletbindings-error-handler.adoc[Error Handling] * xref:pipeline/pipeline.adoc[Pipelines] -** xref:pipeline/tekton.adoc[Tekton] +** xref:pipeline/external.adoc[External CICD] * Scaling ** xref:scaling/integration.adoc[Integrations] ** xref:scaling/binding.adoc[Pipes] @@ -104,10 +102,9 @@ ** xref:observability/monitoring.adoc[Monitoring] *** xref:observability/monitoring/operator.adoc[Operator] *** xref:observability/monitoring/integration.adoc[Integration] +*** xref:observability/monitoring/operator-sops.adoc[Standard Operating Procedures] * xref:troubleshooting/troubleshooting.adoc[Troubleshooting] ** xref:troubleshooting/debugging.adoc[Debugging] -** xref:troubleshooting/operating.adoc[Operating] -** xref:troubleshooting/known-issues.adoc[Known Issues] * xref:architecture/architecture.adoc[Architecture] ** xref:architecture/operator.adoc[Operator] *** xref:architecture/cr/integration-platform.adoc[IntegrationPlatform] @@ -127,5 +124,4 @@ ** xref:contributing/local-development.adoc[Local development] *** xref:contributing/local-execution.adoc[Operator - local execution] *** xref:contributing/remote-debugging.adoc[Operator - remote debug] -** xref:contributing/local-deployment-olm.adoc[Local OLM deployment] ** xref:contributing/e2e.adoc[Local E2E testing] diff --git a/docs/modules/ROOT/pages/cli/cli.adoc b/docs/modules/ROOT/pages/cli/cli.adoc deleted file mode 100644 index d6eeba139b..0000000000 --- 
a/docs/modules/ROOT/pages/cli/cli.adoc +++ /dev/null @@ -1,111 +0,0 @@ -= Camel K CLI (kamel) - -The Camel K command line interface, `kamel`, is the main entry point for running integrations on a Kubernetes cluster. - -Releases of the Camel K CLI are available on: - -- Apache Mirrors (official): https://downloads.apache.org/camel/camel-k/ -- Github Releases: https://github.com/apache/camel-k/releases -- Homebrew (Mac and Linux): https://formulae.brew.sh/formula/kamel - -== Running on macOS - -Before running the CLI on macOS, it may be necessary to give adequate permissions for it to run. - -It can either be done on the "Privacy & Security" panel in the System Settings or via command-line: - -``` -xattr -d com.apple.quarantine /path/to/kamel -``` - -== Available Commands - -Some of the most used commands are: - -.Useful Commands -[cols="1m,2,2m"] -|=== -|Name |Description |Example - -|help -|Obtain the full list of available commands -|kamel help - -|run -|Run an integration on Kubernetes -|kamel run Routes.java - -|debug -|Debug a remote integration using a local debugger -|kamel debug my-integration - -|get -|Get integrations deployed on Kubernetes -|kamel get - -|log -|Print the logs of a running integration -|kamel log routes - -|delete -|Delete integrations deployed on Kubernetes -|kamel delete routes - -|bind -|Bind Kubernetes resources, such as Kamelets, in an integration flow. -|kamel bind timer-source -p "source.message=hello world" channel:mychannel - -|rebuild -|Clear the state of integrations to rebuild them. -|kamel rebuild --all - -|reset -|Reset the Camel K installation -|kamel reset - -|version -|Display client version -|kamel version - -|=== - -The list above is not the full list of available commands. You can run `kamel help` to obtain the full list. 
Each command also takes the `--help` as option to output more information, e.g.: - -[source,console] ----- -$ kamel run --help ----- - -== Global Flags - -While each command has a dedicated set of flags, there are global flags that are available to every command: - -.Global Flags -[cols="1,2,2m"] -|=== -|Flag |Description |Example - -|`--kube-config PATH` -|Path to the config file to use for CLI requests -|kamel run my-route.yaml --kube-config ~/.kube/config - -|`-h` or `--help` -|Help for `kamel`, or the command -|kamel run --help - -|`-n` or `--namespace NAME` -|Namespace to use for all operations -|kamel get --namespace NAME - -|=== - -For command-specific flags, run `--help` with the command to obtain the full list, e.g.: - -[source,console] ----- -$ kamel --help ----- - -== Modeline - -Some command options in the CLI can be also specified as modeline in the source file, take a look at the xref:cli/modeline.adoc[Modeline] section for more information. diff --git a/docs/modules/ROOT/pages/cli/file-based-config.adoc b/docs/modules/ROOT/pages/cli/file-based-config.adoc deleted file mode 100644 index a824cc1ebc..0000000000 --- a/docs/modules/ROOT/pages/cli/file-based-config.adoc +++ /dev/null @@ -1,94 +0,0 @@ -= File Based Configuration - -File-based configuration is used to set command flags. Flag values do not need to be entered on a regular basis. The file is read on Kamel startup and the flags are set accordingly. - -The file's default name is `kamel-config.yaml`, it can be changed by setting the environment variable `KAMEL_CONFIG_NAME`. Kamel tries to read the file from the following directories in the given order: - - - `.` - - `./.kamel/` - - `~/.kamel/` - -It can be overridden by setting the environment variable `KAMEL_CONFIG_PATH` to file path. - - -To configure this flag, create a file named `kamel-config.yaml` on the same directory as your integration. 
The file must contain a yaml structure as shown below: - -.kamel-config.yaml - -```yaml -kamel: - install: - health-port: 8081 - monitoring-port: 8082 -``` - -As there are several supported locations, it can be handy to list a configuration file in one specific location, in this particular case the `config` command can be used. - -To list the configuration file used in practice by Kamel: - -[source,console] ----- -$ kamel config --list -The configuration file is read from /some/path/kamel-config.yaml -kamel: - config: - default-namespace: some-name ----- - -Alternatively, the same result can be retrieved using the `--folder` flag with `used` as value. - -[source,console] ----- -$ kamel config --list --folder used ----- - -The flag `--folder` accepts 4 other possible values, one per possible location. - -To list the configuration file in the working directory (`.`): - -[source,console] ----- -$ kamel config --list --folder working ----- - -To list the configuration file in the folder `.kamel` located in the working directory (`./.kamel/`): - -[source,console] ----- -$ kamel config --list --folder sub ----- - -To list the configuration file in the home directory (`~/.kamel/`): - -[source,console] ----- -$ kamel config --list --folder home ----- - -To list the configuration file located in the folder whose path is set in the environment variable `KAMEL_CONFIG_PATH`: - -[source,console] ----- -$ kamel config --list --folder env ----- - -The `config` command can also set the default namespace for all Kamel commands thanks to the flag `--default-namespace` as next: - -[source,console] ----- -$ kamel config --default-namespace some-name ----- - -Note that the flag `--default-namespace` can be associated with `--list` to see directly the resulting content: - -[source,console] ----- -$ kamel config --list --default-namespace some-name -The configuration file is read from /some/path/kamel-config.yaml -kamel: - config: - default-namespace: some-name - install: - health-port: 
8081 - monitoring-port: 8082 ----- diff --git a/docs/modules/ROOT/pages/cli/modeline.adoc b/docs/modules/ROOT/pages/cli/modeline.adoc deleted file mode 100644 index 8b1963a177..0000000000 --- a/docs/modules/ROOT/pages/cli/modeline.adoc +++ /dev/null @@ -1,102 +0,0 @@ -= Camel K Modeline - -Integration files can contain modeline hooks that allow to customize the way integrations are executed via command line. -For example: - -.Hello.java -[source,java] ----- -// camel-k: dependency=mvn:org.my:application:1.0 // <1> - -import org.apache.camel.builder.RouteBuilder; - -public class Hello extends RouteBuilder { - @Override - public void configure() throws Exception { - - from("timer:java?period=1000") - .bean(org.my.BusinessLogic) // <2> - .log("${body}"); - - } -} ----- -<1> Modeline import of Maven library -<2> Usage of a business logic class from the external library - -When the integration code above is executed using the `kamel run` CLI command, the modeline options declared in the file are appended to -the list of arguments that are passed to the command. - -The `kamel` CLI will alert you, printing the full command in the shell: - -[source,console] ----- -$ kamel run Hello.java -Modeline options have been loaded from source files -Full command: kamel run Hello.java --dependency mvn:org.my:application:1.0 ----- - -Multiple options can be specified for an integration. -For example, the following modeline options enables 3scale and limits the integration container memory: - -.ThreeScaleRest.java -[source,java] ----- -// camel-k: trait=3scale.enabled=true trait=container.limit-memory=256Mi // <1> - -import org.apache.camel.builder.RouteBuilder; - -public class ThreeScaleRest extends RouteBuilder { - - @Override - public void configure() throws Exception { - rest().get("/") - .route() - .setBody().constant("Hello"); - } -} ----- -<1> Enables both the _container_ and _3scale_ traits, to expose the route via 3scale and limit the container memory. 
- -All options that are available for the `kamel run` command can be specified as modeline options. -The following is a partial list of useful options: - -.Useful Modeline Options -[cols="1m,2v"] -|=== -|Option | Description - -|build-property -|Add a build time property or properties file (syntax: _[my-key=my-value\|file:/path/to/my-conf.properties]_ - -|config -|Add a runtime configuration from a Configmap, Secret or file (syntax: _[configmap\|secret\|file]:name[/key]_, where name represents the local file path or the configmap/secret name and key optionally represents the configmap/secret key to be filtered) - -|dependency -|An external library that should be included, e.g. for Maven dependencies `dependency=mvn:org.my:app:1.0` - -|env -|Set an environment variable in the integration container, e.g. `env=MY_VAR=my-value` - -|label -|Add a label to the integration pod, e.g., `label=my.company=hello` - -|name -|The integration name - -|open-api -|Add an OpenAPI v2 spec (file path) - -|profile -|Trait profile used for deployment - -|property -|Add a runtime property or properties file (syntax: _[my-key=my-value\|file:/path/to/my-conf.properties]_) - -|resource -|Add a runtime resource from a Configmap, Secret or file (syntax: _[configmap\|secret\|file]:name[/key][@path]_, where name represents the local file path or the configmap/secret name, key optionally represents the configmap/secret key to be filtered and path represents the destination path) - -|trait -|Configure a trait, e.g. 
`trait=service.enabled=false` - -|=== diff --git a/docs/modules/ROOT/pages/concepts/integrations.png b/docs/modules/ROOT/pages/concepts/integrations.png new file mode 100644 index 0000000000..3a659069ea Binary files /dev/null and b/docs/modules/ROOT/pages/concepts/integrations.png differ diff --git a/docs/modules/ROOT/pages/concepts/overview.adoc b/docs/modules/ROOT/pages/concepts/overview.adoc new file mode 100644 index 0000000000..9235230cee --- /dev/null +++ b/docs/modules/ROOT/pages/concepts/overview.adoc @@ -0,0 +1,33 @@ += What is Camel K + +Camel K is a https://kubernetes.io/docs/concepts/extend-kubernetes/operator/[Kubernetes Operator] in charge of managing the lifecycle of Camel workloads running on the cloud. It can manage aspects like build and deploy (managed Integrations), only deploy (self managed build Integrations) and any other operational aspects (promoting across environments, monitoring, resource tuning, upgrades, interactions with Knative and Kafka, ...). + +== Integrations + +A Camel workload is generally represented by a route expressed in any Camel DSL. This is wrapped into one or more custom resources which will manage the lifecycle of the application on the cloud. + +image::concepts/integrations.png[Camel workload lifecycle, width=1024] + +The user is responsible for creating a single IntegrationPlatform which contains the configuration required to drive the build and publishing process. Then the user creates any Integration custom resource, which is mainly a container for the Camel route and other optional Kubernetes fine tunings. + +**IntegrationPlatform**: it is required to configure building aspects such as which container registry to use or Maven configuration settings. + +**Integration**: it is used to create the Camel application, setting mainly the Camel route the user wants to run on the cloud. The user can provide a _self managed build_ Integration as well, in which case, the operator will skip the building part. 
+ +**IntegrationKit**: the operator will reuse an existing IntegrationKit if the Integration has the same set of capabilities and dependencies. Otherwise it creates an IntegrationKit with the configuration required by the Integration. The presence of this resource makes Camel K applications run immediately, when reusing an existing IntegrationKit. + +**Build**: the operator will create a Build for each IntegrationKit. It creates a Maven project with the dependencies required by the IntegrationKit, it builds and publishes it to a given registry. + +**Deployment**, **KnativeService**, **CronJob**: the operator will create any of those deployment objects to run the Camel application. The default choice is the Deployment resource, unless the operator detects the Camel route is more suitable to run as a CronJob (ie, when there is a scheduler component such as Quartz). If the Camel route contains an HTTP service and the cluster provides a Knative installation, then, a KnativeService is used instead as a deployment. + +== Pipes (Connectors) + +The user can use an alternative approach using the Pipe (Connector) abstraction. With Pipe, they can provide a declarative connector-style approach, connecting an **Event Source** to an **Event Sink**. The source and sink can be any Kubernetes object reference that the operator can transform. The operator will be in charge of transforming such a Pipe into an Integration and starting the build and deployment process as described above. + +image::concepts/pipes.png[Camel connector lifecycle, width=1024] + +**Pipe**: it is used to create the connector binding an event source to an event sink. + +**ObjectReference**: this is the reference to any Kubernetes object. The operator is able to transform any Camel URI, Kamelet, Strimzi Kafka resource or Knative resource. + +**Integration**: it is created from the Pipe translating the source and sinks into a Camel route. 
diff --git a/docs/modules/ROOT/pages/concepts/pipes.png b/docs/modules/ROOT/pages/concepts/pipes.png new file mode 100644 index 0000000000..122d5c94b9 Binary files /dev/null and b/docs/modules/ROOT/pages/concepts/pipes.png differ diff --git a/docs/modules/ROOT/pages/configuration/build-time-properties.adoc b/docs/modules/ROOT/pages/configuration/build-time-properties.adoc index 3393e74c56..88c3991c68 100644 --- a/docs/modules/ROOT/pages/configuration/build-time-properties.adoc +++ b/docs/modules/ROOT/pages/configuration/build-time-properties.adoc @@ -3,6 +3,8 @@ You may be required to provide certain *build-time properties* that are needed only during the process of `Integration` building. Since Camel K version 1.5, we introduced a `--build-property` flag that will be handful in such circumstances. The property value may be also used inside Camel K integrations using the *property placeholder* mechanism. +NOTE: the --build-property option is syntactic sugar for `builder.properties` trait. + [[build-time-single-prop]] == Single property @@ -93,7 +95,7 @@ The key-value pairs of the `ConfigMap` are loaded and used as build-time propert [[build-time-configmap-as-file]] == Property from ConfigMap/Secret as file -When you have a lot of key-value pairs to store into a given `ConfigMap`/`Secret`, you may consider storing some build-time properties as a file into a specific key-value pair for the sake of simplicity. +When you have a lot of key-value pairs to store into a given `ConfigMap`/`Secret`, you may consider storing some build-time properties as a file into a specific key-value pair for the sake of simplicity. The only constraint is to use `.properties` as a suffix of the key to indicate that the value is actually a property file, not a simple value. 
@@ -120,7 +122,7 @@ Then we launch the `run` command with the `--build-property` flag whose value ma kamel run --build-property configmap:my-cm-bps build-property-route.yaml ---- -The value of the key-value of the `ConfigMap` is loaded as a property file and used as build-time properties of the `Integration`. you will see the log with the expected configuration. +The value of the key-value of the `ConfigMap` is loaded as a property file and used as build-time properties of the `Integration`. You will see the log with the expected configuration. [[build-time-props-file-precedence]] == Property collision priority @@ -130,4 +132,4 @@ If you have a property repeated more than once, the general rule is that the las [[build-time-runtime-conf]] == Run time properties -If you're looking for *runtime properties configuration* you can look at the xref:configuration/runtime-properties.adoc[runtime properties] section. +If you're looking for *runtime properties configuration* you can look at the xref:configuration/camel-properties.adoc[Camel properties] section. 
diff --git a/docs/modules/ROOT/pages/configuration/runtime-properties.adoc b/docs/modules/ROOT/pages/configuration/camel-properties.adoc similarity index 97% rename from docs/modules/ROOT/pages/configuration/runtime-properties.adoc rename to docs/modules/ROOT/pages/configuration/camel-properties.adoc index f4fb39d759..d474220f86 100644 --- a/docs/modules/ROOT/pages/configuration/runtime-properties.adoc +++ b/docs/modules/ROOT/pages/configuration/camel-properties.adoc @@ -6,7 +6,7 @@ During the execution of an `Integration` you can provide a single property or a [[runtime-single-prop]] == Single property -Imagine you have a generic `Route` and you set a placeholder for certain information (ie, _my.message_ variable): +Imagine you have a generic route and you set a placeholder for certain information (ie, _my.message_ variable): [source,yaml] .property-route.yaml @@ -117,7 +117,7 @@ The key-value pairs of the `ConfigMap` are loaded and used as runtime properties [[runtime-configmap-as-file]] == Property from ConfigMap/Secret as file -When you have a lot of key-value pairs to store into a given `ConfigMap`/`Secret`, you may consider storing some runtime properties as a file into a specific key-value pair for the sake of simplicity. +When you have a lot of key-value pairs to store into a given `ConfigMap`/`Secret`, you may consider storing some runtime properties as a file into a specific key-value pair for the sake of simplicity. The only constraint is to use `.properties` as a suffix of the key to indicate that the value is actually a property file, not a simple value. 
diff --git a/docs/modules/ROOT/pages/configuration/components.adoc b/docs/modules/ROOT/pages/configuration/components.adoc deleted file mode 100644 index 410c42c744..0000000000 --- a/docs/modules/ROOT/pages/configuration/components.adoc +++ /dev/null @@ -1,37 +0,0 @@ -= Configure Integration Components - -Camel components can be configured programmatically (within the integration code) or using properties with the following syntax: - -[source] ----- -camel.component.${scheme}.${property}=${value} ----- - -As example if you want to change the queue size of the seda component, you can use the following property: - -[source] ----- -camel.component.seda.queueSize=10 ----- - -For example, you can do it when running the integration from the command line: - -[source,yaml] -.config-seda-route.yaml ----- -- from: - uri: "timer:seda" - steps: - - setBody: - simple: "Hello World!" - - to: "seda:next" -- from: - uri: "seda:next" - steps: - - to: "log:info" ----- - -``` -kamel run --property camel.component.seda.queueSize=10 config-seda-route.yaml -``` - diff --git a/docs/modules/ROOT/pages/configuration/configuration.adoc b/docs/modules/ROOT/pages/configuration/configuration.adoc index f44953f351..a19f884cc3 100644 --- a/docs/modules/ROOT/pages/configuration/configuration.adoc +++ b/docs/modules/ROOT/pages/configuration/configuration.adoc @@ -1,28 +1,31 @@ -[[configuration]] = Configure Integrations -Runtime properties associated to an integration can be configured using the `--property` flag when running the integration. If the property is a *build time property* (ie, certain `Quarkus` configuration), then, you can use xref:configuration/build-time-properties.adoc[`--build-property`] instead. +Camel K is highly configurable and attempts to be as much as automatic as possible in order to provide a nice development experience. However, as soon as you start playing harder you will need to provide more tuning to your applications. 
In this section we're exploring specifically Integration configuration, make sure not to get confused with the xref:installation/integrationplatform.adoc[IntegrationPlatform configuration] section, which influences the general building and publishing behavior. -The property value can be used inside Camel K integrations using the *property placeholder* mechanism. +We can distinguish a series of different configurations for an Integration: -The syntax for properties has the form `{{my.property}}`, for example: +* Build time configuration +* Runtime configuration +* Deployment configuration -[source,yaml] -.property-route.yaml ----- -- from: - uri: "timer:props" - steps: - - setBody: - simple: "{{my.message}}" - - to: "log:info" ----- +== Build time configuration -In order to give a value to the `my.message` property you can pass it using the command line: +Most of the time you won't need to influence the building process. There may be circumstances where you need to provide a given Maven dependency to your application, provide some build time property or fine tune the application and the container image resulting from the build process. -[source] ----- -kamel run --property my.message="Hello World" property-route.yaml ----- +Camel K does the best it can to discover automatically dependencies required by your application. However, when you're using external code or the operator can't discover dependencies, you'll need to add them manually. See more in the xref:configuration/dependencies.adoc[dependencies] section. -For more details and advanced use cases, see the xref:configuration/runtime-properties.adoc[runtime properties] section. +Build time properties are the configuration injected during the building of the project. They are mainly used by Camel Quarkus and you may not need at all depending on the kind of applications you're building. See more in the xref:configuration/build-time-properties.adoc[build time properties] section. 
+ +If you want to optimize your application to run natively on the cloud, then you may see how to xref:configuration/quarkus-native.adoc[run your application with Quarkus Native]. + +While building the container image, the operator will use a default JDK based image. See how you can xref:configuration/base-image.adoc[change the default base image]. + +== Runtime configuration + +The runtime configurations are probably the ones you'll be using mostly during your development. The majority of the time you will need to provide xref:configuration/camel-properties.adoc[Camel properties], xref:configuration/runtime-config.adoc[runtime configuration] and xref:configuration/runtime-resources.adoc[runtime resources] files. + +Another configuration you may be interested in controlling is the xref:configuration/runtime-version.adoc[Camel runtime version]. + +== Deployment configuration + +The goal of the operator is to simplify the building and deployment process of a Camel application on the cloud. Most of the time the default settings to configure the deployment resources should be enough. However, if you need to fine tune the final resulting deployment, then you need to know how to configure the so-called xref:traits:traits.adoc[Camel K traits]. \ No newline at end of file diff --git a/docs/modules/ROOT/pages/configuration/maven-profile.adoc b/docs/modules/ROOT/pages/configuration/maven-profile.adoc deleted file mode 100644 index 8bda1592e6..0000000000 --- a/docs/modules/ROOT/pages/configuration/maven-profile.adoc +++ /dev/null @@ -1,25 +0,0 @@ -[[maven-profile]] -= Maven Profile - -You can customize the build of an integration with a https://maven.apache.org/guides/introduction/introduction-to-profiles.html#profiles-in-poms[Maven profile]: - -``` - - my-profile - ... 
- -``` - -Once you have the file ready you can create a Configmap or a Secret in order to use it in your integration: - -``` -kubectl create cm my-maven-profile --from-file my-profile.xml -``` - -Once the Configmap/Secret is ready, then, you can use it to run your integration: - -``` -kamel run hello.yaml -t builder.maven-profile=configmap:my-maven-profile/my-profile.xml -``` - -The profile will be added to your integration's project generated pom file. What will be changed in the `mvn package` execution will depend on your profile definition. diff --git a/docs/modules/ROOT/pages/running/quarkus-native.adoc b/docs/modules/ROOT/pages/configuration/quarkus-native.adoc similarity index 100% rename from docs/modules/ROOT/pages/running/quarkus-native.adoc rename to docs/modules/ROOT/pages/configuration/quarkus-native.adoc diff --git a/docs/modules/ROOT/pages/configuration/runtime-config.adoc b/docs/modules/ROOT/pages/configuration/runtime-config.adoc index cce2072b35..04c9a1cd25 100644 --- a/docs/modules/ROOT/pages/configuration/runtime-config.adoc +++ b/docs/modules/ROOT/pages/configuration/runtime-config.adoc @@ -82,7 +82,7 @@ NOTE: you can provide a `Secret` which is not yet available on the cluster. The [[runtime-config-props]] == Configmap/Secret property references -Each `Configmap`/`Secret` will be parsed as a property file and you will be able to use those properties inside your `Route` definition or, more in general, as you would do with any other xref:configuration/runtime-properties.adoc[runtime property]. As an example, you can create the following `Secret`: +Each `Configmap`/`Secret` will be parsed as a property file and you will be able to use those properties inside your `Route` definition or, more in general, as you would do with any other xref:configuration/camel-properties.adoc[Camel property]. 
As an example, you can create the following `Secret`: [source,text] .secret.properties diff --git a/docs/modules/ROOT/pages/running/runtime-version.adoc b/docs/modules/ROOT/pages/configuration/runtime-version.adoc similarity index 67% rename from docs/modules/ROOT/pages/running/runtime-version.adoc rename to docs/modules/ROOT/pages/configuration/runtime-version.adoc index a54cbe8c35..d949656aad 100644 --- a/docs/modules/ROOT/pages/running/runtime-version.adoc +++ b/docs/modules/ROOT/pages/configuration/runtime-version.adoc @@ -1,4 +1,4 @@ -= Choose the runtime version += Choose a Camel runtime version Starting from Camel K version 2 you will be able to use any Camel K Runtime version available which version is above 1.17. In order to use the feature you need to use the xref:traits:camel.adoc[Camel trait]. @@ -12,10 +12,10 @@ Having the ability to choose the runtime, gives you the ability to specify which == How does it work -This feature requires the dynamic generation of a builder that contains all the tooling expected by the build phase. In particular, this is a requirement for Quarkus native builds which, from now on, can be only done with builder `Pod` strategy. +This feature requires the dynamic generation of a builder that contains all the tooling expected by the build phase. In particular, this is a requirement for Quarkus native builds which can only be done with builder `Pod` strategy. When you are creating a new runtime for which a xref:architecture/cr/camel-catalog.adoc[CamelCatalog] does not yest exist, Camel K Operator is in charge to create such a catalog. Every CamelCatalog may carry the definition of a container image builder which may later be used by the builder `Pod` to build a Camel application which is specific to such a runtime (for instance, when running a Quarkus Native build). == Pin a runtime version -By default each Camel K version uses a specific runtime version, ie, Camel K 2.0 uses Camel K Runtime 2.16.0. 
Using the trait will let you pin to a well defined version, avoiding to unintentionally upgrade the runtime of the integrations running when you perform an operator upgrade. See more info in the xref:installation/upgrade.adoc#maintain-runtime-integrations[Camel K upgrade documentation]. +By default each Camel K version uses the runtime version provided in the IntegrationPlatform. Using the trait will let you pin to a well defined version, avoiding to unintentionally upgrade the runtime of the integrations running when you perform an operator upgrade (hence an IntegrationPlatform upgrade). See more info in the xref:installation/upgrade.adoc#maintain-runtime-integrations[Camel K upgrade documentation]. diff --git a/docs/modules/ROOT/pages/contributing/local-deployment-olm.adoc b/docs/modules/ROOT/pages/contributing/local-deployment-olm.adoc deleted file mode 100644 index 996c8b731e..0000000000 --- a/docs/modules/ROOT/pages/contributing/local-deployment-olm.adoc +++ /dev/null @@ -1,127 +0,0 @@ -[[contributing]] -= Deploy with OLM from source - -The following steps assume that - -- you've already built the camel-k image using `make images` and made it available in the cluster as an imagestream -- you've already built the bundle image using `make bundle` and have pushed it to some registry - -To perform OLM (Operator Lifecycle Manager) based deployment of camel-k, built from source locally on an OpenShift cluster, you can follow the steps below. - -Login to the cluster using the standard "oc" tool, create new project, complete the basic setup process. 
Reference commands below - -``` -oc login -u -p -oc new-project camelk || true -oc policy add-role-to-group system:image-puller system:serviceaccounts --namespace=camelk || true -oc patch configs.imageregistry.operator.openshift.io/cluster --patch '{"spec":{"defaultRoute":true}}' --type=merge -HOST=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}') -echo $HOST -podman login -u -p $(oc whoami -t) --tls-verify=false $HOST -``` - -Now, you need to build a catalog image (from the previously built bundle image, which has been pushed to some registry) and make it available in the cluster as an imagestream. And to achieve this, you need `opm` binary and the image named `upstream-opm-builder` which is used by opm to build the catalog image. The `opm` binary can be dowloaded from https://github.com/operator-framework/operator-registry/releases[here] and the image is available on https://quay.io/repository/operator-framework/upstream-opm-builder[quay.io]. Add the `opm` binary to the PATH environment. The steps below have been validated with `opm` v1.15.3. However, newer versions may work as expected, with minor changes in command line arguments (if any). If the binary and the image are not available for your platform, you'll need to build those from source. - -Now, the catalog image can be built and pushed to the imagestream as: - -``` -opm index add -u podman --bundles //camel-k-bundle: --tag $HOST/camelk/camel-k-catalog: -p podman -podman push $HOST/camelk/camel-k-catalog: --tls-verify=false -``` - -Note that the -u and -p options of opm allow usage of different tools for dealing with containers/image registries i.e. docker, podman. Here are the details of the available options for opm index build - -``` -opm index add -h -Add operator bundles to an index. - - This command will add the given set of bundle images (specified by the --bundles option) to an index image (provided by the --from-index option). 
- - If multiple bundles are given with '--mode=replaces' (the default), bundles are added to the index by order of ascending (semver) version unless the update graph specified by replaces requires a different input order; e.g. 1.0.0 replaces 1.0.1 would result in [1.0.1, 1.0.0] instead of the [1.0.0, 1.0.1] normally expected of semver. However, for most cases (e.g. 1.0.1 replaces 1.0.0) the bundle with the highest version is used to set the default channel of the related package. - -Usage: - opm index add [flags] - -Examples: - # Create an index image from scratch with a single bundle image - opm index add --bundles quay.io/operator-framework/operator-bundle-prometheus@sha256:a3ee653ffa8a0d2bbb2fabb150a94da6e878b6e9eb07defd40dc884effde11a0 --tag quay.io/operator-framework/monitoring:1.0.0 - - # Add a single bundle image to an index image - opm index add --bundles quay.io/operator-framework/operator-bundle-prometheus:0.15.0 --from-index quay.io/operator-framework/monitoring:1.0.0 --tag quay.io/operator-framework/monitoring:1.0.1 - - # Add multiple bundles to an index and generate a Dockerfile instead of an image - opm index add --bundles quay.io/operator-framework/operator-bundle-prometheus:0.15.0,quay.io/operator-framework/operator-bundle-prometheus:0.22.2 --generate - -Flags: - -i, --binary-image opm container image for on-image opm command - -u, --build-tool string tool to build container images. One of: [docker, podman]. Defaults to podman. Overrides part of container-tool. - -b, --bundles strings comma separated list of bundles to add - -c, --container-tool string tool to interact with container images (save, build, etc.). One of: [docker, podman] - -f, --from-index string previous index to add to - --generate if enabled, just creates the dockerfile and saves it to local disk - -h, --help help for add - --mode string graph update mode that defines how channel graphs are updated. 
One of: [replaces, semver, semver-skippatch] (default "replaces") - -d, --out-dockerfile string if generating the dockerfile, this flag is used to (optionally) specify a dockerfile name - --permissive allow registry load errors - -p, --pull-tool string tool to pull container images. One of: [none, docker, podman]. Defaults to none. Overrides part of container-tool. - -t, --tag string custom tag for container image being built - -Global Flags: - --skip-tls skip TLS certificate verification for container image registries while pulling bundles or index -``` - -In order to create a catalogsource which will use this custom catalog, create a catalog-source.yaml file - -``` -apiVersion: operators.coreos.com/v1alpha1 -kind: CatalogSource -metadata: - name: camel-k-catalog - namespace: openshift-marketplace -spec: - sourceType: grpc - image: image-registry.openshift-image-registry.svc:5000/camelk/camel-k-catalog: - displayName: Camel K catalog - publisher: My publisher -``` - -and create the catalogsource and confirm its' creation. - -``` -# oc create -f catalog-source.yaml -catalogsource.operators.coreos.com/camel-k-catalog created -``` - -The custom catalog that we created using the yaml file above should be visible in the cluster now, along with the corresponding package manifest. 
- -``` -# oc get catalogsources -A | grep camel -openshift-marketplace camel-k-catalog Camel K catalog grpc My publisher 41m -# oc get packagemanifest -A | grep camel -openshift-marketplace knative-camel-operator Community Operators 21d -openshift-marketplace red-hat-camel-k Red Hat Operators 21d -openshift-marketplace camel-k Community Operators 21d -openshift-marketplace camel-k Camel K catalog 41m -``` - -Now, you can deploy the custom operator using the custom catalog as - -``` -# cd $GOPATH/src/github.com/apache/camel-k -# ./kamel install --olm-source=camel-k-catalog --olm-source-namespace=openshift-marketplace --olm-channel=alpha -OLM is available in the cluster -Camel K installed in namespace camelk via OLM subscription -``` - -You can confirm the deployment. The sample log for camel-k 1.3.0 is pasted below. - -``` -# oc get all -A | grep camel -camelk pod/camel-k-operator-7fbb745899-qflcb 1/1 Running 0 8s -openshift-marketplace pod/camel-k-catalog-m8f9g 1/1 Running 0 4m38s -openshift-marketplace service/camel-k-catalog ClusterIP xxx.xx.xx.xxx 50051/TCP 4m38s -camelk deployment.apps/camel-k-operator 1/1 1 1 11s -camelk replicaset.apps/camel-k-operator-7fbb745899 1 1 1 9s -camelk imagestream.image.openshift.io/camel-k default-route-openshift-image-registry.apps.shivani-2-46.openshift.com/camelk/camel-k 1.3.0 8 hours ago -camelk imagestream.image.openshift.io/camel-k-catalog default-route-openshift-image-registry.apps.shivani-2-46.openshift.com/camelk/camel-k-catalog 1.3.0 6 hours ago -``` diff --git a/docs/modules/ROOT/pages/contributing/local-execution.adoc b/docs/modules/ROOT/pages/contributing/local-execution.adoc index 8e478f0232..8620134836 100644 --- a/docs/modules/ROOT/pages/contributing/local-execution.adoc +++ b/docs/modules/ROOT/pages/contributing/local-execution.adoc @@ -7,25 +7,40 @@ NOTE: if you need a simpler approach you can build and run the operator on a con Let's use a namespace called `operator-test. 
You can start with setting the environment variable `WATCH_NAMESPACE` with the namespace you'd like your operator to watch. You also need to specify the name of the operator, as you may have different operators running on the cluster. ----- + +```bash export WATCH_NAMESPACE=operator-test export OPERATOR_ID="camel-k-dev" ----- +``` The next step is to install an `IntegrationPlatform` on the cluster namespace. You probably need to tweak the registry parameters in order to be able to authenticate against an image repository (see below paragraph for local repository instructions). It's important to specify the target operator that will take care of this IntegrationPlatform (`-x` or `--operator-id` option). ----- -./kamel install --skip-operator-setup -n operator-test --registry my-registry:5000 -x camel-k-dev ----- + +```yaml +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + annotations: + camel.apache.org/operator.id: camel-k-dev + name: camel-k + namespace: operator-test +spec: + build: + registry: + address: my-registry:5000 + insecure: true +``` Finally, assuming you've built your application correctly we can run the operator: ------ + +```bash ./kamel operator ------ +``` + +Test the local operator by creating a test `Integration`: -Test the local operator by creating a test `Integration`. ------ +```bash ./kamel run xyz.abc -n operator-test -x camel-k-dev ------ +``` IMPORTANT: make sure no other Camel K Operators are watching this namespace, neither you have a global Camel K Operator installed on your cluster. As you may have more than one Camel K operator installed on the cluster, it's important you specify the `-x` (or `--operator-id`) option. 
@@ -50,10 +65,22 @@ registry-fttbv 1/1 Running 40 89d kubectl port-forward --namespace kube-system registry-fttbv 5000:5000 ---- -Update the ``IntegrationPlatform`` to instruct it to use the ``localhost`` registry: ----- -./kamel install --skip-operator-setup -n operator-test --registry localhost:5000 --force -x camel-k-dev ----- +Update the `IntegrationPlatform` to instruct it to use the `localhost` registry: + +```yaml +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + annotations: + camel.apache.org/operator.id: camel-k-dev + name: camel-k + namespace: operator-test +spec: + build: + registry: + address: localhost:5000 + insecure: true +``` A similar procedure may work if you use other local environments. The idea is to expose the docker registry and be able to use it from your local operator. @@ -62,11 +89,24 @@ IMPORTANT: using build strategy as `Pod` won't probably work as it will expect t === Local Camel K runtime Camel K integrations are based on https://github.com/apache/camel-k-runtime[Camel K runtime], generally paired with the operator release. If you need to specify a different runtime, or you have a local Camel K runtime that you want to test, then you will need to specify it in the `Integration Platform`: ----- -./kamel install --skip-operator-setup -n operator-test --registry localhost:5000 --force --runtime-version $version -x camel-k-dev ----- -The `$version` variable must be replaced with the version you are building. For example, `1.3.1-SNAPSHOT`. With these instructions, the operator will pick up and use the snapshot version you have released locally. 
In order to use the local maven repository, you will also need to edit your IntegrationPlatform as follow: +```yaml +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + annotations: + camel.apache.org/operator.id: camel-k-dev + name: camel-k + namespace: operator-test +spec: + build: + registry: + address: localhost:5000 + insecure: true + runtimeVersion: +``` + +The variable must be replaced with the version you are building. For example, `1.3.1-SNAPSHOT`. With these instructions, the operator will pick up and use the snapshot version you have released locally. In order to use the local maven repository, you will also need to edit your IntegrationPlatform as follow: ---- $ k edit ip -n operator-test diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index 5608941b7b..e9a59c3944 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -1,25 +1,8 @@ = Apache Camel K -[NOTE] --- -This version ({page-component-display-version}) of {page-component-title} depends on: - -* https://github.com/apache/camel-k-runtime.git[camel-k-runtime] at version {camel-k-runtime-version}, and therefore: -** xref:{camel-quarkus-docs-version}@camel-quarkus::index.adoc[] at version {camel-quarkus-version}. -** xref:{camel-docs-version}@components::index.adoc[Camel] at version {camel-version} -** https://quarkus.io[Quarkus] at version {quarkus-version} -* xref:{camel-kamelets-docs-version}@camel-kamelets::index.adoc[] at version {camel-kamelets-version} - -ifdef::lts[This is a long term service release.] -ifndef::lts[] -ifdef::prerelease[This is the development version of {page-component-title}. It should not be used in production.] -ifndef::prerelease[This release will not be updated, but rather replaced by a new release.] 
-endif::[] -- +Apache Camel K is a lightweight integration framework built from Apache Camel that runs natively on Kubernetes and is specifically designed for serverless and microservice architectures. The Camel K https://kubernetes.io/docs/concepts/extend-kubernetes/operator/[Kubernetes Operator] is in charge to transform a user provided Integration custom resource into a Camel application running on the cloud. -Apache Camel K is a lightweight integration framework built from Apache Camel that runs natively on Kubernetes and is specifically designed for serverless and microservice architectures. - -Users of Camel K can instantly run integration code written in Camel DSL on their preferred cloud (Kubernetes or OpenShift). +Users of Camel K can instantly run integration code written in any Camel DSL without worrying about the building and deployment of the application on the cloud. [[how-it-works]] == How It Works @@ -28,28 +11,37 @@ Just write a _helloworld.yaml_ integration file with the following content: [source,yaml] ---- -- from: - uri: "timer:tick" - parameters: - period: "3000" - steps: +apiVersion: camel.apache.org/v1 +kind: Integration +metadata: + name: helloworld +spec: + flows: + - from: + steps: - setBody: - simple: "Hello world from Camel K" - - to: "log:info" + simple: Hello Camel from ${routeId} + - log: ${body} + uri: timer:yaml ---- You can then execute the following command: [source] ---- -kamel run helloworld.yaml +kubectl apply -f helloworld.yaml ---- -The integration code immediately runs in the cloud. **Nothing else** is needed. - -Continue reading the documentation to xref:installation/installation.adoc[install and get started with Camel K]. +The integration code will immediately run in the cloud. Continue reading the documentation to xref:installation/installation.adoc[install and get started with Camel K]. == Camel dependencies matrix +-- +ifdef::lts[This is a long term service release.]
+ifndef::lts[] +ifdef::prerelease[This is a development version of {page-component-title}. It should not be used in production.] +ifndef::prerelease[This release will not be updated, but rather replaced by a new release.] +endif::[] +-- From Camel K version 2 onward you will be able to use any Camel K Runtime. Each runtime depends on a Camel Quarkus, Camel and Quarkus Platform version. Every Camel K has a default runtime used, but you can xref:running/runtime-version.adoc[pick any Camel K Runtime available] (backward and forward compatible). diff --git a/docs/modules/ROOT/pages/installation/advanced/advanced.adoc b/docs/modules/ROOT/pages/installation/advanced/advanced.adoc index a0d941ba55..1df6ba4605 100644 --- a/docs/modules/ROOT/pages/installation/advanced/advanced.adoc +++ b/docs/modules/ROOT/pages/installation/advanced/advanced.adoc @@ -13,7 +13,10 @@ Most of these settings will require changing either an environment variable or o You can install one or more Camel K operators (which must share CRDs). In such case you need to specify a unique operator name which will be used by each resource to know who is in charge to reconcile it. This behavior is controlled by the OPERATOR_ID environment variable (default value, _camel-k_). -[[resources]] +== Watch namespaces + +Camel K operator can work in **global** (descoped) mode by watching all namespaces or **namespaced** (scoped). This behavior is controlled by the WATCH_NAMESPACE environment variable. If set to an empty value, then the operator will watch for resources in all namespaces. If it is set to a given namespace, then it will only watch for resources in such a namespace. + == Resource management We provide certain configuration to better "operationalize" the Camel K Operator. More detailed information on the xref:installation/advanced/resources.adoc[resource management] page.
diff --git a/docs/modules/ROOT/pages/installation/advanced/maven.adoc b/docs/modules/ROOT/pages/installation/advanced/maven.adoc index 2d20b8cd1f..f32ebd98da 100644 --- a/docs/modules/ROOT/pages/installation/advanced/maven.adoc +++ b/docs/modules/ROOT/pages/installation/advanced/maven.adoc @@ -297,3 +297,29 @@ Maven dependencies hosted in your S3 bucket can now be used just like any other ---- $ kamel run S3.java --dependency=mvn:artfiactId:groupId:version ---- + +[[maven-profiles]] +== Maven Profiles + +You can customize the build of an Integration with a https://maven.apache.org/guides/introduction/introduction-to-profiles.html#profiles-in-poms[Maven profile]: + +``` + + my-profile + ... + +``` + +Once you have the file ready you can create a Configmap or a Secret in order to use it in your integration: + +``` +kubectl create cm my-maven-profile --from-file my-profile.xml +``` + +Once the Configmap/Secret is ready, then, you can use it to run your integration: + +``` +kamel run hello.yaml -t builder.maven-profile=configmap:my-maven-profile/my-profile.xml +``` + +The profile will be added to your Integration's project generated POM file. What will be changed in the `mvn package` execution will depend on your profile definition. diff --git a/docs/modules/ROOT/pages/installation/advanced/multi-architecture.adoc b/docs/modules/ROOT/pages/installation/advanced/multi-architecture.adoc index c801f1e550..f1bf8ea156 100644 --- a/docs/modules/ROOT/pages/installation/advanced/multi-architecture.adoc +++ b/docs/modules/ROOT/pages/installation/advanced/multi-architecture.adoc @@ -1,8 +1,6 @@ [[multi-architecture-props]] = Multi Architecture -NOTE: this part is evolving quickly during development of version 2 so it may be slightly inaccurate. - Since version 2, Camel K is publishing an ARM64 architecture beside the default AMD64 architecture. 
You can build your particular architecture from source as well, following the guidelines in xref:contributing/local-development.adoc[Local development]. [[multi-architecture-install]] diff --git a/docs/modules/ROOT/pages/installation/advanced/network.adoc b/docs/modules/ROOT/pages/installation/advanced/network.adoc index 0c54e29ce9..0ad88d805a 100644 --- a/docs/modules/ROOT/pages/installation/advanced/network.adoc +++ b/docs/modules/ROOT/pages/installation/advanced/network.adoc @@ -1,9 +1,7 @@ -= Network architecture += Components topology Camel K operator requires certain side technologies in order to perform the build and the deployment of a Camel application into the cloud. The operator itself can take care to build an application or delegate to a builder Pod. However, nothing change for the sake of this document. The operator is very fast in performing its tasks and when you experience some slower operation is typically due to the needs to access to external components/resources. In this document we want to highlight which are those components in order to help you tune them properly. -== Components topology - One of Camel K capabilities is to build a Camel application. As a Camel application (regardless its runtime) is a Java application, then, we require the presence of Maven as a technology to compile and package a Java application. Once the application is built, it is "containerized" as an image that will be later used for deployment scopes. The operator therefore is in charge to push the application container image into a Container Registry. @@ -14,6 +12,17 @@ Most of these operations typically require the connection to the Internet as the image::architecture/camel-k-network.svg[Network architecture, width=800] +[[registry]] +== Container registry + +During installation procedure, you may have already familiarized with the concept of a container registry. 
This one may be a registry operated by the user in the same cluster, an external registry or the embedded registry which may be offered by certain Kubernetes distributions (ie, Openshift). Whichever is your configuration, it's important to know that when Camel K operator creates the container image, it may require access to certain base images in public container registries such as `docker.io` or `quay.io`. + +In particular the access will be required when the operator builds the builder container image (driven by the runtime catalog you'll be using) and when it builds the IntegrationKit container image from the base image. + +Also in this case, the longer the operator runs, the lower the need to access to the base images, since they will be already cached and the higher the possibility to use incremental image from other IntegrationKits created. + +NOTE: at the moment of writing, the default builder image we use is _quay.io/quarkus/ubi-quarkus-mandrel-builder-image:23.0-jdk-17_ and the default integration image is _eclipse-temurin:17_ + [[build]] == Build application with Maven @@ -24,14 +33,3 @@ As you can see in the diagram, either you're using a Maven proxy or you're runni If the dependencies are stored in the local disk of the operator (or an IntegrationKit is already available to be used), then, no access to the Internet will be required. As a natural consequence, the longer the operator runs, the less it will need to access the Internet. A particular case is when you use the builder Pod strategy, in which case, it will require to download all dependencies from scratch. Similar situation when the operator Pod is restarted. We suggest you to check the xref:installation/advanced/maven.adoc[Maven configuration] page which contains all the details required to fine tune the build phase. - -[[registry]] -== Container registry - -The other required component for Camel K to run properly is the availability of a container registry.
This one may be a registry operated by the user in the same cluster, an external registry or the embedded registry which may be offered by certain Kubernetes distributions (ie, Openshift). Whichever is your configuration, it's important to know that when Camel K operator creates the container image, it may requires to access certain base images in public container registries such as docker.io or quay.io. - -In particular the access will be required when the operator build the builder container image (driven by the runtime catalog you'll be using) and when it builds the IntegrationKit container image from the base image. - -Also in this case, the longer the operator runs, the lower the need to access to the base images, since they will be already cached and the higher the possibility to use incremental image from other IntegrationKits created. - -NOTE: at the moment of writing, the default builder image we use is _quay.io/quarkus/ubi-quarkus-mandrel-builder-image:23.0-jdk-17_ and the default integration image is _eclipse-temurin:17_ \ No newline at end of file diff --git a/docs/modules/ROOT/pages/installation/advanced/offline.adoc b/docs/modules/ROOT/pages/installation/advanced/offline.adoc index afe9707697..d8f7a9fa07 100644 --- a/docs/modules/ROOT/pages/installation/advanced/offline.adoc +++ b/docs/modules/ROOT/pages/installation/advanced/offline.adoc @@ -2,7 +2,7 @@ Camel K is naturally developed to fit in an "open world" cluster model. It basically means that the default installation assumes it can pull and push resources from the Internet. However, there could be certain domains or use cases where this is a limitation. For this reason this guide will show you how to setup properly Camel K in an offline (or disconnected, or air gapped) cluster environment. -In order to understand the content of this guide. It is good to have familiarity with the default xref:installation/advanced/network.adoc[network architecture]. 
Let's see again the diagram here: +In order to understand the content of this guide. It is good to have familiarity with the default xref:installation/advanced/network.adoc[components topology]. Let's see again the diagram here: image::architecture/camel-k-network.svg[Network architecture, width=800] diff --git a/docs/modules/ROOT/pages/installation/advanced/pruning-registry.adoc b/docs/modules/ROOT/pages/installation/advanced/pruning-registry.adoc new file mode 100644 index 0000000000..a6da1bb560 --- /dev/null +++ b/docs/modules/ROOT/pages/installation/advanced/pruning-registry.adoc @@ -0,0 +1,59 @@ += Pruning unused images from container registry + +WARNING: This is an unsupported functionality, use at your own risk. + +Over time, while building Integrations, the produced images are stored in the container registry and it may become outdated and may require pruning old unused images. + +NOTE: Each container registry vendor can provide unique details about the pruning policy, check your vendor documentation. + +It's recommended only to delete container images from container registry if the corresponding `Integration` or `IntegrationKit` doesn't exist anymore or has no expectation to be used. Then if you delete the container image, you should also delete corresponding `Integrationkit` custom resource object. + +Camel K materializes the Camel integration in one of the two kubernetes objects: `Deployment` or `CronJob`. + +You have to check if the `Integration` is running or scaled down to zero pods, which is the case for CronJobs or Knative deployments. + +Then, we can provide some general guide about how to inspect the Camel K objects to prune unused images. + +For this guide, we assume you are connected to the container registry with `docker login`. 
+ +Step 1: List all Camel K container images, prefixed with `camel-k` + +``` +$ docker images |grep k-kit +10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheon0 bd52ae6e32af 54 years ago 481MB +10.98.248.245/camel-k/camel-k-kit-cptguntf799b89lheok0 b7f347193b3c 54 years ago 471MB +10.98.248.245/camel-k/camel-k-kit-cptgv0tf799b89lheokg 8d2d963396ca 54 years ago 477MB +10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheomg dc11800ef203 54 years ago 481MB +10.98.248.245/camel-k/camel-k-kit-cptgvd5f799b89lheol0 0bbdf20f2f49 54 years ago 479MB +``` + +Step 2: List the container images of the Camel K Integrations (don't print the sha256 digest) +``` +$ kubectl get -A it -oyaml|grep 'image:'|sed 's/^\s*image: //g;s/@sha256.*//g'|sort|uniq +10.98.248.245/camel-k/camel-k-kit-cptguntf799b89lheok0 +10.98.248.245/camel-k/camel-k-kit-cptgv0tf799b89lheokg +10.98.248.245/camel-k/camel-k-kit-cptgvd5f799b89lheol0 +10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheon0 +``` + +Step 3: Compare them and remove the container images and `IntegrationKit` from list 1 not found in list 2 +``` +docker rmi dc11800ef203 +kubectl delete ik/kit-cpth0mtf799b89lheomg +``` + +There is a https://github.com/apache/camel-k/blob/main/script/prune-camel-k-kit-images.sh[prune-camel-k-kit-images.sh] script to help you in this task. This script requires the following cli tools: `kubectl, comm, docker`. +The script lists the dangling images from the container registry, it accepts two parameters with no arguments: `-v` (verbose) and `-p` (prune images). + +An example of an execution: +``` +$ prune-camel-k-kit-images.sh -p +> Images from container registry, eligible for pruning. 
+10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheom0 + +> Delete Container Images +integrationkit.camel.apache.org "kit-cpth0mtf799b89lheom0" deleted +Untagged: 10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheom0@sha256:3857f8e331e50ded6529641e668de8781eb3cb7b881ea14b89cfc4f6b6e9d455 +Deleted: sha256:1015a6b18f164e9b086337e69a98e5850149c158cb778bac6059984756dc0528 +Deleted: sha256:2f0d224916e77654c4401f6fc4b1147a9a6e3ccf713213c38e877d7b939bab81 +``` \ No newline at end of file diff --git a/docs/modules/ROOT/pages/installation/advanced/resources.adoc b/docs/modules/ROOT/pages/installation/advanced/resources.adoc index 5ab784d5f0..327da53acb 100644 --- a/docs/modules/ROOT/pages/installation/advanced/resources.adoc +++ b/docs/modules/ROOT/pages/installation/advanced/resources.adoc @@ -7,13 +7,16 @@ The usage of these advanced configuration assumes you're familiar with the https Depending on the installation methodology (ie, Helm) provided you may have certain configuration parameters out of the box. Otherwise you should be able to fine tune those parameters altering directly the Camel K operator Deployment. -[[scheduling-infra-pod-scheduling]] -== Scheduling +== Scheduling Node Selectors, Affinity and Tolerations -=== Node Selectors and tolerations +=== Operator Pod We suggest to edit the Deployment to configure the https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/[`NodeSelector` Kubernetes feature] and the https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/[`Taint` and `Toleration` Kubernetes feature]. +=== Builder Pods and Integrations + +Builder Pods and Integration Pods can be also scheduled and assigned in the cluster. For this reason you need to use xref:traits:builder.adoc[builder trait] configuration (for builder Pod when using `pod` building strategy) and xref:traits:affinity.adoc[affinity], xref:traits:pod.adoc[pod], xref:traits:toleration.adoc[toleration] traits for Integration Pods. 
+ [[scheduling-infra-pod-resources]] == Resources @@ -39,7 +42,7 @@ Note that if you plan to perform **native builds**, then the memory requirements === Default Integration Pod configuration -The resource set on the container here is highly dependant on what your application is doing. You can control this behavior by setting opportunely the resources on the Integration via container trait. +The resource set on the container here is highly dependent on what your application is doing. You can control this behavior by setting opportunely the resources on the Integration via xref:traits:container.adoc[container] trait. Be aware that the default are actually the following: @@ -51,4 +54,8 @@ resources: limits: memory: "1Gi" cpu: "500m" -``` \ No newline at end of file +``` + +=== Builder Pod configuration + +You can set the resources requests and limits for the builder Pod using xref:traits:builder.adoc[builder] trait. diff --git a/docs/modules/ROOT/pages/installation/installation.adoc b/docs/modules/ROOT/pages/installation/installation.adoc index 985a59f5ee..3fc5c41b10 100644 --- a/docs/modules/ROOT/pages/installation/installation.adoc +++ b/docs/modules/ROOT/pages/installation/installation.adoc @@ -8,6 +8,17 @@ Camel K allows us to run Camel integrations directly on a Kubernetes cluster. To The first step is to install and run the Camel K operator. You can do it via any of the following methodologies: +[[kustomize]] +=== Installation via Kustomize + +https://kustomize.io[Kustomize] provides a declarative approach to the configuration customization of a Camel-K installation. Kustomize works either with a standalone executable or as a built-in to `kubectl`. The https://github.com/apache/camel-k/tree/main/install[/install] directory provides a series of base and overlays configuration that you can use. You can create your own overlays or customize the one available in the repository to accommodate your need.
+ +``` +$ kubectl apply -k github.com/apache/camel-k/install/overlays/kubernetes/descoped?ref=v2.4.0 --server-side +``` + +You can specify as `ref` parameter the version you're willing to install (ie, `v2.4.0`). The command above will install a descoped (global) operator in the camel-k namespace. + [[helm]] === Installation via Helm Hub @@ -33,17 +44,6 @@ You can edit the `Subscription` custom resource, setting the channel you want to NOTE: Some Kubernetes clusters such as Openshift may let you to perform the same operation from a GUI as well. Refer to the cluster instruction to learn how to perform such action from user interface. -[[kustomize]] -=== Installation via Kustomize - -https://kustomize.io[Kustomize] provides a declarative approach to the configuration customization of a Camel-K installation. Kustomize works either with a standalone executable or as a built-in to `kubectl`. The https://github.com/apache/camel-k/tree/main/install[/install] directory provides a series of base and overlays configuration that you can use. You can create your own overlays or customize the one available in the repository to accommodate your need. - -``` -$ kubectl apply -k github.com/apache/camel-k/install/overlays/kubernetes/descoped?ref=v2.4.0 --server-side -``` - -You can specify as `ref` parameter the version you're willing to install (ie, `v2.4.0`). The command above will install a descoped (global) operator in the camel-k namespace. - [[verify]] === Verify that the operator is up and running @@ -110,15 +110,8 @@ Once you've completed any of the above installation procedure, you'll be ready t Camel K installation is usually straightforward, but for certain cluster types you need to apply specific configuration settings before installing it. 
You need customized instructions for the following cluster types: -- xref:installation/platform/digitalocean.adoc[DigitalOcean] -- xref:installation/platform/docker-desktop.adoc[Docker Desktop] - xref:installation/platform/gke.adoc[Google Kubernetes Engine (GKE)] - xref:installation/platform/iks.adoc[IBM Kubernetes Services (IKS)] -- xref:installation/platform/k3s.adoc[K3s] -- xref:installation/platform/kind.adoc[Kind] -- xref:installation/platform/minikube.adoc[Minikube] -- xref:installation/platform/openshift.adoc[OpenShift] -- xref:installation/platform/crc.adoc[Red Hat CodeReady Containers (CRC)] [[fine-tuning]] == Fine Tuning diff --git a/docs/modules/ROOT/pages/installation/platform/crc.adoc b/docs/modules/ROOT/pages/installation/platform/crc.adoc deleted file mode 100644 index 24355cc3aa..0000000000 --- a/docs/modules/ROOT/pages/installation/platform/crc.adoc +++ /dev/null @@ -1,9 +0,0 @@ -[[installation-on-crc]] -= Installing Camel K on Red Hat CodeReady Containers (CRC)- OpenShift 4 on your laptop - -You can run Camel K integrations on OpenShift 4 on your laptop using the CodeReady Containers cluster creation tool. -Follow the instructions in the https://code-ready.github.io/crc/[getting started guide] for the installation. - -After https://code-ready.github.io/crc/#setting-up-codeready-containers_gsg[setting up] and https://code-ready.github.io/crc/#starting-the-virtual-machine_gsg[starting] the cluster you will need to login with the `kubeadmin` user as shown in the startup logs (or setting up a user with sufficient permissions). - -You can now proceed with the xref:installation/installation.adoc[standard Camel K installation procedure]. 
diff --git a/docs/modules/ROOT/pages/installation/platform/digitalocean.adoc b/docs/modules/ROOT/pages/installation/platform/digitalocean.adoc deleted file mode 100644 index 4ebab39ed0..0000000000 --- a/docs/modules/ROOT/pages/installation/platform/digitalocean.adoc +++ /dev/null @@ -1,8 +0,0 @@ -[[installation-on-digitalocean]] -= Installing Camel K on DigitalOcean - -This guide assumes you've already created a Kubernetes Engine cluster on https://digitalocean.com. - -To install Camel K on a DigitalOcean Kubernetes cluster, just to xref:installation/registry/digitalocean.adoc[configure the DigitalOcean container registry] during installation. - -After doing that, you'll be ready to play with Camel K. Enjoy! diff --git a/docs/modules/ROOT/pages/installation/platform/gke.adoc b/docs/modules/ROOT/pages/installation/platform/gke.adoc index eb0b99b3f1..0e6a888a58 100644 --- a/docs/modules/ROOT/pages/installation/platform/gke.adoc +++ b/docs/modules/ROOT/pages/installation/platform/gke.adoc @@ -26,9 +26,4 @@ kubectl create clusterrolebinding user-cluster-admin-binding --clusterrole=clust The command above is needed to make sure your user is able to delegate some permissions to Camel K service accounts. -You can now get the *kamel* CLI tool the from https://github.com/apache/camel-k/releases[release page] -and put it on your system path. - -The last thing to do is to xref:installation/registry/gcr.adoc[configure gcr.io as registry] to host your integration image. - -After doing that, you'll be ready to play with Camel K. Enjoy! +The last thing to do is to xref:installation/registry/special/gcr.adoc[configure gcr.io as registry] to host your integration image. 
diff --git a/docs/modules/ROOT/pages/installation/platform/iks.adoc b/docs/modules/ROOT/pages/installation/platform/iks.adoc index d3c06e96ce..3908146e2b 100644 --- a/docs/modules/ROOT/pages/installation/platform/iks.adoc +++ b/docs/modules/ROOT/pages/installation/platform/iks.adoc @@ -14,13 +14,8 @@ kubectl get pods --all-namespaces ==== IKS provide an internal container registry feature. Camel K is able to leverage that registry. -You could create a customized namespace on xref:installation/registry/icr.adoc[IBM container registry] in order to host your integration images. +You could create a customized namespace on xref:installation/registry/special/icr.adoc[IBM container registry] in order to host your integration images. Please take note of the namespace and region created to configure them on the installation step. ==== -You can now download *kamel* CLI tool the from https://github.com/apache/camel-k/releases[release page] -and put it on your system path. - -After configure *kamel* CLI, you could start with the xref:installation/installation.adoc[standard installation] or xref:installation/installation.adoc#helm[installation via Helm]. - -Finally you are ready to work with Camel K. +You can now install via xref:installation/installation.adoc[standard installation]. diff --git a/docs/modules/ROOT/pages/installation/platform/k3s.adoc b/docs/modules/ROOT/pages/installation/platform/k3s.adoc deleted file mode 100644 index 8d45d1a3b4..0000000000 --- a/docs/modules/ROOT/pages/installation/platform/k3s.adoc +++ /dev/null @@ -1,4 +0,0 @@ -[[installation-on-k3s]] -= Installing Camel K on K3s - -Because K3s does not come with a builtin registry, you should follow the steps described on xref:installation/registry/k3s.adoc[Installing Camel K on K3s] to deploy it configure it to use a public or a private registry. 
\ No newline at end of file diff --git a/docs/modules/ROOT/pages/installation/platform/minikube.adoc b/docs/modules/ROOT/pages/installation/platform/minikube.adoc deleted file mode 100644 index b8b7fee94f..0000000000 --- a/docs/modules/ROOT/pages/installation/platform/minikube.adoc +++ /dev/null @@ -1,24 +0,0 @@ -[[installation-on-minikube]] -= Installing Camel K on Minikube - -You can run Camel K integrations on plain Kubernetes using the Minikube cluster creation tool. -Follow the instructions in the https://github.com/kubernetes/minikube#installation[official doc] for the installation. - -Start a new instance of Minikube using the command: - -``` -minikube start -``` - -After the startup process is completed, you need to **enable the `registry` addon**: - -``` -minikube addons enable registry -``` - -Alternatively, you can also start an instance with the `registry` addon in one command: -``` -minikube start --addons registry -``` - -You can now proceed with the xref:installation/installation.adoc[standard Camel K installation procedure]. diff --git a/docs/modules/ROOT/pages/installation/platform/openshift.adoc b/docs/modules/ROOT/pages/installation/platform/openshift.adoc deleted file mode 100644 index 42a07fe045..0000000000 --- a/docs/modules/ROOT/pages/installation/platform/openshift.adoc +++ /dev/null @@ -1,19 +0,0 @@ -[[installation-on-openshift]] -= Installing Camel K on OpenShift - -Installation of Camel K on OpenShift requires that you execute first some specific actions as cluster-admin. - -OpenShift does not always provide full cluster-admin rights to all users, so you may need to contact an administrator to install the -Kubernetes custom resources and roles needed by Camel K. - -You need to get the *kamel* CLI (_camel-k-client_) tool the from https://github.com/apache/camel-k/releases[release page] -and put it on your system path (e.g. on `/usr/bin/kamel` on Linux). 
- -To install the custom resource definitions and related roles, just execute (with **cluster-admin role**): - -``` -kamel install --cluster-setup -``` - -Once you've done this **only once per the whole cluster**, you can **login as a standard user** and -continue with the xref:installation/installation.adoc[standard Camel K installation procedure]. diff --git a/docs/modules/ROOT/pages/installation/registry/digitalocean.adoc b/docs/modules/ROOT/pages/installation/registry/digitalocean.adoc deleted file mode 100644 index 58b2e04cc5..0000000000 --- a/docs/modules/ROOT/pages/installation/registry/digitalocean.adoc +++ /dev/null @@ -1,17 +0,0 @@ -[[configuring-registry-digitalocean]] -= Configuring a DigitalOcean Container Registry - -You can host your container images on the Digital Ocean container registry in case your cluster doesn't provide a xref:installation/registry/registry.adoc[default registry]. - -After logging in into the DigitalOcean web console on https://www.digitalocean.com/, access the *registry page* to do the following actions: - -- Take note of the registry address and organization: you should find something like `registry.digitalocean.com/` -- Download the "Docker Credentials" for the registry for **"Read & Write"**: this will save a file named `docker-config.json` in your machine - -[source,bash] ----- -# make sure you set the right organization parameter -kamel install --registry registry.digitalocean.com --organization your-org-id --registry-auth-file docker-config.json ----- - -Have fun with Camel K! 
diff --git a/docs/modules/ROOT/pages/installation/registry/dockerhub.adoc b/docs/modules/ROOT/pages/installation/registry/dockerhub.adoc deleted file mode 100644 index 79f1d40e94..0000000000 --- a/docs/modules/ROOT/pages/installation/registry/dockerhub.adoc +++ /dev/null @@ -1,48 +0,0 @@ -[[configuring-registry-dockerhub]] -= Configuring a DockerHub Registry - -You can host your container images on Docker Hub in case your cluster doesn't provide a xref:installation/registry/registry.adoc[default registry]. - -You need to create an account on https://hub.docker.com/, then use the following command to configure Camel K during installation: - -[source,bash] ----- -kamel install --registry docker.io --organization your-user-id-or-org --registry-auth-username your-user-id --registry-auth-password your-password ----- - -The `--registry-auth-username` and `--registry-auth-password` flags are used by the `kamel` CLI to create a Kubernetes secret -that holds your credentials for authenticating against the Docker registry. - -In the general case, the `--registry-auth-server` should be used ad it's automatically set to `https://index.docker.io/v1/`. Depending on the xref:installation/registry/registry.adoc[publish strategy] you are using you will need to adapt you credentials with the `--registry-auth-server` flag. **Spectrum** expect `https://index.docker.io/v1/` while **Jib** expect `docker.io`. - -NOTE: **Jib** works with Docker Hub in API v2 out of the box while **Spectrum** needs some adaptations for it to work. - -== Alternative Methods - -In some cases, you might already have a push/pull secret for Docker Hub in your current namespace. 
-Or you can also decide to create it using `kubectl`, with the following command: - -[source,bash] ----- -kubectl create secret docker-registry your-secret-name --docker-username your-user --docker-password your-pass ----- - - -Another possibility is to upload to the cluster your entire list of push/pull secrets: - -[source,bash] ----- -# First login to your registry and provide credentials -docker login -# Then create a secret from your credentials file (may contain passwords for other registries) -kubectl create secret generic your-secret-name --from-file ~/.docker/config.json ----- - -After you've created the secret, you can link it to Camel K during installation: - -[source,bash] ----- -kamel install --registry docker.io --organization your-user-id-or-org --registry-secret your-secret-name ----- - -As with the default method, this depends on the xref:installation/registry/registry.adoc[publish strategy] you are using. So make sure any credential contains the valid authentication servers: `https://index.docker.io/v1/` for **Spectrum** and `docker.io` for **Jib**. \ No newline at end of file diff --git a/docs/modules/ROOT/pages/installation/registry/github.adoc b/docs/modules/ROOT/pages/installation/registry/github.adoc deleted file mode 100644 index 3656d592db..0000000000 --- a/docs/modules/ROOT/pages/installation/registry/github.adoc +++ /dev/null @@ -1,29 +0,0 @@ -[[configuring-registry-github]] -= Configuring a Github Packages Registry - -You can use a Github Packages registry to host your container images, in case your cluster doesn't provide a xref:installation/registry/registry.adoc[default registry]. - -Login to Github, then navigate to `Settings -> Developer settings -> Personal access tokens`. -Create a new token with the following permissions: - -* `repo` (all) -* `write:packages` -* `read:packages` -* `delete:packages` - -Take note of the personal access token, you'll use it during installation. 
You also need to **choose a github repository** for hosting your images. - -To configure Camel K, install it using the following command: - -[source,bash] ----- -kamel install --registry docker.pkg.github.com --organization github-user/repository --registry-auth-username github-user-id --registry-auth-password github-token ----- - -The `--registry-auth-username` and `--registry-auth-password` flags are used by the `kamel` CLI to create a Kubernetes secret -that holds your credentials for authenticating against the Docker registry. - -In the general case, the `--registry-auth-server` should be used, but it can be omitted for Github because it's -equal to the registry by default (`docker.pkg.github.com`). - -You're now ready to publish your integration. Images will be automatically published to Github Packages. diff --git a/docs/modules/ROOT/pages/installation/registry/k3s.adoc b/docs/modules/ROOT/pages/installation/registry/k3s.adoc deleted file mode 100644 index 82e016e9b2..0000000000 --- a/docs/modules/ROOT/pages/installation/registry/k3s.adoc +++ /dev/null @@ -1,52 +0,0 @@ -[[installation-on-k3s]] -= Installing Camel K on K3s - -This guide assumes you've already deployed a https://k3s.io[K3s] cluster and have installed and configured the kubectl command to manage the cluster. - -You can create a namespace to install Camel K on: - -``` -kubectl create namespace camel-k-test || true -``` - -Camel K needs a xref:installation/registry/registry.adoc[registry] to push the integrations it builds. For K3s you have to possibilities: - -* You can configure Camel K installation to use the Docker registry, Quay.io or a similar publicly available registry, or; -* You can deploy your own private repository in the cluster or on your network. - -[[public-registry]] -== Using a Public Registry - -Most of the those registries require authentication to push images. Therefore, we have to create a secret the namespace that will contain the credentials to access it. 
To do so, you can execute: - -NOTE: Before running the command below, please make sure that you are logged in to the registry you are planning to use. - -``` -kubectl -n camel-k-test create secret generic my-registry-secret --from-file=$HOME/.docker/config.json -``` - -You can follow the steps described in the xref:installation/registry/registry.adoc[registry] documentation if you are deploying using a public registry, such as the https://hub.docker.com[DockerHub] or https://quay.io[Quay]. - - -[[private-registry]] -== Using a Private Registry - -Although K3s does not come with a private registry, one can be installed by following the steps described in the https://rancher.com/docs/k3s/latest/en/installation/private-registry/[K3s' private registry] documentation. - -*Note*: installing your own registry gives you more flexibility to define how the registry should run, including the level of security required for it to run. More specifically, you can configure your registry to require or not credentials, to use HTTP instead of HTTPS, and so on. For the purpose of this guide and to present how Camel K can be installed on a seamless way, this guide demonstrates the installation using an insecure registry (unencrypted and without authentication). - -[[installation]] -=== Installing Camel K on K3s with Private Registry - -With the secret created on the cluster, we can install Camel K and tell it to use those credentials when pushing the integrations. - -You can now download *kamel* CLI tool from https://github.com/apache/camel-k/releases[release page] -and put it on your system path. - -After configure *kamel* CLI, you can execute the following command to install it on the namespace and configured to use your private registry: - -``` -kamel install -n camel-k-test --force --olm=false --registry address-of-the-registry --organization your-user-id-or-org --registry-insecure true -``` - -After doing that, you'll be ready to play with Camel K. Enjoy! 
diff --git a/docs/modules/ROOT/pages/installation/registry/registry-secret.adoc b/docs/modules/ROOT/pages/installation/registry/registry-secret.adoc new file mode 100644 index 0000000000..f7e50a58dd --- /dev/null +++ b/docs/modules/ROOT/pages/installation/registry/registry-secret.adoc @@ -0,0 +1,41 @@ +[[configuring-registry-secret]] += Configuring a secret for the container registry + +You can host your container images on https://hub.docker.com/[Docker Hub] or any other registry out there. Most of the time, the following procedure will be enough to create a secret credentials and let Camel K access privately to a container registry. + +== How to create a registry secret + +In some cases, you might already have a push/pull secret for your container registry in your current namespace. If it's not the case, you can use the following command to create one: + +[source,bash] +---- +kubectl create secret docker-registry your-secret-name --docker-username your-user --docker-password your-pass +---- + +Another possibility is to upload to the cluster your entire list of push/pull secrets: + +[source,bash] +---- +# First login to your registry and provide credentials +docker login +# Then create a secret from your credentials file (may contain passwords for other registries) +kubectl create secret generic your-secret-name --from-file ~/.docker/config.json +---- + +After you've created the secret, you can provide it to your IntegrationPlatform resource: + +[source,yaml] +---- +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + name: camel-k +spec: + build: + registry: + address: (1) + organization: (2) + secret: (3) +---- + +NOTE: make sure any credential contains the valid authentication servers: `docker.io` is used by **Jib**. Other publishing strategies may instead require to use a different endpoint, ie, `https://index.docker.io/v1/`. 
\ No newline at end of file diff --git a/docs/modules/ROOT/pages/installation/registry/registry.adoc b/docs/modules/ROOT/pages/installation/registry/registry.adoc index 116b07a67a..1b6ad25887 100644 --- a/docs/modules/ROOT/pages/installation/registry/registry.adoc +++ b/docs/modules/ROOT/pages/installation/registry/registry.adoc @@ -15,7 +15,7 @@ For any other platform that do not provide a default container registry, then, a When running a production grade installation, you'll be probably using a private container registry which is accessible via authenticated method. The secret is something that will be https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret[included at deployment time] as `imagePullSecret` configuration. -As each registry may have a slightly different way of securing the access you can use the generic guidelines provided in xref:installation/registry/dockerhub.adoc[Docker Hub] registry configuration and adjust accordingly. We expect that at the end of the process you have a public address (1) an _organization_ (2) and a _secret_ (3) values that will be used to configure the registry. +As each registry may have a slightly different way of securing the access you can use the generic guidelines provided in xref:installation/registry/registry-secret.adoc[Secret registry configuration] and adjust accordingly. We expect that at the end of the process you have a public address (1) an _organization_ (2) and a _secret_ (3) values that will be used to configure the registry. You will need to create or edit any existing `IntegrationPlatform` custom resource with the values as expected in the `.spec.build.registry`. @@ -51,74 +51,13 @@ NOTE: you can configure Camel K to use an insecure private registry. 
However, yo === Special container registry requirements We have some hints that can help you configuring on the most common platforms: -- xref:installation/registry/digitalocean.adoc[DigitalOcean] -- xref:installation/registry/dockerhub.adoc[Docker Hub] -- xref:installation/registry/github.adoc[Github Packages] -- xref:installation/registry/gcr.adoc[Gcr.io] -- xref:installation/registry/icr.adoc[IBM Container Registry] -- xref:installation/registry/k3s.adoc[K3s] +- xref:installation/registry/special/docker-desktop.adoc[Docker Desktop] +- xref:installation/registry/special/gcr.adoc[Gcr.io] +- xref:installation/registry/special/github.adoc[Github Packages] +- xref:installation/registry/special/icr.adoc[IBM Container Registry] +- xref:installation/registry/special/kind.adoc[Kind] +- xref:installation/registry/special/minikube.adoc[Minikube] [[configuring-registry-run-it-yours]] == Run your own container registry -You can also xref:installation/registry/own.adoc[run your own registry], but this option is recommended only for advanced use cases as it may requires certain changes in the cluster configuration, so, make sure to understand how each change may affect your cluster. As you've seen in the diagram above, the cluster has to be aware of the presence of the registry in order to pull the images pushed by the operator. - -[[pruning-registry]] -== Pruning unused images from container registry - -Over time, while building integrations the produced images are stored in the container registry and it may become outdated and may require pruning old unused images. - -NOTE: Each container registry vendor can provide unique details about the pruning policy, check your vendor documentation. - -NOTE: This is an unsupported functionality, use at your own risk. - -It's recommended only to delete container images from container registry if the corresponding `Integration` or `IntegrationKit` doesn't exist anymore or has no expectation to be used. 
Then if you delete the container image, you should also delete corresponding `Integrationkit` custom resource object. - -Camel K materializes the Camel integration in one of the two kubernetes objects: `Deployment` or `CronJob`. - -You have to check if the `Integration` is running or scaled down to zero pods, which is the case for CronJobs or Knative deployments. - -Then, we can provide some general guide about how to inspect the Camel K objects to prune unused images. - -For this guide, we assume you are connected to the container registry with `docker login`. - -Step 1: List all Camel K container images, prefixed with `camel-k` - -``` -$ docker images |grep k-kit -10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheon0 bd52ae6e32af 54 years ago 481MB -10.98.248.245/camel-k/camel-k-kit-cptguntf799b89lheok0 b7f347193b3c 54 years ago 471MB -10.98.248.245/camel-k/camel-k-kit-cptgv0tf799b89lheokg 8d2d963396ca 54 years ago 477MB -10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheomg dc11800ef203 54 years ago 481MB -10.98.248.245/camel-k/camel-k-kit-cptgvd5f799b89lheol0 0bbdf20f2f49 54 years ago 479MB -``` - -Step 2: List the container images of the Camel K Integrations (don't print the sha256 digest) -``` -$ kubectl get -A it -oyaml|grep 'image:'|sed 's/^\s*image: //g;s/@sha256.*//g'|sort|uniq -10.98.248.245/camel-k/camel-k-kit-cptguntf799b89lheok0 -10.98.248.245/camel-k/camel-k-kit-cptgv0tf799b89lheokg -10.98.248.245/camel-k/camel-k-kit-cptgvd5f799b89lheol0 -10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheon0 -``` - -Step 3: Compare them and remove the container images and `IntegrationKit` from list 1 not found in list 2 -``` -docker rmi dc11800ef203 -kubectl delete ik/kit-cpth0mtf799b89lheomg -``` - -There is a https://github.com/apache/camel-k/blob/main/script/prune-camel-k-kit-images.sh[prune-camel-k-kit-images.sh] script to help you in this task. This script requires the following cli tools: `kubectl, comm, docker`. 
-The script lists the dangling images from the container registry, it accepts two parameters with no arguments: `-v` (verbose) and `-p` (prune images). - -An example of an execution: -``` -$ prune-camel-k-kit-images.sh -p -> Images from container registry, eligible for pruning. -10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheom0 - -> Delete Container Images -integrationkit.camel.apache.org "kit-cpth0mtf799b89lheom0" deleted -Untagged: 10.98.248.245/camel-k/camel-k-kit-cpth0mtf799b89lheom0@sha256:3857f8e331e50ded6529641e668de8781eb3cb7b881ea14b89cfc4f6b6e9d455 -Deleted: sha256:1015a6b18f164e9b086337e69a98e5850149c158cb778bac6059984756dc0528 -Deleted: sha256:2f0d224916e77654c4401f6fc4b1147a9a6e3ccf713213c38e877d7b939bab81 -``` +You can also xref:installation/registry/special/own.adoc[run your own registry], but this option is recommended only for advanced use cases as it may require certain changes in the cluster configuration, so, make sure to understand how each change may affect your cluster. As you've seen in the diagram above, the cluster has to be aware of the presence of the registry in order to pull the images pushed by the operator. diff --git a/docs/modules/ROOT/pages/installation/platform/docker-desktop.adoc b/docs/modules/ROOT/pages/installation/registry/special/docker-desktop.adoc similarity index 84% rename from docs/modules/ROOT/pages/installation/platform/docker-desktop.adoc rename to docs/modules/ROOT/pages/installation/registry/special/docker-desktop.adoc index 2f779f142f..cc513d5501 100644 --- a/docs/modules/ROOT/pages/installation/platform/docker-desktop.adoc +++ b/docs/modules/ROOT/pages/installation/registry/special/docker-desktop.adoc @@ -1,5 +1,4 @@ -[[installation-on-docker-desktop]] -= Installing Camel K on Docker Desktop += Configure a registry on Docker Desktop You can run Camel K integrations on plain Kubernetes using Docker Desktop, for that an external registry is needed and Kubernetes must be enabled. 
@@ -25,7 +24,16 @@ An example of `daemon.json` with the expected configuration: Once the configuration changed, the Docker daemon must be restarted to take the modification into account. -Finally, you can install the Camel-K operator with the insecure registry properly configured thanks to the next command: -``` -kamel install --registry host.docker.internal:5000 --registry-insecure true +Finally, you can install the Camel-K operator with the insecure registry properly configured as: + +```yaml +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + name: camel-k +spec: + build: + registry: + address: host.docker.internal:5000 + insecure: true ``` diff --git a/docs/modules/ROOT/pages/installation/registry/gcr.adoc b/docs/modules/ROOT/pages/installation/registry/special/gcr.adoc similarity index 100% rename from docs/modules/ROOT/pages/installation/registry/gcr.adoc rename to docs/modules/ROOT/pages/installation/registry/special/gcr.adoc diff --git a/docs/modules/ROOT/pages/installation/registry/special/github.adoc b/docs/modules/ROOT/pages/installation/registry/special/github.adoc new file mode 100644 index 0000000000..d9ba0f7370 --- /dev/null +++ b/docs/modules/ROOT/pages/installation/registry/special/github.adoc @@ -0,0 +1,20 @@ +[[configuring-registry-github]] += Configuring a Github Packages Registry + +You can use a Github Packages registry to host your container images. + +Login to Github, then navigate to `Settings -> Developer settings -> Personal access tokens`. Create a new token with the following permissions: + +* `repo` (all) +* `write:packages` +* `read:packages` +* `delete:packages` + +Take note of the personal access token, you'll use it during installation. You also need to **choose a github repository** for hosting your images. 
+ +Finally you can xref:installation/registry/registry-secret.adoc[configure the secret] with the following parameters: + +* registry: docker.pkg.github.com +* organization: github-user/repository +* registry-auth-username: github-user-id +* registry-auth-password: github-token diff --git a/docs/modules/ROOT/pages/installation/registry/icr.adoc b/docs/modules/ROOT/pages/installation/registry/special/icr.adoc similarity index 85% rename from docs/modules/ROOT/pages/installation/registry/icr.adoc rename to docs/modules/ROOT/pages/installation/registry/special/icr.adoc index 16532f3696..d953cb5256 100644 --- a/docs/modules/ROOT/pages/installation/registry/icr.adoc +++ b/docs/modules/ROOT/pages/installation/registry/special/icr.adoc @@ -54,9 +54,18 @@ Then create a secret with the Docker config file `$HOME/.docker/config.json` (or $ kubectl create secret generic my-icr-secret --from-file=.dockerconfigjson=$HOME/.docker/config.json --type=kubernetes.io/dockerconfigjson ---- -Now you can proceed to install Camel K with the following command: +Now you can provide the secret to the IntegrationPlatform: -[source,console] +[source,yaml] ---- -$ kamel install --registry .icr.io --organization --registry-secret my-icr-secret +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + name: camel-k +spec: + build: + registry: + address: .icr.io + organization: + secret: my-icr-secret ---- diff --git a/docs/modules/ROOT/pages/installation/platform/kind.adoc b/docs/modules/ROOT/pages/installation/registry/special/kind.adoc similarity index 63% rename from docs/modules/ROOT/pages/installation/platform/kind.adoc rename to docs/modules/ROOT/pages/installation/registry/special/kind.adoc index d00fb5bec2..3368ca4dc1 100644 --- a/docs/modules/ROOT/pages/installation/platform/kind.adoc +++ b/docs/modules/ROOT/pages/installation/registry/special/kind.adoc @@ -1,41 +1,6 @@ -[[installation-on-kind]] -= Installing Camel K on Kind += Configuring a local registry on Kind 
-[[with-public-registry]] -== With a public registry - -Installing Camel K on Kind, with a public registry doesn't require any special configuration. - -Assuming you have Kind installed, then start by creating a cluster: - -[source,shell] ----- -kind create cluster ----- - -Create a secret with your registry username and password: - -[source,shell] ----- -kubectl -n default create secret docker-registry external-registry-secret --docker-username my-user --docker-password "password" ----- - -Install Camel K operator on the cluster in the default namespace: - -[source,shell] ----- -kamel install --olm=false -n default --registry docker.io --organization my-org-or-username --registry-secret external-registry-secret --wait ----- - -Make sure to replace the `my-org-or-username` with your actual username or organization used to host the images. - -[[with-local-registry]] -== With a local registry - -Installing Camel K on Kind, with a local insecure registry doesn't require any special configuration. - -Assuming you have Kind installed, then start by creating a cluster with a pre-configured local registry by executing the -following script: +Installing Camel K on Kind, with a local insecure registry doesn't require any special configuration. 
Assuming you have Kind installed, then start by creating a cluster with a pre-configured local registry by executing the following script: [source,shell] ---- @@ -105,14 +70,16 @@ The local registry is then listening on port `5001`, so we can push the docker i Assuming that you want to use a snapshot version (created with `make images`), simply create a tag with the proper host then push it to the local registry with the following commands: -[source,shell] ----- -docker tag apache/camel-k:2.0.0-SNAPSHOT localhost:5001/apache/camel-k:2.0.0-SNAPSHOT -docker push localhost:5001/apache/camel-k:2.0.0-SNAPSHOT ----- - -Finally, install Camel K operator on the cluster in the default namespace: -[source,shell] +Finally, configure the IntegrationPlatform as: +[source,yaml] ---- -kamel install --olm=false --operator-image kind-registry:5000/apache/camel-k:2.0.0-SNAPSHOT --registry kind-registry:5000 --registry-insecure +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + name: camel-k +spec: + build: + registry: + address: kind-registry:5000 + insecure: true ---- diff --git a/docs/modules/ROOT/pages/installation/registry/special/minikube.adoc b/docs/modules/ROOT/pages/installation/registry/special/minikube.adoc new file mode 100644 index 0000000000..345133c6e5 --- /dev/null +++ b/docs/modules/ROOT/pages/installation/registry/special/minikube.adoc @@ -0,0 +1,42 @@ += Configuring registry on Minikube + +You can run Camel K integrations on plain Kubernetes using the Minikube cluster creation tool. Follow the instructions in the https://github.com/kubernetes/minikube#installation[official doc] for the installation. + +Start a new instance of Minikube using the command: + +``` +minikube start +``` + +Minikube provides a simple embedded local container registry that it makes the default choice for local development and demo purposes. 
After the startup process is completed, you need to **enable the `registry` addon**: + +``` +minikube addons enable registry +``` + +Alternatively, you can also start an instance with the `registry` addon in one command: +``` +minikube start --addons registry +``` + +Once the registry is available, you can check the IP address by typing: + +``` +$ kubectl -n kube-system get service registry -o jsonpath='{.spec.clusterIP}' +10.100.107.57 +``` + +Now you can use such value into your IntegrationPlatform: + +[source,yaml] +---- +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + name: camel-k +spec: + build: + registry: + address: 10.100.107.57 + insecure: true +---- diff --git a/docs/modules/ROOT/pages/installation/registry/own.adoc b/docs/modules/ROOT/pages/installation/registry/special/own.adoc similarity index 92% rename from docs/modules/ROOT/pages/installation/registry/own.adoc rename to docs/modules/ROOT/pages/installation/registry/special/own.adoc index 1f2c15698f..12d278a024 100644 --- a/docs/modules/ROOT/pages/installation/registry/own.adoc +++ b/docs/modules/ROOT/pages/installation/registry/special/own.adoc @@ -63,10 +63,23 @@ If you apply this configuration, a registry Pod will be started and you can use ``` NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE registry ClusterIP 10.96.112.40 80/TCP 23h - -$ kamel install --registry 10.96.112.40 ``` +Now you can use such value into your IntegrationPlatform: + +[source,yaml] +---- +apiVersion: camel.apache.org/v1 +kind: IntegrationPlatform +metadata: + name: camel-k +spec: + build: + registry: + address: 10.96.112.40 + insecure: true +---- + The above installation should be able to push and pull Integration images correctly. 
[[configuring-registry-run-docker]] diff --git a/docs/modules/ROOT/pages/installation/uninstalling.adoc b/docs/modules/ROOT/pages/installation/uninstalling.adoc index a309770a69..d0fbe14296 100644 --- a/docs/modules/ROOT/pages/installation/uninstalling.adoc +++ b/docs/modules/ROOT/pages/installation/uninstalling.adoc @@ -3,18 +3,6 @@ We're sad to see you go, but If you really need to, it is possible to completely uninstall Camel K from your cluster. The uninstalling procedure typically removes the operator but keeps Custom Resource Definition and any Integration which was previously running. They can be removed by the user by an additional cleaning operation. -[[cli]] -== Uninstall via Kamel CLI - -[source] ----- -kamel uninstall ----- - -This will uninstall all Camel K resources along with the operator from the cluster namespace. - -NOTE: By _default_ the resources possibly shared between clusters such as https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources[CustomResourceDefinitions (CRD)], https://kubernetes.io/docs/reference/access-authn-authz/rbac[ClusterRole] and https://docs.openshift.com/container-platform/4.1/applications/operators/olm-understanding-olm.html[Operator Lifecycle Manager(OLM)] will be **excluded**. To force the inclusion of all resources you can use the **--all** flag. If the **--olm=false** option was specified during installation, which is the case when installing Camel K from sources on CRC, then it also must be used with the uninstall command. - [[helms]] == Uninstall via Helm diff --git a/docs/modules/ROOT/pages/observability/logging.adoc b/docs/modules/ROOT/pages/observability/logging.adoc index 29ca049094..b9a37407a4 100644 --- a/docs/modules/ROOT/pages/observability/logging.adoc +++ b/docs/modules/ROOT/pages/observability/logging.adoc @@ -1,8 +1,7 @@ [[logging]] = Camel K Logging -Logs are an essential aspect of observability, and traditionally used to check nominal operation, or for troubleshooting. 
-The following pages contain some details on the logging provided by the various Camel K components: +Logs are an essential aspect of observability, and traditionally used to check nominal operation, or for troubleshooting. The following pages contain some details on the logging provided by the various Camel K components: - xref:observability/logging/operator.adoc[Camel K operator logging] - xref:observability/logging/integration.adoc[Camel K integration logging] diff --git a/docs/modules/ROOT/pages/observability/logging/operator.adoc b/docs/modules/ROOT/pages/observability/logging/operator.adoc index 1052c71b36..c85557e0c5 100644 --- a/docs/modules/ROOT/pages/observability/logging/operator.adoc +++ b/docs/modules/ROOT/pages/observability/logging/operator.adoc @@ -1,19 +1,15 @@ [[logging]] = Camel K Operator Logging -The operator provides https://kubernetes.io/blog/2020/09/04/kubernetes-1-19-introducing-structured-logs/[structured logging], so that the logs are more easily parsable. - -This includes the output of components managed by the operator, such as the Maven build, and the Integration container image build. - -For example, the Maven build logs display like this: +The operator provides https://kubernetes.io/blog/2020/09/04/kubernetes-1-19-introducing-structured-logs/[structured logging], so that the logs are more easily parsable. This includes the output of components managed by the operator, such as the Maven build, and the Integration container image build. For example, the Maven build logs display like this: [source,json] ---- -{"level":"info","ts":1620393185.321101,"logger":"camel-k.maven.build","msg":"Downloading from repository-000: http://my.repository.com:8081/artifactory/fuse-brno/org/jboss/shrinkwrap/resolver/shrinkwrap-resolver-bom/2.2.4/shrinkwrap-resolver-bom-2.2.4.pom"} +... +{"level":"info","ts":"2024-09-04T09:09:18Z","logger":"camel-k.maven.build","msg":"Building camel-k-integration 2.5.0-SNAPSHOT"} +... 
---- -This may differ when running the operator locally, for development purposes, in which case the local Maven installation that is used may provide a different output. - [[operator-logging-level]] == Logging Level diff --git a/docs/modules/ROOT/pages/observability/monitoring.adoc b/docs/modules/ROOT/pages/observability/monitoring.adoc index bc848e7ee6..131d73ea9d 100644 --- a/docs/modules/ROOT/pages/observability/monitoring.adoc +++ b/docs/modules/ROOT/pages/observability/monitoring.adoc @@ -13,8 +13,7 @@ To take full advantage of the Camel K monitoring capabilities, it is recommended [[kubernetes]] === Kubernetes -The easiest way of starting with the Prometheus Operator is by deploying it as part of https://github.com/prometheus-operator/kube-prometheus[kube-prometheus], which provisions an entire monitoring stack. -You can follow the https://prometheus-operator.dev/docs/prologue/quick-start/[quickstart] from the Prometheus Operator https://prometheus-operator.dev/[documentation]. +The easiest way of starting with the Prometheus Operator is by deploying it as part of https://github.com/prometheus-operator/kube-prometheus[kube-prometheus], which provisions an entire monitoring stack. You can follow the https://prometheus-operator.dev/docs/prologue/quick-start/[quickstart] from the Prometheus Operator https://prometheus-operator.dev/[documentation]. Alternatively, you can quickly deploy the Prometheus operator by running: @@ -42,9 +41,7 @@ spec: EOF ---- -By default, the Prometheus instance discovers applications to be monitored in the same namespace. -You can use the `podMonitorNamespaceSelector` field from the `Prometheus` resource to enable cross-namespace monitoring. -You may also need to specify a ServiceAccount with the `serviceAccountName` field, that's bound to a Role with the necessary permissions. +By default, the Prometheus instance discovers applications to be monitored in the same namespace. 
You can use the `podMonitorNamespaceSelector` field from the `Prometheus` resource to enable cross-namespace monitoring. You may also need to specify a ServiceAccount with the `serviceAccountName` field, that's bound to a Role with the necessary permissions. [[openshift]] === OpenShift diff --git a/docs/modules/ROOT/pages/troubleshooting/operating.adoc b/docs/modules/ROOT/pages/observability/monitoring/operator-sops.adoc similarity index 92% rename from docs/modules/ROOT/pages/troubleshooting/operating.adoc rename to docs/modules/ROOT/pages/observability/monitoring/operator-sops.adoc index a59af67da4..4c02e9b566 100644 --- a/docs/modules/ROOT/pages/troubleshooting/operating.adoc +++ b/docs/modules/ROOT/pages/observability/monitoring/operator-sops.adoc @@ -1,13 +1,10 @@ -[[operating]] -= Operating += Standard Operating Procedures NOTE: The following guide uses the terminology from the https://sre.google/sre-book/service-level-objectives/[Site Reliability Engineer] book. -The Camel K operator exposes a monitoring endpoint, that publishes xref:observability/monitoring/operator.adoc#metrics[metrics] indicating the _level of service_ provided to its users. -These metrics materialize the Service Level Indicators (SLIs) for the Camel K operator. +The Camel K operator exposes a monitoring endpoint, that publishes xref:observability/monitoring/operator.adoc#metrics[metrics] indicating the _level of service_ provided to its users. These metrics materialize the Service Level Indicators (SLIs) for the Camel K operator. -Service Level Objectives (SLOs) can be defined based on these SLIs. -The xref:observability/monitoring/operator.adoc#alerting[default alerts] created for the Camel K operator query the SLIs corresponding metrics, and match the SLOs for the Camel K operator, so that they fire up as soon as the _level of service_ is not met, and preemptive measures can be taken before beaching the Service Level Agreement (SLA) for the Camel K operator. 
+Service Level Objectives (SLOs) can be defined based on these SLIs. The xref:observability/monitoring/operator.adoc#alerting[default alerts] created for the Camel K operator query the SLIs corresponding metrics, and match the SLOs for the Camel K operator, so that they fire up as soon as the _level of service_ is not met, and preemptive measures can be taken before breaching the Service Level Agreement (SLA) for the Camel K operator. [[operator-sops]] == Operator SOPs diff --git a/docs/modules/ROOT/pages/observability/monitoring/operator.adoc b/docs/modules/ROOT/pages/observability/monitoring/operator.adoc index c40c89351d..1f2335923e 100644 --- a/docs/modules/ROOT/pages/observability/monitoring/operator.adoc +++ b/docs/modules/ROOT/pages/observability/monitoring/operator.adoc @@ -163,5 +163,4 @@ spec: for {{ $labels.job }} have their first time to readiness above 1m. ---- -More information can be found in the Prometheus Operator https://prometheus-operator.dev/docs/user-guides/alerting/[Alerting] user guide. -You can also find more details in https://docs.openshift.com/container-platform/4.12/monitoring/managing-alerts.html#creating-alerting-rules-for-user-defined-projects_managing-alerts[Creating alerting rules] from the OpenShift documentation. \ No newline at end of file +More information can be found in the Prometheus Operator https://prometheus-operator.dev/docs/user-guides/alerting/[Alerting] user guide. You can also find more details in https://docs.openshift.com/container-platform/4.12/monitoring/managing-alerts.html#creating-alerting-rules-for-user-defined-projects_managing-alerts[Creating alerting rules] from the OpenShift documentation.
\ No newline at end of file diff --git a/docs/modules/ROOT/pages/pipeline/tekton.adoc b/docs/modules/ROOT/pages/pipeline/external.adoc similarity index 90% rename from docs/modules/ROOT/pages/pipeline/tekton.adoc rename to docs/modules/ROOT/pages/pipeline/external.adoc index b0f1436266..96b931c654 100644 --- a/docs/modules/ROOT/pages/pipeline/tekton.adoc +++ b/docs/modules/ROOT/pages/pipeline/external.adoc @@ -11,4 +11,6 @@ Since Camel K version 2 we are supporting a https://hub.tekton.dev/tekton/task/k [[cicd-pipeline]] == Integrate with other pipelines -There are many CICD tools and we cannot provide support for every technology. However, Camel K gives you the possibility to run your own build with the CICD technology of choice and operate the Camel application accordingly. What you need to do is to let the CICD technology to provide an Integration custom resource with the container image built by the pipeline, ie: `kamel run test.yaml -t container.image=docker.io/my-org/my-image:xyz`. \ No newline at end of file +There are many CICD tools and we cannot provide support for every technology. However, Camel K gives you the possibility to run your own build with the CICD technology of choice and operate the Camel application accordingly. What you need to do is to let the CICD technology to provide an Integration custom resource with the container image built by the pipeline, ie: `kamel run test.yaml -t container.image=docker.io/my-org/my-image:xyz`. + +The above is known as xref:running/self-managed.adoc[**Self managed build** Integration]. diff --git a/docs/modules/ROOT/pages/pipes/bind-cli.adoc b/docs/modules/ROOT/pages/pipes/bind-cli.adoc new file mode 100644 index 0000000000..8c66289ea3 --- /dev/null +++ b/docs/modules/ROOT/pages/pipes/bind-cli.adoc @@ -0,0 +1,80 @@ += Bind Pipes with kamel CLI + +You may already be familiar with the xref:running/running-cli.adoc[`kamel run`] CLI command.
The CLI has a similar command meant to easily interact with the Pipe custom resource: `kamel bind`. + +The command will allow you to easily create and submit a Pipe with a few lines of code: + +```bash +kamel bind timer:foo log:bar --step https://gist.githubusercontent.com/squakez/48b4ebf24c2579caf6bcb3e8a59fa509/raw/c7d9db6ee5e8851f5dc6a564172d85f00d87219c/gistfile1.txt +... +binding "timer-to-log" created +``` + +The Pipe will be immediately created and you will be able to log the content of the Integration created after the Pipe: + +```bash +kamel logs timer-to-log +Integration 'timer-to-log' is now running. Showing log ... +[1] Monitoring pod timer-to-log-6d949466c8-97d7x +[1] 2024-09-03 14:32:31,789 INFO [org.apa.cam.k.Runtime] (main) Apache Camel K Runtime 3.8.1 +... +[1] 2024-09-03 14:32:41,170 INFO [bar] (Camel (camel-1) thread #1 - timer://foo) Exchange[ExchangePattern: InOnly, BodyType: byte[], Body: Hello Camel K] +[1] 2024-09-03 14:32:41,270 INFO [bar] (Camel (camel-1) thread #1 - timer://foo) Exchange[ExchangePattern: InOnly, BodyType: byte[], Body: Hello Camel K] +``` + +You will have a similar developer experience when you want to run any supported custom resource, for example, Kamelets: + +```bash +kamel bind timer-source log-sink -p source.message="Hello Camel K" +... +binding "timer-source-to-log-sink" created +``` + +In this case you need to provide one of the parameters required by the Kamelet used. Then you can watch the Integration log as usual: + +```bash +kamel logs timer-source-to-log-sink +The building kit for integration 'timer-source-to-log-sink' is at: Build Running +Integration 'timer-source-to-log-sink' is now running. Showing log ... +[1] Monitoring pod timer-source-to-log-sink-9bf7bf67f-wh2v2 +[1] 2024-09-03 14:37:58,091 INFO [org.apa.cam.k.Runtime] (main) Apache Camel K Runtime 3.8.1 +...
+[1] 2024-09-03 14:38:01,693 INFO [log-sink] (Camel (camel-1) thread #1 - timer://tick) Exchange[ExchangePattern: InOnly, BodyType: String, Body: Hello Camel K] +``` + +[[dry-run]] +== Dry Run + +The `bind` command also has a **dry-run** option, just like the `run` command. If you have familiarity with Kubernetes, you will see we use the same approach used by `kubectl`, exposing a `-o` parameter which accepts either `yaml` or `json`. The presence of this feature will let you simplify any deployment strategy (including GitOps) as you can just get the result of the Integration which will be eventually executed by the Camel K Operator. + +NOTE: we make use of `stderr` for many CLI warnings and this is automatically redirected to `stdout` to show immediately the result of any error to the user. If you're running any automation, make sure to redirect the `stderr` to any channel to avoid altering the result of the dry run, i.e. `kamel run /tmp/Test.java -o yaml 2>/dev/null`. + +As an example, take the option available on the `kamel bind timer-source log-sink -p source.message="Hello Camel K v3.6.0" -t camel.runtime-version=3.6.0 -n camel-k -o yaml` command: + +```yaml +apiVersion: camel.apache.org/v1 +kind: Pipe +metadata: + annotations: + camel.apache.org/operator.id: camel-k + trait.camel.apache.org/camel.runtime-version: 3.6.0 + creationTimestamp: null + name: timer-source-to-log-sink + namespace: camel-k +spec: + sink: + ref: + apiVersion: camel.apache.org/v1 + kind: Kamelet + name: log-sink + namespace: camel-k + source: + properties: + message: Hello Camel K v3.6.0 + ref: + apiVersion: camel.apache.org/v1 + kind: Kamelet + name: timer-source + namespace: camel-k +status: {} +``` diff --git a/docs/modules/ROOT/pages/pipes/pipes.adoc b/docs/modules/ROOT/pages/pipes/pipes.adoc new file mode 100644 index 0000000000..b38bdc677b --- /dev/null +++ b/docs/modules/ROOT/pages/pipes/pipes.adoc @@ -0,0 +1,128 @@ += Running a Pipe + +The Pipe is a concept that
enables the user to create a "composable" Event Driven Architecture design. The Pipe can bind **source** and **sink** endpoints where an endpoint represents a source/sink external entity (could be any Camel URI or a Kubernetes resource such as xref:kamelets/kamelets.adoc[Kamelets], Kafka (https://strimzi.io/[Strimzi]) or https://knative.dev[Knative] resources). + +NOTE: make sure you're familiar with the concept of xref:kamelets/kamelets.adoc[Kamelet] before continuing. + +The operator is in charge of transforming a binding between a source and a sink into a running Integration, taking care of all the building involved and the transformation required. + +```yaml +apiVersion: camel.apache.org/v1 +kind: Pipe +metadata: + name: timer-to-log +spec: + sink: + uri: log:bar + source: + uri: timer:foo +``` + +The above example is the simplest example we can use to show how to "connect" a Camel URI source to a Camel URI sink. You can run it executing `kubectl apply -f timer-to-log.yaml`. Once executed, you can check the status of your Pipe: + +``` +kubectl get pipe -w + +NAME PHASE REPLICAS +timer-to-log Creating +timer-to-log Ready 0 +timer-to-log Ready 1 +``` + +The operator has taken the Pipe and has created an Integration from the Pipe configuration. The Integration is the resource that will run your final application and you can look at it accordingly: + +``` +NAME PHASE READY RUNTIME PROVIDER RUNTIME VERSION CATALOG VERSION KIT REPLICAS +timer-to-log Running True quarkus 3.8.1 3.8.1 kit-crbgrhmn5tgc73cb1tl0 1 +``` + +== Sources, Sinks and Actions + +The development of a Pipe should be limited to the binding between a source and a sink. However sometimes you may need to perform slight transformations when consuming the events. In such a case you can include a set of actions that will take care of that.
+ +```yaml +apiVersion: camel.apache.org/v1 +kind: Pipe +metadata: + name: timer-to-log +spec: + sink: + uri: log:bar + source: + uri: timer:foo + steps: + - uri: https://gist.githubusercontent.com/squakez/48b4ebf24c2579caf6bcb3e8a59fa509/raw/c7d9db6ee5e8851f5dc6a564172d85f00d87219c/gistfile1.txt +``` + +In the example above we're making sure to call an intermediate resource in order to fill the content with some value. + +== Differences with Integrations + +The simple examples above may make you wonder what the differences are between a Pipe and an Integration. The Integration is meant for any generic Camel workload where you have complex business logic to perform, whereas Pipes are more useful when you have events and you want to emit or consume such events in a connector-style approach. + +Most of the time you will have consumer applications (one Pipe) which are consuming events from a topic (Kafka, Kamelet or Knative) and producer applications (another Pipe) producing to a topic. + +NOTE: Camel K operator will allow you to use directly Kafka (Strimzi) and Knative endpoints custom resources. + +== Examples + +Here are some other examples involving Kamelets, Knative and Kafka. + +=== Binding Kamelets + +One development that emerges is the Connector development. You can consider a Kamelet as a connector endpoint, therefore binding together source and sink Kamelets to perform some logic. In this one, for instance, we're moving data from an AWS Kinesis source to a PostgreSQL database.
+ +```yaml +apiVersion: camel.apache.org/v1 +kind: Pipe +metadata: + name: from-kinesis-to-pgdb +spec: + source: + ref: + kind: Kamelet + apiVersion: camel.apache.org/v1 + name: aws-kinesis-source + properties: + region: my-region + stream: my-stream + sink: + ref: + kind: Kamelet + apiVersion: camel.apache.org/v1 + name: postgresql-sink + properties: + databaseName: my-db + password: my-pwd + query: INSERT INTO accounts (username,city) VALUES (:#username,:#city) + serverName: localhost + username: my-usr +``` + +=== Consuming events from a Kafka topic + +Another typical use case is to consume/produce events directly from a KafkaTopic custom resource (managed by Strimzi operator) or Knative resources: + +```yaml +apiVersion: camel.apache.org/v1alpha1 +kind: KameletBinding +metadata: + name: beer-event-source +spec: + source: + ref: + kind: Kamelet + apiVersion: camel.apache.org/v1alpha1 + name: beer-source + properties: + period: 5000 + sink: + ref: + kind: KafkaTopic + apiVersion: kafka.strimzi.io/v1beta1 + name: beer-events +``` + +== Using Kamel CLI + +Camel K works very well with any Kubernetes compatible user interface (such as CLI as `kubectl`, `oc` or any other visual tooling). However we do provide a simple CLI that helps you performing most of the Pipe works in an easier fashion: it's xref:running/bind-cli.adoc[`kamel` CLI]. diff --git a/docs/modules/ROOT/pages/pipes/promoting.adoc b/docs/modules/ROOT/pages/pipes/promoting.adoc new file mode 100644 index 0000000000..3c2b7a5766 --- /dev/null +++ b/docs/modules/ROOT/pages/pipes/promoting.adoc @@ -0,0 +1,51 @@ += Promoting Pipes across environments + +As soon as you have a Pipe running in your cluster, you will be challenged to move that Pipe to a higher environment. I.e., you can test your Pipe in a **development** environment, and, as soon as you're happy with the result, you will need to move it into a **production** environment.
+ +== CLI `promote` command + +You may already be familiar with this command as seen when xref:running/promoting.adoc[promoting Integrations across environments]. The command is smart enough to detect when you want to promote a Pipe or an Integration and it works exactly in the same manner. + +NOTE: use dry run option (`-o yaml`) and export the result to any separated cluster or Git repository to perform a GitOps strategy. + +Let's run a simple Pipe to see it in action: + +```bash +kamel bind timer-source log-sink -p source.message="Hello Camel K" +... +binding "timer-source-to-log-sink" created +``` + +Once the Pipe Integration is running, we can `promote` the Pipe with `kamel promote timer-source-to-log-sink --to prod -o yaml`. We get the following result: + +```yaml +apiVersion: camel.apache.org/v1 +kind: Pipe +metadata: + annotations: + camel.apache.org/kamelet.icon: data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4NCjwhLS0gU3ZnIFZlY3RvciBJY29ucyA6IGh0dHA6Ly93d3cub25saW5ld2ViZm9udHMuY29tL2ljb24gLS0... + trait.camel.apache.org/camel.runtime-version: 3.8.1 + trait.camel.apache.org/container.image: 10.100.107.57/camel-k/camel-k-kit-crbhu56n5tgc73cb1ts0@sha256:e3f66b61148e77ceda8531632847b455219300d95c9e640f4924b7e69419c2b9 + trait.camel.apache.org/jvm.classpath: dependencies/*:dependencies/app/*:dependencies/lib/boot/*:dependencies/lib/main/*:dependencies/quarkus/* + creationTimestamp: null + name: timer-source-to-log-sink + namespace: prod +spec: + sink: + ref: + apiVersion: camel.apache.org/v1 + kind: Kamelet + name: log-sink + namespace: prod + source: + properties: + message: Hello Camel K + ref: + apiVersion: camel.apache.org/v1 + kind: Kamelet + name: timer-source + namespace: prod +status: {} +``` + +As you may already have seen with the Integration example, also here the Pipe is reusing the very same container image. 
From a release perspective we are guaranteeing the **immutability** of the Pipe as the container used is exactly the same of the one we have tested in development (what we change are just the configurations, if any). diff --git a/docs/modules/ROOT/pages/running/dev-mode.adoc b/docs/modules/ROOT/pages/running/dev-mode.adoc deleted file mode 100644 index fc6f32af59..0000000000 --- a/docs/modules/ROOT/pages/running/dev-mode.adoc +++ /dev/null @@ -1,53 +0,0 @@ -[[dev-mode]] -= Running an Integration in Dev Mode - -Camel K provides a specific flag for quickly iterating on integrations during development and have fast feedbacks on the code you're writing. -It's called *dev mode*. - -Differently from other frameworks, artifacts generated by Camel K in *dev mode are no different* from the one you run in production. -Dev mode is just a helper to let you be quicker during development. - -To enable dev mode, just add the `--dev` flag when running the integration: - -``` -kamel run examples/languages/Sample.java --dev -``` - -The `--dev` flag deploys immediately the integration and shows the integration logs in the console. You can then change the code and see -the **changes automatically applied (instantly)** to the remote integration pod. - -The console follows automatically all redeploys of the integration. - -Here's an example of the output: - -``` -$ kamel run examples/languages/Sample.java --dev -integration "sample" created -integration "sample" in phase Initialization -integration "sample" in phase Building Kit -integration "sample" in phase Deploying -integration "sample" in phase Running -[1] Monitoring pod sample-56fb69c989-42gmw[1] Starting the Java application using /opt/run-java/run-java.sh ... 
-[1] exec java -XX:+UseParallelGC -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90 -XX:MinHeapFreeRatio=20 -XX:MaxHeapFreeRatio=40 -XX:+ExitOnOutOfMemoryError -cp ./resources:/etc/camel/conf:/etc/camel/resources:/etc/camel/sources/i-source-000:dependencies/com.sun.xml.bind.jaxb-core-2.3.0.1.jar:dependencies/com.sun.xml.bind.jaxb-impl-2.3.0.jar:dependencies/commons-io.commons-io-2.6.jar:dependencies/javax.xml.bind.jaxb-api-2.3.0.jar:dependencies/org.apache.camel.camel-api-3.0.0.jar:dependencies/org.apache.camel.camel-base-3.0.0.jar:dependencies/org.apache.camel.camel-core-engine-3.0.0.jar:dependencies/org.apache.camel.camel-endpointdsl-3.0.0.jar:dependencies/org.apache.camel.camel-jaxp-3.0.0.jar:dependencies/org.apache.camel.camel-main-3.0.0.jar:dependencies/org.apache.camel.camel-management-api-3.0.0.jar:dependencies/org.apache.camel.camel-support-3.0.0.jar:dependencies/org.apache.camel.camel-timer-3.0.0.jar:dependencies/org.apache.camel.camel-util-3.0.0.jar:dependencies/org.apache.camel.camel-util-json-3.0.0.jar:dependencies/org.apache.camel.k.camel-k-loader-java-1.0.8.jar:dependencies/org.apache.camel.k.camel-k-runtime-core-1.0.8.jar:dependencies/org.apache.camel.k.camel-k-runtime-main-1.0.8.jar:dependencies/org.apache.camel.spi-annotations-3.0.0.jar:dependencies/org.apache.commons.commons-lang3-3.9.jar:dependencies/org.apache.logging.log4j.log4j-api-2.12.1.jar:dependencies/org.apache.logging.log4j.log4j-core-2.12.1.jar:dependencies/org.apache.logging.log4j.log4j-slf4j-impl-2.12.1.jar:dependencies/org.jooq.joor-java-8-0.9.12.jar:dependencies/org.slf4j.slf4j-api-1.7.29.jar org.apache.camel.k.main.Application -[1] OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N -[1] 2019-12-16 11:33:42.916 INFO [main] ApplicationRuntime - Add listener: org.apache.camel.k.listener.ContextConfigurer@12b0404f -[1] 2019-12-16 
11:33:42.919 INFO [main] ApplicationRuntime - Add listener: org.apache.camel.k.listener.RoutesConfigurer@4313f5bc -[1] 2019-12-16 11:33:42.920 INFO [main] ApplicationRuntime - Add listener: org.apache.camel.k.listener.RoutesDumper@2b6faea6 -[1] 2019-12-16 11:33:42.934 INFO [main] RuntimeSupport - Looking up loader for language: java -[1] 2019-12-16 11:33:42.939 INFO [main] RuntimeSupport - Found loader org.apache.camel.k.loader.java.JavaSourceLoader@3c419631 for language java from service definition -[1] 2019-12-16 11:33:43.644 INFO [main] RoutesConfigurer - Loading routes from: file:/etc/camel/sources/i-source-000/Sample.java?language=java -[1] 2019-12-16 11:33:43.644 INFO [main] ApplicationRuntime - Listener org.apache.camel.k.listener.RoutesConfigurer@4313f5bc executed in phase ConfigureRoutes -[1] 2019-12-16 11:33:43.658 INFO [main] BaseMainSupport - Using properties from: file:/etc/camel/conf/application.properties -[1] 2019-12-16 11:33:43.917 INFO [main] ApplicationRuntime - Listener org.apache.camel.k.listener.ContextConfigurer@12b0404f executed in phase ConfigureContext -[1] 2019-12-16 11:33:43.918 INFO [main] DefaultCamelContext - Apache Camel 3.0.0 (CamelContext: camel-k) is starting -[1] 2019-12-16 11:33:43.919 INFO [main] DefaultManagementStrategy - JMX is disabled -[1] 2019-12-16 11:33:44.099 INFO [main] DefaultCamelContext - StreamCaching is not in use. If using streams then its recommended to enable stream caching. 
See more details at http://camel.apache.org/stream-caching.html -[1] 2019-12-16 11:33:44.109 INFO [main] DefaultCamelContext - Route: route1 started and consuming from: timer://tick -[1] 2019-12-16 11:33:44.116 INFO [main] DefaultCamelContext - Total 1 routes, of which 1 are started -[1] 2019-12-16 11:33:44.119 INFO [main] DefaultCamelContext - Apache Camel 3.0.0 (CamelContext: camel-k) started in 0.199 seconds -[1] 2019-12-16 11:33:44.123 INFO [main] ApplicationRuntime - Listener org.apache.camel.k.listener.RoutesDumper@2b6faea6 executed in phase Started -[1] 2019-12-16 11:33:45.127 INFO [Camel (camel-k) thread #1 - timer://tick] route1 - Hello Camel K! - -``` - -You can write your own integration from scratch or start from one of the examples available in the https://github.com/apache/camel-k/releases[release page]. diff --git a/docs/modules/ROOT/pages/running/dry-run.adoc b/docs/modules/ROOT/pages/running/dry-run.adoc deleted file mode 100644 index ac25c11a07..0000000000 --- a/docs/modules/ROOT/pages/running/dry-run.adoc +++ /dev/null @@ -1,90 +0,0 @@ -= Dry Run an Integration - -The CLI is a very powerful facility which will do a lot of heavy lift for you, transforming a Camel route into an Integration specification which will be watched and reconciled by the operator. However, sometimes you don't want to apply the result of an execution on the cluster. You may want only to check how the route is transformed or you want to run the conversion and apply the result later. - -*Dry Run* is the mode you can find on several commands, above all on `kamel run` and `promote`. If you have familiarity with Kubernetes, you will see we use the same approach used by `kubectl`, exposing a `-o` parameter which accepts either `yaml` or `json`. - -The presence of this feature will let you simplify any deployment strategy (including GitOps) as you can just get the result of the Integration which will be eventually executed by the Camel K Operator. 
- -NOTE: we make use of `stderr` for many CLI warning and this is automatically redirected to `stdout` to show immediately the result of any error to the user. If you're running any automation, make sure to redirect the `stderr` to any channel to avoid altering the result of the dry run, Ie `kamel run /tmp/Test.java -o yaml 2>/dev/null`. - -[[run]] -== Run subcommand - -As an example, take the option available on the `kamel run test.yaml -o yaml -t prometheus.enabled=true` command: -```yaml -apiVersion: camel.apache.org/v1 -kind: Integration -metadata: - annotations: - camel.apache.org/operator.id: camel-k - creationTimestamp: null - name: test -spec: - flows: - - from: - parameters: - period: "1000" - steps: - - setBody: - constant: Hello Camel from yaml - - log: ${body} - uri: timer:yaml - traits: - prometheus: - enabled: true -status: {} -``` -This can be saved for future processing (ie, stored to a GIT repository and later deployed to a cluster via some GitOps deployment strategy). Consider that any **modeline** option will be translated accordingly. - -[[bind]] -== Bind subcommand -The same option can be used for `kamel bind timer:foo log:bar -t camel.runtime-version=1.17.0 -o yaml`: -```yaml -apiVersion: camel.apache.org/v1 -kind: Pipe -metadata: - annotations: - camel.apache.org/operator.id: camel-k - creationTimestamp: null - name: timer-to-log -spec: - integration: - traits: - camel: - runtimeVersion: 1.17.0 - sink: - uri: log:bar - source: - uri: timer:foo -status: {} -``` - -[[promote]] -== Promote subcommand -This one is very interesting as will give you the possibility to see how a running Integration can be promoted to a different environment (without being actually executed). 
For example `kamel promote test -o yaml --to prod-ns` will return: -```yaml -apiVersion: camel.apache.org/v1 -kind: Integration -metadata: - creationTimestamp: null - name: test - namespace: prod-ns -spec: - flows: - - from: - parameters: - period: "1000" - steps: - - setBody: - constant: Hello Camel from yaml - - log: ${body} - uri: timer:yaml - traits: - camel: - runtimeVersion: 1.17.0 - container: - image: 10.110.251.124/default/camel-k-kit-cjquhq90gomc73bb5fkg@sha256:448b882537accac6a815404fbf2da3d52f3e2982756caf3adac16b824a1097b1 -status: {} -``` -We can see it specify the image that was already used by the previous execution (which likely served as a test). diff --git a/docs/modules/ROOT/pages/running/knative-sink.adoc b/docs/modules/ROOT/pages/running/knative-sink.adoc deleted file mode 100644 index 8c7d2aef1b..0000000000 --- a/docs/modules/ROOT/pages/running/knative-sink.adoc +++ /dev/null @@ -1,48 +0,0 @@ -[[knative-sinks]] -= Knative Sinks - -A https://knative.dev/docs/eventing/sinks[Knative Sink] can be referenced in a producer path of the Camel Knative component, e.g.: - -[source,java] ----- -from('timer:tick') - .setBody().constant('event') - .to('knative:event/broker') ----- - -Or as the value of the `sink` field in a `Pipe` resource, e.g.: - -[source,yaml] ----- -apiVersion: camel.apache.org/v1 -kind: Pipe -metadata: - name: timer-source-binding -spec: - source: - ref: - kind: Kamelet - apiVersion: camel.apache.org/v1 - name: timer-source - properties: - message: Event - sink: - ref: - kind: Broker - apiVersion: eventing.knative.dev/v1 - name: broker - properties: - type: type ----- - -In the above examples, the Knative Sink is a https://knative.dev/docs/eventing/broker/[Knative Broker]. -However, a Knative Sink can be any Kubernetes resource that's _addressable_, i.e., whose URL can be retrieved by reading its `status.address.url`. - -This is what the Camel K operator does to resolve the Sink URLs, so that events can be sent to. 
-That requires the ServiceAccount that runs the operator to be granted permission to GET these resources, while it configures the integration runtime. - -As the set of resources is arbitrary, and cannot be known _a-priori_, these permissions are automatically granted to `camel-k-operator` ServiceAccount, by relying on the Knative Addressable resolver aggregated ClusterRole. - -For the above examples, that sink into an `eventing.knative.dev/broker` resource, this is already being configured by Knative, that aggregates the required permissions for its own resources by default. - -However, if you integrate custom resources as sinks, the required permissions must be added manually, as documented in the https://knative.dev/docs/eventing/sinks/#using-custom-resources-as-sinks[Knative documentation]. diff --git a/docs/modules/ROOT/pages/running/promoting.adoc b/docs/modules/ROOT/pages/running/promoting.adoc index 308c87ae36..b3784b1e10 100644 --- a/docs/modules/ROOT/pages/running/promoting.adoc +++ b/docs/modules/ROOT/pages/running/promoting.adoc @@ -1,5 +1,5 @@ [[promoting-integration]] -= Promoting across environments += Promoting Integrations across environments As soon as you have an Integration running in your cluster, you will be challenged to move that Integration to an higher environment. Ie, you can test your Integration in a **development** environment, and, as soon as you're happy with the result, you will need to move it into a **production** environment. @@ -51,4 +51,4 @@ hello, I am production! ``` Something nice is that since the Integration is reusing the very same container image, the execution of the new application will be immediate. Also from a release perspective we are guaranteeing the **immutability** of the Integration as the container used is exactly the same of the one we have tested in development (what we change are just the configurations). 
-Please notice that the Integration running in test is not altered in any way and will be running until any user will stop it. \ No newline at end of file +Please notice that the Integration running in test is not altered in any way and will be running until any user will stop it. diff --git a/docs/modules/ROOT/pages/running/run-from-github.adoc b/docs/modules/ROOT/pages/running/run-from-github.adoc deleted file mode 100644 index f9427bf17a..0000000000 --- a/docs/modules/ROOT/pages/running/run-from-github.adoc +++ /dev/null @@ -1,64 +0,0 @@ -[[run-from-github]] -= Run an Integration from GitHub - -It is possible to run integrations from a GitHub repository or Gist with dedicated URL syntax: - -== Integration code stored in a Github repository - -.Syntax -[source] ----- -kamel run github:$user/$repo/$path?branch=$branch ----- - -As example, running the following command - - -[source] ----- -kamel run github:apache/camel-k-examples/generic-examples/languages/Sample.java ----- - -is equivalent to: - -[source] ----- -kamel run https://raw.githubusercontent.com/apache/camel-k-examples/main/generic-examples/languages/Sample.java ----- - -but does not require to type the full GitHub RAW URL. - -Declaring the branch query param is not required and defaults to `master` if not explicit set. - -== Integration code stored in a Gist - -.Syntax -[source] ----- -kamel run https://gist.github.com/${user-id}/${gist-id} -kamel run gist:${gist-id} ----- - -Camel k will add any file that is part of the Gist as a source. 
- -As example, assuming there are two files listed as part of a Gist, beans.yaml and routes.yaml, then the following command - - -[source] ----- -kamel run gist:${gist-id} ----- - -is equivalent to: - -[source] ----- -kamel run \ - https://gist.githubusercontent.com/${user-id}/${gist-id}/raw/${...}/beans.yaml \ - https://gist.githubusercontent.com/${user-id}/${gist-id}/raw/${...}/routes.yaml ----- - -[NOTE] -==== -GitHub applies rate limiting to its APIs and as Authenticated requests get a higher rate limit, the camel-k cli honour the env var GITHUB_TOKEN and if it is found, then it is used for GitHub authentication. -==== \ No newline at end of file diff --git a/docs/modules/ROOT/pages/running/running-cli.adoc b/docs/modules/ROOT/pages/running/running-cli.adoc new file mode 100644 index 0000000000..10a5d6d2f8 --- /dev/null +++ b/docs/modules/ROOT/pages/running/running-cli.adoc @@ -0,0 +1,264 @@ += Camel K CLI: kamel + +In the previous section you have learnt xref:running/running.adoc[how to run an Integration custom resource]. In this page you will learn a simple utility we release beside the operator, the `kamel` CLI. + +Releases of the Camel K CLI are available on: + +- Apache Mirrors (official): https://downloads.apache.org/camel/camel-k/ +- Github Releases: https://github.com/apache/camel-k/releases +- Homebrew (Mac and Linux): https://formulae.brew.sh/formula/kamel + +So, pick yours, set in your operating system path and be ready to run some Camel K Integration. The `kamel` cli will reuse the same cluster configuration you have set for your cluster via `kubectl`, `oc` or any other tool. So, you can log in to the cluster via those CLI and then using `kamel` afterwards. + +Let's try some application by creating a file with the following content. 
+ +[source,yaml] +.run-hello.yaml +---- +- from: + uri: "timer:tick?period=3000" + steps: + - setBody: + constant: "Hello world from Camel K" + - to: "log:info" +---- + +NOTE: you can also use Camel JBang and initialize any Camel DSL via `camel init run-hello.yaml` + +You can now run it on the cluster by executing: + +[source] +---- +kamel run run-hello.yaml +---- + +[[monitoring-integration]] +== Monitoring the application status + +Camel K integrations follow a lifecycle composed of several steps before getting into the `Running` state. You can check the status of all integrations by executing the following command: + +``` +kamel get +``` + +[[logging-integration]] +== Log the standard output + +Once the application is running you can check the content of the Pods log by executing: + +``` +kamel logs hello +``` + +NOTE: if the above example failed, have a look at xref:troubleshooting/troubleshooting.adoc[how to troubleshoot a Camel K Integration]. + +[[dev-mode-integration]] +== Running in dev mode + +Camel K provides a specific flag for quickly iterating on integrations during development and have fast feedbacks on the code you're writing. It's called *dev mode*. Differently from other frameworks, artifacts generated by Camel K in dev mode are no different from the one you run in production. Dev mode is just a helper to let you be quicker during development. To enable dev mode, just add the `--dev` flag when running the integration: + +``` +kamel run examples/languages/Sample.java --dev +``` + +The `--dev` flag deploys immediately the Integration and shows the Integration logs in the console. You can then change the code and see the **changes automatically applied (instantly)** to the remote Integration Pod. The console follows automatically all redeploys of the integration. 
+ +Here's an example of the output: + +``` +$ kamel run examples/languages/Sample.java --dev +integration "sample" created +integration "sample" in phase Initialization +integration "sample" in phase Building Kit +integration "sample" in phase Deploying +integration "sample" in phase Running +... +[1] 2019-12-16 11:33:43.918 INFO [main] DefaultCamelContext - Apache Camel 3.0.0 (CamelContext: camel-k) is starting +[1] 2019-12-16 11:33:43.919 INFO [main] DefaultManagementStrategy - JMX is disabled +[1] 2019-12-16 11:33:44.099 INFO [main] DefaultCamelContext - StreamCaching is not in use. If using streams then its recommended to enable stream caching. See more details at http://camel.apache.org/stream-caching.html +[1] 2019-12-16 11:33:44.109 INFO [main] DefaultCamelContext - Route: route1 started and consuming from: timer://tick +[1] 2019-12-16 11:33:44.116 INFO [main] DefaultCamelContext - Total 1 routes, of which 1 are started +[1] 2019-12-16 11:33:44.119 INFO [main] DefaultCamelContext - Apache Camel 3.0.0 (CamelContext: camel-k) started in 0.199 seconds +[1] 2019-12-16 11:33:44.123 INFO [main] ApplicationRuntime - Listener org.apache.camel.k.listener.RoutesDumper@2b6faea6 executed in phase Started +[1] 2019-12-16 11:33:45.127 INFO [Camel (camel-k) thread #1 - timer://tick] route1 - Hello Camel K! +... +``` +[[dry-run]] +== Dry Run + +The CLI is a simple yet powerful facility which will do a lot of heavy lift for you, transforming a Camel route into an Integration specification which will be watched and reconciled by the operator. However, sometimes you don't want to apply the result of an execution on the cluster. You may want only to check how the route is transformed or you want to run the conversion and apply the result later. + +*Dry Run* is the mode you can find on `kamel run`. If you have familiarity with Kubernetes, you will see we use the same approach used by `kubectl`, exposing a `-o` parameter which accepts either `yaml` or `json`. 
The presence of this feature will let you simplify any deployment strategy (including GitOps) as you can just get the result of the Integration which will be eventually executed by the Camel K Operator.
+
+NOTE: we make use of `stderr` for many CLI warnings and this is automatically redirected to `stdout` to show immediately the result of any error to the user. If you're running any automation, make sure to redirect the `stderr` to any channel to avoid altering the result of the dry run, i.e. `kamel run /tmp/Test.java -o yaml 2>/dev/null`.
+
+As an example, take the option available on the `kamel run test.yaml -t prometheus.enabled=true -o yaml` command:
+
+```yaml
+apiVersion: camel.apache.org/v1
+kind: Integration
+metadata:
+  annotations:
+    camel.apache.org/operator.id: camel-k
+  creationTimestamp: null
+  name: test
+spec:
+  flows:
+  - from:
+      parameters:
+        period: "1000"
+      steps:
+      - setBody:
+          constant: Hello Camel from yaml
+      - log: ${body}
+      uri: timer:yaml
+  traits:
+    prometheus:
+      enabled: true
+status: {}
+```
+This can be saved for future processing (i.e., stored to a Git repository and later deployed to a cluster via some GitOps deployment strategy). Consider that any **modeline** option will be translated accordingly.
+
+[[modeline]]
+== Camel K Modeline
+
+Integration files can contain **modeline** hooks that allow you to customize the way integrations are executed via command line.
For example: + +.Hello.java +[source,java] +---- +// camel-k: dependency=mvn:org.my:application:1.0 // <1> + +import org.apache.camel.builder.RouteBuilder; + +public class Hello extends RouteBuilder { + @Override + public void configure() throws Exception { + from("timer:java?period=1000") + .bean(org.my.BusinessLogic) // <2> + .log("${body}"); + } +} +---- +<1> Modeline import of Maven library +<2> Usage of a business logic class from the external library + +When the integration code above is executed using the `kamel run` CLI command, the modeline options declared in the file are appended to the list of arguments that are passed to the command. + +The `kamel` CLI will alert you, printing the full command in the shell: + +[source,console] +---- +$ kamel run Hello.java +Modeline options have been loaded from source files +Full command: kamel run Hello.java --dependency mvn:org.my:application:1.0 +---- + +Multiple options can be specified for an integration. For example, the following modeline options enables 3scale and limits the integration container memory: + +.ThreeScaleRest.java +[source,java] +---- +// camel-k: trait=3scale.enabled=true trait=container.limit-memory=256Mi // <1> + +import org.apache.camel.builder.RouteBuilder; + +public class ThreeScaleRest extends RouteBuilder { + + @Override + public void configure() throws Exception { + rest().get("/") + .route() + .setBody().constant("Hello"); + } +} +---- +<1> Enables both the _container_ and _3scale_ traits, to expose the route via 3scale and limit the container memory. + +All options that are available for the `kamel run` command can be specified as modeline options. 
The following is a partial list of useful options: + +.Useful Modeline Options +[cols="1m,2v"] +|=== +|Option | Description + +|build-property +|Add a build time property or properties file (syntax: _[my-key=my-value\|file:/path/to/my-conf.properties]_ + +|config +|Add a runtime configuration from a Configmap, Secret or file (syntax: _[configmap\|secret\|file]:name[/key]_, where name represents the local file path or the configmap/secret name and key optionally represents the configmap/secret key to be filtered) + +|dependency +|An external library that should be included, e.g. for Maven dependencies `dependency=mvn:org.my:app:1.0` + +|env +|Set an environment variable in the integration container, e.g. `env=MY_VAR=my-value` + +|label +|Add a label to the integration pod, e.g., `label=my.company=hello` + +|name +|The integration name + +|open-api +|Add an OpenAPI v2 spec (file path) + +|profile +|Trait profile used for deployment + +|property +|Add a runtime property or properties file (syntax: _[my-key=my-value\|file:/path/to/my-conf.properties]_) + +|resource +|Add a runtime resource from a Configmap, Secret or file (syntax: _[configmap\|secret\|file]:name[/key][@path]_, where name represents the local file path or the configmap/secret name, key optionally represents the configmap/secret key to be filtered and path represents the destination path) + +|trait +|Configure a trait, e.g. `trait=service.enabled=false` + +|=== + +== Run an Integration from the Internet + +The `kamel` cli will allow you to run any application available on the Internet. Just run `kamel run https://path/to/route.yaml` and the CLI will take care to recover the route remotely. 
It is also possible to run Integrations from a GitHub repository or Gist with dedicated URL syntax:
+
+.Syntax
+[source]
+----
+kamel run github:$user/$repo/$path?branch=$branch
+----
+
+As an example, running the following command
+
+[source]
+----
+kamel run github:apache/camel-k-examples/generic-examples/languages/Sample.java
+----
+
+Declaring the branch query param is not required and defaults to `master` if not explicitly set.
+
+A similar approach is used for the Gists:
+
+.Syntax
+[source]
+----
+kamel run https://gist.github.com/${user-id}/${gist-id}
+kamel run gist:${gist-id}
+----
+
+Camel K will add any file that is part of the Gist as a source. As an example, assuming there are two files listed as part of a Gist, beans.yaml and routes.yaml, then the following command:
+
+[source]
+----
+kamel run gist:${gist-id}
+----
+
+is equivalent to:
+
+[source]
+----
+kamel run \
+  https://gist.githubusercontent.com/${user-id}/${gist-id}/raw/${...}/beans.yaml \
+  https://gist.githubusercontent.com/${user-id}/${gist-id}/raw/${...}/routes.yaml
+----
+
+NOTE: GitHub applies rate limiting to its APIs and as authenticated requests get a higher rate limit, the `kamel` CLI honours the env var GITHUB_TOKEN and if it is found, then it is used for GitHub authentication.
\ No newline at end of file
diff --git a/docs/modules/ROOT/pages/running/running.adoc b/docs/modules/ROOT/pages/running/running.adoc
index 5980c2ee3a..fea5e083bf 100644
--- a/docs/modules/ROOT/pages/running/running.adoc
+++ b/docs/modules/ROOT/pages/running/running.adoc
@@ -1,120 +1,97 @@
 [[running-integration]]
 = Running an Integration
 
-After completing the xref:installation/installation.adoc[installation] you should be connected to a Kubernetes/OpenShift cluster
-and have the `kamel` CLI correctly configured.
+After completing the xref:installation/installation.adoc[installation] you should be able to run an Integration containing a Camel DSL application.
The most basic configuration required is to wrap a Camel route into the Integration custom resource. Let's start with a java DSL Camel application: -Ensure you're connected to the cluster by executing a simple command using the Kubernetes CLI: - -[source] ----- -kubectl get pod ----- - -Just replace `kubectl` with `oc` if you're using OpenShift. If everything is correctly configured you should get a response from the Kubernetes API -server (you should see at least the `camel-k-operator` running). - -You are now ready to create your first integration using Camel K. Just create a new Yaml file with the following content: - -[source,yaml] -.run-hello.yaml ----- -- from: - uri: "timer:tick?period=3000" - steps: - - setBody: - constant: "Hello world from Camel K" - - to: "log:info" ----- - -You can run it on the cluster by executing: - -[source] ----- -kamel run run-hello.yaml ----- - -Integrations can be written in any supported Camel DSL. We are collecting examples in our https://github.com/apache/camel-k/[Camel K GitHub repository]. - -[[monitoring-integration]] -== Monitoring the application status +```yaml +apiVersion: camel.apache.org/v1 +kind: Integration +metadata: + name: my-integration +spec: + sources: + - content: | + import org.apache.camel.builder.RouteBuilder; + + public class Test extends RouteBuilder { + @Override + public void configure() throws Exception { + from("timer:java?period=1000") + .setBody() + .simple("Hello Camel from ${routeId}") + .log("${body}"); + } + } + name: Test.java +``` -Camel K integrations follow a lifecycle composed of several steps before getting into the `Running` state. +Save the above as `my-integration.yaml` and apply with `kubectl apply -f my-integration.yaml`. Once the resource is stored in the cloud, the operator will take care to build, deploy and run the Camel application for you. 
-You can check the status of all integrations by executing the following command:
+You can monitor the Integration with `kubectl get it -w`:
 
-```
-kamel get
+```bash
+NAME             PHASE          READY   RUNTIME PROVIDER   RUNTIME VERSION   CATALOG VERSION   KIT                        REPLICAS
+my-integration   Building Kit           quarkus            3.8.1             3.8.1             kit-crbekb6n5tgc73cb1tkg
+my-integration   Deploying              quarkus            3.8.1             3.8.1             kit-crbekb6n5tgc73cb1tkg
+my-integration   Running        False   quarkus            3.8.1             3.8.1             kit-crbekb6n5tgc73cb1tkg   0
+my-integration   Running        False   quarkus            3.8.1             3.8.1             kit-crbekb6n5tgc73cb1tkg   1
+my-integration   Running        False   quarkus            3.8.1             3.8.1             kit-crbekb6n5tgc73cb1tkg   1
+my-integration   Running        True    quarkus            3.8.1             3.8.1             kit-crbekb6n5tgc73cb1tkg   1
 ```
 
-[[logging-integration]]
-== Log the standard output
+NOTE: we're using the shortname `it` which can be used instead of the longer `integration`
 
-Camel K integrations follow a lifecycle composed of several steps before getting into the `Running` state.
+The first time you run the application it may take a few minutes as it needs to download dependencies and build the base images that will be reused later. You may notice certain parameters like the phase and the readiness of the application. You can also see the runtime information and the number of replicas running for this application. You will learn more about IntegrationKit and other aspects in the other sections of the documentation.
-You can check the status of all integrations by executing the following command: +You can now check the Pods which are running the application and log to see what's going on: ``` -kamel logs hello +kubectl get pods +NAME READY STATUS RESTARTS AGE +my-integration-7d86444646-62497 1/1 Running 0 118s + +kubectl logs my-integration-7d86444646-62497 +2024-09-03 10:53:04,370 INFO [org.apa.cam.k.Runtime] (main) Apache Camel K Runtime 3.8.1 +2024-09-03 10:53:04,378 INFO [org.apa.cam.qua.cor.CamelBootstrapRecorder] (main) Bootstrap runtime: org.apache.camel.quarkus.main.CamelMainRuntime +2024-09-03 10:53:04,389 INFO [org.apa.cam.mai.MainSupport] (main) Apache Camel (Main) 4.4.1 is starting +2024-09-03 10:53:04,973 INFO [org.apa.cam.k.sup.SourcesSupport] (main) Loading routes from: SourceDefinition{name='Test', language='java', type='source', location='file:/etc/camel/sources/Test.java', } +2024-09-03 10:53:15,275 INFO [org.apa.cam.imp.eng.AbstractCamelContext] (main) Apache Camel 4.4.1 (camel-1) is starting +2024-09-03 10:53:15,471 INFO [org.apa.cam.imp.eng.AbstractCamelContext] (main) Routes startup (started:1) +2024-09-03 10:53:15,471 INFO [org.apa.cam.imp.eng.AbstractCamelContext] (main) Started route1 (timer://java) +2024-09-03 10:53:15,472 INFO [org.apa.cam.imp.eng.AbstractCamelContext] (main) Apache Camel 4.4.1 (camel-1) started in 194ms (build:0ms init:0ms start:194ms) +2024-09-03 10:53:15,479 INFO [io.quarkus] (main) camel-k-integration 2.5.0-SNAPSHOT on JVM (powered by Quarkus 3.8.3) started in 30.798s. +2024-09-03 10:53:15,480 INFO [io.quarkus] (main) Profile prod activated. 
+2024-09-03 10:53:15,480 INFO [io.quarkus] (main) Installed features: [camel-bean, camel-core, camel-java-joor-dsl, camel-k-core, camel-k-runtime, camel-kubernetes, camel-timer, cdi, kubernetes-client, smallrye-context-propagation, vertx] +2024-09-03 10:53:16,478 INFO [route1] (Camel (camel-1) thread #1 - timer://java) Hello Camel from route1 +2024-09-03 10:53:17,470 INFO [route1] (Camel (camel-1) thread #1 - timer://java) Hello Camel from route1 +... ``` -[[dev-mode-integration]] -== Running in dev mode +NOTE: if the above example failed, have a look at xref:troubleshooting/troubleshooting.adoc[how to troubleshoot a Camel K Integration]. -Camel K provide a very nice **dev mode** feature that will allow you to apply any change to your Integration code reactively. Check out the xref:running/dev-mode.adoc[Camel K dev mode] +== YAML DSL -[[running-model]] -== Camel K development model +In the example above we've seen how to run a Java DSL Camel application. You can use any other xref:languages/languages.adoc[Camel DSL compatible] in the same way. However, the Yaml DSL is a bit peculiar as it is a first class citizen for Camel K. Since the Integration is already a Yaml specification, then, Camel K provides a first class mechanism to embed the Yaml DSL in the Integration `.spec.flows`: -The idea of Camel K is to simplify the deployment of your Integration to the cloud. For this reason, there are certain limitations you need to take into account, depending on the Camel DSL you're using. For instance, taking Java DSL as reference, you are generally requested to maintain your route development within a single class. As soon as your development gain complexity, the suggestion is to keep the business logic in an external dependency and use such dependency from your class containing the route definition. See more about xref:configuration/dependencies.adoc[how to use a dependency]. 
- -[[running-integration-dsl]] -== Running locally - -During development, for most of the cases, if you are not using Camel K traits or specific Camel Quarkus configuration, you can use Camel JBang to run the Integration. This is a quick way to start testing your Integration, adding only at a later stage of the developments any configuration related to the cluster where you're going to run the Integration. See link:/blog/2022/11/camel-k-jbang/[how to test Camel K with Camel JBang] blog. - -[[no-cli-integration]] -== Running without CLI - -You can run your integration also if you have no CLI available. `kamel` CLI manages a lot of fancy features but you can create an xref:apis/camel-k.adoc#_camel_apache_org_v1_Integration[Integration Custom Resource] with all the configuration expected to run your application. - -As an example, let's get the result of a xref:running/dry-run.adoc[Dry Run] execution for a sample route: - -``` -kamel run Sample.java -o yaml -``` - -It will return the expected Integration custom resource (you can type it manually according to the specification linked above): - -``` +```yaml apiVersion: camel.apache.org/v1 kind: Integration metadata: - creationTimestamp: null - name: my-integration - namespace: default + name: my-it spec: - sources: - - content: " - import org.apache.camel.builder.RouteBuilder; - public class Sample extends RouteBuilder { - @Override - public void configure() - throws Exception { - from(\"timer:tick\") - .log(\"Hello Integration!\"); - } - }" - name: Sample.java -status: {} + flows: + - from: + parameters: + period: "1000" + steps: + - setBody: + simple: Hello Camel from ${routeId} + - log: ${body} + uri: timer:yaml ``` -We can save this custom resource in a yaml file, ie, `my-integration.yaml`. Once done, we can run the integration storing the Integration custom resource, via `kubectl`, UI, API call or whichever mean we have to call our Kubernetes cluster. 
In our example, we'll do this through `kubectl` CLI: +You can see the specification is a lot neater, so, try choosing Yaml DSL whenever it's possible. -``` -kubectl apply -f my-integration.yaml -... -integration.camel.apache.org/my-integration created -``` +== Using Kamel CLI -The operator will now take care to run the Integration accordingly. +Camel K works very well with any Kubernetes compatible user interface (such as CLIs like `kubectl`, `oc` or any other visual tooling). However we do provide a simple CLI that helps you perform most of the Integration work in an easier fashion: it's xref:running/running-cli.adoc[`kamel` CLI]. diff --git a/docs/modules/ROOT/pages/running/camel-runtimes.adoc b/docs/modules/ROOT/pages/running/self-managed.adoc similarity index 71% rename from docs/modules/ROOT/pages/running/camel-runtimes.adoc rename to docs/modules/ROOT/pages/running/self-managed.adoc index ebe5834729..841c8ed3bc 100644 --- a/docs/modules/ROOT/pages/running/camel-runtimes.adoc +++ b/docs/modules/ROOT/pages/running/self-managed.adoc @@ -1,6 +1,6 @@ -= Camel Runtimes (aka "sourceless" Integrations) += Self Managed Build Integrations -Camel K operator is traditionally in charge to perform a build from a Camel DSL source. The resulting Integration depends directly on an IntegrationKit, which is a reusable custom resource backing the final container image that your application will run. In the last versions, the only runtime the operator can build is Camel Quarkus (via Camel K Runtime project). +Camel K operator is traditionally in charge to perform a build from a Camel DSL source. The resulting Integration depends directly on an xref:architecture/cr/integration-kit.adoc[IntegrationKit], which is a reusable custom resource backing the final container image that your application will run. The only runtime the operator can build is Camel Quarkus (via Camel K Runtime project). However Camel K can run any runtime available in Apache Camel.
This is possible only when the Camel application was previously built and packaged into a container image externally. Mind that if you run through this option, some of the features offered by the operator may not be available. For example, you won't be able to discover Camel capabilities because the source is not available to the operator but embedded in the container image. @@ -24,9 +24,9 @@ If all is good, in a few seconds (there is no build involved) you should have yo [[camel-runtime-discovery]] == Camel Runtime version discovery -Every Camel application requires a `CamelCatalog` object to know how to perform certain runtime configuration. When you run a **sourceless Integrations** there is no easy way to automatically discover for which runtime your application was built. In this case, we suggest you to specify the `camel.runtime-version` trait in order to improve the compatibility between the operator configuration and the specific runtime you're running. If no runtime version is specified, then, as default, the operator will use the one specified in the IntegrationPlatform. +Every Camel application requires a `CamelCatalog` object to know how to perform certain runtime configuration. When you run **self managed build Integrations** there is no easy way to automatically discover for which runtime your application was built. In this case, we suggest you to specify the `camel.runtime-version` trait in order to improve the compatibility between the operator configuration and the specific runtime you're running. If no runtime version is specified, then, as default, the operator will use the one specified in the IntegrationPlatform. [[traits]] == Trait configuration -Certain Camel K operational aspect may be driven by traits. When you're building the application outside the operator, some of those traits may not be executed as they are executed during the building phase that we are skipping when running **sourceless Integrations**. 
There is also no possible way to auto-tune certain traits that require the presence of the source. In this case, you should instead provide a trait configuration with the values that are required by your Integration (for example, Knative, Service and other deployment traits). +Certain Camel K operational aspects may be driven by traits. When you're building the application outside the operator, some of those traits may not be executed as they are executed during the building phase that we are skipping when running **self managed build Integrations**. There is also no possible way to auto-tune certain traits that require the presence of the source. In this case, you should instead provide a trait configuration with the values that are required by your Integration (for example, Knative, Service and other deployment traits). diff --git a/docs/modules/ROOT/pages/running/import.adoc b/docs/modules/ROOT/pages/running/synthetic.adoc similarity index 95% rename from docs/modules/ROOT/pages/running/import.adoc rename to docs/modules/ROOT/pages/running/synthetic.adoc index 9fa8099b8f..ba824194d8 100644 --- a/docs/modules/ROOT/pages/running/import.adoc +++ b/docs/modules/ROOT/pages/running/synthetic.adoc @@ -1,6 +1,8 @@ -= Importing existing Camel applications += Synthetic Integrations -You may have already a Camel application running on your cluster. You may have created it via a manual deployment, a CICD or any other deployment mechanism you have in place. Since the Camel K operator is meant to operate any Camel application out there, then, you will be able to import it and monitor in a similar fashion of any other Camel K **managed Integration**. +You may have already a Camel application running on your cluster. You may have created it via a manual deployment, a CICD or any other deployment mechanism you have in place. 
Since the Camel K operator is meant to operate any Camel application out there, then, you will be able to import it and monitor in a similar fashion of any other Camel K **managed Integration**. These Integrations are known as **Synthetic Integrations**. + +== Importing existing Camel applications This feature is disabled by default. In order to enable it, you need to run the operator deployment with an environment variable, `CAMEL_K_SYNTHETIC_INTEGRATIONS`, set to `true`. diff --git a/docs/modules/ROOT/pages/troubleshooting/debugging.adoc b/docs/modules/ROOT/pages/troubleshooting/debugging.adoc index 26f0494b1c..24ffdd1833 100644 --- a/docs/modules/ROOT/pages/troubleshooting/debugging.adoc +++ b/docs/modules/ROOT/pages/troubleshooting/debugging.adoc @@ -1,8 +1,7 @@ [[debugging]] = Debugging Camel K Integrations -Sometimes an Integration can fail or behave unexpectedly for unknown reasons, and a developer needs to investigate the cause of such behavior. -Attaching a Java debugger to an integration is a common way to start the investigation. +Sometimes an Integration can fail or behave unexpectedly for unknown reasons, and a developer needs to investigate the cause of such behavior. Attaching a Java debugger to an integration is a common way to start the investigation. Even if the Integration Pods run on a Kubernetes cluster, it's very easy to attach a Java debugger to the remote Integration container using the CLI. @@ -19,8 +18,7 @@ Suppose you've started an Integration using the following command: $ kamel run examples/Sample.java ---- -An Integration named `sample` should be running in the cluster. -You can use the `kamel debug` command to put it in _debug_ mode: +An Integration named `sample` should be running in the cluster. 
You can use the `kamel debug` command to put it in _debug_ mode: [source,console] ---- diff --git a/docs/modules/ROOT/pages/troubleshooting/known-issues.adoc b/docs/modules/ROOT/pages/troubleshooting/known-issues.adoc deleted file mode 100644 index 413958af33..0000000000 --- a/docs/modules/ROOT/pages/troubleshooting/known-issues.adoc +++ /dev/null @@ -1,14 +0,0 @@ -[[known-issues]] -= Known Issues - -== `[Openshift] Repeated install/uninstall and removal of CamelCatalog leads to re-creation of builder image` - -Openshift's internal container image registry operates on image streams instead of directly on images. As a side effect in a non production usage it can lead to an increase of the container image storage. This is because the `uninstall` command will remove the CamelCatalog but can't remove the actual container image. - -In case you don't need any change in the CamelCatalog, the solutions is to use the following flag when uninstalling if you plan to install again after: - -[source,console] ----- -kamel uninstall --skip-camel-catalogs ----- - diff --git a/docs/modules/traits/pages/jvm.adoc b/docs/modules/traits/pages/jvm.adoc index 20f7b2c632..4a6086c4ab 100755 --- a/docs/modules/traits/pages/jvm.adoc +++ b/docs/modules/traits/pages/jvm.adoc @@ -66,7 +66,7 @@ Deprecated: no longer in use. The `jar` parameter is something the user should not worry about, unless that, for any reason, he wants to specify which is the executable dependency to use. Mind that, in order to do that, the base image used to build the container require a java binary executable from path (ie, `/usr/bin/java`). -This parameters enables also the possibility to use the trait when running a synthetic IntegrationKit (ie, "sourceless" Integrations). 
In such circumstances, the user can run a Camel application built externally and make use of the trait configuration as well as for example: +This parameter also enables the possibility to use the trait when running self managed build Integrations. In such circumstances, the user can run a Camel application built externally and make use of the trait configuration as well as for example: [source,console] $ kamel run --image docker.io/squakez/my-camel-sb:1.0.0 -t jvm.jar=/deployments/my-camel-app.jar -t jvm.options=-Xmx1024M diff --git a/docs/modules/traits/pages/traits.adoc b/docs/modules/traits/pages/traits.adoc index cbf1d23631..8803acefb2 100644 --- a/docs/modules/traits/pages/traits.adoc +++ b/docs/modules/traits/pages/traits.adoc @@ -1,35 +1,27 @@ [[traits]] = Traits -Traits are high level named features of Camel K that can be enabled/disabled or configured to customize the -behavior of the final integration. +A Camel K trait is a feature which encapsulates a specific Kubernetes behavior. The traits are used to fine tune the building and deployment process of a Camel workload on the cloud. -Camel K provide sensible defaults for all such traits, taking into account the details of the target platform where -the integration is going to run into. However, it's possible for a **expert user** to configure them in -order to obtain a different behavior. +Camel K provides sensible defaults for all such traits, taking into account the details of the target platform where the integration is going to run into. However, it's possible for an **expert user** to configure them in order to obtain a different behavior. [[traits-configuration]] == Configuration -Each trait has a unique ID that can be used to configure it using the command line tool. - -E.g. in order to disable the creation of a Service for a integration, a user can execute: +Each trait has a unique ID that can be used to configure it using the CLI or changing the Integration `.spec.traits` parameter. 
For example, in order to disable the creation of a Service for an Integration, a user can execute: [source] ---- kamel run --trait service.enabled=false file.yaml ---- -The flag `--trait` can be also abbreviated with `-t`. +NOTE: the flag `--trait` can be also abbreviated with `-t`. -The `enabled` property is available on all traits and can be used to enable/disable them. All traits have their own -internal logic to determine if they need to be enabled when the user does not activate them explicitly. +The `enabled` property is available on all traits and can be used to enable/disable them. All traits have their own internal logic to determine if they need to be enabled when the user does not activate them explicitly. -Some traits share also a `auto` property that can be used to enable/disable auto-configuration of the trait based on the -environment. The auto-configuration mechanism is able to enable/disable the trait when the `enabled` property is not explicitly -set by the user and also change the trait configuration. +Some traits share also a `auto` property that can be used to enable/disable auto-configuration of the trait based on the environment. The auto-configuration mechanism is able to enable/disable the trait when the `enabled` property is not explicitly set by the user and also change the trait configuration. -NOTE: Some traits are applicable only to specific platforms (see the "profiles" in the trait description page). +NOTE: some traits are applicable only to specific platforms (see the "profiles" in the trait description page). A trait may have additional properties that can be configured by the end user. @@ -54,12 +46,11 @@ metadata: spec: traits: jvm: - configuration: - classpath: /path/to/my.jar + classpath: /path/to/my.jar ... ---- -The `.spec.traits` holds an array of traits, identified by their id (`jvm`, in this case). Then, the `.jvm.configuration.classpath` is the property we want to set. 
If you need to set a trait directly in the `Integration` spec, then, you should proceed in the way illustrated above. +The `.spec.traits` holds an array of traits, identified by their id (`jvm`, in this case). Then, the `.jvm.classpath` is the property we want to set. If you need to set a trait directly in the `Integration` spec, then, you should proceed in the way illustrated above. [[traits-list]] == List of available traits diff --git a/e2e/common/runtimes/runtimes_test.go b/e2e/common/runtimes/runtimes_test.go index a08550e9b7..14f60df9c4 100644 --- a/e2e/common/runtimes/runtimes_test.go +++ b/e2e/common/runtimes/runtimes_test.go @@ -34,7 +34,7 @@ import ( v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" ) -func TestSourceLessIntegrations(t *testing.T) { +func TestSelfManagedBuildIntegrations(t *testing.T) { t.Parallel() WithNewTestNamespace(t, func(ctx context.Context, g *WithT, ns string) { var cmData = make(map[string]string) diff --git a/pkg/cmd/run_test.go b/pkg/cmd/run_test.go index 0cfa30a925..9ca7457ffc 100644 --- a/pkg/cmd/run_test.go +++ b/pkg/cmd/run_test.go @@ -934,7 +934,7 @@ func TestRunOutputWithoutKubernetesCluster(t *testing.T) { require.NoError(t, err) } -func TestSourceLessIntegration(t *testing.T) { +func TestSelfManagedBuildIntegration(t *testing.T) { runCmdOptions, runCmd, _ := initializeRunCmdOptionsWithOutput(t) output, err := test.ExecuteCommand(runCmd, cmdRun, "--image", "docker.io/my-org/my-app:1.0.0", "-o", "yaml", "-t", "mount.configs=configmap:my-cm") assert.Equal(t, "yaml", runCmdOptions.OutputFormat) diff --git a/pkg/trait/jvm_test.go b/pkg/trait/jvm_test.go index 0eea06c2f7..8365c80df3 100644 --- a/pkg/trait/jvm_test.go +++ b/pkg/trait/jvm_test.go @@ -90,7 +90,7 @@ func TestConfigureJvmTraitInWrongJvmDisabled(t *testing.T) { assert.Equal(t, expectedCondition, condition) } -func TestConfigureJvmTraitExecutableSourcelessContainer(t *testing.T) { +func TestConfigureJvmTraitExecutableSelfManagedBuildContainer(t *testing.T) { 
trait, environment := createNominalJvmTest(v1.IntegrationKitTypePlatform) environment.Integration.Spec.Traits.Container = &traitv1.ContainerTrait{ Image: "my-image", @@ -105,7 +105,7 @@ func TestConfigureJvmTraitExecutableSourcelessContainer(t *testing.T) { ) } -func TestConfigureJvmTraitExecutableSourcelessContainerWithJar(t *testing.T) { +func TestConfigureJvmTraitExecutableSelfManagedBuildContainerWithJar(t *testing.T) { trait, environment := createNominalJvmTest(v1.IntegrationKitTypePlatform) environment.Integration.Spec.Traits.Container = &traitv1.ContainerTrait{ Image: "my-image", @@ -142,7 +142,7 @@ func TestConfigureJvmTraitExecutableSourcelessContainerWithJar(t *testing.T) { }, d.Spec.Template.Spec.Containers[0].Args) } -func TestConfigureJvmTraitExecutableSourcelessContainerWithJarAndOptions(t *testing.T) { +func TestConfigureJvmTraitExecutableSelfManagedBuildContainerWithJarAndOptions(t *testing.T) { trait, environment := createNominalJvmTest(v1.IntegrationKitTypePlatform) environment.Integration.Spec.Traits.Container = &traitv1.ContainerTrait{ Image: "my-image",