diff --git a/.envrc.example b/.envrc.example index e464c745..b2c34347 100644 --- a/.envrc.example +++ b/.envrc.example @@ -1,23 +1,16 @@ -export KUBECONFIG=$PWD/.mgt-cluster-kubeconfig.yaml +# direnv configuration — copy to .envrc and adjust +# The justfile and hack/ scripts also read from .env (dotenv-load) -export PATH=$PWD/hack/tools/bin:$PATH +# Cluster stack selection +export PROVIDER=openstack +export CLUSTER_STACK=scs2 -export PROVIDER=docker +# OCI registry (optional, defaults to ttl.sh for dev builds) +# export OCI_REGISTRY=ghcr.io +# export OCI_REPOSITORY=SovereignCloudStack/cluster-stacks -# Versions -export K8S_VERSION=1-27 -export CAPI_VERSION=v1.5.1 -export CAPD_VERSION=$CAPI_VERSION +# GitHub token for higher API rate limits (optional) +# export GITHUB_TOKEN=ghp_xxx -# Names -export NAMESPACE=scs-cs -export CLUSTER_CLASS_NAME=ferrol -export CLUSTER_NAME=cs-cluster -export CLUSTER_TOPOLOGY=true - -# Debugging -export DISABLE_VERSIONCHECK="true" - -# Release -export RELEASE_CLUSTER_CLASS=$CLUSTER_CLASS_NAME -export RELEASE_KUBERNETES_VERSION=1-27 +# Management cluster (optional, for testing) +# export KUBECONFIG=$PWD/.mgt-cluster-kubeconfig.yaml diff --git a/docs/build-system.md b/docs/build-system.md new file mode 100644 index 00000000..322eafae --- /dev/null +++ b/docs/build-system.md @@ -0,0 +1,143 @@ +# Build System + +The cluster stacks build system uses bash scripts in `hack/` orchestrated by [just](https://github.com/casey/just). 
+ +## Prerequisites + +**Required tools:** +- `bash`, `helm`, `yq` (mikefarah), `git`, `curl`, `tar`, `jq` + +**Optional:** +- `oras` — for OCI registry publishing +- `python3` + `PyYAML` — for `docugen.py` only +- `just` — task runner (scripts also work standalone) + +**Nix dev shell (recommended):** +```bash +# Enter the development environment with all tools +nix develop +``` + +**Container alternative:** +```bash +# Build the tools container +just container-build + +# Run any command inside +just container-run build-all +``` + +## Configuration + +Copy `task.env.example` to `.env` and set your provider/stack: + +```bash +PROVIDER=openstack # or: docker +CLUSTER_STACK=scs2 # or: scs +OCI_REGISTRY=ghcr.io # for publishing +OCI_REPOSITORY=myorg/cluster-stacks +``` + +The `justfile` uses `set dotenv-load` to automatically read `.env`. + +## Commands + +### Building + +| Command | Description | +|---------|-------------| +| `just build 1.34` | Build for one K8s version | +| `just build-all` | Build for all versions in versions.yaml | +| `just publish 1.34` | Build + publish to OCI registry | +| `just publish-all` | Build + publish all versions | +| `just clean` | Clean `.release/` and output directories | + +The build system: +1. Copies the cluster-class chart, patches `Chart.yaml` with the correct version +2. For each addon in `cluster-addon/`, resolves the version from `versions.yaml` and patches the addon's `Chart.yaml` +3. Runs `helm package` for each chart +4. Bundles everything into a release artifact +5. 
Optionally publishes to an OCI registry via `oras push` + +### Version Management + +| Command | Description | +|---------|-------------| +| `just update-versions --check` | Check for K8s patch updates, new minors, and addon bumps | +| `just update-versions --apply` | Apply all updates to `versions.yaml` | +| `just update-versions-all --check` | Check for updates across all stacks | + +`update-versions` fetches the latest Kubernetes releases from GitHub tags and queries +Helm repo indexes for K8s-tied addon versions (e.g., CCM, CSI). It automatically: +- Bumps patch versions for existing K8s minors +- Adds new K8s minor versions (with correct Ubuntu image mapping) +- Removes EOL minor versions (keeps the 4 most recent) + +Set `GITHUB_TOKEN` for higher API rate limits in CI (optional, 60 req/h without). + +### Addon Management + +| Command | Description | +|---------|-------------| +| `just update-addons` | Interactive: check upstream Helm repos for new versions | +| `just update-addons --yes` | Auto-approve all updates | +| `just update-addons-all` | Update addons for all providers/stacks | + +`update-addons` reads the Helm repository URLs from each addon's `Chart.yaml`, queries for new versions, and updates both `Chart.yaml` and `versions.yaml` (for K8s-version-tied addons). 
+ +### Utilities + +| Command | Description | +|---------|-------------| +| `just matrix` | Show version matrix (K8s versions, addon versions, CS versions) | +| `just generate-resources 1.34` | Generate ClusterStack + Cluster YAML for testing | +| `just generate-image-manifests` | Generate OpenStack Image CRD manifests | +| `just generate-docs` | Generate configuration docs from ClusterClass variables | + +### Provider Shortcuts + +Override the default provider/stack for any command: + +```bash +PROVIDER=docker CLUSTER_STACK=scs2 just build-all +``` + +## Scripts Reference + +All scripts in `hack/` take the stack directory as the first argument: + +```bash +# Direct invocation (without just) +./hack/build.sh providers/openstack/scs2 --version 1.34 +./hack/build.sh providers/openstack/scs2 --version 1.34 --publish +./hack/build.sh providers/openstack/scs2 --all +./hack/update-versions.sh providers/openstack/scs2 --check +./hack/update-versions.sh providers/openstack/scs2 --apply +./hack/update-addons.sh providers/openstack/scs2 +./hack/update-addons.sh providers/openstack/scs2 --yes +./hack/show-matrix.sh providers/openstack/scs2 +./hack/generate-resources.sh providers/openstack/scs2 --version 1.34 +./hack/generate-image-manifests.sh providers/openstack/scs2 +``` + +## Linting + +```bash +yamllint . +``` + +Configuration: `.yamllint.yml` — line-length disabled, Helm templates excluded. This is enforced in CI. 
+ +## Helm Template Validation + +You can validate rendered templates locally: + +```bash +# Render the chart +helm template test providers/openstack/scs2/cluster-class/ + +# Validate against CRD schemas (requires kubeconform) +helm template test providers/openstack/scs2/cluster-class/ | kubeconform -summary -strict \ + -schema-location default \ + -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' +``` diff --git a/docs/overview.md b/docs/overview.md index 60d45ec4..371246ee 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -1,148 +1,110 @@ # Overview -## Cluster Stacks +Cluster Stacks is the reference implementation for defining and managing Kubernetes clusters via [Cluster API](https://cluster-api.sigs.k8s.io/) (CAPI). Each **cluster stack** is a versioned, self-contained package that bundles everything needed to create production-grade Kubernetes clusters on a given infrastructure provider. -Cluster Stacks is a comprehensive framework and reference implementations for defining and managing Kubernetes clusters via the Cluster API. It is designed to cater to multiple providers and supports a broad range of Kubernetes versions, offering a standardized approach to managing and configuring Kubernetes clusters. +## Architecture -It encapsulates multiple layers, including node configuration, Cluster API setup, and application-level configurations, such as the Container Network Interface (CNI). By packaging these interdependent configurations, the cluster stack allows for efficient management and deployment of Kubernetes clusters, offering standardized, resilient, and self-managed Kubernetes environments. +A cluster stack lives in `providers/{provider}/{stack}/` and consists of two main components: -## 🔧 Usage - -Follow our [quickstart guide](providers/openstack/quickstart.md) for an introduction on how to deploy cluster stacks on openstack. 
- -## Layers of a Cluster Stack - -In essence, a cluster stack is an amalgamation of various components each of which serves a crucial role in setting up, maintaining, and operating a Kubernetes cluster. In the context of our framework, we categorize these components into three core layers: `cluster-class`, `cluster-addons`, and `node-images`. Let's delve deeper into understanding each of these layers: - -### 📚 Cluster Class - -The Cluster Class serves as a blueprint for creating and configuring Kubernetes clusters consistently. It encapsulates various aspects of a cluster, including: - -* The infrastructure provider details -* Networking configurations -* Cluster-class templating -* Other cluster-specific settings - -Essentially, it defines the desired configuration and properties of a Kubernetes cluster. It leverages the [ClusterClass](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/) feature of Cluster API, which provides a declarative, Kubernetes-style API for cluster creation, configuration, and management. Any change in this layer or in the node-image or cluster-addon layers triggers a version bump in the cluster class, hence the cluster stack. - -### 🎁 Cluster Addons - -Cluster Addons are core components or services required for the Kubernetes cluster to function correctly and efficiently. These are not user-facing applications but rather foundational services critical to the operation and management of a Kubernetes cluster. They're usually installed and configured after the cluster infrastructure has been provisioned and before the cluster is ready to serve workloads. - -Cluster addons encompass a variety of functionalities, including but not limited to: - -* Container Network Interfaces (CNI): These are plugins that facilitate container networking. A CNI is integral to setting up network connectivity and ensuring communication between pods in a Kubernetes cluster. 
-* Cloud Controller Manager (CCM): The CCM is a Kubernetes control plane component that embeds the cloud-specific control logic. Its role is to manage the communication with the underlying cloud services. -* Konnectivity service: This is a network proxy that enables connectivity from the control plane to nodes and vice versa. It is a critical component that supports Kubernetes API server connectivity. -* Metrics Server: A cluster-wide aggregator of resource usage data, Metrics Server collects CPU, memory, and other metrics from nodes and pods, enabling features like Horizontal Pod Autoscaling. - -It's important to note that cluster addons are not user-provided applications or services that can be installed multiple times, such as ingress controllers, application-level monitoring tools, or user-facing APIs. Those are left to the discretion and responsibility of the users, who install and manage them according to their specific needs and preferences. +``` +providers/openstack/scs2/ + csctl.yaml # Stack metadata (provider, name, K8s version) + clusteraddon.yaml # Addon lifecycle hooks (when to install what) + versions.yaml # K8s version -> addon version mapping + metadata + cluster-class/ # Helm chart: ClusterClass CRD + Chart.yaml + values.yaml # Variable defaults + image references + templates/ + cluster-class.yaml + kubeadm-control-plane-template.yaml + kubeadm-config-template-worker-*.yaml + *-machine-template-*.yaml + *-cluster-template.yaml + cluster-addon/ # Collection of Helm sub-charts + cni/ # Cilium + metrics-server/ + occm/ # OpenStack Cloud Controller Manager (provider-specific) + cinder-csi/ # Cinder CSI (provider-specific) +``` -Each addon version is independent and can be updated separately. However, a change in this layer also necessitates a version bump in the cluster class and the cluster stack, which is reflected in the metadata.yaml. 
+### Cluster Class -### 🎞️ Node Images +The **cluster-class** is a single Helm chart that defines the [ClusterClass](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/) CRD. It contains: -Node images provide the foundation for the operating system environment on each node of a Kubernetes cluster. They are typically a minimal operating system distribution, like a lightweight Linux distro, which may also include container runtime components such as Docker or containerd. +- The ClusterClass resource with variables, patches, and template references +- Infrastructure templates (e.g. OpenStackMachineTemplate, DockerClusterTemplate) +- Control plane template (KubeadmControlPlaneTemplate) +- Worker bootstrap template (KubeadmConfigTemplate) -Node images are responsible for providing the necessary environment and dependencies to support Kubernetes components and workloads. This includes components like kubelet, kube-proxy, and other necessary system utilities and libraries. +Variables declared in the ClusterClass allow per-cluster customization (flavors, disk sizes, security groups, OIDC, etc.) without modifying the stack itself. Default values are defined in `values.yaml` and referenced via `{{ .Values.variables.* }}` in the templates. -The version of a node image can be different from that of the cluster stack or the cluster class. However, an update to a node image will trigger a version bump in the cluster class and hence the cluster stack. +### Cluster Addons -In the cluster-stacks repository's directory structure, the build instructions for Node Images are always placed within the respective directory. The instructions outline the steps and configurations required to create the Node Image automatically. The specific method for releasing the Node Image may vary based on the provider's capabilities and requirements. 
+**Cluster addons** are core components installed onto the workload cluster by the [cluster-stack-operator](https://github.com/SovereignCloudStack/cluster-stack-operator). They are **not** user applications — they are foundational services: -During the development phase, the build instructions serve as a reference within the repository itself. These instructions may utilize tools like Packer or other image-building techniques. This allows for flexibility and customization, enabling users to define their Node Images according to specific needs and requirements. +| Addon | Purpose | Present in | +|-------|---------|------------| +| **Cilium** (CNI) | Pod networking and network policy | All stacks | +| **metrics-server** | Node/pod resource metrics, enables HPA | All stacks | +| **OCCM** | OpenStack Cloud Controller Manager | OpenStack stacks | +| **Cinder CSI** | Persistent volume provisioning via OpenStack Cinder | OpenStack stacks | -However, when it comes to the release of the cluster stack, the Node Image can be provided in different ways depending on the capabilities of the provider or the desired deployment method. Here are a few examples: +Addon installation timing is controlled by `clusteraddon.yaml` using [CAPI lifecycle hooks](https://cluster-api.sigs.k8s.io/tasks/experimental-features/runtime-sdk/implement-lifecycle-hooks): -1. **URL on a remote endpoint**: In some cases, `providers` may support deploying a Node Image directly from a URL. In this scenario, the Node Image referenced in the `cluster stack`, specifically in the `cluster class`, would be provided as a URL pointing to a pre-built image accessible remotely. -1. **Artifact**: If the provider supports artifacts, the Node Image can be released as an artifact, such as a qcow2 file. The artifact would be uploaded to the provider, and the `cluster stack` references the artifact for node provisioning. -1. 
**Build Instructions**: In cases where the provider doesn't support direct URL deployment or artifact-based provisioning, the build instructions defined within the repository become critical. The build instructions serve as a comprehensive guide to build the Node Image, specifying all the necessary steps and configurations. +- `AfterControlPlaneInitialized` — CNI (required before workers can join) +- `BeforeClusterUpgrade` — all addons (ensures compatibility during upgrades) -Regardless of the release method, the cluster stack, specifically the cluster class, references the appropriate Node Image to be used for node provisioning. +### Node Images -By allowing flexibility in the release and deployment methods of Node Images, the cluster stack framework caters to various provider capabilities and user requirements. This adaptability ensures the cluster stack can be deployed in diverse environments while maintaining a consistent and manageable approach to managing Kubernetes clusters. +For OpenStack, node images are pre-built Ubuntu images with containerd and kubelet. The image name encodes the Kubernetes version (e.g. `ubuntu-capi-image-v1.32.5`). The `versions.yaml` file maps Kubernetes versions to Ubuntu releases: -## 🌐 IaaS Provider, Kubernetes Service Provider, and Cluster API -In the context of the `cluster-stacks`, we distinguish between two types of providers: +- K8s 1.32 and earlier: Ubuntu 22.04 +- K8s 1.33+: Ubuntu 24.04 -An **IaaS Provider**, in general, offers Infrastructure as a Service - providing the fundamental compute, storage, and network resources on which workloads can be run. In the context of cluster-stacks, an IaaS Provider specifically refers to an entity that owns an API for their infrastructure. If an organization uses a common infrastructure API, such as OpenStack, they are not considered an IaaS Provider in this context. 
However, if the organization owns the API for its infrastructure, it becomes an IaaS Provider for the purposes of cluster-stacks. +For Docker (development/testing), the `kindest/node` images from the kind project are used. -A **Kubernetes Service Provider**, on the other hand, is an entity that implements a cluster stack. They do so on top of the IaaS Providers, potentially spanning across multiple IaaS Providers. They use the IaaS Provider's infrastructure services and integrate them into their cluster stack implementations. +## Available Stacks -The **Cluster API (CAPI)** is a Kubernetes project aimed at simplifying the process of managing Kubernetes clusters. It offers a declarative API that automates the creation, configuration, and management of clusters, providing a standardized way to interact with Kubernetes. The cluster stack approach leverages CAPI to deliver self-managed Kubernetes clusters. +| Provider | Stack | API Version | Description | +|----------|-------|-------------|-------------| +| `openstack` | `scs` | v1beta1 | Legacy OpenStack stack | +| `openstack` | `scs2` | v1beta2 | Production OpenStack stack with updated variable names and v1beta2 core resources | +| `docker` | `scs` | v1beta1 | Legacy Docker stack for local development | +| `docker` | `scs2` | v1beta2 | Docker stack with v1beta2 core resources, production tuning, OIDC support | -## 📌 Defining and Adding Providers -The structure of this repository is specifically designed to handle multiple providers, multiple cluster stacks per provider, and multiple Kubernetes versions per cluster stack. This organized structure allows us to effectively manage, develop, and maintain multiple cluster stacks across various Kubernetes versions and providers, all in a single repository. +> **Note:** "v1beta2" refers to CAPI core resources (ClusterClass, KubeadmControlPlaneTemplate, KubeadmConfigTemplate). 
Infrastructure provider resources (OpenStackMachineTemplate, DockerMachineTemplate) remain at their provider's own API version (currently v1beta1). -### 📁 Repository Structure -The repository maintains a specific structure: +## Versioning -* Each IaaS Provider has a directory under providers. -* Each IaaS Provider can have multiple cluster stack implementations. -* Each cluster stack supports multiple Kubernetes major and minor versions. +Release artifacts follow the naming scheme: ``` -providers/ -└── / - └── / - └── / +{provider}-{stack}-{k8s-major}-{k8s-minor}-{cluster-stack-version} ``` -The directory structure for adding a new provider would look something like this: +Examples: +- `openstack-scs2-1-34-v1` — first stable release for K8s 1.34 +- `docker-scs2-1-35-v0` — dev release for K8s 1.35 -``` -providers/// -# example -providers/openstack/scs/1-28 -``` -This granular, hierarchical structure allows us to manage different versions of Kubernetes and their associated cluster stacks across different providers. +Version semantics: +- **`v0`** = development version (published to ttl.sh with 24h TTL or git hash tag) +- **`v1`, `v2`, ...** = stable versions (auto-incremented by querying the OCI registry) -We decided to support multiple Kubernetes major and minor versions to provide the flexibility to accommodate different implementation requirements of the provider. However, we deliberately chose not to support Kubernetes patch versions directly. The reason is the high frequency of patch versions release (often weekly), which would complicate maintenance efforts significantly. +Any change to the cluster-class, cluster-addons, or node images triggers a version bump of the entire cluster stack. -Instead, we represent Kubernetes patch version updates through changes in our cluster stack version. 
For instance, if a patch version of Kubernetes necessitates a change in the node-image or the cluster-class configuration, this would trigger a version bump in the corresponding cluster stack, hence the cluster class, as reflected in the metadata.yaml. - -In this way, our versioning system, our directory structure, and our approach to Kubernetes versioning are all interlinked, providing us a comprehensive, manageable, and resilient framework for maintaining various Kubernetes distributions or cluster stacks across multiple providers and versions. - -## 📑 Versioning - -Note: This section is subject to change, as our new tool [csctl](https://github.com/SovereignCloudStack/csctl) will incorporate future versioning capabilities. - -A fundamental aspect of the cluster stack approach is the encapsulation of versioning within a cluster stack distribution. Each of the components can be updated independently, leading to a flexible and maintainable system. - -However, the critical point to understand here is the relationship between these component versions and the cluster stack version. Whenever there's a change or an update to either the cluster addon or the node image, the version of the cluster stack must be bumped. And due to the connection between the cluster class and the cluster stack, the cluster class version must be updated to match the new cluster stack version. - -The cluster stack version doesn't simply mirror the versions of its components, but rather, it reflects the "version of change". In essence, the cluster stack version is a reflection of the state of the entire stack as a whole at a particular point in time. Any change in the components warrants a new state, and therefore a new version of the cluster stack. - -So, an update to the cluster addon component will bump the version of the cluster stack, irrespective of the existing version of the node image. The same applies vice versa. 
When such an update occurs, the version of the cluster class is also incremented to align with the new cluster stack version, maintaining the unity of the cluster stack framework. - -This versioning approach ensures a clear and precise track of changes, promoting efficient management, and isolated testing. It offers enhanced resilience for the Kubernetes distribution or the cluster stack, ensuring safe and secure upgrades even in rapid update cycles. It's an efficient method of maintaining stability in the rapidly changing environment of a Kubernetes stack. - -The versioning of the cluster stack is primarily managed through a file named metadata.yaml, located at the root directory of each cluster stack. This file serves as the source of truth for the versioning information of the cluster stack, cluster class, node images, and cluster addons. - -Here is an example of how metadata.yaml could look like: -``` -apiVersion: metadata.clusterstack.x-k8s.io/v1alpha1 -versions: - clusterStack: v3 - kubernetes: v1.27.3 - components: - clusterAddon: v2 - nodeImage: v1 -``` -In this example, the cluster stack (and thus the cluster class) is on version 3, while the cluster addon is on version 2 and node image is on version 1. - -When there's a change or update in the node images or cluster addons, we would bump the version of the cluster stack and cluster class, while leaving the unaffected component's version intact. 
So if the node image was updated, the metadata.yaml might then look like this: +## Repository Structure ``` -apiVersion: metadata.clusterstack.x-k8s.io/v1alpha1 -versions: - clusterStack: v4 - kubernetes: v1.27.3 - components: - clusterAddon: v2 - nodeImage: v2 +providers/ + docker/ + scs/ # v1beta1 Docker stack + scs2/ # v1beta2 Docker stack + openstack/ + scs/ # v1beta1 OpenStack stack (legacy) + scs2/ # v1beta2 OpenStack stack (production) +hack/ # Build and utility scripts +docs/ # Documentation (consumed by Docusaurus) +Taskfile.yml # Task runner definitions +justfile # Just runner definitions +Containerfile # Build container image ``` - -Here, the cluster stack and cluster class versions were updated to v4, the node image version was bumped to v2 due to the changes, while the cluster addon remained on v2 as it was not affected by the update. - -This versioning approach allows us to keep track of changes across different components, manage these components effectively, and conduct isolated testing. This ensures that our Kubernetes distribution or cluster stack remains resilient, and we can perform safe and secure upgrades even in the face of rapid update cycles. The metadata.yaml plays a critical role in maintaining this structure and providing an accurate representation of the state of the whole stack at any given time. diff --git a/docs/providers/openstack/configuration.md b/docs/providers/openstack/configuration.md index f72b29f2..924c3858 100644 --- a/docs/providers/openstack/configuration.md +++ b/docs/providers/openstack/configuration.md @@ -1,84 +1,221 @@ -# Configuration +# Configuration (openstack/scs2) -This page lists the custom configuration options available, including their default values and if they are optional. The following example shows how these variables can be used inside the `cluster.yaml` file under `spec.topology.variables`. +This page lists all ClusterClass variables available in the `openstack/scs2` cluster stack. 
Variables are set in the `Cluster` resource under `spec.topology.variables`. ## Example ```yaml -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: - name: - namespace: + name: my-cluster + namespace: my-tenant labels: managed-secret: cloud-config spec: clusterNetwork: pods: - cidrBlocks: - - 192.168.0.0/16 - serviceDomain: cluster.local + cidrBlocks: ["192.168.0.0/16"] services: - cidrBlocks: - - 10.96.0.0/12 + cidrBlocks: ["10.96.0.0/12"] + serviceDomain: cluster.local topology: - variables: // <-- variables from the table can be set here - - name: controller_flavor - value: "SCS-4V-8-20" - - name: worker_flavor - value: "SCS-4V-8-20" - - name: external_id - value: "ebfe5546-f09f-4f42-ab54-094e457d42ec" - class: openstack-alpha-1-29-v2 + class: openstack-scs2-1-34-v1 + version: v1.34.3 controlPlane: - replicas: 2 - version: v1.29.3 + replicas: 3 workers: machineDeployments: - - class: openstack-alpha-1-29-v2 - failureDomain: nova - name: openstack-alpha-1-29-v2 - replicas: 4 + - class: default-worker + name: default-worker + replicas: 3 + variables: + overrides: + - name: flavor + value: "SCS-4V-8" + variables: + - name: flavor + value: "SCS-2V-4-20s" + - name: rootDisk + value: 50 + - name: networkExternalID + value: "ebfe5546-f09f-4f42-ab54-094e457d42ec" +``` + +Note how `flavor` is set once at cluster level (`SCS-2V-4-20s`) and then overridden for workers via `machineDeployments[].variables.overrides`. You can also do it the other way around — set the worker flavor at cluster level and override the control plane via `topology.controlPlane.variables.overrides`. This works for all unified machine variables (`flavor`, `rootDisk`, `serverGroupID`, `additionalBlockDevices`). 
+ +Object variables (like `identityRef` and `oidcConfig`) are set as nested values: + +```yaml + variables: + - name: identityRef + value: + name: "my-openstack-secret" + cloudName: "my-cloud" + - name: oidcConfig + value: + clientID: "kubectl" + issuerURL: "https://dex.k8s.example.com" +``` + +## Image Variables + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| `imageName` | string | `"ubuntu-capi-image"` | Base name of the OpenStack image. If `imageAddVersion` is enabled, the K8s version is appended (e.g. `ubuntu-capi-image-v1.34.3`). | +| `imageIsOrc` | boolean | `false` | If true, `imageName` refers to an ORC image resource. If false, it filters images in the OpenStack project. | +| `imageAddVersion` | boolean | `true` | Append the Kubernetes version as suffix to `imageName`. | + +## API Server + +| Name | Type | Default | Example | Description | +|------|------|---------|---------|-------------| +| `disableAPIServerFloatingIP` | boolean | `false` | | Disable the floating IP on the API server load balancer. | +| `certSANs` | array | `[]` | `["mydomain.example"]` | Extra Subject Alternative Names for the API server TLS cert. | +| `apiServerLoadBalancer` | string | `"octavia-ovn"` | `"octavia-ovn"` | Load balancer in front of the API server. Options: `none`, `octavia-amphora`, `octavia-ovn`. | +| `apiServerAllowedCIDRs` | array | *(none)* | `["192.168.10.0/24"]` | Restrict API server access to these CIDRs. Requires amphora LB (CAPO >= v2.12). Include the management cluster's outgoing IP. | + +## Network + +| Name | Type | Default | Example | Description | +|------|------|---------|---------|-------------| +| `dnsNameservers` | array | `["9.9.9.9", "149.112.112.112"]` | | DNS nameservers for the cluster subnet. | +| `nodeCIDR` | string | `"10.8.0.0/20"` | | CIDR for the cluster subnet. CAPO creates network, subnet, and router. Leave empty to skip. 
| +| `networkExternalID` | string | *(none)* | `"ebfe5546-..."` | ID of an external network. Required when multiple external networks exist. | +| `networkMTU` | integer | *(none)* | `1500` | MTU for the private cluster network. | + +## Machine Variables + +These variables apply to **all nodes** by default. Override per control plane or worker via `topology.controlPlane.variables.overrides` and `topology.workers.machineDeployments[].variables.overrides`. + +| Name | Type | Default | Example | Description | +|------|------|---------|---------|-------------| +| `flavor` | string | `"SCS-2V-4"` | `"SCS-4V-8"` | OpenStack instance flavor. | +| `rootDisk` | integer | `50` | `25` | Root disk size in GiB. Use 0 for flavors with ephemeral disk. | +| `serverGroupID` | string | `""` | `"3adf4e92-..."` | Server group UUID for anti-affinity. | +| `additionalBlockDevices` | array | `[]` | see below | Additional Cinder volumes to attach. | + +### additionalBlockDevices + +Each entry is an object: + +```yaml +- name: data + sizeGiB: 100 + type: __DEFAULT__ # uses the default volume type ``` -Variables from the table containing a `.` are to be used in an object with the part before the dot being the object name and the part behind the dot being the value names. The following example demonstrates this with `oidc_config`. +## Cluster-Level Control Plane Settings + +These are CAPO cluster-level settings and apply only to the control plane. They cannot be overridden per worker. + +| Name | Type | Default | Example | Description | +|------|------|---------|---------|-------------| +| `controlPlaneAvailabilityZones` | array | `[]` | `["nova"]` | Availability zones for control plane placement. | +| `controlPlaneOmitAvailabilityZone` | boolean | `false` | `true` | Let Nova scheduler choose the AZ. 
| + +## Access Management + +| Name | Type | Default | Example | Description | +|------|------|---------|---------|-------------| +| `sshKeyName` | string | `""` | `"capi-keypair"` | SSH key to inject into all nodes (for debugging). | +| `securityGroups` | array | `[]` | `["sg-name"]` | Extra security groups by name for all nodes. | +| `securityGroupIDs` | array | `[]` | `["9ae2f488-..."]` | Extra security groups by UUID for all nodes. Takes precedence over `securityGroups`. | + +## Identity + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| `identityRef` | object | `{"name": "openstack", "cloudName": "openstack"}` | Reference to the OpenStack credentials secret. | +| `identityRef.name` | string | `"openstack"` | Name of the Secret containing `clouds.yaml`. | +| `identityRef.cloudName` | string | `"openstack"` | Cloud name within `clouds.yaml`. | + +## OIDC Configuration + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| `oidcConfig` | object | *(none)* | OIDC configuration for API server authentication. Only applied if both `clientID` and `issuerURL` are set. | +| `oidcConfig.clientID` | string | *(none)* | Client ID for OIDC tokens. | +| `oidcConfig.issuerURL` | string | *(none)* | OIDC provider URL (must be https). | +| `oidcConfig.usernameClaim` | string | `"preferred_username"` | JWT claim for the username. | +| `oidcConfig.groupsClaim` | string | `"groups"` | JWT claim for groups. | +| `oidcConfig.usernamePrefix` | string | `"oidc:"` | Prefix for username claims. | +| `oidcConfig.groupsPrefix` | string | `"oidc:"` | Prefix for group claims. | + +## Registry Mirrors + +| Name | Type | Default | Description | +|------|------|---------|-------------| +| `registryMirrors` | array | `[]` | Configure registry mirrors for both containerd and CRI-O. 
| + +Each entry is an object: ```yaml ---- -topology: - variables: - - name: oidc_config - value: - issuer_url: "https://dex.k8s.scs.community" - client_id: "kubectl" +- name: registryMirrors + value: + - hostnameUpstream: "docker.io" + urlUpstream: "https://registry-1.docker.io" + urlMirror: "https://registry.example.com/v2/dockerhub" + certMirror: "" + - hostnameUpstream: "gcr.io" + urlUpstream: "https://gcr.io" + urlMirror: "https://registry.example.com/v2/gcr" + certMirror: "" ``` -## Available variables - - -| Name | Type | Default | Example | Description | Required | -| ---------------------------------- | ------- | -------------------------------- | -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -| `external_id` | string | "" | "ebfe5546-f09f-4f42-ab54-094e457d42ec" | ExternalNetworkID is the ID of an external OpenStack Network. This is necessary to get public internet to the VMs. | False | -| `controller_flavor` | string | "SCS-2V-4-20s" | "SCS-2V-4-20s" | OpenStack instance flavor for control-plane nodes. | False | -| `worker_flavor` | string | "SCS-2V-4" | "SCS-2V-4" | OpenStack instance flavor for worker nodes. | False | -| `controller_root_disk` | integer | | 25 | Root disk size in GiB for control-plane nodes. OpenStack volume will be created and used instead of an ephemeral disk defined in flavor. Should only be used for the diskless flavors. | False | -| `worker_root_disk` | integer | 25 | 25 | Root disk size in GiB for worker nodes. 
OpenStack volume will be created and used instead of an ephemeral disk defined in flavor. Should be used for the diskless flavors. | False | -| `openstack_security_groups` | array | [] | ['security-group-1'] | The names of the security groups to assign to the instance | False | -| `cloud_name` | string | "openstack" | "openstack" | The name of the cloud to use from the clouds secret | False | -| `secret_name` | string | "openstack" | "openstack" | The name of the clouds secret | False | -| `controller_server_group_id` | string | "" | "3adf4e92-bb33-4e44-8ad3-afda9dfe8ec3" | The server group to assign the control plane nodes to. | False | -| `worker_server_group_id` | string | "" | "869fe071-1e56-46a9-9166-47c9f228e297" | The server group to assign the worker nodes to. | False | -| `ssh_key` | string | "" | "capi-keypair" | The ssh key to inject in the nodes. | False | -| `apiserver_loadbalancer` | string | "octavia-amphora" | "none, octavia-amphora, octavia-ovn" | "In this cluster-stack we have two kind of loadbalancers. Each of them has its own configuration variable. This setting here is to configure the loadbalancer that is placed in front of the apiserver.
You can choose from 2 options:

none:
No loadbalancer solution will be deployed

octavia-amphora:
(default) Uses openstack's loadbalancer service (provider:amphora)

octavia-ovn:
Uses openstack's loadbalancer service (provider:ovn)
| False | -| `dns_nameservers` | array | ['5.1.66.255', '185.150.99.255'] | ['5.1.66.255', '185.150.99.255'] | "DNSNameservers is the list of nameservers for the OpenStack Subnet being created. Set this value when you need to create a new network/subnet while the access through DNS is required.
| False | -| `node_cidr` | string | "10.8.0.0/20" | "10.8.0.0/20" | "NodeCIDR is the OpenStack Subnet to be created. Cluster actuator will create a network, a subnet with NodeCIDR, and a router connected to this subnet. If you leave this empty, no network will be created.
| False | -| `certSANs` | array | [] | ['mydomain.example'] | CertSANs sets extra Subject Alternative Names for the API Server signing cert. | False | -| `oidc_config.client_id` | string | | kubectl | A client id that all tokens must be issued for. | | -| `oidc_config.issuer_url` | string | | `https://dex.example.com` | URL of the provider that allows the API server to dis cover public signing keys. Only URLs that use the https:// scheme are acc epted. This is typically the provider's discovery URL, changed to have an emp ty path | | -| `oidc_config.username_claim` | string | preferred_username | preferred_username | JWT claim to use as the user name. By default sub, whi ch is expected to be a unique identifier of the end user. Admins can choose oth er claims, such as email or name, depending on their provider. However, cla ims other than email will be prefixed with the issuer URL to prevent naming cla shes with other plugins. | | -| `oidc_config.groups_claim` | string | groups | groups | JWT claim to use as the user's group. If the claim is present it must be an array of strings. | | -| `oidc_config.username_prefix` | string | oidc: | oidc: | Prefix prepended to username claims to prevent cla shes with existing names (such as system: users). For example, the value oid c: will create usernames like oidc:jane.doe. If this flag isn't provided and --o idc-username-claim is a value other than email the prefix defaults to ( Iss uer URL )# where ( Issuer URL ) is the value of --oidc-issuer-url. The value - c an be used to disable all prefixing. | | -| `oidc_config.groups_prefix` | string | oidc: | oidc: | Prefix prepended to group claims to prevent clashes wit h existing names (such as system: groups). For example, the value oidc: will cre ate group names like oidc:engineering and oidc:infra. | | -| `network_mtu` | integer | | 1500 | NetworkMTU sets the maximum transmission unit (MTU) value to address fragmentation for the private network ID. 
| False | -| `controlPlaneAvailabilityZones` | array | | ['nova'] | ControlPlaneAvailabilityZones is the set of availability zones which control plane machines may be deployed to. | False | -| `controlPlaneOmitAvailabilityZone` | boolean | | True | ControlPlaneOmitAvailabilityZone causes availability zone to be omitted when creating control plane nodes, allowing the Nova scheduler to make a decision on which availability zone to use based on other scheduling constraints. | False | +| Field | Type | Description | +|-------|------|-------------| +| `hostnameUpstream` | string | Hostname of the upstream registry (e.g. `docker.io`). | +| `urlUpstream` | string | Server URL of the upstream registry. | +| `urlMirror` | string | URL of the mirror registry. | +| `certMirror` | string | TLS certificate of the mirror in PEM format (optional). | + +This writes configuration files to all nodes (control plane and workers) for both container runtimes: + +- **containerd**: `hosts.toml` in `/etc/containerd/certs.d/{hostname}/` +- **CRI-O**: drop-in config in `/etc/containers/registries.conf.d/50-mirror-{hostname}.conf` + +If `certMirror` is provided, the CA certificate is written to both `/etc/containerd/certs/{hostname}/ca.crt` and `/etc/containers/certs.d/{hostname}/ca.crt`. + +## Migration from scs (v1beta1) + +The `scs2` stack uses camelCase variable names instead of snake_case. 
If migrating from `scs`: + +| scs (old) | scs2 (new) | Notes | +|-----------|------------|-------| +| `controller_flavor` | `flavor` | Unified; override per CP or worker via topology overrides | +| `worker_flavor` | `flavor` | | +| `controller_root_disk` | `rootDisk` | Unified; override per CP or worker via topology overrides | +| `worker_root_disk` | `rootDisk` | | +| `external_id` | `networkExternalID` | | +| `controller_server_group_id` | `serverGroupID` | Unified; override per CP or worker via topology overrides | +| `worker_server_group_id` | `serverGroupID` | | +| `ssh_key` | `sshKeyName` | | +| `openstack_security_groups` | `securityGroups` | | +| `cloud_name` | `identityRef.cloudName` | | +| `secret_name` | `identityRef.name` | | +| `dns_nameservers` | `dnsNameservers` | | +| `node_cidr` | `nodeCIDR` | | +| `apiserver_loadbalancer` | `apiServerLoadBalancer` | | +| `restrict_kubeapi` | `apiServerAllowedCIDRs` | Renamed | +| `network_mtu` | `networkMTU` | | +| `oidc_config.*` | `oidcConfig.*` | camelCase sub-fields | + +### Migration from earlier scs2 (pre-simplification) + +If migrating from an earlier `scs2` version that used split variable names: + +| Old scs2 | New scs2 | Notes | +|----------|----------|-------| +| `controlPlaneFlavor` | `flavor` | Unified; override per CP or worker via topology overrides | +| `workerFlavor` | `flavor` | | +| `controlPlaneRootDisk` | `rootDisk` | Unified; override per CP or worker via topology overrides | +| `workerRootDisk` | `rootDisk` | | +| `controlPlaneServerGroupID` | `serverGroupID` | Unified; override per CP or worker via topology overrides | +| `workerServerGroupID` | `serverGroupID` | | +| `workerAdditionalBlockDevices` | `additionalBlockDevices` | Now applies to all nodes | +| `workerSecurityGroups` | *(removed)* | Use `securityGroups` for all nodes | +| `workerSecurityGroupIDs` | *(removed)* | Use `securityGroupIDs` for all nodes | +| `apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs` | `apiServerAllowedCIDRs` 
| Shortened | + +See `hack/migrate-cluster.sh` for an automated migration script. diff --git a/docs/providers/openstack/quickstart.md b/docs/providers/openstack/quickstart.md deleted file mode 100644 index 00bccef8..00000000 --- a/docs/providers/openstack/quickstart.md +++ /dev/null @@ -1,290 +0,0 @@ -# Quickstart - -This quickstart guide contains steps to install the [Cluster Stack Operator][CSO] (CSO) utilizing the [Cluster Stack Provider OpenStack][CSPO] (CSPO) to provide [ClusterClasses][ClusterClass] which can be used with the [Kubernetes Cluster API][CAPI] to create Kubernetes Clusters. - -This section guides you through all the necessary steps to create a workload Kubernetes cluster on top of the OpenStack infrastructure. The guide describes a path that utilizes the `clusterctl` CLI tool to manage the lifecycle of a CAPI management cluster and employs `kind` to create a local non-production managemnt cluster. - -Note that it is a common practice to create a temporary, local [bootstrap cluster](https://cluster-api.sigs.k8s.io/reference/glossary#bootstrap-cluster) which is then used to provision a target [management cluster](https://cluster-api.sigs.k8s.io/reference/glossary#management-cluster) on the selected infrastructure. 
- -## Prerequisites - -- Install [Docker](https://docs.docker.com/get-docker/) and [kind](https://helm.sh/docs/intro/install/) -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [Helm](https://helm.sh/docs/intro/install/) -- Install [clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl) -- Install [go](https://go.dev/doc/install) -- Install [jq](https://jqlang.github.io/jq/) - -## Initialize the management cluster - -Create the kind cluster: - -```bash -kind create cluster -``` - -Transform the Kubernetes cluster into a management cluster by using `clusterctl init` and bootstrap it with CAPI and Cluster API Provider OpenStack ([CAPO](https://github.com/kubernetes-sigs/cluster-api-provider-openstack)) components: - -```bash -export CLUSTER_TOPOLOGY=true -export EXP_CLUSTER_RESOURCE_SET=true -export EXP_RUNTIME_SDK=true -clusterctl init --infrastructure openstack -kubectl apply -f https://github.com/k-orc/openstack-resource-controller/releases/latest/download/install.yaml -``` - -Note that the manual deployment of the openstack resource controller (ORC) is required since capo-0.12. If you use `clusterctl upgrade` to upgrade capo from earlier version, you'll also need to manually add ORC to the management host/cluster. - -### CSO and CSPO variables preparation (CSP) - -The CSO and CSPO must be directed to the Cluster Stacks repository housing releases for the OpenStack provider. -Modify and export the following environment variables if you wish to redirect CSO and CSPO to an alternative Git repository - -Be aware that GitHub enforces limitations on the number of API requests per unit of time. To overcome this, -it is recommended to configure a [personal access token](https://github.com/settings/personal-access-tokens/new) for authenticated calls. This will significantly increase the rate limit for GitHub API requests. -Fine grained PAT with `Public Repositories (read-only)` is enough. 
- -```bash -export GIT_PROVIDER_B64=Z2l0aHVi # github -export GIT_ORG_NAME_B64=U292ZXJlaWduQ2xvdWRTdGFjaw== # SovereignCloudStack -export GIT_REPOSITORY_NAME_B64=Y2x1c3Rlci1zdGFja3M= # cluster-stacks -export GIT_ACCESS_TOKEN_B64=$(echo -n '' | base64 -w0) -``` - -### CSO and CSPO deployment (CSP) - -Install the [envsubst](https://github.com/drone/envsubst) Go package. It is required to enable the expansion of variables specified in CSPO and CSO manifests. - -```bash -GOBIN=/tmp go install github.com/drone/envsubst/v2/cmd/envsubst@latest -``` -Note: On typical Linux distros, you will have a binary `/usr/bin/envsubst` from the gettext package that does *not* work. - -Get the latest CSO release version and apply CSO manifests to the management cluster. - -```bash -# Get the latest CSO release version and apply CSO manifests -curl -sSL https://github.com/SovereignCloudStack/cluster-stack-operator/releases/latest/download/cso-infrastructure-components.yaml | /tmp/envsubst | kubectl apply -f - -``` - -Get the latest CSPO release version and apply CSPO manifests to the management cluster. - -```bash -# Get the latest CSPO release version and apply CSPO manifests -curl -sSL https://github.com/sovereignCloudStack/cluster-stack-provider-openstack/releases/latest/download/cspo-infrastructure-components.yaml | /tmp/envsubst | kubectl apply -f - -``` - -## Define a namespace for a tenant (CSP/per tenant) - -```sh -export CS_NAMESPACE=my-tenant -``` - -### Deploy CSP-helper chart - -The csp-helper chart is meant to create per tenant credentials as well as the tenants namespace where all resources for this tenant will live in. - -Cloud and secret name default to `openstack`. 
- -Example `clouds.yaml` - -```yaml -clouds: - openstack: - auth: - auth_url: https://api.gx-scs.sovereignit.cloud:5000/v3 - application_credential_id: "" - application_credential_secret: "" - region_name: "RegionOne" - interface: "public" - identity_api_version: 3 - auth_type: "v3applicationcredential" -``` - -```bash -helm upgrade -i csp-helper-"${CS_NAMESPACE}" -n "${CS_NAMESPACE}" --create-namespace https://github.com/SovereignCloudStack/openstack-csp-helper/releases/latest/download/openstack-csp-helper.tgz -f path/to/clouds.yaml -``` - -## Create Cluster Stack definition (CSP/per tenant) - -Configure the Cluster Stack you want to use: - -```sh -# the name of the cluster stack (must match a name of a directory in https://github.com/SovereignCloudStack/cluster-stacks/tree/main/providers/openstack) -export CS_NAME=scs - -# the kubernetes version of the cluster stack (must match a tag for the kubernetes version and the stack version) -export CS_K8S_VERSION=1.29 - -# the version of the cluster stack (must match a tag for the kubernetes version and the stack version) -export CS_VERSION=v1 -export CS_CHANNEL=stable - -# must match a cloud section name in the used clouds.yaml -export CS_CLOUDNAME=openstack -export CS_SECRETNAME="${CS_CLOUDNAME}" -``` - -This will use the cluster-stack as defined in the `providers/openstack/scs` directory. 
- -```bash -cat >clusterstack.yaml < cluster.yaml < kubeconfig.yaml -# Communicate with the workload cluster -kubectl --kubeconfig kubeconfig.yaml get nodes -``` - -## Check the workload cluster health - -```bash -$ kubectl --kubeconfig kubeconfig.yaml get pods -A -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system cilium-8mzrx 1/1 Running 0 7m58s -kube-system cilium-jdxqm 1/1 Running 0 6m43s -kube-system cilium-operator-6bb4c7d6b6-c77tn 1/1 Running 0 7m57s -kube-system cilium-operator-6bb4c7d6b6-l2df8 1/1 Running 0 7m58s -kube-system cilium-p9tkv 1/1 Running 0 6m44s -kube-system cilium-thbc8 1/1 Running 0 6m45s -kube-system coredns-5dd5756b68-k68j4 1/1 Running 0 8m3s -kube-system coredns-5dd5756b68-vjg9r 1/1 Running 0 8m3s -kube-system etcd-cs-cluster-pwblg-xkptx 1/1 Running 0 8m3s -kube-system kube-apiserver-cs-cluster-pwblg-xkptx 1/1 Running 0 8m3s -kube-system kube-controller-manager-cs-cluster-pwblg-xkptx 1/1 Running 0 8m3s -kube-system kube-proxy-54f8w 1/1 Running 0 6m44s -kube-system kube-proxy-8z8kb 1/1 Running 0 6m43s -kube-system kube-proxy-jht46 1/1 Running 0 8m3s -kube-system kube-proxy-mt69p 1/1 Running 0 6m45s -kube-system kube-scheduler-cs-cluster-pwblg-xkptx 1/1 Running 0 8m3s -kube-system metrics-server-6578bd6756-vztzf 1/1 Running 0 7m57s -kube-system openstack-cinder-csi-controllerplugin-776696786b-ksf77 6/6 Running 0 7m57s -kube-system openstack-cinder-csi-nodeplugin-96dlg 3/3 Running 0 6m43s -kube-system openstack-cinder-csi-nodeplugin-crhc4 3/3 Running 0 6m44s -kube-system openstack-cinder-csi-nodeplugin-d7rzz 3/3 Running 0 7m58s -kube-system openstack-cinder-csi-nodeplugin-nkgq6 3/3 Running 0 6m44s -kube-system openstack-cloud-controller-manager-hp2n2 1/1 Running 0 7m9s -``` - -[CAPI]: https://cluster-api.sigs.k8s.io/ -[CSO]: https://github.com/sovereignCloudStack/cluster-stack-operator/ -[CSPO]: https://github.com/SovereignCloudStack/cluster-stacks/tree/main/providers/openstack -[ClusterClass]: 
https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210526-cluster-class-and-managed-topologies.md diff --git a/docs/quickstart.mdx b/docs/quickstart.mdx new file mode 100644 index 00000000..17f35e20 --- /dev/null +++ b/docs/quickstart.mdx @@ -0,0 +1,322 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Quickstart + +This guide walks you through deploying a Kubernetes workload cluster using Cluster Stacks. You'll create a local management cluster with `kind`, install the required operators, and provision a workload cluster. + +## Prerequisites + +- [Docker](https://docs.docker.com/get-docker/) +- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +- [Helm](https://helm.sh/docs/intro/install/) +- [clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start#install-clusterctl) + + + + +You also need: +- An OpenStack cloud with application credentials +- A `clouds.yaml` file configured for your project + + + + +No additional prerequisites. The Docker provider runs workload clusters as containers on your machine using `kindest/node` images. + + + + +## 1. Create the management cluster + +```bash +kind create cluster --name capi-management +``` + +## 2. Initialize Cluster API + + + + +```bash +clusterctl init --infrastructure openstack + +# ORC is required since CAPO 0.12 +kubectl apply -f https://github.com/k-orc/openstack-resource-controller/releases/latest/download/install.yaml +``` + + + + +```bash +clusterctl init --infrastructure docker +``` + + + + +## 3. Install the Cluster Stack Operator (CSO) + +GitHub rate limits unauthenticated API requests. 
Configure a [personal access token](https://github.com/settings/personal-access-tokens/new) (fine-grained, `Public Repositories (read-only)`) for better reliability: + +```bash +export GIT_PROVIDER_B64=Z2l0aHVi # github +export GIT_ORG_NAME_B64=U292ZXJlaWduQ2xvdWRTdGFjaw== # SovereignCloudStack +export GIT_REPOSITORY_NAME_B64=Y2x1c3Rlci1zdGFja3M= # cluster-stacks +export GIT_ACCESS_TOKEN_B64=$(echo -n '' | base64 -w0) +``` + +Install [envsubst](https://github.com/drone/envsubst) (Go version — the system `envsubst` from gettext does **not** work): + +```bash +GOBIN=/tmp go install github.com/drone/envsubst/v2/cmd/envsubst@latest +``` + +Deploy CSO: + +```bash +curl -sSL https://github.com/SovereignCloudStack/cluster-stack-operator/releases/latest/download/cso-infrastructure-components.yaml \ + | /tmp/envsubst | kubectl apply -f - +``` + + + + +Deploy the Cluster Stack Provider OpenStack (CSPO): + +```bash +curl -sSL https://github.com/SovereignCloudStack/cluster-stack-provider-openstack/releases/latest/download/cspo-infrastructure-components.yaml \ + | /tmp/envsubst | kubectl apply -f - +``` + + + + +For the Docker provider, CSO handles everything directly — no additional provider component is needed. + + + + +## 4. Create a namespace and credentials + +```bash +export CS_NAMESPACE=my-tenant +kubectl create namespace "${CS_NAMESPACE}" +``` + + + + +Deploy the CSP-helper chart to create the OpenStack credentials secret: + +```bash +helm upgrade -i csp-helper-"${CS_NAMESPACE}" \ + -n "${CS_NAMESPACE}" \ + https://github.com/SovereignCloudStack/openstack-csp-helper/releases/latest/download/openstack-csp-helper.tgz \ + -f path/to/clouds.yaml +``` + + + + +No credentials needed for the Docker provider. + + + + +## 5. 
Create the ClusterStack resource + + + + +```bash +export CS_NAME=scs2 +export CS_K8S_VERSION=1.34 +export CS_VERSION=v1 +``` + +```yaml +apiVersion: clusterstack.x-k8s.io/v1alpha1 +kind: ClusterStack +metadata: + name: clusterstack + namespace: ${CS_NAMESPACE} +spec: + provider: openstack + name: ${CS_NAME} + kubernetesVersion: "${CS_K8S_VERSION}" + channel: stable + autoSubscribe: false + providerRef: + apiVersion: infrastructure.clusterstack.x-k8s.io/v1alpha1 + kind: OpenStackClusterStackReleaseTemplate + name: cspotemplate + versions: + - ${CS_VERSION} +--- +apiVersion: infrastructure.clusterstack.x-k8s.io/v1alpha1 +kind: OpenStackClusterStackReleaseTemplate +metadata: + name: cspotemplate + namespace: ${CS_NAMESPACE} +spec: + template: + spec: + identityRef: + kind: Secret + name: openstack +``` + +```bash +envsubst < clusterstack.yaml | kubectl apply -f - +``` + + + + +```bash +export CS_NAME=scs2 +export CS_K8S_VERSION=1.35 +export CS_VERSION=v0 +``` + +```yaml +apiVersion: clusterstack.x-k8s.io/v1alpha1 +kind: ClusterStack +metadata: + name: clusterstack + namespace: ${CS_NAMESPACE} +spec: + provider: docker + name: ${CS_NAME} + kubernetesVersion: "${CS_K8S_VERSION}" + channel: custom + autoSubscribe: false + versions: + - ${CS_VERSION} +``` + +```bash +envsubst < clusterstack.yaml | kubectl apply -f - +``` + + + + +## 6. 
Create a workload cluster
+
+<Tabs groupId="provider">
+<TabItem value="openstack" label="OpenStack" default>
+
+```bash
+export CS_CLUSTER_NAME=my-cluster
+export CS_CLASS_NAME=openstack-${CS_NAME}-${CS_K8S_VERSION/./-}-${CS_VERSION}
+```
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta2
+kind: Cluster
+metadata:
+  name: ${CS_CLUSTER_NAME}
+  namespace: ${CS_NAMESPACE}
+  labels:
+    managed-secret: cloud-config
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks: ["192.168.0.0/16"]
+    services:
+      cidrBlocks: ["10.96.0.0/12"]
+    serviceDomain: cluster.local
+  topology:
+    class: ${CS_CLASS_NAME}
+    version: v${CS_K8S_VERSION}.3
+    controlPlane:
+      replicas: 3
+    workers:
+      machineDeployments:
+        - class: default-worker
+          name: default-worker
+          replicas: 3
+          variables:
+            overrides:
+              - name: flavor
+                value: "SCS-4V-8"
+              - name: rootDisk
+                value: 50
+    variables:
+      - name: flavor
+        value: "SCS-2V-4-20s"
+```
+
+```bash
+envsubst < cluster.yaml | kubectl apply -f -
+```
+
+</TabItem>
+<TabItem value="docker" label="Docker">
+
+```bash
+export CS_CLUSTER_NAME=my-cluster
+export CS_CLASS_NAME=docker-${CS_NAME}-${CS_K8S_VERSION/./-}-${CS_VERSION}
+```
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta2
+kind: Cluster
+metadata:
+  name: ${CS_CLUSTER_NAME}
+  namespace: ${CS_NAMESPACE}
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks: ["192.168.0.0/16"]
+    services:
+      cidrBlocks: ["10.96.0.0/12"]
+    serviceDomain: cluster.local
+  topology:
+    class: ${CS_CLASS_NAME}
+    version: v${CS_K8S_VERSION}.1
+    controlPlane:
+      replicas: 1
+    workers:
+      machineDeployments:
+        - class: default-worker
+          name: default-worker
+          replicas: 1
+```
+
+```bash
+envsubst < cluster.yaml | kubectl apply -f -
+```
+
+</TabItem>
+</Tabs>
+
+## 7. 
Monitor and access the cluster + +Watch the cluster provisioning: + +```bash +clusterctl -n "${CS_NAMESPACE}" describe cluster "${CS_CLUSTER_NAME}" +``` + +Once ready, retrieve the kubeconfig: + +```bash +clusterctl -n "${CS_NAMESPACE}" get kubeconfig "${CS_CLUSTER_NAME}" > kubeconfig.yaml +kubectl --kubeconfig kubeconfig.yaml get nodes +``` + +Verify addons are running: + +```bash +kubectl --kubeconfig kubeconfig.yaml get pods -A +``` + +You should see Cilium, metrics-server, and (on OpenStack) the cloud-controller-manager and Cinder CSI running. + +## Next steps + +- [Configuration reference](providers/openstack/configuration.md) — all available ClusterClass variables +- [Build system](build-system.md) — how to build and publish cluster stacks +- [Versioning](versioning.md) — how cluster stack versioning works diff --git a/docs/versioning.md b/docs/versioning.md new file mode 100644 index 00000000..46f7a2ba --- /dev/null +++ b/docs/versioning.md @@ -0,0 +1,87 @@ +# Versioning + +## Cluster Stack Versions + +Each cluster stack is versioned independently per Kubernetes minor version. The version is embedded in the release artifact name: + +``` +{provider}-{stack}-{k8s-major}-{k8s-minor}-{cluster-stack-version} +``` + +Examples: +- `openstack-scs2-1-34-v1` — stable release, K8s 1.34 +- `docker-scs2-1-35-v0` — dev release, K8s 1.35 + +### Version Semantics + +| Version | Meaning | Registry | +|---------|---------|----------| +| `v0` | Development / unstable | ttl.sh (24h TTL) or git hash | +| `v1`, `v2`, ... | Stable releases | OCI registry (e.g. ghcr.io) | + +Stable versions are auto-incremented by the build system: it queries the OCI registry for the highest existing version and increments by one. 
+ +### What Triggers a Version Bump + +Any change to these components requires a new cluster stack version: + +- **Cluster class** — variable defaults, patches, template structure +- **Cluster addons** — CNI, CCM, CSI, metrics-server versions or configuration +- **Node images** — OS version, containerd version, kubelet version + +A cluster stack version is **not** bumped for Kubernetes patch versions. Instead, a new cluster stack version is released that references the updated patch version through its node images and addon compatibility. + +## versions.yaml + +Each stack has a `versions.yaml` that maps Kubernetes minor versions to addon versions and metadata: + +```yaml +# providers/openstack/scs2/versions.yaml +versions: + "1.32": + occm: 2.32.3 + cinder-csi: 2.32.3 + ubuntu: "2204" + "1.33": + occm: 2.33.2 + cinder-csi: 2.33.2 + ubuntu: "2404" + "1.34": + occm: 2.34.0 + cinder-csi: 2.34.0 + ubuntu: "2404" +``` + +### Keys + +- **Addon keys** (e.g. `occm`, `cinder-csi`) must match the Helm dependency names in `cluster-addon/*/Chart.yaml` exactly. The build system uses these to patch `Chart.yaml` with the correct version at build time. +- **Metadata keys** (`kubernetes`, `ubuntu`) are excluded from addon processing. They provide context for image generation and build tooling. + +### Docker Stacks + +Docker stacks have no version-tied addons (Cilium and metrics-server versions are fixed in their Chart.yaml), so their `versions.yaml` only lists supported Kubernetes versions: + +```yaml +# providers/docker/scs2/versions.yaml +versions: + "1.32": {} + "1.33": {} + "1.34": {} + "1.35": {} +``` + +## Kubernetes Version Support + +Each cluster stack supports multiple Kubernetes minor versions simultaneously. The supported range is defined by the entries in `versions.yaml`. 
+ +- Only **minor versions** are tracked — patch versions are handled transparently via node images +- The `csctl.yaml` file records the latest Kubernetes version used during development + +### Ubuntu Image Mapping + +For OpenStack stacks, the `ubuntu` metadata key controls which Ubuntu release is used for node images: + +| Kubernetes Version | Ubuntu Release | Image Name Pattern | +|---|---|---| +| 1.32 and earlier | 22.04 | `ubuntu-capi-image-v1.32.x` | +| 1.33+ | 24.04 | `ubuntu-capi-image-v1.33.x` | diff --git a/hack/build.sh b/hack/build.sh index a4be36e5..092d8ea2 100755 --- a/hack/build.sh +++ b/hack/build.sh @@ -2,7 +2,10 @@ # Build and optionally publish cluster-stack release artifacts. # # Usage: -# ./hack/build.sh [options] +# ./hack/build.sh [stack-dir] [options] +# +# If is omitted, it is derived from $PROVIDER and $CLUSTER_STACK +# (default: providers/openstack/scs2). # # Options: # --version Build for a specific K8s minor version (e.g., 1.34) @@ -13,6 +16,8 @@ # Without --version or --all, builds for the version in csctl.yaml. # # Environment: +# PROVIDER Provider name (default: openstack) +# CLUSTER_STACK Cluster stack name (default: scs2) # OCI_REGISTRY OCI registry (default: ttl.sh) # OCI_REPOSITORY OCI repository (auto-generated for ttl.sh) # OCI_USERNAME OCI auth username (optional) @@ -53,8 +58,7 @@ while [[ $# -gt 0 ]]; do done if [[ -z "$STACK_DIR" ]]; then - echo "Usage: $0 [--version X.Y] [--all] [--publish] [--validate]" - exit 1 + STACK_DIR="providers/${PROVIDER:-openstack}/${CLUSTER_STACK:-scs2}" fi if [[ ! 
-d "$STACK_DIR" ]]; then @@ -72,7 +76,7 @@ fi # ============================================ PROVIDER=$(yq '.config.provider.type' "$STACK_DIR/csctl.yaml") -STACK_NAME=$(yq '.config.clusterStackName' "$STACK_DIR/csctl.yaml") +CLUSTER_STACK=$(yq '.config.clusterStackName' "$STACK_DIR/csctl.yaml") OUTPUT_DIR="${OUTPUT_DIR:-.release}" # ============================================ @@ -123,7 +127,7 @@ setup_oci() { get_release_version() { local k8s_short="$1" local k8s_dash="${k8s_short//./-}" - local tag_prefix="${PROVIDER}-${STACK_NAME}-${k8s_dash}" + local tag_prefix="${PROVIDER}-${CLUSTER_STACK}-${k8s_dash}" if [[ "${OCI_REGISTRY:-}" == "ttl.sh" ]] || [[ -z "${OCI_REPOSITORY:-}" ]]; then # Dev version @@ -158,7 +162,7 @@ build_version() { local k8s_dash="${k8s_short//./-}" echo "" - echo "Building ${PROVIDER}-${STACK_NAME} for K8s ${k8s_version}" + echo "Building ${PROVIDER}-${CLUSTER_STACK} for K8s ${k8s_version}" echo "---" # Get release version @@ -167,7 +171,7 @@ build_version() { fi local release_version release_version=$(get_release_version "$k8s_short") - local release_dir="${OUTPUT_DIR}/${PROVIDER}-${STACK_NAME}-${k8s_dash}-${release_version}" + local release_dir="${OUTPUT_DIR}/${PROVIDER}-${CLUSTER_STACK}-${k8s_dash}-${release_version}" mkdir -p "$release_dir" @@ -181,7 +185,7 @@ build_version() { # Patch cluster-class Chart.yaml: set name and version local class_chart="$work_dir/cluster-class/Chart.yaml" - yq -i ".name = \"${PROVIDER}-${STACK_NAME}-${k8s_dash}-cluster-class\"" "$class_chart" + yq -i ".name = \"${PROVIDER}-${CLUSTER_STACK}-${k8s_dash}-cluster-class\"" "$class_chart" yq -i ".version = \"${release_version}\"" "$class_chart" # Patch csctl.yaml kubernetes version @@ -253,7 +257,7 @@ build_version() { exit 1 fi - local addon_tgz="${PROVIDER}-${STACK_NAME}-${k8s_dash}-cluster-addon-${release_version}.tgz" + local addon_tgz="${PROVIDER}-${CLUSTER_STACK}-${k8s_dash}-cluster-addon-${release_version}.tgz" (cd "$addon_temp" && tar -czf "$(cd 
"$REPO_ROOT" && pwd)/$release_dir/$addon_tgz" */) rm -rf "$addon_temp" echo " cluster-addon packaged ($addon_count addons)" @@ -329,7 +333,7 @@ publish_version() { local release_dir="$1" local k8s_dash="$2" local release_version="$3" - local oci_tag="${PROVIDER}-${STACK_NAME}-${k8s_dash}-${release_version}" + local oci_tag="${PROVIDER}-${CLUSTER_STACK}-${k8s_dash}-${release_version}" if [[ -z "${OCI_REGISTRY:-}" || -z "${OCI_REPOSITORY:-}" ]]; then echo " OCI_REGISTRY or OCI_REPOSITORY not set" @@ -370,7 +374,7 @@ publish_version() { # Main # ============================================ -echo "Cluster Stack: ${PROVIDER}/${STACK_NAME}" +echo "Cluster Stack: ${PROVIDER}/${CLUSTER_STACK}" echo "K8s versions: $(echo "$VERSIONS" | tr '\n' ' ')" echo "" diff --git a/hack/docugen.py b/hack/docugen.py index b3c45051..76cfedf5 100755 --- a/hack/docugen.py +++ b/hack/docugen.py @@ -6,14 +6,18 @@ Renders the cluster-class Helm template, parses the topology variables (openAPIV3Schema), and outputs a markdown table of all configurable options. +If is omitted, it is derived from $PROVIDER and $CLUSTER_STACK +(default: providers/openstack/scs2). 
+ + Usage: - ./hack/docugen.py - ./hack/docugen.py --output docs/configuration.md - ./hack/docugen.py --template hack/config-template.md - ./hack/docugen.py --dry-run + ./hack/docugen.py [stack-dir] + ./hack/docugen.py --output docs/configuration.md + ./hack/docugen.py providers/docker/scs2 --template hack/config-template.md + ./hack/docugen.py --dry-run """ import argparse +import os import subprocess import sys from pathlib import Path @@ -109,8 +113,8 @@ def main(): description="Generate docs from ClusterClass variables", ) parser.add_argument( - "stack_dir", type=Path, - help="Path to the cluster stack directory", + "stack_dir", type=Path, nargs="?", default=None, + help="Path to the cluster stack directory (default: providers/$PROVIDER/$CLUSTER_STACK)", ) parser.add_argument( "--template", type=Path, default=None, @@ -126,6 +130,11 @@ ) args = parser.parse_args() + if args.stack_dir is None: + provider = os.environ.get("PROVIDER", "openstack") + cluster_stack = os.environ.get("CLUSTER_STACK", "scs2") + args.stack_dir = Path("providers") / provider / cluster_stack + # Render and parse template = render_cluster_class(args.stack_dir) table = generate_table(template) diff --git a/hack/generate-image-manifests.sh b/hack/generate-image-manifests.sh index 149db832..077f6a7e 100755 --- a/hack/generate-image-manifests.sh +++ b/hack/generate-image-manifests.sh @@ -8,12 +8,19 @@ # Entries without an "ubuntu" field are skipped (e.g., docker provider stacks). # # Usage: -# ./hack/generate-image-manifests.sh # All versions -# ./hack/generate-image-manifests.sh --version 1.34 # Specific version -# ./hack/generate-image-manifests.sh --output-dir manifests/ # Write to files -# ./hack/generate-image-manifests.sh --skip-checksum # Skip checksum fetch +# ./hack/generate-image-manifests.sh [stack-dir] [options] +# +# If [stack-dir] is omitted, it is derived from $PROVIDER and $CLUSTER_STACK +# (default: providers/openstack/scs2). 
+# +# Options: +# --version X.Y Generate for a specific K8s minor version only +# --output-dir DIR Write individual YAML files instead of stdout +# --skip-checksum Skip fetching SHA256 checksums # # Environment: +# PROVIDER Provider name (default: openstack) +# CLUSTER_STACK Cluster stack name (default: scs2) # IMAGE_BASE_URL Base URL for images (default: https://nbg1.your-objectstorage.com/osism/openstack-k8s-capi-images) # CLOUD_NAME CloudCredentialsRef cloud name (default: openstack) # SECRET_NAME CloudCredentialsRef secret name (default: openstack) @@ -51,8 +58,7 @@ while [[ $# -gt 0 ]]; do done if [[ -z "$STACK_DIR" ]]; then - echo "Usage: $0 [--version X.Y] [--output-dir dir] [--skip-checksum]" >&2 - exit 1 + STACK_DIR="providers/${PROVIDER:-openstack}/${CLUSTER_STACK:-scs2}" fi if [[ ! -f "$STACK_DIR/versions.yaml" ]]; then diff --git a/hack/generate-resources.sh b/hack/generate-resources.sh index a49dca22..5bf9b148 100755 --- a/hack/generate-resources.sh +++ b/hack/generate-resources.sh @@ -2,7 +2,10 @@ # Generate ClusterStack and Cluster YAML resources for testing. # # Usage: -# ./hack/generate-resources.sh --version 1.34 [options] +# ./hack/generate-resources.sh [stack-dir] --version 1.34 [options] +# +# If [stack-dir] is omitted, it is derived from $PROVIDER and $CLUSTER_STACK +# (default: providers/openstack/scs2). 
# # Options: # --version K8s minor version (required) @@ -44,8 +47,12 @@ while [[ $# -gt 0 ]]; do esac done -if [[ -z "$STACK_DIR" || -z "$K8S_VERSION" ]]; then - echo "Usage: $0 --version X.Y [--namespace ns] [--cluster-name name]" >&2 +if [[ -z "$STACK_DIR" ]]; then + STACK_DIR="providers/${PROVIDER:-openstack}/${CLUSTER_STACK:-scs2}" +fi + +if [[ -z "$K8S_VERSION" ]]; then + echo "Usage: $0 [stack-dir] --version X.Y [--namespace ns] [--cluster-name name]" >&2 exit 1 fi @@ -59,7 +66,7 @@ fi # ============================================ PROVIDER=$(yq '.config.provider.type' "$STACK_DIR/csctl.yaml") -STACK_NAME=$(yq '.config.clusterStackName' "$STACK_DIR/csctl.yaml") +CLUSTER_STACK=$(yq '.config.clusterStackName' "$STACK_DIR/csctl.yaml") K8S_DASH="${K8S_VERSION//./-}" # Resolve full K8s version from versions.yaml if available @@ -72,7 +79,7 @@ fi # Try to find the latest CS version from OCI registry CS_VERSION="v1" if [[ -n "${OCI_REGISTRY:-}" && -n "${OCI_REPOSITORY:-}" ]] && command -v oras >/dev/null 2>&1; then - TAG_PREFIX="${PROVIDER}-${STACK_NAME}-${K8S_DASH}" + TAG_PREFIX="${PROVIDER}-${CLUSTER_STACK}-${K8S_DASH}" LATEST=$(oras repo tags "${OCI_REGISTRY}/${OCI_REPOSITORY}" 2>/dev/null | \ grep -oP "^${TAG_PREFIX}-v\K[0-9]+" | sort -n | tail -1 || echo "") [[ -n "$LATEST" ]] && CS_VERSION="v${LATEST}" @@ -92,7 +99,7 @@ metadata: namespace: ${NAMESPACE} spec: provider: ${PROVIDER} - name: ${STACK_NAME} + name: ${CLUSTER_STACK} kubernetesVersion: "${K8S_VERSION}" channel: custom autoSubscribe: false @@ -106,7 +113,7 @@ fi # ============================================ if [[ "$CLUSTERSTACK_ONLY" != "true" ]]; then - CLUSTER_CLASS="${PROVIDER}-${STACK_NAME}-${K8S_DASH}-${CS_VERSION}" + CLUSTER_CLASS="${PROVIDER}-${CLUSTER_STACK}-${K8S_DASH}-${CS_VERSION}" cat < flavor (+ worker override) +# controlPlaneRootDisk/workerRootDisk -> rootDisk (+ worker override) +# controlPlaneServerGroupID/workerServerGroupID -> serverGroupID (+ worker override) +# 
workerAdditionalBlockDevices -> additionalBlockDevices +# workerSecurityGroups -> securityGroups (merged) +# workerSecurityGroupIDs -> securityGroupIDs (merged) +# apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs -> apiServerAllowedCIDRs +# +# Usage: +# ./hack/migrate-cluster.sh +# ./hack/migrate-cluster.sh --in-place +# +# Without --in-place, the transformed YAML is written to stdout. + +set -euo pipefail + +if [[ $# -lt 1 ]]; then + echo "Usage: $0 [--in-place]" >&2 + exit 1 +fi + +INPUT="$1" +IN_PLACE=false +[[ "${2:-}" == "--in-place" ]] && IN_PLACE=true + +if [[ ! -f "$INPUT" ]]; then + echo "File not found: $INPUT" >&2 + exit 1 +fi + +if ! command -v yq &>/dev/null; then + echo "yq is required but not found" >&2 + exit 1 +fi + +# Work on a temp copy +TEMP=$(mktemp) +cp "$INPUT" "$TEMP" + +# Helper: get a variable value by name from topology.variables +get_var() { + yq -r ".spec.topology.variables[] | select(.name == \"$1\") | .value" "$TEMP" 2>/dev/null +} + +# Helper: check if a variable exists +has_var() { + local val + val=$(yq -r ".spec.topology.variables[] | select(.name == \"$1\") | .name" "$TEMP" 2>/dev/null) + [[ -n "$val" ]] +} + +# Helper: delete a variable by name +del_var() { + yq -i "del(.spec.topology.variables[] | select(.name == \"$1\"))" "$TEMP" +} + +# Helper: set a variable (add or update) +set_var() { + local name="$1" value="$2" + if has_var "$name"; then + yq -i "(.spec.topology.variables[] | select(.name == \"$name\")).value = $value" "$TEMP" + else + yq -i ".spec.topology.variables += [{\"name\": \"$name\", \"value\": $value}]" "$TEMP" + fi +} + +# Helper: add a worker override +add_worker_override() { + local name="$1" value="$2" + # Ensure overrides array exists on first machineDeployment + yq -i '.spec.topology.workers.machineDeployments[0].variables.overrides //= []' "$TEMP" + yq -i ".spec.topology.workers.machineDeployments[0].variables.overrides += [{\"name\": \"$name\", \"value\": $value}]" "$TEMP" +} + +CHANGES=0 + +# 
============================================ +# Rename simple variables +# ============================================ + +if has_var "apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs"; then + val=$(get_var "apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs") + del_var "apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs" + set_var "apiServerAllowedCIDRs" "$val" + echo "Renamed: apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs -> apiServerAllowedCIDRs" >&2 + CHANGES=$((CHANGES + 1)) +fi + +# ============================================ +# Unify CP/Worker split variables +# ============================================ + +for pair in \ + "controlPlaneFlavor:workerFlavor:flavor" \ + "controlPlaneRootDisk:workerRootDisk:rootDisk" \ + "controlPlaneServerGroupID:workerServerGroupID:serverGroupID"; do + + cp_var="${pair%%:*}" + rest="${pair#*:}" + worker_var="${rest%%:*}" + new_var="${rest#*:}" + + cp_val="" + worker_val="" + has_cp=false + has_worker=false + + if has_var "$cp_var"; then + cp_val=$(get_var "$cp_var") + has_cp=true + fi + if has_var "$worker_var"; then + worker_val=$(get_var "$worker_var") + has_worker=true + fi + + if $has_cp || $has_worker; then + # Use CP value as the cluster-level default, or worker if CP not set + if $has_cp; then + set_var "$new_var" "\"$cp_val\"" + del_var "$cp_var" + fi + + if $has_worker; then + if $has_cp && [[ "$cp_val" != "$worker_val" ]]; then + # Different values: CP goes to cluster level, worker to override + add_worker_override "$new_var" "\"$worker_val\"" + elif ! 
$has_cp; then + # Only worker set: use as cluster level + set_var "$new_var" "\"$worker_val\"" + fi + del_var "$worker_var" + fi + + echo "Unified: $cp_var + $worker_var -> $new_var" >&2 + CHANGES=$((CHANGES + 1)) + fi +done + +# ============================================ +# Rename worker-only variables to unified +# ============================================ + +if has_var "workerAdditionalBlockDevices"; then + val=$(yq '.spec.topology.variables[] | select(.name == "workerAdditionalBlockDevices") | .value' "$TEMP") + del_var "workerAdditionalBlockDevices" + set_var "additionalBlockDevices" "$val" + echo "Renamed: workerAdditionalBlockDevices -> additionalBlockDevices" >&2 + CHANGES=$((CHANGES + 1)) +fi + +# Worker-specific security groups are removed — users should use the unified securityGroups/securityGroupIDs +for old_var in "workerSecurityGroups" "workerSecurityGroupIDs"; do + if has_var "$old_var"; then + echo "WARNING: $old_var is no longer supported. Use securityGroups/securityGroupIDs instead." >&2 + echo " The value has been removed. Please review manually." >&2 + del_var "$old_var" + CHANGES=$((CHANGES + 1)) + fi +done + +# ============================================ +# Output +# ============================================ + +if [[ $CHANGES -eq 0 ]]; then + echo "No migrations needed" >&2 + rm -f "$TEMP" + exit 0 +fi + +echo "Applied $CHANGES migration(s)" >&2 + +if $IN_PLACE; then + cp "$TEMP" "$INPUT" + echo "Updated: $INPUT" >&2 +else + cat "$TEMP" +fi + +rm -f "$TEMP" diff --git a/hack/show-matrix.sh b/hack/show-matrix.sh index 341535c1..8bb71c15 100755 --- a/hack/show-matrix.sh +++ b/hack/show-matrix.sh @@ -4,15 +4,20 @@ # Shows K8s versions, cluster-stack versions (from OCI), and all addon versions. # # Usage: -# ./hack/show-matrix.sh +# ./hack/show-matrix.sh [stack-dir] +# +# If is omitted, it is derived from $PROVIDER and $CLUSTER_STACK +# (default: providers/openstack/scs2). 
# # Environment: +# PROVIDER Provider name (default: openstack) +# CLUSTER_STACK Cluster stack name (default: scs2) # OCI_REGISTRY OCI registry to query for CS versions (optional) # OCI_REPOSITORY OCI repository to query for CS versions (optional) set -euo pipefail -STACK_DIR="${1:?Usage: $0 }" +STACK_DIR="${1:-providers/${PROVIDER:-openstack}/${CLUSTER_STACK:-scs2}}" if [[ ! -f "$STACK_DIR/csctl.yaml" ]]; then echo "csctl.yaml not found in: $STACK_DIR" >&2 @@ -20,7 +25,7 @@ if [[ ! -f "$STACK_DIR/csctl.yaml" ]]; then fi PROVIDER=$(yq '.config.provider.type' "$STACK_DIR/csctl.yaml") -STACK_NAME=$(yq '.config.clusterStackName' "$STACK_DIR/csctl.yaml") +CLUSTER_STACK=$(yq '.config.clusterStackName' "$STACK_DIR/csctl.yaml") # ============================================ # Collect universal addon versions (from Chart.yaml) @@ -44,7 +49,7 @@ done # Build the matrix # ============================================ -echo "Cluster Stack: ${PROVIDER}/${STACK_NAME}" +echo "Cluster Stack: ${PROVIDER}/${CLUSTER_STACK}" echo "" if [[ ! -f "$STACK_DIR/versions.yaml" ]]; then @@ -98,7 +103,7 @@ for ((i=0; i/dev/null 2>&1; then - TAG_PREFIX="${PROVIDER}-${STACK_NAME}-${K8S_DASH}" + TAG_PREFIX="${PROVIDER}-${CLUSTER_STACK}-${K8S_DASH}" LATEST=$(oras repo tags "${OCI_REGISTRY}/${OCI_REPOSITORY}" 2>/dev/null | \ grep -oP "^${TAG_PREFIX}-v\K[0-9]+" | sort -n | tail -1 || echo "") [[ -n "$LATEST" ]] && CS_VERSION="v${LATEST}" diff --git a/hack/update-addons.sh b/hack/update-addons.sh index 53b510bf..4854c09c 100755 --- a/hack/update-addons.sh +++ b/hack/update-addons.sh @@ -7,14 +7,19 @@ # K8s-tied addon list from versions.yaml keys. Works for any provider/stack. # # Usage: -# ./hack/update-addons.sh -# ./hack/update-addons.sh --k8s-version 1.34 -# ./hack/update-addons.sh --yes # auto-approve all updates +# ./hack/update-addons.sh [stack-dir] [options] +# +# If is omitted, it is derived from $PROVIDER and $CLUSTER_STACK +# (default: providers/openstack/scs2). 
+# +# Options: +# --k8s-version X.Y Target K8s minor for version-tied chart filtering +# --yes / -y Auto-approve all updates # # Examples: # ./hack/update-addons.sh providers/openstack/scs2 -# ./hack/update-addons.sh providers/openstack/scs2 --yes -# ./hack/update-addons.sh providers/docker/scs +# ./hack/update-addons.sh --yes +# PROVIDER=docker CLUSTER_STACK=scs ./hack/update-addons.sh set -euo pipefail @@ -42,8 +47,7 @@ while [[ $# -gt 0 ]]; do done if [[ -z "$STACK_DIR" ]]; then - echo "Usage: $0 [--k8s-version X.Y] [--yes]" - exit 1 + STACK_DIR="providers/${PROVIDER:-openstack}/${CLUSTER_STACK:-scs2}" fi if [[ ! -d "$STACK_DIR/cluster-addon" ]]; then diff --git a/hack/update-versions.sh b/hack/update-versions.sh index f73cf3c1..e55a6640 100755 --- a/hack/update-versions.sh +++ b/hack/update-versions.sh @@ -8,7 +8,10 @@ set -euo pipefail # image mappings, adds new minor versions and removes EOL ones. # # Usage: -# ./hack/update-versions.sh [--check|--apply] [--supported-minors N] +# ./hack/update-versions.sh [stack-dir] [--check|--apply] [--supported-minors N] +# +# If is omitted, it is derived from $PROVIDER and $CLUSTER_STACK +# (default: providers/openstack/scs2). # # Options: # --check Show available updates without modifying files (default) @@ -16,13 +19,15 @@ set -euo pipefail # --supported-minors N Keep the N most recent K8s minor versions (default: 4) # # Environment: +# PROVIDER Provider name (default: openstack) +# CLUSTER_STACK Cluster stack name (default: scs2) # GITHUB_TOKEN Optional. GitHub personal access token for higher API rate limits. # Without token: 60 requests/hour. With token: 5000 requests/hour. 
# # Examples: -# ./hack/update-versions.sh providers/openstack/scs2 --check +# ./hack/update-versions.sh --check # ./hack/update-versions.sh providers/openstack/scs2 --apply -# GITHUB_TOKEN=ghp_xxx ./hack/update-versions.sh providers/openstack/scs2 --apply +# PROVIDER=docker CLUSTER_STACK=scs ./hack/update-versions.sh --apply readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" @@ -162,31 +167,35 @@ detect_addons() { # --- Main logic --- parse_args() { - if [[ $# -lt 1 ]]; then - usage - fi - - STACK_DIR="$1" - shift - - # Resolve relative path - if [[ ! "$STACK_DIR" = /* ]]; then - STACK_DIR="$REPO_ROOT/$STACK_DIR" - fi + STACK_DIR="" while [[ $# -gt 0 ]]; do case "$1" in - --check) MODE="check" ;; - --apply) MODE="apply" ;; + --check) MODE="check"; shift ;; + --apply) MODE="apply"; shift ;; --supported-minors) - shift - SUPPORTED_MINORS="$1" + SUPPORTED_MINORS="$2"; shift 2 ;; -h|--help) usage ;; - *) echo "Unknown option: $1" >&2; usage ;; + -*) echo "Unknown option: $1" >&2; usage ;; + *) + if [[ -z "$STACK_DIR" ]]; then + STACK_DIR="$1"; shift + else + echo "Unexpected argument: $1" >&2; usage + fi + ;; esac - shift done + + if [[ -z "$STACK_DIR" ]]; then + STACK_DIR="providers/${PROVIDER:-openstack}/${CLUSTER_STACK:-scs2}" + fi + + # Resolve relative path + if [[ ! 
"$STACK_DIR" = /* ]]; then + STACK_DIR="$REPO_ROOT/$STACK_DIR" + fi } main() { diff --git a/justfile b/justfile index 9a298019..2aa62e7f 100644 --- a/justfile +++ b/justfile @@ -79,17 +79,9 @@ update-versions-all *FLAGS: # Resource Generation # ============================================ -# Generate both ClusterStack + Cluster YAML (e.g., just generate-resources 1.34) -generate-resources version *FLAGS: - ./hack/generate-resources.sh --version {{version}} {{FLAGS}} - -# Generate only the ClusterStack resource (e.g., just generate-clusterstack 1.34) -generate-clusterstack version *FLAGS: - ./hack/generate-resources.sh --version {{version}} --clusterstack-only {{FLAGS}} - -# Generate only the Cluster resource (e.g., just generate-cluster 1.34) -generate-cluster version *FLAGS: - ./hack/generate-resources.sh --version {{version}} --cluster-only {{FLAGS}} +# Generate ClusterStack + Cluster YAML for testing (e.g., just generate-resources 1.34) +generate-resources version: + ./hack/generate-resources.sh --version {{version}} # Generate OpenStack Image CRD manifests generate-image-manifests: @@ -128,7 +120,7 @@ container-build: echo "Building {{CONTAINER_IMAGE}} with $runtime..." $runtime build -t {{CONTAINER_IMAGE}} -f Containerfile . 
-# Run any just recipe inside the container (e.g., just container-run build-all) +# Run any just recipe inside the container (e.g., just container-run build-all) container-run *ARGS: #!/usr/bin/env bash set -euo pipefail diff --git a/providers/docker/scs/cluster-class/values.yaml b/providers/docker/scs/cluster-class/values.yaml index a55ba734..96c7ac41 100644 --- a/providers/docker/scs/cluster-class/values.yaml +++ b/providers/docker/scs/cluster-class/values.yaml @@ -3,3 +3,8 @@ images: - name: registry.scs.community/docker.io/kindest/node:v1.30.10 worker: - name: registry.scs.community/docker.io/kindest/node:v1.30.10 + +# ClusterClass variable defaults +# These are referenced by the ClusterClass template and can be overridden per deployment. +variables: + imageRepository: "" diff --git a/providers/docker/scs/versions.yaml b/providers/docker/scs/versions.yaml new file mode 100644 index 00000000..755636fe --- /dev/null +++ b/providers/docker/scs/versions.yaml @@ -0,0 +1,4 @@ +- kubernetes: 1.32.11 +- kubernetes: 1.33.7 +- kubernetes: 1.34.3 +- kubernetes: 1.35.1 diff --git a/providers/docker/scs2/cluster-addon/cni/Chart.yaml b/providers/docker/scs2/cluster-addon/cni/Chart.yaml new file mode 100644 index 00000000..ac865907 --- /dev/null +++ b/providers/docker/scs2/cluster-addon/cni/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +name: cni +description: Cilium CNI for Docker SCS2 cluster stack +type: application +version: 0.1.0 +dependencies: + - name: cilium + version: 1.19.0 + repository: https://helm.cilium.io/ diff --git a/providers/docker/scs2/cluster-addon/cni/values.yaml b/providers/docker/scs2/cluster-addon/cni/values.yaml new file mode 100644 index 00000000..8a312f0c --- /dev/null +++ b/providers/docker/scs2/cluster-addon/cni/values.yaml @@ -0,0 +1,14 @@ +cilium: + namespaceOverride: kube-system + tls: + secretsNamespace: + name: "kube-system" + sessionAffinity: true + sctp: + enabled: true + ipam: + mode: "kubernetes" + gatewayAPI: + enabled: true + 
secretsNamespace: + name: "kube-system" diff --git a/providers/docker/scs2/cluster-addon/metrics-server/Chart.yaml b/providers/docker/scs2/cluster-addon/metrics-server/Chart.yaml new file mode 100644 index 00000000..764d3b7c --- /dev/null +++ b/providers/docker/scs2/cluster-addon/metrics-server/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +name: metrics-server +description: Metrics Server for Docker SCS2 cluster stack +type: application +version: 0.1.0 +dependencies: + - name: metrics-server + version: "3.13.0" + repository: https://kubernetes-sigs.github.io/metrics-server/ diff --git a/providers/docker/scs2/cluster-addon/metrics-server/overwrite.yaml b/providers/docker/scs2/cluster-addon/metrics-server/overwrite.yaml new file mode 100644 index 00000000..7b1dcd5b --- /dev/null +++ b/providers/docker/scs2/cluster-addon/metrics-server/overwrite.yaml @@ -0,0 +1,4 @@ +values: | + metrics-server: + commonLabels: + domain: "{{ .Cluster.spec.controlPlaneEndpoint.host }}" diff --git a/providers/docker/scs2/cluster-addon/metrics-server/values.yaml b/providers/docker/scs2/cluster-addon/metrics-server/values.yaml new file mode 100644 index 00000000..a89bf027 --- /dev/null +++ b/providers/docker/scs2/cluster-addon/metrics-server/values.yaml @@ -0,0 +1,4 @@ +metrics-server: + fullnameOverride: metrics-server + args: + - --kubelet-insecure-tls diff --git a/providers/docker/scs2/cluster-class/.helmignore b/providers/docker/scs2/cluster-class/.helmignore new file mode 100644 index 00000000..ecf9bcc2 --- /dev/null +++ b/providers/docker/scs2/cluster-class/.helmignore @@ -0,0 +1,5 @@ +.git +.gitignore +*.swp +*.bak +*.tmp diff --git a/providers/docker/scs2/cluster-class/Chart.yaml b/providers/docker/scs2/cluster-class/Chart.yaml new file mode 100644 index 00000000..845adb82 --- /dev/null +++ b/providers/docker/scs2/cluster-class/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: docker-scs2-cluster-class +description: | + SCS2 Cluster Class for the Docker infrastructure provider. 
+ Uses CAPI v1beta2 for core/kubeadm resources, v1beta1 for Docker infra resources. +type: application +version: v1 diff --git a/providers/docker/scs2/cluster-class/templates/_helpers.tpl b/providers/docker/scs2/cluster-class/templates/_helpers.tpl new file mode 100644 index 00000000..449a5720 --- /dev/null +++ b/providers/docker/scs2/cluster-class/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "cluster-class.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cluster-class.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cluster-class.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "cluster-class.labels" -}} +helm.sh/chart: {{ include "cluster-class.chart" . }} +{{ include "cluster-class.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "cluster-class.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cluster-class.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/providers/docker/scs2/cluster-class/templates/cluster-class.yaml b/providers/docker/scs2/cluster-class/templates/cluster-class.yaml new file mode 100644 index 00000000..a848c31a --- /dev/null +++ b/providers/docker/scs2/cluster-class/templates/cluster-class.yaml @@ -0,0 +1,314 @@ +apiVersion: cluster.x-k8s.io/v1beta2 +kind: ClusterClass +metadata: + name: {{ .Release.Name }}-{{ .Chart.Version }} +spec: + controlPlane: + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: KubeadmControlPlaneTemplate + name: {{ .Release.Name }}-{{ .Chart.Version }}-control-plane + machineInfrastructure: + templateRef: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker + infrastructure: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerClusterTemplate + name: {{ .Release.Name }}-{{ .Chart.Version }}-cluster + workers: + machineDeployments: + - class: default-worker + bootstrap: + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: KubeadmConfigTemplate + name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker + infrastructure: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker + + # + # Variables + # + variables: + - name: imageRepository + required: false + schema: + openAPIV3Schema: + type: string + default: {{ .Values.variables.imageRepository | quote }} + example: "registry.k8s.io" + description: "imageRepository sets the container registry to pull images from. If empty, the kubeadm default will be used." 
+ - name: certSANs + required: false + schema: + openAPIV3Schema: + type: array + default: {{ .Values.variables.certSANs | toJson }} + example: ["mydomain.example"] + description: "certSANs sets extra Subject Alternative Names for the API Server signing cert." + items: + type: string + - name: oidcConfig + required: false + schema: + openAPIV3Schema: + type: object + properties: + clientID: + type: string + example: "kubectl" + description: "A client id that all tokens must be issued for." + issuerURL: + type: string + example: "https://dex.k8s.scs.community" + description: >- + URL of the provider that allows the API server to discover public signing keys. + Only URLs that use the https:// scheme are accepted. + usernameClaim: + type: string + example: "preferred_username" + default: {{ .Values.variables.oidcConfig.usernameClaim | quote }} + description: "JWT claim to use as the user name." + groupsClaim: + type: string + example: "groups" + default: {{ .Values.variables.oidcConfig.groupsClaim | quote }} + description: "JWT claim to use as the user's group. If the claim is present it must be an array of strings." + usernamePrefix: + type: string + example: "oidc:" + default: {{ .Values.variables.oidcConfig.usernamePrefix | quote }} + description: "Prefix prepended to username claims to prevent clashes with existing names." + groupsPrefix: + type: string + example: "oidc:" + default: {{ .Values.variables.oidcConfig.groupsPrefix | quote }} + description: "Prefix prepended to group claims to prevent clashes with existing names." + - name: registryMirrors + required: false + schema: + openAPIV3Schema: + type: array + default: {{ .Values.variables.registryMirrors | toJson }} + description: "Registry mirrors for upstream container registries. Configures both containerd and CRI-O to pull through a mirror." + items: + type: object + properties: + hostnameUpstream: + type: string + example: "docker.io" + description: "The hostname of the upstream registry." 
+ urlUpstream: + type: string + example: "https://registry-1.docker.io" + description: "The server URL of the upstream registry." + urlMirror: + type: string + example: "https://registry.example.com/v2/dockerhub" + description: "The URL of the mirror registry." + certMirror: + type: string + example: "" + description: "TLS certificate of the mirror in PEM format (optional)." + + # + # Patches + # + patches: + - name: imageRepository + description: "Sets the imageRepository used for the KubeadmControlPlane." + enabledIf: {{ `'{{ ne .imageRepository "" }}'` }} + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/imageRepository" + valueFrom: + variable: imageRepository + - name: customImage + description: "Sets the container image that is used for running dockerMachines for the control plane." + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/customImage" + value: {{ (index .Values.images.controlPlane 0).name }} + - name: workerImage + description: "Sets the container image that is used for running dockerMachines for the worker machineDeployments." + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/customImage" + value: {{ (index .Values.images.worker 0).name }} + - name: certSANs + description: "certSANs sets extra Subject Alternative Names for the API Server signing cert." 
+ enabledIf: {{ `'{{ if .certSANs }}true{{end}}'` }} + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/certSANs" + valueFrom: + variable: certSANs + - name: oidcConfig + description: "Configure API Server to use external authentication service." + enabledIf: {{ `'{{ if and .oidcConfig .oidcConfig.clientID .oidcConfig.issuerURL }}true{{end}}'` }} + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + valueFrom: + template: | + name: oidc-client-id + value: {{ `'{{ .oidcConfig.clientID }}'` }} + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + valueFrom: + template: | + name: oidc-issuer-url + value: {{ `'{{ .oidcConfig.issuerURL }}'` }} + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + valueFrom: + template: | + name: oidc-username-claim + value: {{ `'{{ .oidcConfig.usernameClaim }}'` }} + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + valueFrom: + template: | + name: oidc-groups-claim + value: {{ `'{{ .oidcConfig.groupsClaim }}'` }} + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + valueFrom: + template: | + name: oidc-username-prefix + value: {{ `'{{ .oidcConfig.usernamePrefix }}'` }} + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + valueFrom: + template: | + name: oidc-groups-prefix + value: {{ `'{{ .oidcConfig.groupsPrefix }}'` }} + # + # Registry mirror patches + # + 
- name: registryMirrorsControlPlane + description: "Configure registry mirrors on control plane nodes (containerd + CRI-O)." + enabledIf: {{ `'{{ if .registryMirrors }}true{{end}}'` }} + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/files" + valueFrom: + template: | + {{ `{{- range $r := .registryMirrors }}` }} + - content: | + server = "{{ `{{ $r.urlUpstream }}` }}" + [host."{{ `{{ $r.urlMirror }}` }}"] + capabilities = ["pull","resolve"] + override_path = true + owner: root:root + path: /etc/containerd/certs.d/{{ `{{ $r.hostnameUpstream }}` }}/hosts.toml + permissions: "0644" + - content: | + [[registry]] + prefix = "{{ `{{ $r.hostnameUpstream }}` }}" + location = "{{ `{{ $r.hostnameUpstream }}` }}" + [[registry.mirror]] + location = "{{ `{{ $r.urlMirror }}` }}" + owner: root:root + path: /etc/containers/registries.conf.d/50-mirror-{{ `{{ $r.hostnameUpstream }}` }}.conf + permissions: "0644" + {{ `{{- if $r.certMirror }}` }} + - content: "{{ `{{ $r.certMirror }}` }}" + owner: root:root + path: /etc/containerd/certs/{{ `{{ $r.hostnameUpstream }}` }}/ca.crt + permissions: "0644" + - content: "{{ `{{ $r.certMirror }}` }}" + owner: root:root + path: /etc/containers/certs.d/{{ `{{ $r.hostnameUpstream }}` }}/ca.crt + permissions: "0644" + {{ `{{- end }}` }} + {{ `{{- end }}` }} + - name: registryMirrorsWorker + description: "Configure registry mirrors on worker nodes (containerd + CRI-O)." 
+ enabledIf: {{ `'{{ if .registryMirrors }}true{{end}}'` }} + definitions: + - selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/files" + valueFrom: + template: | + {{ `{{- range $r := .registryMirrors }}` }} + - content: | + server = "{{ `{{ $r.urlUpstream }}` }}" + [host."{{ `{{ $r.urlMirror }}` }}"] + capabilities = ["pull","resolve"] + override_path = true + owner: root:root + path: /etc/containerd/certs.d/{{ `{{ $r.hostnameUpstream }}` }}/hosts.toml + permissions: "0644" + - content: | + [[registry]] + prefix = "{{ `{{ $r.hostnameUpstream }}` }}" + location = "{{ `{{ $r.hostnameUpstream }}` }}" + [[registry.mirror]] + location = "{{ `{{ $r.urlMirror }}` }}" + owner: root:root + path: /etc/containers/registries.conf.d/50-mirror-{{ `{{ $r.hostnameUpstream }}` }}.conf + permissions: "0644" + {{ `{{- if $r.certMirror }}` }} + - content: "{{ `{{ $r.certMirror }}` }}" + owner: root:root + path: /etc/containerd/certs/{{ `{{ $r.hostnameUpstream }}` }}/ca.crt + permissions: "0644" + - content: "{{ `{{ $r.certMirror }}` }}" + owner: root:root + path: /etc/containers/certs.d/{{ `{{ $r.hostnameUpstream }}` }}/ca.crt + permissions: "0644" + {{ `{{- end }}` }} + {{ `{{- end }}` }} diff --git a/providers/docker/scs2/cluster-class/templates/docker-cluster-template.yaml b/providers/docker/scs2/cluster-class/templates/docker-cluster-template.yaml new file mode 100644 index 00000000..ca51270d --- /dev/null +++ b/providers/docker/scs2/cluster-class/templates/docker-cluster-template.yaml @@ -0,0 +1,7 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerClusterTemplate +metadata: + name: {{ .Release.Name }}-{{ .Chart.Version }}-cluster +spec: + template: + spec: {} diff --git a/providers/docker/scs2/cluster-class/templates/docker-machine-template.yaml 
b/providers/docker/scs2/cluster-class/templates/docker-machine-template.yaml new file mode 100644 index 00000000..372949f8 --- /dev/null +++ b/providers/docker/scs2/cluster-class/templates/docker-machine-template.yaml @@ -0,0 +1,10 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" diff --git a/providers/docker/scs2/cluster-class/templates/kubeadm-config-template-worker.yaml b/providers/docker/scs2/cluster-class/templates/kubeadm-config-template-worker.yaml new file mode 100644 index 00000000..8117fedc --- /dev/null +++ b/providers/docker/scs2/cluster-class/templates/kubeadm-config-template-worker.yaml @@ -0,0 +1,15 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 +kind: KubeadmConfigTemplate +metadata: + name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + - name: eviction-hard + value: "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%" + - name: fail-swap-on + value: "false" diff --git a/providers/docker/scs2/cluster-class/templates/kubeadm-control-plane-template.yaml b/providers/docker/scs2/cluster-class/templates/kubeadm-control-plane-template.yaml new file mode 100644 index 00000000..c4ee9070 --- /dev/null +++ b/providers/docker/scs2/cluster-class/templates/kubeadm-control-plane-template.yaml @@ -0,0 +1,99 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +kind: KubeadmControlPlaneTemplate +metadata: + name: {{ .Release.Name }}-{{ .Chart.Version }}-control-plane +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] + controllerManager: + extraArgs: + - name: bind-address 
+ value: 0.0.0.0 + - name: secure-port + value: "10257" + - name: profiling + value: "false" + - name: terminated-pod-gc-threshold + value: "100" + scheduler: + extraArgs: + - name: bind-address + value: 0.0.0.0 + - name: secure-port + value: "10259" + - name: profiling + value: "false" + etcd: + local: + dataDir: /var/lib/etcd + extraArgs: + - name: listen-metrics-urls + value: http://0.0.0.0:2381 + - name: auto-compaction-mode + value: periodic + - name: auto-compaction-retention + value: 8h + - name: election-timeout + value: "2500" + - name: heartbeat-interval + value: "250" + - name: snapshot-count + value: "6400" + files: + - content: | + --- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + metricsBindAddress: "0.0.0.0:10249" + path: /etc/kube-proxy-config.yaml + - content: | + #!/usr/bin/env bash + set -o errexit + set -o nounset + set -o pipefail + + dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + readonly dir + + if [[ ! -f ${dir}/kube-proxy-config.yaml ]]; then + exit 0 + fi + + kubeadm_file="/etc/kubeadm.yml" + if [[ ! -f ${kubeadm_file} ]]; then + kubeadm_file="/run/kubeadm/kubeadm.yaml" + fi + + if [[ ! 
-f ${kubeadm_file} ]]; then + exit 0 + fi + + cat "${dir}/kube-proxy-config.yaml" >> "${kubeadm_file}" + rm "${dir}/kube-proxy-config.yaml" + + echo success > /tmp/kube-proxy-patch + owner: root:root + path: /etc/kube-proxy-patch.sh + permissions: "0755" + preKubeadmCommands: + - bash /etc/kube-proxy-patch.sh + initConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + - name: eviction-hard + value: "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%" + - name: fail-swap-on + value: "false" + joinConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + - name: eviction-hard + value: "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%" + - name: fail-swap-on + value: "false" diff --git a/providers/docker/scs2/cluster-class/values.yaml b/providers/docker/scs2/cluster-class/values.yaml new file mode 100644 index 00000000..bf6e6ea1 --- /dev/null +++ b/providers/docker/scs2/cluster-class/values.yaml @@ -0,0 +1,20 @@ +# Node images for kind (patched at build time by build.sh for each K8s version) +images: + controlPlane: + - name: registry.scs.community/docker.io/kindest/node:v1.35.1 + worker: + - name: registry.scs.community/docker.io/kindest/node:v1.35.1 + +# ClusterClass variable defaults +# These are referenced by the ClusterClass template and can be overridden per deployment. 
+variables: + imageRepository: "" + certSANs: [] + registryMirrors: [] + oidcConfig: + clientID: "" + issuerURL: "" + usernameClaim: "preferred_username" + groupsClaim: "groups" + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" diff --git a/providers/docker/scs2/clusteraddon.yaml b/providers/docker/scs2/clusteraddon.yaml new file mode 100644 index 00000000..9a639968 --- /dev/null +++ b/providers/docker/scs2/clusteraddon.yaml @@ -0,0 +1,18 @@ +apiVersion: clusteraddonconfig.x-k8s.io/v1alpha1 +clusterAddonVersion: clusteraddons.clusterstack.x-k8s.io/v1alpha1 +addonStages: + AfterControlPlaneInitialized: + - name: cni + action: apply + - name: metrics-server + action: apply + BeforeClusterUpgrade: + - name: cni + action: apply + - name: metrics-server + action: apply + AfterClusterUpgrade: + - name: cni + action: apply + - name: metrics-server + action: apply diff --git a/providers/docker/scs2/csctl.yaml b/providers/docker/scs2/csctl.yaml new file mode 100644 index 00000000..09b837c5 --- /dev/null +++ b/providers/docker/scs2/csctl.yaml @@ -0,0 +1,7 @@ +apiVersion: csctl.clusterstack.x-k8s.io/v1alpha1 +config: + kubernetesVersion: v1.35.1 + clusterStackName: scs2 + provider: + type: docker + apiVersion: docker.csctl.clusterstack.x-k8s.io/v1alpha1 diff --git a/providers/docker/scs2/versions.yaml b/providers/docker/scs2/versions.yaml new file mode 100644 index 00000000..755636fe --- /dev/null +++ b/providers/docker/scs2/versions.yaml @@ -0,0 +1,4 @@ +- kubernetes: 1.32.11 +- kubernetes: 1.33.7 +- kubernetes: 1.34.3 +- kubernetes: 1.35.1 diff --git a/providers/openstack/scs/versions.yaml b/providers/openstack/scs/versions.yaml index 99fc8c73..2c0f3137 100644 --- a/providers/openstack/scs/versions.yaml +++ b/providers/openstack/scs/versions.yaml @@ -1,9 +1,12 @@ - kubernetes: 1.30.13 - cinder_csi: 2.30.3 - occm: 2.30.5 + ubuntu: "2204" + openstack-cinder-csi: 2.30.3 + openstack-cloud-controller-manager: 2.30.5 - kubernetes: 1.31.9 - cinder_csi: 2.31.7 - occm: 2.31.3 + 
ubuntu: "2204" + openstack-cinder-csi: 2.31.7 + openstack-cloud-controller-manager: 2.31.3 - kubernetes: 1.32.5 - cinder_csi: 2.32.0 - occm: 2.32.0 + ubuntu: "2204" + openstack-cinder-csi: 2.32.0 + openstack-cloud-controller-manager: 2.32.0 diff --git a/providers/openstack/scs2/cluster-addon/ccm/Chart.yaml b/providers/openstack/scs2/cluster-addon/ccm/Chart.yaml index 7fddcbd4..bd086402 100644 --- a/providers/openstack/scs2/cluster-addon/ccm/Chart.yaml +++ b/providers/openstack/scs2/cluster-addon/ccm/Chart.yaml @@ -7,4 +7,4 @@ dependencies: - alias: openstack-cloud-controller-manager name: openstack-cloud-controller-manager repository: https://kubernetes.github.io/cloud-provider-openstack - version: 2.34.1 + version: 2.34.2 diff --git a/providers/openstack/scs2/cluster-addon/cni/Chart.yaml b/providers/openstack/scs2/cluster-addon/cni/Chart.yaml index 7bc28cb7..4935bb4a 100644 --- a/providers/openstack/scs2/cluster-addon/cni/Chart.yaml +++ b/providers/openstack/scs2/cluster-addon/cni/Chart.yaml @@ -7,4 +7,4 @@ dependencies: - alias: cilium name: cilium repository: https://helm.cilium.io/ - version: 1.18.5 + version: 1.19.0 diff --git a/providers/openstack/scs2/cluster-addon/csi/Chart.yaml b/providers/openstack/scs2/cluster-addon/csi/Chart.yaml index dd303380..cbe0cc17 100644 --- a/providers/openstack/scs2/cluster-addon/csi/Chart.yaml +++ b/providers/openstack/scs2/cluster-addon/csi/Chart.yaml @@ -7,4 +7,4 @@ dependencies: - alias: openstack-cinder-csi name: openstack-cinder-csi repository: https://kubernetes.github.io/cloud-provider-openstack - version: 2.34.1 + version: 2.34.3 diff --git a/providers/openstack/scs2/cluster-class/templates/cluster-class.yaml b/providers/openstack/scs2/cluster-class/templates/cluster-class.yaml index d7fbd338..c9131e4c 100644 --- a/providers/openstack/scs2/cluster-class/templates/cluster-class.yaml +++ b/providers/openstack/scs2/cluster-class/templates/cluster-class.yaml @@ -1,37 +1,36 @@ -apiVersion: cluster.x-k8s.io/v1beta1 
+apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: {{ .Release.Name }}-{{ .Chart.Version }} spec: controlPlane: - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: {{ .Release.Name }}-{{ .Chart.Version }}-control-plane machineInfrastructure: - ref: + templateRef: kind: OpenStackMachineTemplate apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 name: {{ .Release.Name }}-{{ .Chart.Version }}-control-plane infrastructure: - ref: + templateRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: OpenStackClusterTemplate name: {{ .Release.Name }}-{{ .Chart.Version }}-cluster workers: machineDeployments: - class: default-worker - template: - bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 - kind: KubeadmConfigTemplate - name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker - infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: OpenStackMachineTemplate - name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker + bootstrap: + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: KubeadmConfigTemplate + name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker + infrastructure: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: OpenStackMachineTemplate + name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker variables: # Image variables - name: imageName @@ -44,7 +43,7 @@ spec: If `imageIsOrc` is enabled, this name refers to an ORC image resource. If `imageIsOrc` is disabled, the name is used to filter images available in the OpenStack project. In this case, the specified image must already exist within the project. 
If `imageAddVersion` is enabled, the Kubernetes version will be appended to form the complete image name (e.g., imageName-v1.32.5) - default: "ubuntu-capi-image" + default: {{ .Values.variables.imageName | quote }} - name: imageIsOrc required: false schema: @@ -54,7 +53,7 @@ spec: Indicates whether the image name refers to an ORC image resource. If set to true (default), the `imageName` is interpreted as a reference to an ORC image. If set to false, the `imageName` is used to filter images in the OpenStack project instead. - default: false + default: {{ .Values.variables.imageIsOrc }} - name: imageAddVersion required: false schema: @@ -62,15 +61,7 @@ spec: type: boolean description: | Add a suffix with the Kubernetes version to the imageName. E.g. imageName-v1.32.5. - default: true - - name: disableAPIServerFloatingIP - required: false - schema: - openAPIV3Schema: - type: boolean - default: false - example: false - description: "DisableAPIServerFloatingIP controls whether a floating IP should be attached to the API server." + default: {{ .Values.variables.imageAddVersion }} # Network variables - name: networkExternalID required: false @@ -79,21 +70,23 @@ spec: type: string example: "ebfe5546-f09f-4f42-ab54-094e457d42ec" format: "uuid4" - description: "networkExternalID is the ID of an external OpenStack Network. This is necessary to get public internet to the VMs in case there are several external networks." + description: |- + ID of an external OpenStack network. Required when multiple + external networks exist and VMs need public internet access. - name: networkMTU required: false schema: openAPIV3Schema: type: integer example: 1500 - description: "networkMTU sets the maximum transmission unit (MTU) value to address fragmentation for the private network ID." + description: "MTU for the private cluster network. Set this to avoid fragmentation issues." 
- name: dnsNameservers required: false schema: openAPIV3Schema: type: array - description: "dnsNameservers is the list of nameservers for the OpenStack Subnet being created. Set this value when you need to create a new network/subnet which requires access to DNS." - default: ["9.9.9.9", "149.112.112.112"] + description: "DNS nameservers for the cluster subnet. Only used when a new network/subnet is created." + default: {{ .Values.variables.dnsNameservers | toJson }} example: ["9.9.9.9", "149.112.112.112"] items: type: string @@ -103,97 +96,53 @@ spec: openAPIV3Schema: type: string format: "cidr" - default: "10.8.0.0/20" + default: {{ .Values.variables.nodeCIDR | quote }} example: "10.8.0.0/20" description: |- - nodeCIDR is the OpenStack Subnet to be created. - Cluster actuator will create a network, a subnet with nodeCIDR, - and a router connected to this subnet. - If you leave this empty, no network will be created. - # Control plane - - name: controlPlaneFlavor + CIDR for the cluster subnet. CAPO will create a network, subnet, + and router. Leave empty to skip network creation. + # Machine configuration + # These apply to all nodes by default. Use topology.controlPlane.variables.overrides + # or topology.workers.machineDeployments[].variables.overrides to differentiate. + - name: flavor required: false schema: openAPIV3Schema: type: string - default: "SCS-2V-4" - example: "SCS-2V-4-20s" + default: {{ .Values.variables.flavor | quote }} + example: "SCS-2V-4" description: |- - OpenStack instance flavor for control plane nodes. - (Default: SCS-2V-4, replace by SCS-2V-4-20s or specify a controlPlaneRootDisk.) - - name: controlPlaneRootDisk + OpenStack instance flavor. Applies to all nodes by default. + Override per control plane or worker via topology variables overrides. 
+ - name: rootDisk required: false schema: openAPIV3Schema: type: integer minimum: 0 - example: 25 - default: 50 + example: 50 + default: {{ .Values.variables.rootDisk }} description: |- - Root disk size in GiB for control-plane nodes. - OpenStack volume will be created and used instead of an ephemeral disk defined in flavor. - Should only be used for the diskless flavors (>= 20), otherwise set to 0. - - name: controlPlaneServerGroupID + Root disk size in GiB. OpenStack volume will be created and used + instead of an ephemeral disk defined in the flavor. + Set to 0 to use the flavor's ephemeral disk. + - name: serverGroupID required: false schema: openAPIV3Schema: type: string - default: "" + default: {{ .Values.variables.serverGroupID | quote }} example: "3adf4e92-bb33-4e44-8ad3-afda9dfe8ec3" - description: "The server group to assign the control plane nodes to (can be used for anti-affinity)." - - name: controlPlaneAvailabilityZones + description: "Server group for anti-affinity placement. Override per CP/worker via topology." + - name: additionalBlockDevices required: false schema: openAPIV3Schema: type: array - example: ["nova"] - description: "controlPlaneAvailabilityZones is the set of availability zones which control plane machines may be deployed to." - items: - type: string - - name: controlPlaneOmitAvailabilityZone - required: false - schema: - openAPIV3Schema: - type: boolean - example: true + default: {{ .Values.variables.additionalBlockDevices | toJson }} description: |- - controlPlaneOmitAvailabilityZone causes availability zone to be omitted when creating control plane nodes, - allowing the Nova scheduler to make a decision on which availability zone to use based on other scheduling constraints. - # Workers - - name: workerFlavor - required: false - schema: - openAPIV3Schema: - type: string - default: "SCS-4V-8" - example: "SCS-4V-8" - description: "OpenStack instance flavor for worker nodes (default: SCS-4V-8, which requires workerRootDisk)." 
- - name: workerRootDisk - required: false - schema: - openAPIV3Schema: - type: integer - minimum: 0 - example: 25 - default: 50 - description: |- - Root disk size in GiB for worker nodes. - OpenStack volume will be created and used instead of an ephemeral disk defined in flavor. - Should be used for the diskless flavors (>= 20), otherwise set to 0. - - name: workerServerGroupID - required: false - schema: - openAPIV3Schema: - type: string - default: "" - example: "869fe071-1e56-46a9-9166-47c9f228e297" - description: "The server group to assign the worker nodes to." - - name: workerAdditionalBlockDevices - required: false - schema: - openAPIV3Schema: - type: array - default: [] + Additional block devices (Cinder volumes) to attach to nodes. + Override per CP/worker via topology. items: type: object properties: @@ -205,60 +154,59 @@ spec: type: type: string default: "__DEFAULT__" - required: ["name"] - # Access management - - name: sshKeyName + required: ["name"] + # Cluster-level (control plane only, managed by OpenStackClusterTemplate) + - name: controlPlaneAvailabilityZones required: false schema: openAPIV3Schema: - type: string - default: "" - example: "capi-keypair" - description: "The ssh key name to inject in the nodes (for debugging)." - - name: securityGroups + type: array + default: {{ .Values.variables.controlPlaneAvailabilityZones | toJson }} + example: ["nova"] + description: "Availability zones for control plane nodes (OpenStack cluster-level setting)." + items: + type: string + - name: controlPlaneOmitAvailabilityZone required: false schema: openAPIV3Schema: - type: array - default: [] - example: ["security-group-1"] + type: boolean + default: {{ .Values.variables.controlPlaneOmitAvailabilityZone }} description: |- - The names of extra security groups to assign to worker and control plane nodes. - Will be ignored if `securityGroupIDs` is used. 
- items: - type: string - - name: securityGroupIDs + Omit availability zone when creating control plane nodes, letting the + Nova scheduler decide based on other scheduling constraints. + # Access management + - name: sshKeyName required: false schema: openAPIV3Schema: - format: "uuid4" - type: array - default: [] - example: ["9ae2f488-30a3-4629-bd51-07acb8eb4278"] - description: "The UUIDs of extra security groups to assign to worker and control plane nodes" - items: - type: string - - name: workerSecurityGroups + type: string + default: {{ .Values.variables.sshKeyName | quote }} + example: "capi-keypair" + description: "SSH key to inject into all nodes (for debugging)." + - name: securityGroups required: false schema: openAPIV3Schema: type: array - default: [] + default: {{ .Values.variables.securityGroups | toJson }} example: ["security-group-1"] description: |- - The names of extra security groups to assign to the worker nodes. - Will be ignored if `workerSecurityGroupIDs` is used. + Extra security groups by name for all nodes. + Ignored if securityGroupIDs is set. Override per CP/worker via topology. items: type: string - - name: workerSecurityGroupIDs + - name: securityGroupIDs required: false schema: openAPIV3Schema: format: "uuid4" type: array - default: [] + default: {{ .Values.variables.securityGroupIDs | toJson }} example: ["9ae2f488-30a3-4629-bd51-07acb8eb4278"] - description: "The UUIDs of extra security groups to assign to the worker nodes" + description: |- + Extra security groups by UUID for all nodes. + Takes precedence over securityGroups. Override per CP/worker via topology. 
items: type: string - name: identityRef @@ -266,27 +214,34 @@ spec: schema: openAPIV3Schema: type: object - default: {} + default: {{ .Values.variables.identityRef | toJson }} properties: name: type: string example: "openstack" - default: "openstack" + default: {{ .Values.variables.identityRef.name | quote }} description: "The name of the secret that carries the OpenStack clouds.yaml" cloudName: type: string example: "openstack" - default: "openstack" + default: {{ .Values.variables.identityRef.cloudName | quote }} description: "The name of the cloud to use from the clouds.yaml" - # Kubernetes API server + # API server + - name: disableAPIServerFloatingIP + required: false + schema: + openAPIV3Schema: + type: boolean + default: {{ .Values.variables.disableAPIServerFloatingIP }} + description: "Disable the floating IP on the API server load balancer." - name: certSANs required: false schema: openAPIV3Schema: type: array - default: [] + default: {{ .Values.variables.certSANs | toJson }} example: ["mydomain.example"] - description: "certSANs sets extra Subject Alternative Names for the API Server signing cert." + description: "Extra Subject Alternative Names for the API server TLS certificate." items: type: string - name: apiServerLoadBalancer @@ -294,33 +249,22 @@ spec: schema: openAPIV3Schema: type: string - default: "octavia-ovn" - example: "none, octavia-amphora, octavia-ovn" - description: | - Cluster-API by default places a LoadBalancer in front of the kubernetes API server. - (There are also LBs that the CCM creates for a service type LoadBalancer which are configured independently.) - This setting here is to configure the LoadBalancer that is placed in front of the apiServer. 
- You can choose from 3 options: - - none: - No LoadBalancer solution will be deployed - - octavia-amphora: - Uses OpenStack's LoadBalancer service Octavia (provider:amphora) - - octavia-ovn: - (default) Uses OpenStack's LoadBalancer service Octavia (provider:ovn) - - name: apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs + default: {{ .Values.variables.apiServerLoadBalancer | quote }} + example: "octavia-ovn" + description: |- + Load balancer in front of the API server. + Options: none, octavia-amphora, octavia-ovn (default). + - name: apiServerAllowedCIDRs required: false schema: openAPIV3Schema: type: array example: ["192.168.10.0/24"] description: |- - apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs restrict access to the Kubernetes API server on a network level. - Ensure that at least the outgoing IP of your Management Cluster is added to the list of allowed CIDRs. - Otherwise CAPO can’t reconcile the target Cluster correctly. - This requires amphora as load balancer provider in version >= v2.12. + Restrict access to the API server to these CIDRs (network-level ACL). + Requires amphora as load balancer provider (Octavia API >= 2.12). + Ensure the management cluster's outgoing IP is included, + otherwise CAPO cannot reconcile the workload cluster. items: type: string - name: oidcConfig @@ -344,7 +288,7 @@ spec: usernameClaim: type: string example: "preferred_username" - default: "preferred_username" + default: {{ .Values.variables.oidcConfig.usernameClaim | quote }} description: >- JWT claim to use as the user name. By default sub, which is expected to be a unique identifier of the end user. Admins can choose @@ -354,12 +298,12 @@ spec: groupsClaim: type: string example: "groups" - default: "groups" + default: {{ .Values.variables.oidcConfig.groupsClaim | quote }} description: "JWT claim to use as the user's group. If the claim is present it must be an array of strings." 
usernamePrefix: type: string example: "oidc:" - default: "oidc:" + default: {{ .Values.variables.oidcConfig.usernamePrefix | quote }} description: >- Prefix prepended to username claims to prevent clashes with existing names (such as system: users). For example, the value @@ -370,11 +314,38 @@ spec: groupsPrefix: type: string example: "oidc:" - default: "oidc:" + default: {{ .Values.variables.oidcConfig.groupsPrefix | quote }} description: >- Prefix prepended to group claims to prevent clashes with existing names (such as system: groups). For example, the value oidc: will create group names like oidc:engineering and oidc:infra. + # Container runtime + - name: registryMirrors + required: false + schema: + openAPIV3Schema: + type: array + default: {{ .Values.variables.registryMirrors | toJson }} + description: "Registry mirrors for upstream container registries. Configures both containerd and CRI-O to pull through a mirror." + items: + type: object + properties: + hostnameUpstream: + type: string + example: "docker.io" + description: "The hostname of the upstream registry." + urlUpstream: + type: string + example: "https://registry-1.docker.io" + description: "The server URL of the upstream registry." + urlMirror: + type: string + example: "https://registry.example.com/v2/dockerhub" + description: "The URL of the mirror registry." + certMirror: + type: string + example: "" + description: "TLS certificate of the mirror in PEM format (optional)." # # Patches # @@ -414,9 +385,9 @@ spec: - op: add path: "/spec/template/spec/apiServerLoadBalancer/provider" value: "ovn" - - name: apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs - description: "Takes care of the patches that should be applied when variable allowedCIDRs is set." - enabledIf: {{ `'{{ and .apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs (eq .apiServerLoadBalancer "octavia-amphora") }}'` }} + - name: apiServerAllowedCIDRs + description: "Restricts API server access to the given CIDRs (requires amphora LB)." 
+ enabledIf: {{ `'{{ and .apiServerAllowedCIDRs (eq .apiServerLoadBalancer "octavia-amphora") }}'` }} definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 @@ -427,7 +398,7 @@ spec: - op: add path: "/spec/template/spec/apiServerLoadBalancer/allowedCIDRs" valueFrom: - variable: apiServerLoadBalancerOctaviaAmphoraAllowedCIDRs + variable: apiServerAllowedCIDRs - name: networkExternalID description: "Sets the ID of an external OpenStack Network. This is necessary to get public internet to the VMs." enabledIf: {{ `'{{ if .networkExternalID }}true{{end}}'` }} @@ -537,10 +508,13 @@ spec: valueFrom: variable: disableAPIServerFloatingIP # - # Patches for control plane's OpenStackMachineTemplate resources. - # Note: Control plane patches are only applied when the control plane is managed by Kubeadm. + # Patches for OpenStackMachineTemplate resources (image). + # Image patches must stay separate because they use different builtin variables: + # - .builtin.controlPlane.version for CP + # - .builtin.machineDeployment.version for workers + # - name: controlPlaneImage - description: "Sets the OpenStack image name that is used for creating the control plane servers." + description: "Sets the OpenStack image for control plane nodes." definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 @@ -554,120 +528,13 @@ spec: template: | {{ `{{ if .imageIsOrc }}imageRef{{ else }}filter{{ end }}` }}: name: {{ `{{ .imageName }}{{ if .imageAddVersion }}-{{ .builtin.controlPlane.version }}{{ end }}` }} - - name: controlPlaneFlavor - description: "Sets the openstack instance flavor for the KubeadmControlPlane." 
- enabledIf: {{ `'{{ ne .controlPlaneFlavor "" }}'` }} - definitions: - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: OpenStackMachineTemplate - matchResources: - controlPlane: true - jsonPatches: - - op: replace - path: "/spec/template/spec/flavor" - valueFrom: - variable: controlPlaneFlavor - - name: controlPlaneRootDisk - description: "Sets the root disk size in GiB for control-plane nodes." - enabledIf: {{ `'{{ if .controlPlaneRootDisk }}true{{end}}'` }} - definitions: - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: OpenStackMachineTemplate - matchResources: - controlPlane: true - jsonPatches: - - op: add - path: "/spec/template/spec/rootVolume" - valueFrom: - template: | - sizeGiB: {{ `{{ .controlPlaneRootDisk }}` }} - - name: controlPlaneServerGroupID - description: "Sets the server group to assign the control plane nodes to." - enabledIf: {{ `'{{ ne .controlPlaneServerGroupID "" }}'` }} - definitions: - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: OpenStackMachineTemplate - matchResources: - controlPlane: true - jsonPatches: - - op: add - path: "/spec/template/spec/serverGroup" - valueFrom: - template: | - id: {{ `{{ .controlPlaneServerGroupID }}` }} - # - # Patches for control plane's as well as worker's OpenStackMachineTemplate resources. - # Note: Control plane patches are only applied when the control plane is managed by Kubeadm. - # - # Note: The securityGroups patch must be placed before securityGroupIDs, workerSecurityGroups, and workerSecurityGroupIDs. - # The patch order ensures the last applied patch overwrites previous ones. - - name: securityGroups - description: "Sets the list of the openstack security groups for the worker and the control plane instances." 
- enabledIf: {{ `'{{ if .securityGroups }}true{{end}}'` }} - definitions: - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: OpenStackMachineTemplate - matchResources: - controlPlane: true - machineDeploymentClass: - names: - - default-worker - jsonPatches: - - op: add - path: "/spec/template/spec/securityGroups" - valueFrom: - template: {{ `'[ {{ range .securityGroups }} { filter: { name: {{ . }}}}, {{ end }} ]'` }} - # Note: The securityGroupIDs patch must be placed before workerSecurityGroups, workerSecurityGroupIDs and after securityGroupIDs. - # The patch order ensures the last applied patch overwrites previous ones. - - name: securityGroupIDs - description: "Sets the list of the openstack security groups for the worker and the control plane instances by UUID." - enabledIf: {{ `'{{ if .securityGroupIDs }}true{{end}}'` }} - definitions: - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: OpenStackMachineTemplate - matchResources: - controlPlane: true - machineDeploymentClass: - names: - - default-worker - jsonPatches: - - op: add - path: "/spec/template/spec/securityGroups" - valueFrom: - template: {{ `'[ {{ range .securityGroupIDs }} { id: {{ . }} }, {{ end }} ]'` }} - - name: sshKeyName - description: "Sets the ssh key to inject in the nodes." - enabledIf: {{ `'{{ ne .sshKeyName "" }}'` }} - definitions: - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: OpenStackMachineTemplate - matchResources: - controlPlane: true - machineDeploymentClass: - names: - - default-worker - jsonPatches: - - op: add - path: "/spec/template/spec/sshKeyName" - valueFrom: - variable: sshKeyName - # - # Patches for worker's OpenStackMachineTemplate resources. - # - name: workerImage - description: "Sets the OpenStack image name that is used for creating the worker servers." + description: "Sets the OpenStack image for worker nodes." 
definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: OpenStackMachineTemplate matchResources: - controlPlane: false machineDeploymentClass: names: - default-worker @@ -678,15 +545,20 @@ spec: template: | {{ `{{ if .imageIsOrc }}imageRef{{ else }}filter{{ end }}` }}: name: {{ `{{ .imageName }}{{ if .imageAddVersion }}-{{ .builtin.machineDeployment.version }}{{ end }}` }} - - name: workerFlavor - description: "Sets the openstack instance flavor for the worker nodes." - enabledIf: {{ `'{{ ne .workerFlavor "" }}'` }} + # + # Unified machine patches — target both CP and workers. + # Users can override per CP/worker via topology.controlPlane.variables.overrides + # and topology.workers.machineDeployments[].variables.overrides. + # + - name: flavor + description: "Sets the OpenStack instance flavor for all nodes." + enabledIf: {{ `'{{ ne .flavor "" }}'` }} definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: OpenStackMachineTemplate matchResources: - controlPlane: false + controlPlane: true machineDeploymentClass: names: - default-worker @@ -694,16 +566,16 @@ spec: - op: replace path: "/spec/template/spec/flavor" valueFrom: - variable: workerFlavor - - name: workerRootDisk - description: "Sets the root disk size in GiB for worker nodes." - enabledIf: {{ `'{{ if .workerRootDisk }}true{{end}}'` }} + variable: flavor + - name: rootDisk + description: "Sets the root disk size in GiB. 0 means use ephemeral disk from flavor." + enabledIf: {{ `'{{ if .rootDisk }}true{{end}}'` }} definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: OpenStackMachineTemplate matchResources: - controlPlane: false + controlPlane: true machineDeploymentClass: names: - default-worker @@ -712,16 +584,16 @@ spec: path: "/spec/template/spec/rootVolume" valueFrom: template: | - sizeGiB: {{ `{{ .workerRootDisk }}` }} - - name: workerServerGroupID - description: "Sets the server group to assign the worker nodes to." 
- enabledIf: {{ `'{{ ne .workerServerGroupID "" }}'` }} + sizeGiB: {{ `{{ .rootDisk }}` }} + - name: serverGroupID + description: "Sets the server group for anti-affinity." + enabledIf: {{ `'{{ ne .serverGroupID "" }}'` }} definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: OpenStackMachineTemplate matchResources: - controlPlane: false + controlPlane: true machineDeploymentClass: names: - default-worker @@ -730,15 +602,16 @@ spec: path: "/spec/template/spec/serverGroup" valueFrom: template: | - id: {{ `{{ .workerServerGroupID }}` }} - - name: workerAdditionalBlockDevices - enabledIf: {{ `'{{ if .workerAdditionalBlockDevices }}true{{end}}'` }} + id: {{ `{{ .serverGroupID }}` }} + - name: additionalBlockDevices + description: "Attaches additional Cinder volumes to nodes." + enabledIf: {{ `'{{ if .additionalBlockDevices }}true{{end}}'` }} definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: OpenStackMachineTemplate matchResources: - controlPlane: false + controlPlane: true machineDeploymentClass: names: - default-worker @@ -747,7 +620,7 @@ spec: path: /spec/template/spec/additionalBlockDevices valueFrom: template: | - {{ `{{- range .workerAdditionalBlockDevices }}` }} + {{ `{{- range .additionalBlockDevices }}` }} - name: {{ `{{ .name }}` }} sizeGiB: {{ `{{ .sizeGiB }}` }} storage: @@ -755,17 +628,19 @@ spec: volume: type: {{ `{{ .type }}` }} {{ `{{- end }}` }} - # Note: The workerSecurityGroups patch must be placed before workerSecurityGroupIDs and after securityGroups and securityGroupIDs. - # The patch order ensures the last applied patch overwrites previous ones. - - name: workerSecurityGroups - description: "Sets the list of the openstack security groups for the worker instances." - enabledIf: {{ `'{{ if .workerSecurityGroups }}true{{end}}'` }} + # + # Access patches — target both CP and workers. + # securityGroupIDs takes precedence over securityGroups (last patch wins). 
+ # + - name: securityGroups + description: "Sets security groups by name for all nodes." + enabledIf: {{ `'{{ if .securityGroups }}true{{end}}'` }} definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: OpenStackMachineTemplate matchResources: - controlPlane: false + controlPlane: true machineDeploymentClass: names: - default-worker @@ -773,18 +648,16 @@ spec: - op: add path: "/spec/template/spec/securityGroups" valueFrom: - template: {{ `'[ {{ range .workerSecurityGroups }} { filter: { name: {{ . }}}}, {{ end }} ]'` }} - # Note: The workerSecurityGroupIDs patch must be placed after securityGroups, securityGroupIDs and workerSecurityGroupIDs. - # The patch order ensures the last applied patch overwrites previous ones. - - name: workerSecurityGroupIDs - description: "Sets the list of the openstack security groups for the worker instances by UUID." - enabledIf: {{ `'{{ if .workerSecurityGroupIDs }}true{{end}}'` }} + template: {{ `'[ {{ range .securityGroups }} { filter: { name: {{ . }}}}, {{ end }} ]'` }} + - name: securityGroupIDs + description: "Sets security groups by UUID for all nodes. Takes precedence over securityGroups." + enabledIf: {{ `'{{ if .securityGroupIDs }}true{{end}}'` }} definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: OpenStackMachineTemplate matchResources: - controlPlane: false + controlPlane: true machineDeploymentClass: names: - default-worker @@ -792,14 +665,31 @@ spec: - op: add path: "/spec/template/spec/securityGroups" valueFrom: - template: {{ `'[ {{ range .workerSecurityGroupIDs }} { id: {{ . }} }, {{ end }} ]'` }} + template: {{ `'[ {{ range .securityGroupIDs }} { id: {{ . }} }, {{ end }} ]'` }} + - name: sshKeyName + description: "Sets the SSH key to inject into all nodes." 
+ enabledIf: {{ `'{{ ne .sshKeyName "" }}'` }} + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: OpenStackMachineTemplate + matchResources: + controlPlane: true + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/sshKeyName" + valueFrom: + variable: sshKeyName # - name: certSANs description: "certSANs sets extra Subject Alternative Names for the API Server signing cert." enabledIf: {{ `'{{ if .certSANs }}true{{end}}'` }} definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -813,32 +703,161 @@ spec: enabledIf: {{ `'{{ if and .oidcConfig .oidcConfig.clientID .oidcConfig.issuerURL }}true{{end}}'` }} definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/oidc-client-id" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: - variable: oidcConfig.clientID + template: | + name: oidc-client-id + value: {{ `'{{ .oidcConfig.clientID }}'` }} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/oidc-issuer-url" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: - variable: oidcConfig.issuerURL + template: | + name: oidc-issuer-url + value: {{ `'{{ .oidcConfig.issuerURL }}'` }} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/oidc-username-claim" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: - variable: oidcConfig.usernameClaim + template: | 
+ name: oidc-username-claim + value: {{ `'{{ .oidcConfig.usernameClaim }}'` }} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/oidc-groups-claim" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: - variable: oidcConfig.groupsClaim + template: | + name: oidc-groups-claim + value: {{ `'{{ .oidcConfig.groupsClaim }}'` }} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/oidc-username-prefix" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: - variable: oidcConfig.usernamePrefix + template: | + name: oidc-username-prefix + value: {{ `'{{ .oidcConfig.usernamePrefix }}'` }} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/oidc-groups-prefix" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: - variable: oidcConfig.groupsPrefix + template: | + name: oidc-groups-prefix + value: {{ `'{{ .oidcConfig.groupsPrefix }}'` }} + # + # Registry mirror patches + # + - name: registryMirrorsControlPlane + description: "Configure registry mirrors on control plane nodes (containerd + CRI-O)." 
+ enabledIf: {{ `'{{ if .registryMirrors }}true{{end}}'` }} + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/files" + valueFrom: + template: | + {{ `{{- range $r := .registryMirrors }}` }} + - content: | + server = "{{ `{{ $r.urlUpstream }}` }}" + [host."{{ `{{ $r.urlMirror }}` }}"] + capabilities = ["pull","resolve"] + override_path = true + owner: root:root + path: /etc/containerd/certs.d/{{ `{{ $r.hostnameUpstream }}` }}/hosts.toml + permissions: "0644" + - content: | + [[registry]] + prefix = "{{ `{{ $r.hostnameUpstream }}` }}" + location = "{{ `{{ $r.hostnameUpstream }}` }}" + [[registry.mirror]] + location = "{{ `{{ $r.urlMirror }}` }}" + owner: root:root + path: /etc/containers/registries.conf.d/50-mirror-{{ `{{ $r.hostnameUpstream }}` }}.conf + permissions: "0644" + {{ `{{- if $r.certMirror }}` }} + - content: "{{ `{{ $r.certMirror }}` }}" + owner: root:root + path: /etc/containerd/certs/{{ `{{ $r.hostnameUpstream }}` }}/ca.crt + permissions: "0644" + - content: "{{ `{{ $r.certMirror }}` }}" + owner: root:root + path: /etc/containers/certs.d/{{ `{{ $r.hostnameUpstream }}` }}/ca.crt + permissions: "0644" + {{ `{{- end }}` }} + {{ `{{- end }}` }} + - path: /etc/kube-proxy-config.yaml + owner: root:root + permissions: "0644" + content: | + --- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + metricsBindAddress: "0.0.0.0:10249" + + - path: /etc/kube-proxy-patch.sh + owner: root:root + permissions: "0755" + content: | + #!/usr/bin/env bash + set -euo pipefail + dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + kubeadm_file="/etc/kubeadm.yml" + [[ -f ${kubeadm_file} ]] || kubeadm_file="/run/kubeadm/kubeadm.yaml" + [[ -f ${kubeadm_file} ]] || exit 0 + [[ -f ${dir}/kube-proxy-config.yaml ]] || exit 0 + cat "${dir}/kube-proxy-config.yaml" >> 
"${kubeadm_file}" + rm -f "${dir}/kube-proxy-config.yaml" + echo success > /tmp/kube-proxy-patch + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/preKubeadmCommands/-" + value: "bash /etc/kube-proxy-patch.sh" + - name: registryMirrorsWorker + description: "Configure registry mirrors on worker nodes (containerd + CRI-O)." + enabledIf: {{ `'{{ if .registryMirrors }}true{{end}}'` }} + definitions: + - selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/files" + valueFrom: + template: | + {{ `{{- range $r := .registryMirrors }}` }} + - content: | + server = "{{ `{{ $r.urlUpstream }}` }}" + [host."{{ `{{ $r.urlMirror }}` }}"] + capabilities = ["pull","resolve"] + override_path = true + owner: root:root + path: /etc/containerd/certs.d/{{ `{{ $r.hostnameUpstream }}` }}/hosts.toml + permissions: "0644" + - content: | + [[registry]] + prefix = "{{ `{{ $r.hostnameUpstream }}` }}" + location = "{{ `{{ $r.hostnameUpstream }}` }}" + [[registry.mirror]] + location = "{{ `{{ $r.urlMirror }}` }}" + owner: root:root + path: /etc/containers/registries.conf.d/50-mirror-{{ `{{ $r.hostnameUpstream }}` }}.conf + permissions: "0644" + {{ `{{- if $r.certMirror }}` }} + - content: "{{ `{{ $r.certMirror }}` }}" + owner: root:root + path: /etc/containerd/certs/{{ `{{ $r.hostnameUpstream }}` }}/ca.crt + permissions: "0644" + - content: "{{ `{{ $r.certMirror }}` }}" + owner: root:root + path: /etc/containers/certs.d/{{ `{{ $r.hostnameUpstream }}` }}/ca.crt + permissions: "0644" + {{ `{{- end }}` }} + {{ `{{- end }}` }} diff --git a/providers/openstack/scs2/cluster-class/templates/kubeadm-config-template-worker-openstack.yaml b/providers/openstack/scs2/cluster-class/templates/kubeadm-config-template-worker-openstack.yaml index 4c1494ed..732d78b3 100644 --- 
a/providers/openstack/scs2/cluster-class/templates/kubeadm-config-template-worker-openstack.yaml +++ b/providers/openstack/scs2/cluster-class/templates/kubeadm-config-template-worker-openstack.yaml @@ -1,4 +1,4 @@ -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: {{ .Release.Name }}-{{ .Chart.Version }}-default-worker @@ -8,6 +8,8 @@ spec: joinConfiguration: nodeRegistration: kubeletExtraArgs: - cloud-provider: external - provider-id: 'openstack:///{{ `{{ instance_id }}` }}' + - name: cloud-provider + value: external + - name: provider-id + value: 'openstack:///{{ `{{ instance_id }}` }}' name: '{{ `{{ local_hostname }}` }}' diff --git a/providers/openstack/scs2/cluster-class/templates/kubeadm-control-plane-template.yaml b/providers/openstack/scs2/cluster-class/templates/kubeadm-control-plane-template.yaml index 767eac68..b199f36c 100644 --- a/providers/openstack/scs2/cluster-class/templates/kubeadm-control-plane-template.yaml +++ b/providers/openstack/scs2/cluster-class/templates/kubeadm-control-plane-template.yaml @@ -1,4 +1,4 @@ -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate metadata: name: {{ .Release.Name }}-{{ .Chart.Version }}-control-plane @@ -7,26 +7,42 @@ spec: spec: kubeadmConfigSpec: clusterConfiguration: - apiServer: {} controllerManager: extraArgs: - cloud-provider: external - bind-address: 0.0.0.0 - secure-port: "10257" + - name: cloud-provider + value: external + - name: bind-address + value: 0.0.0.0 + - name: secure-port + value: "10257" + - name: profiling + value: "false" + - name: terminated-pod-gc-threshold + value: "100" scheduler: extraArgs: - bind-address: 0.0.0.0 - secure-port: "10259" + - name: bind-address + value: 0.0.0.0 + - name: secure-port + value: "10259" + - name: profiling + value: "false" etcd: local: dataDir: /var/lib/etcd extraArgs: - 
listen-metrics-urls: http://0.0.0.0:2381 - auto-compaction-mode: periodic - auto-compaction-retention: 8h - election-timeout: "2500" - heartbeat-interval: "250" - snapshot-count: "6400" + - name: listen-metrics-urls + value: http://0.0.0.0:2381 + - name: auto-compaction-mode + value: periodic + - name: auto-compaction-retention + value: 8h + - name: election-timeout + value: "2500" + - name: heartbeat-interval + value: "250" + - name: snapshot-count + value: "6400" files: - content: | --- @@ -78,12 +94,16 @@ spec: initConfiguration: nodeRegistration: kubeletExtraArgs: - cloud-provider: external - provider-id: 'openstack:///{{ `{{ instance_id }}` }}' + - name: cloud-provider + value: external + - name: provider-id + value: 'openstack:///{{ `{{ instance_id }}` }}' name: '{{ `{{ local_hostname }}` }}' joinConfiguration: nodeRegistration: kubeletExtraArgs: - cloud-provider: external - provider-id: 'openstack:///{{ `{{ instance_id }}` }}' + - name: cloud-provider + value: external + - name: provider-id + value: 'openstack:///{{ `{{ instance_id }}` }}' name: '{{ `{{ local_hostname }}` }}' diff --git a/providers/openstack/scs2/cluster-class/templates/openstack-cluster-template.yaml b/providers/openstack/scs2/cluster-class/templates/openstack-cluster-template.yaml index 9d03326f..3cc03fbb 100644 --- a/providers/openstack/scs2/cluster-class/templates/openstack-cluster-template.yaml +++ b/providers/openstack/scs2/cluster-class/templates/openstack-cluster-template.yaml @@ -43,3 +43,49 @@ spec: portRangeMax: 4244 protocol: tcp description: "Allow Hubble traffic for Cilium" + - remoteManagedGroups: + - worker + direction: ingress + etherType: IPv4 + name: Prometheus kube-proxy exporter + portRangeMin: 10249 + portRangeMax: 10249 + protocol: tcp + description: "Allow Prometheus traffic for kube-proxy exporter" + - remoteManagedGroups: + - worker + direction: ingress + etherType: IPv4 + name: Prometheus kube-controller-manager exporter + portRangeMin: 10257 + portRangeMax: 10257 
+ protocol: tcp + description: "Allow Prometheus traffic for kube-controller-manager exporter" + - remoteManagedGroups: + - worker + direction: ingress + etherType: IPv4 + name: Prometheus kube-scheduler exporter + portRangeMin: 10259 + portRangeMax: 10259 + protocol: tcp + description: "Allow Prometheus traffic for kube-scheduler exporter" + - remoteManagedGroups: + - worker + direction: ingress + etherType: IPv4 + name: Prometheus node exporter + portRangeMin: 9100 + portRangeMax: 9100 + protocol: tcp + description: "Allow Prometheus traffic for scraping node exporter" + controlPlaneNodesSecurityGroupRules: + - remoteManagedGroups: + - worker + direction: ingress + etherType: IPv4 + name: Prometheus etcd exporter + portRangeMin: 2381 + portRangeMax: 2381 + protocol: tcp + description: "Allow Prometheus traffic for scraping etcd exporter" \ No newline at end of file diff --git a/providers/openstack/scs2/cluster-class/values.yaml b/providers/openstack/scs2/cluster-class/values.yaml index e69de29b..1344cf85 100644 --- a/providers/openstack/scs2/cluster-class/values.yaml +++ b/providers/openstack/scs2/cluster-class/values.yaml @@ -0,0 +1,50 @@ +# ClusterClass variable defaults +# These are referenced by the ClusterClass template and can be overridden per deployment. +# Variables apply to all nodes by default. Use topology.controlPlane.variables.overrides +# or topology.workers.machineDeployments[].variables.overrides to differentiate. 
+variables: + # Image configuration + imageName: "ubuntu-capi-image" + imageIsOrc: false + imageAddVersion: true + + # API server + disableAPIServerFloatingIP: false + apiServerLoadBalancer: "octavia-ovn" + certSANs: [] + + # Network + dnsNameservers: ["9.9.9.9", "149.112.112.112"] + nodeCIDR: "10.8.0.0/20" + + # Machine configuration (override per CP/worker via topology) + flavor: "SCS-2V-4" + rootDisk: 50 + serverGroupID: "" + additionalBlockDevices: [] + + # Access management + sshKeyName: "" + securityGroups: [] + securityGroupIDs: [] + + # Cluster-level (control plane only) + controlPlaneAvailabilityZones: [] + controlPlaneOmitAvailabilityZone: false + + # Identity + identityRef: + name: "openstack" + cloudName: "openstack" + + # Container runtime + registryMirrors: [] + + # OIDC + oidcConfig: + clientID: "" + issuerURL: "" + usernameClaim: "preferred_username" + groupsClaim: "groups" + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" diff --git a/providers/openstack/scs2/clusteraddon.yaml b/providers/openstack/scs2/clusteraddon.yaml index d346ba22..bea5fc78 100644 --- a/providers/openstack/scs2/clusteraddon.yaml +++ b/providers/openstack/scs2/clusteraddon.yaml @@ -19,3 +19,12 @@ addonStages: action: apply - name: ccm action: apply + AfterClusterUpgrade: + - name: cni + action: apply + - name: metrics-server + action: apply + - name: csi + action: apply + - name: ccm + action: apply diff --git a/providers/openstack/scs2/versions.yaml b/providers/openstack/scs2/versions.yaml index 85cff539..c8007861 100644 --- a/providers/openstack/scs2/versions.yaml +++ b/providers/openstack/scs2/versions.yaml @@ -1,9 +1,12 @@ - kubernetes: 1.32.8 - cinder_csi: 2.32.2 - occm: 2.32.0 + ubuntu: "2204" + openstack-cinder-csi: 2.32.2 + openstack-cloud-controller-manager: 2.32.0 - kubernetes: 1.33.7 - cinder_csi: 2.33.1 - occm: 2.33.1 + ubuntu: "2404" + openstack-cinder-csi: 2.33.1 + openstack-cloud-controller-manager: 2.33.1 - kubernetes: 1.34.3 - cinder_csi: 2.34.1 - occm: 2.34.1 
+ ubuntu: "2404" + openstack-cinder-csi: 2.34.3 + openstack-cloud-controller-manager: 2.34.2