author | 2023-06-01 22:18:35 +0100
---|---
committer | 2023-07-24 14:42:09 +0100
commit | 2b8a40bc1c9ce1520c1e5c370880b83c2f56607a (patch)
tree | ccb09fc52f4d561515217b976778c7a157302885
parent | Add agenda for next week (diff)
Remove kubespray and related roles
35 files changed, 3 insertions(+), 1842 deletions(-)
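Before the diff itself, one orienting note: after this change the inventory directory holds only `hosts.yaml`. As a rough sketch (host names, group, and addresses here are hypothetical, not taken from the repository), a minimal Ansible hosts file of that shape looks like:

```yaml
# Hypothetical minimal inventory/hosts.yaml -- names and addresses are
# illustrative only, not the actual python-discord inventory.
all:
  hosts:
    node1:
      ansible_host: 203.0.113.10
    node2:
      ansible_host: 203.0.113.11
  children:
    cluster:
      hosts:
        node1:
        node2:
```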
diff --git a/.ansible-lint b/.ansible-lint index 3a5a282..b05aae6 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -1,6 +1,5 @@ --- exclude_paths: - - inventory/group_vars # Copied from kubespray, and not necessary to lint - .github # Not ansible roles skip_list: - fqcn-builtins diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4a5b16c..7b15499 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -14,10 +14,3 @@ updates: interval: "weekly" reviewers: - "python-discord/devops" - - - package-ecosystem: "gitsubmodule" - directory: "/" - schedule: - interval: "weekly" - reviewers: - - "python-discord/devops" diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index f417f1a..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "kubespray"] - path = kubespray - url = https://github.com/python-discord/kubespray.git diff --git a/ansible.cfg b/ansible.cfg index e174a75..53f0162 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,5 +1,5 @@ [defaults] -remote_user=root +remote_user = root inventory = inventory/hosts.yaml [privilege_escalation] diff --git a/inventory/README.md b/inventory/README.md index 0c621b7..294d80b 100644 --- a/inventory/README.md +++ b/inventory/README.md @@ -1,11 +1,9 @@ # Inventory -The main inventory for the pydis cluster, including configuration for kubespray. +The main inventory for the pydis cluster. ## Content -- `group_vars`: Configuration variables for kubespray in various contexts. This directory is not covered by ansible-lint, and should generally not be used to add new configuration. Instead, that should be placed appropriately within the project as normal. -- `patches` - `hosts.yaml`: The main hosts file for our infrastructure. ## Deployment diff --git a/inventory/group_vars/all/all.yml b/inventory/group_vars/all/all.yml deleted file mode 100644 index d022cb1..0000000 --- a/inventory/group_vars/all/all.yml +++ /dev/null @@ -1,144 +0,0 @@ ---- -## Directory where the binaries will be installed -bin_dir: /usr/local/bin - -## The access_ip variable is used to define how other nodes should access -## the node. This is used in flannel to allow other flannel nodes to see -## this node for example. The access_ip is really useful AWS and Google -## environments where the nodes are accessed remotely by the "public" ip, -## but don't know about that address themselves. -# access_ip: 1.1.1.1 - - -## External LB example config -## apiserver_loadbalancer_domain_name: "elb.some.domain" -# loadbalancer_apiserver: -# address: 1.2.3.4 -# port: 1234 - -## Internal loadbalancers for apiservers -# loadbalancer_apiserver_localhost: true -# valid options are "nginx" or "haproxy" -# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" - -## If the cilium is going to be used in strict mode, we can use the -## localhost connection and not use the external LB. If this parameter is -## not specified, the first node to connect to kubeapi will be used. -# use_localhost_as_kubeapi_loadbalancer: true - -## Local loadbalancer should use this port -## And must be set port 6443 -loadbalancer_apiserver_port: 6443 - -## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. -loadbalancer_apiserver_healthcheck_port: 8081 - -### OTHER OPTIONAL VARIABLES - -## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. -## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. 
However, It uses the nameserver to make sure cluster installed safely in dns_early stage. -## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. -# disable_host_nameservers: false - -## Upstream dns servers -# Configured during deployment before the DNS server is available -# Docs: https://kubespray.io/#/docs/dns-stack?id=upstream_dns_servers -upstream_dns_servers: - - 1.1.1.1 - - 1.0.0.1 - -enable_nodelocaldns: false - -## There are some changes specific to the cloud providers -## for instance we need to encapsulate packets with some network plugins -## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' -## When openstack is used make sure to source in the openstack credentials -## like you would do when using openstack-client before starting the playbook. -# cloud_provider: - -## When cloud_provider is set to 'external', you can set the cloud controller to deploy -## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' -## When openstack or vsphere are used make sure to source in the required fields -# external_cloud_provider: - -## Set these proxy values in order to update package manager and docker daemon to use proxies -# http_proxy: "" -# https_proxy: "" - -## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy -# no_proxy: "" - -## Some problems may occur when downloading files over https proxy due to ansible bug -## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable -## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. -# download_validate_certs: False - -## If you need exclude all cluster nodes from proxy and other resources, add other resources here. -# additional_no_proxy: "" - -## If you need to disable proxying of os package repositories but are still behind an http_proxy set -## skip_http_proxy_on_os_packages to true -## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu -## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish -# skip_http_proxy_on_os_packages: false - -## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all -## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the -## no_proxy variable, set below to true: -no_proxy_exclude_workers: false - -## Certificate Management -## This setting determines whether certs are generated via scripts. -## Chose 'none' if you provide your own certificates. -## Option is "script", "none" -# cert_management: script - -## Set to true to allow pre-checks to fail and continue deployment -# ignore_assert_errors: false - -## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. -# kube_read_only_port: 10255 - -## Set true to download and cache container -# download_container: true - -## Deploy container engine -# Set false if you want to deploy container engine manually. 
-# deploy_container_engine: true - -## Red Hat Enterprise Linux subscription registration -## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination -## Update RHEL subscription purpose usage, role and SLA if necessary -# rh_subscription_username: "" -# rh_subscription_password: "" -# rh_subscription_org_id: "" -# rh_subscription_activation_key: "" -# rh_subscription_usage: "Development" -# rh_subscription_role: "Red Hat Enterprise Server" -# rh_subscription_sla: "Self-Support" - -## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. -# ping_access_ip: true - -# sysctl_file_path to add sysctl conf to -# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" - -## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication -kube_webhook_token_auth: false -kube_webhook_token_auth_url_skip_tls_verify: false -# kube_webhook_token_auth_url: https://... -## base64-encoded string of the webhook's CA certificate -# kube_webhook_token_auth_ca_data: "LS0t..." - -## NTP Settings -# Start the ntpd or chrony service and enable it at system boot. -ntp_enabled: false -ntp_manage_config: false -ntp_servers: - - "0.pool.ntp.org iburst" - - "1.pool.ntp.org iburst" - - "2.pool.ntp.org iburst" - - "3.pool.ntp.org iburst" - -## Used to control no_log attribute -unsafe_show_logs: false diff --git a/inventory/group_vars/all/aws.yml b/inventory/group_vars/all/aws.yml deleted file mode 100644 index dab674e..0000000 --- a/inventory/group_vars/all/aws.yml +++ /dev/null @@ -1,9 +0,0 @@ -## To use AWS EBS CSI Driver to provision volumes, uncomment the first value -## and configure the parameters below -# aws_ebs_csi_enabled: true -# aws_ebs_csi_enable_volume_scheduling: true -# aws_ebs_csi_enable_volume_snapshot: false -# aws_ebs_csi_enable_volume_resizing: false -# aws_ebs_csi_controller_replicas: 1 -# aws_ebs_csi_plugin_image_tag: latest -# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/inventory/group_vars/all/azure.yml b/inventory/group_vars/all/azure.yml deleted file mode 100644 index 176b0f1..0000000 --- a/inventory/group_vars/all/azure.yml +++ /dev/null @@ -1,40 +0,0 @@ -## When azure is used, you need to also set the following variables. 
-## see docs/azure.md for details on how to get these values - -# azure_cloud: -# azure_tenant_id: -# azure_subscription_id: -# azure_aad_client_id: -# azure_aad_client_secret: -# azure_resource_group: -# azure_location: -# azure_subnet_name: -# azure_security_group_name: -# azure_security_group_resource_group: -# azure_vnet_name: -# azure_vnet_resource_group: -# azure_route_table_name: -# azure_route_table_resource_group: -# supported values are 'standard' or 'vmss' -# azure_vmtype: standard - -## Azure Disk CSI credentials and parameters -## see docs/azure-csi.md for details on how to get these values - -# azure_csi_tenant_id: -# azure_csi_subscription_id: -# azure_csi_aad_client_id: -# azure_csi_aad_client_secret: -# azure_csi_location: -# azure_csi_resource_group: -# azure_csi_vnet_name: -# azure_csi_vnet_resource_group: -# azure_csi_subnet_name: -# azure_csi_security_group_name: -# azure_csi_use_instance_metadata: -# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' - -## To enable Azure Disk CSI, uncomment below -# azure_csi_enabled: true -# azure_csi_controller_replicas: 1 -# azure_csi_plugin_image_tag: latest diff --git a/inventory/group_vars/all/containerd.yml b/inventory/group_vars/all/containerd.yml deleted file mode 100644 index 78ed663..0000000 --- a/inventory/group_vars/all/containerd.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options - -# containerd_storage_dir: "/var/lib/containerd" -# containerd_state_dir: "/run/containerd" -# containerd_oom_score: 0 - -# containerd_default_runtime: "runc" -# containerd_snapshotter: "native" - -# containerd_runc_runtime: -# name: runc -# type: "io.containerd.runc.v2" -# engine: "" -# root: "" - -# containerd_additional_runtimes: -# Example for Kata Containers as additional runtime: -# - name: kata -# type: "io.containerd.kata.v2" -# engine: "" -# root: "" - -# containerd_grpc_max_recv_message_size: 16777216 -# containerd_grpc_max_send_message_size: 16777216 - -# containerd_debug_level: "info" - -# containerd_metrics_address: "" - -# containerd_metrics_grpc_histogram: false - -## An obvious use case is allowing insecure-registry access to self hosted registries. -## Can be ipaddress and domain_name. -## example define mirror.registry.io or 172.19.16.11:5000 -## set "name": "url". insecure url must be started http:// -## Port number is also needed if the default HTTPS port is not used. 
-# containerd_insecure_registries: -# "localhost": "http://127.0.0.1" -# "172.19.16.11:5000": "http://172.19.16.11:5000" - -# containerd_registries: -# "docker.io": "https://registry-1.docker.io" - -# containerd_max_container_log_line_size: -1 - -# containerd_registry_auth: -# - registry: 10.0.0.2:5000 -# username: user -# password: pass diff --git a/inventory/group_vars/all/coreos.yml b/inventory/group_vars/all/coreos.yml deleted file mode 100644 index 22c2166..0000000 --- a/inventory/group_vars/all/coreos.yml +++ /dev/null @@ -1,2 +0,0 @@ -## Does coreos need auto upgrade, default is true -# coreos_auto_upgrade: true diff --git a/inventory/group_vars/all/cri-o.yml b/inventory/group_vars/all/cri-o.yml deleted file mode 100644 index 3e6e4ee..0000000 --- a/inventory/group_vars/all/cri-o.yml +++ /dev/null @@ -1,6 +0,0 @@ -# crio_insecure_registries: -# - 10.0.0.2:5000 -# crio_registry_auth: -# - registry: 10.0.0.2:5000 -# username: user -# password: pass diff --git a/inventory/group_vars/all/docker.yml b/inventory/group_vars/all/docker.yml deleted file mode 100644 index 4e968c3..0000000 --- a/inventory/group_vars/all/docker.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -## Uncomment this if you want to force overlay/overlay2 as docker storage driver -## Please note that overlay2 is only supported on newer kernels -# docker_storage_options: -s overlay2 - -## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. -docker_container_storage_setup: false - -## It must be define a disk path for docker_container_storage_setup_devs. -## Otherwise docker-storage-setup will be executed incorrectly. -# docker_container_storage_setup_devs: /dev/vdb - -## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) -## Valid options are systemd or cgroupfs, default is systemd -# docker_cgroup_driver: systemd - -## Only set this if you have more than 3 nameservers: -## If true Kubespray will only use the first 3, otherwise it will fail -docker_dns_servers_strict: false - -# Path used to store Docker data -docker_daemon_graph: "/var/lib/docker" - -## Used to set docker daemon iptables options to true -docker_iptables_enabled: "false" - -# Docker log options -# Rotate container stderr/stdout logs at 50m and keep last 5 -docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" - -# define docker bin_dir -docker_bin_dir: "/usr/bin" - -# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' -# kubespray deletes the docker package on each run, so caching the package makes sense -docker_rpm_keepcache: 1 - -## An obvious use case is allowing insecure-registry access to self hosted registries. -## Can be ipaddress and domain_name. -## example define 172.19.16.11 or mirror.registry.io -# docker_insecure_registries: -# - mirror.registry.io -# - 172.19.16.11 - -## Add other registry,example China registry mirror. -# docker_registry_mirrors: -# - https://registry.docker-cn.com -# - https://mirror.aliyuncs.com - -## If non-empty will override default system MountFlags value. -## This option takes a mount propagation flag: shared, slave -## or private, which control whether mounts in the file system -## namespace set up for docker will receive or propagate mounts -## and unmounts. Leave empty for system default -# docker_mount_flags: - -## A string of extra options to pass to the docker daemon. -## This string should be exactly as you wish it to appear. 
-# docker_options: "" diff --git a/inventory/group_vars/all/etcd.yml b/inventory/group_vars/all/etcd.yml deleted file mode 100644 index 39600c3..0000000 --- a/inventory/group_vars/all/etcd.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -## Directory where etcd data stored -etcd_data_dir: /var/lib/etcd - -## Container runtime -## docker for docker, crio for cri-o and containerd for containerd. -## Additionally you can set this to kubeadm if you want to install etcd using kubeadm -## Kubeadm etcd deployment is experimental and only available for new deployments -## If this is not set, container manager will be inherited from the Kubespray defaults -## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. -## Also this makes possible to use different container manager for etcd nodes. -# container_manager: containerd - -## Settings for etcd deployment type -# Set this to docker if you are using container_manager: docker -etcd_deployment_type: host diff --git a/inventory/group_vars/all/gcp.yml b/inventory/group_vars/all/gcp.yml deleted file mode 100644 index 49eb5c0..0000000 --- a/inventory/group_vars/all/gcp.yml +++ /dev/null @@ -1,10 +0,0 @@ -## GCP compute Persistent Disk CSI Driver credentials and parameters -## See docs/gcp-pd-csi.md for information about the implementation - -## Specify the path to the file containing the service account credentials -# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" - -## To enable GCP Persistent Disk CSI driver, uncomment below -# gcp_pd_csi_enabled: true -# gcp_pd_csi_controller_replicas: 1 -# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/inventory/group_vars/all/hcloud.yml b/inventory/group_vars/all/hcloud.yml deleted file mode 100644 index c27035c..0000000 --- a/inventory/group_vars/all/hcloud.yml +++ /dev/null @@ -1,14 +0,0 @@ -## Values for the external Hcloud Cloud Controller -# external_hcloud_cloud: -# hcloud_api_token: "" -# token_secret_name: hcloud -# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support -# service_account_name: cloud-controller-manager -# -# controller_image_tag: "latest" -# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset -# ## Format: -# ## external_hcloud_cloud.controller_extra_args: -# ## arg1: "value1" -# ## arg2: "value2" -# controller_extra_args: {} diff --git a/inventory/group_vars/all/oci.yml b/inventory/group_vars/all/oci.yml deleted file mode 100644 index 541d0e6..0000000 --- a/inventory/group_vars/all/oci.yml +++ /dev/null @@ -1,28 +0,0 @@ -## When Oracle Cloud Infrastructure is used, set these variables -# oci_private_key: -# oci_region_id: -# oci_tenancy_id: -# oci_user_id: -# oci_user_fingerprint: -# oci_compartment_id: -# oci_vnc_id: -# oci_subnet1_id: -# oci_subnet2_id: -## Override these default/optional behaviors if you wish -# oci_security_list_management: All -## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. 
-# oci_security_lists: -# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q -# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q -## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint -# oci_use_instance_principals: false -# oci_cloud_controller_version: 0.6.0 -## If you would like to control OCI query rate limits for the controller -# oci_rate_limit: -# rate_limit_qps_read: -# rate_limit_qps_write: -# rate_limit_bucket_read: -# rate_limit_bucket_write: -## Other optional variables -# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) -# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/inventory/group_vars/all/offline.yml b/inventory/group_vars/all/offline.yml deleted file mode 100644 index 83eb8fa..0000000 --- a/inventory/group_vars/all/offline.yml +++ /dev/null @@ -1,103 +0,0 @@ ---- -## Global Offline settings -### Private Container Image Registry -# registry_host: "myprivateregisry.com" -# files_repo: "http://myprivatehttpd" -### If using CentOS, RedHat, AlmaLinux or Fedora -# yum_repo: "http://myinternalyumrepo" -### If using Debian -# debian_repo: "http://myinternaldebianrepo" -### If using Ubuntu -# ubuntu_repo: "http://myinternalubunturepo" - -## Container Registry overrides -# kube_image_repo: "{{ registry_host }}" -# gcr_image_repo: "{{ registry_host }}" -# github_image_repo: "{{ registry_host }}" -# docker_image_repo: "{{ registry_host }}" -# quay_image_repo: "{{ registry_host }}" - -## Kubernetes components -# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" -# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" -# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" - -## CNI Plugins -# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" - -## cri-tools -# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" - -## [Optional] etcd: only if you **DON'T** use etcd_deployment=host -# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" - -# [Optional] Calico: If using Calico network plugin -# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" -# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" -# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore -# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" - -# [Optional] Cilium: If using Cilium network 
plugin -# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" - -# [Optional] Flannel: If using Falnnel network plugin -# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" - -# [Optional] helm: only if you set helm_enabled: true -# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" - -# [Optional] crun: only if you set crun_enabled: true -# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" - -# [Optional] kata: only if you set kata_containers_enabled: true -# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" - -# [Optional] cri-dockerd: only if you set container_manager: docker -# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" - -# [Optional] cri-o: only if you set container_manager: crio -# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" -# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" - -# [Optional] runc,containerd: only if you set container_runtime: containerd -# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" -# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" -# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" - -# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true -# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" -# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" - -## CentOS/Redhat/AlmaLinux -### For EL7, base and extras repo must be available, for EL8, baseos and appstream -### By default we enable those repo automatically -# rhel_enable_repos: false -### Docker / Containerd -# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" -# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" - -## Fedora -### Docker -# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" -# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" -### Containerd -# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" -# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" - -## Debian -### Docker -# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" -# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" -### Containerd -# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" -# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" -# containerd_debian_repo_repokey: 'YOURREPOKEY' - 
-## Ubuntu -### Docker -# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" -# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" -### Containerd -# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" -# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" -# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/inventory/group_vars/all/openstack.yml b/inventory/group_vars/all/openstack.yml deleted file mode 100644 index 7835664..0000000 --- a/inventory/group_vars/all/openstack.yml +++ /dev/null @@ -1,49 +0,0 @@ -## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) -# openstack_blockstorage_version: "v1/v2/auto (default)" -# openstack_blockstorage_ignore_volume_az: yes -## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. -# openstack_lbaas_enabled: True -# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" -## To enable automatic floating ip provisioning, specify a subnet. -# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" -## Override default LBaaS behavior -# openstack_lbaas_use_octavia: False -# openstack_lbaas_method: "ROUND_ROBIN" -# openstack_lbaas_provider: "haproxy" -# openstack_lbaas_create_monitor: "yes" -# openstack_lbaas_monitor_delay: "1m" -# openstack_lbaas_monitor_timeout: "30s" -# openstack_lbaas_monitor_max_retries: "3" - -## Values for the external OpenStack Cloud Controller -# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" -# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" -# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" -# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" -# external_openstack_lbaas_method: "ROUND_ROBIN" -# external_openstack_lbaas_provider: "octavia" -# external_openstack_lbaas_create_monitor: false -# external_openstack_lbaas_monitor_delay: "1m" -# external_openstack_lbaas_monitor_timeout: "30s" -# external_openstack_lbaas_monitor_max_retries: "3" -# external_openstack_lbaas_manage_security_groups: false -# external_openstack_lbaas_internal_lb: false -# external_openstack_network_ipv6_disabled: false -# external_openstack_network_internal_networks: [] -# external_openstack_network_public_networks: [] -# external_openstack_metadata_search_order: "configDrive,metadataService" - -## Application credentials to authenticate against Keystone API -## Those settings will take precedence over username and password that might be set your environment -## All of them are required -# external_openstack_application_credential_name: -# external_openstack_application_credential_id: -# external_openstack_application_credential_secret: - -## The tag of the external OpenStack Cloud Controller image -# external_openstack_cloud_controller_image_tag: "latest" - -## To use Cinder CSI plugin to provision volumes set this value to true -## Make sure to source in the openstack credentials -# cinder_csi_enabled: true -# cinder_csi_controller_replicas: 1 diff --git a/inventory/group_vars/all/upcloud.yml b/inventory/group_vars/all/upcloud.yml deleted file mode 100644 index f05435d..0000000 --- a/inventory/group_vars/all/upcloud.yml +++ /dev/null @@ -1,24 +0,0 @@ -## Repo for UpClouds csi-driver: 
https://github.com/UpCloudLtd/upcloud-csi -## To use UpClouds CSI plugin to provision volumes set this value to true -## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD -# upcloud_csi_enabled: true -# upcloud_csi_controller_replicas: 1 -## Override used image tags -# upcloud_csi_provisioner_image_tag: "v3.1.0" -# upcloud_csi_attacher_image_tag: "v3.4.0" -# upcloud_csi_resizer_image_tag: "v1.4.0" -# upcloud_csi_plugin_image_tag: "v0.3.3" -# upcloud_csi_node_image_tag: "v2.5.0" -# upcloud_tolerations: [] -## Storage class options -# storage_classes: -# - name: standard -# is_default: true -# expand_persistent_volumes: true -# parameters: -# tier: maxiops -# - name: hdd -# is_default: false -# expand_persistent_volumes: true -# parameters: -# tier: hdd diff --git a/inventory/group_vars/all/vsphere.yml b/inventory/group_vars/all/vsphere.yml deleted file mode 100644 index af3cfbe..0000000 --- a/inventory/group_vars/all/vsphere.yml +++ /dev/null @@ -1,32 +0,0 @@ -## Values for the external vSphere Cloud Provider -# external_vsphere_vcenter_ip: "myvcenter.domain.com" -# external_vsphere_vcenter_port: "443" -# external_vsphere_insecure: "true" -# external_vsphere_user: "[email protected]" # Can also be set via the `VSPHERE_USER` environment variable -# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable -# external_vsphere_datacenter: "DATACENTER_name" -# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" - -## Vsphere version where located VMs -# external_vsphere_version: "6.7u3" - -## Tags for the external vSphere Cloud Provider images -## gcr.io/cloud-provider-vsphere/cpi/release/manager -# external_vsphere_cloud_controller_image_tag: "latest" -## gcr.io/cloud-provider-vsphere/csi/release/syncer -# vsphere_syncer_image_tag: "v2.5.1" -## registry.k8s.io/sig-storage/csi-attacher -# vsphere_csi_attacher_image_tag: "v3.4.0" -## gcr.io/cloud-provider-vsphere/csi/release/driver -# vsphere_csi_controller: "v2.5.1" -## registry.k8s.io/sig-storage/livenessprobe -# vsphere_csi_liveness_probe_image_tag: "v2.6.0" -## registry.k8s.io/sig-storage/csi-provisioner -# vsphere_csi_provisioner_image_tag: "v3.1.0" -## registry.k8s.io/sig-storage/csi-resizer -## makes sense only for vSphere version >=7.0 -# vsphere_csi_resizer_tag: "v1.3.0" - -## To use vSphere CSI plugin to provision volumes set this value to true -# vsphere_csi_enabled: true -# vsphere_csi_controller_replicas: 1 diff --git a/inventory/group_vars/etcd.yml b/inventory/group_vars/etcd.yml deleted file mode 100644 index f07c720..0000000 --- a/inventory/group_vars/etcd.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -## Etcd auto compaction retention for mvcc key value store in hour -# etcd_compaction_retention: 0 - -## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. -# etcd_metrics: basic - -## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. -## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. -## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` -# etcd_memory_limit: "512M" - -## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than -## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check -## etcd documentation for more information. 
-# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. -# etcd_quota_backend_bytes: "2147483648" - -# Maximum client request size in bytes the server will accept. -# etcd is designed to handle small key value pairs typical for metadata. -# Larger requests will work, but may increase the latency of other requests -# etcd_max_request_bytes: "1572864" - -### ETCD: disable peer client cert authentication. -# This affects ETCD_PEER_CLIENT_CERT_AUTH variable -# etcd_peer_client_auth: true diff --git a/inventory/group_vars/k8s_cluster/addons.yml b/inventory/group_vars/k8s_cluster/addons.yml deleted file mode 100644 index 73e4dfe..0000000 --- a/inventory/group_vars/k8s_cluster/addons.yml +++ /dev/null @@ -1,227 +0,0 @@ ---- -# Kubernetes dashboard -# RBAC required. see docs/getting-started.md for access details. -# dashboard_enabled: false - -# Helm deployment -helm_enabled: false - -# Registry deployment -registry_enabled: false -# registry_namespace: kube-system -# registry_storage_class: "" -# registry_disk_size: "10Gi" - -# Metrics Server deployment -metrics_server_enabled: false -# metrics_server_container_port: 4443 -# metrics_server_kubelet_insecure_tls: true -# metrics_server_metric_resolution: 15s -# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" -# metrics_server_host_network: false - -# Rancher Local Path Provisioner -local_path_provisioner_enabled: false -# local_path_provisioner_namespace: "local-path-storage" -# local_path_provisioner_storage_class: "local-path" -# local_path_provisioner_reclaim_policy: Delete -# local_path_provisioner_claim_root: /opt/local-path-provisioner/ -# local_path_provisioner_debug: false -# local_path_provisioner_image_repo: "rancher/local-path-provisioner" -# local_path_provisioner_image_tag: "v0.0.22" -# local_path_provisioner_helper_image_repo: "busybox" -# local_path_provisioner_helper_image_tag: "latest" - -# Local volume provisioner deployment -local_volume_provisioner_enabled: false -# local_volume_provisioner_namespace: kube-system -# local_volume_provisioner_nodelabels: -# - kubernetes.io/hostname -# - topology.kubernetes.io/region -# - topology.kubernetes.io/zone -# local_volume_provisioner_storage_classes: -# local-storage: -# host_dir: /mnt/disks -# mount_dir: /mnt/disks -# volume_mode: Filesystem -# fs_type: ext4 -# fast-disks: -# host_dir: /mnt/fast-disks -# mount_dir: /mnt/fast-disks -# block_cleaner_command: -# - "/scripts/shred.sh" -# - "2" -# volume_mode: Filesystem -# fs_type: ext4 -# local_volume_provisioner_tolerations: -# - effect: NoSchedule -# operator: Exists - -# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots -# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller -# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray -# csi_snapshot_controller_enabled: false -# csi snapshot namespace -# snapshot_controller_namespace: kube-system - -# CephFS provisioner deployment -cephfs_provisioner_enabled: false -# cephfs_provisioner_namespace: "cephfs-provisioner" -# cephfs_provisioner_cluster: ceph -# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" -# cephfs_provisioner_admin_id: admin -# cephfs_provisioner_secret: secret -# cephfs_provisioner_storage_class: cephfs -# cephfs_provisioner_reclaim_policy: Delete -# cephfs_provisioner_claim_root: /volumes -# 
cephfs_provisioner_deterministic_names: true - -# RBD provisioner deployment -rbd_provisioner_enabled: false -# rbd_provisioner_namespace: rbd-provisioner -# rbd_provisioner_replicas: 2 -# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" -# rbd_provisioner_pool: kube -# rbd_provisioner_admin_id: admin -# rbd_provisioner_secret_name: ceph-secret-admin -# rbd_provisioner_secret: ceph-key-admin -# rbd_provisioner_user_id: kube -# rbd_provisioner_user_secret_name: ceph-secret-user -# rbd_provisioner_user_secret: ceph-key-user -# rbd_provisioner_user_secret_namespace: rbd-provisioner -# rbd_provisioner_fs_type: ext4 -# rbd_provisioner_image_format: "2" -# rbd_provisioner_image_features: layering -# rbd_provisioner_storage_class: rbd -# rbd_provisioner_reclaim_policy: Delete - -# Nginx ingress controller deployment -ingress_nginx_enabled: false -# ingress_nginx_host_network: false -ingress_publish_status_address: "" -# ingress_nginx_nodeselector: -# kubernetes.io/os: "linux" -# ingress_nginx_tolerations: -# - key: "node-role.kubernetes.io/master" -# operator: "Equal" -# value: "" -# effect: "NoSchedule" -# - key: "node-role.kubernetes.io/control-plane" -# operator: "Equal" -# value: "" -# effect: "NoSchedule" -# ingress_nginx_namespace: "ingress-nginx" -# ingress_nginx_insecure_port: 80 -# ingress_nginx_secure_port: 443 -# ingress_nginx_configmap: -# map-hash-bucket-size: "128" -# ssl-protocols: "TLSv1.2 TLSv1.3" -# ingress_nginx_configmap_tcp_services: -# 9000: "default/example-go:8080" -# ingress_nginx_configmap_udp_services: -# 53: "kube-system/coredns:53" -# ingress_nginx_extra_args: -# - --default-ssl-certificate=default/foo-tls -# ingress_nginx_termination_grace_period_seconds: 300 -# ingress_nginx_class: nginx - -# ALB ingress controller deployment -ingress_alb_enabled: false -# alb_ingress_aws_region: "us-east-1" -# alb_ingress_restrict_scheme: "false" -# Enables logging on all outbound requests sent to the AWS API. -# If logging is desired, set to true. 
-# alb_ingress_aws_debug: "false" - -# Cert manager deployment -cert_manager_enabled: false -# cert_manager_namespace: "cert-manager" -# cert_manager_tolerations: -# - key: node-role.kubernetes.io/master -# effect: NoSchedule -# - key: node-role.kubernetes.io/control-plane -# effect: NoSchedule -# cert_manager_affinity: -# nodeAffinity: -# preferredDuringSchedulingIgnoredDuringExecution: -# - weight: 100 -# preference: -# matchExpressions: -# - key: node-role.kubernetes.io/control-plane -# operator: In -# values: -# - "" -# cert_manager_nodeselector: -# kubernetes.io/os: "linux" - -# cert_manager_trusted_internal_ca: | -# -----BEGIN CERTIFICATE----- -# [REPLACE with your CA certificate] -# -----END CERTIFICATE----- -# cert_manager_leader_election_namespace: kube-system - -# MetalLB deployment -metallb_enabled: false -metallb_speaker_enabled: "{{ metallb_enabled }}" -# metallb_ip_range: -# - "10.5.0.50-10.5.0.99" -# metallb_pool_name: "loadbalanced" -# metallb_auto_assign: true -# metallb_avoid_buggy_ips: false -# metallb_speaker_nodeselector: -# kubernetes.io/os: "linux" -# metallb_controller_nodeselector: -# kubernetes.io/os: "linux" -# metallb_speaker_tolerations: -# - key: "node-role.kubernetes.io/master" -# operator: "Equal" -# value: "" -# effect: "NoSchedule" -# - key: "node-role.kubernetes.io/control-plane" -# operator: "Equal" -# value: "" -# effect: "NoSchedule" -# metallb_controller_tolerations: -# - key: "node-role.kubernetes.io/master" -# operator: "Equal" -# value: "" -# effect: "NoSchedule" -# - key: "node-role.kubernetes.io/control-plane" -# operator: "Equal" -# value: "" -# effect: "NoSchedule" -# metallb_version: v0.12.1 -# metallb_protocol: "layer2" -# metallb_port: "7472" -# metallb_memberlist_port: "7946" -# metallb_additional_address_pools: -# kube_service_pool: -# ip_range: -# - "10.5.1.50-10.5.1.99" -# protocol: "layer2" -# auto_assign: false -# avoid_buggy_ips: false -# metallb_protocol: "bgp" -# metallb_peers: -# - peer_address: 192.0.2.1 -# peer_asn: 64512 -# my_asn: 4200000000 -# - peer_address: 192.0.2.2 -# peer_asn: 64513 -# my_asn: 4200000000 - -argocd_enabled: false -# argocd_version: v2.4.16 -# argocd_namespace: argocd -# Default password: -# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli -# --- -# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: -# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 -# --- -# Use the following var to set admin password -# argocd_admin_password: "password" - -# The plugin manager for kubectl -krew_enabled: false -krew_root_dir: "/usr/local/krew" diff --git a/inventory/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/group_vars/k8s_cluster/k8s-cluster.yml deleted file mode 100644 index 26c94bd..0000000 --- a/inventory/group_vars/k8s_cluster/k8s-cluster.yml +++ /dev/null @@ -1,350 +0,0 @@ ---- -# Kubernetes configuration dirs and system namespace. -# Those are where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernetes. -# This puts them in a sane location and namespace. -# Editing those values will almost surely break something. 
-kube_config_dir: /etc/kubernetes -kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" -kube_manifest_dir: "{{ kube_config_dir }}/manifests" - -# This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/ssl" - -# This is where all of the bearer tokens will be stored -kube_token_dir: "{{ kube_config_dir }}/tokens" - -kube_api_anonymous_auth: true - -## Change this to use another Kubernetes version, e.g. a current beta release -kube_version: v1.25.4 - -# Where the binaries will be downloaded. -# Note: ensure that you have enough disk space (about 1G) -local_release_dir: "/tmp/releases" -# Random shifts for retrying failed ops like pushing/downloading -retry_stagger: 5 - -# This is the user that owns the cluster installation. -kube_owner: kube - -# This is the group that the cert creation scripts chgrp the -# cert files to. Not really changeable... -kube_cert_group: kube-cert - -# Cluster Loglevel configuration -kube_log_level: 2 - -# Directory where credentials will be stored -credentials_dir: "{{ inventory_dir }}/credentials" - -## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) -# kube_oidc_auth: false -# kube_token_auth: false - - -## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ -## To use OpenID you have to deploy an additional OpenID Provider (e.g. Dex, Keycloak, ...) - -# kube_oidc_url: https:// ... -# kube_oidc_client_id: kubernetes -## Optional settings for OIDC -# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" -# kube_oidc_username_claim: sub -# kube_oidc_username_prefix: 'oidc:' -# kube_oidc_groups_claim: groups -# kube_oidc_groups_prefix: 'oidc:' - -## Variables to control webhook authn/authz -# kube_webhook_token_auth: false -# kube_webhook_token_auth_url: https://... -# kube_webhook_token_auth_url_skip_tls_verify: false - -## For webhook authorization, authorization_modes must include Webhook -# kube_webhook_authorization: false -# kube_webhook_authorization_url: https://... -# kube_webhook_authorization_url_skip_tls_verify: false - -# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) -# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing -kube_network_plugin: calico - -# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni -kube_network_plugin_multus: false - -# Kubernetes internal network for services, unused block of space. -kube_service_addresses: 10.233.0.0/18 - -# internal network. When used, it will assign IP -# addresses from this range to individual pods. -# This network must be unused in your network infrastructure! -kube_pods_subnet: 10.233.64.0/18 - -# internal network node size allocation (optional). This is the size allocated -# to each node for pod IP address allocation. Note that the number of pods per node is -# also limited by the kubelet_max_pods variable which defaults to 110. -# -# Example: -# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: -# - kube_pods_subnet: 10.233.64.0/18 -# - kube_network_node_prefix: 24 -# - kubelet_max_pods: 110 -# -# Example: -# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: -# - kube_pods_subnet: 10.233.64.0/18 -# - kube_network_node_prefix: 25 -# - kubelet_max_pods: 110 -kube_network_node_prefix: 24 - -# Configure Dual Stack networking (i.e. both IPv4 and IPv6) -enable_dual_stack_networks: false - -# Kubernetes internal network for IPv6 services, unused block of space. -# This is only used if enable_dual_stack_networks is set to true -# This provides 4096 IPv6 IPs -kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 - -# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. -# This network must not already be in your network infrastructure! -# This is only used if enable_dual_stack_networks is set to true. -# This provides room for 256 nodes with 254 pods per node. -kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 - -# IPv6 subnet size allocated to each node for pods. -# This is only used if enable_dual_stack_networks is set to true -# This provides room for 254 pods per node. -kube_network_node_prefix_ipv6: 120 - -# The port the API Server will be listening on. -kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" -kube_apiserver_port: 6443 # (https) - -# Kube-proxy proxyMode configuration. -# Can be ipvs, iptables -kube_proxy_mode: ipvs - -# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface -# must be set to true for MetalLB, kube-vip(ARP enabled) to work -kube_proxy_strict_arp: false - -# A string slice of values which specify the addresses to use for NodePorts. -# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). -# The default empty string slice ([]) means to use all local addresses. -# kube_proxy_nodeport_addresses_cidr is retained for legacy config -kube_proxy_nodeport_addresses: >- - {%- if kube_proxy_nodeport_addresses_cidr is defined -%} - [{{ kube_proxy_nodeport_addresses_cidr }}] - {%- else -%} - [] - {%- endif -%} - -# If non-empty, will use this string as identification instead of the actual hostname -# kube_override_hostname: >- -# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} -# {%- else -%} -# {{ inventory_hostname }} -# {%- endif -%} - -## Encrypting Secret Data at Rest -kube_encrypt_secret_data: false - -# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ -# kubelet_shutdown_grace_period has to be greater than kubelet_shutdown_grace_period_critical_pods to allow -# non-critical pods to also terminate gracefully -# kubelet_shutdown_grace_period: 60s -# kubelet_shutdown_grace_period_critical_pods: 20s - -# DNS configuration. -# Kubernetes cluster name, also will be used as DNS domain -cluster_name: cluster.local -# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods -ndots: 2 -# dns_timeout: 2 -# dns_attempts: 2 -# Custom search domains to be added in addition to the default cluster search domains -# searchdomains: -# - svc.{{ cluster_name }} -# - default.svc.{{ cluster_name }} -# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). 
-# remove_default_searchdomains: false -# Can be coredns, coredns_dual, manual or none -dns_mode: coredns -# Set manual server if using a custom cluster DNS server -# manual_dns_server: 10.x.x.x -# Enable nodelocal dns cache -enable_nodelocaldns: true -enable_nodelocaldns_secondary: false -nodelocaldns_ip: 169.254.25.10 -nodelocaldns_health_port: 9254 -nodelocaldns_second_health_port: 9256 -nodelocaldns_bind_metrics_host_ip: false -nodelocaldns_secondary_skew_seconds: 5 -# nodelocaldns_external_zones: -# - zones: -# - example.com -# - example.io:1053 -# nameservers: -# - 1.1.1.1 -# - 2.2.2.2 -# cache: 5 -# - zones: -# - https://mycompany.local:4453 -# nameservers: -# - 192.168.0.53 -# cache: 0 -# - zones: -# - mydomain.tld -# nameservers: -# - 10.233.0.3 -# cache: 5 -# rewrite: -# - name website.tld website.namespace.svc.cluster.local -# Enable k8s_external plugin for CoreDNS -enable_coredns_k8s_external: false -coredns_k8s_external_zone: k8s_external.local -# Enable endpoint_pod_names option for kubernetes plugin -enable_coredns_k8s_endpoint_pod_names: false -# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config -# dns_upstream_forward_extra_opts: -# policy: sequential - -# Can be docker_dns, host_resolvconf or none -resolvconf_mode: host_resolvconf -# Deploy netchecker app to verify DNS resolve as an HTTP service -deploy_netchecker: false -# Ip address of the kubernetes skydns service -skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" -skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" -dns_domain: "{{ cluster_name }}" - -## Container runtime -## docker for docker, crio for cri-o and containerd for containerd. -## Default: containerd -container_manager: containerd - -# Additional container runtimes -kata_containers_enabled: false - -kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" - -# K8s image pull policy (imagePullPolicy) -k8s_image_pull_policy: IfNotPresent - -# audit log for kubernetes -kubernetes_audit: false - -# define kubelet config dir for dynamic kubelet -# kubelet_config_dir: -default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" - -# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) -podsecuritypolicy_enabled: false - -# Custom PodSecurityPolicySpec for restricted policy -# podsecuritypolicy_restricted_spec: {} - -# Custom PodSecurityPolicySpec for privileged policy -# podsecuritypolicy_privileged_spec: {} - -# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts -# kubeconfig_localhost: false -# Use ansible_host as external api ip when copying over kubeconfig. -# kubeconfig_localhost_ansible_host: false -# Download kubectl onto the host that runs Ansible in {{ bin_dir }} -# kubectl_localhost: false - -# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. -# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". -# kubelet_enforce_node_allocatable: pods - -## Optionally reserve resources for OS system daemons. 
-# system_reserved: true -## Uncomment to override default values -# system_memory_reserved: 512Mi -# system_cpu_reserved: 500m -# system_ephemeral_storage_reserved: 2Gi -## Reservation for master hosts -# system_master_memory_reserved: 256Mi -# system_master_cpu_reserved: 250m -# system_master_ephemeral_storage_reserved: 2Gi - -## Eviction Thresholds to avoid system OOMs -# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds -# eviction_hard: {} -# eviction_hard_control_plane: {} - -# An alternative flexvolume plugin directory -# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec - -## Supplementary addresses that can be added in kubernetes ssl keys. -## That can be useful for example to set up a keepalived virtual IP -# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] - -## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. -## See https://github.com/kubernetes-sigs/kubespray/issues/2141 -## Set this variable to true to get rid of this issue -volume_cross_zone_attachment: false -## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, -## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) -persistent_volumes_enabled: false - -## Container Engine Acceleration -## Enable container acceleration feature, for example use gpu acceleration in containers -# nvidia_accelerator_enabled: true -## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset. -## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' -## Array with nvidia_gpu_nodes, leave empty or comment out if you don't want to install drivers. -## Labels and taints won't be set on nodes if they are not in the array. -# nvidia_gpu_nodes: -# - kube-gpu-001 -# nvidia_driver_version: "384.111" -## flavor can be tesla or gtx -# nvidia_gpu_flavor: gtx -## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. -# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 -# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 -## NVIDIA GPU device plugin image. -# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" - -## Supported TLS min version. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. -# tls_min_version: "" - -## Supported TLS cipher suites. 
-# tls_cipher_suites: {} -# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 -# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 -# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 -# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA -# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 -# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 -# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 -# - TLS_ECDHE_RSA_WITH_RC4_128_SHA -# - TLS_RSA_WITH_3DES_EDE_CBC_SHA -# - TLS_RSA_WITH_AES_128_CBC_SHA -# - TLS_RSA_WITH_AES_128_CBC_SHA256 -# - TLS_RSA_WITH_AES_128_GCM_SHA256 -# - TLS_RSA_WITH_AES_256_CBC_SHA -# - TLS_RSA_WITH_AES_256_GCM_SHA384 -# - TLS_RSA_WITH_RC4_128_SHA - -## Amount of time to retain events. (default 1h0m0s) -event_ttl_duration: "1h0m0s" - -## Automatically renew K8S control plane certificates on first Monday of each month -auto_renew_certificates: false -# First Monday of each month -# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" - -# kubeadm patches path -kubeadm_patches: - enabled: false - source_dir: "{{ inventory_dir }}/patches" - dest_dir: "{{ kube_config_dir }}/patches" diff --git a/inventory/group_vars/k8s_cluster/k8s-net-calico.yml b/inventory/group_vars/k8s_cluster/k8s-net-calico.yml deleted file mode 100644 index cc0499d..0000000 --- a/inventory/group_vars/k8s_cluster/k8s-net-calico.yml +++ /dev/null @@ -1,131 +0,0 @@ ---- -# see roles/network_plugin/calico/defaults/main.yml - -# the default value of name -calico_cni_name: k8s-pod-network - -## With calico it is possible to distributed routes with border routers of the datacenter. -## Warning : enabling router peering will disable calico's default behavior ('node mesh'). -## The subnets of each nodes will be distributed by the datacenter router -# peer_with_router: false - -# Enables Internet connectivity from containers -# nat_outgoing: true - -# Enables Calico CNI "host-local" IPAM plugin -# calico_ipam_host_local: true - -# add default ippool name -# calico_pool_name: "default-pool" - -# add default ippool blockSize (defaults kube_network_node_prefix) -calico_pool_blocksize: 26 - -# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) -# calico_pool_cidr: 1.2.3.4/5 - -# add default ippool CIDR to CNI config -# calico_cni_pool: true - -# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. -# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 - -# Add default IPV6 IPPool CIDR to CNI config -# calico_cni_pool_ipv6: true - -# Global as_num (/calico/bgp/v1/global/as_num) -# global_as_num: "64512" - -# If doing peering with node-assigned asn where the globas does not match your nodes, you want this -# to be true. All other cases, false. -# calico_no_global_as_num: false - -# You can set MTU value here. If left undefined or empty, it will -# not be specified in calico CNI config, so Calico will use built-in -# defaults. The value should be a number, not a string. -# calico_mtu: 1500 - -# Configure the MTU to use for workload interfaces and tunnels. 
-# - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440)
-# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
-# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
-# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
-# calico_veth_mtu: 1440
-
-# Advertise Cluster IPs
-# calico_advertise_cluster_ips: true
-
-# Advertise Service External IPs
-# calico_advertise_service_external_ips:
-# - x.x.x.x/24
-# - y.y.y.y/32
-
-# Advertise Service LoadBalancer IPs
-# calico_advertise_service_loadbalancer_ips:
-# - x.x.x.x/24
-# - y.y.y.y/16
-
-# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
-# calico_datastore: "kdd"
-
-# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
-# calico_iptables_backend: "Auto"
-
-# Use typha (only with kdd)
-# typha_enabled: false
-
-# Generate TLS certs for secure typha<->calico-node communication
-# typha_secure: false
-
-# Scaling typha: 1 replica per 100 nodes is adequate
-# Number of typha replicas
-# typha_replicas: 1
-
-# Set max typha connections
-# typha_max_connections_lower_limit: 300
-
-# Set calico network backend: "bird", "vxlan" or "none"
-# bird enables BGP routing, required for ipip and no-encapsulation modes
-# calico_network_backend: vxlan
-
-# IP in IP and VXLAN are mutually exclusive modes.
-# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
-# calico_ipip_mode: 'Never'
-
-# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
-# calico_vxlan_mode: 'Always'
-
-# set VXLAN port and VNI
-# calico_vxlan_vni: 4096
-# calico_vxlan_port: 4789
-
-# Enable eBPF mode
-# calico_bpf_enabled: false
-
-# If you want to use a non-default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node, set this option to one of:
-# * can-reach=DESTINATION
-# * interface=INTERFACE-REGEX
-# see https://docs.projectcalico.org/reference/node/configuration
-# calico_ip_auto_method: "interface=eth.*"
-# calico_ip6_auto_method: "interface=eth.*"
-
-# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host’s interface for MTU auto-detection.
-# see https://projectcalico.docs.tigera.io/reference/felix/configuration
-# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
-
-# Choose the iptables insert mode for Calico: "Insert" or "Append".
-# calico_felix_chaininsertmode: Insert
-
-# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
-# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
-# calico_use_default_route_src_ipaddr: false
-
-# Enable calico traffic encryption with wireguard
-# calico_wireguard_enabled: false
-
-# In certain situations, liveness and readiness probes may need tuning
-# calico_node_livenessprobe_timeout: 10
-# calico_node_readinessprobe_timeout: 10
-
-# Calico apiserver (only with kdd)
-# calico_apiserver_enabled: false
diff --git a/inventory/group_vars/k8s_cluster/k8s-net-canal.yml b/inventory/group_vars/k8s_cluster/k8s-net-canal.yml
deleted file mode 100644
index 60b9da7..0000000
--- a/inventory/group_vars/k8s_cluster/k8s-net-canal.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# see roles/network_plugin/canal/defaults/main.yml
-
-# The interface used by canal for host <-> host communication.
-# If left blank, then the interface is chosen using the node's
-# default route.
-# canal_iface: ""
-
-# Whether or not to masquerade traffic to destinations not within
-# the pod network.
-# canal_masquerade: "true"
diff --git a/inventory/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/group_vars/k8s_cluster/k8s-net-cilium.yml
deleted file mode 100644
index d6e5bfa..0000000
--- a/inventory/group_vars/k8s_cluster/k8s-net-cilium.yml
+++ /dev/null
@@ -1,245 +0,0 @@
----
-# cilium_version: "v1.12.1"
-
-# Log-level
-# cilium_debug: false
-
-# cilium_mtu: ""
-# cilium_enable_ipv4: true
-# cilium_enable_ipv6: false
-
-# Cilium agent health port
-# cilium_agent_health_port: "9879"
-
-# Identity allocation mode selects how identities are shared between cilium
-# nodes by setting how they are stored. The options are "crd" or "kvstore".
-# - "crd" stores identities in kubernetes as CRDs (custom resource definitions).
-#   These can be queried with:
-#     `kubectl get ciliumid`
-# - "kvstore" stores identities in an etcd kvstore.
-# - In order to support External Workloads, "crd" is required
-#   - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
-# - KVStore operations are only required when cilium-operator is running with any of the below options:
-#   - --synchronize-k8s-services
-#   - --synchronize-k8s-nodes
-#   - --identity-allocation-mode=kvstore
-#   - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
-# cilium_identity_allocation_mode: kvstore
-
-# Etcd SSL dirs
-# cilium_cert_dir: /etc/cilium/certs
-# kube_etcd_cacert_file: ca.pem
-# kube_etcd_cert_file: cert.pem
-# kube_etcd_key_file: cert-key.pem
-
-# Limits for apps
-# cilium_memory_limit: 500M
-# cilium_cpu_limit: 500m
-# cilium_memory_requests: 64M
-# cilium_cpu_requests: 100m
-
-# Overlay Network Mode
-# cilium_tunnel_mode: vxlan
-# Optional features
-# cilium_enable_prometheus: false
-# Enable if you want to make use of hostPort mappings
-# cilium_enable_portmap: false
-# Monitor aggregation level (none/low/medium/maximum)
-# cilium_monitor_aggregation: medium
-# The monitor aggregation flags determine which TCP flags, upon first
-# observation, cause monitor notifications to be generated.
-#
-# Only effective when monitor aggregation is set to "medium" or higher.
-# cilium_monitor_aggregation_flags: "all"
-# Kube Proxy Replacement mode (strict/probe/partial)
-# cilium_kube_proxy_replacement: probe
-
-# If upgrading from Cilium < 1.5, you may want to override some of these options
-# to prevent service disruptions. See also:
-# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
-# cilium_preallocate_bpf_maps: false
-
-# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
-# cilium_tofqdns_enable_poller: false
-
-# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
-# cilium_enable_legacy_services: false
-
-# Unique ID of the cluster. Must be unique across all connected clusters and
-# in the range of 1 to 255. Only relevant when building a mesh of clusters.
-# This value is not defined by default
-# cilium_cluster_id:
-
-# Deploy cilium even if kube_network_plugin is not cilium.
-# This makes it possible to deploy cilium alongside another CNI to replace kube-proxy.
-# cilium_deploy_additionally: false
-
-# Auto direct node routes can be used to advertise pod routes in your cluster
-# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
-# This works only if you have L2 connectivity between all your nodes.
-# You will also have to specify the variable `cilium_native_routing_cidr` to
-# make this work. Please refer to the cilium documentation for more
-# information about this kind of setup.
-# cilium_auto_direct_node_routes: false
-
-# Allows explicitly specifying the IPv4 CIDR for native routing.
-# When specified, Cilium assumes networking for this CIDR is preconfigured and
-# hands traffic destined for that range to the Linux network stack without
-# applying any SNAT.
-# Generally speaking, specifying a native routing CIDR implies that Cilium can
-# depend on the underlying networking stack to route packets to their
-# destination. To offer a concrete example, if Cilium is configured to use
-# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
-# the user must configure the routes to reach pods, either manually or by
-# setting the auto-direct-node-routes flag.
-# cilium_native_routing_cidr: ""
-
-# Allows explicitly specifying the IPv6 CIDR for native routing.
-# cilium_native_routing_cidr_ipv6: ""
-
-# Enable transparent network encryption.
-# cilium_encryption_enabled: false
-
-# Encryption method. Can be either ipsec or wireguard.
-# Only effective when `cilium_encryption_enabled` is set to true.
-# cilium_encryption_type: "ipsec"
-
-# Enable encryption for pure node to node traffic.
-# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
-# cilium_ipsec_node_encryption: false
-
-# If your kernel or distribution does not support WireGuard, the Cilium agent can be configured to fall back on the user-space implementation.
-# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
-# it will fall back on the wireguard-go user-space implementation of WireGuard.
-# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
-# cilium_wireguard_userspace_fallback: false
-
-# IP Masquerade Agent
-# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
-# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
-# cilium_ip_masq_agent_enable: false
-
-### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
-# cilium_non_masquerade_cidrs:
-#   - 10.0.0.0/8
-#   - 172.16.0.0/12
-#   - 192.168.0.0/16
-#   - 100.64.0.0/10
-#   - 192.0.0.0/24
-#   - 192.0.2.0/24
-#   - 192.88.99.0/24
-#   - 198.18.0.0/15
-#   - 198.51.100.0/24
-#   - 203.0.113.0/24
-#   - 240.0.0.0/4
-### Indicates whether to masquerade traffic to the link local prefix.
-### If masqLinkLocal is not set or is set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
-# cilium_masq_link_local: false
-### The time interval at which the agent attempts to reload config from disk
-# cilium_ip_masq_resync_interval: 60s
-
-# Hubble
-### Enable Hubble without install
-# cilium_enable_hubble: false
-### Enable Hubble Metrics
-# cilium_enable_hubble_metrics: false
-### if cilium_enable_hubble_metrics: true
-# cilium_hubble_metrics: {}
-#   - dns
-#   - drop
-#   - tcp
-#   - flow
-#   - icmp
-#   - http
-### Enable Hubble install
-# cilium_hubble_install: false
-### Enable automatic cert generation if cilium_hubble_install: true
-# cilium_hubble_tls_generate: false
-
-# IP address management mode for v1.9+.
-# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
-# cilium_ipam_mode: kubernetes
-
-# Extra arguments for the Cilium agent
-# cilium_agent_custom_args: []
-
-# For adding and mounting extra volumes to the cilium agent
-# cilium_agent_extra_volumes: []
-# cilium_agent_extra_volume_mounts: []
-
-# cilium_agent_extra_env_vars: []
-
-# cilium_operator_replicas: 2
-
-# The address at which the cilium operator binds its health check API
-# cilium_operator_api_serve_addr: "127.0.0.1:9234"
-
-## A dictionary of extra config variables to add to cilium-config, formatted like:
-##  cilium_config_extra_vars:
-##    var1: "value1"
-##    var2: "value2"
-# cilium_config_extra_vars: {}
-
-# For adding and mounting extra volumes to the cilium operator
-# cilium_operator_extra_volumes: []
-# cilium_operator_extra_volume_mounts: []
-
-# Extra arguments for the Cilium Operator
-# cilium_operator_custom_args: []
-
-# Name of the cluster. Only relevant when building a mesh of clusters.
-# cilium_cluster_name: default
-
-# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
-# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
-# Available for Cilium v1.10 and up.
-# cilium_cni_exclusive: true
-
-# Configure the log file for CNI logging with a retention policy of 7 days.
-# Disable CNI file logging by setting this field to empty explicitly.
-# Available for Cilium v1.12 and up.
-# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"
-
-# -- Configure cgroup-related settings
-# -- Enable auto mount of the cgroup2 filesystem.
-# When `cilium_cgroup_auto_mount` is enabled, the cgroup2 filesystem is mounted at
-# the `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
-# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted
-# the cgroup2 filesystem at the specified `cilium_cgroup_host_root` path, and then the
-# volume will be mounted inside the cilium agent pod at the same path.
-# Available for Cilium v1.11 and up
-# cilium_cgroup_auto_mount: true
-# -- Configure the cgroup root where the cgroup2 filesystem is mounted on the host
-# cilium_cgroup_host_root: "/run/cilium/cgroupv2"
-
-# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
-# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
-# cilium_bpf_map_dynamic_size_ratio: "0.0"
-
-# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
-# Available for Cilium v1.10 and up
-# cilium_enable_ipv4_masquerade: true
-# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
-# Available for Cilium v1.10 and up
-# cilium_enable_ipv6_masquerade: true
-
-# -- Enable native IP masquerade support in eBPF
-# cilium_enable_bpf_masquerade: false
-
-# -- Configure whether direct routing mode should route traffic via
-# the host stack (true) or directly and more efficiently out of BPF (false) if
-# the kernel supports it. The latter has the implication that it will also
-# bypass netfilter in the host namespace.
-# cilium_enable_host_legacy_routing: true
-
-# -- Enable use of the remote node identity.
-# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
-# cilium_enable_remote_node_identity: true
-
-# -- Enable the use of well-known identities.
-# cilium_enable_well_known_identities: false
-
-# cilium_enable_bpf_clock_probe: true
-
-# -- Whether to enable CNP status updates.
-# cilium_disable_cnp_status_updates: true
diff --git a/inventory/group_vars/k8s_cluster/k8s-net-flannel.yml b/inventory/group_vars/k8s_cluster/k8s-net-flannel.yml
deleted file mode 100644
index 1a38ba7..0000000
--- a/inventory/group_vars/k8s_cluster/k8s-net-flannel.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# see roles/network_plugin/flannel/defaults/main.yml
-
-## interface that should be used for flannel operations
-## This is actually an inventory cluster-level item
-# flannel_interface:
-
-## Select the interface that should be used for flannel operations by a regexp on name or IP
-## This is actually an inventory cluster-level item
-## example: select the interface with an IP from net 10.0.0.0/23
-## single-quote and escape backslashes
-# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
-
-# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'.
-# For experimental backends,
-# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md
-# flannel_backend_type: "vxlan"
-# flannel_vxlan_vni: 1
-# flannel_vxlan_port: 8472
-# flannel_vxlan_direct_routing: false
diff --git a/inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml
deleted file mode 100644
index d580e15..0000000
--- a/inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-
-# geneve or vlan
kube_ovn_network_type: geneve
-
-# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt needs a custom-compiled OVS kernel module
-kube_ovn_tunnel_type: geneve
-
-## The NIC to support the container network can be a NIC name or a group of regexes separated by commas, e.g. 'enp6s0f0,eth.*'; if empty, the NIC that the default route uses will be used.
-# kube_ovn_iface: eth1
-## The MTU used by the pod iface in overlay networks (default iface MTU - 100)
-# kube_ovn_mtu: 1333
-
-## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
-kube_ovn_hw_offload: false
-# traffic mirror
-kube_ovn_traffic_mirror: false
-
-# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
-# kube_ovn_default_interface_name: eth0
-
-kube_ovn_external_address: 8.8.8.8
-kube_ovn_external_address_ipv6: 2400:3200::1
-kube_ovn_external_dns: alauda.cn
-
-# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0
-kube_ovn_default_gateway_check: true
-kube_ovn_default_logical_gateway: false
-# kube_ovn_default_exclude_ips: 10.16.0.1
-kube_ovn_node_switch_cidr: 100.64.0.0/16
-kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64
-
-## VLAN config: set the default interface name and VLAN ID
-# kube_ovn_default_interface_name: eth0
-kube_ovn_default_vlan_id: 100
-kube_ovn_vlan_name: product
-
-## Pod NIC type, supported values: veth-pair or internal-port
-kube_ovn_pod_nic_type: veth_pair
-
-## Enable load balancer
-kube_ovn_enable_lb: true
-
-## Enable network policy support
-kube_ovn_enable_np: true
-
-## Enable external VPC support
-kube_ovn_enable_external_vpc: true
-
-## Enable checksum
-kube_ovn_encap_checksum: true
-
-## Enable SSL
-kube_ovn_enable_ssl: false
-
-## DPDK
-kube_ovn_dpdk_enabled: false
diff --git a/inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml b/inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml
deleted file mode 100644
index e969633..0000000
--- a/inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-# See roles/network_plugin/kube-router/defaults/main.yml
-
-# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
-# kube_router_run_router: true
-
-# Enables Network Policy -- sets up iptables to provide an ingress firewall for pods
-# kube_router_run_firewall: true
-
-# Enables Service Proxy -- sets up IPVS for Kubernetes Services
-# see docs/kube-router.md "Caveats" section
-# kube_router_run_service_proxy: false
-
-# Add the Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
-# kube_router_advertise_cluster_ip: false
-
-# Add the External IP of the service to the RIB so that it gets advertised to the BGP peers.
-# kube_router_advertise_external_ip: false
-
-# Add the LoadBalancer IP of the service status, as set by the LB provider, to the RIB so that it gets advertised to the BGP peers.
-# kube_router_advertise_loadbalancer_ip: false
-
-# Adjust the kube-router daemonset manifest template with the changes needed for DSR
-# kube_router_enable_dsr: false
-
-# Array of arbitrary extra arguments to kube-router, see
-# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
-# kube_router_extra_args: []
-
-# ASN number of the cluster, used when communicating with external BGP routers
-# kube_router_cluster_asn: ~
-
-# ASN numbers of the BGP peers to which cluster nodes will advertise the cluster IP and each node's pod CIDR.
-# kube_router_peer_router_asns: ~
-
-# The IP addresses of the external routers to which all nodes will peer and advertise the cluster IP and pod CIDRs.
-# kube_router_peer_router_ips: ~
-
-# The remote port of the external BGP router to which all nodes will peer. If not set, the default BGP port (179) will be used.
-# kube_router_peer_router_ports: ~
-
-# Sets up node CNI to allow hairpin mode; requires node reboots, see
-# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
-# kube_router_support_hairpin_mode: false
-
-# Select DNS Policy: ClusterFirstWithHostNet, ClusterFirst, etc.
-# kube_router_dns_policy: ClusterFirstWithHostNet
-
-# Array of annotations for master
-# kube_router_annotations_master: []
-
-# Array of annotations for every node
-# kube_router_annotations_node: []
-
-# Array of common annotations for every node
-# kube_router_annotations_all: []
-
-# Enables scraping kube-router metrics with Prometheus
-# kube_router_enable_metrics: false
-
-# Path to serve Prometheus metrics on
-# kube_router_metrics_path: /metrics
-
-# Prometheus metrics port to use
-# kube_router_metrics_port: 9255
diff --git a/inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml b/inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml
deleted file mode 100644
index d2534e7..0000000
--- a/inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# Private interface, on an L2 network
-macvlan_interface: "eth1"
-
-# Enable NAT on the default gateway network interface
-enable_nat_default_gateway: true
diff --git a/inventory/group_vars/k8s_cluster/k8s-net-weave.yml b/inventory/group_vars/k8s_cluster/k8s-net-weave.yml
deleted file mode 100644
index 269a77c..0000000
--- a/inventory/group_vars/k8s_cluster/k8s-net-weave.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-# see roles/network_plugin/weave/defaults/main.yml
-
-# Weave's network password for encryption; if null, no network encryption.
-# weave_password: ~
-
-# If set to 1, disable checking for new Weave Net versions (default is blank,
-# i.e. check is enabled)
-# weave_checkpoint_disable: false
-
-# Soft limit on the number of connections between peers. Defaults to 100.
-# weave_conn_limit: 100
-
-# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
-# for attached containers. If you need to disable hairpin, e.g. your kernel is
-# one of those that can panic if hairpin is enabled, then you can disable it by
-# setting `HAIRPIN_MODE=false`.
-# weave_hairpin_mode: true
-
-# The range of IP addresses used by Weave Net and the subnet they are placed in
-# (CIDR format; default 10.32.0.0/12)
-# weave_ipalloc_range: "{{ kube_pods_subnet }}"
-
-# Set to 0 to disable the Network Policy Controller (default is on)
-# weave_expect_npc: "{{ enable_network_policy }}"
-
-# List of addresses of peers in the Kubernetes cluster (default is to fetch the
-# list from the api-server)
-# weave_kube_peers: ~
-
-# Set the initialization mode of the IP Address Manager (defaults to consensus
-# amongst the KUBE_PEERS)
-# weave_ipalloc_init: ~
-
-# Set the IP address used as a gateway from the Weave network to the host
-# network - this is useful if you are configuring the addon as a static pod.
-# weave_expose_ip: ~
-
-# Address and port that the Weave Net daemon will serve Prometheus-style
-# metrics on (defaults to 0.0.0.0:6782)
-# weave_metrics_addr: ~
-
-# Address and port that the Weave Net daemon will serve status requests on
-# (defaults to disabled)
-# weave_status_addr: ~
-
-# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
-# underlying network has a tighter limit, or set a larger size for better
-# performance if your network supports jumbo frames (e.g. 8916)
-# weave_mtu: 1376
-
-# Set to 1 to preserve the client source IP address when accessing Services
-# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
-# only with Weave IPAM (default).
-# weave_no_masq_local: true
-
-# Set to nft to use the nftables backend for iptables (default is iptables)
-# weave_iptables_backend: iptables
-
-# Extra variables that are passed to launch.sh, useful for enabling seed mode, see
-# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
-# weave_extra_args: ~
-
-# Extra variables for weave_npc that are passed to launch.sh, useful for changing the log level, e.g. --log-level=error
-# weave_npc_extra_args: ~
diff --git a/inventory/hosts.yaml b/inventory/hosts.yaml
index 4599b40..c9f597c 100644
--- a/inventory/hosts.yaml
+++ b/inventory/hosts.yaml
@@ -21,36 +21,11 @@ all:
       ip: 89.58.12.244
       access_ip: 89.58.12.244
   children:
-    kube_control_plane:
-      hosts:
-        hopper:
-        turing:
-    kube_node:
-      hosts:
-        hopper:
-        turing:
-        lovelace:
-        neumann:
-        ritchie:
-    etcd:
-      hosts:
-        hopper:
-        turing:
-        lovelace:
-    k8s_cluster:
-      children:
-        kube_control_plane:
-        kube_node:
-    calico_rr:
-      hosts: {}
     podman:
       hosts:
         turing:
-        lovelace:
-        hopper:
         ritchie:
+        neumann:
     nginx:
       hosts:
         turing:
-        ritchie:
-        neumann:
diff --git a/inventory/patches/kube-controller-manager+merge.yaml b/inventory/patches/kube-controller-manager+merge.yaml
deleted file mode 100644
index 3f0fbbc..0000000
--- a/inventory/patches/kube-controller-manager+merge.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-controller-manager
-  annotations:
-    prometheus.io/scrape: 'true'
-    prometheus.io/port: '10257'
diff --git a/inventory/patches/kube-scheduler+merge.yaml b/inventory/patches/kube-scheduler+merge.yaml
deleted file mode 100644
index 00f4572..0000000
--- a/inventory/patches/kube-scheduler+merge.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-scheduler
-  annotations:
-    prometheus.io/scrape: 'true'
-    prometheus.io/port: '10259'
diff --git a/kubespray b/kubespray
deleted file mode 160000
-Subproject commit 60dbf854de8c67192f9971fede21552c8aed286
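
For reference, after this commit the children section of inventory/hosts.yaml reduces to the two remaining non-Kubernetes groups. A reconstruction from the hunk above (indentation approximate, derived only from the diff):

    children:
      podman:
        hosts:
          turing:
          ritchie:
          neumann:
      nginx:
        hosts:
          turing: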
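The MTU arithmetic in the removed k8s-net-calico.yml is easy to misapply, so a worked example may help. A minimal sketch, assuming a hypothetical 1500-byte network MTU with VXLAN encapsulation (these values were never set in this repository):

    # Hypothetical values. VXLAN adds 50 bytes of overhead,
    # so the workload/tunnel MTU is 1500 - 50 = 1450.
    calico_vxlan_mode: 'Always'
    calico_ipip_mode: 'Never'
    calico_veth_mtu: 1450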
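Likewise, the cilium_config_extra_vars dictionary in the removed k8s-net-cilium.yml follows the format shown in its own comment. A hypothetical example (the key shown is illustrative, not taken from this repository):

    # Illustrative only: key/value pairs here are added to the cilium-config ConfigMap.
    cilium_config_extra_vars:
      enable-endpoint-routes: "true"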
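Finally, the BGP peering variables in the removed k8s-net-kube-router.yml fit together as below. A sketch with made-up ASNs and a made-up router address (none of these values appear in this repository):

    # Made-up example: nodes in AS 64513 peer with one upstream router in AS 64512
    # and advertise cluster IPs alongside each node's pod CIDR.
    kube_router_cluster_asn: 64513
    kube_router_peer_router_asns: 64512
    kube_router_peer_router_ips: 10.0.0.1
    kube_router_advertise_cluster_ip: true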