diff --git a/ansible/roles/k0s/prereq/tasks/main.yml b/ansible/roles/k0s/prereq/tasks/main.yml
index 0eb5797..4cf8042 100644
--- a/ansible/roles/k0s/prereq/tasks/main.yml
+++ b/ansible/roles/k0s/prereq/tasks/main.yml
@@ -35,4 +35,4 @@
     owner: "{{ ansible_user }}"
     group: "{{ ansible_user }}"
     mode: 0600
-  when: not k0s_use_custom_config
+  when: not k0s_use_custom_config
\ No newline at end of file
diff --git a/ansible/roles/k0s/prereq/templates/k0s.yaml.j2 b/ansible/roles/k0s/prereq/templates/k0s.yaml.j2
index 896a448..3bfbbdd 100644
--- a/ansible/roles/k0s/prereq/templates/k0s.yaml.j2
+++ b/ansible/roles/k0s/prereq/templates/k0s.yaml.j2
@@ -4,17 +4,38 @@ metadata:
   name: k0s
 spec:
   api:
+    extraArgs:
+      feature-gates: "TopologyAwareHints=true"
+{% if enable_ha_mode %}
     externalAddress: {{ lb_address }}
+    sans:
+    - {{ lb_address }}
+{% endif %}
     address: {{ ansible_default_ipv4.address }}
     port: 6443
     k0sApiPort: 9443
+{% if not enable_ha_mode %}
     sans:
-    - {{ lb_address }}
+    - {{ ansible_default_ipv4.address }}
+    - 10.244.0.1
+    tunneledNetworkingMode: false
+{% endif %}
+  controllerManager:
+    extraArgs:
+      feature-gates: "TopologyAwareHints=true"
   storage:
     etcd:
       peerAddress: {{ ansible_default_ipv4.address }}
       externalCluster: null
     type: etcd
+{% if not enable_ha_mode %}
+  network:
+    calico: null
+    clusterDomain: cluster.local
+    dualStack: {}
+    kubeProxy:
+      mode: iptables
+{% elif enable_ha_mode %}
   network:
     podCIDR: 10.244.0.0/16
     serviceCIDR: 10.96.0.0/12
@@ -26,6 +47,7 @@ spec:
       peerRouterIPs: ""
       peerRouterASNs: ""
       autoMTU: true
+{% endif %}
   podSecurityPolicy:
     defaultPolicy: 00-k0s-privileged
   telemetry:
@@ -40,14 +62,14 @@ spec:
     kubeSchedulerUser: kube-scheduler
   images:
     konnectivity:
-      image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent
-      version: v0.0.25
+      image: quay.io/k0sproject/apiserver-network-proxy-agent
+      version: 0.0.32-k0s1
     metricsserver:
-      image: gcr.io/k8s-staging-metrics-server/metrics-server
-      version: v0.5.0
+      image: k8s.gcr.io/metrics-server/metrics-server
+      version: v0.6.1
     kubeproxy:
       image: k8s.gcr.io/kube-proxy
-      version: v1.24.2
+      version: v1.24.6
     coredns:
       image: docker.io/coredns/coredns
       version: 1.7.0
@@ -64,11 +86,16 @@ spec:
     kuberouter:
       cni:
         image: docker.io/cloudnativelabs/kube-router
-        version: v1.2.1
+        version: v1.5.1
       cniInstaller:
         image: quay.io/k0sproject/cni-node
-        version: 0.1.0
+        version: 1.1.1-k0s.0
     default_pull_policy: IfNotPresent
   konnectivity:
     agentPort: 8132
     adminPort: 8133
+  workerProfiles:
+  - name: custom-feature-gate
+    values:
+      featureGates:
+        TopologyAwareHints: "true"
\ No newline at end of file
diff --git a/ansible/upgrade.yaml b/ansible/upgrade.yaml
index 78f08f1..a503beb 100644
--- a/ansible/upgrade.yaml
+++ b/ansible/upgrade.yaml
@@ -6,14 +6,14 @@
   gather_facts: yes
   tasks:
-    - name: Get list of k0s workers hosts
+    - name: get hosts
       local_action:
        module: shell
        _raw_params: "kubectl --kubeconfig $KUBECONFIG get nodes"
      become: false
      register: k0s_workers
 
-    - name: Descriptions
+    - name: verbose
      ansible.builtin.debug:
        msg:
          - "The following playbook allows you to upgrade the k0s version on nodes safely and one at a time"
@@ -21,13 +21,13 @@
          - "If you are upgrading a k0s controller node check inventory for the desired hostname."
          - "The k0s worker nodes in your current cluster are:"
 
-    - name: List nodes
+    - name: nodes
      ansible.builtin.debug:
        msg:
          - "{{ item }}"
      with_items: "{{ k0s_workers.stdout_lines }}"
 
-    - name: Prompt for target_host
+    - name: target_host
      pause:
        prompt: "Specify which node to upgrade:"
        echo: yes
@@ -41,21 +41,24 @@
        target_host: "{{ target_host.user_input }}"
    # End of inventory on the fly block
 
+    - ansible.builtin.debug:
+        msg:
+          - "{{ target_host }}"
 
-    - name: Drain/cordon node
+    - name: drain node
      local_action:
        module: shell
-        _raw_params: "kubectl --kubeconfig $KUBECONFIG drain {{ target_host }} --force --ignore-daemonsets --delete-emptydir-data"
+        _raw_params: "kubectl --kubeconfig $KUBECONFIG drain {{ target_host }} --disable-eviction=true --force --ignore-daemonsets --delete-emptydir-data"
      become: false
      register: node_drain_out
 
-    - name: Set k8s_version_upgrade_eviction_seconds fact
+    - name: set k8s_version_upgrade_eviction_seconds fact
      set_fact:
        k8s_version_upgrade_eviction_seconds: 45
      run_once: true
      when: k8s_version_upgrade_eviction_seconds is undefined
 
-    - name: Allow some time for the evictions
+    - name: allow some time for the evictions
      pause:
        seconds: "{{ k8s_version_upgrade_eviction_seconds }}"
@@ -73,7 +76,7 @@
      tags: download
 
 - hosts: dynamically_created_hosts
-  name: Start the k0s service
+  name: start the k0s service
   become: yes
   tasks:
    - shell: k0s start
@@ -81,7 +84,10 @@
 - hosts: localhost
   gather_facts: yes
   tasks:
-    - name: Uncordon node
+    - name: Sleep for 30 seconds to give k0s a chance to start up
+      ansible.builtin.wait_for:
+        timeout: 30
+    - name: uncordon node
      local_action:
        module: shell
        _raw_params: "kubectl --kubeconfig $KUBECONFIG uncordon {{ target_host }}"
diff --git a/helmfiles/templates/graph-goerli.yaml b/helmfiles/templates/graph-goerli.yaml
index 07ff98c..e7144ac 100644
--- a/helmfiles/templates/graph-goerli.yaml
+++ b/helmfiles/templates/graph-goerli.yaml
@@ -16,7 +16,7 @@ templates:
   launchpad-release-template-graph-node: &launchpad-release-template-graph-node
    <<: *launchpad-release-template-defaults
    chart: graphops/graph-node
-    version: 0.1.0
+    version: 0.1.1
   launchpad-release-template-graph-network-indexer: &launchpad-release-template-graph-network-indexer
    <<: *launchpad-release-template-defaults
diff --git a/taskfiles/hosts.yml b/taskfiles/hosts.yml
index b2191ef..56ea1e7 100644
--- a/taskfiles/hosts.yml
+++ b/taskfiles/hosts.yml
@@ -19,9 +19,9 @@ tasks:
    cmds:
      - ansible-playbook -vv launchpad-core/ansible/install_k0s_cluster.yaml --diff -i {{.INVENTORY_PATH}} {{.CLI_ARGS}}
 
-  upgrade-k0s-node:
+  k0s-node-upgrade:
    interactive: true # for interactive scripts
-    desc: Playbook to upgrade version of k0s on one cluster node at a time
+    desc: Safely upgrade the version of a k0s node in a cluster
    cmds:
      - ansible-playbook -vv launchpad-core/ansible/upgrade.yaml --diff -i {{.INVENTORY_PATH}} {{.CLI_ARGS}}