Rename ansible groups to use _ instead of - (kubernetes-sigs#7552)
* rename ansible groups to use _ instead of -

k8s-cluster -> k8s_cluster
k8s-node -> k8s_node
calico-rr -> calico_rr
no-floating -> no_floating

Note: kube-node, k8s-cluster groups in upgrade CI
      need clean-up after v2.16 is tagged

* ensure old groups are mapped to the new ones
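
  A rough sketch of how such a mapping can work, assuming it follows the same group_by pattern that this commit removes from cluster.yml in favour of an imported legacy_groups.yml (the contents of that file are not part of this excerpt):

  # Sketch only: alias the old hyphenated groups onto the new underscore
  # groups so that existing inventories keep working unchanged.
  - name: Add kube-master nodes to kube_control_plane
    hosts: kube-master
    gather_facts: false
    tasks:
      - name: add nodes to kube_control_plane group
        group_by:
          key: 'kube_control_plane'

  - name: Add kube-node nodes to kube_node
    hosts: kube-node
    gather_facts: false
    tasks:
      - name: add nodes to kube_node group
        group_by:
          key: 'kube_node'

  - name: Add calico-rr nodes to calico_rr
    hosts: calico-rr
    gather_facts: false
    tasks:
      - name: add nodes to calico_rr group
        group_by:
          key: 'calico_rr'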
cristicalin authored Apr 29, 2021
1 parent d261913 commit 360aff4
Showing 106 changed files with 403 additions and 373 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
4 changes: 2 additions & 2 deletions Vagrantfile
@@ -254,8 +254,8 @@ Vagrant.configure("2") do |config|
ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
"kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
"k8s-cluster:children" => ["kube_control_plane", "kube-node"],
"kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
"k8s_cluster:children" => ["kube_control_plane", "kube_node"],
}
end
end
24 changes: 9 additions & 15 deletions cluster.yml
@@ -2,14 +2,8 @@
- name: Check ansible version
import_playbook: ansible_version.yml

- name: Add kube-master nodes to kube_control_plane
# This is for old inventory which contains kube-master instead of kube_control_plane
hosts: kube-master
gather_facts: false
tasks:
- name: add nodes to kube_control_plane group
group_by:
key: 'kube_control_plane'
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml

- hosts: bastion[0]
gather_facts: False
@@ -18,7 +12,7 @@
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s-cluster:etcd
- hosts: k8s_cluster:etcd
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
@@ -31,7 +25,7 @@
tags: always
import_playbook: facts.yml

- hosts: k8s-cluster:etcd
- hosts: k8s_cluster:etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -54,7 +48,7 @@
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s-cluster
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -67,7 +61,7 @@
etcd_events_cluster_setup: false
when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s-cluster
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -85,7 +79,7 @@
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s-cluster
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -95,7 +89,7 @@
- { role: network_plugin, tags: network }
- { role: kubernetes/node-label, tags: node-label }

- hosts: calico-rr
- hosts: calico_rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -131,7 +125,7 @@
- { role: kubespray-defaults }
- { role: kubernetes-apps, tags: apps }

- hosts: k8s-cluster
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
4 changes: 2 additions & 2 deletions contrib/aws_inventory/kubespray-aws-inventory.py
@@ -35,7 +35,7 @@ def search_tags(self):
hosts['_meta'] = { 'hostvars': {} }

##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube_control_plane", "kube-node", "etcd"]:
for group in ["kube_control_plane", "kube_node", "etcd"]:
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ def search_tags(self):
hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host

hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
print(json.dumps(hosts, sort_keys=True, indent=2))

SearchEC2Tags()
@@ -21,13 +21,13 @@
{% endif %}
{% endfor %}

[kube-node]
[kube_node]
{% for vm in vm_list %}
{% if 'kube-node' in vm.tags.roles %}
{% if 'kube_node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[k8s-cluster:children]
kube-node
[k8s_cluster:children]
kube_node
kube_control_plane
@@ -21,14 +21,14 @@
{% endif %}
{% endfor %}

[kube-node]
[kube_node]
{% for vm in vm_roles_list %}
{% if 'kube-node' in vm.tags.roles %}
{% if 'kube_node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[k8s-cluster:children]
kube-node
[k8s_cluster:children]
kube_node
kube_control_plane

@@ -61,7 +61,7 @@
"[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
],
"tags": {
"roles": "kube-node"
"roles": "kube_node"
},
"apiVersion": "{{apiVersion}}",
"properties": {
@@ -112,4 +112,4 @@
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}
}
2 changes: 1 addition & 1 deletion contrib/dind/run-test-distros.sh
@@ -46,7 +46,7 @@ test_distro() {
pass_or_fail "$prefix: netcheck" || return 1
}

NODES=($(egrep ^kube-node hosts))
NODES=($(egrep ^kube_node hosts))
NETCHECKER_HOST=localhost

: ${OUTPUT_DIR:=./out}
28 changes: 14 additions & 14 deletions contrib/inventory_builder/inventory.py
@@ -44,8 +44,8 @@
import subprocess
import sys

ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
'calico-rr']
ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
'calico_rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
'load']
@@ -269,7 +269,7 @@ def delete_host_by_ip(self, existing_hosts, ip):

def purge_invalid_hosts(self, hostnames, protected_names=[]):
for role in self.yaml_config['all']['children']:
if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa
for host in all_hosts.keys():
if host not in hostnames and host not in protected_names:
@@ -290,7 +290,7 @@ def add_host_to_group(self, group, host, opts=""):
if self.yaml_config['all']['hosts'] is None:
self.yaml_config['all']['hosts'] = {host: None}
self.yaml_config['all']['hosts'][host] = opts
elif group != 'k8s-cluster:children':
elif group != 'k8s_cluster:children':
if self.yaml_config['all']['children'][group]['hosts'] is None:
self.yaml_config['all']['children'][group]['hosts'] = {
host: None}
@@ -307,37 +307,37 @@ def set_all(self, hosts):

def set_k8s_cluster(self):
k8s_cluster = {'children': {'kube_control_plane': None,
'kube-node': None}}
self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
'kube_node': None}}
self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster

def set_calico_rr(self, hosts):
for host in hosts:
if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
self.debug("Not adding {0} to calico-rr group because it "
self.debug("Not adding {0} to calico_rr group because it "
"conflicts with kube_control_plane "
"group".format(host))
continue
if host in self.yaml_config['all']['children']['kube-node']:
self.debug("Not adding {0} to calico-rr group because it "
"conflicts with kube-node group".format(host))
if host in self.yaml_config['all']['children']['kube_node']:
self.debug("Not adding {0} to calico_rr group because it "
"conflicts with kube_node group".format(host))
continue
self.add_host_to_group('calico-rr', host)
self.add_host_to_group('calico_rr', host)

def set_kube_node(self, hosts):
for host in hosts:
if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
if host in self.yaml_config['all']['children']['etcd']['hosts']: # noqa
self.debug("Not adding {0} to kube-node group because of "
self.debug("Not adding {0} to kube_node group because of "
"scale deployment and host is in etcd "
"group.".format(host))
continue
if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa
if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa
self.debug("Not adding {0} to kube-node group because of "
self.debug("Not adding {0} to kube_node group because of "
"scale deployment and host is in "
"kube_control_plane group.".format(host))
continue
self.add_host_to_group('kube-node', host)
self.add_host_to_group('kube_node', host)

def set_etcd(self, hosts):
for host in hosts:
10 changes: 5 additions & 5 deletions contrib/inventory_builder/tests/test_inventory.py
@@ -241,8 +241,8 @@ def test_set_all(self):
self.inv.yaml_config['all']['hosts'].get(host), opt)

def test_set_k8s_cluster(self):
group = 'k8s-cluster'
expected_hosts = ['kube-node', 'kube_control_plane']
group = 'k8s_cluster'
expected_hosts = ['kube_node', 'kube_control_plane']

self.inv.set_k8s_cluster()
for host in expected_hosts:
@@ -251,7 +251,7 @@ def test_set_kube_node(self):
self.inv.yaml_config['all']['children'][group]['children'])

def test_set_kube_node(self):
group = 'kube-node'
group = 'kube_node'
host = 'node1'

self.inv.set_kube_node([host])
@@ -280,7 +280,7 @@ def test_scale_scenario_one(self):
for h in range(3):
self.assertFalse(
list(hosts.keys())[h] in
self.inv.yaml_config['all']['children']['kube-node']['hosts'])
self.inv.yaml_config['all']['children']['kube_node']['hosts'])

def test_scale_scenario_two(self):
num_nodes = 500
@@ -296,7 +296,7 @@ def test_scale_scenario_two(self):
for h in range(5):
self.assertFalse(
list(hosts.keys())[h] in
self.inv.yaml_config['all']['children']['kube-node']['hosts'])
self.inv.yaml_config['all']['children']['kube_node']['hosts'])

def test_range2ips_range(self):
changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
2 changes: 1 addition & 1 deletion contrib/network-storage/glusterfs/glusterfs.yml
@@ -15,7 +15,7 @@
roles:
- { role: glusterfs/server }

- hosts: k8s-cluster
- hosts: k8s_cluster
roles:
- { role: glusterfs/client }

6 changes: 3 additions & 3 deletions contrib/network-storage/glusterfs/inventory.example
@@ -23,15 +23,15 @@
# node2
# node3

# [kube-node]
# [kube_node]
# node2
# node3
# node4
# node5
# node6

# [k8s-cluster:children]
# kube-node
# [k8s_cluster:children]
# kube_node
# kube_control_plane

# [gfs-cluster]
4 changes: 2 additions & 2 deletions contrib/network-storage/heketi/inventory.yml.sample
@@ -3,7 +3,7 @@ all:
heketi_admin_key: "11elfeinhundertundelf"
heketi_user_key: "!!einseinseins"
children:
k8s-cluster:
k8s_cluster:
vars:
kubelet_fail_swap_on: false
children:
@@ -13,7 +13,7 @@ all:
etcd:
hosts:
node2:
kube-node:
kube_node:
hosts: &kube_nodes
node1:
node2:
2 changes: 1 addition & 1 deletion contrib/packaging/rpm/kubespray.spec
@@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
%config %{_sysconfdir}/%{name}/ansible.cfg
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
%license %{_docdir}/%{name}/LICENSE
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
%{_datarootdir}/%{name}/roles/
8 changes: 4 additions & 4 deletions contrib/terraform/aws/templates/inventory.tpl
@@ -11,18 +11,18 @@ ${public_ip_address_bastion}
${list_master}


[kube-node]
[kube_node]
${list_node}


[etcd]
${list_etcd}


[k8s-cluster:children]
kube-node
[k8s_cluster:children]
kube_node
kube_control_plane


[k8s-cluster:vars]
[k8s_cluster:vars]
${elb_api_fqdn}
6 changes: 3 additions & 3 deletions contrib/terraform/exoscale/templates/inventory.tpl
@@ -11,9 +11,9 @@ supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
[etcd]
${list_master}

[kube-node]
[kube_node]
${list_worker}

[k8s-cluster:children]
[k8s_cluster:children]
kube_control_plane
kube-node
kube_node
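
For an existing static inventory, the practical effect of this rename is that only the group names change. As an illustration (host names and addresses below are placeholders, not taken from this commit), a hosts.yaml written against the new names could look like:

all:
  hosts:
    node1:
      ansible_host: 10.0.0.1
    node2:
      ansible_host: 10.0.0.2
  children:
    kube_control_plane:
      hosts:
        node1:
    etcd:
      hosts:
        node1:
    kube_node:
      hosts:
        node1:
        node2:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}

Because the playbooks map the old groups onto the new ones, inventories that still use the hyphenated names should keep working while they are migrated.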