|
1 | 1 | - name: Set up Kubernetes local cluster |
2 | 2 | hosts: all |
3 | 3 | roles: |
4 | | - - install-k8s-jobs-dependences |
| 4 | + - export-vexxhost-openrc |
5 | 5 | become: yes |
6 | 6 | tasks: |
7 | 7 | - name: Set up Kubernetes local cluster |
8 | 8 | shell: |
9 | 9 | cmd: | |
| 10 | + set -x |
10 | 11 | set -e |
| 12 | + set -o pipefail |
| 13 | +
|
11 | 14 | apt-get install python-pip -y |
12 | 15 | pip install -U python-openstackclient |
13 | 16 |
|
14 | | - export OS_DOMAIN_NAME=$(echo '{{ vexxhost_credentials.user_domain_name }}') |
15 | | - export OS_AUTH_TYPE=$(echo '{{ vexxhost_credentials.auth_type }}') |
16 | | - export OS_IDENTITY_API_VERSION=$(echo '{{ vexxhost_credentials.identity_api_version }}') |
17 | | - export OS_VOLUME_API_VERSION=$(echo '{{ vexxhost_credentials.volume_api_version }}') |
18 | | - export OS_INTERFACE=$(echo '{{ vexxhost_credentials.interface }}') |
19 | | - export OS_AUTH_URL=$(echo '{{ vexxhost_credentials.auth_url }}') |
20 | | - export OS_PROJECT_ID=$(echo '{{ vexxhost_credentials.project_id }}') |
21 | | - export OS_PROJECT_NAME=$(echo '{{ vexxhost_credentials.project_name }}') |
22 | | - export OS_USER_DOMAIN_NAME=$(echo '{{ vexxhost_credentials.user_domain_name }}') |
23 | | - export OS_PROJECT_DOMAIN_ID=$(echo '{{ vexxhost_credentials.project_domain_id }}') |
24 | | - export OS_USERNAME=$(echo '{{ vexxhost_credentials.username }}') |
25 | | - export OS_PASSWORD=$(echo '{{ vexxhost_credentials.password }}') |
26 | | - export OS_REGION_NAME=$(echo '{{ vexxhost_credentials.region_name }}') |
| 17 | + mkdir -p /etc/kubernetes/ |
| 18 | + cp ./examples/webhook/policy.json /etc/kubernetes/ |
| 19 | + sed -i "s/c1f7910086964990847dc6c8b128f63c/$OS_PROJECT_ID/g" /etc/kubernetes/policy.json |
| 20 | + sed -i -e "s/k8s-admin/creator/g" /etc/kubernetes/policy.json |
27 | 21 |
|
28 | | - if [[ ! -d "/etc/kubernetes/" ]]; then |
29 | | - sudo mkdir -p /etc/kubernetes/ |
30 | | - fi |
31 | | - chown zuul /etc/kubernetes/ |
| 22 | + # Create cloud-config |
32 | 23 | cat << EOF >> /etc/kubernetes/cloud-config |
33 | 24 | [Global] |
34 | | - domain-name = ${OS_PROJECT_DOMAIN_NAME-$OS_PROJECT_DOMAIN_ID} |
| 25 | + domain-name = $OS_USER_DOMAIN_NAME |
35 | 26 | tenant-id = $OS_PROJECT_ID |
36 | 27 | auth-url = $OS_AUTH_URL |
37 | 28 | password = $OS_PASSWORD |
38 | 29 | username = $OS_USERNAME |
39 | 30 | region = $OS_REGION_NAME |
40 | 31 | [BlockStorage] |
41 | 32 | bs-version = v2 |
| 33 | + ignore-volume-az = yes |
42 | 34 | EOF |
43 | 35 |
|
| 36 | + # Create webhook.kubeconfig |
44 | 37 | cat << EOF >> /etc/kubernetes/webhook.kubeconfig |
45 | 38 | apiVersion: v1 |
46 | 39 | clusters: |
|
60 | 53 | - name: webhook |
61 | 54 | EOF |
62 | 55 |
|
63 | | - set -x |
64 | | - make depend |
65 | | - make build |
66 | | - mkdir -p "{{ ansible_user_dir }}/.kube" |
67 | | - export API_HOST_IP="172.17.0.1" |
| 56 | + # Go where we cloned kubernetes repository |
| 57 | + cd $GOPATH/src/k8s.io/kubernetes/ |
| 58 | + export API_HOST_IP=$(ifconfig | awk '/^docker0/ {getline; print $2}' | awk -F ':' '{print $2}') |
68 | 59 | export KUBELET_HOST="0.0.0.0" |
69 | | -
|
70 | | - echo "Stopping firewall and allow all traffic..." |
71 | | - iptables -F |
72 | | - iptables -X |
73 | | - iptables -t nat -F |
74 | | - iptables -t nat -X |
75 | | - iptables -t mangle -F |
76 | | - iptables -t mangle -X |
77 | | - iptables -P INPUT ACCEPT |
78 | | - iptables -P FORWARD ACCEPT |
79 | | - iptables -P OUTPUT ACCEPT |
80 | 60 | export ALLOW_SECURITY_CONTEXT=true |
81 | 61 | export ENABLE_CRI=false |
82 | 62 | export ENABLE_HOSTPATH_PROVISIONER=true |
83 | 63 | export ENABLE_SINGLE_CA_SIGNER=true |
84 | | - # export KUBE_ENABLE_CLUSTER_DASHBOARD=true |
85 | 64 | export KUBE_ENABLE_CLUSTER_DNS=false |
86 | | - export LOG_LEVEL=10 |
87 | | - # we want to use the openstack cloud provider |
| 65 | + export LOG_LEVEL=4 |
| 66 | + # We want to use the openstack cloud provider |
88 | 67 | export CLOUD_PROVIDER=openstack |
89 | | - # we want to run a separate cloud-controller-manager for openstack |
| 68 | + # We want to run a separate cloud-controller-manager for openstack |
90 | 69 | export EXTERNAL_CLOUD_PROVIDER=true |
91 | 70 | # DO NOT change the location of the cloud-config file. It is important for the old cinder provider to work |
92 | 71 | export CLOUD_CONFIG=/etc/kubernetes/cloud-config |
93 | | - # specify the OCCM binary |
94 | | - export EXTERNAL_CLOUD_PROVIDER_BINARY="{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/openstack-cloud-controller-manager" |
95 | | - # Cleanup some directories just in case |
96 | | - sudo rm -rf /var/lib/kubelet/* |
| 72 | + # Specify the OCCM binary |
| 73 | + export EXTERNAL_CLOUD_PROVIDER_BINARY='{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/openstack-cloud-controller-manager' |
97 | 74 |
|
98 | 75 | # location of where the kubernetes processes log their output |
99 | | - mkdir -p "{{ ansible_user_dir }}/workspace/logs/kubernetes" |
100 | | - export LOG_DIR="{{ ansible_user_dir }}/workspace/logs/kubernetes" |
| 76 | + mkdir -p '{{ ansible_user_dir }}/workspace/logs/kubernetes' |
| 77 | + export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' |
101 | 78 | # We need this for one of the conformance tests |
102 | 79 | export ALLOW_PRIVILEGED=true |
103 | 80 | # Just kick off all the processes and drop down to the command line |
104 | 81 | export ENABLE_DAEMON=true |
105 | | - # We need the hostname to match the name of the vm started by openstack |
106 | 82 | export HOSTNAME_OVERRIDE=$(curl http://169.254.169.254/openstack/latest/meta_data.json | python -c "import sys, json; print json.load(sys.stdin)['name']") |
107 | | -
|
108 | | - # copy the same policy json and fix up the hard coded project id |
109 | | - cp ./examples/webhook/policy.json /etc/kubernetes/ |
110 | | - sed -i -e "s|c1f7910086964990847dc6c8b128f63c|$OS_PROJECT_ID|g" /etc/kubernetes/policy.json |
111 | | - # pick an existing role of the user and replace the k8s-admin role in policy.json |
112 | | - sed -i -e "s|k8s-admin|creator|g" /etc/kubernetes/policy.json |
113 | | - # print the modified file |
114 | | - cat /etc/kubernetes/policy.json |
115 | | -
|
116 | | - pushd ${GOPATH}/src/k8s.io/kubernetes |
| 83 | + export MAX_TIME_FOR_URL_API_SERVER=5 |
117 | 84 | export AUTHORIZATION_MODE="Node,Webhook,RBAC" |
118 | 85 |
|
119 | 86 | sed -i -e "/kube::util::wait_for_url.*$/,+1d" hack/local-up-cluster.sh |
|
122 | 89 |
|
123 | 90 | # -E preserves the current env vars, but we need to special case PATH |
124 | 91 | sudo -E PATH=$PATH SHELLOPTS=$SHELLOPTS ./hack/local-up-cluster.sh -O |
125 | | -
|
126 | 92 | nohup "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/k8s-keystone-auth" \ |
127 | 93 | --tls-cert-file /var/run/kubernetes/serving-kube-apiserver.crt \ |
128 | 94 | --tls-private-key-file /var/run/kubernetes/serving-kube-apiserver.key \ |
129 | 95 | --keystone-policy-file /etc/kubernetes/policy.json \ |
130 | 96 | --log-dir=${LOG_DIR} \ |
131 | 97 | --v=10 \ |
132 | | - --keystone-url ${OS_AUTH_URL} >"${LOG_DIR}/keystone-auth.log" 2>&1 & |
133 | | -
|
134 | | - # sudo of local-up-cluster mucks with permissions |
135 | | - sudo chmod -R 777 "{{ ansible_user_dir }}/.kube" |
136 | | - sudo chmod 777 /var/run/kubernetes/client-admin.key |
| 98 | + --keystone-url ${OS_AUTH_URL} >"${LOG_DIR}/keystone-auth.log" 2>&1 & |
137 | 99 |
|
138 | 100 | # set up the config we need for kubectl to work |
139 | 101 | cluster/kubectl.sh config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt |
|
150 | 112 | cluster/kubectl.sh create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin |
151 | 113 |
|
152 | 114 | { |
153 | | - TOKEN=$(openstack token issue -f value -c id) |
154 | | - authenticated_info=`cat << EOF | curl -kvs -XPOST -d @- https://localhost:8443/webhook | python -c "import sys, json; print json.load(sys.stdin)" |
155 | | - { |
156 | | - "apiVersion": "authentication.k8s.io/v1beta1", |
157 | | - "kind": "TokenReview", |
158 | | - "metadata": { |
159 | | - "creationTimestamp": null |
160 | | - }, |
161 | | - "spec": { |
162 | | - "token": "$TOKEN" |
163 | | - } |
164 | | - } |
165 | | - EOF` |
166 | | - base_body=`cat << EOF | python -c "import sys, json; print json.load(sys.stdin)" |
167 | | - { |
168 | | - "apiVersion": "authorization.k8s.io/v1beta1", |
169 | | - "kind": "SubjectAccessReview", |
170 | | - "spec": { |
171 | | - "resourceAttributes": { |
172 | | - "namespace": "default", |
173 | | - "verb": "get", |
174 | | - "group": "", |
175 | | - "resource": "pods" |
176 | | - } |
177 | | - } |
178 | | - } |
179 | | - EOF` |
180 | | - authorization_body=$(python -c "import json; s1=${authenticated_info}; s2=${base_body}; \ |
181 | | - s2['spec']['user']=s1['status']['user']['username']; \ |
182 | | - s2['spec']['group']=s1['status']['user']['groups']; \ |
183 | | - s2['spec']['extra']=s1['status']['user']['extra'];print json.dumps(s2)") |
184 | | - allowed=$(echo $authorization_body | curl -kvs -XPOST -d @- https://localhost:8443/webhook | python -mjson.tool) |
185 | | - } 1> /dev/null 2>&1 |
186 | | - echo ${allowed} |
187 | | - [[ "${allowed}" =~ '"allowed": true' ]] && echo "Testing k8s-keystone-auth sucessfully!" |
| 115 | + authenticated_info=$(cat <<< ' |
| 116 | + { |
| 117 | + "apiVersion": "authentication.k8s.io/v1beta1", |
| 118 | + "kind": "TokenReview", |
| 119 | + "metadata": { |
| 120 | + "creationTimestamp": null |
| 121 | + }, |
| 122 | + "spec": { |
| 123 | + "token": "'$(openstack token issue -f value -c id)'" |
| 124 | + } |
| 125 | + }' | curl -kvs -XPOST -d @- https://localhost:8443/webhook | python -c "import sys, json; print json.load(sys.stdin)" |
| 126 | + ) |
| 127 | + base_body=$(cat <<< ' |
| 128 | + { |
| 129 | + "apiVersion": "authorization.k8s.io/v1beta1", |
| 130 | + "kind": "SubjectAccessReview", |
| 131 | + "spec": { |
| 132 | + "resourceAttributes": { |
| 133 | + "namespace": "default", |
| 134 | + "verb": "get", |
| 135 | + "group": "", |
| 136 | + "resource": "pods" |
| 137 | + } |
| 138 | + } |
| 139 | + }' | python -c "import sys, json; print json.load(sys.stdin)" |
| 140 | + ) |
| 141 | + update_auth_info=$(echo " |
| 142 | + import json; |
| 143 | + s1=$authenticated_info; |
| 144 | + s2=$base_body; |
| 145 | + s2['spec']['user']=s1['status']['user']['username']; |
| 146 | + s2['spec']['group']=s1['status']['user']['groups']; |
| 147 | + s2['spec']['extra']=s1['status']['user']['extra']; |
| 148 | + print json.dumps(s2)" | sed 's/^ \+//' |
| 149 | + ) |
| 150 | + authorization_body=$(python -c "$update_auth_info") |
| 151 | + allowed=$(echo "$authorization_body" | curl -kvs -XPOST -d @- https://localhost:8443/webhook | python -mjson.tool) |
| 152 | + } > /dev/null 2>&1 |
| 153 | +
|
| 154 | + echo "$allowed" |
| 155 | + [[ "$allowed" =~ '"allowed": true' ]] && echo "Testing k8s-keystone-auth successfully!" |
188 | 156 |
|
189 | 157 | cluster/kubectl.sh config set-credentials openstackuser --auth-provider=openstack |
190 | 158 | cluster/kubectl.sh config set-context --cluster=local --user=openstackuser openstackuser@local |
|
193 | 161 | echo "Testing kubernetes+keystone authentication and authorization failed!" |
194 | 162 | exit 1 |
195 | 163 | fi |
196 | | - popd |
197 | 164 | executable: /bin/bash |
198 | 165 | chdir: '{{ zuul.project.src_dir }}' |
199 | | - environment: '{{ golang_env }}' |
| 166 | + environment: '{{ golang_env | combine(vexxhost_openrc) }}' |
0 commit comments