Keycloak provides a Go programming language adapter for use with OpenID Connect (OIDC) that supports both access tokens in a browser cookie and bearer tokens.
download kops
curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64
change permissions
chmod +x kops-linux-amd64
move to /usr/local/bin/
sudo mv kops-linux-amd64 /usr/local/bin/kops
create ROUTE53 hosted zone
create s3 bucket
aws s3 mb s3://clusters-eu-central-1.kops.mile-kitic.com
export kops vars
export KOPS_STATE_STORE=s3://clusters-eu-central-1.kops.mile-kitic.com
export KOPS_CLUSTER_NAME=clusters-eu-central-1.kops.mile-kitic.com
create cluster
kops create cluster --name=clusters-eu-central-1.kops.mile-kitic.com \
--kubernetes-version=1.16.0 \
--zones=eu-central-1a \
--node-count=2 \
--master-count=1 \
--master-size=t2.medium \
--node-size=t2.medium \
--networking flannel-vxlan \
--topology private \
--cloud=aws \
--dns-zone="kops.mile-kitic.com" \
--bastion \
--image="ami-0e342d72b12109f91" \
--dry-run \
--output yaml
edit cluster to add oidc to kube-apiserver and extend route53 IAM permissions for masters and nodes
kops update cluster --yes
to match
metadata:
creationTimestamp: "2020-05-04T12:24:15Z"
generation: 4
name: clusters-eu-central-1.kops.mile-kitic.com
spec:
additionalPolicies:
master: |
[
{
"Effect": "Allow",
"Action": ["route53:ListHostedZonesByName","route53:ChangeResourceRecordSets"],
"Resource": ["*"]
}
]
node: |
[
{
"Effect": "Allow",
"Action": ["route53:ChangeResourceRecordSets"],
"Resource": ["arn:aws:route53:::hostedzone/*"]
},
{
"Effect": "Allow",
"Action": ["route53:ListHostedZones","route53:ListHostedZonesByName","route53:ListResourceRecordSets"],
"Resource": ["*"]
}
]
api:
...
kubeAPIServer:
oidcClientID: kubernetes
oidcGroupsClaim: groups
oidcGroupsPrefix: 'oidc:'
oidcIssuerURL: https://keycloak.kops.mile-kitic.com/auth/realms/kops.mile-kitic.com
oidcUsernameClaim: preferred_username
oidcUsernamePrefix: 'oidc:'
kubelet:
...
update cluster
kops rolling-update cluster --yes
Download helm and install tiller in k8s cluster via
k apply -f tiller-config.yaml
tiller-config.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system
install tiller
helm init --service-account tiller
add helm repos via:
helm repo add jetstack https://charts.jetstack.io
where final list should look like:
▶ helm repo list
NAME URL
stable https://kubernetes-charts.storage.googleapis.com
local http://127.0.0.1:8879/charts
incubator http://storage.googleapis.com/kubernetes-charts-incubator
codecentric https://codecentric.github.io/helm-charts
gabibbo97 https://gabibbo97.github.io/charts/
jetstack https://charts.jetstack.io
bitnami https://charts.bitnami.com/bitnami
install clusterissuers
clusterissuers.yaml
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: [email protected]
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
# Enable the HTTP-01 challenge provider
solvers:
- http01:
ingress:
class: nginx
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: [email protected]
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
# Enable the HTTP-01 challenge provider
solvers:
- http01:
ingress:
class: nginx
install crd and helm cert-manager chart
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.14.3/cert-manager.crds.yaml
helm install --name cert-manager \
--namespace cert-manager jetstack/cert-manager \
--set ingressShim.defaultIssuerName=letsencrypt-prod \
--set ingressShim.defaultIssuerKind=ClusterIssuer \
--set ingressShim.defaultIssuerGroup=cert-manager.io
This resource is needed for creating ingress resources and exposing k8s apps to the internet. Install the helm chart for it
helm install stable/nginx-ingress --name quickstart --set controller.publishService.enabled=true
This resource is needed for creating Route53 records for ingress resources. Two records will be created for each ingress: an A record and a TXT record.
install helm repo and helm chart for external-dns
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install --name external-dns bitnami/external-dns -f values-external-dns.yaml
values-external-dns.yaml
---
aws:
zoneType: public
domainFilters:
- kops.mile-kitic.com
securityContext:
fsGroup: 65534
txtOwnerId: kops.mile-kitic.com/public
install dashboard helm chart
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml -O dashboard.yaml
k apply -f dashboard.yaml
k apply -f dashboard-adminuser.yaml
or
helm install stable/kubernetes-dashboard --name dashboard -f values-dashboard.yaml --namespace kube-system
dashboard-adminuser.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
values-dashboard.yaml
image:
repository: k8s.gcr.io/kubernetes-dashboard-amd64
tag: v1.10.1
pullPolicy: IfNotPresent
pullSecrets: []
replicaCount: 1
annotations: {}
labels: {}
enableSkipLogin: false
# this is set as user will be redirected to gatekeeper to login
enableInsecureLogin: true
extraEnv: []
podAnnotations: {}
dashboardContainerSecurityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
service:
type: ClusterIP
# http port will be used as this service will not be exposed via ingress
externalPort: 80
nameOverride: "kubernetes-dashboard"
annotations: {}
labels: {}
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
ingress:
enabled: false
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: "letsencrypt-prod"
paths:
- /
rbac:
create: true
clusterAdminRole: true
clusterReadOnlyRole: false
serviceAccount:
create: true
# name of the serviceaccount which can be used to login (as admin) to the dashboard
name: "dashboard-test"
livenessProbe:
initialDelaySeconds: 30
timeoutSeconds: 30
podDisruptionBudget:
enabled: false
minAvailable:
maxUnavailable:
securityContext: {}
networkPolicy: false
Install keycloak helm chart and repo
helm repo add codecentric https://codecentric.github.io/helm-charts
helm install codecentric/keycloak --name keycloak -f values-keycloak.yaml
values-keycloak.yaml
fullnameOverride: keycloak
nameOverride: keycloak
keycloak:
username: admin
password: admin
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
path: /
hosts:
- keycloak.kops.mile-kitic.com
tls:
- hosts:
- keycloak.kops.mile-kitic.com
secretName: tls-keycloak
persistence:
deployPostgres: true
dbVendor: postgres
dbPassword: keycloak
test:
enabled: false
Login to https://keycloak.kops.mile-kitic.com and do the following:
create a realm `kops.mile-kitic.com` and a client `kubernetes` with openid-connect protocol, access type confidential, and set valid redirect URIs to `http://*` and `https://*`
The idea here is to have a service which is exposed to the internet via the gatekeeper ingress/deployment. The backend service will be protected from the internet via gatekeeper/keycloak. When a user first tries to access the frontend service (gatekeeper), it
will redirect the client's request to keycloak for authn and authz. Afterwards, gatekeeper will use the redirection-url info to redirect the authenticated user
to the frontend again. When the user's request comes to the frontend again, but now with authentication info, gatekeeper will
redirect the user's request to the upstream-url (eg. the backend).
Install echo service for debugging
echo.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
app: echo
name: echo
spec:
replicas: 1
selector:
matchLabels:
app: echo
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: echo
spec:
containers:
- image: mendhak/http-https-echo
name: http-https-echo
resources: {}
status: {}
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: echo
name: echo
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: echo
type: ClusterIP
status:
loadBalancer: {}
install echo deployment to k8s
k apply -f echo.yaml
helm install -f values-keycloak-gatekeeper.yaml gabibbo97/keycloak-gatekeeper --name keycloak-gatekeeper
values-keycloak-gatekeeper.yaml
---
ClientID: "kubernetes"
ClientSecret: "7e5bd7ed-9d12-4c97-bf28-3b4df10c6ab1"
addClaims:
- given_name
- name
- family_name
affinity: {}
debug: false
defaultDeny: true
discoveryURL: "https://keycloak.kops.mile-kitic.com/auth/realms/kops.mile-kitic.com"
droolsPolicyEnabled: false
encryptionKey: ""
extraArgs:
- redirection-url=http://gatekeeper.kops.mile-kitic.com
- secure-cookie=false
extraEnvs: {}
forwarding:
domains: []
enable: false
generateSecret: true
password: ""
username: ""
fullnameOverride: "keycloak-gatekeeper"
image:
pullPolicy: IfNotPresent
repository: quay.io/keycloak/keycloak-gatekeeper
ingress:
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-buffer-size: "64k"
enabled: true
hosts:
- gatekeeper.kops.mile-kitic.com
path: /
tls:
- hosts:
- gatekeeper.kops.mile-kitic.com
secretName: keycloak-gatekeeper-tls
logging: true
matchClaims: {}
nameOverride: "keycloak-gatekeeper"
nodeSelector: {}
prometheusMetrics: false
rbac:
create: true
refreshTokens: true
replicaCount: 1
resources: {}
rules: []
scopes: []
secret:
create: true
service:
port: 3000
type: ClusterIP
serviceAccount:
create: true
name: "keycloak-gatekeeper"
sessionCookies: false
skipOpenidProviderTlsVerify: true
skipUpstreamTlsVerify: true
tolerations: []
upstreamURL: "http://dashboard-kubernetes-dashboard.kube-system.svc.cluster.local"
Install gatekeeper deployment
proxy-deployment.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dasboard-proxy
labels:
app.kubernetes.io/name: dasboard-proxy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: dasboard-proxy
template:
metadata:
labels:
app.kubernetes.io/name: dasboard-proxy
spec:
containers:
- name: dasboard-proxy
image: "keycloak/keycloak-gatekeeper:latest"
command:
- /opt/keycloak-gatekeeper
# this is location of the keycloak
- --discovery-url=https://keycloak.kops.mile-kitic.com/auth/realms/kops.mile-kitic.com
# client_id from client created in keycloak
- --client-id=kubernetes
# client_secret from client created in keycloak
- --client-secret=7e5bd7ed-9d12-4c97-bf28-3b4df10c6ab1
- --listen=0.0.0.0:3000
- --encryption-key=AgXa7xRcoClDEU0ZDSH4X0XhL5Qy2Z2j
          # this is the url where gatekeeper will redirect the request after login. This should match `valid redirect url` in keycloak's client settings (/oauth/callback can be added in addition)
- --redirection-url=http://gatekeeper.kops.mile-kitic.com
- --enable-refresh-tokens=true
- --secure-cookie=false
          # this is the url which will be used for authenticated requests from the client (eg. one which has already gone through gatekeeper and keycloak)
- --upstream-url=http://dashboard-kubernetes-dashboard.kube-system.svc.cluster.local
# debug:
#- --upstream-url=http://echo.default.svc.cluster.local
# for self sign cert or custom ca
- --skip-upstream-tls-verify=true
- --skip-openid-provider-tls-verify=true
ports:
- name: http
containerPort: 3000
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: dasboard-proxy
labels:
app.kubernetes.io/name: dasboard-proxy
namespace: default
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: dasboard-proxy
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: dasboard-proxy
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-buffer-size: "64k"
cert-manager.io/cluster-issuer: letsencrypt-prod
namespace: default
spec:
tls:
- hosts:
- gatekeeper.kops.mile-kitic.com
secretName: dasboard-proxy-tls
rules:
- host: gatekeeper.kops.mile-kitic.com
http:
paths:
- backend:
serviceName: dasboard-proxy
servicePort: 3000
install proxy-deployment to k8s
k apply -f proxy-deployment.yaml
When a user logs in to the dashboard they cannot see the resources, as RBAC is not yet set for OIDC users.
install oidc rules
oidc-rbac.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: keycloak-cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: "oidc:cluster-admins"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: keycloak-cluster-users
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: view
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: "oidc:cluster-users"
apply rbac rules to k8s
k apply -f oidc-rbac.yaml
▶ helm list
NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
cert-manager 1 Mon May 4 14:38:01 2020 DEPLOYED cert-manager-v0.14.3 v0.14.3 cert-manager
dashboard 1 Fri May 8 11:10:16 2020 DEPLOYED kubernetes-dashboard-1.10.1 1.10.1 kube-system
external-dns 1 Mon May 4 20:23:30 2020 DEPLOYED external-dns-2.22.1 0.7.1 default
keycloak 1 Thu May 7 14:30:14 2020 DEPLOYED keycloak-7.7.0 9.0.2 default
keycloak-gatekeeper 1 Tue May 12 12:39:31 2020 DEPLOYED keycloak-gatekeeper-3.3.0 10.0.0 default
quickstart 1 Mon May 4 18:50:44 2020 DEPLOYED nginx-ingress-1.36.3 0.30.0 default
https://medium.com/@carlosedp/adding-authentication-to-your-kubernetes-front-end-applications-with-keycloak-6571097be090
https://www.openshift.com/blog/adding-authentication-to-your-kubernetes-web-applications-with-keycloak
https://keycloak.discourse.group/t/how-to-retrieve-jwt-token-in-http-header-with-keycloak-gatekeeper/499
https://medium.com/@mrbobbytables/kubernetes-day-2-operations-authn-authz-with-oidc-and-a-little-help-from-keycloak-de4ea1bdbbe
https://thenewstack.io/three-realistic-approaches-to-kubernetes-rbac/
https://medium.com/@rschoening/read-only-access-to-kubernetes-cluster-fcf84670b698
https://medium.com/@jessgreb01/kubernetes-authn-authz-with-google-oidc-and-rbac-74509ca8267e
https://banzaicloud.com/blog/k8s-rbac/
https://www.cncf.io/wp-content/uploads/2020/04/2020_04_Introduction-to-Kubernetes-RBAC.pdf
https://gardener.cloud/050-tutorials/content/howto/oidc-login/
https://gist.github.com/carlosedp/80ea54104cc6303f04b3755033f9c4fe
https://github.com/keycloak/keycloak-containers/blob/10.0.1/server/README.md
https://www.sovsystems.com/blog/authentication-and-authorization-in-kubernetes
https://github.com/pjeby/keycloak-proxy
https://habr.com/ru/post/489172/
https://appsoft.pro/activedirectory-authorization-to-kubernetes-with-keycloak/
http://gatekeeper.kops.mile-kitic.com/oauth/callback
https://github.com/mendhak/docker-http-https-echo
https://github.com/keycloak/keycloak-documentation/blob/master/securing_apps/topics/oidc/keycloak-gatekeeper.adoc