From 10b9cbeead12e57d96fc7db4f6078d4a43c67567 Mon Sep 17 00:00:00 2001
From: Sharad Ahlawat
Date: Tue, 20 Apr 2021 12:34:43 -0700
Subject: [PATCH] .

---
 configs/etc/defaults/devfs.rules      |  1 +
 jails/config/elk/elasticsearch.yml    |  5 ++
 jails/config/elk/jvm.options          |  2 +-
 jails/config/elk/kibana.yml           |  3 +
 jails/config/elk/logstash.conf        | 19 ++++++-
 jails/config/elk/metricbeat.yml       |  4 +-
 k8s/apps/deploy-pyserver.txt          | 25 +++++++++
 k8s/apps/pyserver-deployment.yml      | 21 +++++++
 k8s/apps/pyserver/Dockerfile          |  9 +++
 k8s/apps/pyserver/app.py              | 14 +++++
 k8s/apps/pyserver/requirements.txt    |  3 +
 k8s/apps/pyserver/service.yaml        | 35 ++++++++++++
 k8s/kind-cilium-config.yaml           | 18 ++++++
 k8s/kind-vanilla-config.yaml          | 17 ++++++
 k8s/kind-vanilla-config.yaml-ingress  | 56 +++++++++++++++++++
 k8s/metallb-configmap.yaml            | 12 ++++
 k8s/metallb-test.yaml                 | 37 +++++++++++++
 k8s/registry/kind-registry.sh         | 28 ++++++++++
 k8s/registry/portainer-agent-k8s.yaml | 80 +++++++++++++++++++++++++++
 k8s/setup-kind-cilium.txt             | 72 ++++++++++++++++++++++++
 k8s/setup-kind-vanilla.txt            | 15 +++++
 sync-k8s.sh                           | 11 ++++
 22 files changed, 483 insertions(+), 4 deletions(-)
 create mode 100644 k8s/apps/deploy-pyserver.txt
 create mode 100644 k8s/apps/pyserver-deployment.yml
 create mode 100644 k8s/apps/pyserver/Dockerfile
 create mode 100644 k8s/apps/pyserver/app.py
 create mode 100644 k8s/apps/pyserver/requirements.txt
 create mode 100644 k8s/apps/pyserver/service.yaml
 create mode 100644 k8s/kind-cilium-config.yaml
 create mode 100644 k8s/kind-vanilla-config.yaml
 create mode 100644 k8s/kind-vanilla-config.yaml-ingress
 create mode 100644 k8s/metallb-configmap.yaml
 create mode 100644 k8s/metallb-test.yaml
 create mode 100755 k8s/registry/kind-registry.sh
 create mode 100644 k8s/registry/portainer-agent-k8s.yaml
 create mode 100644 k8s/setup-kind-cilium.txt
 create mode 100644 k8s/setup-kind-vanilla.txt
 create mode 100755 sync-k8s.sh

diff --git a/configs/etc/defaults/devfs.rules b/configs/etc/defaults/devfs.rules
index 1d9f577..3603d77 100644
--- a/configs/etc/defaults/devfs.rules
+++ b/configs/etc/defaults/devfs.rules
@@ -86,6 +86,7 @@ add include $devfsrules_unhide_basic
 add include $devfsrules_unhide_login
 add path fuse unhide
 add path zfs unhide
+add path 'bpf*' unhide
 
 # members of group uucp can access all usb and tty devices
 [usbrules=100]
diff --git a/jails/config/elk/elasticsearch.yml b/jails/config/elk/elasticsearch.yml
index 672bc78..eaa6e9f 100755
--- a/jails/config/elk/elasticsearch.yml
+++ b/jails/config/elk/elasticsearch.yml
@@ -28,6 +28,7 @@ node.name: node-1
 
 xpack.security.audit.enabled: true
 xpack.security.enabled: true
+xpack.security.authc.api_key.enabled: true
 xpack.security.http.ssl.enabled: true
 xpack.security.transport.ssl.enabled: true
 xpack.security.http.ssl.key: certs/diyprivkeyr.pem
@@ -36,6 +37,10 @@ xpack.security.http.ssl.certificate_authorities: certs/cacert.pem
 xpack.security.transport.ssl.key: certs/diyprivkeyr.pem
 xpack.security.transport.ssl.certificate: certs/diyfullchain.pem
 xpack.security.transport.ssl.certificate_authorities: certs/cacert.pem
+
+xpack.monitoring.collection.enabled: true
+xpack.monitoring.elasticsearch.collection.enabled: false
+
 #
 # ----------------------------------- Paths ------------------------------------
 #
diff --git a/jails/config/elk/jvm.options b/jails/config/elk/jvm.options
index dda380e..50af168 100755
--- a/jails/config/elk/jvm.options
+++ b/jails/config/elk/jvm.options
@@ -23,7 +23,7 @@
 -Xms8G
 -Xmx8G
 -XX:MaxMetaspaceSize=2G
--Xss2G
+-Xss1G
 -Xnoclassgc
 -XX:MaxDirectMemorySize=2G
 
diff --git a/jails/config/elk/kibana.yml b/jails/config/elk/kibana.yml
index 5dcb9e7..6463ebc 100644
--- a/jails/config/elk/kibana.yml
+++ b/jails/config/elk/kibana.yml
@@ -58,6 +58,9 @@ server.ssl.key: /mnt/certs/diyprivkeyr.pem
 #elasticsearch.ssl.certificate: /path/to/your/client.crt
 #elasticsearch.ssl.key: /path/to/your/client.key
 
+xpack.security.enabled: true
+xpack.encryptedSavedObjects.encryptionKey: "something_at_least_32_characters_this_is_it"
+
 # Optional setting that enables you to specify a path to the PEM file for the certificate
 # authority for your Elasticsearch instance.
 elasticsearch.ssl.certificateAuthorities: [ "/mnt/certs/cacert.pem" ]
diff --git a/jails/config/elk/logstash.conf b/jails/config/elk/logstash.conf
index dd20319..4d7c2f7 100644
--- a/jails/config/elk/logstash.conf
+++ b/jails/config/elk/logstash.conf
@@ -1,4 +1,4 @@
-# Copyright (c) 2018-2021, diyIT.org
+# Copyright (c) 2018-2020, diyIT.org
 # All rights reserved.
 #
 # BSD 2-Clause License ("Simplified BSD License" or "FreeBSD License")
@@ -18,6 +18,23 @@ input {
   }
 }
 
+filter {
+  if "suricata" in [tags] {
+    json {
+      source => "message"
+    }
+    date {
+      match => [ "timestamp", "ISO8601" ]
+    }
+    if ![geoip] and [src_ip] !~ /^(10\.|192\.168\.)/ {
+      geoip {
+        add_tag => [ "GeoIP" ]
+        source => "src_ip"
+      }
+    }
+  }
+}
+
 output {
   elasticsearch {
     ssl => true
diff --git a/jails/config/elk/metricbeat.yml b/jails/config/elk/metricbeat.yml
index 493f376..c04e63d 100644
--- a/jails/config/elk/metricbeat.yml
+++ b/jails/config/elk/metricbeat.yml
@@ -98,8 +98,8 @@ output.elasticsearch:
 
   # Authentication credentials - either API key or username/password.
   #api_key: "id:api_key"
-  #username: "elastic"
-  #password: "changeme"
+  username: "demo"
+  password: "demo"
 
 # ------------------------------ Logstash Output -------------------------------
 #output.logstash:
diff --git a/k8s/apps/deploy-pyserver.txt b/k8s/apps/deploy-pyserver.txt
new file mode 100644
index 0000000..0387d92
--- /dev/null
+++ b/k8s/apps/deploy-pyserver.txt
@@ -0,0 +1,25 @@
+cd pyserver
+docker build -t localhost:5000/pyserver:0.1 -f Dockerfile .
+# docker build -t sahlawat/pyserver:0.1 -f Dockerfile .
+docker run -it -p 8080:8080 --rm localhost:5000/pyserver:0.1
+# curl localhost:8080
+# docker run -d -p 8080:8080 diyit/pyserver:0.1
+
+docker push localhost:5000/pyserver:0.1
+
+kubectl create namespace demo
+kubectl config set-context --current --namespace=demo
+
+kubectl create deployment myapp --image=localhost:5000/pyserver:0.1
+kubectl expose deployment myapp --port=8080 --type=LoadBalancer
+# kubectl get service
+# kubectl get all
+# curl 172.18.255.200:8080
+
+kubectl scale deployment myapp --replicas=3
+# kubectl get service
+
+kubectl delete service myapp
+kubectl delete deployment myapp
+
+kubectl delete namespace/demo
diff --git a/k8s/apps/pyserver-deployment.yml b/k8s/apps/pyserver-deployment.yml
new file mode 100644
index 0000000..15856f3
--- /dev/null
+++ b/k8s/apps/pyserver-deployment.yml
@@ -0,0 +1,21 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: myapp
+  name: myapp
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: myapp
+  template:
+    metadata:
+      labels:
+        app: myapp
+    spec:
+      containers:
+      - name: myapp
+        image: localhost:5000/pyserver:0.1
+        ports:
+        - containerPort: 8080
diff --git a/k8s/apps/pyserver/Dockerfile b/k8s/apps/pyserver/Dockerfile
new file mode 100644
index 0000000..4fe2566
--- /dev/null
+++ b/k8s/apps/pyserver/Dockerfile
@@ -0,0 +1,9 @@
+FROM python:3.8-slim-buster
+WORKDIR /usr/src/app
+COPY requirements.txt ./
+RUN pip install --upgrade pip
+RUN pip install --no-cache-dir -r requirements.txt
+COPY . .
+ENTRYPOINT ["python"]
+EXPOSE 8080
+CMD ["app.py"]
diff --git a/k8s/apps/pyserver/app.py b/k8s/apps/pyserver/app.py
new file mode 100644
index 0000000..203b33d
--- /dev/null
+++ b/k8s/apps/pyserver/app.py
@@ -0,0 +1,14 @@
+from flask import Flask
+import itertools
+import os
+
+app = Flask(__name__)
+counter = itertools.count(1)  # per-process request counter
+
+@app.route('/')
+def hello_world():
+    return "Hello World! #" + str(next(counter)) + "\nfrom: " + os.getenv('HOSTNAME', "unknown") + "\n"
+
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=8080)
#" + str(count) + "\nfrom: " + os.getenv('HOSTNAME', "unknown") + "\n" + + +if __name__ == '__main__': + app.run(host='0.0.0.0', port='8080') diff --git a/k8s/apps/pyserver/requirements.txt b/k8s/apps/pyserver/requirements.txt new file mode 100644 index 0000000..3c1fa55 --- /dev/null +++ b/k8s/apps/pyserver/requirements.txt @@ -0,0 +1,3 @@ +flask +flask_cors +dapr diff --git a/k8s/apps/pyserver/service.yaml b/k8s/apps/pyserver/service.yaml new file mode 100644 index 0000000..aca733b --- /dev/null +++ b/k8s/apps/pyserver/service.yaml @@ -0,0 +1,35 @@ +kind: Pod +apiVersion: v1 +metadata: + name: pyserver + labels: + app: pyserver +spec: + containers: + - name: pyserver + image: localhost:5000/pyserver:0.1 +--- +kind: Service +apiVersion: v1 +metadata: + name: pyserver +spec: + selector: + app: pyserver + ports: + # Port used by the Docker image + - port: 8080 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: pyserver-ingress +spec: + rules: + - http: + paths: + - path: / + backend: + serviceName: pyserver + servicePort: 8080 +--- diff --git a/k8s/kind-cilium-config.yaml b/k8s/kind-cilium-config.yaml new file mode 100644 index 0000000..57acbd4 --- /dev/null +++ b/k8s/kind-cilium-config.yaml @@ -0,0 +1,18 @@ +# a cluster with 3 control-plane nodes and 3 workers +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane +- role: control-plane +- role: control-plane +- role: worker +- role: worker +- role: worker +networking: + disableDefaultCNI: true + podSubnet: "10.10.0.0/16" + serviceSubnet: "10.11.0.0/16" +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"] + endpoint = ["http://kind-registry:5000"] diff --git a/k8s/kind-vanilla-config.yaml b/k8s/kind-vanilla-config.yaml new file mode 100644 index 0000000..d9e1b8d --- /dev/null +++ b/k8s/kind-vanilla-config.yaml @@ -0,0 +1,17 @@ +# a cluster with 3 control-plane nodes and 3 workers +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane +- role: control-plane +- role: control-plane +- role: worker +- role: worker +- role: worker +networking: + podSubnet: "10.20.0.0/16" + serviceSubnet: "10.21.0.0/16" +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"] + endpoint = ["http://kind-registry:5000"] diff --git a/k8s/kind-vanilla-config.yaml-ingress b/k8s/kind-vanilla-config.yaml-ingress new file mode 100644 index 0000000..6c1fdb7 --- /dev/null +++ b/k8s/kind-vanilla-config.yaml-ingress @@ -0,0 +1,56 @@ +# a cluster with 3 control-plane nodes and 3 workers +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 80 + protocol: TCP + - containerPort: 443 + hostPort: 443 + protocol: TCP +- role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 80 + protocol: TCP + - containerPort: 443 + hostPort: 443 + protocol: TCP +- role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 80 + protocol: TCP + - containerPort: 443 + hostPort: 443 + protocol: TCP +- role: worker +- 
diff --git a/k8s/metallb-configmap.yaml b/k8s/metallb-configmap.yaml
new file mode 100644
index 0000000..3ec87b6
--- /dev/null
+++ b/k8s/metallb-configmap.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: metallb-system
+  name: config
+data:
+  config: |
+    address-pools:
+    - name: default
+      protocol: layer2
+      addresses:
+      - 172.18.255.200-172.18.255.250
diff --git a/k8s/metallb-test.yaml b/k8s/metallb-test.yaml
new file mode 100644
index 0000000..c790d2e
--- /dev/null
+++ b/k8s/metallb-test.yaml
@@ -0,0 +1,37 @@
+kind: Pod
+apiVersion: v1
+metadata:
+  name: foo-app
+  labels:
+    app: http-echo
+spec:
+  containers:
+  - name: foo-app
+    image: hashicorp/http-echo:0.2.3
+    args:
+    - "-text=foo"
+---
+kind: Pod
+apiVersion: v1
+metadata:
+  name: bar-app
+  labels:
+    app: http-echo
+spec:
+  containers:
+  - name: bar-app
+    image: hashicorp/http-echo:0.2.3
+    args:
+    - "-text=bar"
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: foo-service
+spec:
+  type: LoadBalancer
+  selector:
+    app: http-echo
+  ports:
+  # Default port used by the image
+  - port: 5678
diff --git a/k8s/registry/kind-registry.sh b/k8s/registry/kind-registry.sh
new file mode 100755
index 0000000..a6aebe9
--- /dev/null
+++ b/k8s/registry/kind-registry.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+reg_name='kind-registry'
+reg_port='5000'
+running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)"
+if [ "${running}" != 'true' ]; then
+  docker run \
+    -d --restart=always -p "${reg_port}:5000" --name "${reg_name}" \
+    registry:latest
+fi
+
+docker network connect "kind" "kind-registry"
+# http://localhost:5000/v2/_catalog
+
+# Portainer local
+# https://documentation.portainer.io/v2.0/deploy/ceinstalldocker/
+docker volume create portainer_data
+docker run -d -p 9000:9000 --name=portainer --restart=unless-stopped -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer-ce
+docker run -d -p 9001:9001 --name portainer_agent --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/docker/volumes:/var/lib/docker/volumes portainer/agent
+# Endpoints->Type:Docker->Connect via:socket
+# http://localhost:9000
+
+# Portainer kind
+# https://documentation.portainer.io/v2.0/deploy/ceinstallk8s/
+helm repo add portainer https://portainer.github.io/k8s/
+helm repo update
+helm install --create-namespace -n portainer portainer portainer/portainer --set service.type=LoadBalancer
+# kubectl get services --all-namespaces
+# http://lbIP:9000
diff --git a/k8s/registry/portainer-agent-k8s.yaml b/k8s/registry/portainer-agent-k8s.yaml
new file mode 100644
index 0000000..b141387
--- /dev/null
+++ b/k8s/registry/portainer-agent-k8s.yaml
@@ -0,0 +1,80 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: portainer
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: portainer-sa-clusteradmin
+  namespace: portainer
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: portainer-crb-clusteradmin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  name: portainer-sa-clusteradmin
+  namespace: portainer
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: portainer-agent
+  namespace: portainer
+spec:
+  type: LoadBalancer
+  selector:
+    app: portainer-agent
+  ports:
+  - name: http
+    protocol: TCP
+    port: 9001
+    targetPort: 9001
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: portainer-agent-headless
+  namespace: portainer
+spec:
+  clusterIP: None
+  selector:
+    app: portainer-agent
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: portainer-agent
+  namespace: portainer
+spec:
+  selector:
+    matchLabels:
+      app: portainer-agent
+  template:
+    metadata:
+      labels:
+        app: portainer-agent
+    spec:
+      serviceAccountName: portainer-sa-clusteradmin
+      containers:
+      - name: portainer-agent
+        image: portainer/agent:latest
+        imagePullPolicy: Always
+        env:
+        - name: LOG_LEVEL
+          value: DEBUG
+        - name: AGENT_CLUSTER_ADDR
+          value: "portainer-agent-headless"
+        - name: KUBERNETES_POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        ports:
+        - containerPort: 9001
+          protocol: TCP
diff --git a/k8s/setup-kind-cilium.txt b/k8s/setup-kind-cilium.txt
new file mode 100644
index 0000000..856ea5d
--- /dev/null
+++ b/k8s/setup-kind-cilium.txt
@@ -0,0 +1,72 @@
+https://docs.cilium.io/en/v1.9/gettingstarted/kind/
+
+kind create cluster --name=kind-cilium --config kind-cilium-config.yaml
+
+kubectl cluster-info --context kind-kind-cilium
+kubectl config use-context kind-kind-cilium
+
+helm repo add cilium https://helm.cilium.io/
+
+docker pull cilium/cilium:v1.9.5
+kind load docker-image --name kind-cilium cilium/cilium:v1.9.5
+
+helm install cilium cilium/cilium --version 1.9.5 \
+  --namespace kube-system \
+  --set nodeinit.enabled=true \
+  --set kubeProxyReplacement=partial \
+  --set hostServices.enabled=false \
+  --set externalIPs.enabled=true \
+  --set nodePort.enabled=true \
+  --set hostPort.enabled=true \
+  --set bpf.masquerade=false \
+  --set image.pullPolicy=IfNotPresent \
+  --set ipam.mode=kubernetes
+
+kubectl create ns cilium-test
+kubectl apply -n cilium-test -f https://raw.githubusercontent.com/cilium/cilium/v1.9/examples/kubernetes/connectivity-check/connectivity-check.yaml
+# kubectl get pods --namespace cilium-test
+
+export CILIUM_NAMESPACE=kube-system
+helm upgrade cilium cilium/cilium --version 1.9.5 \
+  --namespace $CILIUM_NAMESPACE \
+  --reuse-values \
+  --set hubble.listenAddress=":4244" \
+  --set hubble.relay.enabled=true \
+  --set hubble.ui.enabled=true
+> kubectl port-forward -n $CILIUM_NAMESPACE svc/hubble-ui --address 0.0.0.0 --address :: 12000:80
+# open http://localhost:12000/ to access the UI
+
+export HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
+curl -LO "https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-amd64.tar.gz"
+curl -LO "https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-amd64.tar.gz.sha256sum"
+sha256sum --check hubble-linux-amd64.tar.gz.sha256sum
+tar zxf hubble-linux-amd64.tar.gz
+sudo mv hubble /usr/local/bin
+> kubectl port-forward -n $CILIUM_NAMESPACE svc/hubble-relay --address 0.0.0.0 --address :: 4245:80
+# hubble --server localhost:4245 status
+# hubble --server localhost:4245 observe
+
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/master/manifests/namespace.yaml
+kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/master/manifests/metallb.yaml
+# kubectl get pods -n metallb-system --watch
+
+docker network inspect -f '{{.IPAM.Config}}' kind
+kubectl apply -f metallb-configmap.yaml
+
+
+Test metallb:
+kubectl apply -f metallb-test.yaml
+LB_IP=$(kubectl get svc/foo-service -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
+# should output foo and bar on separate lines
+for _ in {1..10}; do
+  curl ${LB_IP}:5678
+done
+
+
+Ingress TBD:
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/kind/deploy.yaml
+kubectl wait --namespace ingress-nginx \
+  --for=condition=ready pod \
+  --selector=app.kubernetes.io/component=controller \
+  --timeout=90s
diff --git a/k8s/setup-kind-vanilla.txt b/k8s/setup-kind-vanilla.txt
new file mode 100644
index 0000000..27d381a
--- /dev/null
+++ b/k8s/setup-kind-vanilla.txt
@@ -0,0 +1,15 @@
+https://kind.sigs.k8s.io/docs/user/quick-start/
+https://kind.sigs.k8s.io/docs/user/loadbalancer
+
+kind create cluster --name=kind-vanilla --config kind-vanilla-config.yaml
+
+kubectl cluster-info --context kind-kind-vanilla
+kubectl config use-context kind-kind-vanilla
+
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/master/manifests/namespace.yaml
+kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/master/manifests/metallb.yaml
+# kubectl get pods -n metallb-system --watch
+
+docker network inspect -f '{{.IPAM.Config}}' kind
+kubectl apply -f metallb-configmap.yaml
diff --git a/sync-k8s.sh b/sync-k8s.sh
new file mode 100755
index 0000000..eaeffb4
--- /dev/null
+++ b/sync-k8s.sh
@@ -0,0 +1,11 @@
+#!/usr/local/bin/bash
+
+# Copyright (c) 2018-2021, diyIT.org
+# All rights reserved.
+#
+# BSD 2-Clause License ("Simplified BSD License" or "FreeBSD License")
+# https://diyit.org/license/
+#
+#
+
+rsync -av --del ahlawat@cvm-b:k8s/ k8s/