journalctl clear logs
journalctl --rotate
journalctl --vacuum-time=1s
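--rotate archives the active journal files so the vacuum can remove them; you can also check the footprint and trim by size instead of age:
journalctl --disk-usage
journalctl --vacuum-size=200M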
grep 500 errors
awk match pattern in column
Grep only 502 && 503:
cat /var/log/nginx/access.log | awk '$9 ~ /^50[23]/'
Grep all 50X:
cat /var/log/nginx/access.log | awk '$9 ~ /^50./'
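The same field works for a quick tally per status code (assumes the default combined log format, where column 9 is the status):
awk '$9 ~ /^50/ {n[$9]++} END {for (s in n) print s, n[s]}' /var/log/nginx/access.log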
Pug
Pug is a high-performance template engine, heavily influenced by HTML, implemented in JavaScript for Node.js and browsers. There are also ports for other languages such as Java, Python, and Ruby.
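A minimal sketch of trying it from the shell, assuming Node.js and the pug-cli package:
npm install -g pug-cli
echo 'h1 Hello from Pug' > index.pug
pug index.pug        # writes index.html next to the source
cat index.html       # <h1>Hello from Pug</h1>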
arkade
arkade is how developers install the latest versions of their favourite tools and Kubernetes apps.
curl -sLS https://get.arkade.dev | sudo sh
arkade get kubectl
arkade get kubectl \
  helm \
  istioctl
Tools available via arkade system install:
actions-runner Install GitHub Actions Runner
buildkitd Install Buildkitd
cni Install CNI plugins
containerd Install containerd
firecracker Install Firecracker
gitlab-runner Install Gitlab Runner
go Install Go
node Install Node.js
prometheus Install Prometheus
tc-redirect-tap Install tc-redirect-tap
registry Install Open Source Registry implementation for storing and distributing container images using the OCI Distribution Specification
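System apps install to system paths rather than ~/.arkade, so they are typically run as root, e.g.:
sudo arkade system install go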
mercure
wget https://github.com/dunglas/mercure/releases/download/v0.10.2/mercure_0.10.2_Linux_x86_64.tar.gz && mkdir mercure && tar -zxvf mercure_0.10.2_Linux_x86_64.tar.gz -C mercure
vim /etc/supervisor/conf.d/mercure.conf
[program:mercure]
command=/usr/sbin/mercure
process_name=%(program_name)s_%(process_num)s
numprocs=1
environment=JWT_KEY="secret_jwt_key",ADDR=':3333',DEMO=1,ALLOW_ANONYMOUS=1,CORS_ALLOWED_ORIGINS="",PUBLISH_ALLOWED_ORIGINS="",USE_FORWARDED_HEADERS=1,DEBUG=1
directory=/tmp
autostart=true
autorestart=true
startsecs=5
startretries=10
user=www-data
redirect_stderr=false
stdout_capture_maxbytes=1MB
stderr_capture_maxbytes=1MB
stdout_logfile=/path/to/mercure/out.log
stderr_logfile=/path/to/mercure/error.log
supervisorctl reread
supervisorctl update
supervisorctl start mercure
Generate a token online at jwt.io, signing it with your secret_jwt_key and the following payload:
{
  "mercure": {
    "publish": [
      "*"
    ]
  }
}
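If you'd rather not paste secret_jwt_key into a website, the same HS256 token can be minted locally; a minimal sketch with openssl:
# base64url-encode header and payload, then HMAC-SHA256 sign with the key
b64url() { openssl base64 -A | tr '+/' '-_' | tr -d '='; }
header=$(printf '%s' '{"alg":"HS256","typ":"JWT"}' | b64url)
payload=$(printf '%s' '{"mercure":{"publish":["*"]}}' | b64url)
sig=$(printf '%s.%s' "$header" "$payload" | openssl dgst -sha256 -hmac "secret_jwt_key" -binary | b64url)
printf '%s.%s.%s\n' "$header" "$payload" "$sig"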
Test the token with a bash script:
#!/usr/bin/env bash
curl --request POST \
  --url http://127.0.0.1:3333/.well-known/mercure \
  --header 'authorization: Bearer Paste_your_generated_token_here' \
  --header 'content-type: application/x-www-form-urlencoded' \
  --data topic=test \
  --data 'data={
    "headline": "Hello there this is Mercure.Rocks"
  }'
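To watch the update arrive, subscribe from a second terminal; the hub streams server-sent events, and -N disables curl's buffering:
curl -N 'http://127.0.0.1:3333/.well-known/mercure?topic=test'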
zfs access files lxc
Enter the LXD snap daemon's mount namespace, where the container ZFS datasets are mounted:
nsenter -t $(cat /var/snap/lxd/common/lxd.pid) -m
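From inside that namespace the container filesystems are visible under the pool mountpoints ('default' below is a placeholder pool name):
ls /var/snap/lxd/common/lxd/storage-pools/default/containers/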
lxc change storage
lxc move container_name -s new_storage_pool
mysql lock all tables
FLUSH TABLES WITH READ LOCK;
UNLOCK TABLES;
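The lock only holds while the issuing session stays open, so for backups it is easier to let mysqldump take it; --lock-all-tables issues the FLUSH TABLES WITH READ LOCK for the duration of the dump:
mysqldump --all-databases --lock-all-tables > /tmp/all-databases.sql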
systemd listen on port and run command on connect
vim /usr/lib/systemd/system/restart-db.service
[Unit]
Description=Restart MySQL Listener
After=network.target
[Service]
User=restart
Type=simple
ExecStart=/bin/bash -xc 'echo -e "HTTP/1.1 204 No Content\\r\\nConnection: close\\r\\n\\r" | nc -p 7777 -l -w 1; sudo systemctl restart mysql'
Restart=always
StartLimitInterval=1min
StartLimitBurst=60
[Install]
WantedBy=multi-user.target
systemctl start restart-db.service
● restart-db.service - Restart MySQL Listener
Loaded: loaded (/lib/systemd/system/restart-db.service; disabled; vendor preset: enabled)
Active: active (running) since Mon 2023-11-27 21:29:28 UTC; 11s ago
Main PID: 41246 (bash)
Tasks: 2 (limit: 76710)
Memory: 572.0K
CPU: 1ms
CGroup: /system.slice/restart-db.service
├─41246 /bin/bash -xc "echo -e \"HTTP/1.1 204 No Content\\r\\nConnection: close\\r\\n\\r\" | nc -p 7777 -l -w 1; sudo systemctl restart mysql"
└─41248 nc -p 7777 -l -w 1
vim /etc/sudoers.d/restart-db
restart ALL=(ALL) NOPASSWD: /usr/bin/systemctl restart mysql
It's also possible to use socat with some simple auth:
socat -u TCP-LISTEN:7777,keepalive,reuseaddr,rcvbuf=7777 STDOUT | grep -w -q "mypassword" && sudo systemctl restart mysql
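Either listener can then be triggered remotely (192.0.2.10 stands in for the server's address; assumes a netcat that supports -q):
# the nc unit restarts MySQL on any connection;
# the socat variant additionally requires the password in the payload
curl -s http://192.0.2.10:7777/
echo "mypassword" | nc -q 1 192.0.2.10 7777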
ceph osd pool list
ceph osd lspools
deleting all pods in all namespaces
Excluding the Kubernetes system namespaces (kube-*):
for ns in $(kubectl get namespaces -o name | grep -v kube- | cut -c 11-); do
  kubectl delete pods --all -n "$ns"
done
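To preview what would be deleted first, the same loop works with --dry-run=client:
for ns in $(kubectl get namespaces -o name | grep -v kube- | cut -c 11-); do
  kubectl delete pods --all -n "$ns" --dry-run=client
done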
k3s lxc container
Run k3s inside LXD:
vim /etc/sysctl.d/90-lxd-limits.conf
fs.aio-max-nr = 524288
fs.inotify.max_queued_events = 1048576
fs.inotify.max_user_instances = 1048576
fs.inotify.max_user_watches = 1048576
kernel.dmesg_restrict = 1
kernel.keys.maxbytes = 2000000
kernel.keys.maxkeys = 2000
net.ipv4.neigh.default.gc_thresh3 = 8192
net.ipv6.neigh.default.gc_thresh3 = 8192
vm.max_map_count = 262144
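Apply the new limits without a reboot:
sudo sysctl --system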
lxc profile create k3s
wget https://raw.githubusercontent.com/ubuntu/microk8s/master/tests/lxc/microk8s.profile -O k3s.profile
cat k3s.profile
name: k3s
config:
  boot.autostart: "true"
  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
  raw.lxc: |
    lxc.apparmor.profile=unconfined
    lxc.mount.auto=proc:rw sys:rw cgroup:rw
    lxc.cgroup.devices.allow=a
    lxc.cap.drop=
  security.nesting: "true"
  security.privileged: "true"
description: ""
devices:
  aadisable:
    path: /sys/module/nf_conntrack/parameters/hashsize
    source: /sys/module/nf_conntrack/parameters/hashsize
    type: disk
  aadisable2:
    path: /dev/kmsg
    source: /dev/kmsg
    type: unix-char
  aadisable3:
    path: /sys/fs/bpf
    source: /sys/fs/bpf
    type: disk
  aadisable4:
    path: /proc/sys/net/netfilter/nf_conntrack_max
    source: /proc/sys/net/netfilter/nf_conntrack_max
    type: disk
cat k3s.profile | lxc profile edit k3s
CNAME="$(hostname)-k3s"
lxc launch -p default -p k3s ubuntu:20.04 ${CNAME}
lxc exec ${CNAME} -- unlink /etc/resolv.conf
lxc exec ${CNAME} -- bash -c "echo 'nameserver 1.1.1.1' > /etc/resolv.conf"
lxc exec ${CNAME} -- bash -c "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf"
lxc exec ${CNAME} -- bash -c "echo '127.0.1.1 ${CNAME}' >> /etc/hosts"
lxc exec ${CNAME} -- apt install -y apparmor-utils avahi-daemon
lxc exec ${CNAME} -- bash -c "echo 'L /dev/kmsg - - - - /dev/console' > /etc/tmpfiles.d/kmsg.conf"
lxc exec ${CNAME} -- bash -c "curl -sfL https://get.k3s.io | sh -s - server --snapshotter=native --disable traefik"
lxc exec ${CNAME} -- k3s kubectl get nodes
NAME          STATUS   ROLES                  AGE   VERSION
my-node-k3s   Ready    control-plane,master   19m   v1.27.7+k3s2
Kill containers to fix LXC shutdown issue:
vim /etc/systemd/system/kill-cgroups@.service
[Unit]
Description=Kill cgroup procs on shutdown for %i
DefaultDependencies=false
Before=shutdown.target umount.target
[Service]
# Instanced units are not part of system.slice for some reason
# without this, the service isn't started at shutdown
Slice=system.slice
ExecStart=/bin/bash -c "/usr/local/bin/k3s-killall.sh"
Type=oneshot
[Install]
WantedBy=shutdown.target
lxc exec ${CNAME} -- systemctl enable kill-cgroups@k3s.service
Helm install:
lxc exec ${CNAME} -- snap install helm --classic
helm 3.10.1 from Snapcrafters✪ installed
lxc exec ${CNAME} -- bash -c "mkdir -p \${HOME}/.kube/; cat /etc/rancher/k3s/k3s.yaml > \${HOME}/.kube/config"
lxc exec ${CNAME} -- bash -c "chmod 600 \${HOME}/.kube/config"
LoadBalancer:
helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --set controller.service.type=LoadBalancer
kubectl --namespace ingress-nginx get services -o wide -w ingress-nginx-controller
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
ingress-nginx-controller LoadBalancer 10.43.128.233 10.71.214.196 80:31957/TCP,443:30437/TCP 51s app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx
Install dashboard:
GITHUB_URL=https://github.com/kubernetes/dashboard/releases
VERSION_KUBE_DASHBOARD=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||')
sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/${VERSION_KUBE_DASHBOARD}/aio/deploy/recommended.yaml
Or, if that doesn't work, pin a specific version:
sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
vim dashboard.admin-user.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
vim dashboard.admin-user-role.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
k3s kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
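To log in, mint a bearer token for the account (kubectl create token exists on Kubernetes 1.24+; on older clusters you would read the service-account secret instead):
k3s kubectl -n kubernetes-dashboard create token admin-user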
Ingress:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: dashboard-nginx-ingress
  namespace: kubernetes-dashboard
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  rules:
  - host: e7470-k3s.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443
Add your current user:
lxc config set ${CNAME} security.idmap.isolated true
lxc config set ${CNAME} security.idmap.size 200000
printf "uid $(id -u) $(id -u)\ngid $(id -g) $(id -g)" | lxc config set ${CNAME} raw.idmap -
lxc restart ${CNAME}
Create your host user inside the container:
lxc exec ${CNAME} -- bash -c "groupadd -r k3s"
lxc exec ${CNAME} -- bash -c "chown root:k3s /etc/rancher/k3s/k3s.yaml"
lxc exec ${CNAME} -- bash -c "chmod g+r /etc/rancher/k3s/k3s.yaml"
lxc exec ${CNAME} -- bash -c "userdel ubuntu"
lxc exec ${CNAME} -- bash -c "groupadd -g $(id -g) $(id -gn)"
lxc exec ${CNAME} -- bash -c "useradd -u $(id -u) -g $(id -g) -m -G sudo $(id -un)"
lxc exec ${CNAME} -- bash -c "usermod -a -G k3s $(id -un)"
lxc exec ${CNAME} -- bash -c "mkdir /home/$(id -un)/.kube"
lxc exec ${CNAME} -- bash -c "cat /etc/rancher/k3s/k3s.yaml > /home/$(id -un)/.kube/config"
lxc exec ${CNAME} -- bash -c "chown -R $(id -u):$(id -g) /home/$(id -un)/.kube/"
Map directory:
lxc config device add ${CNAME} Projects disk source=/home/vit/1 path=/home/vit/1
Wrapper scripts:
vim ${HOME}/.local/bin/k3s
#!/usr/bin/env bash
CNAME="${CNAME:-e7470-k3s}"
lxc exec ${CNAME} --mode interactive --cwd "${PWD}" --user $(id -u) --group $(
  lxc exec ${CNAME} -- getent group k3s | awk 'BEGIN{FS=":"}{print $3}'
) --env "HOME=/home/$(id -un)" -- \
  "$(basename "$0")" "$@"
chmod +x ${HOME}/.local/bin/k3s
ln -s ${HOME}/.local/bin/k3s ${HOME}/.local/bin/kubectl
ln -s ${HOME}/.local/bin/k3s ${HOME}/.local/bin/helm
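With the symlinks in place, the usual commands on the host now run transparently inside the container:
kubectl get nodes
helm list --all-namespaces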
Errors were encountered while processing: netfilter-persistent iptables-persistent
dpkg: error processing package netfilter-persistent (--configure):
installed netfilter-persistent package post-installation script subprocess returned error exit status 1
dpkg: dependency problems prevent configuration of iptables-persistent:
iptables-persistent depends on netfilter-persistent (= 1.0.4+nmu2ubuntu1.1); however:
Package netfilter-persistent is not configured yet.
dpkg: error processing package iptables-persistent (--configure):
dependency problems - leaving unconfigured
Processing triggers for systemd (237-3ubuntu10.57) …
No apport report written because the error message indicates its a followup error from a previous failure.
Processing triggers for man-db (2.8.3-2ubuntu0.1) …
Processing triggers for ureadahead (0.100.0-21) …
Errors were encountered while processing:
netfilter-persistent
iptables-persistent
Updating available updates count …
E: Sub-process /usr/bin/dpkg returned an error code (1)
Fix by purging and reinstalling:
apt-get remove --purge netfilter-persistent
apt install iptables-persistent
The connection to the server localhost:8080 was refused - did you specify the right host or port?
mkdir ~/.kube
sudo k3s kubectl config view --raw | tee ~/.kube/config
chmod 600 ~/.kube/config
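Alternatively, point kubectl straight at the config k3s generates (readable by root only by default, so sudo may be needed):
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml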
mysql 8 reset root password ubuntu
systemctl stop mysql.service
systemctl set-environment MYSQLD_OPTS="--skip-networking --skip-grant-tables"
systemctl start mysql.service
mysql -u root
FLUSH PRIVILEGES; -- required under skip-grant-tables before ALTER USER works
ALTER USER 'root'@'localhost' IDENTIFIED BY 'the-new-password';
systemctl unset-environment MYSQLD_OPTS
systemctl revert mysql.service
systemctl restart mysql.service
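Verify the new password:
mysql -u root -p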