From 186ab5659e498c536b303fd1d7f31d36ff9a5ea8 Mon Sep 17 00:00:00 2001
From: Daniel Hoherd
Date: Wed, 25 Dec 2024 18:53:54 -0500
Subject: [PATCH] Add link: https://minds.md/zakirullin/cognitive

---
 3d-printing/index.html | 12 +
 404.html | 12 +
 airflow/index.html | 12 +
 airport/index.html | 12 +
 amazon/index.html | 12 +
 android/index.html | 12 +
 ansible/index.html | 12 +
 apfs/index.html | 12 +
 aptly/index.html | 12 +
 aria2/index.html | 12 +
 arpwatch/index.html | 12 +
 atomicparsley/index.html | 12 +
 automotive/index.html | 12 +
 autonomous-vehicles/index.html | 12 +
 avahi/index.html | 12 +
 awk/index.html | 12 +
 awless/index.html | 12 +
 aws-cloudformation/index.html | 12 +
 awscli/index.html | 12 +
 backups/index.html | 12 +
 badblocks/index.html | 12 +
 bash/index.html | 12 +
 bbcp/index.html | 12 +
 bc/index.html | 12 +
 bind/index.html | 12 +
 blkid/index.html | 12 +
 bluetooth/index.html | 12 +
 bpf/index.html | 12 +
 c/index.html | 12 +
 calico/index.html | 12 +
 calver/index.html | 12 +
 centos/index.html | 12 +
 ceph/index.html | 12 +
 chkconfig/index.html | 12 +
 chocolatey/index.html | 12 +
 chronos/index.html | 12 +
 circleci/index.html | 12 +
 circuitpython/index.html | 12 +
 cncf/index.html | 12 +
 cobbler/index.html | 12 +
 colorblindness/index.html | 12 +
 computing/index.html | 12 +
 consul/index.html | 12 +
 cookiecutter/index.html | 12 +
 cowsay/index.html | 12 +
 cpp/index.html | 12 +
 cradlepoint/index.html | 12 +
 cron/index.html | 12 +
 cryptsetup/index.html | 12 +
 csplit/index.html | 12 +
 css/index.html | 12 +
 curl/index.html | 12 +
 cut/index.html | 12 +
 d2/index.html | 12 +
 dasel/index.html | 12 +
 data/index.html | 12 +
 datasette/index.html | 12 +
 date/index.html | 12 +
 dcfldd/index.html | 12 +
 dcgc/index.html | 12 +
 dcraw/index.html | 12 +
 dd-wrt/index.html | 12 +
 dd/index.html | 12 +
 ddrescue/index.html | 12 +
 deb/index.html | 12 +
 debian/index.html | 12 +
 defaults/index.html | 12 +
 devops/index.html | 12 +
 dhcp/index.html | 12 +
 dhcpd.conf/index.html | 12 +
 diagrams/index.html | 15 +-
 dig/index.html | 54 ++
 dmidecode/index.html | 12 +
 dns/index.html | 12 +
 docker/index.html | 12 +
 document-query/index.html | 12 +
 drone/index.html | 12 +
 dsrc/index.html | 12 +
 dtrace/index.html | 12 +
 du/index.html | 12 +
 duplicity/index.html | 12 +
 e-bike/index.html | 12 +
 education/index.html | 12 +
 elasticsearch/index.html | 12 +
 eleduino/index.html | 12 +
 etcd/index.html | 12 +
 ethtool/index.html | 12 +
 exiftool/index.html | 12 +
 fediverse/index.html | 12 +
 fedramp/index.html | 12 +
 ffmpeg/index.html | 12 +
 figlet/index.html | 12 +
 finance/index.html | 12 +
 find/index.html | 12 +
 findmnt/index.html | 12 +
 fio/index.html | 12 +
 flask/index.html | 12 +
 flipper-zero/index.html | 12 +
 fluent-bit/index.html | 12 +
 fluentd/index.html | 12 +
 fortune/index.html | 12 +
 fuser/index.html | 12 +
 gaming/index.html | 12 +
 genicam/index.html | 12 +
 geodata/index.html | 12 +
 gige-vision/index.html | 12 +
 git/index.html | 12 +
 github/index.html | 12 +
 gitlab/index.html | 12 +
 gitolite/index.html | 12 +
 gnu-screen/index.html | 12 +
 golang/index.html | 12 +
 google-cloud/index.html | 12 +
 google-earth/index.html | 12 +
 google-sheets/index.html | 12 +
 google/index.html | 12 +
 graphicsmagick/index.html | 12 +
 graphql/index.html | 12 +
 grep/index.html | 12 +
 grub/index.html | 12 +
 hadoop/index.html | 12 +
 handbrake/index.html | 12 +
 hashids/index.html | 12 +
 helm/index.html | 12 +
 home-assistant/index.html | 12 +
 hp/index.html | 12 +
 htmx/index.html | 12 +
 htop/index.html | 12 +
 httpstat/index.html | 12 +
 iTunes/index.html | 12 +
 image-formats/index.html | 12 +
 imagemagick/index.html | 12 +
 img2xterm/index.html | 12 +
 index.html | 15 +
 inotify/index.html | 12 +
 internet/index.html | 12 +
 interview/index.html | 12 +
 iotop/index.html | 12 +
 ip/index.html | 59 +-
 iperf/index.html | 12 +
 ipmi/index.html | 12 +
 iptables/index.html | 12 +
 irc/index.html | 12 +
 irssi/index.html | 12 +
 jargon/index.html | 12 +
 javascript/index.html | 12 +
 jdupes/index.html | 12 +
 jmespath/index.html | 12 +
 join/index.html | 12 +
 jot/index.html | 12 +
 jpeginfo/index.html | 12 +
 jq/index.html | 12 +
 json/index.html | 12 +
 jsonnet/index.html | 12 +
 jsonpath/index.html | 12 +
 juniper/index.html | 12 +
 jupyter/index.html | 12 +
 jwt/index.html | 12 +
 kaniko/index.html | 12 +
 keel/index.html | 12 +
 keybase/index.html | 12 +
 kubernetes/index.html | 18 +-
 launchd/index.html | 12 +
 ldap/index.html | 12 +
 lego/index.html | 12 +
 lektor/index.html | 12 +
 linksys/index.html | 12 +
 linux-performance-monitoring/index.html | 12 +
 linux/index.html | 12 +
 logstash/index.html | 12 +
 lsblk/index.html | 12 +
 lshw/index.html | 12 +
 lsof/index.html | 12 +
 lua/index.html | 12 +
 lvm/index.html | 12 +
 lxc/index.html | 12 +
 machine-learning/index.html | 12 +
 macos/index.html | 12 +
 make/index.html | 12 +
 marlin/index.html | 12 +
 math/index.html | 12 +
 mdraid/index.html | 12 +
 mechanical-keyboards/index.html | 12 +
 metallb/index.html | 12 +
 microbit/index.html | 12 +
 microcontrollers/index.html | 12 +
 micropython/index.html | 12 +
 minikube/index.html | 12 +
 misc/index.html | 17 +-
 mkdocs/index.html | 12 +
 molecule/index.html | 12 +
 mongodb/index.html | 12 +
 mqtt/index.html | 12 +
 mutt/index.html | 12 +
 myrepos/index.html | 12 +
 mysql/index.html | 12 +
 namei/index.html | 12 +
 ncftp/index.html | 12 +
 neopixel/index.html | 12 +
 netgear/index.html | 12 +
 nethogs/index.html | 12 +
 networking/index.html | 12 +
 networksetup/index.html | 12 +
 nfc/index.html | 12 +
 nfs/index.html | 12 +
 ngrok/index.html | 12 +
 nintendo-3ds/index.html | 12 +
 nintendo-amiibo/index.html | 12 +
 nintendo-nes/index.html | 12 +
 nintendo-switch/index.html | 12 +
 nintendo-wii/index.html | 12 +
 nintendo-wiiu/index.html | 12 +
 ntop/index.html | 12 +
 ntp/index.html | 12 +
 nvidia/index.html | 12 +
 onboarding/index.html | 12 +
 openvpn/index.html | 12 +
 orbstack/index.html | 12 +
 osquery/index.html | 12 +
 outline/index.html | 12 +
 pac/index.html | 12 +
 pandoc/index.html | 12 +
 panopticlick/index.html | 12 +
 passwords/index.html | 12 +
 perl/index.html | 12 +
 pgp/index.html | 12 +
 philips-hue/index.html | 12 +
 photography/index.html | 12 +
 php/index.html | 12 +
 plex/index.html | 12 +
 postgres/index.html | 12 +
 powershell/index.html | 12 +
 powertop/index.html | 12 +
 procurve/index.html | 12 +
 prometheus/index.html | 12 +
 protobuf/index.html | 12 +
 ps/index.html | 12 +
 ps_mem/index.html | 12 +
 psp/index.html | 12 +
 pssh/index.html | 12 +
 ptp/index.html | 12 +
 puppet/index.html | 12 +
 pv/index.html | 12 +
 pxe/index.html | 12 +
 pytest/index.html | 12 +
 python/index.html | 12 +
 q/index.html | 12 +
 raspberry-pi/index.html | 12 +
 redis/index.html | 12 +
 retropie/index.html | 12 +
 rhel/index.html | 12 +
 robotics/index.html | 12 +
 roku/index.html | 12 +
 rook/index.html | 12 +
 ros/index.html | 12 +
 rpm/index.html | 12 +
 rrd/index.html | 12 +
 rst/index.html | 12 +
 rsync/index.html | 12 +
 rust/index.html | 12 +
 saltstack/index.html | 12 +
 samba/index.html | 12 +
 samsung/index.html | 12 +
 science/index.html | 12 +
 screenshot/index.html | 12 +
 sdr/index.html | 12 +
 search/search_index.json | 2 +-
 security/index.html | 12 +
 sed/index.html | 12 +
 selinux/index.html | 12 +
 semver/index.html | 12 +
 sensu/index.html | 12 +
 serverless/index.html | 12 +
 sgdisk/index.html | 12 +
 shairport-sync/index.html | 12 +
 shred/index.html | 12 +
 shutdown/index.html | 12 +
 sips/index.html | 12 +
 sitemap.xml | 680 +++++++++---------
 sitemap.xml.gz | Bin 1987 -> 1987 bytes
 slides/index.html | 12 +
 smartctl/index.html | 12 +
 smartstack/index.html | 12 +
 snap/index.html | 12 +
 snmp/index.html | 12 +
 solo/index.html | 12 +
 sort/index.html | 12 +
 sound-and-music/index.html | 12 +
 sphinx/index.html | 12 +
 split/index.html | 12 +
 splunk/index.html | 12 +
 spotlight/index.html | 12 +
 sqlite/index.html | 12 +
 ss/index.html | 12 +
 ssh/index.html | 12 +
 sshuttle/index.html | 12 +
 stat/index.html | 12 +
 .../written-by-human-not-by-ai-white.svg | 28 +
 strace/index.html | 12 +
 sudo/index.html | 12 +
 swagger/index.html | 12 +
 swift/index.html | 12 +
 sysctl/index.html | 12 +
 sysdig/index.html | 12 +
 systemd-resolved/index.html | 12 +
 systemd/index.html | 12 +
 tar/index.html | 12 +
 tcl/index.html | 12 +
 tcpdump/index.html | 16 +-
 tcpflow/index.html | 12 +
 terminal-emulator/index.html | 12 +
 terraform/index.html | 12 +
 time/index.html | 21 +-
 tls/index.html | 12 +
 tmux/index.html | 12 +
 top-variant-list/index.html | 12 +
 top/index.html | 12 +
 touch/index.html | 12 +
 tr/index.html | 12 +
 ubuntu/index.html | 12 +
 udev/index.html | 12 +
 upstart/index.html | 12 +
 utm/index.html | 12 +
 vagrant/index.html | 12 +
 vector/index.html | 12 +
 velero/index.html | 12 +
 vim/index.html | 12 +
 virtual-reality/index.html | 12 +
 virtualbox/index.html | 12 +
 vpn/index.html | 12 +
 wasm/index.html | 12 +
 webgl/index.html | 12 +
 wget/index.html | 12 +
 winbind/index.html | 12 +
 wireshark/index.html | 12 +
 wonder-workshop/index.html | 12 +
 wuzz/index.html | 12 +
 xargs/index.html | 12 +
 yaml/index.html | 12 +
 youtube-dl/index.html | 12 +
 yum/index.html | 12 +
 zerotier/index.html | 12 +
 zfs/index.html | 12 +
 zookeeper/index.html | 12 +
 zsh/index.html | 12 +
 345 files changed, 4558 insertions(+), 363 deletions(-)
 create mode 100644 static_assets/written-by-human-not-by-ai-white.svg

[Identical 12-line hunk repeated for each page from 3d-printing/index.html through diagrams/index.html: each inserts the same footer block containing the "Written By Human, Not By AI" badge alongside the "Material for MkDocs" footer credit. diagrams/index.html additionally carries a one-line content change at @@ -7095,7 +7095,8 @@.]

diff --git a/dig/index.html b/dig/index.html
index 9862f4a85..3ff45fcb4 100644
--- a/dig/index.html
+++ b/dig/index.html
@@ -1706,6 +1706,24 @@
[Nav and table-of-contents entries added for the two new sections below; the matching TOC hunk is @@ -7151,6 +7169,24 @@.]

@@ -7192,6 +7228,12 @@
 Trace a query from the root servers

 If this doesn't give you a trace, you must specify an alternate DNS server

     dig @8.8.8.8 +trace renovo.auto

+Query multicast DNS for a hostname
+
+    dig gibson.local @224.0.0.251 -p 5353
+
+Do a reverse lookup against multicast DNS
+
+    dig -x 10.31.33.7 @224.0.0.251 -p 5353

[Plus the footer-badge hunk at @@ -7226,6 +7268,18 @@, as on the other pages.]
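The two new dig invocations above talk to the mDNS multicast group (224.0.0.251, port 5353) directly. A minimal cross-check sketch, assuming avahi-utils is installed and avahi-daemon is running, asks the local daemon for the same records (the hostname and address reuse the examples above):

    # Forward lookup of a .local hostname via the Avahi daemon
    avahi-resolve --name gibson.local
    # Reverse lookup of an address back to its mDNS hostname
    avahi-resolve --address 10.31.33.7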
[Identical footer-badge hunk repeated for each page from dmidecode/index.html through img2xterm/index.html.]

diff --git a/index.html b/index.html
index 4a429b70c..8bbaac69d 100644
--- a/index.html
+++ b/index.html
@@ -7039,6 +7039,9 @@

 About these notes

 • https://danielhoherd.github.io/tech-notes/
 • https://github.com/danielhoherd/tech-notes
+• Written By Human, Not By AI

[A second hunk at @@ -7073,6 +7076,18 @@ adds the footer-badge block, as on the other pages.]
[Identical footer-badge hunk repeated for inotify/index.html, internet/index.html, interview/index.html, and iotop/index.html.]

diff --git a/ip/index.html b/ip/index.html
index f716e93de..509d97622 100644
--- a/ip/index.html
+++ b/ip/index.html
@@ -3071,6 +3071,15 @@
[Nav and table-of-contents entries added for the new "Show multicast addresses" section below; the matching TOC hunk is @@ -7241,6 +7250,15 @@.]

@@ -7268,38 +7286,45 @@
 ip

 Examples

 The commands here can be shortened like Cisco or other network device commands.

 Get the default interface

     ip route get 8.8.8.8

 Show IP neighbors

     ip neighbor show

 Show all configured IP addresses

 This is more reliable than ifconfig, which sometimes omits entries.

     ip -f inet addr

 Show information about eth0

 This shows information about eth0 and all the virtual interfaces brought up on the physical interface.

     ip addr show eth0

 Monitor IP changes

     ip mon all

 Show interfaces that would route to a given network address

     ip addr show to 10.1.8.0/24

 Show negotiated speeds for all interfaces

-ip -o link show | awk -F: '{print $2}' | while read -r X ; do
-  sudo ethtool ${X} | egrep 'Settings|Speed' | xargs echo
-done | column -t -s:
+ip -o link show | awk -F: '{print $2}' | while read -r X ; do
+  sudo ethtool "${X}" 2>/dev/null |
+  grep -E 'Settings|Speed' |
+  xargs echo
+done |
+awk '{print $3, $5}' |
+column -t -s:

 Add a static route

     ip route add 192.168.100.0/24 via 0.0.0.0 dev eth0

 Set MTU for a specific route

     ip route add default via 10.0.0.1 mtu 296

+Show multicast addresses
+
+    ip maddr show

[Plus the footer-badge hunk at @@ -7335,6 +7360,18 @@, as on the other pages.]
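The rewritten "Show negotiated speeds" pipeline shells out to ethtool once per interface. A lighter sketch of the same idea, assuming a Linux sysfs layout, reads the negotiated speed (in Mb/s) straight from the kernel; the value is missing or -1 when a link is down, which the stderr redirect hides:

    # Alternative sketch: negotiated speed per interface from sysfs, no ethtool needed
    for dev in /sys/class/net/*; do
      printf '%s %s\n' "${dev##*/}" "$(cat "$dev/speed" 2>/dev/null)"
    done | column -t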
[Identical footer-badge hunk repeated for each page from iperf/index.html through minikube/index.html. kubernetes/index.html additionally carries a small content change at @@ -8555,9 +8555,8 @@ near "linux kernel namespaces".]

diff --git a/misc/index.html b/misc/index.html
index ccfab53fe..dda3ecea0 100644
--- a/misc/index.html
+++ b/misc/index.html
@@ -7225,7 +7225,10 @@

 Misc

 • https://clig.dev: "Command Line Interface Guidelines"
 • https://www.atlassian.com/devops/frameworks/dora-metrics: "DevOps Research and Assessment (DORA) provides a standard set of DevOps metrics used for evaluating process performance and maturity."
 • https://en.wikipedia.org/wiki/Tuckman%27s_stages_of_group_development: "Forming, Storming, Norming, Performing"
-• https://bitfieldconsulting.com/posts/career: "Where will you be when you realise that this is where you’ve always wanted to be?"
+• https://bitfieldconsulting.com/posts/career: "Where will you be when you realise that this is where you've always wanted to be?"
+• https://calebhearth.com/dont-get-distracted: "But don’t get distracted by all this; the software was intended to kill people."
+• https://grep.app: Search GitHub with better syntax, like regular expressions.
+• https://minds.md/zakirullin/cognitive: "We should reduce any cognitive load above and beyond what is intrinsic to the work we do."

 Kids

[A second hunk at @@ -7273,6 +7276,18 @@ adds the footer-badge block, as on the other pages.]
[Identical footer-badge hunk repeated for each page from mkdocs/index.html through nintendo-wiiu/index.html.]

diff --git a/ntop/index.html b/ntop/index.html
index 50c63ecee..3d9acfdb6 100644
--- a/ntop/index.html
+++ b/ntop/index.html
@@ -7130,6 +7130,18 @@

      See also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/ntp/index.html b/ntp/index.html index 5d6efc55d..3a3445d93 100644 --- a/ntp/index.html +++ b/ntp/index.html @@ -7179,6 +7179,18 @@

      See Also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/nvidia/index.html b/nvidia/index.html index 0ad86d33b..580eb3052 100644 --- a/nvidia/index.html +++ b/nvidia/index.html @@ -7074,6 +7074,18 @@

      nvidia

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/onboarding/index.html b/onboarding/index.html index c613f38d6..760a8fcdd 100644 --- a/onboarding/index.html +++ b/onboarding/index.html @@ -7153,6 +7153,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/openvpn/index.html b/openvpn/index.html index 62a4d667d..11b6b0465 100644 --- a/openvpn/index.html +++ b/openvpn/index.html @@ -7161,6 +7161,18 @@

      Check status of logged in clients

      + + +
      + + + + + + + + +
      diff --git a/orbstack/index.html b/orbstack/index.html index d509addc9..2d0c3dfd2 100644 --- a/orbstack/index.html +++ b/orbstack/index.html @@ -7070,6 +7070,18 @@

      orbstack

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/osquery/index.html b/osquery/index.html index 517de8e43..3bdff05a6 100644 --- a/osquery/index.html +++ b/osquery/index.html @@ -7135,6 +7135,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/outline/index.html b/outline/index.html index 1050ad8c2..ebeeab4c9 100644 --- a/outline/index.html +++ b/outline/index.html @@ -7073,6 +7073,18 @@

      outline

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/pac/index.html b/pac/index.html index c4b807b0b..f54770c4c 100644 --- a/pac/index.html +++ b/pac/index.html @@ -7166,6 +7166,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/pandoc/index.html b/pandoc/index.html index 227e3b710..d12628523 100644 --- a/pandoc/index.html +++ b/pandoc/index.html @@ -7160,6 +7160,18 @@

      Convert a doc from mediawiki t Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/panopticlick/index.html b/panopticlick/index.html index 7ecfb17fa..93f9a4385 100644 --- a/panopticlick/index.html +++ b/panopticlick/index.html @@ -7069,6 +7069,18 @@

      panopticlick

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/passwords/index.html b/passwords/index.html index ec5e0b248..7a006d75b 100644 --- a/passwords/index.html +++ b/passwords/index.html @@ -7208,6 +7208,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/perl/index.html b/perl/index.html index c986e07d1..40127cd1f 100644 --- a/perl/index.html +++ b/perl/index.html @@ -7428,6 +7428,18 @@

      Add one to the last octet of Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/pgp/index.html b/pgp/index.html index d5d5eb61f..d7598fc11 100644 --- a/pgp/index.html +++ b/pgp/index.html @@ -7210,6 +7210,18 @@

      Philosophy

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/philips-hue/index.html b/philips-hue/index.html index d78168c57..2dee830b6 100644 --- a/philips-hue/index.html +++ b/philips-hue/index.html @@ -7150,6 +7150,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/photography/index.html b/photography/index.html index 5016ef5b8..b45faf5e3 100644 --- a/photography/index.html +++ b/photography/index.html @@ -7133,6 +7133,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/php/index.html b/php/index.html index 032369e81..8aad3aa64 100644 --- a/php/index.html +++ b/php/index.html @@ -7312,6 +7312,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/plex/index.html b/plex/index.html index 09db1ad13..325658f58 100644 --- a/plex/index.html +++ b/plex/index.html @@ -7129,6 +7129,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/postgres/index.html b/postgres/index.html index b2347b046..326236755 100644 --- a/postgres/index.html +++ b/postgres/index.html @@ -7612,6 +7612,18 @@

      See Also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/powershell/index.html b/powershell/index.html index b857c713f..4162f6333 100644 --- a/powershell/index.html +++ b/powershell/index.html @@ -7572,6 +7572,18 @@

      See Also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/powertop/index.html b/powertop/index.html index 37889af7e..e12a68ffc 100644 --- a/powertop/index.html +++ b/powertop/index.html @@ -7181,6 +7181,18 @@

      See also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/procurve/index.html b/procurve/index.html index 7cf3f27fe..1d12142c1 100644 --- a/procurve/index.html +++ b/procurve/index.html @@ -7641,6 +7641,18 @@

      Example Config

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/prometheus/index.html b/prometheus/index.html index 7729eaa45..0804ed22f 100644 --- a/prometheus/index.html +++ b/prometheus/index.html @@ -7220,6 +7220,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/protobuf/index.html b/protobuf/index.html index 71f71d9b8..8bfe1c52d 100644 --- a/protobuf/index.html +++ b/protobuf/index.html @@ -7070,6 +7070,18 @@

      protobuf

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/ps/index.html b/ps/index.html index 57b933d1c..9e0089472 100644 --- a/ps/index.html +++ b/ps/index.html @@ -7282,6 +7282,18 @@

      Show linux kernel namespa Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/ps_mem/index.html b/ps_mem/index.html index 4b02defa3..103d002c1 100644 --- a/ps_mem/index.html +++ b/ps_mem/index.html @@ -7178,6 +7178,18 @@

      Simple usage

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/psp/index.html b/psp/index.html index 1cce12cf4..3397a8885 100644 --- a/psp/index.html +++ b/psp/index.html @@ -7129,6 +7129,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/pssh/index.html b/pssh/index.html index 6fb7e48eb..ff7f16a26 100644 --- a/pssh/index.html +++ b/pssh/index.html @@ -7250,6 +7250,18 @@

      Install a package on many hosts

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/ptp/index.html b/ptp/index.html index baf065d5e..7395a819d 100644 --- a/ptp/index.html +++ b/ptp/index.html @@ -7155,6 +7155,18 @@

      See Also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/puppet/index.html b/puppet/index.html index 1a090072c..0f4c51747 100644 --- a/puppet/index.html +++ b/puppet/index.html @@ -7878,6 +7878,18 @@

      r10k

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/pv/index.html b/pv/index.html index 11ec9c345..a17cea939 100644 --- a/pv/index.html +++ b/pv/index.html @@ -7230,6 +7230,18 @@

      See Also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/pxe/index.html b/pxe/index.html index 1329a9fed..00775a519 100644 --- a/pxe/index.html +++ b/pxe/index.html @@ -7130,6 +7130,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/pytest/index.html b/pytest/index.html index ba374f2f3..f3b5072f9 100644 --- a/pytest/index.html +++ b/pytest/index.html @@ -7363,6 +7363,18 @@

      Plugins

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/python/index.html b/python/index.html index 8158a2033..f35ecd488 100644 --- a/python/index.html +++ b/python/index.html @@ -8123,6 +8123,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/q/index.html b/q/index.html index 8ef6e0d85..2765fda31 100644 --- a/q/index.html +++ b/q/index.html @@ -7229,6 +7229,18 @@

      Select Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/raspberry-pi/index.html b/raspberry-pi/index.html index 7784bb955..0e308530d 100644 --- a/raspberry-pi/index.html +++ b/raspberry-pi/index.html @@ -7379,6 +7379,18 @@

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/redis/index.html b/redis/index.html index 5892af93f..a172a479a 100644 --- a/redis/index.html +++ b/redis/index.html @@ -7228,6 +7228,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/retropie/index.html b/retropie/index.html index fe523cdc4..b0811ec49 100644 --- a/retropie/index.html +++ b/retropie/index.html @@ -7224,6 +7224,18 @@

      Enable rewind

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/rhel/index.html b/rhel/index.html index f91270872..0d90191c4 100644 --- a/rhel/index.html +++ b/rhel/index.html @@ -7130,6 +7130,18 @@

      See Also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/robotics/index.html b/robotics/index.html index 8dcea4a46..53202bca8 100644 --- a/robotics/index.html +++ b/robotics/index.html @@ -7156,6 +7156,18 @@

      See also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/roku/index.html b/roku/index.html index 7d42cc9e0..250db7e07 100644 --- a/roku/index.html +++ b/roku/index.html @@ -7128,6 +7128,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/rook/index.html b/rook/index.html index fa26e9b0b..caaebad8c 100644 --- a/rook/index.html +++ b/rook/index.html @@ -7070,6 +7070,18 @@

      rook

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/ros/index.html b/ros/index.html index 4012f0824..5b50048d7 100644 --- a/ros/index.html +++ b/ros/index.html @@ -7135,6 +7135,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/rpm/index.html b/rpm/index.html index 79715bd96..6b81bea41 100644 --- a/rpm/index.html +++ b/rpm/index.html @@ -7521,6 +7521,18 @@

      See Also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/rrd/index.html b/rrd/index.html index 05ba851ec..0c57a99a8 100644 --- a/rrd/index.html +++ b/rrd/index.html @@ -7207,6 +7207,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/rst/index.html b/rst/index.html index ead01d459..b290869c6 100644 --- a/rst/index.html +++ b/rst/index.html @@ -7072,6 +7072,18 @@

      reStructuredText

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/rsync/index.html b/rsync/index.html index 419085356..4741b35bc 100644 --- a/rsync/index.html +++ b/rsync/index.html @@ -7459,6 +7459,18 @@

      See Also

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/rust/index.html b/rust/index.html index 7ecd5fc5e..b0c3ff257 100644 --- a/rust/index.html +++ b/rust/index.html @@ -7161,6 +7161,18 @@

      Books

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/saltstack/index.html b/saltstack/index.html index 46cd31069..f5567ded8 100644 --- a/saltstack/index.html +++ b/saltstack/index.html @@ -7746,6 +7746,18 @@

      A Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/samba/index.html b/samba/index.html index c863abf04..58c8cc683 100644 --- a/samba/index.html +++ b/samba/index.html @@ -7462,6 +7462,18 @@

      Upgrading Supermicro firmware

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/samsung/index.html b/samsung/index.html index d38abae88..5f888b3e6 100644 --- a/samsung/index.html +++ b/samsung/index.html @@ -7180,6 +7180,18 @@

      Frame info

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/science/index.html b/science/index.html index 0a5972ab1..6e91fdd1f 100644 --- a/science/index.html +++ b/science/index.html @@ -7128,6 +7128,18 @@ Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/screenshot/index.html b/screenshot/index.html index ced294d32..6d48769df 100644 --- a/screenshot/index.html +++ b/screenshot/index.html @@ -7225,6 +7225,18 @@

      macOS

      Material for MkDocs + + +
      + + + + + + + + +
      diff --git a/sdr/index.html b/sdr/index.html index 872257109..55724d70e 100644 --- a/sdr/index.html +++ b/sdr/index.html @@ -7158,6 +7158,18 @@ Material for MkDocs + + +
      + + + + + + + + +
diff --git a/search/search_index.json b/search/search_index.json
index 2be897d17..cd496bd35 100644
--- a/search/search_index.json
+++ b/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"About these notes","text":"

      These are notes I've taken on technologies that I have used or would like to use.

These notes started out some time before 2005 in VoodooPad 2. In December 2005 I discovered that you could self-host MediaWiki, so I moved my content into a private MediaWiki installation. Both VoodooPad and self-hosted MediaWiki worked fine for me, but as my notes became more useful and I wanted to show different sections to people in a way that let them discover useful content, the private nature of my self-hosted MediaWiki installation became problematic. MediaWiki also had the problem of being a hosted web service, which meant it was not possible to access or edit content when my laptop was offline. I solved this for a while by running MediaWiki in a VM on my laptop, but that meant I couldn't access notes from other computers if my laptop was offline, and it meant I had a VM running at all times just to serve notes, which wasted a lot of resources. In 2015 I decided to move out of MediaWiki into markdown files in git, and in 2016 I began using mkdocs to publish these notes publicly to github pages.

Since 2016, these notes have been rendered from markdown files and published to github-pages using mkdocs gh-deploy. If you have suggestions, please open a github issue. Please do not submit PRs.

      • https://danielhoherd.github.io/tech-notes/
      • https://github.com/danielhoherd/tech-notes
      "},{"location":"3d-printing/","title":"3D Printing","text":""},{"location":"3d-printing/#links","title":"Links","text":"
      • https://www.makerbot.com
      • http://www.meshlab.net: \"the open source system for processing and editing 3D triangular meshes.\"
      • https://www.thingiverse.com
      • https://all3dp.com/1/petg-filament-3d-printing/
      • https://www.monoprice.com/product?p_id=33820: Monoprice MP Voxel 3D Printer
      • https://www.shapeways.com/materials
      • https://cadquery.readthedocs.io: \"CadQuery is an intuitive, easy-to-use Python library for building parametric 3D CAD models.\"
      • https://www.prusa3d.com/category/original-prusa-i3-mk3s
      • https://youtu.be/ibsOYzXduYc: Ender 3 Pro build
      • https://youtu.be/_EfWVUJjBdA: Ender 3 Pro bed leveling
      • https://youtu.be/kG_YKeJDaX8: Ender 3 Pro mods
      • https://www.3dbenchy.com/features
      • https://ultimaker.com/software/ultimaker-cura
      • https://teachingtechyt.github.io: \"Teaching Tech 3D Printer Site\"
      "},{"location":"3d-printing/#see-also","title":"See Also","text":"
      • Marlin
      "},{"location":"airflow/","title":"Airflow","text":"

      \"Airflow is a platform created by the community to programmatically author, schedule and monitor workflows.\" - https://airflow.apache.org/

      "},{"location":"airflow/#links","title":"Links","text":"
      • https://airflow.apache.org/docs/stable/concepts.html
      • https://airflow.readthedocs.io/en/latest/executor/kubernetes.html
      • https://www.astronomer.io/blog/dag-writing-best-practices-in-apache-airflow / https://youtu.be/HvjnLCQygO4
      "},{"location":"airport/","title":"Apple Airport","text":"

      Apple Airport hardware was discontinued in November 2016.

      "},{"location":"airport/#using-old-airport-utility-apps-with-new-versions-of-os-x","title":"Using old Airport Utility apps with new versions of OS X","text":"
      • https://zcs.zyniker.org/airport-utility-v5-6-1
      • https://support.apple.com/kb/DL1536

      Or use the 5.6.1 Utility in Windows? Not sure if this works.

      • https://support.apple.com/kb/dl1547
      "},{"location":"amazon/","title":"Amazon","text":"

      Mostly related to the technological offerings of Amazon, not the shopping experience.

      "},{"location":"amazon/#kindle","title":"Kindle","text":"
      • https://blog.lidskialf.net/2021/02/08/turning-an-old-kindle-into-a-eink-development-platform/
      • https://wiki.mobileread.com/wiki/Kindle_Hacks_Information
      • https://www.mobileread.com/forums/showthread.php?t=225030
      • https://goodereader.com/blog/kindle/interesting-ways-to-infuse-new-life-to-old-kindle-ereader-devices
      "},{"location":"amazon/#aws","title":"AWS","text":"

      \"Amazon Elastic Compute Cloud (Amazon EC2) is a web service that provides secure, resizable compute capacity in the cloud. It is designed to make web-scale cloud computing easier for developers.\" - https://aws.amazon.com/ec2/

      "},{"location":"amazon/#cloudformation","title":"Cloudformation","text":"

      cloudformation has its own notes page.

      "},{"location":"amazon/#links","title":"Links","text":"
      • Amazon EC2 Instance Types
      • Amazon service availability by region
      • awless - awless is a powerful, innovative and small surface command line interface (CLI) to manage Amazon Web Services.
      • AWS Console
      • AWS in plain english
      • EC2Instances.info - Easy Amazon EC2 Instance Comparison
      • Github - Troposphere
      • localstack - A fully functional local AWS cloud stack for use in development.
• s3tools - Command Line S3 Client and Backup
      • https://aws.amazon.com/answers/account-management/aws-tagging-strategies/
      "},{"location":"amazon/#tips","title":"Tips","text":""},{"location":"amazon/#determine-if-you-are-on-an-ec2-instance","title":"Determine if you are on an EC2 instance","text":"
      grep -i '^ec2' /sys/hypervisor/uuid\n
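This check relies on Xen; /sys/hypervisor may not exist at all on newer Nitro-based instance types. A hedged alternative sketch is to ask the instance metadata service, which only answers from inside EC2:

## IMDS answers only from within EC2; the short timeout keeps this fast elsewhere\ncurl -s --connect-timeout 1 http://169.254.169.254/latest/meta-data/instance-id\n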
      "},{"location":"amazon/#reformat-accesskeyscsv-into-awscredentials-format","title":"Reformat accessKeys.csv into .aws/credentials format","text":"
      awk -F, 'BEGIN { print \"[temp_name]\" ; } !/Access/ {print \"aws_access_key_id = \"$1\"\\naws_secret_access_key = \"$2}' ~/Downloads/accessKeys.csv\n
      "},{"location":"amazon/#force-reset-mfa-credentials","title":"Force reset mfa credentials","text":"

      https://github.com/broamski/aws-mfa

      aws-mfa --device arn:aws:iam::$UID:mfa/$USER --force\n
      "},{"location":"amazon/#create-eks-cluster-from-cli","title":"Create eks cluster from cli","text":"

      https://github.com/weaveworks/eksctl

      eksctl create cluster\n
      "},{"location":"amazon/#get-eks-cluster-config","title":"Get eks cluster config","text":"
      # find your cluster name\naws eks list-clusters | jq -r '.clusters[]'\n\n# configure the current KUBECONFIG for the given cluster\naws eks update-kubeconfig --name the_cluster_name\n
      "},{"location":"amazon/#see-also","title":"See Also","text":"
      • aws cli
      • CloudFormation
      • AWS Glossary
      "},{"location":"android/","title":"Android","text":"

      \"Android is a mobile operating system based on a modified version of the Linux kernel and other open-source software, designed primarily for touchscreen mobile devices such as smartphones and tablets.\" - https://en.wikipedia.org/wiki/Android_(operating_system)

      "},{"location":"android/#links","title":"Links","text":"
      • https://github.com/melontini/bootloader-unlock-wall-of-shame
      "},{"location":"ansible/","title":"Ansible","text":"
      • https://docs.ansible.com/intro_inventory.html
      • https://docs.ansible.com/playbooks_best_practices.html
      • https://galaxy.ansible.com
      • https://www.azavea.com/blogs/labs/2014/10/creating-ansible-roles-from-scratch-part-1
      • https://ryandlane.com/blog/2014/08/04/moving-away-from-puppet-saltstack-or-ansible/
      • https://docs.ansible.com/ansible/latest/user_guide/intro_patterns.html: Targeting hosts
      • https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable
      • https://zwischenzugs.com/2021/08/27/five-ansible-techniques-i-wish-id-known-earlier
      "},{"location":"ansible/#modules","title":"Modules","text":"
      • https://docs.ansible.com/ansible/latest/collections/ansible/builtin/#modules
      • https://docs.ansible.com/ansible/latest/modules/apt_module.html
      • https://docs.ansible.com/ansible/latest/modules/apt_repository_module.html
      • https://docs.ansible.com/ansible/latest/modules/sysctl_module.html
      • https://docs.ansible.com/ansible/latest/modules/user_module.html
      "},{"location":"ansible/#see-also","title":"See also","text":"
      • Molecule: testing of Ansible roles
      "},{"location":"ansible/#examples","title":"Examples","text":""},{"location":"ansible/#generate-a-copy-block-for-a-given-file","title":"Generate a copy block for a given file","text":"

Not perfect, because the output is JSON, but JSON is valid YAML and easy enough to fix up quickly.

      ## stat -c '{\"copy\": {\"src\": \"SOURCE_FILE_NAME\", \"dest\": \"%n\", \"mode\": \"0%a\", \"owner\": \"%U\", \"group\": \"%G\"}}' /etc/logrotate.d/backup | jq .\n{\n  \"copy\": {\n    \"src\": \"SOURCE_FILE_NAME\",\n    \"dest\": \"/etc/logrotate.d/backup\",\n    \"mode\": \"0644\",\n    \"owner\": \"root\",\n    \"group\": \"root\"\n  }\n}\n
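If you would rather get YAML directly, a hedged variation (assuming mikefarah's yq v4, which reads JSON natively, is installed) is to pipe through yq instead of jq:

stat -c '{\"copy\": {\"src\": \"SOURCE_FILE_NAME\", \"dest\": \"%n\", \"mode\": \"0%a\", \"owner\": \"%U\", \"group\": \"%G\"}}' /etc/logrotate.d/backup | yq -P '.'\n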
      "},{"location":"ansible/#show-a-list-of-installed-modules","title":"Show a list of installed modules","text":"
      ansible-doc --list\n
      "},{"location":"ansible/#run-a-playbook-and-prompt-for-sudo-password","title":"Run a playbook and prompt for sudo password","text":"
      ansible-playbook --ask-become-pass -i inventory/hosts.yaml create_users.yaml\n
      "},{"location":"ansible/#run-an-ad-hoc-command","title":"Run an ad-hoc command","text":"

      You can run one-off ad-hoc commands by passing a module and args to the module.

      ansible localhost \\\n  -m get_url \\\n  -a \"mode=755\n    url=https://github.com/bcicen/ctop/releases/download/v0.7.1/ctop-0.7.1-linux-amd64\n    dest=/usr/local/bin/ctop\n    checksum=sha256:38cfd92618ba2d92e0e1262c0c43d7690074b4b8dc77844b654f8e565166b577\n    owner=root\n    group=root\"\n
      "},{"location":"ansible/#validate-and-inspect-your-inventory-file","title":"Validate and inspect your inventory file","text":"

      This command parses your inventory and group_vars and outputs a json data structure if no syntax faults are found.

      ansible-inventory -i inventory/hosts.yml --list\n
      "},{"location":"ansible/#use-arbitrary-groups-in-static-inventory-file","title":"Use arbitrary groups in static inventory file","text":"
      $ nl -w 2 -s ' ' -ba inventory/example.yml\n 1 all:\n 2   hosts:\n 3     client:\n 4       ansible_host: 192.168.1.2\n 5     server:\n 6       ansible_host: 192.168.2.3\n 7\n 8 linux:\n 9   hosts:\n10     server:\n11\n12 windows:\n13   hosts:\n14     client:\n15\n16 california:\n17   hosts:\n18     client:\n19     server:\n$ ansible-inventory -i inventory/example.yml --graph\n@all:\n  |--@california:\n  |  |--client\n  |  |--server\n  |--@linux:\n  |  |--server\n  |--@windows:\n  |  |--client\n
      "},{"location":"ansible/#merge-multiple-inventory-files","title":"Merge multiple inventory files","text":"

      The below example gives higher precedence to the later files.

      ## cat foo.yml\nall:\n  hosts:\n    client:\n      ansible_host: 192.168.1.2\n      service_hostname: hostname-from-file-1\n    server:\n      ansible_host: 192.168.2.3\n      file_number: one\n\n## cat bar.yml\nall:\n  hosts:\n    client:\n      ansible_host: 10.1.2.3\n    server:\n      ansible_host: 10.2.3.4\n      file_number: two\n\n## ansible-inventory -i foo.yml -i bar.yml --list | json-to-yaml.py\n_meta:\n  hostvars:\n    client:\n      ansible_host: 10.1.2.3\n      service_hostname: hostname-from-file-1\n    server:\n      ansible_host: 10.2.3.4\n      file_number: two\nall:\n  children:\n  - ungrouped\nungrouped:\n  hosts:\n  - client\n  - server\n
      "},{"location":"ansible/#show-all-resolved-variables-for-a-given-inventory-host","title":"Show all resolved variables for a given inventory host","text":"

This will show all host vars, including variables resolved from all the different variable locations.

      ansible -i inventory target_hostname -m debug -a \"var=hostvars[inventory_hostname]\"\n
      "},{"location":"ansible/#gather-all-facts-and-save-them-to-files","title":"Gather all facts and save them to files","text":"

      This will create a directory called facts and save results as one json file per host.

      ansible -i inventory target_group_or_hostname -m gather_facts --tree host_facts\n
      "},{"location":"ansible/#generate-an-deterministic-random-number","title":"Generate an deterministic random number","text":"

      This is similar to the Puppet fqdn_rand() function, which is really useful to splay cron jobs. Splaying cron jobs avoids the thundering herd problem by spreading the jobs out over time with deterministic randomness.

      ---\n## defaults/main.yml\n\ndemo_cron_minute: \"{{ 59 | random(seed=inventory_hostname) }}\"\ndemo_cron_hour: \"{{ 23 | random(seed=inventory_hostname) }}\"\n
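A hypothetical task that consumes these defaults with the builtin cron module might look like the following sketch (the job name and command are made up for illustration):

---\n## tasks/main.yml\n\n- name: Install the splayed demo cron job\n  ansible.builtin.cron:\n    name: demo-job\n    minute: \"{{ demo_cron_minute }}\"\n    hour: \"{{ demo_cron_hour }}\"\n    job: /usr/local/bin/demo-job\n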

      See also: https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#randomizing-data

      "},{"location":"ansible/#simple-ansible-playbook","title":"Simple ansible playbook","text":"

      This may be useful for testing syntax and experimenting with ansible modules.

      ---\n## playbook.yml\n\n- name: A local play\n  hosts: localhost\n  connection: local\n  gather_facts: no\n  tasks:\n    - name: Run cmd\n      shell: /bin/date\n      register: cmd_out\n\n    - debug:\n        var: cmd_out.stdout\n

      ansible-playbook -i localhost playbook.yml

      Slightly more complicated example:

      ## playbook.yml\n## run with: ansible-playbook -i localhost playbook.yml\n\n- name: A local play\n  hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: test_region\n    subnets:\n      - subnet_name: Public_2a\n        subnet_cidr: 192.168.100.0/26\n        subnet_az: \"{{ region }}_a\"\n      - subnet_name: Public_2b\n        subnet_cidr: 192.168.100.64/26\n        subnet_az: \"{{ region }}_b\"\n      - subnet_name: Private_2a\n        subnet_cidr: 192.168.100.128/26\n        subnet_az: \"{{ region }}_a\"\n      - subnet_name: Private_2b\n        subnet_cidr: 192.168.100.192/26\n        subnet_az: \"{{ region }}_b\"\n\n  tasks:\n    - name: Run cmd\n      shell: echo \"{{ item.subnet_name }} {{ item.subnet_cidr }} {{ item.subnet_az }}\"\n      register: cmd_out\n      loop: \"{{ subnets }}\"\n\n    - debug:\n        var: cmd_out\n
      "},{"location":"ansible/#get-a-list-of-failed-hosts","title":"Get a list of failed hosts","text":"
      {{ ansible_play_hosts_all | difference(ansible_play_hosts) }}\n
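For example, a hedged end-of-play task could report them (the task layout here is a sketch, not from the original notes):

- name: Report hosts that failed during this play\n  ansible.builtin.debug:\n    msg: \"Failed hosts: {{ ansible_play_hosts_all | difference(ansible_play_hosts) }}\"\n  run_once: true\n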
      "},{"location":"ansible/#links","title":"Links","text":"
      • https://github.com/danielhoherd/ansible-playground: Examples I've made of Ansible techniques
      • Ansible Tests with Molecule - https://molecule.readthedocs.io/en/latest/ / https://www.digitalocean.com/community/tutorials/how-to-test-ansible-roles-with-molecule
      • Molecule sequence of scenario events - https://molecule.readthedocs.io/en/latest/configuration.html#scenario
      • Test-driven infrastructure development with Ansible & Molecule - https://blog.codecentric.de/en/2018/12/test-driven-infrastructure-ansible-molecule/ / https://github.com/jonashackt/molecule-ansible-docker-vagrant
      • Testkitchen modules (for Molecule tests): https://testinfra.readthedocs.io/en/latest/modules.html
      • https://www.hashicorp.com/resources/ansible-terraform-better-together
      "},{"location":"apfs/","title":"Apple APFS","text":"

      A lot of notes here are as of macOS 10.13, and don't apply specifically to any other devices that run APFS.

      APFS got some big bumps in macOS 12, including big snapshot improvements.

      "},{"location":"apfs/#usage","title":"Usage","text":"
      $ diskutil apfs\n2017-11-04 18:23:55-0700\nUsage:  diskutil [quiet] ap[fs] <verb> <options>\n        where <verb> is as follows:\n\n     list                (Show status of all current APFS Containers)\n     convert             (Nondestructively convert from HFS to APFS)\n     create              (Create a new APFS Container with one APFS Volume)\n     createContainer     (Create a new empty APFS Container)\n     deleteContainer     (Delete an APFS Container and reformat disks to HFS)\n     resizeContainer     (Resize an APFS Container and its disk space usage)\n     addVolume           (Export a new APFS Volume from an APFS Container)\n     deleteVolume        (Remove an APFS Volume from its APFS Container)\n     eraseVolume         (Erase contents of, but keep, an APFS Volume)\n     changeVolumeRole    (Change the Role metadata bits of an APFS Volume)\n     unlockVolume        (Unlock an encrypted APFS Volume which is locked)\n     lockVolume          (Lock an encrypted APFS Volume (diskutil unmount))\n     listCryptoUsers     (List cryptographic users of encrypted APFS Volume)\n     changePassphrase    (Change the passphrase of a cryptographic user)\n     setPassphraseHint   (Set or clear passphrase hint of a cryptographic user)\n     encryptVolume       (Start async encryption of an unencrypted APFS Volume)\n     decryptVolume       (Start async decryption of an encrypted APFS Volume)\n     updatePreboot       (Update the APFS Volume's related APFS Preboot Volume)\n\ndiskutil apfs <verb> with no options will provide help on that verb\n
      "},{"location":"apfs/#file-clones","title":"File clones","text":"

APFS supports deduplicated file copies, which it calls clonefiles. Copying a file by option-dragging it in Finder creates a clonefile. To create a clonefile on the CLI use cp -c src dst. Creating a clonefile of a file of any size is instantaneous because no file data is actually being copied. This differs from hard links because if you modify the clone, only the new blocks will be written to disk, and the source of the cloned file will not be modified.
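For example, cloning a large file returns immediately and consumes no extra space until either copy is modified (the file names are illustrative):

## returns instantly even for a multi-gigabyte file\ncp -c big-video.mov big-video-clone.mov\n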

      "},{"location":"apfs/#snapshots","title":"Snapshots","text":"

      Snapshots appear to be tied pretty directly to Time Machine, and do not appear to be general purpose. There appear to be many limitations in how they can be used, and what information you can get about them.

      There was previously a tool called apfs_snapshot but it was removed before macOS 10.13 was released.

      "},{"location":"apfs/#create-a-snapshot","title":"Create a snapshot","text":"

You cannot choose a name for your snapshot; it is tied to the date the snapshot was taken, in the form YYYY-MM-DD-HHMMSS, or date \"+%Y-%m-%d-%H%M%S\"

      $ sudo tmutil localsnapshot\nNOTE: local snapshots are considered purgeable and may be removed at any time by deleted(8).\nCreated local snapshot with date: 2021-08-23-101843\n
      "},{"location":"apfs/#show-snapshots","title":"Show snapshots","text":"
      $ sudo tmutil listlocalsnapshots /\ncom.apple.TimeMachine.2017-11-01-161748\ncom.apple.TimeMachine.2017-11-02-100755\ncom.apple.TimeMachine.2017-11-03-084837\ncom.apple.TimeMachine.2017-11-04-182813\n
      "},{"location":"apfs/#mount-a-snapshot","title":"Mount a snapshot","text":"

      The easiest way to mount snapshots is to open Time Machine.app and browse backwards in time. This will mount your snapshots at /Volumes/com.apple.TimeMachine.localsnapshots/Backups.backupdb/$HOSTNAME/$SNAPSHOT_DATE/Data or a similar path.

      If you just want to mount a single snapshot, fill in $snapshot_name using one of the lines from tmutil listlocalsnapshots /, then:

      mkdir apfs_snap\nmount_apfs -o nobrowse,ro -s \"$snapshot_name\" /System/Volumes/data \"$PWD/apfs_snap\"\n

      Older version of macOS have a slightly different syntax

      mkdir apfs_snap\nsudo mount_apfs -s \"$snapshot_name\" / \"${PWD}/apfs_snap\"\n
      "},{"location":"apfs/#delete-a-snapshot","title":"Delete a snapshot","text":"

      You can only delete snapshots based off of their date.

      $ sudo tmutil deletelocalsnapshots 2017-11-04-183813\nDeleted local snapshot '2017-11-04-183813'\n
      "},{"location":"apfs/#delete-all-snapshots","title":"Delete all snapshots","text":"
/usr/bin/tmutil listlocalsnapshots / |\ngrep -oE '2[0-9]{3}-[0-9]{2}-[0-9]{2}-[0-9]{6}' |\nwhile read -r snap ; do\n  tmutil deletelocalsnapshots \"$snap\"\ndone\n
      "},{"location":"apfs/#thin-out-snapshots","title":"Thin out snapshots","text":"

On the given drive, reclaim the given space by thinning out snapshots. As of tmutil 4.0.0, you cannot use any data unit other than bytes (e.g. 1G or 1GB will not work).

      $ sudo tmutil thinlocalsnapshots / 250000000\nThinned local snapshots:\n2017-11-04-184425\n2017-11-04-184433\n2017-11-04-184440\n
      "},{"location":"apfs/#see-also","title":"See also","text":"
      /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util\n/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs_invert\n/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs_preflight_converter\n/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs_stats\n
      "},{"location":"apfs/#links","title":"Links","text":"
      • Apple File System
      • Rich Trouton - Storing our digital lives - Mac filesystems from MFS to APFS
      • Apple File System Guide
      • Russ Bishop - Apple File System
• A ZFS developer's analysis of the good and bad in Apple's new APFS file system
      • https://www.jinx.de/zfs/hfsfailure.html - Demo of how awful HFS+ is at detecting failures
      • https://eclecticlight.co/2021/11/09/disk-utility-now-has-full-features-for-managing-snapshots/
      "},{"location":"aptly/","title":"Aptly","text":"
      • \"Aptly is a swiss army knife for Debian repository management.\"
      • https://github.com/sepulworld/aptly-vagrant
      "},{"location":"aria2/","title":"Aria2","text":"

      \"aria2 is a lightweight multi-protocol & multi-source command-line download utility. It supports HTTP/HTTPS, FTP, SFTP, BitTorrent and Metalink. aria2 can be manipulated via built-in JSON-RPC and XML-RPC interfaces.\" - https://aria2.github.io/

      Of particular interest is the ability to download a single file from multiple sources, even using multiple protocols, to have increased download speed.
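For example, giving aria2c several URIs that point at the same file makes it treat them as mirrors and split the download across them (the mirror hostnames are illustrative):

## -s4 split the download into 4 connections, -x2 allow at most 2 per server\naria2c -s4 -x2 http://mirror1.example.com/foo.iso http://mirror2.example.com/foo.iso\n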

      "},{"location":"aria2/#examples","title":"Examples","text":""},{"location":"aria2/#download-a-file-in-place","title":"Download a file in place","text":"

      This command can be canceled and given again to resume the file download.

## -x5 allow up to 5 connections to each server\n## -c Continue a partially downloaded file (HTTP/FTP)\n## --file-allocation=none Do not pre-allocate disk space for the file (begin downloading immediately. see man page for more options.)\n## --max-overall-download-limit=3 (K = 1024, M = 1024K)\n## --max-download-limit=1M per-download speed limit\naria2c -x5 -c --file-allocation=none --max-overall-download-limit=3 --max-download-limit=1M http://example.com/foo.iso\n
      "},{"location":"aria2/#see-also","title":"See Also","text":"
      • curl
• httpstat - download and show some useful connection information
      • wget
      "},{"location":"arpwatch/","title":"arpwatch","text":"

      \"arpwatch - keep track of ethernet/ip address pairings\" - man arpwatch

      "},{"location":"arpwatch/#examples","title":"Examples","text":""},{"location":"arpwatch/#fork-and-log-to-file-not-to-e-mail","title":"Fork and log to file, not to e-mail","text":"
      arpwatch -Q\ntail -F /var/lib/arpwatch/arp.dat\n
      "},{"location":"atomicparsley/","title":"AtomicParsley","text":"

AtomicParsley is a lightweight command line program for reading, parsing and setting metadata into MPEG-4 files. It is a functional MP4 equivalent of what id3v2 is for MP3 files.

      "},{"location":"atomicparsley/#examples","title":"Examples","text":""},{"location":"atomicparsley/#set-metadata-on-multiple-files","title":"Set metadata on multiple files","text":"

Unfortunately the syntax of this tool requires you to edit one file at a time, so you have to iterate over each track of an album using shell loops, xargs, or whatever you prefer.

      for file in *.m4a ; do\n  AtomicParsley \"${file}\" --artist \"Various Artists\" ;\ndone ;\n
      "},{"location":"atomicparsley/#remove-personally-identifiable-information-pii-from-files","title":"Remove Personally Identifiable Information (pii) from files","text":"

      Useful if you want to remove your personal info from iTunes Match files.

for file in *.m4a ; do\n  AtomicParsley \\\n    \"$file\" \\\n    --DeepScan \\\n    --manualAtomRemove \"moov.trak.mdia.minf.stbl.mp4a.pinf\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.----.name:[iTunMOVI]\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.apID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.atID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.cnID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.cprt\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.flvr\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.geID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.plID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.purd\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.rtng\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.sfID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.soal\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.stik\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.xid\"\ndone\n
      "},{"location":"automotive/","title":"Automotive","text":""},{"location":"automotive/#links","title":"Links","text":"
      • Autonomous Vehicles
      • https://ciechanow.ski/internal-combustion-engine
      • http://animatedengines.com
      • Animagraffs - How a Car Engine Works
      "},{"location":"autonomous-vehicles/","title":"Autonomous Vehicle Links","text":""},{"location":"autonomous-vehicles/#terms","title":"Terms","text":"
      • ACES: Automated, Connected, Electric, Shared
      • Levels: Refers to the 6 levels of autonomous vehicles
      • SAE: Society of Automotive Engineers
      "},{"location":"autonomous-vehicles/#autonomy-levels","title":"Autonomy Levels","text":"
      • L0: No automation. Human performs all operations. Base cost.
      • L1: Driver assistance. Human performs most driving operations, system can perform steering OR speed controls. Increased cost.
      • L2: Partial automation. Human performs most driving operations, system can perform steering AND speed controls. Increased cost.
      • L3: Conditional automation. System can perform all driving operations, human must be available to intervene in complex situations. Increased cost.
      • L4: High automation. System can perform all driving operations. Complex situations do not require a human to intervene but intervention is still an option. Decreased cost.
      • L5: Full automation. System performs all driving operations. Humans are all passengers with no requirement to intervene. Decreased cost.
      "},{"location":"autonomous-vehicles/#links","title":"Links","text":"
      • https://apollo.auto
      • https://automotivelinux.org
      • https://autonomoustuff.com
      • https://www.autosar.org
      • https://avs.auto/demo/index.html
      • https://en.wikipedia.org/wiki/ISO_26262
      • https://en.wikipedia.org/wiki/Units_of_transportation_measurement
      • https://github.com/visgl/deck.gl
      • https://renovo.auto dead link, acquired by https://woven.toyota
      • https://ros.org
      • https://some-ip.com
      • https://kevinchen.co/blog/autonomous-trucking-harder-than-rideshare (2024)
      "},{"location":"avahi/","title":"Avahi","text":"

      The Avahi mDNS/DNS-SD daemon implements Multicast DNS like Apple's Zeroconf architecture (also known as \"Rendezvous\" or \"Bonjour\").

      "},{"location":"avahi/#tips","title":"Tips","text":"

      After installing avahi-daemon it may not start. To fix this you may need to run service messagebus start

      Service types are defined in /usr/share/avahi/service-types

      "},{"location":"avahi/#service-configs","title":"Service configs","text":"

      Correctly formatted and named files in /etc/avahi/services/whatever.service are loaded on the fly, no need to restart avahi-daemon. If your service doesn't immediately show up, check syslog for errors.

      <?xml version=\"1.0\" standalone='no'?><!--*-nxml-*-->\n<!DOCTYPE service-group SYSTEM \"avahi-service.dtd\">\n<service-group>\n  <name replace-wildcards=\"yes\">%h</name>\n  <service>\n    <type>_ssh._tcp</type>\n    <port>22</port>\n  </service>\n  <service>\n    <type>_http._tcp</type>\n    <port>80</port>\n  </service>\n</service-group>\n
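To confirm a service is actually being announced, you can browse the local network (this assumes the avahi-utils package, which provides avahi-browse, is installed):

## -a all service types, -t terminate after the initial dump, -r resolve names to host/port\navahi-browse -a -t -r\n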
      "},{"location":"awk/","title":"awk","text":"

      \"pattern-directed scanning and processing language\" - man awk

      "},{"location":"awk/#examples","title":"Examples","text":"

      Some of these require GNU awk.

      "},{"location":"awk/#print-the-first-column-of-a-file","title":"Print the first column of a file","text":"
awk '{print $1}' filename.txt\n
      "},{"location":"awk/#print-column-2-if-column-1-matches-a-string","title":"Print column 2 if column 1 matches a string","text":"
      ps aux | awk '$1 == \"root\" {print $2}'\n
      "},{"location":"awk/#pass-in-a-variable-and-value","title":"Pass in a variable and value","text":"
      ps | awk -v host=\"$HOSTNAME\" '{print host,$0}'\n
      "},{"location":"awk/#sort-a-file-by-line-lengths","title":"Sort a file by line lengths","text":"
      awk '{print length, $0}' testfile.txt | sort -n\n
      "},{"location":"awk/#tdl-to-csv","title":"TDL to CSV","text":"
      awk '{gsub(\"\\t\",\"\\\",\\\"\",$0); print;}' | sed 's#^#\"#;s#$#\"#;'\n
      "},{"location":"awk/#print-the-first-column-of-every-other-line","title":"Print the first column of every other line","text":"

      % is the modulus operator, which finds the remainder after an integer divide.

      awk 'NR % 2 == 0 { print $1 }'\n
      "},{"location":"awk/#print-only-even-numbered-lines","title":"Print only even numbered lines","text":"
      ls | awk 'NR % 2 == 0 { print $0 }'\n
      "},{"location":"awk/#print-only-odd-numbered-lines","title":"Print only odd numbered lines","text":"
      ls | awk 'NR % 2 != 0 { print $0 }'\n
      "},{"location":"awk/#print-even-numbered-lines-on-the-same-line-before-odd-numbered-lines","title":"Print even numbered lines on the same line before odd numbered lines","text":"
      awk '{if (NR%2==0) { print $0 \" \" prev } else { prev=$0 }}'\n
      "},{"location":"awk/#print-sum-all-the-first-columns-of-each-line-in-a-file","title":"Print sum all the first columns of each line in a file","text":"
      awk '{sum += $1} END {print sum}' filename\n
      "},{"location":"awk/#print-count-sum-and-average-of-the-first-column-of-stdin","title":"Print count, sum, and average of the first column of stdin","text":"
      for _ in {1..100} ; do echo $((RANDOM % 100)) ; done |\nawk '{sum += $1} END {avg = sum/NR ; printf \"Count:   %s\\nSum:     %s\\nAverage: %s\\n\", NR, sum, avg}'\n
      "},{"location":"awk/#split-file-by-recurring-string","title":"Split file by recurring string","text":"

      This will create a new file every time the string \"SERVER\" is found, essentially splitting the file by that string. Concatenating all of the output files would create the original file (potentially adding an extra newline).

      awk '/SERVER/{n++}{print >\"out\" sprintf(\"%02d\", n) \".txt\" }' example.txt\n
      "},{"location":"awk/#show-count-of-syslog-messages-per-minute","title":"Show count of syslog messages per minute","text":"
awk -F: '{print $1 \":\" $2}' /var/log/messages | uniq -c\n
      "},{"location":"awk/#show-count-of-root-logins-per-minute","title":"Show count of root logins per minute","text":"
      awk -F: '/root/{print $1 \":\" $2}' /var/log/auth.log |uniq -c\n
      "},{"location":"awk/#print-lines-in-ls-where-uid-is-numeric","title":"Print lines in ls where UID is numeric","text":"
      ls -la | awk '$3 ~/[0-9]/{print}'\n
      "},{"location":"awk/#show-only-zfs-snapshots-whose-size-is-zero","title":"Show only zfs snapshots whose size is zero","text":"
      zfs list -t snapshot | awk '$2 == 0'\n
      "},{"location":"awk/#print-a-line-if-the-third-field-does-not-match-a-regex","title":"Print a line if the third field does not match a regex","text":"
      echo {100..200} | fold -w 12 | awk '$3 !~ /[13579]$/ {print}'\n
      "},{"location":"awk/#show-500-errors-in-a-standard-apache-access-log","title":"Show 500 errors in a standard apache access log","text":"
      awk '$9 ~ /5[0-9][0-9]/' access.log\n
      "},{"location":"awk/#show-total-rss-and-vsz-count-for-all-cronolog-processes","title":"Show total rss and vsz count for all cronolog processes","text":"
      ps aux |\n  grep -i cronolo[g] |\n  awk '{vsz += $5; rss += $6} END {print \"vsz total = \"vsz ; print \"rss total = \"rss}'\n
      "},{"location":"awk/#get-ipv4-address-on-bsdosx","title":"Get IPv4 address on BSD/OSX","text":"
      ifconfig | awk '$1 == \"inet\" && $2 != \"127.0.0.1\" {print $2}'\n
      "},{"location":"awk/#get-ipv6-address-on-bsdosx","title":"Get IPv6 address on BSD/OSX","text":"
      ifconfig | awk '$1 == \"inet6\" && $2 !~ \"::1|.*lo\" {print $2}'\n
      "},{"location":"awk/#print-the-last-element","title":"Print the last element","text":"
      ls -la | awk -F\" \" '{print $NF}'\n
      "},{"location":"awk/#print-2nd-to-last-element","title":"Print 2nd to last element","text":"
      ls -la | awk -F\" \" '{print $(NF - 1)}'\n
      "},{"location":"awk/#print-the-previous-line-on-string-match","title":"Print the previous line on string match","text":"

      This works by storing the previous line. If the current line matches the regex, the previous line is printed from the stored value.

      $ awk '/32 host/ { print previous_line } {previous_line=$0}' /proc/net/fib_trie | column -t | sort -u\n|--  10.134.243.137\n|--  127.0.0.1\n|--  169.50.9.172\n
      "},{"location":"awk/#add-content-to-line-1-if-there-is-no-match","title":"Add content to line 1 if there is no match","text":"

      This adds a yaml document separator to the beginning of all yaml files in the current directory only if it does not already have one.

      tempfile=$(mktemp)\nfor file in ./*.yaml ; do\n  awk 'NR == 1 && $0 != \"---\" {print \"---\"} {print}' \"${file}\" > \"${tempfile}\" \\\n  && mv \"${tempfile}\" \"${file}\"\ndone\n
      "},{"location":"awk/#show-all-docker-images-in-a-helm-chart-and-their-https-links","title":"Show all docker images in a helm chart and their https links","text":"
      helm template . --set global.baseDomain=foo.com -f /Users/danielh/a/google-environments/prod/cloud/app/config.yaml 2>/dev/null |\nawk '/image: / {match($2, /(([^\"]*):[^\"]*)/, a) ; printf \"https://%s %s\\n\", a[2], a[1] ;}' |\nsort -u |\ncolumn -t\n

      A less complicated awk form of this that uses other shell commands would be

      helm template . --set global.baseDomain=foo.com -f /Users/danielh/a/google-environments/prod/cloud/app/config.yaml 2>/dev/null |\ngrep 'image: ' |\nawk '{print $2}' |\nsed 's/\"//g' |\nsed 's/\\(\\(.*\\):.*\\)/https:\\/\\/\\2 \\1/' |\nsort -u |\ncolumn -t\n

So it really depends on where you want to put your complexity, how performant you want to be, and how readable you want it to be. These both produce identical output, but some people find it easier to read shorter commands with simpler syntaxes, which is great for maintainability when performance is not an issue.

      https://quay.io/astronomer/ap-alertmanager  quay.io/astronomer/ap-alertmanager:0.23.0\nhttps://quay.io/astronomer/ap-astro-ui      quay.io/astronomer/ap-astro-ui:0.25.4\nhttps://quay.io/astronomer/ap-base          quay.io/astronomer/ap-base:3.14.2\nhttps://quay.io/astronomer/ap-cli-install   quay.io/astronomer/ap-cli-install:0.25.2\n...snip...\n
      "},{"location":"awk/#show-a-list-of-dns-hostname-queries-with-domain-stripped-sorted-by-hostname-length","title":"Show a list of dns hostname queries with domain stripped, sorted by hostname length","text":"

      This samples 100k dns queries, strips off all the domain names in the queried hostname, and prints the length of that first component of the FQDN (the bare hostname) along with the bare hostname itself, and shows the longest 25 entries.

      tcpdump -c 100000 -l -n -e dst port 53 |\nawk '$14 == \"A?\" {gsub(/\\..*/, \"\", $15) ; print(length($15), $15) ; fflush(\"/dev/stdout\") ;}' |\nsort -u |\nsort -n |\ntail -n 25\n

Run this on your kube-dns nodes to see how close you're getting to the 63 character limit. You will never see errors though, because any name with a component longer than 63 characters is never sent over the wire, so you'll need to check your logs for those. A good string to search for is \"63 characters\".
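For example, on a cluster where kube-dns runs in kube-system with the conventional k8s-app label, something like this might surface them (the namespace and label are assumptions about your cluster):

kubectl logs -n kube-system -l k8s-app=kube-dns --tail=-1 | grep '63 characters'\n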

      "},{"location":"awk/#see-also","title":"See Also","text":"
      • https://www.grymoire.com/Unix/Awk.html
      • https://github.com/TheMozg/awk-raycaster: An FPS written in awk
      "},{"location":"awless/","title":"awless","text":"

      \"A Mighty CLI for AWS\" - https://github.com/wallix/awless

      "},{"location":"awless/#examples","title":"Examples","text":"
      • Example templates - https://github.com/wallix/awless-templates

      A lot of these syntax examples can be found by issuing the command, verb and entity but no parameters. Such as awless create stack, which will drop you into a prompt series to complete the necessary and optional parameters.

      "},{"location":"awless/#list-ec2-instances-sorted-by-uptime","title":"List ec2 instances sorted by uptime","text":"
      $ awless list instances --sort=uptime\n|         ID          |    ZONE    |           NAME          |  STATE  |    TYPE    | PUBLIC IP |   PRIVATE IP  | UPTIME \u25b2 | KEYPAIR |\n|---------------------|------------|-------------------------|---------|------------|-----------|---------------|----------|---------|\n| i-050ad501b33c6ad07 | us-west-1a | faruko-nal              | running | m4.xlarge  |           | 172.19.15.172 | 85 mins  | foo-ops |\n| i-5b381e9b          | us-west-1b | planted-collector11.foo | running | m4.xlarge  |           | 172.27.26.159 | 6 days   | foo-ops |\n| i-04ced9880586c009b | us-west-1a | hadoop07.foo            | running | m4.4xlarge |           | 172.27.37.100 | 8 days   | foo-ops |\n| i-0e583dcd3bc2444d8 | us-west-1a | db-na-historical06.foo  | running | m2.4xlarge |           | 172.19.48.79  | 12 days  | foo-ops |\n
      "},{"location":"awless/#sum-the-amount-of-unattached-disks-in-your-environment","title":"Sum the amount of unattached disks in your environment","text":"
      awless list volumes \\\n    --filter state=available \\\n    --format json |\n  jq .[].Size |\n  awk '{sum += $1 ; count += 1 ;} END {print sum \"G in \" count \" volumes\"}'\n
      "},{"location":"awless/#switch-to-a-different-aws-profile","title":"Switch to a different AWS profile","text":"

      This uses the ~/.aws/credentials file for its profiles

      Short way:

      awless switch prod\n

      Long way:

      awless config set aws.profile prod\n
      "},{"location":"awless/#customize-output-columns","title":"Customize output columns","text":"
      awless list instances --columns name,type,launched\n
      "},{"location":"awless/#add-a-user-to-a-group","title":"Add a user to a group","text":"
      awless \\\n  --aws-profile govcloud \\\n  --aws-region us-gov-west-1 \\\n  attach user \\\n  group=SystemAdministrators \\\n  name=SpaceGhost\n
      "},{"location":"awless/#create-an-access-key-for-a-user","title":"Create an access key for a user","text":"

      This creates an access key and saves it in ~/.aws/credentials

      awless \\\n  --aws-profile govcloud \\\n  --aws-region us-gov-west-1 \\\n  create accesskey \\\n  user=SpaceGhost \\\n  save=true\n
      "},{"location":"awless/#create-a-tag","title":"Create a tag","text":"
      awless create tag key=test_tag resource=i-9ba90158 value=true\n
      "},{"location":"awless/#delete-a-tag","title":"Delete a tag","text":"
      awless delete tag key=test_tag_dhoherd resource=i-9ba90158\n
      "},{"location":"awless/#create-an-instance","title":"Create an instance","text":"
      awless create instance \\\n  count=1 \\\n  image=ami-5ab82fa8 \\\n  keypair=ops \\\n  name=new-hostname \\\n  securitygroup=[sg-c4321fd1,sg-c4321cb0] \\\n  subnet=subnet-c4321c33 \\\n  type=t2.medium\n
      "},{"location":"awless/#see-also","title":"See also","text":"
      • amazon
      • aws-cloudformation
      • awscli
      "},{"location":"aws-cloudformation/","title":"Amazon AWS Cloudformation","text":"

      \"AWS CloudFormation is a service that helps you model and set up your Amazon Web Services resources so that you can spend less time managing those resources and more time focusing on your applications that run in AWS.\" - http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html

      "},{"location":"aws-cloudformation/#links","title":"Links","text":"
      • http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/best-practices.html
• CloudFormer - a beta tool that creates an AWS CloudFormation template from existing AWS resources in your account. You select any supported AWS resources that are running in your account, and CloudFormer creates a template in an Amazon S3 bucket.
• Sceptre - Sceptre is a tool to drive CloudFormation. Sceptre manages the creation, updating, and deletion of stacks, and provides meta commands that let users get information about their stacks.
      • rain - A development workflow tool for working with AWS CloudFormation. (See also its other tools section)
      • https://www.singlestoneconsulting.com/blog/cloudformation-mapping-and-conditionals-making-your-templates-more-universal
      • https://sbstjn.com/blog/cloudformation - CloudFormation Best-Practices
      "},{"location":"aws-cloudformation/#examples","title":"Examples","text":""},{"location":"aws-cloudformation/#import-cloudformation-stack-entities-into-datasette","title":"Import cloudformation stack entities into Datasette","text":"
      aws cloudformation list-stack-resources --stack-name \"$STACK_NAME\" --region \"$REGION\"  |\njq -c '.[]' |\nsqlite-utils insert datasette.db stack -\n
      "},{"location":"awscli/","title":"Amazon awscli","text":"

      Official Amazon AWS command-line interface - https://aws.amazon.com/cli

      "},{"location":"awscli/#example-usage","title":"Example usage","text":""},{"location":"awscli/#show-subnets-for-a-particular-region-and-account","title":"Show subnets for a particular region and account","text":"
      aws --profile=dev --region=us-west-2 ec2 describe-subnets\n
      "},{"location":"awscli/#see-also","title":"See Also","text":"
      • Amazon
      • awless
      "},{"location":"backups/","title":"Backups","text":"

      Notes about backing up data.

      "},{"location":"backups/#links","title":"Links","text":"
      • http://duplicity.nongnu.org
      • https://www.nixtutor.com/linux/off-site-encrypted-backups-using-rsync-and-aes
      • http://www.nongnu.org/rdiff-backup
      "},{"location":"badblocks/","title":"badblocks","text":"

      badblocks is a program to test storage devices for bad blocks. - https://wiki.archlinux.org/index.php/badblocks

      "},{"location":"badblocks/#examples","title":"Examples","text":""},{"location":"badblocks/#destroy-all-data-on-a-disk-while-logging-bad-blocks","title":"Destroy all data on a disk while logging bad blocks","text":"
      ## -v verbose output writes error info to stderr\n## -s show scan progress, including percent complete, time elapsed, and error count\n## -w destructive write test, vs -n (nondestructive read/write test)\n## -b 4096 byte blocks\n## -t random test pattern\n## -o output file containing list of bad blocks, which can be passed back to badblocks, fsck or mke2fs\nbadblocks -v -s -w -b 4096 -t random -o ~/sdc.txt /dev/sdc\n
      "},{"location":"badblocks/#see-also","title":"See also","text":"
      • dcfldd
      • dd
      • ddrescue
      • pv
      "},{"location":"bash/","title":"GNU bash","text":"

      Bash is one of the most common mainstream unix shells.

      "},{"location":"bash/#tips-and-usage-examples","title":"Tips and Usage Examples","text":""},{"location":"bash/#navigating-on-the-command-line","title":"Navigating on the command line","text":"

Some of these are terminal settings that can be seen by running stty -a; others are readline bindings, which you can list with bind -p.

      • ctrl-a - move cursor to the beginning of the line
      • ctrl-e - move cursor to the end of the line
      • ctrl-l - do a \"clear\" on the terminal window
      • ctrl-r - reverse history command search
      • ctrl-t - get status of foreground process
      • ctrl-w - delete previous word
      "},{"location":"bash/#view-a-list-of-all-commands-etc","title":"View a list of all commands, etc..","text":"
      • compgen -b will list all the built-ins you could run.
      • compgen -a will list all the aliases you could run.
      • compgen -c will list all the commands you could run.
      • compgen -k will list all the keywords you could run.
      • compgen -A function will list all the functions you could run.
      • compgen -back will list all the above in one go.
      "},{"location":"bash/#remove-leading-zeroes","title":"Remove leading zeroes","text":"

This method converts the numbers from base-10 to base-10, which has the side effect of removing leading zeroes. You can also use this syntax to convert from other base systems.

      for X in 00{1..20..2} ; do\n  echo \"$X = $(( 10#${X} ))\"\ndone\n

      Or use bc, a CLI calculator...

      for X in {1..50..5} ; do\n  Y=00${X}\n  echo \"${X} with zeroes is ${Y} and removed with bc is $(echo ${Y} | bc)\"\ndone ;\n
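
The same base#value arithmetic syntax converts from any base from 2 to 64, so for example octal and binary work too:

echo $(( 8#755 ))   # prints 493\necho $(( 2#1010 ))  # prints 10\n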
      "},{"location":"bash/#print-several-files-side-by-side","title":"Print several files side by side","text":"
      printf \"%s\\n\" {a..z} > alpha.txt\nprintf \"%s\\n\" {1..26} > num.txt\npr -w 10 -t -m alpha.txt num.txt\n

      The following output will be printed:

      a    1\nb    2\nc    3\nd    4\ne    5\nf    6\ng    7\nh    8\ni    9\nj    10\nk    11\nl    12\nm    13\nn    14\no    15\np    16\nq    17\nr    18\ns    19\nt    20\nu    21\nv    22\nw    23\nx    24\ny    25\nz    26\n
      "},{"location":"bash/#convert-base-36-to-decimal","title":"Convert base 36 to decimal","text":"

      This converts the base 36 number z to a decimal value

      echo $((36#z))\n
      "},{"location":"bash/#run-a-command-for-5-seconds-then-kill-it","title":"Run a command for 5 seconds, then kill it","text":"
ping -f localhost & sleep 5 ; kill %1\n

      Alternatively, use the timeout command if it's available. In macOS this can be installed through brew install coreutils and accessed with gtimeout.

      timeout 300 cmd\n
      "},{"location":"bash/#test-if-a-variable-is-empty","title":"Test if a variable is empty","text":"
      if [[ -z \"$var\" ]]\n
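
The complementary test is -n, which is true when the variable is set and non-empty:

if [[ -n \"$var\" ]] ; then echo \"var is set and not empty\" ; fi\n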
      "},{"location":"bash/#date","title":"Date","text":"

      For date stuff, see date, because it differs by platform.

      "},{"location":"bash/#show-random-statistics","title":"Show RANDOM statistics","text":"
      for X in {0..9999} ; do\n  echo $(($RANDOM % 5)) ;\ndone |\nsort |\nuniq -c\n
      "},{"location":"bash/#named-pipes","title":"named pipes","text":"
      mkfifo baz ; ps aux > baz\n

      then, in another terminal

      cat baz\n
      "},{"location":"bash/#alternate-redirection-outputs","title":"alternate redirection outputs","text":"
      exec 3> /tmp/baz ; ps aux >&3 # sends the output of ps aux to /tmp/baz\n
      "},{"location":"bash/#redirect-all-output-of-a-script-into-a-file","title":"Redirect all output of a script into a file","text":"

      This is not bash specific, but works in bash.

#!/usr/bin/env bash\n\nexec >> /tmp/$0.log\nexec 2>&1\n\ndate \"+%F %T%z $0 This is stdout, and will be written to the log\"\ndate \"+%F %T%z $0 This is stderr, and will also be written to the log\" >&2\n
      "},{"location":"bash/#show-size-of-each-users-home-folder","title":"Show size of each user's home folder","text":"
      getent passwd |\nwhile IFS=: read -r user _ uid _ _ home _ ; do\n  if [[ $uid -ge 500 ]] ; then\n    printf \"$user \" ;\n    sudo du -sh $home ;\n  fi ;\ndone\n
      "},{"location":"bash/#previous-commands-args","title":"Previous command's args","text":"
      mkdir temp ; cd !!:*\n

      Be aware of the location of the tokens. For example:

      mkdir -p {foo,bar}/{a,b,c}\nstat !!:*\n

This creates a problem because you can't stat -p, so you must use stat !!:2* to skip the -p token.
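
Some related history-expansion shorthands are handy here: !$ expands to the last word of the previous command, !^ to the first argument, and !* to all arguments. For example:

cp /some/long/path/file.txt /tmp/\nls -l !$   # expands to: ls -l /tmp/\n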

      "},{"location":"bash/#debug-a-script","title":"Debug a script","text":"

      This will show everything bash is executing

      bash -x scriptname.sh\n

      Or debug with a function:

function debug {\n  if [ \"${debug:-0}\" -gt 0 ] ; then\n    echo \"$@\" >&2\n  fi\n}\n
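
With that function defined, enable the output by setting the debug variable when you invoke the script:

debug=1 ./scriptname.sh\n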
      "},{"location":"bash/#debug-nested-scripts","title":"Debug nested scripts","text":"
      PS4=\"+(\\${BASH_SOURCE}:\\${LINENO}): \\${FUNCNAME[0]:+\\${FUNCNAME[0]}(): }\" bash -x some-command\n
      "},{"location":"bash/#find-where-all-the-inodes-are","title":"Find where all the inodes are","text":"
      find ~/ -type d -print0 |\nxargs -I %% -0 bash -c \"echo -n %% ; ls -a '%%' | wc -l\" >> ~/inodes.txt\n
      "},{"location":"bash/#build-and-print-an-array","title":"Build and print an array","text":"
      array=(\"one is the first element\");\narray+=(\"two is the second element\" \"three is the third\");\necho \"${array[@]}\"\n

      This is useful for building command line strings. For example, gpsbabel requires each input file to be prepended with -f. The following script takes a list of files and uses a bash array to create a command line in the form of gpsbabel -i gpx -f input_file_1.gpx -f input_file_2.gpx -o gpx -F output.gpx

#!/usr/bin/env bash\n\n## Check for at least two arguments, print usage if fail\nif [ $# -lt 2 ] ; then\n    echo \"This script merges gpx files and requires at least two gpx files passed as arguments. Output is output.gpx\";\n    echo \"Usage:    $0 <gpx file> <gpx file> [...<gpx file>]\";\n    exit 1;\nfi\n\n## Create an array of arguments to pass to gpsbabel\nargs=();\nfor item in \"$@\" ; do\n    if [ -f \"$item\" ] || [ -h \"$item\" ] ; then\n        args+=( \"-f\" \"$item\" );\n    else\n        echo \"Skipping $item, it's not a file or symlink.\"\n    fi\ndone;\n\n## Verify we have at least two files to work with\nif [ \"${#args[@]}\" -lt 4 ] ; then\n    echo \"We don't have enough actual files to work with. Exiting.\"\n    exit 1\nfi\n\ngpsbabel -i gpx \"${args[@]}\" -o gpx -F output.gpx\n
      "},{"location":"bash/#build-and-print-an-associative-array-dict-hash","title":"Build and print an associative array (dict, hash)","text":"
      declare -A animals=(\n  [\"cow\"]=\"moo\"\n  [\"dog\"]=\"woof woof\"\n  [\"cat\"]=\"meow\"\n) ;\nfor animal in \"${!animals[@]}\" ; do\n  echo \"The $animal says '${animals[$animal]}'\" ;\ndone ;\n
      "},{"location":"bash/#show-permissions-in-rwx-and-octal-format","title":"Show permissions in rwx and octal format","text":"

      Linux:

      stat -c '%A %a %n' filename\n

      OSX:

      stat -f '%A %N' filename\n

      See stat for more stat usage.

      "},{"location":"bash/#find-the-length-of-a-variable","title":"Find the length of a variable","text":"
      echo ${#SHELL}\n
      "},{"location":"bash/#print-all-variables-that-start-with-the-substring-sh","title":"Print all variables that start with the substring SH","text":"
      echo \"${!SH@}\"\n
      "},{"location":"bash/#tertiary-type-variables","title":"Tertiary type variables","text":"
${V:-empty}  # return the value of $V, or the string 'empty' if $V isn't set\n
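
Related parameter expansions follow the same pattern; these are all standard bash behaviors:

${V:=default}        # like :- but also assigns 'default' to V if V is unset or empty\n${V:+alternate}      # returns 'alternate' only if V IS set and non-empty\n${V:?error message}  # aborts with 'error message' if V is unset or empty\n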
      "},{"location":"bash/#do-a-command-and-if-it-returns-false-so-some-more-stuff","title":"Do a command, and if it returns false, so some more stuff","text":"
      until command_that_will_fail ; do something_else ; done ;\n
      "},{"location":"bash/#print-two-digit-months","title":"Print two digit months","text":"

echo {01..12} zero-pads in bash 4 and later, but not in bash 3 (the default shell on macOS). Where that doesn't work, use echo $(seq -w 1 12)

      "},{"location":"bash/#get-filename-extension-or-path","title":"Get filename, extension or path","text":"

      Taken from http://mywiki.wooledge.org/BashFAQ/073

      "},{"location":"bash/#rename-files-to-a-sequence-and-change-their-extension-at-the-same-time","title":"Rename files to a sequence and change their extension at the same time","text":"
      ls | while read -r line ; do\n  stub=${line%.*} ;\n  (( i += 1 )) ;\n  mv \"${line}\" \"${i}-${stub}.txt3\" ;\ndone ;\n
      FullPath=/path/to/name4afile-00809.ext   # result:   #   /path/to/name4afile-00809.ext\nFilename=${FullPath##*/}                             #   name4afile-00809.ext\nPathPref=${FullPath%\"$Filename\"}                     #   /path/to/\nFileStub=${Filename%.*}                              #   name4afile-00809\nFileExt=${Filename#\"$FileStub\"}                      #   .ext\n
      "},{"location":"bash/#sort-a-line-by-spaces","title":"Sort a line by spaces","text":"
s=( whiskey tango foxtrot )\nsorted=$(printf \"%s\\n\" \"${s[@]}\" | sort)\necho $sorted\n
      "},{"location":"bash/#calculate-the-difference-between-two-dates","title":"Calculate the difference between two dates","text":"
      echo $(( $(gdate +%s -d 20120203) - $(gdate +%s -d 20120115) ))\n
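
Dividing by 86400 converts the difference from seconds to days:

echo $(( ( $(gdate +%s -d 20120203) - $(gdate +%s -d 20120115) ) / 86400 ))  # prints 19\n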
      "},{"location":"bash/#substring-replace-a-variable","title":"substring replace a variable","text":"

      This is not regex, just a simple string replacement.

      ## ${VAR/search/replace} does only the first\n## ${VAR//search/replace} does all replacements\necho \"Paths in your path: ${PATH//:/ }\"\n
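
Anchored variants replace only at the start or the end of the value:

f=foo.tar.gz\necho \"${f/#foo/bar}\"       # bar.tar.gz - replaces only a leading match\necho \"${f/%.tar.gz/.tgz}\"  # foo.tgz - replaces only a trailing match\n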
      "},{"location":"bash/#subtract-two-from-a-mac-address","title":"Subtract two from a MAC address","text":"
      ## printf -v defines a variable instead of printing to stdout\nprintf -v dec \"%d\" 0x$(echo 00:25:9c:52:1c:2a | sed 's/://g') ;\nlet dec=${dec}-2 ;\nprintf \"%012X\" ${dec} \\\n| sed -E 's/(..)(..)(..)(..)(..)(..)/\\1:\\2:\\3:\\4:\\5:\\6/g'\n
      "},{"location":"bash/#print-the-last-for-chars-of-a-variable","title":"Print the last for chars of a variable","text":"
• echo ${foo:$((${#foo}-4))}
• echo ${foo: -4} - the space is necessary to prevent it from doing a completely different thing (see the next example).
      "},{"location":"bash/#dereference-a-variable","title":"Dereference a variable","text":"
      $ for var in ${!BASH_V*} ; do echo \"${var}: ${!var}\" ; done ;\nBASH_VERSINFO: 5\nBASH_VERSION: 5.0.7(1)-release\n
      "},{"location":"bash/#print-something-else-if-a-variable-doesnt-exist","title":"Print something else if a variable doesn't exist","text":"
      • echo ${foo:-foo isn't assigned}
      • echo ${foo:-${bar}}

      This can even be recursively done...

      • echo ${foo:-${bar:-foo and bar are not assigned}}
      "},{"location":"bash/#print-every-third-number-starting-with-1-and-ending-with-30","title":"Print every third number starting with 1 and ending with 30","text":"

      echo {1..30..3}

      "},{"location":"bash/#print-every-5th-letter-of-the-alphabet","title":"Print every 5th letter of the alphabet","text":"

      echo {a..z..5}

      "},{"location":"bash/#process-all-lines-but-print-out-status-about-what-line-we-are-on-every-nth-line","title":"Process all lines, but print out status about what line we are on every Nth line","text":"

Sometimes during a series of long-running jobs you want to see where you are in the list, or at least some indication that things have not stalled. When ctrl-t is not available (and even when it is), this pattern can help you verify that things are still moving along.

      N=0\nfind \"/usr/bin\" -type f |\nwhile read -r X ; do\n  N=$((N + 1))\n  [[ \"$((N % 50))\" -eq 0 ]] && date \"+%F %T file number $N $X\" >&2\n  shasum -a 512 \"${X}\" >> ~/usr_bin_shasums.txt\ndone\n

      Example terminal output from the above command, while all shasum output goes into ~/usr_bin_shasums.txt:

      $ find \"/usr/bin\" -type f |\n> while read -r X ; do\n>   N=$((N + 1))\n>   [[ \"$((N % 50))\" -eq 0 ]] && date \"+%F %T file number $N $X\" >&2\n>   shasum -a 512 \"${X}\" >> ~/usr_bin_shasums.txt\n> done\n2018-02-24 15:30:29 file number 50 /usr/bin/toe\n2018-02-24 15:30:30 file number 100 /usr/bin/db_hotbackup\n2018-02-24 15:30:32 file number 150 /usr/bin/host\n2018-02-24 15:30:33 file number 200 /usr/bin/groffer\n2018-02-24 15:30:35 file number 250 /usr/bin/mail\n2018-02-24 15:30:36 file number 300 /usr/bin/dbicadmin\n2018-02-24 15:30:38 file number 350 /usr/bin/fwkpfv\n2018-02-24 15:30:39 file number 400 /usr/bin/tab2space\n
      "},{"location":"bash/#make-a-directory-structure-of-every-combination-of-adjectivenoun","title":"Make a directory structure of every combination of /adjective/noun","text":"

      mkdir -p {red,green,blue}/{fish,bird,flower}

      "},{"location":"bash/#generate-a-zero-padded-random-2-byte-hex-number","title":"Generate a zero padded random 2 byte hex number","text":"

      printf \"%02X\\n\" $((RANDOM % 256))

      "},{"location":"bash/#grep-many-log-files-and-sort-output-by-date","title":"grep many log files and sort output by date","text":"
      sudo grep cron /var/log/* |\nsed 's/:/ /' |\nwhile read file month day hour line ; do\n  date -d \"$month $day $hour\" \"+%F %T%z ${file} ${line}\" ;\ndone |\nsort\n
      "},{"location":"bash/#get-command-line-switches","title":"Get command line switches","text":"

      From the docs

      • If a character is followed by a colon, the option is expected to have an argument.
      • If the first character of optstring is a colon, silent error reporting is used.
      while getopts p:l:t: opt; do\n  case $opt in\n    p) pages=$OPTARG ;;\n    l) length=$OPTARG ;;\n    t) time=$OPTARG ;;\n  esac\ndone\n\nshift $((OPTIND - 1))\necho \"pages is ${pages}\"\necho \"length is ${length}\"\necho \"time is ${time}\"\necho \"\\$1 is $1\"\necho \"\\$2 is $2\"\n

      Call this script as ./foo.sh -p \"this is p\" -l llll -t this\\ is\\ t foo bar

      "},{"location":"bash/#unexpected-code-execution","title":"Unexpected code execution","text":"

      When using numeric comparison operators that use array syntax, code that determines the array index is executed:

      $ rm -f pwnd ; [[ -v '$(echo hello > pwnd)' ]] ; cat pwnd ; # does not use array syntax\ncat: pwnd: No such file or directory\n$ rm -f pwnd ; [[ -v 'x[$(echo hello > pwnd)]' ]] ; cat pwnd ; # uses array syntax\nhello\n

      This also happens with -eq

      $ rm -f pwnd ; [[ 0 -eq 'x$(echo hello > pwnd)' ]] ; cat pwnd ; # does not use array syntax\n-bash: [[: x$(echo hello > pwnd): syntax error: invalid arithmetic operator (error token is \"$(echo hello > pwnd)\")\ncat: pwnd: No such file or directory\n$ rm -f pwnd ; [[ 0 -eq 'x[$(echo hello > pwnd)]' ]] ; cat pwnd ; # uses array syntax\nhello\n

      Via https://yossarian.net/til/post/some-surprising-code-execution-sources-in-bash

      "},{"location":"bash/#files","title":"Files","text":"

      These files can change the behavior of bash.

      "},{"location":"bash/#bash_profile","title":".bash_profile","text":"

      ~/.bash_profile is executed every time you log into the system or initiate a login shell. Inclusion of things that write to stdout is allowed here.

      If you want to write scripts that change your interactive shell environment, such as changing your CWD, define functions here instead of using stand-alone scripts.

      "},{"location":"bash/#example-bash_profile","title":"Example .bash_profile","text":"

      The ~/.bash_profile file can be quite long and complicated. The following example is an incomplete sample:

      export EDITOR=/usr/bin/vim\nexport GZIP='-9'\nexport HISTSIZE=5000\nexport HISTTIMEFORMAT='%F %T%z '\nexport PS1=\"\\u@\\h:\\w$ \"\nexport TERM=xterm-256color\nexport TMOUT=\"1800\"  # log out after this many seconds of shell inactivity\n\nalias ll='ls -la'\nalias temp='date_F=$(date +%F) ; mkdir -p ~/temp/$date_F 2>/dev/null ; cd ~/temp/$date_F'\n\nsprunge() { curl -F 'sprunge=<-' http://sprunge.us < \"${1:-/dev/stdin}\"; } # usage: sprunge FILE # or some_command | sprunge\n\n## Don't record some commands\nexport HISTIGNORE=\"&:[ ]*:exit:ls:bg:fg:history:clear\"\n\n## Avoid duplicate entries\nHISTCONTROL=\"erasedups:ignoreboth\"\n\n## Perform file completion in a case insensitive fashion\nbind \"set completion-ignore-case on\"\n
      "},{"location":"bash/#bashrc","title":".bashrc","text":"

      ~/.bashrc is executed every time you open a sub-shell. It should not output any text, otherwise certain things (eg: scp) will fail.

      "},{"location":"bash/#inputrc","title":"~/.inputrc","text":"

      This file defines some bash behaviors. It also affects some other tools.

      ## Ignore case while completing\nset completion-ignore-case on\n
      "},{"location":"bash/#links","title":"Links","text":"
      • Command Line Quicksheet: http://www.pixelbeat.org/cmdline.html
      • Tons of BASH examples: http://mywiki.wooledge.org/BashFAQ
      • Bash Manual: Bash Variables
      • Bash pitfalls: http://mywiki.wooledge.org/BashPitfalls
      • Bash prompt howto, including colors: http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/x329.html
      • Bash Automated Test System
      • http://www.kfirlavi.com/blog/2012/11/14/defensive-bash-programming/
      • https://google.github.io/styleguide/shellguide.html
      • https://www.gnu.org/software/bash/manual/html_node/Bash-Variables.html
      • https://www.shellcheck.net/: Use this cli tool to help you write awesome shell scripts.
      • https://www.pcwdld.com/bash-cheat-sheet: Cheatsheet with some interesting examples not covered in many other resources.
      • https://kapeli.com/cheat_sheets/Bash_Test_Operators.docset/Contents/Resources/Documents/index
      • https://utcc.utoronto.ca/~cks/space/blog/unix/DropShellCommandHashing
      "},{"location":"bbcp/","title":"bbcp","text":"

      \"Securely and quickly copy data from source to target.\" - https://www.slac.stanford.edu/~abh/bbcp/

This is a useful tool for copying files. Notably, it gets around some bandwidth limitations of nc that I ran into when trying to copy one large file across an 80 Gbps network.

      "},{"location":"bc/","title":"GNU bc","text":"

      bc is a tool that does math on the CLI.

      "},{"location":"bc/#examples","title":"Examples","text":""},{"location":"bc/#divide-one-number-into-another-and-show-two-decimal-places","title":"Divide one number into another and show two decimal places","text":"

The scale variable sets the number of digits after the decimal point.

      echo \"scale=2 ; 7 / 3\" | bc

      "},{"location":"bc/#convert-decimal-to-hexadecimal","title":"Convert decimal to hexadecimal","text":"

      echo \"obase=16 ; 10\" | bc

      "},{"location":"bc/#convert-hexadecimal-to-binary","title":"Convert hexadecimal to binary","text":"

      echo \"ibase=16 ; obase=2 ; AF\" | bc

      "},{"location":"bc/#subtract-two-from-the-last-octet-of-a-mac-address","title":"Subtract two from the last octet of a MAC address","text":"
      echo 24:b6:fd:ff:ba:31 |\nwhile read -r X ; do\n  echo ${X%??}$(\n    echo \"obase=16 ; $(( 0x${X#*:??:??:??:??:} )) - 2\" |\n      bc |\n      sed 's/^\\(.\\)$/0\\1/' |\n      tr A-Z a-z\n  ) ;\ndone ;\n
      "},{"location":"bind/","title":"BIND","text":"

      BIND, or named, is the most widely used Domain Name System (DNS) software on the Internet.

      • https://www.isc.org/downloads/bind/doc/
      • https://en.wikipedia.org/wiki/BIND
      "},{"location":"bind/#flush-records","title":"Flush records","text":""},{"location":"bind/#flush-a-single-record","title":"Flush a single record","text":"
      rndc flushname github.com\n
      "},{"location":"bind/#flush-all-records","title":"Flush all records","text":"
      rndc flush\n
      "},{"location":"blkid/","title":"blkid","text":"

      \"The blkid program is the command-line interface to working with the libblkid(3) library. It can determine the type of content (e.g. filesystem or swap) that a block device holds, and also attributes (tokens, NAME=value pairs) from the content metadata (e.g. LABEL or UUID fields). blkid has two main forms of operation: either searching for a device with a specific NAME=value pair, or displaying NAME=value pairs for one or more specified devices.\" - man blkid

      "},{"location":"blkid/#examples","title":"Examples","text":""},{"location":"blkid/#simple-usage","title":"Simple usage","text":"

      Here is the output of blkid on an Ubuntu 16.04 Vagrant box:

      $ blkid\n/dev/sda1: LABEL=\"cloudimg-rootfs\" UUID=\"743b1402-d445-494c-af0b-749040bb33e4\" TYPE=\"ext4\" PARTUUID=\"95a4c157-01\"\n/dev/sdb: UUID=\"2017-12-12-14-38-00-00\" LABEL=\"cidata\" TYPE=\"iso9660\"\n
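
blkid can also look up a device by a NAME=value token such as LABEL or UUID, using the values from the output above:

blkid -L cloudimg-rootfs                       # prints /dev/sda1\nblkid -U 743b1402-d445-494c-af0b-749040bb33e4  # prints /dev/sda1\n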
      "},{"location":"blkid/#see-also","title":"See Also","text":"
      • findmnt
      • lsblk
      "},{"location":"bluetooth/","title":"bluetooth","text":""},{"location":"bluetooth/#examples","title":"Examples","text":""},{"location":"bluetooth/#linux-software","title":"Linux software","text":"
      • bluetoothctl: pair, connect, get info on bluetooth devices
      • gatttool
      "},{"location":"bpf/","title":"bpf","text":"

      \"Linux Socket Filtering (LSF) is derived from the Berkeley Packet Filter. Though there are some distinct differences between the BSD and Linux Kernel filtering, but when we speak of BPF or LSF in Linux context, we mean the very same mechanism of filtering in the Linux kernel.\"

      • https://www.kernel.org/doc/Documentation/networking/filter.txt
      • https://lwn.net/Articles/599755/
      • https://www.facebook.com/atscaleevents/videos/1693888610884236/
      • http://iovisor.github.io/bcc/
      • http://www.brendangregg.com/blog/2015-05-15/ebpf-one-small-step.html
      • https://github.com/sharklinux/shark
      "},{"location":"c/","title":"C","text":"

      \"C (pronounced like the letter c) is a general-purpose computer programming language. It was created in the 1970s by Dennis Ritchie, and remains very widely used and influential.\" - https://en.wikipedia.org/wiki/C_(programming_language)

The Linux kernel is more than 98% C code.

      "},{"location":"c/#links","title":"Links","text":"
      • https://en.wikipedia.org/wiki/C_(programming_language)
      • https://tmewett.com/c-tips
      "},{"location":"calico/","title":"calico","text":"

      \"Calico provides secure network connectivity for containers and virtual machine workloads.\" - https://docs.projectcalico.org/v3.1/introduction/

      "},{"location":"calico/#kubernetes-examples","title":"Kubernetes Examples","text":"

      Calico works in several environments, but these examples all apply to Kubernetes.

      "},{"location":"calico/#installation","title":"Installation","text":"

      https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/

      "},{"location":"calico/#show-a-bunch-of-info-about-your-calico-config","title":"Show a bunch of info about your calico config","text":"

      See also https://docs.projectcalico.org/v2.0/reference/calicoctl/resources/

      for X in bgpPeer hostEndpoint ipPool node policy profile workloadEndpoint ; do\n  echo \"=========== $X\"\n  calicoctl get $X 2>/dev/null\ndone\n
      "},{"location":"calico/#links","title":"Links","text":"
      • https://docs.projectcalico.org/latest/
      • https://docs.projectcalico.org/v2.0/reference/calicoctl/resources/
      • https://kubernetes.io/docs/concepts/services-networking/network-policies/
      "},{"location":"calver/","title":"CalVer","text":"

      \"CalVer is a software versioning convention that is based on your project's release calendar, instead of arbitrary numbers.\" - https://calver.org/

      "},{"location":"calver/#links","title":"Links","text":"
      • https://calver.org/
      • Cockroach Labs: Why we're switching to calendar versioning / https://news.ycombinator.com/item?id=19658969
      • https://news.ycombinator.com/item?id=21967879
      "},{"location":"centos/","title":"CentOS Linux","text":"

      \"The CentOS Project is a community-driven free software effort focused on delivering a robust open source ecosystem.\" - https://www.centos.org/

      "},{"location":"centos/#centos-7","title":"CentOS 7","text":"
      • Released 2014-07-07
      "},{"location":"centos/#new-things-in-centos-7","title":"New things in CentOS 7","text":"
      • firewalld manages the firewall
      • hostnamectl changes the hostname and applies the setting immediately
      • journalctl shows log files of services launched by systemd
      • systemctl manages systemd services
      "},{"location":"centos/#initial-setup","title":"Initial setup","text":"

      Set up some base parameters on a fresh instance

yum install -y bash-completion bc curl git lsof mlocate mutt net-snmp ntp ntpdate smartmontools strace sysstat vim wget\nln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime\nntpdate {0..3}.pool.ntp.org\nsystemctl start ntpd\n
      "},{"location":"centos/#centos-6","title":"CentOS 6","text":"
      • Released 2011-07-10
      "},{"location":"centos/#centos-6-initial-setup","title":"CentOS 6 Initial Setup","text":"
      yum install -y ntp\nchkconfig --levels 345 ntpd on && ntpdate time.apple.com && service ntpd start\nyum upgrade -y\nyum install -y arping avahi avahi-tools bc bind-utils curl elinks fping lsof net-snmp man mlocate mutt openssh openssh-clients openssh-server perl-Crypt-SSLeay perl-libwww-perl rsync strace vim wget yum-cron\nln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime\nchkconfig --levels 345 yum-cron on && service yum-cron start\nyum install -y dcfldd nfs-utils smartmontools dmidecode lshw dstat htop iotop\nchkconfig --levels 345 smartd on && service smartd start\n
      "},{"location":"centos/#tweaks-and-tricks","title":"Tweaks and Tricks","text":""},{"location":"centos/#get-past-protected-lib-problems","title":"Get past protected lib problems","text":"

      yum update --setopt=protected_multilib=false --skip-broken

      "},{"location":"centos/#enable-dhcp-hostname-for-dns-resolution","title":"Enable DHCP Hostname for DNS resolution","text":"

      add \"DHCP_HOSTNAME=whatever\" to /etc/sysconfig/network-scripts/ifcfg-eth0

      "},{"location":"centos/#install-os-from-usb","title":"Install OS from USB","text":"
      • From Windows: http://iso2usb.sourceforge.net/
      • From Linux: https://fedoraproject.org/wiki/Livecd-iso-to-disk
      "},{"location":"centos/#show-installed-repository-keys","title":"Show installed repository keys","text":"

      rpm -q gpg-pubkey --qf '%{name}-%{version}-%{release} --> %{summary}\\n'

      "},{"location":"centos/#dhcp-with-ddns-hostname","title":"DHCP with DDNS hostname","text":"

      Model your /etc/sysconfig/network-scripts/ifcfg-eth0 like this:

      TYPE=Ethernet\nDEVICE=eth0\nONBOOT=yes\nBOOTPROTO=dhcp\n## Without the following line, dhclient will not update /etc/resolv.conf and may not get an IP address at all\nDHCP_HOSTNAME=some_hostname\n
      • To configure your hostname, edit /etc/sysconfig/network and add HOSTNAME=
      • You also may need to delete these files:
      • rm -f /etc/dhclient-eth?.conf /var/lib/dhclient/dhclient-eth?.leases /etc/udev/rules.d/70-persistent-net.rules /etc/sysconfig/network-scripts/ifcfg-eth1\n
        "},{"location":"centos/#bond-configs","title":"Bond Configs","text":"
        $ cat /etc/modprobe.d/bond0.conf\nalias bond0 bonding\noptions bond0 max_bonds=1 fail_over_mac=2 mode=1 num_grat_arp=2 primary=em1 primary_reselect=1 arp_validate=1 arp_interval=100 arp_ip_target=10.1.5.15,10.1.1.1\n
        "},{"location":"centos/#see-also","title":"See Also","text":"
        • chkconfig
        • rpm
        • selinux - https://wiki.centos.org/HowTos/SELinux
        • yum
        "},{"location":"ceph/","title":"Ceph","text":"

        \"Ceph is a unified, distributed storage system designed for excellent performance, reliability and scalability.\" - https://ceph.com

        "},{"location":"ceph/#glossary","title":"Glossary","text":"

        http://docs.ceph.com/docs/master/glossary/

        • Ceph OSD: The Ceph OSD software, which interacts with a logical disk (OSD).
        • CephFS: The POSIX filesystem components of Ceph.
        • MDS: (Ceph Metadata Server) The Ceph metadata software.
        • MGR: (Ceph Manager) The Ceph manager software, which collects all the state from the whole cluster in one place.
        • MON: (Ceph Monitor) The Ceph monitor software.
        • OSD: (Object Storage Device) A physical or logical storage unit.
        • RADOS: Reliable Autonomic Distributed Object Store.
        • RBD: The block storage component of Ceph.
        • RGW: The S3/Swift gateway component of Ceph.
        • PG: Placement Group. http://docs.ceph.com/docs/master/rados/operations/placement-groups/
        "},{"location":"ceph/#examples","title":"Examples","text":""},{"location":"ceph/#activate-all-osds","title":"Activate all OSDs","text":"
        sudo ceph-disk activate-all\n

        Starting with ceph 13, use:

        ceph-volume lvm activate --all\n
        "},{"location":"ceph/#start-all-ceph-services","title":"Start all ceph services","text":"
        sudo systemctl start ceph.target\n
        "},{"location":"ceph/#stop-all-ceph-services","title":"Stop all ceph services","text":"

        Unfortunately there's not a single service or target to stop, so you have to use globs

        sudo systemctl stop '*ceph*'\n
        "},{"location":"ceph/#show-the-status-of-all-osds-in-the-cluster","title":"Show the status of all osds in the cluster","text":"
        ceph osd status\n

        Or alternatively

        ceph osd tree\n
        "},{"location":"ceph/#show-metadata-about-all-osds-in-the-cluster","title":"Show metadata about all osds in the cluster","text":"

        This produces a json list with a dict for each osd.

        ceph osd metadata\n
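
Because the output is JSON, jq can extract individual fields; for example, to list the host each OSD lives on (assuming the hostname key, which is present in recent Ceph releases):

ceph osd metadata | jq -r '.[].hostname'\n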
        "},{"location":"ceph/#show-all-pools","title":"Show all pools","text":"
        ceph osd lspools\n
        "},{"location":"ceph/#list-all-rbd-images-in-a-pool","title":"List all RBD images in a pool","text":"
        pool_name=\"kube\"\nrbd list \"$pool_name\"\n
        "},{"location":"ceph/#show-rbd-usage-stats","title":"Show rbd usage stats","text":"

        This will show name, provisioned, used, and will have a sum at the bottom, with sizes defaulting to human readable units. You can use --format json to get raw byte usage.

        rbd disk-usage --pool $pool_name $optional_rbd_name\n
        "},{"location":"ceph/#map-an-rbd-image-to-a-system-device","title":"Map an RBD image to a system device","text":"
        pool_name=\"kube\"\nrbd_image_name=\"testimage\"\nrbd map \"$pool_name/$rbd_image_name\"\n

        Then you can mount whatever the resulting device is. -o X-mount.mkdir automatically creates the destination mount point, but may not be available on some systems.

        mount -o X-mount.mkdir /dev/rbd8 /mnt/rbd8\n
        "},{"location":"ceph/#list-snapshots-for-an-rbd-image","title":"List snapshots for an RBD image","text":"
        pool_name=\"kube\"\nrbd_image_name=\"testimage\"\nrbd snap list \"$pool_name/$rbd_image_name\"\n
        "},{"location":"ceph/#copy-a-snapshot-to-an-image-so-it-can-be-mounted","title":"Copy a snapshot to an image so it can be mounted","text":"
        pool_name=\"kube\"\nrbd_image_name=\"testimage\"\nsnap_name=\"snap-072519-213210\"\nrbd clone \"$pool_name/$rbd_image_name@$snap_name\" \"$pool_name/image-$snap_name\"\n

        After this you can map the new image and mount it as described above.
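
Note that depending on your Ceph version and clone format, the parent snapshot may first need to be protected before it can be cloned:

rbd snap protect \"$pool_name/$rbd_image_name@$snap_name\"\n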

        "},{"location":"ceph/#monitor-existing-operations","title":"Monitor existing operations","text":"
        ceph daemon \"mon.$MON_HOSTNAME\" ops\n
        "},{"location":"ceph/#links","title":"Links","text":"
        • Ceph Intro & Architectural Overview - https://youtu.be/7I9uxoEhUdY
        • http://docs.ceph.com/ceph-ansible/master/
        • http://docs.ceph.com/docs/master/rados/configuration/network-config-ref/
        • http://docs.ceph.com/docs/master/rados/operations/operating/
        • http://docs.ceph.com/docs/master/start/quick-ceph-deploy/
        "},{"location":"chkconfig/","title":"chkconfig","text":"

chkconfig is a tool to interact with SysV init scripts on CentOS/RHEL hosts, and some other distributions.

        "},{"location":"chkconfig/#examples","title":"Examples","text":""},{"location":"chkconfig/#list-services-and-their-runlevels","title":"List services and their runlevels","text":"
        chkconfig --list\n
        "},{"location":"chkconfig/#turn-on-mysql-at-runlevels-3-and-5","title":"Turn on mysql at runlevels 3 and 5","text":"
        chkconfig --level 35 mysql on\n
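
Similarly, to disable the service at those runlevels:

chkconfig --level 35 mysql off\n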
        "},{"location":"chkconfig/#see-also","title":"See Also","text":"
        • update-rc.d - similar feature for ubuntu sysvinit
        "},{"location":"chocolatey/","title":"Choclatey","text":"

        \"The package manager for Windows\" - https://chocolatey.org

        "},{"location":"chocolatey/#examples","title":"Examples","text":"

        choco has to be run from an admin shell.

        "},{"location":"chocolatey/#search-for-a-package","title":"Search for a package","text":"
        choco search xencenter\n
        "},{"location":"chocolatey/#install-software-and-all-its-requirements","title":"Install software and all its requirements","text":"
        choco install xencenter -y\n
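
To upgrade everything that was installed with choco:

choco upgrade all -y\n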
        "},{"location":"chronos/","title":"chronos","text":"

        \"Chronos is a replacement for cron. It is a distributed and fault-tolerant scheduler that runs on top of Apache Mesos that can be used for job orchestration.\" - https://mesos.github.io/chronos/

• Uses ISO 8601 repeating interval notation, but the P duration designator is required, so you can only use the syntax Rn/<datespec>/PT4H - see http://mesos.github.io/chronos/docs/api.html#adding-a-scheduled-job
        "},{"location":"circleci/","title":"CircleCI","text":"

        \"CircleCI's continuous integration and delivery platform makes it easy for teams of all sizes to rapidly build and release quality software at scale. Build for Linux, macOS, and Android, in the cloud or behind your firewall.\" - https://circleci.com/

        "},{"location":"circleci/#links","title":"Links","text":"
        • https://circleci.com/docs/2.0/configuration-reference/
        • https://circleci.com/docs/2.0/sample-config/
        • https://circleci.com/docs/2.0/circleci-images/
        • https://circleci.com/docs/2.0/workflows/
        • https://discuss.circleci.com/
        "},{"location":"circuitpython/","title":"circuitpython","text":"

        \"The easiest way to program microcontrollers. CircuitPython is a programming language designed to simplify experimenting and learning to code on low-cost microcontroller boards. \" - https://circuitpython.org

        "},{"location":"circuitpython/#examples","title":"Examples","text":""},{"location":"circuitpython/#disable-circuitpython-usb-drive","title":"Disable CIRCUITPYTHON usb drive","text":"

        Create a boot.py file in the drive with the following contents:

        import storage\n\nstorage.disable_usb_drive()\n

        To re-enable the drive, use the serial console to access the repl and comment out all of the contents of boot.py by running:

        import storage\n\nwith open('boot.py', mode='r') as f:\n    contents = [f'#{x}' for x in f.readlines()]\n\nwith open('boot.py', mode='w') as f:\n    f.write(''.join(contents))\n

        or just delete the boot.py file with:

        import os, storage\nstorage.remount('/', readonly=False)\nos.remove('/boot.py')\n
        "},{"location":"circuitpython/#paste-mode","title":"Paste mode","text":"

You can enter paste mode by pressing CTRL-E. This lets you paste in multi-line code that will not be interpreted until you press CTRL-D. It also lets you paste code that has spaces in it without the REPL declaring your syntax invalid due to indentation problems or other whitespace nuances of the normal REPL.

        "},{"location":"circuitpython/#see-also","title":"See also","text":"
        • python
        • micropython
        • microcontrollers
        "},{"location":"cncf/","title":"CNCF","text":"

        \"CNCF serves as the vendor-neutral home for many of the fastest-growing projects on GitHub, including Kubernetes, Prometheus and Envoy, fostering collaboration between the industry\u2019s top developers, end users, and vendors.\" - https://www.cncf.io

        "},{"location":"cobbler/","title":"Cobbler","text":"

        \"Cobbler is a Linux installation server that allows for rapid setup of network installation environments.\" -- http://cobbler.github.io

        "},{"location":"cobbler/#notes","title":"Notes","text":"
        • Versions prior to 2.6.9 can no longer auto-download loaders
        "},{"location":"cobbler/#links","title":"Links","text":"
        • https://cobbler.github.io/
        • https://cobbler.github.io/quickstart/
        • https://download.opensuse.org/repositories/home:/libertas-ict:/cobbler26/CentOS_CentOS-6/noarch/ - Newer than epel CentOS 6 rpms
        • https://github.com/rhinstaller/pykickstart
        "},{"location":"colorblindness/","title":"Colorblindness","text":"

A significant fraction of the human population has color-deficient vision. Designing information systems so these people can access them is important, and often overlooked.

        "},{"location":"colorblindness/#links","title":"Links","text":"
        • https://jfly.uni-koeln.de/color: \"Color Universal Design (CUD) - How to make figures and presentations that are friendly to Colorblind people\"
        • https://www.cs.unm.edu/~aaron/creative/colorTest.htm: Reverse colorblind test
• https://paletton.com/: Pick color palettes, with a colorblind simulator
        • https://colororacle.org: Colorblindness simulation app
        • https://wearecolorblind.com
        • https://www.color-blindness.com
        • https://asada0.tumblr.com/post/11517603099/the-day-i-saw-van-goghs-genius-in-a-new-light
        • http://www.vischeck.com
        • https://www.biyee.net/color-science/color-vision-test
        • https://www.joshwcomeau.com/css/make-beautiful-gradients: avoid grey, dull colors when making gradients by using HSL instead of RGB.
        • https://bsago.me/posts/that-annoying-shade-of-blue: Not really color blindness, but discusses human color perception and technology.
• https://ericportis.com/posts/2024/okay-color-spaces: Exploration of color spaces.
        • https://jlongster.com/why-chromaticity-shape: \"Why does the chromaticity diagram look like that?\"
        "},{"location":"computing/","title":"Computing","text":"

        General notes about technology. Basically tech bookmarks.

        "},{"location":"computing/#links","title":"Links","text":"
        • http://emulator101.com - Great introduction to CPU architectures and assembly language.
        • https://www.progsbase.com/blog/flow-charts-of-programming-language-constructs/ - Flow-Charts of Programming Language Constructs
        "},{"location":"consul/","title":"consul","text":"

        \"Service Discovery and Configuration Made Easy\" - https://www.consul.io/

        "},{"location":"consul/#links","title":"Links","text":"
        • https://www.consul.io/docs/internals/architecture.html
        "},{"location":"cookiecutter/","title":"cookiecutter","text":""},{"location":"cookiecutter/#examples","title":"Examples","text":""},{"location":"cookiecutter/#find-a-list-of-cookiecutter-variables","title":"Find a list of cookiecutter variables","text":"
        grep -h -o '{{cookiecutter[^}]*}}' \\{\\{cookiecutter.repo_name\\}\\}/* | sort | uniq -c\n
        "},{"location":"cookiecutter/#links","title":"Links","text":"
        • Cookiecutter: Better Project Templates
        • A pantry full of cookiecutters
        "},{"location":"cowsay/","title":"cowsay","text":"

A Linux utility that prints a cow saying something. Also works as cowthink, and a variety of other animals and artwork are available.

        "},{"location":"cowsay/#examples","title":"Examples","text":""},{"location":"cowsay/#get-a-list-of-things-that-can-talk","title":"Get a list of things that can talk","text":"
        $ cowthink -l\nCow files in /usr/share/cowsay/cows:\napt beavis.zen bong bud-frogs bunny calvin cheese cock cower daemon default\ndragon dragon-and-cow duck elephant elephant-in-snake eyes flaming-sheep\nghostbusters gnu head-in hellokitty kiss kitty koala kosh luke-koala\nmech-and-cow meow milk moofasa moose mutilated pony pony-smaller ren sheep\nskeleton snowman sodomized-sheep stegosaurus stimpy suse three-eyes turkey\nturtle tux unipony unipony-smaller vader vader-koala www\n
        "},{"location":"cowsay/#cowsay_1","title":"cowsay","text":"
        $ cowsay \"Hello world!\"\n ______________\n< Hello world! >\n --------------\n        \\   ^__^\n         \\  (oo)\\_______\n            (__)\\       )\\/\\\n                ||----w |\n                ||     ||\n
        "},{"location":"cowsay/#cowthink","title":"cowthink","text":"
        $ cowthink -f dragon \"On the internet, nobody knows you're a dragon!\"\n ________________________________________\n( On the internet, nobody knows you're a )\n( dragon!                                )\n ----------------------------------------\n      o                    / \\  //\\\n       o    |\\___/|      /   \\//  \\\\\n            /0  0  \\__  /    //  | \\ \\\n           /     /  \\/_/    //   |  \\  \\\n           @_^_@'/   \\/_   //    |   \\   \\\n           //_^_/     \\/_ //     |    \\    \\\n        ( //) |        \\///      |     \\     \\\n      ( / /) _|_ /   )  //       |      \\     _\\\n    ( // /) '/,_ _ _/  ( ; -.    |    _ _\\.-~        .-~~~^-.\n  (( / / )) ,-{        _      `-.|.-~-.           .~         `.\n (( // / ))  '/\\      /                 ~-. _ .-~      .-~^-.  \\\n (( /// ))      `.   {            }                   /      \\  \\\n  (( / ))     .----~-.\\        \\-'                 .~         \\  `. \\^-.\n             ///.----..>        \\             _ -~             `.  ^-`  ^-_\n               ///-._ _ _ _ _ _ _}^ - - - - ~                     ~-- ,.-~\n                                                                  /.-~\n
        "},{"location":"cpp/","title":"C++","text":"

        \"C++ (pronounced \"C plus plus\") is a high-level general-purpose programming language created by Danish computer scientist Bjarne Stroustrup as an extension of the C programming language, or \"C with Classes\".\" - https://en.wikipedia.org/wiki/C%2B%2B

        "},{"location":"cpp/#links","title":"Links","text":"
        • https://www.learncpp.com
        • https://cplusplus.com
        • https://github.com/federico-busato/Modern-CPP-Programming
        "},{"location":"cradlepoint/","title":"Cradlepoint","text":"

        \"Founded in 2006, Cradlepoint has grown to become the industry leader in cloud-delivered 4G LTE network solutions for business, service providers, and government organizations, and we are committed to extending our leadership into the emerging 5G space.\" - https://cradlepoint.com/company/about

        "},{"location":"cradlepoint/#cradlepoint-ibr900","title":"Cradlepoint IBR900","text":"
        • https://cradlepoint.com/products/cor-ibr900-series
        • https://cradlepoint.com/sites/default/files/upload-file/cradlepoint_ibr900_manual_0.pdf

The CLI is not a normal shell, but a minimal appliance-type UI.

        "},{"location":"cradlepoint/#get-config-data","title":"Get config data","text":"

        This will get all config data, which can be over 25k lines of JSON.

        get\n

        To get just a subset of the data, use something like:

        [admin@IBR900-13e: /]$ get config/vlan\n[\n    {\n        \"mode\": \"wan\",\n        \"ports\": [],\n        \"uid\": \"wan\",\n        \"vid\": 1\n    },\n    {\n        \"mode\": \"lan\",\n        \"ports\": [\n            {\n                \"mode\": \"untagged\",\n                \"port\": 0\n            },\n            {\n                \"mode\": \"untagged\",\n                \"port\": 1\n            },\n            {\n                \"mode\": \"untagged\",\n                \"port\": 2\n            }\n        ],\n        \"uid\": \"lan\",\n        \"vid\": 2\n    }\n]\n
        "},{"location":"cradlepoint/#set-and-fetch-variables","title":"Set and fetch variables","text":"
        [admin@IBR900-13e: /]$ set foo/bar: \"baz\"\n[admin@IBR900-13e: /]$ get foo\n{\n    \"bar:\": \"baz\"\n}\n
        "},{"location":"cradlepoint/#getting-help","title":"Getting help","text":"
        [admin@IBR900-13e: /]$ help\nAvailable Commands:\n        SupportQA      adduser        append         arpdump        atterm         banner         bgp\n        cd             clear          clients        cpconnect      date           delete         deluser\n        devices        diff           edit           exit           factory_reset  find           free\n        get            gre            grep           help           inspect        ips            ipset\n        lan            log            ls             mkdir          nemo           netcloud       netfilter\n        netstat        nhrp           ospf           passwd         ping           ping6          pwd\n        qos            reboot         reset          resources      rip            ripng          route\n        rtpolicy       serial         set            sleep          sms            ssh            stp\n        switch         tcpdump        telnet         threads        traceroute     uptime         vlan\n        vpn            vrrp           wan            wireless       workqueue      xfrm           zebra\n\nAvailable Aliases:\n        cat  => get\n        dir  => ls\n        ll   => ls -l 1\n        more => get\n        post => append\n        put  => set\n        quit => exit\n        rm   => delete\n\nTo get help for a specific command run: \"help CMD\"\n[admin@IBR900-13e: /]$ help SupportQA\nCommand to provide debugging data\n[admin@IBR900-13e: /]$ help adduser\nAdd a new user account and set the password for this account.\nUsage: adduser USER\n\n[admin@IBR900-13e: /]$ help append\nAppend new item to an array in the config\n    append PATH VALUE\n[admin@IBR900-13e: /]$ help arpdump\nThe arpdump command shows the current ARP table.\n[admin@IBR900-13e: /]$ help atterm\nThe atterm command can be used to get direct access to a modem's AT command channel.\nIf a modem interface is not given then the system will pick the highest priority modem.\nTo discover a modem's interface to use, use the command 'devices' and use\nthe value found under the 'SubIface' column.\n\nUSAGE: atterm [interface] [-c ATCMD] [-t SOCKET_TIMEOUT]\n\n[admin@IBR900-13e: /]$ help banner\nbanner [set|unset]\n[admin@IBR900-13e: /]$ help bgp\nUsage: bgp [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga BGP CLI is given.\nUse \"bgp list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. 
If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$ help cd\nChange to a new config directory\nUsage: cd DIR/[DIR...]\n[admin@IBR900-13e: /]$ help clear\nClear the screen\n[admin@IBR900-13e: /]$ help clients\nThe clients command can be used to show the currently connected clients,\nboth wireless and wired as well as hotspot.\n\nUSAGE:    clients [show | revoke | kick ] [SUBOPTIONS...]\n    show [CONNECTION TYPE]: Show clients by connection type.\n        If no connection type option is given, all connection types will be shown.\n    Connection Types:\n        all  : All Known Clients, both wired and wireless, that the router knows about.\n        wlan : Wireless LAN Clients\n        hotspot : Authenticated Hotspot Clients\n    revoke [-t [ip | mac | hn]]: Revoke a client's Hotspot authentication.\n        -t : The type of adddress to be used to find the client in the client list\n            ip  : IP address of the client (default)\n            mac : MAC address of the client\n            hn  : Hostname of the client (cannot be used if client does not have a hostname)\n    kick [-t [ip | mac | hn]]: Remove wireless access until the router is rebooted.\n        -t : The type of adddress to be used to find the client in the client list\n            ip  : IP address of the client (default)\n            mac : MAC address of the client\n            hn  : Hostname of the client (cannot be used if client does not have a hostname)\n[admin@IBR900-13e: /]$ help cpconnect\nUsage: cpconnect {COMMAND} [OPTIONS]\n    where COMMAND := { add | remove | show }\n          OPTIONS := {-n[ame] | -u[ser] | -p[asswd] | -s[server] } [VAR]\n\n    example: cpconnect add -n test1 -u user1 -p pass1 -s vpn.accessmylan.com\n             cpconnect show\n\n[admin@IBR900-13e: /]$ help date\nShow system clock.\n[admin@IBR900-13e: /]$ help delete\nDelete an item from the config\nUsage: delete PATH\n[admin@IBR900-13e: /]$ help deluser\nDelete a user account.\nUsage: deluser USER\n\n[admin@IBR900-13e: /]$ help devices\nDisplay network devices connected to the router.\n\nUSAGE:    devices [-v]                 Show all devices(s)\n          devices [-v] [DEVICE_UID...] 
Show one or more specific devices.\n            -v     Verbose output\n[admin@IBR900-13e: /]$ help diff\nShow differences between the current and the default configuration.\n[admin@IBR900-13e: /]$ help edit\nUsage: edit CONFIG_PATH\nExamples:\n    edit /config/system/gps\n    edit .\n\n[admin@IBR900-13e: /]$ help exit\nExit the shell\n[admin@IBR900-13e: /]$ help factory_reset\nReset config to factory defaults\n[admin@IBR900-13e: /]$ help find\nFind or list files and combine with grep to locate specific files\nUsage: find PATH [| grep file]\n[admin@IBR900-13e: /]$ help free\nShow amount (kilobytes) of free and used system memory.\n\"free\" memory is presently unused, while \"available\" includes used memory,\ntypically for cache, that is readily reclaimed for something else when needed.\n[admin@IBR900-13e: /]$ help get\nGet value for config item(s)\nUsage: get PATH [PATH...]\n[admin@IBR900-13e: /]$ help gre\n\nStart, stop, or show status of all gre tunnels.\nStart, stop or restart a tunnel.\nRenew or release the DHCP lease for a tunnel (if DHCP for GRE is enabled).\n\ngre [show|start|stop]\ngre [starttunnel|stoptunnel|restarttunnel|renew|release] -n <TUNNEL NAME>\n\n[admin@IBR900-13e: /]$ help grep\nUsage: grep PATTERN [FILE]...\n[admin@IBR900-13e: /]$ help help\nTo get help for a specific command run: \"help CMD\"\n[admin@IBR900-13e: /]$ help inspect\nInspect a directory\nUsage: inspect [PATH]\n[admin@IBR900-13e: /]$ help ips\nInteract with the IPS engine.\n\nUsage: ips [load|show|mode|status] [SUBOPTIONS..]\n    load [-f PATH]: Load a rule file into the IPS engine\n        -f [PATH]: Load rule file from PATH (Must contain the filename)\n    show [-o [ids | cats | sec | app | anom]]: Show information from the signatures loaded.\n        ids: Print all the signatures (default)\n        cats: Print all the categories\n        sec: Print only the signatures for the Security rules\n        app: Print only the signatures for the Application rules\n        anom: Print only the signatures for the Anomaly rules\n    mode [-o [off | ips | ids]]: Change the IPS Global mode\n        off: Set Default IPS  mode to 'Disabled'\n             (If no other Category or Signature is 'Enabled' then the kernel\n              modules will be unloaded)\n        ips: Set Default IPS mode to 'Detect and Prevent'\n        ids: Set Default IPS mode to 'Detect Only'\n    status: Print the status of the IPS engine\n    update: Request a Rule File Update\n\n[admin@IBR900-13e: /]$ help ipset\nipset [list {name}]\nspecify the name of the set to list, or nothing to see the names of the sets\n[admin@IBR900-13e: /]$ help lan\nShow the current LAN configuration and status.\n[admin@IBR900-13e: /]$ help log\nShow and manipulate the log system.\nUsage: log [show|clear|service|level|msg] [SUBOPTONS...]\n        show [FILTER] [FILTERN] [[-bhi] -s SEARCH]:\n              FILTER can be one or more space-separated names or levels.\n                 eg. log show wanmgr kernel DEBUG INFO\n              -b bold new entries\n              -h HIGHLIGHT Same usage as searching but does not filter results.\n              -i makes the search case insensitive.\n              -s SEARCH can be any string to search for in the log message contents.\n                 eg. 
log show -s Firmware\n              -f [LINES_OF_HISTORY] Follow mode with optional argument for number of lines of history to show.\n              -r recover crash log if one exists.\n        service [level (DEBUG|INFO|WARNING|ERROR|CRITICAL)]:\n             Display all service log levels.\n             level change service log level.\n        clear: Erase all logs from memory\n        level [NEW_LEVEL]: View current level or set new log level to: critical, error, warning, info, or debug\n        msg [-l LEVEL] MESSAGE: Write a message to the logging system. LEVEL defaults to: info\n[admin@IBR900-13e: /]$ help ls\nList files in the current config directory\n[admin@IBR900-13e: /]$ help mkdir\nCreate an empty container\nUsage: mkdir DIRNAME\n[admin@IBR900-13e: /]$ help nemo\nShow status and configuration of NEMO session\nUsage: nemo [-v]\n[admin@IBR900-13e: /]$ help netcloud\nManage connection to the Cradlepoint NetCloud.\n\nUsage: netcloud [alert|status|register|stop|start|restart|triggers]\n     status: [DEFAULT] Show current status information.\n     register [--username=USER --password=PW]|[--token_id=TID --token_secret=TS]: (Re)register\n       --username: The NetCloud username that should be used to authenticate.\n       --password: The NetCloud password that should be used to authenticate.\n       --token_id: [EXPERT] Token ID for token authentication mode.\n       --token_secret: [EXPERT] Token secret for token authentication mode.\n     unregister: Unregister this router from NetCloud and unregister.\n     stop: Manually stop the NetCloud client.\n     start: Manually start the NetCloud client.\n     restart: Manually restart the NetCloud client.\n\n[admin@IBR900-13e: /]$ help netfilter\nShow info and debug from netfilter2 rule(s) and trigger(s).\nUsage: netfilter [active|all|upnp|triggers|states] [input|routed|output] [ip/ip6] [-v] [-s] [-r RULE_INDEX] [-t TRIGGER_INDEX] [-s STATE_INDEX]\n\n[admin@IBR900-13e: /]$ help netstat\nUsage: netstat [-al] [-tuwx] [-enWp]\n\nDisplay networking information\n\n-a     All sockets\n-l     Listening sockets\n        Else: connected sockets\n-t     TCP sockets\n-u     UDP sockets\n-w     Raw sockets\n-x     Unix sockets\n        Else: all socket types\n-e     Other/more information\n-n     Don't resolve names\n-W     Wide display\n-p     Show PID/program name for sockets\n\n[admin@IBR900-13e: /]$ help nhrp\n\nUsage:    nhrp show\n          nhrp flush\n          nhrp flush nbma ip\n\n[admin@IBR900-13e: /]$ help ospf\nUsage: ospf [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga OSPF CLI is given.\nUse \"ospf list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. 
If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$ help passwd\nSet the password for a user.\nUsage: passwd [USER]\n\n[admin@IBR900-13e: /]$ help ping\nSend ICMP echo_request(s) to a networked host\n\nUsage: ping [-w DEADLINE] [-c COUNT] [-i INTERVAL] [-I INTERFACE [-P]] [-M PMTUDISC_OPT] [-s SIZE] HOSTNAME\n\n    -w DEADLINE\n        Stop after a total of DEADLINE seconds, or (if specified) COUNT packets\n        are sent, whichever comes first.\n\n    -c COUNT\n        Stop after sending COUNT packets, or (if specified) DEADLINE seconds,\n        whichever comes first\n\n    -i INTERVAL\n        Wait INTERVAL seconds between packet transmissions. Default is 1.\n\n    -I INTERFACE\n        Specify the source of the ICMP requests. If INTERFACE is an IP address,\n        then source the ICMP requests from that address. If INTERFACE is not\n        an IP address, treat it as an interface name and source from that\n        interface. When treated as a name, exact matches of interface \"Ifaces\"\n        are chosen first, followed by exact matches of \"Device UIDs\", and\n        finally case-insensitive matches to Network names are returned. See\n        the \"devices\" command for a list of valid \"Ifaces\" and \"Device UIDs\".\n\n    -P\n        Requires the -I INTERFACE option. When -P is present, ping will\n        source from an IP address on the specified interface, instead of the\n        interface itself.\n\n    -s SIZE\n        Specifiy the ICMP data length, in bytes. The default is 56 bytes, which\n        will result in an ICMP packet length of 64 bytes (56 data bytes plus 8\n        ICMP header bytes)\n\n    -M PMTU_OPT\n        Select Path MTU discovery. PMTU_OPT must be one of: \"do\", \"want\" or \"dont\".\n        If the PMTU_OPT is \"do\" (default), then ping will set the Don't Fragment (DF)\n        flag in the ICMP requests, which will prohibit packet fragmentation. If\n        PMTU_OPT is \"want\", then ping will fragment if the ICMP request exceeds\n        the local outbound interfaces' MTU. Finally if PMTU_OPT is \"dont\" (do not\n        set the DF flag), then fragmentation is allowed and ICMP Requests will be\n        fragmented as necessary in response to ICMP Fragmentation Responses.\n\n\n[admin@IBR900-13e: /]$ help ping6\nSend ICMPv6 echo_request(s) to a networked host\n\nUsage: ping6 [-w DEADLINE] [-c COUNT] [-i INTERVAL] [-I INTERFACE [-P]] [-M PMTUDISC_OPT] [-s SIZE] HOSTNAME\n\n    -w DEADLINE\n        Stop after a total of DEADLINE seconds, or (if specified) COUNT packets\n        are sent, whichever comes first.\n\n    -c COUNT\n        Stop after sending COUNT packets, or (if specified) DEADLINE seconds,\n        whichever comes first\n\n    -i INTERVAL\n        Wait INTERVAL seconds between packet transmissions. Default is 1.\n\n    -I INTERFACE\n        Specify the source of the ICMP requests. If INTERFACE is an IP address,\n        then source the ICMP requests from that address. If INTERFACE is not\n        an IP address, treat it as an interface name and source from that\n        interface. When treated as a name, exact matches of interface \"Ifaces\"\n        are chosen first, followed by exact matches of \"Device UIDs\", and\n        finally case-insensitive matches to Network names are returned. 
See\n        the \"devices\" command for a list of valid \"Ifaces\" and \"Device UIDs\".\n\n    -P\n        Requires the -I INTERFACE option. When -P is present, ping will\n        source from an IP address on the specified interface, instead of the\n        interface itself.\n\n    -s SIZE\n        Specifiy the ICMP data length, in bytes. The default is 56 bytes, which\n        will result in an ICMP packet length of 64 bytes (56 data bytes plus 8\n        ICMP header bytes)\n\n\n[admin@IBR900-13e: /]$ help pwd\nPrint the current working directory\n[admin@IBR900-13e: /]$ help qos\nShow QoS statistics.\nUsage: qos\n\n[admin@IBR900-13e: /]$ help reboot\nReboot the router\n[admin@IBR900-13e: /]$ help reset\nReset the tty to default settings\n[admin@IBR900-13e: /]$ help resources\nReport the system resource usage.\n[admin@IBR900-13e: /]$ help rip\nUsage: rip [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga RIP CLI is given.\nUse \"rip list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$ help ripng\nUsage: ripng [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga RIPNG CLI is given.\nUse \"ripng list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$ help route\nThe route command can be used to show the current routing tables as\nwell as make changes to the user defined static routing table.\n\nUSAGE:    route [show [{TABLE}]]                        Show route(s)\n          route config {bgp|ospf|rip|ripng|static}      Show routing protocol configuration\n          route add IP/NM [gw IP] [dev UID] [auto]    Add new user defined static route\n          route del IP/NM [gw IP] [dev UID]     Remove user defined static route(s)\n\nEXAMPLES: route add 172.0.0.100 dev primarylan\n          route add 10.0.0.0/8 gw 172.0.0.100\n          route add 2000::/3 dev guestlan metric 100\n\nNOTE: Only the user defined static routing table may be modified directly.\n[admin@IBR900-13e: /]$ help rtpolicy\nShow the routing policies used by the router.  
These policies control which\nrouting table will be used based on certain packet matching criteria and can\nalso control certain routing decisions.\n\nUSAGE:    rtpolicy                                 Show policies(s)\n\n[admin@IBR900-13e: /]$ help serial\nThe serial command can be used to access a serial adapter connected to the router.\nUSAGE:  serial [--force]          Connects to the first serial device using the serial configuration in config/system/serial\nUSAGE:  serial [--force] [number] Connects to the [number] port of a multi-port serial device using the serial configuration in config/system/serial\nOnly one remote user can be connected to a serial port at a time.  The --force option will force quit another user to allow this command to take over.\n[admin@IBR900-13e: /]$ help set\nSet a value to a config item\nUsage: set PATH VALUE\n[admin@IBR900-13e: /]$ help sleep\nsleep SECONDS\n[admin@IBR900-13e: /]$ help sms\n The sms command is used to send a msg to the given address (optionally from the modem on port)\n  Usage: sms addr msg [port]\n  Example: sms 2081234567 'hello world' usb1\n  Note: Add a '+' before the addr for international numbers\n[admin@IBR900-13e: /]$ help ssh\nCreate an SSH connection to an SSH server.\n\nusage:    ssh [-v] [-C] [-1] [-2] [-l username] [-p port] [-c cipher_spec] [user@]hostname\n\nrequired arguments:\n  hostname    Either the hostname or a user@hostname pair\n\noptional arguments:\n  -v          Debug level. May be specified up to 3 times (-v, -vv, -vvv).\n  -C          Requests compression of all data.\n  -1          Force ssh to try protocol version 1 only.\n  -2          Force ssh to try protocol version 2 only.\n  -l          Specifies login name.\n  -p          Specifies port.\n  -c          Comma separated list of ciphers (e.g. 
aes256-ctr,aes192-ctr,aes128-ctr,).\n\nSupported ciphers:\naes256-ctr,aes192-ctr,aes128-ctr,aes256-cbc,aes192-cbc,aes128-cbc,3des-cbc,blowfish-cbc\nNote: Cradlepoint routers support only PCI-compliant ciphers by default.\nTo relax this behavior, set /config/firewall/ssh_admin/weak_ciphers to true.\nDoing so will set the ciphers to the list of supported ciphers (above).\n[admin@IBR900-13e: /]$ help stp\nShow the current STP configuration and status.\n[admin@IBR900-13e: /]$ help switch\nShow ethernet switch status.\nUsage: switch [-v] [show|set|clearstats]\n\n      show [port PORT_NUM]: Only show the port specified\n\n      set PORT_NUM [--link_speed=LINK_SPD]\n                   [--enabled=true/false]\n                   [--port_name=PORT_NAME]\n      clearstats [PORT_NUM]\n[admin@IBR900-13e: /]$ help tcpdump\nUsage: tcpdump [-aAdDeflLnNOpqRStuvxX] [-c count] [ -E algo:secret ]\n               [ -i interface ] [ -M secret ] [ -s snaplen ] [ -T type ]\n               [ -y datalinktype ] [ expression ]\n\n[admin@IBR900-13e: /]$ help telnet\nThe telnet command can be used to access telnet services located on the routers LAN.\nUSAGE: telnet [host] [port] [timeout]\n[admin@IBR900-13e: /]$ help threads\nShow currently active threads.\n USAGE: threads [-v]\n[admin@IBR900-13e: /]$ help traceroute\nUsage: traceroute [options] host [packet_len]\n\nAvailable options:\n  -4                  Use IPv4\n  -6                  Use IPv6\n  -F                  Do not fragment packets\n  -f                  Start from the first_ttl hop (instead from 1)\n  -g                  Route packets through the specified gateway\n  -I                  Use ICMP ECHO for tracerouting\n  -T                  Use TCP SYN for tracerouting (default port is 80)\n  -i                  Specify a network interface to operate with\n  -m                  Set the max number of hops (max TTL to be reached). Default is 30\n  -n                  Do not resolve IP addresses to their domain names\n  -p                  Set the destination port to use\n  -t                  Set the TOS (IPv4 type of service) or TC (IPv6 traffic class) value for outgoing packets\n  -l                  Use specified flow_label for IPv6 packets\n  -w                  Set the number of seconds to wait for response to a probe (default is 5.0)\n  -q                  Set the number of probes per each hop. Default is 3\n  -r                  Bypass the normal routing and send directly to a host on an attached network\n  -s                  Use source src_addr for outgoing packets\n  -z                  Minimal time interval between probes (default 0)\n  -e                  Show ICMP extensions (if present), including MPLS\n  --sport=num         Use source port num for outgoing packets. Implies \"-N 1\"\n  --fwmark=num        Set firewall mark for outgoing packets\n  -U                  Use UDP to particular port for tracerouting\n  -UL                 Use UDPLITE for tracerouting (default dest port is 53)\n  -P                  Use raw packet of protocol prot for tracerouting\n  --mtu               Discover MTU along the path being traced. 
Implies \"-F -N 1\"\n  --back              Guess the number of hops in the backward path and print if it differs\n  -V                  Print version info and exit\n\n[admin@IBR900-13e: /]$ help uptime\nShow system uptime and load avg.\n[admin@IBR900-13e: /]$ help vlan\nShow VLAN configuration.\n\n[admin@IBR900-13e: /]$ help vpn\n\nStart, stop, or show status of all vpn tunnels.\nStart, stop or restart a tunnel.\nAdjust logging for various ipsec subsystems.  Reset sets the default\nand all subsystems to log level 1.\n\nvpn [show|start|stop]\nvpn [starttunnel|stoptunnel|restarttunnel] -n <TUNNEL NAME>\nvpn config\nvpn loglevel reset\nvpn loglevel default [-1|0|1|2|3|4]\nvpn loglevel [app|asn|cfg|chd|dmn|enc|esp|ike|imc|imv|job|knl|lib|mgr|net|pts|tls|tnc] [-1|0|1|2|3|4]\n\n[admin@IBR900-13e: /]$ help vrrp\nShow the current VRRP configuration and status.\n[admin@IBR900-13e: /]$ help wan\nShow all the attached wan devices and their current state.\nUsage: wan [monitor] [UID] [CONFIG...]\nCONFIG: Can be any number of --key=[value] pairs as defined in the /config/wan/rules2 config section.\n        If the optional [value] argument is ommited then the current value (if any) will be printed.\n        Get example: wan cp1 --ip_mode --static.ip_address\n        Set example: wan cp1 --ip_mode=\"static\" --static.ip_address=\"10.0.0.1\" --static.netmask=\"255.0.0.0\"\n[admin@IBR900-13e: /]$ help wireless\nShow the current wireless configuration and status.\nUsage: wireless [OPTIONS...]\n    -w Print information about enabled Access Point profiles\n    -d Print information about enabled WiFi as WAN / WiFi Bridge profiles\n    -c Print information about currently associated WiFi clients\n    -v Print additional driver specific debug when available\n    -t [CHANNEL] Set wireless channel\n    -k [MAC ADDRESS] Kick wireless client\n    -s [OPTIONAL DWELL TIME] Perform a wireless survey\n    -a [OPTIONAL DWELL TIME] Do a survey and autoselect a channel\n    -r [RADIO] Radio to operate on (0: first, 1: second)\n\n[admin@IBR900-13e: /]$ help workqueue\nPrint current workqueue activities and schedules.\nUSAGE: workqueue [-v] [-t TASKID] [-a]\n           -v: VERBOSE\n    -t TASKID: Only show information for a single task\n           -a: Show information for active tasks (ie. currently executing)\n\n[admin@IBR900-13e: /]$ help xfrm\nUsage: xfrm policy list [ SELECTOR ] [ dir DIR ]\n        [ index INDEX ] [ action ACTION ]\nUsage: xfrm policy flush\nSELECTOR := [ src ADDR[/PLEN] ] [ dst ADDR[/PLEN] ] [ dev DEV ] [ UPSPEC ]\nUPSPEC := proto { { tcp | udp | sctp | dccp } [ sport PORT ] [ dport PORT ] |\n                  { icmp | ipv6-icmp | 135 } [ type NUMBER ] [ code NUMBER ] |\n                  gre [ key { DOTTED-QUAD | NUMBER } ] | PROTO }\nDIR := in | out | fwd\nACTION := allow | block\nUsage: xfrm state list [ ID ] [ mode MODE ]\nUsage: xfrm state flush\nID := [ src ADDR ] [ dst ADDR ] [ spi SPI ]\nMODE := transport | tunnel\n[admin@IBR900-13e: /]$ help zebra\nUsage: zebra [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga CLI is given.\nUse \"zebra list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. 
If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$\n
        "},{"location":"cron/","title":"cron","text":"

        Per-user and system-wide scheduled tasks, handled by the cron daemon.

        "},{"location":"cron/#locations","title":"Locations","text":"

        Cron scripts and entries can run from several locations. By using /etc/cron.d/scriptname you can set a different MAILTO and other environment variables per file and isolate your scheduled jobs. User jobs can be edited via crontab -e.
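
        For example, a minimal /etc/cron.d file might look like this (the job name and script are hypothetical):

        ## /etc/cron.d/clean-logs\nMAILTO=ops@example.com\nPATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n## min hour dom mon dow user command\n30 2 * * * root /usr/local/bin/clean-old-logs.sh\n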

        "},{"location":"cron/#dst","title":"DST","text":"

        Some cron daemons don't handle DST correctly. Because of this, do not schedule jobs within the 1am hour. During DST changes this hour can happen twice or be skipped altogether.

        Cronie says it handles DST gracefully, running jobs that should have run but haven't yet due to time changes, and not running jobs twice when time goes back.

        "},{"location":"cron/#syntax-quirks","title":"Syntax quirks","text":"

        Some systems have problems with the */N step syntax. (eg: */5 * * * * /usr/bin/whatever)
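
        Where step syntax is unsupported, a portable workaround is to list each value explicitly:

        0,5,10,15,20,25,30,35,40,45,50,55 * * * * /usr/bin/whatever\n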

        "},{"location":"cron/#default-editor","title":"Default Editor","text":"

        On some systems, the default editor is found via the symlink located at /etc/defaults/editor. To override this, export the EDITOR environment variable. (eg: export EDITOR=/usr/bin/vim)

        "},{"location":"cron/#examples","title":"Examples","text":""},{"location":"cron/#quick-and-dirty-realignment","title":"Quick and dirty realignment","text":"

        This will definitely fail in some circumstances.

        alias crontab-align='crontab -l | while read -r a b c d e f ; do\n  if [[ \"$a\" =~ ^# ]] ; then\n    echo \"$a $b $c $d $e $f\" ;\n  else\n    printf \"% -20s %s\\n\" \"$a $b $c $d $e\" \"$f\" ;\n  fi ;\ndone'\n\ncrontab-align | crontab -\n
        "},{"location":"cron/#add-a-random-delay","title":"Add a random delay","text":"

        This example sleeps for a random number of seconds from 0 up to 1799. The % symbol has to be escaped in crontabs.

        0 * * * *   sleep $((RANDOM \\% 1800)) ; /usr/local/bin/do-a-thing.sh ;\n
        "},{"location":"cron/#programmatic-editing-of-the-crontab","title":"Programmatic editing of the crontab","text":"

        This is potentially dangerous because you can wipe out a user's crontab.

        crontab -l | sed -e '/downtime/s/^\\#//' | crontab -\necho \"* * * * * /usr/local/bin/every_minute.sh\" | crontab -\n
        "},{"location":"cron/#see-if-and-when-parts-are-running","title":"See if and when parts are running","text":"

        Put this in /etc/cron.*/01-cron-log and when those parts run you will see the message in syslog.

        logger -t cron Running `basename $PWD`\n
        "},{"location":"cron/#os-x-alarm-clock","title":"OS X Alarm Clock","text":"
        59 5 * * 1-5    /usr/bin/osascript -e 'tell application \"iTunes\"' -e 'set the sound volume to 100' -e 'end tell'\n0  6 * * 1-5    /usr/bin/osascript -e 'tell application \"iTunes\"' -e 'play playlist \"Old Podcasts\"' -e 'end tell'\n15 8 * * 1-5    /usr/bin/osascript -e 'tell application \"iTunes\" to stop'\n
        "},{"location":"cron/#detect-if-you-are-running-in-an-interactive-shell","title":"Detect if you are running in an interactive shell","text":"

        When writing scripts to use with crontab, sometimes you want to pass different flags when a human runs them. The most obvious case is verbosity, where you may want to be very verbose when a human runs the command but quiet in cron.

        # Check if we're running in an interactive shell\nif [ -t 0 ] ; then\n  verbosity=\"--verbose\"\nelse\n  verbosity=\"--quiet\"\nfi\n\n/usr/bin/some-command \"${verbosity}\"\n
        "},{"location":"cron/#troubleshooting","title":"Troubleshooting","text":"

        Junk files, such as vim temp files, in /var/cron/tabs can make cron go to 100% CPU usage. Remove all non-crontab files and kill cron to fix it.

        "},{"location":"cron/#links","title":"Links","text":"
        • https://cronheatmap.com: Visualize a daily heatmap of your cron jobs
        • https://crontab.guru/: Online crontab linter and explainer
        • https://github.com/kiorky/croniter: python module that \"provides iteration for the datetime object with a cron like format\"
        "},{"location":"cryptsetup/","title":"cryptsetup","text":"

        \"Cryptsetup is utility used to conveniently setup disk encryption based on DMCrypt kernel module.\" - https://gitlab.com/cryptsetup/cryptsetup/blob/master/README.md

        \"LUKS is the standard for Linux hard disk encryption. By providing a standard on-disk-format, it does not only facilitate compatibility among distributions, but also provides secure management of multiple user passwords.\" - https://gitlab.com/cryptsetup/cryptsetup/blob/master/README.md

        "},{"location":"csplit/","title":"csplit","text":"

        \"split files based on context\" - man csplit

        There is a similar, simpler tool called split.
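
        For contrast, split divides input by line count or byte size rather than by context. A hypothetical example:

        ## split bigfile.txt into 1000-line files named chunk-aa, chunk-ab, ...\nsplit -l 1000 bigfile.txt chunk-\n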

        "},{"location":"csplit/#gnu-examples","title":"GNU Examples","text":"

        GNU and BSD csplit are not compatible. On macOS you can use gcsplit if you have installed coreutils via brew.

        "},{"location":"csplit/#split-amazon-rds-global-certs-into-one-cert-per-file","title":"Split amazon RDS global certs into one cert per file","text":"
        curl -fsSL https://truststore.pki.rds.amazonaws.com/global/global-bundle.pem |\ncsplit --elide-empty-files --quiet --prefix global-rds-crt -k - '/-BEGIN CERTIFICATE-/' '{*}'\n
        "},{"location":"csplit/#split-a-multi-doc-yaml-file","title":"Split a multi-doc yaml file","text":"

        This is great for splitting helm template or kubectl get pod,svc,sts,ds -o yaml output

        $ wc -l k-get-all.yaml  # lots of lines in this one yaml file\n9717 k-get-all.yaml\n\n$ grep -c '^---$' k-get-all.yaml  # lots of docs too\n161\n\n$ csplit k-get-all.yaml -s --elide-empty-files --prefix=yaml-split- --suffix-format='%03d.yaml' '/^---$/' '{*}'\n\n$ wc -l yaml-split-???.yaml\n    38 yaml-split-000.yaml\n    32 yaml-split-001.yaml\n...long-list-of-files...\n   227 yaml-split-159.yaml\n   230 yaml-split-160.yaml\n  9717 total\n
        "},{"location":"css/","title":"Cascading Style Sheets","text":""},{"location":"css/#links","title":"Links","text":"
        • https://cssprinciples.com/3/grid: \"dabble in precision with css grid\"
        • https://developer.mozilla.org/en-US/docs/Web/CSS
        • https://www.fffuel.co/css-selectors: \"CSS Selectors: A Visual Guide\"
        "},{"location":"curl/","title":"cURL","text":"

        \"command line tool and library for transferring data with URLs\" - https://curl.haxx.se

        curl is a tool to transfer data from or to a server, using one of the supported protocols (DICT, FILE, FTP, FTPS, GOPHER, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, TELNET and TFTP). The command is designed to work without user interaction.

        "},{"location":"curl/#examples","title":"Examples","text":""},{"location":"curl/#follow-location-redirects","title":"Follow location redirects","text":"
        curl -L http://whatever\n
        "},{"location":"curl/#show-the-http-code-for-a-given-request","title":"Show the HTTP code for a given request","text":"

        Use the HTTP method HEAD to fetch only the headers of the remote URI and show the HTTP code. This is useful for efficiently checking the existence of a URI that would be a large download.

        curl --head -s -w \"%{http_code}\\n\" -o /dev/null http://www.example.com/\n

        Not all HTTP servers support the HEAD method though, so a slightly worse alternative is:

        curl --max-filesize 1 -s -w \"%{http_code}\\n\" -o /dev/null http://www.example.com/\n

        --max-filesize 1 will download something more than 0 bytes, though usually not the whole file. Notably, curl does not seem to tell the truth about how many bytes were actually downloaded when using --max-filesize 1 -w '%{size_download}'. This can be verified by starting a simple HTTP server (eg: python3 -m http.server), running tcpflow -c, and then requesting the root dir of your simple HTTP server. You will see the directory listing being sent across the wire, but curl will report 0 bytes downloaded. The true download size depends on what file is being downloaded and how big it is. For large binary files, I have observed curl reporting 0 bytes.
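
        A sketch of that verification on Linux, assuming port 8000 and loopback interface lo (run each command in its own terminal):

        python3 -m http.server 8000\nsudo tcpflow -c -i lo port 8000\ncurl --max-filesize 1 -s -w '%{size_download}\\n' -o /dev/null http://localhost:8000/\n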

        "},{"location":"curl/#request-a-specific-vhost-from-a-server","title":"Request a specific vhost from a server","text":"

        This is useful for testing production code on non-production multi-tenant name-based virtual hosts.

        curl -H 'Host: www.domain.com' http://example.com\n
        "},{"location":"curl/#get-the-length-of-the-file-to-be-downloaded","title":"Get the length of the file to be downloaded","text":"
        curl -qI  https://www.google.com/index.php 2>/dev/null | awk '/Length/ {print $2}'\n
        "},{"location":"curl/#fetch-only-http-headers-not-content","title":"Fetch only HTTP headers, not content","text":"
        curl -I http://www.example.com/some_huge_file.iso\n
        "},{"location":"curl/#send-post-variables","title":"Send POST variables","text":"
        curl --data \"user=foo&pass=bar\" http://example.com/login.php\n
        "},{"location":"curl/#scrape-urls-from-a-page","title":"Scrape URLs from a page","text":"

        This appears to have problems with some strings. For instance, this doesn't catch the full https://accounts.google.com string. The regex is correct according to http://regexpal.com, but egrep is apparently not handling it correctly.

        curl -s http://www.google.com | egrep -o '(((https?|ftp|gopher)://|(mailto|file|news):)[^\u2019 <>\\n\"]+|(www|web|w3)\\.[-a-z0-9.]+)[^\u2019 .,;<>\":]'\n
        "},{"location":"curl/#use-curl-to-fetch-the-current-rfc-2822-time","title":"Use curl to fetch the current RFC 2822 time","text":"

        If you don't have NTP you can use this to manually feed the current time into date -s to set your system clock to within a few seconds of accuracy.

        curl -sIH 'Cache-Control: no-cache' example.org | grep '^Date'\n

        Using GNU cut and date (ie: on Linux, such as on a Raspberry Pi image that does not have NTP properly set up) you can set your time using this command:

        sudo date -s \"$(curl -sIH 'Cache-Control: no-cache' example.org | grep '^Date:' | cut -f 1 -d ' ' --complement)\"\n
        "},{"location":"curl/#links","title":"Links","text":"
        • Release Notes
        "},{"location":"curl/#see-also","title":"See Also","text":"
        • aria2
        • httpie
        • httpstat - download and show some useful connection information
        • wget
        "},{"location":"cut/","title":"cut","text":"

        \"cut out selected portions of each line of a file\" - man cut

        "},{"location":"cut/#examples","title":"Examples","text":""},{"location":"cut/#keep-only-the-selected-characters","title":"keep only the selected characters","text":"

        cut is 1-indexed

        $ echo {0..9}\n0 1 2 3 4 5 6 7 8 9\n$ echo {0..9} | cut -c 1\n0\n$ echo -n {0..9} | cut -c 1-5\n0 1 2\n$ echo -n {0..9} | cut -c 1,19\n09\n$ echo -n {a..z} | sed 's/ //g' | cut -c 10-20\njklmnopqrst\n
        "},{"location":"cut/#keep-only-the-given-field-index","title":"keep only the given field index","text":"

        By default, cut works with tab delimited fields, which is not really useful. You can specify a different field delimiter with -d

        $ echo {a..z} | cut -d ' ' -f 3\nc\n$ echo {a..z} | cut -d ' ' -f 3-10\nc d e f g h i j\n$ echo 'a^b^c' | cut -d'^' -f 3\nc\n

        It's not really intuitive though, because every single space separates one field from the next, even if the field is null:

        $ echo 'a b c d' | cut -d ' ' -f 1-3\na b c\n$ echo 'a  b  c  d' | cut -d ' ' -f 1-3\na  b\n

        Using -w allows all consecutive whitespace to be treated as one separator, which is usually the desired behavior, but then the output fields are separated by tabs. (-w is a BSD cut feature not present in GNU cut.)

        $ echo 'a  b  c  d' | cut -w -f 1-3\na       b       c\n
        "},{"location":"cut/#see-also","title":"See Also","text":"
        • cut is often used with tr
        • awk is what I usually reach for instead of cut when working with words.
        "},{"location":"d2/","title":"d2 diagram language","text":"

        \"Create beautiful diagrams in minutes. Simple syntax. Endlessly customizable. D2 is the fastest and easiest way to get a mental model from your head onto the screen, then make edits with your team.\" - https://d2lang.com

        "},{"location":"d2/#links","title":"Links","text":"
        • https://d2lang.com
        • https://play.d2lang.com
        "},{"location":"d2/#see-also","title":"See also","text":"
        • https://mermaid.js.org
        • http://www.plantuml.com
        • https://text-to-diagram.com
        "},{"location":"dasel/","title":"dasel","text":"

        \"Select, put and delete data from JSON, TOML, YAML, XML and CSV files with a single tool. Supports conversion between formats and can be used as a Go package.\" - https://github.com/TomWright/dasel

        "},{"location":"dasel/#example-usage","title":"Example usage","text":""},{"location":"dasel/#convert-between-formats","title":"Convert between formats","text":"

        yaml to toml

        dasel --read yaml --write toml --file vector.yaml > vector.toml\n

        json to yaml with a data filter

        dasel -r json -w yaml -f ansible-facts.json '.ansible_facts.ansible_default_ipv4'\n
        "},{"location":"dasel/#restructure-a-pagerduty-csv","title":"Restructure a pagerduty csv","text":"

        Download the csv using this shell function that uses BSD (macOS) date and open:

        pagerduty-csv-download() {\n  TZ=America/Los_Angeles\n  past=\"$(date -v-7d \"+%FT%T\")\"\n  present=\"$(date \"+%FT%T\")\"\n  open \"$(date \"+https://company-name.pagerduty.com/api/v1/reports/raw/incidents.csv?since=${past}&until=${present}&time_zone=${TZ}\")\"\n}\n

        Then restructure it using --format to interpolate variables into a template string:

        dasel -f incidents.csv -w json -m --format '{{ .created_on }} https://company-name.pagerduty.com/incidents/{{ .id }} {{ .description }}' '.[*]'\n

        The output will be something like:

        2022-02-02T20:02:02-08:00 https://company-name.pagerduty.com/incidents/Q0ZL9NU2 [FIRING:1] TargetDown (cat-downloader)\n
        "},{"location":"dasel/#pretty-format-a-gpx-file","title":"Pretty format a gpx file","text":"

        This is useful for comparing two files, for instance where one may have been appended to and you need to verify the change.

        dasel -r xml -f 2022-01-02-18-20-00.gpx > old.gpx\n
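
        To finish the comparison (the second file name here is hypothetical), pretty-format both files the same way and diff the results:

        dasel -r xml -f 2022-01-03-10-00-00.gpx > new.gpx\ndiff old.gpx new.gpx\n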

        Keep in mind though that pretty-formatted gpx files take up significantly more space.

        "},{"location":"dasel/#compact-format-a-gpx-file","title":"Compact format a gpx file","text":"

        dasel supports compact formatting, which can save disk space by eliminating whitespace characters. In dasel 1.x this is -c, but in 2.x it is --pretty=false.

        dasel -r xml -f books.xml --pretty=false\n

        My tests show this compact output saving ~25% in gpx files compared to a formatted gpx file using whitespace, and 15% compared to a gpx file using tabs.

        "},{"location":"data/","title":"data","text":"

        General information about data.

        "},{"location":"data/#links","title":"Links","text":"
        • https://en.wikipedia.org/wiki/Information_assurance
        • https://en.wikipedia.org/wiki/CAP_theorem
        • https://raft.github.io / http://thesecretlivesofdata.com/raft/
        "},{"location":"datasette/","title":"Datasette","text":"

        \"An open source multi-tool for exploring and publishing data\" - https://docs.datasette.io

        "},{"location":"datasette/#links","title":"Links","text":"
        • https://github.com/simonw/datasette
        • https://docs.datasette.io
        • sqlite
        "},{"location":"datasette/#examples","title":"Examples","text":""},{"location":"datasette/#start-a-server","title":"Start a server","text":"

        If you're starting from scratch, just touch existing-file.db before running this. -o opens your browser automatically.

        datasette existing-file.db -o\n
        "},{"location":"datasette/#ingest-a-json-file","title":"Ingest a json file","text":"

        You can use sqlite-utils to ingest a json file into a sqlite table, which can then be explored in datasette

        curl -s \"https://hub.docker.com/v2/repositories/ubuntu/\" |\njq .results |\nsqlite-utils insert datasette.db docker/docker.io/ubuntu -\n
        "},{"location":"date/","title":"date","text":"

        The date shell command

        date behaves differently between GNU and BSD. On OS X you can install GNU date by running brew install coreutils

        "},{"location":"date/#gnu-date","title":"GNU date","text":""},{"location":"date/#show-adjusted-datetime","title":"Show adjusted date/time","text":"
        date -d -2month # two months ago\ndate -d +1hour # one hour in the future\ndate -d +15minute\ndate -d \"last week + 1 hour\"\ndate -d \"january 10 1978 + 5 years\" +%a\n
        "},{"location":"date/#convert-a-string-date-to-epoch-seconds","title":"Convert a string date to epoch seconds","text":"
        date -d \"Fri Sep  7  2:00 2012\" +%s\n
        "},{"location":"date/#convert-epoch-seconds-to-string-date","title":"Convert epoch seconds to string date","text":"
        date -d @1375899534\n
        "},{"location":"date/#output-various-rfc-3339-time-formats","title":"Output various RFC 3339 time formats","text":"
        date --rfc-3339=date\ndate --rfc-3339=seconds\ndate --rfc-3339=ns\n
        "},{"location":"date/#show-and-number-all-previous-weeks-from-one-year-ago","title":"Show and number all previous weeks from one year ago","text":"
        for X in {1..53} ; do printf \"%02s \" ${X} ; date -d -49weeks-2days+${X}week \"+%b %d %Y\" ; done ;\n
        "},{"location":"date/#show-and-number-all-weeks-from-the-point-i-started-working-at-zoosk","title":"Show and number all weeks from the point I started working at Zoosk","text":"
        for X in {1..90} ; do printf \"%02s \" ${X} ; date -d \"June 10 2013 - 1 week + ${X} week\" \"+%a %b %d %Y\" ; done ;\n
        "},{"location":"date/#show-how-many-seconds-old-i-am","title":"Show how many seconds old I am","text":"
        echo \"$(date +%s) - $(date -d \"January 10 1978 7:46pm\" +%s)\" | bc\n
        "},{"location":"date/#show-subsecond-date-without-going-full-nano","title":"Show subsecond date, without going full nano","text":"
        for X in {1..100} ; do date +%s.%N | cut -c1-15 ; done ;\n
        "},{"location":"date/#sleep-until-the-next-5-minute-0-seconds-mark","title":"Sleep until the next 5 minute 0 seconds mark","text":"
        while sleep $(date \"+60 - %S.%N\" | bc) 240 ; do date \"+%F %T.%N\" ; done ;\n
        "},{"location":"date/#show-all-format-strings-and-their-values","title":"Show all format strings and their values","text":"
        $ man date | awk '/[[:space:]]%/ {X = $1 ; $1 = \"\" ; print X,$0}' | while read -r Y Z ; do date \"+%$Y^$Y^${Z//%/%%}\" ; done | column -t -s^\n%      %                                MON literal %\n%a     Mon                              locale's abbreviated weekday name (e.g., Sun)\n%A     Monday                           locale's full weekday name (e.g., Sunday)\n%b     Nov                              locale's abbreviated month name (e.g., Jan)\n%B     November                         locale's full month name (e.g., January)\n%c     Mon 22 Nov 2021 10:33:55 AM PST  locale's date and time (e.g., Thu Mar 3 23:05:25 2005)\n%C     20                               century; like %Y, except omit last two digits (e.g., 20)\n%d     22                               day of month (e.g., 01)\n%D     11/22/21                         date; same as %m/%d/%y\n%e     22                               day of month, space padded; same as %_d\n%F     2021-11-22                       full date; like %+4Y-%m-%d\n%g     21                               last two digits of year of ISO week number (see %G)\n%G     2021                             year of ISO week number (see %V); normally useful only with %V\n%h     Nov                              same as %b\n%H     10                               hour (00..23)\n%I     10                               hour (01..12)\n%j     326                              day of year (001..366)\n%k     10                               hour, space padded ( 0..23); same as %_H\n%l     10                               hour, space padded ( 1..12); same as %_I\n%m     11                               month (01..12)\n%M     33                               minute (00..59)\n%n                                      a newline\n%N     258608657                        nanoseconds (000000000..999999999)\n%p     AM                               locale's equivalent of either AM or PM; blank if not known\n%P     am                               like %p, but lower case\n%q     4                                quarter of year (1..4)\n%r     10:33:55 AM                      locale's 12-hour clock time (e.g., 11:11:04 PM)\n%R     10:33                            24-hour hour and minute; same as %H:%M\n%s     1637606035                       seconds since 1970-01-01 00:00:00 UTC\n%S     55                               second (00..60)\n%t                                      a tab\n%T     10:33:55                         time; same as %H:%M:%S\n%u     1                                day of week (1..7); 1 is Monday\n%U     47                               week number of year, with Sunday as first day of week (00..53)\n%V     47                               ISO week number, with Monday as first day of week (01..53)\n%w     1                                day of week (0..6); 0 is Sunday\n%W     47                               week number of year, with Monday as first day of week (00..53)\n%x     11/22/2021                       locale's date representation (e.g., 12/31/99)\n%X     10:33:55 AM                      locale's time representation (e.g., 23:13:48)\n%y     21                               last two digits of year (00..99)\n%Y     2021                             year\n%z     -0800                            +hhmm numeric time zone (e.g., -0400)\n%:z    -08:00                           +hh:mm numeric time zone (e.g., -04:00)\n%::z   -08:00:00                        +hh:mm:ss numeric time zone (e.g., -04:00:00)\n%:::z  -08                              numeric time zone with : to 
necessary precision (e.g., -04, +05:30)\n%Z     PST                              alphabetic time zone abbreviation (e.g., EDT)\n
        "},{"location":"date/#bsd-date","title":"BSD date","text":""},{"location":"date/#show-adjusted-datetime_1","title":"Show adjusted date/time","text":"
        date -v-2m # two months ago\ndate -v+1H # one hour in the future\n
        "},{"location":"date/#convert-epoch-seconds-to-string-date_1","title":"Convert epoch seconds to string date","text":"
        date -r 1514308711\n
        "},{"location":"date/#see-also","title":"See also","text":"
        • ntp
        • ptp
        • time
        "},{"location":"dcfldd/","title":"dcfldd","text":"

        dcfldd is an advanced version of dd which is more useful than pv in some situations.

        One simple advantage dcfldd has over dd is a progress counter displayed by default, although even with dd you can see progress by pressing ctrl-t on systems that support SIGINFO (eg: BSD and macOS). However, if all you need is a progress display, pv is really your best bet.

        Another useful advantage dcfldd has is the ability to specify hex and ASCII patterns, as well as the output of a command as the source. You may also specify multiple outputs.
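
        For instance, a sketch writing an ASCII pattern to two disks at once (device names hypothetical):

        dcfldd textpattern=WIPED of=/dev/rdisk8 of=/dev/rdisk9\n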

        "},{"location":"dcfldd/#examples","title":"Examples","text":""},{"location":"dcfldd/#wipe-a-hard-disk","title":"Wipe a hard disk","text":"

        This wipes hard disk /dev/rdisk9 with the binary pattern 10101010 (hex AA).

        dcfldd pattern=AAAA of=/dev/rdisk9\n
        "},{"location":"dcfldd/#resume-wiping-a-hard-disk","title":"Resume wiping a hard disk","text":"

        You can use seek to skip past the first N blocks on the destination disk. If you have to resume multiple times, perhaps the best option is to use bash's arithmetic expansion to add up the number of blocks written.

        $ dcfldd pattern=AAAA of=/dev/rdisk3\n3328 blocks (104Mb) written.^C\n3466+0 records in\n3465+0 records out\n$ dcfldd pattern=AAAA of=/dev/rdisk3 seek=3328\n2936064 blocks (91752Mb) written.^C\n2936132+0 records in\n2936131+0 records out\n$ dcfldd pattern=AAAA of=/dev/rdisk3 seek=$((3328+2936064))\n
        "},{"location":"dcfldd/#view-progress-with-pv","title":"View progress with pv","text":"

        pv is useful for seeing the transfer rate of the pipe, which can help diagnose continued success or lack thereof with failing hard disks.

        root# dcfldd pattern=AAAA | pv | dcfldd of=/dev/rdisk3 seek=$((4192000+504000+10240000+2936064))\n512 blocks (16Mb) written.22.1MiB 0:00:07 [21.7MiB/s] [   <=>\n1280 blocks (40Mb) written.43.5MiB 0:00:08 [21.5MiB/s] [    <=>\n2304 blocks (72Mb) written.79.4MiB 0:00:09 [35.9MiB/s] [      <=>\n3584 blocks (112Mb) written. 114MiB 0:00:10 [35.2MiB/s] [       <=>\n
        "},{"location":"dcfldd/#see-also","title":"See Also","text":"
        • dd
        • ddrescue
        • pv
        "},{"location":"dcgc/","title":"Docker Custodian","text":"

        \"Keep docker hosts tidy\" - https://github.com/Yelp/docker-custodian

        "},{"location":"dcgc/#examples","title":"Examples","text":""},{"location":"dcgc/#sync-script","title":"Sync script","text":"

        This script will pull the latest version of some containers and delete old containers and images

        #!/usr/bin/env bash\n\ncontainers=(\n  debian:latest\n  homeassistant/home-assistant:latest\n  linuxserver/unifi:latest\n  ubuntu:xenial\n  yelp/docker-custodian:latest\n  )\n\nfor c in \"${containers[@]}\" ; do\n  docker pull \"$c\"\n  keepers+=( \"--exclude-image\" \"$c\" )\ndone\n\ndocker run -ti -v /var/run/docker.sock:/var/run/docker.sock yelp/docker-custodian \\\n  dcgc \"${keepers[@]}\" --dangling-volumes --max-image-age 10w --max-container-age 5w\n
        "},{"location":"dcraw/","title":"dcraw","text":"

        \"dcraw decodes raw photos, displays metadata, and extracts thumbnails.\" - man dcraw

        "},{"location":"dcraw/#examples","title":"Examples","text":""},{"location":"dcraw/#identify-corrupt-dng-files","title":"Identify corrupt DNG files","text":"
        find /photos -iname '*.dng' | xargs dcraw -i > /dev/null 2> corrupt-files.txt\n
        "},{"location":"dd-wrt/","title":"dd-wrt","text":"

        \"DD-WRT is a Linux based alternative OpenSource firmware suitable for a great variety of WLAN routers and embedded systems.\" - https://www.dd-wrt.com

        "},{"location":"dd-wrt/#netgear-r7000","title":"Netgear R7000","text":"
        • https://www.myopenrouter.com/downloads/dd-wrt-r7000
        • http://www.desipro.de/ddwrt/K3-AC-Arm/
        "},{"location":"dd-wrt/#restart-script","title":"Restart script","text":"

        This device with dd-wrt has caused me so much trouble I have to monitor it and reboot it when it fails. Here is a short script to do that. I have this set up in cron to run every 5 minutes. The router will not reboot unless it's been up for 10 minutes.

        fping -q google.com || {\n  date '+%F %T%z Router is locked up. Restarting it.' | tee -a \"${HOME}/router_reboot.log\"\n  ssh root@192.168.1.1 'set -x ; uptime ; awk \"int(\\$1) < 600 { exit 1 }\" /proc/uptime && reboot ;' | tee -a \"${HOME}/router_reboot.log\"\n}\n
        "},{"location":"dd-wrt/#version-notes","title":"Version notes","text":""},{"location":"dd-wrt/#2015-12-24-v30-r28598-kongac","title":"2015-12-24 v3.0-r28598 kongac","text":"
        • Cannot edit DHCP reservations. Can only push and pop from the list, not edit the added entries.
        "},{"location":"dd-wrt/#2015-12-03-v30-r28600m-kongac","title":"2015-12-03 v3.0-r28600M kongac","text":"
        • No observed differences from v3.0-r28598
        "},{"location":"dd-wrt/#2017-01-18-v30-r31160m-kongac","title":"2017-01-18 v3.0-r31160M kongac","text":"
        • General instability. Periodic lockups requiring power cycle to fix.
        • Potential weirdness playing with other wifi access points, unable to roam from this to Airport AC as I used to.
        "},{"location":"dd-wrt/#2017-03-10-v30-r31520m-kongac","title":"2017-03-10 v3.0-r31520M kongac","text":"
        • http://www.dd-wrt.com/phpBB2/viewtopic.php?p=1071890
        • Installed 2017-03-20
        • Experienced hard lock within 24 hours, had to power cycle to fix. Found posts in dd-wrt forum about other folks experiencing the same issue.
        "},{"location":"dd-wrt/#2017-03-26-v30-r31575m-kongac","title":"2017-03-26 v3.0-r31575M kongac","text":"
        • Installed on 2017-03-21
        • Appears to have fixed the hard lock-ups
        "},{"location":"dd-wrt/#2017-03-26-v30-r31780m-kongac","title":"2017-03-26 v3.0-r31780M kongac","text":"
        • Installed on 2017-03-31 via ddup --flash-latest. First attempt failed. Rebooted, and second attempt worked.
        • Never had any problems with this
        "},{"location":"dd-wrt/#2017-03-31-v30-r31800m-kongac","title":"2017-03-31 v3.0-r31800M kongac","text":"
        • Installed on 2017-04-01
        • 1 router lockup 3 days after installation
        • 2 router lockups on day 4
        "},{"location":"dd-wrt/#2017-04-08-v30-r31830m-kongac","title":"2017-04-08 v3.0-r31830M kongac","text":"
        • Installed on 2017-04-07
        • Locked up after 10 days
        "},{"location":"dd-wrt/#2017-04-16-v30-r31870m-kongac","title":"2017-04-16 v3.0-r31870M kongac","text":"
        • Installed on 2017-04-17
        • Router locked up after 4 days
        "},{"location":"dd-wrt/#2017-04-30-v30-r31920m-kongac","title":"2017-04-30 v3.0-r31920M kongac","text":"
        • Installed on 2017-05-02
        • Had periodic lockups
        "},{"location":"dd-wrt/#2017-05-11-v30-r31980m-kongac","title":"2017-05-11 v3.0-r31980M kongac","text":"
        • Installed on 2017-05-14
        • Experienced wifi problem, dhcp problem, and routing problems within 24 hours
        • Experienced the same problems within 12 hours after reboot
        • Uptime peak is 9 days
        "},{"location":"dd-wrt/#2017-06-03-v30-r32170m-kongac","title":"2017-06-03 v3.0-r32170M kongac","text":"
        • Installed on 2017-06-08
        • Sometimes wireless clients are unable to connect to the network.
        • Sometimes the router becomes inaccessible to clients even though it is still up.
        "},{"location":"dd-wrt/#2017-08-02-v30-r33000m-kongac","title":"2017-08-02 v3.0-r33000M kongac","text":"
        • Installed on 2017-08-05
        • Quite stable
        "},{"location":"dd-wrt/#2017-10-22-v30-r33575m-kongac","title":"2017-10-22 v3.0-r33575M kongac","text":"
        • Installed on 2017-10-22
        • Seeing lock-ups and wifi unavailability after 24 hours
        • Seeing more lock-ups and wifi unavailability hours after restart
        "},{"location":"dd-wrt/#2017-11-03-v30-r33655m-kongac","title":"2017-11-03 v3.0-r33655M kongac","text":"
        • Installed on 2017-11-04
        • WiFi instability observed within hours
        "},{"location":"dd-wrt/#2017-11-03-v30-r33675m-kongac","title":"2017-11-03 v3.0-r33675M kongac","text":"
        • 2017-12-25: Discovered this firmware had been installed
        "},{"location":"dd-wrt/#2018-01-03-v30-r34320m-kongac","title":"2018-01-03 v3.0-r34320M kongac","text":"
        • Installed on 2018-01-16
        • DD-WRT v3.0-r34320M kongac (c) 2017 NewMedia-NET GmbH
        • Release: 01/03/18
        • http://www.desipro.de/ddwrt/K3-AC-Arm/TEST/dd-wrt.v24-K3_AC_ARM_STD.bin
        • Frequent lock-ups
        "},{"location":"dd-wrt/#2018-02-11-v30-r34900m-kongac","title":"2018-02-11 v3.0-r34900M kongac","text":"
        • Installed on 2018-02-16
        • DD-WRT v3.0-r34900M kongac (c) 2018 NewMedia-NET GmbH
        • Release: 02/11/18
        • Very unstable wifi
        "},{"location":"dd-wrt/#2018-02-19-v30-r35030m-kongac","title":"2018-02-19 v3.0-r35030M kongac","text":"
        • Installed on 2018-02-24 (?)
        • DD-WRT v3.0-r35030M kongac (c) 2018 NewMedia-NET GmbH
        • Release: 02/19/18
        • This version cannot seem to stay up for more than 12 hours without losing connectivity.
        "},{"location":"dd-wrt/#2018-04-04-v30-r35550m-kongac","title":"2018-04-04 v3.0-r35550M kongac","text":"
        • Installed on 2018-04-04
        • DD-WRT v3.0-r35550M kongac (03/28/18)
        • Still having stability problems, didn't make it 2h before needing a reboot.
        "},{"location":"dd/","title":"dd","text":"

        Disk Dump, used for cloning disks, wiping data, copying blocks of data.
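
        A minimal cloning sketch (assuming /dev/sda is the source and /dev/sdb the destination; triple-check device names, and note that status=progress is GNU dd only):

        dd if=/dev/sda of=/dev/sdb bs=4M conv=noerror,sync status=progress\n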

        "},{"location":"dd/#tips-and-tricks","title":"Tips and Tricks","text":""},{"location":"dd/#write-random-data","title":"Write random data","text":"
        dd if=/dev/urandom of=/dev/hda\n
        "},{"location":"dd/#write-zeros","title":"Write zeros","text":"
        dd if=/dev/zero of=/dev/hda\n
        "},{"location":"dd/#wipe-a-failed-disk","title":"Wipe a failed disk","text":"

        If you can't use shred or ddrescue, this is a very slow but portable alternative

        i=0\nwhile true ; do\n  echo \"Writing block $i\"\n  dd if=/dev/zero of=/dev/sda count=1 bs=1 seek=\"$i\"\n  let i=i+1\ndone\n
        "},{"location":"dd/#wipe-first-and-last-1g-of-a-hard-disk","title":"Wipe first and last 1G of a hard disk","text":"
        ## wipe the first 1GiB (262144 blocks of 4096 bytes)\ndd bs=4096 if=/dev/zero of=/dev/sdx count=262144\n## wipe the last 1GiB; blockdev --getsz reports 512-byte sectors, so /8 converts to 4096-byte blocks\ndd bs=4096 if=/dev/zero of=/dev/sdx count=262144 seek=$(( $(blockdev --getsz /dev/sdx) / 8 - 262144 ))\n
        "},{"location":"dd/#see-also","title":"See Also","text":"
        • dcfldd
        • ddrescue
        • pv
        "},{"location":"ddrescue/","title":"ddrescue","text":"

        \"GNU ddrescue - Data recovery tool. Copies data from one file or block device to another, trying to rescue the good parts first in case of read errors.\" - man ddrescue

        There are actually two tools called ddrescue: dd_rescue and gddrescue. gddrescue is the best.

        • Software home page - http://www.gnu.org/software/ddrescue/
        • Instruction Manual - http://www.gnu.org/software/ddrescue/manual/ddrescue_manual.html
        "},{"location":"ddrescue/#examples","title":"Examples","text":""},{"location":"ddrescue/#attempt-to-mirror-an-entire-hard-disk","title":"Attempt to mirror an entire hard disk","text":"
        ddrescue -f -n --min-read-rate=500000 /dev/source_disk /dev/target_disk ~/ddrescue.log\n

        This uses the minimum read rate to skip sectors that may be bad on the input device.

        "},{"location":"ddrescue/#wipe-a-hard-disk-and-log-bad-sectors","title":"Wipe a hard disk and log bad sectors","text":"
        sudo ddrescue --force /dev/zero /dev/disk/by-id/ata-foo ~/ddrescue-ata-foo.log\n

        You can re-run this exact same command to resume the wipe of a hard disk.

        "},{"location":"ddrescue/#attempt-to-continue-the-mirror-of-a-hard-disk","title":"Attempt to continue the mirror of a hard disk","text":"
        ddrescue -f -n -A /dev/source_disk /dev/target_disk ~/ddrescue.log\n
        "},{"location":"ddrescue/#wipe-the-good-sectors-of-a-failing-disk","title":"Wipe the good sectors of a failing disk","text":"

        This requires a valid rescue log file mapping out the good sectors that were recovered.

        ddrescue --fill=+ --force /dev/zero /dev/bad_drive ~/bad_drive_wipe.log\n
        "},{"location":"ddrescue/#see-also","title":"See Also","text":"
        • dcfldd
        • dd
        • pv
        "},{"location":"deb/","title":"deb","text":"

        Notes and tips about working with the .deb package format.

        "},{"location":"deb/#examples","title":"Examples","text":""},{"location":"deb/#show-packages-that-can-be-updated","title":"Show packages that can be updated","text":"
        apt list --upgradable\n
        "},{"location":"deb/#show-installed-package-versions","title":"Show installed package versions","text":"
        ## -V = sort by version (GNU sort only)\n/usr/bin/dpkg-query -W --showformat '${Package} ${Version} ${Status}\\n' | sort -k2 -V | column -t\n
        "},{"location":"deb/#list-files-in-packages-that-are-available-in-configured-repositories","title":"List files in packages that are available in configured repositories","text":"
        apt-file list package_name\n
        "},{"location":"deb/#find-a-file-available-inside-packages-that-are-available-in-configured-repositories","title":"Find a file available inside packages that are available in configured repositories","text":"
        apt-file find libmysqlclient.so\n
        "},{"location":"deb/#show-a-list-of-packages-that-are-installed-or-have-left-things-on-the-filesystem","title":"Show a list of packages that are installed or have left things on the filesystem","text":"
        dpkg --list\n
        "},{"location":"deb/#show-which-package-a-file-came-from","title":"Show which package a file came from","text":"
        dpkg -S /bin/bash\n
        "},{"location":"deb/#list-files-in-package-that-is-installed","title":"List files in package that is installed","text":"
        dpkg-query -L klibc-utils\n
        "},{"location":"deb/#list-files-in-package-that-is-not-installed","title":"List files in package that is not installed","text":"
        dpkg -c package.deb\n
        "},{"location":"deb/#list-packages-available-in-the-repository","title":"List packages available in the repository","text":"
        apt-cache dumpavail\n
        "},{"location":"deb/#show-information-about-a-package","title":"Show information about a package","text":"
        apt-cache show coreutils\n
        "},{"location":"deb/#show-reverse-dependencies-of-a-package","title":"Show reverse dependencies of a package","text":"
        apt-cache rdepends ec2-api-tools\n
        "},{"location":"deb/#show-reverse-dependencies-of-installed-package","title":"Show reverse dependencies of installed package","text":"
        aptitude why openjdk-7-jre-headless\n
        "},{"location":"deb/#re-install-many-packages-and-validate-that-they-were-re-installed","title":"Re-install many packages and validate that they were re-installed","text":"

        When apt-get install --reinstall isn't good enough, this is the next option. This should not be done unless you're willing to reload the system if it fails.

        ## Generate a list of packages\ndpkg -l | grep 'python-' > dpkg-l-python ;\n\n## Remove and re-install each individual package one at a time\nawk '{print $2,$3}' dpkg-l-python |\n  while read -r p v ; do\n    echo \"Working on $p version $v\" ;\n    sudo dpkg --purge --force-depends \"$p\" ;\n    sudo apt-get install \"${p}=${v}\" ;\n  done ;\n\n## Validate that all packages are re-installed with the right version\nawk '{print $2,$3}' dpkg-l-python |\n  while read -r p v ; do\n    dpkg -l \"$p\" | grep \"$v\" || echo \"ERROR: Problem with $p $v\" ;\n  done ;\n
        "},{"location":"deb/#links","title":"Links","text":"
        • https://wiki.debian.org/RPM
        "},{"location":"debian/","title":"debian","text":"

        \"Debian is a free operating system, developed and maintained by the Debian project.\" - https://www.debian.org/intro

        Debian is a solid linux distribution that serves as the upstream base for many other linux distributions, including Ubuntu and Raspberry Pi OS.

        "},{"location":"defaults/","title":"defaults","text":"

        defaults allows users to read, write, and delete Mac OS X user defaults from a command-line shell.
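
        Reading and deleting follow the same pattern, eg:

        defaults read com.apple.TextEdit\ndefaults read com.apple.TextEdit SmartQuotes\ndefaults delete com.apple.TextEdit SmartQuotes\n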

        "},{"location":"defaults/#examples","title":"Examples","text":""},{"location":"defaults/#set-some-boolean-values","title":"Set some boolean values","text":"
        defaults write NSGlobalDomain     NSAutomaticQuoteSubstitutionEnabled -bool false\ndefaults write NSGlobalDomain     NSAutomaticDashSubstitutionEnabled  -bool false\ndefaults write com.apple.TextEdit SmartQuotes                         -bool false\ndefaults write com.apple.TextEdit SmartDashes                         -bool false\n
        "},{"location":"defaults/#add-a-value-an-array-to-a-dict","title":"Add a value (an array) to a dict","text":"
        FILENAME=\"${HOME}/Library/Preferences/com.googlecode.iterm2.plist\"\ndefaults write \"${FILENAME}\" GlobalKeyMap -dict-add 0xf703-0x280000 '{ Action = 10; Text = f; }'\ndefaults write \"${FILENAME}\" GlobalKeyMap -dict-add 0xf702-0x280000 '{ Action = 10; Text = b; }'\n
        "},{"location":"devops/","title":"devops","text":""},{"location":"devops/#devops-lifecycle","title":"DevOps lifecycle","text":"
        • Plan - Jira, Github tickets, Gitlab Milestones
        • Code - Git, Eclipse, pycharm
        • Build - Ant, Maven, Gradle
        • Test - Selenium, JUnit
        • Release - Gitlab CI, Jenkins
        • Deploy - Puppet, Chef, Ansible, Saltstack
        • Operate - Linux, Chrome, iOS
        • Monitor - Sensu, Splunk, Nagios, Kibana
        "},{"location":"devops/#links","title":"Links","text":"
        • The 9 Circles of Deployment Hell
        • Raft: Understandable distributed consensus - Good visualization of the Raft algorithm.
        • https://aws.amazon.com/devops/what-is-devops/
        • https://www.reddit.com/r/devops/comments/aqc8cj/interested_in_getting_into_devops_start_here/
        • https://landing.google.com/sre/books/
        • https://web.devopstopologies.com/ Team topologies, not technology stack topologies.
        "},{"location":"dhcp/","title":"DHCP","text":"

        Dynamic Host Configuration Protocol

        "},{"location":"dhcp/#isc-dhcpd","title":"isc dhcpd","text":"

        https://www.isc.org/downloads/dhcp/

        "},{"location":"dhcp/#test-configuration-file","title":"Test configuration file","text":"
        dhcpd3 -t\n
        "},{"location":"dhcp/#test-lease-file","title":"Test lease file","text":"
        dhcpd3 -T\n
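
        On newer systems the binary is simply dhcpd, and you can point the test at a specific config file with -cf (the path shown is the Debian default; adjust as needed):

        dhcpd -t -cf /etc/dhcp/dhcpd.conf\n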
        "},{"location":"dhcp/#handshake-process","title":"Handshake Process","text":"
        Apr 21 15:33:00 ops1prod dhcpd: DHCPDISCOVER from 08:9e:01:8b:18:94 via eth0\nApr 21 15:33:01 ops1prod dhcpd: DHCPOFFER on 10.1.14.127 to 08:9e:01:8b:18:94 via eth0\nApr 21 15:33:01 ops1prod dhcpd: DHCPREQUEST for 10.1.225.43 from 00:1e:0b:bc:8a:c4 via eth1\nApr 21 15:33:01 ops1prod dhcpd: DHCPACK on 10.1.225.43 to 00:1e:0b:bc:8a:c4 via eth1\n
        "},{"location":"dhcpd.conf/","title":"dhcpd.conf","text":"

        This is the configuration file for the ISC DHCP daemon (dhcpd) on linux.

        "},{"location":"dhcpd.conf/#example-config","title":"Example config","text":"
        ###\n## Pikachu dhcpd.conf for redacted ny1 facility.\n###\n\n\nddns-update-style none ;\ndeny bootp ;    #default\nauthoritative ;\n\noption domain-name \"ny1.redacted.net\" ;\noption subnet-mask 255.255.255.0 ;\noption domain-name-servers 10.8.5.220 ;\noption ntp-servers ntp.ny1.redacted.net, pool.ntp.org ;\noption time-servers ntp.ny1.redacted.net, pool.ntp.org ;\n#option time-offset -25200 ;\noption time-offset -28800 ;\ndefault-lease-time 28800 ;\nmax-lease-time 7200 ;\n\noption boot-server code 66 = string ;\n\n## APC Cookie bullshit\noption vendor-encapsulated-options 01:04:31:41:50:43;\n\n### Old Net 188\nsubnet 10.172.188.0 netmask 255.255.255.0\n{\n    option routers 10.172.188.1 ;\n\n    range   10.172.188.3    10.172.188.195 ;\n#   host winserv    {   hardware ethernet 00:11:2f:37:a2:34 ;   fixed-address 10.172.188.196 ;  }\n    range   10.172.188.197  10.172.188.201 ;\n#   host clicktracks    {   hardware ethernet 00:13:20:5B:EF:2A ;   fixed-address 10.172.188.202 ;  }\n    host redactedbeast  {   hardware ethernet 00:30:48:2A:E3:1E ;   fixed-address 10.172.188.203 ;  }\n    range   10.172.188.204  10.172.188.216 ;\n#   host carnage    {   hardware ethernet 00:13:20:5B:E5:B1 ;   fixed-address 10.172.188.217 ;  }\n#   host sipura-2   {   hardware ethernet 00:0E:08:FA:AB:A4 ;   fixed-address 10.172.188.222 ;  }\n    range   10.172.188.226  10.172.188.254 ;\n}\n\n## Services - 10.8.1.0/24 - VLAN 101\n##\n##    There should be NO DHCP RANGE IN THIS SUBNET.\n##    This will keep us in order with what devices are using\n##    what addresses.  Only use pre-defined host-addresses.\n##\nsubnet 10.8.1.0 netmask 255.255.255.0\n{\n    option routers 10.8.1.1 ;\n    host terastation  {   hardware ethernet 00:0d:0b:7a:cd:ea ;   fixed-address 10.8.1.11 ;   }\n    host switchvox    {   hardware ethernet 00:13:d4:e8:c1:2d ;   fixed-address 10.8.1.12 ;   }\n    host eng-svn-1    {   hardware ethernet 00:0C:29:7E:68:DB ;   fixed-address 10.8.1.233 ;  }\n    host eng-esx-1    {   hardware ethernet 00:50:56:47:7e:bc ;   fixed-address 10.8.1.234 ;  }\n}\n\n################\n##\n##  This information is incomplete, make sure to check IP's for usage before assigning them, and double check the Wiki:\n##  https://it.redacted.com/wiki/index.php?title=IP_Addresses_-_ny1#Load_Test_-_10.8.2.0.2F24_-_VLAN_102\n##\n################\n\n## Load Test - 10.8.2.0/24 - VLAN 102\n##\nsubnet 10.8.2.0 netmask 255.255.255.0\n{\n    filename \"pxelinux.0\" ;\n    option subnet-mask 255.255.255.0 ;\n    option broadcast-address 10.8.2.255 ;\n    option routers 10.8.2.1 ;\n    next-server 10.8.2.240 ;\n    range 10.8.2.100 10.8.2.199 ;\n\n    host honey-b-drac   {   hardware ethernet 00:18:8B:40:DC:78 ;   fixed-address 10.8.2.19 ;   }\n## 10.8.2.30-39 reserved for Eng VMs\n    host eng-vm-01  {   hardware ethernet 00:0c:29:b9:3e:bb ;   fixed-address 10.8.2.31 ;   }\n    host eng-vm-25  {   hardware ethernet 00:0c:29:00:35:66 ;   fixed-address 10.8.2.35 ;   }\n    host eng-vm-26  {   hardware ethernet 00:0c:29:69:b2:b9 ;   fixed-address 10.8.2.36 ;   }\n    host eng-vm-27  {   hardware ethernet 00:0c:29:c5:e3:59 ;   fixed-address 10.8.2.37 ;   }\n    host eng-vm-28  {   hardware ethernet 00:0c:29:a0:8b:a4 ;   fixed-address 10.8.2.38 ;   }\n    host eng-vm-29  {   hardware ethernet 00:0c:29:bd:42:7b ;   fixed-address 10.8.2.39 ;   }\n}\n\n## 10.8.2.240-250 reserved for Puppet installs\n    host ion-vm     {   hardware ethernet 00:0c:29:d6:7b:90 ;   fixed-address 10.8.2.253 ;  }\n    host shinseivm  {   hardware 
ethernet 00:0c:29:1d:90:07 ;   fixed-address 10.8.2.252 ;  }\n    host star       {   hardware ethernet 00:03:ba:d9:50:1a ;   fixed-address 10.8.2.251 ;  }\n\n}\n\n## QA Test - 10.8.3.0/24 - VLAN 103\nsubnet 10.8.3.0 netmask 255.255.255.0\n{\n    range 10.8.3.10 10.8.3.200 ;\n    option routers 10.8.3.1 ;\n}\n\n## Professional Services - 10.8.4.0/24 - VLAN 104\nsubnet 10.8.4.0 netmask 255.255.255.0\n{\n    option routers 10.8.4.1 ;\n    range 10.8.4.10 10.8.4.200 ;\n    host caracal        {   hardware ethernet 00:13:72:58:7C:C9 ;   fixed-address 10.8.4.201 ;  }\n    host caracal-drac   {   hardware ethernet 00:13:72:57:86:33 ;   fixed-address 10.8.4.202 ;  }\n}\n\n## IT - 10.8.5.0/24 - VLAN 105\nsubnet 10.8.5.0 netmask 255.255.255.0\n{\n    option routers 10.8.5.1 ;\n    option netbios-name-servers 10.8.5.220 ;\n    option netbios-node-type 8 ;\n    range 10.8.5.10 10.8.5.99 ;\n    host demo-esx-1-drac    {   hardware ethernet 00:1e:4f:25:87:f9 ;   fixed-address 10.8.5.121 ;  }\n    host pikachu        {   hardware ethernet 00:13:46:78:25:20 ;   fixed-address 10.8.5.220 ;  }\n    host mammoth        {   hardware ethernet 00:30:48:20:E4:C2 ;   fixed-address 10.8.5.221 ;  }\n    host hq-esx-1       {   hardware ethernet 00:50:56:45:d1:07 ;   fixed-address 10.8.5.222 ;  }\n    host hq-esx-1-drac  {   hardware ethernet 00:1e:4f:1d:37:87 ;   fixed-address 10.8.5.223 ;  }\n    host eng-esx-2      {   hardware ethernet 00:1e:4f:1d:37:87 ;   fixed-address 10.8.5.223 ;  }\n    host eng-esx-1-drac {   hardware ethernet 00:1e:c9:de:1c:5b ;   fixed-address 10.8.5.224 ;  }\n}\n\n## VPN - 10.8.6.0/24 - VLAN 106\nsubnet 10.8.6.0 netmask 255.255.255.0\n{\n    range 10.8.6.10 10.8.6.200 ;\n    option routers 10.8.6.1 ;\n}\n\n## DMZ - 10.8.6.0/24 - VLAN 107\nsubnet 10.8.7.0 netmask 255.255.255.0\n{\n    range 10.8.7.100 10.8.7.200 ;\n    option routers 10.8.7.1 ;\n    host engineering-ext-1  {   hardware ethernet 00:13:72:53:a3:78 ;   fixed-address 10.8.7.10 ;   }\n}\n\n## Jail network - 10.8.9.0/24 - VLAN 109\nsubnet 10.8.9.0 netmask 255.255.255.0\n{\n    range 10.8.9.100 10.8.9.199 ;\n    option routers 10.8.9.1 ;\n}\n\n## Wireless - 10.8.10.0/24 - VLAN 110\nsubnet 10.8.10.0 netmask 255.255.255.0\n{\n    range 10.8.10.10 10.8.10.200 ;\n    option routers 10.8.10.1 ;\n\n    host linksys-ap-1   {   hardware ethernet 00:0C:41:17:E2:AD ;   fixed-address 10.8.10.201 ; }\n    host linksys-ap-2   {   hardware ethernet 00:0C:41:D7:2D:53 ;   fixed-address 10.8.10.202 ; }\n    host linksys-ap-3   {   hardware ethernet 00:0C:41:DE:23:D8 ;   fixed-address 10.8.10.203 ; }\n    host linksys-ap-4   {   hardware ethernet 00:18:F8:26:D8:46 ;   fixed-address 10.8.10.204 ; }\n    host linksys-ap-5   {   hardware ethernet 00:18:F8:26:D8:51 ;   fixed-address 10.8.10.207 ; }\n    host airport-1      {   hardware ethernet 00:14:51:77:76:4E ;   fixed-address 10.8.10.205 ; }\n    host airport-2      {   hardware ethernet 00:14:51:77:8F:F0 ;   fixed-address 10.8.10.206 ; }\n}\n\n## Polycom phone boot configuration\ngroup {\n    default-lease-time 600 ;\n    max-lease-time 600 ;\n\n    option boot-server \"ftp://sip:sip@phoneboot\" ;\n\n    host 01-c1-b6   {   hardware ethernet 00:04:f2:01:c1:b6 ;   }\n    host 01-71-89   {   hardware ethernet 00:04:f2:01:71:89 ;   }\n    host 01-b6-e1   {   hardware ethernet 00:04:f2:01:b6:e1 ;   }\n    host 01-be-91   {   hardware ethernet 00:04:f2:01:be:91 ;   }\n    host e3-2a-f2   {   hardware ethernet 00:04:f2:e3:2a:f2 ;   }\n}\n
        "},{"location":"diagrams/","title":"diagrams","text":"

        Sometimes a diagram is the best way to communicate an idea.

        "},{"location":"diagrams/#links","title":"Links","text":"
        • https://d2lang.com / https://github.com/terrastruct/d2
        • https://victorbjorklund.com/build-diagrams-as-code-with-d2-d2lang: D2 tutorial
        • https://github.com/mermaid-js/mermaid / https://mermaid.js.org
        "},{"location":"dig/","title":"dig","text":"

        dig (domain information groper) is a flexible tool for interrogating DNS name servers. The syntax for this tool is a bit cryptic and does not follow typical unix conventions.

        "},{"location":"dig/#usage","title":"Usage","text":""},{"location":"dig/#simple-usage","title":"Simple usage","text":"
        dig gwos.com\n
        "},{"location":"dig/#show-only-the-answer-section","title":"Show only the Answer section","text":"
        dig +noall +answer \"zoosk.com\"\n
        "},{"location":"dig/#show-only-the-returned-ip-address","title":"Show only the returned IP Address","text":"
        dig +short myip.opendns.com @resolver1.opendns.com\n
        "},{"location":"dig/#trace-a-query-from-the-root-servers","title":"Trace a query from the root servers","text":"

        This is the most accurate way to get a DNS record as it will appear to anybody else on the internet who has not queried it before, and will show you all the DNS steps involved in the resolution.

        dig +trace yelp.com\n

        If this doesn't give you a trace, specify an alternate DNS server:

        dig @8.8.8.8 +trace renovo.auto\n
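
        You can also ask for a specific record type, optionally against a specific server. A quick sketch (the domain and resolver here are arbitrary examples):

        dig +short gmail.com mx @1.1.1.1\n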
        "},{"location":"dmidecode/","title":"dmidecode","text":"

        A tool for listing hardware information and decoding it into human-readable form. It reads and decodes the system's DMI (SMBIOS) table.

        "},{"location":"dmidecode/#examples","title":"Examples","text":""},{"location":"dmidecode/#show-system-serial-number","title":"Show system serial number","text":"
        sudo dmidecode -t system | grep Serial\nsudo dmidecode -s system-serial-number\n
        "},{"location":"dmidecode/#show-memory-info-including-max-installable","title":"Show memory info including max installable","text":"
        sudo dmidecode -t memory\n
        "},{"location":"dmidecode/#show-bios-version","title":"Show bios version","text":"

        You may need to grep for a different string, but even then it doesn't always show the info because not all machines support this.

        sudo dmidecode -t bios | grep -i revision\n
        "},{"location":"dmidecode/#shower-power-supply-information","title":"Shower power supply information","text":"

        This doesn't always work. Some power supplies are not supported.

        dmidecode -t 39\n
        "},{"location":"dmidecode/#see-also","title":"See Also","text":""},{"location":"dmidecode/#show-all-keywords-and-their-values","title":"Show all keywords and their values:","text":"
        ## -s without a keyword lists all valid keywords\n## -s with a keyword shows only the value of that keyword\nsudo dmidecode -s |& grep '^  ' | while read -r X ; do echo \"$X: $(sudo dmidecode -s \"$X\")\" ; done ;\n
        • lshw - list hardware
        "},{"location":"dns/","title":"DNS","text":"

        Domain Name System

        \"The Domain Name System (DNS) is a hierarchical decentralized naming system for computers, services, or any resource connected to the Internet or a private network.\"

        • https://en.wikipedia.org/wiki/List_of_DNS_record_types
        • https://en.wikipedia.org/wiki/Category:Application_layer_protocols
        • https://miek.nl/2009/july/31/dns-classes/
        "},{"location":"dns/#query-system-resolvers","title":"Query system resolvers","text":""},{"location":"dns/#macos","title":"MacOS","text":"
        dscacheutil -q host -a name github.com\n
        "},{"location":"dns/#tips","title":"Tips","text":"
        • If you need a public address that resolves to localhost, you can use localtest.me
        • If you need a globally resolvable hostname that points to any arbitrary ip address, use nip.io (see the example below)
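
        For example, nip.io embeds the desired address in the hostname (10.0.0.1 here is an arbitrary example):

        $ dig +short 10.0.0.1.nip.io\n10.0.0.1\n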
        "},{"location":"docker/","title":"docker","text":"

        \"An open source project to pack, ship and run any application as a lightweight container.\" - https://www.docker.com

        "},{"location":"docker/#naming-inconsistencies","title":"Naming inconsistencies","text":"

        As of 2024, there are a bunch of frustrating naming inconsistencies with Docker and OCI images. The docker image tag documentation shows the \"image name\" as being broken down into the following components: [registry[:port]/][namespace/]repository[:tag]. Unfortunately this does not harmonize with what is used in various tools, including the official Docker tools.

        For example, the docker command line shows the full \"image name\" minus the \"tag\" component if you ask it for the \"repository\":

        $ docker images quay.io:443/prometheus/busybox:glibc --format=json | jq -r .Repository\nquay.io:443/prometheus/busybox\n

        And the docker python module shows the entire \"image name\" when you ask it for the \"tag\":

        >>> client.images.get('quay.io:443/prometheus/busybox:glibc').tags\n['quay.io/prometheus/busybox:glibc']\n

        Other documents list other definitions. I think that the community needs to get this terminology straight in order for us to build consistent, resilient software. There are some discussions open about this topic, but it does not seem to be high priority:

        • https://github.com/opencontainers/artifacts/issues/32#issuecomment-954898503
        • https://github.com/opencontainers/distribution-spec/issues/279
        "},{"location":"docker/#docker-desktop","title":"Docker Desktop","text":"

        In August 2021, Docker pulled a license bait and switch with Docker Desktop. If you want a Docker Desktop alternative on macOS that has a docker command but doesn't use Docker Desktop, you can do the following:

        brew install hyperkit minikube docker kubernetes-cli\nminikube config set driver hyperkit\nminikube start\neval $(minikube docker-env)\n

        This will give you a docker command that targets the minikube CRI, and is actually a great dev environment.

        Alternatively, if you have a linux machine that runs docker handy, you can skip the minikube stuff and export DOCKER_HOST=ssh://linux-docker-host to launch containers on the linux dockerd. This has the caveats that you cannot mount local filesystems into the remote docker host, and if you want to use it for building, your project directory will be sent over the network to the remote docker host.

        "},{"location":"docker/#examples","title":"Examples","text":""},{"location":"docker/#show-help-on-the-run-command","title":"Show help on the run command","text":"
        docker help run\n
        "},{"location":"docker/#show-the-history-of-an-image-and-count-its-layers","title":"Show the history of an image, and count its layers","text":"
        docker history ubuntu:bionic | nl -ba -v0\n
        "},{"location":"docker/#run-the-docker-command-against-a-remote-host","title":"Run the docker command against a remote host","text":"

        Using this method can save your mac a lot of resources and give a laptop access to a much larger machine's resources. Not all features work; for example, bind mounts from the local machine are not available.

        DOCKER_HOST=ssh://some-linux-machine docker ps\n
        "},{"location":"docker/#run-a-docker-image-in-an-interactive-shell","title":"Run a docker image in an interactive shell","text":"
        docker run -i -t ubuntu:focal bash\n
        • -i, --interactive
        • -t, --tty

        https://docs.docker.com/engine/reference/commandline/run

        "},{"location":"docker/#get-a-bash-terminal-on-a-running-docker-container","title":"Get a bash terminal on a running docker container","text":"
        docker exec -i -t running-container-name bash\n
        "},{"location":"docker/#determine-if-you-are-running-inside-docker","title":"Determine if you are running inside docker","text":"

        Exit code will be 0 in docker (this check relies on cgroup v1; see the fallback sketch below):

        grep -q docker /proc/1/cgroup\n
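
        On cgroup v2 hosts /proc/1/cgroup may not mention docker, so checking for /.dockerenv as a fallback is a common workaround. A sketch, not guaranteed on every container runtime:

        if [ -f /.dockerenv ] || grep -q docker /proc/1/cgroup 2>/dev/null ; then\n  echo \"probably in docker\"\nfi\n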
        "},{"location":"docker/#run-a-docker-image-and-assign-it-a-hostname-and-a-docker-name","title":"Run a docker image and assign it a hostname, and a docker name","text":"
        docker run --hostname=somehost1 --name=\"host1\" -ti centos:centos6 bash\n

        The hostname shows up to the OS. The docker name can be used to interact with the container:

        docker ps --filter name=host1\n
        "},{"location":"docker/#show-a-complete-vertically-oriented-list-of-docker-processes","title":"Show a complete vertically oriented list of docker processes","text":"

        docker ps has no --json flag, but you can work around that with golang style formatting.

        docker ps --no-trunc --format='{{ . | json }}' | jq -S .\n

        This trick also works with docker images, which also lacks a --json arg.
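
        For example, to get the same JSON view of images:

        docker images --format='{{ . | json }}' | jq -S .\n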

        "},{"location":"docker/#show-a-table-of-docker-containers-sorted-by-space-used-in-the-container-not-by-the-image","title":"Show a table of docker containers sorted by space used in the container (not by the image)","text":"
        $ docker ps --format=\"{{.Size}}\\t{{.ID}}\\t{{.Image}}\\t{{.Names}}\" |\nsort -h -k1 |\ncolumn -t\n0B      (virtual  101MB)  2f7ba92f1e66  wan-connection-logger         wan-connection-logger\n0B      (virtual  413MB)  21d474032755  gitlab/gitlab-runner          gitlab-runner\n2B      (virtual  392MB)  c15b2ad88901  mariadb:10.4-bionic           mariadb\n312kB   (virtual  710MB)  ccee541f32c2  jacobalberty/unifi            unifi\n1.45MB  (virtual  2.3GB)  a9a60f4c6efc  homeassistant/home-assistant  home-assistant\n239MB   (virtual  412MB)  5d9f9cc3b46a  plexinc/pms-docker:plexpass   plex\n
        "},{"location":"docker/#run-a-container-with-a-tcp-port-map","title":"Run a container with a tcp port map","text":"

        This maps port 18022 of the host to 22 of the guest.

        docker run -ti -p 18022:22 centos:7 bash\n
        "},{"location":"docker/#run-a-container-with-a-shared-directory","title":"Run a container with a shared directory","text":"

        We are specifying :ro to make this a read-only mount. Default is rw.

        docker run -d -v \"$HOME/www/:/var/www/html/:ro\" php:5.4.35-apache\n
        "},{"location":"docker/#show-configuration-parameters-for-a-container","title":"Show configuration parameters for a container","text":"

        This shows more things that you can configure, like DNS, DNS search, etc..

        docker inspect host1\n
        "},{"location":"docker/#show-what-has-changed-since-a-container-was-started","title":"Show what has changed since a container was started","text":"
        docker diff \"$some_running_image\"\n

        https://docs.docker.com/engine/reference/commandline/diff

        "},{"location":"docker/#view-the-terminal-scrollback-of-a-a-container","title":"View the terminal scrollback of a a container","text":"
        docker logs \"$some_running_image\"\n
        "},{"location":"docker/#list-all-containers-including-ones-that-have-been-stopped","title":"List all containers, including ones that have been stopped","text":"

        This allows you to restart previous instances of a container.

        docker ps -a\n

        https://docs.docker.com/engine/reference/commandline/ps

        "},{"location":"docker/#start-a-named-container","title":"Start a named container","text":"

        By default containers don't restart when your system restarts, so you have to start them manually.

        docker start ttrss\n
        "},{"location":"docker/#stop-a-named-container","title":"Stop a named container","text":"
        docker stop ttrss\n
        "},{"location":"docker/#update-the-restart-policy-on-a-running-container","title":"Update the restart policy on a running container","text":"
        docker update --restart=unless-stopped \"$some_running_image\"\n
        "},{"location":"docker/#delete-all-unused-stuff","title":"Delete all unused stuff","text":"

        This will be interactive.

        docker system prune\n
        "},{"location":"docker/#delete-old-containers","title":"Delete old containers","text":"

        https://docs.docker.com/engine/reference/commandline/rm

        You may have to remove -r from xargs on non-GNU systems.

        docker ps -a --format=\"{{.ID}} {{.Status}}\" |\nawk '$2 == \"Exited\" && $5 ~ /(days|weeks|months)/ {print $1}' |\nxargs -r docker rm\n

        A more systematic approach is to use Docker Custodian.

        "},{"location":"docker/#delete-old-images","title":"Delete old images","text":"

        This is safe to run as long as valuable containers are running, as it won't delete any images that are attached to running containers.

        docker rmi $(docker images | grep '^<none>' | awk '{print $3}')\n

        https://docs.docker.com/engine/reference/commandline/rmi

        A more systematic approach is to use Docker Custodian.
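
        Modern docker releases also include a built-in that deletes dangling images (interactive, like docker system prune):

        docker image prune\n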

        "},{"location":"docker/#show-processes-running-inside-all-docker-containers","title":"Show processes running inside all docker containers","text":"

        On hosts without cgroup integration, run:

        pgrep docker | xargs -n1 pstree\n
        "},{"location":"docker/#show-a-list-of-tags-for-a-given-image-on-docker-hub","title":"Show a list of tags for a given image on docker hub","text":"
        $ curl --silent -f -lSL \"https://index.docker.io/v1/repositories/org-name/image-name/tags\" |\njq '.[].name'\n\"latest\"\n\"0.11.1\"\n\"1.3.0\"\n\"1.5.0\"\n\"2.0.0\"\n

        Quay has a swagger console to discover more API uses:

        $ curl --silent -f -lSL \"https://quay.io/api/v1/repository/org-name/image-name/tag\" |\njq '.tags[].name'\n\"2.0.0\"\n\"latest\"\n\"1.3.0\"\n
        "},{"location":"docker/#see-also","title":"See Also","text":"
        • https://www.docker.io: Main page
        • http://dockerfile.github.io: Trusted builds of FOSS software
        • https://registry.hub.docker.com: Public docker images
        • https://docs.docker.com/build/builders: How to build Dockerfiles
        • https://cloud.google.com/solutions/best-practices-for-building-containers
        • https://github.com/wagoodman/dive: A tool for exploring each layer in a docker image
        • https://hpcw.github.io: High Performance Container Workshop videos
        • https://github.com/regclient/regclient: Perform operations on OCI registries
        • https://github.com/oras-project/oras: CLI tool to work with arbitrary artifacts stored in OCI registries
        • https://github.com/containers/skopeo: \"Work with remote images registries - retrieving information, images, signing content\"
        • https://danielquinn.org/blog/developing-with-docker
        "},{"location":"document-query/","title":"document query tools","text":"

        Tools to query documents from the command line.

        "},{"location":"document-query/#links","title":"Links","text":"
        • https://github.com/BurntSushi/xsv: \"A fast CSV command line toolkit written in Rust.\"
        • https://github.com/TomWright/dasel: \"Select, put and delete data from JSON, TOML, YAML, XML and CSV files with a single tool.\" Also check out my notes on dasel
        • https://github.com/harelba/q: \"Run SQL directly on delimited files and multi-file sqlite databases.\" Also check out my notes on q
        • https://github.com/itchyny/gojq: \"Pure Go implementation of jq.\"
        • https://github.com/mgdm/htmlq: \"Like jq, but for HTML.\"
        • https://github.com/mikefarah/yq: \"yq is a portable command-line YAML, JSON, XML, CSV, TOML and properties processor.\"
        • https://github.com/jqlang/jq: \"Command-line JSON processor.\" Also check out my notes on jq.
        • https://github.com/johnkerl/miller: \"Miller is like awk, sed, cut, join, and sort for name-indexed data such as CSV, TSV, and tabular JSON\"
        • https://github.com/wwkimball/yamlpath: \"Command-line get/set/merge/validate/scan/convert/diff processors for YAML/JSON/Compatible data using powerful, intuitive, command-line friendly syntax.\"
        • https://github.com/simeji/jid: \"json incremental digger\" is an interactive json digging tool.
        • https://github.com/jmespath/jp: \"Command line interface to JMESPath - http://jmespath.org\" Also check out my notes on jmespath.
        • https://github.com/simonw/sqlite-utils: \"Python CLI utility and library for manipulating SQLite databases.\" Can directly import json, csv, and tsv files for querying in sqlite.
        "},{"location":"drone/","title":"Drone","text":"

        Drones, UAV (unmanned aerial vehicles), UAS (unmanned aerial systems). The notes here are focused on USA jurisdiction.

        Be aware that the FAA only regulates outdoor flying within the USA; its regulations do not apply when flying indoors or outside the USA. FAA-regulated airspace starts at ground level, so if you are flying outdoors anywhere in American territory, you are flying in FAA regulated airspace.

        "},{"location":"drone/#glossary","title":"Glossary","text":""},{"location":"drone/#faa-terms","title":"FAA terms","text":"
        • LAANC: Low Altitude Authorization and Notifications Capability. You must request LAANC authorization when flying in controlled airspace. https://www.faa.gov/uas/getting_started/laanc (I live in controlled airspace so I have to do this every day I fly my drone at my house, and it doesn't always work when using the DJI controller 2. \ud83d\ude44)
        • TRUST: The Recreational UAS Safety Test. A certification that all drone pilots in the USA are required to take. https://www.faa.gov/uas/recreational_flyers/knowledge_test_updates
        • UAV: Unmanned Aerial Vehicle. The actual vehicle part of a UAS, such as the plane, quad copter, etc..
        • UAS: Unmanned Aerial System. The vehicle, controller, goggles, and anything else included in the complete system needed to pilot a UAV.
        • VLOS: Visual Line Of Sight.
        "},{"location":"drone/#drone-subculture-terms","title":"Drone subculture terms","text":"
        • Tiny Whoop: small FPV drones, typically around 65mm, and weighing around 50g, with prop guards. These are safe enough to be flown inside buildings without worrying about breaking things.
        • Cine Whoop: an FPV drone with prop guards that is big enough to carry a camera of some sort. Prop guards let these drones bump into objects in tight spaces without crashing, which makes them great for taking video in scenes where camera drones without prop guards would not be as resilient.
        "},{"location":"drone/#links","title":"Links","text":"
        • https://betaflight.com: Flight Controller software
        • https://drone-laws.com
        • https://dronedj.com/2023/09/25/list-dji-drone-remote-id
        • https://edgetx.org: Radio controller firmware
        • https://fpvfc.org: FPV Freedom Coalition
        • https://github.com/Matthias84/awesome-flying-fpv
        • https://github.com/OpenVTx/OpenVTx: Open source VTX system
        • https://intofpv.com
        • https://newbeedrone.com
        • https://openipc.org: Open source camera firmware that works with a variety of cameras, including some drone cameras.
        • https://rotorriot.com/pages/beginners-guide
        • https://web.archive.org/web/20240223154621/https://store.dji.com/guides/properly-maintain-drone/
        • https://www.drl.io/academy
        • https://www.dronepilotgroundschool.com
        • https://www.expresslrs.org: Radio receiver firmware
        • https://www.faa.gov/uas/getting_started/remote_id
        • https://www.faa.gov/uas
        • https://www.fpvknowitall.com
        • https://www.getfpv.com
        • https://www.open-tx.org: Radio controller firmware
        • https://www.tinywhoop.com
        • https://www.youtube.com/@IvanEfimovLimon
        • https://www.youtube.com/@JoshuaBardwell
        "},{"location":"dsrc/","title":"dsrc","text":"

        \"Dedicated Short Range Communications is a two-way short-to-medium range wireless communications capability that permits very high data transmission critical in communications-based active safety applications\" - https://www.its.dot.gov/factsheets/dsrc_factsheet.htm

        \"Dedicated short-range communications are one-way or two-way short-range to medium-range wireless communication channels specifically designed for automotive use and a corresponding set of protocols and standards.\" - https://en.wikipedia.org/wiki/Dedicated_short-range_communications

        "},{"location":"dsrc/#links","title":"Links","text":"
        • https://www.its.dot.gov/factsheets/dsrc_factsheet.htm
        • https://en.wikipedia.org/wiki/Dedicated_short-range_communications
        • https://www.fcc.gov/wireless/bureau-divisions/mobility-division/dedicated-short-range-communications-dsrc-service
        "},{"location":"dtrace/","title":"dtrace","text":"

        \"dynamic tracing compiler and tracing utility\" - man dtrace

        "},{"location":"dtrace/#links","title":"Links","text":"
        • http://www.brendangregg.com/DTrace/dtrace_oneliners.txt
        "},{"location":"du/","title":"du","text":"

        \"estimate file space usage\" - man du

        "},{"location":"du/#examples","title":"Examples","text":""},{"location":"du/#summarize-low-level-directory-uage","title":"Summarize low level directory uage","text":"

        When a partition fills up, this is a good place to begin looking. Some flags may not be available on all platforms, such as sort -h.

        ## -x      --one-file-system\n## -d 3    --max-depth=3\n## -h      --human-readable\nsudo du -x -d 3 -h / | sort -h\n
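
        If you prefer to drill down interactively one level at a time, a quick variation (run from the directory you are investigating, assuming du supports -h):

        sudo du -xsh ./*/ | sort -h\n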
        "},{"location":"duplicity/","title":"duplicity","text":"

        Encrypted bandwidth-efficient backup using the rsync algorithm

        • http://duplicity.nongnu.org/
        "},{"location":"e-bike/","title":"E-bike","text":"

        Electric motor bicycles.

        "},{"location":"e-bike/#aventon-aventure2","title":"Aventon Aventure.2","text":"
        • Motor: 750w
        • Claimed Battery: 48v, 15Ah (720Wh)
        • Actual Battery: 47.1V (14.4Ah) 678.2Wh (6% less than claimed)
        • Assist sensor: torque sensor
        • Sprockets: 48T front, 8-speed 12-32T rear
        • Gear ratios: 1.5 - 4.0 (48T/32T to 48T/12T)
        • Tires: 26x4
        "},{"location":"e-bike/#links","title":"Links","text":"
        • https://fucarebike.com/blogs/news/electric-bike-classes-comparison
        • https://www.heybike.com/blogs/heybike-blog/ebike-classes-1-2-3
        • https://www.michigan.gov/dnr/things-to-do/hike-and-bike/ebikes
        • https://www.michigan.gov/dnr/about/newsroom/releases/2024/03/15/proposed-change-would-expand-allowable-e-bike-operation
        • https://www.metroparks.com/rules-and-regulations: E-bikes are mentioned in this FAQ
        "},{"location":"education/","title":"education","text":"
        • https://www.coursera.org/
        • https://www.edx.org/
        • https://www.udemy.com/
        "},{"location":"elasticsearch/","title":"Elasticsearch","text":"

        \"Elasticsearch is a distributed, free and open search and analytics engine for all types of data, including textual, numerical, geospatial, structured, and unstructured.\" - https://www.elastic.co/what-is/elasticsearch

        "},{"location":"elasticsearch/#examples","title":"Examples","text":""},{"location":"elasticsearch/#dev-console","title":"Dev console","text":"

        Kibana ships with a dev console, which is useful for running the examples below. More documentation about APIs that can be used in the dev console can be found here: https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-apis.html

        "},{"location":"elasticsearch/#interact-with-elasticsearch-over-http","title":"Interact with elasticsearch over HTTP","text":"

        The Compact Aligned Text interface is available at something like https://${elasticsearch_host}:9200/_cat/ and has a variety of endpoints you can inspect over http with human-friendly output.

        /_cat/allocation\n/_cat/shards\n/_cat/shards/{index}\n/_cat/master\n/_cat/nodes\n/_cat/tasks\n/_cat/indices\n/_cat/indices/{index}\n/_cat/segments\n/_cat/segments/{index}\n/_cat/count\n/_cat/count/{index}\n/_cat/recovery\n/_cat/recovery/{index}\n/_cat/health\n/_cat/pending_tasks\n/_cat/aliases\n/_cat/aliases/{alias}\n/_cat/thread_pool\n/_cat/thread_pool/{thread_pools}\n/_cat/plugins\n/_cat/fielddata\n/_cat/fielddata/{fields}\n/_cat/nodeattrs\n/_cat/repositories\n/_cat/snapshots/{repository}\n/_cat/templates\n

        Accessing any of these will show columns of data. For example, to see all shards, you can do:

        curl -s \"https://${elasticsearch_host}:9200/_cat/shards?v=true\"\n

        Which will show something like:

        index                                        shard  prirep  state    docs    store   ip            node\nfluentd.quasaric-spacecraft-0412.2021.10.15  0      r       STARTED  53277   7.6mb   10.32.4.26    example-elasticsearch-data-3\nfluentd.quasaric-spacecraft-0412.2021.10.15  0      p       STARTED  53277   7.6mb   10.32.63.204  example-elasticsearch-data-9\nfluentd.true-ion-0733.2021.10.16             0      p       STARTED  47771   8.2mb   10.32.78.225  example-elasticsearch-data-11\nfluentd.true-ion-0733.2021.10.16             0      r       STARTED  47771   8.2mb   10.32.70.57   example-elasticsearch-data-10\nfluentd.desolate-terminator-1537.2021.10.19  0      p       STARTED  31216   5.7mb   10.32.70.57   example-elasticsearch-data-10\nfluentd.desolate-terminator-1537.2021.10.19  0      r       STARTED  31216   5.7mb   10.32.63.205  example-elasticsearch-data-6\nfluentd.false-perihelion-2673.2021.10.14     0      p       STARTED  144118  19.8mb  10.32.4.26    example-elasticsearch-data-3\nfluentd.false-perihelion-2673.2021.10.14     0      r       STARTED  144118  19.8mb  10.32.35.26   example-elasticsearch-data-2\n

        The ?v=true enables column headers. ?help is also available. More documentation is available at the following URLs:

        • https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html
        • https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html
        • https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html
        "},{"location":"elasticsearch/#delete-indexes-by-regex","title":"Delete indexes by regex","text":"

        Assuming the indexes you want to delete all have a common string, and assuming you have local http access to elasticsearch (EG: you did sudo -E kubefwd svc -n es-namespace)

        curl -fsSL 'http://redacted-elasticsearch:9200/_cat/shards' |\nawk '$1 ~ /\\.2021\\.10\\.14$/ {print $1}' |\nsort -u |\nwhile read -r index ; do\n    curl -X DELETE \"http://redacted-elasticsearch:9200/${index}\"\ndone\n

        You could also use this same logic to delete large shards by using bytes=b and filtering on the index size:

        curl -fsSL 'http://redacted-elasticsearch:9200/_cat/shards?bytes=b' |\nawk '$1 ~ /^fluentd/ && $6 > 7500000 {print}'\n
        "},{"location":"elasticsearch/#move-a-large-shard-to-a-full-node-to-one-that-has-lots-of-free-data","title":"Move a large shard to a full node to one that has lots of free data","text":"

        Assuming you have elasticsearch available on localhost, eg from kubectl -n \"$namespace\" port-forward svc/elasticsearch 9200:9200,

        "},{"location":"elasticsearch/#find-a-large-shard","title":"Find a large shard","text":"
        curl -s http://localhost:9200/_cat/shards?bytes=b | sort -n -k6 | grep <name of node that is full>\n

        If you have GNU sort installed (gsort, from coreutils on macOS) you can append | gsort -k6 -h to sort by shard size.

        "},{"location":"elasticsearch/#find-a-node-with-lots-of-free-space","title":"Find a node with lots of free space","text":"

        The following output shows \"free_space, hostname\"

        curl -s http://localhost:9200/_nodes/stats |\n    jq -rc '.nodes | to_entries | map([.value.fs.data[].free_in_bytes/1024/1024/1024, .value.name])[] | \"\\(.[0]) \\(.[1])\"' |\n    column -t |\n    sort -n\n
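
        Alternatively, the _cat/allocation endpoint from the list above summarizes disk usage per node:

        curl -s 'http://localhost:9200/_cat/allocation?v=true'\n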
        "},{"location":"elasticsearch/#move-the-large-shard-to-the-new-node","title":"Move the large shard to the new node","text":"
        curl -s --location --request POST 'http://localhost:9200/_cluster/reroute' \\\n--header 'Content-Type: application/json' \\\n--data-raw '{\n    \"commands\" : [\n        {\n            \"move\" : {\n                \"index\" : \"<name of shard to move>\",\n                \"shard\" : <shard number, probably 0>,\n                \"from_node\" : \"<node large shard was on that is low on volume>\",\n                \"to_node\" : \"<node that has low volume to move shard to>\"\n            }\n        }\n    ]\n}' | jq .\n
        "},{"location":"elasticsearch/#links","title":"Links","text":"
        • https://www.elastic.co/blog/how-many-shards-should-i-have-in-my-elasticsearch-cluster
        "},{"location":"eleduino/","title":"Eleduino","text":"

        \"Cool and High Quality raspberry pi accessories at Wholesale Price from China\" - http://www.eleduino.com

        "},{"location":"eleduino/#eleduino-spotpear-touchscreen-32-inch","title":"Eleduino SpotPear touchscreen 3.2 inch","text":"

        Taken from https://github.com/notro/rpi-firmware/issues/6#issuecomment-63180647, this is tested to work on Raspbian 7 (wheezy) and 8 (jessie).

        "},{"location":"eleduino/#update-the-firmware-to-support-fbtft","title":"update the firmware to support FBTFT","text":"
        sudo apt-get install -y rpi-update\nsudo REPO_URI=https://github.com/notro/rpi-firmware BRANCH=builtin rpi-update\nsudo reboot\n
        "},{"location":"eleduino/#modify-boot-args-to-enable-the-device","title":"Modify boot args to enable the device","text":"
        sed -i \"s/$/ \\\nfbtft_device.custom \\\nfbtft_device.name=fb_ili9340 \\\nfbtft_device.gpios=dc:22,reset:27 \\\nfbtft_device.bgr=1 \\\nfbtft_device.speed=48000000/\" /boot/cmdline.txt\n
        "},{"location":"eleduino/#enable-console-on-boot","title":"Enable console on boot","text":"
        sed -i \"s/$/ \\\nfbcon=map:10 \\\nfbcon=font:ProFont6x11 \\\nlogo.nologo/\" /boot/cmdline.txt\n
        "},{"location":"eleduino/#rotation-etc","title":"Rotation etc..","text":"
        sed -i \"s/$/ \\\ndma.dmachans=0x7f35 \\\nconsole=tty1 \\\nconsoleblank=0 \\\nfbtft_device.fps=50 \\\nfbtft_device.rotate=270/\" /boot/cmdline.txt\n
        "},{"location":"eleduino/#sort-and-unique-bootcmdlinetxt","title":"Sort and unique /boot/cmdline.txt","text":"
        cat /boot/cmdline.txt |\n    tee /root/cmdline.txt-$(date +%s) |\n    tr \" \" \"\\n\" |\n    sort -u |\n    tr \"\\n\" \" \" > /boot/cmdline.txt.tmp && \\\nmv /boot/cmdline.txt.tmp /boot/cmdline.txt\n
        "},{"location":"etcd/","title":"etcd","text":"

        \"etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines. It\u2019s open-source and available on GitHub. etcd gracefully handles leader elections during network partitions and will tolerate machine failure, including the leader.\" - https://coreos.com/etcd/

        • https://coreos.com/etcd/docs/latest/demo.html
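
        A minimal etcdctl smoke test, assuming a local etcd and the v3 API:

        etcdctl put greeting \"hello world\"\netcdctl get greeting\n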
        "},{"location":"ethtool/","title":"ethtool","text":"

        \"ethtool - query or control network driver and hardware settings\" - man ethtool

        "},{"location":"ethtool/#examples","title":"Examples","text":""},{"location":"ethtool/#force-ethernet-adapter-to-re-negotiate-its-speed","title":"Force ethernet adapter to re-negotiate its speed","text":"
        ## -r restarts auto-negotiation on the specified interface\nsudo ethtool -r eth0\n
        "},{"location":"ethtool/#show-interface-error-count-by-type","title":"Show interface error count by type","text":"
        $ sudo ethtool -S ens5f0 | grep -i error\n     rx_errors: 13551\n     tx_errors: 0\n     rx_length_errors: 0\n     rx_crc_errors: 13551\n     fcoe_last_error: 0\n     port.tx_errors: 0\n     port.rx_crc_errors: 13551\n     port.rx_length_errors: 0\n
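
        Running ethtool with just an interface name shows the negotiated speed, duplex, and link state (ens5f0 as used above):

        sudo ethtool ens5f0 | grep -E 'Speed|Duplex|Link detected'\n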
        "},{"location":"exiftool/","title":"exiftool","text":"

        CLI Tool to read and write image metadata for many kinds of images.

        • https://exiftool.org
        "},{"location":"exiftool/#tricks","title":"Tricks","text":""},{"location":"exiftool/#strip-all-tags","title":"Strip all tags","text":"
        exiftool -all= -- \"$filename\"\n
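
        By default this writes the stripped file and keeps a backup copy with _original appended to the name. Add -overwrite_original to skip the backup (destructive):

        exiftool -all= -overwrite_original -- \"$filename\"\n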
        "},{"location":"exiftool/#show-tags-in-a-format-that-you-can-use-to-rewrite-them","title":"Show tags in a format that you can use to rewrite them","text":"
        exiftool -S -- \"$filename\"\n

        For example

        $ exiftool -S -- \"$filename\" | grep Daniel\nArtist: Daniel Austin Hoherd\nCopyright: \u00a9Daniel Austin Hoherd\nCreator: Daniel Austin Hoherd\nRights: \u00a9Daniel Austin Hoherd\n$ exiftool -Rights='All rights reserved' -- \"$filename\"\n    1 image files updated\n$ exiftool -Rights -- \"$filename\"\nRights                          : All rights reserved\n
        "},{"location":"exiftool/#expanded-basic-usage","title":"Expanded basic usage","text":"

        This prints out a lot more information than normal usage, and indicates what type of metadata it is.

        exiftool -a -u -G:1:2 -- \"$filename\"\n

        Here is an example showing each unique value of column 1 in a file:

        $ exiftool -a -u -G:1:2 -- \"$filename\" | sort -u -k1,1\n[Adobe:Image]   DCT Encode Version              : 100\n[Composite:Camera] Scale Factor To 35 mm Equivalent: 7.0\n[Composite:Image] Aperture                      : 1.8\n[Composite:Location] GPS Latitude               : 37 deg 15' 53.04\" N\n[Composite:Time] Date/Time Created              : 2019:01:08 15:59:06\n[ExifIFD:Camera] Exposure Program               : Program AE\n[ExifIFD:Image] Exposure Time                   : 1/120\n[ExifIFD:Time]  Date/Time Original              : 2019:01:08 15:59:06\n[ExifTool:ExifTool] ExifTool Version Number     : 11.11\n[File:Image]    File Type                       : JPEG\n[GPS:Location]  GPS Version ID                  : 2.2.0.0\n[GPS:Time]      GPS Time Stamp                  : 23:59:06\n[ICC-header:Image] Profile CMM Type             : Linotronic\n[ICC-header:Time] Profile Date Time             : 1998:02:09 06:49:00\n[ICC-meas:Image] Measurement Observer           : CIE 1931\n[ICC-view:Image] Viewing Cond Illuminant        : 19.6445 20.3718 16.8089\n[ICC_Profile:Camera] Device Mfg Desc            : IEC http://www.iec.ch\n[ICC_Profile:Image] Profile Copyright           : Copyright (c) 1998 Hewlett-Packard Company\n[IFD0:Author]   Artist                          : Daniel Austin Hoherd\n[IFD0:Camera]   Make                            : Apple\n[IFD0:Image]    X Resolution                    : 240\n[IFD0:Time]     Modify Date                     : 2019:01:09 13:50:29\n[IFD1:Image]    Compression                     : JPEG (old-style)\n[IFD1:Preview]  Thumbnail Image                 : (Binary data 12008 bytes, use -b option to extract)\n[IPTC:Author]   By-line                         : Daniel Austin Hoherd\n[IPTC:Other]    Coded Character Set             : UTF8\n[IPTC:Time]     Date Created                    : 2019:01:08\n[Photoshop:Author] Copyright Flag               : True\n[Photoshop:Image] X Resolution                  : 240\n[Photoshop:Preview] Photoshop Thumbnail         : (Binary data 12008 bytes, use -b option to extract)\n[System:Image]  File Name                       : 2019-01-08-15-59-06-46628465322_d1657e4c95_o.jpg\n[System:Time]   File Modification Date/Time     : 2019:01:22 09:00:22-08:00\n[XMP-aux:Camera] Distortion Correction Already Applied: True\n[XMP-crs:Image] Already Applied                 : True\n[XMP-dc:Author] Creator                         : Daniel Austin Hoherd\n[XMP-dc:Image]  Format                          : image/jpeg\n[XMP-photoshop:Image] Headline                  : ljwZuD\n[XMP-photoshop:Time] Date Created               : 2019:01:08 15:59:06.448\n[XMP-x:Document] XMP Toolkit                    : Image::ExifTool 11.11\n[XMP-xmp:Image] Creator Tool                    : Adobe Photoshop Lightroom 6.14 (Macintosh)\n[XMP-xmp:Time]  Create Date                     : 2019:01:08 15:59:06.448\n[XMP-xmpMM:Other] Derived From Document ID      : 9880573B7AACBFC189C795E182E8A05D\n[XMP-xmpMM:Time] History When                   : 2019:01:09 13:50:29-08:00\n[XMP-xmpRights:Author] Marked                   : True\n
        "},{"location":"exiftool/#add-missing-lens-data-on-rokinon-85mm","title":"Add missing lens data on Rokinon 85mm","text":"

        Rokinon 85mm is a mechanical lens with no electronics, so no lens data is stored in images taken with it. This adds some stock metadata describing characteristics of the lens that are always true, which helps these photos sort accurately, etc..

        exiftool \\\n  -overwrite_original \\\n  -LensModel='Rokinon 85mm f/1.4' \\\n  -FocalLength='85' \\\n  -LongFocal='85' \\\n  -ShortFocal='85' \\\n  -- \\\n  \"$filename\"\n
        "},{"location":"exiftool/#correct-exif-time-for-instance-to-sync-with-gps-time","title":"Correct EXIF time, for instance to sync with GPS time","text":"

        The following example increases all metadata dates by 1 minute and 56 seconds.

        # exiftool -AllDates-='Y:M:D H:M:S'\nexiftool -AllDates+='0:0:0 0:1:56' -- \"$filename\"\n
        "},{"location":"exiftool/#set-all-dates-to-something-obviously-wrong","title":"Set all dates to something obviously wrong","text":"

        This is useful when scanning or photographing film or prints where you do not want the current date associated with the image.

        exiftool -alldates='1900:01:01 01:01:01' -- *.tif\n
        "},{"location":"exiftool/#delete-certain-keywords-from-files","title":"Delete certain keywords from files","text":"

        This example uses bash expansion to create multiple -keywords-= statements from the words inside of the braces. Use echo exiftool to see what command is actually being called when testing. Keywords can also be stored in the subject tag, so we clean that too.

        find ~/your/pictures/ -type f -name '*.jpg' |\nxargs exiftool -overwrite_original -{keywords,subject}-={keyword1,\"a keyword with spaces\",keyword3,\"another keyword with spaces\"} --\n

        A more readable way to do this is to use an array and loop over it to create args, then pass the args to exiftool. This technique is quite useful for use with a variety of tools. You could also change this logic to add tags instead of deleting them.

        tags=(\n  \"private tag 1\"\n  \"another private tag\"\n  \"some other tag that is private\"\n)\n\nargs=()\nfor tag in \"${tags[@]}\" ; do\n  args+=( \"-subject-=$tag\" \"-keywords-=$tag\" )\ndone\n\nexiftool -overwrite_original \"${args[@]}\" -- \"$@\"\n
        "},{"location":"exiftool/#append-keywords-to-a-file","title":"Append keywords to a file","text":"

        When adding keywords, the default behavior allows duplicates. This case is covered in FAQ #17 and indicates that you must remove and re-add each keyword in one operation in order to prevent duplicates. A bash function to do that follows. Be careful to use it on only ONE FILE at a time, otherwise you will add filenames as keywords!

        add_keyword_to_file(){\n  local args=()\n  [[ \"$#\" -ge 2 ]] || { echo \"ERROR: Must have at least 2 args: <keyword> [keyword]... <file>\" ; return 1 ;}\n  while [[ \"$#\" -gt 1 ]] ; do\n    args+=(\"-keywords-=${1}\" \"-keywords+=${1}\")\n    shift\n  done\n  filename=$1\n  exiftool \"${args[@]}\" -- \"${filename}\"\n}\n

        Here it is in action:

        $ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show there are no keywords\nWarning: [Minor] Tag 'keywords' not defined - 20211016-21-25-03_450QaA.jpg\n\n$ add_keyword_to_file \"Sutro Heights\" \"San Francisco\" 20211016-21-25-03_450QaA.jpg  # add keywords\n    1 image files updated\n\n$ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show that keywords were added\nSutro Heights, San Francisco 20211016-21-25-03_450QaA.jpg\n\n$ add_keyword_to_file \"Sutro Heights\" \"San Francisco\" 20211016-21-25-03_450QaA.jpg  # re-add existing keywords\n    1 image files updated\n\n$ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show that duplicates were not added\nSutro Heights, San Francisco 20211016-21-25-03_450QaA.jpg\n

        It even works to remove duplicates where they already exist, likely because the -= matches all instances of the keyword.

        $ exiftool -keywords+=\"San Francisco\" -- 20211016-21-25-03_450QaA.jpg  # add a duplicate\n    1 image files updated\n\n$ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show that there are duplicates\nSutro Heights, San Francisco, San Francisco 20211016-21-25-03_450QaA.jpg\n\n$ add_keyword_to_file \"Sutro Heights\" \"San Francisco\" 20211016-21-25-03_450QaA.jpg\n    1 image files updated\n\n$ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show that duplicates have been removed\nSutro Heights, San Francisco 20211016-21-25-03_450QaA.jpg\n

        To add the same keywords to many files, loop through the files one at a time using something like:

        for file in *.jpg ; do add_keyword_to_file \"Sutro Heights\" \"San Francisco\" \"${file}\" ; done ;\n
        "},{"location":"exiftool/#set-file-modify-time-to-image-capture-time","title":"Set file modify time to image capture time","text":"

        Useful when you want to sort in your file browser by modification time and get a chronological order of files.

        exiftool \"-FileModifyDate<DateTimeOriginal\" -- *.jpg\n
        "},{"location":"exiftool/#generate-a-table-of-filename-camera-model-and-file-size-in-bytes-sorted-by-bytes","title":"Generate a table of Filename, Camera Model and File Size in bytes, sorted by bytes","text":"

        The # suffix on $FileSize# (the per-tag equivalent of the -n flag) tells exiftool not to convert numbers into human readable formats. This is somewhat ironic in some circumstances, such as with location, where disabling conversion makes the GPS location show up as decimal degrees, which IMHO is much more readable.

        $ find /src_dir/ -iname '*.dng' |\n  xargs exiftool -p '$filename,$Model,$FileSize#' -- 2>/dev/null |\n  sort -t, -k3 -n |\n  column -s, -t\n2012-01-26-23-19-54-6795223065_2e771d1012_o.jpg   iPhone 4S             1242739\n2013-02-03-10-01-56-8441346635_df4404a1f6_o.jpg   NIKON D5200           1646481\n2012-01-22-15-16-38-6746574603_d52311264f_o.jpg   Canon EOS REBEL T3i   1671734\n2011-01-22-23-44-31-6271225963_f9b95b2d7a_o.jpg   NIKON D3S             1773081\n2010-01-27-13-07-00-4313649499_835a6649c2_o.jpg   NIKON D300            1829578\n2016-02-03-07-26-32-24522158414_4aaf116d2a_o.jpg  iPhone 6              2319061\n2018-10-24-13-39-09-44676649345_1de0f581cd_o.jpg  iPhone XS Max         2971254\n2015-02-02-19-17-09-24587486051_3032823e4e_o.jpg  NIKON D800            3309696\n2014-01-27-13-52-41-12951707465_79a8dd3827_o.jpg  iPhone 5              3401479\n2017-01-22-18-33-28-31693592473_40478df088_o.jpg  ILCE-7                4230661\n2018-12-23-22-33-40-45536007225_8fdd50691a_o.jpg  NIKON D850            4924617\n2017-02-06-08-04-18-44658317900_98e04997fb_o.jpg  iPhone 6s             8712631\n2018-12-28-16-56-42-39713091073_c57ec1a8a8_o.jpg  Canon EOS 5D Mark II  8741601\n2019-01-08-16-11-49-39716361093_479e6a2323_o.jpg  iPhone 8 Plus         12041600\n
        "},{"location":"exiftool/#generate-rsync-commands-for-files-matching-a-string","title":"Generate rsync commands for files matching a string","text":"

        Useful for reviewing commands before running them, the following example generates a command for every file, then uses awk to do a numeric comparison on the last field to filter out images under a certain ImageHeight. These rsync commands can be pasted into a terminal to run. (Generating a list of files for use with rsync --files-from would be a better option for this specific use case, but this illustration could be adapted for commands that do not have such an option.)

        $ exiftool -d \"%s\" -p 'rsync -aP $filename otherhost:~/Pictures/ # $ImageHeight' -- * 2>/dev/null | awk '$NF >= 2800 {print}'\nrsync -aP 2017-02-06-08-04-18-44658317900_98e04997fb_o.jpg otherhost:~/Pictures/ # 2869\nrsync -aP 2018-02-06-09-50-04-31514483967_a422a3e3aa_o.jpg otherhost:~/Pictures/ # 2880\nrsync -aP 2018-02-06-15-04-43-45541501845_8dbdc3b208_o.jpg otherhost:~/Pictures/ # 2880\nrsync -aP 2018-02-06-15-05-43-31514485997_e2551fdbbc_o.jpg otherhost:~/Pictures/ # 2880\nrsync -aP 2018-12-19-10-53-27-45663859984_0f93ac24ec_o.jpg otherhost:~/Pictures/ # 2880\n
        "},{"location":"exiftool/#print-filenames-that-are-missing-a-tag","title":"Print filenames that are missing a tag","text":"

        This example creates a file with all full path names for jpg and dng files that do not have GPS coordinates.

        find /some/dir -iname '*.jpg' -or -iname '*.dng' -print0 |\nxargs -0 exiftool -p '${Directory}/${Filename}' -if 'not defined $GPSPosition' -- >> ~/no-geo.txt\n
        "},{"location":"exiftool/#print-filenames-of-photos-that-are-older-than-10-years","title":"Print filenames of photos that are older than 10 years","text":"
        exiftool -if '$now ge ${DateTimeOriginal;ShiftTime($_,\"10:0:0 0\")}' -p '$FileName' *.jpg\n
        "},{"location":"exiftool/#use-testname-tag-target-to-test-what-files-would-be-renamed-to","title":"Use TestName tag target to test what files would be renamed to","text":"

        This block builds an array of possible tags to use as a filename, creates an exiftool argument string from that array, then tests what files would be named to. This is useful when dealing with files from various sources that don't all use the same tag to store the original media creation time. By using TestName instead of FileName as the target, we observe what would occur, essentially a dry-run, instead of actually renaming the files.

        There is a funky behavior of %-c when you operate on a file that ideally should not be renamed: exiftool will toggle back and forth on each run, appending and then removing -1.

        This assumes GNU xargs for the -r flag.

        #!/usr/bin/env bash\nset -x\n\n# The last valid variable from this list is used as the filename source\ncreate_date_sources=(\n  TrackCreateDate\n  RIFF:DateTimeOriginal\n  MediaCreateDate\n  FileModifyDate\n  DateTimeOriginal\n  CreateDate\n)\n\nfor opt in \"${create_date_sources[@]}\" ; do\n  args+=( \"-TestName<${opt}\" ) ;\ndone ;\n\nargs+=( '-d' './%Y/%m/%Y%m%d-%H-%M-%S%%-c.%%le' )\n\nfind . -maxdepth 1 -type f ! -name '*.sh' -print0 | xargs -0 -r exiftool \"${args[@]}\" --\n
        "},{"location":"exiftool/#rename-files-to-their-shuttercount","title":"Rename files to their ShutterCount","text":"

        Filenames will not be changed if ShutterCount field is not populated.

        exiftool -P '-filename<${ShutterCount;}.%e' -- *.dng\n
        "},{"location":"exiftool/#rename-files-based-on-a-set-of-possible-names","title":"Rename files based on a set of possible names","text":"

        Exiftool will use the last parameter where all variables are present.

        exiftool -P -d '%F-%H-%M-%S' \\\n  '-filename<${DateTimeOriginal} - ${Make;}.%e' \\\n  '-filename<${CreateDate} - ${Make;}.%e' \\\n  '-filename<${DateTimeOriginal} - ${Make;} - ${Model;}.%e' \\\n  '-filename<${CreateDate} - ${Make;} - ${Model;}.%e' \\\n  '-filename<${DateTimeOriginal} - ${Make;} - ${Model;} - ${ShutterCount}.%e' \\\n  '-filename<${CreateDate} - ${Make;} - ${Model;} - ${ShutterCount}.%e' \\\n  -- \\\n  *.dng\n
        "},{"location":"exiftool/#rename-gpx-files-based-on-the-capture-time","title":"Rename GPX files based on the capture time","text":"

        You will end up with a filename like 2013-09-30-23-35-40.gpx based off of the first trkpt timestamp.

        exiftool -d '%Y%m%d-%H-%M-%S' '-FileName<${GpxTrkTrksegTrkptTime;tr/ /-/;tr/:/-/;tr(/Z/)()d;}%-c.gpx' -- *.gpx\n
        "},{"location":"exiftool/#rename-files-to-their-original-date-and-time-using-a-lower-case-file-extension","title":"Rename files to their original date and time using a lower case file extension","text":"
        # %le = lowercase extension\n# %-c = unique filenames when the timestamp is exactly the same. EG: filename-1.jpg\nexiftool \"-FileName<CreateDate\" -d \"%Y%m%d-%H-%M-%S%%-c.%%le\" -- *.jpg\n
        "},{"location":"exiftool/#rename-files-using-a-combination-of-tags","title":"Rename files using a combination of tags","text":"

        Using the name of the tag as output by exiftool -S, you can create complicated filenames by combining tags:

        exiftool -d '%Y%m%d-%H-%M-%S' '-FileName<${CreateDate;}_${Headline;}%-c.%e'\n
        "},{"location":"exiftool/#rename-music-files-in-a-directory","title":"Rename music files in a directory","text":"

        If you use a semicolon inside of a tag that is used to generate a filename, it will have filename-invalid characters stripped. The invalid character list is: / \\ ? * : | \" < >. See the next section for more examples of semicolon behavior.

        exiftool \\\n  '-FileName<${Artist;} - ${Title;}.%e' \\\n  '-FileName<${Artist;} - ${Album;} - ${Title;}.%e' \\\n  -- \\\n  *.mp3 *.m4a\n

        The way I solved this prior to knowing the semicolon behavior was to use a regex replace, which is included here because it could be useful in other circumstances:

        exiftool \\\n  '-FileName<${Artist;s/\\//_/} - ${Title;s/\\//_/}.%e' \\\n  '-FileName<${Artist;s/\\//_/} - ${Album;s/\\//_/} - ${Title;s/\\//_/}.%e' \\\n  -- \\\n  *.mp3 *.m4a\n
        "},{"location":"exiftool/#rename-files-into-directories-with-date-components-as-directory-names","title":"Rename files into directories with date components as directory names","text":"

        Using the above technique, it's not possible to create directories using date components as parts of the directory structure.

        $ exiftool -d '%Y/%m/%d/%F-%H-%M-%S' '-TestName<${DateTimeOriginal;}.%le' -- example.jpg\n'example.jpg' --> '201803042018-03-04-00-01-29.jpg'\n

        Notice how all the directory delimiters were left out. To work around this, you can use a date format string with DateFmt directly in the date tag instead of in -d:

        $ exiftool '-TestName<${DateTimeOriginal;DateFmt(\"%Y/%m/%d/%F-%H-%M-%S\")}.%le' -- example.jpg\n'example.jpg' --> '2018/03/04/2018-03-04-00-01-29.jpg'\n
        "},{"location":"exiftool/#rename-files-into-subdir-based-on-multiple-tags","title":"Rename files into subdir based on multiple tags","text":"

        Taking care not to put a semicolon in the tags, as described in the last section, you can use more than one tag to rename a file, so long as you format your date string correctly.

        find ./ -type f -iname '*.jpg' -print0 |\nxargs -0 exiftool -d \"%Y/%m/%d/%Y%m%d-%H-%M-%S\" '-FileName<${DateTimeOriginal}_${Headline}%-c.%le' --\n

        EG:

        $ find . -type f -iname '*.jpg' -print0 | xargs -0 exiftool -d \"%Y/%m/%d/%Y%m%d-%H-%M-%S\" '-TestName<${DateTimeOriginal}_${Headline}%-c.%le' --\n'./20170406-17-11-59.jpg' --> '2017/04/06/20170406-17-11-59_qrWLGF.jpg'\n'./20170401-22-20-56.jpg' --> '2017/04/01/20170401-22-20-56_907nMU.jpg'\n'./20170403-07-14-18.jpg' --> '2017/04/03/20170403-07-14-18_JMPDVd.jpg'\n    0 image files updated\n    3 image files unchanged\n

        But if we use a semicolon, the invalid characters are stripped, and thus directories are not created.

        $ find . -type f -iname '*.jpg' -print0 | xargs -0 exiftool -d \"%Y/%m/%d/%Y%m%d-%H-%M-%S\" '-TestName<${DateTimeOriginal;}_${Headline}%-c.%le' --\n'./20170406-17-11-59.jpg' --> './2017040620170406-17-11-59_qrWLGF.jpg'\n'./20170401-22-20-56.jpg' --> './2017040120170401-22-20-56_907nMU.jpg'\n'./20170403-07-14-18.jpg' --> './2017040320170403-07-14-18_JMPDVd.jpg'\n    0 image files updated\n    3 image files unchanged\n
        "},{"location":"exiftool/#move-short-videos-to-one-dir-long-videos-to-another-dir","title":"Move short videos to one dir, long videos to another dir","text":"

        In iOS, if you have Live Photo enabled, it creates a little movie each time you take a photo. While these can add very interesting context around photos, they can be quite irritating if you're playing through a collection of videos where they are mixed with videos of more moderate duration. The following snippet separates videos with a duration of more than 10 seconds from those with equal or lesser duration.

        # -TestName is used here so it does not destroy data. Replace this with FileName to make this actually work.\n# $Duration# has the # sign appended to make this tag machine readable so it can accurately be compared.\n# We must use perl's numeric comparisons (>, <=), not string comparisons (gt, le)\n# exiftool does not support if else syntax, so for the else condition you must run a second command.\n\nworking_path=\"${PWD}\"  # destination root for the long/ and short/ trees\nopt='CreateDate'       # date tag to use as the filename source; adjust for your media\n\nlong_args=(  \"-TestName<${opt}\" '-d' \"${working_path}/long/%Y/%m/%Y%m%d-%H-%M-%S%%-c.%%le\"  '-if' '${Duration#} >  10' )\nshort_args=( \"-TestName<${opt}\" '-d' \"${working_path}/short/%Y/%m/%Y%m%d-%H-%M-%S%%-c.%%le\" '-if' '${Duration#} <= 10' )\n\nfind \"${PWD}\" -maxdepth 1 -type f -print0 | xargs -0 -r exiftool \"${long_args[@]}\" --\nfind \"${PWD}\" -maxdepth 1 -type f -print0 | xargs -0 -r exiftool \"${short_args[@]}\" --\n
        "},{"location":"exiftool/#add-missing-date-metadata-to-nintendo-switch-screenshots","title":"Add missing date metadata to Nintendo Switch screenshots","text":"

        Nintendo Switch screenshots are named with the date, but do not contain this information in the EXIF, which makes this data fragile.

        # Filename like: 2020041909511400-87C68A817A974473877AC288310226F6.jpg\nfor X in 202?????????????-????????????????????????????????.{jpg,mp4} ; do\n  echo \"${X}\" |\n  sed -E 's/^((....)(..)(..)(..)(..)(..).*)/\\2 \\3 \\4 \\5 \\6 \\7 \\1/'\ndone | while read -r Y M D h m s f ; do\n  exiftool \\\n    -overwrite_original \\\n    \"-alldates=$Y:$M:$D $h:$m:$s\" \\\n    '-FileName<DateTimeOriginal' \\\n    -d '%Y%m%d-%H-%M-%S%%-c.%%le' \\\n    -- \"$f\"\ndone\n
        "},{"location":"exiftool/#copy-all-gps-location-data-from-one-file-into-other-files","title":"Copy all GPS location data from one file into other files","text":"
        exiftool -tagsfromfile source-file.jpg '-gpsl*<gpsl*' -- dest-file-1.jpg dest-file-2.jpg\n
        "},{"location":"exiftool/#review-and-delete-all-dji-photos-that-are-looking-at-the-sky","title":"Review and delete all DJI photos that are looking at the sky","text":"

        When taking panoramas with a DJI drone, you end up with a lot of photos of clouds and blue sky. These can be found by looking at GimbalPitchDegree. Review them in macOS Preview.app with:

        find PANORAMA -type f |\nxargs exiftool -if '$GimbalPitchDegree > 40' -p '${Directory}/${Filename}' -- 2>/dev/null |\nxargs -r open\n

        Once you've verified that none of them are worth preserving, delete them with:

        find PANORAMA -type f |\nxargs exiftool -if '$GimbalPitchDegree > 40' -p '${Directory}/${Filename}' -- 2>/dev/null |\nxargs -r rm -fv\n

        If you want to filter out photos that are mostly sky but also contain a bit of the ground in the bottom third of the frame, use > 9 instead of > 40.

        "},{"location":"exiftool/#geotag-non-geotagged-files-using-a-specific-tz","title":"Geotag non-geotagged files using a specific TZ","text":"

        Timezones in photo images are kind of a mess. In order to be specific about what TZ you took photos in, you can override it using the syntax in the example below. For instance, I keep all my photos in UTC so I never have to wonder what TZ I took them in and I never have to worry about DST. This example also skips any files that have existing geotags.

        find ~/Pictures/whatever -type f -iname '*.dng' -print0 |\n  xargs -0 exiftool -if 'not defined $GPSPosition' -geotag ~/gps_tracks.gpx '-Geotime<${createdate}+00:00' --\n

        This page gives more examples: https://exiftool.org/geotag.html

        "},{"location":"exiftool/#export-exif-data-as-json","title":"Export exif data as JSON","text":"

        You can use the -J/-json flag to output JSON data, which is obviously really helpful.

        $ exiftool -J -ExifToolVersion -LensFStops 20241027-20-44-44_177lgJ.dng | jq .\n[\n  {\n    \"SourceFile\": \"20241027-20-44-44_177lgJ.dng\",\n    \"ExifToolVersion\": 12.5,\n    \"LensFStops\": 6\n  }\n]\n

        However, by default, all numeric-looking values are unquoted, even if they are not actually numbers, like version numbers. In the above example, the version number is 12.50, not 12.5, and the LensFStops is 6.00, not 6. To work around this, you can use -api StructFormat=JSONQ, where JSONQ is \"JSON with quoted numbers\". (See https://exiftool.org/ExifTool.html#StructFormat for more details.) You must be using exiftool >= 12.88 (2024-07-11) for this feature to be available; older versions will silently produce unquoted numeric values.

        $ exiftool -api StructFormat=JSONQ -json -ExifToolVersion -LensFStops 20241027-20-44-44_177lgJ.dng | jq .\n[\n  {\n    \"SourceFile\": \"20241027-20-44-44_177lgJ.dng\",\n    \"ExifToolVersion\": \"13.00\",\n    \"LensFStops\": \"6.00\"\n  }\n]\n
        "},{"location":"exiftool/#see-also","title":"See Also","text":"
        • graphicsmagick
        • imagemagick
        • jpeginfo
        • sips
        "},{"location":"fediverse/","title":"fediverse","text":"

        \"The fediverse is an ensemble of federated (i.e. interconnected) servers that are used for web publishing and file hosting, which, while independently hosted, can communicate with each other.\" - https://en.wikipedia.org/wiki/Fediverse

        "},{"location":"fediverse/#links","title":"Links","text":"
        • https://en.wikipedia.org/wiki/Fediverse
        • https://en.wikipedia.org/wiki/ActivityPub: The fediverse is largely interconnected using the ActivityPub protocol.
        • https://fedidevs.org: Fediverse Developer Network
        • https://joinfediverse.wiki
        • https://lemmy.world/post/256146: Several fediverse links
        • https://www.jvt.me/posts/2019/10/20/indieweb-talk: Not fediverse, but definitely along the same line of thinking in a lot of ways.
        "},{"location":"fedramp/","title":"Fedramp","text":"
        • https://en.wikipedia.org/wiki/FedRAMP
        • https://www.fedramp.gov/
        "},{"location":"ffmpeg/","title":"ffmpeg","text":"

        ffmpeg is a tool for recording, converting, filtering, and streaming audio and video.

        • https://ffmpeg.org
        "},{"location":"ffmpeg/#links","title":"Links","text":"
        • https://fileconverter.tommyjepsen.com: ffmpeg based video converter that runs in-browser
        "},{"location":"ffmpeg/#examples","title":"Examples","text":""},{"location":"ffmpeg/#convert-container-format","title":"Convert container format","text":"

        This will copy all streams from every mkv container file in the current directory into an mp4 container file.

        for X in *.mkv ; do ffmpeg -i \"${X}\" -codec copy -map 0 \"${X%.mkv}.mp4\" ; done ;\n

        Some codecs will not be compatible and will need to be transcoded in order to be mp4 compatible. Here is an example that transcodes video to h264 using hardware transcoding via the macOS VideoToolbox encoder (h264_videotoolbox):

        FILE=all-your-base.mkv\nffmpeg -i \"${FILE}\" -c:v h264_videotoolbox -b:v 4000k -c:a copy \"${FILE%.mkv}.mp4\"\n
        "},{"location":"ffmpeg/#sample-video-output-settings","title":"Sample video output settings","text":"

        Before spending a long time converting a video, it's good to sample what you would get. You can render only a portion of the movie to make sure your settings are not too low. The following example starts encoding from 35 minutes into the source file (-ss HH:MM:SS.ss) and produces 20 seconds of output (-t HH:MM:SS.ss):

        ffmpeg -ss 00:35:00 -t 00:00:20.00 -i \"${FILE}\" -c:v h264_videotoolbox -b:v 4000k -c:a copy \"${FILE%.mkv}.mp4\"\n

        Note that the HH:MM:SS.ss is given in the time duration spec that is detailed in man ffmpeg-utils and can have other forms.

        Also note that -ss behaves differently depending on where it is placed in the command in regard to the input and output files.
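
        A hedged sketch of that difference: with -ss before -i, ffmpeg seeks in the input, which is fast because it can jump to the nearest keyframe; with -ss after -i, it decodes and discards everything up to that point, which is slow but frame-accurate. The duration spec also accepts plain seconds, as used here:

        # input seeking: fast, starts at/near a keyframe (important when stream copying)\nffmpeg -ss 00:35:00 -i \"${FILE}\" -t 20 -c copy sample-fast.mp4\n# output seeking: slow, decodes everything up to 35m, frame accurate\nffmpeg -i \"${FILE}\" -ss 00:35:00 -t 20 sample-accurate.mp4\n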

        "},{"location":"ffmpeg/#time-lapse","title":"Time Lapse","text":""},{"location":"ffmpeg/#generate-a-movie-from-an-image-sequence-like-001jpg-999jpg","title":"Generate a movie from an image sequence like 001.jpg-999.jpg","text":"
        ffmpeg -framerate 10 -i %03d.jpg -b:v 1800k test1800.mp4\n
        "},{"location":"ffmpeg/#rename-files-as-a-sequence","title":"Rename files as a sequence:","text":"
        i=0 ;\nfind . -type f |\n  while read -r F ; do\n    let i=${i}+1 ;\n    fn=$(printf %06d ${i}) ;\n    mv \"${F}\" \"${fn}.jpg\" ;\n  done ;\n
        "},{"location":"ffmpeg/#sample-some-of-the-middle-of-the-time-lapse","title":"Sample some of the middle of the time-lapse","text":"
        ffmpeg -pattern_type sequence -start_number 3000 -r 30 -i %06d.jpg -s 1440x1080 -frames 120 \"$(date +%F_%T).mp4\"\n
        "},{"location":"ffmpeg/#turn-these-images-into-a-video","title":"Turn these images into a video","text":"
        ffmpeg -pattern_type sequence -r 30 -i %06d.jpg -s 1440x1080 \"$(date +%F_%T).mp4\"\n
        "},{"location":"ffmpeg/#audio-replace","title":"Audio Replace","text":"

        Replace the audio of DSC_4436.AVI with 01 Gymnopedie 1.mp3 and limit the duration of the output so the music doesn't play beyond the end of the video.

        ffmpeg -t 00:00:47.99 -i DSC_4436.AVI -i \"01 Gymnopedie 1.mp3\" -map 0:0 -map 1:0 -vcodec copy -acodec copy output.AVI\n
        "},{"location":"ffmpeg/#slow-down-video-to-half-speed-drop-audio","title":"Slow down video to half speed, drop audio","text":"
        ffmpeg -i DHO_8751.MOV -an -vf \"setpts=(2/1)*PTS\" output.mp4\n
        "},{"location":"ffmpeg/#extract-two-seconds-worth-of-frames-at-24fps-starting-at-15m","title":"Extract two seconds worth of frames at 24fps starting at 15m","text":"
        ffmpeg -i movie.mkv -r 24 -t 00:00:02.00 -ss 00:15:00 temp/movie-%4d.jpg\n
        "},{"location":"ffmpeg/#detect-errors-in-files","title":"Detect errors in files","text":"
        ffmpeg -v error -i 20091024-08-46-00.mpg -f null - 2>> error.log\n
        "},{"location":"ffmpeg/#dump-a-raw-stream","title":"Dump a raw stream","text":"

        If you encounter a file that has an unsupported stream and you want to dump it for analysis, you can use ffprobe to see what streams there are. ffprobe will produce output including something like:

        Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'DJI_20230929174507_0003_D.MP4':\n  Metadata:\n    major_brand     : isom\n    minor_version   : 512\n    compatible_brands: isomiso2mp41\n    creation_time   : 2023-09-29T21:45:07.000000Z\n    encoder         : DJI Mini4 Pro\n  Duration: 00:00:18.28, start: 0.000000, bitrate: 93326 kb/s\n  Stream #0:0[0x1](und): Video: hevc (Main) (hvc1 / 0x31637668), yuv420p(tv, bt709), 3840x2160, 90422 kb/s, 29.97 fps, 29.97 tbr, 30k tbn (default)\n    Metadata:\n      creation_time   : 2023-09-29T21:45:07.000000Z\n      handler_name    : VideoHandler\n      vendor_id       : [0][0][0][0]\n  Stream #0:1[0x2](und): Data: none (djmd / 0x646D6A64), 68 kb/s\n    Metadata:\n      creation_time   : 2023-09-29T21:45:07.000000Z\n      handler_name    : DJI meta\n  Stream #0:2[0x3](und): Data: none (dbgi / 0x69676264), 2315 kb/s\n    Metadata:\n      creation_time   : 2023-09-29T21:45:07.000000Z\n      handler_name    : DJI dbgi\n  Stream #0:3[0x0]: Video: mjpeg (Baseline), yuvj420p(pc, bt470bg/unknown/unknown), 1280x720 [SAR 1:1 DAR 16:9], 90k tbr, 90k tbn (attached pic)\nUnsupported codec with id 0 for input stream 1\nUnsupported codec with id 0 for input stream 2\n

        Here we see 4 streams. 0:0 is a video stream, 0:1 is a DJI meta data stream, 0:2 is a DJI dbgi data stream, and 0:3 is a video mjpeg stream. Using this list as a reference, we can dump an individual stream. For instance, to dump the 0:2 stream:

        ffmpeg -i DJI_20230929174507_0003_D.MP4 -map 0:2 -f data -c copy stream2.bin\n
        "},{"location":"ffmpeg/#reduce-frame-rate","title":"Reduce frame rate","text":"

        This example is taken directly from https://trac.ffmpeg.org/wiki/ChangingFrameRate

        ffmpeg -i src.mp4 -filter:v fps=30 dest.mp4\n
        "},{"location":"ffmpeg/#record-video-from-a-macos-webcam","title":"Record video from a macOS webcam","text":"

        This is video only, no audio

        ffmpeg -f avfoundation -r 30 -i 1 webcam.mov\n
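
        To also capture audio, the avfoundation input takes a video:audio device index pair. Device indices vary per machine, so list them first (a sketch of standard avfoundation usage; the indices below are examples):

        # list available video and audio devices\nffmpeg -f avfoundation -list_devices true -i \"\"\n# capture video device 1 along with audio device 0\nffmpeg -f avfoundation -r 30 -i \"1:0\" webcam-with-audio.mov\n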
        "},{"location":"ffmpeg/#screen-record-a-macos-desktop","title":"Screen record a macOS desktop","text":"
        ffmpeg -f avfoundation -r 30 -i 3 screen_capture.mov\n
        "},{"location":"ffmpeg/#generate-a-timelapse-from-dji-hyperlapse-photos","title":"Generate a timelapse from DJI hyperlapse photos","text":"

        The DJI Hyperlapse videos are pretty awful, with unnecessary cropping and perspective warping. You're better off generating your own movie from the still images it took. To do so, cd to the directory with the hyperlapse photos in it and run:

        ffmpeg -framerate 30 -pattern_type glob -i \"HYPERLAPSE*.JPG\" -s:v 4032x3024 -c:v libx264 -crf 17 -pix_fmt yuv420p timelapse.mp4\n

        You may need to adjust the 4032x3024 dimensions if you are not using the Mini 4 Pro.

        "},{"location":"ffmpeg/#crop-a-video","title":"Crop a video","text":"

        Find the dimensions of your video with exiftool -ImageSize \"$FileName\" or ffprobe -v error -show_entries stream=width,height -of default=noprint_wrappers=1 \"$FileName\". Once you have that, you must construct a command like this:

        ffmpeg -i Sunset.mp4 -filter:v \"crop=2884:2160:478:0\" Sunset-cropped.mp4\n

        Where 2884 is the width, 2160 is the height, 478 is how far to shift the crop from the left edge of the frame, and 0 is how far to shift the crop from the top of the frame.
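
        The crop filter also accepts expressions, so if you want the crop centered you can let ffmpeg compute the offset from the input width (in_w) and output width (ow) instead of doing the math yourself:

        # center a 2884x2160 crop horizontally, pinned to the top of the frame\nffmpeg -i Sunset.mp4 -filter:v \"crop=2884:2160:(in_w-ow)/2:0\" Sunset-cropped.mp4\n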

        "},{"location":"figlet/","title":"figlet","text":"

        Figlet renders text as large ASCII-art lettering.

        "},{"location":"figlet/#examples","title":"Examples","text":""},{"location":"figlet/#print-text","title":"Print text","text":"
        $ figlet hello\n  _          _ _\n | |__   ___| | | ___\n | '_ \\ / _ \\ | |/ _ \\\n | | | |  __/ | | (_) |\n |_| |_|\\___|_|_|\\___/\n
        "},{"location":"figlet/#show-available-fonts","title":"Show available fonts","text":"
        $ showfigfonts | head\n3-d :\n  ****             **\n */// *           /**\n/    /*           /**\n   ***  *****  ******\n  /// */////  **///**\n *   /*      /**  /**\n/ ****       //******\n ////         //////\n
        "},{"location":"figlet/#use-a-font","title":"Use a font","text":"
        $ figlet -f 3-d hello\n **               **  **\n/**              /** /**\n/**       *****  /** /**  ******\n/******  **///** /** /** **////**\n/**///**/******* /** /**/**   /**\n/**  /**/**////  /** /**/**   /**\n/**  /**//****** *** ***//******\n//   //  ////// /// ///  //////\n
        "},{"location":"figlet/#see-also","title":"See Also","text":"
        • cowsay
        "},{"location":"finance/","title":"Finance","text":"

        Misc financial information

        "},{"location":"finance/#links","title":"Links","text":"
        • How to place a credit freeze on your credit report: https://www.usa.gov/credit-freeze
        "},{"location":"find/","title":"find","text":"

        The find util lets you search a filesystem for things that match filesystem attributes. Unfortunately, this is one of those tools where BSD and GNU deviate both syntactically and feature-wise, and GNU mostly wins.

        "},{"location":"find/#examples","title":"Examples","text":""},{"location":"find/#find-and-delete-empty-directories-2-levels-deep-or-deeper","title":"Find and delete empty directories 2 levels deep or deeper","text":"

        find \"${PWD}\" -mindepth 2 -type d -empty -delete

        "},{"location":"find/#find-based-on-a-regex","title":"Find based on a regex","text":"

        find /tank/movies -regextype egrep -iregex '.*\\.(mov|mp4)$'

        "},{"location":"find/#find-files-and-perform-operations-on-them","title":"Find files and perform operations on them","text":"

        One at a time:

        find \"${PWD}\" -type d -exec dot_clean {} \\;

        Or several in batches, similar to how xargs handles things:

        find \"${PWD}\" -type d -exec dot_clean {} \\+

        "},{"location":"find/#find-files-that-match-a-glob","title":"Find files that match a glob","text":"

        find \"${PWD}\" -name '????????-??-??-??_[0-9][0-9][0-9]???.dng'

        "},{"location":"find/#alter-permissions-on-some-files-that-are-not-already-set-correctly","title":"Alter permissions on some files that are not already set correctly","text":"

        find . -mindepth 2 -type f ! -perm 444 -exec chmod 444 {} \\+

        "},{"location":"find/#find-files-in-the-current-directory-that-do-not-match-any-of-several-listed-filenames","title":"Find files in the current directory that do not match any of several listed filenames","text":"

        find . -maxdepth 1 -type f ! -iname '.*' ! -name .DS_Store ! -name '*.db'

        "},{"location":"find/#correctly-handle-spaces-when-piping-to-xargs","title":"Correctly handle spaces when piping to xargs","text":"

        find /Applications -mindepth 1 -maxdepth 1 -type d -name '* *' -print0 | xargs -0 -n1 echo

        "},{"location":"find/#find-executable-files","title":"Find executable files","text":"

        This finds all files where an executable bit is set.

        With BSD find:

        find . -type f -perm +111

        With GNU find:

        find . -type f -executable

        "},{"location":"find/#see-also","title":"See also","text":"
        • https://github.com/jhspetersson/fselect: Find files with SQL-like queries
        • https://github.com/junegunn/fzf: fzf is a general-purpose command-line fuzzy finder
        "},{"location":"findmnt/","title":"findmnt","text":"

        \"findmnt will list all mounted filesystems or search for a filesystem. The findmnt command is able to search in /etc/fstab, /etc/fstab.d, /etc/mtab or /proc/self/mountinfo. If device or mountpoint is not given, all filesystems are shown.\" - man findmnt

        "},{"location":"findmnt/#examples","title":"Examples","text":""},{"location":"findmnt/#simple-usage","title":"Simple usage","text":"

        Here is the output of findmnt on an Ubuntu 16.04 Vagrant box:

        TARGET                                SOURCE     FSTYPE     OPTIONS\n/                                     /dev/sda1  ext4       rw,relatime,data=ordered\n\u251c\u2500/sys                                sysfs      sysfs      rw,nosuid,nodev,noexec,relatime\n\u2502 \u251c\u2500/sys/kernel/security              securityfs securityfs rw,nosuid,nodev,noexec,relatime\n\u2502 \u251c\u2500/sys/fs/cgroup                    tmpfs      tmpfs      ro,nosuid,nodev,noexec,mode=755\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/systemd          cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/cpu,cpuacct      cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,cpu,cpuacct\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/perf_event       cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,perf_event\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/hugetlb          cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,hugetlb\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/blkio            cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,blkio\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/devices          cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,devices\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/cpuset           cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,cpuset\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/memory           cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,memory\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/net_cls,net_prio cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,net_cls,net_prio\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/freezer          cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,freezer\n\u2502 \u2502 \u2514\u2500/sys/fs/cgroup/pids             cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,pids\n\u2502 \u251c\u2500/sys/fs/pstore                    pstore     pstore     rw,nosuid,nodev,noexec,relatime\n\u2502 \u251c\u2500/sys/kernel/debug                 debugfs    debugfs    rw,relatime\n\u2502 \u2514\u2500/sys/fs/fuse/connections          fusectl    fusectl    rw,relatime\n\u251c\u2500/proc                               proc       proc       rw,nosuid,nodev,noexec,relatime\n\u2502 \u2514\u2500/proc/sys/fs/binfmt_misc          systemd-1  autofs     rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct\n\u251c\u2500/dev                                udev       devtmpfs   rw,nosuid,relatime,size=500888k,nr_inodes=125222,mode=755\n\u2502 \u251c\u2500/dev/pts                          devpts     devpts     rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000\n\u2502 \u251c\u2500/dev/shm                          tmpfs      tmpfs      rw,nosuid,nodev\n\u2502 \u251c\u2500/dev/hugepages                    hugetlbfs  hugetlbfs  rw,relatime\n\u2502 \u2514\u2500/dev/mqueue                       mqueue     mqueue     rw,relatime\n\u251c\u2500/run                                tmpfs      tmpfs      rw,nosuid,noexec,relatime,size=101596k,mode=755\n\u2502 \u251c\u2500/run/lock                         tmpfs      tmpfs      rw,nosuid,nodev,noexec,relatime,size=5120k\n\u2502 \u2514\u2500/run/user/1000                    tmpfs      tmpfs      rw,nosuid,nodev,relatime,size=101596k,mode=700,uid=1000,gid=1000\n\u251c\u2500/var/lib/lxcfs                      lxcfs      fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other\n\u2514\u2500/vagrant                            vagrant    vboxsf     rw,nodev,relatime\n
        "},{"location":"findmnt/#output-as-keyvalue-pairs-per-device","title":"Output as key/value pairs per device","text":"
        $ findmnt -P\nTARGET=\"/sys\" SOURCE=\"sysfs\" FSTYPE=\"sysfs\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime\"\nTARGET=\"/proc\" SOURCE=\"proc\" FSTYPE=\"proc\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime\"\nTARGET=\"/dev\" SOURCE=\"udev\" FSTYPE=\"devtmpfs\" OPTIONS=\"rw,nosuid,relatime,size=500888k,nr_inodes=125222,mode=755\"\nTARGET=\"/dev/pts\" SOURCE=\"devpts\" FSTYPE=\"devpts\" OPTIONS=\"rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000\"\nTARGET=\"/run\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"rw,nosuid,noexec,relatime,size=101596k,mode=755\"\nTARGET=\"/\" SOURCE=\"/dev/sda1\" FSTYPE=\"ext4\" OPTIONS=\"rw,relatime,data=ordered\"\nTARGET=\"/sys/kernel/security\" SOURCE=\"securityfs\" FSTYPE=\"securityfs\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime\"\nTARGET=\"/dev/shm\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"rw,nosuid,nodev\"\nTARGET=\"/run/lock\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,size=5120k\"\nTARGET=\"/sys/fs/cgroup\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"ro,nosuid,nodev,noexec,mode=755\"\nTARGET=\"/sys/fs/cgroup/systemd\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd\"\nTARGET=\"/sys/fs/pstore\" SOURCE=\"pstore\" FSTYPE=\"pstore\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime\"\nTARGET=\"/sys/fs/cgroup/net_cls,net_prio\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,net_cls,net_prio\"\nTARGET=\"/sys/fs/cgroup/perf_event\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,perf_event\"\nTARGET=\"/sys/fs/cgroup/cpu,cpuacct\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,cpu,cpuacct\"\nTARGET=\"/sys/fs/cgroup/hugetlb\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,hugetlb\"\nTARGET=\"/sys/fs/cgroup/memory\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,memory\"\nTARGET=\"/sys/fs/cgroup/devices\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,devices\"\nTARGET=\"/sys/fs/cgroup/freezer\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,freezer\"\nTARGET=\"/sys/fs/cgroup/cpuset\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,cpuset\"\nTARGET=\"/sys/fs/cgroup/blkio\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,blkio\"\nTARGET=\"/sys/fs/cgroup/pids\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,pids\"\nTARGET=\"/proc/sys/fs/binfmt_misc\" SOURCE=\"systemd-1\" FSTYPE=\"autofs\" OPTIONS=\"rw,relatime,fd=26,pgrp=1,timeout=0,minproto=5,maxproto=5,direct\"\nTARGET=\"/sys/kernel/debug\" SOURCE=\"debugfs\" FSTYPE=\"debugfs\" OPTIONS=\"rw,relatime\"\nTARGET=\"/dev/hugepages\" SOURCE=\"hugetlbfs\" FSTYPE=\"hugetlbfs\" OPTIONS=\"rw,relatime\"\nTARGET=\"/dev/mqueue\" SOURCE=\"mqueue\" FSTYPE=\"mqueue\" OPTIONS=\"rw,relatime\"\nTARGET=\"/sys/fs/fuse/connections\" SOURCE=\"fusectl\" FSTYPE=\"fusectl\" OPTIONS=\"rw,relatime\"\nTARGET=\"/var/lib/lxcfs\" SOURCE=\"lxcfs\" FSTYPE=\"fuse.lxcfs\" OPTIONS=\"rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other\"\nTARGET=\"/vagrant\" SOURCE=\"vagrant\" FSTYPE=\"vboxsf\" OPTIONS=\"rw,nodev,relatime\"\nTARGET=\"/run/user/1000\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"rw,nosuid,nodev,relatime,size=101596k,mode=700,uid=1000,gid=1000\"\n
        "},{"location":"findmnt/#output-as-json","title":"Output as JSON","text":"
        $ findmnt -J\n{\n   \"filesystems\": [\n      {\"target\": \"/\", \"source\": \"/dev/sda1\", \"fstype\": \"ext4\", \"options\": \"rw,relatime,data=ordered\",\n         \"children\": [\n            {\"target\": \"/sys\", \"source\": \"sysfs\", \"fstype\": \"sysfs\", \"options\": \"rw,nosuid,nodev,noexec,relatime\",\n               \"children\": [\n                  {\"target\": \"/sys/kernel/security\", \"source\": \"securityfs\", \"fstype\": \"securityfs\", \"options\": \"rw,nosuid,nodev,noexec,relatime\"},\n                  {\"target\": \"/sys/fs/cgroup\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"ro,nosuid,nodev,noexec,mode=755\",\n                     \"children\": [\n                        {\"target\": \"/sys/fs/cgroup/systemd\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd\"},\n                        {\"target\": \"/sys/fs/cgroup/net_cls,net_prio\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,net_cls,net_prio\"},\n                        {\"target\": \"/sys/fs/cgroup/perf_event\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,perf_event\"},\n                        {\"target\": \"/sys/fs/cgroup/cpu,cpuacct\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,cpu,cpuacct\"},\n                        {\"target\": \"/sys/fs/cgroup/hugetlb\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,hugetlb\"},\n                        {\"target\": \"/sys/fs/cgroup/memory\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,memory\"},\n                        {\"target\": \"/sys/fs/cgroup/devices\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,devices\"},\n                        {\"target\": \"/sys/fs/cgroup/freezer\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,freezer\"},\n                        {\"target\": \"/sys/fs/cgroup/cpuset\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,cpuset\"},\n                        {\"target\": \"/sys/fs/cgroup/blkio\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,blkio\"},\n                        {\"target\": \"/sys/fs/cgroup/pids\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,pids\"}\n                     ]\n                  },\n                  {\"target\": \"/sys/fs/pstore\", \"source\": \"pstore\", \"fstype\": \"pstore\", \"options\": \"rw,nosuid,nodev,noexec,relatime\"},\n                  {\"target\": \"/sys/kernel/debug\", \"source\": \"debugfs\", \"fstype\": \"debugfs\", \"options\": \"rw,relatime\"},\n                  {\"target\": \"/sys/fs/fuse/connections\", \"source\": \"fusectl\", \"fstype\": \"fusectl\", \"options\": \"rw,relatime\"}\n               ]\n            },\n            {\"target\": \"/proc\", \"source\": \"proc\", \"fstype\": \"proc\", \"options\": \"rw,nosuid,nodev,noexec,relatime\",\n               \"children\": [\n                  {\"target\": \"/proc/sys/fs/binfmt_misc\", \"source\": \"systemd-1\", \"fstype\": \"autofs\", \"options\": 
\"rw,relatime,fd=26,pgrp=1,timeout=0,minproto=5,maxproto=5,direct\"}\n               ]\n            },\n            {\"target\": \"/dev\", \"source\": \"udev\", \"fstype\": \"devtmpfs\", \"options\": \"rw,nosuid,relatime,size=500888k,nr_inodes=125222,mode=755\",\n               \"children\": [\n                  {\"target\": \"/dev/pts\", \"source\": \"devpts\", \"fstype\": \"devpts\", \"options\": \"rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000\"},\n                  {\"target\": \"/dev/shm\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"rw,nosuid,nodev\"},\n                  {\"target\": \"/dev/hugepages\", \"source\": \"hugetlbfs\", \"fstype\": \"hugetlbfs\", \"options\": \"rw,relatime\"},\n                  {\"target\": \"/dev/mqueue\", \"source\": \"mqueue\", \"fstype\": \"mqueue\", \"options\": \"rw,relatime\"}\n               ]\n            },\n            {\"target\": \"/run\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"rw,nosuid,noexec,relatime,size=101596k,mode=755\",\n               \"children\": [\n                  {\"target\": \"/run/lock\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"rw,nosuid,nodev,noexec,relatime,size=5120k\"},\n                  {\"target\": \"/run/user/1000\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"rw,nosuid,nodev,relatime,size=101596k,mode=700,uid=1000,gid=1000\"}\n               ]\n            },\n            {\"target\": \"/var/lib/lxcfs\", \"source\": \"lxcfs\", \"fstype\": \"fuse.lxcfs\", \"options\": \"rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other\"},\n            {\"target\": \"/vagrant\", \"source\": \"vagrant\", \"fstype\": \"vboxsf\", \"options\": \"rw,nodev,relatime\"},\n         ]\n      }\n   ]\n}\n
        "},{"location":"findmnt/#see-also","title":"See also","text":"
        • lsblk
        "},{"location":"fio/","title":"fio","text":"

        \"fio - flexible I/O tester\" - man fio

        This seems to work well on Linux, but not so well on macOS.

        "},{"location":"fio/#examples","title":"Examples","text":""},{"location":"fio/#simple-disk-benchmark","title":"Simple disk benchmark","text":"
        mkdir temp  # somewhere on the disk you want to test\ncd temp\nfio \\\n  --bs=4k \\\n  --end_fsync=1 \\\n  --iodepth=1 \\\n  --ioengine=posixaio \\\n  --name=random-write \\\n  --numjobs=1 \\\n  --runtime=60 \\\n  --rw=randwrite \\\n  --size=4g \\\n  --time_based\n
        "},{"location":"fio/#see-also","title":"See also","text":"
        • pv - Pipe viewer can give you stats about arbitrary pipeline throughput.
        • How fast are your disks? Find out the open source way, with fio
        "},{"location":"flask/","title":"flask","text":"

        \"Flask is a lightweight WSGI web application framework. It is designed to make getting started quick and easy, with the ability to scale up to complex applications. \" - https://palletsprojects.com/p/flask/

        "},{"location":"flask/#links","title":"Links","text":"
        • https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world
        • https://restfulapi.net - General REST API guidelines
        "},{"location":"flask/#tips","title":"Tips","text":""},{"location":"flask/#show-routes","title":"Show routes","text":"
        flask routes\n
        "},{"location":"flask/#debug-in-a-shell","title":"Debug in a shell","text":"
        export FLASK_APP=app\nexport FLASK_ENV=development\nflask shell\n
        "},{"location":"flipper-zero/","title":"Flipper Zero","text":"

        \"Multi-tool Device for Geeks\" - https://flipperzero.one

        "},{"location":"flipper-zero/#links","title":"Links","text":"
        • https://docs.flipper.net: The official docs, which are awesome.
        • https://instantiator.dev/post/flipper-zero-app-tutorial-01
        "},{"location":"flipper-zero/#see-also","title":"See also","text":"
        • My NFC notes
        • My Nintendo Amiibo notes
        "},{"location":"fluent-bit/","title":"fluent-bit","text":"

        \"Fluent Bit is an open source Log Processor and Forwarder which allows you to collect any data like metrics and logs from different sources, enrich them with filters and send them to multiple destinations. It's the preferred choice for containerized environments like Kubernetes.\" - https://fluentbit.io

        "},{"location":"fluent-bit/#examples","title":"Examples","text":""},{"location":"fluent-bit/#simple-stdout-log-server","title":"Simple stdout log server","text":"

        Useful for debugging.

        fluent-bit -i tcp -p port=4444 -p format=none -f1 -o stdout\n
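
        With that running, you can send it test data from another terminal, for example with netcat, and watch the records appear on stdout:

        echo 'hello fluent-bit' | nc localhost 4444\n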
        "},{"location":"fluentd/","title":"fluentd","text":"

        \"Fluentd is an open source data collector for unified logging layer.\" - https://www.fluentd.org/

        "},{"location":"fluentd/#snips","title":"Snips","text":""},{"location":"fluentd/#parse-the-tail-pos-file-into-decimal-position-inode-and-inspect-the-position","title":"Parse the tail pos file into decimal position, inode, and inspect the position","text":"

        This pos_file's columns are Filename, Position, Inode. In the below examples we don't actually do anything with the inode number, but you could use it in debugfs, etc.

        POS_FILE=\"/var/log/fluentd-containers.log.pos\"\nwhile read -r file pos inode ; do\n    echo \"$file $((16#$pos)) $((16#$inode))\"\ndone < \"$POS_FILE\"\n

        This will output something like:

        /var/log/containers/calico-node-0am...cb0.log 2797 5347425\n
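
        As an aside, assuming an ext2/3/4 filesystem, that inode number can be mapped back to a pathname with debugfs (substitute your actual block device):

        # ncheck maps inode numbers to pathnames\nsudo debugfs -R 'ncheck 5347425' /dev/sda1\n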

        You can feed this through some math to see how far behind fluentd is for each file, and then into dd to see the data that fluentd has yet to process:

        while read -r file pos _ ; do\n    if f=$(readlink -f \"$file\") && [ -f \"$f\" ] ; then  # resolve symlinks and check that file exists\n        f_size=\"$(stat -c \"%s\" \"$f\")\"  # get file size in bytes\n        pos_dec=$((16#$pos))  # convert pos from hex to dec\n        if [[ \"$f_size\" -gt \"${pos_dec}\" ]]; then\n            echo \"$file f_size=${f_size}, pos=${pos_dec}, diff=$(( f_size - 16#$pos ))\"\n            dd status=none bs=1 if=\"$f\" skip=\"${pos_dec}\" count=256\n            echo\n        fi\n    fi\ndone < \"$POS_FILE\"\n

        Which will output a bunch of lines like:

        /var/log/containers/network-metering-agent-tsl6s_kube-system_agent-25c3e4bc7bd0ddfdda571d8279b040d0a2f3dac03786a40b19dac11873a6af5a.log f_size=1996377, pos=1995147, diff=1230\n{\"log\":\"W0809 18:03:09.184540       1 reflector.go:289] k8s.io/client-go/informers/factory.go:133: watch of *v1.ConfigMap ended with: too old resource version: 1489908695 (1489955501)\\n\",\"stream\":\"stderr\",\"time\":\"2021-08-09T18:03:09.184786383Z\"}\n{\"log\":\"W0\n
        "},{"location":"fortune/","title":"fortune","text":"

        fortune is a unix command that displays a random fortune on the CLI.

        "},{"location":"fortune/#make-a-fortune-file","title":"Make a fortune file","text":"
        1. Create a file that has each fortune separated by a line containing only a % symbol.
        2. Run strfile fortunes.txt, which will create fortunes.txt.dat
        3. You can then see the fortunes with fortune fortunes.txt. This also works with many files in a single directory: for file in *.txt ; do strfile \"${file}\" ; done ; fortune . (See the sketch below for an end-to-end run.)
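
        A minimal end-to-end sketch of those steps:

        { echo 'A fortune.' ; echo '%' ; echo 'Another fortune.' ; } > fortunes.txt\nstrfile fortunes.txt  # writes fortunes.txt.dat\nfortune fortunes.txt  # prints one of the two fortunes at random\n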
        "},{"location":"fuser/","title":"fuser","text":"

        \"fuser - identify processes using files or sockets\" - man fuser

        This command is not directly related to the FUSE command fusermount.

        The output here is a bit unusual in that it sends PIDs to stdout and everything else to stderr, but interleaves them so what you see in the terminal is much different from what you get via pipes.

        "},{"location":"fuser/#examples","title":"Examples","text":""},{"location":"fuser/#show-what-is-using-varlog","title":"Show what is using /var/log","text":"
        $ fuser -m /var/log\n/var/log:             2858m  4608rce  4609rce  4749rce\n

        See man fuser for the meaning of each letter.

        But what you get via a pipe is just the pids. The first line of output is all of stderr, and beyond that is stdout.

        $ fuser -m /var/log | xargs -n1 echo\n/var/log:           mrcercerce\n2858\n4608\n4609\n4749\n
        "},{"location":"fuser/#kill-all-processes-accessing-a-given-file","title":"Kill all processes accessing a given file","text":"
        FILE=\"/path/to/somefile.log\"\nfuser -km \"$FILE\"\n
        "},{"location":"fuser/#show-processes-accessing-the-local-ssh-port","title":"Show processes accessing the local ssh port","text":"

        This only works for local ports.

        ## By service/protocol (see /etc/services)\nsudo fuser ssh/tcp\n## By arbitrary port/protocol\nsudo fuser 22/tcp\n
        "},{"location":"fuser/#check-if-a-file-is-being-accessed","title":"Check if a file is being accessed","text":"
        FILE=\"/path/to/somefile.vmdk\nfuser \"$FILE\" && echo \"Yes, $FILE is being used.\"\n
        "},{"location":"gaming/","title":"gaming","text":""},{"location":"gaming/#video-game-engines-and-frameworks","title":"Video Game Engines and Frameworks","text":"
        • https://love2d.org (2d lua)
        • https://lovr.org (VR lua)
        • https://godotengine.org
        • https://www.pygame.org (python)
        • https://www.lexaloffle.com/pico-8.php (2d lua fantasy console)
        • https://www.lexaloffle.com/voxatron.php (3d lua fantasy console)
        • https://paladin-t.github.io/bitty
        "},{"location":"genicam/","title":"GenICam","text":"

        \"The Generic Interface for Cameras standard is the base for plug & play handling of cameras and devices.\" - http://www.emva.org/standards-technology/genicam/

        "},{"location":"geodata/","title":"geodata","text":"
        • https://geojson.org
        • https://geojson.io: Online GeoJSON editor
        • https://tools.ietf.org/html/rfc7946
        • https://www.gpsbabel.org
        • https://en.wikipedia.org/wiki/Military_Grid_Reference_System
        • https://en.wikipedia.org/wiki/Open_Location_Code
        • https://en.wikipedia.org/wiki/World_Geodetic_System
        • https://what3words.com
        • https://www.geocaching.com
        • https://macwright.com/lonlat: \"Geospatial software has a fundamental inconsistency: which order we put longitude and latitude in.\"
        • https://platform.leolabs.space/visualization: Live visualization of low earth orbit traffic, including satellites, rockets and debris.
        • https://satellitemap.space: Live visualization of a few different types of satellites like Starlink, GPS, Oneweb
        • https://en.wikipedia.org/wiki/Dilution_of_precision_(navigation)
        • https://healpix.jpl.nasa.gov: \"Hierarchical Equal Area isoLatitude Pixelization of a sphere\"
        • https://hivekit.io/blog/the-headaches-of-distributed-spatial-indices: \"Hexagons and Hilbert Curves - The Headaches of Distributed Spatial Indices\"
        • https://app.regrid.com/us: Property lines
        • https://shademap.app: Visualize shadows from trees and landscape for any given time and place
        "},{"location":"geodata/#hexagonal-geodata","title":"Hexagonal geodata","text":"
        • https://www.redblobgames.com/grids/hexagons/: Lots of interactive learning fun with hexagons. Really great resource for playing with hexagon theory.
        • https://www.uber.com/blog/h3/: Uber\u2019s Hexagonal Hierarchical Spatial Index
        • https://pro.arcgis.com/en/pro-app/latest/tool-reference/spatial-statistics/h-whyhexagons.htm: short explanation of the benefits of a hexagonal geo datum
        • https://www.gamedev.net/articles/programming/general-and-gameplay-programming/coordinates-in-hexagon-based-tile-maps-r1800/: interesting theory for addressing hexagonal space in games
        • https://james.darpinian.com/satellites \"See a satellite tonight. No telescope required\"
        "},{"location":"gige-vision/","title":"GigE Vision","text":"

        \"GigE Vision is an interface standard introduced in 2006 for high-performance industrial cameras. It provides a framework for transmitting high-speed video and related control data over Ethernet networks. The distribution of software or development, manufacture or sale of hardware that implement the standard, require the payment of annual licensing fees.\" - https://en.wikipedia.org/wiki/GigE_Vision

        "},{"location":"gige-vision/#technology","title":"Technology","text":"
        • GigE Vision Control Protocol (GVCP)
        • GigE Vision Stream Protocol (GVSP)
        • GigE Device Discovery Mechanism
        "},{"location":"gige-vision/#see-also","title":"See also","text":"
        • http://www.emva.org/standards-technology/genicam/
        "},{"location":"git/","title":"Git","text":"

        badass version control

        "},{"location":"git/#links","title":"Links","text":"
        • https://git-scm.com/book/
        • https://docs.github.com/en/get-started/using-github/github-flow
        • https://github.com/metacloud/gilt: gilt - A GIT layering tool
        • https://github.com/git/git/tree/master/Documentation/RelNotes: Git release notes
        • https://guides.github.com/introduction/flow/index.html: Understanding the GitHub flow
        • http://nvie.com/posts/a-successful-git-branching-model: A successful Git branching model
        • https://chris.beams.io/posts/git-commit: How to Write a Git Commit Message
        • https://www.conventionalcommits.org: Conventional Commits: A specification for adding human and machine readable meaning to commit messages
        • https://github.com/googleapis/release-please: Release Please automates CHANGELOG generation, the creation of GitHub releases, and version bumps for your projects.
        • https://diziet.dreamwidth.org/14666.html: Never use git submodules
        • https://forgejo.org: git hosting software with organization and user management in a web UI
        • https://github.blog/2023-10-16-measuring-git-performance-with-opentelemetry
        • https://glasskube.dev/guides/git/: \"The guide to Git I never had.\"
        • https://pre-commit.com: I use this in nearly every git repo I create, and I suggest everybody else do the same.
        • https://stefaniemolin.com/tags/pre-commit%20hooks/: Stefanie Molin's pre-commit articles
        • https://www.golinuxcloud.com/git-head-caret-vs-tilde-at-sign-examples: \"Understanding git HEAD~ vs HEAD^ vs HEAD@{}\"
        • https://blog.izissise.net/posts/gitconfig
        "},{"location":"git/#examples","title":"Examples","text":""},{"location":"git/#git-init","title":"git init","text":""},{"location":"git/#create-a-git-repository-for-the-cwd","title":"Create a git repository for the CWD","text":"
        git init\necho \"\" >> .gitignore\necho \"# Ignore other unneeded files.\n*.swp\n*~\n.DS_Store\" >> .gitignore\n
        "},{"location":"git/#git-clone","title":"git clone","text":""},{"location":"git/#clone-a-local-repo","title":"Clone a local repo","text":"
        git clone /path/to/repo\n
        "},{"location":"git/#clone-a-remote-git-repo-via-ssh","title":"Clone a remote git repo via ssh","text":"
        git clone user@ssh_server:/opt/git/project\n
        "},{"location":"git/#clone-and-specify-a-key-when-using-ssh-agent","title":"Clone and specify a key when using ssh-agent","text":"

        When using ssh-agent, having several keys loaded can present problems. One of those problems is having multiple keys loaded that have different authorizations on a remote git server like GitHub. For instance, say you have ~/.ssh/id_ed25519_home as your personal private key and ~/.ssh/id_ed25519_work as your private work key. If you try to clone a work repo and git tries to authenticate with your home identity first, it will be unauthorized and the clone will fail, even though you have a second identity loaded that could have succeeded. To work around this, do something like:

        export GIT_SSH_COMMAND=\"ssh -o IdentitiesOnly=yes -i $HOME/.ssh/id_ed25519_work\"\ngit clone git@github.com:your-work-gh-org/super-secret-repo.git\n

        This works fine when one key has authorization to all private repos. It becomes difficult when you have multiple repos, each with a key that only has authorization for that single repo, such as when using deploy keys tied to a single repo. If you try that when doing something like yarn install, which clones multiple private repos, it will fail 100% of the time. In that case, you can follow the next example. You may have to read it twice: the configuration is easier to set up going from ssh to git, but the process actually flows from git into ssh.

        Construct an ssh_config file that configures a unique Host for each private repo you need to clone and private key file that has access to it:

        Host secret_repo_1\n   Hostname github.com\n   IdentityFile ~/.ssh/private_key_for_repo_1\n\nHost secret_repo_2\n   Hostname github.com\n   IdentityFile ~/.ssh/private_key_for_repo_2\n\nIdentitiesOnly yes\n

        The ssh_config above creates what is essentially a host alias for each repo, where the key that is authorized for the repo is used when ssh connects to the correlated Host entry. In the next step, we do the same thing in reverse by crafting a gitconfig file with one stanza for each of the ssh Host entries from your ssh_config, pointing it back to github:

        [url \"git@secret_repo_1:your-work-gh-org/super-secret-repo-1.git\"]\n    insteadOf = git@github.com:your-work-gh-org/super-secret-repo-1.git\n\n[url \"git@secret_repo_2:your-work-gh-org/super-secret-repo-2.git\"]\n    insteadOf = git@github.com:your-work-gh-org/super-secret-repo-2.git\n

        We then export two variables in the shell:

        export GIT_SSH_COMMAND=\"ssh -F $PWD/your_crafted_ssh_config_file\"\nexport GIT_CONFIG_GLOBAL=\"$PWD/your_crafted_gitconfig_file\"\n

        What happens next is that when you execute git clone git@github.com:your-work-gh-org/super-secret-repo-1.git, which is the original git URL, your git config rewrites the URL to git@secret_repo_1:your-work-gh-org/super-secret-repo-1.git. The server name is passed into ssh, which uses your custom ssh_config file to connect to github.com with the identity file that is unique to that git repository. The same series of steps happens for secret_repo_2. The result is that each of these git repositories can be cloned using its original github URL, but these custom ssh configs are used in the process, which allows the right authentication mechanism to be used for each individual git repository. This all happens without us having to alter the source code of the repo we are building, EG: without modifying the package.json that yarn uses. Using these techniques, we can set up CI to build software from private repositories using deploy keys where we would otherwise be blocked by ssh authentication errors, even though everything might work fine for somebody whose single ssh key is authorized to clone all of the repositories.
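
        To sanity check the plumbing before running a build, you can test each host alias directly; with a deploy key, github responds with the repo the key is authorized for rather than a username. (The alias and file names here are the hypothetical ones from the configs above.)

        # test the ssh half by itself\nssh -F your_crafted_ssh_config_file -T git@secret_repo_1\n# test the whole git-into-ssh flow without cloning\nexport GIT_SSH_COMMAND=\"ssh -F $PWD/your_crafted_ssh_config_file\"\nexport GIT_CONFIG_GLOBAL=\"$PWD/your_crafted_gitconfig_file\"\ngit ls-remote git@github.com:your-work-gh-org/super-secret-repo-1.git\n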

        "},{"location":"git/#git-filesystem-operations","title":"git filesystem operations","text":""},{"location":"git/#add-everything-in-the-cwd-to-the-git-repo","title":"Add everything in the CWD to the git repo","text":"
        git add .\n
        "},{"location":"git/#rename-a-file-in-the-git-repo","title":"Rename a file in the git repo","text":"

        This also renames the filesystem file.

        git mv README.rdoc README.md\n
        "},{"location":"git/#delete-a-file-from-the-repo","title":"Delete a file from the repo","text":"
        git rm filename\n
        "},{"location":"git/#git-status","title":"git status","text":""},{"location":"git/#check-the-status-of-git","title":"Check the status of git","text":"
        git status\n
        "},{"location":"git/#git-commit","title":"git commit","text":""},{"location":"git/#commit-the-current-changes","title":"Commit the current changes","text":"
        git commit -m \"Initial commit\"\n
        "},{"location":"git/#commit-all-changes-with-commit-a","title":"Commit all changes with commit -a","text":"
        git commit -a -m \"Improve the README file\"\n
        "},{"location":"git/#skip-git-commit-hooks","title":"Skip git commit hooks","text":"
        git commit --no-verify\n
        "},{"location":"git/#git-tag","title":"git tag","text":"

        https://git-scm.com/book/en/v2/Git-Basics-Tagging

        Git supports two types of tags: lightweight and annotated.

        "},{"location":"git/#create-an-annotated-tag","title":"Create an annotated tag","text":"

        Annotated tags are stored as full objects in the Git database.

        git tag -m \"Improve X and Y.\" v0.5.3\n
        "},{"location":"git/#create-a-light-tag","title":"Create a light tag","text":"

        These are basically DNS A records for git SHAs: the SHA is referenced by the tag, and no other info is stored. Using them is generally frowned upon because tags tend to be used where context is important, so the annotations that go with an annotated tag are more suitable.

        git tag v0.5.3\n
        "},{"location":"git/#delete-a-local-tag","title":"Delete a local tag","text":"
        git tag -d v0.5.3\n
        "},{"location":"git/#delete-a-remote-tag","title":"Delete a remote tag","text":"
        git push --delete origin v0.5.3\n
        "},{"location":"git/#show-what-tags-contain-a-given-sha","title":"Show what tags contain a given sha","text":"
        git tag --contains abc123\n
        "},{"location":"git/#git-config","title":"git config","text":"

        git config interacts with configs. There are three scopes: --local, --global, and --system, which override each other as shown in the sketch after this list.

        • Local = per-repo settings. IE: stored in the repo's .git/config file
        • Global = per-user settings. IE: stored in ~/.gitconfig
        • System = per-system settings, found in /etc/ or wherever git is looking for system settings.
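
        A quick sketch of scope precedence, using hypothetical values; the narrowest defined scope wins when reading:

        git config --global user.email home@example.com  # per-user, written to ~/.gitconfig\ngit config --local user.email work@example.com   # per-repo, written to .git/config\ngit config user.email                            # prints work@example.com: local wins\n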
        "},{"location":"git/#always-use-ssh-for-githubcom","title":"Always use ssh for github.com","text":"
        git config --global url.\"git@github.com:\".insteadOf \"https://github.com/\"\n
        "},{"location":"git/#git-client-setup","title":"Git client setup","text":"

        This creates and modifies ~/.gitconfig with some parameters:

        git config --global user.name \"Daniel Hoherd\"\ngit config --global user.email daniel.hoherd@gmail.com\ngit config --global alias.co checkout\ngit config --global core.editor \"vim\"\ngit config --global merge.tool vimdiff\ngit config --global log.date iso\n
        "},{"location":"git/#edit-a-gitconfig-file-with-some-params","title":"Edit a .git/config file with some params","text":"
        git config --replace-all svn-remote.svn.url https://svn.example.com/ops/\ngit config --replace-all svn-remote.svn.fetch ops:refs/remotes/trunk\ngit config --add svn-remote.svn.preserve-empty-dirs true\ngit config --unset svn-remote.svn.branches\ngit config --unset svn-remote.svn.tags\ngit config --add svn.authorsfile /srv-cluster/git-svn/git/author.txt\n
        "},{"location":"git/#show-your-configs-in-a-dotted-one-one-per-option-format","title":"Show your configs in a dotted one-one-per-option format","text":"
        git config --list\n

        Also include the file that each configuration setting is defined in:

        git config --list --show-origin\n
        "},{"location":"git/#git-diff","title":"git diff","text":"

        Show differences between objects and stuff.

        "},{"location":"git/#diff-between-staged-and-committed","title":"diff between staged and committed","text":"

        This is useful when you're adding files that were not previously in the repo alongside changes to existing files, since a bare git diff before adding the files will only show changes to files that were already in the repo.

        git diff --staged\n
        "},{"location":"git/#diff-that-shows-per-word-colored-differences","title":"diff that shows per-word colored differences","text":"
        git diff --color-words\n
        "},{"location":"git/#machine-readable-word-diff","title":"Machine readable word diff","text":"
        git diff --word-diff\n
        "},{"location":"git/#diff-and-ignore-whitespace","title":"Diff and ignore whitespace","text":"

        This does not ignore line ending changes or blank line insertion and removals.

        git diff -w\n
        "},{"location":"git/#show-diffs-between-master-and-a-given-date","title":"Show diffs between master and a given date","text":"
        git diff $(git rev-list -n1 --before=\"1 month ago\" master)\n
        "},{"location":"git/#show-what-has-changed-since-a-point-in-time","title":"Show what has changed since a point in time","text":"
        git whatchanged --since=\"18 hours ago\" -p\n

        or...

        git whatchanged --since=\"18 hours ago\" --until=\"6 hours ago\" -p\n
        "},{"location":"git/#git-blame","title":"git blame","text":"

        git blame shows information about the commit associated with each line of a file.

        "},{"location":"git/#simple-usage","title":"Simple usage","text":"
        git blame <filename>\n
        "},{"location":"git/#show-non-whitespace-changes-in-blame","title":"Show non-whitespace changes in blame","text":"

        When somebody has reformatted code but didn't make any code changes, this will show the prior commits where something more than whitespace changed.

        git blame -w <filename>\n
        "},{"location":"git/#git-log","title":"git log","text":"

        Shows commit history.

        "},{"location":"git/#view-the-commit-history","title":"View the commit history","text":"
        git log\n
        "},{"location":"git/#show-one-log-entry","title":"Show one log entry","text":"
        git log -1\n
        "},{"location":"git/#show-git-commits-that-contain-a-given-string","title":"Show git commits that contain a given string","text":"

        This searches the content of the diff, not the commit message.

        git log -S search_string\n
        "},{"location":"git/#show-commit-messages-that-match-a-given-regex","title":"Show commit messages that match a given regex","text":"
        git log --grep='[Ww]hitespace'\n
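
        The -i flag makes the match case-insensitive, which avoids the character class:

        git log -i --grep='whitespace'\n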
        "},{"location":"git/#show-logs-for-a-given-dir-in-the-last-3-days","title":"Show logs for a given dir in the last 3 days","text":"
        git log --since=3.days modules/profile_sensu\n
        "},{"location":"git/#show-raw-log-history-for-5-most-recent-commits","title":"Show raw log history for 5 most recent commits","text":"

        Useful for seeing TZ settings.

        git log --format=raw -5\n
        "},{"location":"git/#really-pretty-logs","title":"Really pretty logs","text":"
        git log --graph --oneline --decorate --all\n
        "},{"location":"git/#git-shortlog","title":"git shortlog","text":""},{"location":"git/#show-number-of-commits-by-user-including-e-mail","title":"Show number of commits by user, including e-mail","text":"

        Using the -e flag includes the e-mail address. The list is unique per name/e-mail pair, so if you use a different name along with the same e-mail address, that shows up as two entries in the list.

        git shortlog -ens\n

        Keep in mind this counts commits, not lines within the current codebase. If the repo is old, this information may not be useful for finding people who are in-the-know about the current contents of the repo. It is useful, though, for preparing a user list for a git filter-repo operation.

        "},{"location":"git/#git-show","title":"git show","text":""},{"location":"git/#show-the-changes-from-a-specific-sha","title":"Show the changes from a specific SHA","text":"
        git show f73f9ec7c07e\n
        "},{"location":"git/#show-a-complete-file-as-of-a-given-sha","title":"Show a complete file as of a given SHA","text":"

        This is an absolute path from the git root, not relative to CWD. This command will show the whole file as of the given SHA.

        git show f73f9ec7c07e:dir/filename.yaml\n
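
        To reference a path relative to your current directory instead, prefix it with ./:

        git show f73f9ec7c07e:./filename.yaml\n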
        "},{"location":"git/#git-branches","title":"git branches","text":"

        Branches are an integral part of git. They allow you to work on distinct changes without mixing them all up together.

        "},{"location":"git/#create-a-branch","title":"Create a branch","text":"
        git checkout -b readme-fix\n
        "},{"location":"git/#check-which-branch-youre-in","title":"Check which branch you're in","text":"
        git branch\n
        "},{"location":"git/#rename-move-a-branch","title":"Rename (move) a branch","text":"
        git branch -m oldname newname\n
        "},{"location":"git/#show-what-branches-contain-a-given-sha","title":"Show what branches contain a given sha","text":"
        git branch --contains abc123\n
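
        Add -a to also search remote-tracking branches:

        git branch -a --contains abc123\n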
        "},{"location":"git/#git-merge","title":"git merge","text":"

        This lets you merge two branches.

        "},{"location":"git/#merge-branch-with-master","title":"Merge branch with master","text":"
        git checkout master\ngit merge readme-fix-branch\ngit branch -d readme-fix-branch\n
        "},{"location":"git/#disable-fast-forward-merges","title":"disable fast-forward merges","text":"

        You can control how history is kept when merging. By default git performs a fast-forward merge when possible, simply moving the branch pointer ahead so the merged commits appear inline with no merge commit. Disabling fast-forward always creates a merge commit, so you can see that a series of commits came in together from one branch, making it easier to roll back the whole series without digging through the history to see where each commit came from.

        git config --global merge.ff false\n
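
        To disable fast-forward for a single merge instead of globally:

        git merge --no-ff readme-fix-branch\n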
        "},{"location":"git/#git-filter-repo","title":"git filter-repo","text":"

        git filter-repo is not a standard tool shipped with git, but can be installed separately. It rewrites the history of a git repo, allowing move, rename, merge, and trim operations, among others. Because it rewrites every commit ID, it is a destructive tool: work on a fresh clone of the repo while you iterate on finding the right sequence of operations to get to your desired state. If anything depends on the repo's specific SHAs or other specific history, you probably need to take a harder look at how to solve your problem.

        "},{"location":"git/#extract-one-subdir-into-its-own-repo-renaming-some-files","title":"Extract one subdir into its own repo, renaming some files","text":"

        This is great if you want to extract part of a repo for public release, or just for organizational purposes.

        Extract the path scratch-work/scripts into bin, removing all other history in the repo.

        git-filter-repo --path scratch-work/scripts --path-rename scratch-work/scripts:bin\n
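
        Another common operation is purging a file from every commit in history; this sketch assumes a hypothetical filename:

        # secrets.txt is a hypothetical file to purge from all history\ngit-filter-repo --invert-paths --path secrets.txt\n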
        "},{"location":"git/#remotes","title":"remotes","text":""},{"location":"git/#add-a-remote","title":"Add a remote","text":"
        git remote add upstream https://github.com/danielhoherd/homepass\n
        "},{"location":"git/#push-to-a-specific-remote","title":"Push to a specific remote","text":"
        # push to the master branch of the remote named upstream\ngit push upstream master\n
        "},{"location":"git/#alter-the-source-of-origin","title":"Alter the source of origin","text":"

        If you move your repo to another location, use this command to change the upstream URL:

        git remote set-url origin https://user@newhost/newpath/reponame\n
        "},{"location":"git/#git-reset","title":"git reset","text":"

        git reset allows you to reset your state to what it was at a previous point.

        "},{"location":"git/#reset-to-a-prior-state-based-on-what-has-been-done-locally","title":"Reset to a prior state based on what has been done locally","text":"

        The reflog is a log of what steps have been performed locally. You can view the reflog, then reset to a prior state.

        git reflog # show all HEAD changes\ngit reset --hard 45e0ae5 # reset all git tracked state to 45e0ae5\n

        Alternately, you can use a date:

        git reflog --date=iso # absolute date based reflog references\ngit reset \"HEAD@{2015-03-25 14:45:30 -0700}\" --hard\n
        "},{"location":"git/#reset-feature-branch-to-state-when-it-was-branched-from-master","title":"Reset feature branch to state when it was branched from master","text":"

        Do this if you want to start your branch over with only the current changes. This is useful if you've been iterating through lots of bad changes that were committed and want to clean them all out. It basically lets you squash to a single commit on your branch.

        git reset $(git merge-base master $(git rev-parse --abbrev-ref HEAD))\n
        "},{"location":"git/#hard-reset-of-local-changes","title":"Hard reset of local changes","text":"

        This will abandon all local changes, including any in-progress merge conflicts.

        git fetch origin\ngit reset --hard origin/master\n
        "},{"location":"git/#git-clean","title":"git clean","text":""},{"location":"git/#remove-all-untracked-files-and-directories","title":"Remove all untracked files and directories","text":"

        This is useful after you reset to a prior state. It deletes all files and directories that show up in the untracked section of git status.

        git clean -ffdx\n
        "},{"location":"git/#miscellaneous-tricks","title":"Miscellaneous tricks","text":""},{"location":"git/#refresh-all-git-repos-in-a-path","title":"Refresh all Git repos in a path","text":"
        find /var/www/html/mediawiki/ -name .git | while read -r X ; do\n  pushd \"$(dirname \"${X}\")\" || continue\n  [ \"$(git remote -v | wc -l)\" -gt 0 ] && git pull\n  popd\ndone\n
        "},{"location":"git/#show-a-numbered-list-of-remote-branches-sorted-by-last-commit-date","title":"Show a numbered list of remote branches sorted by last commit date","text":"
        git branch -r | grep -v HEAD | xargs -r -n1 git log -1 \\\n--pretty=format:'%ad %h%d %an | %s %n' --date=iso | sort | nl -ba\n
        "},{"location":"git/#branch-cleanup","title":"Branch cleanup","text":"
        git gc --prune=now\ngit remote prune origin\n
        "},{"location":"git/#git-grep","title":"git grep","text":""},{"location":"git/#find-a-string-in-all-branches","title":"Find a string in all branches","text":"

        This finds the word \"hunter2\" in the tests directory of all branches.

        git grep '\\bhunter2\\b' $(git branch -a --format='%(refname:short)') tests/\n
        "},{"location":"git/#exclude-certain-directories-from-git-grep-results","title":"Exclude certain directories from git grep results","text":"

        You can accomplish this using pathspec syntax.

        git grep searchstring -- ':!excluded-dir' ':!*junk-glob*'\n

        See the pathspec definition in the git glossary for more info.

        "},{"location":"github/","title":"Github","text":"

        \"GitHub is a development platform inspired by the way you work. From open source to business, you can host and review code, manage projects, and build software alongside 50 million developers.\" - https://github.com

        \"GitHub, Inc. is a provider of Internet hosting for software development and version control using Git. It offers the distributed version control and source code management (SCM) functionality of Git, plus its own features. It provides access control and several collaboration features such as bug tracking, feature requests, task management, continuous integration and wikis for every project. Headquartered in California, it has been a subsidiary of Microsoft since 2018.\" - https://en.wikipedia.org/wiki/GitHub

        "},{"location":"github/#tips","title":"Tips","text":""},{"location":"github/#get-all-public-keys-for-a-user","title":"Get all public keys for a user","text":"

        Append .keys to the user profile URL, so https://github.com/danielhoherd becomes https://github.com/danielhoherd.keys. This is useful for adding to ~/.ssh/authorized_keys
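
        For example, to append a user's keys to your own authorized_keys:

        curl -s https://github.com/danielhoherd.keys >> ~/.ssh/authorized_keys\n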

        "},{"location":"github/#get-a-downloadable-patch-for-a-git-commit","title":"Get a downloadable patch for a git commit","text":"

        Append .patch to a commit URL, so https://github.com/apache/airflow/commit/86e2ab53aff becomes https://github.com/apache/airflow/commit/86e2ab53aff.patch
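
        This pairs well with git am, which applies a mailbox-format patch from stdin:

        curl -sL https://github.com/apache/airflow/commit/86e2ab53aff.patch | git am\n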

        "},{"location":"github/#get-a-list-of-repositories-for-a-user","title":"Get a list of repositories for a user","text":"

        This gives a list of repositories sorted by the last time they were pushed to.

        curl -s https://api.github.com/users/danielhoherd/repos?per_page=200 |\njq -r '.[] | \"\\(.pushed_at) \\(.html_url)\"' |\nsort -d |\nnl\n

        https://developer.github.com/v3/repos/

        "},{"location":"github/#add-a-collapsible-section-in-markdown","title":"Add a collapsible section in markdown","text":"

        Markdown supports adding HTML elements. One element that can be useful for hiding large chunks of data that are related to a comment but might drown it out is the <details> element. This works in markdown documents, and PR and issue descriptions and comments.

        <details>\n<summary>Clickable thing that unfolds the details</summary>\nwhatever markdown you want\n\n1. list item 1\n2. list item 2\n\n```py\nimport platform\nplatform.uname()\n```\n\n</details>\n
        "},{"location":"github/#show-your-api-rate-limits","title":"Show your API rate limits","text":"
        gh api rate_limit\n
        "},{"location":"github/#links","title":"Links","text":"
        • CLI interface: https://github.com/cli/cli
        • Python API: https://github.com/PyGithub/PyGithub
        • All issues in all repositories assigned to the logged in user
        • All issues in all repositories assigned to the logged in user that are not in a project
        "},{"location":"github/#get-a-json-file-of-the-last-2000-issues-in-a-repository","title":"Get a json file of the last 2000 issues in a repository","text":"

        Using the gh CLI:

        gh issue list --state all --limit 2000 --json author,createdAt,title > issues.json\n
        "},{"location":"gitlab/","title":"Gitlab","text":"

        \"A single application for the complete DevOps lifecycle\" - https://about.gitlab.com

        "},{"location":"gitlab/#examples","title":"Examples","text":""},{"location":"gitlab/#gitlab-cli","title":"Gitlab CLI","text":"

        First pip3 install --user python-gitlab, then add a ~/.python-gitlab.cfg file like:

        [global]\ndefault = default\nssl_verify = true\ntimeout = 5\n\n[default]\nurl = https://www.gitlab.com/\nprivate_token = <some_value>\n

        Where private_token is generated at https://gitlab.com/profile/personal_access_tokens, after which you can do things like:

        $ gitlab -o yaml -f name_with_namespace,web_url project list --owned=1 --per-page=1\n- name_with_namespace: org-name / sub-group / project-name\n  web_url: https://gitlab.com/orgname/subgroup/projectname\n
        "},{"location":"gitlab/#run-ci-steps-locally","title":"Run CI steps locally","text":"

        Using Gitlab Runner you can run stages of your CI pipeline locally. EG: if you have docker installed, you can run the following command to run the build step of your pipeline:

        gitlab-runner exec docker build\n

        The gitlab-runner command has good inline options.

        "},{"location":"gitlab/#skip-ci-via-git-push-option","title":"Skip CI via git push option","text":"
        git push -o ci.skip\n
        "},{"location":"gitlab/#skip-ci-via-commit-message","title":"Skip CI via commit message","text":"

        https://docs.gitlab.com/ce/ci/yaml/#skipping-jobs

        #!/usr/bin/env bash\n## Git commit-msg hook: skip CI if all changed files are inside dir \"foo/\"\n\nset -x\nregex='^foo\\/'\n\nmapfile -t files < <(git diff --cached --name-only --diff-filter=ACM)\nfor X in \"${files[@]}\" ; do\n  # If any file is not inside dir foo exit 0\n  if [[ ! \"$X\" =~ $regex ]] ; then\n    exit 0\n  fi\ndone\n\n## If we've made it here, all changed files are inside dir foo/\n## so we append '[skip ci]' to the commit message to skip CI in Gitlab\necho \"[skip ci]\" >> \"$1\"\n
        "},{"location":"gitlab/#run-privileged-mode-gitlab-runners-in-gke","title":"Run privileged mode gitlab-runners in GKE","text":"

        https://docs.gitlab.com/runner/install/kubernetes.html#installing-gitlab-runner-using-the-helm-chart

        First init the gitlab chart repo:

        helm repo add gitlab https://charts.gitlab.io\n

        Privileged mode is needed to run docker commands, which is useful for building containers, running ansible molecule, etc..

        The runners.tags includes ${ORG_NAME} which is great for making sure jobs run on your own runners instead of publicly shared runners. This is important because DOCKER_HOST is different in Kubernetes than it is on public dind runners.

        export REGISTRATION_TOKEN=\"foobar\"\nexport ORG_NAME=\"acme\"\nhelm \\\n  install gitlab/gitlab-runner \\\n  --name \"gitlab-runner-${ORG_NAME}-$(date +%s)\" \\\n  --set \"concurrent=20\" \\\n  --set \"gitlabUrl=https://gitlab.com/\" \\\n  --set \"runnerRegistrationToken=${REGISTRATION_TOKEN}\" \\\n  --set \"runners.builds.cpuRequests=1\" \\\n  --set \"runners.env.DOCKER_HOST=tcp://localhost:2375/\" \\\n  --set \"runners.env.DOCKER_TLS_CERTDIR=\" \\\n  --set \"runners.imagePullPolicy=always\" \\\n  --set \"runners.privileged=true\" \\\n  --set \"runners.request_concurrency=10\" \\\n  --set \"runners.tags=${ORG_NAME}\\,dind\\,gke\"\n

        The runners.privileged=true setting is the magic that is needed for docker commands in your .gitlab-ci.yml files to succeed. This --set flag creates the pod environment:

        spec:\n  containers:\n    env:\n    - name: KUBERNETES_PRIVILEGED\n      value: \"true\"\n

        runners.env.DOCKER_TLS_CERTDIR= is required to accommodate the changes introduced in Docker 19.03, outlined in https://about.gitlab.com/2019/07/31/docker-in-docker-with-docker-19-dot-03/ and https://gitlab.com/gitlab-org/gitlab-ce/issues/64959

        See more variables that you can set by running helm inspect gitlab/gitlab-runner

        "},{"location":"gitlab/#use-helm-from-within-the-tiller-pod","title":"Use helm from within the tiller pod","text":"

        In Gitlab managed k8s clusters there are some TLS hurdles to jump over to get access to Helm:

        kubectl exec -ti -n gitlab-managed-apps $(kubectl get pods -n gitlab-managed-apps -l app=helm,name=tiller -o name) sh\nexport HELM_HOST=:44134\nexport HELM_TLS_CA_CERT=/etc/certs/ca.crt\nexport HELM_TLS_CERT=/etc/certs/tls.crt\nexport HELM_TLS_KEY=/etc/certs/tls.key\nexport HELM_TLS_ENABLE=true\n/helm list\n
        "},{"location":"gitlab/#show-kubernetes-gitlab-runner-pods-their-age-their-job-url-and-who-started-the-job","title":"Show kubernetes gitlab runner pods, their age, their job URL, and who started the job","text":"
        kubectl get pods -o custom-columns='NAME:.metadata.name,START_TIME:.status.startTime,CI_JOB_URL:.spec.containers[0].env[?(@.name == \"CI_JOB_URL\")].value,GITLAB_USER_EMAIL:.spec.containers[0].env[?(@.name == \"GITLAB_USER_EMAIL\")].value' | grep -E 'NAME|jobs'\n

        The output of the above command looks like

        NAME                                                 START_TIME             CI_JOB_URL                                    GITLAB_USER_EMAIL\nrunner-ppzmy1zx-project-11144552-concurrent-0q2pmk   2019-10-23T17:00:56Z   https://gitlab.com/foo/bar/-/jobs/330824976   user2@example.com\nrunner-ppzmy1zx-project-11144552-concurrent-1f7nfx   2019-10-23T17:04:27Z   https://gitlab.com/foo/bar/-/jobs/330827586   user1@example.com\nrunner-ppzmy1zx-project-11144552-concurrent-2n84rv   2019-10-23T17:04:19Z   https://gitlab.com/foo/bar/-/jobs/330827587   user1@example.com\n
        "},{"location":"gitlab/#find-k8s-gitlab-runner-pods-that-are-over-1h-old","title":"Find k8s gitlab-runner pods that are over 1h old","text":"
        kubectl get pods --no-headers=true -o custom-columns=\"NAME:.metadata.name,START_TIME:.status.startTime\" |\ngrep '^runner-' |\nwhile read -r pod starttime ; do\n  if [ \"$(( $(date +%s) - $(gdate -d \"$starttime\" \"+%s\") ))\" -gt 3600 ] ; then\n    echo \"$pod\"\n  fi\ndone\n
        "},{"location":"gitlab/#host-a-private-gitlab-pages-site","title":"Host a private gitlab pages site","text":"

        This does not appear to be a documented feature, but it is quite useful. You can host a website with a static address that tracks any given branch. Normal gitlab pages serve a public-facing website, but this is essentially a private gitlab pages site.

        ## .gitlab-ci.yml\ndocs:\n  stage: test\n  script:\n    - mkdocs build\n\n  artifacts:\n    paths:\n      - site\n

        Then hit https://gitlab.com/${ORG}/${GROUP}/${PROJECT}/-/jobs/artifacts/master/file/site/index.html?job=docs in your browser. You will be able to browse the built website, but only if you have access to the repository.

        "},{"location":"gitlab/#pros-and-cons","title":"Pros and cons","text":""},{"location":"gitlab/#pros","title":"Pros","text":"
        • You can create a new repo by locally initializing a git repo, setting a remote_url to where you want your project to be, and pushing your code. The server gives you a notification that the project has been created and gives you a URL to it.
        • Built in docker registry for every project
        • Built in CI with on-prem runners
        "},{"location":"gitlab/#cons","title":"Cons","text":"
        • Push-button GKE is configured at the project level, not the group level, so setting up k8s runners is more involved than it could be.
        • User permissions do not have a distinct group entity; they are managed by creating a project sub-group which functions as both a place to put code and a permission level. This shows up in a variety of places, and I suspect it is the reason we cannot export groups over SAML.
        • There is no command line tool equivalent to github's hub command, which makes it easy to script pull requests and other operations.
        • Terraform provider for Gitlab is pretty limited compared to Github.
        "},{"location":"gitlab/#links","title":"Links","text":"
        • https://docs.gitlab.com/ce/administration/
        • https://docs.gitlab.com/ce/ci/
        • https://docs.gitlab.com/ce/workflow/gitlab_flow.html - Good git branching and review strategy for teams.
        • https://docs.gitlab.com/ee/ci/docker/using_docker_build.html
        • https://docs.gitlab.com/ee/ci/multi_project_pipelines.html
        • https://docs.gitlab.com/ee/ci/variables/
        • https://medium.com/devopslinks/gitlab-pipeline-to-run-cross-multiple-projects-3563af5d6dca
        • https://gitlab.com/help/user/project/clusters/index.md Lots of info about configuring k8s clusters. (This document does not go into enough detail about using helm with TLS secured Gitlab apps.)
        "},{"location":"gitolite/","title":"gitolite","text":"

        \"Gitolite allows you to setup git hosting on a central server, with fine-grained access control and many more powerful features.\" - http://gitolite.com

        "},{"location":"gitolite/#examples","title":"Examples","text":""},{"location":"gitolite/#get-info-about-available-repositories","title":"Get info about available repositories","text":"
        ssh git@gitserver info\n
        "},{"location":"gnu-screen/","title":"GNU Screen","text":"

        GNU screen is a terminal multiplexer: a CLI tool that provides virtual terminals you can attach to and detach from, allowing you to leave commands running on the server when you log out. When you log back in and reattach to the session, it appears that you are right back at the original terminal.

        See also tmux, which is a more modern replacement. If you haven't used tmux or screen before, use tmux unless you need a feature that screen has but tmux does not.

        "},{"location":"gnu-screen/#examples","title":"Examples","text":"

        These all assume that your config has ctrl-a set up as the command character, which is the default.

        "},{"location":"gnu-screen/#rename-a-screen-tab","title":"Rename a screen tab","text":"
        ctrl-a shift-a\n
        "},{"location":"gnu-screen/#show-a-list-of-sessions","title":"Show a list of sessions","text":"
        ctrl-a \"\n
        "},{"location":"gnu-screen/#detach-from-a-screen-session","title":"Detach from a screen session","text":"
        ctrl-a d\n
        "},{"location":"gnu-screen/#re-attach-to-a-specific-screen-session","title":"re-attach to a specific screen session","text":"
        screen -x \"$screen_session_id\"\n
        "},{"location":"gnu-screen/#sharing-your-screen","title":"Sharing your screen","text":"

        In order to share your screen the /usr/bin/screen binary needs to be suid, which is a significant security risk.

        ctrl-a :multiuser on\nctrl-a :acladd [username]\n
        "},{"location":"gnu-screen/#open-three-ipmi-consoles","title":"Open three IPMI consoles","text":"

        This snippet opens several new tabs, each with a name and a start command that includes the name.

        for host in app{213..215}prod ; do\n    screen -t \"${host}\" consoleto \"${host}\"\ndone\n
        "},{"location":"gnu-screen/#open-a-series-of-new-tabs-and-run-ssh-as-root","title":"Open a series of new tabs and run ssh as root","text":"

        For some reason screen doesn't like the ssh user@host syntax, so use ssh -l instead.

        for host in app{215..222}prod ; do\n    screen -t \"${host}\" ssh -l root \"${host}\" puppetd -ov\ndone\n
        "},{"location":"gnu-screen/#terminal-emulation-for-serial-ports","title":"Terminal Emulation for Serial Ports","text":"

        You must first figure out the name of the device that is connecting to your serial port, such as a USB adapter. Then use syntax such as the following:

        screen /dev/tty.usbDeviceName 9600\n
        "},{"location":"gnu-screen/#split-screen","title":"Split Screen","text":"

        ctrl-a S creates a split screen, and ctrl-a [tab] switches between splits. The splits are destroyed when re-attaching.

        "},{"location":"gnu-screen/#screenrc","title":".screenrc","text":"

        Using ~/.screenrc you can define many variables to customize the look of your screen tool, including tabs, clock and colors. Here's an example that gives all three:

        caption always \"%{Mk}%?%-Lw%?%{km}[%n*%f %t]%?(%u)%?%{mk}%?%+Lw%? %{mk}\"\nhardstatus alwayslastline \"%{kW}%H %{kB}|%{km} %l ~ %=%{km}%c:%s %D %M/%d/%Y \"\n
        "},{"location":"gnu-screen/#bugs","title":"Bugs","text":"

        In Ubuntu with a Mac keyboard connected, sometimes the backspace key functions incorrectly. Set TERM=vt100 before running screen to fix this.
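
        For example:

        TERM=vt100 screen\n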

        "},{"location":"gnu-screen/#see-also","title":"See also","text":"
        • tmux - similar functionality, way more resource efficient and more widely used.
        • dvtm - similar functionality.
        • reptyr - Takes over a pty, useful for moving a pid running outside of screen to running within screen.
        "},{"location":"golang/","title":"golang","text":"

        \"Go is an open source programming language that makes it easy to build simple, reliable, and efficient software.\" - https://golang.org/

        Golang keeps to a 6-month minor release cadence. See https://golang.org/doc/devel/release.html

        "},{"location":"golang/#tips-and-examples","title":"Tips and examples","text":"
        • Default GOPATH is ${HOME}/go on unix systems.
        • See a bunch of other defaults with go env
        "},{"location":"golang/#view-default-go-environment","title":"View default go environment","text":"
        go env\n
        "},{"location":"golang/#build-code-found-on-github","title":"Build code found on github","text":"
        go get github.com/solarkennedy/uq\ngo build github.com/solarkennedy/uq\n

        With newer golang versions, you can simply go install github.com/solarkennedy/uq@latest

        "},{"location":"golang/#show-all-modules-used-by-a-golang-project","title":"Show all modules used by a golang project","text":"

        This will list all dependencies recursively, one edge per line, with a module on the left and one of its direct requirements on the right.

        cd \"$SOURCE_REPO\"\ngo mod graph\n
        "},{"location":"golang/#links","title":"Links","text":"
        • https://go.dev/learn
        • https://golang.org/cmd/go
        • https://labix.org/gopkg.in
        • https://play.golang.org
        • https://www.programming-books.io/essential/go/
        • https://thewhitetulip.gitbook.io/bo: \"This is an easy to understand example based tutorial aimed at those who know a little of Go and nothing of webdev and want to learn how to write a webserver in Go.\"
        • https://changelog.com/gotime: \"Your source for diverse discussions from around the Go community\"
        • https://www.youtube.com/channel/UC_BzFbxG2za3bp5NRRRXJSw: justforfunc YT series about programming in Go
        • https://algorithmswithgo.com
        • https://gobyexample.com
        • https://go.dev/wiki/
        • https://www.youtube.com/playlist?list=PLoILbKo9rG3skRCj37Kn5Zj803hhiuRK6: Golang class vids
        "},{"location":"google-cloud/","title":"Google Cloud","text":"

        \"Google Cloud SDK is a set of tools that you can use to manage resources and applications hosted on Google Cloud Platform. These include the gcloud, gsutil, and bq command line tools. The gcloud command-line tool is downloaded along with the Cloud SDK\" - https://cloud.google.com/sdk/docs/

        "},{"location":"google-cloud/#links","title":"Links","text":"
        • https://cloud.google.com/compute/docs/ssh-in-browser
        • https://cloud.google.com/container-builder/docs/build-config
        • https://cloud.google.com/container-builder/docs/create-custom-build-steps
        • https://cloud.google.com/container-registry/docs/quickstart
        • https://cloud.google.com/docs
        • https://cloud.google.com/sdk/gcloud/reference
        • https://github.com/GoogleCloudPlatform/cloud-builders
        • https://cloud.google.com/iam/docs/permissions-reference (Large download)
        • https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type
        • https://cloud.google.com/container-optimized-os/docs/how-to/toolbox
        "},{"location":"google-cloud/#gcloud-cli-examples","title":"gcloud CLI Examples","text":""},{"location":"google-cloud/#working-with-gcloud-configurations","title":"Working with gcloud configurations","text":"

        When working with several projects, it's best to use multiple configurations, with one for each project. Run gcloud init to create a new configuration. This interactive prompt will let you re-initialize the default, or create a new named config. Once you have done that, you can run gcloud config configurations list to show what you have configured. To activate a different configuration, run gcloud config configurations activate <new-config-name>.

        $ gcloud config configurations list\nNAME     IS_ACTIVE  ACCOUNT          PROJECT          COMPUTE_DEFAULT_ZONE  COMPUTE_DEFAULT_REGION\ndefault  False      user@example.io  example-prod\nstaging  True       user@example.io  example-staging  us-west4-a            us-west4\n$ gcloud config configurations activate default\nActivated [default].\n
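
        You can also create an additional named configuration non-interactively (the name dev here is just an example):

        gcloud config configurations create dev\n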

        Alternately, you can specify the config you want to use as an environment variable.

        $ gcloud config configurations list\nNAME     IS_ACTIVE  ACCOUNT                PROJECT                  COMPUTE_DEFAULT_ZONE  COMPUTE_DEFAULT_REGION\ndev      False      danielh@example.com    example-dev-369821\nstaging  True       danielh@example.com    example-staging-550891   us-east4-c            us-east4\n$ CLOUDSDK_ACTIVE_CONFIG_NAME=dev gcloud compute instances list\nNAME                                                 ZONE           MACHINE_TYPE    PREEMPTIBLE  INTERNAL_IP  EXTERNAL_IP     STATUS\ncat-pic-download-1                                   us-central1-a  e2-medium                    10.128.0.2                   TERMINATED\ngibson-access                                        us-east4-a     e2-micro                     10.0.0.69    256.24.927.306  RUNNING\ngke-dev-cluster-terraform-20090525165-d1058ae2-c0mc  us-east4-a     n1-standard-16               10.0.0.13                    RUNNING\ngke-dev-cluster-terraform-20210624231-9d581566-1oo9  us-east4-a     n1-standard-8                10.0.0.56                    RUNNING\n
        "},{"location":"google-cloud/#launch-cloud-shell-from-cli","title":"Launch cloud shell from CLI","text":"

        Google Cloud Shell is a free VM in GCP that you can use to access GCP cli tools, or do whatever else you might want to do in a free VM. The home directory persists between sessions and is only deleted after months of inactivity, so configuring your shell and storing useful scripts there isn't a bad idea.

        gcloud cloud-shell ssh --authorize-session\n
        "},{"location":"google-cloud/#use-vscode-with-cloud-shell","title":"Use vscode with cloud-shell","text":"

        First, make sure your cloud-shell is started by logging into it via gcloud cloud-shell ssh --authorize-session --command=uptime

        Generate an ssh_config entry for your cloudshell:

        gcloud cloud-shell ssh --authorize-session --dry-run |\nwhile read -r _ _ _ port _ _ _ _ creds _ ; do\n  printf \"Host cloud-shell\\n  User %s\\n  Port %s\\n  Hostname %s\\n\" ${creds/@*/} $port ${creds/*@/}\ndone\n

        This will print something like:

        Host cloud-shell\n  User dade_murphy\n  Port 53000\n  Hostname 5.4.3.1\n

        You'll have to put this into your ssh_config somehow. I recommend using an Include statement in ~/.ssh/config and having the above command redirect to a file with only that content so it can be updated in-place (see the sketch after the next command). Then use vscode like you usually do with remote ssh, or by running

        code --folder-uri \\\nvscode-remote://ssh-remote+cloud-shell/home/dade_murphy/git_workspace/garbage_file_recovery_work\n
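
        A minimal sketch of the Include approach, assuming the generated entry is redirected to a hypothetical ~/.ssh/cloud-shell.config file:

        # ~/.ssh/config (the included path below is an assumed filename)\nInclude ~/.ssh/cloud-shell.config\n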
        "},{"location":"google-cloud/#list-google-cloud-projects","title":"List google cloud projects","text":"
        gcloud projects list\n
        "},{"location":"google-cloud/#switch-to-a-different-project","title":"Switch to a different project","text":"
        gcloud config set project \"$project_name\"\n
        "},{"location":"google-cloud/#grant-a-user-permission-to-a-docker-registry","title":"Grant a user permission to a docker registry","text":"
        gsutil iam ch 'user:user@example.com:objectViewer' 'gs://artifacts.example.appspot.com/'\n
        "},{"location":"google-cloud/#list-google-compute-zones","title":"List google compute zones","text":"

        gcloud compute zones list\n

        "},{"location":"google-cloud/#list-compute-nodes","title":"List compute nodes","text":"
        gcloud compute instances list\n
        "},{"location":"google-cloud/#list-all-disks","title":"List all disks","text":"
        gcloud compute disks list\n
        "},{"location":"google-cloud/#generate-ssh-commands-for-all-nodes","title":"Generate ssh commands for all nodes","text":"
        gcloud compute instances list | awk 'NR>1 {printf \"gcloud compute ssh --zone %s %s\\n\", $2, $1;}'\n
        "},{"location":"google-cloud/#ssh-to-a-compute-node","title":"ssh to a compute node","text":"

        This is useful for getting system-level access to a GKE node.

        ssh-to-gke-node() {\n  [ \"$#\" -gt 0 ] || { echo \"Usage: ssh-to-gke-node <node_name> [command]\" ; return 1 ; }\n  read -r zone host < <(gcloud compute instances list --filter=\"$1\" | awk 'NR==2 {print $2, $1 ;}') ;\n  shift\n  gcloud compute ssh --tunnel-through-iap --zone \"$zone\" \"$host\" -- \"${@}\" ;\n}\n
        "},{"location":"google-cloud/#loop-through-all-gcloud-instances-and-perform-a-command","title":"Loop through all gcloud instances and perform a command","text":"
        gcloud compute instances list |\nawk 'NR>1 {printf \"gcloud compute ssh --zone %s %s\\n\", $2, $1;}' |\nwhile read -r ssh_cmd ; do\n  $ssh_cmd -- \"docker images\" </dev/null\ndone |\nsort -u\n
        "},{"location":"google-cloud/#create-a-one-off-compute-node","title":"Create a one-off compute node","text":"
        gcloud compute instances create $USER-temp-node --zone=us-west4-a --network-interface=no-address\n

        Leave off the --network-interface=no-address if you want a public IP address.

        "},{"location":"google-cloud/#delete-a-compute-node","title":"Delete a compute node","text":"

        Sometimes autoscalers have a hard time scaling down, requiring manual termination of idle nodes. The following commands are equivalent:

        gcloud compute instances delete \"projects/$project_name/zones/$zone/instances/$node_name\"\ngcloud compute instances delete --project \"$project_name\" --zone=\"$zone\" \"$node_name\"\n

        To connect to VMs that don't have a public IP address, you need to pass --tunnel-through-iap on the CLI and also have the IAP-secured Tunnel User permission.

        "},{"location":"google-cloud/#add-an-eks-context-to-kubectl","title":"Add an EKS context to kubectl","text":"

        https://cloud.google.com/sdk/gcloud/reference/container/clusters/get-credentials

        This adds the cluster to your .kube/config with authentication done via an access token.

        CLUSTER_NAME='foo-dev'\nPROJECT_NAME='some-project'\nREGION='us-west42'\ngcloud container clusters get-credentials \"${CLUSTER_NAME}\" \\\n  --region \"${REGION}\" \\\n  --project \"${PROJECT_NAME}\"\n
        "},{"location":"google-cloud/#add-separate-kubectl-configs-for-different-eks-clusters","title":"Add Separate kubectl configs for different EKS clusters","text":"

        This keeps each config in a different file, which is useful for requiring explicit enabling of a given environment, vs the normal behavior of inheriting the last used context.

        # Set up individual kube config files for dev, prod and staging\n\nKUBECONFIG=\"$HOME/.kube/foo-dev-config\"\ngcloud container clusters get-credentials dev-cluster --region vn-west4 --project foo-dev\nkubectl config rename-context $(kubectl config current-context) foo-dev\n\nKUBECONFIG=\"$HOME/.kube/foo-prod-config\"\ngcloud container clusters get-credentials prod-cluster --region vn-west4 --project foo-prod\nkubectl config rename-context $(kubectl config current-context) foo-prod\n\nKUBECONFIG=\"$HOME/.kube/foo-staging-config\"\ngcloud container clusters get-credentials staging-cluster --region vn-west4 --project foo-staging\nkubectl config rename-context $(kubectl config current-context) foo-staging\n

        Then setup aliases like

        # ~/.bash_aliases\nalias foo-k-dev=\"export KUBECONFIG=$HOME/.kube/foo-dev-config ; kubectl config set-context foo-dev --namespace=default ;\"\nalias foo-k-prod=\"export KUBECONFIG=$HOME/.kube/foo-prod-config ; kubectl config set-context foo-prod --namespace=default ;\"\nalias foo-k-stage=\"export KUBECONFIG=$HOME/.kube/foo-staging-config ; kubectl config set-context foo-staging --namespace=default ;\"\n
        "},{"location":"google-cloud/#list-images-available-in-google-container-registry","title":"List images available in Google Container Registry","text":"
        gcloud container images list\n
        "},{"location":"google-cloud/#pull-a-docker-container-from-google-container-registry","title":"Pull a docker container from Google Container Registry","text":"
        gcloud docker -- pull gcr.io/project-id/hello-world\n
        "},{"location":"google-cloud/#control-access-to-registries","title":"Control access to registries","text":"

        \"Container Registry uses a Cloud Storage bucket as the backend for serving container images. You can control who has access to your Container Registry images by adjusting permissions for the Cloud Storage bucket.

        Caution: Container Registry only recognizes permissions set on the Cloud Storage bucket. Container Registry will ignore permissions set on individual objects within the Cloud Storage bucket.

        You manage access control in Cloud Storage by using the GCP Console or the gsutil command-line tool. Refer to the gsutil acl and gsutil defacl documentation for more information.\" - https://cloud.google.com/container-registry/docs/access-control

        "},{"location":"google-cloud/#authenticate-a-private-gcr-registry-in-kubernetes","title":"Authenticate a private GCR registry in kubernetes","text":"

        This is likely not copy/paste material, but the flow is generally correct.

        PARTNER=other_company\nPROJECT=\"our_company-$PARTNER\"\nUSER=\"service-account-user-for-$PARTNER\"\nEMAIL=\"$USER@$PROJECT.iam.gserviceaccount.com\"\ngcloud iam service-accounts create \"$USER\" --display-name \"$USER\"\ngcloud iam service-accounts keys create \\\n  --iam-account \"$EMAIL\" \\\n  key.json\ngcloud projects add-iam-policy-binding \"$PROJECT\" \\\n  --member \"serviceAccount:$EMAIL\" \\\n  --role \"roles/storage.objectAdmin\"\nkubectl create secret docker-registry \"docker-pull-$PROJECT\" \\\n  --docker-server \"https://gcr.io\" \\\n  --docker-username _json_key \\\n  --docker-email \"$EMAIL\" \\\n  --docker-password \"$(cat key.json)\"\n

        Then use docker-pull-${PROJECT} as your imagePullSecret.

        "},{"location":"google-cloud/#set-cache-expiration-of-gcp-bucket-items-to-5-minutes","title":"Set cache expiration of GCP bucket items to 5 minutes","text":"

        By default, GCP bucket items are served with 1 hour of public cache, which means items can be cached outside of the control of the GCP admin, and within that cache window requests for the item will have unpredictable results. Set your Cache-Control max-age to something low for files that change, like page content and indexes, but long for files that never change, like images and archives.

        gsutil setmeta -h \"Cache-Control: public, max-age=300\" gs://helm-repo.example.org/index.yaml\n
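
        Conversely, for immutable artifacts a long max-age is appropriate; this sketch assumes a hypothetical chart archive path:

        # hypothetical immutable artifact\ngsutil setmeta -h \"Cache-Control: public, max-age=31536000\" gs://helm-repo.example.org/charts/mychart-1.2.3.tgz\n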

        More info: https://medium.com/@val_deleplace/public-cache-dont-panic-11038a073a9

        "},{"location":"google-cloud/#view-the-metadata-of-a-gcp-bucket-item","title":"View the metadata of a GCP bucket item","text":"
        gsutil stat gs://helm-repo.example.org/index.yaml\n

        The output will be something like:

        gs://helm-repo.example.org/index.yaml:\n    Creation time:          Fri, 22 Jul 2020 23:20:11 GMT\n    Update time:            Mon, 23 Jul 2020 19:17:53 GMT\n    Storage class:          MULTI_REGIONAL\n    Cache-Control:          public, max-age=300\n    Content-Length:         3195714\n    Content-Type:           application/octet-stream\n    Hash (crc32c):          vA/Awm==\n    Hash (md5):             2AJ32cECSriE0UQStsXxyw==\n    ETag:                   COP7ew7D5+CAEoI=\n    Generation:             1595460011829230\n    Metageneration:         5\n
        "},{"location":"google-cloud/#show-a-bar-chart-of-disk-usage-of-gcp-bucket-contents","title":"Show a bar chart of disk usage of gcp bucket contents","text":"

        The general idea here is to run gsutil du gs://whatever/somepath, swap the first and second columns, and pipe that to termgraph. In this example awk does the column swap, filters out all files so we're only charting full directory sizes, and trims the directory name and part of the filename:

        $ gsutil du gs://cat-pic-downloader-backups/backups/*full* | awk '/\\/$/ {gsub(/.*velero-/, \"\", $2) ; print $2,$1 ;}' | termgraph\n\nfull-back-up-20190128040005/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 307.60M\nfull-back-up-20190129040006/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 305.13M\nfull-back-up-20190130040007/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 298.71M\n...\nfull-back-up-20190227040035/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 283.44M\n
        "},{"location":"google-cloud/#set-maintenance-window-on-a-gke-cluster","title":"Set maintenance window on a GKE cluster","text":"

        This syntax is a bit difficult to work with. It must be given in RFC-5545 (iCal), but GCP only supports a subset of the spec. Most frustratingly, GCP doesn't support timezones. They warn you about this, saying the \"timezone will not be stored\", so if you want to start something at Wednesday 03:00:00+0600 you have to specify that as Tuesday 21:00:00. \ud83d\ude44

        N.B.: in my testing while writing this, the local TZ was used if no TZ was sent, so it looks like their docs may be wrong and maybe the conversion step can be avoided.

        "},{"location":"google-cloud/#convert-local-time-to-rfc-5545-utc","title":"Convert local time to rfc-5545 UTC","text":"

        So if you want to start your maintenance on Thursday at 5am Pacific time and end at Thursday 5pm Pacific time, the first step is to figure out what the rfc-5545 UTC format would be:

        $ TZ=Z gdate '+%Y%m%dT%H%M%SZ %A' -d 2021-05-20T05:00:00-0800\n20210520T130000Z Thursday\n$ TZ=Z gdate '+%Y%m%dT%H%M%SZ %A' -d 2021-05-20T17:00:00-0800\n20210521T010000Z Friday\n
        "},{"location":"google-cloud/#set-and-verify-the-maintenance-schedule","title":"Set and verify the maintenance schedule:","text":"
        gcloud container clusters update \\\n  --project=\"$PROJECT_NAME\" \\\n  --zone=\"$ZONE\" \\\n  \"$CLUSTER_NAME\" \\\n  --maintenance-window-start=\"20210520T130000Z\" \\\n  --maintenance-window-end=\"20210521T010000Z\" \\\n  --maintenance-window-recurrence=\"FREQ=WEEKLY;BYDAY=TH\"\n

        You should see Updating <cluster_name>...done. followed by some hyperlinks. Next you should validate that it was set right:

        gcloud container clusters describe \\\n  --format=json \\\n  --project=\"$PROJECT_NAME\" \\\n  --zone=\"$ZONE\" \\\n  \"$CLUSTER_NAME\" |\njq .maintenancePolicy.window.recurringWindow\n

        You should see a small json blob like:

        {\n  \"recurrence\": \"FREQ=WEEKLY;BYDAY=TH\",\n  \"window\": {\n    \"endTime\": \"2021-05-21T02:00:00Z\",\n    \"startTime\": \"2021-05-20T14:00:00Z\"\n  }\n}\n

        Grab the start time and feed it back into gdate to validate that your desired local time is set:

        gdate -d \"2021-05-20T14:00:00Z\"  # comes out to: Thu May 20 07:00:00 PDT 2021\n
        "},{"location":"google-cloud/#gcloud-web-console-examples","title":"gcloud web console examples","text":""},{"location":"google-cloud/#logs-explorer-examples","title":"Logs Explorer examples","text":"
        • Sample queries using the Logs Explorer
        "},{"location":"google-cloud/#show-namespace-deletions","title":"Show namespace deletions","text":"
        protoPayload.methodName=\"io.k8s.core.v1.namespaces.delete\"\n
        "},{"location":"google-cloud/#show-all-images-pulled-by-gke","title":"Show all images pulled by GKE","text":"

        This will show pulls of all images except those matching the substrings referenced on line 3 of the query. The OR appears to be case sensitive.

        resource.type=\"k8s_pod\"\njsonPayload.reason=\"Pulling\"\n-jsonPayload.message : ( \"Pulling image \\\"dade-murphy/leet-image\\\"\" OR \"Pulling image \\\"mr-the-plague/da-vinci\\\"\" )\n
        "},{"location":"google-cloud/#show-all-k8s-control-plane-upgrades","title":"Show all k8s control plane upgrades","text":"

        You can search using the same syntax at the CLI, which is great for local saving of search results so you can search them more quickly using grep and easily archive them for postmortem documentation.

        gcloud beta logging read 'timestamp>=\"2021-01-01T00\" AND protoPayload.metadata.operationType: UPGRADE_MASTER AND operation.producer: container.googleapis.com' > control-plane-upgrades.yaml\n
        • https://console.cloud.google.com/logs/query;query=%20%20resource.type%3D%22k8s_pod%22%0A%20%20jsonPayload.reason%3D%22Pulling%22
        "},{"location":"google-cloud/#cloud-sql-examples","title":"Cloud SQL examples","text":"

        Docs are here: https://cloud.google.com/sql/docs/postgres/

        "},{"location":"google-cloud/#list-instances","title":"List instances","text":"
        gcloud sql instances list\n
        "},{"location":"google-cloud/#create-a-new-postgres-db-instance","title":"Create a new postgres db instance","text":"

        In the following example, the tier is custom, with 24 CPU cores and 122880 MB of memory. You can see the standard tiers that are available with gcloud sql tiers list. Also, this creates an instance of POSTGRES_14, but other versions are listed here: https://cloud.google.com/sql/docs/postgres/create-instance#gcloud
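
        That tier listing command, as a runnable line:

        gcloud sql tiers list\n

        And the create command itself: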

        gcloud sql instances create \\\n  --project=${GCP_PROJECT} \\\n  --region=${INSTANCE_REGION} \\\n  --database-version=POSTGRES_14 \\\n  --tier=db-custom-24-122880 \\\n  ${INSTANCE_NAME}\n
        "},{"location":"google-cloud/#upgrade-postgres-to-a-new-version-in-place","title":"Upgrade postgres to a new version in-place","text":"

        https://cloud.google.com/sql/docs/postgres/upgrade-major-db-version-inplace

        gcloud beta sql instances patch \"${INSTANCE_NAME}\" --database-version=POSTGRES_14\n
        "},{"location":"google-cloud/#list-backups-for-an-instance","title":"List backups for an instance","text":"
        gcloud sql backups list --instance=\"${INSTANCE_NAME}\"\n
        "},{"location":"google-cloud/#create-a-backup","title":"Create a backup","text":"
        gcloud sql backups create \\\n  --instance=\"${INSTANCE_NAME}\" \\\n  --description=\"Pre pg14 upgrade\"\n
        "},{"location":"google-cloud/#restore-a-backup-from-one-instance-to-another","title":"Restore a backup from one instance to another","text":"

        This has to be done within the same GCP project.

        gcloud sql backups restore \\\n  --backup-instance=\"${OLD_INSTANCE}\" \\\n  --restore-instance=\"${NEW_INSTANCE}\" \\\n  \"${BACKUP_ID}\"\n
        "},{"location":"google-cloud/#restore-a-backup-from-a-sql-file-stored-in-a-bucket","title":"Restore a backup from a sql file stored in a bucket","text":"

        Depending on the contents of your sql backup, you may have to create the target database first. If the db does not exist and the sql file does not contain working CREATE statements, you'll see the ambiguous error system error occurred pre-import and an unhelpful python stack trace.

        gcloud sql import sql ${INSTANCE_NAME} \\\n  --user \"${DB_USERNAME}\" \\\n  --database \"${TARGET_DB}\" \\\n  gs://some-bucket-name/some_backup.sql \\\n  --verbosity debug\n

        Add --async if you just want it to go to the background and return you to your shell prompt.
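
        For example, a minimal sketch reusing the variables and bucket path from above:

        gcloud sql import sql \"${INSTANCE_NAME}\" \\\n  --database \"${TARGET_DB}\" \\\n  gs://some-bucket-name/some_backup.sql \\\n  --async\n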

        "},{"location":"google-earth/","title":"Google Earth","text":"

        \"Google Earth is a computer program that renders a 3D representation of Earth based on satellite imagery. The program maps the Earth by superimposing satellite images, aerial photography, and GIS data onto a 3D globe, allowing users to see cities and landscapes from various angles.\" - https://en.wikipedia.org/wiki/Google_Earth

        "},{"location":"google-earth/#links","title":"Links","text":"
        • simplekml - \"The python package simplekml was created to generate kml (or kmz). It was designed to alleviate the burden of having to study KML in order to achieve anything worthwhile with it. If you have a simple understanding of the structure of KML, then simplekml is easy to run with and create usable KML.\"
        • https://doarama.com
        "},{"location":"google-sheets/","title":"Google Sheets","text":""},{"location":"google-sheets/#links","title":"Links","text":"
        • gspread - Python module for interacting with google spreadsheets
        "},{"location":"google-sheets/#techniques","title":"Techniques","text":"
        • Function list - https://support.google.com/docs/table/25273?hl=en
        "},{"location":"google-sheets/#conditional-formatting","title":"Conditional Formatting","text":""},{"location":"google-sheets/#regex-matching-to-color","title":"Regex matching to color","text":"

        Colorize rows with conditional formatting by using an expression like this:

        =REGEXMATCH($E:$E, \"some_regex\")\n

        This regex is not anchored, so there is no need to prepend or append .*

        Cell references in this case are relative unless prefixed with a $. So if you want to match the cell being formatted, you would use A1:A1.
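
        As a quick sketch of the difference (assuming the match should run against column E): the first formula anchors the column so each row is compared against its own cell in column E, while the second uses the relative A1:A1 style to match the cell being formatted.

        =REGEXMATCH($E1, \"some_regex\")\n=REGEXMATCH(A1:A1, \"some_regex\")\n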

        "},{"location":"google-sheets/#color-every-other-row","title":"Color every other row","text":"
        =MOD(ROW(),2)\n
        "},{"location":"google-sheets/#import-an-rss-feed","title":"Import an RSS feed","text":"
        =IMPORTFEED(\"https://api.flickr.com/services/feeds/photos_public.gne\", B2, TRUE, 10)\n
        "},{"location":"google-sheets/#sum-lines-that-match-a-string","title":"Sum lines that match a string","text":"

        This uses glob-like wildcard syntax; to match a literal wildcard character, escape it with ~ instead of \\

        =COUNTIF(D:D,\"3*\")\n
        "},{"location":"google-sheets/#automatically-resolve-the-dow-from-a-date","title":"Automatically resolve the DOW from a date","text":"
        =CHOOSE( weekday(A4), \"Sun\", \"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\")\n
        "},{"location":"google/","title":"Google","text":"

        \"Google LLC is an American multinational technology company that specializes in Internet-related services and products.\" - https://en.wikipedia.org/wiki/Google

        "},{"location":"google/#links","title":"Links","text":"
        • Google App Script
        • Extending Google Docs
        • Document Service
        • Web Store Developer Dashboard
        • Service Accounts
        • API Explorer
        "},{"location":"graphicsmagick/","title":"GraphicsMagick","text":"

        \"GraphicsMagick is the swiss army knife of image processing.\" - http://www.graphicsmagick.org/

        GraphicsMagick positions itself as a more efficient and stable alternative to ImageMagick.

        "},{"location":"graphicsmagick/#usage-tips","title":"Usage tips","text":"

        Docs on scripting: http://www.graphicsmagick.org/GraphicsMagick.html

        "},{"location":"graphicsmagick/#convert-a-bunch-of-dng-files-to-low-resolution-jpeg","title":"Convert a bunch of DNG files to low-resolution JPEG","text":"

        The early -size option here is an optimization, but the actual resizing happens in -resize.

        SRC_PATH=\"${HOME}/Pictures/\"\nDEST_PATH=\"${HOME}/Desktop/output/\"\nSIZE=\"400x400\"\nfind \"${SRC_PATH}\" -type f -iname '*.dng' -print0 |\nxargs -0 -n1 -P\"$(nproc 2>/dev/null || echo 2)\" \\\ngm mogrify \\\n  -verbose \\\n  -size \"${SIZE}\" \\\n  -format jpeg \\\n  -resize \"${SIZE}\" \\\n  -create-directories \\\n  -output-directory \"$DEST_PATH\"\n
        "},{"location":"graphicsmagick/#create-a-contact-sheet","title":"Create a contact sheet","text":"
        gm montage \\\n  -geometry 400x400+10+10 \\\n  -background \"#222\" \\\n  -tile 8 *.jpg \\\n  \"$HOME/output.jpg\"\n
        "},{"location":"graphicsmagick/#see-also","title":"See Also","text":"
        • exiftool
        • imagemagick
        • jpeginfo
        • sips
        • dcraw
        "},{"location":"graphql/","title":"GraphQL","text":"

        \"GraphQL is a query language for APIs and a runtime for fulfilling those queries with your existing data.\" - https://graphql.org

        "},{"location":"graphql/#links","title":"Links","text":"
        • Apollo Federation
        • Why not use GraphQL?
        "},{"location":"grep/","title":"grep","text":"

        grep checks each line of a file or input stream and prints the lines that match a pattern, and is a standard tool in the linux admin's toolbox. It's easy to use, but there are some neat things you can do with it that aren't so obvious. This doc is mostly focused on the non-obvious things.

        Unless specified, grep here means GNU grep. BSD (macOS) grep functions differently in many cases.

        "},{"location":"grep/#examples","title":"Examples","text":""},{"location":"grep/#print-only-the-matching-string","title":"Print only the matching string","text":"

        We use -E so we don't have to escape +.

        $ echo 'Yummy fooood!' | grep -Eo 'fo+'\nfoooo\n
        "},{"location":"grep/#print-only-part-of-a-matching-string","title":"Print only part of a matching string","text":"

        -P uses perl regex, which supports more features, like lookbehind. This lets us use -o but print only part of the string.

        Use \\K in place of lookbehind to trim the beginning of the match.

        $ echo 'Yummy fooood!' | grep -Po 'foo\\Ko+'\noo\n

        Use lookahead to trim the end of the match

        $ echo 'Yummy fooood!' | grep -Po 'foo(?=o+)'\nfoo\n

        More info: https://www.regular-expressions.info/keep.html

        "},{"location":"grub/","title":"GNU GRUB","text":"

        \"GNU GRUB is a Multiboot boot loader. It was derived from GRUB, the GRand Unified Bootloader, which was originally designed and implemented by Erich Stefan Boleyn.\" - https://www.gnu.org/software/grub/

        "},{"location":"grub/#examples","title":"Examples","text":""},{"location":"grub/#update-defaults","title":"Update defaults","text":"

        The basic workflow for updating grub is to edit /etc/default/grub then run sudo update-grub. The update-grub man page states that \"update-grub is a stub for running grub-mkconfig -o /boot/grub/grub.cfg to generate a grub2 config file.\", and thus you can run grub-mkconfig on its own to preview what would be created.
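
        A minimal sketch of that workflow, assuming a Debian-style system where update-grub exists:

        sudo vi /etc/default/grub  # edit defaults\nsudo grub-mkconfig         # preview the generated config on stdout\nsudo update-grub           # actually write /boot/grub/grub.cfg\n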

        "},{"location":"grub/#disable-onboard-frame-buffer","title":"Disable onboard frame buffer","text":"

        I used this configuration to get text-mode linux to boot on a Mac Mini with a bad graphics card that would not load a desktop environment. The machine was locking up at boot in OS X. Hardware test would boot with striped color anomalies, but would never finish. Ubuntu Xenial would not boot correctly even to text mode without these settings.

        In /etc/default/grub:

        GRUB_CMDLINE_LINUX_DEFAULT=\"video=vesafb:off nofb vga=normal nomodeset\"\n
        "},{"location":"grub/#force-brightness-at-boot","title":"Force brightness at boot","text":"

        On a 15\" macbook pro, ubuntu 18.04 was giving me a problem where the LCD was set very dim and the keys to change brightness were not working. I had to configure /etc/default/grub with the following:

        GRUB_CMDLINE_LINUX_DEFAULT=\"acpi_backlight=video\"\n

        An alternative was acpi_backlight=vendor, but for me this was still quite dim. You can also alter your screen brightness on the fly by writing a value between 0-100, like this: echo 0 > /sys/class/backlight/acpi_video0/brightness
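
        As a runnable sketch, assuming the acpi_video0 device from above (the device name and valid value range vary per machine):

        # check the valid range first; look in /sys/class/backlight/ for your device name\ncat /sys/class/backlight/acpi_video0/max_brightness\n# write a new value; tee is used because the redirection would run before sudo\necho 50 | sudo tee /sys/class/backlight/acpi_video0/brightness\n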

        "},{"location":"grub/#serial-over-ipmi","title":"Serial over IPMI","text":"

        First, your BMC has to be configured for SOL. If you see POST in an IPMI sol session, this is the next step. Edit /etc/default/grub and add something like the following lines:

        GRUB_CMDLINE_LINUX=\"console=tty0 console=ttyS1,115200\"\nGRUB_TERMINAL_INPUT=\"console serial\"\nGRUB_TERMINAL_OUTPUT=\"gfxterm serial\"\nGRUB_SERIAL_COMMAND=\"serial --unit=1 --speed=115200\"\n

        This was tested on a SuperMicro running Ubuntu 19.10.

        "},{"location":"grub/#notable-commands-files-and-dirs","title":"Notable commands, files and dirs","text":"
        • /boot/grub/grub.cfg - The grub config that is actually used at boot
        • /etc/grub.d - A directory with some of the configs that are combined to create /boot/grub/grub.cfg
        • /etc/default/grub - Default grub options
        • update-grub - Used to regenerate /boot/grub/grub.cfg
        • grub-set-default - Used to configure the default menu entry during reboot, only for bare metal machines
        • grub-set-default-legacy-ec2 - Used to configure the default menu entry on ec2 machines
        "},{"location":"hadoop/","title":"Hadoop","text":"

        \"The Apache Hadoop software library is a framework that allows for the distributed processing of large data sets across clusters of computers using simple programming models.\" - http://hadoop.apache.org/

        "},{"location":"hadoop/#links","title":"Links","text":"
        • Oozie - Oozie is a workflow scheduler system to manage Apache Hadoop jobs.
        "},{"location":"handbrake/","title":"Handbrake","text":"

        Handbrake is a tool for ripping DVDs into MPEG or AVI files.

        "},{"location":"handbrake/#cli-examples","title":"CLI Examples","text":"
        • https://trac.handbrake.fr/wiki/CLIGuide
        "},{"location":"handbrake/#deinterlacing-for-ipad","title":"Deinterlacing for iPad","text":"
        HandBrakeCLI -Z \"AppleTV\" --deinterlace fast --maxWidth 1024 -i infile -o outfile\n
        "},{"location":"handbrake/#show-information-about-the-source-media-for-use-with-extended-flags","title":"Show information about the source media for use with extended flags","text":"
        HandBrakeCLI -t 0 -i VIDEO_TS\n
        "},{"location":"handbrake/#generate-a-1000-frame-preview-of-the-appletv-preset","title":"Generate a 1000 frame preview of the AppleTV preset","text":"

        --stop-at is relative to the start, so it describes the number of frames in the output.

        HandBrakeCLI -i 2046/VIDEO_TS/ --start-at frame:5000 --stop-at frame:1000 -o foo.mp4 -Z AppleTV\n
        "},{"location":"handbrake/#fix-43-aspect-ratio-with-the-expanded-syntax-of-the-appletv-preset","title":"Fix 4:3 aspect ratio with the expanded syntax of the AppleTV preset","text":"
        HandBrakeCLI \\\n-e x264 \\\n-q 20.0 \\\n-a 1,1 \\\n-E faac,ac3 \\\n-B 160,160 \\\n-6 dpl2,auto \\\n-R 48,Auto -D 0.0,0.0 \\\n-f mp4 \\\n-4 \\\n-X 960 \\\n--loose-anamorphic \\\n-m \\\n-x cabac=0:ref=2:me=umh:b-adapt=2:weightb=0:trellis=0:weightp=0 \\\n--custom-anamorphic \\\n--pixel-aspect 4:3\n
        "},{"location":"hashids/","title":"Hashids","text":"

        \"Hashids is a small open-source library that generates short, unique, non-sequential ids from numbers. It converts numbers like 347 into strings like 'yr8', or array of numbers like [27, 986] into '3kTMd'. You can also decode those ids back. This is useful in bundling several parameters into one or simply using them as short UIDs.\" - https://hashids.org/

        "},{"location":"hashids/#examples","title":"Examples","text":""},{"location":"hashids/#python","title":"Python","text":"

        https://github.com/davidaurelio/hashids-python

        from hashids import Hashids\nhashids = Hashids(salt=\"this is my salt\")\nid = hashids.encode(1, 2, 3)  # str('laHquq')\nnumbers = hashids.decode(id)\n
        "},{"location":"helm/","title":"helm","text":"

        \"The Kubernetes Package Manager\" - https://github.com/kubernetes/helm

        These notes are all about helm version 3. Charts that require helm 3 should use apiVersion: v2, though helm 3 does support v1.
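
        A minimal Chart.yaml sketch for a helm 3 chart (the name and version here are hypothetical):

        apiVersion: v2\nname: example-chart\ndescription: A chart that requires helm 3\nversion: 0.1.0\n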

        "},{"location":"helm/#links","title":"Links","text":"
        • https://helm.sh/docs/intro/quickstart/
        "},{"location":"helm/#tips","title":"Tips","text":""},{"location":"helm/#list-all-versions-of-a-chart-in-a-given-repo","title":"List all versions of a chart in a given repo","text":"
        helm search repo repo_name/chart_name --devel --versions\n
        "},{"location":"helm/#include-pre-release-versions-in-all-versions","title":"Include pre-release versions in all versions","text":"

        You have to use Masterminds/semver constraints with helm, which dictate that you have to include a pre-release component if you want to match against prereleases:

        helm search repo repo_name/chart_name --devel --versions --version '^1.5-0'\n

        This would show all 1.x versions at or above 1.5.0, including prereleases. If you only want to show the latest version even if it is a prerelease, leave off --versions.
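
        For example, dropping --versions from the search above shows only the newest matching version, prerelease or not:

        helm search repo repo_name/chart_name --devel --version '^1.5-0'\n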

        "},{"location":"helm/#get-values-of-a-deployed-chart","title":"Get values of a deployed chart","text":"

        This only shows values that were passed in, not default values.

        $release_name is the NAME column in helm list

        helm get values -o yaml \"$release_name\" > values.yaml\n

        To get a list of all values, use

        helm get values --all -o yaml \"$release_name\" > values.yaml\n
        "},{"location":"helm/#show-notes-for-a-deployed-service","title":"Show notes for a deployed service","text":"

        Notes are printed when you install a service, but they can be viewed again by running helm status <release_name> where <release_name> is one of the releases from helm list.
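
        In the shell-variable style used elsewhere in these notes, that looks like:

        helm status \"$release_name\"\n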

        "},{"location":"helm/#install-the-stable-repo","title":"Install the stable repo","text":"
        helm repo add stable https://charts.helm.sh/stable\n
        "},{"location":"helm/#install-the-incubator-repo","title":"Install the incubator repo","text":"

        https://github.com/helm/charts#how-do-i-enable-the-incubator-repository

        helm repo add incubator https://charts.helm.sh/incubator\n
        "},{"location":"helm/#show-metadata-about-a-specific-release-in-json","title":"Show metadata about a specific release in json","text":"

        You can find the revision in a few places, like helm list -A. The default is to store data about the last 10 revisions per release name.

        helm history -n \"$NS\" \"$RELEASE_NAME\" -o json | jq '.[] | select(.revision == 157)'\n
        "},{"location":"helm/#show-raw-data-about-what-helm-sent-to-the-k8s-server-for-a-recent-release","title":"Show raw data about what helm sent to the k8s server for a recent release","text":"

        First, find the secret that you will want to search. You can get the release number from helm history -n \"$NS\" foo, or by digging through kubectl -n $NS get secret.

        $ k -n \"$NS\" get secret | grep sh.helm.release | tail -n 3 | column -t\nsh.helm.release.v1.foo.v338  helm.sh/release.v1  1  14d\nsh.helm.release.v1.foo.v339  helm.sh/release.v1  1  13d\nsh.helm.release.v1.foo.v340  helm.sh/release.v1  1  4d23h\n

        Then send that secret into the following command to get the full manifest that was sent to the k8s api:

        k -n \"$NS\" get secret \"$SECRET\" -o go-template='{{ .data.release | base64decode | base64decode }}' |\n  gzip -d\n

        The result is a json blob with all the details of how the helm chart was applied, including hook manifests, app manifests, and other metadata.

        "},{"location":"helm/#splay-cron-jobs","title":"Splay cron jobs","text":"

        Splaying cron jobs avoids the thundering herd problem by spreading the jobs out over time with deterministic randomness.

        The functions available in Helm templates are not as plentiful and general-purpose as you would expect in a normal programming language, so we have to get creative for some things. One thing that would be great is if Helm provided a deterministic random feature. It does have randomness, but there is no way to seed the random number generator. To work around this, we can use other functions that do take inputs in order to generate deterministic random-ish numbers. One such example is adler32sum, which returns a base-10 hash value of its input.

        The following example splays the starting minute of a cron job that runs every 15 minutes across minutes 3-12 inclusive, which avoids the high-demand minutes at the beginning of each 0/15 interval, and still gives a few minutes for work to complete before the next 0/15 interval comes around.

        '{{- add 3 (regexFind \".$\" (adler32sum .Release.Name)) -}}-59/15 * * * *'\n

        This bit of code adler32sums the .Release.Name value (which is expected to be unique for every deployment, but may not be in your environment), takes only the right-most digit, which is 0-9, adds 3 to it, then uses that number as the starting minute in the cron schedule, EG: 7-59/15 * * * *.

        Here is a python script that shows what minute number would be generated for 235,976 inputs:

        #!/usr/bin/env python3\n\nfrom zlib import adler32\nfrom pathlib import Path\n\n_input = Path(\"/usr/share/dict/words\").read_text().split()\noutput = {x + 3: 0 for x in range(10)}\n\nfor item in _input:\n    cksum = adler32(str.encode(item))\n    splay_minute = int(str(cksum)[-1]) + 3\n    output[splay_minute] += 1\n\nfor k, v in output.items():\n    print(f\"{k:2d}: {v}\")\n

        And the output shows a pretty even splay across all minutes, with each minute containing roughly 1/10th of the input lines:

        $ ./test-adler32sum.py\n 3: 23483\n 4: 23523\n 5: 23699\n 6: 23628\n 7: 23464\n 8: 23750\n 9: 23435\n10: 23833\n11: 23605\n12: 23556\n

        More rigorous statistical analysis would be needed to understand exactly how the inputs are being spread, but if you care that much about it, you are probably better off submitting a PR with your desired behavior to https://github.com/Masterminds/sprig, which is where the helm template functions come from.

        "},{"location":"helm/#detect-helm-resources-that-do-not-have-proper-annotations","title":"Detect helm resources that do not have proper annotations","text":"

        Helm requires that certain annotations exist. This check will return all deployments that do not contain the required annotations:

        kubectl get deployments -A -o json |\njq '.items[] | select((.metadata.annotations.\"meta.helm.sh/release-name\" == null) or (.metadata.annotations.\"meta.helm.sh/release-namespace\" == null)) | .metadata.name'\n
        "},{"location":"helm/#links_1","title":"Links","text":"
        • https://github.com/databus23/helm-diff: \"This is a Helm plugin giving your a preview of what a helm upgrade would change.\"
        • https://blog.flant.com/advanced-helm-templating
        "},{"location":"home-assistant/","title":"Home Assistant","text":"

        \"Home Assistant is an open-source home automation platform running on Python 3. Track and control all devices at home and automate control.\" - https://home-assistant.io/

        \"Hass.io turns your Raspberry Pi (or another device) into the ultimate home automation hub powered by Home Assistant. With Hass.io you can focus on integrating your devices and writing automations.\" - https://home-assistant.io/hassio/

        "},{"location":"hp/","title":"HP","text":"

        Information about HP Inc and Hewlett-Packard Company hardware and software.

        "},{"location":"hp/#links","title":"Links","text":"
        • http://cciss.sourceforge.net - RAID software for linux
        • iLO firmware upgrade is done from linux using CP012567.scexe
        • Procurve ethernet switches
        • Microserver N40L or N54L for zfs or FreeNAS
        • Custom Microserver BIOS: http://www.avforums.com/forums/networking-nas/1521657-hp-n36l-n40l-n54l-microserver-updated-ahci-bios-support.html
        "},{"location":"htmx/","title":"htmx","text":"

        \"htmx gives you access to AJAX, CSS Transitions, WebSockets and Server Sent Events directly in HTML, using attributes, so you can build modern user interfaces with the simplicity and power of hypertext\" - https://htmx.org/

        "},{"location":"htmx/#links","title":"Links","text":"
        • https://htmx.org/docs/
        • https://github.com/rajasegar/awesome-htmx
        "},{"location":"htop/","title":"htop","text":"

        \"an interactive process viewer for Unix systems.\" - http://hisham.hm/htop/

        "},{"location":"htop/#see-also","title":"See also","text":"
        • Top variant list
        "},{"location":"httpstat/","title":"httpstat","text":"

        \"curl statistics made simple\" - https://github.com/reorx/httpstat

        "},{"location":"httpstat/#usage","title":"Usage","text":""},{"location":"httpstat/#simple-usage","title":"Simple usage","text":"
        $ httpstat http://hoherd.com/\nConnected to 192.30.252.153:80 from 127.0.0.1:61646\n\nHTTP/1.1 200 OK\nServer: GitHub.com\nDate: Mon, 29 Jan 2018 23:24:52 GMT\nContent-Type: text/html; charset=utf-8\nContent-Length: 405\nVary: Accept-Encoding\nVary: Accept-Encoding\nLast-Modified: Tue, 04 Apr 2017 16:43:44 GMT\nAccess-Control-Allow-Origin: *\nX-GitHub-Request-Id: F0D0:1973:5CF2FD:846C00:5A6FAD44\nExpires: Mon, 29 Jan 2018 23:34:52 GMT\nCache-Control: max-age=600\nAccept-Ranges: bytes\n\nBody stored in: /var/folders/2t/rnzxpxd54y7832mx_xjvxl30bb2qzp/T/tmphVaBFx\n\n  DNS Lookup   TCP Connection   Server Processing   Content Transfer\n[     5ms    |       0ms      |       237ms       |        1ms       ]\n             |                |                   |                  |\n    namelookup:5ms            |                   |                  |\n                        connect:5ms               |                  |\n                                      starttransfer:242ms            |\n                                                                 total:243ms\n
        "},{"location":"httpstat/#see-also","title":"See also","text":"
        • aria2
        • curl - what httpstat wraps to get its stats
        • httpstat - download and show some useful connection information
        • wget
        "},{"location":"iTunes/","title":"iTunes","text":"
        • https://github.com/liamks/pyitunes
        "},{"location":"iTunes/#metadata","title":"Metadata","text":"
        • Skips are counted within 2-10 seconds in iTunes, 3-10(?) seconds in iOS.
        "},{"location":"image-formats/","title":"Graphical Image Formats","text":"
        • arw - Sony Alpha raw format
        • cr2 - Canon raw format
        • dng - Adobe digital negative
        • gif - Compuserve Graphics Interchange Format
        • jpg - Most common image format on the internet
        • nef - Nikon raw format
        • png - Lossless rasterized image format
        • tiff - Lossless rasterized image format
        • webp - https://en.wikipedia.org/wiki/WebP
        "},{"location":"imagemagick/","title":"ImageMagick","text":"

        \"ImageMagick is a free and open-source software suite for displaying, converting, and editing raster image and vector image files. It can read and write over 200 image file formats.\" - https://en.wikipedia.org/wiki/ImageMagick

        ImageMagick is a framework for manipulating images. convert is the CLI front-end for it, and there are other modules/libraries for php, perl, etc.

        "},{"location":"imagemagick/#limitations","title":"Limitations","text":"
        • Does NOT handle DNG files. In OS X, use sips to do this.
        "},{"location":"imagemagick/#techniques","title":"Techniques","text":""},{"location":"imagemagick/#overlay-the-date-on-the-image","title":"Overlay the date on the image","text":"
        for X in *.jpg ; do\n  convert ${X} -font Times-Roman -pointsize 70 -fill black -annotate +100+100 %[exif:DateTimeOriginal] ${X}-date.jpg\ndone\n\nfor X in *date.jpg ; do\n  convert ${X} -font Times-Roman -pointsize 70 -fill white -annotate +98+98 %[exif:DateTimeOriginal] ${X}-date2.jpg\ndone\n
        "},{"location":"imagemagick/#delete-all-tags-and-metadata","title":"Delete all tags and metadata","text":"
        convert -strip infile.jpg outfile.jpg\n
        "},{"location":"imagemagick/#generate-blurry-dark-terminal-backgrounds-from-normal-backgrounds","title":"Generate blurry, dark terminal backgrounds from normal backgrounds","text":"
        for X in * ; do\n  convert -resize 1100x1100 \"${X}\" -blur 0x4 -fill black -colorize 75% terminal.\"${X}\"\ndone\n
        "},{"location":"imagemagick/#generate-shady-versions-of-desktop-pictures-in-os-x","title":"Generate shady versions of desktop pictures in OS X","text":"
        for X in /Library/Desktop\\ Pictures/*.jpg ; do\n  IMG=$(basename \"${X}\")\n  convert -resize 1100x1100 \"${X}\" -blur 0x4 -set option:modulate:colorspace hsb -modulate 20 ~/Pictures/terminal.\"${IMG}\"\ndone\n
        "},{"location":"imagemagick/#crop-the-center-of-images-out","title":"Crop the center of images out","text":"
        for X in /Volumes/data-b/Timelapse/20120407-14* ; do\n  convert \"${X}\" -gravity Center -crop 1920x1080+0+0 $(basename ${X})\ndone\n
        "},{"location":"imagemagick/#average-many-photos-for-a-long-exposure-style-shot","title":"Average many photos for a long-exposure style shot","text":"
        convert *.jpg -average average.jpg\n
        "},{"location":"imagemagick/#multiply-several-images","title":"Multiply several images","text":"
        convert *.jpg -background white -compose multiply -flatten multiply.jpg\n
        "},{"location":"imagemagick/#combine-images-always-using-the-minimum-value","title":"Combine images always using the minimum value","text":"
        convert *.jpg -background white -compose darken -flatten minimum.jpg\n
        "},{"location":"imagemagick/#combine-images-always-using-the-maximum-value","title":"Combine images always using the maximum value","text":"
        convert *.jpg -background black -compose lighten -flatten maximum.jpg\n
        "},{"location":"imagemagick/#swap-red-and-blue-channels-for-ir-photos","title":"Swap red and blue channels (for IR photos)","text":"
        convert infile.jpg -separate -swap 0,2 -combine swapped.jpg\n
        "},{"location":"imagemagick/#animate-some-images","title":"Animate some images","text":"
        convert -delay 20 -loop 0 *.jpg animation.gif\n
        "},{"location":"imagemagick/#see-also","title":"See Also","text":"
        • exiftool
        • graphicsmagick
        • jpeginfo
        • sips
        "},{"location":"img2xterm/","title":"img2xterm","text":"

        Converts images into xterm 256 color output for viewing when there is no graphical display. Unfortunately, as of 2016-09-07, I can't find this in an easily distributable package.

        https://github.com/rossy/img2xterm

        "},{"location":"inotify/","title":"inotify","text":"
        • \"inotify - monitoring file system events\" - man inotify
        • \"inotifywatch - gather filesystem access statistics using inotify\" - man inotifywatch
        • \"The inotify cron daemon (incrond) is a daemon which monitors filesystem events and executes commands defined in system and user tables. It's (sic) use is generally similar to cron(8).\" - man incrond
        "},{"location":"inotify/#examples","title":"Examples","text":""},{"location":"inotify/#continuously-show-filesystem-events-on-a-file","title":"Continuously show filesystem events on a file","text":"

        This shows a datestamp when /var/log/syslog is modified. Theoretically we could use %N to get millisecond precision, but it doesn't work.

        sudo inotifywait -m --timefmt '%F %T.%z' --format '%T %w %e %f' /var/log/syslog\n
        "},{"location":"internet/","title":"The Internet","text":""},{"location":"internet/#history","title":"History","text":"
        • http://textfiles.com/underconstruction/: Archive of \"under construction\" banners
        "},{"location":"internet/#health","title":"Health","text":"
        • https://map.internetintel.oracle.com/
        • http://www.internettrafficreport.com/
        • https://outage.report/
        • https://www.slac.stanford.edu/comp/net/wan-mon/netmon.html: \"Interesting web sites for Internet Monitoring\"
        "},{"location":"interview/","title":"Interviews","text":"
        • https://sockpuppet.org/blog/2015/03/06/the-hiring-post/
        "},{"location":"iotop/","title":"iotop","text":"

        \"A top utility for IO\" - https://github.com/Tomas-M/iotop

        iotop tracks disk I/O by process, and prints a summary report that is refreshed every interval.

        "},{"location":"iotop/#linux-examples","title":"Linux Examples","text":""},{"location":"iotop/#show-cumulative-stats-for-processes-actually-using-io","title":"Show cumulative stats for processes actually using IO","text":"

        iotop -oa\n

        "},{"location":"iotop/#see-also","title":"See also","text":"
        • Top variant list
        "},{"location":"ip/","title":"ip","text":"

        This is about the ip command in Linux.

        "},{"location":"ip/#examples","title":"Examples","text":"

        The commands here can be abbreviated, much like commands on Cisco and other network device CLIs.
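
        For example, each abbreviation below is equivalent to its long form:

        ip a            # ip address show\nip r g 8.8.8.8  # ip route get 8.8.8.8\nip n            # ip neighbor show\n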

        "},{"location":"ip/#get-the-default-interface","title":"Get the default interface","text":"
        ip route get 8.8.8.8\n
        "},{"location":"ip/#show-ip-neighbors","title":"Show IP neighbors","text":"
        ip neighbor show\n
        "},{"location":"ip/#show-all-configured-ip-addresses","title":"Show all configured IP addresses","text":"

        This is more reliable than ifconfig, which sometimes omits entries.

        ip -f inet addr\n
        "},{"location":"ip/#show-information-about-eth0","title":"Show information about eth0","text":"

        This shows information about eth0 and all the virtual interfaces brought up on the physical interface.

        ip addr show eth0\n
        "},{"location":"ip/#monitor-ip-changes","title":"Monitor IP changes","text":"
        ip mon all\n
        "},{"location":"ip/#show-interfaces-that-would-route-to-a-given-network-address","title":"Show interfaces that would route to a given network address","text":"
        ip addr show to 10.1.8.0/24\n
        "},{"location":"ip/#show-negotiated-speeds-for-all-interfaces","title":"Show negotiated speeds for all interfaces","text":"
        ip -o link show | awk -F: '{print $2}' | while read -r X ; do\n  sudo ethtool ${X} | egrep 'Settings|Speed' | xargs echo\ndone | column -t -s:\n
        "},{"location":"ip/#add-a-static-route","title":"Add a static route","text":"
        ip route add 192.168.100.0/24 via 0.0.0.0 dev eth0\n
        "},{"location":"ip/#set-mtu-for-a-specific-route","title":"Set MTU for a specific route","text":"
        • http://lartc.org/howto/lartc.cookbook.mtu-discovery.html
        ip route add default via 10.0.0.1 mtu 296\n
        "},{"location":"iperf/","title":"iperf","text":"

        \"iperf3: A TCP, UDP, and SCTP network bandwidth measurement tool\" - https://github.com/esnet/iperf

        "},{"location":"iperf/#links","title":"Links","text":"
        • Sample usage - https://fasterdata.es.net/performance-testing/network-troubleshooting-tools/iperf/
        "},{"location":"iperf/#example-usage","title":"Example usage","text":""},{"location":"iperf/#example-server-receiving-side","title":"Example server (receiving side)","text":"
        iperf3 -s\n
        "},{"location":"iperf/#example-client-sending-side","title":"Example client (sending side)","text":"
        iperf3 -c remote-hostname --interval 0.5 --omit 2 --time 30 --set-mss 1460\n
        "},{"location":"ipmi/","title":"IPMI","text":"

        The Intelligent Platform Management Interface (IPMI) is a set of computer interface specifications for an autonomous computer subsystem that provides management and monitoring capabilities independently of the host system's CPU, firmware (BIOS or UEFI) and operating system.

        "},{"location":"ipmi/#managing-servers-with-ipmi","title":"Managing servers with IPMI","text":""},{"location":"ipmi/#default-users","title":"Default Users","text":"

        The default users are 'Administrator' for HPs, 'root' for Dells, and 'ADMIN' for Silicon Mechanics.

        "},{"location":"ipmi/#server-setup","title":"Server Setup","text":"

        IPMI uses COM2 aka ttyS1 for the serial port on Dells and HPs, COM3 aka ttyS2 on Silicon Mechanics.

        "},{"location":"ipmi/#common-remote-commands","title":"Common Remote Commands","text":""},{"location":"ipmi/#see-if-a-server-is-on","title":"See if a server is on","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power status\n
        "},{"location":"ipmi/#turn-a-server-on","title":"Turn a server on","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power on\n
        "},{"location":"ipmi/#turn-a-server-off","title":"Turn a server off","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power off\n
        "},{"location":"ipmi/#tell-a-server-to-pxeboot","title":"Tell a server to PXEBoot","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power off\nipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis bootdev pxe\nipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power on\n
        "},{"location":"ipmi/#connect-to-the-serial-console","title":"Connect to the serial console","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sol activate\n
        "},{"location":"ipmi/#display-the-system-event-log","title":"Display the system event log","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sel list\n
        "},{"location":"ipmi/#clear-the-system-event-log","title":"Clear the system event log","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sel clear\n
        "},{"location":"ipmi/#display-sensor-information","title":"Display sensor information","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sdr list\n
        "},{"location":"ipmi/#disconnect-another-serial-console-session","title":"Disconnect another serial console session","text":"
        ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sol deactivate\n
        "},{"location":"ipmi/#show-bootdev-help","title":"Show bootdev help","text":"
        ipmitool -H 10.5.8.30 -U ADMIN -P ADMIN chassis bootdev none options=help\n
        "},{"location":"ipmi/#common-local-commands","title":"Common Local Commands","text":"

        These commands require root access in most environments.

        "},{"location":"ipmi/#view-all-configured-lan-parameters","title":"View all configured LAN parameters","text":"
        ipmitool lan print\n

        You can view individual \"channels\", which are logical interfaces, by giving the channel number:

        ipmitool lan print 1\n
        "},{"location":"ipmi/#configure-ipmi-static-ip-information","title":"Configure IPMI static IP information","text":"
        IPMI_IP_ADDR=192.168.1.250\nIPMI_IP_NETMASK=255.255.255.0\nIPMI_IP_GW_IP_ADDR=192.168.1.1\nIPMI_IP_GW_MAC_ADDR=00:01:02:aa:bb:cc\nipmitool lan set 1 ipsrc static\nipmitool lan set 1 ipaddr \"${IPMI_IP_ADDR}\"\nipmitool lan set 1 netmask \"${IPMI_IP_NETMASK}\"\nipmitool lan set 1 defgw ipaddr \"${IPMI_IP_GW_IP_ADDR}\"\nipmitool lan set 1 defgw macaddr \"${IPMI_IP_GW_MAC_ADDR}\"\nipmitool lan set 1 arp respond on\n
        "},{"location":"ipmi/#configure-ipmi-admin-user","title":"Configure IPMI admin user","text":"
        USERNAME=admin\nPASSWORD=hunter2\nipmitool user set name 2 \"$USERNAME\"\nipmitool user set password 2 \"$PASSWORD\"\n
        "},{"location":"ipmi/#reset-the-bmc","title":"Reset the BMC","text":"

        If a host loses its IPMI (iLO, etc.) IP connectivity, issue this command from the host itself:

        ipmitool mc reset cold\n
        "},{"location":"ipmi/#how-to-fix-devipmi-errors","title":"How to fix /dev/ipmi errors","text":"

        For errors like Could not open device at /dev/ipmi0 or /dev/ipmi/0 or /dev/ipmidev/0:

        modprobe ipmi_msghandler\nmodprobe ipmi_devintf\nmodprobe ipmi_si\n
        "},{"location":"ipmi/#reset-the-admin-password-to-admin-on-a-supermicro-bmc","title":"Reset the ADMIN password to ADMIN on a Supermicro BMC","text":"
        sudo ipmitool -I open user set password 2 ADMIN\n
        "},{"location":"ipmi/#reset-all-settings-to-defaults","title":"Reset all settings to defaults","text":"

        This may not work with your BMC, but has been known to work with some supermicro BMCs.

        ipmitool raw 0x30 0x40\n
        "},{"location":"ipmi/#conigure-sol-on-a-systemd-server","title":"Conigure sol on a systemd server","text":"

        A better way to do this is via Grub.

        /lib/systemd/system/ttyS1.service should look something like:

        [Unit]\nDescription=Serial Console Service: ttyS1\n\n[Service]\nExecStart=/sbin/getty -L 115200 ttyS1 vt102\nRestart=always\n\n[Install]\n# system.target is not a real systemd target; multi-user.target is the usual choice\nWantedBy=multi-user.target\n

        Then run:

        systemctl enable ttyS1.service\nsystemctl start ttyS1.service\n
        "},{"location":"ipmi/#see-also","title":"See Also","text":"
        • http://www.intel.com/design/servers/ipmi/
        "},{"location":"iptables/","title":"iptables","text":"

        iptables is the built-in linux firewall.

        "},{"location":"iptables/#examples","title":"Examples","text":"
        • How to simulate a slow network link: http://blogs.kde.org/node/1878
        "},{"location":"iptables/#allow-mysql","title":"Allow MySQL","text":"
        iptables -A INPUT -i eth0 -p tcp -m tcp --dport 3306 -j ACCEPT\n
        "},{"location":"iptables/#ssh-blocking","title":"SSH blocking","text":"

        Better idea: fail2ban

        Basically, it lets people connect with SSH 5 times within a minute, but with a mandatory 5 second wait between connection attempts. Once they hit 5 attempts in a minute they get banned for an hour. Several IP ranges are exceptions where access is always allowed.

        iptables -I INPUT 1 -s 172.16.0.0/16 -m state --state NEW -p tcp -m tcp --dport 22 -j ACCEPT\niptables -I INPUT 2 -s 17.1.2.0/27   -m state --state NEW -p tcp -m tcp --dport 22 -j ACCEPT\niptables -I INPUT 3 -s 18.3.4.0/27   -m state --state NEW -p tcp -m tcp --dport 22 -j ACCEPT\niptables -I INPUT 4 -s 19.5.6.0/24   -m state --state NEW -p tcp -m tcp --dport 22 -j ACCEPT\niptables -N SSH\niptables -N SSH_ABL\niptables -A SSH -m recent --name SSH_ABL --update --seconds 3600 -j REJECT\niptables -A SSH -m recent --name SSH --rcheck --seconds 60 --hitcount 5 -j SSH_ABL\niptables -A SSH_ABL -m recent --name SSH_ABL --set -j LOG --log-level warn --log-prefix \"ABL: +SSH: \"\niptables -A SSH_ABL -j REJECT\niptables -A SSH -m recent --name SSH --rcheck --seconds 5 -j LOG --log-level warn --log-prefix \"RATE: \"\niptables -A SSH -m recent --name SSH --update --seconds 5 -j REJECT\niptables -A SSH -m recent --name SSH_ABL --remove -j LOG --log-level warn --log-prefix \"ABL: -SSH: \"\niptables -A SSH -m recent --name SSH --set -j ACCEPT\niptables -A INPUT -m state --state NEW -p tcp -m tcp --dport 22 -j SSH\niptables -L\n
        "},{"location":"iptables/#show-all-tables","title":"Show all tables","text":"

        Not all tables are shown by default. To view all, issue the following commands as root:

        iptables -vL -t filter\niptables -vL -t nat\niptables -vL -t mangle\niptables -vL -t raw\niptables -vL -t security\n
        "},{"location":"irc/","title":"irc","text":"

        Internet Relay Chat

        "},{"location":"irc/#clients","title":"Clients","text":"
        • Colloquy - Mac GUI client
        • irssi - CLI client
        "},{"location":"irc/#chanserv","title":"ChanServ","text":"

        \"ChanServ allows you to register and control various aspects of channels.\" - http://www.geekshed.net/commands/chanserv

        "},{"location":"irc/#register-a-room","title":"Register a room","text":"

        /msg ChanServ REGISTER #git-cfe

        "},{"location":"irssi/","title":"irssi","text":"

        TUI irc client.

        "},{"location":"irssi/#how-to","title":"How To","text":""},{"location":"irssi/#add-an-auto-connect-ssl-server-that-requires-a-password","title":"Add an auto-connect SSL server that requires a password","text":"

        /server add -auto -ssl servername port user:pass network

        "},{"location":"irssi/#add-an-auto-connect-channel","title":"Add an auto-connect channel","text":"

        /channel add -auto #channelname network

        "},{"location":"irssi/#links","title":"Links","text":"
        • http://irssi.org/help
        • http://quadpoint.org/articles/irssi: A Guide to Efficiently Using Irssi and Screen
        • https://archive.li/TEp7p: Irssi \u2014 How to use it in a comfortable way?
        "},{"location":"jargon/","title":"jargon","text":"
        • AMoD: Autonomous Mobility on Demand
        • bikeshedding: The term was coined as a metaphor to illuminate Parkinson's Law of Triviality. Parkinson observed that a committee whose job is to approve plans for a nuclear power plant may spend the majority of its time on relatively unimportant but easy-to-grasp issues, such as what materials to use for the staff bikeshed, while neglecting the design of the power plant itself, which is far more important but also far more difficult to criticize constructively.
        • cargo cult: software containing elements that are included because of successful utilization elsewhere, unnecessary for the task at hand.
        • Conway's Law: \"organizations which design systems ... are constrained to produce designs which are copies of the communication structures of these organizations.\"
        • Cunningham's law: The best way to get the right answer on the Internet is not to ask a question, it's to post the wrong answer.
        • cybernetics: \"Cybernetics\" comes from a Greek word meaning \"the art of steering\". Cybernetics is about having a goal and taking action to achieve that goal.
        • deterministic: In mathematics and physics, a deterministic system is a system in which no randomness is involved in the development of future states of the system. A deterministic model will thus always produce the same output from a given starting condition or initial state.
        • DRY: Don't Repeat Yourself. \"Every piece of knowledge must have a single, unambiguous, authoritative representation within a system\"
        • idempotent: Idempotence is the property of certain operations in mathematics and computer science, that they can be applied multiple times without changing the result beyond the initial application.
        • KISS: Keep It Simple, Stupid!
        • martian packet: A Martian packet is an IP packet which specifies a source or destination address that is reserved for special-use by Internet Assigned Numbers Authority.
        • monotonic: A function or set of values that always increases or always decreases.
        • PRD: product requirements document
        • teleology: Teleology or finality is a reason or explanation for something in function of its end, purpose, or goal.
        • transitive dependency: a functional dependency which holds by virtue of transitivity among various software components. (EG: a dependency of a dependency)
        • warrant canary: Text on a website that states the company or person has never been served with a secret government subpoena. Once the statement is removed, the users can assume the company or person has been served and has been told not to talk about it.
        • YAGNI: a principle that states a programmer should not add functionality until deemed necessary.
        • yak shaving: Any apparently useless activity which, by allowing you to overcome intermediate difficulties, allows you to solve a larger problem.
        "},{"location":"javascript/","title":"JavaScript","text":"

        JavaScript is a scripting language that is ubiquitous in web browsers, and is found in many other places, from back-end servers to microcontrollers to Lego robots.

        "},{"location":"javascript/#links","title":"Links","text":"
        • https://eloquentjavascript.net: a book about JavaScript, programming, and the wonders of the digital.
        • https://github.com/getify/You-Dont-Know-JS: a series of books diving deep into the core mechanisms of the JavaScript language.
        • https://webpack.js.org: a static module bundler for modern JavaScript applications
        • https://gruntjs.com: the JavaScript task runner
        • https://www.espruino.com: JavaScript for microcontrollers
        • https://developer.mozilla.org/en-US/docs/Learn/JavaScript
        • https://developer.mozilla.org/en-US/docs/Learn/Common_questions/Tools_and_setup/What_are_browser_developer_tools
        • https://eloquentjavascript.net
        • https://jestjs.io: \"Jest is a delightful JavaScript Testing Framework with a focus on simplicity.\"
        "},{"location":"jdupes/","title":"jdupes","text":"

        \"finds and performs actions upon duplicate files\" - man jdupes

        jdupes is based on fdupes and is not written in java as the name may lead you to assume.

        "},{"location":"jdupes/#examples","title":"Examples","text":""},{"location":"jdupes/#recursively-find-all-duplicates-in-a-dir","title":"Recursively find all duplicates in a dir","text":"
        jdupes -r /path/to/dir\n
        "},{"location":"jdupes/#create-hard-links-of-any-duplicate-files","title":"Create hard-links of any duplicate files","text":"
        jdupes -r -L /path/\n
        "},{"location":"jdupes/#delete-all-but-one-duplicate","title":"Delete all but one duplicate","text":"

        Although the flags do not indicate this, the following command keeps the first file (see man page for details), and prints a summary of what was kept and what was deleted.

        jdupes -r --delete --noprompt /path/\n
        "},{"location":"jdupes/#links","title":"Links","text":"
        • https://github.com/jbruchon/jdupes
        "},{"location":"jmespath/","title":"jmespath","text":"

        \"JMESPath is a query language for JSON.\" - http://jmespath.org

        "},{"location":"jmespath/#links","title":"Links","text":"
        • http://jmespath.org/examples.html
        • http://jmespath.org/tutorial.html
        • https://github.com/jmespath/jmespath.terminal
        • https://github.com/jmespath/jp
        "},{"location":"jmespath/#quotes-matter","title":"Quotes matter","text":"

        Something that is completely absent from the jmespath tutorial is that quotes matter. You must quote value strings with single quotes or back-ticks, which means you have to quote the python function args with double quotes, use back-ticks, or escape your single quotes:

        >>> print(l)\n[{\"name\": \"foo-name\", \"age\": \"foo-age\"}, {\"name\": \"bar-name\", \"age\": \"bar-age\"}]\n>>> print(yaml.dump(l))\n- age: foo-age\n  name: foo-name\n- age: bar-age\n  name: bar-name\n\n>>> jmespath.search(\"[?name == 'bar-name']\", l)\n[{'name': 'bar-name', 'age': 'bar-age'}]\n>>> jmespath.search('[?name == \"bar-name\"]', l)\n[]\n>>> jmespath.search('[?name == \\'bar-name\\']', l)\n[{'name': 'bar-name', 'age': 'bar-age'}]\n>>> jmespath.search(\"[?name == `bar-name`]\", l)\n[{'name': 'bar-name', 'age': 'bar-age'}]\n

        However, in jmespath you also must double-quote keys (variable names) that contain dots. (Double-quotes are optional for keys that do not contain dots.) This becomes a bit burdensome to keep track of, and also failure-prone:

        >>> l = [{\"name\": \"foo-name\", \"the.age\": \"foo-the.age\"}, {\"name\": \"bar-name\", \"the.age\": \"bar-the.age\"}]\n>>> l[1]['the.age']\n'bar-the.age'\n>>> jmespath.search(\"[?'the.age' == 'bar-the.age']\", l)\n[]\n>>> jmespath.search('[?\"the.age\" == \"bar-the.age\"]', l)\n[]\n>>> jmespath.search('[?\"the.age\" == \\'bar-the.age\\']', l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n>>> jmespath.search('[?\"the.age\" == `bar-the.age`]', l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n>>> jmespath.search(\"[?\\\"the.age\\\" == 'bar-the.age']\", l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n

        Triple quotes can help here by avoiding the need to escape both single-quotes and double-quotes:

        >>> jmespath.search(\"\"\"[?'the.age' == 'bar-the.age']\"\"\", l)\n[]\n>>> jmespath.search(\"\"\"[?\"the.age\" == \"bar-the.age\"]\"\"\", l)\n[]\n>>> jmespath.search(\"\"\"[?\"the.age\" == 'bar-the.age']\"\"\", l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n>>> jmespath.search(\"\"\"[?\"the.age\" == `bar-the.age`]\"\"\", l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n

        The TL;DR is to only double-quote variable names, and single-quote or back-tick-quote anything that is a string literal. This requirement is inconsistent with normal python comparisons. The behavior is strict and unintuitive to people unfamiliar with jmespath. The fact that the failures are silent is quite problematic and leads to low confidence that jmespath is behaving as expected. jmespath would do well to have a linter to warn about this behavior, and at least mention the different types of quotes and their behaviors in the tutorial. (FWIW, these details are buried in the jmespath spec, including the nuanced differences between single-quotes and back-ticks.)

        "},{"location":"jmespath/#examples","title":"Examples","text":""},{"location":"jmespath/#grab-some-kubernetes-fields-and-remap-them-to-be-less-deep","title":"Grab some kubernetes fields and remap them to be less deep","text":"
        kubectl get po --all-namespaces -o json |\njp \"items[*].{name: metadata.name, namespace: metadata.namespace, imagePullSecrets: spec.imagePullSecrets[*].name}\"\n

        Or filter only to non-default namespace where imagePullSecrets is populated

        kubectl get po --all-namespaces -o json |\njp \"items[?metadata.namespace != 'default' && spec.imagePullSecrets != null].{name: metadata.name, namespace: metadata.namespace, imagePullSecrets: spec.imagePullSecrets[*].name}\"\n
        "},{"location":"join/","title":"join","text":"

        \"The join utility performs an 'equality join' on the specified files and writes the result to the standard output.\" - man join

        join is a unix tool that is similar to a sql join, combining two files by joining on a column. The macOS man page has more examples than the GNU man page, but as usual the syntaxes aren't exactly the same.

        "},{"location":"join/#examples","title":"Examples","text":"

        The following examples use the following dates.txt file as file 1. Notice it is missing data between December 25 and December 31 (inclusive).

        2022-12-21 9\n2022-12-22 2\n2022-12-23 1\n2022-12-24 5\n2023-01-01 6\n2023-01-02 6\n2023-01-03 2\n

        We also use a loop that produces a date sequence covering the dates missing from dates.txt, but not going as far back in time:

        $ for OFFSET in {10..0} ; do date -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done\n2022-12-24 offset=10\n2022-12-25 offset=9\n2022-12-26 offset=8\n2022-12-27 offset=7\n2022-12-28 offset=6\n2022-12-29 offset=5\n2022-12-30 offset=4\n2022-12-31 offset=3\n2023-01-01 offset=2\n2023-01-02 offset=1\n2023-01-03 offset=0\n
        "},{"location":"join/#show-only-lines-with-common-columns","title":"Show only lines with common columns","text":"
        $ join dates.txt <(for OFFSET in {10..0} ; do date -d \"-$OFFSET days\" \"+%F $OFFSET\" ; done)\n2022-12-24 5 10\n2023-01-01 6 2\n2023-01-02 6 1\n2023-01-03 2 0\n
        "},{"location":"join/#show-all-lines-as-long-as-data-is-present-in-file-1","title":"Show all lines, as long as data is present in file 1","text":"
        $ join -a 1 dates.txt <(for OFFSET in {10..0} ; do date -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done)\n2022-12-21 9\n2022-12-22 2\n2022-12-23 1\n2022-12-24 5 offset=10\n2023-01-01 6 offset=2\n2023-01-02 6 offset=1\n2023-01-03 2 offset=0\n
        "},{"location":"join/#show-all-lines-as-long-as-data-is-present-in-file-2","title":"Show all lines, as long as data is present in file 2","text":"
        $ join -a 2 dates.txt <(for OFFSET in {10..0} ; do date -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done)\n2022-12-24 5 offset=10\n2022-12-25 offset=9\n2022-12-26 offset=8\n2022-12-27 offset=7\n2022-12-28 offset=6\n2022-12-29 offset=5\n2022-12-30 offset=4\n2022-12-31 offset=3\n2023-01-01 6 offset=2\n2023-01-02 6 offset=1\n2023-01-03 2 offset=0\n
        "},{"location":"join/#only-show-certain-columns-in-the-output","title":"Only show certain columns in the output","text":"

        We can specify which columns we want to see in the output, which includes the ability to join on a column that is not shown in the output:

        $ join -o 1.2,2.2 dates.txt <(for OFFSET in {10..0} ; do gdate -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done)\n5 offset=10\n6 offset=2\n6 offset=1\n2 offset=0\n
        "},{"location":"join/#fill-in-missing-data-an-arbitrary-string","title":"Fill in missing data an arbitrary string","text":"

        We use -e Null to indicate we want to fill in missing values with Null. In order for -e to work right, we have to specify the output columns with -o. We also have to specify -a 2 to indicate we want to see all lines from file 2. Because we are showing all lines in file 2, we use -o 2.1 instead of -o 1.1 so that the date column is not populated with Null values.

        $ join -e Null -o 2.1,1.2,2.2 -a 2 dates.txt <(for OFFSET in {10..0} ; do gdate -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done)\n2022-12-24 5 offset=10\n2022-12-25 Null offset=9\n2022-12-26 Null offset=8\n2022-12-27 Null offset=7\n2022-12-28 Null offset=6\n2022-12-29 Null offset=5\n2022-12-30 Null offset=4\n2022-12-31 Null offset=3\n2023-01-01 6 offset=2\n2023-01-02 6 offset=1\n2023-01-03 2 offset=0\n

        A practical example of the above is making sure you have an entire sequence filled in, for example when graphing a set where no entry is created for days that have no data points. This ensures we're seeing a complete set, not just the days that have data:

        $ join -e 0 -o 2.1,1.2 -a 2 dates.txt <(for OFFSET in {13..0} ; do gdate -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done) | termgraph --format='{:.0f}'\n\n2022-12-21: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 9\n2022-12-22: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 2\n2022-12-23: \u2587\u2587\u2587\u2587\u2587 1\n2022-12-24: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 5\n2022-12-25:  0\n2022-12-26:  0\n2022-12-27:  0\n2022-12-28:  0\n2022-12-29:  0\n2022-12-30:  0\n2022-12-31:  0\n2023-01-01: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 6\n2023-01-02: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 6\n2023-01-03: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 2\n
        "},{"location":"jot/","title":"jot","text":"

        jot is a BSD CLI tool to generate sequences or random data, usually numbers.
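
        Here are jot's two modes in their simplest forms: sequential integers, and random integers within a range (the random output below is just illustrative):

        $ jot 3\n1\n2\n3\n$ jot -r 3 1 6\n5\n1\n4\n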

        "},{"location":"jot/#tricks","title":"Tricks","text":""},{"location":"jot/#generate-37-evenly-space-floating-point-numbers-with-two-significant-digits-between-10-and-10","title":"Generate 37 evenly space floating point numbers (with two significant digits) between -10 and +10","text":"
        jot 37 -10 10.00\n
        "},{"location":"jot/#generate-two-12-alphanumeric-random-character-passwords-with-rs","title":"Generate two 12 alphanumeric random character passwords with rs","text":"
        jot -rc 24 48 123 | rs -g 0 12\n
        "},{"location":"jot/#generate-5-capital-alphanumeric-strings","title":"Generate 5 capital alphanumeric strings","text":"
        jot -rc 500 48 90 | grep '[A-Z0-9]' | rs -g 5 32\n
        "},{"location":"jpeginfo/","title":"jpeginfo","text":"

        \"jpeginfo - prints information and tests integrity of JPEG/JFIF files.\" - man jpeginfo

        "},{"location":"jpeginfo/#example","title":"Example","text":""},{"location":"jpeginfo/#test-integrity-of-all-files-in-a-dir","title":"Test integrity of all files in a dir","text":"
        jpeginfo -c *\n
        "},{"location":"jpeginfo/#test-integrity-of-files-and-delete-any-corrupt-files","title":"Test integrity of files and delete any corrupt files","text":"
        jpeginfo -c -d *\n
        "},{"location":"jpeginfo/#see-also","title":"See Also","text":"
        • exiftool
        • graphicsmagick
        • imagemagick
        • sips
        "},{"location":"jq/","title":"jq","text":"

        \"jq is a lightweight and flexible command-line JSON processor.\" - https://stedolan.github.io/jq/

        "},{"location":"jq/#examples","title":"Examples","text":""},{"location":"jq/#sort-a-json-file","title":"Sort a json file","text":"
        jq -S . foo.json\n
        "},{"location":"jq/#select-key-name-with-dots","title":"Select key name with dots","text":"

        The syntax .foo.bar is the same as .[\"foo\"][\"bar\"], so to select keys that have dots, you would do .annotations[\"deployment.kubernetes.io/revision\"]
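
        For example, with a hypothetical annotations object:

        $ echo '{\"annotations\": {\"deployment.kubernetes.io/revision\": \"3\"}}' | jq '.annotations[\"deployment.kubernetes.io/revision\"]'\n\"3\"\n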

        "},{"location":"jq/#grab-first-element-of-an-array-and-print-the-value-of-timestamp-of-that-element","title":"Grab first element of an array, and print the value of 'timestamp' of that element.","text":"
        echo '\n[\n  {\n    \"foo\": \"this is foo string\",\n    \"timestamp\": \"this is the timestamp\"\n  },\n  {\n    \"second element\": \"second element value\"\n  }\n]' | jq '.[0].timestamp'\n
        "},{"location":"jq/#construct-flickr-urls-from-an-api-call","title":"Construct Flickr URLs from an API call","text":"
        curl -s \"https://api.flickr.com/services/rest/?\"\\\n\"&api_key=0123456789abcdef\"\\\n\"&format=json\"\\\n\"&method=flickr.photos.getRecent\"\\\n\"&nojsoncallback=1\" |\n  jq -S '\n    .photos.photo[] |\n    \"https://flickr.com/photos/\" + .owner + \"/\" + .id\n  '\n
        "},{"location":"jq/#use-mco-to-find-packages-of-a-certain-version-on-a-certain-os","title":"Use mco to find packages of a certain version on a certain OS","text":"

        This example could be used as an alternative to grep, where only the value of a key/value pair is matched.

        mco rpc package status package=apt -j -F lsbdistcodename=trusty |\n  jq -c '\n    .[] |\n    select(.data.ensure == \"1.0.1ubuntu2\") | {\n      version: .data.ensure, hostname: .sender\n    }\n  '\n
        "},{"location":"jq/#print-only-objects-whose-name-matches-a-string","title":"Print only objects whose name matches a string","text":"

        This example echoes some yaml, uses python to convert it to json, then filters matching data using jq. It could be used as an alternative to grep, where only the key of a key/value pair is matched.

        echo \"\ndata:\n  - This is a string, not an object, and contains the substrings foo and bar\n  - name: foo_name\n    value: foo_value\n  - name: bar_name\n    value: bar_value\" |\npython -c \"import yaml, sys, json; print(json.dumps(yaml.safe_load(sys.stdin)))\" |\njq '\n  .data[] |\n  select(type==\"object\") |\n  select (.name | . and contains(\"bar_n\"))\n'\n
        "},{"location":"jq/#build-a-json-entry-from-scratch","title":"Build a json entry from scratch","text":"

        This uses bash parameter expansion and subshell syntax, and may not work in other shells.

        create_json() {\n  local user=${1:-${USER}}\n  local host=${2:-${HOSTNAME}}\n  local more_stuff=${3:-$(uname -a)}\n  json=$(\n    jq -c -n \\\n      --arg timestamp  \"$(date \"+%F %T%z\")\" \\\n      --arg host       \"${host}\" \\\n      --arg user       \"${user}\" \\\n      --arg more_stuff \"${more_stuff}\" \\\n      '{\n        timestamp:  $timestamp,\n        host:       $host,\n        user:       $user,\n        more_stuff: $more_stuff\n      }'\n  )\n  echo \"$json\"\n}\n
        "},{"location":"jq/#render-yaml-with-anchors-as-json-data","title":"Render yaml with anchors as json data","text":"

        This example shows how you can use python and jq to view the result of dereferenced yaml anchors, a construct that is not supported by json. This example is less about jq syntax, and more about how jq can be used to view data that is otherwise difficult to sort through.

        echo \"\njob1: &template\n  directory: /tmp\n  extra_parameters: nosuid,noatime\n  remote_host: 10.1.1.1\n  user: nobody\njob2:\n  <<: *template\n  remote_host: 10.2.2.2\njob3:\n  <<: *template\n  remote_host: 10.3.3.3\n\" |\npython -c \"import yaml, sys, json; print(json.dumps(yaml.safe_load(sys.stdin)))\" |\njq -S .\n
        "},{"location":"jq/#select-matches-and-print-a-subset-of-values","title":"Select matches, and print a subset of values","text":"
        jq '.[] | select(.data.ensure != \"purged\") | [.sender,.data.ensure]' $*\n
        "},{"location":"jq/#output-bare-values-for-use-as-inputs","title":"Output bare values for use as inputs","text":"

        This is a contrived example; the better way to get this info would be awless list instances --format tsv --columns name,privateip,launched

        $ awless list instances --format json | jq -r '.[] | \"\\(.Name) \\(.PrivateIP) \\(.Launched)\"' | column -t\nsalt-master       172.18.9.48   2015-04-10T21:28:03Z\nconsul-server-01  172.18.9.116  2015-05-15T06:13:19Z\nconsul-server-02  172.18.9.117  2015-05-15T06:13:19Z\nconsul-server-03  172.18.9.118  2015-05-15T06:13:19Z\n
        "},{"location":"jq/#show-labels-for-each-locally-stored-docker-sha","title":"Show labels for each locally stored docker SHA","text":"
        docker images --format '{{.ID}}' |\nwhile read -r X ; do\n  docker inspect $X |\n  jq '.[] | [ .RepoTags, .Config.Labels ]'\ndone\n
        "},{"location":"jq/#sort-all-json-contents","title":"Sort all JSON contents","text":"

        Be aware that sometimes JSON should not be sorted, as arrays are sometimes expected to maintain their order.

        jq -S '. | walk( if type == \"array\" then sort else . end )'\n

        Or set up a shell alias

        alias jqsort=\"jq -S '. | walk( if type == \\\"array\\\" then sort else . end )'\"\n
        "},{"location":"jq/#store-a-value-as-a-variable","title":"Store a value as a variable","text":"

        When creating long pipelines, it's useful to be able to store a deep value as a variable. In the following example we store .metadata.namespace as $namespace and .metadata.name as $podname before digging into .status where we would no longer have access to .metadata:

        kubectl get pod -A -l k8s-app=kube-dns -o=json |\njq -r '\n  .items[] |\n  .metadata.namespace as $namespace |\n  .metadata.name as $podname |\n  .status.containerStatuses[] |\n  \"\\($namespace) \\($podname) \\(.name) \\(.restartCount)\"\n' |\ncolumn -t\n

        The output of this command is something like:

        kube-system  kube-dns-66f64447b8-7tzkn  dnsmasq           5\nkube-system  kube-dns-66f64447b8-7tzkn  kubedns           0\nkube-system  kube-dns-66f64447b8-7tzkn  prometheus-to-sd  0\nkube-system  kube-dns-66f64447b8-7tzkn  sidecar           0\nkube-system  kube-dns-66f64447b8-b2jsf  dnsmasq           3\nkube-system  kube-dns-66f64447b8-b2jsf  kubedns           0\nkube-system  kube-dns-66f64447b8-b2jsf  prometheus-to-sd  0\nkube-system  kube-dns-66f64447b8-b2jsf  sidecar           0\n
        "},{"location":"jq/#default-value-for-missing-keys","title":"Default value for missing keys","text":"
        $ echo '{\"foo\": 1, \"bar\": 2}' | jq '[.foo // \"missing\", .bar // \"missing\", .baz // \"missing\"]'\n[\n  1,\n  2,\n  \"missing\"\n]\n
        "},{"location":"jq/#craft-json-data-for-use-with-curl","title":"Craft json data for use with curl","text":"

        When using curl --data, in-line JSON can get out of hand quickly. To avoid confusing quoting and escaping, use jq to create a temp file, then reference that file in curl:

        JSON_DATA=$(mktemp)\njq -n '{service_account: env.SERVICE_ACCOUNT_ID, secret_key: env.SERVICE_ACCOUNT_KEY}' > \"$JSON_DATA\"\n

        The above commands create a temporary file with the correct and valid JSON:

        $ cat \"$JSON_DATA\"\n{\n  \"service_account\": \"whoever@serviceaccount.example.com\",\n  \"secret_key\": \"abc123\"\n}\n

        Then reference that in curl:

        $ curl -s --request POST --header \"content-type: application/json\" --data \"@${JSON_DATA}\" https://httpbin.org/anything\n{\n  \"args\": {},\n  \"data\": \"{  \\\"service_account\\\": \\\"whoever@serviceaccount.example.com\\\",  \\\"secret_key\\\": \\\"abc123\\\"}\",\n  \"files\": {},\n  \"form\": {},\n  \"headers\": {\n    \"Accept\": \"*/*\",\n    \"Content-Length\": \"84\",\n...\n
        "},{"location":"jq/#get-the-most-recently-dated-blob","title":"Get the most recently dated blob","text":"
        curl -fsSL https://storage.googleapis.com/updates.astronomer.io/astronomer-certified |\njq '.available_releases | sort_by(.release_date)[-1]'\n
        "},{"location":"jq/#see-also","title":"See Also","text":"
        • Tutorial
        "},{"location":"json/","title":"JSON","text":"

        \"JSON (JavaScript Object Notation) is a lightweight data-interchange format.\" - https://www.json.org/

        "},{"location":"json/#see-also","title":"See also","text":"
        • toml: \"TOML aims to be a minimal configuration file format that's easy to read due to obvious semantics.\"
        • yaml: Better human readability, more options.
        "},{"location":"json/#is-yaml-a-superset-of-json","title":"Is YAML a superset of JSON?","text":"

        Many people say that JSON is a subset of YAML, but that is not strictly true. See https://metacpan.org/pod/JSON::XS#JSON-and-YAML

        Here's an example of json that does not work as yaml:

        $ sed 's/\\t/--->/g' break-yaml.json\n--->{\n--->--->\"list\": [\n--->--->--->{},\n--->--->--->{}\n--->--->]\n--->}\n$ jq -c . break-yaml.json\n{\"list\":[{},{}]}\n$ json-to-yaml.py break-yaml.json\nlist:\n- {}\n- {}\n$ yaml-to-json.py break-yaml.json\nERROR: break-yaml.json could not be parsed\nwhile scanning for the next token\nfound character '\\t' that cannot start any token\nin \"break-yaml.json\", line 1, column 1\n$ sed 's/\\t/    /g' break-yaml.json | yaml-to-json.py\n{\"list\": [{}, {}]}\n
        "},{"location":"json/#links","title":"Links","text":"
        • https://goessner.net/articles/JsonPath/index.html: JSONPath is used by kubernetes as a native way to restructure kubectl output.
        • https://stedolan.github.io/jq/: jq is generally useful for working with JSON in a shell.
        • http://jmespath.org/: jmespath is used in AWS APIs to restructure data.
        • https://json5.org/: Now with trailing comma and comment support!
        • https://github.com/antonmedv/fx: TUI with clickable expansion and search features, written in golang.
        • https://github.com/TomWright/dasel: Universal serialized data tool, supports json, yaml, csv, and more.
        • https://github.com/josephburnett/jd: \"jd is a commandline utility and Go library for diffing and patching JSON and YAML values. It supports a native jd format (similar to unified format) as well as JSON Merge Patch (RFC 7386) and a subset of JSON Patch (RFC 6902).\"
        • https://www.jsonfeed.org: \"The JSON Feed format is a pragmatic syndication format, like RSS and Atom, but with one big difference: it\u2019s JSON instead of XML.\"
        "},{"location":"jsonnet/","title":"jsonnet","text":"

        \"A data templating language for app and tool developers\" - https://jsonnet.org

        "},{"location":"jsonpath/","title":"JSONPath","text":"

        \"XPath for JSON\" - https://goessner.net/articles/JsonPath/index.html

        "},{"location":"jsonpath/#examples","title":"Examples","text":"

        There are more examples on the kubernetes page.

        "},{"location":"jsonpath/#show-the-api-server-of-your-current-kubernetes-context","title":"Show the API server of your current kubernetes context","text":"
        kubectl config view --minify -o=jsonpath='{.clusters[0].cluster.server}'\n
        "},{"location":"jsonpath/#links","title":"Links","text":"
        • http://jsonpath.com/ - online expression evaluator
        • https://kubernetes.io/docs/reference/kubectl/jsonpath/
        • https://extendsclass.com/jsonpath-tester.html
        "},{"location":"juniper/","title":"Juniper","text":"

        \"We bring simplicity to networking with products, solutions, and services that connect the world.\" - https://www.juniper.net/us/en/company/

        "},{"location":"juniper/#examples","title":"Examples","text":""},{"location":"juniper/#default-credentials","title":"Default credentials","text":"
        • root:(blank)
        • admin:abc123
        "},{"location":"juniper/#load-configuration-from-terminal","title":"Load configuration from terminal","text":"

        I have experienced errors when pasting into screen /dev/tty.usbserial, but having iTerm2 paste at 256 bytes per second appears to fix them.

        Amnesiac (ttyu0)\n\nlogin: root\n\n--- JUNOS 13.2X51-D35.3 built 2015-04-09 20:48:22 UTC\nroot@:RE:0% cli\n{master:0}\nroot> configure\nEntering configuration mode\nUsers currently editing the configuration:\n  autodconfig (pid 1291) on since 2018-01-06 17:32:28 UTC\n      exclusive\n\n{master:0}[edit]\nroot# load override terminal\n[Type ^D at a new line to end input]\n

        Then:

        1. paste your config
        2. press ctrl-d
        3. run commit
        "},{"location":"juniper/#some-example-terminal-buffer","title":"Some example terminal buffer","text":"
        login: root\n\n--- JUNOS 13.2X51-D35.3 built 2015-04-09 20:48:22 UTC\nroot@:RE:0% cli\n{master:0}\nroot> set cli screen-length 75\nScreen length set to 75\n\n{master:0}\nroot> configure\nEntering configuration mode\n\n{master:0}[edit]\nroot# show ?\nPossible completions:\n  <[Enter]>            Execute this command\n> access               Network access configuration\n> access-profile       Access profile for this instance\n> accounting-options   Accounting data configuration\n+ apply-groups         Groups from which to inherit configuration data\n> chassis              Chassis configuration\n> class-of-service     Class-of-service configuration\n> diameter             Diameter protocol layer\n> event-options        Event processing configuration\n> firewall             Define a firewall configuration\n> forwarding-options   Configure options to control packet forwarding\n> groups               Configuration groups\n> interfaces           Interface configuration\n> jsrc                 JSRC partition configuration\n> jsrc-partition       JSRC partition configuration\n> multi-chassis\n> multicast-snooping-options  Multicast snooping option configuration\n> poe                  Power-over-Ethernet options\n> policy-options       Policy option configuration\n> protocols            Routing protocol configuration\n> routing-instances    Routing instance configuration\n> routing-options      Protocol-independent routing option configuration\n> security             Security configuration\n> services             System services\n> snmp                 Simple Network Management Protocol configuration\n> switch-options       Options for default routing-instance of type virtual-switch\n> system               System parameters\n> unified-edge\n> virtual-chassis      Virtual chassis configuration\n> vlans                VLAN configuration\n  |                    Pipe through a command\n{master:0}[edit]\nroot# show chassis\nauto-image-upgrade;\n\n{master:0}[edit]\n
        "},{"location":"juniper/#show-forwarding-table","title":"Show forwarding table","text":"

        This is exactly the same as typing netstat -nr at the system shell.

        show route forwarding-table detail\n
        "},{"location":"juniper/#remove-virtual-chassis-from-an-ex4300","title":"Remove virtual-chassis from an EX4300","text":"
        request virtual-chassis vc-port delete pic-slot 1 port 0\nrequest virtual-chassis vc-port delete pic-slot 1 port 1\nrequest virtual-chassis vc-port delete pic-slot 1 port 2\nrequest virtual-chassis vc-port delete pic-slot 1 port 3\n

        You can validate this by looking for the following interfaces. If you do not see the et-0/1/{0..3} interfaces then the virtual-chassis may still exist in full or in part.

        root> show interfaces terse | match et-\net-0/1/0                up    up\net-0/1/0.0              up    up   eth-switch\net-0/1/1                up    up\net-0/1/1.0              up    up   eth-switch\net-0/1/2                up    up\net-0/1/2.0              up    up   aenet    --> ae0.0\net-0/1/3                up    up\net-0/1/3.0              up    up   aenet    --> ae0.0\n
        "},{"location":"juniper/#ex4300-os-install","title":"EX4300 OS install","text":"
        • https://kb.juniper.net/InfoCenter/index?page=content&id=KB20551&cat=SWITCHING&actp=LIST#USB

        After copying the install image to a USB disk and inserting it into the EX4300:

        mount_msdosfs /dev/da1s1 /mnt\ncp /mnt/jinstall-ex-4300-14.1X53-D45.3-domestic-signed.tgz /var/tmp/\ncli\nrequest system software add /var/tmp/jinstall-ex-4300-14.1X53-D45.3-domestic-signed.tgz\n
        "},{"location":"juniper/#clear-a-dhcp-client-lease","title":"Clear a DHCP client lease","text":"
        root@junos> show dhcp server binding\nIP address        Session Id  Hardware address   Expires     State      Interface\n10.8.52.6         2           00:1e:7c:f8:be:34  85166       BOUND      irb.100\n10.8.52.9         5           6c:c1:11:4e:52:8c  86299       BOUND      irb.100\n10.8.52.5         3           d8:fa:97:b8:1a:dd  85222       BOUND      irb.100\n\n{master:0}\nroot@junos> clear dhcp server binding 2\n\n{master:0}\nroot@junos> show dhcp server binding\nIP address        Session Id  Hardware address   Expires     State      Interface\n10.8.52.9         5           6c:c1:11:4e:52:8c  86275       BOUND      irb.100\n10.8.52.5         3           d8:fa:97:b8:1a:dd  85198       BOUND      irb.100\n\n{master:0}\n
        "},{"location":"juniper/#show-config-diff-before-commit","title":"Show config diff before commit","text":"

        You can view the changes that will be committed, which is useful when the commit is invalid and you need to inspect it.

        root# show | compare\n[edit access address-assignment pool p1 family inet]\n        host server1-ipmi { ... }\n+       host server2 {\n+           hardware-address 00:11:22:33:44:33;\n+           ip-address 172.17.1.6;\n+       }\n+       host server3 {\n+           hardware-address 00:11:22:33:44:35;\n+           ip-address 172.17.1.7;\n+       }\n+       host server4 {\n+           hardware-address 00:11:22:33:44:1d;\n+           ip-address 172.17.1.8;\n+       }\n
        "},{"location":"juniper/#abort-config-changes-without-committing","title":"Abort config changes without committing","text":"

        If you have made changes you want to abandon, run 'rollback 0' while still in configure mode:

        {master:0}[edit]\nroot@ex4300# rollback 0\nload complete\n\n{master:0}[edit]\nroot@voyage-van-3-ex4300# exit\nExiting configuration mode\n
        "},{"location":"juniper/#links","title":"Links","text":"
        • https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-cli-overview.html
        • https://www.juniper.net/documentation/en_US/junos/topics/task/configuration/virtual-chassis-mx-series-vc-ports-deleting.html
        • https://junos-ansible-modules.readthedocs.io/en/2.1.0/
        "},{"location":"jupyter/","title":"Jupyter","text":"

        \"Project Jupyter exists to develop open-source software, open-standards, and services for interactive computing across dozens of programming languages.\" - http://jupyter.org/

        "},{"location":"jupyter/#links","title":"Links","text":"
        • A gallery of interesting Jupyter Notebooks
        • Interactive coding challenges
        • Presenting Code Using Jupyter Notebook Slides
        • JupyterBook: Jupyter Book is an open source project for building beautiful, publication-quality books and documents from computational material.
        "},{"location":"jwt/","title":"jwt","text":"

        \"JSON Web Token (JWT) is an open standard (RFC 7519) that defines a compact and self-contained way for securely transmitting information between parties as a JSON object. This information can be verified and trusted because it is digitally signed. JWTs can be signed using a secret (with the HMAC algorithm) or a public/private key pair using RSA.\" - https://jwt.io/introduction/

        \"A JSON web token, or JWT (\u201cjot\u201d) for short, is a standardized, optionally validated and/or encrypted container format that is used to securely transfer information between two parties.\" - A plain English introduction to JWT

        "},{"location":"jwt/#tidbits","title":"Tidbits","text":"
        • JWT is abstract. The concrete forms are signed (JWS) or encrypted (JWE)
        • Unsigned have \"alg\": \"none\" in the header, but are still JWS format.
        • JWS has three sections: header.payload.signature (see the decoding sketch after this list)
        • JWE comes in two forms with either 5 or 6 sections
        • Signatures can be created using a shared key (required for both signing and validating) or using a public/private key pair, where the private key is used to sign and only the public key is needed to validate. In either case, there is a piece of information that must be configured ahead of time for JWT to function, so it is not a self-contained mechanism.
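
        Since the JWS header and payload sections are just base64url-encoded JSON, you can peek inside a token from a shell. This is a minimal sketch, not signature validation; it assumes base64 and jq are available, and it re-adds the padding that the JWT encoding strips:

        jwt_section() {\n  # usage: jwt_section \"$TOKEN\" 1   (1=header, 2=payload)\n  local part\n  part=$(printf '%s' \"$1\" | cut -d. -f\"$2\" | tr '_-' '/+')\n  # restore the base64 padding that JWT encoding strips, then decode\n  printf '%s%s' \"$part\" \"$(printf '%*s' $(( (4 - ${#part} % 4) % 4 )) '' | tr ' ' '=')\" |\n    base64 -d | jq .\n}\n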
        "},{"location":"jwt/#links","title":"Links","text":"
        • https://jwt.io
        • https://medium.com/ag-grid/a-plain-english-introduction-to-json-web-tokens-jwt-what-it-is-and-what-it-isnt-8076ca679843
        • https://docs.mongodb.com/realm/authentication/custom-jwt
        • https://hasura.io/blog/best-practices-of-using-jwt-with-graphql/
        • https://tools.ietf.org/html/rfc7519
        • https://medium.com/dataseries/public-claims-and-how-to-validate-a-jwt-1d6c81823826
        • https://auth0.com/docs/tokens/json-web-tokens
        "},{"location":"kaniko/","title":"kaniko","text":"

        \"kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster. kaniko doesn't depend on a Docker daemon and executes each command within a Dockerfile completely in userspace. This enables building container images in environments that can't easily or securely run a Docker daemon, such as a standard Kubernetes cluster.\" - https://github.com/GoogleContainerTools/kaniko

        "},{"location":"keel/","title":"keel","text":"

        \"Automated Kubernetes deployment updates\" - https://github.com/keel-hq/keel

        \"Kubectl is the new SSH. If you are using it to update production workloads, you are doing it wrong.\" - https://keel.sh/

        "},{"location":"keybase/","title":"keybase","text":"

        Keybase is a free, open source security app. It's also a public directory of people.

        • Keybase.io
        • Introducing the Keybase filesystem
        • Github.com/Keybase
        "},{"location":"keybase/#my-id","title":"My ID","text":"
        • https://keybase.io/hoherd
        • keybase id hoherd
        "},{"location":"kubernetes/","title":"kubernetes","text":"

        \"Kubernetes is an open-source platform for automating deployment, scaling, and operations of application containers across clusters of hosts, providing container-centric infrastructure.\" - https://kubernetes.io/docs/whatisk8s

        "},{"location":"kubernetes/#glossary","title":"Glossary","text":"

        More terms in the k8s glossary: https://kubernetes.io/docs/reference/glossary/

        • Container Network Interface (CNI) - https://github.com/containernetworking/cni
        • Container Runtime Interface (CRI) - https://github.com/containerd/cri
        • Container Storage Interface (CSI) - https://github.com/container-storage-interface/spec
        • Custom Resource Definition (CRD)
        • Horizontal Pod Autoscaling (HPA)
        "},{"location":"kubernetes/#cli-usage","title":"cli usage","text":""},{"location":"kubernetes/#learn-about-kubernetes","title":"Learn about kubernetes","text":"
        kubectl explain roles\n
        "},{"location":"kubernetes/#show-what-api-permissions-you-have","title":"Show what API permissions you have","text":"
        $ kubectl auth can-i --list\nResources                                       Non-Resource URLs   Resource Names   Verbs\n*.*                                             []                  []               [*]\n                                                [*]                 []               [*]\nselfsubjectaccessreviews.authorization.k8s.io   []                  []               [create]\nselfsubjectrulesreviews.authorization.k8s.io    []                  []               [create]\n                                                [/api/*]            []               [get]\n                                                [/api]              []               [get]\n                                                [/apis/*]           []               [get]\n                                                [/apis]             []               [get]\n                                                [/healthz]          []               [get]\n                                                [/healthz]          []               [get]\n                                                [/livez]            []               [get]\n                                                [/livez]            []               [get]\n                                                [/openapi/*]        []               [get]\n                                                [/openapi]          []               [get]\n                                                [/readyz]           []               [get]\n                                                [/readyz]           []               [get]\n                                                [/version/]         []               [get]\n                                                [/version/]         []               [get]\n                                                [/version]          []               [get]\n                                                [/version]          []               [get]\n
        "},{"location":"kubernetes/#multiple-kubernetes-client-configs","title":"Multiple kubernetes client configs","text":"

        The default config is ~/.kube/config, but if you want to use multiple configs you can do this:

        export KUBECONFIG=\"${HOME}/code/kubespray/artifacts/admin.conf:${HOME}/.kube/config\"\n

        I have seen weird problems when the order of configs is changed, such as certificate-authority-data and client-certificate-data being missing.

        "},{"location":"kubernetes/#kubeadm","title":"kubeadm","text":"

        \"kubeadm: easily bootstrap a secure Kubernetes cluster.\" - kubeadm --help

        • https://github.com/kubernetes/kubeadm
        "},{"location":"kubernetes/#show-your-kubeadm-tokens","title":"Show your kubeadm tokens","text":"
        $ sudo kubeadm token list\nTOKEN                     TTL       EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS\nubyc9a.1eq2ihwtnz7c7c9e   23h       2018-05-24T16:19:33-04:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token\n

        See sudo kubeadm token -h for more usage.

        "},{"location":"kubernetes/#kubectl","title":"kubectl","text":"

        \"kubectl controls the Kubernetes cluster manager.\" - kubectl --help

        • https://github.com/kubernetes/kubectl

        • kubectl api-resources - show all resource types with their short-hand versions.

        • kubectl completion -h - show how to configure completion for your shell.
        • kubectl config get-contexts - show which k8s configuration contexts you can control.
        • kubectl config use-context foo - switch to the foo context.
        • kubectl get nodes - show the nodes in the k8s cluster.
        • kubectl get pods - show deployed pods. there can be many pods per deployment.
        • kubectl get pods -n kube-system - show pods in a specific namespace.
        • kubectl get pods,hpa,deployment --all-namespaces - get several resource types at once, from all namespaces
        • kubectl describe pod foo
        • kubectl get deployment
        • kubectl describe deployment foo
        • kubectl get ns - show namespaces.
        • kubectl get pv - show physical volumes.
        • kubectl get svc -n kube-system - show a table of important details about running services in the kube-system namespace.
        • kubectl get pods -o yaml - show the yaml configs for the currently running status of every pod.
        • kubectl explain pods.spec - show documentation about pod specifications.
        • kubectl describe pods/echoserver - describe the pod whose Name is echoserver.
        • kubectl get rs - show replica sets.
        • kubectl expose deployment <deployment_name> --type=NodePort - create a service for the given deployment.
        • kubectl scale deployment <deployment_name> --replicas=5 - scale a deployment to 5 pods.
        • kubectl rollout history deployment <deployment_name>
        • kubectl get cm - get a list of config maps.
        • kubectl get apiservices - get a list of api service endpoints. Show -o yaml to view status about availability, endpoint, etc..
        "},{"location":"kubernetes/#working-with-several-configs","title":"Working with several configs","text":"

        Sometimes you want to have individual configs, such as when you are using configs that are updated by other engineers and pulled down via git, and sometimes you want to have one monolithic config, such as when you are using a tool that cannot easily work with multiple configs.

        "},{"location":"kubernetes/#use-multiple-configs-via-alias","title":"Use multiple configs via alias","text":"

        This is a great method for requiring explicit selection of the environment, which is good for not accidentally operating in prod. Using KUBECONFIG also allows you to set different k8s environments per terminal session, which is great for doing comparisons across clusters.

        alias k8s-foo-prod=\"export KUBECONFIG=$HOME/.kube/foo-prod-config ; kubectl config set-context foo-prod --namespace=default ;\"\n

        See also Google Cloud for more examples like this related to GCP.

        "},{"location":"kubernetes/#merge-several-configs","title":"Merge several configs","text":"

        This produces a monolithic file named kube_config which can be moved to ~/.kube/config. It merges the contents of your existing ~/.kube/config file.

        REPO_DIR=/path/to/repo/\nexport KUBECONFIG=\"${HOME}/.kube/config\"\nfor X in $(find \"$REPO_DIR/kube_config.d\" -name '*.config') ; do\n    KUBECONFIG+=\":$X\"\ndone\nkubectl config view --flatten > kube_config\necho \"Config file successfully created at ${PWD}/kube_config\"\necho \"Run: mv -i ${PWD}/kube_config ${HOME}/.kube/config\"\n
        "},{"location":"kubernetes/#create-a-kubeconfig-env-var-from-several-config-files","title":"Create a KUBECONFIG env var from several config files","text":"

        This produces a KUBECONFIG that looks like file1:file2:file3

        REPO_DIR=/path/to/repo/\nKUBECONFIG=\"${HOME}/.kube/config\"\nfor config in $(find \"$REPO_DIR/kube_config.d\" -name '*.config') ; do\n    KUBECONFIG+=\":$config\"\ndone\necho \"KUBECONFIG=${KUBECONFIG}\" ;\n
        "},{"location":"kubernetes/#show-nodes-and-their-taints","title":"Show nodes and their taints","text":"
        kubectl get nodes --output 'jsonpath={range $.items[*]}{.metadata.name} {.spec.taints[*]}{\"\\n\"}{end}'\n
        "},{"location":"kubernetes/#drain-and-cordon-a-node","title":"Drain and cordon a node","text":"

        Do this before deleting or reloading a node.

        kubectl drain --ignore-daemonsets --force --delete-emptydir-data \"$NODE_NAME\"\n
        "},{"location":"kubernetes/#drain-all-but-the-top-20-nodes-in-some-node-pool-selected-by-most-cpu-usage","title":"Drain all but the top 20 nodes in some node-pool selected by most CPU usage","text":"
        kubectl top nodes --sort-by=cpu |\nawk '/node-pool-identifiable-string/ {print $1}' |\ntail -n +20 |\nxargs kubectl drain --ignore-daemonsets --force --delete-emptydir-data\n
        "},{"location":"kubernetes/#show-namespaces-and-how-many-hours-old-they-are","title":"Show namespaces and how many hours old they are","text":"
        kubectl get namespace --sort-by=\".metadata.creationTimestamp\" -o json |\njq -r '\n  .items[] |\n  ((now - (.metadata.creationTimestamp | fromdateiso8601))/3600 | floor) as $hours_old |\n  \"\\(.metadata.name) \\($hours_old)\"\n'\n
        "},{"location":"kubernetes/#show-pods-sorted-by-creation-time","title":"Show pods, sorted by creation time","text":"

        Only descending sort is supported

        kubectl get pods --sort-by=.metadata.creationTimestamp\n

        To sort ascending you can use awk and tac (which is cat in reverse)

        kubectl get pods --sort-by=.metadata.creationTimestamp |\nawk 'NR == 1; NR > 1 {print | \"tac\"}'\n
        "},{"location":"kubernetes/#show-pods-that-are-not-running","title":"Show pods that are not running","text":"
        kubectl get pods --all-namespaces --field-selector='status.phase!=Running' --sort-by=.metadata.creationTimestamp\n
        "},{"location":"kubernetes/#show-pods-that-are-not-running-or-did-not-exit-cleanly","title":"Show pods that are not running or did not exit cleanly","text":"
        kubectl get pods --all-namespaces --field-selector='status.phase!=Running,status.phase!=Succeeded' --sort-by=.metadata.creationTimestamp\n
        "},{"location":"kubernetes/#show-pods-that-are-terminating","title":"Show pods that are terminating","text":"

        Unfortunately \"Terminating\" shows up as a status, but is not a phase, so we have to jump through some hoops to show this list. Here's one way to do this:

        kubectl get pods -A |\nawk '$4 == \"Terminating\" {print $1,$2}' |\nwhile read -r NS POD ; do\n  kubectl get pod \"$POD\" -n \"$NS\" -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,TERMINATION_GRACE_PERIOD:.spec.terminationGracePeriodSeconds\ndone |\ncolumn -t |\nsort -u\n

        And the output will be something like:

        NAMESPACE                       NAME                                                TERMINATION_GRACE_PERIOD\notv-blazing-ray-3043            blazing-ray-3043-miner-7556f86b76-8mpdj            600\notv-gravitational-century-8705  gravitational-century-8705-miner-66b6dd97cc-c2mqq  600\notv-lunar-nova-0800             lunar-nova-0800-miner-86684cd6f8-d79wm             600\n
        "},{"location":"kubernetes/#show-all-images-referenced-by-your-k8s-manifests","title":"Show all images referenced by your k8s manifests","text":"
        kubectl get pods --all-namespaces -o jsonpath=\"{..image}\" |\ntr -s '[[:space:]]' '\\n' |\nsort |\nuniq -c |\nsort -n\n
        "},{"location":"kubernetes/#show-a-list-of-containers-formatted-as-pod_name-container_name","title":"Show a list of containers formatted as pod_name container_name","text":"
        kubectl get pods -n \"$NS\" -o json |\n  jq -r '.items[] | .metadata.name as $podname | .spec.containers[] | \"\\($podname) \\(.name)\"' |\n  column -t\n
        "},{"location":"kubernetes/#show-a-list-of-containers-formatted-as-pod_managerpod_name-container_name","title":"Show a list of containers formatted as pod_manager/pod_name container_name","text":"

        When you need to check all of the containers of pods in a given pod manager (sts, ds, deployment), you need a list that is formatted in a very specific way.

        For instance, to get a list of containers inside the prometheus sts pods:

        kubectl get sts -l component=prometheus -n \"$NS\" -o json |\n  jq -r '.items[] | .kind as $kind | .metadata.name as $name | .spec.template.spec.containers[] | \"\\($kind)/\\($name) \\(.name)\"' |\n  column -t\n

        Which produces the output:

        StatefulSet/demo-prometheus  configmap-reloader\nStatefulSet/demo-prometheus  prometheus\n

        This can then be fed into anything needing such syntax, for example kubectl exec to check the runtime env of these containers:

        $ kubectl get sts -l component=prometheus -n $NS -o json |\n  jq -r '.items[] | .kind as $kind | .metadata.name as $name | .spec.template.spec.containers[] | \"\\($kind)/\\($name) \\(.name)\"' |\n  while read -r p c ; do echo \"$p $c $(kubectl -n $NS exec $p -c $c -- env | grep '^HOSTNAME=')\" ; done ;\nStatefulSet/demo-prometheus configmap-reloader HOSTNAME=demo-prometheus-1\nStatefulSet/demo-prometheus prometheus HOSTNAME=demo-prometheus-1\n

        That's obviously a contrived example, but the real lesson here is that it is possible to iterate over deeply nested json data while referencing values from higher levels by storing those higher levels as variables.

        "},{"location":"kubernetes/#decode-a-secret","title":"Decode a secret","text":"

        Use built in base64 decoding like this:

        kubectl get secret -n \"${NAMESPACE}\" \"${SECRET_NAME}\" -o go-template='{{ printf \"%s\\n\" (.data.password | base64decode) }}'\n

        Things get tricky when you have a dot in the key name:

        kubectl get secret -n \"${NAMESPACE}\" \"${SECRET_NAME}\" -o go-template='{{ printf \"%s\\n\" (index .data \"pgbouncer.ini\" | base64decode) }}'\n

        Or you can use -o jsonpath with an external base64 decoder:

        kubectl get secret -n \"${NAMESPACE}\" \"${SECRET_NAME}\" -o jsonpath='{.data.pgbouncer\\.ini}' | base64 -d\n

        Alternatively you can use jq, which has the cleanest syntax when accessing keys with dots in the name:

        kubectl get secret -n \"${NAMESPACE}\" \"${SECRET_NAME}\" -o json | jq -r '.data[\"pgbouncer.ini\"]' | base64 -d\n
        "},{"location":"kubernetes/#decode-ssl-secrets-and-show-info-about-the-certificates","title":"Decode SSL secrets and show info about the certificates","text":"
        kubectl get secret -n istio-system istio.default -o json |\njq -r '.data | keys[] as $k | \"\\($k) \\(.[$k])\"' |\ngrep cert |\nwhile read -r k v ; do\n  echo \"------ $k ------\"\n  echo -n \"$v\" |\n  base64 -d |\n  openssl x509 -noout -subject -issuer -dates\ndone\n

        Example output:

        ------ cert-chain.pem ------\nsubject=\nissuer= /O=cluster.local\nnotBefore=Aug 10 13:55:50 2022 GMT\nnotAfter=Nov  8 13:55:50 2022 GMT\n------ root-cert.pem ------\nsubject= /O=cluster.local\nissuer= /O=cluster.local\nnotBefore=Sep 29 13:52:55 2021 GMT\nnotAfter=Sep 27 13:52:55 2031 GMT\n
        "},{"location":"kubernetes/#watch-whats-going-on-in-your-cluster","title":"Watch what's going on in your cluster","text":"
        watch kubectl get pods --all-namespaces -o wide\n

        or

        kubectl get pods --all-namespaces -o wide -w\n
        "},{"location":"kubernetes/#show-all-pods-and-their-containers-requests-and-limits","title":"Show all pods and their container's requests and limits","text":"
        kubectl get pods --all-namespaces -o json |\njq -r '\n  .items[] |\n  .metadata.namespace as $namespace |\n  .metadata.name as $pod_name |\n  .spec.containers[] |\n  [$namespace, $pod_name, .name, (.resources | tostring)] |\n  @tsv\n' | column -t -s$'\\t'\n

        This will produce output like the following, with the columns namespace, pod, container, and resources as a json blob:

        development  gamehouse-nats-0                  nats                {\"limits\":{\"cpu\":\"250m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"75m\",\"memory\":\"30Mi\"}}\ndevelopment  gamehouse-nats-2                  metrics             {\"limits\":{\"cpu\":\"250m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"75m\",\"memory\":\"30Mi\"}}\ndevelopment  gamehouse-nginx-85885cbb75-m5t58  nginx               {\"limits\":{\"cpu\":\"100m\",\"memory\":\"10Mi\"},\"requests\":{\"cpu\":\"80m\",\"memory\":\"7Mi\"}}\ndevelopment  gamehouse-nginx-85885cbb75-wdmhf  nginx               {\"limits\":{\"cpu\":\"100m\",\"memory\":\"10Mi\"},\"requests\":{\"cpu\":\"80m\",\"memory\":\"7Mi\"}}\ndevelopment  gamehouse-prometheus-0            configmap-reloader  {\"limits\":{\"cpu\":\"100m\",\"memory\":\"25Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"25Mi\"}}\ndevelopment  gamehouse-prometheus-0            prometheus          {\"limits\":{\"cpu\":\"3\",\"memory\":\"20Gi\"},\"requests\":{\"cpu\":\"1\",\"memory\":\"4Gi\"}}\n
        "},{"location":"kubernetes/#show-daemonsets-that-are-not-up-to-date","title":"Show daemonsets that are not up to date","text":"
        kubectl get daemonset -A | awk '$3 != $6 {print}'\n
        "},{"location":"kubernetes/#watch-events-in-a-given-namespace","title":"Watch events in a given namespace","text":"
        kubectl -n kube-system get events --field-selector type=Warning -w\n

        Or format the event messages with more useful information (really wide output)

        kubectl get events -w -o custom-columns=FirstSeen:.firstTimestamp,LastSeen:lastTimestamp,Kind:involvedObject.kind,Name:.involvedObject.name,Count:.count,From:.source.component,Type:.type,Reason:.reason,Message:.message\n
        "},{"location":"kubernetes/#show-all-containers-for-each-pod-matching-a-label","title":"Show all containers for each pod matching a label","text":"
        kubectl -n kube-system get pod -l k8s-app=kube-dns -o=jsonpath='{range .items[*]}{\"\\n\"}{.metadata.name}{\":\\n\\t\"}{range .spec.containers[*]}{.name}{\":\\t\"}{.image}{\"\\n\\t\"}{end}{\"\\n\"}{end}'\n
        "},{"location":"kubernetes/#show-a-list-of-everything-in-a-namespace","title":"Show a list of everything in a namespace","text":"
        NS=kube-system\nkubectl get all -n \"$NS\" --output 'jsonpath={range $.items[*]}{.kind} {.metadata.name}{\"\\n\"}{end}' |\ngrep -v '^List $'  # exclude empty namespace\n
        "},{"location":"kubernetes/#show-logs-for-a-given-pod-since-n-hours-ago","title":"Show logs for a given pod since N hours ago","text":"
        pod_name=httpbin\nkubectl logs $pod_name --since=12h\n

        The --since arg can take [s]econds, [m]inutes and [h]ours. Longer durations should use --since-time=<rfc3339 timestamp>

        "},{"location":"kubernetes/#show-logs-for-a-given-pod-since-a-given-date","title":"Show logs for a given pod since a given date","text":"

        The --since-time arg takes an RFC3339 datetime. EG: 1991-08-03T13:31:46-07:00. This format requirement is strict, and is incompatible with the GNU date --rfc-3339=seconds output, which uses a space instead of a T to separate the full date from the full time, and with +%FT%T%z, which does not include a colon between the hours and minutes of the timezone offset.

        pod_name=httpbin\nkubectl logs $pod_name --since-time=\"$(date -Iseconds -d '-5 weeks')\"\n
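
        To make the format differences concrete, here is example output of the three GNU date invocations mentioned above; only the last is accepted by --since-time:

        $ date --rfc-3339=seconds   # space separator: rejected\n2024-01-15 10:30:00-08:00\n$ date '+%FT%T%z'           # no colon in the offset: rejected\n2024-01-15T10:30:00-0800\n$ date -Iseconds            # RFC3339 compliant: accepted\n2024-01-15T10:30:00-08:00\n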
        "},{"location":"kubernetes/#output-custom-column-names","title":"Output custom column names","text":"
        $ kubectl get pvc --all-namespaces -o custom-columns='NAME:metadata.name,SIZE:spec.resources.requests.storage'\nNAME                   SIZE\nfoo-logs               256Gi\ntest-volume-2          1Gi\nsome-awesome-service   5Gi\n\n$ kubectl get pods -o custom-columns='NAME:.metadata.name,START_TIME:.status.startTime,GITLAB_USER_EMAIL:.spec.containers[0].env[?(@.name == \"GITLAB_USER_EMAIL\")].value' | grep -E 'NAME|jobs'\nNAME                                                 START_TIME             GITLAB_USER_EMAIL\nrunner-ppzmy1zx-project-11548552-concurrent-0q2pmk   2019-10-23T17:00:56Z   user2@example.com\nrunner-ppzmy1zx-project-11548552-concurrent-1f7nfx   2019-10-23T17:04:27Z   user1@example.com\nrunner-ppzmy1zx-project-11548552-concurrent-2n84rv   2019-10-23T17:04:19Z   user1@example.com\n
        "},{"location":"kubernetes/#perform-a-restart-of-a-service-daemonset-or-statefulset","title":"Perform a restart of a service, daemonset or statefulset","text":"
        DEPLOYMENT_NAME=gibson_garbagefile_seeker\nkubectl rollout restart deployment $DEPLOYMENT_NAME\n
        "},{"location":"kubernetes/#run-a-cronjob-out-of-schedule","title":"Run a cronjob out of schedule","text":"
        kubectl create job --from=cronjob/download-cat-pix download-cat-pix-manual-run\n
        "},{"location":"kubernetes/#create-a-yaml-file-for-a-resource-type","title":"Create a yaml file for a resource type","text":"

        You can generate yaml for a variety of entities without having to create them on the server. Each entity requires different syntax, so you have to work through the error messages to get to a final solution.

        https://kubernetes.io/docs/reference/kubectl/conventions/#generators

        $ kubectl create --dry-run=client -o yaml cronjob --schedule='15 * * * *' --image=image-name:1.2.3 job-name\napiVersion: batch/v1beta1\nkind: CronJob\nmetadata:\n  creationTimestamp: null\n  name: job-name\nspec:\n  jobTemplate:\n    metadata:\n      creationTimestamp: null\n      name: job-name\n    spec:\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n          - image: image-name:1.2.3\n            name: job-name\n            resources: {}\n          restartPolicy: OnFailure\n  schedule: 15 * * * *\nstatus: {}\n
        "},{"location":"kubernetes/#installations","title":"Installations","text":"

        The standard way to install k8s by yourself is to use kubeadm.

        "},{"location":"kubernetes/#manually-on-ubuntu-16","title":"Manually on Ubuntu 16","text":"
        ## as root\nswapoff -a # https://github.com/kubernetes/kubernetes/issues/53533\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -\ncurl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -\necho \"deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable\" > /etc/apt/sources.list.d/docker.list\necho \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" > /etc/apt/sources.list.d/kubernetes.list\napt update\napt dist-upgrade -y\napt install -y apt-transport-https ca-certificates curl software-properties-common\napt install -y docker-ce\napt install -y kubelet kubeadm kubectl\nkubeadm init\n

        kubeadm init guide: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#instructions

        "},{"location":"kubernetes/#dns","title":"DNS","text":"

        Kubernetes lets you resolve resources via DNS

        • https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
        • https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/
        "},{"location":"kubernetes/#enable-k8s-dns-logging","title":"Enable k8s dns logging","text":"
        kubectl -n kube-system edit configmap coredns\n## Add 'log' to the 'Corefile' config\n
        "},{"location":"kubernetes/#dns-entity-map","title":"DNS Entity map","text":"
        • Kubernetes Service: <service>.<namespace>.svc.cluster.local. (eg: httpbin.default.svc.cluster.local.)
        kubectl get svc --all-namespaces -o jsonpath='{range .items[*]}{.metadata.name}{\".\"}{.metadata.namespace}{\".svc.cluster.local.\\n\"}{end}'\n

        Or with jq:

        kubectl get svc --all-namespaces -o json |\njq -r  '.items[] | \"\\(.metadata.name).\\(.metadata.namespace).svc.cluster.local.\"'\n

        And if you want to also add port numbers:

        kubectl get svc --all-namespaces -o json |\njq -r '.items[] | \"\\(.metadata.name).\\(.metadata.namespace).svc.cluster.local.\" as $base | .spec.ports[] | \"\\($base):\\(.port)\"'\n
        • With core-dns you can run dig SRV +short *.*.svc.cluster.local. to get a list of all services.
        • Kubernetes service srv records: _${service_port_name}._${protocol}.${service}.${namespace}.svc.cluster.local. (eg: _http._tcp.httpbin.default.svc.cluster.local.) See the example query after this list.
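
        From a pod inside the cluster, you can query one of these SRV records directly. The httpbin service here is just the example from above; SRV answers are formatted as priority, weight, port, target, so this also reveals the service's port number. The output will look something like:

        $ dig SRV +short _http._tcp.httpbin.default.svc.cluster.local.\n0 100 80 httpbin.default.svc.cluster.local.\n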
        "},{"location":"kubernetes/#crictl","title":"crictl","text":"

        crictl is a tool to inspect the local Container Runtime Interface (CRI)

        user@k3snode:~$ sudo crictl pods\nPOD ID          CREATED       STATE  NAME                            NAMESPACE    ATTEMPT\n688ecc2d9ce4d   2 weeks ago   Ready  log-fluentd-676d9d7c9d-ghz5x    default      8\nee1d8b0593e71   2 weeks ago   Ready  tiller-deploy-677f9cb999-rx6qp  kube-system  7\n1153f4c0bd1f4   2 weeks ago   Ready  coredns-78fcdf6894-qsl74        kube-system  8\n5be9c530c8cdc   2 weeks ago   Ready  calico-node-59spv               kube-system  10\nd76d211830064   2 weeks ago   Ready  kube-proxy-cqdvn                kube-system  104\naa1679e0bfcca   2 weeks ago   Ready  kube-scheduler-s1               kube-system  10\nef64eea461bc0   2 weeks ago   Ready  kube-controller-manager-s1      kube-system  10\n14ec5abe1e3ab   2 weeks ago   Ready  kube-apiserver-s1               kube-system  11\nd4ce465a0942f   2 weeks ago   Ready  etcd-s1                         kube-system  10\n
        "},{"location":"kubernetes/#cloud-provider-versions","title":"Cloud Provider versions","text":"
        • AKS: https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions#aks-kubernetes-release-calendar
        • EKS: https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-release-calendar
        • GKE: https://cloud.google.com/kubernetes-engine/docs/release-schedule
        • Upstream: https://en.wikipedia.org/wiki/Kubernetes#History
        "},{"location":"kubernetes/#code-snips","title":"Code snips","text":""},{"location":"kubernetes/#show-all-ingresses-and-what-hostnames-they-handle","title":"Show all ingresses and what hostnames they handle","text":"

        This is useful when you have a lot of ingresses or very long hostnames which cause them to be hidden by an ellipsis with normal or even wide output.

        kubectl get ingresses -n \"$NS\" --output 'jsonpath={range $.items[*]}{.metadata.name}{\":\"}{range @.spec.rules[*]}{\"\\n\\t\"}{.host}{end}{\"\\n\"}{end}'\n

        This will output a list like

        rodent-ingress:\n    mouse.example.com\n    hamster.example.com\nbird-ingress:\n    parrot.example.com\n    swallow.example.com\n    owl.example.com\n
        "},{"location":"kubernetes/#parse-swaggerjson-for-api-definitions","title":"Parse swagger.json for API definitions","text":"

        This is useful when manually writing helm chart templates to handle a range of k8s versions. (Keep an eye on https://github.com/helm/helm/issues/9765 though for hopefully a better way than manually doing this.)

        # Download a range of swagger.json files named by version. EG: v1.18.0.json\nfor X in {15..22} ;\n  do ver=\"v1.$X.0\"\n  curl -LsSo \"${ver}.json\" \"https://raw.githubusercontent.com/kubernetes/kubernetes/${ver}/api/openapi-spec/swagger.json\"\ndone\n\n# Parse these into a text list of API versions supported by the version. EG: v1.18.0-definitions.txt\nfor X in v1.* ; do\n  jq -r '.definitions | keys | .[]' $X > ${X/.json/}-definitions.txt\ndone\n\n# Then you can grep for a definition to see what versions support it\ngrep 'Ingress$' *definitions.txt | grep -vE 'LoadBalancer'\n
        "},{"location":"kubernetes/#use-jq-to-find-zombie-pods","title":"Use jq to find zombie pods","text":"

        If the base container shuts down, sometimes the istio sidecar can continue to run. You can find this condition with:

        kubectl get pods -A -o json | jq '\n  .items[] |\n  select(.status.containerStatuses[].name == \"base\" and .status.containerStatuses[].state.terminated.exitCode == 0) |\n  select(.status.containerStatuses[].name == \"istio-proxy\" and .status.containerStatuses[].state.terminated.exitCode == null) |\n  {\n    \"name\": .metadata.name,\n    \"namespace\": .metadata.namespace,\n    \"status\": [\n      .status.containerStatuses[] |\n      {\n        \"name\": .name,\n        \"exit_code\": .state.terminated.exitCode\n      }\n    ]\n  }\n'\n
        "},{"location":"kubernetes/#use-jq-to-find-all-pods-with-a-specific-container-state","title":"Use jq to find all pods with a specific container state","text":"
        kubectl get pods -A -o json | jq '\n  .items[]? |\n  select(.status.containerStatuses[]?.state.waiting.reason == \"CreateContainerConfigError\") |\n  .metadata.name\n'\n
        "},{"location":"kubernetes/#use-jq-to-find-pods-that-have-problematic-phases","title":"Use jq to find pods that have problematic phases","text":"
        kubectl get pods -A -o json |\njq -c '\n  .items[] |\n  select(.status.phase|test(\"Pending|Unknown\")) |\n  [.status.phase, .metadata.creationTimestamp, .metadata.namespace, .metadata.name]\n'\n
        "},{"location":"kubernetes/#linux-kernel-namespaces","title":"linux kernel namespaces","text":"

        Linux kernel namespaces are part of the magic that allows containers to run, and kubernetes pods take this a step further by allowing multiple containers to run inside a pod, and share only some of the namespaces. Which ones?

        diff -t -W 65 -y ns-container-1.txt ns-container-2.txt\n$ readlink /proc/$$/task/*/ns/*   $ readlink /proc/$$/task/*/ns/*\ncgroup:[4026531835]               cgroup:[4026531835]\nipc:[4026532832]                  ipc:[4026532832]\nmnt:[4026533233]                | mnt:[4026533326]\nnet:[4026532835]                  net:[4026532835]\npid:[4026533325]                | pid:[4026533328]\npid:[4026533325]                | pid:[4026533328]\nuser:[4026531837]                 user:[4026531837]\nuts:[4026533324]                | uts:[4026533327]\n
        "},{"location":"kubernetes/#links","title":"Links","text":"
        • https://kubernetes.io/docs/reference/kubectl/cheatsheet/
        • http://on-demand.gputechconf.com/gtc/2018/presentation/s8893-the-path-to-gpu-as-a-service-in-kubernetes.pdf
        • http://slack.kubernetes.io/
        • https://blog.hypriot.com/post/setup-kubernetes-raspberry-pi-cluster
        • https://docs.tigera.io/calico/latest/about: \"Calico is a networking and security solution that enables Kubernetes workloads and non-Kubernetes/legacy workloads to communicate seamlessly and securely.\"
        • https://github.com/kelseyhightower/kubernetes-the-hard-way: \"The target audience for this tutorial is someone planning to support a production Kubernetes cluster and wants to understand how everything fits together.\"
        • https://github.com/kinvolk/kubernetes-the-hard-way-vagrant: \"A port of Kelsey Hightower's 'Kubernetes the Hard Way' tutorial to Vagrant.\"
        • https://github.com/kubernetes/dashboard#kubernetes-dashboard
        • https://github.com/kubernetes/kompose: Compose to Kubernetes
        • https://kubernetes.io/docs/concepts/cluster-administration/addons/
        • https://kubernetes.io/docs/concepts/cluster-administration/logging/
        • https://kubernetes.io/docs/concepts/services-networking/network-policies/
        • https://kubernetes.io/docs/concepts/workloads/
        • https://kubernetes.io/docs/getting-started-guides/minikube/
        • https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm
        • https://www.cncf.io/certification/expert/CKA/
        • https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-details
        • https://github.com/ClusterHQ/flocker: Flocker is an open-source Container Data Volume Manager for your Dockerized applications.
        • https://cloudplatform.googleblog.com/2018/05/Beyond-CPU-horizontal-pod-autoscaling-comes-to-Google-Kubernetes-Engine.html
        • https://github.com/vapor-ware/ksync: Sync local filesystem with a target container
        • https://metallb.universe.tf: For those of us not running in a cloud, MetalLB can serve as a k8s native LB.
        • https://k3s.io: Lightweight Kubernetes. Easy to install. A binary of less than 40 MB. Only 512 MB of RAM required to run.
        • https://learnk8s.io/production-best-practices/: A curated checklist of best practices designed to help you release to production.
        • https://kind.sigs.k8s.io/docs/user/quick-start#multinode-clusters: Multi-node kubernetes clusters running within docker
        • https://www.stackrox.com/categories/eks-vs-gke-vs-aks: Available cloud versions of hosted k8s and notable changes
        • https://velero.io: cluster backups
        • kube-dns-autoscaler: https://gist.github.com/MrHohn/1198bccc2adbd8cf3b066ab37ccd8355 / https://github.com/kubernetes-sigs/cluster-proportional-autoscaler
        • Kubernetes pods /etc/resolv.conf ndots:5 option and why it may negatively affect your application performances: https://pracucci.com/kubernetes-dns-resolution-ndots-options-and-why-it-may-affect-application-performances.html
        • https://kubernetes.io/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/ \"Kubernetes admission controllers are plugins that govern and enforce how the cluster is used. They can be thought of as a gatekeeper that intercept (authenticated) API requests and may change the request object or deny the request altogether.\"
        • https://tunein.engineering/implementing-leader-election-for-kubernetes-pods-2477deef8f13: Leader election for Kubernetes pods
        • https://medium.com/kubernetes-tutorials/making-sense-of-taints-and-tolerations-in-kubernetes-446e75010f4e
        • https://web.archive.org/web/20190306132233/https://supergiant.io/blog/learn-how-to-assign-pods-to-nodes-in-kubernetes-using-nodeselector-and-affinity-features/
        • https://fluxcd.io: \"Open and extensible continuous delivery solution for Kubernetes. Powered by GitOps Toolkit.\"
        • https://k8slens.dev: \"Lens is the only IDE you\u2019ll ever need to take control of your Kubernetes clusters.\"
        • https://kustomize.io: \"Kubernetes native configuration management\"
        • https://github.com/kubernetes-sigs/kustomize/tree/master/examples: Kustomize examples
        • https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/: Downward API allows you to mount k8s spec information as env vars
        • https://isovalent.com/blog/post/2021-12-08-ebpf-servicemesh: How eBPF will solve Service Mesh - Goodbye Sidecars
        • https://www.kubeshark.co: Basically tcpdump and wireshark (ethereal) for k8s clusters. 2 nodes free as of 2024-10-17.
        "},{"location":"launchd/","title":"launchd","text":"

        launchd is macOS's init system.

        "},{"location":"launchd/#example","title":"Example","text":""},{"location":"launchd/#watch-folder","title":"Watch Folder","text":"

        This user LaunchAgent would be placed into $HOME/Library/LaunchAgents/photo_processor.plist.

        We have to specify /bin/bash as the first ProgramArgument so OS X doesn't complain about DRM or Mach-O executable issues. This effectively limits us to the system bash 3.

        <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n  <dict>\n    <key>Label</key>\n    <string>photo_processor.sh</string>\n\n    <key>ProgramArguments</key><array>\n      <string>/bin/bash</string>\n      <string>/Users/hoherd/code/dho-bin/photo_processor.sh</string>\n    </array>\n\n    <key>WatchPaths</key>\n    <array>\n        <string>/Users/hoherd/Dropbox/yp/photo_queue/</string>\n    </array>\n\n  </dict>\n</plist>\n
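
        Once the file is in place you can activate it without logging out. A minimal sketch using the legacy launchctl interface and the path described above:

        launchctl load ~/Library/LaunchAgents/photo_processor.plist\n## and later, to deactivate it:\nlaunchctl unload ~/Library/LaunchAgents/photo_processor.plist\n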
        "},{"location":"launchd/#see-also","title":"See also","text":"
        • https://github.com/jordansissel/pleaserun
        "},{"location":"ldap/","title":"ldap","text":"

        Lightweight Directory Access Protocol

        "},{"location":"ldap/#acronyms-and-stuff","title":"Acronyms and stuff","text":"
        • o=organization
        • c=country
        • dn=distinguished_name
        • dc=domain_component
        • rdn=relative_distinguished_name
        • cn=common_name
        • uid=user_id
        "},{"location":"ldap/#tricks","title":"Tricks","text":""},{"location":"ldap/#show-the-whole-ldap-database","title":"Show the whole ldap database","text":"

        From the LDAP server

        slapcat\n
        "},{"location":"ldap/#show-pwdfailuretime-count-and-associated-uid","title":"Show pwdFailureTime count and associated uid","text":"
        sudo slapcat 2>/dev/null | egrep '^(pwdFailureTime|uid:)' | uniq -c -w 14 | grep -B1 pwdFailureTime\n
        "},{"location":"ldap/#show-ldap-and-local-user-account-info","title":"Show LDAP and local user account info","text":"
        getent passwd maxb\n
        "},{"location":"ldap/#search-an-ldap-tree","title":"Search an LDAP tree","text":"
        ldapsearch -x -b \"dc=example,dc=com\"\n

        This can be used when forwarding through ssh -L 3389:127.0.0.1:389 remotehost

        ldapsearch -x -h 127.0.0.1 -p 3389 -b \"dc=example,dc=com\"\n
        "},{"location":"ldap/#run-a-search-while-authenticated","title":"Run a search while authenticated","text":"

        This logs in as danielh and searches for a record with uid=robertc

        ldapsearch -x -b \"dc=example,dc=com\" -D \"uid=danielh,ou=people,dc=example,dc=com\" -W \"uid=robertc\"\n
        "},{"location":"ldap/#refresh-ldap-user-cache-on-centos-6","title":"Refresh LDAP user cache on CentOS 6","text":"
        nscd -i passwd ; nscd -i group ; /etc/init.d/lldpad restart ; /etc/init.d/nslcd restart ; /etc/init.d/nscd restart ;\n
        "},{"location":"ldap/#see-also","title":"See Also","text":"
        • Apache Directory Studio graphical interface for LDAP: https://directory.apache.org/studio/
        • A great series of articles on LDAP: http://www.ldapman.org/articles/
        • Ubuntu Server LDAP integration: https://help.ubuntu.com/community/LDAPClientAuthentication
        "},{"location":"lego/","title":"LEGO","text":"

        Lego STEM, including Mindstorms, Powered Up, etc.

        "},{"location":"lego/#robot-inventor","title":"Robot Inventor","text":"

        The 51515 is the successor to the EV3 31313. Its software is much more approachable and modern, but it appears to be less hackable than the 31313, since it has no USB or SD slot.

        "},{"location":"lego/#mindstorms-ev3","title":"Mindstorms EV3","text":"
        • EV3 programming software is available via cask: brew install --cask lego-mindstorms-ev3
        "},{"location":"lego/#micropython","title":"MicroPython","text":"

        In 2019 LEGO started shipping a microSD card for developing MicroPython using vscode. This setup is based on ev3dev, including having an ev3dev splash screen on boot and the default ev3dev ssh credentials. On the vscode side, one interesting thing is that when you install the LEGO\u00ae Mindstorms\u00ae EV3 MicroPython extension, it also installs ev3dev-browser.

        "},{"location":"lego/#ev3-links","title":"EV3 links","text":"
        • Robot Operating System for EV3
        • https://education.lego.com/en-us/support/mindstorms-ev3/python-for-ev3
        • http://ev3lessons.com/en
        • ev3dev: \"ev3dev is a Debian Linux-based operating system that runs on several LEGO\u00ae MINDSTORMS compatible platforms including the LEGO\u00ae MINDSTORMS EV3 and Raspberry Pi-powered BrickPi.\"
        • LEGO MINDSTORMS EV3 source code
        • BUILD A ROBOT: \"These robots give you the full EV3 experience, complete with building instructions, programming missions and the programming tool included in the new free EV3 Programmer App.\"
        • http://flltutorials.com
        • Community Gallery: Robot designs from lego mindstorm community.
        "},{"location":"lego/#great-ball-contraption","title":"Great Ball Contraption","text":"

        \"A great ball contraption (GBC) is a machine which receives soccer balls or basketballs from one module and passes them to another module, rather like a bucket brigade. Modules built according to the GBC standard can be assembled into a collaborative display without pre-planning or modification.\" - https://www.greatballcontraption.com

        "},{"location":"lego/#ball-specs","title":"Ball specs","text":"

        From http://www.planet-gbc.com/planet-gbc-tutorial-get-your-gbc-balls

        • Diameter: 14mm
        • Weight: ~1.5g
        "},{"location":"lego/#gbc-links","title":"GBC Links","text":"
        • https://www.greatballcontraption.com
        • http://www.planet-gbc.com
        • https://www.reddit.com/r/GreatBallContraption
        "},{"location":"lego/#lego-mario","title":"Lego Mario","text":"

        Lego Mario is an electronic physical lego game device that has an accelerometer and a colored barcode reader. It interfaces with a smartphone/tablet app to enhance the physical gaming experience with instructions, a scoreboard, etc. The barcodes that Mario reads have been reverse engineered and can be printed with a color-accurate printer.

        "},{"location":"lego/#videos","title":"Videos","text":"
        • https://www.youtube.com/playlist?list=PLz-GEJhc2xVdWHAlHnhTEK7g8Et25rgZY My (Daniel Hoherd) LEGO Robotics Youtube Playlist
        • https://www.youtube.com/playlist?list=PLA-qIpWJQgnowkY7u6TY7KLViXsyQqv6U Akiyuki LEGO GBC Modules Youtube Playlist
        "},{"location":"lego/#general-links","title":"General links","text":"
        • https://www.lego.com/en-us/mindstorms
        • BrickPi: \"This project combines the brains of a Raspberry Pi with the brawn of a LEGO MINDSTORMS NXT\"
        • https://www.raspberrypi.com/news/raspberry-pi-build-hat-lego-education: Control LEGO motors via Raspberry Pi
        • https://botbench.com
        • https://robotsquare.com
        • https://www.bricklink.com/v3/studio/download.page: \"Build, render, and create instructions\" with this CAD style LEGO software.
        • http://www.brickpile.com/wp-content/uploads/2015/10/brick-geometry-brickcon2015-compressed.pdf (935 KB): mm, Lego Draw Units, triangles and more! Lots of LEGO math for accuracy and precision.
        • https://github.com/virantha/bricknil: Library that supports programming Duplo Train Push & Go Motor (part 28743) and others.
        • https://youtu.be/I6Vnwi6oQYg: Toa Mata Band: Lego robot orchestra tribute to Depeche Mode - Everything Counts
        • https://brickset.com/buy: Deals on Lego sets
        • https://github.com/maarten-pennings/Lego-Mindstorms: technical info about Lego Mindstorms kits
        • https://www.code.pybricks.com: IDE for Powered Up lego tech. See also https://pybricks.com/install
        • https://lego.github.io/MINDSTORMS-Robot-Inventor-hub-API
        • https://pypi.org/project/mindstorms
        • https://antonsmindstorms.com/2021/01/14/advanced-undocumented-python-in-spike-prime-and-mindstorms-hubs
        • https://builderdude35.com
        "},{"location":"lego/#see-also","title":"See also","text":"
        • Robotics
        "},{"location":"lektor/","title":"Lektor","text":"

        \"A flexible and powerful static content management system for building complex and beautiful websites out of flat files\" - https://www.getlektor.com

        "},{"location":"lektor/#deploying-a-github-user-site-with-gh-pages","title":"Deploying a github user site with gh-pages","text":"

        Github user sites like danielhoherd.github.io must be served from the master branch, so the lektor source must live in a feature branch. Create a feature branch, then go into the github repo settings and protect that branch. When you run lektor deploy it will destroy all content in master and replace it with the static site. To make the deployment match a custom domain name, set up your whatever.lektorproject with something like the following config:

        [servers.github]\nname = github\nenabled = yes\ndefault = yes\ntarget = ghpages://danielhoherd/danielhoherd.github.io?cname=danielhoherd.com\n
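
        With a server block like that in place, building and deploying is a two-step sketch (assuming the lektor CLI is installed; github here is the server name from the config above):

        lektor build\nlektor deploy github\n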
        "},{"location":"lektor/#links","title":"Links","text":"
        • https://www.getlektor.com/docs/deployment/ghpages/
        "},{"location":"linksys/","title":"linksys","text":"

        Linksys makes a variety of networking devices. They were owned by Cisco from 2003 to 2013, and are now owned by Belkin.

        "},{"location":"linksys/#ea3500","title":"ea3500","text":"
        • http://www.linksys.com/us/support-product?pid=01t80000003K7bbAAC
        "},{"location":"linksys/#1140162464","title":"1.1.40.162464","text":"
        • Bug where if 5 GHz is enabled, 2.4 GHz sometimes will not be accessible. The only workaround is to use only 2.4 GHz or 5 GHz, not both. Even then the configs can sometimes end up in a funky state where neither will work, and you have to re-configure the wifi settings using wired ethernet.
        "},{"location":"linux-performance-monitoring/","title":"Linux Performance Monitoring","text":"

        Notes from the Linux Performance Monitoring talk at Velocity 2015 - Part 1, Part 2

        http://www.brendangregg.com/linuxperf.html

        "},{"location":"linux-performance-monitoring/#use","title":"USE","text":"
        • Utilization
        • Saturation
        • Errors
        "},{"location":"linux-performance-monitoring/#observability-tools","title":"Observability Tools","text":"
        • atop (atop uses the linux kernel event interface rather than sampling on screen updates, so it is better for viewing systems affected by short-lived processes)
        • htop
        • vmstat -Sm 1
        • iostat -xmdz 1
        • mpstat -P ALL 1
        • free -m
        • sar -n DEV 1
        • strace -tttT # very disruptive of system performance, slows system significantly
        • tcpdump
        • pidstat -t 1
        • pidstat -d
        • swapon -s
        • lsof
        • sar -n TCP,ETCP,DEV 1
        • collectl
        • dstat
        • strace 2>&1 | head -n 100 # since there's no strace -c N
        • ss
        • iptraf
        • slabtop
        • pcstat
        • perf
        • tiptop
        • rdmsr
        • perf-tools/execsnoop
        "},{"location":"linux-performance-monitoring/#benchmarking-tools","title":"Benchmarking tools","text":"
        • unixbench
        • sysbench
        • lmbench
        • fio
        • pchar
        • iperf
        "},{"location":"linux-performance-monitoring/#tuning-tools","title":"Tuning tools","text":"
        • sysctl
        • ulimit
        • chcpu
        "},{"location":"linux-performance-monitoring/#static-tools","title":"Static tools","text":""},{"location":"linux-performance-monitoring/#tracing","title":"Tracing","text":"
        • ftrace
        • iosnoop
        • iolatency
        • opensnoop
        • tpoint
        • funccount
        • funcgraph
        • kprobe
        • bytehist
        • stap
        "},{"location":"linux/","title":"linux","text":"

        \"Linux is a family of free and open-source software operating systems built around the Linux kernel.\" - https://en.wikipedia.org/wiki/Linux

        Most linux distros are built on GNU tools, and this article explains the importance of GNU in the linux ecosystem: https://www.gnu.org/gnu/why-gnu-linux.en.html

        Linux is part of the Unix family tree.

        "},{"location":"linux/#performance-monitoring","title":"Performance monitoring","text":"
        • Linux Load Averages: Solving the Mystery
        • Brendan Gregg's Linux Performance page
        • Notes from the Linux Performance Monitoring talk at Velocity 2015
        "},{"location":"linux/#tricks","title":"Tricks","text":""},{"location":"linux/#best-way-to-see-mounts","title":"Best way to see mounts","text":"

        There are a few ways to see mounts, but most of them will leave out little details in some cases. The best view of mounts is the /proc/self/mountinfo file.
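
        For example, to dump it for the current process:

        cat /proc/self/mountinfo\n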

        "},{"location":"linux/#determine-if-running-kernel-is-32-or-64-bit","title":"Determine if running kernel is 32 or 64 bit","text":"

        Works on x86 or ARM.

        getconf LONG_BIT\n
        "},{"location":"linux/#configure-a-system-to-reboot-on-kernel-panic","title":"Configure a system to reboot on kernel panic","text":"

        These lines should be added to sysctl.conf

        ## Reboot after 10 seconds if kernel panics\nkernel.panic = 10\n## Treat all oopses as panics\nkernel.panic_on_oops = 1\n
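
        To apply the same settings immediately without rebooting (sysctl -w accepts multiple assignments):

        sudo sysctl -w kernel.panic=10 kernel.panic_on_oops=1\n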
        "},{"location":"linux/#force-reboot-on-corrupt-system","title":"Force reboot on corrupt system","text":"

        For times that commands like reboot and shutdown are not available.

        echo 1 > /proc/sys/kernel/sysrq\necho b > /proc/sysrq-trigger\n
        "},{"location":"linux/#show-process-signals","title":"Show process signals","text":"

        This should work on other unixes too.

        trap -l\n
        "},{"location":"linux/#kernel-namespaces","title":"Kernel namespaces","text":"

        \"A namespace wraps a global system resource in an abstraction that makes it appear to the processes within the namespace that they have their own isolated instance of the global resource. Changes to the global resource are visible to other processes that are members of the namespace, but are invisible to other processes. One use of namespaces is to implement containers.\" - man namespaces

        \"Control cgroups, usually referred to as cgroups, are a Linux kernel feature which allow processes to be organized into hierarchical groups whose usage of various types of resources can then be limited and monitored.\" - man cgroups

        cgroup is one of the linux namespaces. (see man namespaces for more info.)

        "},{"location":"linux/#tools-and-stuff","title":"Tools and stuff","text":"
        • lsns - list namespaces
        • cgcreate - create new cgroup (see the sketch after this list)
        • cgexec - run the task in given control group
        • cgclassify - move running task(s) to given cgroup
        • nsenter - Run a command in a referenced process cgroup config
        • systemd-cgls - Recursively show control group contents
        • systemd-cgtop - Show top control groups by their resource usage
        • /proc/self/cgroup - cgroup introspection
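
        A quick sketch of the cgcreate/cgexec workflow from the list above, assuming the cgroup-tools (libcgroup) package is installed; the cpu controller and the demo group name are arbitrary examples:

        sudo cgcreate -g cpu:/demo\nsudo cgexec -g cpu:demo cat /proc/self/cgroup\n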
        "},{"location":"linux/#various-namespace-aware-tool-examples","title":"Various namespace-aware tool examples","text":""},{"location":"linux/#ps-cgroup-output","title":"ps cgroup output","text":"
        ps -o pid,ppid,user,comm,flags,%cpu,sz,%mem,cgname\n
        "},{"location":"linux/#run-a-process-in-another-namespace","title":"Run a process in another namespace","text":"

        With nsenter you specify a target pid to reference, and then specify which namespaces of its you want to enter.

        On Ubuntu 18.04, udev mounts devices in a non-global namespace, which prevents normal users from viewing those mounts. You must use nsenter to enter the udevd namespaces to view the mounts, using either --all to get all namespaces of udevd, or --mount for just that one required namespace:

        root@bionic:~# lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1\n\nroot@bionic:~# nsenter --all -t $(pgrep systemd-udevd) lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1 /mnt/adea64ca-e340-4961-8a4d-75d8a5970664\n\nroot@bionic:~# nsenter --mount -t $(pgrep systemd-udevd) lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1 /mnt/adea64ca-e340-4961-8a4d-75d8a5970664\n

        See udev for one permanent fix for this.

        "},{"location":"linux/#find-the-path-to-a-namespace","title":"Find the path to a namespace","text":"

        The path to a namespace can be used in some instances instead of the pid. We can discover the path to a namespace by using lsns.

        root@bionic:~# lsns -p $(pgrep udevd) -o +PATH\n        NS TYPE   NPROCS   PID USER COMMAND                    PATH\n4026531835 cgroup    173     1 root /sbin/init                 /proc/1/ns/cgroup\n4026531836 pid       173     1 root /sbin/init                 /proc/1/ns/pid\n4026531837 user      173     1 root /sbin/init                 /proc/1/ns/user\n4026531838 uts       173     1 root /sbin/init                 /proc/1/ns/uts\n4026531839 ipc       173     1 root /sbin/init                 /proc/1/ns/ipc\n4026532009 net       173     1 root /sbin/init                 /proc/1/ns/net\n4026532286 mnt         1  5480 root /lib/systemd/systemd-udevd /proc/5480/ns/mnt\n
        "},{"location":"linux/#access-network-sockets-from-the-command-line","title":"Access network sockets from the command line","text":"

        This is a poor man's netcat, useful for when there is no netcat. Note that /dev/tcp is implemented by bash itself, not the kernel, so this requires bash:

        echo asdf > /dev/tcp/${REMOTE_IP_ADDRESS}/${REMOTE_PORT}\n
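
        bash can also read from the socket if you bind it to a file descriptor. A minimal HTTP sketch, with example.com as a placeholder host:

        exec 3<>/dev/tcp/example.com/80\nprintf 'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n' >&3\ncat <&3\nexec 3>&-\n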
        "},{"location":"linux/#see-also","title":"See also","text":""},{"location":"linux/#distros","title":"Distros","text":"
        • rhel
        • ubuntu
        "},{"location":"linux/#init-systems","title":"Init systems","text":"
        • openrc
        • systemd
        • sysvinit
        • upstart
        "},{"location":"linux/#filesystems-and-block-devices","title":"Filesystems and block devices","text":"
        • Filesystem Hierarchy Standards: http://refspecs.linuxfoundation.org/FHS_3.0/fhs/index.html
        • LVM
        • ZFS
        "},{"location":"linux/#links","title":"Links","text":"
        • https://blog.quarkslab.com/digging-into-linux-namespaces-part-1.html
        • https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
        • https://wiki.archlinux.org/index.php/Cgroups
        • https://poor.dev/blog/terminal-anatomy
        • https://www.linusakesson.net/programming/tty
        • https://www.sobyte.net/post/2022-05/tty/
        • https://www.linuxcommand.org/tlcl.php
        • https://unix.stackexchange.com/a/367012: Linux sockets full names are limited to 107 characters
        • https://www.linuxatemyram.com
        • https://syscalls.mebeim.net: \"Linux kernel syscall tables\"
        • https://specifications.freedesktop.org/basedir-spec/: \"Various specifications specify files and file formats. This specification defines where these files should be looked for by defining one or more base directories relative to which files should be located.\"
        • https://kevinboone.me/systemd_embedded.html: \"Why systemd is a problem for embedded Linux\"
        "},{"location":"logstash/","title":"logstash","text":"

        \"Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite stash.\" - https://www.elastic.co/products/logstash

        "},{"location":"lsblk/","title":"lsblk","text":"

        \"lsblk lists information about all available or the specified block devices. The lsblk command reads the sysfs filesystem and udev db to gather information.\" - man lsblkq

        "},{"location":"lsblk/#examples","title":"Examples","text":""},{"location":"lsblk/#simple-usage","title":"Simple usage","text":"

        Here is the output of lsblk on an Ubuntu 16.04 Vagrant box:

        $ lsblk\nNAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT\nsda      8:0    0  10G  0 disk\n\u2514\u2500sda1   8:1    0  10G  0 part /\nsdb      8:16   0  10M  0 disk\n
        "},{"location":"lsblk/#show-filesystem-information","title":"Show filesystem information","text":"
        $ lsblk -f\nNAME   FSTYPE  LABEL           UUID                                 MOUNTPOINT\nsda\n\u2514\u2500sda1 ext4    cloudimg-rootfs 73ea38ed-7fcd-4871-8afa-17d36f4e4bfc /\nsdb    iso9660 cidata          2017-08-15-16-47-34-00\n
        "},{"location":"lsblk/#show-some-chosen-fields","title":"Show some chosen fields","text":"

        To see a list of available columns, run lsblk --help.

        $ lsblk -do NAME,SIZE,TYPE,VENDOR,MODEL,SERIAL\nNAME   SIZE TYPE VENDOR   MODEL            SERIAL\nsdd    1.8T disk ATA      Micron_5100_MTFD 18211C914753\nsdb    1.8T disk ATA      Micron_5100_MTFD 18211C914736\nsde    1.8T disk ATA      Micron_5100_MTFD 171216897B54\nsdc    1.8T disk ATA      Micron_5100_MTFD 171216897B63\nsda  223.6G disk ATA      INTEL SSDSC2KB24 BTYS815301VG245AGN\n
        ## Quick way to view ssd vs hdd models and mount points\n## ROTA: 1=hdd, 0=ssd\n## As of writing this method does not show zfs mount points. EG: sdb1 is mounted as zfs\n##\n$ lsblk -o name,rota,mountpoint,vendor,model,serial\nNAME   ROTA MOUNTPOINT VENDOR   MODEL            SERIAL\nsda       0            ATA      Samsung SSD 850  S39KNX0JA59421F\n\u2514\u2500sda1    0 /\nsdb       1            WD       Elements 25A1    575833314435383753393754\n\u251c\u2500sdb1    1\n\u2514\u2500sdb9    1\nsdc       1            Generic  External         002210107962\n\u251c\u2500sdc1    1\n\u2514\u2500sdc2    1 /mnt/sdc\nsr0       1            MATSHITA DVD+-RW SW830    CN0X85FC3686\n
        "},{"location":"lsblk/#show-all-available-information","title":"Show all available information","text":"

        The output here is really wide, but reformatting it through JSON makes it easier to read. Thankfully lsblk has a JSON output option, unlike many unix CLI tools.

        $ lsblk -O -J | jq .\n{\n  \"blockdevices\": [\n    {\n      \"name\": \"sda\",\n      \"kname\": \"sda\",\n      \"maj:min\": \"8:0\",\n      \"fstype\": null,\n      \"mountpoint\": null,\n      \"label\": null,\n      \"uuid\": null,\n      \"parttype\": null,\n      \"partlabel\": null,\n      \"partuuid\": null,\n      \"partflags\": null,\n      \"ra\": \"128\",\n      \"ro\": \"0\",\n      \"rm\": \"0\",\n      \"hotplug\": \"0\",\n      \"model\": \"HARDDISK        \",\n      \"serial\": null,\n      \"size\": \"10G\",\n      \"state\": \"running\",\n      \"owner\": \"root\",\n      \"group\": \"disk\",\n      \"mode\": \"brw-rw----\",\n      \"alignment\": \"0\",\n      \"min-io\": \"512\",\n      \"opt-io\": \"0\",\n      \"phy-sec\": \"512\",\n      \"log-sec\": \"512\",\n      \"rota\": \"1\",\n      \"sched\": \"deadline\",\n      \"rq-size\": \"128\",\n      \"type\": \"disk\",\n      \"disc-aln\": \"0\",\n      \"disc-gran\": \"0B\",\n      \"disc-max\": \"0B\",\n      \"disc-zero\": \"0\",\n      \"wsame\": \"0B\",\n      \"wwn\": null,\n      \"rand\": \"1\",\n      \"pkname\": null,\n      \"hctl\": \"2:0:0:0\",\n      \"tran\": \"spi\",\n      \"subsystems\": \"block:scsi:pci\",\n      \"rev\": \"1.0 \",\n      \"vendor\": \"VBOX    \",\n      \"children\": [\n        {\n          \"name\": \"sda1\",\n          \"kname\": \"sda1\",\n          \"maj:min\": \"8:1\",\n          \"fstype\": \"ext4\",\n          \"mountpoint\": \"/\",\n          \"label\": \"cloudimg-rootfs\",\n          \"uuid\": \"73ea38ed-7fcd-4871-8afa-17d36f4e4bfc\",\n          \"parttype\": \"0x83\",\n          \"partlabel\": null,\n          \"partuuid\": \"8d714561-01\",\n          \"partflags\": \"0x80\",\n          \"ra\": \"128\",\n          \"ro\": \"0\",\n          \"rm\": \"0\",\n          \"hotplug\": \"0\",\n          \"model\": null,\n          \"serial\": null,\n          \"size\": \"10G\",\n          \"state\": null,\n          \"owner\": \"root\",\n          \"group\": \"disk\",\n          \"mode\": \"brw-rw----\",\n          \"alignment\": \"0\",\n          \"min-io\": \"512\",\n          \"opt-io\": \"0\",\n          \"phy-sec\": \"512\",\n          \"log-sec\": \"512\",\n          \"rota\": \"1\",\n          \"sched\": \"deadline\",\n          \"rq-size\": \"128\",\n          \"type\": \"part\",\n          \"disc-aln\": \"0\",\n          \"disc-gran\": \"0B\",\n          \"disc-max\": \"0B\",\n          \"disc-zero\": \"0\",\n          \"wsame\": \"0B\",\n          \"wwn\": null,\n          \"rand\": \"1\",\n          \"pkname\": \"sda\",\n          \"hctl\": null,\n          \"tran\": null,\n          \"subsystems\": \"block:scsi:pci\",\n          \"rev\": null,\n          \"vendor\": null\n        }\n      ]\n    },\n    {\n      \"name\": \"sdb\",\n      \"kname\": \"sdb\",\n      \"maj:min\": \"8:16\",\n      \"fstype\": \"iso9660\",\n      \"mountpoint\": null,\n      \"label\": \"cidata\",\n      \"uuid\": \"2017-08-15-16-47-34-00\",\n      \"parttype\": null,\n      \"partlabel\": null,\n      \"partuuid\": null,\n      \"partflags\": null,\n      \"ra\": \"128\",\n      \"ro\": \"0\",\n      \"rm\": \"0\",\n      \"hotplug\": \"0\",\n      \"model\": \"HARDDISK        \",\n      \"serial\": null,\n      \"size\": \"10M\",\n      \"state\": \"running\",\n      \"owner\": \"root\",\n      \"group\": \"disk\",\n      \"mode\": \"brw-rw----\",\n      \"alignment\": \"0\",\n      \"min-io\": \"512\",\n      \"opt-io\": \"0\",\n      \"phy-sec\": \"512\",\n      \"log-sec\": 
\"512\",\n      \"rota\": \"1\",\n      \"sched\": \"deadline\",\n      \"rq-size\": \"128\",\n      \"type\": \"disk\",\n      \"disc-aln\": \"0\",\n      \"disc-gran\": \"0B\",\n      \"disc-max\": \"0B\",\n      \"disc-zero\": \"0\",\n      \"wsame\": \"32M\",\n      \"wwn\": null,\n      \"rand\": \"1\",\n      \"pkname\": null,\n      \"hctl\": \"2:0:1:0\",\n      \"tran\": \"spi\",\n      \"subsystems\": \"block:scsi:pci\",\n      \"rev\": \"1.0 \",\n      \"vendor\": \"VBOX    \"\n    }\n  ]\n}\n
        "},{"location":"lsblk/#see-also","title":"See also","text":"
        • findmnt
        "},{"location":"lshw/","title":"lshw","text":"

        lshw is a CLI tool on linux that shows information about your hardware. It can output several formats that are both human- and machine-friendly.

        "},{"location":"lshw/#examples","title":"Examples","text":""},{"location":"lshw/#lshw-help","title":"lshw --help","text":"
        $ lshw --help\nHardware Lister (lshw) - B.02.18\nusage: lshw [-format] [-options ...]\n       lshw -version\n\n        -version        print program version (B.02.18)\n\nformat can be\n        -html           output hardware tree as HTML\n        -xml            output hardware tree as XML\n        -json           output hardware tree as a JSON object\n        -short          output hardware paths\n        -businfo        output bus information\n\noptions can be\n        -class CLASS    only show a certain class of hardware\n        -C CLASS        same as '-class CLASS'\n        -c CLASS        same as '-class CLASS'\n        -disable TEST   disable a test (like pci, isapnp, cpuid, etc. )\n        -enable TEST    enable a test (like pci, isapnp, cpuid, etc. )\n        -quiet          don't display status\n        -sanitize       sanitize output (remove sensitive information like serial numbers, etc.)\n        -numeric        output numeric IDs (for PCI, USB, etc.)\n        -notime         exclude volatile attributes (timestamps) from output\n
        "},{"location":"lshw/#example-of-short-output","title":"Example of -short output","text":"

        It's best to use sudo, otherwise you will not see all hardware.

        $ sudo lshw -short\nH/W path                   Device      Class          Description\n=================================================================\n                                       system         OptiPlex 7010 (OptiPlex 7010)\n/0                                     bus            0YXT71\n/0/0                                   memory         64KiB BIOS\n/0/5e                                  processor      Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz\n/0/5e/3a                               memory         256KiB L1 cache\n/0/5e/3b                               memory         1MiB L2 cache\n/0/5e/3c                               memory         8MiB L3 cache\n/0/3d                                  memory         16GiB System Memory\n/0/3d/0                                memory         4GiB DIMM DDR3 Synchronous 1600 MHz (0.6 ns)\n/0/3d/1                                memory         4GiB DIMM DDR3 Synchronous 1600 MHz (0.6 ns)\n/0/3d/2                                memory         4GiB DIMM DDR3 Synchronous 1600 MHz (0.6 ns)\n/0/3d/3                                memory         4GiB DIMM DDR3 Synchronous 1600 MHz (0.6 ns)\n/0/100                                 bridge         Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller\n/0/100/2                               display        Xeon E3-1200 v2/3rd Gen Core processor Graphics Controller\n/0/100/14                              bus            7 Series/C210 Series Chipset Family USB xHCI Host Controller\n/0/100/14/0                usb3        bus            xHCI Host Controller\n/0/100/14/1                usb4        bus            xHCI Host Controller\n/0/100/14/1/4              scsi6       storage        Elements 25A1\n/0/100/14/1/4/0.0.0        /dev/sdb    disk           4TB Elements 25A1\n/0/100/14/1/4/0.0.0/1      /dev/sdb1   volume         3725GiB OS X ZFS partition or Solaris /usr partition\n/0/100/14/1/4/0.0.0/9      /dev/sdb9   volume         8191KiB reserved partition\n/0/100/16                              communication  7 Series/C216 Chipset Family MEI Controller #1\n/0/100/16.3                            communication  7 Series/C210 Series Chipset Family KT Controller\n/0/100/19                  eno1        network        82579LM Gigabit Network Connection\n/0/100/1a                              bus            7 Series/C216 Chipset Family USB Enhanced Host Controller #2\n/0/100/1a/1                usb1        bus            EHCI Host Controller\n/0/100/1a/1/1                          bus            Integrated Rate Matching Hub\n/0/100/1a/1/1/2                        generic        BCM20702A0\n/0/100/1b                              multimedia     7 Series/C216 Chipset Family High Definition Audio Controller\n/0/100/1d                              bus            7 Series/C216 Chipset Family USB Enhanced Host Controller #1\n/0/100/1d/1                usb2        bus            EHCI Host Controller\n/0/100/1d/1/1                          bus            Integrated Rate Matching Hub\n/0/100/1d/1/1/5                        bus            USB2.0 Hub\n/0/100/1d/1/1/5/2          scsi7       storage        External\n/0/100/1d/1/1/5/2/0.0.0    /dev/sdc    disk           256GB External\n/0/100/1d/1/1/5/2/0.0.0/1              volume         512MiB EFI GPT partition\n/0/100/1d/1/1/5/2/0.0.0/2  /dev/sdc2   volume         221GiB EXT4 volume\n/0/100/1e                              bridge         82801 PCI Bridge\n/0/100/1f                              bridge         Q77 Express Chipset LPC Controller\n/0/100/1f.2                  
          storage        7 Series/C210 Series Chipset Family 6-port SATA Controller [AHCI mode]\n/0/100/1f.3                            bus            7 Series/C216 Chipset Family SMBus Controller\n/0/1                       scsi0       storage\n/0/1/0.0.0                 /dev/sda    disk           256GB Samsung SSD 850\n/0/1/0.0.0/1               /dev/sda1   volume         238GiB EXT4 volume\n/0/2                       scsi1       storage\n/0/2/0.0.0                 /dev/cdrom  disk           DVD+-RW SW830\n/1                         docker0     network        Ethernet interface\n
        "},{"location":"lshw/#see-also","title":"See also","text":"
        • lsblk
        • lscpu
        • lslogins
        • lsmod
        • lsof
        • lspci
        • lsusb
        "},{"location":"lsof/","title":"lsof","text":"

        lsof lists open files. This CLI tool is available on most *nix OSes.

        On linux, a much quicker alternative may be to use fuser.

        "},{"location":"lsof/#examples","title":"Examples","text":""},{"location":"lsof/#list-files-open-by-a-given-user","title":"List files open by a given user","text":"
        lsof -u username\n
        "},{"location":"lsof/#show-listening-tcp-sockets","title":"show listening TCP sockets","text":"

        Since everything in unix is a file, including network sockets, you can list open sockets and the programs that have them open. However, this is notably unreliable in Docker, so don't trust this completely. When in doubt, double check against ss -nlptu or netstat -nlptu, though this will likely only work in linux.

        lsof -iTCP -sTCP:LISTEN\n
        "},{"location":"lsof/#show-a-sorted-list-of-processes-by-listening-port","title":"Show a sorted list of processes by listening port","text":"
        lsof -iTCP -sTCP:LISTEN -P | sort -k2 -t: -n\n
        "},{"location":"lsof/#show-what-process-is-using-port-80-or-443-with-port-numbers","title":"show what process is using port 80 or 443 with port numbers","text":"
        ## -w make output wide\n## -n makes lsof not resolve hostnames from ip addresses\n## -P makes lsof not resolve service names for port numbers.\n## -iTCP shows IP TCP sockets.\n\nlsof -wnP -iTCP:80,443\n
        "},{"location":"lsof/#show-the-selinux-context-for-sockets","title":"show the selinux context for sockets","text":"
        lsof -i -Z\n

        See the man page for extended syntax around -Z.

        "},{"location":"lsof/#see-also","title":"See Also","text":"
        • Process Explorer - LSOF type functionality for windows.
        • fuser
        "},{"location":"lua/","title":"LUA","text":"

        \"Lua is a powerful, efficient, lightweight, embeddable scripting language. It supports procedural programming, object-oriented programming, functional programming, data-driven programming, and data description.\" - https://www.lua.org/about.html

        "},{"location":"lua/#links","title":"Links","text":"
        • https://www.lua.org
        • https://learnxinyminutes.com/docs/lua
        • https://pico-8.fandom.com/wiki/Lua
        • https://www.lexaloffle.com/dl/docs/pico-8_manual.html#Lua_Syntax_Primer
        "},{"location":"lvm/","title":"lvm","text":"

        Linux Logical Volume Manager.

        • Tutorial
        • Overview
        • Managing
        "},{"location":"lvm/#general-flow","title":"General flow","text":"

        Physical volumes (pv) are grouped into volume groups (vg). Volume groups are sliced up into logical volumes (lv). Because of that, the general flow is something like:

        ## Partitioning is not necessary, so no need for fdisk or sgdisk\npvcreate /dev/sd{x..z}\nvgcreate vg_scratch /dev/sd{x..z}\nlvcreate -l 95%FREE -n lv_scratch vg_scratch\nmkfs.ext4 /dev/vg_scratch/lv_scratch\n
        "},{"location":"lvm/#examples","title":"Examples","text":""},{"location":"lvm/#show-a-bunch-of-info","title":"Show a bunch of info","text":"
        pvdisplay -v\npvs -v\npvs -a\npvs --segments\nvgdisplay -v\nvgs -v\nvgs -a -o +devices\n
        "},{"location":"lvm/#show-system-disks-and-if-they-are-in-an-lvm","title":"Show system disks and if they are in an LVM","text":"

        lvmdiskscan

        "},{"location":"lvm/#show-all-logical-volumes","title":"Show all logical volumes","text":"

        lvs

        "},{"location":"lvm/#activate-all-volume-groups","title":"Activate all volume groups","text":"

        vgchange -a y

        "},{"location":"lvm/#create-a-physical-volume","title":"Create a physical volume","text":"

        A physical volume is a disk or partition that has been initialized for use by LVM. Physical volumes are grouped into volume groups, from which logical volumes are created.

        pvcreate /dev/sdb2 /dev/sdc2

        "},{"location":"lvm/#create-a-logical-volume","title":"Create a logical volume","text":"

        This creates a specifically named logical volume on a volume group named vg_data

        lvcreate -L 10G -n lv_name vg_data

        "},{"location":"lvm/#show-how-each-logical-volume-is-set-up","title":"Show how each logical volume is set up","text":"

        lvdisplay

        "},{"location":"lvm/#show-free-extents","title":"Show free extents","text":"

        vgs -o vg_free_count

        "},{"location":"lvm/#extend-a-volume-group-to-1tb","title":"Extend a volume group to 1TB","text":"
        lvextend -L 1T /dev/vgroot/lv_srv && \\\nresize2fs /dev/mapper/vgroot-lv_srv && \\\ndf -h /srv\n
        "},{"location":"lvm/#extend-a-volume-group-to-its-max","title":"Extend a volume group to its max","text":"
        lvextend -l +100%FREE /dev/vgroot/lv_srv && \\\nresize2fs /dev/mapper/vgroot-lv_srv && \\\ndf -h /srv\n
        "},{"location":"lxc/","title":"LXC","text":"

        \"LXC is a userspace interface for the Linux kernel containment features. Through a powerful API and simple tools, it lets Linux users easily create and manage system or application containers.\" - https://linuxcontainers.org

        "},{"location":"lxc/#see-also","title":"See Also","text":"
        • Docker
        "},{"location":"machine-learning/","title":"Machine Learning","text":"

        Machine learning is the subfield of computer science that, according to Arthur Samuel in 1959, gives \"computers the ability to learn without being explicitly programmed.\" - https://en.wikipedia.org/wiki/Machine_learning

        "},{"location":"machine-learning/#terminology-and-concepts","title":"Terminology and concepts","text":"
        • Supervised machine learning: The program is \"trained\" on a pre-defined set of \"training examples\", which then facilitate its ability to reach an accurate conclusion when given new data.
        • Unsupervised machine learning: The program is given a bunch of data and must find patterns and relationships therein.
        • \"The goal of ML is never to make 'perfect' guesses, because ML deals in domains where there is no such thing. The goal is to make guesses that are good enough to be useful.\"
        • Machine learning builds heavily on statistics.
        "},{"location":"machine-learning/#prerequisites","title":"Prerequisites","text":"
        • Statistics
        • Linear Algebra
        • Calculus
        "},{"location":"machine-learning/#resources","title":"Resources","text":"
        • Reddit /r/machinelearning wiki
        • Data Science From Scratch book
        • Andrew Ng's Coursera course on ML
        • Machine Learning with Python / Practical Machine Learning Tutorial with Python Introduction
        • Your First Machine Learning Project in Python Step-By-Step
        • Example Machine Learning IPython Notebook
        • FastML: Machine Learning Made Easy
        • Tensorflow
        • My Neural Network isn't working! What should I do?
        • Machine Learning Recipes with Josh Gordon - Google Developers
        "},{"location":"machine-learning/#see-also","title":"See Also","text":"
        • Life 3.0: Being Human in the Age of Artificial Intelligence: https://www.amazon.com/Life-3-0-Being-Artificial-Intelligence/dp/1101946598
        • DeepMind and Blizzard open StarCraft II as an AI research environment
        • Intuitive RL: Intro to Advantage-Actor-Critic (A2C)
        • Deep Reinforcement Learning instrumenting bettercap for WiFi pwning.
        • Creating music through image generation of spectrograms.
        "},{"location":"macos/","title":"macOS","text":"

        Apple's Unix desktop operating system.

        "},{"location":"macos/#links","title":"Links","text":"
        • Significant Changes in macOS 10.15 Catalina of Interest to Mac Admins
        • https://git.herrbischoff.com/awesome-macos-command-line/about/
        • https://support.apple.com/en-us/108900: How to revive or restore Mac firmware
        • https://weiyen.net/articles/useful-macos-cmd-line-utilities
        • https://dortania.github.io/OpenCore-Legacy-Patcher: Run newer macOS versions on older, unsupported Mac hardware.
        "},{"location":"macos/#useful-commands","title":"Useful Commands","text":"
        • caffeinate
        • mdutil
        • networksetup
        • scutil
        • serverinfo
        • sharing
        • tccutil
        "},{"location":"macos/#logout-user-from-a-shell","title":"Logout user from a shell","text":"
        sudo launchctl bootout gui/$(id -u \"$USERNAME\")\n

        or

        sudo launchctl bootout user/$(id -u \"$USERNAME\")\n
        "},{"location":"macos/#fix-sshd-client-timeout-config","title":"Fix sshd client timeout config","text":"

        macOS has an /etc/ssh/sshd_config that will never disconnect idle clients. With sketchy wifi, or for a variety of other reasons, this can cause ssh connections to pile up, causing a DoS on the ssh server. When this happens, a client that is attempting to connect may see kex_exchange_identification: read: Connection reset by peer. To fix this, set the following config values to anything other than 0. See man 5 sshd_config for more info.

        The following settings would allow an unresponsive ssh session 10 minutes before terminating it:

        ClientAliveInterval 120\nClientAliveCountMax 5\n

        Using BSD sed, you can quickly set this:

        sudo sed -i '' -E 's/^#?ClientAliveInterval [0-9]+/ClientAliveInterval 120/ ; s/^#?ClientAliveCountMax [0-9]+/ClientAliveCountMax 5/ ;' /etc/ssh/sshd_config\nsudo bash -c \"launchctl stop com.openssh.sshd ; launchctl start com.openssh.sshd ;\"\n
        "},{"location":"macos/#show-hardware-info","title":"Show hardware info","text":"
        system_profiler SPHardwareDataType\n
        "},{"location":"macos/#install-package-from-cli","title":"Install package from CLI","text":"

        Use brew.sh for most things. Otherwise:

        sudo installer -pkg /Volumes/ExifTool-9.16/ExifTool-9.16.pkg -target /\n
        "},{"location":"macos/#start-ftp-server","title":"Start FTP server","text":"
        sudo -s launchctl load -w /System/Library/LaunchDaemons/ftp.plist\n
        "},{"location":"macos/#check-swap-usage","title":"Check swap usage","text":"
        sysctl vm.swapusage\n
        "},{"location":"macos/#disable-wifi-disconnect-when-locking-screen","title":"Disable wifi disconnect when locking screen","text":"
        sudo /System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources airport en1 prefs DisconnectOnLogout=NO\n
        "},{"location":"macos/#show-some-downloaded-files","title":"Show some downloaded files","text":"

        This shows a list of all the quarantine checked downloads:

        sqlite3 ~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV* 'select distinct LSQuarantineDataURLString from LSQuarantineEvent'\n
        "},{"location":"macos/#send-notifications-from-terminal","title":"Send Notifications from Terminal","text":"
        sudo gem install terminal-notifier\nterminal-notifier -message \"Hello, this is my message\" -title \"Message Title\"\n
        "},{"location":"macos/#enable-verbose-eap-logging","title":"Enable verbose eap logging","text":"
        sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.eapolclient LogFlags -int -1\n
        "},{"location":"macos/#network","title":"Network","text":"
        • gif0 - Generic Tunnel Interface. See man gif.
        • stf0 - Six To Four tunnel. See man stf.
        "},{"location":"macos/#migration-assistant","title":"Migration Assistant","text":"

        Some files are not migrated using Migration Assistant. EG:

        • /etc/hosts
        • crontabs
        • /etc/apache2/httpd.conf
        "},{"location":"macos/#remap-caps-lock-to-escape","title":"Remap caps-lock to escape","text":"

        This remaps using a different mechanism than what the Keyboard pref pane uses, so the change will not be reflected there, so you will want to make sure to save both of these functions so you can reset when you need normal behavior.

        caps_to_esc_map(){\n    hidutil property --set \\\n    '{\"UserKeyMapping\":[{\"HIDKeyboardModifierMappingSrc\":0x700000039,\"HIDKeyboardModifierMappingDst\":0x700000029}]}'\n}\n\ncaps_to_esc_unmap(){\n    hidutil property --set '{\"UserKeyMapping\":[]}'\n}\n
        "},{"location":"make/","title":"GNU make","text":"

        GNU make

        "},{"location":"make/#automatic-variables","title":"Automatic variables","text":"

        make requires commands to begin with tabs, so copying the examples below will not work unless you replace the leading spaces with tabs. This is probably the most frustrating thing about make.

        $ cat Makefile\nall: foo_one foo_two\n\nfoo_%: bar_a bar_b\n    @echo $*: this is $@ and it requires $^\n\nbar_%: baz\n    @echo $*: this is $@ and it requires $^\n\nbaz:\n    @echo this is baz\n\n$ make\nthis is baz\na: this is bar_a and it requires baz\nb: this is bar_b and it requires baz\none: this is foo_one and it requires bar_a bar_b\ntwo: this is foo_two and it requires bar_a bar_b\n
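
        If the tab requirement bites you often, GNU make 3.82 and later let you pick a different recipe prefix. A minimal sketch:

        .RECIPEPREFIX = >\nall:\n> @echo recipe lines now start with \">\" instead of a tab\n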
        "},{"location":"make/#links","title":"Links","text":"
        • https://www.gnu.org/software/make/manual/html_node/Automatic-Variables.html
        • https://www.gnu.org/software/make/manual/html_node/Standard-Targets.html
        • https://nullprogram.com/blog/2017/08/20: A Tutorial on Portable Makefiles
        "},{"location":"marlin/","title":"Marlin","text":"

        \"Marlin is an open source firmware for the RepRap family of replicating rapid prototypers \u2014 popularly known as \u201c3D printers.\u201d It was derived from Sprinter and grbl, and became a standalone open source project on August 12, 2011 with its Github release. Marlin is licensed under the GPLv3 and is free for all applications.\" - https://marlinfw.org/docs/basics/introduction.html

        Marlin is used on the Ender line of printers, as well as many others.

        "},{"location":"marlin/#examples","title":"Examples","text":""},{"location":"marlin/#show-firmware-info","title":"Show firmware info","text":"

        In a serial terminal, type M115. You will see something like:

        Send: M115\nRecv: FIRMWARE_NAME:2.0.8.2 (Jul  6 2022 19:18:56) SOURCE_CODE_URL:www.creality.com PROTOCOL_VERSION:1.0 MACHINE_TYPE:Ender-3 Pro EXTRUDER_COUNT:1 UUID:cede2a2f-41a2-4748-9b12-c55c62f367ff\nRecv: Cap:SERIAL_XON_XOFF:0\nRecv: Cap:BINARY_FILE_TRANSFER:0\nRecv: Cap:EEPROM:1\n...lots of similar lines...\nRecv: Cap:CHAMBER_TEMPERATURE:0\nRecv: Cap:COOLER_TEMPERATURE:0\nRecv: Cap:MEATPACK:0\nRecv: ok\nSend: M155 S2\nRecv: ok\n
        "},{"location":"marlin/#perform-a-pid-autotune","title":"Perform a PID Autotune","text":"

        In octoprint or some other terminal interface:

        1. Enter M303 S215 C10 to perform a 215\u00baC tuning test 10 times. You will get Kp, Ki, and Kd values back at the end of the test.
        2. Enter those values in the terminal as M301 Pxx.xx Ixx.xx Dxx.xx
        3. Enter M500 to save the values to the EEPROM.
        "},{"location":"marlin/#see-also","title":"See Also","text":"
        • 3D Printing
        "},{"location":"math/","title":"math","text":""},{"location":"math/#links","title":"Links","text":"
        • https://en.wikipedia.org/wiki/Category:Probability_theory_paradoxes
        • https://en.wikipedia.org/wiki/Set_(mathematics) and https://realpython.com/python-sets
        "},{"location":"mdraid/","title":"mdraid","text":"

        Linux software raid.

        "},{"location":"mdraid/#examples","title":"Examples","text":""},{"location":"mdraid/#show-details-of-an-array","title":"Show details of an array","text":"
        mdadm --detail /dev/md0\n
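
        And a minimal sketch of creating a new mirror, assuming /dev/sdb1 and /dev/sdc1 are the member partitions:

        mdadm --create /dev/md0 --level=1 --raid-devices=2 /dev/sdb1 /dev/sdc1\ncat /proc/mdstat\n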
        "},{"location":"mdraid/#links","title":"Links","text":"
        • http://poweredgec.dell.com/ - the Dell ldstate command is a good view into software raid and hardware raid (eg: megaraid, sas2) under one command.
        • https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/5/html/Deployment_Guide/s2-raid-manage-removing.html
        • http://tldp.org/HOWTO/Software-RAID-HOWTO.html
        • https://raid.wiki.kernel.org/index.php/Linux_Raid
        • https://raid.wiki.kernel.org/index.php/RAID_setup
        "},{"location":"mechanical-keyboards/","title":"Keyboards","text":""},{"location":"mechanical-keyboards/#links","title":"Links","text":"
        • https://fingerpunch.xyz: Custom keyboards made by my friend Sadek.
        • https://www.caniusevia.com: A QMK firmware with online programming.
        • https://ergodox-ez.com: A great lineup of programmable mechanical keyboards.
        • https://configure.zsa.io: Ergodox online keyboard configuration and training tool.
        • https://shop.pimoroni.com/products/keybow-2040: A circuitpython programmable mechanical macro keypad based on Pico 2040.
        • https://shop.pimoroni.com/products/pico-rgb-keypad-base: A Raspberry Pi Pico compatible programmable mushy macro keypad.
        • https://1upkeyboards.com/shop/controllers/usb-to-usb-converter: Turn any usb keyboard into a programmable keyboard.
        "},{"location":"metallb/","title":"MetalLB","text":"

        \"MetalLB is a load-balancer implementation for bare metal Kubernetes clusters, using standard routing protocols.\" - https://metallb.universe.tf/

        \"This is not an official Google project, it is just code that happens to be owned by Google.\" - https://github.com/google/metallb

        "},{"location":"metallb/#links","title":"Links","text":"
        • https://github.com/google/metallb
        • https://metallb.universe.tf/installation
        • https://metallb.universe.tf/configuration/#layer-2-configuration
        "},{"location":"metallb/#see-also","title":"See Also","text":"
        • kubernetes
        • minikube
        "},{"location":"microbit/","title":"micro:bit","text":"

        \"micro:bit is a tiny programmable computer, designed to make learning and teaching easy and fun!\" - https://microbit.org

        "},{"location":"microbit/#notes","title":"Notes","text":"
        • Weight: 8g
        • Weight with 2x AAA batteries in case: 37g
        • Python doesn't support BTLE, JS does
        "},{"location":"microbit/#links","title":"Links","text":"
        • Hardware Description: https://tech.microbit.org/hardware/
        • Python API docs: https://microbit-micropython.readthedocs.io
        • Microbit focused python editor: https://codewith.mu
        • Online python editor: https://python.microbit.org
        • Online drag-and-drop and javascript editor: https://makecode.microbit.org
        "},{"location":"microcontrollers/","title":"microcontrollers","text":"

        Notes about microcontrollers like the esp8266.

        "},{"location":"microcontrollers/#links","title":"Links","text":"
        • https://blog.squix.org/2016/07/esp8266-based-plane-spotter-how-to.html: esp8266 with rtl-sdr for plane spotting.
        • https://youtu.be/WlkMbNnIECM: ping pong ball lamps with neopixels.
        • https://kno.wled.ge: WLED, by FAR the easiest way to use neopixels.
        • https://docs.micropython.org/en/latest/esp8266/quickref.html: Micropython on the esp8266.
        • https://learn.adafruit.com: There is SO MUCH awesome creativity with microcontrollers in here.
        • https://diyi0t.com/esp8266-nodemcu-tutorial: Good info on the esp8266. There are some conflicting forum posts out there, and the spec table here was useful for me.
        • https://youtu.be/udo8mv5oarg / https://github.com/bertrandom/snowball-thrower: Playing Zelda Breath of the Wild Bowling minigame using Teensy
        • https://tasmota.github.io: General purpose ESP firmware for connecting to and automating various sensors and devices.
        • https://www.solder.party/docs
        • https://www.withdiode.com: Modern circuit simulator. \"Build, program, and simulate hardware in the browser.\"
        • https://vanhunteradams.com/Pico/Bootloader/Boot_sequence.html
        "},{"location":"microcontrollers/#my-personal-projects","title":"My personal projects","text":"

        These are all components I've used. They may not be the best or even the most suitable, but I've had success with all of the things listed below.

        N.B.: There are some amazon links in here. They are all smile.amazon links, not affiliate links.

        "},{"location":"microcontrollers/#led-projects","title":"LED projects","text":""},{"location":"microcontrollers/#microcontrollers_1","title":"Microcontrollers","text":"

        As of January 2022 I've only used the HiLetgo NodeMCU ESP8266 (CP2102 ESP-12E). The NodeMCU ESP8266 has a built in voltage regulator on its voltage-in pin (Vin) that tolerates over 12v input, which means you can use a single 12v power supply to power both the device and one or more 12v LED strands or strips. Here's a wiring diagram of how I used to do it. I've iterated on the wiring since I first made this diagram, but logically the circuit itself is identical to this.

        "},{"location":"microcontrollers/#led-strands","title":"LED strands","text":"
        • Alitove WS2811 12v strand requires 12v power input.
        • Alitove WS2811 5v strand is identical to the previous, but because it's 5v it can run off usb power to the microcontroller, which is relayed over the vin port. See this wiring diagram. Because you can run the microcontroller and lights off usb power, this is a portable setup that would be good for bikes, cosplay, etc.
        • Alitove 5v strip is electrically the same as the above 5v strand, but in a different form factor. This is good for making lamps, accent lighting at home, etc.
        "},{"location":"microcontrollers/#software","title":"Software","text":"

        For software I've been using the led focused firmware WLED, which has a bunch of pre-built animations and stuff. This doesn't let you easily do things like control individual LEDs, so if you want those features you should look at using something else like circuitpython or micropython.

        Another firmware that may be worth checking out is Tasmota. I haven't used this, but it purportedly lets you animate LEDs and also connect a variety of other sensors and devices, and is more smart-home focused.

        "},{"location":"microcontrollers/#additional-hardware","title":"Additional hardware","text":"
        • Breadboard jumpers are great for avoiding having to solder things together.
        • Lever nuts also help avoid soldering.
        • A high-wattage power supply is good regardless of whether you're going with 12v or 5v. If you don't have enough amps, your LEDs will not go as bright as they potentially could.
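
        As a rough power budget (a common rule of thumb, not a measured spec): a 5v pixel can draw about 60mA at full white, so 100 pixels is roughly 6A, or 30W at 5v. Size the supply comfortably above that.
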
        "},{"location":"micropython/","title":"MicroPython","text":"

        \"MicroPython is a lean and efficient implementation of the Python 3 programming language that includes a small subset of the Python standard library and is optimised to run on microcontrollers and in constrained environments.\"- https://micropython.org

        https://docs.micropython.org

        "},{"location":"micropython/#hardware","title":"Hardware","text":"
        • ESP8266
        • pyboard
        • Arduino Due
        • microbit
        "},{"location":"micropython/#tips","title":"Tips","text":"
        • screen /dev/usb.whatever sometimes fails to get a usb repl. Try using picocom instead.
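
        For example, connecting to the REPL with picocom (the device path is an assumption; 115200 is the usual ESP8266 REPL baud rate):

        picocom -b 115200 /dev/tty.usbserial-0001\n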
        "},{"location":"micropython/#esp8266-workflow","title":"ESP8266 workflow","text":"
        • Info on flashing can be found here: https://docs.micropython.org/en/latest/esp8266/tutorial/intro.html
        • brew install picocom
        • pip install --user adafruit-ampy esptool
        • export AMPY_PORT=/dev/tty.usbserial-0001 sets up ampy with the needed serial port.
        • ampy run test.py runs test.py found in the current local directory on the ESP8266.
        • ampy --port /serial/port put test.py /foo/bar.py copy a file to the board. Use get to copy the other way. Omit destination file name to just view the file.
        • Modify boot.py for any boot setup. Run ampy get boot.py to see the defaults for the flash you have loaded.
        • Modify main.py with your main program code to auto-run.
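
        Putting the above together, a sketch of a full flash-and-deploy session (the serial port and firmware filename are assumptions; the flashing flags are from the MicroPython ESP8266 tutorial linked above):

        export AMPY_PORT=/dev/tty.usbserial-0001\nesptool.py --port \"$AMPY_PORT\" erase_flash\nesptool.py --port \"$AMPY_PORT\" --baud 460800 write_flash --flash_size=detect 0 esp8266-firmware.bin\nampy put main.py  # your auto-run program code\npicocom -b 115200 \"$AMPY_PORT\"  # watch it boot and get a repl\n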
        "},{"location":"micropython/#links","title":"Links","text":"
        • My microcontrollers notes have more info about LED projects and associated hardware.
        • https://docs.micropython.org
        • https://www.digikey.com/en/maker/projects/micropython-basics-load-files-run-code/fb1fcedaf11e4547943abfdd8ad825ce
        • TalkPython.fm Episode #108: MicroPython and Open Source Hardware at Adafruit
        • https://github.com/scientifichackers/ampy
        • https://youtu.be/odffCT0aW58: Tutorial on how to use Jupyter as a micropython development interface for neopixels
        "},{"location":"micropython/#see-also","title":"See also","text":"
        • python
        • circuitpython
        "},{"location":"minikube/","title":"minikube","text":"

        \"Minikube is a tool that makes it easy to run Kubernetes locally. Minikube runs a single-node Kubernetes cluster inside a VM on your laptop for users looking to try out Kubernetes or develop with it day-to-day.\" - https://github.com/kubernetes/minikube

        "},{"location":"minikube/#examples","title":"Examples","text":""},{"location":"minikube/#list-available-addons","title":"List available addons","text":"
        $ minikube addons list\n- addon-manager: enabled\n- dashboard: disabled\n- default-storageclass: enabled\n- efk: disabled\n- freshpod: disabled\n- gvisor: disabled\n- heapster: disabled\n- ingress: enabled\n- kube-dns: disabled\n- metrics-server: disabled\n- nvidia-driver-installer: disabled\n- nvidia-gpu-device-plugin: disabled\n- registry: enabled\n- registry-creds: disabled\n- storage-provisioner: enabled\n- storage-provisioner-gluster: disabled\n
        "},{"location":"minikube/#launch-a-specific-version-of-k8s-in-minikube","title":"Launch a specific version of k8s in Minikube","text":"
        minikube start --kubernetes-version='v1.14.6'\n
        "},{"location":"minikube/#enable-the-registry-addon","title":"Enable the registry addon","text":"
        minikube addons enable registry\n
        "},{"location":"minikube/#links","title":"Links","text":"
        • https://github.com/kubernetes/minikube
        • https://minikube.sigs.k8s.io/docs
        • https://metallb.universe.tf/community/#testing-in-minikube
        "},{"location":"misc/","title":"Miscellaneous","text":"

        Things that don't have much content and don't deserve their own page.

        "},{"location":"misc/#misc","title":"Misc","text":"
        • https://developer.okta.com/blog/2019/10/21/illustrated-guide-to-oauth-and-oidc: An Illustrated Guide to OAuth and OpenID Connect
        • https://youtu.be/LPqqoOm8y5s?t=3816: Corey Quinn Scale 14x - Docker Must Die
        • https://www.youtube.com/watch?v=0T2XFSALOaU: Docker in Production: Tales From the Engine Room - Bryan Cantrill (Surge 2015)
        • https://vimeo.com/92770954: Sensu @ Yelp part 1
        • https://vimeo.com/92838680: Sensu @ Yelp part 2
        • https://www.youtube.com/watch?v=jr4zQc3g1Ts&t=416: Terrible ideas in Git
        • https://www.youtube.com/watch?v=9koJOCL8Bms: The Well Tempered API - Why can we play 400 year old music but our software only lasts a few months or years?
        • https://www.youtube.com/watch?v=ybGrm73dXow: Using Swagger to tame HTTP/JSON interfaces
        • https://www.usenix.org/conference/lisa13/working-theory-monitoring: Working Theory of Monitoring
        • https://youtu.be/YFDwdRVTg4g?t=33m11s: Yelp's Theory of PaaSes talk from Box SRE Hour
        • https://slack.engineering/introducing-nebula-the-open-source-global-overlay-network-from-slack/: Introducing Nebula, the open source global overlay network from Slack
        • https://mtlynch.io/code-review-love: How to Make Your Code Reviewer Fall in Love with You
        • https://sonic-pi.net: Sonic Pi: The \"code-based music creation and performance tool\" that DJ Dave used in the opening set of Github Universe 2020.
        • https://emojipedia.org/emoji-zwj-sequence: Emojipedia lists all emojis, including ZWJ composed emojis, e.g. with different skin tone and hair.
        • http://www.sizecoding.org: SizeCoding.org is a wiki dedicated to the art of creating very tiny programs for most popular types of CPUs
        • http://www.ritsumei.ac.jp/~akitaoka/index-e.html: Optical Illusions: Things don't always make sense. It's good to be open to the possibility that you are wrong, even when you are certain you are right.
        • https://nautil.us/blog/12-mind_bending-perceptual-illusions: 12 Mind-Bending Perceptual Illusions. Some of these are insane. #2 shouts so many things about how flawed the human mind is and how important science is.
        • https://www.physics.smu.edu/pseudo: The Scientific Method - Critical and Creative Thinking (Debunking Pseudoscience) lots of great reading references in the bottom section
        • http://www.brendangregg.com/usemethod.html: USE method: \"The Utilization Saturation and Errors (USE) Method is a methodology for analyzing the performance of any system.\"
        • https://martinfowler.com/articles/developer-effectiveness.html: Maximizing Developer Effectiveness
        • http://sl4.org/crocker.html: Crocker's Rules: \"other people are allowed to optimize their messages for information, not for being nice to you.\"
        • https://mkhan45.github.io/CalcuLaTeX-Web: CalcuLaTeX: Online calculator
        • https://slack.engineering/a-faster-smarter-quick-switcher: A faster, smarter Quick Switcher: how slack implemented frecency in their cmd-k interface
        • https://endoflife.date: Easy reference for when software will go EOL
        • https://serr.disabilityrightsca.org: \"The Federal and California special education laws give eligible students with disabilities the right to receive a free appropriate public education in the least restrictive environment. This manual explains how to become eligible for special education, how to request and advocate for special education services, and what to do if you disagree with school districts.\"
        • https://runyourown.social: How to run a small social network site for your friends
        • https://www.jamesrobertwatson.com: \"300 essays about design, culture, and Jim Watson.\"
        • http://blog.timhutt.co.uk/fast-inverse-square-root: Interactive fast inverse-square analysis.
        • https://wiki.c2.com/?MakeItWorkMakeItRightMakeItFast
        • https://en.wikipedia.org/wiki/SOLID: \"In software engineering, SOLID is a mnemonic acronym for five design principles intended to make software designs more understandable, flexible, and maintainable.\"
        • https://ciechanow.ski/internal-combustion-engine: Interactive animated explanation of an internal combustion engine and its components.
        • https://ciechanow.ski/mechanical-watch: Interactive animated explanation of mechanical watch design. (This blog is full of great interactive content.)
        • https://hbr.org/2022/05/managers-what-are-you-doing-about-change-exhaustion
        • https://goodresearch.dev: The Good Research Code Handbook
        • https://en.wikipedia.org/wiki/Gartner_hype_cycle
        • https://mrogalski.eu/ansi-art
        • https://tailscale.com/kb/1114/pi-hole: How to route DNS to your self-hosted pi-hole using Tailscale VPN
        • https://minitokyo3d.com
        • https://maggieappleton.com/folk-interfaces
        • https://slsa.dev: a security framework, a check-list of standards and controls to prevent tampering, improve integrity, and secure packages and infrastructure in your projects, businesses or enterprises.
        • https://brutalist-web.design
        • https://fs.blog/chestertons-fence
        • https://www.jefftk.com/p/accidentally-load-bearing: further thoughts about chesterton's fence.
        • https://en.wikipedia.org/wiki/Single_source_of_truth
        • https://lateblt.tripod.com/bit68.txt: What happens when a CPU starts
        • https://en.wikipedia.org/wiki/Expert_system
        • https://www.interaction-design.org/literature/book/the-glossary-of-human-computer-interaction/cognitive-artifacts
        • https://en.wikipedia.org/wiki/Goodhart%27s_law: \"When a measure becomes a target, it ceases to be a good measure.\"
        • https://matt.blwt.io/post/corporate-legibility-for-engineers
        • https://en.wikipedia.org/wiki/Cooperative_principle
        • https://prog21.dadgum.com/80.html: Advice to Aimless, Excited Programmers
        • https://exrx.net: Exercise and fitness website
        • https://cohost.org/mcc/post/178201-the-baseline-scene: Deep dive on the baseline scene in Blade Runner 2049
        • https://shkspr.mobi/blog/2023/06/do-open-source-licences-cover-the-ship-of-theseus
        • https://en.wikipedia.org/wiki/Scunthorpe_problem: \"the unintentional blocking of online content by a spam filter or search engine because their text contains a string (or substring) of letters that appear to have an obscene or otherwise unacceptable meaning.\"
        • https://slatestarcodex.com/2014/07/30/meditations-on-moloch
        • https://en.wikipedia.org/wiki/Law_of_Demeter: \"a design guideline for developing software\"
        • https://skunkledger.substack.com/p/escaping-high-school
        • https://blog.rfox.eu/en/Hardware/Cyberdecks.html
        • https://en.wikipedia.org/wiki/Martha_Mitchell_effect: \"when a medical professional labels a patient's accurate perception of real events as delusional, resulting in misdiagnosis.\"
        • https://en.wikipedia.org/wiki/Principle_of_least_astonishment: \"a system should behave in a way that most users will expect it to behave\"
        • https://en.wikipedia.org/wiki/Ulysses_pact: \"A Ulysses pact or Ulysses contract is a freely made decision that is designed and intended to bind oneself in the future.\"
        • https://youtu.be/rimtaSgGz_4: DEF CON 31 - An Audacious Plan to Halt the Internet's Enshittification - Cory Doctorow
        • https://dgl.cx/2023/09/ansi-terminal-security
        • https://en.wikipedia.org/wiki/Two_Generals%27_Problem
        • https://en.wikipedia.org/wiki/List_of_emerging_technologies
        • https://supabase.com/blog/why-supabase-remote: Reasoning behind a fully remote workforce, and how that culture works.
        • https://catern.com/services.html: \"Write libraries instead of services, where possible\"
        • https://luke.hsiao.dev/blog/housing-documentation: Writing Documentation for Your House
        • https://filiph.net/text/we-need-technology-that-is-less-immersive,-not-more.html
        • https://norvig.com/21-days.html: \"Teach Yourself Programming in Ten Years\"
        • https://bitbytebit.substack.com/p/the-size-of-your-backlog-is-inversely
        • https://ferd.ca/a-distributed-systems-reading-list.html
        • https://sohl-dickstein.github.io/2024/02/12/fractal.html: \"Neural network training makes beautiful fractals\"
        • https://www.pluralsight.com/blog/software-development/programming-naming-conventions-explained: Lots of examples of different variable naming schemes, what they are called, and where they are used.
        • https://effectiviology.com/shirky-principle: \"Institutions Try to Preserve the Problem to Which They Are the Solution\"
        • https://github.com/charlax/professional-programming
        • https://bost.ocks.org/mike/algorithms/: \"Visualizing Algorithms\"
        • https://www.baldurbjarnason.com/2024/facing-reality-in-the-eu-and-tech/
        • https://jacobian.org/2021/jun/8/incorrect-estimates: \"So you messed up. Now what? This is the final part of a series on estimating software project timelines.\"
        • https://the-simulation-strategists.beehiiv.com/p/being-stuck: \"The Illusion of Being Stuck\"
        • https://calculusmadeeasy.org
        • https://vorakl.com/articles/posix: \"A few facts about POSIX\"
        • https://cleankotlin.nl/blog/double-negations: \"Double negatives should not not be avoided\"
        • https://newsletter.posthog.com/p/habits-of-effective-remote-teams
        • https://www.mensurdurakovic.com/hard-to-swallow-truths-they-wont-tell-you-about-software-engineer-job
        • https://www.brightball.com/articles/story-points-are-pointless-measure-queues
        • https://www.bitsandbeing.com/p/leaving-stripe-parting-thoughts
        • https://gwern.net/complement
        • https://www.raptitude.com/2024/08/do-quests-not-goals
        • https://photonlines.substack.com/p/visual-data-structures-cheat-sheet
        • https://jdstillwater.blogspot.com/2012/05/i-put-toaster-in-dishwasher.html
        • https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions: Document architectural decisions using Alexandrian Pattern language text snippets.
        • https://clig.dev: \"Command Line Interface Guidelines\"
        • https://www.atlassian.com/devops/frameworks/dora-metrics: \"DevOps Research and Assessment (DORA) provides a standard set of DevOps metrics used for evaluating process performance and maturity.\"
        • https://en.wikipedia.org/wiki/Tuckman%27s_stages_of_group_development: \"Forming, Storming, Norming, Performing\"
        • https://bitfieldconsulting.com/posts/career: \"Where will you be when you realise that this is where you\u2019ve always wanted to be?\"
        "},{"location":"misc/#kids","title":"Kids","text":"
        • https://hourofcode.com/us/learn: ~1hr coding activities
        • https://scratch.mit.edu: \"Scratch is the world's largest free coding community for kids.\"
        • https://www.redblobgames.com: \"interactive visual explanations of math and algorithms, using motivating examples from computer games.\"
        "},{"location":"misc/#finance","title":"Finance","text":"
        • https://github.com/jlevy/og-equity-compensation
        • https://www.holloway.com/g/equity-compensation
        • https://www.benkuhn.net/optopt: \"Startup options are much better than they look\"
        "},{"location":"mkdocs/","title":"MkDocs","text":"

        \"MkDocs is a fast, simple and downright gorgeous static site generator that's geared towards building project documentation. Documentation source files are written in Markdown, and configured with a single YAML configuration file.\" - http://www.mkdocs.org/

        "},{"location":"mkdocs/#links","title":"Links","text":"
        • https://www.mkdocs.org/user-guide/writing-your-docs/
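
        A minimal sketch of starting a new project (the project name is a placeholder):

        pip install mkdocs\nmkdocs new my-project\ncd my-project\nmkdocs serve  # live preview at http://127.0.0.1:8000\nmkdocs build  # render the static site into site/\n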
        "},{"location":"molecule/","title":"Molecule","text":"

        \"Molecule is designed to aid in the development and testing of Ansible roles.\" - https://molecule.readthedocs.io

        "},{"location":"molecule/#examples","title":"Examples","text":""},{"location":"molecule/#initialize-a-new-role-to-be-tested-in-docker","title":"Initialize a new role to be tested in docker","text":"
        molecule init role ansible-role-whatever --driver-name docker\n

        The above command creates the following directory structure with boilerplate filled in, similar to what you'd expect from cookiecutter.

        ansible-role-whatever/.travis.yml\nansible-role-whatever/.yamllint\nansible-role-whatever/README.md\nansible-role-whatever/defaults/main.yml\nansible-role-whatever/handlers/main.yml\nansible-role-whatever/meta/main.yml\nansible-role-whatever/molecule/default/INSTALL.rst\nansible-role-whatever/molecule/default/converge.yml\nansible-role-whatever/molecule/default/molecule.yml\nansible-role-whatever/molecule/default/verify.yml\nansible-role-whatever/tasks/main.yml\nansible-role-whatever/tests/inventory\nansible-role-whatever/tests/test.yml\nansible-role-whatever/vars/main.yml\n

        Note: in newer versions of molecule this command is not available. It was deliberately removed because role scaffolding can also be done with ansible-galaxy role init blah, though that doesn't cover the molecule test aspects, which need to be created with molecule init scenario.
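
        A sketch of that newer split workflow (the role name is a placeholder, and exact flags vary between molecule versions):

        ansible-galaxy role init ansible-role-whatever\ncd ansible-role-whatever\nmolecule init scenario\n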

        "},{"location":"molecule/#profile-a-molecule-run","title":"Profile a molecule run","text":""},{"location":"molecule/#profiling-molecule-itself","title":"Profiling molecule itself","text":"

        This profiles the molecule run, but not everything is shown due to not everything being python native.

        $ python3 -m cProfile -o ansible-run.cprofile $(which molecule) test\n...normal molecule output scrolls by...\n\n$ python3 -m pstats ansible-run.cprofile\nWelcome to the profile statistics browser.\nansible-run.cprofile% sort cumulative\nansible-run.cprofile% stats 20\nMon Jan 13 08:56:45 2020    ansible-run.cprofile\n         1782927 function calls (1731081 primitive calls) in 145.057 seconds\n   Ordered by: cumulative time\n   List reduced from 6303 to 20 due to restriction <20>\n   ncalls  tottime  percall  cumtime  percall filename:lineno(function)\n   1247/1    0.005    0.000  145.060  145.060 {built-in method builtins.exec}\n        1    0.000    0.000  145.060  145.060 /Users/daniel.hoherd/Library/Python/3.7/bin/molecule:3(<module>)\n        1    0.000    0.000  144.267  144.267 /Users/daniel.hoherd/Library/Python/3.7/lib/python/site-packages/click/core.py:762(__call__)\n...\n
        "},{"location":"molecule/#profiling-testinfra-runs","title":"Profiling testinfra runs","text":"

        Install pytest-profiling, which adds the --profile flag needed in the next step

        pip3 install --user pytest-profiling\n

        Then set up your molecule/scenario/molecule.yml file with the following env contents:

        verifier:\n  name: testinfra\n  env:\n    PYTEST_ADDOPTS: \"--profile\"\n
        "},{"location":"molecule/#see-also","title":"See Also","text":"
        • Ansible
        "},{"location":"molecule/#links","title":"Links","text":"
        • Ansible Tests with Molecule - https://ansible.readthedocs.io/projects/molecule/ / https://www.digitalocean.com/community/tutorials/how-to-test-ansible-roles-with-molecule
        • Molecule sequence of scenario events - https://ansible.readthedocs.io/projects/molecule/configuration/?h=scenario#scenario
        "},{"location":"mongodb/","title":"MongoDB","text":"

        \"MongoDB is a general purpose, document-based, distributed database built for modern application developers and for the cloud era.\" - https://www.mongodb.com

        "},{"location":"mongodb/#tricks-and-usage","title":"Tricks and Usage","text":""},{"location":"mongodb/#start-mongodb-locally-using-docker","title":"Start mongodb locally using docker","text":"
        docker run -d -p 27017:27017 --name mongotest mongo:4.2\n
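
        Then get a shell on it (the container name comes from the command above; the mongo:4.2 image ships the legacy mongo shell):

        docker exec -it mongotest mongo\n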
        "},{"location":"mongodb/#see-also","title":"See Also","text":"
        • robo-3t: mongodb GUI (brew cask install robo-3t)
        • https://hub.docker.com/_/mongo
        "},{"location":"mqtt/","title":"mqtt","text":"

        'MQTT is a machine-to-machine (M2M)/\"Internet of Things\" connectivity protocol.' - http://mqtt.org/

        "},{"location":"mutt/","title":"mutt","text":"

        CLI e-mail client

        "},{"location":"mutt/#usage","title":"Usage","text":"

        http://www.mutt.org/doc/manual/manual-2.html

        j or Down       next-entry      move to the next entry\nk or Up         previous-entry  move to the previous entry\nz or PageDn     page-down       go to the next page\nZ or PageUp     page-up         go to the previous page\n= or Home       first-entry     jump to the first entry\n* or End        last-entry      jump to the last entry\nq               quit            exit the current menu\n?               help            list all keybindings for the current menu\n
        "},{"location":"mutt/#message-deletion","title":"Message Deletion","text":"

        http://www.sendmail.org/~ca/email/mutt/manual-4.html

        • Delete e-mails older than 2012-12-01: [shift-d] ~d 1/12/12-1/1/1 # D/M/Y. This will only delete back to 2001-01-01.
        • Delete messages where the body matches a search: [shift-d] ~b search\\ pattern
        • Delete messages where the subject matches a search: [shift-d] ~s search\\ pattern
        • Delete messages older than one month: [shift-d] ~d >1m
        • Delete messages older than 14 days: [shift-d] ~d >14d
        "},{"location":"mutt/#links","title":"Links","text":"
        • https://srobb.net/mutt.html: Not actually a quick guide.
        "},{"location":"myrepos/","title":"myrepos","text":"

        \"You have a lot of version control repositories. Sometimes you want to update them all at once. Or push out all your local changes. You use special command lines in some repositories to implement specific workflows. Myrepos provides a mr command, which is a tool to manage all your version control repositories.\" -- http://myrepos.branchable.com/

        "},{"location":"myrepos/#usage-examples","title":"Usage Examples","text":""},{"location":"myrepos/#register-a-bunch-of-repos","title":"Register a bunch of repos","text":"
        for repo in ~/code/* ; do\n  mr register \"$repo\"\ndone\n
        "},{"location":"myrepos/#update-all-of-your-registered-repos","title":"Update all of your registered repos","text":"
        mr up\n
        "},{"location":"mysql/","title":"MySQL","text":"

        \"MySQL is an open-source relational database management system. Its name is a combination of \"My\", the name of co-founder Michael Widenius's daughter, and \"SQL\", the abbreviation for Structured Query Language. The MySQL development project has made its source code available under the terms of the GNU General Public License, as well as under a variety of proprietary agreements. MySQL was owned and sponsored by a single for-profit firm, the Swedish company MySQL AB, and is now owned by Oracle Corporation.\" - https://en.wikipedia.org/wiki/MySQL

        "},{"location":"mysql/#examples","title":"Examples","text":""},{"location":"mysql/#show-variables-of-the-running-server","title":"Show variables of the running server","text":"
        mysqladmin variables\n
        "},{"location":"mysql/#enable-bin-logging","title":"Enable bin logging","text":"

        Edit /etc/my.cnf:

        log-bin=/var/lib/mysql/mysql-bin\n
        "},{"location":"mysql/#show-how-a-table-was-created","title":"Show how a table was created","text":"
        SHOW CREATE TABLE table_name \\G\n
        "},{"location":"mysql/#create-a-table","title":"Create a table","text":"
        CREATE TABLE photo_sizes (\n  `photo_id` char(32) NOT NULL,\n  `format` mediumtext,\n  `width` mediumtext,\n  `height` mediumtext,\n  `source` mediumtext,\n  `url` mediumtext,\n  PRIMARY KEY(`photo_id`)\n) ;\n
        "},{"location":"mysql/#create-a-table-with-multiple-columns-as-the-primary-key","title":"Create a table with multiple columns as the primary key","text":"
        CREATE TABLE `photo_sizes` (\n  `photo_id` char(32) NOT NULL,\n  `format` char(32) NOT NULL DEFAULT '',\n  `width` mediumtext,\n  `height` mediumtext,\n  `source` mediumtext,\n  `url` mediumtext,\n  PRIMARY KEY (`photo_id`,`format`)\n) ENGINE=MyISAM DEFAULT CHARSET=latin1\n
        "},{"location":"mysql/#show-what-processes-are-running","title":"Show what processes are running","text":"
        show processlist;\n
        "},{"location":"mysql/#dump-databases-to-sql-files","title":"Dump databases to sql files","text":"

        All databases

        mysqldump -u root -phunter2 --all-databases | gzip -9 > ~/$(date +%F-%H%M).sql.gz\n

        Or just a single database

        mysqldump -u root -phunter2 my_favorite_db | gzip -9 > ~/my_favorite_db-$(date +%F-%H%M).sql.gz\n
        "},{"location":"mysql/#duplicate-a-database","title":"Duplicate a database","text":"
        sudo mysqldump -v mogilefs | sudo mysql -D mogilefs_sjc\n
        "},{"location":"mysql/#dump-the-schema-of-a-database-with-no-actual-data","title":"Dump the schema of a database with no actual data","text":"
        sudo mysqldump --no-data dbname > schema.sql\n
        "},{"location":"mysql/#show-privileges","title":"Show privileges","text":"
        show GRANTS ;\n
        "},{"location":"mysql/#create-a-new-user","title":"Create a new user","text":"
        CREATE USER 'a_new_user'@'10.0.5.%' IDENTIFIED BY 'the_user_password';\nGRANT ALL PRIVILEGES ON some_database.* TO 'a_new_user'@'10.0.5.%' WITH GRANT OPTION;\n
        "},{"location":"mysql/#delete-a-user","title":"Delete a user","text":"
        DELETE from mysql.user where user = 'user_name';\n
        "},{"location":"mysql/#grant-privileges","title":"Grant Privileges","text":"
        GRANT ALL ON database.* TO 'newuser'@'localhost';\n
        "},{"location":"mysql/#change-root-password","title":"Change root password","text":"
        /usr/bin/mysqladmin -u root password 'new-password'\n/usr/bin/mysqladmin -u root -h hostname password 'new-password'\n

        or...

        UPDATE mysql.user\n  SET Password=PASSWORD('hunter2')\n  WHERE User='leroy_jenkins'\n  AND Host='localhost' ;\n
        "},{"location":"mysql/#create-statements","title":"Create statements","text":""},{"location":"mysql/#create-an-index-on-table-images-for-column-rating_count","title":"Create an index on table images for column rating_count","text":"
        create index rating_count on images (rating_count) ;\n
        "},{"location":"mysql/#drop-an-index-from-a-table","title":"Drop an index from a table","text":"
        drop index rating_count on images ;\n
        "},{"location":"mysql/#table-alters","title":"Table Alters","text":""},{"location":"mysql/#add-a-column","title":"Add a column","text":"
        alter table flixplor add o_width char(12);\n
        "},{"location":"mysql/#drop-a-column","title":"Drop a column","text":"
        alter table flixplor drop column o_width;\n
        "},{"location":"mysql/#change-the-type-of-a-column","title":"Change the type of a column","text":"
        alter table flixplor modify o_height mediumint ;\n
        "},{"location":"mysql/#add-a-current-timestamp-column","title":"Add a current timestamp column","text":"
        alter table images add last_updated timestamp not null default current_timestamp on update current_timestamp;\n
        "},{"location":"mysql/#change-the-table-engine-to-innodb","title":"Change the table engine to innodb","text":"
        ALTER TABLE images ENGINE=INNODB;\n
        "},{"location":"mysql/#change-a-tables-encoding","title":"Change a table's encoding","text":"
        alter table raw_flickr_data CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci ;\n
        "},{"location":"mysql/#table-inserts","title":"Table Inserts","text":""},{"location":"mysql/#add-a-record","title":"Add a record","text":""},{"location":"mysql/#table-updates","title":"Table Updates","text":""},{"location":"mysql/#update-if-key-exists","title":"Update if key exists","text":"

        For tables with unique keys, a plain insert fails when the key already exists, so you need to have an on duplicate key section in your statement.

        INSERT INTO photo_sizes (\n  photo_id,\n  format,\n  height,\n  width,\n  url,\n  source\n) values (\n  '32704962848',\n  'Medium 640',\n  '427',\n  '640',\n  'https://www.flickr.com/photos/warzauwynn/32704962848/sizes/z/',\n  'https://farm5.staticflickr.com/4855/32704962848_3a96b4c635_z.jpg'\n) ON DUPLICATE KEY UPDATE\n  height='427',\n  width='640',\n  url='https://www.flickr.com/photos/warzauwynn/32704962848/sizes/z/',\n  source='https://farm5.staticflickr.com/4855/32704962848_3a96b4c635_z.jpg'\n
        • http://dev.mysql.com/doc/refman/5.0/en/insert-on-duplicate.html
        "},{"location":"mysql/#update-a-datetime-column-with-a-random-datetime","title":"Update a datetime column with a random datetime","text":"
        UPDATE flixplor\nSET last_retrieval = subtime(\n  concat_ws(' ','2019-01-01' - interval rand() * 10000 day ,'00:00:00'), -- create a random YYYY-MM-DD within 10k days of given datetime\n  sec_to_time(floor(0 + (rand() * 86401))) -- create a random HH:MM:SS within a 24 hour period\n)\nWHERE last_retrieval < '2019-01-01 00:00:00';\n
        "},{"location":"mysql/#table-selects","title":"Table Selects","text":""},{"location":"mysql/#select-values-and-dont-show-duplicates","title":"Select values and don't show duplicates","text":"
        SELECT col from servers group by col ;\n
        "},{"location":"mysql/#select-photo_id-and-discard-duplicates-uniq","title":"Select photo_id and discard duplicates (uniq)","text":"
        SELECT photo_id from photo_sizes group by photo_id ;\n
        "},{"location":"mysql/#select-and-count-unique-pairs-of-columns","title":"Select and count unique pairs of columns","text":"
        SELECT model, unit, count(*) as n from servers group by model, unit having n > 1 order by model asc ;\n
        "},{"location":"mysql/#select-the-count-of-rows-in-a-table","title":"Select the count of rows in a table","text":"
        SELECT count(*) from flixplor where o_height > 100 ;\n
        "},{"location":"mysql/#do-some-math-to-create-a-new-column-during-a-select","title":"Do some math to create a new column during a select","text":"
        SELECT photo_id,last_retrieval,o_height,o_width,(o_height * o_width) as pixels from flixplor\nwhere last_reposted < from_unixtime('1384268667') or last_reposted is NULL\norder by (o_height * o_width) limit 10 ;\n
        "},{"location":"mysql/#transform-datetime-into-a-date-diff","title":"Transform datetime into a date diff","text":"

        This selects the number of hours since the given datestamp instead of the datestamp itself.

        SELECT TIMESTAMPDIFF(HOUR, date_taken, NOW()) from photos ;\n

        See also DATEDIFF.

        "},{"location":"mysql/#statement-explanations","title":"Statement explanations","text":"

        The EXPLAIN statement can give you additional info about how complex your statement is.

        "},{"location":"mysql/#explain-select","title":"Explain select","text":"
        mysql> explain SELECT *,(rating_sum / rating_count) as average from images where (rating_sum / rating_count) > 20 or rating_count=0 ORDER BY RAND() LIMIT 1 ;\n+----+-------------+--------+------+---------------+------+---------+------+--------+----------------------------------------------+\n| id | select_type | table  | type | possible_keys | key  | key_len | ref  | rows   | Extra                                        |\n+----+-------------+--------+------+---------------+------+---------+------+--------+----------------------------------------------+\n|  1 | SIMPLE      | images | ALL  | rating_count  | NULL | NULL    | NULL | 301937 | Using where; Using temporary; Using filesort |\n+----+-------------+--------+------+---------------+------+---------+------+--------+----------------------------------------------+\n1 row in set (0.00 sec)\n
        "},{"location":"mysql/#misc","title":"Misc","text":"
        • Complete statement with \\G for different output format
        • ERROR 1045 (28000) may be caused by invalid hostname in connect command. Replace the --host token with the full hostname of the db server. Or, restart mysql and try again.
        "},{"location":"mysql/#recommended-reading","title":"Recommended reading","text":"
        • MySQL (5th Edition) (Developer's Library)
        • High Performance MySQL: Optimization, Backups, Replication, and More
        "},{"location":"mysql/#see-also","title":"See Also","text":"
        • http://www.sqlalchemy.org/ - ORM, better for abstracting database in code
        • http://www.mycli.net/ - A better CLI for MySQL
        • https://modern-sql.com/blog/2018-04/mysql-8.0 - Good comparison of modern (as of 2018) SQL options
        "},{"location":"namei/","title":"namei","text":"

        \"follow a pathname until a terminal point is found\" - man namei

        "},{"location":"namei/#examples","title":"Examples","text":""},{"location":"namei/#simple-usage","title":"Simple usage","text":"
        ## namei /etc/systemd/system/multi-user.target.wants/ssh.service\nf: /etc/systemd/system/multi-user.target.wants/ssh.service\n d /\n d etc\n d systemd\n d system\n d multi-user.target.wants\n l ssh.service -> /lib/systemd/system/ssh.service\n   d /\n   d lib\n   d systemd\n   d system\n   - ssh.service\n
        "},{"location":"namei/#show-permissions-of-all-entries","title":"Show permissions of all entries","text":"
        ## namei -l /etc/systemd/system/multi-user.target.wants/ssh.service\nf: /etc/systemd/system/multi-user.target.wants/ssh.service\ndrwxr-xr-x root root /\ndrwxr-xr-x root root etc\ndrwxr-xr-x root root systemd\ndrwxr-xr-x root root system\ndrwxr-xr-x root root multi-user.target.wants\nlrwxrwxrwx root root ssh.service -> /lib/systemd/system/ssh.service\ndrwxr-xr-x root root   /\ndrwxr-xr-x root root   lib\ndrwxr-xr-x root root   systemd\ndrwxr-xr-x root root   system\n-rw-r--r-- root root   ssh.service\n
        "},{"location":"ncftp/","title":"ncftp","text":"

        \"NcFTP Client is a set of FREE application programs implementing the File Transfer Protocol. ... The program has been in service on UNIX systems since 1991 and is a popular alternative to the standard FTP program, /usr/bin/ftp.\" - https://www.ncftp.com/ncftp/

        "},{"location":"ncftp/#examples","title":"Examples","text":""},{"location":"ncftp/#connect-to-a-non-standard-port","title":"Connect to a non-standard port","text":"
        ncftp ftp://10.8.5.103:5000\n
        "},{"location":"ncftp/#recursively-put-a-directory","title":"Recursively put a directory","text":"
        put -r local_dir\n
        "},{"location":"ncftp/#recursively-get-a-directory","title":"Recursively get a directory","text":"
        get -T -R remote_dir\n

        Note: Without the -T option you may run into the error:

        tar: This does not look like a tar archive\ntar: Exiting with failure status due to previous errors\n
        "},{"location":"ncftp/#recursively-delete-a-remote-directory","title":"Recursively delete a remote directory","text":"

        This does not always work.

        rm -r remote_dir\n
        "},{"location":"neopixel/","title":"Adafruit Neopixel","text":""},{"location":"neopixel/#hardware","title":"Hardware","text":"

        I've used the following devices together with success:

        • https://www.amazon.com/gp/product/B01AG923EU/: Alitove WS2811 neopixel strands that I've used with success on Raspberry Pi and ESP8266.
        • https://www.amazon.com/gp/product/B081CSJV2V/: ESP8266 that I've used with the above Alitove light strands and WLED custom firmware for really simple awesome LEDs.
        "},{"location":"neopixel/#links","title":"Links","text":"
        • https://github.com/Aircoookie/WLED / https://kno.wled.ge: Custom microcontroller firmware for neopixel integration. This is by far the easiest way to do Neopixels.
        • https://learn.adafruit.com/adafruit-neopixel-uberguide
        • https://www.adafruit.com/category/168
        • https://learn.adafruit.com/neopixels-on-raspberry-pi/python-usage
        • https://docs.micropython.org/en/latest/esp8266/tutorial/neopixel.html
        • https://github.com/danielhoherd/stranger_things_lights: Stranger Things alphabet neopixel project for Raspberry Pi
        • https://www.youtube.com/watch?v=ciaFar8nfHc: Custom 16 x 16 x 16 (4096) neopixel cube
        "},{"location":"netgear/","title":"NETGEAR","text":""},{"location":"netgear/#netgear-r7000","title":"Netgear R7000","text":""},{"location":"netgear/#dd-wrt","title":"DD-WRT","text":"
        • https://www.myopenrouter.com/downloads/dd-wrt-r7000
        • http://www.desipro.de/ddwrt/K3-AC-Arm/

        See Also: dd-wrt

        "},{"location":"netgear/#netgear-gss116e","title":"Netgear GSS116E","text":"
        • https://www.netgear.com/support/product/GSS116E
        "},{"location":"netgear/#netgear-m4300-8x8f","title":"Netgear M4300-8X8F","text":"

        \"Stackable Managed Switch with 16x10G including 8x10GBASE-T and 8xSFP+ Layer 3\"

        • https://www.netgear.com/support/product/M4300-8X8F
        "},{"location":"netgear/#arlo","title":"Arlo","text":""},{"location":"netgear/#pros","title":"Pros","text":"
        • Completely wireless solution available but not required.
        "},{"location":"netgear/#cons","title":"Cons","text":"
        • Requires internet access to interact with, even for cameras that are accessible on the same LAN.
        "},{"location":"nethogs/","title":"nethogs","text":"

        \"Linux 'net top' tool\" - https://github.com/raboof/nethogs

        Nethogs shows you how much bandwidth each process (PID) has used or is using.
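
        Basic usage, which needs root; the interface argument is optional, and eth0 here is an assumption:

        sudo nethogs eth0\n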

        "},{"location":"nethogs/#see-also","title":"See also","text":"
        • Top variant list
        "},{"location":"networking/","title":"networking","text":""},{"location":"networking/#links","title":"Links","text":"
        • https://en.wikipedia.org/wiki/Reserved_IP_addresses
        • https://wiki.debian.org/BridgeNetworkConnectionsProxyArp
        • https://routersecurity.org
        • https://github.com/apenwarr/blip: Web based graph of network latency, hosted at https://gfblip.appspot.com
        • https://www.cloudflare.com/learning/cdn/glossary/reverse-proxy
        "},{"location":"networking/#see-also","title":"See also","text":"
        • bind - DNS server
        • ip command for linux
        • iperf - network performance testing
        • iptables - linux firewall
        • linksys - soho network hardware vendor
        • netgear - network hardware vendor
        • networksetup - Mac OSX configuration tool for network settings in System Preferences
        • ntop - network top
        • OSI model - The Open Systems Interconnection model (OSI model) is a conceptual model that characterizes and standardizes the communication functions of a telecommunication or computing system without regard to its underlying internal structure and technology.
        • pac - dynamic proxy configuration
        • procurve - HP managed networking
        "},{"location":"networksetup/","title":"networksetup","text":"

        \"networksetup -- configuration tool for network settings in System Preferences.\" - man networksetup

        networksetup is a standard tool on MacOS

        "},{"location":"networksetup/#examples","title":"Examples","text":""},{"location":"networksetup/#list-all-network-services","title":"List all network services","text":"

        Network Services are not the same as hardware devices.

        $ networksetup -listallnetworkservices\nAn asterisk (*) denotes that a network service is disabled.\nEthernet Adapter (en4)\nWi-Fi\nThunderbolt Bridge\n$ networksetup -listnetworkserviceorder\nAn asterisk (*) denotes that a network service is disabled.\n(1) Ethernet Adapter (en4)\n(Hardware Port: Thunderbolt Ethernet Slot 0, Device: en4)\n\n(2) Wi-Fi\n(Hardware Port: Wi-Fi, Device: en0)\n\n(3) Thunderbolt Bridge\n(Hardware Port: Thunderbolt Bridge, Device: bridge0)\n
        "},{"location":"networksetup/#modify-dns-for-a-device","title":"Modify DNS for a device","text":"

        Passing the literal word Empty to clear a setting is pretty unintuitive. The same convention is used in a few other places in networksetup.

        $ networksetup -getdnsservers Wi-Fi\nThere aren't any DNS Servers set on Wi-Fi.\n$ networksetup -setdnsservers Wi-Fi 8.8.8.8\n$ networksetup -getdnsservers Wi-Fi\n8.8.8.8\n$ networksetup -setdnsservers Wi-Fi Empty\n$ networksetup -getdnsservers Wi-Fi\nThere aren't any DNS Servers set on Wi-Fi.\n
        "},{"location":"networksetup/#show-info-for-the-device-named-wi-fi","title":"Show info for the device named Wi-Fi","text":"
        networksetup -getinfo \"Wi-Fi\"\n
        "},{"location":"networksetup/#show-all-connected-hardware-ports","title":"Show all connected hardware ports","text":"
        networksetup -listallhardwareports\n
        "},{"location":"networksetup/#show-all-search-domains","title":"Show all search domains","text":"
        networksetup -listallnetworkservices |\n  tail -n +2 |\n  xargs -I :: networksetup -getsearchdomains \"::\"\n
        "},{"location":"networksetup/#create-a-bunch-of-vlan-interfaces","title":"Create a bunch of VLAN interfaces","text":"
        for X in {1..32} ; do\n  sudo networksetup -createVLAN \"vlan${X}\" en3 \"${X}\" ;\ndone ;\n
        "},{"location":"networksetup/#delete-a-bunch-of-vlan-interfaces","title":"Delete a bunch of VLAN interfaces","text":"
        for X in {1..32} ; do\n  sudo networksetup -deleteVLAN \"vlan${X}\" en3 \"${X}\" ;\ndone ;\n
        "},{"location":"nfc/","title":"NFC","text":"

        \"Near-field communication (NFC) is a set of communication protocols that enables communication between two electronic devices over a distance of 4 centimetres (1.6 in) or less.\" - https://en.wikipedia.org/wiki/Near-field_communication

        "},{"location":"nfc/#mifare","title":"Mifare","text":"
        • FM11RF08S universal backdoor key: A396EFA4E24F via https://eprint.iacr.org/2024/1275.pdf
        "},{"location":"nfc/#links","title":"Links","text":"
        • https://gototags.com/nfc/standards/iso-14443
        • https://nfc-tools.github.io
        • https://docs.flipper.net/nfc
        "},{"location":"nfc/#see-also","title":"See also","text":"
        • My Nintendo Amiibo notes
        • My Flipper Zero notes
        "},{"location":"nfs/","title":"nfs","text":"

        nfs is the Network File System.

        • Configured in linux at /etc/exports
        • Great info here: http://nfs.sourceforge.net/
        "},{"location":"nfs/#tips-and-tricks","title":"Tips and Tricks","text":""},{"location":"nfs/#wait-for-network-to-be-online-before-mounting","title":"Wait for network to be online before mounting","text":"

        If you are using /etc/fstab for your nfs mounts, you may run into a race condition where the network target comes online and allows NFS mounts to continue, but DHCP has not yet completed. This causes the NFS mounts to fail.

        To fix this race condition, enable systemd-networkd-wait-online.service

        sudo systemctl enable systemd-networkd-wait-online.service\n

        Then edit your /etc/fstab entry to rely on that target.

        192.168.0.99:/share/media  /mnt/shared-media  nfs  x-systemd.requires=network-online.target, ... the_rest_of_the_options\n

        This solution so far works, but has the negative side effect of making boot take longer due to waiting for dhclient requests to time out before continuing. On Debian 12, this adds 2 minutes to the total boot time shown by systemd-analyze plot > boot-$(date +%F).svg.

        If you know the exact interface your mount points rely on, then you can tailor the wait-online.service to only wait for that one interface:

        sudo systemctl edit systemd-networkd-wait-online.service\n

        Then add the following section to the correct location as directed by the comments in the editor window:

        [Service]\nExecStart=\nExecStart=/lib/systemd/systemd-networkd-wait-online --ipv4 --interface=your_interface_name\n

        The reason there is an empty ExecStart= is that this is how systemd is instructed to empty out the previous assignments instead of appending to them. This works with other options too. More info here: https://www.freedesktop.org/software/systemd/man/systemd.service.html

        "},{"location":"nfs/#regarding-mount-points-within-shares","title":"Regarding mount points within shares","text":"

        If you have a mount point within an NFS share, you must have a separate entry in your exports file that sets the permissions of this mount point. Currently OS X has a problem with this, but officially this is the way to do it.
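
        For example, a sketch of /etc/exports with the nested mount point exported separately (paths and options borrowed from the other examples on this page):

        /z4         *.local(rw,async,no_subtree_check)\n/z4/iTunes  *.local(rw,async,no_subtree_check)\n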

        "},{"location":"nfs/#show-hosts-that-are-connected-to-this-nfs-server","title":"Show hosts that are connected to this NFS server","text":"
        showmount\n
        "},{"location":"nfs/#show-what-hosts-are-using-what-exports","title":"Show what hosts are using what exports","text":"
        showmount -a\n
        "},{"location":"nfs/#show-exported-directories","title":"Show exported directories","text":"
        showmount -e\n
        "},{"location":"nfs/#show-directories-in-use-by-nfs","title":"Show directories in use by NFS","text":"
        showmount -d\n
        "},{"location":"nfs/#add-an-nfs-mount-to-fstab","title":"Add an NFS mount to fstab","text":"
        opal:/z4  /mnt/z4   nfs  rsize=8192,wsize=8192,timeo=14,intr\n
        "},{"location":"nfs/#linux-tips-and-tricks","title":"Linux Tips and Tricks","text":""},{"location":"nfs/#show-which-versions-of-nfs-your-nfs-server-supports","title":"Show which versions of NFS your NFS server supports","text":"
        rpcinfo -p\n
        "},{"location":"nfs/#allow-an-os-x-client-to-mount-nfs4-nested-zfs-data-sets","title":"Allow an OS X client to mount nfs4 nested zfs data sets","text":"

        OS X has problems with the privileged port default requirement in nfs4, so the insecure option is required.

        The nohide option allows you to mount nested zfs datasets, instead of requiring a separate export for each dataset.

        /z4 *.local(rw,async,no_subtree_check,insecure,nohide)\n
        "},{"location":"nfs/#os-x-tips-and-tricks","title":"OS X Tips and Tricks","text":""},{"location":"nfs/#create-persistent-nfs-mount-in-os-x-108","title":"Create persistent NFS mount in OS X 10.8","text":"

        This is not bulletproof. Modern OS X 10.9+ versions are switching away from NFS to CIFS. The NFS client on OS X is pretty weak. For instance, it might crash your machine if the share has 0 bytes free but is mounted RW. Use at your own risk.

        sudo mkdir /mnt # OS X doesn't like you playing with /Volumes, it may delete your dirs\nsudo dscl . -create /Mounts/z4\nsudo dscl . -create /Mounts/z4 VFSLinkDir /mnt/z4\nsudo dscl . -create /Mounts/z4 VFSOpts resvport rw nosuid\nsudo dscl . -create /Mounts/z4 VFSType nfs\nsudo dscl . -create /Mounts/z4 RecordName opal:/z4\nsudo dscl . -create /Mounts/iTunes\nsudo dscl . -create /Mounts/iTunes VFSLinkDir /mnt/z4/iTunes\nsudo dscl . -create /Mounts/iTunes VFSOpts resvport rw nosuid\nsudo dscl . -create /Mounts/iTunes VFSType nfs\nsudo dscl . -create /Mounts/iTunes RecordName opal:/z4/iTunes\nsudo dscl . -read /Mounts/opal:/z4\nsudo dscl . -read /Mounts/opal:/z4/iTunes\nsudo dscl . -list /Mounts\n\nsudo dscl . -delete /Mounts opal:/z4/iTunes\n
        "},{"location":"ngrok/","title":"ngrok","text":"

        \"ngrok exposes local networked services behinds NATs and firewalls to the public internet over a secure tunnel. Share local websites, build/test webhook consumers and self-host personal services.\" - ngrok --help

        "},{"location":"ngrok/#examples","title":"Examples","text":""},{"location":"ngrok/#simple-http-usage","title":"Simple http usage","text":"
        ngrok http 8000\n
        "},{"location":"ngrok/#host-a-helm-repository","title":"Host a helm repository","text":""},{"location":"ngrok/#create-a-helm-package","title":"Create a helm package","text":"

        This will create a tgz of your chart and its dependencies in your current directory

        helm package /path/to/your-chart --dependency-update\n
        "},{"location":"ngrok/#create-a-helm-repository-index","title":"Create a helm repository index","text":"
        helm repo index .\n
        "},{"location":"ngrok/#serve-this-directory-with-the-index-and-tgz-file-over-http","title":"Serve this directory with the index and tgz file over http","text":"
        python3 -m http.server\n
        "},{"location":"ngrok/#expose-the-http-server-to-the-internet","title":"Expose the http server to the internet","text":"
        ngrok http 8000\n
        "},{"location":"ngrok/#use-it","title":"Use it","text":"
        helm repo add super-awesome-test-repo \"${your_ngrok_url}\"\nhelm repo update\nhelm search repo super-awesome-test-repo\n
        "},{"location":"ngrok/#links","title":"Links","text":"
        • https://ngrok.com/docs
        "},{"location":"nintendo-3ds/","title":"Nintendo 3DS","text":""},{"location":"nintendo-3ds/#lego-3ds-notes","title":"Lego + 3DS notes","text":"
        • 3DSXL screen is ~ 9x11 Lego units
        • 3DSXL outside is 20x12 Lego units
        "},{"location":"nintendo-3ds/#emulation","title":"Emulation","text":"
        • Citra 3DS emulator
        • Dumping cartridges
        "},{"location":"nintendo-3ds/#hacking-and-cfw","title":"Hacking and CFW","text":"
        • https://3ds.hacks.guide/
        • https://github.com/AuroraWright/Luma3DS
        • https://www.3dbrew.org/wiki/Homebrew_Applications
        • https://zoogie.github.io/web/34%E2%85%95c3/#/
        • https://web.archive.org/web/20220130042347/https://www.reddit.com/r/3dshacks/comments/6iclr8/a_technical_overview_of_the_3ds_operating_system/: \"A Technical Overview of the 3DS Operating System\"
        • https://youtu.be/ImR-TdDAIJE: How a Terrible Game Cracked the 3DS's Security - Early Days of 3DS Hacking
        • https://www.youtube.com/watch?v=bZczf57HSag: (2015 32c3 talk) 3DS console hacking deep dive
        • https://www.youtube.com/watch?v=8C5cn_Qj0G8: Nintendo Hacking 2016 33c3
        • https://courses.csail.mit.edu/6.857/2019/project/20-Chau-Ko-Tang.pdf: History of Hacking the Nintendo 3DS
        • https://www.copetti.org/writings/consoles/nintendo-3ds: \"Nintendo 3DS Architecture. A practical analysis by Rodrigo Copetti.\"
        • https://pretendo.network: \"Pretendo is a free and open source replacement for Nintendo's servers for both the 3DS and Wii U, allowing online connectivity for all, even after the original servers are discontinued\"
        "},{"location":"nintendo-amiibo/","title":"Nintendo Amiibo","text":"

        Amiibo are NFC figurines that enable in-game features on Nintendo 3DS, Wii U and Switch platforms.

        "},{"location":"nintendo-amiibo/#info","title":"Info","text":"
        • Uses NTAG215 RFID chips.
        "},{"location":"nintendo-amiibo/#links","title":"Links","text":"
        • https://amiibo.wikia.com/wiki/Amiibo_Wiki
        • https://github.com/HiddenRamblings/TagMo - TagMo is an Android app which allows for cloning Amiibos using blank NTAG215 NFC tags.
        • https://nintendo.wikia.com/wiki/List_of_Amiibo_compatible_Games
        • https://pyamiibo.readthedocs.io/ - PyAmiibo helps to read, modify and write dump files of Nintendo Amiibo figures. PyAmiibo is capable of parsing most NTAG properties, as well as some Amiibo data.
        • https://www.amiiboapi.com - An Amiibo database that holds all amiibo information in a single API.
        • https://www.codejunkies.com/powersaves-for-amiibo/
        • https://www.neurohacked.com/how-to-mimic-any-amiibo/ - How to Mimic Any Amiibo
        • https://www.nintendo.com/amiibo/games
        • https://ally.ninja: \"Collect & backup your amiibo\", can write PowerTags.
        • https://www.tagmiibo.com: \"Tagmiibo app creates backup amiibos with NFC 215 tags or devices.\"
        "},{"location":"nintendo-amiibo/#see-also","title":"See also","text":"
        • My NFC notes
        • My Flipper Zero notes
        "},{"location":"nintendo-nes/","title":"Nintendo Entertainment System","text":"

        \"The Nintendo Entertainment System (NES) is an 8-bit third-generation home video game console produced by Nintendo.\" - https://en.wikipedia.org/wiki/Nintendo_Entertainment_System

        "},{"location":"nintendo-nes/#links","title":"Links","text":"
        • https://youtu.be/TPbroUDHG0s: \"Game Development in Eight Bits\" by Kevin Zurawel
        • https://www.famicom.party/book
        "},{"location":"nintendo-switch/","title":"Nintendo Switch","text":""},{"location":"nintendo-switch/#homebrew","title":"Homebrew","text":""},{"location":"nintendo-switch/#combine-multi-part-xci","title":"Combine multi-part xci","text":"

        When dumping games to fat32, you may need to create multi-part xci files. In my experience, you can combine these files by just concatenating them together. For example:

        cat *.xc[0-9] > foo.xci\n4nxci -k prod.keys foo.xci\n
        "},{"location":"nintendo-switch/#create-a-switch-compatible-partition-from-linux","title":"Create a Switch compatible partition from linux","text":"

        Rule #1 is to avoid using exfat at all costs.

        The Switch uses MBR \ud83d\ude44 and has crummy exfat support... The following requires root permissions.

        SD_DISK=/dev/sdZ\nsfdisk \"$SD_DISK\" <<EOF\nlabel: dos\n,\nEOF\nmkfs.exfat \"$SD_DISK\"1\n
        "},{"location":"nintendo-switch/#homebrew-glossary","title":"Homebrew Glossary","text":"

        Most of these were taken from https://github.com/XorTroll/Goldleaf

        • Atmosphere: custom firmware
        • Hekate: custom bootloader
        • NSP (Nintendo Submission Package): It's the official format used by Nintendo to provide installable content from their CDN servers.
        • NRO (Nintendo Relocatable Object): Officially treated as a dynamic library object, similar to DLLs on Windows. These are loaded dynamically by applications at runtime.
        • NCA (Nintendo Content Archive): This format is the base format used by Horizon OS to store content.
        • NACP (Nintendo Application Control Property): This is the format used by Nintendo to store several properties of installed titles, like the title's name, version, author name and other information, like if the title supports screenshots or video captures.
        • XCI: Cartridge dump file
        "},{"location":"nintendo-switch/#homebrew-links","title":"Homebrew links","text":"
        • https://switch.hacks.guide
        • https://github.com/Team-Neptune/DeepSea: Minimal CFW setup, all files included
        • https://github.com/HamletDuFromage/aio-switch-updater: All-in-one switch CFW updater
        • https://webcfw.sdsetup.com/: Online, client-side, web-USB payload injector (requires Chrome-ish browser)
        • https://nh-server.github.io/switch-guide/
        • https://github.com/nh-server/fusee-interfacee-tk: TK payload injector
        • https://github.com/DarkMatterCore/gcdumptool
        • https://github.com/mologie/nxboot: CLI payload injector. 0.2.0 has macOS builds.
        • https://github.com/XorTroll/Goldleaf
        • https://nswdb.com: DB of Nintendo dump metadata. Downloadable as NSWreleases.xml
        • https://switchbrew.github.io/nx-hbl/
        • https://switchtools.sshnuke.net/: ChoiDuJour, which lets you go between Switch firmwares
        • https://www.reddit.com/r/SwitchHaxing/top/?sort=top&t=month
        • https://sigmapatches.su: Sigpatches and other useful homebrew files
        • https://www.cheatslips.com/wiki: How to write cheat codes
        • https://github.com/The-4n/4NXCI: (repo deleted) XCI to NSP converter
        • https://github.com/AtlasNX/Kosmos: (deprecated) All in one guide
        • https://sdsetup.com: (deprecated) Create zip file with with all the needed and desired software and configs to put onto an sd card.
        "},{"location":"nintendo-wii/","title":"Nintendo Wii","text":"

        A gaming system by Nintendo. See also the Dolphin emulator.

        "},{"location":"nintendo-wii/#tips","title":"Tips","text":"
        • In Boot-Mii you can use the power and reset buttons to navigate if you don't have a GC controller.
        "},{"location":"nintendo-wii/#wii-remote-sync","title":"Wii Remote Sync","text":"

        Standard Mode:

        1. Press the Power button on the Wii console to turn it on.
        2. Remove the battery cover on the back of the Wii Remote to be synced. Open the SD Card Slot cover on the front of the Wii console.
        3. Press and release the SYNC button just below the batteries on the Wii Remote; the Player LED on the front of the Wii Remote will blink. While the lights are still blinking, quickly press and release the red SYNC button on the front of the Wii console.
        4. When the Player LED blinking stops and stays lit, the syncing is complete. The LED that is illuminated indicates the player number (1 through 4).
        "},{"location":"nintendo-wii/#hack-notes","title":"Hack notes","text":"
        • IOSes Explained - http://gwht.wikidot.com/ioses-explained
        • Backup Launcher runs ISO - http://wiihacked.com/backup-launcher-v30gamma-download-here-and-how-to-install
        • Wiibrew is the channel for loading home-brew software. - http://wiibrew.org
        • Wii Backup Manager is an app to help manage ISOs and filesystems - http://www.wiibackupmanager.co.uk/
        • Wiimms ISO Tools will modify ISOs and WBFS - http://wit.wiimm.de/
        • A list of WBFS managers - http://wiki.gbatemp.net/wiki/WBFS_Managers
        • Anti-brick and general hacking tutorial - http://www.howtogeek.com/howto/38041/set-up-anti-brick-protection-to-safeguard-and-supercharge-your-wii/
        • https://sites.google.com/site/completesg/backup-launchers/installation - Successfully installed Trucha patched IOS to enable the USB loader
        • https://sites.google.com/site/completesg/cios/hermes-cios - More software that needs to be installed to get USB loaders to work nicely
        • http://www.howtogeek.com/howto/40349/install-a-wii-game-loader-for-easy-backups-and-fast-load-times/ - How to USB Load ISOs on the Wii
        • https://code.google.com/archive/p/nusdownloader/ - NUS Downloader lets you download various official Wii software
        "},{"location":"nintendo-wiiu/","title":"Nintendo Wii U","text":"
        • https://wiiu.hacks.guide/
        • https://wiiu.hacks.guide/#/block-updates
        • https://github.com/GaryOderNichts/udpih: Best jailbreak as of 2023.
        • https://github.com/koolkdev/wfs-tools: Wii-U filesystem tools
        "},{"location":"ntop/","title":"ntop","text":"

        \"High-speed web-based traffic analysis.\" - https://www.ntop.org/

        This isn't a traditional top style tool since it has a web interface. For network top in a TUI, see iftop.

        "},{"location":"ntop/#see-also","title":"See also","text":"
        • Top variant list
        "},{"location":"ntp/","title":"NTP","text":"

        \"Network Time Protocol (NTP) is a networking protocol for clock synchronization between computer systems over packet-switched, variable-latency data networks. In operation since before 1985, NTP is one of the oldest Internet protocols in current use.\" - https://en.wikipedia.org/wiki/Network_Time_Protocol

        "},{"location":"ntp/#links","title":"Links","text":"
        • RFC 5905: Network Time Protocol Version 4: Protocol and Algorithms Specification
        • Understanding and mitigating NTP-based DDoS attacks
        • Google Public NTP: Leap Smear
        • Five different ways to handle leap seconds with NTP
        • The Unix leap second mess
        • ntp.org FAQ: What happens during a Leap Second?
        • The Raspberry Pi as a Stratum-1 NTP Server
        • NTP vs PTP: Network Timing Smackdown!
        "},{"location":"ntp/#ntp-daemon-in-systemd","title":"ntp daemon in systemd","text":"

        Systemd has its own time and date tools that replace classic linux tools like ntpd. See systemd-timesyncd and timedatectl. systemd-timesyncd refuses to start if the ntp package is installed.
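
        For example, checking and enabling time sync on a systemd host (standard timedatectl and systemctl subcommands):

        timedatectl status                  # show current clock and NTP sync state\ntimedatectl set-ntp true            # enable systemd-timesyncd\nsystemctl status systemd-timesyncd  # verify the sync daemon is running\n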

        "},{"location":"ntp/#see-also","title":"See Also","text":"
        • time - Notes on time technologies
        • ptp - Precision Time Protocol
        "},{"location":"nvidia/","title":"nvidia","text":"
        • Verify linux nvidia drivers are installed: nvidia-smi
        • Verify linux nvidia drivers work within docker: docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi
        • Show nvidia container info: nvidia-container-cli -k -d /dev/tty info
        • https://github.com/Syllo/nvtop: GPU top command. Works with more than just Nvidia GPUs.
        "},{"location":"onboarding/","title":"Onboarding","text":"

        Notes about what to do when joining a new team, and what to do to help people who are joining your team.

        "},{"location":"onboarding/#tips","title":"Tips","text":"
        • Try to treat every bad piece of code you find as an opportunity. You were hired to solve problems, and you are going to find problems, so think of those bad pieces of code you find as the things you were hired to find.
        • Try to be charitable when you find bad code. Sometimes people wrote something under immense pressure, under a tight deadline, with poorly defined requirements, or with technology they were unfamiliar with, and they did the best they could at the time.
        • Start taking your own notes. It's much easier to quickly add or modify notes you own and don't have to consult other people about. You can always copy parts of those notes out into shared resources if they are valuable enough for that.
        "},{"location":"onboarding/#links","title":"Links","text":"
        • https://www.simplermachines.com/why-you-need-a-wtf-notebook
        "},{"location":"openvpn/","title":"openvpn","text":"

        \"Your private path to access network resources and services securely\" - https://openvpn.net/

        "},{"location":"openvpn/#tips","title":"Tips","text":""},{"location":"openvpn/#check-status-of-logged-in-clients","title":"Check status of logged in clients","text":"

        kill -USR2 $OPENVPN_PID is the magic that causes the server to output its current client status. This may be logged to a different logfile, so look around if you don't see status.

        killall -USR2 openvpn ; tail /var/log/syslog ;  # killall matches process names, not full paths\n

        There also may be a file called /etc/openvpn/openvpn-status.log with current status. Don't ask me why a status log file is located in /etc/... \ud83d\ude44

        "},{"location":"orbstack/","title":"orbstack","text":"

        \"OrbStack is a fast, light, and simple way to run containers and Linux machines on macOS. It's a supercharged alternative to Docker Desktop and WSL, all in one easy-to-use app.\" - https://docs.orbstack.dev

        OrbStack is like a modern alternative to VirtualBox and Docker in one app, though it only supports linux distros as of 2024-02-13.

        "},{"location":"osquery/","title":"osquery","text":"

        \"SQL powered operating system instrumentation, monitoring, and analytics.\"

        osquery runs locally and allows you to inspect your host using sql queries. Tables exist for a variety of useful data, such as file hashes, process list, last user login, etc.
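
        For example, using the interactive osqueryi shell (the processes and last tables are part of the standard schema):

        osqueryi 'SELECT pid, name, path FROM processes ORDER BY pid LIMIT 5;'\nosqueryi 'SELECT username, time FROM last LIMIT 5;'\n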

        "},{"location":"osquery/#links","title":"Links","text":"
        • https://github.com/facebook/osquery
        • https://osquery.readthedocs.io
        • https://osquery.io/schema/
        • https://github.com/UtahDave/salt-vagrant-demo
        • https://www.digitalocean.com/community/tutorials/how-to-monitor-your-system-security-with-osquery-on-ubuntu-16-04
        • https://kolide.com/fleet - osquery fleet/cluster system
        "},{"location":"outline/","title":"outline","text":"

        \"Journalists need safe access to information to research issues, communicate with sources, and report the news. Outline lets news organizations easily provide their network safer access to the open internet.\" - https://getoutline.org/

        Outline is a self-hosted VPN that is geared toward easy setup.

        • Source code available at https://github.com/jigsaw-code
        "},{"location":"pac/","title":"pac","text":"

        Information about proxy auto-config files.

        "},{"location":"pac/#example-pac-file","title":"Example pac file","text":"

        The following pac file will:

        • Redirect all traffic destined to 192.168.1.0/24 to a proxy running on localhost:47000, but only if we do not have an ip address in that subnet
        • Redirect all traffic destined to 172.16.0.0/16 to a proxy running on localhost:33001
        • All other traffic bypasses the proxy.
        function FindProxyForURL(url, host) {\n  if ((isInNet(host, \"192.168.1.0\", \"255.255.255.0\"))\n  && (! isInNet(myIpAddress(), \"192.168.1.0\", \"255.255.255.0\"))) {\n    return \"SOCKS5 localhost:47000\" ;\n  } else if (isInNet(host, \"172.16.0.0\", \"255.255.0.0\")) {\n    return \"SOCKS5 localhost:33001\" ;\n  } else {\n    return \"DIRECT\" ;\n  }\n}\n
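
        To test a pac file outside of a browser, the pacparser project (linked below) ships a pactester CLI. A sketch, assuming the pac file above is saved as proxy.pac:

        pactester -p proxy.pac -u http://172.16.0.10/   # should print: SOCKS5 localhost:33001\npactester -p proxy.pac -u https://example.com/  # should print: DIRECT\n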
        "},{"location":"pac/#links","title":"Links","text":"
        • http://findproxyforurl.com/official-toolset
        • https://github.com/pacparser/pacparser
        "},{"location":"pandoc/","title":"pandoc","text":"

        Convert between document formats.

        http://pandoc.org/

        "},{"location":"pandoc/#examples","title":"Examples","text":""},{"location":"pandoc/#convert-a-doc-from-mediawiki-to-markdown","title":"Convert a doc from mediawiki to markdown","text":"
        pandoc -f mediawiki -t markdown nfs.mediawiki > nfs.md\n
        "},{"location":"panopticlick/","title":"panopticlick","text":"

        \"Panopticlick will analyze how well your browser and add-ons protect you against online tracking techniques. We\u2019ll also see if your system is uniquely configured\u2014and thus identifiable\u2014even if you are using privacy-protective software.\" - https://panopticlick.eff.org/

        "},{"location":"passwords/","title":"passwords","text":"

        \"A secret word or phrase that must be used to gain admission to something.\" - https://en.oxforddictionaries.com/definition/password

        "},{"location":"passwords/#generation","title":"Generation","text":""},{"location":"passwords/#pwgen","title":"pwgen","text":"
        $ pwgen 12 3\nahZielooC4ei Ielui3ahh9su aiZoa7fioy1o\n
        "},{"location":"passwords/#apg","title":"apg","text":"

        This tool can show you how to pronounce the random passwords it generates:

        $ apg -a 1 -m 6 -n 3 -l\nI[hM@}]t: India-LEFT_BRACKET-hotel-Mike-AT_SIGN-RIGHT_BRACE-RIGHT_BRACKET-tango-COLON\nWoqrJ}R+ps Whiskey-oscar-quebec-romeo-Juliett-RIGHT_BRACE-Romeo-PLUS_SIGN-papa-sierra\nzni6VC3 zulu-november-india-SIX-Victor-Charlie-THREE\n
        "},{"location":"passwords/#links","title":"Links","text":"
        • https://nakedsecurity.sophos.com/2016/08/18/nists-new-password-rules-what-you-need-to-know/
        • https://pages.nist.gov/800-63-3/sp800-63-3.html
        "},{"location":"perl/","title":"perl","text":"

        Practical Extraction and Reporting Language

        "},{"location":"perl/#special-variables","title":"Special Variables","text":"
        • \"That thing\": $_
        • Record Separator: $/
        "},{"location":"perl/#techniques","title":"Techniques","text":""},{"location":"perl/#assign-an-array-to-some-matches","title":"Assign an array to some matches","text":"
        @array_of_matches = ($source_string =~ m/..pattern../g);\n
        "},{"location":"perl/#assign-several-variables-to-some-matches","title":"Assign several variables to some matches","text":"
        my ($num, $a, $t) = ($_ =~ m/([0-9]*)\\. (.*) - (.*)\\.mp3/) ;\n
        "},{"location":"perl/#iterate-a-hash","title":"Iterate a hash","text":"
        while(($key, $value) = each(%$_)){\n    print \"$value is $key\\n\" ;\n}\n
        "},{"location":"perl/#print-out-a-file-with-line-numbers","title":"Print out a file with line numbers","text":"
        cat ~/.bash_history | perl -nle 'print \"$.\\t$_\";'\n

        This should probably be done with nl -ba .bash_history instead.

        "},{"location":"perl/#edit-a-file-in-place","title":"Edit a file in-place","text":"

        To change all instances of \"foo\" to \"bar\":

        perl -i -pe 's/foo/bar/g' filename.txt\n
        "},{"location":"perl/#remove-blank-lines-from-a-file","title":"Remove blank lines from a file","text":"
        perl -pi -e \"s/^\\n//\" file.txt\n
        "},{"location":"perl/#remove-lines-from-a-file-that-match-a-certain-regex","title":"Remove lines from a file that match a certain regex","text":"
        perl -i -pe 'if ($_ =~ m/string to remove/ ){$_ = \"\";}' filename.txt\n
        "},{"location":"perl/#sort-a-line-by-spaces","title":"Sort a line by spaces","text":"

        See bash for a bash-only way

        echo -n \"whiskey tango foxtrot \" \\\n| perl -e '\n  $/=\" \" ;\n  @foo = <STDIN> ;\n  print (sort(@foo)) ;\n  print \"\\n\" ;\n'\n
        "},{"location":"perl/#sort-records-in-a-file-that-are-separated-by-a-blank-line","title":"Sort records in a file that are separated by a blank line","text":"
        ##!/usr/bin/perl\n$/ = \"\\n\\n\" ;\nmy @input = (<STDIN>) ;\n\nmy @sorted = sort { lc($a) cmp lc($b) } @input ;\n\nforeach (@sorted) {\n  if (length($_) > 10) { print \"$_\"; }\n}\n
        "},{"location":"perl/#subtract-two-from-the-last-octet-of-a-mac-address","title":"Subtract two from the last octet of a MAC address","text":"
        for X in 24:b6:fd:ff:b7:f{{a..f},{0..9}} ; do\n  echo -n \"${X} - 2 = \" ;\n  echo ${X} \\\n  | perl -ne '\n    @foo = split(\":\",$_) ;\n    $foo[5] = sprintf(\"%02x\", (hex($foo[5]) - 2)) ;\n    $new = join(\":\",@foo) ;\n    print \"$new\\n\" ;\n  ' ;\ndone ;\n
        "},{"location":"perl/#add-one-to-the-last-octet-of-a-mac-address","title":"Add one to the last octet of a MAC address","text":"
        for X in 24:b6:fd:ff:b7:c{{a..f},{0..9}} ; do\n  echo ${X} \\\n  | perl -ne '\n    @foo = split(\":\",$_) ;\n    $foo[5] = sprintf(\"%02x\", (hex($foo[5]) + 1)) ;\n    $new = join(\":\",@foo) ;\n    print \"$new\\n\";\n  ' ;\ndone ;\n
        "},{"location":"pgp/","title":"pgp","text":"

        \"Pretty Good Privacy (PGP) is an encryption program that provides cryptographic privacy and authentication for data communication. PGP is often used for signing, encrypting, and decrypting texts, e-mails, files, directories, and whole disk partitions and to increase the security of e-mail communications. It was created by Phil Zimmermann in 1991.\" - https://en.wikipedia.org/wiki/Pretty_Good_Privacy

        \"GNU Privacy Guard (GnuPG or GPG) is a free software replacement for Symantec's PGP cryptographic software suite. GnuPG is compliant with RFC 4880, which is the IETF standards track specification of OpenPGP. Modern versions of PGP and Veridis' Filecrypt are interoperable with GnuPG and other OpenPGP-compliant systems.\" - https://en.wikipedia.org/wiki/GNU_Privacy_Guard

        "},{"location":"pgp/#links","title":"Links","text":""},{"location":"pgp/#technology","title":"Technology","text":"
        • The GNU Privacy Guard
        • Creating a new GPG key
        • How to create a PGP/GPG-key free of SHA-1
        "},{"location":"pgp/#web-of-trust","title":"Web of Trust","text":"
        • A draft guide to organizing or participating in a PGP key signing party
        • OpenPGP key paper slip generator
        • PIUS: The PGP Individual UID Signer
        "},{"location":"pgp/#philosophy","title":"Philosophy","text":"
        • Op-ed: I'm throwing in the towel on PGP, and I work in security
        • Op-ed: Why I'm not giving up on PGP
        "},{"location":"philips-hue/","title":"Philips Hue","text":"

        \"Philips Hue is your personal wireless lighting system that lets you easily control your light and create the right ambiance for every moment.\" - https://www2.meethue.com

        "},{"location":"philips-hue/#siri-integration","title":"Siri integration","text":"

        Siri knows the names of all of the X11 colors.

        "},{"location":"philips-hue/#links","title":"Links","text":"
        • Enabling the hidden Wi-Fi radio on the Philips Hue Bridge 2.0: Adventures with 802.11n, ZigBee 802.15.4 and OpenWrt
        • https://arantius.github.io/web-color-wheel: Color wheel of named colors, which can all be used to set your hue light colors using Siri.
        "},{"location":"photography/","title":"photography","text":""},{"location":"photography/#software-links","title":"Software Links","text":"
        • exiftool: File metadata swiss-army knife.
        • https://github.com/photoprism/photoprism: Self hosted photo gallery
        • https://github.com/LibrePhotos/librephotos: Self hosted photo gallery
        • https://damselfly.info: \"Damselfly is a server-based Digital Asset Management system.\"
        • https://photostructure.com: \"Your new home for all your photos & videos\"
        • Ask HN: Alternatives to Google Photos?
        "},{"location":"php/","title":"php","text":"

        The PHP scripting language.

        "},{"location":"php/#code-guidelines","title":"Code Guidelines","text":"
        • The PEAR code guidelines are pretty good - https://pear.php.net/manual/en/standards.php
        • Even better coding standards - https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-1-basic-coding-standard.md
        "},{"location":"php/#examples","title":"Examples","text":""},{"location":"php/#convert-date-formats","title":"Convert date formats","text":"

        This converts a MySQL datetime to a unix epoch timestamp and back:

        $timestamp = strtotime($mysqltime);\necho date(\"Y-m-d H:i:s\", $timestamp);\n

        "},{"location":"php/#run-code-from-cli","title":"Run code from CLI","text":"
        php -r \"phpinfo();\"\n
        "},{"location":"php/#show-php-cli-env-vars","title":"Show php CLI env vars","text":"

        This shows the location of the ini file used for CLI:

        php -i\n
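
        Alternatively, php --ini prints just the configuration file paths without the rest of the phpinfo output:

        php --ini\n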

        "},{"location":"php/#enable-errors","title":"Enable Errors","text":"

        Set display_errors = On in php.ini, or in a php file add:

        error_reporting(E_ALL);\nini_set('display_errors', 1);\n
        "},{"location":"php/#disable-timeout","title":"Disable timeout","text":"
        set_time_limit(0);\nini_set ('max_execution_time', 0);\n
        "},{"location":"php/#random-numbers","title":"Random numbers","text":"
        rand() ; # random int\nrand(1,10) ; # random int between 1 and 10\nrand(100,1000)/100 ; # workaround for generating floats with 2 decimal points\n
        "},{"location":"php/#links","title":"Links","text":"
        • https://aloneonahill.com/blog/if-php-were-british/
        "},{"location":"plex/","title":"plex","text":"

        Plex is a media center system that runs on a variety of platforms including Linux, Roku, macOS, iOS, tvOS, and a variety of smart TVs.

        "},{"location":"plex/#links","title":"Links","text":"
        • https://support.plex.tv/articles/categories/your-media/
        "},{"location":"postgres/","title":"PostgreSQL","text":"

        \"PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance.\" - https://www.postgresql.org

        "},{"location":"postgres/#usage","title":"Usage","text":""},{"location":"postgres/#connect-to-a-database","title":"Connect to a database","text":"
        psql \"postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT:-5432}/${POSTGRES_DB}\"\n
        "},{"location":"postgres/#meta-commands","title":"Meta Commands","text":"

        postgres shell has a lot of meta commands. See https://www.postgresql.org/docs/15/app-psql.html#APP-PSQL-META-COMMANDS for full descriptions.

        • \\l: List databases. Filter like \\l foo*
        • \\c: Connect to database. There are various syntaxes to accomplish this. Here is one: \\c \"host=localhost port=5432 dbname=mydb connect_timeout=10 sslmode=disable\"
        • \\d \"$table_name\": Show full definition for the given table
        • \\d or \\d+: Show extended table description. Show all with \\d+ *
        • \\df: List functions
        • \\di: List indexes
        • \\x: Toggle expanded display. This is the same as \\G in MySQL, separating each record and showing each column as a row formatted as column_name \\| row value.
        "},{"location":"postgres/#show-postgres-version","title":"Show postgres version","text":"
        astronomer_houston=> SELECT version();\n                                          version\n--------------------------------------------------------------------------------------------\n PostgreSQL 9.6.18 on x86_64-pc-linux-gnu, compiled by Debian clang version 10.0.1 , 64-bit\n(1 row)\n
        "},{"location":"postgres/#show-processes","title":"Show processes","text":"

        Each process is one connection to the db. (See How Connections are Established)

        select * from pg_stat_activity ;\n

        or

        select count(*) from pg_stat_activity where usename = 'airflow' ; -- note this is usename, not useRname\n

        or a more refined view

        select pid as process_id,\n       usename as username,\n       datname as database_name,\n       client_addr as client_address,\n       application_name,\n       backend_start,\n       state,\n       state_change\nfrom pg_stat_activity\nwhere state != 'idle' ;\n
        "},{"location":"postgres/#terminate-all-processes-connected-to-a-given-database","title":"Terminate all processes connected to a given database","text":"
        select pg_terminate_backend(pg_stat_activity.pid)\nfrom pg_stat_activity\nwhere pg_stat_activity.datname = 'some_db_name'\n  and pid <> pg_backend_pid();\n
        "},{"location":"postgres/#postgres-in-docker","title":"Postgres in Docker","text":"
        • Official images: https://hub.docker.com/_/postgres

        Some of these syntaxes apply to non-docker interactions too, so long as you remove the docker-isms from them.

        "},{"location":"postgres/#example-docker-compose-file-for-local-development","title":"Example docker-compose file for local development","text":"
        ## https://docs.docker.com/compose/compose-file/\nversion: \"3.7\"\nservices:\n  postgres:\n    # https://hub.docker.com/_/postgres\n    image: \"postgres:latest\"\n    restart: \"always\"\n    env_file: .env # Should contain POSTGRES_DB, POSTGRES_USER, POSTGRES_PASSWORD\n    # Uncomment the 'command' line to enable postgres query logging to the terminal\n    # https://www.postgresql.org/docs/15/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHEN\n    # command: [ \"postgres\", \"-c\", \"log_destination=stderr\", \"-c\", \"log_min_messages=debug\", \"-c\", \"log_min_error_statement=debug\" ]\n\n    expose:\n      - \"5432\"\n    ports:\n      - \"5432:5432\"\n
        "},{"location":"postgres/#dump-a-database","title":"Dump a database","text":"

        https://www.postgresql.org/docs/15/app-pgdump.html

        docker exec \"${POSTGRES_CONTAINER}\" pg_dump -U \"${POSTGRES_USER}\" \"${POSTGRES_DB}\"\n

        Full backups should be performed with pg_dumpall.
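
        For example, a sketch following the same docker pattern as above (the output filename is made up):

        docker exec \"${POSTGRES_CONTAINER}\" pg_dumpall -U \"${POSTGRES_USER}\" > all-databases.sql\n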

        "},{"location":"postgres/#load-local-data-into-a-db","title":"Load local data into a db","text":"

        https://www.postgresql.org/docs/15/app-psql.html

        cat foo.sql |\ndocker exec -i \"${POSTGRES_CONTAINER}\" \\\n  psql -U \"${POSTGRES_USER}\" \"${POSTGRES_DB}\"\n
        "},{"location":"postgres/#example-queries","title":"Example queries","text":""},{"location":"postgres/#show-sizes-for-all-dbs-in-a-given-server","title":"Show sizes for all DBs in a given server","text":"
        pokemon_collector=> SELECT\n    pg_database.datname,\n    pg_database_size(pg_database.datname) AS size,\n    pg_size_pretty(pg_database_size(pg_database.datname))\n    FROM pg_database\n    ORDER BY size ;\n     datname    |     size      | pg_size_pretty\n----------------+---------------+----------------\n postgres       |       7436824 | 7263 kB\n cloudsqladmin  |       7641624 | 7463 kB\n pokemon        |       8231448 | 8039 kB\n smeargle_4080  |      10230296 | 9991 kB\n ampharos_7008  |      10877464 | 10 MB\n litten_7803    |      12655128 | 12 MB\n silvally_9992  |   64589765144 | 60 GB\n cinderace_8705 |   86162946584 | 80 GB\n emolga_1932    |   92073171480 | 86 GB\n aegislash_4257 | 1265669151256 | 1179 GB\n(10 rows)\n
        "},{"location":"postgres/#show-all-of-the-table-sizes-for-a-given-schema","title":"Show all of the table sizes for a given schema","text":"
        sunny_moon_4257_airflow=> SELECT table_name, pg_total_relation_size(table_name), pg_size_pretty(pg_total_relation_size(table_name))\nFROM sunny_moon_4257_airflow.information_schema.tables\nwhere table_schema = 'airflow'\nORDER BY pg_total_relation_size(table_name) desc ;\n          table_name           | pg_total_relation_size | pg_size_pretty\n-------------------------------+------------------------+----------------\n xcom                          |          1269437857792 | 1182 GB\n job                           |               77586432 | 74 MB\n dag_run                       |               60440576 | 58 MB\n log                           |               58630144 | 56 MB\n task_instance                 |               31784960 | 30 MB\n serialized_dag                |                 851968 | 832 kB\n rendered_task_instance_fields |                 843776 | 824 kB\n task_fail                     |                 638976 | 624 kB\n import_error                  |                 393216 | 384 kB\n dag                           |                 122880 | 120 kB\n dag_code                      |                 122880 | 120 kB\n ab_user                       |                  98304 | 96 kB\n ab_permission_view_role       |                  90112 | 88 kB\n astro_available_version       |                  90112 | 88 kB\n slot_pool                     |                  81920 | 80 kB\n ab_user_role                  |                  73728 | 72 kB\n ab_view_menu                  |                  73728 | 72 kB\n ab_permission                 |                  73728 | 72 kB\n ab_role                       |                  73728 | 72 kB\n ab_permission_view            |                  73728 | 72 kB\n astro_version_check           |                  65536 | 64 kB\n sensor_instance               |                  57344 | 56 kB\n alembic_version               |                  57344 | 56 kB\n connection                    |                  24576 | 24 kB\n task_reschedule               |                  24576 | 24 kB\n ab_register_user              |                  24576 | 24 kB\n sla_miss                      |                  24576 | 24 kB\n variable                      |                  24576 | 24 kB\n dag_pickle                    |                  16384 | 16 kB\n known_event                   |                  16384 | 16 kB\n dag_tag                       |                   8192 | 8192 bytes\n known_event_type              |                   8192 | 8192 bytes\n(32 rows)\n
        "},{"location":"postgres/#show-the-rows-with-the-largest-values","title":"Show the rows with the largest values","text":"
        sunny_moon_4257_airflow=> select timestamp, pg_column_size(value) as size from xcom order by size desc limit 20 ;\n           timestamp           |   size\n-------------------------------+-----------\n 2021-09-25 14:23:40.0142+00   | 188149150\n 2021-09-24 14:24:39.699152+00 | 171979158\n 2021-09-23 14:24:08.201579+00 | 158880026\n 2021-09-22 14:24:03.309817+00 | 144807562\n 2021-09-21 14:24:25.052796+00 | 129923562\n(5 rows)\n
        "},{"location":"postgres/#see-also","title":"See Also","text":"
        • pgcli: \"Pgcli is a command line interface for Postgres with auto-completion and syntax highlighting.\" https://github.com/dbcli/pgcli
        • http://www.pgadmin.org - Graphical UI for postgres
        • MySQL: Another relational database
        • SQLite: File based local database that does not require a server.
        • https://hakibenita.com/postgresql-unknown-features: \"Lesser Known PostgreSQL Features. Features you already have but may not know about!\"
        • https://github.com/citusdata/citus: Horizontal scaling extension
        • https://cloud.google.com/alloydb: GCP managed postgres that has advanced clustered scaling features
        • https://www.postgresql.org/docs/current/postgres-fdw.html: module that provides sharding across multiple postgres servers
        • https://www.cybertec-postgresql.com/en/btree-vs-brin-2-options-for-indexing-in-postgresql-data-warehouses
        • https://www.amazingcto.com/postgres-for-everything
        • https://postgrest.org/en/stable: REST API directly from postgres
        • https://challahscript.com/what_i_wish_someone_told_me_about_postgres
        "},{"location":"powershell/","title":"powershell","text":"

        PowerShell is a shell for Windows operating systems, and it was ported to Linux in 2016.

        https://github.com/PowerShell/PowerShell/

        "},{"location":"powershell/#profileps1","title":"Profile.ps1","text":"

        On startup, PowerShell will run any .ps1 files it finds in the WindowsPowerShell directory under My Documents. There is allegedly a Profile.ps1 file in there by default.

        $env:Path = \"c:\\Users\\user1\\Dropbox\\Scripts;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\System32\\Wbem;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\\"\n
        "},{"location":"powershell/#examples","title":"Examples","text":""},{"location":"powershell/#restart-a-remote-computer","title":"Restart a remote computer","text":"
        Restart-Computer remotehostname -Force\n
        "},{"location":"powershell/#find-a-command-that-matches-a-substring","title":"Find a command that matches a substring","text":"
        get-command *time*\n
        "},{"location":"powershell/#get-help-on-commands-that-match-a-substring","title":"Get help on commands that match a substring","text":"
        get-help *time*\n
        "},{"location":"powershell/#show-acls-of-the-current-dir","title":"Show ACLs of the current dir","text":"
        get-acl | format-list\n
        "},{"location":"powershell/#show-system-bios-information","title":"Show system BIOS information","text":"
        Get-WmiObject -ComputerName hostname win32_bios\n
        "},{"location":"powershell/#show-object-methods","title":"Show object methods","text":"
        $foo | Get-Member\n
        "},{"location":"powershell/#browse-the-registry","title":"Browse the registry","text":"
        Set-Location HKCU:\\Software\\Microsoft\\Windows\\\nGet-ChildItem\n
        "},{"location":"powershell/#show-top-processes","title":"Show top processes","text":"
        while (1) { ps | sort -desc cpu | select -first 30; sleep -seconds 1; cls }\n
        "},{"location":"powershell/#browse-the-cert-store","title":"Browse the Cert store","text":"
        Set-Location cert:\\CurrentUser\\\nGet-ChildItem\n
        "},{"location":"powershell/#get-a-list-of-stopped-services","title":"Get a list of stopped services","text":"
        Get-Service | Where-Object { $_.Status -eq \"Stopped\" }\n
        "},{"location":"powershell/#compare-two-objects","title":"Compare two objects","text":"

        This will only show the lines that are not common:

        Compare-Object $(Get-VIPrivilege -role admin) $(Get-VIPrivilege -role member)\n
        "},{"location":"powershell/#save-object-to-a-csv","title":"Save object to a csv","text":"
        Get-Process | Export-Csv -Encoding unicode processes.csv\n
        "},{"location":"powershell/#load-object-from-a-csv-and-parse-it","title":"Load object from a csv and parse it","text":"
        Import-Csv ./processes.csv | Where-Object { $_.Name -like \"*systemd*\" } | Select-Object -last 10 | Format-Table\n
        "},{"location":"powershell/#replacement-for-unix-tail","title":"Replacement for unix tail","text":"

        The equivalent of tail filename:

        Get-Content [filename] | Select-Object -Last 10\n

        The equivalent of tail -f:

        Get-Content -Path \"C:\\scripts\\test.txt\" -Wait\n
        "},{"location":"powershell/#replacement-for-unix-wc","title":"Replacement for unix wc","text":"
        Get-Content test.csv | Measure-Object -line -word -character\n
        "},{"location":"powershell/#replacement-for-unix-time","title":"Replacement for unix time","text":"
        Measure-Command { Sleep 5 }\n
        "},{"location":"powershell/#replacement-for-unix-grep-b2-a1","title":"Replacement for unix grep -B2 -A1","text":"
        Get-Content test.csv | Select-String \"searchstring\" -Context 2,1 -CaseSensitive\n
        "},{"location":"powershell/#install-powershell-in-ubuntu-1804","title":"Install PowerShell in Ubuntu 18.04","text":"
        wget -q https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb && \\\n    dpkg -i packages-microsoft-prod.deb && \\\n    apt-get update && \\\n    apt-get install -y powershell\n
        "},{"location":"powershell/#see-also","title":"See Also","text":"
        • http://poshcode.org/: Used to be a great PSH site with example code. Now it looks like it's just a chat network portal, which IMHO seems much less useful.
        • https://developer.vmware.com/web/tool/12.5.0/vmware-powercli: PowerCLI: VMware PSH CLI
        "},{"location":"powertop/","title":"powertop","text":"

        \"PowerTOP is a Linux tool to diagnose issues with power consumption and power management.\" - https://github.com/fenrus75/powertop

        "},{"location":"powertop/#examples","title":"Examples","text":""},{"location":"powertop/#generate-an-html-power-report","title":"Generate an html power report","text":"
        powertop --html=powertop.html\n
        "},{"location":"powertop/#see-also","title":"See also","text":"
        • Top variant list
        "},{"location":"procurve/","title":"procurve","text":"

        Procurve switches from HP offer cheap layer 2 and layer 3 switching.

        "},{"location":"procurve/#copy-config-files","title":"Copy config files","text":"

        scp user@switch:cfg/startup-config ./\nscp user@switch:cfg/running-config ./\n

        "},{"location":"procurve/#firmware-update","title":"Firmware update","text":"
        • Via ssh: linuxclient$ scp /path/to/image user@switch-hostname:/os/primary
        • Via tftp: switch# copy tftp flash 172.28.115.151 flashfilename.swi primary This doesn't always work, try scp if it fails.

        Then on the switch...

        system boot flash primary

        "},{"location":"procurve/#config-examples","title":"Config Examples","text":""},{"location":"procurve/#set-an-ip-for-the-default-vlan","title":"Set an IP# for the default VLAN","text":"
        interface vlan 1 ip address 172.28.115.234 255.255.255.0\nip default-gateway 172.28.115.1\n
        "},{"location":"procurve/#set-up-additional-vlans","title":"Set up additional VLANs","text":"
        vlan 100 untagged 2\nvlan 100 ip address 172.28.100.1\nvlan 102 untagged 3\nvlan 102 ip address 172.28.102.1\n
        "},{"location":"procurve/#enable-routing-between-connected-networks","title":"Enable routing between connected networks","text":"

        ip routing

        "},{"location":"procurve/#set-up-sntp-clock","title":"Set up SNTP clock","text":"
        sntp server 172.28.111.16\ntimesync sntp\nsntp 120\nsntp unicast\n
        "},{"location":"procurve/#alter-dst-settings","title":"Alter DST settings","text":"

        time daylight-time-rule User-defined begin-date 3/8 end-date 11/1

        "},{"location":"procurve/#enable-ssh","title":"Enable SSH","text":"
        crypto key generate ssh\nip ssh\nip ssh version 2\nip ssh filetransfer\n
        "},{"location":"procurve/#disable-telnet","title":"Disable telnet","text":"

        no telnet-server

        "},{"location":"procurve/#set-up-snmp","title":"Set up snmp","text":"

        snmp-server community \"foobar\" Operator

        "},{"location":"procurve/#set-up-a-vlan-112-port-group","title":"Set up a VLAN 112 port group","text":"

        vlan 112 untagged 6-12

        "},{"location":"procurve/#set-two-groups-of-ports-as-a-trunks-eg-to-use-with-vmware-in-static-lacp","title":"Set two groups of ports as a trunks (eg: to use with VMware in static LACP)","text":"
        trunk 1-4 trk1 trunk\ntrunk 5-8 trk2 trunk\n
        "},{"location":"procurve/#set-up-vlan-multiplexing","title":"Set up VLAN multiplexing","text":"
        vlan 114 tagged 24\nvlan 115 tagged 24\nvlan 114 tagged Trk1\nvlan 115 tagged Trk1\n
        "},{"location":"procurve/#example-config","title":"Example Config","text":"
        hostname \"HP-CORE-0\"\nsnmp-server location \"Cup1-Closet1\"\nmax-vlans 64\ntime timezone -480\ntime daylight-time-rule User-defined begin-date 3/8 end-date 11/1\nconsole inactivity-timer 5\nno web-management\nweb-management ssl\nno telnet-server\ninterface 2\n   name \"Load Test Cluster\"\nexit\ninterface 5\n   name \"hq-vm-1\"\nexit\ninterface 6\n   name \"hq-vm-1\"\nexit\ninterface 8\n   name \"beast\"\nexit\ninterface 10\n   name \"Winserv\"\nexit\ninterface 12\n   name \"IT\"\nexit\ninterface 13\n   name \"Services\"\nexit\ninterface 14\n   name \"IT\"\nexit\ninterface 15\n   name \"IT\"\nexit\ninterface 16\n   name \"IT\"\nexit\ninterface 17\n   name \"beast\"\nexit\ninterface 18\n   name \"VPN\"\nexit\ninterface 19\n   name \"IT\"\nexit\ninterface 20\n   name \"IT\"\nexit\ninterface 21\n   name \"Radio Station\"\nexit\ninterface 22\n   name \"AT&T Network\"\nexit\ninterface 23\n   name \"HP-CORE trunk\"\nexit\ninterface 24\n   name \"Jun1-trunk\"\nexit\nip default-gateway 10.8.100.1\nsntp server 10.8.5.220\nip routing\ntimesync sntp\nsntp unicast\nsnmp-server community \"public\" Unrestricted\nsnmp-server host 10.8.5.189 \"public\"\nvlan 1\n   name \"DEFAULT_VLAN\"\n   untagged 4,14\n   no ip address\n   tagged 23\n   no untagged 1-3,5-13,15-22,24\n   exit\nvlan 101\n   name \"Services\"\n   untagged 3,8,10,15,19\n   ip address 10.8.1.1 255.255.255.0\n   ip helper-address 10.8.5.220\n   tagged 2,5-6,23-24\n   exit\nvlan 102\n   name \"LoadTest\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 2,5-6,15,23-24\n   exit\nvlan 103\n   name \"QATest\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23\n   exit\nvlan 104\n   name \"PS\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23-24\n   exit\nvlan 105\n   name \"IT\"\n   untagged 1,5-6,9,12-13,16,20\n   ip address 10.8.5.1 255.255.255.0\n   ip helper-address 10.8.5.220\n   tagged 2,15,23-24\n   exit\nvlan 110\n   name \"Wireless\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23-24\n   exit\nvlan 111\n   name \"Eng\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23-24\n   exit\nvlan 113\n   name \"SW2\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 23\n   exit\nvlan 112\n   name \"SW1\"\n   untagged 21\n   ip address 10.8.12.1 255.255.255.0\n   ip helper-address 10.8.5.220\n   tagged 23\n   exit\nvlan 100\n   name \"Backbone\"\n   ip address 10.8.100.100 255.255.255.0\n   tagged 23-24\n   exit\nvlan 114\n   name \"Upstairs\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 23-24\n   exit\nvlan 106\n   name \"VPN\"\n   untagged 18\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23-24\n   exit\nvlan 188\n   name \"OldNet\"\n   untagged 11,17\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 23-24\n   exit\nvlan 42\n   name \"ATT\"\n   untagged 22\n   tagged 23-24\n   exit\nvlan 107\n   name \"DMZ\"\n   untagged 7\n   ip helper-address 10.8.5.220\n   tagged 15,24\n   exit\nvlan 109\n   name \"Jail\"\n   tagged 23-24\n   exit\ndhcp-relay option 82 keep\nip route 0.0.0.0 0.0.0.0 10.8.100.1\nip route 10.8.11.0 255.255.255.0 10.8.100.101\nip route 10.8.3.0 255.255.255.0 10.8.100.101\nip route 10.172.188.0 255.255.255.0 10.8.100.1\nip route 10.8.13.0 255.255.255.0 10.8.100.101\nip route 10.8.2.0 255.255.255.0 10.8.100.1\nip route 10.8.10.0 255.255.255.0 10.8.100.1\nip route 10.8.7.0 255.255.255.0 10.8.100.1\nip route 10.8.4.0 255.255.255.0 10.8.100.1\nip route 10.8.14.0 255.255.255.0 
10.8.100.102\nip route 10.8.9.0 255.255.255.0 10.8.100.1\nstack commander \"HP-CORE\"\nstack auto-grab\nstack member 1 mac-address 0016b90b4ea0\nstack member 2 mac-address 0016b968df40\nspanning-tree\nip ssh\nip ssh filetransfer\nno tftp client\nno tftp server\npassword manager\npassword operator\n
        "},{"location":"prometheus/","title":"Prometheus","text":"

        \"Prometheus, a Cloud Native Computing Foundation project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true.\" - https://github.com/prometheus/prometheus

        "},{"location":"prometheus/#tips","title":"Tips","text":""},{"location":"prometheus/#restart-prometheus-pods-in-kubernetes","title":"Restart prometheus pods in kubernetes","text":"
        kubectl get pods -l component=prometheus -o name |\nwhile read -r pod ; do\n  echo $pod\n  kubectl port-forward \"$pod\" 9090 &\n  sleep 10  # to let the port-forward establish before using it\n  curl -X POST localhost:9090/-/reload\n  kill %%\n  sleep 5  # to let the previous process exit before starting another port forward\ndone\n
        "},{"location":"prometheus/#validate-a-prometheus-config","title":"Validate a prometheus config","text":"
        promtool check config --syntax-only prometheus-config.yaml\n
        "},{"location":"prometheus/#links","title":"Links","text":"
        • https://prometheus.io/docs/introduction/overview
        • https://prometheus.io/docs/prometheus/latest/querying/basics: Good intro to promql fundamentals.
        • https://the-zen-of-prometheus.netlify.app
        • https://www.robustperception.io/cardinality-is-key
        • https://github.com/cortexproject/cortex: \"Horizontally scalable, highly available, multi-tenant, long term storage for Prometheus.\"
        • https://github.com/thanos-io/thanos: \"Thanos is a set of components that can be composed into a highly available metric system with unlimited storage capacity, which can be added seamlessly on top of existing Prometheus deployments.\"
        • https://github.com/prometheus/prometheus/blob/release-2.42/tsdb/docs/format
        • https://www.robustperception.io/using-tsdb-analyze-to-investigate-churn-and-cardinality
        • https://fiberplane.com/blog/why-are-prometheus-queries-hard
        • https://blog.cloudflare.com/how-cloudflare-runs-prometheus-at-scale
        "},{"location":"protobuf/","title":"protobuf","text":"

        \"Protocol buffers are a language-neutral, platform-neutral extensible mechanism for serializing structured data\" - https://developers.google.com/protocol-buffers/

        \"Google's data interchange format\" - https://github.com/google/protobuf

        "},{"location":"ps/","title":"ps","text":"

        ps shows a list of processes in a unix system.

        "},{"location":"ps/#examples","title":"Examples","text":""},{"location":"ps/#show-the-exact-command-used-to-start-all-process","title":"show the exact command used to start all process","text":"
        ps axwwo command\n
        "},{"location":"ps/#show-a-process-tree-view","title":"show a process tree view","text":"
        ps auxf\n
        "},{"location":"ps/#show-only-all-running-processes","title":"show only all running processes","text":"

        This excludes sleeping processes and threads.

        ps auxr\n
        "},{"location":"ps/#show-process-list-sorted-by-process-start-time","title":"Show process list sorted by process start time","text":"
        ps hax -o lstart,pid,args |\n  while read -r a b c d e f g ; do\n    echo \"$(date -d \"$a $b $c $d $e\" \"+%F %T%z\") $f $g\" ;\n  done |\n  sort\n
        "},{"location":"ps/#show-all-processes-not-owned-by-a-user-and-no-threads-including-cgroup-name","title":"Show all processes not owned by a user, and no threads, including cgroup name","text":"
        • -N after a condition hides the results of that condition (negation)
        • All linux kernel threads have parent pid 2 (kthreadd)
        FILTERED_USER=zerocool # user must exist\nps -o pid,ppid,user,comm,flags,%cpu,sz,%mem,cgname --user \"${FILTERED_USER}\" -N --ppid 2 -N\n
        "},{"location":"ps/#show-linux-kernel-namespaces-of-all-processes","title":"Show linux kernel namespaces of all processes","text":"

        You have to use sudo to see all processes in all namespaces. The awk filter excludes kernel threads, which are irrelevant in this context.

        sudo ps -axe -o user,pid,ipcns,mntns,netns,pidns,userns,utsns,comm | awk '$3!=\"-\"'\n

        The output will look like:

        $ sudo ps -axe -o user,pid,ipcns,mntns,netns,pidns,userns,utsns,comm | awk '$3!=\"-\"' | grep -E \"udevd|init|MNTNS|dockerd\"\nUSER         PID      IPCNS      MNTNS      NETNS      PIDNS     USERNS      UTSNS COMMAND\nroot         477 4026531839 4026532239 4026531840 4026531836 4026531837 4026532259 systemd-udevd\nroot         748 4026531839 4026531841 4026531840 4026531836 4026531837 4026531838 dockerd\nroot       17781 4026532479 4026532477 4026531840 4026532480 4026531837 4026532478 s6-linux-init-s\n
        "},{"location":"ps_mem/","title":"ps_mem","text":"

        \"A utility to accurately report the in core memory usage for a program.\" - https://github.com/pixelb/ps_mem

        "},{"location":"ps_mem/#usage-examples","title":"Usage examples","text":""},{"location":"ps_mem/#simple-usage","title":"Simple usage","text":"
        $ sudo ps_mem\n Private  +   Shared  =  RAM used    Program\n\n144.0 KiB +  12.5 KiB = 156.5 KiB    acpid\n144.0 KiB +  31.5 KiB = 175.5 KiB    hald-addon-acpi\n160.0 KiB +  56.5 KiB = 216.5 KiB    hald-addon-input\n...snip...\n 17.9 MiB + 101.0 KiB =  18.0 MiB    mysqld [updated]\n 25.5 MiB + 516.5 KiB =  26.0 MiB    salt-minion\n 31.6 MiB + 730.0 KiB =  32.3 MiB    python (2)\n 41.0 MiB + 309.5 KiB =  41.3 MiB    ruby\n 45.5 MiB +  36.0 KiB =  45.6 MiB    init\n 48.9 MiB +   4.1 MiB =  53.0 MiB    ssh (48)\n 57.3 MiB +   2.5 MiB =  59.7 MiB    bash (114)\n115.0 MiB +  86.0 KiB = 115.1 MiB    named\n148.3 MiB + 132.5 KiB = 148.4 MiB    java\n  1.4 GiB + 449.5 KiB =   1.4 GiB    screen (15)\n---------------------------------\n                          2.0 GiB\n=================================\n
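
        ps_mem can also report on specific processes via -p, which takes a comma-separated pid list (assuming a reasonably recent version; sshd is just an example process name):

        sudo ps_mem -p \"$(pgrep -d, sshd)\"\n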
        "},{"location":"psp/","title":"psp","text":"

        Playstation Portable

        "},{"location":"psp/#links","title":"Links","text":"
        • Custom firmware and homebrew
        "},{"location":"pssh/","title":"pssh","text":"

        Parallel SSH tools for running commands on multiple systems simultaneously.

        • http://www.theether.org/pssh/
        "},{"location":"pssh/#examples","title":"Examples","text":""},{"location":"pssh/#run-a-command-on-hosts-contained-in-a-file-showing-stdin-and-stdout","title":"Run a command on hosts contained in a file, showing stdin and stdout","text":"
        pssh -h hostnames.txt -i some_command some_arg\n
        "},{"location":"pssh/#run-commands-and-view-results-on-many-hosts","title":"Run commands and view results on many hosts","text":"
        o=$(date +%F-%T)\npssh -o \"$o\" -h hosts.txt uname -a\ngrep -r . $o\n
        "},{"location":"pssh/#run-two-commands-on-many-hosts-using-bash-expansion-for-host-list","title":"Run two commands on many hosts using bash expansion for host list","text":"
        o=$(date +pssh-%T)\npssh -p 50 -t 60 {-H\\ sea-z-app00{1..9},} -o $o 'whoami ; hostname ;'\ngrep -r . $o\n
        "},{"location":"pssh/#install-a-package-on-many-hosts","title":"Install a package on many hosts","text":"
        fping < hosts.txt | awk '$3 == \"alive\" {print $1}' > alive.txt\npssh \\\n  -h alive.txt \\\n  -o out_dir \\\n  -l root \\\n  yum -y localinstall ~danielh/rpms/cfengine-community-3.6.2-1.x86_64.rpm\n

        or directly from a db query and fping...

        pssh \\\n  -h <(\n    invdb -d sjc-z-01opsdbw 'select hostname from servers where colo = \"sjc\";' |\n    sort -u |\n    egrep '[0-9]+6[^0-9]' |\n    fping 2> /dev/null |\n    awk '$3 == \"alive\" {print $1}'\n  ) \\\n  -o out_dir \\\n  -l root \\\n  yum -y localinstall ~danielh/rpms/cfengine-community-3.6.2-1.x86_64.rpm\n

        or from mco...

        o=$(date +pssh-%T) ; pssh -O GlobalKnownHostsFile=/dev/null -O UserKnownHostsFile=/dev/null -O StrictHostKeyChecking=no -t300 -p10 -h <(mco find -C role::devbox) -o \"$o\" 'sudo apt-get install -y silversearcher-ag' ; grep -r . \"$o\" ;\n
        "},{"location":"ptp/","title":"ptp","text":"

        \"The Precision Time Protocol (PTP) is a protocol used to synchronize clocks throughout a computer network. On a local area network, it achieves clock accuracy in the sub-microsecond range, making it suitable for measurement and control systems.\" - https://en.wikipedia.org/wiki/Precision_Time_Protocol

        "},{"location":"ptp/#links","title":"Links","text":"
        • RFC 8173: Precision Time Protocol Version 2 (PTPv2)
        • Precision System Synchronization with the IEEE-1588 Precision Time Protocol (PTP)
        • GigE Vision
        • NTP vs PTP: Network Timing Smackdown!
        "},{"location":"ptp/#see-also","title":"See Also","text":"
        • time - Notes on time technologies
        • ntp - Network Time Protocol
        "},{"location":"puppet/","title":"Puppet","text":"

        \"Puppet is an open-source configuration management tool. It runs on many Unix-like systems as well as on Microsoft Windows, and includes its own declarative language to describe system configuration.\" - https://en.wikipedia.org/wiki/Puppet_(software)

        "},{"location":"puppet/#videos-and-links","title":"Videos and links","text":"
        • Overview of Puppet's architecture
        • Puppet Documentation Index
        • Introduction to Puppet
        • Function Reference
        • stdlib is another good function reference.
        • Language: Basics
        • Include-like vs. resource-like class instantiation
        • Style Guide
        • Vagrant Docs - Puppet Apply Provisioner
        • Downloads
        • PuppetConf 2015
        • Designing Puppet: Roles/Profiles Pattern - based on the blog post Designing Puppet - Roles and Profiles
        • Building a Functional Puppet Workflow Part 2: Roles and Profiles
        • Configuration Management as Legos
        "},{"location":"puppet/#examples","title":"Examples","text":""},{"location":"puppet/#standalone-mode","title":"Standalone mode","text":"
        • puppet apply /path/to/manifests works, or you can specify a .pp file
        "},{"location":"puppet/#show-variables-about-the-host-that-puppet-knows-facts","title":"Show variables about the host that puppet knows (facts)","text":"
        facter\n
        "},{"location":"puppet/#show-how-puppet-interacts-with-a-resource","title":"Show how puppet interacts with a resource","text":"
        puppet describe cron\n
        "},{"location":"puppet/#show-available-puppet-types","title":"Show available puppet types","text":"
        puppet resource --types\n
        "},{"location":"puppet/#show-the-puppet-code-that-will-create-a-resource","title":"Show the puppet code that will create a resource","text":"
        $ puppet resource file /etc/hosts\nfile { '/etc/hosts':\n  ensure  => 'file',\n  content => '{md5}9ffbd726fd5b15de760cc0150d607628',\n  ctime   => 'Wed Apr 01 17:05:59 -0700 2015',\n  group   => '0',\n  mode    => '644',\n  mtime   => 'Wed Apr 01 17:05:59 -0700 2015',\n  owner   => '0',\n  type    => 'file',\n}\n
        "},{"location":"puppet/#tests","title":"Tests","text":"
        • https://rspec-puppet.com/documentation/
        "},{"location":"puppet/#marionette-collective","title":"Marionette Collective","text":"

        \"The Marionette Collective, also known as MCollective, is a framework for building server orchestration or parallel job-execution systems. Most users programmatically execute administrative tasks on clusters of servers.\" - http://docs.puppetlabs.com/mcollective/

        • Overview of MCollective Components and Configuration
        • Invoking MCollective actions
        • Cheatsheet: https://coderwall.com/p/ig9mxa/mcollective-mco-cheat-sheet
        • Vagrant demo: https://github.com/ripienaar/mcollective-vagrant
        "},{"location":"puppet/#mco","title":"mco","text":""},{"location":"puppet/#show-some-puppet-cluster-stats","title":"Show some puppet cluster stats","text":"
        mco puppet summary\nmco puppet count\nmco puppet status\n
        "},{"location":"puppet/#find-a-random-node-in-the-cluster","title":"Find a random node in the cluster","text":"
        mco find -1\n
        "},{"location":"puppet/#ping-all-nodes-in-the-puppet-cluster","title":"Ping all nodes in the puppet cluster","text":"
        mco ping\n
        "},{"location":"puppet/#show-if-a-file-exists-on-each-host-in-the-cluster","title":"Show if a file exists on each host in the cluster","text":"
        mco filemgr -f /srv/nginx status\n
        "},{"location":"puppet/#use-fstat-and-md5-to-detect-files-needing-repair","title":"Use fstat and md5 to detect files needing repair","text":"
        mco find -S \"fstat('/srv/somedir/somefile').md5=/af6db18c6dfa81c294895003e13a2eef/\" > files_needing_attention.txt\npssh -h files_needing_attention.txt 'do_something_to_the_file'\n
        "},{"location":"puppet/#use-fstat-to-find-hosts-where-a-directory-has-not-been-modified-recently","title":"Use fstat to find hosts where a directory has not been modified recently","text":"
        mco find -S \"fstat('/srv').mtime_seconds<$(date +%s -d '-8 hours')\"\n
        "},{"location":"puppet/#show-stats-about-which-oses-you-have","title":"Show stats about which OSes you have","text":"
        mco facts lsbdistdescription\n
        "},{"location":"puppet/#show-all-ip-addresses-on-all-hosts-where-a-configured-ip-address-matches-a-regex","title":"Show all ip addresses on all hosts where a configured IP address matches a regex","text":"
        mco facts all_ipaddresses -F 'all_ipaddresses=~10\\.(56|29)\\.'\n
        "},{"location":"puppet/#show-a-report-about-uptimes-over-a-year","title":"Show a report about uptimes over a year","text":"
        mco facts uptime -F 'uptime_days>365' |\nawk '$2 == \"days\" {print}' |\nsort -n -k1 |\ncolumn -t\n
        "},{"location":"puppet/#find-machines-where-a-fact-is-true","title":"Find machines where a fact is true","text":"
        mco find is_ec2\n

        Which is the same as

        mco find -W is_ec2=true\n
        "},{"location":"puppet/#find-machines-that-have-a-certain-fact-value","title":"Find machines that have a certain fact value","text":"
        mco find --with-fact lsbdistcodename=lucid\n
        "},{"location":"puppet/#show-a-fact-on-machines-that-have-a-specific-fact-value","title":"Show a fact on machines that have a specific fact value","text":"
        mco facts role --with-fact lsbdistcodename=lucid -v\n
        "},{"location":"puppet/#find-ec2-hosts-with-low-uptime","title":"Find ec2 hosts with low uptime","text":"
        mco find -W 'is_ec2=true uptime_seconds<7200'\n
        "},{"location":"puppet/#show-detailed-info-about-a-node","title":"Show detailed info about a node","text":"
        mco inventory fqdn.example.com\n
        "},{"location":"puppet/#find-nodes-that-match-a-config-management-class","title":"Find nodes that match a config management class","text":"
        mco find -C role::awsadmin\n
        "},{"location":"puppet/#show-the-classes-for-a-given-host","title":"Show the classes for a given host","text":"
        sort /var/lib/puppet/state/classes.txt\n
        "},{"location":"puppet/#kick-off-a-puppet-run-on-all-hosts-of-a-certain-class","title":"Kick off a puppet run on all hosts of a certain class","text":"

        The following two syntaxes are essentially the same, using the same mco puppet agent. The only differences are the use of runall vs runonce, and the method that performs parallel execution. I'm not sure what difference there is in the code path.

        mco rpc    -C \"class_boolean\" -F \"fact_name=fact_value\" --batch 10 --agent puppet --action runonce\nmco puppet -C \"class_boolean\" -F \"fact_name=fact_value\" runall 10\n
        "},{"location":"puppet/#show-the-status-and-puppet-policy-about-a-package-on-all-hosts","title":"Show the status and puppet policy about a package on all hosts","text":"
        mco rpc package status package=openssh-client --discovery-timeout 60 --json\n
        "},{"location":"puppet/#upgrade-an-installed-package-on-10-random-web-hosts","title":"Upgrade an installed package on 10 random web hosts","text":"

        This upgrades, but does not install if the package is not already present.

        mco package update 'nginx' -I '/web/' --limit=10\n
        "},{"location":"puppet/#show-breakdown-of-hosts-by-os-version-by-role","title":"Show breakdown of hosts by OS version by role","text":"
        mco facts -v --wc role::mon lsbdistdescription\n
        "},{"location":"puppet/#use-mco-to-find-packages-of-a-certain-version-on-a-certain-os","title":"Use mco to find packages of a certain version on a certain OS","text":"
        mco rpc package status package=apt -j -F lsbdistcodename=trusty > cache.json\njq -c '.[] | select(.data.ensure == \"1.0.1ubuntu2\") | { version: .data.ensure, hostname: .sender }' cache.json\n
        "},{"location":"puppet/#hiera","title":"Hiera","text":"

        \"Hiera is a key/value lookup tool for configuration data, built to make Puppet better and let you set node-specific data without repeating yourself.\" - http://docs.puppetlabs.com/hiera/latest/

        • https://github.com/puppetlabs/hiera
        • http://www.craigdunn.org/2011/10/puppet-configuration-variables-and-hiera/
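
        Here's a minimal sketch of the idea, assuming a Puppet 4+ layout where data lives in YAML files under the environment's data/ directory (the key and node name here are hypothetical):

        ## data/nodes/web01.example.com.yaml contains:\n##   ntp::servers:\n##     - time1.example.com\npuppet lookup ntp::servers --node web01.example.com\n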
        "},{"location":"puppet/#r10k","title":"r10k","text":"

        The suggested workflow for Puppet is to use r10k on a control repo to manage the modules on your puppetmaster and the environments it provides. The general idea is that each module is represented by a Puppet Forge module name or a git repo listed inside the ambiguously named Puppetfile. When r10k puppetfile install -v is run, all modules listed in this file are installed according to their definitions, and all modules that are not in this file are purged. Also, r10k will set up environments based on the git branches of the control repo. This workflow is described in detail at Managing and deploying Puppet code. It assumes you are not using a puppet apply type setup, which makes it difficult to follow for people who are playing with this at home in a non-puppetmaster scenario, such as in vagrant or on Raspberry Pis.
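
        Here's a minimal sketch, with hypothetical module names, URLs, and refs, of what a Puppetfile contains and the command that syncs the live module set to match it:

        ## Puppetfile (hypothetical contents):\n##   mod 'puppetlabs/stdlib', '9.4.1'  # a Puppet Forge module\n##   mod 'mymodule', :git => 'https://git.example.com/puppet/mymodule.git', :ref => 'v1.2.3'\nr10k puppetfile install -v  # install everything listed, purge anything not listed\n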

        "},{"location":"pv/","title":"pv","text":"

        pv - monitor the progress of data through a pipe

        This can be used in place of dd or dcfldd in some cases, such as copying disks or files. It's also useful in combination with nc so you can see stats about the flow of that pipe.
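
        For example, a sketch of watching throughput while shipping a disk image over nc (the hostname and port here are arbitrary):

        ## on the receiving host\nnc -l 9000 > disk.img\n## on the sending host\npv /dev/sda | nc receiving-host 9000\n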

        "},{"location":"pv/#examples","title":"Examples","text":""},{"location":"pv/#show-the-average-growth-rate-of-logs","title":"Show the average growth rate of logs","text":"
        xtail /nail/scribe/buffer/some_service_generated | pv -a > /dev/null\n
        "},{"location":"pv/#write-a-disk-image-to-usb","title":"Write a disk image to usb","text":"

        This can be used in place of dd if=file of=/dev/disk

        ## As root\npv ~hoherd/Downloads/ubuntu-16.04.1-desktop-amd64.iso > /dev/rdisk4\n
        "},{"location":"pv/#show-the-number-of-k8s-pods-going-into-notready-per-second","title":"Show the number of k8s pods going into NotReady per second","text":"
        kubectl get pods -A --watch-only |\ngrep --line-buffered NotReady |\npv --line-mode --timer --rate --average-rate --wait --force >/dev/null\n
        "},{"location":"pv/#see-also","title":"See Also","text":"
        • dcfldd
        • dd
        • ddrescue
        "},{"location":"pxe/","title":"pxe","text":"

        \"In computing, the Preboot eXecution Environment, PXE (most often pronounced as pixie, often called PXE Boot/pixie boot.) specification describes a standardized client-server environment that boots a software assembly, retrieved from a network, on PXE-enabled clients. ... The concept behind the PXE originated in the early days of protocols like BOOTP/DHCP/TFTP, and as of 2015 it forms part of the Unified Extensible Firmware Interface (UEFI) standard. In modern data centers, PXE is the most frequent choice for operating system booting, installation and deployment.\" - https://en.wikipedia.org/wiki/Preboot_Execution_Environment

        "},{"location":"pxe/#links","title":"Links","text":"
        • https://netboot.xyz
        • https://ipxe.org
        "},{"location":"pytest/","title":"pytest","text":"

        \"The pytest framework makes it easy to write small tests, yet scales to support complex functional testing for applications and libraries.\" - https://docs.pytest.org

        "},{"location":"pytest/#common-args","title":"Common args","text":"

        pytest --help is a bit overwhelming, so here's a smaller reference:

        • --pdb drop to pdb when an exception is raised
        • --maxfail=N quit after this many test failures
        • --ff run previously failed tests first
        • --lf only run tests that previously failed
        • -k searchstring only run tests that have \"searchstring\" in them (actually more complicated matches can be done with -k)
        • -s alias for --capture=no which basically means \"show output of print statements in tests\"
        "},{"location":"pytest/#usage-tips","title":"Usage Tips","text":""},{"location":"pytest/#debug-failing-test-with-pdb","title":"Debug failing test with pdb","text":"

        This will drop you into a pdb shell when a test failure occurs.

        pytest --pdb tests/test_*.py\n
        "},{"location":"pytest/#override-test-args","title":"Override test args","text":"
        export PYTEST_ADDOPTS='--maxfail=1 -v --pdb'\npytest app/tests/test_*.py\n
        "},{"location":"pytest/#run-only-tests-that-failed-on-the-last-run","title":"Run only tests that failed on the last run","text":"
        pytest --lf\n
        "},{"location":"pytest/#run-all-tests-but-put-the-last-failures-first","title":"Run all tests, but put the last failures first","text":"
        pytest --ff\n
        "},{"location":"pytest/#run-a-specific-test-case","title":"Run a specific test case","text":"

        You can use python expressions to match more than one test. Each given test is substring matched against available tests. The matching logic can get pretty complicated, so see the help docs.

        pytest -k 'test_this_specific_thing or that_specific_thing'\n
        "},{"location":"pytest/#passing-args-via-env-vars","title":"Passing args via ENV vars","text":"

        You can pass args via the PYTEST_ADDOPTS ENV var. This is useful, for instance, if you're using make to run tests and the command line does additional things like sourcing files, entering a venv, or whatever.

        PYTEST_ADDOPTS=\"--ff --maxfail=1\" make test\n
        "},{"location":"pytest/#show-your-fixtures","title":"Show your fixtures","text":"
        pytest --fixtures\n
        "},{"location":"pytest/#show-fixture-setup-and-teardown-during-run","title":"Show fixture setup and teardown during run","text":"
        pytest --setup-show\n
        "},{"location":"pytest/#plugins","title":"Plugins","text":"
        • pytest-profiling: \"Profiling plugin for pytest, with tabular and heat graph output.\"
        • pytest-sugar: improved display of test output
        • pytest-xdist: parallel runs of tests for speed improvements
        • pytest-testmon: \"selects tests affected by changed files and methods\"
        "},{"location":"python/","title":"python","text":"

        \"Python is a programming language that lets you work more quickly and integrate your systems more effectively.\" - https://www.python.org/

        "},{"location":"python/#tips-and-techniques","title":"Tips and techniques","text":"
        • Don't use assert statements for regular validation. assert statements can be disabled at the interpreter level, which would vastly change the flow of your code if they were used widely. (See the demo below.)
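
        For example, the same assert behaves differently depending on interpreter flags:

        python3 -c 'assert 1 == 2, \"validation failed\"'     # raises AssertionError\npython3 -O -c 'assert 1 == 2, \"validation failed\"'  # exits silently, the assert is stripped\n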
        "},{"location":"python/#variable-names","title":"Variable names","text":"
        • _varname: Semi-private. Basically a convention that developers use to indicate that the scope of a variable is local, but this locality is not enforced by the interpreter.
        • __varname: Private variable in name, but not in logic or security. The interpreter mangles the name to _ClassName__varname so it won't collide with subclass attributes, but it is still accessible. (See the demo after this list.)
        • var_: Used to get around shadowing built-in variable names. EG: list_ won't conflict with list()
        • __magic_method__: See https://diveintopython3.net/special-method-names.html
        • _: Temp var, pretty common entity in programming. (eg: bash and perl both support this too.)
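
        A quick shell demonstration of the __varname mangling described above:

        python3 -c 'class Foo:\n    __secret = 1\nprint([name for name in dir(Foo) if \"secret\" in name])'\n## prints ['_Foo__secret']\n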
        "},{"location":"python/#virtual-environments","title":"Virtual Environments","text":"

        Virtual environments isolate your project away from the system's python interpreter and modules, so you can have full control over what code is available to your project. This makes it easy to develop, debug, and deploy to a new system. It's basically always a good idea to use a virtual environment. You will thank yourself later for learning this one up front.

        "},{"location":"python/#virtual-environments-using-venv","title":"Virtual environments using venv","text":""},{"location":"python/#creating-a-venv","title":"Creating a venv","text":"
        echo \"venv\" >> .gitignore  # optional\nvirtualenv venv\n. venv/bin/activate\npip install -r requirements.txt\n## write code, interact with it, whatever\ndeactivate\n
        "},{"location":"python/#use-venv-to-work-around-missing-pip","title":"Use venv to work around missing pip","text":"

        This is mostly useful for installing for your user, since if you can't install pip you won't be able to install into system-wide locations.

        virtualenv venv --system-site-packages && venv/bin/pip install --user \"$PACKAGENAME\" && rm -rf venv\n
        "},{"location":"python/#virtual-environments-with-poetry","title":"Virtual environments with poetry","text":"

        poetry is the new-school 2019 way of doing virtual environments. poetry stores its requirements in the new standard pyproject.toml file, and keeps the virtual environment stored outside of the current directory.

        • https://python-poetry.org/docs/
        • https://python-poetry.org/docs/pyproject/
        "},{"location":"python/#creating-a-virtual-environment-using-poetry","title":"Creating a virtual environment using poetry","text":"
        cd project_dir\npoetry init\n## walk through the dialogue\npoetry add bpython boto3\npoetry shell  # this spawns a subshell with the new python environment\n## interact with your python environment\nexit\n
        "},{"location":"python/#import-module-from-absolute-path","title":"Import module from absolute path","text":"
        import sys\nsys.path.append('/Users/username/code/somedir')\nimport module # from somedir\n
        "},{"location":"python/#convert-between-character-number-and-string","title":"Convert between character number and string","text":"

        You can use these functions to convert ascii and unicode characters into their numeric representations and back. Technically, ord converts a unicode character into a unicode code point, and chr does the reverse.

        >>> ord('\ud83d\udc0d')\n128013\n>>> chr(128013)\n'\ud83d\udc0d'\n
        "},{"location":"python/#benchmarking","title":"Benchmarking","text":""},{"location":"python/#links","title":"Links","text":"
        • https://youtu.be/YY7yJHo0M5I: Talk - Anthony Shaw: Write faster Python! Common performance anti patterns
        • https://pypi.org/project/scalene
        • https://pypi.org/project/austin-python
        • https://docs.python.org/3/library/profile.html
        • https://docs.python.org/3/library/timeit.html
        "},{"location":"python/#show-stats-about-call-count-and-times","title":"Show stats about call count and times","text":"

        This example shows how to profile a pytest run, and then generate a stats.txt file showing stats sorted by total time:

        python -m cProfile -o output.prof \"$(command -v pytest)\" -sv tests\ncat <<EOF | python -m pstats output.prof > stats.txt\nsort time\nstats\nEOF\n

        Yes, that syntax is ugly, and yes, the stats module could use a better CLI, but it works. Creating a function to make the CLI interface better is left as an exercise for the reader.
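
        If you'd rather skip the heredoc, the same report can be produced with a one-liner against the pstats API:

        python -c 'import pstats; pstats.Stats(\"output.prof\").sort_stats(\"time\").print_stats()' > stats.txt\n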

        "},{"location":"python/#generate-a-flame-graph","title":"Generate a flame graph","text":"

        Until https://github.com/baverman/flameprof/pull/5 is merged you may have to edit the installed flameprof shell script.

        pip3 install flameprof\npython -m cProfile -o output.prof myscript.py\nflameprof output.prof > output.svg\n
        "},{"location":"python/#_1","title":"python","text":""},{"location":"python/#debugging","title":"Debugging","text":""},{"location":"python/#verbose-environment-var","title":"Verbose environment var","text":"

        https://docs.python.org/3/using/cmdline.html#envvar-PYTHONVERBOSE

        export PYTHONVERBOSE=1\n## or...\npython -v -m pip search beets\n
        "},{"location":"python/#follow-the-flow-of-a-python-script","title":"Follow the flow of a python script","text":"

        This is equivalent to bash -x / bash -o xtrace, but is probably even more useful because it prefixes the name of the file and the line number to what is actually being executed, which aids in debugging large projects.

        python -m trace --trace foo.py\n

        You can get the equivalent output for a single function with:

        import trace\ntracer = trace.Trace(trace=True)\ntracer.runfunc(some_func_name, 'some_func arg1', 'some_func arg2')\n
        "},{"location":"python/#enter-an-interactive-prompt-after-script-ends","title":"Enter an interactive prompt after script ends","text":"

        https://docs.python.org/3/using/cmdline.html#envvar-PYTHONINSPECT

        This works even when your code raises an exception, though in that case the rest of your code is never executed; you are simply dropped into a shell, which is not very useful.

        export PYTHONINSPECT=1\n## or...\nsudo python -i ./ps_mem.py\n
        "},{"location":"python/#enter-a-python-terminal-arbitrarily","title":"Enter a python terminal arbitrarily","text":"

        https://docs.python.org/3/library/pdb.html

        import pdb; pdb.set_trace()\n

        In python 3.7+ you can simply insert breakpoint()

        This drops you into a pdb shell. This is not the same as a full python REPL. To get a python REPL, type interact. After you have inspected the current state, you can type continue.

        Alternatively there is the web-pdb package which allows you to debug via a web browser using web_pdb.set_trace

        "},{"location":"python/#print-variables-from-the-local-scope","title":"Print variables from the local scope","text":"
        for var in dir():\n    print(\"Debug: {0} = {1}\".format(var,eval(var)))\n
        "},{"location":"python/#inspect-things","title":"Inspect things","text":"
        >>> import inspect\n>>> inspect.getargspec(inspect.getargspec)\nArgSpec(args=['func'], varargs=None, keywords=None, defaults=None)\n

        Note that inspect.getargspec was removed in Python 3.11; use inspect.signature instead.
        "},{"location":"python/#create-an-http-server-using-pwd-as-document-root","title":"Create an http server using PWD as document root","text":"
        python3 -m http.server 9980\n
        "},{"location":"python/#discover-the-location-for-pip-user-installs","title":"Discover the location for pip --user installs","text":"
        echo $(python -m site --user-base)/bin\n
        "},{"location":"python/#add-pythons-pip-install-user-bin-path-to-path","title":"Add python's pip install --user bin path to PATH","text":"
        ## ~/.bash_profile\nif PYTHON3_USER_BASE=$(python3 -m site --user-base 2>/dev/null) ; then PATH=\"${PYTHON3_USER_BASE}/bin:${PATH}\" ; fi\n
        "},{"location":"python/#manage-a-requirementstxt-file-like-a-pro","title":"Manage a requirements.txt file like a pro","text":"

        Managing requirements.txt manually can lead to a variety of problems related to dependencies and package compatibility. The best way to manage the requirements.txt file is by using the pip-tools command pip-compile, which builds a requirements.in file into a requirements.txt.

        In your requirements.in you define only your direct dependencies:

        pendulum\ntyper\n

        Then you run pip-compile --upgrade requirements.in, which would create a requirements.txt file like:

        #\n# This file is autogenerated by pip-compile with Python 3.10\n# by the following command:\n#\n#    pip-compile requirements.in\n#\nclick==8.1.3\n    # via typer\npendulum==2.1.2\n    # via -r requirements.in\npython-dateutil==2.8.2\n    # via pendulum\npytzdata==2020.1\n    # via pendulum\nsix==1.16.0\n    # via python-dateutil\ntyper==0.7.0\n    # via -r requirements.in\n

        You can also specify --generate-hashes to get a more reliable lockfile style result.
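
        For example, to rebuild the lockfile with upgraded pins and hashes:

        pip-compile --upgrade --generate-hashes requirements.in\n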

        "},{"location":"python/#show-currently-installed-versions-for-all-packages-in-requirementstxt","title":"Show currently installed versions for all packages in requirements.txt","text":"

        This can be used to update a requirements.txt file to the exact installed version.

        pip freeze | grep -f <(grep -o '^\\w\\+' requirements.txt)\n

        Or auto pin to the current major version

        pip freeze |\n  grep -f <(grep -o '^\\w\\+' requirements.txt) |\n  sed 's/==/~=/' |\n  awk -F. '{print $1 \".\" $2}'\n
        "},{"location":"python/#generate-a-tz-aware-datetime-using-only-the-standard-library","title":"Generate a TZ aware datetime using only the standard library","text":"
        from datetime import datetime, timezone\nTZ = datetime.now(timezone.utc).astimezone().tzinfo\ndatetime.now(TZ).isoformat(timespec='seconds')  # 2023-06-24T12:50:01-04:00\n
        "},{"location":"python/#common-date-operations","title":"Common date operations","text":"

        None of these examples deal with timezones.

        "},{"location":"python/#get-the-current-time-and-date","title":"Get the current time and date","text":"
        >>> from datetime import datetime\n>>> datetime.now().strftime(\"%s\")\n'1572039830'  # unix timestamp as a string\n>>> datetime.now().strftime(\"%F\")\n'2019-10-25'  # arbitrarily formatted timestamp string\n>>> datetime.now()\ndatetime.datetime(2019, 10, 25, 14, 49, 49, 175165)  # as a datetime object\n
        "},{"location":"python/#convert-from-unix-timestamp","title":"Convert from unix timestamp","text":"
        >>> from datetime import datetime\n>>> datetime.utcfromtimestamp(1234567890)\ndatetime.datetime(2009, 2, 13, 23, 31, 30)  # to a datetime object\n>>> datetime.utcfromtimestamp(1234567890).strftime('%F %T')\n'2009-02-13 23:31:30'  # to a string, via datetime object formatting\n

        Note that datetime.utcfromtimestamp is deprecated as of Python 3.12; prefer datetime.fromtimestamp(1234567890, tz=timezone.utc).
        "},{"location":"python/#convert-from-datetime-string","title":"Convert from datetime string","text":"
        >>> from datetime import datetime\n>>> datetime.strptime('2019-05-01', \"%Y-%m-%d\")  # For some reason you can't use '%F' to describe inputs. Same with %s.\ndatetime.datetime(2019, 5, 1, 0, 0)  # to a datestamp object\n>>> datetime.strptime('2019-05-01 10:01:59', \"%Y-%m-%d %H:%M:%S\").strftime('%A %B %d, %Y')\n'Wednesday May 01, 2019'  # to a string, via datetime object\n
        "},{"location":"python/#install-a-package-from-git","title":"Install a package from git","text":"

        This is great for locally developed packages. This also works with pipx, which will install tools into their own virtualenv.

        pip install git+ssh://gitserver/path/repo.git@git-ref\n
        "},{"location":"python/#links_1","title":"Links","text":""},{"location":"python/#decorators","title":"Decorators","text":"
        • https://wiki.python.org/moin/PythonDecoratorLibrary
        • http://stackoverflow.com/questions/739654/how-can-i-make-a-chain-of-function-decorators-in-python/1594484#1594484
        • http://ains.co/blog/things-which-arent-magic-flask-part-1.html
        "},{"location":"python/#modules","title":"Modules","text":"
        • https://github.com/jonathanslenders/ptpython: improved python REPL
        • https://docs.python.org/3/library/sched.html: cross-platform cron-like scheduler
        • https://pypi.python.org/pypi/colorama: cross-platform colorized terminal output
        • https://pypi.python.org/pypi/begins/: Simplified CLI arguments
        • https://pypi.python.org/pypi/watchdog: cross-platform filesystem events API
        • https://github.com/giampaolo/psutil/: system information
        • https://github.com/timothycrosley/hug: simplified web API creation
        • http://python-future.org: \"python-future is the missing compatibility layer between Python 2 and Python 3. It allows you to use a single, clean Python 3.x-compatible codebase to support both Python 2 and Python 3 with minimal overhead.\"
        • https://pymotw.com/3/: Python Module of the Week has lots of useful module examples
        • https://docs.python.org/3/library/functools.html
        • https://docs.python.org/3/library/itertools.html
        • https://more-itertools.readthedocs.io/en/stable/
        "},{"location":"python/#various-links","title":"Various links","text":"
        • A gallery of interesting Jupyter and IPython Notebooks
        • Drag'n'drop Pivot Tables and Charts in Jupyter
        • Dive Into Python 3
        • Google's Python Class
        • Google Python Style Guide
        • Learn Python dot org
        • Python Cheatsheets
        • The Flask Mega-Tutorial
        • The Python IAQ: Infrequently Answered Questions
        • Why I use py.test and you probably should too
        • PyCon 2017 videos
        • PyCon 2018 videos
        • https://caremad.io/posts/2013/07/setup-vs-requirement/: Hard vs abstract dependencies in requirements.txt (and by extension Pipfile) vs setup.py
        • https://plot.ly/python/
        • https://realpython.com/factory-method-python/
        • https://pythontest.com
        • How to structure a Flask-RESTPlus web service for production builds
        • MIT Open Courseware: Introduction to Computer Science and Programming in Python
        • Documenting Python Code: A Complete Guide
        • Current Status of Python Packaging - April 2019: TL;DR: \"Create your development environment with Poetry, specifying the direct dependencies of your project with a strict version.\"
        • Brian Warner - Magic Wormhole - Simple Secure File Transfer - PyCon 2016
        • List and dict comprehension are taken from a mathematical notation https://en.wikipedia.org/wiki/Set-builder_notation#Parallels_in_programming_languages
        • My Python testing style guide
        • Film simulations from scratch using Python
        • timeit: stdlib library for testing many iterations of the same code. See also python3 -m timeit --help
        • https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html: Good walkthrough of colorizing terminals using ANSI escape codes.
        • https://guicommits.com/organize-python-code-like-a-pro: Other than some of the dir naming stuff, I agree with most of the content here.
        • https://packaging.python.org/en/latest/guides/tool-recommendations
        • https://towardsdatascience.com/12-python-decorators-to-take-your-code-to-the-next-level-a910a1ab3e99
        • https://nedbatchelder.com/blog/202312/realworld_matchcase.html
        • https://clickpy.clickhouse.com/dashboard: Show pypi stats in a nifty dashboard.
        "},{"location":"q/","title":"q","text":"

        N.B.: This tool isn't updated frequently. It's probably better to use something more broadly useful like dasel

        \"q - Text as Data\" - http://harelba.github.io/q/

        This tool is cool, but honestly you can do a lot more stuff and iterate quicker by importing your csv into sqlite using sqlite3 -csv new.db \".import some/file.csv destination_table_name\". q actually uses sqlite3 behind the scenes. See also https://til.simonwillison.net/sqlite/one-line-csv-operations
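
        For example, the full sqlite3 flow (the database, csv path, and table name here are arbitrary):

        sqlite3 -csv new.db \".import some/file.csv incidents\"\nsqlite3 new.db 'select count(*) from incidents'\n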

        "},{"location":"q/#examples","title":"Examples","text":""},{"location":"q/#format-the-pagerduty-incidentscsv-to-be-more-readable","title":"Format the Pagerduty incidents.csv to be more readable","text":"
        ## -d, = comma delimited input\n## -H  = use the headings found in the input csv files\n## -T  = tab delimited output\n## -f  = python 2 format strings to be applied to 1-indexed output fields\n$ q -d, -H -T -f '1=https://pagerduty.com/incidents/%s,2=alerted at %s,3=Description: %s' 'select id,created_on,description from incidents.csv order by created_on asc limit 5'\nhttps://pagerduty.com/incidents/P66XNLT    alerted at 2017-12-04T00:04:07-08:00    Description: proxy0302: 200 Status Code Proxy Log Watcher: Matches found in last run met or dropped below 0.0, dropping to 0.0 for 10 minutes at 12:00AM https://server.pingdom.com/a/3103869181\nhttps://pagerduty.com/incidents/PLUG344    alerted at 2017-12-04T04:14:05-08:00    Description: sandbox-apigateway00: API Gateway Error Watcher: Occurrences met or exceeded 10.00 /min, increasing to 15.82 /min for 10 minutes at 04:10AM https://server.pingdom.com/a/3104379391\nhttps://pagerduty.com/incidents/PT13M2B    alerted at 2017-12-04T06:48:14-08:00    Description: hadoop-r21: Hadoop Resource Monitor: Lostnodes met or exceeded 4.0, increasing to 4.0 at 06:47AM https://server.pingdom.com/a/3104686551\nhttps://pagerduty.com/incidents/P3RLOTT    alerted at 2017-12-04T08:56:07-08:00    Description: hadoop-c05: /srv Disk Usage: Disk Capacity met or exceeded 90%, increasing to 90% for 10 minutes at 08:50AM https://server.pingdom.com/a/3104929931\nhttps://pagerduty.com/incidents/PNOJZKC    alerted at 2017-12-04T09:02:21-08:00    Description: sjc-http2: HTTP 500 error Watcher: Occurrences met or exceeded 10.00 /min, increasing to 31.91 /min for 10 minutes at 09:00AM https://server.pingdom.com/a/3104941911\n
        "},{"location":"q/#format-pagerduty-events-as-html-for-pasting-into-confluence-for-issue-response-tracking","title":"Format pagerduty events as HTML for pasting into confluence for issue response tracking","text":"

        pagerduty-csv-download opens your browser and downloads the csv file for the last week of events. You'll have to change companyname to whatever your company URL is.

        pagerduty-csv-to-html uses q to reformat the csv into HTML lists you can paste into the source editor of your HTML friendly CMS like Confluence.

        This uses BSD relative date syntax; you'll have to change it for linux.

        pagerduty-csv-download() {\n  rm -f incidents.csv\n  TZ=America/Los_Angeles\n  past=\"$(date -v-7d \"+%FT00:00:00\")\"\n  present=\"$(date \"+%FT00:00:00\")\"\n  open \"$(date \"+https://companyname.pagerduty.com/api/v1/reports/raw/incidents.csv?since=${past}&until=${present}&time_zone=${TZ}\")\"\n}\npagerduty-csv-to-html() {\n  q \\\n    -H \\\n    -d',' \\\n    -D' ' \\\n    -f '1=<li>%s,2=<a href \\\"https://companyname.pagerduty.com/incidents/%s\\\">,3=%s</a>,4=%s<ul><li>...</li></ul></li>' \\\n    'select substr(created_on,12,5),id,id,description from incidents.csv order by created_on asc' | tail -n 50 | sed 's/href /href=/;s/> />/'\n}\n
        "},{"location":"q/#select-count-of-daily-alerts-by-date-from-pagerduty-incidentscsv","title":"Select count of daily alerts by date from PagerDuty incidents.csv","text":"
        q -H --delimiter=',' -O --output-delimiter=',' 'select substr(created_on,0,11) as date,count(substr(created_on,0,11)) as count from incidents.csv group by date'\n
        "},{"location":"raspberry-pi/","title":"raspberry-pi","text":"

        A small computer, good for running linux.

        • http://www.raspberrypi.org

        The standard OS used to be called raspbian, but it is now called Raspberry Pi OS.

        "},{"location":"raspberry-pi/#tips","title":"Tips","text":""},{"location":"raspberry-pi/#show-what-firmware-you-have-running","title":"Show what firmware you have running","text":"
        sudo /opt/vc/bin/vcgencmd version\n

        Check vcgencmd commands for more usage.

        "},{"location":"raspberry-pi/#update-raspberry-pi-os-and-firmware-versions","title":"Update Raspberry Pi OS and firmware versions","text":"
        sudo apt full-upgrade -y\n
        "},{"location":"raspberry-pi/#software","title":"Software","text":""},{"location":"raspberry-pi/#raspberry-pi-os","title":"Raspberry Pi OS","text":"
        dpkg-reconfigure locales\n
        "},{"location":"raspberry-pi/#hassio","title":"Hass.io","text":"

        \"Hass.io turns your Raspberry Pi (or another device) into the ultimate home automation hub powered by Home Assistant. With Hass.io you can focus on integrating your devices and writing automations.\" - https://home-assistant.io/hassio/

        "},{"location":"raspberry-pi/#spillpasspi","title":"SpillPassPi","text":"

        SpillPassPi is deprecated because Nintendo shut down the Streetpass Relay servers.

        "},{"location":"raspberry-pi/#v1","title":"V1","text":"

        Retired.

        'A Simple Homebrew Plug and Play 3DS HomePass Relay and Fake \"Nintendo Zone\" Hotspot' - http://www.spillmonkey.com/?page_id=5

        "},{"location":"raspberry-pi/#v2","title":"V2","text":"

        'A Simple Homebrew Plug and Play 2DS/3DS/N3DS StreetPass Relay and Fake \"Nintendo Zone\" Hotspot' - http://www.spillmonkey.com/?page_id=169

        "},{"location":"raspberry-pi/#homepass","title":"Homepass","text":"

        \"Nintendo 3DS homepass resources and software.\" - https://github.com/danielhoherd/homepass/tree/master/RaspberryPi

        "},{"location":"raspberry-pi/#links","title":"Links","text":"
        • https://www.raspberrypi.com/software/operating-systems
        • https://www.raspberrypi.com/documentation/computers/os.html: Firmware and OS update instructions
        • https://www.jeffgeerling.com/blog/2018/raspberry-pi-microsd-card-performance-comparison-2018
        • https://techcrunch.com/2019/07/09/the-raspberry-pi-4-doesnt-work-with-all-usb-c-cables/
        • https://curriculum.raspberrypi.org
        • https://www.jeffgeerling.com/blog/2020/uasp-makes-raspberry-pi-4-disk-io-50-faster: Useful USB info in general, regarding UASP
        • https://ptx2.net/posts/unbricking-a-bike-with-a-raspberry-pi/
        • https://www.raspberrypi.com/news/raspberry-pi-build-hat-lego-education/: Control LEGO motors via Raspberry Pi
        "},{"location":"redis/","title":"redis","text":"

        \"Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache and message broker.\" - https://redis.io

        "},{"location":"redis/#tips-and-examples","title":"Tips and Examples","text":""},{"location":"redis/#solve-memory-allocation-problems","title":"Solve memory allocation problems","text":"

        Errors like this can cause the disk to fill up over long periods of time:

        [2535] 02 Jan 19:58:52.376 * Starting automatic rewriting of AOF on 7885% growth\n[2535] 02 Jan 19:58:52.376 # Can't rewrite append only file in background: fork: Cannot allocate memory\n

        This problem can be solved without restarting anything:

        ## df -h .\nFilesystem      Size  Used Avail Use% Mounted on\n/dev/xvdf       250G  135G  116G  54% /srv\n## cat /proc/sys/vm/overcommit_memory\n0\n## echo 1 > /proc/sys/vm/overcommit_memory\n## tail  redis.log\n[2535] 02 Jan 22:03:23.707 * Starting automatic rewriting of AOF on 7885% growth\n[2535] 02 Jan 22:03:23.707 # Can't rewrite append only file in background: fork: Cannot allocate memory\n[2535] 02 Jan 22:03:23.807 * Starting automatic rewriting of AOF on 7885% growth\n[2535] 02 Jan 22:03:23.807 # Can't rewrite append only file in background: fork: Cannot allocate memory\n[2535] 02 Jan 22:03:23.907 * Starting automatic rewriting of AOF on 7885% growth\n[2535] 02 Jan 22:03:23.926 * Background append only file rewriting started by pid 27302\n[27302] 02 Jan 22:04:05.337 * SYNC append only file rewrite performed\n[27302] 02 Jan 22:04:05.379 * AOF rewrite: 36 MB of memory used by copy-on-write\n[2535] 02 Jan 22:04:05.406 * Background AOF rewrite terminated with success\n[2535] 02 Jan 22:04:05.406 * Parent diff successfully flushed to the rewritten AOF (42 bytes)\n[2535] 02 Jan 22:04:05.406 * Background AOF rewrite finished successfully\n## df -h .\nFilesystem      Size  Used Avail Use% Mounted on\n/dev/xvdf       250G  4.5G  246G   2% /srv\n
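
        The echo above only lasts until reboot. A sketch of making the setting permanent, assuming a standard /etc/sysctl.d layout:

        echo 'vm.overcommit_memory = 1' > /etc/sysctl.d/99-overcommit.conf\nsysctl --system  # reload all sysctl config files\n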
        "},{"location":"redis/#find-what-is-using-the-most-memory","title":"Find what is using the most memory","text":"
        redis-cli --bigkeys\n
        "},{"location":"redis/#links","title":"Links","text":"
        • https://redis.io/topics/faq
        • https://redis.io/commands
        "},{"location":"retropie/","title":"retropie","text":"

        \"RetroPie allows you to turn your Raspberry Pi, ODroid C1/C2, or PC into a retro-gaming machine. It builds upon Raspbian, EmulationStation, RetroArch and many other projects to enable you to play your favourite Arcade, home-console, and classic PC games with the minimum set-up.\" - https://retropie.org.uk/

        "},{"location":"retropie/#links","title":"Links","text":""},{"location":"retropie/#-httpsgithubcomretropieretropie-docsblob3719d6docsretroarch-configurationmddefault-joypad-hotkeys-httpsretropieorgukdocsretroarch-configurationhotkeys","title":"- https://github.com/RetroPie/RetroPie-Docs/blob/3719d6/docs/RetroArch-Configuration.md#default-joypad-hotkeys / https://retropie.org.uk/docs/RetroArch-Configuration/#hotkeys","text":""},{"location":"retropie/#tips","title":"Tips","text":""},{"location":"retropie/#pair-bluetooth-controller","title":"Pair bluetooth controller","text":"

        Pairing game controllers can be difficult. One way to be sure you get the right controller is to pair the bluetooth controller to your computer, look for its MAC address there, then unpair it. Then go onto the terminal and run sudo ~/RetroPie-Setup/retropie_setup.sh and walk through pairing the controller there now that you know the MAC address. https://retropie.org.uk/docs/Bluetooth-Controller/
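
        If the computer you pair with is a linux box, a quick way to list known controller MAC addresses:

        bluetoothctl devices  # each known device is listed as: Device XX:XX:XX:XX:XX:XX Name\n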

        "},{"location":"retropie/#enable-rewind","title":"Enable rewind","text":"

        I don't know why rewind isn't enabled by default, since it's one of the best features.

        1. Start a game
        2. Enter the quick menu (Share + Triangle on PS4 controller)
        3. Go up one menu (X on PS4 controller)
        4. Navigate to Settings -> Input -> Hotkeys, then configure Rewind and Fast Forward to whatever you want.
        5. Go back up to the top menu, then go to Settings -> Frame Throttle -> Rewind, and toggle Rewind Support on.
        6. Go back up to the top menu, then go to Configuration File -> Save Current Configuration
        "},{"location":"rhel/","title":"rhel","text":"

        \"Red Hat, Inc. is an American multinational software company providing open-source software products to the enterprise community.\" - https://en.wikipedia.org/wiki/Red_Hat

        "},{"location":"rhel/#see-also","title":"See Also","text":"
        • CentOS is now owned by RedHat
        • Inline with Upstream Stable Community Project
        "},{"location":"robotics/","title":"robotics","text":""},{"location":"robotics/#links","title":"Links","text":"
        • http://botbench.com
        • http://www.andymark.com
        • http://www.vexrobotics.com
        • https://www.adafruit.com
        • https://www.reddit.com/r/robotics/wiki/index
        • https://www.sparkfun.com/
        "},{"location":"robotics/#see-also","title":"See also","text":"
        • Lego Robotics
        • Wonder Workshop
        "},{"location":"roku/","title":"Roku","text":""},{"location":"roku/#links","title":"Links","text":"
        • https://sdkdocs.roku.com/display/sdkdoc/BrightScript+Language+Reference
        "},{"location":"rook/","title":"rook","text":"

        \"File, Block, and Object Storage Services for your Cloud-Native Environments\" - https://rook.io/

        Rook is based on ceph.

        "},{"location":"ros/","title":"ros","text":"

        \"The Robot Operating System (ROS) is a set of software libraries and tools that help you build robot applications. From drivers to state-of-the-art algorithms, and with powerful developer tools, ROS has what you need for your next robotics project. And it's all open source.\": http://www.ros.org

        "},{"location":"ros/#links","title":"Links","text":"
        • https://wiki.ros.org/Distributions: ROS Distributions
        • http://wiki.ros.org/ROS/Tutorials: ROS tutorials
        • http://wiki.ros.org/turtlesim/Tutorials: ROS Turtle Example
        • https://github.com/CPFL/Autoware: Autoware built on top of ROS for Self-driving cars
        • http://wiki.ros.org/Robots/EV3: Robot Operating System for ev3
        • https://www.cse.sc.edu/~jokane/agitr/: A Gentle Introduction to ROS
        • https://github.com/svautosarusersgroup/meetupslides/: ROS2 / Apex.AI slides
        "},{"location":"rpm/","title":"rpm","text":"

        Red Hat Package Manager. \"rpm is a powerful Package Manager, which can be used to build, install, query, verify, update, and erase individual software packages.\" - man rpm

        "},{"location":"rpm/#tricks","title":"Tricks","text":""},{"location":"rpm/#show-installed-keys","title":"Show installed keys","text":"
        rpm -qa gpg-pubkey\n
        "},{"location":"rpm/#show-extended-info-about-all-keys","title":"Show extended info about all keys","text":"
        rpm -qa gpg-pubkey | xargs -n1 -P1 rpm -qi\n
        "},{"location":"rpm/#show-information-about-an-rpm-file","title":"Show information about an rpm file","text":"
        rpm -qpi filename.rpm\n
        "},{"location":"rpm/#show-all-installed-packages-and-when-they-were-installed","title":"Show all installed packages and when they were installed","text":"
        rpm -qa --last\n
        "},{"location":"rpm/#show-information-about-the-installed-wget-package","title":"Show information about the installed wget package","text":"
        rpm -qi wget\n
        "},{"location":"rpm/#output-formatted-information-about-packages","title":"Output formatted information about packages","text":"
        rpm -qa --queryformat \"%{NAME} %{PACKAGER} %{URL}\\n\" tomcat7\n

        More info on queryformat: http://www.rpm.org/max-rpm/ch-queryformat-tags.html

        "},{"location":"rpm/#show-which-package-installed-a-file","title":"Show which package installed a file","text":"
        rpm -qf /usr/bin/wget\n
        "},{"location":"rpm/#show-all-files-that-were-installed-by-package-wget","title":"Show all files that were installed by package wget","text":"
        rpm -ql wget\n
        "},{"location":"rpm/#show-all-files-in-a-package-that-is-not-yet-installed","title":"Show all files in a package that is not yet installed","text":"
        rpm -qpl ~/downloads/wget-1.10.2-78.i586.rpm\n
        "},{"location":"rpm/#show-which-documentation-files-get-installed-with-a-package","title":"Show which documentation files get installed with a package","text":"
        rpm -qd wget\n
        "},{"location":"rpm/#show-what-has-changed-on-the-system-since-installing-a-package","title":"Show what has changed on the system since installing a package","text":"

        This will verify file integrity and show you what has changed for each file.

        rpm -V openssl\n
        "},{"location":"rpm/#show-installation-and-uninstallation-scripts","title":"Show installation and uninstallation scripts","text":"
        rpm -qp --scripts foo.rpm\n
        "},{"location":"rpm/#check-the-integrity-of-an-rpm","title":"Check the integrity of an RPM","text":"
        rpm -K ~/downloads/filename.rpm\n
        "},{"location":"rpm/#show-which-packages-are-hogging-all-the-space","title":"Show which packages are hogging all the space","text":"
        rpm -qa --queryformat \"%{SIZE} %{NAME}\\n\" |sort -rn |head -n50 | column -t\n
        "},{"location":"rpm/#show-a-table-about-rpm-files-versions-and-creators-in-a-directory","title":"Show a table about RPM files versions and creators in a directory","text":"
        rpm -qp --queryformat \"%{NAME},%{VERSION},%{PACKAGER}\\n\" * | column -s, -t\n
        "},{"location":"rpm/#show-what-files-were-installed-into-varlog","title":"Show what files were installed into /var/log","text":"
        rpm -qa --filesbypkg | grep \" /var/log\" # space before /var is necessary to weed out things like /usr/var\n
        "},{"location":"rpm/#rebuild-a-corrupt-rpm-db","title":"Rebuild a corrupt rpm db","text":"
        rm -rf /var/lib/rpm/__db*\nrpm --rebuilddb\n
        "},{"location":"rpm/#see-also","title":"See Also","text":"
        • How to create RPMs - http://fedoraproject.org/wiki/How_to_create_an_RPM_package
        • yum - supplement to rpm command
        "},{"location":"rrd/","title":"rrd","text":"

        \"RRDtool is the OpenSource industry standard, high performance data logging and graphing system for time series data. RRDtool can be easily integrated in shell scripts, perl, python, ruby, lua or tcl applications.\" - https://oss.oetiker.ch/rrdtool/index.en.html

        "},{"location":"rrd/#acronyms","title":"Acronyms","text":"
        • cf = consolidation function
        • ds = data source
        • dst = data source type
        • rra = round robin archive
        "},{"location":"rrd/#examples","title":"Examples","text":""},{"location":"rrd/#reconfigure-the-x-axis-precision-of-an-rrd","title":"Reconfigure the X-axis precision of an RRD","text":"

        Assuming the first value (eg: 5856) is the value you want and 244 is the value you currently have, reconfigure data index 0,1,2:

        sudo rrdtool tune coral/pkts_in.rrd \"RRA#0:+$((5856-244))\" \"RRA#1:+$((20160-244))\" \"RRA#2:+$((52704-244))\"\n
        "},{"location":"rrd/#links","title":"Links","text":"
        • https://oss.oetiker.ch/rrdtool/tut/rrd-beginners.en.html
        "},{"location":"rst/","title":"reStructuredText","text":"

        \"reStructuredText is an easy-to-read, what-you-see-is-what-you-get plaintext markup syntax and parser system.\" - http://docutils.sourceforge.net/rst.html

        • http://rst.ninjs.org/
        "},{"location":"rsync/","title":"rsync","text":"

        Great way to sync one location to another, local or remote. Note that this does not mean full synchronization; two commands with reversed source and destination are required to accomplish that.

        "},{"location":"rsync/#syntax-examples","title":"Syntax Examples","text":""},{"location":"rsync/#giving-additional-ssh-options","title":"Giving additional ssh options","text":"
        rsync -e 'ssh -o ConnectTimeout=10 -o PasswordAuthentication=no' -Rai /home target:/\n
        "},{"location":"rsync/#exclude-filters","title":"Exclude Filters","text":"

        Exclude filters are kinda weird.

        • They're case sensitive and there's no way to be case insensitive.
        • They are relative to the root of the source URI. EG, rsync --exclude=\"Desktop/\" ~/ remotehost:~/

        Here is an example of what to use in --exclude-from=file.txt

        **Cache\n**Caches\n**cache\n**caches\n**/.dropbox\n**Previews.lrdata\n**/Library/Application\\ Support/Google/Chrome\n
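
        Then point rsync at that file (the paths here are arbitrary):

        rsync -ai --exclude-from=file.txt ~/ remotehost:~/\n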
        "},{"location":"rsync/#include-filters","title":"Include Filters","text":"

        Include filters should be specified before exclude filters if you have very broad exclude filters. For instance, to only transfer mp4 files:

        rsync -ai --include='*.mp4' --exclude=\"*\" remote-server:/media/movies/ ~/Movies/\n

        If you exclude first, nothing will be transferred.

        "},{"location":"rsync/#long-and-partial-transfers","title":"Long and Partial Transfers","text":"

        If you're doing transfers that you'd like to monitor and that risk being cut off, use this syntax:

        rsync -e ssh -az --partial --progress ./foo remotehost:~/bar/\n

        This will resume broken file transfers where they were left off, and give you completion statistics with transfer rate, percent complete and estimated time left.

        "},{"location":"rsync/#recursively-link-src-to-dst","title":"Recursively link src to dst","text":"

        rsync can be used to create a hard linked local copy of a whole tree. This is useful if you don't have GNU cp, where the same could be done with simply cp -lrp. On OS X with homebrew, GNU cp can be installed via brew install coreutils and accessed via gcp. See also ls -la /usr/local/opt/coreutils/bin/.

        Slashes are really really important here; this won't work if you get them wrong. Absolute paths must be given, thus ${PWD} and ${HOME} vs ~

        rsync -aP --link-dest=\"${PWD}/src\" ./src/ dst #recursively hard link ./src to dst\n

        For example:

        rsync -aivv --link-dest=\"${HOME}/Dropbox\" ${HOME}/Dropbox/some_dir ${HOME}/temp/\n

        This will create the directory ${HOME}/temp/some_dir and hard link all the files from the source into the destination. It should only take a few seconds. Lines with 'hf' indicate a hard linked file. Lines with 'cd' indicate 'created directory'.

        rsync can copy not only data but also filesystem attributes, and if these differ between the link-dest and the src, a hard link may not be created; instead, a copy of the file is made from the local filesystem and the correct metadata is applied from the source.

        "},{"location":"rsync/#backup-to-remote-host-with-timestamp","title":"Backup to remote host with timestamp","text":"

        The following example copies files from /local/host/src into /remote/host/path-2 but hard links those files against the data in /remote/host/path-1 on the receiving side if any files are identical. This avoids transferring data, is an efficient use of disk space for files that will be archived (IE: not changed in-place), and allows deletion of older copies of the data while keeping newer copies.

        rsync -aP --link-dest=\"/remote/host/path-1\" \"/local/host/src/\" \"$REMOTE_HOST\":/remote/host/path-2/\n

        Or for a daily datestamped backup using GNU date (this example will not work with BSD date like macOS has):

        rsync -aivv --link-dest=\"/archive/path/$(date -d \"-1 day\" \"+%F\")/\" /src/data/ \"${REMOTE_HOST}:/archive/path/$(date \"+%F\")/\"\n
        "},{"location":"rsync/#move-files-to-another-server-in-small-batches","title":"Move files to another server in small batches","text":"

        This is useful if you want to gradually clear up disk space rather than waiting until the end of a transfer of a large number of files to clear up disk space in one large operation.

        while date ;\nfiles=$(find /srv/backups/scribe/./ -type f -mtime +400 | head -n 500) ;\necho md5 of files ${#files} is $(echo ${files} | md5sum) ;\n[ ! -z \"${files}\" ] ; do\n  sudo rsync --bwlimit 20000 -RaPi --remove-source-files ${files} root@10.2.17.7:/srv/backups/scribe-sea/ ; echo sleeping ;\n  sleep 10 ;\ndone ;\n
        "},{"location":"rsync/#move-all-datestamped-files-older-than-the-beginning-of-the-previous-month-excluding-symlinks","title":"Move all datestamped files older than the beginning of the previous month, excluding symlinks","text":"

        This relies on gnu date, so use gdate if used on OS X.

        rsync -aPiv \\\n  --remove-source-files \\\n  --bwlimit 20000 \\\n  --exclude=\"**$(date -d \"1 month ago\" \"+%Y-%m\")**\" \\\n  --exclude=\"**$(date \"+%Y-%m\")**\" \\\n  --no-links \\\n  /srv/backups/scribe/* \\\n  root@10.2.17.7:/srv/backups/scribe-sea/\n
        "},{"location":"rsync/#reduce-time-precision-during-comparison","title":"Reduce time precision during comparison","text":"

        This is useful for rsyncing to FAT filesystems where time precision is 2 seconds.

        rsync --modify-window=1 # allow 1 second of difference in timestamps\n
        "},{"location":"rsync/#connect-as-a-normal-user-and-escalate-using-sudo","title":"Connect as a normal user and escalate using sudo","text":"

        Many times you have to copy files that your remote user does not have access to without sudo. You can perform this hoop-jump with ease using the following syntax:

        rsync --rsync-path='sudo rsync' $REMOTE_HOSTNAME:/etc/kubernetes/admin.conf \"$HOME/.kube/config\"\n
        "},{"location":"rsync/#use-rsync-with-find","title":"Use rsync with find","text":"

        You can use rsync --files-from=- to read files from stdin. You have to make sure the path used in find matches the source path for rsync.

        cd /tank/photos/ && find . -type f -print0 | rsync -ia0 --files-from=- . remote-host:/dest/\n
        "},{"location":"rsync/#rsync-to-fat32","title":"rsync to fat32","text":"

        fat32 has a 4GiB - 1 byte (0xFFFFFFFF, or 4294967295 bytes) file size limit. It also has some other shortcomings that I have not worked through yet, so this example is far from perfect.

        rsync --max-size=4GiB-1\n
        "},{"location":"rsync/#see-also","title":"See Also","text":"
        • https://github.com/lilydjwg/pssh: parallel ssh (pssh), rsync (prsync), and scp (pscp)
        • https://github.com/rsnapshot/rsnapshot: \"a tool for backing up your data using rsync\"
        "},{"location":"rust/","title":"Rust","text":"

        \"A language empowering everyone to build reliable and efficient software.\" - https://www.rust-lang.org

        "},{"location":"rust/#links","title":"Links","text":"
        • https://www.youtube.com/playlist?list=PLJbE2Yu2zumDF6BX6_RdPisRVHgzV02NW: Intro to rust programming videos
        • https://github.com/rust-lang/rustup: the Rust toolchain installer - https://rustup.rs
        • https://github.com/rust-lang/rust-by-example
        • https://github.com/esp-rs: Rust on ESP microcontrollers
        • https://github.com/joaocarvalhoopen/How_to_learn_modern_Rust
        • https://github.com/rochacbruno/py2rs: From Python into Rust
        • https://rauljordan.com/rust-concepts-i-wish-i-learned-earlier
        • https://doc.rust-lang.org/book: \"The Rust Programming Language\"
        • https://lokathor.github.io/gba-from-scratch: \"GBA From Scratch With Rust ... a book about programming for the Game Boy Advance using the Rust programming language.\"
        "},{"location":"rust/#books","title":"Books","text":"
        • https://www.manning.com/books/rust-in-action
        • https://www.oreilly.com/library/view/command-line-rust/9781098109424
        • https://hands-on-rust.com
        "},{"location":"saltstack/","title":"saltstack","text":"

        \"Software to automate the management and configuration of any infrastructure or application at scale.\" - http://github.com/saltstack/salt

        • https://github.com/saltstack/salt
        • https://docs.saltstack.com/en/latest/topics/tutorials
        • https://docs.saltstack.com/en/latest/topics/development/index.html
        "},{"location":"saltstack/#design-characteristics","title":"Design characteristics","text":"
        • Glossary: https://docs.saltstack.com/en/latest/glossary.html
        "},{"location":"saltstack/#common-commands","title":"Common commands","text":"

        All salt* commands require root access, so use sudo or log in as root.

        • salt: Salt allows for commands to be executed across a swath of remote systems in parallel. This means that remote systems can be both controlled and queried with ease.
        • salt-call: The salt-call command is used to run module functions locally on a minion instead of executing them from the master. Salt-call is used to run a Standalone Minion, and was originally created for troubleshooting.
        • salt-cloud: Salt Cloud is the system used to provision virtual machines on various public clouds via a cleanly controlled profile and mapping system.
        • salt-cp: Salt copy copies a local file out to all of the Salt minions matched by the given target.
        • salt-key: Salt-key executes simple management of Salt server public keys used for authentication.
        • salt-minion: The Salt minion daemon, receives commands from a remote Salt master.
        • salt-run: salt-run is the frontend command for executing Salt Runners. Salt runners are simple modules used to execute convenience functions on the master.
        • salt-ssh: Salt SSH allows for salt routines to be executed using only SSH for transport.
        "},{"location":"saltstack/#state-files","title":"State files","text":"

        These are desired state files, not a view of the current state. They are where you describe how you want the system to be.

        "},{"location":"saltstack/#grains","title":"Grains","text":"

        Facts about a system. Similar to facter in puppet land.

        "},{"location":"saltstack/#pillar","title":"Pillar","text":"

        Hierarchical data to be interpolated into variables in state files. Similar to hiera in puppet land.

        "},{"location":"saltstack/#examples","title":"Examples","text":""},{"location":"saltstack/#configure-output-options","title":"Configure output options","text":"

        Unfortunately this only applies to the salt command, not salt-run, salt-key, etc.

        $ cat ~/.saltrc\noutput: yaml\n
        "},{"location":"saltstack/#view-salt-versions","title":"View salt versions","text":"

        For simple salt version:

        salt --version\n

        For more specific versions:

        salt --versions\n
        "},{"location":"saltstack/#show-all-minions","title":"Show all minions","text":""},{"location":"saltstack/#show-all-responding-minions","title":"Show all responding minions","text":"
        salt-run manage.up\n
        "},{"location":"saltstack/#show-all-minions-listed-by-hostst-that-are-up-and-hosts-that-are-down","title":"Show all minions, listed by hostst that are up and hosts that are down","text":"
        salt-run manage.status\n

        Example output:

        down:\n    - hadoop4.chn1.example\nup:\n    - appserver1.chn1.example\n    - backups1.chn1.example\n
        "},{"location":"saltstack/#show-any-host-that-has-had-salt-applied-at-some-point","title":"Show any host that has had salt applied at some point","text":"

        This shows only accepted keys. Without the jq part, rejected and denied keys would also show up in this list.

        salt-key --out json | jq '.minions[]'\n
        "},{"location":"saltstack/#accept-a-key-that-has-not-yet-been-accepted","title":"Accept a key that has not yet been accepted","text":"

        After finding the hostname in the Unaccepted list returned by salt-key:

        salt-key -a hostname.example.com\n
        "},{"location":"saltstack/#show-the-version-of-an-installed-package-on-all-hosts","title":"Show the version of an installed package on all hosts","text":"
        salt '*' pkg.version bash\n
        "},{"location":"saltstack/#targeting-hosts","title":"Targeting hosts","text":"
        • https://docs.saltstack.com/en/latest/topics/targeting/#advanced-targeting-methods
        "},{"location":"saltstack/#target-using-globs","title":"Target using globs","text":"
        salt '*dev*' pkg.install pre-commit\n
        "},{"location":"saltstack/#target-using-regular-expressions","title":"Target using regular expressions","text":"
        salt -b1 -E 'miner..-aws' cmd.run 'service miner restart'\n
        "},{"location":"saltstack/#target-an-ip-subnet","title":"Target an IP subnet","text":"
        salt -t 15 -S '172.21.5.0/24' cmd.run 'dpkg -l linux-image'\n
        "},{"location":"saltstack/#target-a-specific-os","title":"Target a specific OS","text":"

        https://docs.saltstack.com/en/latest/topics/targeting/compound.html

        salt -C 'G@lsb_distrib_codename:trusty' pkg.install cmatrix\n
        "},{"location":"saltstack/#run-a-command-on-a-subset-of-hosts","title":"Run a command on a subset of hosts","text":"

        Check ntp stats on hadoop hosts.

        salt \"*hadoop*\" cmd.run \"ntpq -p\"\n

        Many more complicated examples of remote command execution: https://docs.saltstack.com/en/latest/topics/execution/remote_execution.html

        "},{"location":"saltstack/#show-ip-addresses","title":"Show IP addresses","text":"

        https://docs.saltstack.com/en/latest/ref/modules/all/salt.modules.network.html

        salt '*itni*' network.ip_addrs\n
        "},{"location":"saltstack/#show-available-grains","title":"Show available grains","text":"

        This just lists the grain keys, not the values

        salt '*minecraft*' grains.ls\n
        "},{"location":"saltstack/#show-grain-data-for-a-subset-of-hosts","title":"Show grain data for a subset of hosts","text":"

        This lists the keys and values

        salt '*dorks*' grains.items\n
        "},{"location":"saltstack/#show-one-grain-for-a-subset-of-hosts","title":"Show one grain for a subset of hosts","text":"
        salt '*elk*' grains.fetch lsb_distrib_release\n

        or...

        salt '*elk*' grains.item os\n
        "},{"location":"saltstack/#look-up-grain-data-while-logged-into-a-minion","title":"Look up grain data while logged into a minion","text":"

        While logged into a minion, you can view what pillar data would be applied:

        salt-call pillar.get users\n
        "},{"location":"saltstack/#append-a-username-to-the-accounts-grain-and-apply-the-users-saltstate","title":"Append a username to the accounts grain and apply the users saltstate","text":"
        salt '*searchstring*' grains.append accounts user-to-add\nsalt '*searchstring*' state.sls users\nsalt '*searchstring*' user.list_users --out yaml > list_users.yaml\n

Or as a function to run locally:

        add_user_via_salt_grains() {\n  new_user=$1\n  id \"${new_user}\" && return 0\n  salt-call grains.append accounts \"$new_user\" && \\\n  salt-call state.sls users\n  id \"$new_user\"\n}\n
        "},{"location":"samba/","title":"Samba","text":"

        Samba is the unix SMB daemon of choice.

        "},{"location":"samba/#commands","title":"Commands","text":""},{"location":"samba/#usrbinnet","title":"/usr/bin/net","text":"

Samba has a generically named binary called net, which matches the Windows command of the same name. It's used to manage Samba and CIFS servers.

        "},{"location":"samba/#testparm","title":"testparm","text":"

        testparm - check an smb.conf configuration file for internal correctness. This is great for having a heavily commented main file, like smb.conf.master, then generating the bare smb.conf from that file using testparm -s smb.conf.master > smb.conf.
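
For example, a minimal sketch of that generation step:

testparm -s smb.conf.master > smb.conf\n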

        "},{"location":"samba/#smbtree","title":"smbtree","text":"

        The smbtree tool will print out a tree list of all the reachable samba shares.
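
For example, to browse without being prompted for a password (-N is the standard Samba no-password flag):

smbtree -N\n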

        "},{"location":"samba/#tips","title":"Tips","text":""},{"location":"samba/#get-info-from-winbind","title":"get info from winbind","text":"

        wbinfo
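
A couple of common queries, as a sketch (flags documented in the wbinfo man page):

wbinfo -u  # list domain users\nwbinfo -g  # list domain groups\n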

        "},{"location":"samba/#clear-name-resolution-cache","title":"Clear name resolution cache","text":"

        net cache flush

        "},{"location":"samba/#get-a-remote-rpc-shell","title":"Get a remote rpc shell","text":"

        net rpc shell -U $user_name -S $host_name

        "},{"location":"samba/#show-open-sessions-on-local-server","title":"Show open sessions on local server","text":"

        net status shares

        "},{"location":"samba/#show-open-files-on-remote-server","title":"Show open files on remote server","text":"

        net rpc file -S $server_name

        "},{"location":"samba/#mount-a-samba-share-on-a-linux-client","title":"Mount a samba share on a linux client","text":"

        mount -t smbfs -o username=$user_name //$server_name/C$ $local_share_name

        "},{"location":"samba/#mount-a-remote-share","title":"Mount a remote share","text":"

        mount_smbfs \"//domain_name;username@hostname/groups\" asdf

        "},{"location":"samba/#kill-all-samba-sessions-for-a-given-user-hence-forcing-refresh-of-their-group-memberships","title":"Kill all Samba sessions for a given user, hence forcing refresh of their group memberships","text":"

        net status sessions | grep johndoe | awk '{print $1}' | xargs sudo kill

        "},{"location":"samba/#join-domain-in-ads-security-mode","title":"Join domain in ads security mode","text":"

        net ads join -U dhoherd@DOMAIN.EXAMPLE.COM

        "},{"location":"samba/#leave-domain","title":"Leave domain:","text":"

        net ads leave -U dhoherd@DOMAIN.EXAMPLE.COM

        "},{"location":"samba/#upgrading-supermicro-firmware","title":"Upgrading Supermicro firmware","text":"

Supermicro iKVM can only mount ISOs that are hosted on Samba shares... \ud83d\ude44 In my experience they also only support the old and vulnerable SMBv1 protocol (WannaCry spread via SMBv1). To host ISOs for use with iKVM, it's useful to run Samba in Docker so it is isolated and only running while you are using it.

        This example uses the docker image dperson/samba to start a Samba server sharing one passwordless /public share which mounts $HOME/isos from outside the container.

        docker run \\\n  --name \"$USER-samba\" \\\n  --rm \\\n  -d \\\n  -p 139:139 \\\n  -p 445:445  \\\n  -v $HOME/isos:/public  \\\n  -d dperson/samba \\\n  -s \"public;/public\" \\\n  -g \"server min protocol = NT1\" \\\n  -g \"log level = 3\"\n
        "},{"location":"samsung/","title":"Samsung","text":""},{"location":"samsung/#android-links-relevant-to-samsung","title":"Android links relevant to Samsung","text":"
        • https://www.sammobile.com/firmwares/database/SM-T700/
        • https://www.sammobile.com/samsung/galaxy-s6/firmware/
        • https://en.wikipedia.org/wiki/Android_version_history
        • https://en.wikipedia.org/wiki/Samsung_Galaxy_S6
        • https://en.wikipedia.org/wiki/Samsung_Galaxy_Tab_S_8.4
        • https://www.kingoapp.com/help/samsung-knox-counter.htm
        • https://wiki.lineageos.org/devices/klimtwifi/install: Galaxy Tab S 8.4 WiFi
        • https://wiki.lineageos.org/devices/zerofltexx/: Galaxy S6
        • https://github.com/LineageOS/lineage_wiki/blob/57994e983af4e7/_data/devices/klimtwifi.yml
        "},{"location":"samsung/#tv-links","title":"TV links","text":"
        • UN55D6000 specs
        "},{"location":"samsung/#frame-info","title":"Frame info","text":"
• 2022 versions of the Frame removed an API that allowed you to manage the artwork that is available to the TV for display.
        "},{"location":"science/","title":"Science","text":""},{"location":"science/#links","title":"Links","text":"
        • https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5862244/: \"Getting rigorous with scientific rigor\"
        "},{"location":"screenshot/","title":"Screenshots","text":"

Different methods for grabbing a graphical image of what is being displayed on a physical or virtual device.

        "},{"location":"screenshot/#linux","title":"Linux","text":""},{"location":"screenshot/#grab-all-vt-screenshots","title":"Grab all vt screenshots","text":"
        for X in {0..10} ; do\n  sudo DISPLAY=:0 fbgrab -c${X} fbgrab_vt${X}_screenshot.png ;\ndone ;\n
        "},{"location":"screenshot/#screenshot-x-using-scrot","title":"Screenshot X using scrot","text":"
        sudo DISPLAY=:0 scrot -b -d 5 'scrot_%F-%T.png'\n
        "},{"location":"screenshot/#screenshot-x-using-imagemagick","title":"Screenshot X using imagemagick","text":"
        sudo DISPLAY=:0 import -window root imagemagick_screenshot.png\n
        "},{"location":"screenshot/#macos","title":"macOS","text":"
        screencapture screenshot.png\n

        There are a lot of command line args for this tool.

        "},{"location":"sdr/","title":"sdr","text":"

        Software Defined Radio

        "},{"location":"sdr/#overview","title":"Overview","text":"

SDR is dominated by Windows software, so this page leaves all of that out and deals only with Linux and macOS software.

        "},{"location":"sdr/#links","title":"Links","text":"
        • https://www.weather.gov/nwr: NOAA Weather Radio
        • http://gqrx.dk: GUI SDR software for macOS and Linux (including Raspberry Pi).
        • https://cubicsdr.com: GUI SDR software for macOS, Linux (including Raspberry Pi), and Windows.
        • https://www.radioreference.com
        • http://www.sigidwiki.com
        • http://www.baudline.com: \"Baudline is a time-frequency browser designed for scientific visualization of the spectral domain.\"
        • http://gnuradio.org: \"GNU Radio is a free & open-source software development toolkit that provides signal processing blocks to implement software radios.\"
        • https://github.com/miek/inspectrum: \"inspectrum is a tool for analysing captured signals, primarily from software-defined radio receivers.\"
        • https://www.sigidwiki.com/wiki/Signal_Identification_Guide
        • https://www.rail.watch/rwpi.html: \"Rail Watch Raspberry Pi Monitoring Software\"
        "},{"location":"security/","title":"Security","text":""},{"location":"security/#internet","title":"Internet","text":"
        • https://www.cisecurity.org
        • https://csrc.nist.gov/glossary: \"This Glossary includes terminology from the final version of NIST's cybersecurity and privacy publications\"
        "},{"location":"security/#physical","title":"Physical","text":"
        • https://www.youtube.com/@lockpickinglawyer
        "},{"location":"sed/","title":"sed","text":"

        sed is the stream editor.

        "},{"location":"sed/#tips","title":"Tips","text":""},{"location":"sed/#osx-pitfalls","title":"OSX Pitfalls","text":"

Beware that BSD sed -i requires a mandatory argument specifying the backup file suffix. You can use -i '' to have no backup file.
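
A sketch of the two BSD invocations:

sed -i '' 's/foo/bar/' file.txt     # edit in place, no backup\nsed -i '.bak' 's/foo/bar/' file.txt # edit in place, keeping file.txt.bak\n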

        Also, OS X sed doesn't support case insensitivity! WTF?! We have to use perl -pe 's/foo/bar/i' foo.txt or homebrew's gsed.

        "},{"location":"sed/#only-print-a-specific-line","title":"Only print a specific line","text":"

        This will print only the second line of the file

sed -n '2{p;q;}' foo.txt\n
        "},{"location":"sed/#only-print-if-match","title":"Only print if match","text":"

        This will perform a replacement and print the result. Use -i (with caution!) to edit the file at the same time.

sed -n 's/\\(127\\.[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}\\)/\\1 localhost localhost4/p' /etc/hosts\n
        "},{"location":"sed/#add-a-new-line-with-content-after-a-match","title":"Add a new line with content after a match","text":"

Since BSD sed can't insert escape sequences like \\n in the text of an a command, the appended text has to go on its own line, so it's a bit funky looking but still functional.

        sed -i \"\" -e '/respawn/a\\\nrespawn limit 10 5' app_worker_*.conf\n
        "},{"location":"sed/#print-file-starting-with-first-string-match","title":"Print file starting with first string match","text":"
        sed -n '/ROUTING TABLE/,$p' /etc/openvpn/openvpn-status.log\n
        "},{"location":"sed/#print-only-lines-after-a-match","title":"Print only lines after a match","text":"

        The syntax in both of these is <sed_address_one>,<sed_address_two> <action>

        $ k describe pod -n moms-iot-oven cookie-baker-2022-proxy |\nsed -ne '/^Events/,$ p;'\nEvents:\n  Type    Reason   Age                       From     Message\n  ----    ------   ----                      ----     -------\n  Normal  Pulling  56m (x1281 over 4d13h)    kubelet  Pulling image \"ubuntu:2004\"\n  Normal  BackOff  118s (x29062 over 4d13h)  kubelet  Back-off pulling image \"ubuntu:2004\"\n

        That says \"Do not print each line by default (-n). Start at the sed address which is a 'context address' regex /^Events/, and end at the special sed address $ which means the last line of input, and print those lines. (All of this info is in the man page.)

        Or if you don't want to include the match:

        sed -e '1,/^Events/ d'\n

        This says \"Start at line one and delete every line up to and including the match ^Events.\"

        "},{"location":"sed/#print-file-contents-between-two-string-matches","title":"Print file contents between two string matches","text":"

        This will print the contents of the log file between ROUTING TABLE and GLOBAL STATS inclusive.

        sed -n '/^ROUTING TABLE/,/^GLOBAL STATS/p;/^GLOBAL STATS/q' /etc/openvpn/openvpn-status.log\n

        Or as a bash function

        show-contents-between() { sed -n \"/$1/,/$2/p;/$2/q\"; }\n
        "},{"location":"sed/#uncomment-a-line-that-matches-a-regex","title":"Uncomment a line that matches a regex","text":"

        This removes the comment and adds wheel to the sudoers list

        /bin/sed -i '/^#\\s\\+%wheel\\s\\+ALL=(ALL)\\s\\+ALL$/s/^#\\s*//' /etc/sudoers\n
        "},{"location":"sed/#delete-lines-containing-a-string","title":"Delete lines containing a string","text":"
        sed -i -e '/root/d' asdf.txt\n
        "},{"location":"sed/#delete-lines-not-containing-a-string","title":"Delete lines not containing a string","text":"
        sed -i '/foo/!d' wy.txt\n

        Or not containing a MAC address:

        sed -i '' -E '/[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}/!d' *\n
        "},{"location":"sed/#do-a-replacement-on-all-files-in-a-dir","title":"Do a replacement on all files in a dir","text":"
        sed -i \"s/foo/bar/g\" /etc/apache2/sites-available/*\n
        "},{"location":"sed/#switch-all-github-urls-from-http-to-ssh","title":"Switch all github urls from http to ssh","text":"
sed -E '/url = /s%https?://github.com/([^/]*/[^/]*)%git@github.com:\\1%' ~/code/*/.git/config\n
        "},{"location":"sed/#word-boundaries","title":"Word boundaries","text":"

        Normally, word boundaries look like this:

        /\\bMyWord\\b/\n

        or

        /\\<myword\\>/\n

        But in OS X, you have to do them like this:

        /[[:<:]]MyWord[[:>:]]/\n

        Which is just ridiculous, so use homebrew's gsed if you can.

        "},{"location":"sed/#add-a-bell-to-tail","title":"Add a bell to tail","text":"
tail -n 0 -f /var/log/messages | sed 's/$/\\a/'\n
        "},{"location":"sed/#see-also","title":"See Also","text":"
        • Some great sed tips - http://www-rohan.sdsu.edu/doc/sed.html
        "},{"location":"selinux/","title":"selinux","text":"

        Security Enhanced Linux

        "},{"location":"selinux/#notes","title":"Notes","text":"
        • Tutorial Video: https://www.youtube.com/watch?v=MxjenQ31b70
        • CentOS HowTo: http://wiki.centos.org/HowTos/SELinux
        • Labels are in user:role:type:level(optional)
        • Logs go in /var/log/audit/audit.log and /var/log/messages
• Additional tools:
• semanage and more are included in the CentOS package policycoreutils
• setroubleshoot has a bunch of tools included. Lots of prerequisites.
• setroubleshoot-server has a bunch of tools included. Lots of prerequisites.
        "},{"location":"selinux/#examples","title":"Examples","text":""},{"location":"selinux/#show-status-of-selinux","title":"Show status of selinux","text":"
        sestatus\ngetenforce\n
        "},{"location":"selinux/#disable-without-rebooting","title":"Disable without rebooting","text":"
        echo 0 >/selinux/enforce\n

        or...

        setenforce 0\n
        "},{"location":"selinux/#list-selinux-contexts-for-processes","title":"List selinux contexts for processes","text":"
        ps auxZ\n
        "},{"location":"selinux/#list-selinux-contexts-for-processes-that-have-open-sockets","title":"List selinux contexts for processes that have open sockets","text":"
        lsof -i -Z # See `man lsof` for more specific selinux syntaxes\n
        "},{"location":"selinux/#list-selinux-contexts-for-the-current-user","title":"List selinux contexts for the current user","text":"
        id -Z\n
        "},{"location":"selinux/#list-selinux-contexts-for-files","title":"List selinux contexts for files","text":"
        ls -lZ\n
        "},{"location":"selinux/#recursively-set-a-context-type","title":"Recursively set a context type","text":"
        chcon -R -t httpd_sys_content_t sole\n
        "},{"location":"selinux/#copy-the-selinux-context-from-another-file-or-directory","title":"Copy the selinux context from another file or directory","text":"
        chcon --reference /file/or/dir/to/reference /target/file\n
        "},{"location":"selinux/#restore-default-contexts","title":"Restore default contexts","text":"

        This command restores the contexts as referenced in /etc/selinux/targeted/contexts/files/file_contexts

        restorecon /path/to/broken/file\nrestorecon -vR /path/to/broken/dir\n
        "},{"location":"selinux/#restore-defaults-context-automatically-at-system-reboot","title":"Restore defaults context automatically at system reboot","text":"

        This should take roughly the same amount of time as a fsck would.

        touch /.autorelabel\n
        "},{"location":"selinux/#define-a-default-context-for-a-directory","title":"Define a default context for a directory","text":"
semanage fcontext -a -t httpd_sys_content_t '/z5/sole(/.*)?'\n
        "},{"location":"selinux/#define-a-default-context-for-a-directory-using-a-reference-from-the-original-policy","title":"Define a default context for a directory, using a reference from the original policy","text":"
        semanage fcontext -a -e /var/www /z5/sole\ncat /etc/selinux/targeted/contexts/files/file_contexts.subs # view the result\n
        "},{"location":"selinux/#list-policies","title":"List policies","text":"
        semanage port -l\nsemanage user -l\n
        "},{"location":"selinux/#show-selinux-booleans","title":"Show selinux booleans","text":"
        getsebool -a\n
        "},{"location":"selinux/#permanetnly-set-an-selinux-boolean","title":"Permanetnly set an selinux boolean","text":"
        setsebool -P booleanname 1\n
        "},{"location":"semver/","title":"semver","text":"

        Semantic Versioning

        "},{"location":"semver/#links","title":"Links","text":"
        • Online semver checking tool: https://jubianchi.github.io/semver-check
        • https://hynek.me/articles/semver-will-not-save-you/
        • BNF checking tool
        • Example valid and invalid semvers
        • Semver cheatsheet
        "},{"location":"sensu/","title":"sensu","text":"

        \"Monitor servers, services, application health, and business KPIs. Get notified about failures before your users do. Collect and analyze custom metrics. Give your business the competitive advantage it deserves.\" - https://sensuapp.org

        "},{"location":"sensu/#overview","title":"Overview","text":"
        • Checks - used to monitor services or measure resources
        • Handlers - for taking action on Sensu events, which are produced by checks
        • Filters - for filtering (removing) events destined for one or more event handlers
        • Mutators - transform event data for handlers
        "},{"location":"sensu/#checks","title":"Checks","text":"
        • standalone checks are scheduled to run periodically on the client (eg: all hosts need to check disks every 15 minutes)
        • subscription checks are requested by the server to hosts with a given tag (eg: all web hosts need to run check_http)
        "},{"location":"sensu/#see-also","title":"See Also","text":"
        • Uchiwa - Open source dashboard for Sensu.
        • Puppet + Sensu = Love; Infrastructure as Code and Monitoring, Sharing the Same Development Workflow
        • SF DevOps Meetup: Kyle Anderson, Sensu @ Yelp Part 1, Part 2
        "},{"location":"serverless/","title":"serverless","text":"

        \"Serverless is your toolkit for deploying and operating serverless architectures. Focus on your application, not your infrastructure.\" - https://serverless.com/

        "},{"location":"sgdisk/","title":"sgdisk","text":"

        \"sgdisk - Command-line GUID partition table (GPT) manipulator for Linux and Unix\" - man sgdisk

        "},{"location":"sgdisk/#see-also","title":"See also","text":"
        • gdisk is an interactive prompt interface.
        • cgdisk is a curses interface to gdisk, similar to fdisk in MS Windows of yore.
        "},{"location":"sgdisk/#examples","title":"Examples","text":""},{"location":"sgdisk/#delete-all-gpt-and-mbr-entries-and-create-a-new-gpt","title":"Delete all GPT and MBR entries and create a new GPT","text":"
        sgdisk -Z /dev/sdz\n
        "},{"location":"sgdisk/#create-a-new-partition","title":"Create a new partition","text":"
        • Partition numbers start at 1
        • The syntax here is --new <partition_number>[:<start>[:<end>]]
        • See the man page for a variety of ways to reference start and end.
        sgdisk --new 1:2048:732566636 /dev/sdz\n
        "},{"location":"sgdisk/#randomize-guids-to-ensure-uniqueness-after-cloning","title":"Randomize GUIDs to ensure uniqueness after cloning","text":"
        sgdisk --randomize-guids /dev/sdz\n
        "},{"location":"sgdisk/#print-info-about-partitions-on-a-disk","title":"Print info about partitions on a disk","text":"
        sgdisk -p /dev/sdz\n
        "},{"location":"sgdisk/#destroy-gpt-partition-table","title":"Destroy GPT partition table","text":"
        sgdisk -z /dev/sdz\n
        "},{"location":"sgdisk/#print-last-block-number-of-the-largest-available-section-of-the-disk","title":"Print last block number of the largest available section of the disk","text":"
        sgdisk -E /dev/sdz\n
        "},{"location":"sgdisk/#print-extended-info-about-the-first-partition-on-devsda","title":"Print extended info about the first partition on /dev/sda","text":"
        sgdisk -i 1 /dev/sdz\n
        "},{"location":"sgdisk/#backup-a-guid-partition-table","title":"Backup a GUID partition table","text":"
        sgdisk -b ~/sdz_partition_backup /dev/sdz\n
        "},{"location":"sgdisk/#restore-a-guid-partition-table","title":"Restore a GUID partition table","text":"
        sgdisk -l ~/sdz_partition_backup /dev/sdz\n
        "},{"location":"sgdisk/#create-a-new-partition_1","title":"Create a new partition","text":"

This creates a 4th partition that is 50G, using the default starting point (0 means use the default):

        sgdisk /dev/sdz --new=4:0:+50G\n
        "},{"location":"sgdisk/#delete-the-4th-partition","title":"Delete the 4th partition","text":"
        sgdisk /dev/sdz -d 4\n
        "},{"location":"sgdisk/#create-a-new-partition-number-4-that-fills-the-biggest-available-section-of-the-disk","title":"Create a new partition number 4 that fills the biggest available section of the disk","text":"
        sgdisk /dev/sdz -N 4\n
        "},{"location":"sgdisk/#grab-the-name-of-a-partition","title":"Grab the name of a partition","text":"
        ## sloppy, doesn't handle spaces or single quotes\nsgdisk /dev/sdk -i 1 | grep '^Partition name' | awk '{print $NF}' | sed \"s/'//g\"\n
        "},{"location":"shairport-sync/","title":"shairport-sync","text":"

        \"AirPlay audio player. Shairport Sync adds multi-room capability with Audio Synchronisation\" - https://github.com/mikebrady/shairport-sync

        "},{"location":"shairport-sync/#example","title":"Example","text":""},{"location":"shairport-sync/#shairport-sync-in-docker","title":"shairport-sync in docker","text":"

This is a really easy way to get shairport-sync running on Linux. The container image is 12 MB.

        docker run \\\n  --rm \\\n  --detach \\\n  --network=host \\\n  --device /dev/snd \\\n  -e AIRPLAY_NAME=\"shairport ${HOSTNAME}\" \\\n  kevineye/shairport-sync\n
        "},{"location":"shred/","title":"Shred","text":"

        \"shred - overwrite a file to hide its contents, and optionally delete it.\" - man shred

        "},{"location":"shred/#examples","title":"Examples","text":""},{"location":"shred/#write-random-data-to-a-hard-disk-5-times-then-once-with-zeroes","title":"Write random data to a hard disk 5 times, then once with zeroes","text":"

shred -n 5 -z /dev/disk/by-id/usb-Seagate_External_2HC015KJ-0:0\n

        "},{"location":"shred/#write-zeroes-to-a-disk-5-times-and-report-progress","title":"Write zeroes to a disk 5 times and report progress","text":"
        for _ in {1..5} ; do shred -n 0 -z -v /dev/disk/by-id/usb-Seagate_External_2HC015KJ-0:0 ; done ;\n
        "},{"location":"shutdown/","title":"shutdown","text":"

a command to shut down, power off, or reboot a *nix server.

        "},{"location":"shutdown/#linux-examples","title":"Linux Examples","text":""},{"location":"shutdown/#reboot-a-server-in-5-minutes-with-a-message","title":"Reboot a server in 5 minutes with a message","text":"
        shutdown -r +5 \"Please announce in #dev if you would like to cancel this reboot\"\n
        "},{"location":"shutdown/#power-off-a-server-in-5-minutes","title":"Power off a server in 5 minutes","text":"
        shutdown -P +5\n
        "},{"location":"shutdown/#cancel-a-scheduled-shutdown","title":"Cancel a scheduled shutdown","text":"
        shutdown -c\n
        "},{"location":"shutdown/#show-when-a-shutdown-will-occur","title":"Show when a shutdown will occur","text":"
On systemd hosts, the scheduled shutdown time is recorded in /run/systemd/shutdown/scheduled as a USEC= microsecond timestamp:

awk -F= 'NR == 1 {print substr($2,1,length($2)-6)}' /run/systemd/shutdown/scheduled |\nxargs -I{} date -d @{}\n
        "},{"location":"shutdown/#macos-examples","title":"MacOS examples","text":""},{"location":"shutdown/#power-off-a-machine-in-5-minutes","title":"Power off a machine in 5 minutes","text":"
        shutdown -h +5 \"Shutting down in 5 minutes\"\n

        or...

        echo \"Shutting down in 5 minutes\" | shutdown -h $(date -v +5M +%y%m%d%H%M) -\n
        "},{"location":"shutdown/#other-functions","title":"Other functions","text":"

Using the above shutdown syntax, you can use -s for sleep, -r for reboot, and others. See man shutdown for more.
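
For example, a sleep timer using the macOS-only -s flag:

shutdown -s +30 \"Sleeping in 30 minutes\"\n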

        "},{"location":"sips/","title":"sips","text":"

        \"scriptable image processing system.\" - sip --help

        "},{"location":"sips/#examples","title":"Examples","text":""},{"location":"sips/#resize-a-dng-and-save-the-output-as-jpg","title":"Resize a DNG and save the output as JPG","text":"
        SOURCE=foo.dng\nsips --resampleHeightWidthMax 1024  --setProperty format jpeg \"$SOURCE\" --out \"${SOURCE%.dng}.jpg\"\n
        "},{"location":"sips/#resize-all-images-in-the-cwd-that-were-taken-by-the-d5100","title":"Resize all images in the CWD that were taken by the D5100","text":"
        mdfind -onlyin \"$PWD\" 'kMDItemAcquisitionModel == \"NIKON D5100\"' |\nwhile read -r file ; do\n  sips --resampleHeightWidthMax 1600 --setProperty format jpeg \"${file}\" --out \"${file%.*}.jpg\"\ndone\n
        "},{"location":"sips/#resize-all-images-in-a-dir-tree-convert-them-to-jpg-and-output-them-to-a-different-folder","title":"Resize all images in a dir tree, convert them to jpg and output them to a different folder","text":"

        In the following example it is important to leave off the trailing slash on the target dir:

        SRC_DIR=\"${HOME}/Pictures/photo_queue\"\nOUT_DIR=\"${HOME}/Desktop/Stuff/\"\nMAX_WIDTH=1600\nfind \"${SRC_DIR}\" \\\n  -type f \\\n  -exec sips \\\n        --resampleHeightWidthMax \"${MAX_WIDTH}\" \\\n        --setProperty format jpeg {} \\\n        --out \"${OUT_DIR}\" \\;\n
        "},{"location":"sips/#see-also","title":"See Also","text":"
        • exiftool
        • graphicsmagick
        • imagemagick
        • jpeginfo
        "},{"location":"slides/","title":"slides","text":"

        Technologies used to create slide decks

        "},{"location":"slides/#links","title":"Links","text":"
        • https://github.com/hakimel/reveal.js
        • https://github.com/mikepea/awk_tawk
        • https://rise.readthedocs.io
        "},{"location":"smartctl/","title":"smartctl","text":"

        Linux interface to SMART data for hard disks.

        "},{"location":"smartctl/#examples","title":"Examples","text":""},{"location":"smartctl/#show-identifying-information-about-a-device","title":"Show identifying information about a device","text":"
        smartctl -i /dev/sda\n
        "},{"location":"smartctl/#show-drive-attributes","title":"Show drive attributes","text":"

        This shows a bunch of recorded information that is updated over the life of the drive.

        smartctl -A /dev/sda\n

Show the same information with nicer output that includes vendor flags:

        smartctl -A -f brief /dev/sda\n
        "},{"location":"smartctl/#show-all-data","title":"Show all data","text":"
        smartctl -x /dev/sda\n
        "},{"location":"smartctl/#perform-a-self-test","title":"Perform a self test","text":"
        smartctl -t short /dev/sda\n
        "},{"location":"smartctl/#view-the-results-of-a-self-test","title":"View the results of a self test","text":"

        Make sure to check for the presence of a \"Self-test routine in progress\" line.

        smartctl -l selftest /dev/sda\n
        "},{"location":"smartctl/#show-how-many-hours-each-drive-has-been-powered-on","title":"Show how many hours each drive has been powered on","text":"
        $ lsblk -I8 -pdo NAME,SIZE,MODEL,SERIAL -n |\nwhile read -r drive size extras ; do\n  echo \"$drive|$(sudo smartctl -A \"$drive\" | awk '/Power_On_Hours/ {print $NF}')|$size|$extras\"\ndone | column -s'|' -t\n/dev/sda    35265    238.5G    Samsung SSD 850  S33KNX0JB59421F\n/dev/sdb    41261    7.3T      HGST HUH728080AL VKHAVT1X\n/dev/sdc    41305    7.3T      HGST HUH728080AL VKH46K7X\n/dev/sdd    41320    7.3T      HGST HUH728080AL VKH82NNX\n/dev/sdf    73       10.9T     HGST HUH721212AL 8CK26DWK\n
        "},{"location":"smartctl/#see-also","title":"See Also","text":"
        • https://www.backblaze.com/blog/what-smart-stats-indicate-hard-drive-failures/
        "},{"location":"smartstack/","title":"smartstack","text":"

        SmartStack is an automated service discovery and registration framework.

        "},{"location":"smartstack/#components","title":"Components","text":"
• Synapse: Watches ZooKeeper for healthy services to connect to, then configures HAProxy
• Nerve: Health checks local services and registers their state in ZooKeeper
• ZooKeeper: Service registry
• HAProxy: Load balancing
        "},{"location":"smartstack/#links","title":"Links","text":"
        • DockerCon 14: Tomas Doran - Building a smarter application stack
        • Smartstack ( HAProxy + Serf ) Automated service discovery without rewriting apps
        • GetYourGuide's SmartStack handbook
        "},{"location":"snap/","title":"snap","text":"

        \"Package any app for every Linux desktop, server, cloud or device, and deliver updates directly.\" - http://snapcraft.io/

        A snap is a fancy zip file containing an application together with its dependencies, and a description of how it should safely be run on your system, especially the different ways it should talk to other software.

Most importantly, snaps are designed to be secure, sandboxed, containerised applications isolated from the underlying system and from other applications. Snaps allow the safe installation of apps from any vendor on mission-critical devices and desktops.

        "},{"location":"snap/#links","title":"Links","text":"
        • https://www.ubuntu.com/internet-of-things
        • https://developer.ubuntu.com/en/snappy/
        • http://snapcraft.io/
        "},{"location":"snmp/","title":"snmp","text":"

        Simple Network Management Protocol

        "},{"location":"snmp/#links","title":"Links","text":"
        • Third party MIBs: https://github.com/trevoro/snmp-mibs/tree/master/mibs
        • How-To: https://web.archive.org/web/20070122063239/http://www.linuxhomenetworking.com/wiki/index.php/Quick_HOWTO_:_Ch22_:_Monitoring_Server_Performance
        • APC has some snmp tricks specific to those devices.
        "},{"location":"snmp/#examples","title":"Examples","text":""},{"location":"snmp/#install-snmp-utils-on-redhat-centos","title":"Install snmp utils on redhat / centos","text":"
        yum install net-snmp-utils\n
        "},{"location":"snmp/#show-the-system-description-of-a-host","title":"Show the system description of a host","text":"
        snmpwalk -v 1 -c public 192.168.9.1 SNMPv2-MIB::sysDescr.0\n
        "},{"location":"snmp/#walk-1722811110-with-community-string-itgwrk","title":"Walk 172.28.111.10 with community string itgwrk","text":"
        snmpwalk -v 1 -c \"public\" 172.28.111.10\n

IPv6 is different...

        snmpwalk -v 1 -c public udp6:fe80::a2e:5fff:feba:f586%en0 enterprises\n
        "},{"location":"snmp/#show-network-info","title":"Show Network Info","text":"
        snmpwalk -c public 192.168.9.1 1.3.6.1.2.1\n
        "},{"location":"snmp/#show-airport-upload-and-download-bytes","title":"Show Airport Upload and Download bytes","text":"
        snmpwalk -c public 192.168.9.1 IF-MIB::ifOutOctets\nsnmpwalk -c public 192.168.9.1 IF-MIB::ifInOctets\n
        "},{"location":"snmp/#show-configured-ip-addresses","title":"Show configured IP addresses","text":"

This shows IPv4 addresses by querying over IPv6:

        snmpwalk -v 1 -c public udp6:fe80::a2e:5fff:feba:f586%en0 ipAdEntAddr\n
        "},{"location":"solo/","title":"solo","text":"

solo is a command that uses a per-user loopback IP address and a designated port to ensure that multiple copies of a command are not run. This takes the place of pid files and process tracking, and has the benefit of never leaving around a false positive. It also lets you skip building trap, pid file, and process checking into every shell script you write.

        https://github.com/timkay/solo
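
A sketch of typical usage in a crontab, following the examples in the project README (the -port flag and port value here are assumptions; any unused per-user port works):

* * * * * solo -port=3801 rsync -a /source/ /dest/\n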

        "},{"location":"sort/","title":"sort","text":"

        sort is a command to sort lines of data.

        "},{"location":"sort/#gnu-syntax-examples","title":"GNU Syntax Examples","text":"

In OS X, GNU sort is available as gsort (via homebrew's coreutils).

        "},{"location":"sort/#avoid-leading-space-caveat","title":"Avoid leading space caveat","text":"

        There is a big caveat to using GNU sort with data that has inconsistent whitespace.

        $ cat test-data.txt\nfoo     5\nbar  2\nbaz         9\n$ gsort -k2 --debug test-data.txt\ngsort: text ordering performed using \u2018en_US.UTF-8\u2019 sorting rules\ngsort: leading blanks are significant in key 1; consider also specifying 'b'\nbaz         9\n   __________\n_____________\nfoo     5\n   ______\n_________\nbar  2\n   ___\n______\n

        As the debug text indicates, using -b can avoid this, though I'm not sure why this isn't the default behavior:

        $ gsort -b -k2 --debug test-data.txt\ngsort: text ordering performed using \u2018en_US.UTF-8\u2019 sorting rules\nbar  2\n     _\n______\nfoo     5\n        _\n_________\nbaz         9\n            _\n_____________\n
        "},{"location":"sort/#randomly-sort-a-file-in-place","title":"Randomly sort a file in place","text":"

        By giving the -o the same output file as the input file we can shuffle in-place without errors. Trying this same thing using a pipe or a redirect will usually cause an empty file.

Beware that this will put duplicate lines right next to each other. If you need better file content shuffling, use shuf (see the sketch after this example).

        sort -o foo -R foo\n
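
The shuf equivalent, also safe in place because shuf reads all input before opening the output file:

shuf -o foo foo\n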
        "},{"location":"sort/#sort-by-multiple-fields-with-different-sort-requirements","title":"Sort by multiple fields with different sort requirements","text":"

        When sorting by multiple fields, it's important to specify the start and end of where you want the sort to occur. If you do not do this, you may get too short of a comparison, or too long of a comparison. Check the output of --debug if you don't get the right sort order.

        # -k defines the sort key as starting position, sort style, ending position\n# -r is included in the second key to reverse numeric sort\n\ngsort -k1d,1 -k2nr,2\n

        Another practical example of this is sorting authorized_keys files by their comment, putting commented keys at the bottom, which keeps the columns nicely aligned. For instance, if you have this file:

        #ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBDhB9kjaireRsJgPASR2jJqU0o8UvIVIPunKNQmS+mw user@a-key-we-want-to-manually-enable\n#ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKeUlnQ8TVgYkI1/DvPMhHJoujYarUvdBx3/BA1mlZLs another-user@some-other-commented-key\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMWu8gF1qT19FtikCMoIBnmEJH1nKyrcC/pRCnvWzoSa bastion-01\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICY9ScGyTpXOLnYnUfqGDfdwMf4kRIPey1xvPRJ8CsAX root@some-old-box\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFSoe1Ho3PjOrS4Hz+B+ILHh40Xi2kbN2f7qb2tNKb1d admin@some-other-box\n

        Normal sort authorized_keys would put the comments at the top, and not sort by the third column, which is the human readable comment of the keys. A better view would be to reverse sort the first column so the comments are at the bottom, then sort by the third column so it's easy to glance through:

        $ gsort -b -k1,1r -k3 authorized_keys\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFSoe1Ho3PjOrS4Hz+B+ILHh40Xi2kbN2f7qb2tNKb1d admin@some-other-box\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMWu8gF1qT19FtikCMoIBnmEJH1nKyrcC/pRCnvWzoSa bastion-01\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICY9ScGyTpXOLnYnUfqGDfdwMf4kRIPey1xvPRJ8CsAX root@some-old-box\n#ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKeUlnQ8TVgYkI1/DvPMhHJoujYarUvdBx3/BA1mlZLs another-user@some-other-commented-key\n#ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBDhB9kjaireRsJgPASR2jJqU0o8UvIVIPunKNQmS+mw user@a-key-we-want-to-manually-enable\n
        "},{"location":"sort/#sort-ip-addresses-by-first-octet-then-last-octet-showing-which-fields-are-sorted","title":"Sort IP Addresses by first octet then last octet, showing which fields are sorted","text":"
        ip neigh show | sort -k1,1n -k4,4n -t. --debug\n

        Console example:

        $ ip neigh show | sort -k1,1n -k4,4n -t. --debug\nsort: using \u2018en_US.UTF-8\u2019 sorting rules\n10.0.2.2 dev eth0 lladdr 52:54:00:12:35:02 REACHABLE\n__\n       _\n____________________________________________________\n10.0.2.3 dev eth0 lladdr 52:54:00:12:35:03 STALE\n__\n       _\n________________________________________________\n192.16.35.10 dev eth1 lladdr 08:00:27:7a:50:42 STALE\n___\n          __\n____________________________________________________\n192.16.35.11 dev eth1 lladdr 08:00:27:56:64:2f STALE\n___\n          __\n____________________________________________________\n
        "},{"location":"sort/#bsd-syntax-examples","title":"BSD Syntax Examples","text":"

        GNU sort and BSD sort behave differently, which is mostly lame.

        "},{"location":"sort/#sort-by-the-third-column","title":"Sort by the third column","text":"
        sort -k 3 filename\n
        "},{"location":"sort/#sort-dates-by-the-day","title":"Sort dates by the day","text":"

This example shows how to sort ISO 8601 formatted dates (e.g. 2017-01-19) by the day field. Assumes use of bash 4 to generate the example dates.

        ## -n for numeric sort\n## -k3 for column 3\n## -t- to use - as a column delimiter\n\nfor X in {2016..2017}-{01..12..03}-{01..19..06} ; do echo ${X} ; done |\n\nsort -n -k3 -t-\n
        "},{"location":"sort/#sort-the-etcpasswd-by-uid","title":"Sort the /etc/passwd by UID","text":"

        Also works on /etc/group file and GID

        sort -n -t: -k 3 /etc/passwd\n
        "},{"location":"sound-and-music/","title":"Sound and Music","text":""},{"location":"sound-and-music/#links","title":"Links","text":"
        • https://muted.io/major-minor-scales/
        • https://youtu.be/JcjT7zgs6cs: Music Theory for Techno
        • https://www.jezzamon.com/fourier/: An Interactive Introduction to Fourier Transforms
        • https://github.com/stemrollerapp/stemroller: OSS audio track separation tool. Spits out drums, vocals, bass, and lead tracks for input songs.
        • https://www.riffusion.com/about: Use Stable Diffusion to generate spectrograms, then play them back.
        "},{"location":"sphinx/","title":"sphinx","text":"

Sphinx is how many open source projects generate sites for their documentation.

        • http://sphinx-doc.org/contents.html
        "},{"location":"split/","title":"split","text":"

        \"split - split a file into pieces\" - man split

        split is a common unix command.

        "},{"location":"split/#usage-examples","title":"Usage Examples","text":""},{"location":"split/#split-into-dvd-sized-chunks","title":"Split into DVD sized chunks","text":"

This example isn't practical: the size needs to be smaller than specified because of DVD filesystem overhead, so you should use 4700000000 or similar if you want to actually burn the data.

        split -b 4707319808 source\n
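
To reassemble the pieces later (split names them xaa, xab, ... by default):

cat x?? > source\n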
        "},{"location":"split/#split-a-big-file-and-compress-it-with-gzip-before-writing-to-disk","title":"Split a big file and compress it with gzip before writing to disk","text":"

        This is useful for splitting up large uncompressed logs. This command is background safe.

        split -a4 --additional-suffix='-redis.log.1' -l500000 --filter='gzip -9 > $FILE.gz' redis.log.1\n

        And if you want to see read stats, throw pv in the mix:

        $ split -a4 --additional-suffix='-redis.log.1' -l500000 --filter='pv | gzip -9 > $FILE.gz' redis.log.1\n1.94GB 0:00:58 [21.2MB/s] [                                         <=>                              ]\n
        "},{"location":"splunk/","title":"splunk","text":"

        Enterprise log consumption and analysis.

        • http://www.splunk.com/
        • http://docs.splunk.com/Documentation/Splunk/latest/SearchReference/Sort
        "},{"location":"splunk/#examples","title":"Examples","text":""},{"location":"splunk/#get-a-list-of-indexes","title":"Get a list of indexes","text":"
        | REST /services/data/indexes | dedup title | table title\n
        "},{"location":"splunk/#get-a-list-of-sourcetypes","title":"Get a list of sourcetypes","text":"
        | metadata type=sourcetypes index=* OR index=_*\n
        "},{"location":"splunk/#cfengine-runs-per-hour-by-version","title":"CFEngine runs per hour by version","text":"
        source=\"/var/log/messages\" OR source=\"udp:514\" \"Starting CFEngine\" earliest=\"-1w\" | rex \"Starting CFEngine (?<version>3.[0-9]+.[0-9]+).*\" | timechart span=4h usenull=0 dc(host) by version\n
        "},{"location":"splunk/#strip-domain-from-hostname-for-consistent-uqdns","title":"Strip domain from hostname for consistent UQDNs","text":"
        rex mode=sed field=host \"s/\\.foo\\.example\\.com//\"\n
        "},{"location":"splunk/#count-of-records-per-hour-by-host","title":"Count of records per hour by host","text":"
        earliest=\"-7d\" | timechart span=1h count(_raw) by host\n
        "},{"location":"splunk/#count-of-records-per-source-by-5m-with-no-limit-on-list","title":"Count of records per source by 5m with no limit on list","text":"
        earliest=\"-8h\" | timechart span=5m count(_raw) by source limit=0\n
        "},{"location":"splunk/#count-of-records-per-source-with-a-given-list","title":"Count of records per source with a given list","text":"
        earliest=\"-1d\" source=\"/var/log/messages\" OR source=\"udp:10514\" OR source=\"udp:514\" | timechart count by source\n
        "},{"location":"splunk/#count-of-records-per-splunk-server-for-a-given-time-period","title":"Count of records per splunk server for a given time period","text":"

        Stupidly, splunk doesn't support ISO date format by default (in the version I'm using).

        earliest=\"06/19/2015:3:0:0\" latest=\"06/19/2015:3:3:0\" | timechart count(_raw) by splunk_server\n
        "},{"location":"splunk/#order-number-of-hits-for-a-given-string-by-an-extracted-ip-address","title":"Order number of hits for a given string by an extracted IP address","text":"
        earliest=\"7/6/2015:9:30:0\" \"verifying pingback from\" | rex \"verifying pingback from (?<pingback_source_ip>[0-9\\.]*)\\\"\" | stats count(_raw) as pingback_source_ip_total by pingback_source_ip | sort pingback_source_ip_total desc\n
        "},{"location":"splunk/#order-an-rpm-report","title":"Order an RPM report","text":"

        Given a report where RPM fields are exported as field=\"value\", such as:

        rpm -qa --queryformat 'report=\"rpm\", name=\"%{NAME}\", release=\"%{RELEASE}\", version=\"%{VERSION}\", packager=\"%{PACKAGER}\", url=\"%{URL}\", installtime=\"%{INSTALLTIME}\"\\n'\n

        This search in splunk will show a useful table:

        earliest=\"-1d\" report=\"rpm\" | dedup name | eval install_timestamp = strftime(installtime, \"%F %T.%3N\") | sort installtime desc | table host,name,version,release,install_timestamp\n

        See also:

        • http://docs.splunk.com/Documentation/Splunk/latest/Admin/Propsconf
        • http://answers.splunk.com/answers/140493/timestamp-contain-t-between-date-and-time.html#answer-140495
        "},{"location":"splunk/#count-of-kernel-versions","title":"Count of kernel versions","text":"

        Assuming you have a report that sends kernel_version=$(uname -r):

        kernel_version | stats count(kernel_version) by kernel_version, host\n
        "},{"location":"spotlight/","title":"Spotlight","text":"

        Spotlight is the Apple metadata database.

        "},{"location":"spotlight/#indexing","title":"Indexing","text":"

        You can edit the file /.Spotlight-V100/_rules.plist to add or deny indexing of specific folders. Use mdutil to edit per-disk indexing behavior. More info at MacOSXHints.com: configure spotlight to index excluded directories

        "},{"location":"spotlight/#spotlight-search-bar-examples","title":"Spotlight Search Bar Examples","text":"
        • name:nikon kind:pdf
        • kind:image modified:>3/25/2011 # does not support iso-8601 format \ud83d\ude1e
        "},{"location":"spotlight/#mdls","title":"mdls","text":"

        \"mdls -- lists the metadata attributes for the specified file\" - man mdls

        "},{"location":"spotlight/#show-gps-date-for-all-jpg-files","title":"Show GPS date for all JPG files","text":"
        mdls -name kMDItemGPSDateStamp *.jpg\n
        "},{"location":"spotlight/#show-name-and-version-of-an-app","title":"Show name and version of an app","text":"
        mdls -name kMDItemVersion -name kMDItemDisplayName /Applications/Alacritty.app\n
        "},{"location":"spotlight/#mdutil","title":"mdutil","text":"

        \"mdutil -- manage the metadata stores used by Spotlight\" - man mdutil

        "},{"location":"spotlight/#disable-indexing-on-a-volume","title":"Disable indexing on a volume","text":"
        sudo mdutil -i off /Volumes/volume_name\n
        "},{"location":"spotlight/#delete-local-indexes","title":"Delete local indexes","text":"

        This flag will cause each local store for the volumes indicated to be erased. The stores will be rebuilt if appropriate.

        sudo mdutil -E /\n
        "},{"location":"spotlight/#mdimport","title":"mdimport","text":"

        A tool to manage the way things are imported.

        "},{"location":"spotlight/#show-the-schema","title":"Show the schema","text":"
        mdimport -X\n
        "},{"location":"spotlight/#mdfind","title":"mdfind","text":"

        The terminal search tool for spotlight.

        \"mdfind -- finds files matching a given query\" - man mdfind

        "},{"location":"spotlight/#find-all-landscape-oriented-photos","title":"Find all landscape oriented photos","text":"
        mdfind -onlyin \"$PWD\" \"kMDItemOrientation = 0\"\n
        "},{"location":"spotlight/#find-all-portrait-oriented-photos","title":"Find all portrait oriented photos","text":"
        mdfind -onlyin \"$PWD\" \"kMDItemOrientation = 1\"\n
        "},{"location":"spotlight/#spotlight-search-by-filename-function-for-bash","title":"Spotlight search by filename function for bash","text":"

function sl { mdfind "kMDItemFSName == '$*'wc" ; }

The wc suffix after the closing quote applies Spotlight query modifiers: w for word-based matching and c for case insensitivity.

        "},{"location":"spotlight/#find-music-files-modified-since-yesterday","title":"Find music files modified since yesterday","text":"

You must use single quotes for the spotlight $time variable so bash does not attempt to interpolate it as a bash variable. You could also use double quotes and escape it.

        mdfind -onlyin \"/Volumes/Peter/Music/\" 'kMDItemContentModificationDate >= $time.yesterday'

        "},{"location":"spotlight/#mdgrep","title":"mdgrep","text":"
#!/bin/bash\n#\n# Spotlight metadata find and grep by Daniel.Hoherd at gmail dot com\n\n## Check for at least two arguments, print usage otherwise\nif [ $# -lt 2 ] ; then\n    echo \"usage: $0 searchstring [dir or file] [dir2 or file2]\"\n    exit 1\nfi\n\nss=$1\nshift\n\n## Build an array of -onlyin args, one pair per given path, avoiding eval and its quoting bugs\nonlyin=()\nfor thisitem in \"$@\" ; do\n    onlyin+=( -onlyin \"$thisitem\" )\ndone\n\nmdfind -0 \"${onlyin[@]}\" \"$ss\" | xargs -0 grep -Hi \"$ss\"\n
        "},{"location":"spotlight/#xattr","title":"xattr","text":"

        xattr can be used to set arbitrary spotlight metadata:

        dho@tro:~/temp$ touch foo\ndho@tro:~/temp$ xattr -w com.apple.metadata:kMDItemStarRating 10 foo\ndho@tro:~/temp$ mdls -name kMDItemStarRating foo\nkMDItemStarRating = \"10\"\n
        "},{"location":"spotlight/#see-also","title":"See Also","text":"
        • Extensive article on OS X metadata: http://code.google.com/p/understand/wiki/MacOSMetadata
        • macOS User Guide: Search with Spotlight on Mac: https://support.apple.com/guide/mac-help/spotlight-mchlp1008/mac
        "},{"location":"sqlite/","title":"sqlite","text":""},{"location":"sqlite/#links","title":"Links","text":"
        • CLI Shell info: http://www.sqlite.org/sqlite.html
        • Better CLI tutorial: http://souptonuts.sourceforge.net/readme_sqlite_tutorial.html
        • FAQ - http://www.sqlite.org/faq.html
        • When to use SQLite - https://www.sqlite.org/whentouse.html
        • SpatiaLite - \"SpatiaLite is an open source library intended to extend the SQLite core to support fully fledged Spatial SQL capabilities.\"
        • Improved CLI - https://github.com/dbcli/litecli
        • GUI tool with sqlite support - https://dbeaver.io
        • \"An open source multi-tool for exploring and publishing data\" https://docs.datasette.io/en/stable/ / https://simonwillison.net/2018/Aug/19/instantly-publish-datasette/
        • https://www.sqlite.org/appfileformat.html
        • https://sqlite.org/src4/doc/trunk/www/design.wiki The Design of SQLite4
        • https://til.simonwillison.net/sqlite/one-line-csv-operations Simon Willison has a lot of great sqlite knowledge, tools, and examples.
        • https://sqlitestudio.pl: GUI app to \"Create, edit, browse SQLite databases.\"
        "},{"location":"sqlite/#syntax-examples","title":"Syntax Examples","text":""},{"location":"sqlite/#open-a-db-read-only","title":"Open a db read-only","text":"
        sqlite3 \"file:///absolute/path/to/file/datasette.db?mode=ro\"\n
        "},{"location":"sqlite/#import-a-csv-file","title":"Import a csv file","text":"

        This method does not appear to support ~/filename or $HOME/filename, but does support relative and absolute paths. The sqlite3 help text says that -csv will \"set output mode to 'csv'\", but it also affects .import statements.

        sqlite3 -csv filename.db \".import path/to/some_file.csv destination_table_name\"\n
        "},{"location":"sqlite/#export-to-a-csv-file-including-headers","title":"Export to a csv file, including headers","text":"

        This would export the cards database table as a csv:

        sqlite3 -csv pokemon-card-collection.db -cmd '.headers on' 'select * from cards' > pokemon-cards.csv\n
        "},{"location":"sqlite/#create-a-table","title":"Create a table","text":"
        CREATE TABLE servers (\n  id INTEGER NOT NULL,\n  hostname VARCHAR(255),\n  ip_addr VARCHAR(32),\nPRIMARY KEY (id), UNIQUE (id,hostname));\n

        Or from a unix shell

        sqlite3 foo.db \"CREATE TABLE servers (\n  id INTEGER NOT NULL,\n  hostname VARCHAR(255),\n  ip_addr VARCHAR(32),\nPRIMARY KEY (id),\nUNIQUE (id,hostname));\"\n
        "},{"location":"sqlite/#add-a-column-to-the-table","title":"Add a column to the table","text":"
        ALTER TABLE servers ADD os varchar(255);\n
        "},{"location":"sqlite/#create-a-view","title":"Create a view","text":"

        \"a view is the result set of a stored query, which can be queried in the same manner as a persistent database collection object\" - https://en.wikipedia.org/wiki/View_(SQL)

        https://www.sqlite.org/lang_createview.html

The following view would show only rows from servers where the ip_addr starts with 192.168. This is an effective way to move logic into the database, potentially reducing app complexity:

        CREATE VIEW local_servers AS\nSELECT hostname,ip_addr FROM servers WHERE ip_addr like '192.168.%' ;\n
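
The view can then be queried just like a table:

sqlite3 foo.db \"SELECT * FROM local_servers ;\"\n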
        "},{"location":"sqlite/#add-rows-to-the-table-from-unix-shell","title":"Add rows to the table from unix shell","text":"
        sqlite3 foo.db \"insert into baz values ('50','some text');\"\n
        "},{"location":"sqlite/#add-rows-or-update-if-the-row-already-exists","title":"Add rows or update if the row already exists","text":"

        This syntax is different from other SQL implementations

        insert or replace into tablename(filename, hash) values\n  ('/etc/hosts', 'somehash'),\n  ('/etc/resolv.conf', 'someotherhash');\n
        "},{"location":"sqlite/#output-rows-via-the-unix-shell","title":"Output rows via the unix shell","text":"

This outputs as columns, but csv, html, line, and list modes exist too.

        sqlite3 -column foo.db \"SELECT * FROM baz LIMIT 5;\"\n

        If -column is truncating your output, instead use -list -separator ' '

        -line is the equivalent of mysql's \\G or postgres's \\x
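
For example, record-per-line output:

sqlite3 -line foo.db \"SELECT * FROM baz LIMIT 1;\"\n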

        "},{"location":"sqlite/#select-a-field-where-its-value-occurs-more-than-n-times","title":"Select a field where its value occurs more than N times","text":"
        select DateTimeOriginal\nfrom photos\ngroup by DateTimeOriginal\nhaving count(DateTimeOriginal) > 1 ;\n
        "},{"location":"sqlite/#select-field-a-where-the-value-field-b-occurs-more-than-n-times","title":"Select field A where the value Field B occurs more than N times","text":"

        This selects all values for field A (SourceFile), including where duplicates exist for field B (DateTimeOriginal). The prior example would not have shown this if we had added SourceFile to the select.

        select SourceFile from photos\nwhere DateTimeOriginal in (\n  select DateTimeOriginal from photos\n  group by DateTimeOriginal\n  having count(DateTimeOriginal) > 1\n) order by SourceFile ;\n
        "},{"location":"sqlite/#select-a-random-row-from-a-table","title":"Select a random row from a table","text":"
        TABLE='costreport'\nsqlite3 -line CostReport-1.db \"SELECT * FROM $TABLE\nWHERE _ROWID_ >= (abs(random()) % (SELECT max(_ROWID_) FROM $TABLE))\nLIMIT 1 ;\"\n
        "},{"location":"sqlite/#select-data-from-a-table-and-include-an-incrementing-id-column","title":"Select data from a table and include an incrementing id column","text":"

        Given the following data:

        sqlite> .schema\nCREATE TABLE IF NOT EXISTS \"people\"(\n\"name\" TEXT, \"age\" TEXT);\nsqlite> .mode box\nsqlite> select * from people ;\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 name  \u2502 age \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 brad  \u2502 18  \u2502\n\u2502 david \u2502 9   \u2502\n\u2502 grace \u2502 29  \u2502\n\u2502 john  \u2502 51  \u2502\n\u2502 katie \u2502 23  \u2502\n\u2502 nora  \u2502 33  \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2518\n

        You can sort by age and add an id column to show the numeric ordering of their age:

        sqlite> select row_number() over (order by cast(age as integer)) as id, * from people ;\n\u250c\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 id \u2502 name  \u2502 age \u2502\n\u251c\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 1  \u2502 david \u2502 9   \u2502\n\u2502 2  \u2502 brad  \u2502 18  \u2502\n\u2502 3  \u2502 katie \u2502 23  \u2502\n\u2502 4  \u2502 grace \u2502 29  \u2502\n\u2502 5  \u2502 nora  \u2502 33  \u2502\n\u2502 6  \u2502 john  \u2502 51  \u2502\n\u2514\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2518\n

        The row_number() over (order by cast(age as integer)) as id adds the extra column. Because the age column is a string we have to cast it to an integer for the sort to work correctly.

        "},{"location":"sqlite/#sum-one-column-ordered-by-another-column","title":"Sum one column ordered by another column","text":"

        This does a quick cost analysis on an AWS Cost-Report-1.csv file.

        ## First sanitize the column names\nsed -i '1 s#[/: ]#_#g' CostReport-1.csv  # linux sed, macos use gsed\n## Next import the csv into a sqlite db\nsqlite3 -csv CostReport-1.db \".import CostReport-1.csv costreport\"\n## Then start a sqlite shell\nsqlite3 CostReport-1.db\n
        -- Output Usage Type, ARN or ID, and summed cost as columns\nSELECT lineItem_UsageType, lineItem_ResourceId, sum(lineItem_BlendedCost) cost\nFROM costreport\nGROUP BY lineItem_ResourceId\nORDER BY cost ;\n
        "},{"location":"sqlite/#dump-a-db-from-cli","title":"Dump a db from CLI","text":"
        sqlite3 foo.db \".dump\" > foo.sql\n
        "},{"location":"sqlite/#search-skype-chat-history","title":"Search Skype chat history","text":"
        sqlite3 \"$HOME/Library/Application Support/Skype/daniel.austin/main.db\" \\\n  \"SELECT author, timestamp, body_xml FROM messages WHERE body_xml LIKE '%music%' ;\"\n
        "},{"location":"sqlite/#expanded-functionality-skype-history-search","title":"Expanded functionality skype history search","text":"
        function skypesearch(){\n  skypeusername=$1\n  searchstring=$2\n  /usr/bin/env sqlite3 \"$HOME/Library/Application Support/Skype/${skypeusername}/main.db\" \\\n  \"SELECT author, datetime(timestamp,'unixepoch','localtime'), body_xml\n  FROM messages\n  WHERE body_xml\n  LIKE '%${searchstring}%' ;\"\n}\nalias ss=\"skypesearch john.doe\"\n
        "},{"location":"sqlite/#quickly-create-an-image-database","title":"Quickly create an image database","text":"

        A better way to do this would be to use sqlite-utils, which is part of the datasette ecosystem; there is an example below that shows how to do this.

        ## Create the database\nsqlite3 images.db \"create table images (filename varchar(255), createdate timestamp, unique(filename))\"\n## Populate the database. This can be blindly re-run when new files are added.\nexiftool -d \"%s\" -p 'insert into images values (\"$filename\", \"$DateTimeOriginal\");' -q -f -r . | sqlite3 images.db 2> /dev/null\n## Query the database\nsqlite3 images.db \"SELECT filename,datetime(createdate,'unixepoch','localtime') as date FROM images WHERE date LIKE '2014-08-02%';\"\n
        "},{"location":"sqlite/#use-exiftool-and-sqlite-utils-to-find-duplicate-photos","title":"Use exiftool and sqlite-utils to find duplicate photos","text":"

        This example shows how to delete JPG files where a DNG also exists, for photos taken with a camera that keeps track of the ImageNumber, like most DSLRs.

        First, ingest all the exif data from your photos into a sqlite db. We specify only the fields we need from exiftool, and output the data as a json blob, then use jq to reformat the json as one record per line, then send that into sqlite-utils. The sqlite-utils --nl option tells it to expect one record per line, --pk SourceFile specifies that field (which is always present in exiftool json output) as the primary key to ensure uniqueness, --replace will update any rows where the primary key already exists (so we can re-run this on an existing database). If we were not specifying exiftool fields, we would need an --alter flag here to tell sqlite-utils to add columns to the table for exif fields that did not exist in any previously imported photos.

        SOURCE_DIR=/some/absolute/path/with/photos\nfind \"${SOURCE_DIR}\" -type f -iname '*.jpg' -print0 -or -iname '*.dng' -print0 |\nxargs -0 exiftool -SerialNumber -ImageNumber -FileType -json |\njq -c '.[]' |\nsqlite-utils insert --nl --pk SourceFile --replace ~/photos.db photos -\n

        Now do a sql select for all JPG files that finds the duplicates. The idea here is to find distinct shots from a specific camera body which are identified by concatenating the SerialNumber and the ImageNumber together, and only select rows with FileType = JPEG where that same shot also has a FileType = DNG entry. This avoids deletion of JPG files where there is no DNG for the same shot.

        This example uses echo rm so nothing is actually deleted; remove echo to actually do the deletion. This example also shows how to interpolate fields in sqlite3 by using || to concatenate fields and strings, producing a single string that can be compared against or output as a result.

        sqlite3 ~/photos.db \"\nselect SourceFile from photos\nwhere FileType = 'JPEG'\nand SerialNumber || ImageNumber in (\n  select SerialNumber || ImageNumber from photos where FileType = 'DNG'\n) ;\n\" |\nxargs -r echo rm -fv\n
        "},{"location":"sqlite/#vacuum-a-database-file","title":"Vacuum a database file","text":"

        \"The VACUUM command rebuilds the database file, repacking it into a minimal amount of disk space.\" - https://www.sqlite.org/lang_vacuum.html

        sqlite3 filename.db \"VACUUM;\"\n
        "},{"location":"ss/","title":"ss","text":"

        \"ss - another utility to investigate sockets\" - man ss

        This tool shows all sockets, not just networking sockets.

        "},{"location":"ss/#examples","title":"Examples","text":"

        Options can be concatenated, so ss -t -n -l -p can be ss -tnlp
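
        For example, to show only unix domain sockets:

        ss -x\n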

        "},{"location":"ss/#show-all-established-connections-dont-resolve-service-names","title":"Show all established connections, don't resolve service names","text":"
        ss -n\n
        "},{"location":"ss/#show-all-listening-sockets","title":"Show all listening sockets","text":"
        ss -l\n
        "},{"location":"ss/#display-all-tcp-sockets","title":"Display all TCP sockets","text":"
        ss -t -a\n
        "},{"location":"ss/#show-ipv4-listening-sockets-sorted-by-port","title":"Show ipv4 listening sockets sorted by port","text":"
        ss -4 -ltn | sort -k2 -t: -n\n
        "},{"location":"ss/#show-ssh-connections","title":"Show ssh connections","text":"
        ss -at '( sport = :ssh or dport = :ssh )'\n
        "},{"location":"ss/#show-ipv4-sockets-in-a-particular-state","title":"Show ipv4 sockets in a particular state","text":"
        ss -t4 state time-wait\n
        "},{"location":"ss/#show-the-processes-for-listening-ipv4-sockets","title":"Show the processes for listening ipv4 sockets","text":"
        ss -lt4p\n
        "},{"location":"ssh/","title":"ssh","text":"

        ssh is the secure shell, an encrypted replacement for telnet and a whole lot more.

        "},{"location":"ssh/#ssh_1","title":"ssh","text":"

        The secure shell itself, very useful for administering remote systems, tunneling arbitrary ports, tunneling X sessions, and a whole lot more.

        "},{"location":"ssh/#scp","title":"scp","text":"

        scp is like cp, but it happens securely and allows host-to-host transfers over ssh. Very handy when used with ssh_config and key-based authentication.
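
        For example (user, host, and paths here are placeholders), copying a file to a remote host and then a directory from it:

        scp ./localfile.txt user@host:/tmp/\nscp -r user@host:/var/log ./remote-logs\n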

        "},{"location":"ssh/#sftp","title":"sftp","text":"

        A secure FTP client built into ssh. The native client sucks; try lftp or rsync if it's available.
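
        For example, lftp can speak the sftp protocol directly (user and host are placeholders):

        lftp sftp://user@host\n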

        "},{"location":"ssh/#sshd","title":"sshd","text":""},{"location":"ssh/#output-effective-server-configuration-variables","title":"Output effective server configuration variables","text":"

        This is useful for troubleshooting sshd_config matching.

        sshd -T # requires root\n
        "},{"location":"ssh/#ssh_2","title":"ssh","text":""},{"location":"ssh/#output-effective-client-configuration-variables","title":"Output effective client configuration variables","text":"
        ssh -G user@host\n
        "},{"location":"ssh/#tunnel-local-port-to-the-destination-through-the-ssh-connection","title":"tunnel local port to the destination through the SSH connection","text":"

        This allows you to hit remote services as if they were running on your own machine on the given port.

        This will only listen on localhost, not ethernet interfaces. Use -g to listen on all interfaces.

        local_port=9980\nremote_port=80\ndestination_host=some_other_remote_server\nssh -L \"${local_port}:${destination_host}:${remote_port}\" user@ssh_server\n
        "},{"location":"ssh/#tunnel-remote-port-through-the-ssh-connection-to-the-local-machine","title":"Tunnel remote port through the ssh connection to the local machine","text":"

        This allows remote hosts to connect to a server running on your local network.

        local_port=80\nremote_port=9980\ndestination_host=some_other_local_server\nssh -R \"${remote_port}:${destination_host}:${local_port}\" user@ssh_server\n
        "},{"location":"ssh/#create-a-socks-5-proxy-on-a-local-port","title":"Create a socks 5 proxy on a local port","text":"
        local_port=5555\nssh -D \"$local_port\" user@host\n
        "},{"location":"ssh/#loop-through-some-ssh-hosts-and-execute-a-command","title":"Loop through some ssh hosts and execute a command","text":"

        -n is required so ssh does not read from stdin, which would otherwise consume the rest of the hostnames and stop the loop after the first host.

        cat hostnames.txt | while read -r host ; do\n  ssh -o ConnectTimeout=10 -o PasswordAuthentication=no -n \"$host\" 'some_command ; another_command ;'\ndone\n
        "},{"location":"ssh/#be-really-verbose-about-not-wanting-to-use-an-interactive-login","title":"Be really verbose about not wanting to use an interactive login","text":"

        Some ssh servers (EG: macOS 11) need even more options to not ask for a password:

        ssh \\\n  -o PasswordAuthentication=no \\\n  -o KbdInteractiveAuthentication=no \\\n  -o KbdInteractiveDevices=no\n

        This is also really handy for putting into GIT_SSH_COMMAND to avoid password prompts in scripts.

        "},{"location":"ssh/#prefer-password-auth","title":"Prefer password auth","text":"

        Sometimes you need to prefer password auth over key-based auth. For example, if you have lots of keys and you are trying to connect to a host that only allows one failure, you will use up your allowed attempts before you ever reach a password prompt.

        ssh -o PreferredAuthentications=password root@libreelec.local\n
        "},{"location":"ssh/#ssh_config","title":"ssh_config","text":"

        The user ssh config file, ~/.ssh/config, lets you override default options. This makes it handy for command line stuff where the syntax is funky, such as using non-standard ports.

        Notably, ssh uses the first value obtained for each option, so global defaults in a Host * block need to come at the end of the file, not the beginning!
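
        A minimal sketch of that ordering (hostname and values are just examples): host-specific blocks first, global defaults in a Host * block at the end:

        Host github.com\n  User git\n\nHost *\n  ServerAliveInterval 60\n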

        "},{"location":"ssh/#simple-host-aliasing","title":"Simple host aliasing","text":"

        The following example will let you simply ssh sugarbeast to log in on the non-standard port at the proper IP address with the specified user.

        Host sugarbeast\n  HostName 66.134.66.42\n  User daniel\n  Port 888\n
        "},{"location":"ssh/#multiplexed-connections","title":"Multiplexed connections","text":"

        After running mkdir -p -m 700 ~/.ssh/sockets, add this to your ~/.ssh/config:

        Host *\n  ControlPersist yes\n  ControlMaster auto\n  ControlPath ~/.ssh/sockets/%r@%h:%p\n

        To kill a multiplexed connection, run ssh -O exit user@host

        "},{"location":"ssh/#proxycommand","title":"ProxyCommand","text":"

        This config option lets you execute an arbitrary series of commands to connect with.

        SSH proxy through an ssh host for OpenSSH v4 and earlier (Ubuntu 8):

        ProxyCommand ssh -q bastion nc -q 0 %h %p\n

        SSH proxy through an ssh host for OpenSSH v5 and later:

        ProxyCommand ssh -W %h:%p bastion\n

        HTTP proxy (from man ssh_config):

        ProxyCommand nc -X connect -x 192.0.2.0:8080 %h %p\n
        "},{"location":"ssh/#key-based-authentication","title":"key-based authentication","text":"

        Key-based authentication lets you log in without specifying a password. This is useful for rsync, scp and just plain old ssh shell. Adding comments to the public key makes it easy to sort through the keys in the authorized_keys file. The $HOME/.ssh/authorized_keys file is the default list of public keys which are allowed password-less login. See also man authorized_keys for more info.
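
        For example, ssh-copy-id appends your public key to the remote authorized_keys file and sets sane permissions (key path and host are examples):

        ssh-copy-id -i ~/.ssh/id_ed25519.pub user@host\n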

        "},{"location":"ssh/#key-based-auth-permissions","title":"Key-based auth Permissions","text":"

        Permissions on this file need to be set like this:

        #!/bin/sh\n# This will repair permissions for the current user's ssh key-pair authentication.\nmkdir -p ~/.ssh/\ntouch ~/.ssh/authorized_keys\nchmod go-w ~          && \\\nchmod 700 ~/.ssh      && \\\nchmod 600 ~/.ssh/*    && \\\necho \"Successfully fixed ssh authentication files permissions.\"\n
        "},{"location":"ssh/#ssh-keygen","title":"ssh-keygen","text":""},{"location":"ssh/#validate-each-entry-of-authorized_keys","title":"Validate each entry of authorized_keys","text":"
        ssh-keygen -lvf ~/.ssh/authorized_keys\n
        "},{"location":"ssh/#generate-keys","title":"Generate Keys","text":"

        Not all systems support ed25519, but as of 2016 it is the most secure key type.

        ssh-keygen -t ed25519 -C \"Daniel Hoherd: ${USER}@${HOSTNAME} -- $(date +%F)\"\n

        If you require backwards compatibility, use 4096 bit RSA keys.

        ssh-keygen -b 4096 -t rsa -C \"Daniel Hoherd: ${USER}@${HOSTNAME} -- $(date +%F)\"\n
        "},{"location":"ssh/#create-or-change-a-password-for-an-ssh-identity","title":"Create or change a password for an ssh identity","text":"

        This will update the password used to unlock an ssh identity.

        ssh-keygen -p -f ~/.ssh/id_ed25519\n
        "},{"location":"ssh/#generate-a-public-key-from-a-given-private-key","title":"Generate a public key from a given private key","text":"

        This outputs the pub key, including the comment that is stored in the private key.

        ssh-keygen -y -f id_rsa_bar\n
        "},{"location":"ssh/#change-the-comment-in-an-ssh-key-pair","title":"Change the comment in an ssh key pair","text":"

        The old comment will be printed when changing the comment:

        ssh-keygen -c -f ~/.ssh/id_ed25519 -C 'this is the new comment'\n
        "},{"location":"ssh/#show-the-fingerprints-for-the-given-key-file","title":"Show the fingerprints for the given key file","text":"

        This works with both private and public keys

        ssh-keygen -E MD5 -l -f id_rsa_baz\n
        "},{"location":"ssh/#ssh-add","title":"ssh-add","text":""},{"location":"ssh/#show-fingerprints-for-all-keys-that-are-loaded-into-ssh-agent","title":"Show fingerprints for all keys that are loaded into ssh-agent","text":"
        # ssh-add -l\n2048 SHA256:aFAG8RjEr+mvqNyFR10kwCF9LP5ttJR3vI85qPDHDbo  (RSA)\n4096 SHA256:8K5XkmSFyAUgA6DLhQTbmTDnkh1kPc7GTdg5EYP7C8s  (RSA)\n4096 SHA256:7Bmhh1TGQkY7RfT9gmShNb1Eaq7erRkDphcOsQH0jaE  (RSA)\n

        Or if you need to show the hash as the older MD5 hash, EG to use with CircleCI

        # ssh-add -l -E md5\n2048 MD5:65:fd:c2:05:1e:b2:a6:32:15:37:3d:e6:98:81:a9:ab  (RSA)\n4096 MD5:db:af:71:c0:44:06:33:5f:63:b0:cb:8f:8a:59:0b:46  (RSA)\n4096 MD5:8e:f3:02:1c:bb:39:8e:b2:5e:27:5a:48:c4:d1:0c:4b  (RSA)\n
        "},{"location":"ssh/#delete-keys-from-the-agent","title":"Delete keys from the agent","text":"

        Delete all keys with

        ssh-add -D\n

        Delete the key for the given filename from ssh-agent

        ssh-add -d ~/.ssh/id_rsa\n
        "},{"location":"ssh/#fetch-pub-keys-from-ssh-agent","title":"Fetch pub keys from ssh-agent","text":"

        These keys will show the comment contained within the key

        ssh-add -L\n
        "},{"location":"ssh/#limit-root-login-to-key-based-auth","title":"Limit root login to key based auth","text":"

        In /etc/ssh/sshd_config. On OpenSSH 7.0 and later this value is spelled prohibit-password, with without-password kept as a deprecated alias.

        PermitRootLogin without-password\n
        "},{"location":"ssh/#see-also","title":"See Also","text":"
        • sshuttle - IP network router over ssh
        • sslh - lets one accept both HTTPS and SSH connections on the same port. It makes it possible to connect to an SSH server on port 443 (e.g. from inside a corporate firewall)
        • Corkscrew - a tool for tunneling SSH through HTTP proxies
        • Putty - An SSH (and telnet) client for Windows.
        • Passwordless SSH logins
        • SSH server for Windows
        • SSH jump hosts
        • The Secure Shell (SSH) Connection Protocol - https://www.ietf.org/rfc/rfc4254.txt
        • The Secure Shell (SSH) Authentication Protocol - https://www.ietf.org/rfc/rfc4252.txt
        • The Secure Shell (SSH) Transport Layer Protocol - https://www.ietf.org/rfc/rfc4253.txt
        • https://www.agwa.name/blog/post/ssh_signatures
        • lwn.net: Restricting SSH agent keys
        "},{"location":"sshuttle/","title":"sshuttle","text":"

        sshuttle is an SSH-powered IPv4 routed VPN that doesn't require admin rights on the target host.

        • https://github.com/apenwarr/sshuttle
        "},{"location":"sshuttle/#usage","title":"Usage","text":"

        Tunnel DNS queries and create a route through the given host to the given subnet, and be verbose about it.

        sshuttle --dns -vr user@host:port 192.168.1.0/24\n
        "},{"location":"stat/","title":"stat","text":"

        show filesystem metadata about a file
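
        Note that BSD/macOS stat uses -f with different format sequences than GNU stat -c. A rough BSD equivalent of the first GNU example below might be (a sketch; check man stat on your system):

        stat -f '%Sp %Sm %Sg(%g):%Su(%u) %N' /etc/hosts\n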

        "},{"location":"stat/#gnu-stat-examples","title":"GNU stat examples","text":""},{"location":"stat/#show-permissions-modify-date-ownership-and-long-filename","title":"Show permissions, modify date, ownership and long filename","text":"
        stat -c \"%a/%A  %y %G(%g):%U(%u) %N\" /srv/log/apache2/\n
        "},{"location":"stat/#sum-file-sizes","title":"Sum file sizes","text":"
        stat -c '%s' *2016* | awk '{sum += $1} END {print sum}'\n
        "},{"location":"stat/#gnu-stat-c-variables","title":"GNU stat -c variables","text":"

        This section is taken from man stat

        "},{"location":"stat/#the-valid-format-sequences-for-files-without-file-system","title":"The valid format sequences for files (without --file-system):","text":"
        • %A - access rights in human readable form
        • %a - access rights in octal (note '#' and '0' printf flags)
        • %b - number of blocks allocated (see %B)
        • %B - the size in bytes of each block reported by %b
        • %C - SELinux security context string
        • %d - device number in decimal
        • %D - device number in hex
        • %F - file type
        • %f - raw mode in hex
        • %g - group ID of owner
        • %G - group name of owner
        • %h - number of hard links
        • %i - inode number
        • %m - mount point
        • %n - file name
        • %N - quoted file name with dereference if symbolic link
        • %o - optimal I/O transfer size hint
        • %s - total size, in bytes
        • %t - major device type in hex, for character/block device special files
        • %T - minor device type in hex, for character/block device special files
        • %u - user ID of owner
        • %U - user name of owner
        • %w - time of file birth, human-readable; - if unknown
        • %W - time of file birth, seconds since Epoch; 0 if unknown
        • %x - time of last access, human-readable
        • %X - time of last access, seconds since Epoch
        • %y - time of last data modification, human-readable
        • %Y - time of last data modification, seconds since Epoch
        • %z - time of last status change, human-readable
        • %Z - time of last status change, seconds since Epoch
        "},{"location":"stat/#valid-format-sequences-for-file-systems","title":"Valid format sequences for file systems:","text":"
        • %a - free blocks available to non-superuser
        • %b - total data blocks in file system
        • %c - total file nodes in file system
        • %d - free file nodes in file system
        • %f - free blocks in file system
        • %i - file system ID in hex
        • %l - maximum length of filenames
        • %n - file name
        • %s - block size (for faster transfers)
        • %S - fundamental block size (for block counts)
        • %t - file system type in hex
        • %T - file system type in human readable form
        "},{"location":"strace/","title":"strace","text":"

        strace is a tool to trace system calls and signals in Linux.

        "},{"location":"strace/#examples","title":"Examples","text":""},{"location":"strace/#trace-a-running-process","title":"Trace a running process","text":"
        strace -p 5789\n
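
        To also follow child processes and print timestamps with microsecond resolution (same hypothetical pid):

        strace -f -tt -p 5789\n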
        "},{"location":"strace/#trace-only-exec-calls-of-a-command-and-all-child-processes","title":"Trace only exec calls of a command and all child processes","text":"
        strace -f -eexecve cf-agent -K\n
        "},{"location":"sudo/","title":"sudo","text":"

        super user do

        "},{"location":"sudo/#examples","title":"Examples","text":""},{"location":"sudo/#includedir-etcsudoersd","title":"#includedir /etc/sudoers.d","text":"
        #includedir /etc/sudoers.d\n

        This line is in some sudoers files, and is not a comment: #includedir is a configuration directive. sudo ignores files in this directory whose names contain a . or end in ~, so they should be flatly named, eg: 00_default
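
        For example, to create or edit such a file with syntax checking (the filename is just an example):

        visudo -f /etc/sudoers.d/00_default\n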

        "},{"location":"sudo/#check-sudoers-for-errors","title":"Check sudoers for errors","text":"
        visudo -c\n
        "},{"location":"sudo/#show-sudo-rules-that-match-for-the-given-user","title":"Show sudo rules that match for the given user","text":"
        sudo -l -U username\n

        The order shown is important: sudo picks the last matching rule. Rules are parsed in order from /etc/sudoers and all included files. Because of this, #includedir /etc/sudoers.d should be the last line in the /etc/sudoers file, and the order of the /etc/sudoers.d/ files matters when fine-tuning rules.

        "},{"location":"sudo/#allow-admins-to-run-mtr-without-a-password","title":"Allow admins to run mtr without a password","text":"

        Add the following line to /etc/sudoers

        %admin ALL=(ALL)NOPASSWD:/usr/local/sbin/mtr\n
        "},{"location":"sudo/#allow-several-groups-and-users-to-execute-upstart-commands","title":"Allow several groups and users to execute upstart commands","text":"
        User_Alias UPSTART_USERS = %wheel, %sysadmin, %adm\nUPSTART_USERS  ALL=(ALL)     NOPASSWD: /sbin/start\nUPSTART_USERS  ALL=(ALL)     NOPASSWD: /sbin/stop\nUPSTART_USERS  ALL=(ALL)     NOPASSWD: /sbin/initctl\nUPSTART_USERS  ALL=(ALL)     NOPASSWD: /sbin/restart\n
        "},{"location":"sudo/#run-several-commands-with-one-sudo-command","title":"Run several commands with one sudo command","text":"
        sudo -s -- <<EOF\nwhoami\nwhoami\nEOF\n

        or

        sudo bash -c \"whoami ; whoami ;\"\n
        "},{"location":"sudo/#links","title":"Links","text":"
        • sudoers config - http://ubuntuforums.org/showthread.php?t=1132821
        "},{"location":"swagger/","title":"swagger","text":"

        \"Swagger is the world\u2019s largest framework of API developer tools for the OpenAPI Specification(OAS), enabling development across the entire API lifecycle, from design and documentation, to test and deployment.\" - https://swagger.io/

        "},{"location":"swagger/#links","title":"Links","text":"
        • Hello World with Swagger - https://swagger.io/blog/getting-started-with-swagger-i-what-is-swagger/
        • Swagger 101 - https://app.swaggerhub.com/help/tutorials/writing-swagger-definitions
        • Online editor with example app definition https://editor.swagger.io/
        • https://swagger.io/tools/
        "},{"location":"swift/","title":"Swift","text":"

        \"Swift is a powerful and intuitive programming language for macOS, iOS, watchOS and tvOS. Writing Swift code is interactive and fun, the syntax is concise yet expressive, and Swift includes modern features developers love. Swift code is safe by design, yet also produces software that runs lightning-fast.\"

        • https://developer.apple.com/swift/
        • https://www.appcoda.com/learnswift/
        "},{"location":"sysctl/","title":"sysctl","text":"

        \"sysctl - configure kernel parameters at runtime\" - man sysctl

        /etc/sysctl.conf is for storing permanent changes; the sysctl command is used for making changes to the running system.

        "},{"location":"sysctl/#containers-caveat","title":"Containers caveat","text":"

        Because sysctl is a kernel-level feature, many sysctl values are not namespaced and are shared between all containers running on a given node. This means that if you have containers in kubernetes that modify those sysctls, they alter the behavior of every other container in every pod running on that same node. It also means you cannot rely on those values being consistent, because they are not managed in a central place but are modified by whatever pods happen to be scheduled on the node. Because of this, it's a good idea to avoid modifying sysctls from containers if at all possible, and if you do need to tweak sysctl for pods, account for that in your pod affinities.
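
        As a hedged sketch of the sanctioned alternative (pod name and image are illustrative), Kubernetes does let a pod declare namespaced \"safe\" sysctls explicitly in its spec, which at least makes the dependency visible in the manifest:

        apiVersion: v1\nkind: Pod\nmetadata:\n  name: sysctl-example\nspec:\n  securityContext:\n    sysctls:\n      - name: net.ipv4.tcp_syncookies  # one of the namespaced \"safe\" sysctls\n        value: \"1\"\n  containers:\n    - name: app\n      image: nginx\n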

        "},{"location":"sysctl/#sysctl-command-examples","title":"sysctl command examples","text":""},{"location":"sysctl/#show-all-kernel-variables-for-the-in-memory-kernel","title":"Show all kernel variables for the in-memory kernel","text":"
        sysctl -a\n
        "},{"location":"sysctl/#assign-a-new-variable-for-the-running-kernel-to-use","title":"Assign a new variable for the running kernel to use","text":"
        sysctl -w variable=value\n
        "},{"location":"sysctl/#load-values-from-etcsysctlconf","title":"Load values from /etc/sysctl.conf","text":"
        sysctl -p\n
        "},{"location":"sysctl/#sysctlconf-examples","title":"sysctl.conf examples","text":""},{"location":"sysctl/#reboot-after-10-seconds-if-kernel-panics","title":"Reboot after 10 seconds if kernel panics","text":"
        kernel.panic = 10\n
        "},{"location":"sysctl/#treat-all-oopses-as-panics","title":"Treat all oopses as panics","text":"
        kernel.panic_on_oops = 1\n
        "},{"location":"sysdig/","title":"sysdig","text":"

        An awesome host inspection tool with a tcpdump-like capture tool and an interface similar to top et al. - http://www.sysdig.org/

        "},{"location":"sysdig/#installation-centos","title":"Installation - CentOS","text":"
        • https://github.com/draios/sysdig/wiki/How-to-Install-Sysdig-for-Linux

        Their shell script installs epel from a 3rd party source, so it's best to use this method instead:

        rpm --import https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public && \\\ncurl -s -o /etc/yum.repos.d/draios.repo http://download.draios.com/stable/rpm/draios.repo && \\\nyum -y install kernel-devel-$(uname -r) && \\\nyum -y install sysdig\n
        "},{"location":"sysdig/#examples","title":"Examples","text":""},{"location":"sysdig/#simple-usage","title":"Simple usage","text":"
        sysdig\n
        "},{"location":"sysdig/#write-a-system-trace-file","title":"Write a system trace file","text":"
        sysdig -w tracefile.scap\n
        "},{"location":"sysdig/#replay-a-tracefile","title":"Replay a tracefile","text":"
        sysdig -r tracefile.scap\n
        "},{"location":"sysdig/#show-filters","title":"Show filters","text":"
        sysdig -l\n
        "},{"location":"sysdig/#show-activity-for-access-to-a-given-file","title":"Show activity for access to a given file","text":"
        sysdig fd.name=/etc/hosts\n
        "},{"location":"sysdig/#show-shell-commands-for-all-users","title":"Show shell commands for all users","text":"
        sysdig -pc -c spy_users\n
        "},{"location":"sysdig/#spy-on-a-user-and-exclude-a-process","title":"Spy on a user and exclude a process","text":"
        sysdig -pc -c spy_users proc.name!=gmetric\n

        Or exclude multiple processes

        sysdig -pc -c spy_users \"not proc.name in ( gmetric, awk, sed, grep )\"\n
        "},{"location":"sysdig/#show-a-top-like-interface","title":"Show a top like interface","text":"
        csysdig\n
        "},{"location":"sysdig/#links","title":"Links","text":"
        • https://github.com/draios/sysdig/wiki
        • https://github.com/draios/sysdig/wiki/Sysdig%20Examples
        • http://man7.org/linux/man-pages/man8/sysdig.8.html
        • Getting Started With Sysdig
        • Getting Started With Csysdig
        "},{"location":"systemd-resolved/","title":"systemd-resolved","text":"

        \"systemd-resolved - Network Name Resolution manager\" - man systemd-resolved

        systemd-resolved enhances (ie: interferes with) old school simple methods of managing DNS on a linux system. Whether it finds /etc/resolv.conf as a regular file or as a symlink, and what that symlink points to, alters the behavior of the service, a mechanic that can have unexpected effects if you're not familiar with it. Note that in newer systemd releases the systemd-resolve command shown below has been renamed to resolvectl.
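
        A quick way to see which mode is in play is to check whether /etc/resolv.conf is a regular file or a symlink, and where it points (the stub path in the comment is a common default, not guaranteed):

        ls -l /etc/resolv.conf  # often a symlink to /run/systemd/resolve/stub-resolv.conf\n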

        "},{"location":"systemd-resolved/#examples","title":"Examples","text":""},{"location":"systemd-resolved/#resolve-a-hostname","title":"Resolve a hostname","text":"
        $ systemd-resolve google.com\ngoogle.com: 216.58.192.238\n\n-- Information acquired via protocol DNS in 23.9ms.\n-- Data is authenticated: no\n
        "},{"location":"systemd-resolved/#check-name-resolution-statistics","title":"Check name resolution statistics","text":"
        $ systemd-resolve --statistics\nDNSSEC supported by current servers: no\n\nTransactions\nCurrent Transactions: 0\n  Total Transactions: 36\n\nCache\n  Current Cache Size: 2\n          Cache Hits: 2\n        Cache Misses: 103\n\nDNSSEC Verdicts\n              Secure: 0\n            Insecure: 0\n               Bogus: 0\n       Indeterminate: 0\n
        "},{"location":"systemd-resolved/#see-also","title":"See Also","text":"
        • http://man7.org/linux/man-pages/man1/systemd-resolve.1.html
        • http://manpages.ubuntu.com/manpages/xenial/man8/systemd-resolved.service.8.html
        • http://manpages.ubuntu.com/manpages/bionic/man8/systemd-resolved.service.8.html
        "},{"location":"systemd/","title":"systemd","text":"

        \"systemd is a system and service manager for Linux, compatible with SysV and LSB init scripts. systemd provides aggressive parallelization capabilities, uses socket and D-Bus activation for starting services, offers on-demand starting of daemons, keeps track of processes using Linux control groups, supports snapshotting and restoring of the system state, maintains mount and automount points and implements an elaborate transactional dependency-based service control logic. It can work as a drop-in replacement for sysvinit.\" - http://www.freedesktop.org/wiki/Software/systemd/

        "},{"location":"systemd/#links","title":"Links","text":"
        • https://www.freedesktop.org/wiki/Software/systemd/TipsAndTricks: Tips and tricks
        • https://man7.org/linux/man-pages/man5/systemd.resource-control.5.html: Man page for control groups
        • https://www.freedesktop.org/wiki/Software/systemd/FrequentlyAskedQuestions: FAQ
        • https://fedoraproject.org/wiki/SysVinit_to_Systemd_Cheatsheet: sysvinit to chkconfig
        • https://wiki.ubuntu.com/SystemdForUpstartUsers: systemd for upstart users
        • https://github.com/joehillen/sysz: fzf interface for systemd
        "},{"location":"systemd/#tips","title":"Tips","text":""},{"location":"systemd/#init-file-locations","title":"Init file locations","text":"
        /usr/lib/systemd/system\n/etc/systemd/system # has precedence\n
        "},{"location":"systemd/#show-the-full-systemd-journal","title":"Show the full systemd journal","text":"

        The systemd journal is syslog and more.

        journalctl --full\n
        "},{"location":"systemd/#show-logs-for-the-last-15-minutes","title":"Show logs for the last 15 minutes","text":"
        journalctl --since \"-15 minutes\"\n
        "},{"location":"systemd/#show-logs-for-one-service","title":"Show logs for one service","text":"

        Find your service with systemctl list-units

        journalctl --unit=\"docker.service\" --since -5m\n
        "},{"location":"systemd/#show-only-logs-that-match-a-certain-pattern","title":"Show only logs that match a certain pattern","text":"

        \"PERL-compatible regular expressions are used. \u2026 If the pattern is all lowercase, matching is case insensitive. Otherwise, matching is case sensitive. This can be overridden with the --case-sensitive option\" - man journalctl

        journalctl -g '\b(foo|bar)\b'\n
        "},{"location":"systemd/#show-the-nginx-journal-for-today","title":"Show the nginx journal for today","text":"

        The -u here is \"unit\", not \"user\".

        journalctl -u nginx.service --since today\n
        "},{"location":"systemd/#list-journals-by-boot","title":"List journals by boot","text":"
        journalctl --list-boots\n

        The above command produces the following style of output:

         -5 e6fcef265a164688b5f9aad999a9b1d2 Mon 2019-09-09 08:55:47 PDT\u2014Tue 2019-09-17 17:41:25 PDT\n -4 1e402042ad0a48bebe17298fd80dfb66 Tue 2019-09-17 17:42:06 PDT\u2014Tue 2019-09-17 18:26:28 PDT\n -3 1b36653fa7b64b1a808f10a894a0e303 Tue 2019-09-17 18:27:11 PDT\u2014Sun 2019-09-22 13:34:59 PDT\n -2 be854ba422934cf2a9e7952dc052461a Sun 2019-09-22 16:23:43 PDT\u2014Mon 2019-09-30 07:55:08 PDT\n -1 0454f1208e5e49c59fabf95cf4f68346 Mon 2019-09-30 07:55:51 PDT\u2014Fri 2019-10-04 08:54:38 PDT\n  0 f8c09c85ed9f4976987121a345b6f446 Fri 2019-10-04 08:55:22 PDT\u2014Wed 2019-10-09 15:34:01 PDT\n
        "},{"location":"systemd/#show-journal-for-previous-boot","title":"Show journal for previous boot","text":"

        Using the info from the previous --list-boots example, we can view the system log for the previous boot. This gives all system logs from the time the system booted to the time it shut down.

        journalctl -b -1\njournalctl -b 0454f1208e5e49c59fabf95cf4f68346\n
        "},{"location":"systemd/#show-timers","title":"Show timers","text":"

        \"A unit configuration file whose name ends in \".timer\" encodes information about a timer controlled and supervised by systemd, for timer-based activation.\" - man systemd.timer

        systemctl list-timers\n
        "},{"location":"systemd/#show-units","title":"Show units","text":"

        Units are things that are handled by systemd, including services.

        systemctl list-units\n
        "},{"location":"systemd/#show-dependencies","title":"Show dependencies","text":"

        This works on any .target or .service

        systemctl list-dependencies network.service\n
        "},{"location":"systemd/#enable-a-service","title":"Enable a service","text":"

        This behavior replaces chkconfig

        systemctl enable docker.service\n
        "},{"location":"systemd/#check-the-status-of-a-service-and-show-20-lines","title":"Check the status of a service and show 20 lines","text":"
        systemctl -n 20 status nodejs\n
        "},{"location":"systemd/#per-user-services","title":"Per-user services","text":"

        https://wiki.archlinux.org/index.php/Systemd/User

        /usr/lib/systemd/user/ # where services provided by installed packages go.\n/etc/systemd/user/ # where system-wide user services are placed by the system administrator.\n~/.config/systemd/user/ # where the user puts its own services.\n
        "},{"location":"systemd/#alter-power-sleep-hibernate-button-behaviors","title":"Alter power / sleep / hibernate button behaviors","text":"
        /etc/systemd/logind.conf\n
        "},{"location":"systemd/#show-name-resolution-status","title":"Show name resolution status","text":"
        systemd-resolve --status\n
        "},{"location":"systemd/#show-boot-performance","title":"Show boot performance","text":"
        systemd-analyze blame\nsystemd-analyze critical-chain\n
        "},{"location":"systemd/#view-a-time-chart-of-the-boot-sequence","title":"View a time-chart of the boot sequence","text":"
        sudo systemd-analyze plot > systemd.svg\n
        "},{"location":"systemd/#show-cgroup-contexts","title":"Show cgroup contexts","text":"
        systemd-cgls\n
        "},{"location":"systemd/#show-top-control-groups-by-their-resource-usage","title":"Show top control groups by their resource usage","text":"
        systemd-cgtop\n
        "},{"location":"systemd/#detect-if-you-are-running-in-a-container-or-on-a-vm","title":"Detect if you are running in a container or on a VM","text":"
        systemd-detect-virt\n
        "},{"location":"tar/","title":"tar","text":"

        \"GNU 'tar' saves many files together into a single tape or disk archive, and can restore individual files from the archive.\" - man tar on linux

        \"tar - manipulate tape archives\" - man tar on macOS

        "},{"location":"tar/#examples","title":"Examples","text":""},{"location":"tar/#deal-with-leading-slash-quirks","title":"Deal with leading slash quirks","text":"

        Some tar archives have a leading ./ stored for every filename:

        $ curl -fsSL https://github.com/vectordotdev/vector/releases/download/v0.20.1/vector-0.20.1-x86_64-unknown-linux-gnu.tar.gz | tar -tzf -\n./vector-x86_64-unknown-linux-gnu/\n./vector-x86_64-unknown-linux-gnu/README.md\n./vector-x86_64-unknown-linux-gnu/bin/\n./vector-x86_64-unknown-linux-gnu/bin/vector\n

        and some do not

        $ curl -fsSL \"https://get.helm.sh/helm-v3.8.2-linux-amd64.tar.gz\" | tar -tzf -\nlinux-amd64/\nlinux-amd64/helm\nlinux-amd64/LICENSE\nlinux-amd64/README.md\n

        This alters the syntax when you want to extract a single file: you need to give the exact filename seen in tar -t. If you never want to deal with that leading ./, you can add --no-anchored

        danielh@cs-462709900404-default:~/temp/2022-04-20$ curl -fsSL https://github.com/vectordotdev/vector/releases/download/v0.20.1/vector-0.20.1-x86_64-unknown-linux-gnu.tar.gz | tar -tzf - vector-x86_64-unknown-linux-gnu/bin/\ntar: vector-x86_64-unknown-linux-gnu/bin: Not found in archive\ntar: Exiting with failure status due to previous errors\ndanielh@cs-462709900404-default:~/temp/2022-04-20$ curl -fsSL https://github.com/vectordotdev/vector/releases/download/v0.20.1/vector-0.20.1-x86_64-unknown-linux-gnu.tar.gz | tar -tzf - --no-anchored vector-x86_64-unknown-linux-gnu/bin/\n./vector-x86_64-unknown-linux-gnu/bin/\n./vector-x86_64-unknown-linux-gnu/bin/vector\n
        "},{"location":"tcl/","title":"TCL","text":"

        \"TCL Corporation is a Chinese multinational electronics company headquartered in Huizhou, Guangdong Province.\" - https://en.wikipedia.org/wiki/Tcl

        "},{"location":"tcl/#tcl-65c807","title":"TCL 65C807","text":"
        • https://www.tclusa.com/products/home-theater/c-series/tcl-65-class-c-series-4k-uhd-hdr-roku-smart-tv-65c807
        • Product Size (WxHxD) with Stand: 57.2\" x 36.0\" x 12.8\"
        • Product Size (WxHxD) without Stand: 57.2\" x 34.4\" x 2.3\"
        • Stand Separation Distance: 55.6\"
        • Product Weight with Stand (lbs): 53 lbs
        • Product Weight without stand (lbs): 52 lbs
        "},{"location":"tcpdump/","title":"tcpdump","text":"

        Network sniffing tool.

        "},{"location":"tcpdump/#syntax-examples","title":"Syntax Examples","text":""},{"location":"tcpdump/#capture-packets-to-and-from-an-ip-address","title":"Capture packets to and from an IP address","text":"

        Captures all data that includes 1.2.3.4 as the source or destination address, but no other traffic.

        tcpdump host 1.2.3.4\n
        "},{"location":"tcpdump/#capture-traffic-that-contains-a-given-mac-address","title":"Capture traffic that contains a given mac address","text":"

        Writes capfile.cap containing all traffic to or from the specified mac address on the network attached to eth1

        tcpdump -w capfile.cap -i eth1 ether host 00:03:fa:46:2c:08\n
        "},{"location":"tcpdump/#filter-packets-from-an-existing-capture","title":"Filter packets from an existing capture","text":"

        Filters port 53 packets out of the old capfile into the new

        tcpdump -r oldcapfile.cap -w newcapfile.cap port 53\n
        "},{"location":"tcpdump/#capture-all-pop3-traffic-and-all-traffic-from-a-particular-host","title":"Capture all pop3 traffic and all traffic from a particular host","text":"

        Captures all pop3 traffic and all traffic to or from the specified host on the first interface of a Mac OS X computer

        tcpdump -w foo.cap -i en0 ether host 00:03:9a:28:44:01 or port 110\n
        "},{"location":"tcpdump/#capture-all-traffic-not-a-mac-address","title":"Capture all traffic not a mac address","text":"

        Captures all traffic not from the host 00:1b:63:ce:83:2e, useful for filtering out your own traffic.

        tcpdump -i en1 not ether src 00:1b:63:ce:83:2e\n
        "},{"location":"tcpdump/#capture-lldp-traffic","title":"Capture LLDP traffic","text":"

        This matches the 2 bytes starting at byte offset 12 (the ethertype field) against 0x88cc

        tcpdump -v -s 1500 -c 1  '(ether[12:2]=0x88cc)'\n
        "},{"location":"tcpdump/#capture-syn-packets","title":"Capture SYN packets","text":"
        tcpdump -n 'tcp[13] & 2!=0'\n
        "},{"location":"tcpdump/#capture-synack-packets","title":"Capture SYN/ACK packets","text":"
        tcpdump -n 'tcp[13]=18'\n

        Or another way

        tcpdump 'tcp[tcpflags] && tcp-syn != 0'\n

        Or capture all SYN packets going only to two ethernet destinations:

        tcpdump 'tcp[13] & 2!=0 && (ether dst 00:22:64:f4:d0:70 or ether dst 00:22:64:f4:d0:6e)'\n
        "},{"location":"tcpdump/#write-capture-to-file-and-replay-it-at-the-same-time","title":"Write capture to file and replay it at the same time","text":"
        sudo tcpdump -n 'host 216.200.102.84' -s 1500 -l -w - | tee logcopy.pcap | tcpdump -r -\n
        "},{"location":"tcpdump/#write-a-circular-buffer-of-traffic","title":"Write a circular buffer of traffic","text":"

        This will write 5 files of 1 MB each and loop through them as the destination for writing traffic. That is, the filenames do not indicate chronology. The files will be named foo.cap[0-4]. You can reassemble these files chronologically with mergecap -w merged.cap foo.cap*

        sudo tcpdump -C 1 -W 5 -w foo.cap\n
        "},{"location":"tcpdump/#show-how-many-bytes-were-captured-in-a-cap-file","title":"Show how many bytes were captured in a cap file","text":"

        This prints out some stats about captured packets, then adds up all the bytes. The size is from layer 3 up, so it excludes ethernet frame data.

        tcpdump -q -n -r hillary-clintons-email.cap | awk '{sum+=$NF} END {print sum}'\n
        "},{"location":"tcpdump/#print-out-a-list-of-observed-src-ip-addresses-every-5-seconds","title":"Print out a list of observed src ip addresses every 5 seconds","text":"

        This is limited to addresses matching 10.8

        while true ; do\n  date '+%F %T%z'\n  sudo timeout 5 tcpdump -n 2>/dev/null |\n  awk '$3 ~ /10.8/ {\n    print gensub(/([0-9]*\\.[0-9]*\\.[0-9]*\\.[0-9]*)(\\.[0-9]*)?/, \"ip address: \\\\1\", \"g\", $3) ;\n  }' |\n  sort -t. -k4n |\n  uniq -c\ndone\n


        "},{"location":"tcpdump/#show-wpa-4-way-handshakes","title":"Show WPA 4-way handshakes","text":"
        tcpdump -n -i en0 \"ether proto 0x888e\"\n
        "},{"location":"tcpdump/#links","title":"Links","text":"
        • http://www.danielmiessler.com/study/tcpdump/
        • https://github.com/mozillazg/ptcpdump: \"Process-aware, eBPF-based tcpdump\" that can sniff k8s namespaces, pods, containers, etc..
        "},{"location":"tcpflow/","title":"tcpflow","text":"

        tcpflow uses the tcpdump libraries to reconstruct full TCP streams. It uses much of tcpdump's syntax.

        "},{"location":"tcpflow/#examples","title":"Examples","text":""},{"location":"tcpflow/#sniff-http-into-the-console","title":"Sniff HTTP into the console","text":"
        sudo tcpflow -c port 80\n
        "},{"location":"tcpflow/#see-also","title":"See Also","text":"
        • ngrep
        • tcpdump
        "},{"location":"terminal-emulator/","title":"Terminal Emulator","text":""},{"location":"terminal-emulator/#comparison","title":"Comparison","text":"

        Tracking things I care about in some terminal emulators I use.

        • Select a rectangle: iTerm2 cmd-opt, kitty N, alacritty ctrl-opt, cool-retro-term N
        • Tabs: iTerm2 cmd-t, kitty N, alacritty N, cool-retro-term N
        • Panes: iTerm2 cmd-d / cmd-shift-d, kitty N, alacritty N, cool-retro-term N
        • Broadcast input: iTerm2 opt-cmd-i, kitty N, alacritty N, cool-retro-term N
        • Speed: iTerm2 medium, kitty fast, alacritty fast, cool-retro-term slow
        • Easily export prefs: iTerm2 Decent, kitty Y, alacritty Y, cool-retro-term N
        "},{"location":"terminal-emulator/#links","title":"Links","text":"
        • https://en.wikipedia.org/wiki/List_of_terminal_emulators
        • https://sw.kovidgoyal.net/kitty/performance: Performance comparison of several popular terminal emulators compared to kitty
        • https://jvns.ca/blog/2024/10/01/terminal-colours: \"Terminal colours are tricky\"
        "},{"location":"terraform/","title":"Terraform","text":"

        \"Write, Plan, and Create Infrastructure as Code\" - https://www.terraform.io/

        "},{"location":"terraform/#links","title":"Links","text":"
        • https://www.terraform.io/docs/providers/github/index.html
        • https://www.terraform.io/docs/providers/gitlab/index.html
        • https://github.com/28mm/blast-radius - Blast Radius is a tool for reasoning about Terraform dependency graphs with interactive visualizations.
        • https://terragrunt.gruntwork.io - \"Terragrunt is a thin wrapper that provides extra tools for keeping your configurations DRY, working with multiple Terraform modules, and managing remote state.\"
        "},{"location":"terraform/#examples","title":"Examples","text":""},{"location":"terraform/#generate-a-graph-of-module-dependencies","title":"Generate a graph of module dependencies","text":"
        terraform init # must succeed\nterraform graph | dot -Tsvg > graph.svg\n
        "},{"location":"time/","title":"time","text":"

        Notes about time technologies.

        "},{"location":"time/#iso-8601","title":"ISO 8601","text":"

        ISO 8601 Data elements and interchange formats - Information interchange - Representation of dates and times is an international standard covering the exchange of date and time-related data.

        "},{"location":"time/#iso-8601-format-examples","title":"ISO 8601 format examples","text":"

        See the ISO 8601 wikipedia page for many examples. Much of the content in this section was taken from that article.

        There are a ton of examples on this page: https://ijmacd.github.io/rfc3339-iso8601

        One notable syntax is that the letter T should always precede times. This aids in parsing, and distinguishes between month and minute, which are both shortened to M.

        Another notable syntax is the use of Z to mean a timezone offset of 0 hours, or GMT.
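
        For example, GNU and BSD date can both print the current UTC time with the T separator and the Z suffix:

        date -u +%FT%TZ  # e.g. 2016-08-09T21:58:48Z\n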

        "},{"location":"time/#single-points-in-time","title":"Single points in time","text":"
        $ for fmt in date hours minutes seconds ns  ; do\n    bash -x -c \"\n      TZ=$(\n        awk '$1 !~ /^#/ {print $3}' /usr/share/zoneinfo/zone.tab |\n        sort -R |\n        head -n 1\n      ) \\\n      date --iso-8601=${fmt}\n    \" ;\n  done ;\n+ TZ=America/Paramaribo\n+ date --iso-8601=date\n2016-08-09\n+ TZ=Africa/Dakar\n+ date --iso-8601=hours\n2016-08-09T21+00:00\n+ TZ=Indian/Kerguelen\n+ date --iso-8601=minutes\n2016-08-10T02:58+05:00\n+ TZ=Pacific/Saipan\n+ date -Iseconds\n2016-08-10T07:58:48+10:00\n+ TZ=Pacific/Midway\n+ date --iso-8601=ns\n2016-08-09T10:58:48,503878101-11:00\n
        • Week: 2016-W32
        • Date with week number: 2016-W32-2
        • Month and day without year: -12-31
        "},{"location":"time/#durations-or-ranges-of-time","title":"Durations, or ranges of time","text":"

        Durations are a component of time intervals and define the amount of intervening time in a time interval.

        "},{"location":"time/#examples","title":"Examples","text":"
        • P10Y - a duration of ten years.
        • P5DT12H - a duration of five days and twelve hours.
        • P3Y6M4DT12H30M5S - a duration of three years, six months, four days, twelve hours, thirty minutes, and five seconds.
        • P1M - one month.
        • PT1M - one minute.
        "},{"location":"time/#time-intervals","title":"Time intervals","text":"

        A time interval is the intervening time between two time points. There are four ways to express a time interval:

        • Start and end, such as 2007-03-01T13:00:00Z/2008-05-11T15:30:00Z
        • Start and duration, such as 2007-03-01T13:00:00Z/P1Y2M10DT2H30M
        • Duration and end, such as P1Y2M10DT2H30M/2008-05-11T15:30:00Z
        • Duration only, such as P1Y2M10DT2H30M, with additional context information
        "},{"location":"time/#repeating-intervals","title":"Repeating intervals","text":"

        Repeating intervals are formed by adding R[n]/ to the beginning of an interval expression. Such as R5/2007-03-01T13:00:00Z/2008-05-11T15:30:00Z. The n can be omitted if the interval should repeat forever.

        "},{"location":"time/#rfc-3339","title":"RFC 3339","text":"

        RFC 3339 is considered a profile of ISO 8601. It defines a profile of ISO 8601 for use in Internet protocols and standards. It explicitly excludes durations and dates before the common era. The more complex formats such as week numbers and ordinal days are not permitted.

        • https://tools.ietf.org/html/rfc3339
        "},{"location":"time/#leap-seconds","title":"Leap Seconds","text":"

        \"A leap second is a one-second adjustment that is occasionally applied to Coordinated Universal Time (UTC) in order to keep its time of day close to the mean solar time, or UT1.\" - https://en.wikipedia.org/wiki/Leap_second

        Leap seconds are scheduled by the International Earth Rotation and Reference Systems Service (see also: https://en.wikipedia.org/wiki/International_Earth_Rotation_and_Reference_Systems_Service). Leap seconds cause a variety of problems in computer systems, and complicate time tracking in general.

        "},{"location":"time/#public-time-server-handling-of-leap-seconds","title":"Public time server handling of leap seconds","text":"
        • https://developers.google.com/time/: Google time servers do leap second smearing
        • https://aws.amazon.com/about-aws/whats-new/2017/11/introducing-the-amazon-time-sync-service: AWS time servers do leap second smearing
        • https://docs.ntpsec.org/latest/leapsmear.html: ntp.org servers do not leap smear: \"Leap Second Smearing MUST NOT be used for public servers, e.g. servers provided by metrology institutes, or servers participating in the NTP pool project.\"
        "},{"location":"time/#leap-second-links","title":"Leap Second Links","text":"
        • https://www.nature.com/articles/d41586-022-03783-5: \"The leap second\u2019s time is up: world votes to stop pausing clocks\"
        • https://datacenter.iers.org/data/latestVersion/bulletinC.txt: When is the next leap second?
        • https://access.redhat.com/articles/15145: Resolve Leap Second Issues in Red Hat Enterprise Linux 4-8
        • https://developers.google.com/time/smear: Google Public NTP: Leap Smear
        • https://developers.redhat.com/blog/2015/06/01/five-different-ways-handle-leap-seconds-ntp/: Five different ways to handle leap seconds with NTP
        • http://www.madore.org/~david/computers/unix-leap-seconds.html: The Unix leap second mess
        • http://www.ntp.org/ntpfaq/NTP-s-algo-real.htm#AEN2499: ntp.org FAQ: What happens during a Leap Second?
        "},{"location":"time/#code-snips-and-examples","title":"Code snips and examples","text":""},{"location":"time/#quick-and-dirty-time-sync-in-linux-for-when-ntp-is-blocked","title":"Quick and dirty time sync in Linux for when NTP is blocked.","text":"
        date -s \"$(curl -s -D - google.com | sed '/Date:/s/.*Date: //p ; d')\"\n
        "},{"location":"time/#links","title":"Links","text":""},{"location":"time/#reading","title":"Reading","text":"
        • https://en.wikipedia.org/wiki/ISO_8601: \"ISO 8601 is an international standard covering the worldwide exchange and communication of date and time related data.\"
        • https://tools.ietf.org/html/rfc3339: Date and Time on the Internet: Timestamps - RFC 3339
        • https://www.gnu.org/software/coreutils/manual/html_node/Examples-of-date.html: Examples of date (GNU)
        • http://man7.org/linux/man-pages/man1/date.1.html: man date (linux)
        • http://www.freebsd.org/cgi/man.cgi?date: man date (freebsd)
        • http://infiniteundo.com/post/25326999628/falsehoods-programmers-believe-about-time: Falsehoods programmers believe about time
        • http://infiniteundo.com/post/25509354022/more-falsehoods-programmers-believe-about-time: More falsehoods programmers believe about time; \"wisdom of the crowd\" edition
        • https://www.iers.org: International Earth Rotation and Reference Systems Service
        • https://ijmacd.github.io/rfc3339-iso8601/: RFC 3339 vs ISO 8601 format visualizer
        • https://blog.healthchecks.io/2021/10/how-debian-cron-handles-dst-transitions/
        • https://everytimezone.com: Tool for coordinating across many time zones. Great for coordinating shift coverage across geographically distributed teams.
        • http://timesched.pocoo.org: A simpler tool for coordinating across many time zones.
        "},{"location":"time/#videos","title":"Videos","text":"
        • https://youtu.be/-5wpm-gesOY: The Problem with Time & Timezones - Computerphile
        "},{"location":"time/#see-also","title":"See Also","text":"
        • ntp - Network Time Protocol
        • ptp - Precision Time Protocol
        "},{"location":"tls/","title":"tls","text":"

        TLS is Transport Layer Security. It used to be called SSL: the Secure Sockets Layer. It provides encrypted, authenticated transport for application traffic such as HTTPS.

        "},{"location":"tls/#apache-ssl-steps","title":"Apache SSL steps","text":"
        1. Generate a host key: openssl genrsa -out foo.com.key 2048
        2. Generate a CSR from that key: openssl req -new -key foo.com.key -out foo.com.csr
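
        For local testing you can also self-sign the CSR instead of sending it to a CA (a quick sketch reusing the filenames from the steps above):

        openssl x509 -req -days 365 -in foo.com.csr -signkey foo.com.key -out foo.com.crt\n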

        To set up VirtualHosts, follow this template: https://cwiki.apache.org/confluence/display/HTTPD/NameBasedSSLVHosts

        "},{"location":"tls/#examples","title":"Examples","text":""},{"location":"tls/#download-a-certificate-from-an-https-server","title":"Download a certificate from an https server","text":"
        get_certificate_from_server() {\n  hostname=\"$1\"\n  port=\"${2:-443}\"\n  ip_address=\"$(dig +short \"$hostname\")\"\n  echo |\n    openssl s_client -servername \"$hostname\" -connect \"${ip_address}:${port}\" 2>/dev/null |\n    sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p'\n}\n\nget_certificate_from_server github.com\n
        "},{"location":"tls/#show-info-about-a-certificate-file","title":"Show info about a certificate file","text":"
        openssl x509 -noout -text -in foo.pem\n
        "},{"location":"tls/#validate-a-keys-cert-pair","title":"Validate a keys / cert pair","text":"

        To validate that a particular key was used to generate a certificate, useful for testing https key/crt files, do the following and make sure the modulus sections match:

        openssl rsa  -noout -modulus -in server.key\nopenssl x509 -noout -modulus -in server.crt # or server.pem\n

        Or as a function:

        function crt-key-compare {\n  if [ ! -f \"$1\" ] || [ ! -f \"$2\" ] ; then\n    echo \"ERROR: check that both files exist.\"\n    return 1\n  fi\n\n  if [[ \"$1\" != *crt* ]] || [[ \"$2\" != *key* ]] ; then\n    echo \"usage: crt-key-compare <server.crt> <server.key>\" ;\n  else\n    crt_modulus=$(openssl x509 -in \"$1\" -modulus | grep Modulus) || return 1\n    key_modulus=$(openssl rsa  -in \"$2\" -modulus | grep Modulus) || return 1\n\n    if diff <(echo \"$crt_modulus\") <(echo \"$key_modulus\") ; then\n      echo \"key and crt match.\"\n    else\n      echo \"key and crt do not match\"\n    fi\n  fi\n}\n
        "},{"location":"tls/#see-some-information-about-a-servers-certificate","title":"See some information about a server's certificate","text":"
        SERVER_NAME=linuxforums.org\nSERVER_PORT=443\nopenssl s_client -connect \"${SERVER_NAME}:${SERVER_PORT:-443}\" -servername \"${SERVER_NAME}\"\n
        "},{"location":"tls/#see-just-the-dates-of-a-webservers-ssl-certificate","title":"See just the dates of a webserver's SSL certificate","text":"
        check-server-cert-dates() {\n    test -z \"$1\" && { echo \"Usage: check-server-cert-dates <servername> [port]\" ; return 1 ; }\n    openssl s_client -connect \"${1}:${2:-443}\" -servername \"$1\" 2>/dev/null </dev/null |\n    openssl x509 -noout -dates\n}\n
        "},{"location":"tls/#show-the-issuer-and-dates-of-a-certificate","title":"Show the issuer and dates of a certificate","text":"

        This is useful when you're moving certs between issuers, for instance if you moved from letsencrypt to something else, then later get an expiration notice from letsencrypt, and want to verify that you're not using that certificate anymore:

        openssl s_client -connect \"${REMOTE_HOST}:443\" -servername \"$REMOTE_HOST\" 2>/dev/null </dev/null |\nopenssl x509 -noout -issuer -dates\n

        The output will be something like:

        issuer= /C=US/O=DigiCert Inc/CN=DigiCert TLS Hybrid ECC SHA384 2020 CA1\nnotBefore=Feb 18 00:00:00 2022 GMT\nnotAfter=Oct  5 23:59:59 2022 GMT\n
        "},{"location":"tls/#encrypt-a-file","title":"Encrypt a file","text":"
        openssl enc -aes-256-cbc -salt -in yourfile -out yourfile.enc\n
        "},{"location":"tls/#decrypt-a-file","title":"Decrypt a file","text":"
        openssl enc -aes-256-cbc -d -in encryptedfile.enc -out decryptedfile\n
        "},{"location":"tls/#encrypt-decrypt-bash-functions","title":"Encrypt / Decrypt bash functions","text":"
        function encrypt_file() { openssl enc -aes-256-cbc -salt -in \"$1\" -out \"$1.enc\" ; }\nfunction decrypt_file() { openssl enc -aes-256-cbc -d -in \"$1\" -out \"$1.dec\" ; }\n
        "},{"location":"tls/#perform-a-benchmark","title":"Perform a benchmark","text":"

        You can run benchmarks on one or more ciphers or digests using openssl speed.

        openssl speed -seconds 5 -evp sha256 sha512\n
        "},{"location":"tls/#generate-random-data","title":"Generate random data","text":"

        openssl can generate pseudo random data faster than /dev/urandom. This generates a 1 megabyte random data file at over 3x the speed of using /dev/urandom.

        openssl enc -aes-256-ctr -pbkdf2 -pass pass:\"foo\" < /dev/zero | dd bs=1024 count=1024 of=1_megabyte_random.dat\n
        "},{"location":"tls/#see-also","title":"See Also","text":"
        • https://datatracker.ietf.org/doc/html/rfc5246 - The Transport Layer Security (TLS) Protocol Version 1.2
        • https://github.com/cloudflare/cfssl - CFSSL: Cloudflare's PKI and TLS toolkit https://cfssl.org
        • https://tls12.xargs.org - Every byte of a TLS connection explained and reproduced
        • https://github.com/FiloSottile/mkcert - simple local dev Certificate Authority
        • https://www.tutorialsteacher.com/https/ssl-certificate-format - Good overview of certificate formats, including diagrams
        • https://shkspr.mobi/blog/2022/01/should-you-use-lets-encrypt-for-internal-hostnames
        • https://badssl.com: Hosting a variety of ssl failure modes, this site is a great resource when writing code that needs to handle all of these different scenarios.
        "},{"location":"tmux/","title":"tmux","text":"

        \"tmux is a terminal multiplexer: it enables a number of terminals to be created, accessed, and controlled from a single screen.\" - man tmux

You can think of it as a more modern replacement for GNU Screen.

        "},{"location":"tmux/#examples","title":"Examples","text":""},{"location":"tmux/#attach-to-a-new-session-or-open-a-new-session-if-there-is-none","title":"Attach to a new session, or open a new session if there is none","text":"

        Use this shell alias:

        alias t=\"tmux attach || tmux new-session\"\n
        "},{"location":"tmux/#open-several-named-tabs","title":"Open several named tabs","text":"

This example opens named tabs for several hosts and connects to them via ssh. I use a shell script called ssh-stay-connected which attempts to reconnect to any lost session; otherwise the tab closes when the ssh command terminates. A sketch of such a script follows the example:

        for node in n1 n2 n3 ; do tmux new-window -n \"$node\" ssh-stay-connected \"$node\" ; done ;\n
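A minimal sketch of what such a script might look like. This is hypothetical; the real script's retry logic may differ:

#!/usr/bin/env bash\n## ssh-stay-connected: re-run ssh against the given host whenever the session drops\nwhile true ; do\n  ssh \"$@\"\n  sleep 5\ndone\n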
        "},{"location":"tmux/#links","title":"Links","text":"
        • https://github.com/tmux/tmux/wiki
        • https://github.com/rothgar/awesome-tmux
        "},{"location":"top-variant-list/","title":"Top Variant List","text":"

The top interface is a common pattern in the CLI tool world. Here are some top-style tools.

        "},{"location":"top-variant-list/#top-style-tools","title":"top style tools","text":"
        • atop: Linux top tool that catches short-lived processes. Written in C.
        • bottom: \"Yet another cross-platform graphical process/system monitor.\" Written in rust.
        • btop: \"Resource monitor that shows usage and stats for processor, memory, disks, network and processes.\" Written in C++.
        • glances: \"Glances is an open-source system cross-platform monitoring tool. It allows real-time monitoring of various aspects of your system such as CPU, memory, disk, network usage etc. It also allows monitoring of running processes, logged in users, temperatures, voltages, fan speeds etc.\" Written in python.
        • htop: \"htop is a cross-platform interactive process viewer.\" Written in in C.
        • iftop: Top for network interfaces. Hasn't been updated since 2014 (as of 2024-02-05)
        • innotop: \"A realtime terminal-based top-like monitor for MySQL\" Written in perl.
        • iotop: \"A top utility for IO\" Written in C.
        • nethogs: Network top that shows usage by pid.
• ngxtop: top-like view of nginx logs.
        • ntop: Top for networking.
        • nvtop: \"Nvtop stands for Neat Videocard TOP, a (h)top like task monitor for AMD, Intel and NVIDIA GPUs.\"
        • powertop: Top for power usage.
        • top: The original.
        "},{"location":"top/","title":"top","text":"

top is a standard unix CLI tool to show running processes. This is one of those tools that behaves differently on BSD (macOS) and Linux, and may behave differently on other unixes.

        "},{"location":"top/#gnu-top-usage","title":"GNU top Usage","text":"
        • Change the number of displayed processes with n
        • Display all CPUs with 1
        • Kill a process with k
        • Renice a process with r
        • Save current display as default in ~/.toprc with W
        • Show or hide idle processes with i
        • Sort output with O
        "},{"location":"top/#bsd-top-usage","title":"BSD top Usage","text":""},{"location":"top/#start-top-sorted-by-cpu","title":"Start top sorted by cpu","text":"
        top -u\n
        "},{"location":"top/#see-also","title":"See also","text":"
        • Top variant list
        "},{"location":"touch/","title":"touch","text":"

touch is a command to modify the timestamp metadata of files. By default it updates the access and modification times of the given files, creating them as empty files if they do not exist.

        "},{"location":"touch/#examples","title":"Examples","text":""},{"location":"touch/#create-an-empty-file","title":"Create an empty file","text":"
        touch somefile\n
        "},{"location":"touch/#update-a-files-modify-time-to-a-specific-date","title":"Update a file's modify time to a specific date","text":"
        • -m to set modify time
        • -t to specify a timestamp in the format [[CC]YY]MMDDhhmm[.SS]
        touch -m -t 199902030405.06 1999-02-03-04-05-06.gpx\n
        "},{"location":"touch/#update-access-time-on-several-files","title":"Update access time on several files","text":"

        Not all filesystems support access time.

        touch -a -t 199509150102 GARBAGE/FILE\\ [1234]\n
        "},{"location":"touch/#randomize-the-mtime-for-a-given-file","title":"Randomize the mtime for a given file","text":"

bash's RANDOM only goes up to 32767, which is about 9 hours in 1-second increments. With RANDOM * 32767 + RANDOM we can get this up to just over 34 years.
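A quick sanity check of that bound:

echo $(( 32767 * 32767 + 32767 ))   # 1073709056 seconds, just over 34 years\n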

        randomize-mtime() {\n  seconds=\"$(( $(date +%s) - $(( RANDOM * 32767 )) - RANDOM))\"\n  new_mtime=\"$(gdate -d @\"${seconds}\" \"+%Y%m%d%H%M.%S\")\"\n  echo \"${new_mtime} $*\" 1>&2\n  touch -m -t \"${new_mtime}\" \"$@\"\n}\n\n## change mtime of all files to the same random mtime\nrandomize-mtime test-foo{1..3} ;\n\n## change mtime of each file to a different random mtime\nfor F in test-bar{1..3} ; do\n  randomize-mtime \"$F\"\ndone\n
        "},{"location":"tr/","title":"tr","text":"

        \"The tr utility copies the standard input to the standard output with substitution or deletion of selected characters.\" - man tr

        "},{"location":"tr/#examples","title":"Examples","text":"

        Interestingly, tr does not have any features to operate on files. It operates only on stdin. To use it on files you must use input redirection like tr .... < filename.txt or pipes like cat filename.txt | tr ...
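For example, both of these uppercase the contents of filename.txt:

tr a-z A-Z < filename.txt\ncat filename.txt | tr a-z A-Z\n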

        "},{"location":"tr/#replace-all-non-letters-with-a-carriage-return","title":"Replace all non-letters with a carriage return","text":"

-s squeezes adjacent matches into a single replacement.

        $ echo abcdefghijklmnopqrstuvwxyz | tr g-t '_'\nabcdef______________uvwxyz\n$ echo abcdefghijklmnopqrstuvwxyz | tr -c g-t '_'\n______ghijklmnopqrst_______\n$ echo abcdefghijklmnopqrstuvwxyz | tr -s g-t '_'\nabcdef_uvwxyz\n$ echo abcdefghijklmnopqrstuvwxyz | tr -cs g-t '_'\n_ghijklmnopqrst_$\n

In Doug McIlroy's critique of Donald Knuth's word frequency program, tr was used twice. Here's a somewhat modified version:

        $ man tr | tr -cs A-Za-z '\\n' | tr A-Z a-z | sort | uniq -c | sort -rn | head\n  96 the\n  45 in\n  44 characters\n  38 string\n  30 of\n  29 a\n  25 to\n  23 tr\n  22 character\n  21 is\n
        "},{"location":"tr/#see-also","title":"See also","text":"
• tr is often used with cut, though I prefer awk most of the time.
        "},{"location":"ubuntu/","title":"Ubuntu Linux","text":"

        \"Ubuntu is an open source software operating system that runs from the desktop, to the cloud, to all your internet connected things.\" - https://www.ubuntu.com

        I used Ubuntu for a long time but finally switched back to Debian because of snapd, so this doc is mostly historical.

        "},{"location":"ubuntu/#versions-overview","title":"Versions overview","text":"Codename Version EOS Kernel Python bash zfs Jammy 22.04 LTS 2027-04 5.15 3.10 5.1 2.1.5 Impish 21.10 2022-07 5.13 3.9.7 5.1.8 2.0.6 Focal 20.04 LTS 2025-04 5.4 3.8 5.0.17 0.8.3 Eoan 19.10 2010-07 5.3 2.7.17, 3.7 5.0.3 0.8.1 Disco 19.04 2020-01 5.0 2.7.16, 3.7 5.0.3 0.7.12 Bionic 18.04 LTS 2023-04 4.15 2.7.15, 3.6 4.4.20 0.7.5 Xenial 16.04 LTS 2021-04 4.4 2.7.12, 3.5 4.3.46 0.6.5 Trusty 14.04 LTS 2019-04 3.13 2.7.6 4.3.11"},{"location":"ubuntu/#links","title":"Links","text":"
        • Ubuntu Kernel Support and Schedules
        • List of releases
        • Table of Versions
        • https://endoflife.date/ubuntu
        "},{"location":"udev/","title":"udev","text":"

        \"udev supplies the system software with device events, manages permissions of device nodes and may create additional symlinks in the /dev directory, or renames network interfaces.\" - man udev

        "},{"location":"udev/#tips","title":"Tips","text":"
        • Default rules are in /lib/udev/rules.d
        "},{"location":"udev/#monitor-udev-events-in-real-time","title":"Monitor udev events in real time","text":"
        udevadm monitor\n
        "},{"location":"udev/#view-udev-environment-for-a-given-disk","title":"View udev environment for a given disk","text":"

        This is helpful when writing udev rules.

        udevadm info /dev/sda\n
        "},{"location":"udev/#view-human-readable-information-about-a-disk","title":"View human readable information about a disk","text":"
        udisksctl info -b /dev/sda\n
        "},{"location":"udev/#mount-namespace-problems","title":"Mount namespace problems","text":"

On Ubuntu, and probably other Linux distributions, udevd runs in its own mount namespace. This means that if you mount things using udev rules, by default they will be in an isolated namespace where users and other processes cannot access them. You can view this with:

        root@bionic:~# lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1\n\nroot@bionic:~# nsenter --all -t $(pgrep systemd-udevd) lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1 /mnt/adea64ca-e340-4961-8a4d-75d8a5970664\n

        To solve this, run systemctl edit systemd-udevd and input the following config:

        [Service]\nMountFlags=shared\n

Then run systemctl daemon-reload ; service systemd-udevd --full-restart. See man 7 mount_namespaces for more info.

        "},{"location":"udev/#trigger-a-disk-add-event-for-a-disk-that-has-already-been-inserted","title":"Trigger a disk add event for a disk that has already been inserted","text":"
        udevadm trigger -t devices --name /dev/sda1 --action=add\n
        "},{"location":"udev/#list-builtin-modules","title":"List builtin modules","text":"
        udevadm test-builtin --help\n
        "},{"location":"udev/#links","title":"Links","text":"
        • http://www.reactivated.net/writing_udev_rules.html
        "},{"location":"upstart/","title":"Upstart","text":"

        \"Upstart is an event-based replacement for the /sbin/init daemon which handles starting of tasks and services during boot, stopping them during shutdown and supervising them while the system is running.\" - http://upstart.ubuntu.com

        "},{"location":"upstart/#examples","title":"Examples","text":""},{"location":"upstart/#start-multiple-instances-of-the-same-services","title":"Start multiple instances of the same services","text":"

        http://upstart.ubuntu.com/cookbook/#instance

        "},{"location":"upstart/#master","title":"Master","text":"
        start on runlevel [2345]\nstop on runlevel [^2345]\nrespawn\n\nenv job_count=6\n\npre-start script\n  for i in $(seq -w 1 ${job_count}) ; do\n    start photoworker N=${i}\n  done\nend script\n\npost-stop script\n  for i in $(seq -w 1 ${job_count}) ; do\n    stop photoworker N=${i}\n  done\nend script\n\n
        "},{"location":"upstart/#child","title":"Child","text":"
        respawn\nrespawn limit 10 5\ninstance $N\nenv logfile=\"/var/log/worker_photoworker.log\"\nchdir /srv/photoworkers/current/web/services/jobworkers\n\npre-start exec bash -c \"echo $(date --rfc-3339=seconds) beginning worker run >> ${logfile}\"\nexec su -s /bin/sh -c 'exec \"$0\" \"$@\"' php-worker -- php photoworker.php >> ${logfile} 2>&1\npost-stop exec bash -c \"echo $(date --rfc-3339=seconds) ended worker run >> ${logfile}\"\n
        "},{"location":"upstart/#redirect-all-output-of-an-upstart-script-to-syslog","title":"Redirect all output of an upstart script to syslog","text":"

        Found at http://serverfault.com/questions/114052/logging-a-daemons-output-with-upstart

        script\n  FIFO=fifo.temp\n  mkfifo $FIFO\n\n  ( logger -t myservice <$FIFO & )\n\n  exec > $FIFO\n  rm $FIFO\n\n  exec /usr/local/bin/myservice 2>&1\nend script\n
        "},{"location":"upstart/#links","title":"Links","text":"
        • http://upstart.ubuntu.com/cookbook
        "},{"location":"utm/","title":"utm","text":"

        \"UTM employs Apple's Hypervisor virtualization framework to run ARM64 operating systems on Apple Silicon at near native speeds. On Intel Macs, x86/x64 operating system can be virtualized. In addition, lower performance emulation is available to run x86/x64 on Apple Silicon as well as ARM64 on Intel.\" - https://mac.getutm.app

        "},{"location":"vagrant/","title":"vagrant","text":"

        \"Vagrant is a tool for building and managing virtual machine environments in a single workflow.\" - https://www.vagrantup.com/intro/index.html

        "},{"location":"vagrant/#links","title":"Links","text":"
        • https://www.vagrantup.com
        • https://docs.vagrantup.com/v2
        • https://atlas.hashicorp.com
        • Examples: https://github.com/patrickdlee/vagrant-examples
        "},{"location":"vagrant/#os-x-shell-tweaks","title":"OS X Shell tweaks","text":"
        brew tap homebrew/completions\nbrew install vagrant-completion\n

        Then in .bash_profile:

        if [ -f $(brew --prefix)/etc/bash_completion ]; then\n  . $(brew --prefix)/etc/bash_completion\nfi\n
        "},{"location":"vagrant/#plugins","title":"Plugins","text":"
        vagrant plugin install vagrant-vbguest\nvagrant plugin install vagrant-hosts\n
        "},{"location":"vagrant/#usage-examples","title":"Usage Examples","text":""},{"location":"vagrant/#list-which-boxes-you-have-stored-locally","title":"List which boxes you have stored locally","text":"
vagrant box list\n
        "},{"location":"vagrant/#remove-an-old-version-of-a-vagrant-box","title":"Remove an old version of a vagrant box","text":"
        vagrant box remove ubuntu/trusty64 --box-version 20151201.0.0\n
        "},{"location":"vagrant/#script-box-updates","title":"Script box updates","text":"

This may fail in some circumstances; I haven't tested it exhaustively.

        vagrant box outdated --machine-readable --global |\n  awk -F, '$4 == \"warn\" {print $5; exit 1}' |\n  awk -F\"'\" '{print $2}' |\n  xargs -n1 vagrant box update --box\n
        "},{"location":"vagrant/#show-status-of-all-running-vagrant-boxes-not-just-the-one-in-the-cwd","title":"Show status of all running Vagrant boxes, not just the one in the CWD","text":"
        vagrant global-status\n
        "},{"location":"vector/","title":"vector","text":"

        \"A lightweight, ultra-fast tool for building observability pipelines\" - https://vector.dev

You can think of vector as a replacement for fluentd or fluentbit. It is great for reading inputs, transforming them, and sending them elsewhere. EG: for reading logs and shipping them.

        "},{"location":"vector/#links","title":"Links","text":"
        • https://github.com/vectordotdev/vector
        • https://vector.dev/docs/reference
        "},{"location":"vector/#examples","title":"Examples","text":""},{"location":"vector/#show-the-supported-sources-transforms-sinks","title":"Show the supported sources, transforms, sinks","text":"

        I'm not going to paste them here because the list is long and likely would be different depending on your version, but you can view them via:

        vector list\n

        The list as of vector 0.22.0 includes things from aws, gcp, splunk, prometheus, kafka, influxdb, elasticsearch, azure, and more.

        "},{"location":"vector/#spawn-a-process-and-handle-its-stdout-and-stderr","title":"Spawn a process and handle its stdout and stderr","text":"

One problem with reading stdout and stderr in linux is that they are two different file handles, so you have to handle them as such. Having a tool that aggregates them back into a single stream, with annotations about which stream each line was taken from, is great. This example shows how to use vector to spawn a subprocess, remove some fields, and print to stdout:

        #!/bin/bash\n# Filename: /tmp/stream-test.sh\n\nfor _ in {1..5} ; do\n  echo \"This is stdout\"\n  echo \"This is stderr\" >&2\n  sleep 0.$(( RANDOM ))\ndone\n

        The default config file format is toml, but the below example uses yaml because it is my preference. You can convert between them with dasel.

        # Filename: vector.yaml\n---\n# https://vector.dev/docs/reference/configuration/sources/exec\nsources:\n  exec:\n    command:\n      - /tmp/stream-test.sh\n    decoding:\n      codec: bytes\n    mode: streaming\n    streaming:\n      respawn_on_exit: False\n    type: exec\n\n# https://vector.dev/docs/reference/configuration/transforms\ntransforms:\n  remove_exec_fields:\n    inputs:\n      - exec\n    # https://vector.dev/docs/reference/vrl/\n    source: |-\n      del(.command)\n      del(.host)\n      del(.source_type)\n    type: remap\n\n# https://vector.dev/docs/reference/configuration/sinks/console\nsinks:\n  print:\n    encoding:\n      codec: json\n    inputs:\n      - remove_exec_fields\n    type: console\n
        $ vector --config vector.yaml\n2022-06-01T21:29:35.914895Z  INFO vector::app: Log level is enabled. level=\"vector=info,codec=info,vrl=info,file_source=info,tower_limit=trace,rdkafka=info,buffers=info,kube=info\"\n2022-06-01T21:29:35.915019Z  INFO vector::app: Loading configs. paths=[\"vector.yaml\"]\n2022-06-01T21:29:35.916968Z  INFO vector::topology::running: Running healthchecks.\n2022-06-01T21:29:35.917095Z  INFO vector: Vector has started. debug=\"false\" version=\"0.22.0\" arch=\"x86_64\" build_id=\"5e937e3 2022-06-01\"\n2022-06-01T21:29:35.917138Z  INFO vector::app: API is disabled, enable by setting `api.enabled` to `true` and use commands like `vector top`.\n2022-06-01T21:29:35.917152Z  INFO vector::topology::builder: Healthcheck: Passed.\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:35.918778044Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:35.918821210Z\"}\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:36.679150968Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:36.679193905Z\"}\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:36.959284295Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:36.959315187Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:37.124459926Z\"}\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:37.124598441Z\"}\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:37.241035793Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:37.241074381Z\"}\n2022-06-01T21:29:37.484711Z  INFO vector::shutdown: All sources have finished.\n2022-06-01T21:29:37.484751Z  INFO vector: Vector has stopped.\n

Even in the above example you can see how difficult it is to aggregate stdout and stderr in accurate order. In the script, stderr always comes second, but in all but one of these iterations stderr was handled before stdout. This is not a problem with vector; it is a fundamental POSIX issue caused by stdout and stderr being separate streams. However, vector seems to have a method for handling this when a timestamp shows up in the stream: if I replace echo with date \"+%FT%T%z.%N foo\" in both streams, the lines are consistently in order. Of course, another way to handle this is to emit logs as structured data with the timestamp added right at the source, but you will not always have control over the source log format.

        Another aspect of this setup is you can use vector as a type of init system, because you can set sources.exec.streaming.respawn_on_exit = true which will re-launch the process if it dies for some reason.

        "},{"location":"vector/#tap-a-running-vector-instance","title":"Tap a running vector instance","text":"

        https://vector.dev/guides/level-up/vector-tap-guide/

Vector has a feature called tap that lets you hook into a running instance and see what is coming through. You can enable this in your vector config via:

        # Filename: vector.toml\n[api]\nenabled = true\n

Then simply run:

        vector tap\n

This shows pre-transform inputs and outputs, which is useful when you are not seeing the output you expect, because you can see the before and after right next to each other. There are also further arguments you can pass to vector tap that let you filter for specific inputs or outputs. See vector tap --help for those syntaxes.

        "},{"location":"vector/#debug-syntax-using-a-repl","title":"Debug syntax using a repl","text":"

        https://vector.dev/docs/reference/vrl/

Vector has a repl feature that can be used for developing configs and debugging. Launch it with vector vrl. Once inside, type help to get guidance on how to proceed.

        "},{"location":"velero/","title":"velero","text":"

        \"Velero is a tool for managing disaster recovery, specifically for Kubernetes cluster resources. It provides a simple, configurable, and operationally robust way to back up your application state and associated data.\" - velero --help

        "},{"location":"velero/#examples","title":"Examples","text":""},{"location":"velero/#list-backups","title":"List backups","text":"
        $ velero backup get\nNAME                             STATUS            ERRORS   WARNINGS   CREATED                         EXPIRES   STORAGE LOCATION   SELECTOR\nvelero-somename-20210916020049   PartiallyFailed   1        0          2021-09-15 18:00:49 -0800 PST   26d       default            <none>\nvelero-somename-20210915020048   PartiallyFailed   1        0          2021-09-14 18:00:48 -0800 PST   25d       default            <none>\nvelero-somename-20210914020048   Completed         0        0          2021-09-13 18:00:48 -0800 PST   24d       default            <none>\nvelero-somename-20210913020026   Completed         0        0          2021-09-12 18:00:26 -0800 PST   23d       default            <none>\n

        Or as yaml

        velero backup get -o yaml\n
        "},{"location":"velero/#list-backup-schedules","title":"List backup schedules","text":"
        velero schedule get\n
        "},{"location":"velero/#get-logs-for-a-specific-backup","title":"Get logs for a specific backup","text":"

This looks like the same thing that comes out of kubectl logs or stern, but it lets you see the entire history, which is likely not available through other means.

        velero logs \"${backup_name}\"\n
        "},{"location":"vim/","title":"vim","text":"

        \"Vim is a text editor that is upwards compatible to Vi.\" - man vim

        "},{"location":"vim/#modelines","title":"Modelines","text":"

        modelines are commented lines in files that set vim settings to use when editing that file.

        http://vim.wikia.com/wiki/Modeline_magic

        "},{"location":"vim/#modeline-example","title":"modeline example:","text":"
## vim: set expandtab ts=2:\n
        "},{"location":"vim/#links","title":"Links","text":"
        • https://www.barbarianmeetscoding.com/boost-your-coding-fu-with-vscode-and-vim/moving-blazingly-fast-with-the-core-vim-motions
        "},{"location":"virtual-reality/","title":"Virtual Realtiy","text":"

        \"Virtual reality (VR) is a simulated experience that employs pose tracking and 3D near-eye displays to give the user an immersive feel of a virtual world.\" - https://en.wikipedia.org/wiki/Virtual_reality

        \"Virtual reality will grow, just as the telegraph grew to the telephone - as the radio to the TV - it will be everywhere.\" - Jobe Smith

        "},{"location":"virtual-reality/#links","title":"Links","text":"
        • https://vr-compare.com
        • https://hugo.blog/2024/03/11/vision-pro: \"What we got wrong at Oculus that Apple got right\"
        "},{"location":"virtualbox/","title":"VirtualBox","text":""},{"location":"virtualbox/#vboxmanage-examples","title":"VBoxManage Examples","text":"

        VBoxManage is the command line tool used to interact with VirtualBox.

        "},{"location":"virtualbox/#create-a-new-virtual-disk-image","title":"Create a new virtual disk image","text":"

        https://www.virtualbox.org/manual/ch08.html#vboxmanage-createmedium

        VBoxManage createmedium --format vdi --filename $DISK_NAME --size $DISK_SIZE_IN_MB\n
        "},{"location":"virtualbox/#attach-storage-to-a-vm","title":"Attach storage to a vm","text":"

        https://www.virtualbox.org/manual/ch08.html#vboxmanage-storageattach

VBoxManage storageattach $VM_NAME --storagectl $STORAGE_CONTROLLER --port 0 --device 0 --type hdd --medium $DISK_NAME\n
        "},{"location":"vpn/","title":"VPN","text":""},{"location":"vpn/#links","title":"Links","text":"
        • Nebula: \"Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security. It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, and Windows.\"
        • OpenVPN: \"OpenVPN is an open-source commercial software that implements virtual private network (VPN) techniques to create secure point-to-point or site-to-site connections in routed or bridged configurations and remote access facilities. It uses a custom security protocol that utilizes SSL/TLS for key exchange. It is capable of traversing network address translators (NATs) and firewalls. It was written by James Yonan and is published under the GNU General Public License (GPL)\" - OpenVPN Wikipedia entry
        • Tailscale: \"A secure network that just works\"
        • Tinc: \"tinc is a Virtual Private Network (VPN) daemon that uses tunnelling and encryption to create a secure private network between hosts on the Internet.\"
        • WireGuard: \"WireGuard is a free and open-source software application and communication protocol that implements virtual private network (VPN) techniques to create secure point-to-point connections in routed or bridged configurations. It is run as a module inside the Linux kernel and aims for better performance than the IPsec and OpenVPN tunneling protocols. It was written by Jason A. Donenfeld and is published under the second version of the GNU General Public License (GPL).\" - WireGuard Wikipedia entry
        • ZeroTier: \"ZeroTier, Inc is a software company with a Freemium business model based in Irvine, California. ZeroTier provides open-source tools and commercial products in the SDWAN sector that enable developers, operators and security professionals to create and manage geographically-agnostic virtual data centers. The company's flagship product ZeroTier One is a Private peer-to-peer client that enables devices such as laptops, desktops, phones, embedded devices, cloud resources, and services to securely connect to virtual networks.\" - ZeroTier Wikipedia entry
        "},{"location":"wasm/","title":"web assembly","text":"

        \"WebAssembly (abbreviated Wasm) is a binary instruction format for a stack-based virtual machine. Wasm is designed as a portable compilation target for programming languages, enabling deployment on the web for client and server applications.\" - https://webassembly.org

        "},{"location":"wasm/#links","title":"Links","text":"
        • https://webassembly.org
        • https://pyscript.net
        • https://github.com/webrcade/webrcade: Multi-game-system app in-browser
        • https://pythondev.readthedocs.io/wasm.html
        • https://github.com/sagemathinc/python-wasm
        • https://bytecodealliance.org/articles/wasmtime-1-0-fast-safe-and-production-ready
        • https://system7.app: Mac OS 7 in wasm
        • https://copy.sh/v86/: A variety of x86 operating systems running in-browser
        • https://github.com/psf/webassembly: \"A repo to track the progress of Python on WebAssembly (WASM)\"
        "},{"location":"webgl/","title":"webgl","text":"

        \"WebGL (short for Web Graphics Library) is a JavaScript API for rendering interactive 2D and 3D graphics within any compatible web browser without the use of plug-ins.\" - https://en.wikipedia.org/wiki/WebGL

        "},{"location":"webgl/#links","title":"Links","text":"
        • https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API
        • https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API/Tutorial/Getting_started_with_WebGL
        • https://threejs.org/examples/#webgl_animation_skinning_ik
        • https://ciechanow.ski: Really cool educational blog using webgl to teach about physical objects.
        "},{"location":"wget/","title":"Wget","text":"

        \"GNU Wget is a free software package for retrieving files using HTTP, HTTPS, FTP and FTPS the most widely-used Internet protocols. It is a non-interactive commandline tool, so it may easily be called from scripts, cron jobs, terminals without X-Windows support, etc.\" - https://www.gnu.org/software/wget

        "},{"location":"wget/#examples","title":"Examples","text":""},{"location":"wget/#mirror-site-for-local-viewing","title":"Mirror site for local viewing","text":"

        From the man page: to download a single page and all its requisites (even if they exist on separate websites), and make sure the lot displays properly locally, this author likes to use a few options in addition to -p:

        wget -E -H -k -K -p \"https://$site/$dir\"\n
        "},{"location":"wget/#download-all-images-from-a-site","title":"Download all images from a site","text":"

        To politely download all images from within a current remote directory:

        wget \\\n  --accept \".jpg\" \\\n  --adjust-extension \\\n  --continue \\\n  --no-parent \\\n  --random-wait \\\n  --recursive \\\n  --timestamping \\\n  --tries=0 \\\n  --wait=2 \\\n  --waitretry=30 \\\n  \"https://$site/$dir/\"\n
        "},{"location":"wget/#simple-use-of-cookies","title":"Simple use of cookies","text":"

        Some servers that need referrers and cookies can be accessed by doing:

        wget --save-cookies=\"cookies.txt\" \"foo.html\"\nwget --load-cookies=\"cookies.txt\" --referer=\"foo.html\" \"foo.mp3\"\n
        "},{"location":"wget/#set-default-behavior","title":"Set default behavior","text":"

        ~/.wgetrc sets default parameter values

        tries=0\ncontinue=1\n
        "},{"location":"wget/#see-also","title":"See Also","text":"
        • aria2
        • curl
• httpstat - download and show some useful connection information
        "},{"location":"winbind/","title":"winbind","text":"

        These examples may only work on Samba 3. See info about Winbindd here: https://wiki.samba.org/index.php/Configuring_Winbindd_on_a_Samba_AD_DC

        "},{"location":"winbind/#examples","title":"Examples","text":""},{"location":"winbind/#ping-the-winbind-servers","title":"Ping the winbind servers","text":"

wbinfo -p\n

        "},{"location":"winbind/#list-the-domain-users","title":"list the domain users","text":"

wbinfo -u\n

        "},{"location":"winbind/#try-authenticating-the-user-against-winbind","title":"try authenticating the user against winbind","text":"

wbinfo -a dhoherd\n

        "},{"location":"wireshark/","title":"Wireshark","text":"

Wireshark is the new name for what was Ethereal. It is a graphical packet sniffer that uses the same libraries as tcpdump.

        "},{"location":"wireshark/#display-filters","title":"Display Filters","text":"

        Display filters have their own syntax, whereas capture filters use tcpdump syntax.

        See also: https://www.wireshark.org/docs/man-pages/wireshark-filter.html

        "},{"location":"wireshark/#filter-by-packet-data-content","title":"Filter by packet data content","text":"

        Display all packets that contain \"foo\" in the data section:

        data contains foo\n

contains is a simple substring match, whereas matches is a Perl-compatible regex match.
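For example, a regex match against the same data field used above:

data matches \"foo[0-9]+\"\n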

        "},{"location":"wireshark/#display-hosts-within-a-given-subnet","title":"Display hosts within a given subnet","text":"
        ip.addr == 10.57.8.244/30\n
        "},{"location":"wireshark/#display-data-within-a-port-range","title":"Display data within a port range","text":"

To see all ceph-osd data:

        tcp.port >= 6800 and tcp.port <= 7300\n
        "},{"location":"wireshark/#show-only-dns-traffic-about-a-certain-host","title":"Show only dns traffic about a certain host","text":"
        dns.qry.name contains www.rmi.net || dns.resp.name contains www.rmi.net\n
        "},{"location":"wireshark/#show-all-dns-queries-that-do-not-have-a-response","title":"Show all dns queries that do not have a response","text":"

        In order for this to work you must perform the capture on the client side, or capture traffic from all DNS servers and combine it.

        dns && (dns.flags.response == 0) && ! dns.response_in\n
        "},{"location":"wireshark/#io-graphing","title":"IO graphing","text":"

Within the Statistics -> IO Graph window, you can create graphs that illustrate trends in traffic.

        "},{"location":"wireshark/#dns-response-time-stats","title":"DNS response time stats","text":"

        Create graphs that have the following data:

Graph Name | Display Filter | Style | Y Axis | Y Field | SMA Period
---------- | -------------- | ----- | ------ | ------- | ----------
AVG DNS Time | dns | line | AVG(Y Field) | dns.time | 10 interval SMA
MAX DNS Time | dns | line | MAX(Y Field) | dns.time | 10 interval SMA
MIN DNS Time | dns | line | MIN(Y Field) | dns.time | 10 interval SMA

"},{"location":"wireshark/#see-also","title":"See also","text":"
        • tcpdump
        "},{"location":"wonder-workshop/","title":"wonder-workshop","text":"

        Wonder Workshop

        "},{"location":"wonder-workshop/#dash","title":"Dash","text":"

        Dash is a tripod robot with two wheels and a caster, with IR I/O, bluetooth, microphone, speaker, LEDs, and proximity sensors.

        "},{"location":"wonder-workshop/#links","title":"Links","text":"
        • https://github.com/playi/WonderPy - Python API for Wonder Dash, Dot and Cue.
        • https://www.makewonder.com/robots/dash/
        "},{"location":"wuzz/","title":"wuzz","text":"

        \"Interactive cli tool for HTTP inspection\" - https://github.com/asciimoo/wuzz

        "},{"location":"xargs/","title":"xargs","text":"

xargs issues commands using STDIN as arguments, which by default are appended to the end of the command.
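A trivial illustration:

echo \"one two three\" | xargs mkdir   # runs: mkdir one two three\n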

        "},{"location":"xargs/#examples","title":"Examples","text":""},{"location":"xargs/#handle-spaces-and-use-the-arg-as-something-other-than-the-last-token","title":"Handle spaces and use the arg as something other than the last token","text":"

The -I argument takes a string that xargs replaces with each input item, which lets you use the arg somewhere other than the last token of the command. The -print0 arg causes find to terminate each result with a null byte, which allows it to handle filenames with characters that might not play nicely with the shell. We then have to use xargs -0 to make it consume the null terminated items. Lots of commands have a feature like this, so be on the lookout for it.

        find . -maxdepth 1 -type f -print0 |\nxargs -0 -I {} mv \"{}\" ~/some/dir/\n
        "},{"location":"xargs/#tun-3-concurrent-processes-each-consuming-5-results","title":"Tun 3 concurrent processes, each consuming 5 results","text":"
        find /dir/with/large/files -type f -print0 |\nxargs -0 -n5 -P3 sha256sum\n

        This would run 3 instances of sha256sum with each instance operating on 5 files. Since sha256sum is single-threaded, this would speed things up by using multiple CPU cores instead of being bound to a single CPU core.

        "},{"location":"xargs/#use-sed-to-change-git-files-containing-a-certain-string","title":"use sed to change git files containing a certain string","text":"

        This uses GNU sed -i, on macOS you should use sed -i '' or gsed -i. The -z on git grep causes it to null terminate the entries so xargs -0 will work.

        git grep -z -l 192.168.5 |\nxargs -0 sed -i 's/192.168.5/172.18.0/g'\n
        "},{"location":"xargs/#issue-the-same-command-several-times-in-parallel","title":"Issue the same command several times in parallel","text":"

This takes 1 directory as input and starts a sub-shell that cd's to the directory and runs a command. Up to 4 subshells are run in parallel. This is very similar to the GNU parallel command.

        find ~/code/ -mindepth 1 -maxdepth 1 -type d -print0 |\nxargs -0 -I {} -n1 -P4 bash -c \"cd {} ; make install-hooks ;\"\n
        "},{"location":"yaml/","title":"yaml","text":"

        \"YAML Ain't Markup Language\" - https://yaml.org/

        "},{"location":"yaml/#links","title":"Links","text":"
        • https://www.yaml.org/
        • https://www.yaml.org/refcard.html
        • https://en.wikipedia.org/wiki/YAML
        • http://www.yamllint.com/
        • https://yaml-multiline.info: Good reference of different multi-line syntaxes.
        • https://play.yaml.io/main/parser: See how different parsers treat the same snip of yaml.
• https://matrix.yaml.info: Table of yaml 1.2 test results for various parsers. Surprisingly, as of 2022-02 only two pass 100%.
        • https://github.com/crdoconnor/strictyaml: \"StrictYAML is a type-safe YAML parser that parses and validates a restricted subset of the YAML specification.\"
        • https://hitchdev.com/strictyaml/why-not/toml: Why StrictYAML is better than TOML.
        "},{"location":"youtube-dl/","title":"youtube-dl","text":"

        \"Command-line program to download videos from YouTube.com and other video sites\" - https://github.com/rg3/youtube-dl/

        "},{"location":"youtube-dl/#examples","title":"Examples","text":""},{"location":"youtube-dl/#show-available-media-formats","title":"Show available media formats","text":"
        youtube-dl -F 'https://youtu.be/LdCq6y1Uu5Y'\n
        "},{"location":"youtube-dl/#download-the-best-quality-within-resolution-bounds","title":"Download the best quality within resolution bounds","text":"
        youtube-dl -f 'bestvideo[height<=480]+bestaudio' 'https://youtu.be/-kgTCpv_W64'\n
        "},{"location":"youtube-dl/#download-the-best-quality-m4a-version","title":"Download the best quality m4a version","text":"
        youtube-dl -f 'bestaudio[ext=m4a]' 'https://youtu.be/0ZII3Cu_Uo4'\n
        "},{"location":"youtube-dl/#download-songs-longer-than-30-minutes","title":"Download songs longer than 30 minutes","text":"

        The --match-filter only alters what gets downloaded, not what is displayed when using options like -j

        youtube-dl -f bestaudio --match-filter 'duration > 1800' 'https://www.youtube.com/user/boyceavenue'\n
        "},{"location":"youtube-dl/#reformat-a-youtube-channel-into-lines-as-url-title","title":"Reformat a youtube channel into lines as \"URL - Title\"","text":"
        youtube-dl -j --flat-playlist 'https://www.youtube.com/watch?v=CHMZW9kLpg0&list=PL1B627337ED6F55F0' |\n  jq -S -r '\"https://youtu.be/\" + .id + \" - \" + .title'\n
        "},{"location":"yum/","title":"yum","text":"

        \"yum - Yellowdog Updater Modified\" - man yum

        \"yum is an interactive, rpm based, package manager.\" - man yum

        "},{"location":"yum/#examples","title":"Examples","text":""},{"location":"yum/#show-a-list-of-enabled-repositories","title":"Show a list of enabled repositories","text":"
        yum repolist\n
        "},{"location":"yum/#show-a-list-of-available-repositories","title":"Show a list of available repositories","text":"
        yum repolist all\n
        "},{"location":"yum/#show-all-installed-packages-their-versions-and-their-source-repo","title":"Show all installed packages, their versions and their source repo","text":"
        yum list installed\n
        "},{"location":"yum/#list-available-packages-and-the-repo-they-come-from","title":"List available packages and the repo they come from","text":"
        yum list available | grep jre\n
        "},{"location":"yum/#show-all-duplicates-in-a-search","title":"Show all duplicates in a search","text":"

This is a good way to get a complete list of available packages that match a certain string.

        yum --showduplicates search thrift\n
        "},{"location":"yum/#query-available-packages-in-a-given-repository","title":"Query available packages in a given repository","text":"
        yum --disablerepo=\"*\" --enablerepo=\"epel\" list available\n
        "},{"location":"yum/#upgrade-and-skip-broken-dependencies","title":"Upgrade and skip broken dependencies","text":"
        yum upgrade -y --skip-broken\n
        "},{"location":"yum/#upgrade-and-skip-certain-packages","title":"Upgrade and skip certain packages","text":"
        yum upgrade --exclude=*rabbitmq*\n
        "},{"location":"yum/#check-for-package-conflicts","title":"Check for package conflicts","text":"

Install yum-utils, then run package-cleanup.
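For example, a hedged invocation that lists dependency problems; see the package-cleanup man page for the other checks it supports:

yum install -y yum-utils\npackage-cleanup --problems\n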

        "},{"location":"yum/#find-a-package-that-includes-a-specific-command-or-file","title":"Find a package that includes a specific command or file","text":"
        yum whatprovides \"*/filename\"\n
        "},{"location":"yum/#check-for-groups-of-packages","title":"Check for groups of packages","text":"

yum grouplist\n

        "},{"location":"yum/#enable-optional-installs-in-groups","title":"Enable optional installs in groups","text":"

        Add group_package_types=mandatory,default,optional in /etc/yum.conf

        "},{"location":"yum/#download-but-do-not-install-packages-for-update","title":"Download but do not install packages for update","text":"
        yum upgrade --downloadonly --skip-broken\n
        "},{"location":"yum/#install-a-local-file-using-yum","title":"Install a local file using yum","text":"
        yum localinstall whatever.rpm\n
        "},{"location":"yum/#auto-updates-for-centos5","title":"Auto-updates for Centos5","text":"
        yum install yum-updatesd\n
        "},{"location":"yum/#auto-updates-for-centos6","title":"Auto-updates for Centos6","text":"
        yum install yum-cron\n
        "},{"location":"yum/#see-also","title":"See Also","text":"
        • rpm - interact with rpms directly
        • https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/sec-Managing_Yum_Repositories.html
        "},{"location":"zerotier/","title":"ZeroTier","text":"

        \"ZeroTier delivers the capabilities of VPNs, SDN, and SD-WAN with a single system. Manage all your connected resources across both local and wide area networks as if the whole world is a single data center.\" - https://www.zerotier.com/

        "},{"location":"zfs/","title":"ZFS","text":"

ZFS is the Zettabyte File System.

        "},{"location":"zfs/#links","title":"Links","text":"
        • OpenZFS - http://open-zfs.org
        • Tuning Guide - https://web.archive.org/web/20161223004915/http://www.solarisinternals.com/wiki/index.php/ZFS_Evil_Tuning_Guide
        • Hardware recommendations - http://blog.zorinaq.com/?e=10
        • Mac ZFS - http://code.google.com/p/maczfs/
        • Shadow migration feature - http://docs.oracle.com/cd/E23824_01/html/821-1448/gkkud.html
        • Speed tuning - http://icesquare.com/wordpress/how-to-improve-zfs-performance/
        • ZFS RAID levels - https://web.archive.org/web/20201120053331/http://www.zfsbuild.com/2010/05/26/zfs-raid-levels/
        • http://en.wikipedia.org/wiki/ZFS
        • http://wiki.freebsd.org/ZFSQuickStartGuide
        • http://www.solarisinternals.com/wiki/index.php/ZFS_Best_Practices_Guide
        • http://zfsguru.com
        • http://zfsonlinux.org/faq.html
        • https://web.archive.org/web/20190603150811/http://www.oracle.com/technetwork/articles/servers-storage-admin/o11-113-size-zfs-dedup-1354231.html
        • http://wiki.freebsd.org/ZFSTuningGuide#Deduplication
        • Corruption / failure to import - https://github.com/zfsonlinux/zfs/issues/2457
        • https://www.percona.com/blog/2018/05/15/about-zfs-performance/
        • https://wiki.freebsd.org/ZFSTuningGuide
        • https://freebsdfoundation.org/blog/raid-z-expansion-feature-for-zfs
        • https://www.binwang.me/2023-12-14-ZFS-Profiling-on-Arch-Linux.html
        • https://despairlabs.com/blog/posts/2024-10-27-openzfs-dedup-is-good-dont-use-it
        "},{"location":"zfs/#tips","title":"Tips","text":""},{"location":"zfs/#memory","title":"Memory","text":"
• For normal operation, 1GB of memory per 1TB of disk space is suitable.
• For dedup operation, 5GB of memory per 1TB of addressable disk space is suitable.
        "},{"location":"zfs/#log-devices","title":"Log devices","text":"
        • Use a log device if you have lots of writes.
        • Mirror it, because if you lose it you lose the whole volume.
        • Speed and latency are most important, not size. Log flushes every 5 seconds.
        • Get SLC if possible, otherwise MLC
        "},{"location":"zfs/#l2arc-cache-devices","title":"l2arc Cache devices","text":"
        • Use if you have lots of reads.
        • Size does matter, with big devices more data can be cached for faster reads of more data.
        • Speed and latency matter.
• Mirroring l2arc does not matter because if it fails, reads come from the spinning disks.
        • Too big of a device can suck up resources and cause poor performance. See https://wiki.freebsd.org/ZFSTuningGuide

        Good explanation: https://web.archive.org/web/20160324170916/https://blogs.oracle.com/brendan/entry/test

        "},{"location":"zfs/#zdb","title":"zdb","text":""},{"location":"zfs/#show-the-potential-savings-of-turning-on-dedupe-on-zpool-tank","title":"Show the potential savings of turning on dedupe on zpool tank","text":"

        https://web.archive.org/web/20130217052412/http://hub.opensolaris.org/bin/view/Community+Group+zfs/dedup

        zdb -S tank\n
        "},{"location":"zfs/#show-transactions-and-human-readable-dates-in-the-zdb-history","title":"Show transactions and human readable dates in the zdb history","text":"

        Use zdb -e for pools that are not mounted.

        zdb -hh tank \\\n| egrep 'txg|time' \\\n| while read -r _ a b ; do\n  if [ \"$a\" == \"time:\" ] ; then\n    date -d @$b \"+$a %F %T\" ;\n  else\n    echo \"$a  $b\" ;\n  fi ;\ndone\n
        "},{"location":"zfs/#zpool","title":"zpool","text":""},{"location":"zfs/#create-a-zpool-and-its-base-filesystem","title":"Create a zpool and its base filesystem","text":"
        zpool create -f -o cachefile=/tmp/zpool.cache zpoolname /dev/ada1 #create a zpool\n
        "},{"location":"zfs/#add-a-cache-device-to-a-pool","title":"Add a cache device to a pool","text":"
        ## add ada0p3 as a cache device to the tank zpool\nzpool add tank cache ada0p3\n
        "},{"location":"zfs/#show-all-configured-zpool-options-for-a-given-zpool","title":"Show all configured zpool options for a given zpool","text":"
        zpool get all tank\n
        "},{"location":"zfs/#show-history-of-all-operations-on-a-given-pool","title":"Show history of all operations on a given pool","text":"
        ## show history of operations on the pool, eg: snapshots, attribute changes\nzpool history\n
        "},{"location":"zfs/#show-real-time-statistics-on-a-given-zpool","title":"Show real time statistics on a given zpool","text":"
        ## show per-device statistics every 1 second\nzpool iostat -v 1\n
        "},{"location":"zfs/#show-basic-information-about-all-imported-zpools","title":"Show basic information about all imported zpools","text":"
        ## show zpool space info, deduplication ratio and health\nzpool list\n
        "},{"location":"zfs/#show-deduplication-tables","title":"Show deduplication tables","text":"
        ## show deduplication table entries. Take entries * size / 1024 / 1024 to calculate DDT consumption\nzpool status -D z2\n
        "},{"location":"zfs/#import-a-pool-by-different-disk-path","title":"Import a pool by different disk path","text":"

        You can change the paths your pool is imported from. This is useful if you created your zpool using /dev/sdN when you should have used /dev/disk/by-id/, which is deterministic. The -d option lets you specify a directory to look within for the given pool's devices.

        zpool import -d /dev/disk/by-id/ \"$ZPOOL_NAME\"\n

You may find that your pool was imported using links from this path that are not desirable, because several options are available. For instance, the pool may have been imported using wwn links (EG: wwn-0x5000cca22eca1056) that are not very user friendly compared to links that show the model and serial number (EG: scsi-SATA_HGST_HMS5C4141BM_PM1302LAGR5A0F). Because these links are managed by udev and are created when the disk is seen by the system, either at boot or at insertion, and because nothing else should be referencing them, they are safe to delete. Export your pool, delete the unwanted symlinks for the pool's devices, leaving only the symlinks you want to use, then run zpool import -d once again, as sketched below:
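A sketch of that procedure; the wwn symlink name here is the hypothetical example from above, so substitute the links for your own pool's disks:

zpool export \"$ZPOOL_NAME\"\n## remove the undesired wwn-* links, leaving the friendlier names in place\nrm /dev/disk/by-id/wwn-0x5000cca22eca1056\nzpool import -d /dev/disk/by-id/ \"$ZPOOL_NAME\"\n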

        "},{"location":"zfs/#replace-a-disk-in-a-zpool","title":"Replace a disk in a zpool","text":"
        ## Replace the first disk with the second in the tank pool\nzpool replace -f tank /dev/disk/by-id/ata-ST3000DM001-9YN166_W1F09CW9 /dev/disk/by-id/ata-ST3000DM001-9YN166_Z1F0N9S7\n
        "},{"location":"zfs/#real-example","title":"Real example","text":"
        $ zpool replace -f tank /dev/disk/by-id/ata-HGST_HDN724040ALE640_PK1334PCJY9ASS /dev/disk/by-id/ata-HGST_HUH728080ALE600_VKHA6YDX\n$ zpool status\n  pool: home\n state: ONLINE\n  scan: scrub repaired 0 in 0h0m with 0 errors on Sun Dec 10 00:24:07 2017\nconfig:\n\n        NAME                                             STATE     READ WRITE CKSUM\n        home                                             ONLINE       0     0     0\n          ata-M4-CT064M4SSD2_0000000012170908F759-part4  ONLINE       0     0     0\n\nerrors: No known data errors\n\n  pool: tank\n state: DEGRADED\nstatus: One or more devices is currently being resilvered.  The pool will\n        continue to function, possibly in a degraded state.\naction: Wait for the resilver to complete.\n  scan: resilver in progress since Mon Jan  8 19:57:45 2018\n    47.1M scanned out of 13.7T at 6.72M/s, 592h39m to go\n    11.5M resilvered, 0.00% done\nconfig:\n\n        NAME                                           STATE     READ WRITE CKSUM\n        tank                                           DEGRADED     0     0     0\n          raidz1-0                                     DEGRADED     0     0     0\n            replacing-0                                UNAVAIL      0     0     0\n              ata-HGST_HDN724040ALE640_PK1334PCJY9ASS  UNAVAIL      0     1     0  corrupted data\n              ata-HGST_HUH728080ALE600_VKHA6YDX        ONLINE       0     0     0  (resilvering)\n            ata-HGST_HDN724040ALE640_PK2334PEHG8LAT    ONLINE       0     0     0\n            ata-HGST_HDN724040ALE640_PK2334PEHGD37T    ONLINE       0     0     0\n            ata-HGST_HDN724040ALE640_PK2338P4H3TJPC    ONLINE       0     0     0\n\nerrors: No known data errors\n
        "},{"location":"zfs/#expand-a-zpool-in-place-after-replacing-disks-with-larger-disks","title":"Expand a zpool in place after replacing disks with larger disks","text":"

        Expansion happens automatically if you have done zpool set autoexpand=on tank. If you did not do that and you find your pool has not expanded, you can perform the following:

        List the absolute paths of your devices with something like:

        zpool list -v -PH | awk '$1 ~ \"^\\/dev\\/\" {gsub(\"-part1\",\"\",$1) ; print $1 ;}'\n

        Then go through your device list and run

        zpool online -e tank <disk-name> # do the expansion\nzpool list -v tank # check the EXPANDSZ column for the disk\n

        After doing all of these your pool should be expanded.

        "},{"location":"zfs/#zfs_1","title":"zfs","text":""},{"location":"zfs/#show-differences-between-current-filesystem-state-and-snapshot-state","title":"show differences between current filesystem state and snapshot state","text":"
zfs diff tank@snap tank\n
        "},{"location":"zfs/#show-configured-properties-for-a-filesystem","title":"Show configured properties for a filesystem","text":"
        zfs get all\n
        "},{"location":"zfs/#show-custom-filesystem-attributes","title":"Show custom filesystem attributes","text":"
        ## show custom attributes that override inherited attributes\nzfs get all -s local tank\n
        "},{"location":"zfs/#show-an-overview-of-all-mounted-zfs-filesystems","title":"Show an overview of all mounted zfs filesystems","text":"
        ## show disk space including free physical disk space and mount info\nzfs list\n
        "},{"location":"zfs/#show-specified-fields-of-each-filesystem","title":"Show specified fields of each filesystem","text":"
        ## show the listed fields of all filesystems\nzfs list -t all -o name,referenced,used,written,creation,userused@root\n
        "},{"location":"zfs/#show-only-snapshots","title":"Show only snapshots","text":"
        zfs list -t snapshot\n
        "},{"location":"zfs/#show-space-consumed-by-file-owner","title":"Show space consumed by file owner","text":"
        zfs userspace tank\n
        "},{"location":"zfs/#disable-atime-updates-for-a-filesystem","title":"Disable atime updates for a filesystem","text":"
        zfs set atime=off tank\n
        "},{"location":"zfs/#set-compression-to-lz4-for-a-filesystem","title":"Set compression to lz4 for a filesystem","text":"
        zfs set compression=lz4 tank\n
        "},{"location":"zfs/#set-deduplication-to-enabled-for-a-filesystem","title":"Set deduplication to enabled for a filesystem","text":"
        zfs set dedup=on tank\n
        "},{"location":"zfs/#set-a-filesystem-to-readonly","title":"Set a filesystem to readonly","text":"
        zfs set readonly=on zpoolname/dataset\n
        "},{"location":"zfs/#set-a-filesystem-to-allow-nfs-sharing","title":"Set a filesystem to allow NFS sharing","text":"
        zfs set sharenfs=on tank\n
        "},{"location":"zfs/#create-a-dataset","title":"Create a dataset","text":"
        ## create a dataset 'sole' on zpool 'tank'\nzfs create tank/sole\n
        "},{"location":"zfs/#destroy-multiple-snapshots","title":"Destroy multiple snapshots","text":"
        zfs destroy tank@20130413-weekly,20130420-weekly,20130428-weekly,20130505-weekly\n
        "},{"location":"zfs/#zfs-send-receive","title":"zfs send / receive","text":"

        Replicate a zpool (use the latest snapshot name as the source) to a blank zpool:

        zfs send -v -D -R tank@20120907-oldest | zfs receive -F -v z2\n
        • -D enables a deduplicated stream.
        • -R enables a recursive send of all snapshots and filesystems up to that point.
        • -F enables deletion of any snapshots on the target that don't exist on the sender
        • -v enables verbose mode
        "},{"location":"zfs/#recursively-zfs-send-a-filesystem-to-a-remote-host-and-receive-it-as-a-new-dataset","title":"recursively zfs send a filesystem to a remote host and receive it as a new dataset","text":"
        zfs send -v -D -R z1@20120907-oldest | ssh otherhost zfs receive -v z2/z1\n
        "},{"location":"zfs/#show-summary-of-what-would-be-sent","title":"Show summary of what would be sent","text":"

This shows an entire dataset up to the given snapshot.

        zfs send -n -v -D -R tank@20140531-monthly\n
        "},{"location":"zfs/#show-the-space-differences-between-two-snapshots","title":"Show the space differences between two snapshots","text":"
        zfs send -n -v -D -i tank@20140531-monthly tank@20141031-monthly\n
        "},{"location":"zfs/#show-the-amount-of-new-space-consumed-by-each-monthly","title":"Show the amount of new space consumed by each monthly","text":"
zfs list -t snapshot -o name | grep 'tank@.*monthly' | while read -r X ; do [[ ! $a =~ .*monthly ]] && a=$X || zfs send -n -v -D -i $a $X && a=$X ; done 2>&1 | grep send\n
        "},{"location":"zfs/#complex-examples","title":"Complex examples","text":""},{"location":"zfs/#create-a-raidz-called-tank","title":"Create a raidz called tank","text":"

        Create a raidz pool from 4 disks and set some properties:

pool=tank\nzpool create -f -o ashift=12 \"${pool}\" raidz /dev/disk/by-id/scsi-SATA_HGST_HDN724040A_PK2338P4H*-part1\nzfs set dedup=on \"${pool}\"\nzpool set listsnapshots=on \"${pool}\"\nzfs set atime=off \"${pool}\"\nzfs set compression=lz4 \"${pool}\"\n
        "},{"location":"zfs/#create-a-case-insensitive-raidz3-out-of-50-files","title":"Create a case insensitive raidz3 out of 50 files","text":"
        pool=tank\nfor X in {1..50} ; do mkfile -n 2g ${pool}.$X ; done ;\nsudo zpool create -O casesensitivity=insensitive ${pool} raidz3 \"${PWD}/${pool}\".{1..50}\n
        "},{"location":"zfs/#troubleshooting","title":"Troubleshooting","text":""},{"location":"zfs/#mount-a-pool-that-is-giving-you-trouble","title":"Mount a pool that is giving you Trouble","text":"
        zpool import -o failmode=continue -o readonly=on zpool_name\n

        This helped me get read access to a pool that was kernel panicking with the following error when I tried to import it normally:

        Dec  7 14:48:40 localhost kernel: PANIC: blkptr at ffff8803fddb4200 DVA 0 has invalid OFFSET 294940902907904\n
        "},{"location":"zfs/#zfs-on-mac-os-x","title":"ZFS on Mac OS X","text":"
        • http://openzfsonosx.org
        "},{"location":"zfs/#create-a-zfs-partition-on-devdisk3","title":"Create a ZFS partition on /dev/disk3","text":"
        ## Must eject device in Disk Utility first\ndiskutil partitiondisk /dev/disk3 GPTFormat ZFS %noformat% 100% # strange syntax, but works\nzpool create backups1 /dev/disk3s2 # create the zpool\nmdutil -i off /Volumes/backups1 # required on MacZFS since spotlight does not function\n
        "},{"location":"zfs/#zfs-on-linux","title":"ZFS on Linux","text":"
        • If you get module errors: modprobe zfs ; ldconfig
        • If you get permission denied, check selinux settings
        "},{"location":"zfs/#centos-6-repository","title":"CentOS 6 Repository","text":"
        sudo yum install -y epel-release # assumes later CentOS 6 where epel is provided upstream\nsudo yum localinstall --nogpgcheck http://archive.zfsonlinux.org/epel/zfs-release.el6.noarch.rpm\nsudo yum install zfs -y\n
        "},{"location":"zfs/#reinstalling-when-things-fail","title":"Reinstalling when things fail","text":"
        ##!/bin/bash -x\nyum install -y kernel-devel-$(uname -r)\nzfs_version=0.6.5.4\ndkms remove  -m zfs -v \"${zfs_version}\" --all\ndkms remove  -m spl -v \"${zfs_version}\" --all\ndkms add     -m spl -v \"${zfs_version}\" --force\ndkms add     -m zfs -v \"${zfs_version}\" --force\ndkms install -m spl -v \"${zfs_version}\" --force\ndkms install -m zfs -v \"${zfs_version}\" --force\n
        "},{"location":"zfs/#inspect-the-rpm-for-what-scripts-it-runs","title":"Inspect the rpm for what scripts it runs","text":"

        This is useful for debugging failures after kernel upgrade.

        rpm -q --scripts zfs-dkms\n
        "},{"location":"zfs/#building-on-centos-6","title":"Building on CentOS 6","text":"
        yum groupinstall \"Development tools\" && yum install -y libuuid-devel zlib-devel bc lsscsi mdadm parted kernel-debug\n## For spl, then again for zfs:\n./configure && make && make rpm && rpm -i *64.rpm\n
        "},{"location":"zookeeper/","title":"zookeeper","text":"

        ZooKeeper is a high-performance coordination service for distributed applications. - https://zookeeper.apache.org/doc/current/

        "},{"location":"zookeeper/#examples","title":"Examples","text":""},{"location":"zookeeper/#the-four-letter-words","title":"The four letter words","text":"

        \"ZooKeeper responds to a small set of commands. Each command is composed of four letters. You issue the commands to ZooKeeper via telnet or nc, at the client port.\" - https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands

        "},{"location":"zookeeper/#lists-brief-details-for-the-server-and-connected-clients","title":"Lists brief details for the server and connected clients","text":"

        echo 'stat' | nc localhost 2181

        "},{"location":"zookeeper/#view-a-list-of-variables-that-could-be-used-for-monitoring-the-health-of-the-cluster","title":"View a list of variables that could be used for monitoring the health of the cluster","text":"

        echo 'mntr' | nc localhost 2181

        "},{"location":"zookeeper/#list-full-details-for-the-server","title":"List full details for the server","text":"

        echo 'srvr' | nc localhost 2181

        "},{"location":"zookeeper/#links","title":"Links","text":"
        • https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html
        "},{"location":"zsh/","title":"zsh","text":"

        \"Zsh is a UNIX command interpreter (shell) usable as an interactive login shell and as a shell script command processor.\" - man zsh

One big caveat to using zsh is that it has no syntax linter. There is an open shellcheck GitHub issue about this.

        "},{"location":"zsh/#links","title":"Links","text":"
        • https://ohmyz.sh
        • https://github.com/unixorn/awesome-zsh-plugins
        • https://github.com/unixorn/zsh-quickstart-kit
        "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"About these notes","text":"

        These are notes I've taken on technologies that I have used or would like to use.

These notes started out some time before 2005 in VoodooPad 2. In December 2005 I discovered that you could self-host MediaWiki, so I moved my content into a private MediaWiki installation. Both VoodooPad and self-hosted MediaWiki worked fine for me, but as my notes became more useful and I wanted to show different sections to people in a way that let them discover useful content, the private nature of my self-hosted MediaWiki installation became problematic. MediaWiki also had the problem of being hosted by a web service, which meant it was not possible to access or edit content when my laptop was offline. I solved this for a while by running MediaWiki in a VM on my laptop, but that meant I couldn't access notes from other computers if my laptop was offline, and it meant I had a VM running at all times just to serve notes, which wasted a lot of resources. In 2015 I decided to move out of MediaWiki into markdown files in git, and in 2016 I began using mkdocs to publish these notes publicly to github pages.

Since 2016, these notes have been rendered from markdown files and published to github-pages using mkdocs gh-deploy. If you have suggestions, please open a github issue. Please do not submit PRs.

        • https://danielhoherd.github.io/tech-notes/
        • https://github.com/danielhoherd/tech-notes

        "},{"location":"3d-printing/","title":"3D Printing","text":""},{"location":"3d-printing/#links","title":"Links","text":"
        • https://www.makerbot.com
        • http://www.meshlab.net: \"the open source system for processing and editing 3D triangular meshes.\"
        • https://www.thingiverse.com
        • https://all3dp.com/1/petg-filament-3d-printing/
        • https://www.monoprice.com/product?p_id=33820: Monoprice MP Voxel 3D Printer
        • https://www.shapeways.com/materials
        • https://cadquery.readthedocs.io: \"CadQuery is an intuitive, easy-to-use Python library for building parametric 3D CAD models.\"
        • https://www.prusa3d.com/category/original-prusa-i3-mk3s
        • https://youtu.be/ibsOYzXduYc: Ender 3 Pro build
        • https://youtu.be/_EfWVUJjBdA: Ender 3 Pro bed leveling
        • https://youtu.be/kG_YKeJDaX8: Ender 3 Pro mods
        • https://www.3dbenchy.com/features
        • https://ultimaker.com/software/ultimaker-cura
        • https://teachingtechyt.github.io: \"Teaching Tech 3D Printer Site\"
        "},{"location":"3d-printing/#see-also","title":"See Also","text":"
        • Marlin
        "},{"location":"airflow/","title":"Airflow","text":"

        \"Airflow is a platform created by the community to programmatically author, schedule and monitor workflows.\" - https://airflow.apache.org/

        "},{"location":"airflow/#links","title":"Links","text":"
        • https://airflow.apache.org/docs/stable/concepts.html
        • https://airflow.readthedocs.io/en/latest/executor/kubernetes.html
        • https://www.astronomer.io/blog/dag-writing-best-practices-in-apache-airflow / https://youtu.be/HvjnLCQygO4
        "},{"location":"airport/","title":"Apple Airport","text":"

        Apple Airport hardware was discontinued in November 2016.

        "},{"location":"airport/#using-old-airport-utility-apps-with-new-versions-of-os-x","title":"Using old Airport Utility apps with new versions of OS X","text":"
        • https://zcs.zyniker.org/airport-utility-v5-6-1
        • https://support.apple.com/kb/DL1536

        Or use the 5.6.1 Utility in Windows? Not sure if this works.

        • https://support.apple.com/kb/dl1547
        "},{"location":"amazon/","title":"Amazon","text":"

        Mostly related to the technological offerings of Amazon, not the shopping experience.

        "},{"location":"amazon/#kindle","title":"Kindle","text":"
        • https://blog.lidskialf.net/2021/02/08/turning-an-old-kindle-into-a-eink-development-platform/
        • https://wiki.mobileread.com/wiki/Kindle_Hacks_Information
        • https://www.mobileread.com/forums/showthread.php?t=225030
        • https://goodereader.com/blog/kindle/interesting-ways-to-infuse-new-life-to-old-kindle-ereader-devices
        "},{"location":"amazon/#aws","title":"AWS","text":"

        \"Amazon Elastic Compute Cloud (Amazon EC2) is a web service that provides secure, resizable compute capacity in the cloud. It is designed to make web-scale cloud computing easier for developers.\" - https://aws.amazon.com/ec2/

        "},{"location":"amazon/#cloudformation","title":"Cloudformation","text":"

        cloudformation has its own notes page.

        "},{"location":"amazon/#links","title":"Links","text":"
        • Amazon EC2 Instance Types
        • Amazon service availability by region
        • awless - awless is a powerful, innovative and small surface command line interface (CLI) to manage Amazon Web Services.
        • AWS Console
        • AWS in plain english
        • EC2Instances.info - Easy Amazon EC2 Instance Comparison
        • Github - Troposphere
        • localstack - A fully functional local AWS cloud stack for use in development.
• s3tools - Command Line S3 Client and Backup
        • https://aws.amazon.com/answers/account-management/aws-tagging-strategies/
        "},{"location":"amazon/#tips","title":"Tips","text":""},{"location":"amazon/#determine-if-you-are-on-an-ec2-instance","title":"Determine if you are on an EC2 instance","text":"
        grep -i '^ec2' /sys/hypervisor/uuid\n
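
This only works on older Xen-based instance types, since /sys/hypervisor/uuid does not exist on Nitro-based instances. Querying the instance metadata service is one alternative:

curl -s http://169.254.169.254/latest/meta-data/instance-id\n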
        "},{"location":"amazon/#reformat-accesskeyscsv-into-awscredentials-format","title":"Reformat accessKeys.csv into .aws/credentials format","text":"
        awk -F, 'BEGIN { print \"[temp_name]\" ; } !/Access/ {print \"aws_access_key_id = \"$1\"\\naws_secret_access_key = \"$2}' ~/Downloads/accessKeys.csv\n
        "},{"location":"amazon/#force-reset-mfa-credentials","title":"Force reset mfa credentials","text":"

        https://github.com/broamski/aws-mfa

        aws-mfa --device arn:aws:iam::$UID:mfa/$USER --force\n
        "},{"location":"amazon/#create-eks-cluster-from-cli","title":"Create eks cluster from cli","text":"

        https://github.com/weaveworks/eksctl

        eksctl create cluster\n
        "},{"location":"amazon/#get-eks-cluster-config","title":"Get eks cluster config","text":"
        # find your cluster name\naws eks list-clusters | jq -r '.clusters[]'\n\n# configure the current KUBECONFIG for the given cluster\naws eks update-kubeconfig --name the_cluster_name\n
        "},{"location":"amazon/#see-also","title":"See Also","text":"
        • aws cli
        • CloudFormation
        • AWS Glossary
        "},{"location":"android/","title":"Android","text":"

        \"Android is a mobile operating system based on a modified version of the Linux kernel and other open-source software, designed primarily for touchscreen mobile devices such as smartphones and tablets.\" - https://en.wikipedia.org/wiki/Android_(operating_system)

        "},{"location":"android/#links","title":"Links","text":"
        • https://github.com/melontini/bootloader-unlock-wall-of-shame
        "},{"location":"ansible/","title":"Ansible","text":"
        • https://docs.ansible.com/intro_inventory.html
        • https://docs.ansible.com/playbooks_best_practices.html
        • https://galaxy.ansible.com
        • https://www.azavea.com/blogs/labs/2014/10/creating-ansible-roles-from-scratch-part-1
        • https://ryandlane.com/blog/2014/08/04/moving-away-from-puppet-saltstack-or-ansible/
        • https://docs.ansible.com/ansible/latest/user_guide/intro_patterns.html: Targeting hosts
        • https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable
        • https://zwischenzugs.com/2021/08/27/five-ansible-techniques-i-wish-id-known-earlier
        "},{"location":"ansible/#modules","title":"Modules","text":"
        • https://docs.ansible.com/ansible/latest/collections/ansible/builtin/#modules
        • https://docs.ansible.com/ansible/latest/modules/apt_module.html
        • https://docs.ansible.com/ansible/latest/modules/apt_repository_module.html
        • https://docs.ansible.com/ansible/latest/modules/sysctl_module.html
        • https://docs.ansible.com/ansible/latest/modules/user_module.html
        "},{"location":"ansible/#see-also","title":"See also","text":"
        • Molecule: testing of Ansible roles
        "},{"location":"ansible/#examples","title":"Examples","text":""},{"location":"ansible/#generate-a-copy-block-for-a-given-file","title":"Generate a copy block for a given file","text":"

        Not perfect because the output is json, but json is yaml and easy enough to fix up quickly.

        ## stat -c '{\"copy\": {\"src\": \"SOURCE_FILE_NAME\", \"dest\": \"%n\", \"mode\": \"0%a\", \"owner\": \"%U\", \"group\": \"%G\"}}' /etc/logrotate.d/backup | jq .\n{\n  \"copy\": {\n    \"src\": \"SOURCE_FILE_NAME\",\n    \"dest\": \"/etc/logrotate.d/backup\",\n    \"mode\": \"0644\",\n    \"owner\": \"root\",\n    \"group\": \"root\"\n  }\n}\n
        "},{"location":"ansible/#show-a-list-of-installed-modules","title":"Show a list of installed modules","text":"
        ansible-doc --list\n
        "},{"location":"ansible/#run-a-playbook-and-prompt-for-sudo-password","title":"Run a playbook and prompt for sudo password","text":"
        ansible-playbook --ask-become-pass -i inventory/hosts.yaml create_users.yaml\n
        "},{"location":"ansible/#run-an-ad-hoc-command","title":"Run an ad-hoc command","text":"

        You can run one-off ad-hoc commands by passing a module and args to the module.

        ansible localhost \\\n  -m get_url \\\n  -a \"mode=755\n    url=https://github.com/bcicen/ctop/releases/download/v0.7.1/ctop-0.7.1-linux-amd64\n    dest=/usr/local/bin/ctop\n    checksum=sha256:38cfd92618ba2d92e0e1262c0c43d7690074b4b8dc77844b654f8e565166b577\n    owner=root\n    group=root\"\n
        "},{"location":"ansible/#validate-and-inspect-your-inventory-file","title":"Validate and inspect your inventory file","text":"

        This command parses your inventory and group_vars and outputs a json data structure if no syntax faults are found.

        ansible-inventory -i inventory/hosts.yml --list\n
        "},{"location":"ansible/#use-arbitrary-groups-in-static-inventory-file","title":"Use arbitrary groups in static inventory file","text":"
        $ nl -w 2 -s ' ' -ba inventory/example.yml\n 1 all:\n 2   hosts:\n 3     client:\n 4       ansible_host: 192.168.1.2\n 5     server:\n 6       ansible_host: 192.168.2.3\n 7\n 8 linux:\n 9   hosts:\n10     server:\n11\n12 windows:\n13   hosts:\n14     client:\n15\n16 california:\n17   hosts:\n18     client:\n19     server:\n$ ansible-inventory -i inventory/example.yml --graph\n@all:\n  |--@california:\n  |  |--client\n  |  |--server\n  |--@linux:\n  |  |--server\n  |--@windows:\n  |  |--client\n
        "},{"location":"ansible/#merge-multiple-inventory-files","title":"Merge multiple inventory files","text":"

        The below example gives higher precedence to the later files.

        ## cat foo.yml\nall:\n  hosts:\n    client:\n      ansible_host: 192.168.1.2\n      service_hostname: hostname-from-file-1\n    server:\n      ansible_host: 192.168.2.3\n      file_number: one\n\n## cat bar.yml\nall:\n  hosts:\n    client:\n      ansible_host: 10.1.2.3\n    server:\n      ansible_host: 10.2.3.4\n      file_number: two\n\n## ansible-inventory -i foo.yml -i bar.yml --list | json-to-yaml.py\n_meta:\n  hostvars:\n    client:\n      ansible_host: 10.1.2.3\n      service_hostname: hostname-from-file-1\n    server:\n      ansible_host: 10.2.3.4\n      file_number: two\nall:\n  children:\n  - ungrouped\nungrouped:\n  hosts:\n  - client\n  - server\n
        "},{"location":"ansible/#show-all-resolved-variables-for-a-given-inventory-host","title":"Show all resolved variables for a given inventory host","text":"

This will show all host vars, including variables resolved from all of the different variable locations.

        ansible -i inventory target_hostname -m debug -a \"var=hostvars[inventory_hostname]\"\n
        "},{"location":"ansible/#gather-all-facts-and-save-them-to-files","title":"Gather all facts and save them to files","text":"

This will create a directory called host_facts and save the results as one json file per host.

        ansible -i inventory target_group_or_hostname -m gather_facts --tree host_facts\n
        "},{"location":"ansible/#generate-an-deterministic-random-number","title":"Generate an deterministic random number","text":"

        This is similar to the Puppet fqdn_rand() function, which is really useful to splay cron jobs. Splaying cron jobs avoids the thundering herd problem by spreading the jobs out over time with deterministic randomness.

        ---\n## defaults/main.yml\n\ndemo_cron_minute: \"{{ 59 | random(seed=inventory_hostname) }}\"\ndemo_cron_hour: \"{{ 23 | random(seed=inventory_hostname) }}\"\n

        See also: https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#randomizing-data

        "},{"location":"ansible/#simple-ansible-playbook","title":"Simple ansible playbook","text":"

        This may be useful for testing syntax and experimenting with ansible modules.

        ---\n## playbook.yml\n\n- name: A local play\n  hosts: localhost\n  connection: local\n  gather_facts: no\n  tasks:\n    - name: Run cmd\n      shell: /bin/date\n      register: cmd_out\n\n    - debug:\n        var: cmd_out.stdout\n

ansible-playbook -i localhost, playbook.yml (the trailing comma makes ansible treat localhost as an inventory list rather than an inventory file)

        Slightly more complicated example:

## playbook.yml\n## run with: ansible-playbook -i localhost, playbook.yml\n\n- name: A local play\n  hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: test_region\n    subnets:\n      - subnet_name: Public_2a\n        subnet_cidr: 192.168.100.0/26\n        subnet_az: \"{{ region }}_a\"\n      - subnet_name: Public_2b\n        subnet_cidr: 192.168.100.64/26\n        subnet_az: \"{{ region }}_b\"\n      - subnet_name: Private_2a\n        subnet_cidr: 192.168.100.128/26\n        subnet_az: \"{{ region }}_a\"\n      - subnet_name: Private_2b\n        subnet_cidr: 192.168.100.192/26\n        subnet_az: \"{{ region }}_b\"\n\n  tasks:\n    - name: Run cmd\n      shell: echo \"{{ item.subnet_name }} {{ item.subnet_cidr }} {{ item.subnet_az }}\"\n      register: cmd_out\n      loop: \"{{ subnets }}\"\n\n    - debug:\n        var: cmd_out\n
        "},{"location":"ansible/#get-a-list-of-failed-hosts","title":"Get a list of failed hosts","text":"
        {{ ansible_play_hosts_all | difference(ansible_play_hosts) }}\n
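
A minimal sketch of reporting this from a task (the task name and placement are illustrative):

- name: Report hosts that failed during this play\n  debug:\n    msg: \"Failed hosts: {{ ansible_play_hosts_all | difference(ansible_play_hosts) }}\"\n  run_once: true\n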
        "},{"location":"ansible/#links","title":"Links","text":"
        • https://github.com/danielhoherd/ansible-playground: Examples I've made of Ansible techniques
        • Ansible Tests with Molecule - https://molecule.readthedocs.io/en/latest/ / https://www.digitalocean.com/community/tutorials/how-to-test-ansible-roles-with-molecule
        • Molecule sequence of scenario events - https://molecule.readthedocs.io/en/latest/configuration.html#scenario
        • Test-driven infrastructure development with Ansible & Molecule - https://blog.codecentric.de/en/2018/12/test-driven-infrastructure-ansible-molecule/ / https://github.com/jonashackt/molecule-ansible-docker-vagrant
        • Testkitchen modules (for Molecule tests): https://testinfra.readthedocs.io/en/latest/modules.html
        • https://www.hashicorp.com/resources/ansible-terraform-better-together
        "},{"location":"apfs/","title":"Apple APFS","text":"

A lot of the notes here are as of macOS 10.13, and may not apply to other versions or devices that run APFS.

        APFS got some big bumps in macOS 12, including big snapshot improvements.

        "},{"location":"apfs/#usage","title":"Usage","text":"
        $ diskutil apfs\n2017-11-04 18:23:55-0700\nUsage:  diskutil [quiet] ap[fs] <verb> <options>\n        where <verb> is as follows:\n\n     list                (Show status of all current APFS Containers)\n     convert             (Nondestructively convert from HFS to APFS)\n     create              (Create a new APFS Container with one APFS Volume)\n     createContainer     (Create a new empty APFS Container)\n     deleteContainer     (Delete an APFS Container and reformat disks to HFS)\n     resizeContainer     (Resize an APFS Container and its disk space usage)\n     addVolume           (Export a new APFS Volume from an APFS Container)\n     deleteVolume        (Remove an APFS Volume from its APFS Container)\n     eraseVolume         (Erase contents of, but keep, an APFS Volume)\n     changeVolumeRole    (Change the Role metadata bits of an APFS Volume)\n     unlockVolume        (Unlock an encrypted APFS Volume which is locked)\n     lockVolume          (Lock an encrypted APFS Volume (diskutil unmount))\n     listCryptoUsers     (List cryptographic users of encrypted APFS Volume)\n     changePassphrase    (Change the passphrase of a cryptographic user)\n     setPassphraseHint   (Set or clear passphrase hint of a cryptographic user)\n     encryptVolume       (Start async encryption of an unencrypted APFS Volume)\n     decryptVolume       (Start async decryption of an encrypted APFS Volume)\n     updatePreboot       (Update the APFS Volume's related APFS Preboot Volume)\n\ndiskutil apfs <verb> with no options will provide help on that verb\n
        "},{"location":"apfs/#file-clones","title":"File clones","text":"

APFS supports deduplicated file copies, which it calls clonefiles. Copying a file by option-dragging it in Finder creates a clonefile. To create a clonefile on the CLI use cp -c src dst. Creating a clonefile of a file of any size is instantaneous because no file data is actually copied. This differs from hard links because if you modify the clone, only the new blocks will be written to disk, and the source of the cloned file will not be modified.
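
For example, cloning a large file returns immediately (the file name here is hypothetical):

## -c clones instead of copying data, so this completes instantly regardless of file size\ntime cp -c bigfile.mov bigfile-clone.mov\n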

        "},{"location":"apfs/#snapshots","title":"Snapshots","text":"

        Snapshots appear to be tied pretty directly to Time Machine, and do not appear to be general purpose. There appear to be many limitations in how they can be used, and what information you can get about them.

        There was previously a tool called apfs_snapshot but it was removed before macOS 10.13 was released.

        "},{"location":"apfs/#create-a-snapshot","title":"Create a snapshot","text":"

You cannot choose a name for your snapshot; it is named for the date the snapshot was taken, in the form YYYY-MM-DD-HHMMSS (date \"+%Y-%m-%d-%H%M%S\").

        $ sudo tmutil localsnapshot\nNOTE: local snapshots are considered purgeable and may be removed at any time by deleted(8).\nCreated local snapshot with date: 2021-08-23-101843\n
        "},{"location":"apfs/#show-snapshots","title":"Show snapshots","text":"
        $ sudo tmutil listlocalsnapshots /\ncom.apple.TimeMachine.2017-11-01-161748\ncom.apple.TimeMachine.2017-11-02-100755\ncom.apple.TimeMachine.2017-11-03-084837\ncom.apple.TimeMachine.2017-11-04-182813\n
        "},{"location":"apfs/#mount-a-snapshot","title":"Mount a snapshot","text":"

        The easiest way to mount snapshots is to open Time Machine.app and browse backwards in time. This will mount your snapshots at /Volumes/com.apple.TimeMachine.localsnapshots/Backups.backupdb/$HOSTNAME/$SNAPSHOT_DATE/Data or a similar path.

        If you just want to mount a single snapshot, fill in $snapshot_name using one of the lines from tmutil listlocalsnapshots /, then:

        mkdir apfs_snap\nmount_apfs -o nobrowse,ro -s \"$snapshot_name\" /System/Volumes/data \"$PWD/apfs_snap\"\n

Older versions of macOS have a slightly different syntax:

        mkdir apfs_snap\nsudo mount_apfs -s \"$snapshot_name\" / \"${PWD}/apfs_snap\"\n
        "},{"location":"apfs/#delete-a-snapshot","title":"Delete a snapshot","text":"

You can only delete snapshots by their date.

        $ sudo tmutil deletelocalsnapshots 2017-11-04-183813\nDeleted local snapshot '2017-11-04-183813'\n
        "},{"location":"apfs/#delete-all-snapshots","title":"Delete all snapshots","text":"
/usr/bin/tmutil listlocalsnapshots / |\ngrep -oE '2[0-9]{3}-[0-9]{2}-[0-9]{2}-[0-9]{6}' |\nwhile read -r snap ; do\n  tmutil deletelocalsnapshots \"${snap}\"\ndone\n
        "},{"location":"apfs/#thin-out-snapshots","title":"Thin out snapshots","text":"

On the given drive, reclaim the given space by thinning out snapshots. As of tmutil 4.0.0, you cannot use any data unit other than bytes (e.g. 1G or 1GB will not work).

        $ sudo tmutil thinlocalsnapshots / 250000000\nThinned local snapshots:\n2017-11-04-184425\n2017-11-04-184433\n2017-11-04-184440\n
        "},{"location":"apfs/#see-also","title":"See also","text":"
        /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util\n/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs_invert\n/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs_preflight_converter\n/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs_stats\n
        "},{"location":"apfs/#links","title":"Links","text":"
        • Apple File System
        • Rich Trouton - Storing our digital lives - Mac filesystems from MFS to APFS
        • Apple File System Guide
        • Russ Bishop - Apple File System
        • A ZFS developer\u2019s analysis of the good and bad in Apple\u2019s new APFS file system
        • https://www.jinx.de/zfs/hfsfailure.html - Demo of how awful HFS+ is at detecting failures
        • https://eclecticlight.co/2021/11/09/disk-utility-now-has-full-features-for-managing-snapshots/
        "},{"location":"aptly/","title":"Aptly","text":"
        • \"Aptly is a swiss army knife for Debian repository management.\"
        • https://github.com/sepulworld/aptly-vagrant
        "},{"location":"aria2/","title":"Aria2","text":"

        \"aria2 is a lightweight multi-protocol & multi-source command-line download utility. It supports HTTP/HTTPS, FTP, SFTP, BitTorrent and Metalink. aria2 can be manipulated via built-in JSON-RPC and XML-RPC interfaces.\" - https://aria2.github.io/

Of particular interest is the ability to download a single file from multiple sources, even using multiple protocols, for increased download speed.
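
For example, multiple URIs that point at the same file are treated as mirrors and downloaded from in parallel (these mirror URLs are hypothetical):

aria2c http://mirror1.example.com/foo.iso http://mirror2.example.com/foo.iso ftp://mirror3.example.com/foo.iso\n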

        "},{"location":"aria2/#examples","title":"Examples","text":""},{"location":"aria2/#download-a-file-in-place","title":"Download a file in place","text":"

        This command can be canceled and given again to resume the file download.

## -x5 allow up to 5 connections per server\n## -c continue a partially downloaded file (HTTP/FTP)\n## --file-allocation=none do not pre-allocate disk space for the file (begin downloading immediately; see man page for more options)\n## --max-overall-download-limit=3M overall speed limit (K = 1024, M = 1024K)\n## --max-download-limit=1M per-download speed limit\naria2c -x5 -c --file-allocation=none --max-overall-download-limit=3M --max-download-limit=1M http://example.com/foo.iso\n
        "},{"location":"aria2/#see-also","title":"See Also","text":"
        • curl
• httpstat - download and show some useful connection information
        • wget
        "},{"location":"arpwatch/","title":"arpwatch","text":"

        \"arpwatch - keep track of ethernet/ip address pairings\" - man arpwatch

        "},{"location":"arpwatch/#examples","title":"Examples","text":""},{"location":"arpwatch/#fork-and-log-to-file-not-to-e-mail","title":"Fork and log to file, not to e-mail","text":"
        arpwatch -Q\ntail -F /var/lib/arpwatch/arp.dat\n
        "},{"location":"atomicparsley/","title":"AtomicParsley","text":"

AtomicParsley is a lightweight command line program for reading, parsing and setting metadata into MPEG-4 files. This is a functional mp4 equivalent of what id3v2 is for mp3 files.

        "},{"location":"atomicparsley/#examples","title":"Examples","text":""},{"location":"atomicparsley/#set-metadata-on-multiple-files","title":"Set metadata on multiple files","text":"

        Unfortunately the syntax of this tool requires you to edit one file at a time, so you have to iterate each item of an album using shell loops or xargs or whatever you prefer.

for file in *.m4a ; do\n  AtomicParsley \"${file}\" --artist \"Various Artists\" --overWrite ;\ndone ;\n
        "},{"location":"atomicparsley/#remove-personally-identifiable-information-pii-from-files","title":"Remove Personally Identifiable Information (pii) from files","text":"

        Useful if you want to remove your personal info from iTunes Match files.

for file in *.m4a ; do\n  AtomicParsley \\\n    \"$file\" \\\n    --DeepScan \\\n    --manualAtomRemove \"moov.trak.mdia.minf.stbl.mp4a.pinf\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.----.name:[iTunMOVI]\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.apID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.atID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.cnID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.cprt\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.flvr\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.geID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.plID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.purd\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.rtng\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.sfID\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.soal\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.stik\" \\\n    --manualAtomRemove \"moov.udta.meta.ilst.xid\"\ndone\n
        "},{"location":"automotive/","title":"Automotive","text":""},{"location":"automotive/#links","title":"Links","text":"
        • Autonomous Vehicles
        • https://ciechanow.ski/internal-combustion-engine
        • http://animatedengines.com
        • Animagraffs - How a Car Engine Works
        "},{"location":"autonomous-vehicles/","title":"Autonomous Vehicle Links","text":""},{"location":"autonomous-vehicles/#terms","title":"Terms","text":"
        • ACES: Automated, Connected, Electric, Shared
        • Levels: Refers to the 6 levels of autonomous vehicles
        • SAE: Society of Automotive Engineers
        "},{"location":"autonomous-vehicles/#autonomy-levels","title":"Autonomy Levels","text":"
        • L0: No automation. Human performs all operations. Base cost.
        • L1: Driver assistance. Human performs most driving operations, system can perform steering OR speed controls. Increased cost.
        • L2: Partial automation. Human performs most driving operations, system can perform steering AND speed controls. Increased cost.
        • L3: Conditional automation. System can perform all driving operations, human must be available to intervene in complex situations. Increased cost.
        • L4: High automation. System can perform all driving operations. Complex situations do not require a human to intervene but intervention is still an option. Decreased cost.
        • L5: Full automation. System performs all driving operations. Humans are all passengers with no requirement to intervene. Decreased cost.
        "},{"location":"autonomous-vehicles/#links","title":"Links","text":"
        • https://apollo.auto
        • https://automotivelinux.org
        • https://autonomoustuff.com
        • https://www.autosar.org
        • https://avs.auto/demo/index.html
        • https://en.wikipedia.org/wiki/ISO_26262
        • https://en.wikipedia.org/wiki/Units_of_transportation_measurement
        • https://github.com/visgl/deck.gl
        • https://renovo.auto dead link, acquired by https://woven.toyota
        • https://ros.org
        • https://some-ip.com
        • https://kevinchen.co/blog/autonomous-trucking-harder-than-rideshare (2024)
        "},{"location":"avahi/","title":"Avahi","text":"

        The Avahi mDNS/DNS-SD daemon implements Multicast DNS like Apple's Zeroconf architecture (also known as \"Rendezvous\" or \"Bonjour\").

        "},{"location":"avahi/#tips","title":"Tips","text":"

        After installing avahi-daemon it may not start. To fix this you may need to run service messagebus start

        Service types are defined in /usr/share/avahi/service-types
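
To see which services are currently being advertised on the network, avahi-browse from the avahi-utils package can be used:

## -a browse all service types, -t terminate after dumping the current list\navahi-browse -a -t\n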

        "},{"location":"avahi/#service-configs","title":"Service configs","text":"

Correctly formatted and named files in /etc/avahi/services/whatever.service are loaded on the fly; there is no need to restart avahi-daemon. If your service doesn't immediately show up, check syslog for errors.

        <?xml version=\"1.0\" standalone='no'?><!--*-nxml-*-->\n<!DOCTYPE service-group SYSTEM \"avahi-service.dtd\">\n<service-group>\n  <name replace-wildcards=\"yes\">%h</name>\n  <service>\n    <type>_ssh._tcp</type>\n    <port>22</port>\n  </service>\n  <service>\n    <type>_http._tcp</type>\n    <port>80</port>\n  </service>\n</service-group>\n
        "},{"location":"awk/","title":"awk","text":"

        \"pattern-directed scanning and processing language\" - man awk

        "},{"location":"awk/#examples","title":"Examples","text":"

        Some of these require GNU awk.

        "},{"location":"awk/#print-the-first-column-of-a-file","title":"Print the first column of a file","text":"
awk '{print $1}' filename.txt\n
        "},{"location":"awk/#print-column-2-if-column-1-matches-a-string","title":"Print column 2 if column 1 matches a string","text":"
        ps aux | awk '$1 == \"root\" {print $2}'\n
        "},{"location":"awk/#pass-in-a-variable-and-value","title":"Pass in a variable and value","text":"
        ps | awk -v host=\"$HOSTNAME\" '{print host,$0}'\n
        "},{"location":"awk/#sort-a-file-by-line-lengths","title":"Sort a file by line lengths","text":"
        awk '{print length, $0}' testfile.txt | sort -n\n
        "},{"location":"awk/#tdl-to-csv","title":"TDL to CSV","text":"
        awk '{gsub(\"\\t\",\"\\\",\\\"\",$0); print;}' | sed 's#^#\"#;s#$#\"#;'\n
        "},{"location":"awk/#print-the-first-column-of-every-other-line","title":"Print the first column of every other line","text":"

        % is the modulus operator, which finds the remainder after an integer divide.

        awk 'NR % 2 == 0 { print $1 }'\n
        "},{"location":"awk/#print-only-even-numbered-lines","title":"Print only even numbered lines","text":"
        ls | awk 'NR % 2 == 0 { print $0 }'\n
        "},{"location":"awk/#print-only-odd-numbered-lines","title":"Print only odd numbered lines","text":"
        ls | awk 'NR % 2 != 0 { print $0 }'\n
        "},{"location":"awk/#print-even-numbered-lines-on-the-same-line-before-odd-numbered-lines","title":"Print even numbered lines on the same line before odd numbered lines","text":"
        awk '{if (NR%2==0) { print $0 \" \" prev } else { prev=$0 }}'\n
        "},{"location":"awk/#print-sum-all-the-first-columns-of-each-line-in-a-file","title":"Print sum all the first columns of each line in a file","text":"
        awk '{sum += $1} END {print sum}' filename\n
        "},{"location":"awk/#print-count-sum-and-average-of-the-first-column-of-stdin","title":"Print count, sum, and average of the first column of stdin","text":"
        for _ in {1..100} ; do echo $((RANDOM % 100)) ; done |\nawk '{sum += $1} END {avg = sum/NR ; printf \"Count:   %s\\nSum:     %s\\nAverage: %s\\n\", NR, sum, avg}'\n
        "},{"location":"awk/#split-file-by-recurring-string","title":"Split file by recurring string","text":"

        This will create a new file every time the string \"SERVER\" is found, essentially splitting the file by that string. Concatenating all of the output files would create the original file (potentially adding an extra newline).

        awk '/SERVER/{n++}{print >\"out\" sprintf(\"%02d\", n) \".txt\" }' example.txt\n
        "},{"location":"awk/#show-count-of-syslog-messages-per-minute","title":"Show count of syslog messages per minute","text":"
awk -F: '{print $1 \":\" $2}' /var/log/messages | uniq -c\n
        "},{"location":"awk/#show-count-of-root-logins-per-minute","title":"Show count of root logins per minute","text":"
        awk -F: '/root/{print $1 \":\" $2}' /var/log/auth.log |uniq -c\n
        "},{"location":"awk/#print-lines-in-ls-where-uid-is-numeric","title":"Print lines in ls where UID is numeric","text":"
        ls -la | awk '$3 ~/[0-9]/{print}'\n
        "},{"location":"awk/#show-only-zfs-snapshots-whose-size-is-zero","title":"Show only zfs snapshots whose size is zero","text":"
        zfs list -t snapshot | awk '$2 == 0'\n
        "},{"location":"awk/#print-a-line-if-the-third-field-does-not-match-a-regex","title":"Print a line if the third field does not match a regex","text":"
        echo {100..200} | fold -w 12 | awk '$3 !~ /[13579]$/ {print}'\n
        "},{"location":"awk/#show-500-errors-in-a-standard-apache-access-log","title":"Show 500 errors in a standard apache access log","text":"
        awk '$9 ~ /5[0-9][0-9]/' access.log\n
        "},{"location":"awk/#show-total-rss-and-vsz-count-for-all-cronolog-processes","title":"Show total rss and vsz count for all cronolog processes","text":"
        ps aux |\n  grep -i cronolo[g] |\n  awk '{vsz += $5; rss += $6} END {print \"vsz total = \"vsz ; print \"rss total = \"rss}'\n
        "},{"location":"awk/#get-ipv4-address-on-bsdosx","title":"Get IPv4 address on BSD/OSX","text":"
        ifconfig | awk '$1 == \"inet\" && $2 != \"127.0.0.1\" {print $2}'\n
        "},{"location":"awk/#get-ipv6-address-on-bsdosx","title":"Get IPv6 address on BSD/OSX","text":"
        ifconfig | awk '$1 == \"inet6\" && $2 !~ \"::1|.*lo\" {print $2}'\n
        "},{"location":"awk/#print-the-last-element","title":"Print the last element","text":"
        ls -la | awk -F\" \" '{print $NF}'\n
        "},{"location":"awk/#print-2nd-to-last-element","title":"Print 2nd to last element","text":"
        ls -la | awk -F\" \" '{print $(NF - 1)}'\n
        "},{"location":"awk/#print-the-previous-line-on-string-match","title":"Print the previous line on string match","text":"

        This works by storing the previous line. If the current line matches the regex, the previous line is printed from the stored value.

        $ awk '/32 host/ { print previous_line } {previous_line=$0}' /proc/net/fib_trie | column -t | sort -u\n|--  10.134.243.137\n|--  127.0.0.1\n|--  169.50.9.172\n
        "},{"location":"awk/#add-content-to-line-1-if-there-is-no-match","title":"Add content to line 1 if there is no match","text":"

        This adds a yaml document separator to the beginning of all yaml files in the current directory only if it does not already have one.

        tempfile=$(mktemp)\nfor file in ./*.yaml ; do\n  awk 'NR == 1 && $0 != \"---\" {print \"---\"} {print}' \"${file}\" > \"${tempfile}\" \\\n  && mv \"${tempfile}\" \"${file}\"\ndone\n
        "},{"location":"awk/#show-all-docker-images-in-a-helm-chart-and-their-https-links","title":"Show all docker images in a helm chart and their https links","text":"
        helm template . --set global.baseDomain=foo.com -f /Users/danielh/a/google-environments/prod/cloud/app/config.yaml 2>/dev/null |\nawk '/image: / {match($2, /(([^\"]*):[^\"]*)/, a) ; printf \"https://%s %s\\n\", a[2], a[1] ;}' |\nsort -u |\ncolumn -t\n

        A less complicated awk form of this that uses other shell commands would be

        helm template . --set global.baseDomain=foo.com -f /Users/danielh/a/google-environments/prod/cloud/app/config.yaml 2>/dev/null |\ngrep 'image: ' |\nawk '{print $2}' |\nsed 's/\"//g' |\nsed 's/\\(\\(.*\\):.*\\)/https:\\/\\/\\2 \\1/' |\nsort -u |\ncolumn -t\n

        So it really depends on where you want to put your complications, how performant you want to be, and how readable you want it to be. These both produce identical output, but some people find it easier to read shorter commands with simpler syntaxes, which is great for maintainability when performance is not an issue.

        https://quay.io/astronomer/ap-alertmanager  quay.io/astronomer/ap-alertmanager:0.23.0\nhttps://quay.io/astronomer/ap-astro-ui      quay.io/astronomer/ap-astro-ui:0.25.4\nhttps://quay.io/astronomer/ap-base          quay.io/astronomer/ap-base:3.14.2\nhttps://quay.io/astronomer/ap-cli-install   quay.io/astronomer/ap-cli-install:0.25.2\n...snip...\n
        "},{"location":"awk/#show-a-list-of-dns-hostname-queries-with-domain-stripped-sorted-by-hostname-length","title":"Show a list of dns hostname queries with domain stripped, sorted by hostname length","text":"

        This samples 100k dns queries, strips off all the domain names in the queried hostname, and prints the length of that first component of the FQDN (the bare hostname) along with the bare hostname itself, and shows the longest 25 entries.

        tcpdump -c 100000 -l -n -e dst port 53 |\nawk '$14 == \"A?\" {gsub(/\\..*/, \"\", $15) ; print(length($15), $15) ; fflush(\"/dev/stdout\") ;}' |\nsort -u |\nsort -n |\ntail -n 25\n

Run this on your kube-dns nodes to see how close you're getting to the 63 character limit. You will never see errors though, because any name with a component longer than 63 characters is not sent over the wire, so you'll need to check your logs for those. A good string to search for is \"63 characters\".

        "},{"location":"awk/#see-also","title":"See Also","text":"
        • https://www.grymoire.com/Unix/Awk.html
        • https://github.com/TheMozg/awk-raycaster: An FPS written in awk
        "},{"location":"awless/","title":"awless","text":"

        \"A Mighty CLI for AWS\" - https://github.com/wallix/awless

        "},{"location":"awless/#examples","title":"Examples","text":"
        • Example templates - https://github.com/wallix/awless-templates

A lot of these syntax examples can be found by issuing the command verb and entity with no parameters, such as awless create stack, which will drop you into a series of prompts to complete the required and optional parameters.

        "},{"location":"awless/#list-ec2-instances-sorted-by-uptime","title":"List ec2 instances sorted by uptime","text":"
        $ awless list instances --sort=uptime\n|         ID          |    ZONE    |           NAME          |  STATE  |    TYPE    | PUBLIC IP |   PRIVATE IP  | UPTIME \u25b2 | KEYPAIR |\n|---------------------|------------|-------------------------|---------|------------|-----------|---------------|----------|---------|\n| i-050ad501b33c6ad07 | us-west-1a | faruko-nal              | running | m4.xlarge  |           | 172.19.15.172 | 85 mins  | foo-ops |\n| i-5b381e9b          | us-west-1b | planted-collector11.foo | running | m4.xlarge  |           | 172.27.26.159 | 6 days   | foo-ops |\n| i-04ced9880586c009b | us-west-1a | hadoop07.foo            | running | m4.4xlarge |           | 172.27.37.100 | 8 days   | foo-ops |\n| i-0e583dcd3bc2444d8 | us-west-1a | db-na-historical06.foo  | running | m2.4xlarge |           | 172.19.48.79  | 12 days  | foo-ops |\n
        "},{"location":"awless/#sum-the-amount-of-unattached-disks-in-your-environment","title":"Sum the amount of unattached disks in your environment","text":"
        awless list volumes \\\n    --filter state=available \\\n    --format json |\n  jq .[].Size |\n  awk '{sum += $1 ; count += 1 ;} END {print sum \"G in \" count \" volumes\"}'\n
        "},{"location":"awless/#switch-to-a-different-aws-profile","title":"Switch to a different AWS profile","text":"

        This uses the ~/.aws/credentials file for its profiles

        Short way:

        awless switch prod\n

        Long way:

        awless config set aws.profile prod\n
        "},{"location":"awless/#customize-output-columns","title":"Customize output columns","text":"
        awless list instances --columns name,type,launched\n
        "},{"location":"awless/#add-a-user-to-a-group","title":"Add a user to a group","text":"
        awless \\\n  --aws-profile govcloud \\\n  --aws-region us-gov-west-1 \\\n  attach user \\\n  group=SystemAdministrators \\\n  name=SpaceGhost\n
        "},{"location":"awless/#create-an-access-key-for-a-user","title":"Create an access key for a user","text":"

        This creates an access key and saves it in ~/.aws/credentials

        awless \\\n  --aws-profile govcloud \\\n  --aws-region us-gov-west-1 \\\n  create accesskey \\\n  user=SpaceGhost \\\n  save=true\n
        "},{"location":"awless/#create-a-tag","title":"Create a tag","text":"
        awless create tag key=test_tag resource=i-9ba90158 value=true\n
        "},{"location":"awless/#delete-a-tag","title":"Delete a tag","text":"
        awless delete tag key=test_tag_dhoherd resource=i-9ba90158\n
        "},{"location":"awless/#create-an-instance","title":"Create an instance","text":"
        awless create instance \\\n  count=1 \\\n  image=ami-5ab82fa8 \\\n  keypair=ops \\\n  name=new-hostname \\\n  securitygroup=[sg-c4321fd1,sg-c4321cb0] \\\n  subnet=subnet-c4321c33 \\\n  type=t2.medium\n
        "},{"location":"awless/#see-also","title":"See also","text":"
        • amazon
        • aws-cloudformation
        • awscli
        "},{"location":"aws-cloudformation/","title":"Amazon AWS Cloudformation","text":"

        \"AWS CloudFormation is a service that helps you model and set up your Amazon Web Services resources so that you can spend less time managing those resources and more time focusing on your applications that run in AWS.\" - http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html

        "},{"location":"aws-cloudformation/#links","title":"Links","text":"
        • http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/best-practices.html
        • CloudFormer - CloudFormer is a template creation beta tool that creates an AWS CloudFormation template from existing AWS resources in your account. You select any supported AWS resources that are running in your account, and CloudFormer creates a template in an Amazon S3 bucket.
        • Sceptre - Sceptre is a tool to drive Cloudformation. Sceptre manages the creating, updating and deletion of stacks, and provides meta commands to allow users to get information about their stacks.
        • rain - A development workflow tool for working with AWS CloudFormation. (See also its other tools section)
        • https://www.singlestoneconsulting.com/blog/cloudformation-mapping-and-conditionals-making-your-templates-more-universal
        • https://sbstjn.com/blog/cloudformation - CloudFormation Best-Practices
        "},{"location":"aws-cloudformation/#examples","title":"Examples","text":""},{"location":"aws-cloudformation/#import-cloudformation-stack-entities-into-datasette","title":"Import cloudformation stack entities into Datasette","text":"
        aws cloudformation list-stack-resources --stack-name \"$STACK_NAME\" --region \"$REGION\"  |\njq -c '.[]' |\nsqlite-utils insert datasette.db stack -\n
        "},{"location":"awscli/","title":"Amazon awscli","text":"

        Official Amazon AWS command-line interface - https://aws.amazon.com/cli

        "},{"location":"awscli/#example-usage","title":"Example usage","text":""},{"location":"awscli/#show-subnets-for-a-particular-region-and-account","title":"Show subnets for a particular region and account","text":"
        aws --profile=dev --region=us-west-2 ec2 describe-subnets\n
        "},{"location":"awscli/#see-also","title":"See Also","text":"
        • Amazon
        • awless
        "},{"location":"backups/","title":"Backups","text":"

        Notes about backing up data.

        "},{"location":"backups/#links","title":"Links","text":"
        • http://duplicity.nongnu.org
        • https://www.nixtutor.com/linux/off-site-encrypted-backups-using-rsync-and-aes
        • http://www.nongnu.org/rdiff-backup
        "},{"location":"badblocks/","title":"badblocks","text":"

        badblocks is a program to test storage devices for bad blocks. - https://wiki.archlinux.org/index.php/badblocks

        "},{"location":"badblocks/#examples","title":"Examples","text":""},{"location":"badblocks/#destroy-all-data-on-a-disk-while-logging-bad-blocks","title":"Destroy all data on a disk while logging bad blocks","text":"
        ## -v verbose output writes error info to stderr\n## -s show scan progress, including percent complete, time elapsed, and error count\n## -w destructive write test, vs -n (nondestructive read/write test)\n## -b 4096 byte blocks\n## -t random test pattern\n## -o output file containing list of bad blocks, which can be passed back to badblocks, fsck or mke2fs\nbadblocks -v -s -w -b 4096 -t random -o ~/sdc.txt /dev/sdc\n
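
If the disk holds data you want to keep, the nondestructive read-write mode mentioned in the flags above is an alternative, though much slower:

## -n nondestructive read-write mode: reads, tests, and restores each block\nbadblocks -v -s -n -b 4096 -o ~/sdc.txt /dev/sdc\n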
        "},{"location":"badblocks/#see-also","title":"See also","text":"
        • dcfldd
        • dd
        • ddrescue
        • pv
        "},{"location":"bash/","title":"GNU bash","text":"

        Bash is one of the most common mainstream unix shells.

        "},{"location":"bash/#tips-and-usage-examples","title":"Tips and Usage Examples","text":""},{"location":"bash/#navigating-on-the-command-line","title":"Navigating on the command line","text":"

        The following can be seen by running: stty -a

        • ctrl-a - move cursor to the beginning of the line
        • ctrl-e - move cursor to the end of the line
        • ctrl-l - do a \"clear\" on the terminal window
        • ctrl-r - reverse history command search
        • ctrl-t - get status of foreground process
        • ctrl-w - delete previous word
        "},{"location":"bash/#view-a-list-of-all-commands-etc","title":"View a list of all commands, etc..","text":"
        • compgen -b will list all the built-ins you could run.
        • compgen -a will list all the aliases you could run.
        • compgen -c will list all the commands you could run.
        • compgen -k will list all the keywords you could run.
        • compgen -A function will list all the functions you could run.
        • compgen -back will list all the above in one go.
        "},{"location":"bash/#remove-leading-zeroes","title":"Remove leading zeroes","text":"

This method converts the numbers from base-10 to base-10, which has the side effect of removing leading zeroes. You can also use this to convert from other base systems.

        for X in 00{1..20..2} ; do\n  echo \"$X = $(( 10#${X} ))\"\ndone\n

        Or use bc, a CLI calculator...

        for X in {1..50..5} ; do\n  Y=00${X}\n  echo \"${X} with zeroes is ${Y} and removed with bc is $(echo ${Y} | bc)\"\ndone ;\n
        "},{"location":"bash/#print-several-files-side-by-side","title":"Print several files side by side","text":"
        printf \"%s\\n\" {a..z} > alpha.txt\nprintf \"%s\\n\" {1..26} > num.txt\npr -w 10 -t -m alpha.txt num.txt\n

        The following output will be printed:

        a    1\nb    2\nc    3\nd    4\ne    5\nf    6\ng    7\nh    8\ni    9\nj    10\nk    11\nl    12\nm    13\nn    14\no    15\np    16\nq    17\nr    18\ns    19\nt    20\nu    21\nv    22\nw    23\nx    24\ny    25\nz    26\n
        "},{"location":"bash/#convert-base-36-to-decimal","title":"Convert base 36 to decimal","text":"

        This converts the base 36 number z to a decimal value

        echo $((36#z))\n
        "},{"location":"bash/#run-a-command-for-5-seconds-then-kill-it","title":"Run a command for 5 seconds, then kill it","text":"
ping -f localhost & sleep 5 ; kill %1\n

        Alternatively, use the timeout command if it's available. In macOS this can be installed through brew install coreutils and accessed with gtimeout.

        timeout 300 cmd\n
        "},{"location":"bash/#test-if-a-variable-is-empty","title":"Test if a variable is empty","text":"
if [[ -z \"$var\" ]] ; then echo \"var is empty\" ; fi\n
        "},{"location":"bash/#date","title":"Date","text":"

        For date stuff, see date, because it differs by platform.

        "},{"location":"bash/#show-random-statistics","title":"Show RANDOM statistics","text":"
        for X in {0..9999} ; do\n  echo $(($RANDOM % 5)) ;\ndone |\nsort |\nuniq -c\n
        "},{"location":"bash/#named-pipes","title":"named pipes","text":"
        mkfifo baz ; ps aux > baz\n

        then, in another terminal

        cat baz\n
        "},{"location":"bash/#alternate-redirection-outputs","title":"alternate redirection outputs","text":"
exec 3> /tmp/baz ; ps aux >&3 # sends the output of ps aux to /tmp/baz\nexec 3>&- # close file descriptor 3 when done\n
        "},{"location":"bash/#redirect-all-output-of-a-script-into-a-file","title":"Redirect all output of a script into a file","text":"

        This is not bash specific, but works in bash.

##!/usr/bin/env bash\n\nexec >> /tmp/$0.log\nexec 2>&1\n\ndate \"+%F %T%z $0 This is stdout, and will be written to the log\"\ndate \"+%F %T%z $0 This is stderr, and will also be written to the log\" >&2\n
        "},{"location":"bash/#show-size-of-each-users-home-folder","title":"Show size of each user's home folder","text":"
        getent passwd |\nwhile IFS=: read -r user _ uid _ _ home _ ; do\n  if [[ $uid -ge 500 ]] ; then\n    printf \"$user \" ;\n    sudo du -sh $home ;\n  fi ;\ndone\n
        "},{"location":"bash/#previous-commands-args","title":"Previous command's args","text":"
mkdir temp\ncd !!:*\n

        Be aware of the location of the tokens. For example:

        mkdir -p {foo,bar}/{a,b,c}\nstat !!:*\n

This creates a problem because you can't stat -p, so you must use stat !!:2* to skip the -p token.

        "},{"location":"bash/#debug-a-script","title":"Debug a script","text":"

        This will show everything bash is executing

        bash -x scriptname.sh\n

        Or debug with a function:

## set debug=1 in the environment to enable debug output, which is sent to stderr\nfunction debug {\n  if [ \"${debug:-0}\" -gt 0 ] ; then\n    echo \"$@\" >&2\n  fi\n}\n
        "},{"location":"bash/#debug-nested-scripts","title":"Debug nested scripts","text":"
        PS4=\"+(\\${BASH_SOURCE}:\\${LINENO}): \\${FUNCNAME[0]:+\\${FUNCNAME[0]}(): }\" bash -x some-command\n
        "},{"location":"bash/#find-where-all-the-inodes-are","title":"Find where all the inodes are","text":"
        find ~/ -type d -print0 |\nxargs -I %% -0 bash -c \"echo -n %% ; ls -a '%%' | wc -l\" >> ~/inodes.txt\n
        "},{"location":"bash/#build-and-print-an-array","title":"Build and print an array","text":"
        array=(\"one is the first element\");\narray+=(\"two is the second element\" \"three is the third\");\necho \"${array[@]}\"\n

        This is useful for building command line strings. For example, gpsbabel requires each input file to be prepended with -f. The following script takes a list of files and uses a bash array to create a command line in the form of gpsbabel -i gpx -f input_file_1.gpx -f input_file_2.gpx -o gpx -F output.gpx

        ##!/usr/bin/env bash\n\n## Check for at least one argument, print usage if fail\nif [ $# -lt 2 ] ; then\n    echo \"This script merges gpx files and requires at least two gpx files passed as arguments. Output is output.gpx\";\n    echo \"Usage:    $0 <gpx file> <gpx file> [...<gpx file>]\";\n    exit 1;\nfi\n\n## Create an array of arguments to pass to gpsbabel\nargs=();\nfor item in \"$@\" ; do\n    if [ -f \"$item\" ] || [ -h \"$item\" ] ; then\n        args+=( \"-f\" \"$item\" );\n    else\n        echo \"Skipping $item, it's not a file or symlink.\"\n    fi\ndone;\n\n## Verify we have at least two files to work with\nif [ \"${#args[@]}\" -lt 4 ] ; then\n    echo \"We don't have enough actual files to work with. Exiting.\"\n    exit 1\nfi\n\ngpsbabel -i gpx \"${args[@]}\" -o gpx -F output.gpx\n
        "},{"location":"bash/#build-and-print-an-associative-array-dict-hash","title":"Build and print an associative array (dict, hash)","text":"
        declare -A animals=(\n  [\"cow\"]=\"moo\"\n  [\"dog\"]=\"woof woof\"\n  [\"cat\"]=\"meow\"\n) ;\nfor animal in \"${!animals[@]}\" ; do\n  echo \"The $animal says '${animals[$animal]}'\" ;\ndone ;\n
        "},{"location":"bash/#show-permissions-in-rwx-and-octal-format","title":"Show permissions in rwx and octal format","text":"

        Linux:

        stat -c '%A %a %n' filename\n

        OSX:

        stat -f '%A %N' filename\n

        See stat for more stat usage.

        "},{"location":"bash/#find-the-length-of-a-variable","title":"Find the length of a variable","text":"
        echo ${#SHELL}\n
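        The same # syntax also counts array elements rather than characters when applied to a whole array:

        array=(one two three)\necho ${#array[@]}   # 3, the number of elements\necho ${#array[0]}   # 3, the length of the string 'one'\n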
        "},{"location":"bash/#print-all-variables-that-start-with-the-substring-sh","title":"Print all variables that start with the substring SH","text":"
        echo \"${!SH@}\"\n
        "},{"location":"bash/#tertiary-type-variables","title":"Tertiary type variables","text":"
        ${V:-empty} # means \"return the value of $V, or the string 'empty' if V is unset or null\"\n
        "},{"location":"bash/#do-a-command-and-if-it-returns-false-so-some-more-stuff","title":"Do a command, and if it returns false, so some more stuff","text":"
        until command_that_will_fail ; do something_else ; done ;\n
        "},{"location":"bash/#print-two-digit-months","title":"Print two digit months","text":"

        echo {01..12} works in bash 4+, but echo {1..12} prints unpadded numbers. If brace padding is not available, use echo $(seq -w 1 12)

        "},{"location":"bash/#get-filename-extension-or-path","title":"Get filename, extension or path","text":"

        Taken from http://mywiki.wooledge.org/BashFAQ/073

        "},{"location":"bash/#rename-files-to-a-sequence-and-change-their-extension-at-the-same-time","title":"Rename files to a sequence and change their extension at the same time","text":"
        ls | while read -r line ; do\n  stub=${line%.*} ;\n  (( i += 1 )) ;\n  mv \"${line}\" \"${i}-${stub}.txt3\" ;\ndone ;\n
        FullPath=/path/to/name4afile-00809.ext   # result:   #   /path/to/name4afile-00809.ext\nFilename=${FullPath##*/}                             #   name4afile-00809.ext\nPathPref=${FullPath%\"$Filename\"}                     #   /path/to/\nFileStub=${Filename%.*}                              #   name4afile-00809\nFileExt=${Filename#\"$FileStub\"}                      #   .ext\n
        "},{"location":"bash/#sort-a-line-by-spaces","title":"Sort a line by spaces","text":"
        s=( whiskey tango foxtrot );\nsorted=$(printf \"%s\\n\" \"${s[@]}\" | sort);\necho $sorted\n
        "},{"location":"bash/#calculate-the-difference-between-two-dates","title":"Calculate the difference between two dates","text":"
        echo $(( $(gdate +%s -d 20120203) - $(gdate +%s -d 20120115) ))\n
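        To express the difference in days rather than seconds, divide by 86400, the number of seconds in a day:

        echo $(( ( $(gdate +%s -d 20120203) - $(gdate +%s -d 20120115) ) / 86400 ))   # 19\n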
        "},{"location":"bash/#substring-replace-a-variable","title":"substring replace a variable","text":"

        This is not regex, just a simple string replacement.

        # ${VAR/search/replace} does only the first\n# ${VAR//search/replace} does all replacements\necho \"Paths in your path: ${PATH//:/ }\"\n
        "},{"location":"bash/#subtract-two-from-a-mac-address","title":"Subtract two from a MAC address","text":"
        # printf -v defines a variable instead of printing to stdout\nprintf -v dec \"%d\" 0x$(echo 00:25:9c:52:1c:2a | sed 's/://g') ;\nlet dec=${dec}-2 ;\nprintf \"%012X\" ${dec} \\\n| sed -E 's/(..)(..)(..)(..)(..)(..)/\\1:\\2:\\3:\\4:\\5:\\6/g'\n
        "},{"location":"bash/#print-the-last-for-chars-of-a-variable","title":"Print the last for chars of a variable","text":"
        • echo ${foo:$((${#foo}-4))}
        • echo ${foo: -4} (the space is necessary to prevent it from doing a completely different thing; see the default-value ${foo:-...} examples below)
        "},{"location":"bash/#dereference-a-variable","title":"Dereference a variable","text":"
        $ for var in ${!BASH_V*} ; do echo \"${var}: ${!var}\" ; done ;\nBASH_VERSINFO: 5\nBASH_VERSION: 5.0.7(1)-release\n
        "},{"location":"bash/#print-something-else-if-a-variable-doesnt-exist","title":"Print something else if a variable doesn't exist","text":"
        • echo ${foo:-foo isn't assigned}
        • echo ${foo:-${bar}}

        This can even be recursively done...

        • echo ${foo:-${bar:-foo and bar are not assigned}}
        "},{"location":"bash/#print-every-third-number-starting-with-1-and-ending-with-30","title":"Print every third number starting with 1 and ending with 30","text":"

        echo {1..30..3}   # 1 4 7 10 13 16 19 22 25 28

        "},{"location":"bash/#print-every-5th-letter-of-the-alphabet","title":"Print every 5th letter of the alphabet","text":"

        echo {a..z..5}   # a f k p u z

        "},{"location":"bash/#process-all-lines-but-print-out-status-about-what-line-we-are-on-every-nth-line","title":"Process all lines, but print out status about what line we are on every Nth line","text":"

        Sometimes during a series of long-running jobs you want to see the status of where you are at, or at least some indicator that things have not paused. When ctrl-t is not available (and even when it is) this pattern can help you monitor that things are still moving along.

        N=0\nfind \"/usr/bin\" -type f |\nwhile read -r X ; do\n  N=$((N + 1))\n  [[ \"$((N % 50))\" -eq 0 ]] && date \"+%F %T file number $N $X\" >&2\n  shasum -a 512 \"${X}\" >> ~/usr_bin_shasums.txt\ndone\n

        Example terminal output from the above command, while all shasum output goes into ~/usr_bin_shasums.txt:

        $ find \"/usr/bin\" -type f |\n> while read -r X ; do\n>   N=$((N + 1))\n>   [[ \"$((N % 50))\" -eq 0 ]] && date \"+%F %T file number $N $X\" >&2\n>   shasum -a 512 \"${X}\" >> ~/usr_bin_shasums.txt\n> done\n2018-02-24 15:30:29 file number 50 /usr/bin/toe\n2018-02-24 15:30:30 file number 100 /usr/bin/db_hotbackup\n2018-02-24 15:30:32 file number 150 /usr/bin/host\n2018-02-24 15:30:33 file number 200 /usr/bin/groffer\n2018-02-24 15:30:35 file number 250 /usr/bin/mail\n2018-02-24 15:30:36 file number 300 /usr/bin/dbicadmin\n2018-02-24 15:30:38 file number 350 /usr/bin/fwkpfv\n2018-02-24 15:30:39 file number 400 /usr/bin/tab2space\n
        "},{"location":"bash/#make-a-directory-structure-of-every-combination-of-adjectivenoun","title":"Make a directory structure of every combination of /adjective/noun","text":"

        mkdir -p {red,green,blue}/{fish,bird,flower}

        "},{"location":"bash/#generate-a-zero-padded-random-2-byte-hex-number","title":"Generate a zero padded random 2 byte hex number","text":"

        printf \"%02X\\n\" $((RANDOM % 256))

        "},{"location":"bash/#grep-many-log-files-and-sort-output-by-date","title":"grep many log files and sort output by date","text":"
        sudo grep cron /var/log/* |\nsed 's/:/ /' |\nwhile read -r file month day hour line ; do\n  date -d \"$month $day $hour\" \"+%F %T%z ${file} ${line}\" ;\ndone |\nsort\n
        "},{"location":"bash/#get-command-line-switches","title":"Get command line switches","text":"

        From the docs

        • If a character is followed by a colon, the option is expected to have an argument.
        • If the first character of optstring is a colon, silent error reporting is used.
        while getopts p:l:t: opt; do\n  case $opt in\n    p) pages=$OPTARG ;;\n    l) length=$OPTARG ;;\n    t) time=$OPTARG ;;\n  esac\ndone\n\nshift $((OPTIND - 1))\necho \"pages is ${pages}\"\necho \"length is ${length}\"\necho \"time is ${time}\"\necho \"\\$1 is $1\"\necho \"\\$2 is $2\"\n

        Call this script as ./foo.sh -p \"this is p\" -l llll -t this\\ is\\ t foo bar

        "},{"location":"bash/#unexpected-code-execution","title":"Unexpected code execution","text":"

        When a test expression uses array subscript syntax, the subscript is evaluated arithmetically, so any command substitution inside it is executed:

        $ rm -f pwnd ; [[ -v '$(echo hello > pwnd)' ]] ; cat pwnd ; # does not use array syntax\ncat: pwnd: No such file or directory\n$ rm -f pwnd ; [[ -v 'x[$(echo hello > pwnd)]' ]] ; cat pwnd ; # uses array syntax\nhello\n

        This also happens with -eq

        $ rm -f pwnd ; [[ 0 -eq 'x$(echo hello > pwnd)' ]] ; cat pwnd ; # does not use array syntax\n-bash: [[: x$(echo hello > pwnd): syntax error: invalid arithmetic operator (error token is \"$(echo hello > pwnd)\")\ncat: pwnd: No such file or directory\n$ rm -f pwnd ; [[ 0 -eq 'x[$(echo hello > pwnd)]' ]] ; cat pwnd ; # uses array syntax\nhello\n

        Via https://yossarian.net/til/post/some-surprising-code-execution-sources-in-bash
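        One defensive pattern (a sketch, not the only mitigation) is to verify that untrusted input is purely numeric before it reaches an arithmetic test:

        input='x[$(echo hello > pwnd)]'\nif [[ $input =~ ^-?[0-9]+$ ]] ; then\n  [[ $input -eq 0 ]] && echo \"input is zero\"\nelse\n  echo \"rejecting non-numeric input\" >&2\nfi\n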

        "},{"location":"bash/#files","title":"Files","text":"

        These files can change the behavior of bash.

        "},{"location":"bash/#bash_profile","title":".bash_profile","text":"

        ~/.bash_profile is executed every time you log into the system or initiate a login shell. Commands that write to stdout are safe here, unlike in ~/.bashrc.

        If you want to write scripts that change your interactive shell environment, such as changing your CWD, define functions here instead of using stand-alone scripts.
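        A minimal sketch of such a function (the name cdtemp is hypothetical; it mirrors the temp alias in the example below):

        cdtemp() {\n  # A stand-alone script runs in a child process and cannot change\n  # the interactive shell's CWD, but a function can.\n  mkdir -p ~/temp/\"$(date +%F)\" && cd ~/temp/\"$(date +%F)\"\n}\n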

        "},{"location":"bash/#example-bash_profile","title":"Example .bash_profile","text":"

        The ~/.bash_profile file can be quite long and complicated. The following example is an incomplete sample:

        export EDITOR=/usr/bin/vim\nexport GZIP='-9'\nexport HISTSIZE=5000\nexport HISTTIMEFORMAT='%F %T%z '\nexport PS1=\"\\u@\\h:\\w$ \"\nexport TERM=xterm-256color\nexport TMOUT=\"1800\"  # log out after this many seconds of shell inactivity\n\nalias ll='ls -la'\nalias temp='date_F=$(date +%F) ; mkdir -p ~/temp/$date_F 2>/dev/null ; cd ~/temp/$date_F'\n\nsprunge() { curl -F 'sprunge=<-' http://sprunge.us < \"${1:-/dev/stdin}\"; } # usage: sprunge FILE # or some_command | sprunge\n\n# Don't record some commands\nexport HISTIGNORE=\"&:[ ]*:exit:ls:bg:fg:history:clear\"\n\n# Avoid duplicate entries\nHISTCONTROL=\"erasedups:ignoreboth\"\n\n# Perform file completion in a case insensitive fashion\nbind \"set completion-ignore-case on\"\n
        "},{"location":"bash/#bashrc","title":".bashrc","text":"

        ~/.bashrc is executed every time you start an interactive non-login shell, such as a sub-shell or a new terminal window. It should not output any text, otherwise certain things (eg: scp) will fail.
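        A common guard (a sketch) is to return early for non-interactive shells, so nothing is ever printed when tools like scp spawn a shell:

        # near the top of ~/.bashrc\ncase $- in\n  *i*) ;;        # interactive shell: carry on\n  *) return ;;   # non-interactive: stop here and print nothing\nesac\n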

        "},{"location":"bash/#inputrc","title":"~/.inputrc","text":"

        This file configures GNU readline, which bash uses for line editing. It also affects other tools that use readline.

        # Ignore case while completing\nset completion-ignore-case on\n
        "},{"location":"bash/#links","title":"Links","text":"
        • Command Line Quicksheet: http://www.pixelbeat.org/cmdline.html
        • Tons of BASH examples: http://mywiki.wooledge.org/BashFAQ
        • Bash Manual: Bash Variables
        • Bash pitfalls: http://mywiki.wooledge.org/BashPitfalls
        • Bash prompt howto, including colors: http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/x329.html
        • Bash Automated Test System
        • http://www.kfirlavi.com/blog/2012/11/14/defensive-bash-programming/
        • https://google.github.io/styleguide/shellguide.html
        • https://www.gnu.org/software/bash/manual/html_node/Bash-Variables.html
        • https://www.shellcheck.net/: Use this cli tool to help you write awesome shell scripts.
        • https://www.pcwdld.com/bash-cheat-sheet: Cheatsheet with some interesting examples not covered in many other resources.
        • https://kapeli.com/cheat_sheets/Bash_Test_Operators.docset/Contents/Resources/Documents/index
        • https://utcc.utoronto.ca/~cks/space/blog/unix/DropShellCommandHashing
        "},{"location":"bbcp/","title":"bbcp","text":"

        \"Securely and quickly copy data from source to target.\" - https://www.slac.stanford.edu/~abh/bbcp/

        This is a useful tool for copying files. Notably, it got around some throughput limitations of nc that I ran into when trying to copy one large file across an 80Gbps network.

        "},{"location":"bc/","title":"GNU bc","text":"

        bc is an arbitrary precision calculator that does math on the CLI.

        "},{"location":"bc/#examples","title":"Examples","text":""},{"location":"bc/#divide-one-number-into-another-and-show-two-decimal-places","title":"Divide one number into another and show two decimal places","text":"

        The scale variable sets the number of digits after the decimal point.

        echo \"scale=2 ; 7 / 3\" | bc

        "},{"location":"bc/#convert-decimal-to-hexadecimal","title":"Convert decimal to hexadecimal","text":"

        echo \"obase=16 ; 10\" | bc

        "},{"location":"bc/#convert-hexadecimal-to-binary","title":"Convert hexadecimal to binary","text":"

        echo \"ibase=16 ; obase=2 ; AF\" | bc

        "},{"location":"bc/#subtract-two-from-the-last-octet-of-a-mac-address","title":"Subtract two from the last octet of a MAC address","text":"
        echo 24:b6:fd:ff:ba:31 |\nwhile read -r X ; do\n  echo ${X%??}$(\n    echo \"obase=16 ; $(( 0x${X#*:??:??:??:??:} )) - 2\" |\n      bc |\n      sed 's/^\\(.\\)$/0\\1/' |\n      tr A-Z a-z\n  ) ;\ndone ;\n
        "},{"location":"bind/","title":"BIND","text":"

        BIND, or named, is the most widely used Domain Name System (DNS) software on the Internet.

        • https://www.isc.org/downloads/bind/doc/
        • https://en.wikipedia.org/wiki/BIND
        "},{"location":"bind/#flush-records","title":"Flush records","text":""},{"location":"bind/#flush-a-single-record","title":"Flush a single record","text":"
        rndc flushname github.com\n
        "},{"location":"bind/#flush-all-records","title":"Flush all records","text":"
        rndc flush\n
        "},{"location":"blkid/","title":"blkid","text":"

        \"The blkid program is the command-line interface to working with the libblkid(3) library. It can determine the type of content (e.g. filesystem or swap) that a block device holds, and also attributes (tokens, NAME=value pairs) from the content metadata (e.g. LABEL or UUID fields). blkid has two main forms of operation: either searching for a device with a specific NAME=value pair, or displaying NAME=value pairs for one or more specified devices.\" - man blkid

        "},{"location":"blkid/#examples","title":"Examples","text":""},{"location":"blkid/#simple-usage","title":"Simple usage","text":"

        Here is the output of blkid on an Ubuntu 16.04 Vagrant box:

        $ blkid\n/dev/sda1: LABEL=\"cloudimg-rootfs\" UUID=\"743b1402-d445-494c-af0b-749040bb33e4\" TYPE=\"ext4\" PARTUUID=\"95a4c157-01\"\n/dev/sdb: UUID=\"2017-12-12-14-38-00-00\" LABEL=\"cidata\" TYPE=\"iso9660\"\n
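        You can also resolve a device from one of those NAME=value pairs, which is handy in scripts (a sketch using the label and UUID from the output above):

        blkid -L cloudimg-rootfs                        # prints the matching device, e.g. /dev/sda1\nblkid -U 743b1402-d445-494c-af0b-749040bb33e4   # the same, matched by UUID\n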
        "},{"location":"blkid/#see-also","title":"See Also","text":"
        • findmnt
        • lsblk
        "},{"location":"bluetooth/","title":"bluetooth","text":""},{"location":"bluetooth/#examples","title":"Examples","text":""},{"location":"bluetooth/#linux-software","title":"Linux software","text":"
        • bluetoothctl: pair, connect, get info on bluetooth devices
        • gatttool
        "},{"location":"bpf/","title":"bpf","text":"

        \"Linux Socket Filtering (LSF) is derived from the Berkeley Packet Filter. Though there are some distinct differences between the BSD and Linux Kernel filtering, but when we speak of BPF or LSF in Linux context, we mean the very same mechanism of filtering in the Linux kernel.\"

        • https://www.kernel.org/doc/Documentation/networking/filter.txt
        • https://lwn.net/Articles/599755/
        • https://www.facebook.com/atscaleevents/videos/1693888610884236/
        • http://iovisor.github.io/bcc/
        • http://www.brendangregg.com/blog/2015-05-15/ebpf-one-small-step.html
        • https://github.com/sharklinux/shark
        "},{"location":"c/","title":"C","text":"

        \"C (pronounced like the letter c) is a general-purpose computer programming language. It was created in the 1970s by Dennis Ritchie, and remains very widely used and influential.\" - https://en.wikipedia.org/wiki/C_(programming_language)

        The Linux kernel is > 98% C code.

        "},{"location":"c/#links","title":"Links","text":"
        • https://en.wikipedia.org/wiki/C_(programming_language)
        • https://tmewett.com/c-tips
        "},{"location":"calico/","title":"calico","text":"

        \"Calico provides secure network connectivity for containers and virtual machine workloads.\" - https://docs.projectcalico.org/v3.1/introduction/

        "},{"location":"calico/#kubernetes-examples","title":"Kubernetes Examples","text":"

        Calico works in several environments, but these examples all apply to Kubernetes.

        "},{"location":"calico/#installation","title":"Installation","text":"

        https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/

        "},{"location":"calico/#show-a-bunch-of-info-about-your-calico-config","title":"Show a bunch of info about your calico config","text":"

        See also https://docs.projectcalico.org/v2.0/reference/calicoctl/resources/

        for X in bgpPeer hostEndpoint ipPool node policy profile workloadEndpoint ; do\n  echo \"=========== $X\"\n  calicoctl get $X 2>/dev/null\ndone\n
        "},{"location":"calico/#links","title":"Links","text":"
        • https://docs.projectcalico.org/latest/
        • https://docs.projectcalico.org/v2.0/reference/calicoctl/resources/
        • https://kubernetes.io/docs/concepts/services-networking/network-policies/
        "},{"location":"calver/","title":"CalVer","text":"

        \"CalVer is a software versioning convention that is based on your project's release calendar, instead of arbitrary numbers.\" - https://calver.org/

        "},{"location":"calver/#links","title":"Links","text":"
        • https://calver.org/
        • Cockroach Labs: Why we're switching to calendar versioning / https://news.ycombinator.com/item?id=19658969
        • https://news.ycombinator.com/item?id=21967879
        "},{"location":"centos/","title":"CentOS Linux","text":"

        \"The CentOS Project is a community-driven free software effort focused on delivering a robust open source ecosystem.\" - https://www.centos.org/

        "},{"location":"centos/#centos-7","title":"CentOS 7","text":"
        • Released 2014-07-07
        "},{"location":"centos/#new-things-in-centos-7","title":"New things in CentOS 7","text":"
        • firewalld manages the firewall
        • hostnamectl changes the hostname and applies the setting immediately
        • journalctl shows log files of services launched by systemd
        • systemctl manages systemd services
        "},{"location":"centos/#initial-setup","title":"Initial setup","text":"

        Set up some base parameters on a fresh instance

        yum install -y bash-completion bc curl git lsof mlocate mutt net-snmp ntp smartmontools strace sysstat vim wget\nln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime\nntpdate {0..3}.pool.ntp.org\nsystemctl start ntpd\n
        "},{"location":"centos/#centos-6","title":"CentOS 6","text":"
        • Released 2011-07-10
        "},{"location":"centos/#centos-6-initial-setup","title":"CentOS 6 Initial Setup","text":"
        yum install -y ntp\nchkconfig --levels 345 ntpd on && ntpdate time.apple.com && service ntpd start\nyum upgrade -y\nyum install -y arping avahi avahi-tools bc bind-utils curl elinks fping lsof net-snmp man mlocate mutt openssh openssh-clients openssh-server perl-Crypt-SSLeay perl-libwww-perl rsync strace vim wget yum-cron\nln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime\nchkconfig --levels 345 yum-cron on && service yum-cron start\nyum install -y dcfldd nfs-utils smartmontools dmidecode lshw dstat htop iotop\nchkconfig --levels 345 smartd on && service smartd start\n
        "},{"location":"centos/#tweaks-and-tricks","title":"Tweaks and Tricks","text":""},{"location":"centos/#get-past-protected-lib-problems","title":"Get past protected lib problems","text":"

        yum update --setopt=protected_multilib=false --skip-broken

        "},{"location":"centos/#enable-dhcp-hostname-for-dns-resolution","title":"Enable DHCP Hostname for DNS resolution","text":"

        add \"DHCP_HOSTNAME=whatever\" to /etc/sysconfig/network-scripts/ifcfg-eth0

        "},{"location":"centos/#install-os-from-usb","title":"Install OS from USB","text":"
        • From Windows: http://iso2usb.sourceforge.net/
        • From Linux: https://fedoraproject.org/wiki/Livecd-iso-to-disk
        "},{"location":"centos/#show-installed-repository-keys","title":"Show installed repository keys","text":"

        rpm -q gpg-pubkey --qf '%{name}-%{version}-%{release} --> %{summary}\\n'

        "},{"location":"centos/#dhcp-with-ddns-hostname","title":"DHCP with DDNS hostname","text":"

        Model your /etc/sysconfig/network-scripts/ifcfg-eth0 like this:

        TYPE=Ethernet\nDEVICE=eth0\nONBOOT=yes\nBOOTPROTO=dhcp\n# Without the following line, dhclient will not update /etc/resolv.conf and may not get an IP address at all\nDHCP_HOSTNAME=some_hostname\n
        • To configure your hostname, edit /etc/sysconfig/network and add HOSTNAME=
        • You may also need to delete these files:
        • rm -f /etc/dhclient-eth?.conf /var/lib/dhclient/dhclient-eth?.leases /etc/udev/rules.d/70-persistent-net.rules /etc/sysconfig/network-scripts/ifcfg-eth1\n
          "},{"location":"centos/#bond-configs","title":"Bond Configs","text":"
          $ cat /etc/modprobe.d/bond0.conf\nalias bond0 bonding\noptions bond0 max_bonds=1 fail_over_mac=2 mode=1 num_grat_arp=2 primary=em1 primary_reselect=1 arp_validate=1 arp_interval=100 arp_ip_target=10.1.5.15,10.1.1.1\n
          "},{"location":"centos/#see-also","title":"See Also","text":"
          • chkconfig
          • rpm
          • selinux - https://wiki.centos.org/HowTos/SELinux
          • yum
          "},{"location":"ceph/","title":"Ceph","text":"

          \"Ceph is a unified, distributed storage system designed for excellent performance, reliability and scalability.\" - https://ceph.com

          "},{"location":"ceph/#glossary","title":"Glossary","text":"

          http://docs.ceph.com/docs/master/glossary/

          • Ceph OSD: The Ceph OSD software, which interacts with a logical disk (OSD).
          • CephFS: The POSIX filesystem components of Ceph.
          • MDS: (Ceph Metadata Server) The Ceph metadata software.
          • MGR: (Ceph Manager) The Ceph manager software, which collects all the state from the whole cluster in one place.
          • MON: (Ceph Monitor) The Ceph monitor software.
          • OSD: (Object Storage Device) A physical or logical storage unit.
          • RADOS: Reliable Autonomic Distributed Object Store.
          • RBD: The block storage component of Ceph.
          • RGW: The S3/Swift gateway component of Ceph.
          • PG: Placement Group. http://docs.ceph.com/docs/master/rados/operations/placement-groups/
          "},{"location":"ceph/#examples","title":"Examples","text":""},{"location":"ceph/#activate-all-osds","title":"Activate all OSDs","text":"
          sudo ceph-disk activate-all\n

          Starting with ceph 13, use:

          ceph-volume lvm activate --all\n
          "},{"location":"ceph/#start-all-ceph-services","title":"Start all ceph services","text":"
          sudo systemctl start ceph.target\n
          "},{"location":"ceph/#stop-all-ceph-services","title":"Stop all ceph services","text":"

          Unfortunately there's not a single service or target to stop, so you have to use globs

          sudo systemctl stop '*ceph*'\n
          "},{"location":"ceph/#show-the-status-of-all-osds-in-the-cluster","title":"Show the status of all osds in the cluster","text":"
          ceph osd status\n

          Or alternatively

          ceph osd tree\n
          "},{"location":"ceph/#show-metadata-about-all-osds-in-the-cluster","title":"Show metadata about all osds in the cluster","text":"

        This produces a JSON list with a dict for each OSD.

          ceph osd metadata\n
          "},{"location":"ceph/#show-all-pools","title":"Show all pools","text":"
          ceph osd lspools\n
          "},{"location":"ceph/#list-all-rbd-images-in-a-pool","title":"List all RBD images in a pool","text":"
          pool_name=\"kube\"\nrbd list \"$pool_name\"\n
          "},{"location":"ceph/#show-rbd-usage-stats","title":"Show rbd usage stats","text":"

          This will show name, provisioned, used, and will have a sum at the bottom, with sizes defaulting to human readable units. You can use --format json to get raw byte usage.

          rbd disk-usage --pool $pool_name $optional_rbd_name\n
          "},{"location":"ceph/#map-an-rbd-image-to-a-system-device","title":"Map an RBD image to a system device","text":"
          pool_name=\"kube\"\nrbd_image_name=\"testimage\"\nrbd map \"$pool_name/$rbd_image_name\"\n

          Then you can mount whatever the resulting device is. -o X-mount.mkdir automatically creates the destination mount point, but may not be available on some systems.

          mount -o X-mount.mkdir /dev/rbd8 /mnt/rbd8\n
          "},{"location":"ceph/#list-snapshots-for-an-rbd-image","title":"List snapshots for an RBD image","text":"
          pool_name=\"kube\"\nrbd_image_name=\"testimage\"\nrbd snap list \"$pool_name/$rbd_image_name\"\n
          "},{"location":"ceph/#copy-a-snapshot-to-an-image-so-it-can-be-mounted","title":"Copy a snapshot to an image so it can be mounted","text":"
          pool_name=\"kube\"\nrbd_image_name=\"testimage\"\nsnap_name=\"snap-072519-213210\"\nrbd clone \"$pool_name/$rbd_image_name@$snap_name\" \"$pool_name/image-$snap_name\"\n

          After this you can map the new image and mount it as described above.
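        Continuing the example, that looks like this (a sketch reusing the variables above; the device name and mount point are illustrative):

        rbd map \"$pool_name/image-$snap_name\"   # prints the new device, e.g. /dev/rbd9\nmount -o X-mount.mkdir /dev/rbd9 \"/mnt/image-$snap_name\"\n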

          "},{"location":"ceph/#monitor-existing-operations","title":"Monitor existing operations","text":"
          ceph daemon \"mon.$MON_HOSTNAME\" ops\n
          "},{"location":"ceph/#links","title":"Links","text":"
          • Ceph Intro & Architectural Overview - https://youtu.be/7I9uxoEhUdY
          • http://docs.ceph.com/ceph-ansible/master/
          • http://docs.ceph.com/docs/master/rados/configuration/network-config-ref/
          • http://docs.ceph.com/docs/master/rados/operations/operating/
          • http://docs.ceph.com/docs/master/start/quick-ceph-deploy/
          "},{"location":"chkconfig/","title":"chkconfig","text":"

        chkconfig is a tool for managing SysV init scripts on CentOS/RHEL hosts, and probably others.

          "},{"location":"chkconfig/#examples","title":"Examples","text":""},{"location":"chkconfig/#list-services-and-their-runlevels","title":"List services and their runlevels","text":"
          chkconfig --list\n
          "},{"location":"chkconfig/#turn-on-mysql-at-runlevels-3-and-5","title":"Turn on mysql at runlevels 3 and 5","text":"
          chkconfig --level 35 mysql on\n
          "},{"location":"chkconfig/#see-also","title":"See Also","text":"
          • update-rc.d - similar feature for ubuntu sysvinit
          "},{"location":"chocolatey/","title":"Choclatey","text":"

          \"The package manager for Windows\" - https://chocolatey.org

          "},{"location":"chocolatey/#examples","title":"Examples","text":"

          choco has to be run from an admin shell.

          "},{"location":"chocolatey/#search-for-a-package","title":"Search for a package","text":"
          choco search xencenter\n
          "},{"location":"chocolatey/#install-software-and-all-its-requirements","title":"Install software and all its requirements","text":"
          choco install xencenter -y\n
          "},{"location":"chronos/","title":"chronos","text":"

          \"Chronos is a replacement for cron. It is a distributed and fault-tolerant scheduler that runs on top of Apache Mesos that can be used for job orchestration.\" - https://mesos.github.io/chronos/

        • Uses ISO 8601 repeating interval notation, but the P duration designator is required, so you can only use the syntax Rn/<datespec>/PT4H - see http://mesos.github.io/chronos/docs/api.html#adding-a-scheduled-job and the example below
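        For example (a sketch of the notation), a job that first runs at midnight UTC on 2020-01-01 and repeats every 4 hours forever would use this schedule string:

        R/2020-01-01T00:00:00Z/PT4H\n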
          "},{"location":"circleci/","title":"CircleCI","text":"

          \"CircleCI's continuous integration and delivery platform makes it easy for teams of all sizes to rapidly build and release quality software at scale. Build for Linux, macOS, and Android, in the cloud or behind your firewall.\" - https://circleci.com/

          "},{"location":"circleci/#links","title":"Links","text":"
          • https://circleci.com/docs/2.0/configuration-reference/
          • https://circleci.com/docs/2.0/sample-config/
          • https://circleci.com/docs/2.0/circleci-images/
          • https://circleci.com/docs/2.0/workflows/
          • https://discuss.circleci.com/
          "},{"location":"circuitpython/","title":"circuitpython","text":"

          \"The easiest way to program microcontrollers. CircuitPython is a programming language designed to simplify experimenting and learning to code on low-cost microcontroller boards. \" - https://circuitpython.org

          "},{"location":"circuitpython/#examples","title":"Examples","text":""},{"location":"circuitpython/#disable-circuitpython-usb-drive","title":"Disable CIRCUITPYTHON usb drive","text":"

          Create a boot.py file in the drive with the following contents:

          import storage\n\nstorage.disable_usb_drive()\n

          To re-enable the drive, use the serial console to access the repl and comment out all of the contents of boot.py by running:

          import storage\n\nwith open('boot.py', mode='r') as f:\n    contents = [f'#{x}' for x in f.readlines()]\n\nwith open('boot.py', mode='w') as f:\n    f.write(''.join(contents))\n

          or just delete the boot.py file with:

          import os, storage\nstorage.remount('/', readonly=False)\nos.remove('/boot.py')\n
          "},{"location":"circuitpython/#paste-mode","title":"Paste mode","text":"

        You can enter paste mode by pressing CTRL-E. This lets you paste in multi-line code that will not be interpreted until you press CTRL-D. This also lets you paste code that has spaces in it without the REPL saying your syntax is invalid due to indentation problems or other whitespace nuances of the normal REPL.

          "},{"location":"circuitpython/#see-also","title":"See also","text":"
          • python
          • micropython
          • microcontrollers
          "},{"location":"cncf/","title":"CNCF","text":"

          \"CNCF serves as the vendor-neutral home for many of the fastest-growing projects on GitHub, including Kubernetes, Prometheus and Envoy, fostering collaboration between the industry\u2019s top developers, end users, and vendors.\" - https://www.cncf.io

          "},{"location":"cobbler/","title":"Cobbler","text":"

          \"Cobbler is a Linux installation server that allows for rapid setup of network installation environments.\" -- http://cobbler.github.io

          "},{"location":"cobbler/#notes","title":"Notes","text":"
          • Versions prior to 2.6.9 can no longer auto-download loaders
          "},{"location":"cobbler/#links","title":"Links","text":"
          • https://cobbler.github.io/
          • https://cobbler.github.io/quickstart/
          • https://download.opensuse.org/repositories/home:/libertas-ict:/cobbler26/CentOS_CentOS-6/noarch/ - Newer than epel CentOS 6 rpms
          • https://github.com/rhinstaller/pykickstart
          "},{"location":"colorblindness/","title":"Colorblindness","text":"

          A significant fraction of the human population has color deficient vision. Designing information systems so these people can access them is important, and often overlooked.

          "},{"location":"colorblindness/#links","title":"Links","text":"
          • https://jfly.uni-koeln.de/color: \"Color Universal Design (CUD) - How to make figures and presentations that are friendly to Colorblind people\"
          • https://www.cs.unm.edu/~aaron/creative/colorTest.htm: Reverse colorblind test
        • https://paletton.com/: Pick color palettes, with a colorblind simulator
          • https://colororacle.org: Colorblindness simulation app
          • https://wearecolorblind.com
          • https://www.color-blindness.com
          • https://asada0.tumblr.com/post/11517603099/the-day-i-saw-van-goghs-genius-in-a-new-light
          • http://www.vischeck.com
          • https://www.biyee.net/color-science/color-vision-test
          • https://www.joshwcomeau.com/css/make-beautiful-gradients: avoid grey, dull colors when making gradients by using HSL instead of RGB.
          • https://bsago.me/posts/that-annoying-shade-of-blue: Not really color blindness, but discusses human color perception and technology.
        • https://ericportis.com/posts/2024/okay-color-spaces: Exploration of color spaces.
          • https://jlongster.com/why-chromaticity-shape: \"Why does the chromaticity diagram look like that?\"
          "},{"location":"computing/","title":"Computing","text":"

          General notes about technology. Basically tech bookmarks.

          "},{"location":"computing/#links","title":"Links","text":"
          • http://emulator101.com - Great introduction to CPU architectures and assembly language.
          • https://www.progsbase.com/blog/flow-charts-of-programming-language-constructs/ - Flow-Charts of Programming Language Constructs
          "},{"location":"consul/","title":"consul","text":"

          \"Service Discovery and Configuration Made Easy\" - https://www.consul.io/

          "},{"location":"consul/#links","title":"Links","text":"
          • https://www.consul.io/docs/internals/architecture.html
          "},{"location":"cookiecutter/","title":"cookiecutter","text":""},{"location":"cookiecutter/#examples","title":"Examples","text":""},{"location":"cookiecutter/#find-a-list-of-cookiecutter-variables","title":"Find a list of cookiecutter variables","text":"
          grep -h -o '{{cookiecutter[^}]*}}' \\{\\{cookiecutter.repo_name\\}\\}/* | sort | uniq -c\n
          "},{"location":"cookiecutter/#links","title":"Links","text":"
          • Cookiecutter: Better Project Templates
          • A pantry full of cookiecutters
          "},{"location":"cowsay/","title":"cowsay","text":"

        In Linux and elsewhere, print a cow saying something. Also works as cowthink, and a variety of other animals and artwork are available.

          "},{"location":"cowsay/#examples","title":"Examples","text":""},{"location":"cowsay/#get-a-list-of-things-that-can-talk","title":"Get a list of things that can talk","text":"
          $ cowthink -l\nCow files in /usr/share/cowsay/cows:\napt beavis.zen bong bud-frogs bunny calvin cheese cock cower daemon default\ndragon dragon-and-cow duck elephant elephant-in-snake eyes flaming-sheep\nghostbusters gnu head-in hellokitty kiss kitty koala kosh luke-koala\nmech-and-cow meow milk moofasa moose mutilated pony pony-smaller ren sheep\nskeleton snowman sodomized-sheep stegosaurus stimpy suse three-eyes turkey\nturtle tux unipony unipony-smaller vader vader-koala www\n
          "},{"location":"cowsay/#cowsay_1","title":"cowsay","text":"
          $ cowsay \"Hello world!\"\n ______________\n< Hello world! >\n --------------\n        \\   ^__^\n         \\  (oo)\\_______\n            (__)\\       )\\/\\\n                ||----w |\n                ||     ||\n
          "},{"location":"cowsay/#cowthink","title":"cowthink","text":"
          $ cowthink -f dragon \"On the internet, nobody knows you're a dragon!\"\n ________________________________________\n( On the internet, nobody knows you're a )\n( dragon!                                )\n ----------------------------------------\n      o                    / \\  //\\\n       o    |\\___/|      /   \\//  \\\\\n            /0  0  \\__  /    //  | \\ \\\n           /     /  \\/_/    //   |  \\  \\\n           @_^_@'/   \\/_   //    |   \\   \\\n           //_^_/     \\/_ //     |    \\    \\\n        ( //) |        \\///      |     \\     \\\n      ( / /) _|_ /   )  //       |      \\     _\\\n    ( // /) '/,_ _ _/  ( ; -.    |    _ _\\.-~        .-~~~^-.\n  (( / / )) ,-{        _      `-.|.-~-.           .~         `.\n (( // / ))  '/\\      /                 ~-. _ .-~      .-~^-.  \\\n (( /// ))      `.   {            }                   /      \\  \\\n  (( / ))     .----~-.\\        \\-'                 .~         \\  `. \\^-.\n             ///.----..>        \\             _ -~             `.  ^-`  ^-_\n               ///-._ _ _ _ _ _ _}^ - - - - ~                     ~-- ,.-~\n                                                                  /.-~\n
          "},{"location":"cpp/","title":"C++","text":"

          \"C++ (pronounced \"C plus plus\") is a high-level general-purpose programming language created by Danish computer scientist Bjarne Stroustrup as an extension of the C programming language, or \"C with Classes\".\" - https://en.wikipedia.org/wiki/C%2B%2B

          "},{"location":"cpp/#links","title":"Links","text":"
          • https://www.learncpp.com
          • https://cplusplus.com
          • https://github.com/federico-busato/Modern-CPP-Programming
          "},{"location":"cradlepoint/","title":"Cradlepoint","text":"

          \"Founded in 2006, Cradlepoint has grown to become the industry leader in cloud-delivered 4G LTE network solutions for business, service providers, and government organizations, and we are committed to extending our leadership into the emerging 5G space.\" - https://cradlepoint.com/company/about

          "},{"location":"cradlepoint/#cradlepoint-ibr900","title":"Cradlepoint IBR900","text":"
          • https://cradlepoint.com/products/cor-ibr900-series
          • https://cradlepoint.com/sites/default/files/upload-file/cradlepoint_ibr900_manual_0.pdf

        The CLI is not a normal shell, but a minimal appliance-type UI.

          "},{"location":"cradlepoint/#get-config-data","title":"Get config data","text":"

          This will get all config data, which can be over 25k lines of JSON.

          get\n

          To get just a subset of the data, use something like:

          [admin@IBR900-13e: /]$ get config/vlan\n[\n    {\n        \"mode\": \"wan\",\n        \"ports\": [],\n        \"uid\": \"wan\",\n        \"vid\": 1\n    },\n    {\n        \"mode\": \"lan\",\n        \"ports\": [\n            {\n                \"mode\": \"untagged\",\n                \"port\": 0\n            },\n            {\n                \"mode\": \"untagged\",\n                \"port\": 1\n            },\n            {\n                \"mode\": \"untagged\",\n                \"port\": 2\n            }\n        ],\n        \"uid\": \"lan\",\n        \"vid\": 2\n    }\n]\n
          "},{"location":"cradlepoint/#set-and-fetch-variables","title":"Set and fetch variables","text":"
          [admin@IBR900-13e: /]$ set foo/bar: \"baz\"\n[admin@IBR900-13e: /]$ get foo\n{\n    \"bar:\": \"baz\"\n}\n
          "},{"location":"cradlepoint/#getting-help","title":"Getting help","text":"
          [admin@IBR900-13e: /]$ help\nAvailable Commands:\n        SupportQA      adduser        append         arpdump        atterm         banner         bgp\n        cd             clear          clients        cpconnect      date           delete         deluser\n        devices        diff           edit           exit           factory_reset  find           free\n        get            gre            grep           help           inspect        ips            ipset\n        lan            log            ls             mkdir          nemo           netcloud       netfilter\n        netstat        nhrp           ospf           passwd         ping           ping6          pwd\n        qos            reboot         reset          resources      rip            ripng          route\n        rtpolicy       serial         set            sleep          sms            ssh            stp\n        switch         tcpdump        telnet         threads        traceroute     uptime         vlan\n        vpn            vrrp           wan            wireless       workqueue      xfrm           zebra\n\nAvailable Aliases:\n        cat  => get\n        dir  => ls\n        ll   => ls -l 1\n        more => get\n        post => append\n        put  => set\n        quit => exit\n        rm   => delete\n\nTo get help for a specific command run: \"help CMD\"\n[admin@IBR900-13e: /]$ help SupportQA\nCommand to provide debugging data\n[admin@IBR900-13e: /]$ help adduser\nAdd a new user account and set the password for this account.\nUsage: adduser USER\n\n[admin@IBR900-13e: /]$ help append\nAppend new item to an array in the config\n    append PATH VALUE\n[admin@IBR900-13e: /]$ help arpdump\nThe arpdump command shows the current ARP table.\n[admin@IBR900-13e: /]$ help atterm\nThe atterm command can be used to get direct access to a modem's AT command channel.\nIf a modem interface is not given then the system will pick the highest priority modem.\nTo discover a modem's interface to use, use the command 'devices' and use\nthe value found under the 'SubIface' column.\n\nUSAGE: atterm [interface] [-c ATCMD] [-t SOCKET_TIMEOUT]\n\n[admin@IBR900-13e: /]$ help banner\nbanner [set|unset]\n[admin@IBR900-13e: /]$ help bgp\nUsage: bgp [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga BGP CLI is given.\nUse \"bgp list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. 
If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$ help cd\nChange to a new config directory\nUsage: cd DIR/[DIR...]\n[admin@IBR900-13e: /]$ help clear\nClear the screen\n[admin@IBR900-13e: /]$ help clients\nThe clients command can be used to show the currently connected clients,\nboth wireless and wired as well as hotspot.\n\nUSAGE:    clients [show | revoke | kick ] [SUBOPTIONS...]\n    show [CONNECTION TYPE]: Show clients by connection type.\n        If no connection type option is given, all connection types will be shown.\n    Connection Types:\n        all  : All Known Clients, both wired and wireless, that the router knows about.\n        wlan : Wireless LAN Clients\n        hotspot : Authenticated Hotspot Clients\n    revoke [-t [ip | mac | hn]]: Revoke a client's Hotspot authentication.\n        -t : The type of adddress to be used to find the client in the client list\n            ip  : IP address of the client (default)\n            mac : MAC address of the client\n            hn  : Hostname of the client (cannot be used if client does not have a hostname)\n    kick [-t [ip | mac | hn]]: Remove wireless access until the router is rebooted.\n        -t : The type of adddress to be used to find the client in the client list\n            ip  : IP address of the client (default)\n            mac : MAC address of the client\n            hn  : Hostname of the client (cannot be used if client does not have a hostname)\n[admin@IBR900-13e: /]$ help cpconnect\nUsage: cpconnect {COMMAND} [OPTIONS]\n    where COMMAND := { add | remove | show }\n          OPTIONS := {-n[ame] | -u[ser] | -p[asswd] | -s[server] } [VAR]\n\n    example: cpconnect add -n test1 -u user1 -p pass1 -s vpn.accessmylan.com\n             cpconnect show\n\n[admin@IBR900-13e: /]$ help date\nShow system clock.\n[admin@IBR900-13e: /]$ help delete\nDelete an item from the config\nUsage: delete PATH\n[admin@IBR900-13e: /]$ help deluser\nDelete a user account.\nUsage: deluser USER\n\n[admin@IBR900-13e: /]$ help devices\nDisplay network devices connected to the router.\n\nUSAGE:    devices [-v]                 Show all devices(s)\n          devices [-v] [DEVICE_UID...] 
Show one or more specific devices.\n            -v     Verbose output\n[admin@IBR900-13e: /]$ help diff\nShow differences between the current and the default configuration.\n[admin@IBR900-13e: /]$ help edit\nUsage: edit CONFIG_PATH\nExamples:\n    edit /config/system/gps\n    edit .\n\n[admin@IBR900-13e: /]$ help exit\nExit the shell\n[admin@IBR900-13e: /]$ help factory_reset\nReset config to factory defaults\n[admin@IBR900-13e: /]$ help find\nFind or list files and combine with grep to locate specific files\nUsage: find PATH [| grep file]\n[admin@IBR900-13e: /]$ help free\nShow amount (kilobytes) of free and used system memory.\n\"free\" memory is presently unused, while \"available\" includes used memory,\ntypically for cache, that is readily reclaimed for something else when needed.\n[admin@IBR900-13e: /]$ help get\nGet value for config item(s)\nUsage: get PATH [PATH...]\n[admin@IBR900-13e: /]$ help gre\n\nStart, stop, or show status of all gre tunnels.\nStart, stop or restart a tunnel.\nRenew or release the DHCP lease for a tunnel (if DHCP for GRE is enabled).\n\ngre [show|start|stop]\ngre [starttunnel|stoptunnel|restarttunnel|renew|release] -n <TUNNEL NAME>\n\n[admin@IBR900-13e: /]$ help grep\nUsage: grep PATTERN [FILE]...\n[admin@IBR900-13e: /]$ help help\nTo get help for a specific command run: \"help CMD\"\n[admin@IBR900-13e: /]$ help inspect\nInspect a directory\nUsage: inspect [PATH]\n[admin@IBR900-13e: /]$ help ips\nInteract with the IPS engine.\n\nUsage: ips [load|show|mode|status] [SUBOPTIONS..]\n    load [-f PATH]: Load a rule file into the IPS engine\n        -f [PATH]: Load rule file from PATH (Must contain the filename)\n    show [-o [ids | cats | sec | app | anom]]: Show information from the signatures loaded.\n        ids: Print all the signatures (default)\n        cats: Print all the categories\n        sec: Print only the signatures for the Security rules\n        app: Print only the signatures for the Application rules\n        anom: Print only the signatures for the Anomaly rules\n    mode [-o [off | ips | ids]]: Change the IPS Global mode\n        off: Set Default IPS  mode to 'Disabled'\n             (If no other Category or Signature is 'Enabled' then the kernel\n              modules will be unloaded)\n        ips: Set Default IPS mode to 'Detect and Prevent'\n        ids: Set Default IPS mode to 'Detect Only'\n    status: Print the status of the IPS engine\n    update: Request a Rule File Update\n\n[admin@IBR900-13e: /]$ help ipset\nipset [list {name}]\nspecify the name of the set to list, or nothing to see the names of the sets\n[admin@IBR900-13e: /]$ help lan\nShow the current LAN configuration and status.\n[admin@IBR900-13e: /]$ help log\nShow and manipulate the log system.\nUsage: log [show|clear|service|level|msg] [SUBOPTONS...]\n        show [FILTER] [FILTERN] [[-bhi] -s SEARCH]:\n              FILTER can be one or more space-separated names or levels.\n                 eg. log show wanmgr kernel DEBUG INFO\n              -b bold new entries\n              -h HIGHLIGHT Same usage as searching but does not filter results.\n              -i makes the search case insensitive.\n              -s SEARCH can be any string to search for in the log message contents.\n                 eg. 
log show -s Firmware\n              -f [LINES_OF_HISTORY] Follow mode with optional argument for number of lines of history to show.\n              -r recover crash log if one exists.\n        service [level (DEBUG|INFO|WARNING|ERROR|CRITICAL)]:\n             Display all service log levels.\n             level change service log level.\n        clear: Erase all logs from memory\n        level [NEW_LEVEL]: View current level or set new log level to: critical, error, warning, info, or debug\n        msg [-l LEVEL] MESSAGE: Write a message to the logging system. LEVEL defaults to: info\n[admin@IBR900-13e: /]$ help ls\nList files in the current config directory\n[admin@IBR900-13e: /]$ help mkdir\nCreate an empty container\nUsage: mkdir DIRNAME\n[admin@IBR900-13e: /]$ help nemo\nShow status and configuration of NEMO session\nUsage: nemo [-v]\n[admin@IBR900-13e: /]$ help netcloud\nManage connection to the Cradlepoint NetCloud.\n\nUsage: netcloud [alert|status|register|stop|start|restart|triggers]\n     status: [DEFAULT] Show current status information.\n     register [--username=USER --password=PW]|[--token_id=TID --token_secret=TS]: (Re)register\n       --username: The NetCloud username that should be used to authenticate.\n       --password: The NetCloud password that should be used to authenticate.\n       --token_id: [EXPERT] Token ID for token authentication mode.\n       --token_secret: [EXPERT] Token secret for token authentication mode.\n     unregister: Unregister this router from NetCloud and unregister.\n     stop: Manually stop the NetCloud client.\n     start: Manually start the NetCloud client.\n     restart: Manually restart the NetCloud client.\n\n[admin@IBR900-13e: /]$ help netfilter\nShow info and debug from netfilter2 rule(s) and trigger(s).\nUsage: netfilter [active|all|upnp|triggers|states] [input|routed|output] [ip/ip6] [-v] [-s] [-r RULE_INDEX] [-t TRIGGER_INDEX] [-s STATE_INDEX]\n\n[admin@IBR900-13e: /]$ help netstat\nUsage: netstat [-al] [-tuwx] [-enWp]\n\nDisplay networking information\n\n-a     All sockets\n-l     Listening sockets\n        Else: connected sockets\n-t     TCP sockets\n-u     UDP sockets\n-w     Raw sockets\n-x     Unix sockets\n        Else: all socket types\n-e     Other/more information\n-n     Don't resolve names\n-W     Wide display\n-p     Show PID/program name for sockets\n\n[admin@IBR900-13e: /]$ help nhrp\n\nUsage:    nhrp show\n          nhrp flush\n          nhrp flush nbma ip\n\n[admin@IBR900-13e: /]$ help ospf\nUsage: ospf [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga OSPF CLI is given.\nUse \"ospf list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. 
If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$ help passwd\nSet the password for a user.\nUsage: passwd [USER]\n\n[admin@IBR900-13e: /]$ help ping\nSend ICMP echo_request(s) to a networked host\n\nUsage: ping [-w DEADLINE] [-c COUNT] [-i INTERVAL] [-I INTERFACE [-P]] [-M PMTUDISC_OPT] [-s SIZE] HOSTNAME\n\n    -w DEADLINE\n        Stop after a total of DEADLINE seconds, or (if specified) COUNT packets\n        are sent, whichever comes first.\n\n    -c COUNT\n        Stop after sending COUNT packets, or (if specified) DEADLINE seconds,\n        whichever comes first\n\n    -i INTERVAL\n        Wait INTERVAL seconds between packet transmissions. Default is 1.\n\n    -I INTERFACE\n        Specify the source of the ICMP requests. If INTERFACE is an IP address,\n        then source the ICMP requests from that address. If INTERFACE is not\n        an IP address, treat it as an interface name and source from that\n        interface. When treated as a name, exact matches of interface \"Ifaces\"\n        are chosen first, followed by exact matches of \"Device UIDs\", and\n        finally case-insensitive matches to Network names are returned. See\n        the \"devices\" command for a list of valid \"Ifaces\" and \"Device UIDs\".\n\n    -P\n        Requires the -I INTERFACE option. When -P is present, ping will\n        source from an IP address on the specified interface, instead of the\n        interface itself.\n\n    -s SIZE\n        Specifiy the ICMP data length, in bytes. The default is 56 bytes, which\n        will result in an ICMP packet length of 64 bytes (56 data bytes plus 8\n        ICMP header bytes)\n\n    -M PMTU_OPT\n        Select Path MTU discovery. PMTU_OPT must be one of: \"do\", \"want\" or \"dont\".\n        If the PMTU_OPT is \"do\" (default), then ping will set the Don't Fragment (DF)\n        flag in the ICMP requests, which will prohibit packet fragmentation. If\n        PMTU_OPT is \"want\", then ping will fragment if the ICMP request exceeds\n        the local outbound interfaces' MTU. Finally if PMTU_OPT is \"dont\" (do not\n        set the DF flag), then fragmentation is allowed and ICMP Requests will be\n        fragmented as necessary in response to ICMP Fragmentation Responses.\n\n\n[admin@IBR900-13e: /]$ help ping6\nSend ICMPv6 echo_request(s) to a networked host\n\nUsage: ping6 [-w DEADLINE] [-c COUNT] [-i INTERVAL] [-I INTERFACE [-P]] [-M PMTUDISC_OPT] [-s SIZE] HOSTNAME\n\n    -w DEADLINE\n        Stop after a total of DEADLINE seconds, or (if specified) COUNT packets\n        are sent, whichever comes first.\n\n    -c COUNT\n        Stop after sending COUNT packets, or (if specified) DEADLINE seconds,\n        whichever comes first\n\n    -i INTERVAL\n        Wait INTERVAL seconds between packet transmissions. Default is 1.\n\n    -I INTERFACE\n        Specify the source of the ICMP requests. If INTERFACE is an IP address,\n        then source the ICMP requests from that address. If INTERFACE is not\n        an IP address, treat it as an interface name and source from that\n        interface. When treated as a name, exact matches of interface \"Ifaces\"\n        are chosen first, followed by exact matches of \"Device UIDs\", and\n        finally case-insensitive matches to Network names are returned. 
See\n        the \"devices\" command for a list of valid \"Ifaces\" and \"Device UIDs\".\n\n    -P\n        Requires the -I INTERFACE option. When -P is present, ping will\n        source from an IP address on the specified interface, instead of the\n        interface itself.\n\n    -s SIZE\n        Specifiy the ICMP data length, in bytes. The default is 56 bytes, which\n        will result in an ICMP packet length of 64 bytes (56 data bytes plus 8\n        ICMP header bytes)\n\n\n[admin@IBR900-13e: /]$ help pwd\nPrint the current working directory\n[admin@IBR900-13e: /]$ help qos\nShow QoS statistics.\nUsage: qos\n\n[admin@IBR900-13e: /]$ help reboot\nReboot the router\n[admin@IBR900-13e: /]$ help reset\nReset the tty to default settings\n[admin@IBR900-13e: /]$ help resources\nReport the system resource usage.\n[admin@IBR900-13e: /]$ help rip\nUsage: rip [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga RIP CLI is given.\nUse \"rip list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$ help ripng\nUsage: ripng [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga RIPNG CLI is given.\nUse \"ripng list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$ help route\nThe route command can be used to show the current routing tables as\nwell as make changes to the user defined static routing table.\n\nUSAGE:    route [show [{TABLE}]]                        Show route(s)\n          route config {bgp|ospf|rip|ripng|static}      Show routing protocol configuration\n          route add IP/NM [gw IP] [dev UID] [auto]    Add new user defined static route\n          route del IP/NM [gw IP] [dev UID]     Remove user defined static route(s)\n\nEXAMPLES: route add 172.0.0.100 dev primarylan\n          route add 10.0.0.0/8 gw 172.0.0.100\n          route add 2000::/3 dev guestlan metric 100\n\nNOTE: Only the user defined static routing table may be modified directly.\n[admin@IBR900-13e: /]$ help rtpolicy\nShow the routing policies used by the router.  
These policies control which\nrouting table will be used based on certain packet matching criteria and can\nalso control certain routing decisions.\n\nUSAGE:    rtpolicy                                 Show policies(s)\n\n[admin@IBR900-13e: /]$ help serial\nThe serial command can be used to access a serial adapter connected to the router.\nUSAGE:  serial [--force]          Connects to the first serial device using the serial configuration in config/system/serial\nUSAGE:  serial [--force] [number] Connects to the [number] port of a multi-port serial device using the serial configuration in config/system/serial\nOnly one remote user can be connected to a serial port at a time.  The --force option will force quit another user to allow this command to take over.\n[admin@IBR900-13e: /]$ help set\nSet a value to a config item\nUsage: set PATH VALUE\n[admin@IBR900-13e: /]$ help sleep\nsleep SECONDS\n[admin@IBR900-13e: /]$ help sms\n The sms command is used to send a msg to the given address (optionally from the modem on port)\n  Usage: sms addr msg [port]\n  Example: sms 2081234567 'hello world' usb1\n  Note: Add a '+' before the addr for international numbers\n[admin@IBR900-13e: /]$ help ssh\nCreate an SSH connection to an SSH server.\n\nusage:    ssh [-v] [-C] [-1] [-2] [-l username] [-p port] [-c cipher_spec] [user@]hostname\n\nrequired arguments:\n  hostname    Either the hostname or a user@hostname pair\n\noptional arguments:\n  -v          Debug level. May be specified up to 3 times (-v, -vv, -vvv).\n  -C          Requests compression of all data.\n  -1          Force ssh to try protocol version 1 only.\n  -2          Force ssh to try protocol version 2 only.\n  -l          Specifies login name.\n  -p          Specifies port.\n  -c          Comma separated list of ciphers (e.g. 
aes256-ctr,aes192-ctr,aes128-ctr,).\n\nSupported ciphers:\naes256-ctr,aes192-ctr,aes128-ctr,aes256-cbc,aes192-cbc,aes128-cbc,3des-cbc,blowfish-cbc\nNote: Cradlepoint routers support only PCI-compliant ciphers by default.\nTo relax this behavior, set /config/firewall/ssh_admin/weak_ciphers to true.\nDoing so will set the ciphers to the list of supported ciphers (above).\n[admin@IBR900-13e: /]$ help stp\nShow the current STP configuration and status.\n[admin@IBR900-13e: /]$ help switch\nShow ethernet switch status.\nUsage: switch [-v] [show|set|clearstats]\n\n      show [port PORT_NUM]: Only show the port specified\n\n      set PORT_NUM [--link_speed=LINK_SPD]\n                   [--enabled=true/false]\n                   [--port_name=PORT_NAME]\n      clearstats [PORT_NUM]\n[admin@IBR900-13e: /]$ help tcpdump\nUsage: tcpdump [-aAdDeflLnNOpqRStuvxX] [-c count] [ -E algo:secret ]\n               [ -i interface ] [ -M secret ] [ -s snaplen ] [ -T type ]\n               [ -y datalinktype ] [ expression ]\n\n[admin@IBR900-13e: /]$ help telnet\nThe telnet command can be used to access telnet services located on the routers LAN.\nUSAGE: telnet [host] [port] [timeout]\n[admin@IBR900-13e: /]$ help threads\nShow currently active threads.\n USAGE: threads [-v]\n[admin@IBR900-13e: /]$ help traceroute\nUsage: traceroute [options] host [packet_len]\n\nAvailable options:\n  -4                  Use IPv4\n  -6                  Use IPv6\n  -F                  Do not fragment packets\n  -f                  Start from the first_ttl hop (instead from 1)\n  -g                  Route packets through the specified gateway\n  -I                  Use ICMP ECHO for tracerouting\n  -T                  Use TCP SYN for tracerouting (default port is 80)\n  -i                  Specify a network interface to operate with\n  -m                  Set the max number of hops (max TTL to be reached). Default is 30\n  -n                  Do not resolve IP addresses to their domain names\n  -p                  Set the destination port to use\n  -t                  Set the TOS (IPv4 type of service) or TC (IPv6 traffic class) value for outgoing packets\n  -l                  Use specified flow_label for IPv6 packets\n  -w                  Set the number of seconds to wait for response to a probe (default is 5.0)\n  -q                  Set the number of probes per each hop. Default is 3\n  -r                  Bypass the normal routing and send directly to a host on an attached network\n  -s                  Use source src_addr for outgoing packets\n  -z                  Minimal time interval between probes (default 0)\n  -e                  Show ICMP extensions (if present), including MPLS\n  --sport=num         Use source port num for outgoing packets. Implies \"-N 1\"\n  --fwmark=num        Set firewall mark for outgoing packets\n  -U                  Use UDP to particular port for tracerouting\n  -UL                 Use UDPLITE for tracerouting (default dest port is 53)\n  -P                  Use raw packet of protocol prot for tracerouting\n  --mtu               Discover MTU along the path being traced. 
Implies \"-F -N 1\"\n  --back              Guess the number of hops in the backward path and print if it differs\n  -V                  Print version info and exit\n\n[admin@IBR900-13e: /]$ help uptime\nShow system uptime and load avg.\n[admin@IBR900-13e: /]$ help vlan\nShow VLAN configuration.\n\n[admin@IBR900-13e: /]$ help vpn\n\nStart, stop, or show status of all vpn tunnels.\nStart, stop or restart a tunnel.\nAdjust logging for various ipsec subsystems.  Reset sets the default\nand all subsystems to log level 1.\n\nvpn [show|start|stop]\nvpn [starttunnel|stoptunnel|restarttunnel] -n <TUNNEL NAME>\nvpn config\nvpn loglevel reset\nvpn loglevel default [-1|0|1|2|3|4]\nvpn loglevel [app|asn|cfg|chd|dmn|enc|esp|ike|imc|imv|job|knl|lib|mgr|net|pts|tls|tnc] [-1|0|1|2|3|4]\n\n[admin@IBR900-13e: /]$ help vrrp\nShow the current VRRP configuration and status.\n[admin@IBR900-13e: /]$ help wan\nShow all the attached wan devices and their current state.\nUsage: wan [monitor] [UID] [CONFIG...]\nCONFIG: Can be any number of --key=[value] pairs as defined in the /config/wan/rules2 config section.\n        If the optional [value] argument is ommited then the current value (if any) will be printed.\n        Get example: wan cp1 --ip_mode --static.ip_address\n        Set example: wan cp1 --ip_mode=\"static\" --static.ip_address=\"10.0.0.1\" --static.netmask=\"255.0.0.0\"\n[admin@IBR900-13e: /]$ help wireless\nShow the current wireless configuration and status.\nUsage: wireless [OPTIONS...]\n    -w Print information about enabled Access Point profiles\n    -d Print information about enabled WiFi as WAN / WiFi Bridge profiles\n    -c Print information about currently associated WiFi clients\n    -v Print additional driver specific debug when available\n    -t [CHANNEL] Set wireless channel\n    -k [MAC ADDRESS] Kick wireless client\n    -s [OPTIONAL DWELL TIME] Perform a wireless survey\n    -a [OPTIONAL DWELL TIME] Do a survey and autoselect a channel\n    -r [RADIO] Radio to operate on (0: first, 1: second)\n\n[admin@IBR900-13e: /]$ help workqueue\nPrint current workqueue activities and schedules.\nUSAGE: workqueue [-v] [-t TASKID] [-a]\n           -v: VERBOSE\n    -t TASKID: Only show information for a single task\n           -a: Show information for active tasks (ie. currently executing)\n\n[admin@IBR900-13e: /]$ help xfrm\nUsage: xfrm policy list [ SELECTOR ] [ dir DIR ]\n        [ index INDEX ] [ action ACTION ]\nUsage: xfrm policy flush\nSELECTOR := [ src ADDR[/PLEN] ] [ dst ADDR[/PLEN] ] [ dev DEV ] [ UPSPEC ]\nUPSPEC := proto { { tcp | udp | sctp | dccp } [ sport PORT ] [ dport PORT ] |\n                  { icmp | ipv6-icmp | 135 } [ type NUMBER ] [ code NUMBER ] |\n                  gre [ key { DOTTED-QUAD | NUMBER } ] | PROTO }\nDIR := in | out | fwd\nACTION := allow | block\nUsage: xfrm state list [ ID ] [ mode MODE ]\nUsage: xfrm state flush\nID := [ src ADDR ] [ dst ADDR ] [ spi SPI ]\nMODE := transport | tunnel\n[admin@IBR900-13e: /]$ help zebra\nUsage: zebra [cmd]\n\nExcecute and show output of \"cmd\". If no \"cmd\" is given then access to the Quagga CLI is given.\nUse \"zebra list\" to see avaliable commands or See the Quagga documentation for details.\n\n    *** WARNING: Changes made using the Quagga CLI do not persist! ***\n\nThe configuration entered via the Cradlepoint user interface will be restored\nafter user interface changes and/or a router reboot. 
If the routing protocol\noptions in the user interface do not allow a necessary Quagga configuration,\nplease provide all configuration details to Cradlepoint support.\n\n[admin@IBR900-13e: /]$\n
          "},{"location":"cron/","title":"cron","text":"

          Per-user and system-wide scheduled tasks, handled by the cron daemon.

          "},{"location":"cron/#locations","title":"Locations","text":"

          Cron scripts and entries can run from several locations. By using /etc/cron.d/scriptname you can set different MAILTO and ENV variables and isolate your scheduled jobs. User jobs can be edited via crontab -e.
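
          Here is a minimal sketch of such a drop-in file (the MAILTO address and script path are hypothetical). Note that unlike user crontabs, entries in /etc/cron.d require a user field:

          MAILTO=ops@example.com\n# min hour dom mon dow user command\n*/10 * * * * root /usr/local/bin/check-disk.sh\n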

          "},{"location":"cron/#dst","title":"DST","text":"

          Some cron daemons don't handle DST correctly. Because of this, do not schedule jobs between 1am and 3am. During DST changes, an hour in this window either happens twice or is skipped altogether.

          Cronie says it handles DST gracefully, running jobs that should have run but haven't yet due to time changes, and not running jobs twice when time goes back.

          "},{"location":"cron/#syntax-quirks","title":"Syntax quirks","text":"

          Some systems have problems with step value (*/N) syntax. (eg: */5 * * * * /usr/bin/whatever)
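
          On systems that don't support step values, you can write the same schedule out explicitly:

          0,5,10,15,20,25,30,35,40,45,50,55 * * * * /usr/bin/whatever\n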

          "},{"location":"cron/#default-editor","title":"Default Editor","text":"

          On some systems, the default editor is determined by the symlink at /etc/defaults/editor. To override this, export the EDITOR environment variable. (eg: export EDITOR=/usr/bin/vim)

          "},{"location":"cron/#examples","title":"Examples","text":""},{"location":"cron/#quick-and-dirty-realignment","title":"Quick and dirty realignment","text":"

          This will definitely fail in some circumstances.

          alias crontab-align='crontab -l | while read -r a b c d e f ; do\n  if [[ \"$a\" =~ ^# ]] ; then\n    echo \"$a $b $c $d $e $f\" ;\n  else\n    printf \"% -20s %s\\n\" \"$a $b $c $d $e\" \"$f\" ;\n  fi ;\ndone'\n\ncrontab-align | crontab -\n
          "},{"location":"cron/#add-a-random-delay","title":"Add a random delay","text":"

          This example sleeps for a random number of seconds lower than 1800, including 0. The % symbol has to be escaped in crontabs.

          0 * * * *   sleep $((RANDOM \\% 1800)) ; /usr/local/bin/do-a-thing.sh ;\n
          "},{"location":"cron/#programmatic-editing-of-the-crontab","title":"Programmatic editing of the crontab","text":"

          This is potentially dangerous because you can wipe out a user's crontab.

          crontab -l | sed -e '/downtime/s/^\\#//' | crontab -\necho \"* * * * * /usr/local/bin/every_minute.sh\" | crontab -\n
          "},{"location":"cron/#see-if-and-when-parts-are-running","title":"See if and when parts are running","text":"

          Put this in /etc/cron.*/01-cron-log and when those parts run you will see the message in syslog.

          logger -t cron \"Running $(basename \"$PWD\")\"\n
          "},{"location":"cron/#os-x-alarm-clock","title":"OS X Alarm Clock","text":"
          59 5 * * 1-5    /usr/bin/osascript -e 'tell application \"iTunes\"' -e 'set the sound volume to 100' -e 'end tell'\n0  6 * * 1-5    /usr/bin/osascript -e 'tell application \"iTunes\"' -e 'play playlist \"Old Podcasts\"' -e 'end tell'\n15 8 * * 1-5    /usr/bin/osascript -e 'tell application \"iTunes\" to stop'\n
          "},{"location":"cron/#detect-if-you-are-running-in-an-interactive-shell","title":"Detect if you are running in an interactive shell","text":"

          When writing scripts to use with crontab, sometimes you want to give different flags if they are given by a human. The most obvious case of this is verbosity, where you may want to be very verbose when a human runs the command but be quiet in cron.

          # Check if we're running in an interactive shell\nif [ -t 0 ] ; then\n  verbosity=\"--verbose\"\nelse\n  verbosity=\"--quiet\"\nfi\n\n/usr/bin/some-command \"${verbosity}\"\n
          "},{"location":"cron/#troubleshooting","title":"Troubleshooting","text":"

          Having junk files like temp vim files in /var/cron/tabs can make cron go to 100% cpu usage. Remove all non crontab files and kill cron to fix it.
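
          A sketch of that cleanup, assuming a BSD-style spool at /var/cron/tabs (on many Linux systems the spool is /var/spool/cron or /var/spool/cron/crontabs instead):

          ls -la /var/cron/tabs/          # look for vim swap files and other junk\nsudo rm /var/cron/tabs/.*.swp   # remove leftover vim swap files\nsudo pkill -x cron              # cron is normally restarted by init\n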

          "},{"location":"cron/#links","title":"Links","text":"
          • https://cronheatmap.com: Visualize a daily heatmap of your cron jobs
          • https://crontab.guru/: Online crontab linter and explainer
          • https://github.com/kiorky/croniter: python module that \"provides iteration for the datetime object with a cron like format\"
          "},{"location":"cryptsetup/","title":"cryptsetup","text":"

          \"Cryptsetup is utility used to conveniently setup disk encryption based on DMCrypt kernel module.\" - https://gitlab.com/cryptsetup/cryptsetup/blob/master/README.md

          \"LUKS is the standard for Linux hard disk encryption. By providing a standard on-disk-format, it does not only facilitate compatibility among distributions, but also provides secure management of multiple user passwords.\" - https://gitlab.com/cryptsetup/cryptsetup/blob/master/README.md

          "},{"location":"csplit/","title":"csplit","text":"

          \"split files based on context\" - man csplit

          There is a similar, simpler tool called split.
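
          For example, split can break a hypothetical bigfile.txt into fixed-size pieces with no context matching:

          split -l 1000 bigfile.txt chunk-   # 1000 lines per output file: chunk-aa, chunk-ab, ...\n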

          "},{"location":"csplit/#gnu-examples","title":"GNU Examples","text":"

          GNU and BSD csplit are not compatible. On macOS you can use gcsplit if you have installed coreutils via brew.

          "},{"location":"csplit/#split-amazon-rds-global-certs-into-one-cert-per-file","title":"Split amazon RDS global certs into one cert per file","text":"
          curl -fsSL https://truststore.pki.rds.amazonaws.com/global/global-bundle.pem |\ncsplit --elide-empty-files --quiet --prefix global-rds-crt -k - '/-BEGIN CERTIFICATE-/' '{*}'\n
          "},{"location":"csplit/#split-a-multi-doc-yaml-file","title":"Split a multi-doc yaml file","text":"

          This is great for splitting helm template or kubectl get pod,svc,sts,ds -o yaml output

          $ wc -l k-get-all.yaml  # lots of lines in this one yaml file\n9717 k-get-all.yaml\n\n$ grep -c '^---$' k-get-all.yaml  # lots of docs too\n161\n\n$ csplit k-get-all.yaml -s --elide-empty-files --prefix=yaml-split- --suffix-format='%03d.yaml' '/^---$/' '{*}'\n\n$ wc -l yaml-split-???.yaml\n    38 yaml-split-000.yaml\n    32 yaml-split-001.yaml\n...long-list-of-files...\n   227 yaml-split-159.yaml\n   230 yaml-split-160.yaml\n  9717 total\n
          "},{"location":"css/","title":"Cascading Style Sheets","text":""},{"location":"css/#links","title":"Links","text":"
          • https://cssprinciples.com/3/grid: \"dabble in precision with css grid\"
          • https://developer.mozilla.org/en-US/docs/Web/CSS
          • https://www.fffuel.co/css-selectors: \"CSS Selectors: A Visual Guide\"
          "},{"location":"curl/","title":"cURL","text":"

          \"command line tool and library for transferring data with URLs\" - https://curl.haxx.se

          curl is a tool to transfer data from or to a server, using one of the supported protocols (DICT, FILE, FTP, FTPS, GOPHER, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, TELNET and TFTP). The command is designed to work without user interaction.

          "},{"location":"curl/#examples","title":"Examples","text":""},{"location":"curl/#follow-location-redirects","title":"Follow location redirects","text":"
          curl -L http://whatever\n
          "},{"location":"curl/#show-the-http-code-for-a-given-request","title":"Show the HTTP code for a given request","text":"

          Use the HTTP HEAD method to fetch only the headers of the remote URI and show the HTTP code. This is useful for efficiently checking the existence of a URI that would be a large download.

          curl --head -s -w \"%{http_code}\\n\" -o /dev/null http://www.example.com/\n

          Not all HTTP servers support the HEAD method though, so a slightly worse alternative is:

          curl --max-filesize 1 -s -w \"%{http_code}\\n\" -o /dev/null http://www.example.com/\n

          --max-filesize 1 will download something more than 0 bytes, though usually not the whole file. Notably, curl does not seem to tell the truth about how many bytes were actually downloaded when using --max-filesize 1 -w '%{size_download}'. This can be verified by starting a simple HTTP server (eg: python3 -m http.server), running tcpflow -c, and then requesting the root dir of your simple HTTP server. You will see the directory listing being sent across the wire, but curl will report 0 bytes downloaded. The true download size depends on what file is being downloaded and how big it is; for large binary files, I have observed the reported size being 0 bytes.

          "},{"location":"curl/#request-a-specific-vhost-from-a-server","title":"Request a specific vhost from a server","text":"

          This is useful for testing production code on non-production multi-tenant name-based virtual hosts.

          curl -H 'Host: www.domain.com' http://example.com\n
          "},{"location":"curl/#get-the-length-of-the-file-to-be-downloaded","title":"Get the length of the file to be downloaded","text":"
          curl -qI  https://www.google.com/index.php 2>/dev/null | awk '/Length/ {print $2}'\n
          "},{"location":"curl/#fetch-only-http-headers-not-content","title":"Fetch only HTTP headers, not content","text":"
          curl -I http://www.example.com/some_huge_file.iso\n
          "},{"location":"curl/#send-post-variables","title":"Send POST variables","text":"
          curl --data \"user=foo&pass=bar\" http://example.com/login.php\n
          "},{"location":"curl/#scrape-urls-from-a-page","title":"Scrape URLs from a page","text":"

          This appears to have problems with some strings. For instance, this doesn't catch the full https://accounts.google.com string. The regex is correct according to http://regexpal.com, but egrep is apparently not handling it correctly.

          curl -s http://www.google.com | egrep -o '(((https?|ftp|gopher)://|(mailto|file|news):)[^\u2019 <>\\n\"]+|(www|web|w3)\\.[-a-z0-9.]+)[^\u2019 .,;<>\":]'\n
          "},{"location":"curl/#use-curl-to-fetch-the-current-rfc-2822-time","title":"Use curl to fetch the current RFC 2822 time","text":"

          If you don't have NTP you can use this to manually feed the current time into date -s to set your system clock to within a few seconds of accuracy.

          curl -sIH 'Cache-Control: no-cache' example.org | grep '^Date'\n

          Using GNU cut and date (ie: on Linux, such as on a Raspberry Pi image that does not have NTP properly set up) you can set your time using this command:

          sudo date -s \"$(curl -sIH 'Cache-Control: no-cache' example.org | grep '^Date:' | cut -f 1 -d ' ' --complement)\"\n
          "},{"location":"curl/#links","title":"Links","text":"
          • Release Notes
          "},{"location":"curl/#see-also","title":"See Also","text":"
          • aria2
          • httpie
          • httpstat - download and show some useful connection information
          • wget
          "},{"location":"cut/","title":"cut","text":"

          \"cut out selected portions of each line of a file\" - man cut

          "},{"location":"cut/#examples","title":"Examples","text":""},{"location":"cut/#keep-only-the-selected-characters","title":"keep only the selected characters","text":"

          cut is 1-indexed

          $ echo {0..9}\n0 1 2 3 4 5 6 7 8 9\n$ echo {0..9} | cut -c 1\n0\n$ echo -n {0..9} | cut -c 1-5\n0 1 2\n$ echo -n {0..9} | cut -c 1,19\n09\n$ echo -n {a..z} | sed 's/ //g' | cut -c 10-20\njklmnopqrst\n
          "},{"location":"cut/#keep-only-the-given-field-index","title":"keep only the given field index","text":"

          By default, cut works with tab-delimited fields, which is not really useful. You can specify a different field delimiter with -d.

          $ echo {a..z} | cut -d ' ' -f 3\nc\n$ echo {a..z} | cut -d ' ' -f 3-10\nc d e f g h i j\n$ echo 'a^b^c' | cut -d'^' -f 3\nc\n

          It's not really intuitive though, because every single space separates one field from the next, even if the field is null:

          $ echo 'a b c d' | cut -d ' ' -f 1-3\na b c\n$ echo 'a  b  c  d' | cut -d ' ' -f 1-3\na  b\n

          Using -w (available in BSD cut but not GNU cut) allows all consecutive whitespace to be treated as one separator, which is usually the desired behavior, but then the output fields are separated by tabs.

          $ echo 'a  b  c  d' | cut -w -f 1-3\na       b       c\n
          "},{"location":"cut/#see-also","title":"See Also","text":"
          • cut is often used with tr
          • awk is what I usually reach for instead of cut when working with words.
          "},{"location":"d2/","title":"d2 diagram language","text":"

          \"Create beautiful diagrams in minutes. Simple syntax. Endlessly customizable. D2 is the fastest and easiest way to get a mental model from your head onto the screen, then make edits with your team.\" - https://d2lang.com

          "},{"location":"d2/#links","title":"Links","text":"
          • https://d2lang.com
          • https://play.d2lang.com
          "},{"location":"d2/#see-also","title":"See also","text":"
          • https://mermaid.js.org
          • http://www.plantuml.com
          • https://text-to-diagram.com
          "},{"location":"dasel/","title":"dasel","text":"

          \"Select, put and delete data from JSON, TOML, YAML, XML and CSV files with a single tool. Supports conversion between formats and can be used as a Go package.\" - https://github.com/TomWright/dasel

          "},{"location":"dasel/#example-usage","title":"Example usage","text":""},{"location":"dasel/#convert-between-formats","title":"Convert between formats","text":"

          yaml to toml

          dasel --read yaml --write toml --file vector.yaml > vector.toml\n

          json to yaml with a data filter

          dasel -r json -w yaml -f ansible-facts.json '.ansible_facts.ansible_default_ipv4'\n
          "},{"location":"dasel/#restructure-a-pagerduty-csv","title":"Restructure a pagerduty csv","text":"

          Download the csv using this shell function that uses BSD (macOS) date and open:

          pagerduty-csv-download() {\n  TZ=America/Los_Angeles\n  past=\"$(date -v-7d \"+%FT%T\")\"\n  present=\"$(date \"+%FT%T\")\"\n  open \"$(date \"+https://company-name.pagerduty.com/api/v1/reports/raw/incidents.csv?since=${past}&until=${present}&time_zone=${TZ}\")\"\n}\n

          Then restructure it using --format to interpolate variables into a template string:

          dasel -f incidents.csv -w json -m --format '{{ .created_on }} https://company-name.pagerduty.com/incidents/{{ .id }} {{ .description }}' '.[*]'\n

          The output will be something like:

          2022-02-02T20:02:02-08:00 https://company-name.pagerduty.com/incidents/Q0ZL9NU2 [FIRING:1] TargetDown (cat-downloader)\n
          "},{"location":"dasel/#pretty-format-a-gpx-file","title":"Pretty format a gpx file","text":"

          This is useful for comparing two files, for instance when data may have been appended to one of them and you need to verify what changed.

          dasel -r xml -f 2022-01-02-18-20-00.gpx > old.gpx\n

          Keep in mind though that pretty-formatted gpx files take up significantly more space.

          "},{"location":"dasel/#compact-format-a-gpx-file","title":"Compact format a gpx file","text":"

          dasel supports compact formatting, which can save disk space by eliminating whitespace characters. In dasel 1.x this is -c, but in 2.x it is --pretty=false.

          dasel -r xml -f books.xml --pretty=false\n

          My tests show this compact output saving ~25% in gpx files compared to a formatted gpx file using whitespace, and 15% compared to a gpx file using tabs.

          "},{"location":"data/","title":"data","text":"

          General information about data.

          "},{"location":"data/#links","title":"Links","text":"
          • https://en.wikipedia.org/wiki/Information_assurance
          • https://en.wikipedia.org/wiki/CAP_theorem
          • https://raft.github.io / http://thesecretlivesofdata.com/raft/
          "},{"location":"datasette/","title":"Datasette","text":"

          \"An open source multi-tool for exploring and publishing data\" - https://docs.datasette.io

          "},{"location":"datasette/#links","title":"Links","text":"
          • https://github.com/simonw/datasette
          • https://docs.datasette.io
          • sqlite
          "},{"location":"datasette/#examples","title":"Examples","text":""},{"location":"datasette/#start-a-server","title":"Start a server","text":"

          If you're starting from scratch, just touch existing-file.db before running this. -o opens your browser automatically.

          datasette existing-file.db -o\n
          "},{"location":"datasette/#ingest-a-json-file","title":"Ingest a json file","text":"

          You can use sqlite-utils to ingest a json file into a sqlite table, which can then be explored in datasette

          curl -s \"https://hub.docker.com/v2/repositories/ubuntu/\" |\njq .results |\nsqlite-utils insert datasette.db docker/docker.io/ubuntu -\n
          "},{"location":"date/","title":"date","text":"

          The date shell command

          date behaves differently between GNU and BSD. On macOS you can install GNU date with brew install coreutils, which makes it available as gdate.

          "},{"location":"date/#gnu-date","title":"GNU date","text":""},{"location":"date/#show-adjusted-datetime","title":"Show adjusted date/time","text":"
          date -d -2month # two months ago\ndate -d +1hour # one hour in the future\ndate -d +15minute\ndate -d \"last week + 1 hour\"\ndate -d \"january 10 1978 + 5 years\" +%a\n
          "},{"location":"date/#convert-a-string-date-to-epoch-seconds","title":"Convert a string date to epoch seconds","text":"
          date -d \"Fri Sep  7  2:00 2012\" +%s\n
          "},{"location":"date/#convert-epoch-seconds-to-string-date","title":"Convert epoch seconds to string date","text":"
          date -d @1375899534\n
          "},{"location":"date/#output-various-rfc-3339-time-formats","title":"Output various RFC 3339 time formats","text":"
          date --rfc-3339=date\ndate --rfc-3339=seconds\ndate --rfc-3339=ns\n
          "},{"location":"date/#show-and-number-all-previous-weeks-from-one-year-ago","title":"Show and number all previous weeks from one year ago","text":"
          for X in {1..53} ; do printf \"%02s \" ${X} ; date -d -49weeks-2days+${X}week \"+%b %d %Y\" ; done ;\n
          "},{"location":"date/#show-and-number-all-weeks-from-the-point-i-started-working-at-zoosk","title":"Show and number all weeks from the point I started working at Zoosk","text":"
          for X in {1..90} ; do printf \"%02s \" ${X} ; date -d \"June 10 2013 - 1 week + ${X} week\" \"+%a %b %d %Y\" ; done ;\n
          "},{"location":"date/#show-how-many-seconds-old-i-am","title":"Show how many seconds old I am","text":"
          echo \"$(date +%s) - $(date -d \"January 10 1978 7:46pm\" +%s)\" | bc\n
          "},{"location":"date/#show-subsecond-date-without-going-full-nano","title":"Show subsecond date, without going full nano","text":"
          for X in {1..100} ; do date +%s.%N | cut -c1-15 ; done ;\n
          "},{"location":"date/#sleep-until-the-next-5-minute-0-seconds-mark","title":"Sleep until the next 5 minute 0 seconds mark","text":"
          while sleep $(date \"+60 - %S.%N\" | bc) 240 ; do date \"+%F %T.%N\" ; done ;\n
          "},{"location":"date/#show-all-format-strings-and-their-values","title":"Show all format strings and their values","text":"
          $ man date | awk '/[[:space:]]%/ {X = $1 ; $1 = \"\" ; print X,$0}' | while read -r Y Z ; do date \"+%$Y^$Y^${Z//%/%%}\" ; done | column -t -s^\n%      %                                MON literal %\n%a     Mon                              locale's abbreviated weekday name (e.g., Sun)\n%A     Monday                           locale's full weekday name (e.g., Sunday)\n%b     Nov                              locale's abbreviated month name (e.g., Jan)\n%B     November                         locale's full month name (e.g., January)\n%c     Mon 22 Nov 2021 10:33:55 AM PST  locale's date and time (e.g., Thu Mar 3 23:05:25 2005)\n%C     20                               century; like %Y, except omit last two digits (e.g., 20)\n%d     22                               day of month (e.g., 01)\n%D     11/22/21                         date; same as %m/%d/%y\n%e     22                               day of month, space padded; same as %_d\n%F     2021-11-22                       full date; like %+4Y-%m-%d\n%g     21                               last two digits of year of ISO week number (see %G)\n%G     2021                             year of ISO week number (see %V); normally useful only with %V\n%h     Nov                              same as %b\n%H     10                               hour (00..23)\n%I     10                               hour (01..12)\n%j     326                              day of year (001..366)\n%k     10                               hour, space padded ( 0..23); same as %_H\n%l     10                               hour, space padded ( 1..12); same as %_I\n%m     11                               month (01..12)\n%M     33                               minute (00..59)\n%n                                      a newline\n%N     258608657                        nanoseconds (000000000..999999999)\n%p     AM                               locale's equivalent of either AM or PM; blank if not known\n%P     am                               like %p, but lower case\n%q     4                                quarter of year (1..4)\n%r     10:33:55 AM                      locale's 12-hour clock time (e.g., 11:11:04 PM)\n%R     10:33                            24-hour hour and minute; same as %H:%M\n%s     1637606035                       seconds since 1970-01-01 00:00:00 UTC\n%S     55                               second (00..60)\n%t                                      a tab\n%T     10:33:55                         time; same as %H:%M:%S\n%u     1                                day of week (1..7); 1 is Monday\n%U     47                               week number of year, with Sunday as first day of week (00..53)\n%V     47                               ISO week number, with Monday as first day of week (01..53)\n%w     1                                day of week (0..6); 0 is Sunday\n%W     47                               week number of year, with Monday as first day of week (00..53)\n%x     11/22/2021                       locale's date representation (e.g., 12/31/99)\n%X     10:33:55 AM                      locale's time representation (e.g., 23:13:48)\n%y     21                               last two digits of year (00..99)\n%Y     2021                             year\n%z     -0800                            +hhmm numeric time zone (e.g., -0400)\n%:z    -08:00                           +hh:mm numeric time zone (e.g., -04:00)\n%::z   -08:00:00                        +hh:mm:ss numeric time zone (e.g., -04:00:00)\n%:::z  -08                              numeric time zone with : to 
necessary precision (e.g., -04, +05:30)\n%Z     PST                              alphabetic time zone abbreviation (e.g., EDT)\n
          "},{"location":"date/#bsd-date","title":"BSD date","text":""},{"location":"date/#show-adjusted-datetime_1","title":"Show adjusted date/time","text":"
          date -v-2m # two months ago\ndate -v+1H # one hour in the future\n
          "},{"location":"date/#convert-epoch-seconds-to-string-date_1","title":"Convert epoch seconds to string date","text":"
          date -r 1514308711\n
          "},{"location":"date/#see-also","title":"See also","text":"
          • ntp
          • ptp
          • time
          "},{"location":"dcfldd/","title":"dcfldd","text":"

          dcfldd is an advanced version of dd which is more useful than pv in some situations.

          One simple advantage dcfldd has over dd is a progress counter displayed by default, although even with dd you can see progress by pressing ctrl-t (BSD and macOS) or by using status=progress (GNU). However, if all you need is a progress display, pv is really your best bet.

          Another useful advantage dcfldd has is the ability to specify hex and ascii patterns, as well as the output of a command as the source. You may also specify multiple outputs.
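
          For example, here is a sketch that hashes the input while cloning it to two destinations at once (the device names are hypothetical):

          dcfldd if=/dev/source_disk hash=sha256 hashlog=/tmp/sha256.log of=/dev/target1 of=/dev/target2\n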

          "},{"location":"dcfldd/#examples","title":"Examples","text":""},{"location":"dcfldd/#wipe-a-hard-disk","title":"Wipe a hard disk","text":"

          This wipes hard disk /dev/rdisk9 with the binary pattern 10101010 (hex AA).

          dcfldd pattern=AAAA of=/dev/rdisk9\n
          "},{"location":"dcfldd/#resume-wiping-a-hard-disk","title":"Resume wiping a hard disk","text":"

          You can use seek to skip past the first N blocks on the destination disk. If you have to resume multiple times, perhaps the best option is to use bash's arithmetic expansion to add up the number of blocks written.

          $ dcfldd pattern=AAAA of=/dev/rdisk3\n3328 blocks (104Mb) written.^C\n3466+0 records in\n3465+0 records out\n$ dcfldd pattern=AAAA of=/dev/rdisk3 seek=3328\n2936064 blocks (91752Mb) written.^C\n2936132+0 records in\n2936131+0 records out\n$ dcfldd pattern=AAAA of=/dev/rdisk3 seek=$((3328+2936064))\n
          "},{"location":"dcfldd/#view-progress-with-pv","title":"View progress with pv","text":"

          pv is useful for seeing the transfer rate of the pipe, which can help diagnose continued success or lack thereof with failing hard disks.

          root# dcfldd pattern=AAAA | pv | dcfldd of=/dev/rdisk3 seek=$((4192000+504000+10240000+2936064))\n512 blocks (16Mb) written.22.1MiB 0:00:07 [21.7MiB/s] [   <=>\n1280 blocks (40Mb) written.43.5MiB 0:00:08 [21.5MiB/s] [    <=>\n2304 blocks (72Mb) written.79.4MiB 0:00:09 [35.9MiB/s] [      <=>\n3584 blocks (112Mb) written. 114MiB 0:00:10 [35.2MiB/s] [       <=>\n
          "},{"location":"dcfldd/#see-also","title":"See Also","text":"
          • dd
          • ddrescue
          • pv
          "},{"location":"dcgc/","title":"Docker Custodian","text":"

          \"Keep docker hosts tidy\" - https://github.com/Yelp/docker-custodian

          "},{"location":"dcgc/#examples","title":"Examples","text":""},{"location":"dcgc/#sync-script","title":"Sync script","text":"

          This script will pull the latest version of some containers and delete old containers and images

          #!/usr/bin/env bash\n\ncontainers=(\n  debian:latest\n  homeassistant/home-assistant:latest\n  linuxserver/unifi:latest\n  ubuntu:xenial\n  yelp/docker-custodian:latest\n  )\n\nfor c in \"${containers[@]}\" ; do\n  docker pull \"$c\"\n  keepers+=( \"--exclude-image\" \"$c\" )\ndone\n\ndocker run -ti -v /var/run/docker.sock:/var/run/docker.sock yelp/docker-custodian \\\n  dcgc \"${keepers[@]}\" --dangling-volumes --max-image-age 10w --max-container-age 5w\n
          "},{"location":"dcraw/","title":"dcraw","text":"

          \"dcraw decodes raw photos, displays metadata, and extracts thumbnails.\" - man dcraw

          "},{"location":"dcraw/#examples","title":"Examples","text":""},{"location":"dcraw/#identify-corrupt-dng-files","title":"Identify corrupt DNG files","text":"
          find /photos -iname '*.dng' | xargs dcraw -i > /dev/null 2> corrupt-files.txt\n
          "},{"location":"dd-wrt/","title":"dd-wrt","text":"

          \"DD-WRT is a Linux based alternative OpenSource firmware suitable for a great variety of WLAN routers and embedded systems.\" - https://www.dd-wrt.com

          "},{"location":"dd-wrt/#netgear-r7000","title":"Netgear R7000","text":"
          • https://www.myopenrouter.com/downloads/dd-wrt-r7000
          • http://www.desipro.de/ddwrt/K3-AC-Arm/
          "},{"location":"dd-wrt/#restart-script","title":"Restart script","text":"

          This device with dd-wrt has caused me so much trouble I have to monitor it and reboot it when it fails. Here is a short script to do that. I have this set up in cron to run every 5 minutes. The router will not reboot unless it's been up for 10 minutes.

          fping -q google.com || {\n  date '+%F %T%z Router is locked up. Restarting it.' | tee -a \"${HOME}/router_reboot.log\"\n  ssh root@192.168.1.1 'set -x ; uptime ; awk \"int(\\$1) < 600 { exit 1 }\" /proc/uptime && reboot ;' | tee -a \"${HOME}/router_reboot.log\"\n}\n
          "},{"location":"dd-wrt/#version-notes","title":"Version notes","text":""},{"location":"dd-wrt/#2015-12-24-v30-r28598-kongac","title":"2015-12-24 v3.0-r28598 kongac","text":"
          • Cannot edit DHCP reservations. Only can push and pop from the list, but cannot edit the added entries.
          "},{"location":"dd-wrt/#2015-12-03-v30-r28600m-kongac","title":"2015-12-03 v3.0-r28600M kongac","text":"
          • No observed differences from v3.0-r28598
          "},{"location":"dd-wrt/#2017-01-18-v30-r31160m-kongac","title":"2017-01-18 v3.0-r31160M kongac","text":"
          • General instability. Periodic lockups requiring power cycle to fix.
          • Potential weirdness playing with other wifi access points, unable to roam from this to Airport AC as I used to.
          "},{"location":"dd-wrt/#2017-03-10-v30-r31520m-kongac","title":"2017-03-10 v3.0-r31520M kongac","text":"
          • http://www.dd-wrt.com/phpBB2/viewtopic.php?p=1071890
          • Installed 2017-03-20
          • Experienced hard lock within 24 hours, had to power cycle to fix. Found posts in dd-wrt forum about other folks experiencing the same issue.
          "},{"location":"dd-wrt/#2017-03-26-v30-r31575m-kongac","title":"2017-03-26 v3.0-r31575M kongac","text":"
          • Installed on 2017-03-21
          • Appears to have fixed the hard lock-ups
          "},{"location":"dd-wrt/#2017-03-26-v30-r31780m-kongac","title":"2017-03-26 v3.0-r31780M kongac","text":"
          • Installed on 2017-03-31 via ddup --flash-latest. First attempt failed. Rebooted, and second attempt worked.
          • Never had any problems with this
          "},{"location":"dd-wrt/#2017-03-31-v30-r31800m-kongac","title":"2017-03-31 v3.0-r31800M kongac","text":"
          • Installed on 2017-04-01
          • 1 router lockup 3 days after installation
          • 2 router lockups on day 4
          "},{"location":"dd-wrt/#2017-04-08-v30-r31830m-kongac","title":"2017-04-08 v3.0-r31830M kongac","text":"
          • Installed on 2017-04-07
          • Locked up after 10 days
          "},{"location":"dd-wrt/#2017-04-16-v30-r31870m-kongac","title":"2017-04-16 v3.0-r31870M kongac","text":"
          • Installed on 2017-04-17
          • Router locked up after 4 days
          "},{"location":"dd-wrt/#2017-04-30-v30-r31920m-kongac","title":"2017-04-30 v3.0-r31920M kongac","text":"
          • Installed on 2017-05-02
          • Had periodic lockups
          "},{"location":"dd-wrt/#2017-05-11-v30-r31980m-kongac","title":"2017-05-11 v3.0-r31980M kongac","text":"
          • Installed on 2017-05-14
          • Experienced wifi problem, dhcp problem, and routing problems within 24 hours
          • Experienced the same problems within 12 hours after reboot
          • Uptime peak is 9 days
          "},{"location":"dd-wrt/#2017-06-03-v30-r32170m-kongac","title":"2017-06-03 v3.0-r32170M kongac","text":"
          • Installed on 2017-06-08
          • Sometimes wireless clients are unable to connect to the network.
          • Sometimes the router becomes inaccessible to clients even though it is still up.
          "},{"location":"dd-wrt/#2017-08-02-v30-r33000m-kongac","title":"2017-08-02 v3.0-r33000M kongac","text":"
          • Installed on 2017-08-05
          • Quite stable
          "},{"location":"dd-wrt/#2017-10-22-v30-r33575m-kongac","title":"2017-10-22 v3.0-r33575M kongac","text":"
          • Installed on 2017-10-22
          • Seeing lock-ups and wifi unavailability after 24 hours
          • Seeing more lock-ups and wifi unavailability hours after restart
          "},{"location":"dd-wrt/#2017-11-03-v30-r33655m-kongac","title":"2017-11-03 v3.0-r33655M kongac","text":"
          • Installed on 2017-11-04
          • WiFi instability observed within hours
          "},{"location":"dd-wrt/#2017-11-03-v30-r33675m-kongac","title":"2017-11-03 v3.0-r33675M kongac","text":"
          • 2017-12-25: Discovered this firmware had been installed
          "},{"location":"dd-wrt/#2018-01-03-v30-r34320m-kongac","title":"2018-01-03 v3.0-r34320M kongac","text":"
          • Installed on 2018-01-16
          • DD-WRT v3.0-r34320M kongac (c) 2017 NewMedia-NET GmbH
          • Release: 01/03/18
          • http://www.desipro.de/ddwrt/K3-AC-Arm/TEST/dd-wrt.v24-K3_AC_ARM_STD.bin
          • Frequent lock-ups
          "},{"location":"dd-wrt/#2018-02-11-v30-r34900m-kongac","title":"2018-02-11 v3.0-r34900M kongac","text":"
          • Installed on 2018-02-16
          • DD-WRT v3.0-r34900M kongac (c) 2018 NewMedia-NET GmbH
          • Release: 02/11/18
          • Very unstable wifi
          "},{"location":"dd-wrt/#2018-02-19-v30-r35030m-kongac","title":"2018-02-19 v3.0-r35030M kongac","text":"
          • Installed on 2018-02-24 (?)
          • DD-WRT v3.0-r35030M kongac (c) 2018 NewMedia-NET GmbH
          • Release: 02/19/18
          • This version cannot seem to stay up for more than 12 hours without losing connectivity.
          "},{"location":"dd-wrt/#2018-04-04-v30-r35550m-kongac","title":"2018-04-04 v3.0-r35550M kongac","text":"
          • Installed on 2018-04-04
          • DD-WRT v3.0-r35550M kongac (03/28/18)
          • Still having stability problems, didn't make it 2h before needing a reboot.
          "},{"location":"dd/","title":"dd","text":"

          Disk Dump, used for cloning disks, wiping data, copying blocks of data.

          "},{"location":"dd/#tips-and-tricks","title":"Tips and Tricks","text":""},{"location":"dd/#write-random-data","title":"Write random data","text":"
          dd if=/dev/urandom of=/dev/hda\n
          "},{"location":"dd/#write-zeros","title":"Write zeros","text":"
          dd if=/dev/zero of=/dev/hda\n
          "},{"location":"dd/#wipe-a-failed-disk","title":"Wipe a failed disk","text":"

          If you can't use shred or ddrescue, this is a very slow but portable alternative

          i=0\nwhile true ; do\n  echo \"Writing block $i\"\n  dd if=/dev/zero of=/dev/sda count=1 bs=1 seek=\"$i\"\n  let i=i+1\ndone\n
          "},{"location":"dd/#wipe-first-and-last-1g-of-a-hard-disk","title":"Wipe first and last 1G of a hard disk","text":"
          # blockdev --getsz reports the device size in 512-byte sectors\ndd bs=1M count=1024 if=/dev/zero of=/dev/sdx\ndd bs=512 count=2097152 if=/dev/zero of=/dev/sdx seek=$(( $(blockdev --getsz /dev/sdx) - 2097152 ))\n
          "},{"location":"dd/#see-also","title":"See Also","text":"
          • dcfldd
          • ddrescue
          • pv
          "},{"location":"ddrescue/","title":"ddrescue","text":"

          \"GNU ddrescue - Data recovery tool. Copies data from one file or block device to another, trying to rescue the good parts first in case of read errors.\" - man ddrescue

          There are actually two tools called ddrescue: dd_rescue and gddrescue. gddrescue is the best.

          • Software home page - http://www.gnu.org/software/ddrescue/
          • Instruction Manual - http://www.gnu.org/software/ddrescue/manual/ddrescue_manual.html
          "},{"location":"ddrescue/#examples","title":"Examples","text":""},{"location":"ddrescue/#attempt-to-mirror-an-entire-hard-disk","title":"Attempt to mirror an entire hard disk","text":"
          ddrescue -f -n --min-read-rate=500000 /dev/source_disk /dev/target_disk ~/ddrescue.log\n

          This uses the minimum read rate to skip sectors that may be bad on the input device.

          "},{"location":"ddrescue/#wipe-a-hard-disk-and-log-bad-sectors","title":"Wipe a hard disk and log bad sectors","text":"
          sudo ddrescue --force /dev/zero /dev/disk/by-id/ata-foo ~/ddrescue-ata-foo.log\n

          You can re-run this exact same command to resume the wipe of a hard disk.

          "},{"location":"ddrescue/#attempt-to-continue-the-mirror-of-a-hard-disk","title":"Attempt to continue the mirror of a hard disk","text":"
          ddrescue -f -n -A /dev/source_disk /dev/target_disk ~/ddrescue.log\n
          "},{"location":"ddrescue/#wipe-the-good-sectors-of-a-failing-disk","title":"Wipe the good sectors of a failing disk","text":"

          This requires a valid rescue log file mapping out the good sectors that were recovered.

          ddrescue --fill=+ --force /dev/zero /dev/bad_drive ~/bad_drive_wipe.log\n
          "},{"location":"ddrescue/#see-also","title":"See Also","text":"
          • dcfldd
          • dd
          • pv
          "},{"location":"deb/","title":"deb","text":"

          Notes and tips about working with the .deb package format.

          "},{"location":"deb/#examples","title":"Examples","text":""},{"location":"deb/#show-packages-that-can-be-updated","title":"Show packages that can be updated","text":"
          apt list --upgradable\n
          "},{"location":"deb/#show-installed-package-versions","title":"Show installed package versions","text":"
          ## -V = sort by version (GNU sort only)\n/usr/bin/dpkg-query -W --showformat '${Package} ${Version} ${Status}\\n' | sort -k2 -V | column -t\n
          "},{"location":"deb/#list-files-in-packages-that-are-available-in-configured-repositories","title":"List files in packages that are available in configured repositories","text":"
          apt-file list package_name\n
          "},{"location":"deb/#find-a-file-available-inside-packages-that-are-available-in-configured-repositories","title":"Find a file available inside packages that are available in configured repositories","text":"
          apt-file find libmysqlclient.so\n
          "},{"location":"deb/#show-a-list-of-packages-that-are-installed-or-have-left-things-on-the-filesystem","title":"Show a list of packages that are installed or have left things on the filesystem","text":"
          dpkg --list\n
          "},{"location":"deb/#show-which-package-a-file-came-from","title":"Show which package a file came from","text":"
          dpkg -S /bin/bash\n
          "},{"location":"deb/#list-files-in-package-that-is-installed","title":"List files in package that is installed","text":"
          dpkg-query -L klibc-utils\n
          "},{"location":"deb/#list-files-in-package-that-is-not-installed","title":"List files in package that is not installed","text":"
          dpkg -c package.deb\n
          "},{"location":"deb/#list-packages-available-in-the-repository","title":"List packages available in the repository","text":"
          apt-cache dumpavail\n
          "},{"location":"deb/#show-information-about-a-package","title":"Show information about a package","text":"
          apt-cache show coreutils\n
          "},{"location":"deb/#show-reverse-dependencies-of-a-package","title":"Show reverse dependencies of a package","text":"
          apt-cache rdepends ec2-api-tools\n
          "},{"location":"deb/#show-reverse-dependencies-of-installed-package","title":"Show reverse dependencies of installed package","text":"
          aptitude why openjdk-7-jre-headless\n
          "},{"location":"deb/#re-install-many-packages-and-validate-that-they-were-re-installed","title":"Re-install many packages and validate that they were re-installed","text":"

          When apt-get install --reinstall isn't good enough, this is the next option. This should not be done unless you're willing to reload the system if it fails.

          ## Generate a list of packages\ndpkg -l | grep 'python-' > dpkg-l-python ;\n\n## Remove and re-install each individual package one at a time\nawk '{print $2,$3}' dpkg-l-python |\n  while read -r p v ; do\n    echo \"Working on $p version $v\" ;\n    sudo dpkg --purge --force-depends \"$p\" ;\n    sudo apt-get install \"${p}=${v}\" ;\n  done ;\n\n## Validate that all packages are re-installed with the right version\nawk '{print $2,$3}' dpkg-l-python |\n  while read -r p v ; do\n    dpkg -l \"$p\" | grep \"$v\" || echo \"ERROR: Problem with $p $v\" ;\n  done ;\n
          "},{"location":"deb/#links","title":"Links","text":"
          • https://wiki.debian.org/RPM
          "},{"location":"debian/","title":"debian","text":"

          \"Debian is a free operating system, developed and maintained by the Debian project.\" - https://www.debian.org/intro

          Debian is a solid linux distribution that serves as the upstream base for many other linux distributions, including Ubuntu and Raspberry Pi OS.

          "},{"location":"defaults/","title":"defaults","text":"

          defaults allows users to read, write, and delete Mac OS X user defaults from a command-line shell.

          "},{"location":"defaults/#examples","title":"Examples","text":""},{"location":"defaults/#set-some-boolean-values","title":"Set some boolean values","text":"
          defaults write NSGlobalDomain     NSAutomaticQuoteSubstitutionEnabled -bool false\ndefaults write NSGlobalDomain     NSAutomaticDashSubstitutionEnabled  -bool false\ndefaults write com.apple.TextEdit SmartQuotes                         -bool false\ndefaults write com.apple.TextEdit SmartDashes                         -bool false\n
          "},{"location":"defaults/#add-a-value-an-array-to-a-dict","title":"Add a value (an array) to a dict","text":"
          FILENAME=\"${HOME}/Library/Preferences/com.googlecode.iterm2.plist\"\ndefaults write \"${FILENAME}\" GlobalKeyMap -dict-add 0xf703-0x280000 '{ Action = 10; Text = f; }'\ndefaults write \"${FILENAME}\" GlobalKeyMap -dict-add 0xf702-0x280000 '{ Action = 10; Text = b; }'\n
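          "},{"location":"defaults/#read-or-delete-a-value","title":"Read or delete a value","text":"

          The same keys can be read back or removed, for example to check and then undo one of the values written above:

          defaults read com.apple.TextEdit SmartQuotes\ndefaults delete com.apple.TextEdit SmartQuotes\n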
          "},{"location":"devops/","title":"devops","text":""},{"location":"devops/#devops-lifecycle","title":"DevOps lifecycle","text":"
          • Plan - Jira, Github tickets, Gitlab Milestones
          • Code - Git, Eclipse, pycharm
          • Build - Ant, Maven, Gradle
          • Test - Selenium, JUnit
          • Release - Gitlab CI, Jenkins
          • Deploy - Puppet, Chef, Ansible, Saltstack
          • Operate - Linux, Chrome, iOS
          • Monitor - Sensu, Splunk, Nagios, Kibana
          "},{"location":"devops/#links","title":"Links","text":"
          • The 9 Circles of Deployment Hell
          • Raft: Understandable distributed consensus - Good visualization of the Raft algorithm.
          • https://aws.amazon.com/devops/what-is-devops/
          • https://www.reddit.com/r/devops/comments/aqc8cj/interested_in_getting_into_devops_start_here/
          • https://landing.google.com/sre/books/
          • https://web.devopstopologies.com/ Team topologies, not technology stack topologies.
          "},{"location":"dhcp/","title":"DHCP","text":"

          Dynamic Host Configuration Protocol

          "},{"location":"dhcp/#isc-dhcpd","title":"isc dhcpd","text":"

          https://www.isc.org/downloads/dhcp/

          "},{"location":"dhcp/#test-configuration-file","title":"Test configuration file","text":"
          dhcpd3 -t\n
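
          To test a candidate config at a non-default location (the path here is hypothetical), use -cf:

          dhcpd -t -cf /etc/dhcp/dhcpd.conf.new\n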
          "},{"location":"dhcp/#test-lease-file","title":"Test lease file","text":"
          dhcpd3 -T\n
          "},{"location":"dhcp/#handshake-process","title":"Handshake Process","text":"
          Apr 21 15:33:00 ops1prod dhcpd: DHCPDISCOVER from 08:9e:01:8b:18:94 via eth0\nApr 21 15:33:01 ops1prod dhcpd: DHCPOFFER on 10.1.14.127 to 08:9e:01:8b:18:94 via eth0\nApr 21 15:33:01 ops1prod dhcpd: DHCPREQUEST for 10.1.225.43 from 00:1e:0b:bc:8a:c4 via eth1\nApr 21 15:33:01 ops1prod dhcpd: DHCPACK on 10.1.225.43 to 00:1e:0b:bc:8a:c4 via eth1\n
          "},{"location":"dhcpd.conf/","title":"dhcpd.conf","text":"

          This is the configuration file for the linux dhcp daemon.

          "},{"location":"dhcpd.conf/#example-config","title":"Example config","text":"
          ###\n## Pikachu dhcpd.conf for redacted ny1 facility.\n###\n\n\nddns-update-style none ;\ndeny bootp ;    #default\nauthoritative ;\n\noption domain-name \"ny1.redacted.net\" ;\noption subnet-mask 255.255.255.0 ;\noption domain-name-servers 10.8.5.220 ;\noption ntp-servers ntp.ny1.redacted.net, pool.ntp.org ;\noption time-servers ntp.ny1.redacted.net, pool.ntp.org ;\n#option time-offset -25200 ;\noption time-offset -28800 ;\ndefault-lease-time 28800 ;\nmax-lease-time 7200 ;\n\noption boot-server code 66 = string ;\n\n## APC Cookie bullshit\noption vendor-encapsulated-options 01:04:31:41:50:43;\n\n### Old Net 188\nsubnet 10.172.188.0 netmask 255.255.255.0\n{\n    option routers 10.172.188.1 ;\n\n    range   10.172.188.3    10.172.188.195 ;\n#   host winserv    {   hardware ethernet 00:11:2f:37:a2:34 ;   fixed-address 10.172.188.196 ;  }\n    range   10.172.188.197  10.172.188.201 ;\n#   host clicktracks    {   hardware ethernet 00:13:20:5B:EF:2A ;   fixed-address 10.172.188.202 ;  }\n    host redactedbeast  {   hardware ethernet 00:30:48:2A:E3:1E ;   fixed-address 10.172.188.203 ;  }\n    range   10.172.188.204  10.172.188.216 ;\n#   host carnage    {   hardware ethernet 00:13:20:5B:E5:B1 ;   fixed-address 10.172.188.217 ;  }\n#   host sipura-2   {   hardware ethernet 00:0E:08:FA:AB:A4 ;   fixed-address 10.172.188.222 ;  }\n    range   10.172.188.226  10.172.188.254 ;\n}\n\n## Services - 10.8.1.0/24 - VLAN 101\n##\n##    There should be NO DHCP RANGE IN THIS SUBNET.\n##    This will keep us in order with what devices are using\n##    what addresses.  Only use pre-defined host-addresses.\n##\nsubnet 10.8.1.0 netmask 255.255.255.0\n{\n    option routers 10.8.1.1 ;\n    host terastation  {   hardware ethernet 00:0d:0b:7a:cd:ea ;   fixed-address 10.8.1.11 ;   }\n    host switchvox    {   hardware ethernet 00:13:d4:e8:c1:2d ;   fixed-address 10.8.1.12 ;   }\n    host eng-svn-1    {   hardware ethernet 00:0C:29:7E:68:DB ;   fixed-address 10.8.1.233 ;  }\n    host eng-esx-1    {   hardware ethernet 00:50:56:47:7e:bc ;   fixed-address 10.8.1.234 ;  }\n}\n\n################\n##\n##  This information is incomplete, make sure to check IP's for usage before assigning them, and double check the Wiki:\n##  https://it.redacted.com/wiki/index.php?title=IP_Addresses_-_ny1#Load_Test_-_10.8.2.0.2F24_-_VLAN_102\n##\n################\n\n## Load Test - 10.8.2.0/24 - VLAN 102\n##\nsubnet 10.8.2.0 netmask 255.255.255.0\n{\n    filename \"pxelinux.0\" ;\n    option subnet-mask 255.255.255.0 ;\n    option broadcast-address 10.8.2.255 ;\n    option routers 10.8.2.1 ;\n    next-server 10.8.2.240 ;\n    range 10.8.2.100 10.8.2.199 ;\n\n    host honey-b-drac   {   hardware ethernet 00:18:8B:40:DC:78 ;   fixed-address 10.8.2.19 ;   }\n## 10.8.2.30-39 reserved for Eng VMs\n    host eng-vm-01  {   hardware ethernet 00:0c:29:b9:3e:bb ;   fixed-address 10.8.2.31 ;   }\n    host eng-vm-25  {   hardware ethernet 00:0c:29:00:35:66 ;   fixed-address 10.8.2.35 ;   }\n    host eng-vm-26  {   hardware ethernet 00:0c:29:69:b2:b9 ;   fixed-address 10.8.2.36 ;   }\n    host eng-vm-27  {   hardware ethernet 00:0c:29:c5:e3:59 ;   fixed-address 10.8.2.37 ;   }\n    host eng-vm-28  {   hardware ethernet 00:0c:29:a0:8b:a4 ;   fixed-address 10.8.2.38 ;   }\n    host eng-vm-29  {   hardware ethernet 00:0c:29:bd:42:7b ;   fixed-address 10.8.2.39 ;   }\n}\n\n## 10.8.2.240-250 reserved for Puppet installs\n    host ion-vm     {   hardware ethernet 00:0c:29:d6:7b:90 ;   fixed-address 10.8.2.253 ;  }\n    host shinseivm  {   hardware 
ethernet 00:0c:29:1d:90:07 ;   fixed-address 10.8.2.252 ;  }\n    host star       {   hardware ethernet 00:03:ba:d9:50:1a ;   fixed-address 10.8.2.251 ;  }\n\n}\n\n## QA Test - 10.8.3.0/24 - VLAN 103\nsubnet 10.8.3.0 netmask 255.255.255.0\n{\n    range 10.8.3.10 10.8.3.200 ;\n    option routers 10.8.3.1 ;\n}\n\n## Professional Services - 10.8.4.0/24 - VLAN 104\nsubnet 10.8.4.0 netmask 255.255.255.0\n{\n    option routers 10.8.4.1 ;\n    range 10.8.4.10 10.8.4.200 ;\n    host caracal        {   hardware ethernet 00:13:72:58:7C:C9 ;   fixed-address 10.8.4.201 ;  }\n    host caracal-drac   {   hardware ethernet 00:13:72:57:86:33 ;   fixed-address 10.8.4.202 ;  }\n}\n\n## IT - 10.8.5.0/24 - VLAN 105\nsubnet 10.8.5.0 netmask 255.255.255.0\n{\n    option routers 10.8.5.1 ;\n    option netbios-name-servers 10.8.5.220 ;\n    option netbios-node-type 8 ;\n    range 10.8.5.10 10.8.5.99 ;\n    host demo-esx-1-drac    {   hardware ethernet 00:1e:4f:25:87:f9 ;   fixed-address 10.8.5.121 ;  }\n    host pikachu        {   hardware ethernet 00:13:46:78:25:20 ;   fixed-address 10.8.5.220 ;  }\n    host mammoth        {   hardware ethernet 00:30:48:20:E4:C2 ;   fixed-address 10.8.5.221 ;  }\n    host hq-esx-1       {   hardware ethernet 00:50:56:45:d1:07 ;   fixed-address 10.8.5.222 ;  }\n    host hq-esx-1-drac  {   hardware ethernet 00:1e:4f:1d:37:87 ;   fixed-address 10.8.5.223 ;  }\n    host eng-esx-2      {   hardware ethernet 00:1e:4f:1d:37:87 ;   fixed-address 10.8.5.223 ;  }\n    host eng-esx-1-drac {   hardware ethernet 00:1e:c9:de:1c:5b ;   fixed-address 10.8.5.224 ;  }\n}\n\n## VPN - 10.8.6.0/24 - VLAN 106\nsubnet 10.8.6.0 netmask 255.255.255.0\n{\n    range 10.8.6.10 10.8.6.200 ;\n    option routers 10.8.6.1 ;\n}\n\n## DMZ - 10.8.6.0/24 - VLAN 107\nsubnet 10.8.7.0 netmask 255.255.255.0\n{\n    range 10.8.7.100 10.8.7.200 ;\n    option routers 10.8.7.1 ;\n    host engineering-ext-1  {   hardware ethernet 00:13:72:53:a3:78 ;   fixed-address 10.8.7.10 ;   }\n}\n\n## Jail network - 10.8.9.0/24 - VLAN 109\nsubnet 10.8.9.0 netmask 255.255.255.0\n{\n    range 10.8.9.100 10.8.9.199 ;\n    option routers 10.8.9.1 ;\n}\n\n## Wireless - 10.8.10.0/24 - VLAN 110\nsubnet 10.8.10.0 netmask 255.255.255.0\n{\n    range 10.8.10.10 10.8.10.200 ;\n    option routers 10.8.10.1 ;\n\n    host linksys-ap-1   {   hardware ethernet 00:0C:41:17:E2:AD ;   fixed-address 10.8.10.201 ; }\n    host linksys-ap-2   {   hardware ethernet 00:0C:41:D7:2D:53 ;   fixed-address 10.8.10.202 ; }\n    host linksys-ap-3   {   hardware ethernet 00:0C:41:DE:23:D8 ;   fixed-address 10.8.10.203 ; }\n    host linksys-ap-4   {   hardware ethernet 00:18:F8:26:D8:46 ;   fixed-address 10.8.10.204 ; }\n    host linksys-ap-5   {   hardware ethernet 00:18:F8:26:D8:51 ;   fixed-address 10.8.10.207 ; }\n    host airport-1      {   hardware ethernet 00:14:51:77:76:4E ;   fixed-address 10.8.10.205 ; }\n    host airport-2      {   hardware ethernet 00:14:51:77:8F:F0 ;   fixed-address 10.8.10.206 ; }\n}\n\n## Polycom phone boot configuration\ngroup {\n    default-lease-time 600 ;\n    max-lease-time 600 ;\n\n    option boot-server \"ftp://sip:sip@phoneboot\" ;\n\n    host 01-c1-b6   {   hardware ethernet 00:04:f2:01:c1:b6 ;   }\n    host 01-71-89   {   hardware ethernet 00:04:f2:01:71:89 ;   }\n    host 01-b6-e1   {   hardware ethernet 00:04:f2:01:b6:e1 ;   }\n    host 01-be-91   {   hardware ethernet 00:04:f2:01:be:91 ;   }\n    host e3-2a-f2   {   hardware ethernet 00:04:f2:e3:2a:f2 ;   }\n}\n
          "},{"location":"diagrams/","title":"diagrams","text":"

          Sometimes a diagram is the best way to communicate an idea.

          "},{"location":"diagrams/#links","title":"Links","text":"
          • https://d2lang.com / https://github.com/terrastruct/d2
          • https://victorbjorklund.com/build-diagrams-as-code-with-d2-d2lang: D2 tutorial
          • https://mermaid.js.org / https://github.com/mermaid-js/mermaid: Diagramming language. GitHub supports rendering mermaid diagrams in markdown.
          • https://diagrams.mingrammer.com / https://github.com/mingrammer/diagrams: Python diagrams as code, backed by Graphviz.
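          To give a feel for the diagrams-as-code workflow the links above describe, here is a minimal sketch using the d2 CLI (assuming it is installed; the node names are arbitrary):

          cat > example.d2 <<'EOF'\nserver -> database: writes\nserver -> cache: reads\nEOF\nd2 example.d2 example.svg\n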
          "},{"location":"dig/","title":"dig","text":"

          dig (domain information groper) is a flexible tool for interrogating DNS name servers. Its syntax is a bit cryptic and does not follow standard option conventions.

          "},{"location":"dig/#usage","title":"Usage","text":""},{"location":"dig/#simple-usage","title":"Simple usage","text":"
          dig gwos.com\n
          "},{"location":"dig/#show-only-the-answer-section","title":"Show only the Answer section","text":"
          dig +noall +answer \"zoosk.com\"\n
          "},{"location":"dig/#show-only-the-returned-ip-address","title":"Show only the returned IP Address","text":"
          dig +short myip.opendns.com @resolver1.opendns.com\n
          "},{"location":"dig/#trace-a-query-from-the-root-servers","title":"Trace a query from the root servers","text":"

          This is the most accurate way to see a DNS record as it will appear to anybody else on the internet whose resolver has not already cached it, and it will show you all the delegation steps involved in the resolution.

          dig +trace yelp.com\n

          If this doesn't give you a full trace, specify an alternate DNS server:

          dig @8.8.8.8 +trace renovo.auto\n
          "},{"location":"dig/#query-multicast-dns-for-a-hostname","title":"Query multicast DNS for a hostname","text":"
          dig gibson.local @224.0.0.251 -p 5353\n
          "},{"location":"dig/#do-a-reverse-lookup-against-multicast-dns","title":"Do a reverse lookup against multicast DNS","text":"
          dig -x 10.31.33.7 @224.0.0.251 -p 5353\n
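          "},{"location":"dig/#query-a-specific-record-type","title":"Query a specific record type","text":"

          Any record type can be given before the name being queried. For example, to show just the MX records for a domain:

          dig +short MX gmail.com\n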
          "},{"location":"dmidecode/","title":"dmidecode","text":"

          Tool for listing hardware information by decoding the DMI (SMBIOS) table into human readable form.

          "},{"location":"dmidecode/#examples","title":"Examples","text":""},{"location":"dmidecode/#show-system-serial-number","title":"Show system serial number","text":"
          sudo dmidecode -t system | grep Serial\nsudo dmidecode -s system-serial-number\n
          "},{"location":"dmidecode/#show-memory-info-including-max-installable","title":"Show memory info including max installable","text":"
          sudo dmidecode -t memory\n
          "},{"location":"dmidecode/#show-bios-version","title":"Show bios version","text":"

          You may need to grep for a different string, but even then it doesn't always show the info because not all machines support this.

          sudo dmidecode -t bios | grep -i revision\n
          "},{"location":"dmidecode/#shower-power-supply-information","title":"Shower power supply information","text":"

          This doesn't always work. Some power supplies are not supported.

          dmidecode -t 39\n
          "},{"location":"dmidecode/#see-also","title":"See Also","text":""},{"location":"dmidecode/#show-all-keywords-and-their-values","title":"Show all keywords and their values:","text":"
          ## -s without a keyword lists all keywords\n## -s with a keyword shows only the value of that keyword\ndmidecode -s |& grep '^  ' | while read -r X ; do echo $X: $(sudo dmidecode -s $X) ; done ;\n
          • lshw - list hardware
          "},{"location":"dns/","title":"DNS","text":"

          Domain Name System

          \"The Domain Name System (DNS) is a hierarchical decentralized naming system for computers, services, or any resource connected to the Internet or a private network.\"

          • https://en.wikipedia.org/wiki/List_of_DNS_record_types
          • https://en.wikipedia.org/wiki/Category:Application_layer_protocols
          • https://miek.nl/2009/july/31/dns-classes/
          "},{"location":"dns/#query-system-resolvers","title":"Query system resolvers","text":""},{"location":"dns/#macos","title":"MacOS","text":"
          dscacheutil -q host -a name github.com\n
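          "},{"location":"dns/#linux","title":"Linux","text":"

          On Linux hosts running systemd-resolved, a rough equivalent is:

          resolvectl query github.com\n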
          "},{"location":"dns/#tips","title":"Tips","text":"
          • If you need a public address that resolves to localhost, you can use localtest.me
          • If you need a globally resolvable hostname that points to any arbitrary IP address, use nip.io
          "},{"location":"docker/","title":"docker","text":"

          \"An open source project to pack, ship and run any application as a lightweight container.\" - https://www.docker.com

          "},{"location":"docker/#naming-inconsistencies","title":"Naming inconsistencies","text":"

          As of 2024, there are a bunch of frustrating naming inconsistencies with Docker and OCI images. The docker image tag documentation shows the \"image name\" as being broken down into the following components: [registry[:port]/][namespace/]repository[:tag]. Unfortunately this does not harmonize with what is used in various tools, including the official Docker tools.

          For example, the docker command line shows the full \"image name\" minus the \"tag\" component if you ask it for the \"repository\":

          $ docker images quay.io:443/prometheus/busybox:glibc --format=json | jq -r .Repository\nquay.io:443/prometheus/busybox\n

          And the docker python module shows the entire \"image name\" when you ask it for the \"tag\":

          >>> client.images.get('quay.io:443/prometheus/busybox:glibc').tags\n['quay.io/prometheus/busybox:glibc']\n

          Other documents list other definitions. I think that the community needs to get this terminology straight in order for us to build consistent, resilient software. There are some discussions open about this topic, but it does not seem to be high priority:

          • https://github.com/opencontainers/artifacts/issues/32#issuecomment-954898503
          • https://github.com/opencontainers/distribution-spec/issues/279
          "},{"location":"docker/#docker-desktop","title":"Docker Desktop","text":"

          In August 2021, Docker pulled a license bait and switch with Docker Desktop. If you want a Docker Desktop alternative on macOS that has a docker command but doesn't use Docker Desktop, you can do the following:

          brew install hyperkit minikube docker kubernetes-cli\nminikube config set driver hyperkit\nminikube start\neval $(minikube docker-env)\n

          This will give you a docker command that targets the container runtime inside minikube, and is actually a great dev environment.

          Alternatively, if you have a linux machine that runs docker handy, you can skip the minikube stuff and export DOCKER_HOST=ssh://linux-docker-host to launch containers on the linux dockerd. This has the caveats that you cannot mount local filesystems into the remote docker host, and if you want to use it for building, your project directory will be sent over the network to the remote docker host.

          "},{"location":"docker/#examples","title":"Examples","text":""},{"location":"docker/#show-help-on-the-run-command","title":"Show help on the run command","text":"
          docker help run\n
          "},{"location":"docker/#show-the-history-of-an-image-and-count-its-layers","title":"Show the history of an image, and count its layers","text":"
          docker history ubuntu:bionic | nl -ba -v0\n
          "},{"location":"docker/#run-the-docker-command-against-a-remote-host","title":"Run the docker command against a remote host","text":"

          Using this method can save your mac a lot of resources, and easily gives a laptop access to a much larger machine's resources. Not all features work, such as bind mounts from the local machine.

          DOCKER_HOST=ssh://some-linux-machine docker ps\n
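          If you target a remote host regularly, a docker context saves retyping the environment variable. A sketch, assuming a reasonably recent docker CLI and that some-linux-machine is reachable over ssh:

          ## the context stores the remote host so you don't need DOCKER_HOST in every shell\ndocker context create some-linux-machine --docker \"host=ssh://some-linux-machine\"\ndocker context use some-linux-machine\ndocker ps\n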
          "},{"location":"docker/#run-a-docker-image-in-an-interactive-shell","title":"Run a docker image in an interactive shell","text":"
          docker run -i -t ubuntu:focal bash\n
          • -i, --interactive
          • -t, --tty

          https://docs.docker.com/engine/reference/commandline/run

          "},{"location":"docker/#get-a-bash-terminal-on-a-running-docker-container","title":"Get a bash terminal on a running docker container","text":"
          docker exec -i -t running-container-name bash\n
          "},{"location":"docker/#determine-if-you-are-running-inside-docker","title":"Determine if you are running inside docker","text":"

          Exit code will be 0 in docker:

          grep -q docker /proc/1/cgroup\n
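          On hosts using cgroup v2, the cgroup file may not mention docker at all. Checking for the /.dockerenv file that the docker engine creates inside containers is a common alternative:

          test -f /.dockerenv && echo \"inside docker\"\n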
          "},{"location":"docker/#run-a-docker-image-and-assign-it-a-hostname-and-a-docker-name","title":"Run a docker image and assign it a hostname, and a docker name","text":"
          docker run --hostname=somehost1 --name=\"host1\" -ti centos:centos6 bash\n

          The hostname shows up to the OS. The docker name can be used to interact with the container:

          docker ps --filter name=host1\n
          "},{"location":"docker/#show-a-complete-vertically-oriented-list-of-docker-processes","title":"Show a complete vertically oriented list of docker processes","text":"

          docker ps has no --json flag, but you can work around that with Go template formatting.

          docker ps --no-trunc --format='{{ . | json }}' | jq -S .\n

          This trick also works with docker images, which also lacks a --json arg.

          "},{"location":"docker/#show-a-table-of-docker-containers-sorted-by-space-used-in-the-container-not-by-the-image","title":"Show a table of docker containers sorted by space used in the container (not by the image)","text":"
          $ docker ps --format=\"{{.Size}}\\t{{.ID}}\\t{{.Image}}\\t{{.Names}}\" |\nsort -h -k1 |\ncolumn -t\n0B      (virtual  101MB)  2f7ba92f1e66  wan-connection-logger         wan-connection-logger\n0B      (virtual  413MB)  21d474032755  gitlab/gitlab-runner          gitlab-runner\n2B      (virtual  392MB)  c15b2ad88901  mariadb:10.4-bionic           mariadb\n312kB   (virtual  710MB)  ccee541f32c2  jacobalberty/unifi            unifi\n1.45MB  (virtual  2.3GB)  a9a60f4c6efc  homeassistant/home-assistant  home-assistant\n239MB   (virtual  412MB)  5d9f9cc3b46a  plexinc/pms-docker:plexpass   plex\n
          "},{"location":"docker/#run-a-container-with-a-tcp-port-map","title":"Run a container with a tcp port map","text":"

          This maps port 18022 of the host to 22 of the guest.

          docker run -ti -p 18022:22 centos:7 bash\n
          "},{"location":"docker/#run-a-container-with-a-shared-directory","title":"Run a container with a shared directory","text":"

          We are specifying :ro to make this a read-only mount. Default is rw.

          docker run -d -v \"$HOME/www/:/var/www/html/:ro\" php:5.4.35-apache\n
          "},{"location":"docker/#show-configuration-parameters-for-a-container","title":"Show configuration parameters for a container","text":"

          This shows more things that you can configure, like DNS, DNS search, etc..

          docker inspect host1\n
          "},{"location":"docker/#show-what-has-changed-since-a-container-was-started","title":"Show what has changed since a container was started","text":"
          docker diff \"$some_running_image\"\n

          https://docs.docker.com/engine/reference/commandline/diff

          "},{"location":"docker/#view-the-terminal-scrollback-of-a-a-container","title":"View the terminal scrollback of a a container","text":"
          docker logs \"$some_running_image\"\n
          "},{"location":"docker/#list-all-containers-including-ones-that-have-been-stopped","title":"List all containers, including ones that have been stopped","text":"

          This allows you to restart previous instances of a container.

          docker ps -a\n

          https://docs.docker.com/engine/reference/commandline/ps

          "},{"location":"docker/#start-a-named-container","title":"Start a named container","text":"

          By default containers don't restart when your system restarts, so you have to start them manually.

          docker start ttrss\n
          "},{"location":"docker/#stop-a-named-container","title":"Stop a named container","text":"
          docker stop ttrss\n
          "},{"location":"docker/#update-the-restart-policy-on-a-running-container","title":"Update the restart policy on a running container","text":"
          docker update --restart=unless-stopped \"$some_running_image\"\n
          "},{"location":"docker/#delete-all-unused-stuff","title":"Delete all unused stuff","text":"

          This will be interactive.

          docker system prune\n
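          By default this leaves volumes and tagged images alone. To reclaim more space you can add flags, but be aware that this deletes considerably more:

          ## --all removes all images not used by a container, not just dangling ones\n## --volumes also removes unused volumes\ndocker system prune --all --volumes\n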
          "},{"location":"docker/#delete-old-containers","title":"Delete old containers","text":"

          https://docs.docker.com/engine/reference/commandline/rm

          You may have to remove -r from xargs on non-GNU systems.

          docker ps -a --format=\"{{.ID}} {{.Status}}\" |\nawk '$2 == \"Exited\" && $5 ~ /(days|weeks|months)/ {print $1}' |\nxargs -r docker rm\n

          A more systematic approach is to use Docker Custodian.

          "},{"location":"docker/#delete-old-images","title":"Delete old images","text":"

          This is safe to run even while containers you care about are running, as docker will not delete any images that are attached to running containers.

          docker rmi $(docker images | grep '^<none>' | awk '{print $3}')\n

          https://docs.docker.com/engine/reference/commandline/rmi

          A more systematic approach is to use Docker Custodian.

          "},{"location":"docker/#show-processes-running-inside-all-docker-containers","title":"Show processes running inside all docker containers","text":"

          On hosts without cgroup integration, run:

          pgrep docker | xargs -n1 pstree\n
          "},{"location":"docker/#show-a-list-of-tags-for-a-given-image-on-docker-hub","title":"Show a list of tags for a given image on docker hub","text":"
          $ curl --silent -f -lSL \"https://index.docker.io/v1/repositories/org-name/image-name/tags\" |\njq '.[].name'\n\"latest\"\n\"0.11.1\"\n\"1.3.0\"\n\"1.5.0\"\n\"2.0.0\"\n

          Quay has a swagger console to discover more API uses:

          $ curl --silent -f -lSL \"https://quay.io/api/v1/repository/org-name/image-name/tag\" |\njq '.tags[].name'\n\"2.0.0\"\n\"latest\"\n\"1.3.0\"\n
          "},{"location":"docker/#see-also","title":"See Also","text":"
          • https://www.docker.io: Main page
          • http://dockerfile.github.io: Trusted builds of FOSS software
          • https://registry.hub.docker.com: Public docker images
          • https://docs.docker.com/build/builders: How to build Dockerfiles
          • https://cloud.google.com/solutions/best-practices-for-building-containers
          • https://github.com/wagoodman/dive: A tool for exploring each layer in a docker image
          • https://hpcw.github.io: High Performance Container Workshop videos
          • https://github.com/regclient/regclient: Perform operations on OCI registries
          • https://github.com/oras-project/oras: CLI tool to work with arbitrary artifacts stored in OCI registries
          • https://github.com/containers/skopeo: \"Work with remote images registries - retrieving information, images, signing content\"
          • https://danielquinn.org/blog/developing-with-docker
          "},{"location":"document-query/","title":"document query tools","text":"

          Tools to query documents from the command line.

          "},{"location":"document-query/#links","title":"Links","text":"
          • https://github.com/BurntSushi/xsv: \"A fast CSV command line toolkit written in Rust.\"
          • https://github.com/TomWright/dasel: \"Select, put and delete data from JSON, TOML, YAML, XML and CSV files with a single tool.\" Also check out my notes on dasel
          • https://github.com/harelba/q: \"Run SQL directly on delimited files and multi-file sqlite databases.\" Also check out my notes on q
          • https://github.com/itchyny/gojq: \"Pure Go implementation of jq.\"
          • https://github.com/mgdm/htmlq: \"Like jq, but for HTML.\"
          • https://github.com/mikefarah/yq: \"yq is a portable command-line YAML, JSON, XML, CSV, TOML and properties processor.\"
          • https://github.com/jqlang/jq: \"Command-line JSON processor.\" Also check out my notes on jq.
          • https://github.com/johnkerl/miller: \"Miller is like awk, sed, cut, join, and sort for name-indexed data such as CSV, TSV, and tabular JSON\"
          • https://github.com/wwkimball/yamlpath: \"Command-line get/set/merge/validate/scan/convert/diff processors for YAML/JSON/Compatible data using powerful, intuitive, command-line friendly syntax.\"
          • https://github.com/simeji/jid: \"json incremental digger\" is an interactive json digging tool.
          • https://github.com/jmespath/jp: \"Command line interface to JMESPath - http://jmespath.org\" Also check out my notes on jmespath.
          • https://github.com/simonw/sqlite-utils: \"Python CLI utility and library for manipulating SQLite databases.\" Can directly import json, csv, and tsv files for querying in sqlite.
          "},{"location":"drone/","title":"Drone","text":"

          Drones, UAV (unmanned aerial vehicles), UAS (unmanned aerial systems). The notes here are focused on USA jurisdiction.

          Be aware that the FAA only regulates outdoor flying within the USA; its regulations do not apply when flying indoors or outside the USA. The FAA regulates US outdoor airspace starting at ground level, so if you are flying outdoors anywhere in American territory, you are flying in FAA regulated airspace.

          "},{"location":"drone/#glossary","title":"Glossary","text":""},{"location":"drone/#faa-terms","title":"FAA terms","text":"
          • LAANC: Low Altitude Authorization and Notifications Capability. You must request LAANC authorization when flying in controlled airspace. https://www.faa.gov/uas/getting_started/laanc (I live in controlled airspace so I have to do this every day I fly my drone at my house, and it doesn't always work when using the DJI controller 2. \ud83d\ude44)
          • TRUST: The Recreational UAS Safety Test. A certification that all drone pilots in USA are required to take. https://www.faa.gov/uas/recreational_flyers/knowledge_test_updates
          • UAV: Unmanned Aerial Vehicle. The actual vehicle part of a UAS, such as the plane, quad copter, etc..
          • UAS: Unmanned Aerial System. The vehicle, controller, goggles, and anything else included in the complete system needed to pilot a UAV.
          • VLOS: Visual Line Of Sight.
          "},{"location":"drone/#drone-subculture-terms","title":"Drone subculture terms","text":"
          • Tiny Whoop: small FPV drones, typically around 65mm, and weighing around 50g, with prop guards. These are safe enough to be flown inside buildings without worrying about breaking things.
          • Cine Whoop: an FPV drone with prop guards that is big enough to carry a camera of some sort. The prop guards let these drones fly through tight spaces and bump into objects without crashing, which makes them great for shooting video in a variety of scenes where camera drones without prop guards would not be as resilient.
          "},{"location":"drone/#links","title":"Links","text":"
          • https://betaflight.com: Flight Controller software
          • https://drone-laws.com
          • https://dronedj.com/2023/09/25/list-dji-drone-remote-id
          • https://edgetx.org: Radio controller firmware
          • https://fpvfc.org: FPV Freedom Coalition
          • https://github.com/Matthias84/awesome-flying-fpv
          • https://github.com/OpenVTx/OpenVTx: Open source VTX system
          • https://intofpv.com
          • https://newbeedrone.com
          • https://openipc.org: Open source camera firmware that works with a variety of cameras, including some drone cameras.
          • https://rotorriot.com/pages/beginners-guide
          • https://web.archive.org/web/20240223154621/https://store.dji.com/guides/properly-maintain-drone/
          • https://www.drl.io/academy
          • https://www.dronepilotgroundschool.com
          • https://www.expresslrs.org: Radio receiver firmware
          • https://www.faa.gov/uas/getting_started/remote_id
          • https://www.faa.gov/uas
          • https://www.fpvknowitall.com
          • https://www.getfpv.com
          • https://www.open-tx.org: Radio controller firmware
          • https://www.tinywhoop.com
          • https://www.youtube.com/@IvanEfimovLimon
          • https://www.youtube.com/@JoshuaBardwell
          "},{"location":"dsrc/","title":"dsrc","text":"

          \"Dedicated Short Range Communications is a two-way short-to-medium range wireless communications capability that permits very high data transmission critical in communications-based active safety applications\" - https://www.its.dot.gov/factsheets/dsrc_factsheet.htm

          \"Dedicated short-range communications are one-way or two-way short-range to medium-range wireless communication channels specifically designed for automotive use and a corresponding set of protocols and standards.\" - https://en.wikipedia.org/wiki/Dedicated_short-range_communications

          "},{"location":"dsrc/#links","title":"Links","text":"
          • https://www.its.dot.gov/factsheets/dsrc_factsheet.htm
          • https://en.wikipedia.org/wiki/Dedicated_short-range_communications
          • https://www.fcc.gov/wireless/bureau-divisions/mobility-division/dedicated-short-range-communications-dsrc-service
          "},{"location":"dtrace/","title":"dtrace","text":"

          \"dynamic tracing compiler and tracing utility\" - man dtrace

          "},{"location":"dtrace/#links","title":"Links","text":"
          • http://www.brendangregg.com/DTrace/dtrace_oneliners.txt
          "},{"location":"du/","title":"du","text":"

          \"estimate file space usage\" - man du

          "},{"location":"du/#examples","title":"Examples","text":""},{"location":"du/#summarize-low-level-directory-uage","title":"Summarize low level directory uage","text":"

          When a partition fills up, this is a good place to begin looking. Some flags may not be available on all systems, such as sort -h.

          ## -x      --one-file-system\n## -d 3    --max-depth=3\n## -h      --human-readable\nsudo du -x -d 3 -h / | sort -h\n
          "},{"location":"duplicity/","title":"duplicity","text":"

          Encrypted bandwidth-efficient backup using the rsync algorithm

          • http://duplicity.nongnu.org/
          "},{"location":"e-bike/","title":"E-bike","text":"

          Electric motor bicycles.

          "},{"location":"e-bike/#aventon-aventure2","title":"Aventon Aventure.2","text":"
          • Motor: 750w
          • Claimed Battery: 48v, 15Ah (720Wh)
          • Actual Battery: 47.1V (14.4Ah) 678.2Wh (6% less than claimed)
          • Assist sensor: torque sensor
          • Sprockets: 48T front, 8-speed 12-32T rear
          • Gear ratios: 1.5 - 4.0
          • Tires: 26x4
          "},{"location":"e-bike/#links","title":"Links","text":"
          • https://fucarebike.com/blogs/news/electric-bike-classes-comparison
          • https://www.heybike.com/blogs/heybike-blog/ebike-classes-1-2-3
          • https://www.michigan.gov/dnr/things-to-do/hike-and-bike/ebikes
          • https://www.michigan.gov/dnr/about/newsroom/releases/2024/03/15/proposed-change-would-expand-allowable-e-bike-operation
          • https://www.metroparks.com/rules-and-regulations: E-bikes are mentioned in this FAQ
          "},{"location":"education/","title":"education","text":"
          • https://www.coursera.org/
          • https://www.edx.org/
          • https://www.udemy.com/
          "},{"location":"elasticsearch/","title":"Elasticsearch","text":"

          \"Elasticsearch is a distributed, free and open search and analytics engine for all types of data, including textual, numerical, geospatial, structured, and unstructured.\" - https://www.elastic.co/what-is/elasticsearch

          "},{"location":"elasticsearch/#examples","title":"Examples","text":""},{"location":"elasticsearch/#dev-console","title":"Dev console","text":"

          Kibana ships with a dev console, which is useful for running the below examples. More documentation about the APIs that can be used in the dev console can be found here: https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-apis.html

          "},{"location":"elasticsearch/#interact-with-elasticsearch-over-http","title":"Interact with elasticsearch over HTTP","text":"

          The compact and aligned text (CAT) interface is available at something like https://${elasticsearch_host}:9200/_cat/ and has a variety of endpoints you can inspect over http in a human friendly output.

          /_cat/allocation\n/_cat/shards\n/_cat/shards/{index}\n/_cat/master\n/_cat/nodes\n/_cat/tasks\n/_cat/indices\n/_cat/indices/{index}\n/_cat/segments\n/_cat/segments/{index}\n/_cat/count\n/_cat/count/{index}\n/_cat/recovery\n/_cat/recovery/{index}\n/_cat/health\n/_cat/pending_tasks\n/_cat/aliases\n/_cat/aliases/{alias}\n/_cat/thread_pool\n/_cat/thread_pool/{thread_pools}\n/_cat/plugins\n/_cat/fielddata\n/_cat/fielddata/{fields}\n/_cat/nodeattrs\n/_cat/repositories\n/_cat/snapshots/{repository}\n/_cat/templates\n

          Accessing any of these will show columns of data. For example, to see all shards, you can do:

          curl -s \"https://${elasticsearch_host}:9200/_cat/shards?v=true\"\n

          Which will show something like:

          index                                        shard  prirep  state    docs    store   ip            node\nfluentd.quasaric-spacecraft-0412.2021.10.15  0      r       STARTED  53277   7.6mb   10.32.4.26    example-elasticsearch-data-3\nfluentd.quasaric-spacecraft-0412.2021.10.15  0      p       STARTED  53277   7.6mb   10.32.63.204  example-elasticsearch-data-9\nfluentd.true-ion-0733.2021.10.16             0      p       STARTED  47771   8.2mb   10.32.78.225  example-elasticsearch-data-11\nfluentd.true-ion-0733.2021.10.16             0      r       STARTED  47771   8.2mb   10.32.70.57   example-elasticsearch-data-10\nfluentd.desolate-terminator-1537.2021.10.19  0      p       STARTED  31216   5.7mb   10.32.70.57   example-elasticsearch-data-10\nfluentd.desolate-terminator-1537.2021.10.19  0      r       STARTED  31216   5.7mb   10.32.63.205  example-elasticsearch-data-6\nfluentd.false-perihelion-2673.2021.10.14     0      p       STARTED  144118  19.8mb  10.32.4.26    example-elasticsearch-data-3\nfluentd.false-perihelion-2673.2021.10.14     0      r       STARTED  144118  19.8mb  10.32.35.26   example-elasticsearch-data-2\n

          The ?v=true enables column headers. ?help is also available. More documentation is available at the following URLs:

          • https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html
          • https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html
          • https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html
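          "},{"location":"elasticsearch/#check-cluster-health","title":"Check cluster health","text":"

          Beyond the CAT interface, the cluster health API gives a quick JSON summary of cluster status (green/yellow/red), node counts, and unassigned shards:

          curl -s \"https://${elasticsearch_host}:9200/_cluster/health?pretty\"\n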
          "},{"location":"elasticsearch/#delete-indexes-by-regex","title":"Delete indexes by regex","text":"

          Assuming the indexes you want to delete all have a common string, and assuming you have local http access to elasticsearch (EG: you did sudo -E kubefwd svc -n es-namespace):

          curl -fsSL 'http://redacted-elasticsearch:9200/_cat/shards' |\nawk '$1 ~ /\\.2021\\.10\\.14$/ {print $1}' |\nsort -u |\nwhile read -r index ; do\n    curl -X DELETE \"http://redacted-elasticsearch:9200/${index}\"\ndone\n

          You could also use this same logic to delete large shards by using bytes=b and filtering on the index size:

          curl -fsSL 'http://redacted-elasticsearch:9200/_cat/shards?bytes=b' |\nawk '$1 ~ /^fluentd/ && $6 > 7500000 {print}'\n
          "},{"location":"elasticsearch/#move-a-large-shard-to-a-full-node-to-one-that-has-lots-of-free-data","title":"Move a large shard to a full node to one that has lots of free data","text":"

          Assuming you have elasticsearch available on localhost, eg from kubectl -n \"$namespace\" port-forward svc/elasticsearch 9200:9200:

          "},{"location":"elasticsearch/#find-a-large-shard","title":"Find a large shard","text":"
          curl -s http://localhost:9200/_cat/shards?bytes=b | sort -n -k6 | grep <name of node that is full>\n

          If you have GNU sort installed you can append | gsort -k6 -h to sort by shard size.

          "},{"location":"elasticsearch/#find-a-node-with-lots-of-free-space","title":"Find a node with lots of free space","text":"

          The following output shows \"free_space, hostname\"

          curl -s http://localhost:9200/_nodes/stats |\n    jq -rc '.nodes | to_entries | map([.value.fs.data[].free_in_bytes/1024/1024/1024, .value.name])[] | \"\\(.[0]) \\(.[1])\"' |\n    column -t |\n    sort -n\n
          "},{"location":"elasticsearch/#move-the-large-shard-to-the-new-node","title":"Move the large shard to the new node","text":"
          curl -s --location --request POST 'http://localhost:9200/_cluster/reroute' \\\n--header 'Content-Type: application/json' \\\n--data-raw '{\n    \"commands\" : [\n        {\n            \"move\" : {\n                \"index\" : \"<name of shard to move>\",\n                \"shard\" : <shard number, probably 0>,\n                \"from_node\" : \"<node large shard was on that is low on volume>\",\n                \"to_node\" : \"<node that has low volume to move shard to>\"\n            }\n        }\n    ]\n}' | jq .\n
          "},{"location":"elasticsearch/#links","title":"Links","text":"
          • https://www.elastic.co/blog/how-many-shards-should-i-have-in-my-elasticsearch-cluster
          "},{"location":"eleduino/","title":"Eleduino","text":"

          \"Cool and High Quality raspberry pi accessories at Wholesale Price from China\" - http://www.eleduino.com

          "},{"location":"eleduino/#eleduino-spotpear-touchscreen-32-inch","title":"Eleduino SpotPear touchscreen 3.2 inch","text":"

          Taken from https://github.com/notro/rpi-firmware/issues/6#issuecomment-63180647, this is tested to work on Raspbian 7 (wheezy) and 8 (jessie).

          "},{"location":"eleduino/#update-the-firmware-to-support-fbtft","title":"update the firmware to support FBTFT","text":"
          sudo apt-get install -y rpi-update\nsudo REPO_URI=https://github.com/notro/rpi-firmware BRANCH=builtin rpi-update\nsudo reboot\n
          "},{"location":"eleduino/#modify-boot-args-to-enable-the-device","title":"Modify boot args to enable the device","text":"
          sed -i \"s/$/ \\\nfbtft_device.custom \\\nfbtft_device.name=fb_ili9340 \\\nfbtft_device.gpios=dc:22,reset:27 \\\nfbtft_device.bgr=1 \\\nfbtft_device.speed=48000000/\" /boot/cmdline.txt\n
          "},{"location":"eleduino/#enable-console-on-boot","title":"Enable console on boot","text":"
          sed -i \"s/$/ \\\nfbcon=map:10 \\\nfbcon=font:ProFont6x11 \\\nlogo.nologo/\" /boot/cmdline.txt\n
          "},{"location":"eleduino/#rotation-etc","title":"Rotation etc..","text":"
          sed -i \"s/$/ \\\ndma.dmachans=0x7f35 \\\nconsole=tty1 \\\nconsoleblank=0 \\\nfbtft_device.fps=50 \\\nfbtft_device.rotate=270/\" /boot/cmdline.txt\n
          "},{"location":"eleduino/#sort-and-unique-bootcmdlinetxt","title":"Sort and unique /boot/cmdline.txt","text":"
          cat /boot/cmdline.txt |\n    tee /root/cmdline.txt-$(date +%s) |\n    tr \" \" \"\\n\" |\n    sort -u |\n    tr \"\\n\" \" \" > /boot/cmdline.txt.tmp && \\\nmv /boot/cmdline.txt.tmp /boot/cmdline.txt\n
          "},{"location":"etcd/","title":"etcd","text":"

          \"etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines. It\u2019s open-source and available on GitHub. etcd gracefully handles leader elections during network partitions and will tolerate machine failure, including the leader.\" - https://coreos.com/etcd/

          • https://coreos.com/etcd/docs/latest/demo.html
          "},{"location":"ethtool/","title":"ethtool","text":"

          \"ethtool - query or control network driver and hardware settings\" - man ethtool

          "},{"location":"ethtool/#examples","title":"Examples","text":""},{"location":"ethtool/#force-ethernet-adapter-to-re-negotiate-its-speed","title":"Force ethernet adapter to re-negotiate its speed","text":"
          ethtool -r eth0\n
          "},{"location":"ethtool/#show-interface-error-count-by-type","title":"Show interface error count by type","text":"
          $ sudo ethtool -S ens5f0 | grep -i error\n     rx_errors: 13551\n     tx_errors: 0\n     rx_length_errors: 0\n     rx_crc_errors: 13551\n     fcoe_last_error: 0\n     port.tx_errors: 0\n     port.rx_crc_errors: 13551\n     port.rx_length_errors: 0\n
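          "},{"location":"ethtool/#show-link-status-and-negotiated-speed","title":"Show link status and negotiated speed","text":"

          Running ethtool with just an interface name (eth0 here is a placeholder) shows supported and advertised link modes, the negotiated speed and duplex, and whether a link is detected:

          ethtool eth0\n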
          "},{"location":"exiftool/","title":"exiftool","text":"

          CLI tool to read and write image metadata for many kinds of images.

          • https://exiftool.org
          "},{"location":"exiftool/#tricks","title":"Tricks","text":""},{"location":"exiftool/#strip-all-tags","title":"Strip all tags","text":"
          exiftool -all= -- \"$filename\"\n
          "},{"location":"exiftool/#show-tags-in-a-format-that-you-can-use-to-rewrite-them","title":"Show tags in a format that you can use to rewrite them","text":"
          exiftool -S -- \"$filename\"\n

          For example

          $ exiftool -S -- \"$filename\" | grep Daniel\nArtist: Daniel Austin Hoherd\nCopyright: \u00a9Daniel Austin Hoherd\nCreator: Daniel Austin Hoherd\nRights: \u00a9Daniel Austin Hoherd\n$ exiftool -Rights='All rights reserved' -- \"$filename\"\n    1 image files updated\n$ exiftool -Rights -- \"$filename\"\nRights                          : All rights reserved\n
          "},{"location":"exiftool/#expanded-basic-usage","title":"Expanded basic usage","text":"

          This prints out a lot more information than normal usage, and indicates what type of metadata it is.

          exiftool -a -u -G:1:2 -- \"$filename\"\n

          Here is an example of each unique column 1 in a file

          $ exiftool -a -u -G:1:2 -- \"$filename\" | sort -u -k1,1\n[Adobe:Image]   DCT Encode Version              : 100\n[Composite:Camera] Scale Factor To 35 mm Equivalent: 7.0\n[Composite:Image] Aperture                      : 1.8\n[Composite:Location] GPS Latitude               : 37 deg 15' 53.04\" N\n[Composite:Time] Date/Time Created              : 2019:01:08 15:59:06\n[ExifIFD:Camera] Exposure Program               : Program AE\n[ExifIFD:Image] Exposure Time                   : 1/120\n[ExifIFD:Time]  Date/Time Original              : 2019:01:08 15:59:06\n[ExifTool:ExifTool] ExifTool Version Number     : 11.11\n[File:Image]    File Type                       : JPEG\n[GPS:Location]  GPS Version ID                  : 2.2.0.0\n[GPS:Time]      GPS Time Stamp                  : 23:59:06\n[ICC-header:Image] Profile CMM Type             : Linotronic\n[ICC-header:Time] Profile Date Time             : 1998:02:09 06:49:00\n[ICC-meas:Image] Measurement Observer           : CIE 1931\n[ICC-view:Image] Viewing Cond Illuminant        : 19.6445 20.3718 16.8089\n[ICC_Profile:Camera] Device Mfg Desc            : IEC http://www.iec.ch\n[ICC_Profile:Image] Profile Copyright           : Copyright (c) 1998 Hewlett-Packard Company\n[IFD0:Author]   Artist                          : Daniel Austin Hoherd\n[IFD0:Camera]   Make                            : Apple\n[IFD0:Image]    X Resolution                    : 240\n[IFD0:Time]     Modify Date                     : 2019:01:09 13:50:29\n[IFD1:Image]    Compression                     : JPEG (old-style)\n[IFD1:Preview]  Thumbnail Image                 : (Binary data 12008 bytes, use -b option to extract)\n[IPTC:Author]   By-line                         : Daniel Austin Hoherd\n[IPTC:Other]    Coded Character Set             : UTF8\n[IPTC:Time]     Date Created                    : 2019:01:08\n[Photoshop:Author] Copyright Flag               : True\n[Photoshop:Image] X Resolution                  : 240\n[Photoshop:Preview] Photoshop Thumbnail         : (Binary data 12008 bytes, use -b option to extract)\n[System:Image]  File Name                       : 2019-01-08-15-59-06-46628465322_d1657e4c95_o.jpg\n[System:Time]   File Modification Date/Time     : 2019:01:22 09:00:22-08:00\n[XMP-aux:Camera] Distortion Correction Already Applied: True\n[XMP-crs:Image] Already Applied                 : True\n[XMP-dc:Author] Creator                         : Daniel Austin Hoherd\n[XMP-dc:Image]  Format                          : image/jpeg\n[XMP-photoshop:Image] Headline                  : ljwZuD\n[XMP-photoshop:Time] Date Created               : 2019:01:08 15:59:06.448\n[XMP-x:Document] XMP Toolkit                    : Image::ExifTool 11.11\n[XMP-xmp:Image] Creator Tool                    : Adobe Photoshop Lightroom 6.14 (Macintosh)\n[XMP-xmp:Time]  Create Date                     : 2019:01:08 15:59:06.448\n[XMP-xmpMM:Other] Derived From Document ID      : 9880573B7AACBFC189C795E182E8A05D\n[XMP-xmpMM:Time] History When                   : 2019:01:09 13:50:29-08:00\n[XMP-xmpRights:Author] Marked                   : True\n
          "},{"location":"exiftool/#add-missing-lens-data-on-rokinon-85mm","title":"Add missing lens data on Rokinon 85mm","text":"

          Rokinon 85mm is a mechanical lens with no electronics, so no lens data is stored in images taken with it. This adds some stock metadata describing characteristics of the lens that are always true, which helps these photos sort accurately, etc..

          exiftool \\\n  -overwrite_original \\\n  -LensModel='Rokinon 85mm f/1.4' \\\n  -FocalLength='85' \\\n  -LongFocal='85' \\\n  -ShortFocal='85' \\\n  -- \\\n  \"$filename\"\n
          "},{"location":"exiftool/#correct-exif-time-for-instance-to-sync-with-gps-time","title":"Correct EXIF time, for instance to sync with GPS time","text":"

          The following example increases all metadata dates by 1 minute and 56 seconds.

          # exiftool -AllDates-='Y:M:D H:M:S'\nexiftool -AllDates+='0:0:0 0:1:56' -- \"$filename\"\n
          "},{"location":"exiftool/#set-all-dates-to-something-obviously-wrong","title":"Set all dates to something obviously wrong","text":"

          This is useful when scanning or photographing film or prints where you do not want the current date associated with the image.

          exiftool -alldates='1900:01:01 01:01:01' -- *.tif\n
          "},{"location":"exiftool/#delete-certain-keywords-from-files","title":"Delete certain keywords from files","text":"

          This example uses bash expansion to create multiple -keywords-= statements from the words inside of the braces. Use echo exiftool to see what command is actually being called when testing. Keywords can also be stored in the subject tag, so we clean that too.

          find ~/your/pictures/ -type f -name '*.jpg' |\nxargs exiftool -overwrite_original -{keywords,subject}-={keyword1,\"a keyword with spaces\",keyword3,\"another keyword with spaces\"} --\n

          A more readable way to do this is to use an array and loop over it to create args, then pass the args to exiftool. This technique is quite useful for use with a variety of tools. You could also change this logic to add tags instead of deleting them.

          tags=(\n  \"private tag 1\"\n  \"another private tag\"\n  \"some other tag that is private\"\n)\n\nargs=()\nfor tag in \"${tags[@]}\" ; do\n  args+=( \"-subject-=$tag\" \"-keywords-=$tag\" )\ndone\n\nexiftool -overwrite_original \"${args[@]}\" -- \"$@\"\n
          "},{"location":"exiftool/#append-keywords-to-a-file","title":"Append keywords to a file","text":"

          When adding keywords, the default behavior allows duplicates. This case is covered in FAQ #17 and indicates that you must remove and re-add each keyword in one operation in order to prevent duplicates. A bash function to do that follows. Be careful to use it on only ONE FILE at a time, otherwise you will add filenames as keywords!

          add_keyword_to_file(){\n  local args=()\n  [[ \"$#\" -ge 2 ]] || { echo \"ERROR: Must have at least 2 args: <keyword> [keyword]... <file>\" ; return 1 ;}\n  while [[ \"$#\" -gt 1 ]] ; do\n    args+=(\"-keywords-=${1}\" \"-keywords+=${1}\")\n    shift\n  done\n  filename=$1\n  exiftool \"${args[@]}\" -- \"${filename}\"\n}\n

          Here it is in action:

          $ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show there are no keywords\nWarning: [Minor] Tag 'keywords' not defined - 20211016-21-25-03_450QaA.jpg\n\n$ add_keyword_to_file \"Sutro Heights\" \"San Francisco\" 20211016-21-25-03_450QaA.jpg  # add keywords\n    1 image files updated\n\n$ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show that keywords were added\nSutro Heights, San Francisco 20211016-21-25-03_450QaA.jpg\n\n$ add_keyword_to_file \"Sutro Heights\" \"San Francisco\" 20211016-21-25-03_450QaA.jpg  # re-add existing keywords\n    1 image files updated\n\n$ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show that duplicates were not added\nSutro Heights, San Francisco 20211016-21-25-03_450QaA.jpg\n

          It even works to remove duplicates where they already exist, likely because the -= matches all instances of the keyword.

          $ exiftool -keywords+=\"San Francisco\" -- 20211016-21-25-03_450QaA.jpg  # add a duplicate\n    1 image files updated\n\n$ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show that there are duplicates\nSutro Heights, San Francisco, San Francisco 20211016-21-25-03_450QaA.jpg\n\n$ add_keyword_to_file \"Sutro Heights\" \"San Francisco\" 20211016-21-25-03_450QaA.jpg\n    1 image files updated\n\n$ exiftool -p '$keywords $filename' -- 20211016-21-25-03_450QaA.jpg  # show that duplicates have been removed\nSutro Heights, San Francisco 20211016-21-25-03_450QaA.jpg\n

          To add the same keywords to many files, loop through the files one at a time using something like:

          for file in *.jpg ; do add_keyword_to_file \"Sutro Heights\" \"San Francisco\" \"${file}\" ; done ;\n
          "},{"location":"exiftool/#set-file-modify-time-to-image-capture-time","title":"Set file modify time to image capture time","text":"

          Useful when you want to sort in your file browser by modification time and get a chronological order of files.

          exiftool \"-FileModifyDate<DateTimeOriginal\" -- *.jpg\n
          "},{"location":"exiftool/#generate-a-table-of-filename-camera-model-and-file-size-in-bytes-sorted-by-bytes","title":"Generate a table of Filename, Camera Model and File Size in bytes, sorted by bytes","text":"

          The # suffix on $FileSize (equivalent to the -n flag) tells exiftool not to convert values into human readable formats. This is somewhat ironic in some circumstances, such as with location, where using -n makes the GPS location show up as decimal, which IMHO is much more readable.

          $ find /src_dir/ -iname '*.dng' |\n  xargs exiftool -p '$filename,$Model,$FileSize#' -- 2>/dev/null |\n  sort -t, -k3 -n |\n  column -s, -t\n2012-01-26-23-19-54-6795223065_2e771d1012_o.jpg   iPhone 4S             1242739\n2013-02-03-10-01-56-8441346635_df4404a1f6_o.jpg   NIKON D5200           1646481\n2012-01-22-15-16-38-6746574603_d52311264f_o.jpg   Canon EOS REBEL T3i   1671734\n2011-01-22-23-44-31-6271225963_f9b95b2d7a_o.jpg   NIKON D3S             1773081\n2010-01-27-13-07-00-4313649499_835a6649c2_o.jpg   NIKON D300            1829578\n2016-02-03-07-26-32-24522158414_4aaf116d2a_o.jpg  iPhone 6              2319061\n2018-10-24-13-39-09-44676649345_1de0f581cd_o.jpg  iPhone XS Max         2971254\n2015-02-02-19-17-09-24587486051_3032823e4e_o.jpg  NIKON D800            3309696\n2014-01-27-13-52-41-12951707465_79a8dd3827_o.jpg  iPhone 5              3401479\n2017-01-22-18-33-28-31693592473_40478df088_o.jpg  ILCE-7                4230661\n2018-12-23-22-33-40-45536007225_8fdd50691a_o.jpg  NIKON D850            4924617\n2017-02-06-08-04-18-44658317900_98e04997fb_o.jpg  iPhone 6s             8712631\n2018-12-28-16-56-42-39713091073_c57ec1a8a8_o.jpg  Canon EOS 5D Mark II  8741601\n2019-01-08-16-11-49-39716361093_479e6a2323_o.jpg  iPhone 8 Plus         12041600\n
          "},{"location":"exiftool/#generate-rsync-commands-for-files-matching-a-string","title":"Generate rsync commands for files matching a string","text":"

          Useful for reviewing commands before running them, the following example generates a command for every file, then uses awk to do a numeric comparison on the last field to filter out images under a certain ImageHeight. These rsync commands can be pasted into a terminal to run. (Generating a list of files for use with rsync --files-from would be a better option for this specific use case, but this illustration could be adapted for commands that do not have such an option.)

          $ exiftool -d \"%s\" -p 'rsync -aP $filename otherhost:~/Pictures/ # $ImageHeight' -- * 2>/dev/null | awk '$NF >= 2800 {print}'\nrsync -aP 2017-02-06-08-04-18-44658317900_98e04997fb_o.jpg otherhost:~/Pictures/ # 2869\nrsync -aP 2018-02-06-09-50-04-31514483967_a422a3e3aa_o.jpg otherhost:~/Pictures/ # 2880\nrsync -aP 2018-02-06-15-04-43-45541501845_8dbdc3b208_o.jpg otherhost:~/Pictures/ # 2880\nrsync -aP 2018-02-06-15-05-43-31514485997_e2551fdbbc_o.jpg otherhost:~/Pictures/ # 2880\nrsync -aP 2018-12-19-10-53-27-45663859984_0f93ac24ec_o.jpg otherhost:~/Pictures/ # 2880\n
          "},{"location":"exiftool/#print-filenames-that-are-missing-a-tag","title":"Print filenames that are missing a tag","text":"

          This example creates a file listing the full path names of all jpg and dng files that do not have GPS coordinates.

          find /some/dir -iname '*.jpg' -or -iname '*.dng' -print0 |\nxargs -0 exiftool -p '${Directory}/${Filename}' -if 'not defined $GPSPosition' -- >> ~/no-geo.txt\n
          "},{"location":"exiftool/#print-filenames-of-photos-that-are-older-than-10-years","title":"Print filenames of photos that are older than 10 years","text":"
          exiftool -if '$now ge ${DateTimeOriginal;ShiftTime($_,\"10:0:0 0\")}' -p '$FileName' *.jpg\n
          "},{"location":"exiftool/#use-testname-tag-target-to-test-what-files-would-be-renamed-to","title":"Use TestName tag target to test what files would be renamed to","text":"

          This block builds an array of possible tags to use as a filename, creates an exiftool argument string from that array, then tests what files would be named to. This is useful when dealing with files from various sources that don't all use the same tag to store the original media creation time. By using TestName instead of FileName as the target, we observe what would occur, essentially a dry-run, instead of actually renaming the files.

          There is a funky behavior of %-c when you operate on a file that should ideally not be renamed. Exiftool will toggle back and forth each run appending and removing -1.

          This assumes GNU xargs for the -r flag.

          #!/usr/bin/env bash\nset -x\n\n# The last valid variable from this list is used as the filename source\ncreate_date_sources=(\n  TrackCreateDate\n  RIFF:DateTimeOriginal\n  MediaCreateDate\n  FileModifyDate\n  DateTimeOriginal\n  CreateDate\n)\n\nfor opt in \"${create_date_sources[@]}\" ; do\n  args+=( \"-TestName<${opt}\" ) ;\ndone ;\n\nargs+=( '-d' './%Y/%m/%Y%m%d-%H-%M-%S%%-c.%%le' )\n\nfind . -maxdepth 1 -type f ! -name '*.sh' -print0 | xargs -0 -r exiftool \"${args[@]}\" --\n
          "},{"location":"exiftool/#rename-files-to-their-shuttercount","title":"Rename files to their ShutterCount","text":"

          Filenames will not be changed if ShutterCount field is not populated.

          exiftool -P '-filename<${ShutterCount;}.%e' -- *.dng\n
          "},{"location":"exiftool/#rename-files-based-on-a-set-of-possible-names","title":"Rename files based on a set of possible names","text":"

          Exiftool will use the last parameter where all variables are present.

          exiftool -P -d '%F-%H-%M-%S' \\\n  '-filename<${DateTimeOriginal} - ${Make;}.%e' \\\n  '-filename<${CreateDate} - ${Make;}.%e' \\\n  '-filename<${DateTimeOriginal} - ${Make;} - ${Model;}.%e' \\\n  '-filename<${CreateDate} - ${Make;} - ${Model;}.%e' \\\n  '-filename<${DateTimeOriginal} - ${Make;} - ${Model;} - ${ShutterCount}.%e' \\\n  '-filename<${CreateDate} - ${Make;} - ${Model;} - ${ShutterCount}.%e' \\\n  -- \\\n  *.dng\n
          "},{"location":"exiftool/#rename-gpx-files-based-on-the-capture-time","title":"Rename GPX files based on the capture time","text":"

          You will end up with a filename like 2013-09-30-23-35-40.gpx based off of the first trkpt timestamp.

          exiftool -d '%Y%m%d-%H-%M-%S' '-FileName<${GpxTrkTrksegTrkptTime;tr/ /-/;tr/:/-/;tr(/Z/)()d;}%-c.gpx' -- *.gpx\n
          "},{"location":"exiftool/#rename-files-to-their-original-date-and-time-using-a-lower-case-file-extension","title":"Rename files to their original date and time using a lower case file extension","text":"
          # %le = lowercase extension\n# %-c = unique filenames when the timestamp is exactly the same. EG: filename-1.jpg\nexiftool \"-FileName<CreateDate\" -d \"%Y%m%d-%H-%M-%S%%-c.%%le\" -- *.jpg\n
          "},{"location":"exiftool/#rename-files-using-a-combination-of-tags","title":"Rename files using a combination of tags","text":"

          Using the name of the tag as output by exiftool -S, you can create complicated filenames by combining tags:

          exiftool -d '%Y%m%d-%H-%M-%S' '-FileName<${CreateDate;}_${Headline;}%-c.%e'\n
          "},{"location":"exiftool/#rename-music-files-in-a-directory","title":"Rename music files in a directory","text":"

          If you use a semicolon inside of a tag that is used to generate a filename, it will have filename-invalid characters stripped. The invalid character list is: / \\ ? * : | \" < >. See the next section for more examples of semicolon behavior.

          exiftool \\\n  '-FileName<${Artist;} - ${Title;}.%e' \\\n  '-FileName<${Artist;} - ${Album;} - ${Title;}.%e' \\\n  -- \\\n  *.mp3 *.m4a\n

          The way I solved this prior to knowing the semicolon behavior was to use a regex replace, which is included here because it could be useful in other circumstances:

          exiftool \\\n  '-FileName<${Artist;s/\\//_/} - ${Title;s/\\//_/}.%e' \\\n  '-FileName<${Artist;s/\\//_/} - ${Album;s/\\//_/} - ${Title;s/\\//_/}.%e' \\\n  -- \\\n  *.mp3 *.m4a\n
          "},{"location":"exiftool/#rename-files-into-directories-with-date-components-as-directory-names","title":"Rename files into directories with date components as directory names","text":"

          Using the above technique, it's not possible to create directories using date components as parts of the directory structure.

          $ exiftool -d '%Y/%m/%d/%F-%H-%M-%S' '-TestName<${DateTimeOriginal;}.%le' -- example.jpg\n'example.jpg' --> '201803042018-03-04-00-01-29.jpg'\n

          Notice how all the directory delimiters were left out. To work around this, you can use a date format string with DateFmt directly in the date tag instead of in -d:

          $ exiftool '-TestName<${DateTimeOriginal;DateFmt(\"%Y/%m/%d/%F-%H-%M-%S\")}.%le' -- example.jpg\n'example.jpg' --> '2018/03/04/2018-03-04-00-01-29.jpg'\n
          "},{"location":"exiftool/#rename-files-into-subdir-based-on-multiple-tags","title":"Rename files into subdir based on multiple tags","text":"

          Making sure not to put a semicolon into the tags, as described in the last section, you can use more than one tag to rename a file, so long as you format your date string correctly.

          find ./ -type f -iname '*.jpg' -print0 |\nxargs -0 exiftool -d \"%Y/%m/%d/%Y%m%d-%H-%M-%S\" '-FileName<${DateTimeOriginal}_${Headline}%-c.%le' --\n

          EG:

          $ find . -type f -iname '*.jpg' -print0 | xargs -0 exiftool -d \"%Y/%m/%d/%Y%m%d-%H-%M-%S\" '-TestName<${DateTimeOriginal}_${Headline}%-c.%le' --\n'./20170406-17-11-59.jpg' --> '2017/04/06/20170406-17-11-59_qrWLGF.jpg'\n'./20170401-22-20-56.jpg' --> '2017/04/01/20170401-22-20-56_907nMU.jpg'\n'./20170403-07-14-18.jpg' --> '2017/04/03/20170403-07-14-18_JMPDVd.jpg'\n    0 image files updated\n    3 image files unchanged\n

          But if we use a semicolon, the invalid characters are stripped, and thus directories are not created.

          $ find . -type f -iname '*.jpg' -print0 | xargs -0 exiftool -d \"%Y/%m/%d/%Y%m%d-%H-%M-%S\" '-TestName<${DateTimeOriginal;}_${Headline}%-c.%le' --\n'./20170406-17-11-59.jpg' --> './2017040620170406-17-11-59_qrWLGF.jpg'\n'./20170401-22-20-56.jpg' --> './2017040120170401-22-20-56_907nMU.jpg'\n'./20170403-07-14-18.jpg' --> './2017040320170403-07-14-18_JMPDVd.jpg'\n    0 image files updated\n    3 image files unchanged\n
          "},{"location":"exiftool/#move-short-videos-to-one-dir-long-videos-to-another-dir","title":"Move short videos to one dir, long videos to another dir","text":"

          In iOS, if you have Live Photo enabled it creates little movies each time you take a photo. While these can be very interesting context around photos, they can be quite irritating if you're playing through a collection of videos where these are mixed with videos of more moderate duration. The following code snip separates videos with a duration of more than 10 seconds from those with equal or lesser duration.

          # -TestName is used here so it does not destroy data. Replace this with FileName to make this actually work.\n# $Duration# has the # sign appended to make this tag machine readable so it can accurately be compared.\n# We must use perl's numeric comparisons (>, <=), not string comparisons (gt, le)\n# exiftool does not support if else syntax, so for the else condition you must run a second command.\n\n# Where to create the long/ and short/ trees\nworking_path=${PWD}\n\nlong_args=(  '-TestName<CreateDate' '-d' \"${working_path}/long/%Y/%m/%Y%m%d-%H-%M-%S%%-c.%%le\"  '-if' '${Duration#} >  10' )\nshort_args=( '-TestName<CreateDate' '-d' \"${working_path}/short/%Y/%m/%Y%m%d-%H-%M-%S%%-c.%%le\" '-if' '${Duration#} <= 10' )\n\nfind \"${PWD}\" -maxdepth 1 -type f -print0 | xargs -0 -r exiftool \"${long_args[@]}\" --\nfind \"${PWD}\" -maxdepth 1 -type f -print0 | xargs -0 -r exiftool \"${short_args[@]}\" --\n
          "},{"location":"exiftool/#add-missing-date-metadata-to-nintendo-switch-screenshots","title":"Add missing date metadata to Nintendo Switch screenshots","text":"

          Nintendo Switch screenshots are named with the date, but do not contain this information in the EXIF, which makes this data fragile.

          # Filename like: 2020041909511400-87C68A817A974473877AC288310226F6.jpg\nfor X in 202?????????????-????????????????????????????????.{jpg,mp4} ; do\n  echo \"${X}\" |\n  sed -E 's/^((....)(..)(..)(..)(..)(..).*)/\\2 \\3 \\4 \\5 \\6 \\7 \\1/'\ndone | while read -r Y M D h m s f ; do\n  exiftool \\\n    -overwrite_original \\\n    \"-alldates=$Y:$M:$D $h:$m:$s\" \\\n    '-FileName<DateTimeOriginal' \\\n    -d '%Y%m%d-%H-%M-%S%%-c.%%le' \\\n    -- \"$f\"\ndone\n
          "},{"location":"exiftool/#copy-all-gps-location-data-from-one-file-into-other-files","title":"Copy all GPS location data from one file into other files","text":"
          exiftool -tagsfromfile source-file.jpg '-gpsl*<gpsl*' -- dest-file-1.jpg dest-file-2.jpg\n
          "},{"location":"exiftool/#review-and-delete-all-dji-photos-that-are-looking-at-the-sky","title":"Review and delete all DJI photos that are looking at the sky","text":"

          When taking panoramas with a DJI drone, you end up with a lot of photos of clouds and blue sky. These can be found by looking at GimbalPitchDegree. Review them in macOS Preview.app with:

          find PANORAMA -type f |\nxargs exiftool -if '$GimbalPitchDegree > 40' -p '${Directory}/${Filename}' -- 2>/dev/null |\nxargs -r open\n

          Once you've verified that none of them are worth preserving, delete them with:

          find PANORAMA -type f |\nxargs exiftool -if '$GimbalPitchDegree > 40' -p '${Directory}/${Filename}' -- 2>/dev/null |\nxargs -r rm -fv\n

          If you want to filter out photos that are mostly sky but also contain a bit of the ground in the bottom third of the frame, use > 9 instead of > 40.

          "},{"location":"exiftool/#geotag-non-geotagged-files-using-a-specific-tz","title":"Geotag non-geotagged files using a specific TZ","text":"

          Timezones in photo images are kind of a mess. In order to be specific about what TZ you took photos in, you can override it using the syntax in the example below. For instance, I keep all my photos in UTC so I never have to wonder what TZ I took them in and I never have to worry about DST. This example also skips any files that have existing geotags.

          find ~/Pictures/whatever -type f -iname '*.dng' -print0 |\n  xargs -0 exiftool -if 'not defined $GPSPosition' -geotag ~/gps_tracks.gpx '-Geotime<${createdate}+00:00' --\n

          This page gives more examples: https://exiftool.org/geotag.html

          "},{"location":"exiftool/#export-exif-data-as-json","title":"Export exif data as JSON","text":"

          You can use the -J/-json flag to output JSON data, which is really helpful for further processing with tools like jq.

          $ exiftool -J -ExifToolVersion -LensFStops 20241027-20-44-44_177lgJ.dng | jq .\n[\n  {\n    \"SourceFile\": \"20241027-20-44-44_177lgJ.dng\",\n    \"ExifToolVersion\": 12.5,\n    \"LensFStops\": 6\n  }\n]\n

          However, by default, all numeric-looking values are unquoted, even if they are not actually numeric values, like version numbers. In the above example, the version number is 12.50, not 12.5, and the LensFStops value is 6.00, not 6. To work around this, you can use -api StructFormat=JSONQ, where JSONQ is \"JSON with quoted numbers\". (See https://exiftool.org/ExifTool.html#StructFormat for more details.) You must be using exiftool >= 12.88 (2024-07-11) for this feature to be available, otherwise it will silently produce unquoted numeric values.

          $ exiftool -api StructFormat=JSONQ -json -ExifToolVersion -LensFStops 20241027-20-44-44_177lgJ.dng | jq .\n[\n  {\n    \"SourceFile\": \"20241027-20-44-44_177lgJ.dng\",\n    \"ExifToolVersion\": \"13.00\",\n    \"LensFStops\": \"6.00\"\n  }\n]\n
          "},{"location":"exiftool/#see-also","title":"See Also","text":"
          • graphicsmagick
          • imagemagick
          • jpeginfo
          • sips
          "},{"location":"fediverse/","title":"fediverse","text":"

          \"The fediverse is an ensemble of federated (i.e. interconnected) servers that are used for web publishing and file hosting, which, while independently hosted, can communicate with each other.\" - https://en.wikipedia.org/wiki/Fediverse

          "},{"location":"fediverse/#links","title":"Links","text":"
          • https://en.wikipedia.org/wiki/Fediverse
          • https://en.wikipedia.org/wiki/ActivityPub: The fediverse is largely interconnected using the ActivityPub protocol.
          • https://fedidevs.org: Fediverse Developer Network
          • https://joinfediverse.wiki
          • https://lemmy.world/post/256146: Several fediverse links
          • https://www.jvt.me/posts/2019/10/20/indieweb-talk: Not fediverse, but definitely along the same line of thinking in a lot of ways.
          "},{"location":"fedramp/","title":"Fedramp","text":"
          • https://en.wikipedia.org/wiki/FedRAMP
          • https://www.fedramp.gov/
          "},{"location":"ffmpeg/","title":"ffmpeg","text":"

          ffmpeg is a command-line tool for converting and editing video and audio files.

          • https://ffmpeg.org
          "},{"location":"ffmpeg/#links","title":"Links","text":"
          • https://fileconverter.tommyjepsen.com: ffmpeg based video converter that runs in-browser
          "},{"location":"ffmpeg/#examples","title":"Examples","text":""},{"location":"ffmpeg/#convert-container-format","title":"Convert container format","text":"

          This will copy all streams from every mkv container file in the current directory into an mp4 container file.

          for X in *.mkv ; do ffmpeg -i \"${X}\" -codec copy -map 0 \"${X%.mkv}.mp4\" ; done ;\n

          Some codecs will not be compatible and will need to be transcoded in order to be mp4 compatible. Here is an example that transcodes video to h264 using hardware transcoding (h264_videotoolbox, macOS's hardware encoder):

          FILE=all-your-base.mkv\nffmpeg -i \"${FILE}\" -c:v h264_videotoolbox -b:v 4000k -c:a copy \"${FILE%.mkv}.mp4\"\n
          "},{"location":"ffmpeg/#sample-video-output-settings","title":"Sample video output settings","text":"

          Before spending a long time converting a video, it's good to sample what the output will look like. You can render only a portion of the movie to make sure your quality settings are not too low. The following example starts encoding from 35 minutes into the source file (-ss HH:MM:SS.ss) and produces 20 seconds of output (-t HH:MM:SS.ss):

          ffmpeg -ss 00:35:00 -t 00:00:20.00 -i \"${FILE}\" -c:v h264_videotoolbox -b:v 4000k -c:a copy \"${FILE%.mkv}.mp4\"\n

          Note that the HH:MM:SS.ss is given in the time duration spec that is detailed in man ffmpeg-utils and can have other forms.

          Also note that -ss behaves differently depending on where it is placed in the command in regard to the input and output files.
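
          For example, placing -ss before -i does a fast input seek to a point near the timestamp before decoding, while placing it after -i decodes and discards everything up to the timestamp, which is slower but frame-accurate. A sketch, with input.mkv as a placeholder:

          # Fast input seek: jumps close to 35:00 before decoding\nffmpeg -ss 00:35:00 -t 00:00:20.00 -i input.mkv -c copy sample-fast.mp4\n# Accurate output seek: decodes and discards frames up to 35:00\nffmpeg -i input.mkv -ss 00:35:00 -t 00:00:20.00 sample-accurate.mp4\n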

          "},{"location":"ffmpeg/#time-lapse","title":"Time Lapse","text":""},{"location":"ffmpeg/#generate-a-movie-from-an-image-sequence-like-001jpg-999jpg","title":"Generate a movie from an image sequence like 001.jpg-999.jpg","text":"
          ffmpeg -r 10 -b 1800 -i %03d.jpg test1800.mp4\n
          "},{"location":"ffmpeg/#rename-files-as-a-sequence","title":"Rename files as a sequence:","text":"
          i=0 ;\nfind . -type f |\n  while read -r F ; do\n    let i=${i}+1 ;\n    fn=$(printf %06d ${i}) ;\n    mv \"${F}\" \"${fn}.jpg\" ;\n  done ;\n
          "},{"location":"ffmpeg/#sample-some-of-the-middle-of-the-time-lapse","title":"Sample some of the middle of the time-lapse","text":"
          ffmpeg -pattern_type sequence -start_number 3000 -r 30 -i %06d.jpg -s 1440x1080 -frames 120 \"$(date +%F_%T).mp4\"\n
          "},{"location":"ffmpeg/#turn-these-images-into-a-video","title":"Turn these images into a video","text":"
          ffmpeg -pattern_type sequence -r 30 -i %06d.jpg -s 1440x1080 \"$(date +%F_%T).mp4\"\n
          "},{"location":"ffmpeg/#audio-replace","title":"Audio Replace","text":"

          Replace the audio of DSC_4436.AVI with 01 Gymnopedie 1.mp3 and limit the duration of the output so the music doesn't play beyond the end of the video.

          ffmpeg -t 00:00:47.99 -i DSC_4436.AVI -i \"01 Gymnopedie 1.mp3\" -map 0:0 -map 1:0 -vcodec copy -acodec copy output.AVI\n
          "},{"location":"ffmpeg/#slow-down-video-to-half-speed-drop-audio","title":"Slow down video to half speed, drop audio","text":"
          ffmpeg -i DHO_8751.MOV -an -vf \"setpts=(2/1)*PTS\" output.mp4\n
          "},{"location":"ffmpeg/#extract-two-seconds-worth-of-frames-at-24fps-starting-at-15m","title":"Extract two seconds worth of frames at 24fps starting at 15m","text":"
          ffmpeg -i movie.mkv -r 24 -t 00:00:02.00 -ss 00:15:00 temp/movie-%4d.jpg\n
          "},{"location":"ffmpeg/#detect-errors-in-files","title":"Detect errors in files","text":"
          ffmpeg -v error -i 20091024-08-46-00.mpg -f null - 2>> error.log\n
          "},{"location":"ffmpeg/#dump-a-raw-stream","title":"Dump a raw stream","text":"

          If you encounter a file that has an unsupported stream and you want to dump it for analysis, you can use ffprobe to see what streams there are. ffprobe will produce output including something like:

          Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'DJI_20230929174507_0003_D.MP4':\n  Metadata:\n    major_brand     : isom\n    minor_version   : 512\n    compatible_brands: isomiso2mp41\n    creation_time   : 2023-09-29T21:45:07.000000Z\n    encoder         : DJI Mini4 Pro\n  Duration: 00:00:18.28, start: 0.000000, bitrate: 93326 kb/s\n  Stream #0:0[0x1](und): Video: hevc (Main) (hvc1 / 0x31637668), yuv420p(tv, bt709), 3840x2160, 90422 kb/s, 29.97 fps, 29.97 tbr, 30k tbn (default)\n    Metadata:\n      creation_time   : 2023-09-29T21:45:07.000000Z\n      handler_name    : VideoHandler\n      vendor_id       : [0][0][0][0]\n  Stream #0:1[0x2](und): Data: none (djmd / 0x646D6A64), 68 kb/s\n    Metadata:\n      creation_time   : 2023-09-29T21:45:07.000000Z\n      handler_name    : DJI meta\n  Stream #0:2[0x3](und): Data: none (dbgi / 0x69676264), 2315 kb/s\n    Metadata:\n      creation_time   : 2023-09-29T21:45:07.000000Z\n      handler_name    : DJI dbgi\n  Stream #0:3[0x0]: Video: mjpeg (Baseline), yuvj420p(pc, bt470bg/unknown/unknown), 1280x720 [SAR 1:1 DAR 16:9], 90k tbr, 90k tbn (attached pic)\nUnsupported codec with id 0 for input stream 1\nUnsupported codec with id 0 for input stream 2\n

          Here we see 4 streams. 0:0 is a video stream, 0:1 is a DJI meta data stream, 0:2 is a DJI dbgi data stream, and 0:3 is a video mjpeg stream. Using this list as a reference, we can dump an individual stream. For instance, to dump the 0:2 stream:

          ffmpeg -i DJI_20230929174507_0003_D.MP4 -map 0:2 -f data -c copy stream2.bin\n
          "},{"location":"ffmpeg/#reduce-frame-rate","title":"Reduce frame rate","text":"

          This example is taken directly from https://trac.ffmpeg.org/wiki/ChangingFrameRate

          ffmpeg -i src.mp4 -filter:v fps=30 dest.mp4\n
          "},{"location":"ffmpeg/#record-video-from-a-macos-webcam","title":"Record video from a macOS webcam","text":"

          This is video only, no audio.

          ffmpeg -f avfoundation -r 30 -i 1 webcam.mov\n
          "},{"location":"ffmpeg/#screen-record-a-macos-desktop","title":"Screen record a macOS desktop","text":"
          ffmpeg -f avfoundation -r 30 -i 3 screen_capture.mov\n
          "},{"location":"ffmpeg/#generate-a-timelapse-from-dji-hyperlapse-photos","title":"Generate a timelapse from DJI hyperlapse photos","text":"

          The DJI Hyperlapse videos are pretty awful, with unnecessary cropping and perspective warping. You're better off generating your own movie from the still images it took. To do so, cd to the directory with the hyperlapse photos in it and run:

          ffmpeg -framerate 30 -pattern_type glob -i \"HYPERLAPSE*.JPG\" -s:v 4032x3024 -c:v libx264 -crf 17 -pix_fmt yuv420p timelapse.mp4\n

          You may need to adjust the 4032x3024 dimensions if you are not using the Mini 4 Pro.

          "},{"location":"ffmpeg/#crop-a-video","title":"Crop a video","text":"

          Find the dimensions of your video with exiftool -ImageSize \"$FileName\" or ffprobe -v error -show_entries stream=width,height -of default=noprint_wrappers=1 \"$FileName\". Once you have that, you must construct a command like this:

          ffmpeg -i Sunset.mp4 -filter:v \"crop=2884:2160:478:0\" Sunset-cropped.mp4\n

          Where 2884 is the width, 2160 is the height, 478 is how far to shift the crop from the left edge of the frame, and 0 is how far to shift the crop from the top of the frame.
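
          If you just want a centered crop you can omit the offsets, since the crop filter defaults x and y to centering the output, and you can use the in_w/in_h input-size variables instead of hard-coding dimensions:

          # Centered crop of the same size as above\nffmpeg -i Sunset.mp4 -filter:v \"crop=2884:2160\" Sunset-centered.mp4\n# Trim 100 pixels off every edge, whatever the input size\nffmpeg -i Sunset.mp4 -filter:v \"crop=in_w-200:in_h-200\" Sunset-trimmed.mp4\n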

          "},{"location":"figlet/","title":"figlet","text":"

          figlet prints horizontal text as large ASCII-art drawings.

          "},{"location":"figlet/#examples","title":"Examples","text":""},{"location":"figlet/#print-text","title":"Print text","text":"
          $ figlet hello\n  _          _ _\n | |__   ___| | | ___\n | '_ \\ / _ \\ | |/ _ \\\n | | | |  __/ | | (_) |\n |_| |_|\\___|_|_|\\___/\n
          "},{"location":"figlet/#show-available-fonts","title":"Show available fonts","text":"
          $ showfigfonts | head\n3-d :\n  ****             **\n */// *           /**\n/    /*           /**\n   ***  *****  ******\n  /// */////  **///**\n *   /*      /**  /**\n/ ****       //******\n ////         //////\n
          "},{"location":"figlet/#use-a-font","title":"Use a font","text":"
          $ figlet -f 3-d hello\n **               **  **\n/**              /** /**\n/**       *****  /** /**  ******\n/******  **///** /** /** **////**\n/**///**/******* /** /**/**   /**\n/**  /**/**////  /** /**/**   /**\n/**  /**//****** *** ***//******\n//   //  ////// /// ///  //////\n
          "},{"location":"figlet/#see-also","title":"See Also","text":"
          • cowsay
          "},{"location":"finance/","title":"Finance","text":"

          Misc financial information

          "},{"location":"finance/#links","title":"Links","text":"
          • How to place a credit freeze on your credit report: https://www.usa.gov/credit-freeze
          "},{"location":"find/","title":"find","text":"

          The find util lets you search a filesystem for things that match filesystem attributes. Unfortunately this is one of those tools where BSD and GNU deviate syntactically and feature-wise, and GNU mostly wins.

          "},{"location":"find/#examples","title":"Examples","text":""},{"location":"find/#find-and-delete-empty-directories-2-levels-deep-or-deeper","title":"Find and delete empty directories 2 levels deep or deeper","text":"

          find \"${PWD}\" -mindepth 2 -type d -empty -delete

          "},{"location":"find/#find-based-on-a-regex","title":"Find based on a regex","text":"

          find /tank/movies -regextype egrep -iregex '.*\\.(mov|mp4)$'

          "},{"location":"find/#find-files-and-perform-operations-on-them","title":"Find files and perform operations on them","text":"

          One at a time:

          find \"${PWD}\" -type d -exec dot_clean {} \\;

          Or several in batches, similar to how xargs handles things:

          find \"${PWD}\" -type d -exec dot_clean {} \\+

          "},{"location":"find/#find-files-that-match-a-glob","title":"Find files that match a glob","text":"

          find \"${PWD}\" -name '????????-??-??-??_[0-9][0-9][0-9]???.dng'

          "},{"location":"find/#alter-permissions-on-some-files-that-are-not-already-set-correctly","title":"Alter permissions on some files that are not already set correctly","text":"

          find . -mindepth 2 -type f ! -perm 444 -exec chmod 444 {} \\+

          "},{"location":"find/#find-files-in-the-current-directory-that-do-not-match-any-of-several-listed-filenames","title":"Find files in the current directory that do not match any of several listed filenames","text":"

          find . -maxdepth 1 -type f ! -iname '.*' ! -name .DS_Store ! -name '*.db'

          "},{"location":"find/#correctly-handle-spaces-when-piping-to-xargs","title":"Correctly handle spaces when piping to xargs","text":"

          find /Applications -mindepth 1 -maxdepth 1 -type d -name '* *' -print0 | xargs -0 -n1 echo

          "},{"location":"find/#find-executable-files","title":"Find executable files","text":"

          This finds all files where an executable bit is set.

          With BSD find:

          find . -type f -perm +111

          With GNU find:

          find . -type f -executable

          "},{"location":"find/#see-also","title":"See also","text":"
          • https://github.com/jhspetersson/fselect: Find files with SQL-like queries
          • https://github.com/junegunn/fzf: fzf is a general-purpose command-line fuzzy finder
          "},{"location":"findmnt/","title":"findmnt","text":"

          \"findmnt will list all mounted filesystems or search for a filesystem. The findmnt command is able to search in /etc/fstab, /etc/fstab.d, /etc/mtab or /proc/self/mountinfo. If device or mountpoint is not given, all filesystems are shown.\" - man findmnt

          "},{"location":"findmnt/#examples","title":"Examples","text":""},{"location":"findmnt/#simple-usage","title":"Simple usage","text":"

          Here is the output of findmnt on an Ubuntu 16.04 Vagrant box:

          TARGET                                SOURCE     FSTYPE     OPTIONS\n/                                     /dev/sda1  ext4       rw,relatime,data=ordered\n\u251c\u2500/sys                                sysfs      sysfs      rw,nosuid,nodev,noexec,relatime\n\u2502 \u251c\u2500/sys/kernel/security              securityfs securityfs rw,nosuid,nodev,noexec,relatime\n\u2502 \u251c\u2500/sys/fs/cgroup                    tmpfs      tmpfs      ro,nosuid,nodev,noexec,mode=755\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/systemd          cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/cpu,cpuacct      cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,cpu,cpuacct\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/perf_event       cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,perf_event\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/hugetlb          cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,hugetlb\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/blkio            cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,blkio\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/devices          cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,devices\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/cpuset           cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,cpuset\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/memory           cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,memory\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/net_cls,net_prio cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,net_cls,net_prio\n\u2502 \u2502 \u251c\u2500/sys/fs/cgroup/freezer          cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,freezer\n\u2502 \u2502 \u2514\u2500/sys/fs/cgroup/pids             cgroup     cgroup     rw,nosuid,nodev,noexec,relatime,pids\n\u2502 \u251c\u2500/sys/fs/pstore                    pstore     pstore     rw,nosuid,nodev,noexec,relatime\n\u2502 \u251c\u2500/sys/kernel/debug                 debugfs    debugfs    rw,relatime\n\u2502 \u2514\u2500/sys/fs/fuse/connections          fusectl    fusectl    rw,relatime\n\u251c\u2500/proc                               proc       proc       rw,nosuid,nodev,noexec,relatime\n\u2502 \u2514\u2500/proc/sys/fs/binfmt_misc          systemd-1  autofs     rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct\n\u251c\u2500/dev                                udev       devtmpfs   rw,nosuid,relatime,size=500888k,nr_inodes=125222,mode=755\n\u2502 \u251c\u2500/dev/pts                          devpts     devpts     rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000\n\u2502 \u251c\u2500/dev/shm                          tmpfs      tmpfs      rw,nosuid,nodev\n\u2502 \u251c\u2500/dev/hugepages                    hugetlbfs  hugetlbfs  rw,relatime\n\u2502 \u2514\u2500/dev/mqueue                       mqueue     mqueue     rw,relatime\n\u251c\u2500/run                                tmpfs      tmpfs      rw,nosuid,noexec,relatime,size=101596k,mode=755\n\u2502 \u251c\u2500/run/lock                         tmpfs      tmpfs      rw,nosuid,nodev,noexec,relatime,size=5120k\n\u2502 \u2514\u2500/run/user/1000                    tmpfs      tmpfs      rw,nosuid,nodev,relatime,size=101596k,mode=700,uid=1000,gid=1000\n\u251c\u2500/var/lib/lxcfs                      lxcfs      fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other\n\u2514\u2500/vagrant                            vagrant    vboxsf     rw,nodev,relatime\n
          "},{"location":"findmnt/#output-as-keyvalue-pairs-per-device","title":"Output as key/value pairs per device","text":"
          $ findmnt -P\nTARGET=\"/sys\" SOURCE=\"sysfs\" FSTYPE=\"sysfs\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime\"\nTARGET=\"/proc\" SOURCE=\"proc\" FSTYPE=\"proc\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime\"\nTARGET=\"/dev\" SOURCE=\"udev\" FSTYPE=\"devtmpfs\" OPTIONS=\"rw,nosuid,relatime,size=500888k,nr_inodes=125222,mode=755\"\nTARGET=\"/dev/pts\" SOURCE=\"devpts\" FSTYPE=\"devpts\" OPTIONS=\"rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000\"\nTARGET=\"/run\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"rw,nosuid,noexec,relatime,size=101596k,mode=755\"\nTARGET=\"/\" SOURCE=\"/dev/sda1\" FSTYPE=\"ext4\" OPTIONS=\"rw,relatime,data=ordered\"\nTARGET=\"/sys/kernel/security\" SOURCE=\"securityfs\" FSTYPE=\"securityfs\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime\"\nTARGET=\"/dev/shm\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"rw,nosuid,nodev\"\nTARGET=\"/run/lock\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,size=5120k\"\nTARGET=\"/sys/fs/cgroup\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"ro,nosuid,nodev,noexec,mode=755\"\nTARGET=\"/sys/fs/cgroup/systemd\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd\"\nTARGET=\"/sys/fs/pstore\" SOURCE=\"pstore\" FSTYPE=\"pstore\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime\"\nTARGET=\"/sys/fs/cgroup/net_cls,net_prio\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,net_cls,net_prio\"\nTARGET=\"/sys/fs/cgroup/perf_event\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,perf_event\"\nTARGET=\"/sys/fs/cgroup/cpu,cpuacct\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,cpu,cpuacct\"\nTARGET=\"/sys/fs/cgroup/hugetlb\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,hugetlb\"\nTARGET=\"/sys/fs/cgroup/memory\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,memory\"\nTARGET=\"/sys/fs/cgroup/devices\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,devices\"\nTARGET=\"/sys/fs/cgroup/freezer\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,freezer\"\nTARGET=\"/sys/fs/cgroup/cpuset\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,cpuset\"\nTARGET=\"/sys/fs/cgroup/blkio\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,blkio\"\nTARGET=\"/sys/fs/cgroup/pids\" SOURCE=\"cgroup\" FSTYPE=\"cgroup\" OPTIONS=\"rw,nosuid,nodev,noexec,relatime,pids\"\nTARGET=\"/proc/sys/fs/binfmt_misc\" SOURCE=\"systemd-1\" FSTYPE=\"autofs\" OPTIONS=\"rw,relatime,fd=26,pgrp=1,timeout=0,minproto=5,maxproto=5,direct\"\nTARGET=\"/sys/kernel/debug\" SOURCE=\"debugfs\" FSTYPE=\"debugfs\" OPTIONS=\"rw,relatime\"\nTARGET=\"/dev/hugepages\" SOURCE=\"hugetlbfs\" FSTYPE=\"hugetlbfs\" OPTIONS=\"rw,relatime\"\nTARGET=\"/dev/mqueue\" SOURCE=\"mqueue\" FSTYPE=\"mqueue\" OPTIONS=\"rw,relatime\"\nTARGET=\"/sys/fs/fuse/connections\" SOURCE=\"fusectl\" FSTYPE=\"fusectl\" OPTIONS=\"rw,relatime\"\nTARGET=\"/var/lib/lxcfs\" SOURCE=\"lxcfs\" FSTYPE=\"fuse.lxcfs\" OPTIONS=\"rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other\"\nTARGET=\"/vagrant\" SOURCE=\"vagrant\" FSTYPE=\"vboxsf\" OPTIONS=\"rw,nodev,relatime\"\nTARGET=\"/run/user/1000\" SOURCE=\"tmpfs\" FSTYPE=\"tmpfs\" OPTIONS=\"rw,nosuid,nodev,relatime,size=101596k,mode=700,uid=1000,gid=1000\"\n
          "},{"location":"findmnt/#output-as-json","title":"Output as JSON","text":"
          $ findmnt -J\n{\n   \"filesystems\": [\n      {\"target\": \"/\", \"source\": \"/dev/sda1\", \"fstype\": \"ext4\", \"options\": \"rw,relatime,data=ordered\",\n         \"children\": [\n            {\"target\": \"/sys\", \"source\": \"sysfs\", \"fstype\": \"sysfs\", \"options\": \"rw,nosuid,nodev,noexec,relatime\",\n               \"children\": [\n                  {\"target\": \"/sys/kernel/security\", \"source\": \"securityfs\", \"fstype\": \"securityfs\", \"options\": \"rw,nosuid,nodev,noexec,relatime\"},\n                  {\"target\": \"/sys/fs/cgroup\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"ro,nosuid,nodev,noexec,mode=755\",\n                     \"children\": [\n                        {\"target\": \"/sys/fs/cgroup/systemd\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd\"},\n                        {\"target\": \"/sys/fs/cgroup/net_cls,net_prio\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,net_cls,net_prio\"},\n                        {\"target\": \"/sys/fs/cgroup/perf_event\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,perf_event\"},\n                        {\"target\": \"/sys/fs/cgroup/cpu,cpuacct\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,cpu,cpuacct\"},\n                        {\"target\": \"/sys/fs/cgroup/hugetlb\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,hugetlb\"},\n                        {\"target\": \"/sys/fs/cgroup/memory\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,memory\"},\n                        {\"target\": \"/sys/fs/cgroup/devices\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,devices\"},\n                        {\"target\": \"/sys/fs/cgroup/freezer\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,freezer\"},\n                        {\"target\": \"/sys/fs/cgroup/cpuset\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,cpuset\"},\n                        {\"target\": \"/sys/fs/cgroup/blkio\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,blkio\"},\n                        {\"target\": \"/sys/fs/cgroup/pids\", \"source\": \"cgroup\", \"fstype\": \"cgroup\", \"options\": \"rw,nosuid,nodev,noexec,relatime,pids\"}\n                     ]\n                  },\n                  {\"target\": \"/sys/fs/pstore\", \"source\": \"pstore\", \"fstype\": \"pstore\", \"options\": \"rw,nosuid,nodev,noexec,relatime\"},\n                  {\"target\": \"/sys/kernel/debug\", \"source\": \"debugfs\", \"fstype\": \"debugfs\", \"options\": \"rw,relatime\"},\n                  {\"target\": \"/sys/fs/fuse/connections\", \"source\": \"fusectl\", \"fstype\": \"fusectl\", \"options\": \"rw,relatime\"}\n               ]\n            },\n            {\"target\": \"/proc\", \"source\": \"proc\", \"fstype\": \"proc\", \"options\": \"rw,nosuid,nodev,noexec,relatime\",\n               \"children\": [\n                  {\"target\": \"/proc/sys/fs/binfmt_misc\", \"source\": \"systemd-1\", \"fstype\": \"autofs\", \"options\": 
\"rw,relatime,fd=26,pgrp=1,timeout=0,minproto=5,maxproto=5,direct\"}\n               ]\n            },\n            {\"target\": \"/dev\", \"source\": \"udev\", \"fstype\": \"devtmpfs\", \"options\": \"rw,nosuid,relatime,size=500888k,nr_inodes=125222,mode=755\",\n               \"children\": [\n                  {\"target\": \"/dev/pts\", \"source\": \"devpts\", \"fstype\": \"devpts\", \"options\": \"rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000\"},\n                  {\"target\": \"/dev/shm\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"rw,nosuid,nodev\"},\n                  {\"target\": \"/dev/hugepages\", \"source\": \"hugetlbfs\", \"fstype\": \"hugetlbfs\", \"options\": \"rw,relatime\"},\n                  {\"target\": \"/dev/mqueue\", \"source\": \"mqueue\", \"fstype\": \"mqueue\", \"options\": \"rw,relatime\"}\n               ]\n            },\n            {\"target\": \"/run\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"rw,nosuid,noexec,relatime,size=101596k,mode=755\",\n               \"children\": [\n                  {\"target\": \"/run/lock\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"rw,nosuid,nodev,noexec,relatime,size=5120k\"},\n                  {\"target\": \"/run/user/1000\", \"source\": \"tmpfs\", \"fstype\": \"tmpfs\", \"options\": \"rw,nosuid,nodev,relatime,size=101596k,mode=700,uid=1000,gid=1000\"}\n               ]\n            },\n            {\"target\": \"/var/lib/lxcfs\", \"source\": \"lxcfs\", \"fstype\": \"fuse.lxcfs\", \"options\": \"rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other\"},\n            {\"target\": \"/vagrant\", \"source\": \"vagrant\", \"fstype\": \"vboxsf\", \"options\": \"rw,nodev,relatime\"},\n         ]\n      }\n   ]\n}\n
          "},{"location":"findmnt/#see-also","title":"See also","text":"
          • lsblk
          "},{"location":"fio/","title":"fio","text":"

          \"fio - flexible I/O tester\" - man fio

          This seems to work well on Linux, but not so well on macOS.

          "},{"location":"fio/#examples","title":"Examples","text":""},{"location":"fio/#simple-disk-benchmark","title":"Simple disk benchmark","text":"
          mkdir temp  # somewhere on the disk you want to test\ncd temp\nfio \\\n  --bs=4k \\\n  --end_fsync=1 \\\n  --iodepth=1 \\\n  --ioengine=posixaio \\\n  --name=random-write \\\n  --numjobs=1 \\\n  --runtime=60 \\\n  --rw=randwrite \\\n  --size=4g \\\n  --time_based\n
          "},{"location":"fio/#see-also","title":"See also","text":"
          • pv - Pipe viewer can give you stats about arbitrary pipeline throughput.
          • How fast are your disks? Find out the open source way, with fio
          "},{"location":"flask/","title":"flask","text":"

          \"Flask is a lightweight WSGI web application framework. It is designed to make getting started quick and easy, with the ability to scale up to complex applications. \" - https://palletsprojects.com/p/flask/

          "},{"location":"flask/#links","title":"Links","text":"
          • https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world
          • https://restfulapi.net - General REST API guidelines
          "},{"location":"flask/#tips","title":"Tips","text":""},{"location":"flask/#show-routes","title":"Show routes","text":"
          flask routes\n
          "},{"location":"flask/#debug-in-a-shell","title":"Debug in a shell","text":"
          export FLASK_APP=app\nexport FLASK_ENV=development\nflask shell\n
          "},{"location":"flipper-zero/","title":"Flipper Zero","text":"

          \"Multi-tool Device for Geeks\" - https://flipperzero.one

          "},{"location":"flipper-zero/#links","title":"Links","text":"
          • https://docs.flipper.net: The official docs, which are awesome.
          • https://instantiator.dev/post/flipper-zero-app-tutorial-01
          "},{"location":"flipper-zero/#see-also","title":"See also","text":"
          • My NFC notes
          • My Nintendo Amiibo notes
          "},{"location":"fluent-bit/","title":"fluent-bit","text":"

          \"Fluent Bit is an open source Log Processor and Forwarder which allows you to collect any data like metrics and logs from different sources, enrich them with filters and send them to multiple destinations. It's the preferred choice for containerized environments like Kubernetes.\" - https://fluentbit.io

          "},{"location":"fluent-bit/#examples","title":"Examples","text":""},{"location":"fluent-bit/#simple-stdout-log-server","title":"Simple stdout log server","text":"

          Useful for debugging.

          fluent-bit -i tcp -p port=4444 -p format=none -f1 -o stdout\n
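
          You can then throw a test line at it with netcat and watch it show up on stdout (assuming nc is installed):

          echo 'hello fluent-bit' | nc localhost 4444\n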
          "},{"location":"fluentd/","title":"fluentd","text":"

          \"Fluentd is an open source data collector for unified logging layer.\" - https://www.fluentd.org/

          "},{"location":"fluentd/#snips","title":"Snips","text":""},{"location":"fluentd/#parse-the-tail-pos-file-into-decimal-position-inode-and-inspect-the-position","title":"Parse the tail pos file into decimal position, inode, and inspect the position","text":"

          This pos_file's columns are Filename, Position, Inode. In the below examples we don't actually do anything with the inode number, but you could use it in debugfs etc.

          POS_FILE=\"/var/log/fluentd-containers.log.pos\"\nwhile read -r file pos inode ; do\n    echo \"$file $((16#$pos)) $((16#$inode))\"\ndone < \"$POS_FILE\"\n

          This will output something like:

          /var/log/containers/calico-node-0am...cb0.log 2797 5347425\n

          You can feed this through some math to see how far behind fluentd is for each file, and then into dd to see what the data is that fluentd has yet to process:

          while read -r file pos _ ; do\n    if f=$(readlink -f \"$file\") && [ -f \"$f\" ] ; then  # resolve symlinks and check that file exists\n        f_size=\"$(stat -c \"%s\" \"$f\")\"  # get file size in bytes\n        pos_dec=$((16#$pos))  # convert pos from hex to dec\n        if [[ \"$f_size\" -gt \"${pos_dec}\" ]]; then\n            echo \"$file f_size=${f_size}, pos=${pos_dec}, diff=$(( f_size - 16#$pos ))\"\n            dd status=none bs=1 if=\"$f\" skip=\"${pos_dec}\" count=256\n            echo\n        fi\n    fi\ndone < \"$POS_FILE\"\n

          Which will output a bunch of lines like:

          /var/log/containers/network-metering-agent-tsl6s_kube-system_agent-25c3e4bc7bd0ddfdda571d8279b040d0a2f3dac03786a40b19dac11873a6af5a.log f_size=1996377, pos=1995147, diff=1230\n{\"log\":\"W0809 18:03:09.184540       1 reflector.go:289] k8s.io/client-go/informers/factory.go:133: watch of *v1.ConfigMap ended with: too old resource version: 1489908695 (1489955501)\\n\",\"stream\":\"stderr\",\"time\":\"2021-08-09T18:03:09.184786383Z\"}\n{\"log\":\"W0\n
          "},{"location":"fortune/","title":"fortune","text":"

          fortune is a unix command that displays a random fortune on the CLI.

          "},{"location":"fortune/#make-a-fortune-file","title":"Make a fortune file","text":"
          1. Create a file that has each fortune separated by a line containing only a % symbol.
          2. Run strfile fortunes.txt, which will create fortunes.txt.dat
          3. You can then see the fortunes with fortune fortunes.txt. This also works with many files in a single directory: for file in *.txt ; do strfile \"${file}\" ; done ; fortune .
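
          Here is the whole flow as a copy/paste-able sketch:

          printf '%s\\n' 'A plan with no backups is not a plan.' '%' 'Quote your shell variables.' > fortunes.txt\nstrfile fortunes.txt   # creates fortunes.txt.dat\nfortune fortunes.txt   # prints one of the fortunes at random\n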
          "},{"location":"fuser/","title":"fuser","text":"

          \"fuser - identify processes using files or sockets\" - man fuser

          This command is not directly related to the FUSE command fusermount.

          The output here is a bit unusual in that it sends PIDs to stdout and everything else to stderr, but interleaves them, so what you see in the terminal is quite different from what you get via pipes.

          "},{"location":"fuser/#examples","title":"Examples","text":""},{"location":"fuser/#show-what-is-using-varlog","title":"Show what is using /var/log","text":"
          $ fuser -m /var/log\n/var/log:             2858m  4608rce  4609rce  4749rce\n

          See man fuser for the meaning of each letter.

          But what you get via a pipe is just the PIDs. The first line of output is all of stderr, and everything after that is stdout.

          $ fuser -m /var/log | xargs -n1 echo\n/var/log:           mrcercerce\n2858\n4608\n4609\n4749\n
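
          This means that in a script you can capture just the PIDs by discarding stderr:

          pids=$(fuser -m /var/log 2>/dev/null)\necho \"PIDs using /var/log: $pids\"\n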
          "},{"location":"fuser/#kill-all-processes-accessing-a-given-file","title":"Kill all processes accessing a given file","text":"
          FILE=\"/path/to/somefile.log\"\nfuser -km \"$FILE\"\n
          "},{"location":"fuser/#show-processes-accessing-the-local-ssh-port","title":"Show processes accessing the local ssh port","text":"

          This only works for local ports.

          ## By service/protocol (see /etc/services)\nsudo fuser ssh/tcp\n## By arbitrary port/protocol\nsudo fuser 22/tcp\n
          "},{"location":"fuser/#check-if-a-file-is-being-accessed","title":"Check if a file is being accessed","text":"
          FILE=\"/path/to/somefile.vmdk\"\nfuser \"$FILE\" && echo \"Yes, $FILE is being used.\"\n
          "},{"location":"gaming/","title":"gaming","text":""},{"location":"gaming/#video-game-engines-and-frameworks","title":"Video Game Engines and Frameworks","text":"
          • https://love2d.org (2d lua)
          • https://lovr.org (VR lua)
          • https://godotengine.org
          • https://www.pygame.org (python)
          • https://www.lexaloffle.com/pico-8.php (2d lua fantasy console)
          • https://www.lexaloffle.com/voxatron.php (3d lua fantasy console)
          • https://paladin-t.github.io/bitty
          "},{"location":"genicam/","title":"GenICam","text":"

          \"The Generic Interface for Cameras standard is the base for plug & play handling of cameras and devices.\" - http://www.emva.org/standards-technology/genicam/

          "},{"location":"geodata/","title":"geodata","text":"
          • https://geojson.org
          • https://geojson.io: Online GeoJSON editor
          • https://tools.ietf.org/html/rfc7946
          • https://www.gpsbabel.org
          • https://en.wikipedia.org/wiki/Military_Grid_Reference_System
          • https://en.wikipedia.org/wiki/Open_Location_Code
          • https://en.wikipedia.org/wiki/World_Geodetic_System
          • https://what3words.com
          • https://www.geocaching.com
          • https://macwright.com/lonlat: \"Geospatial software has a fundamental inconsistency: which order we put longitude and latitude in.\"
          • https://platform.leolabs.space/visualization: Live visualization of low earth orbit traffic, including satellites, rockets and debris.
          • https://satellitemap.space: Live visualization of a few different types of satellites like Starlink, GPS, Oneweb
          • https://en.wikipedia.org/wiki/Dilution_of_precision_(navigation)
          • https://healpix.jpl.nasa.gov: \"Hierarchical Equal Area isoLatitude Pixelization of a sphere\"
          • https://hivekit.io/blog/the-headaches-of-distributed-spatial-indices: \"Hexagons and Hilbert Curves - The Headaches of Distributed Spatial Indices\"
          • https://app.regrid.com/us: Property lines
          • https://shademap.app: Visualize shadows from trees and landscape for any given time and place
          "},{"location":"geodata/#hexagonal-geodata","title":"Hexagonal geodata","text":"
          • https://www.redblobgames.com/grids/hexagons/: Lots of interactive learning fun with hexagons. Really great resource for playing with hexagon theory.
          • https://www.uber.com/blog/h3/: Uber\u2019s Hexagonal Hierarchical Spatial Index
          • https://pro.arcgis.com/en/pro-app/latest/tool-reference/spatial-statistics/h-whyhexagons.htm: short explanation of the benefits of a hexagonal geo datum
          • https://www.gamedev.net/articles/programming/general-and-gameplay-programming/coordinates-in-hexagon-based-tile-maps-r1800/: interesting theory for addressing hexagonal space in games
          • https://james.darpinian.com/satellites \"See a satellite tonight. No telescope required\"
          "},{"location":"gige-vision/","title":"GigE Vision","text":"

          \"GigE Vision is an interface standard introduced in 2006 for high-performance industrial cameras. It provides a framework for transmitting high-speed video and related control data over Ethernet networks. The distribution of software or development, manufacture or sale of hardware that implement the standard, require the payment of annual licensing fees.\" - https://en.wikipedia.org/wiki/GigE_Vision

          "},{"location":"gige-vision/#technology","title":"Technology","text":"
          • GigE Vision Control Protocol (GVCP)
          • GigE Vision Stream Protocol (GVSP)
          • GigE Device Discovery Mechanism
          "},{"location":"gige-vision/#see-also","title":"See also","text":"
          • http://www.emva.org/standards-technology/genicam/
          "},{"location":"git/","title":"Git","text":"

          badass version control

          "},{"location":"git/#links","title":"Links","text":"
          • https://git-scm.com/book/
          • https://docs.github.com/en/get-started/using-github/github-flow
          • https://github.com/metacloud/gilt: gilt - A GIT layering tool
          • https://github.com/git/git/tree/master/Documentation/RelNotes: Git release notes
          • https://guides.github.com/introduction/flow/index.html: Understanding the GitHub flow
          • http://nvie.com/posts/a-successful-git-branching-model: A successful Git branching model
          • https://chris.beams.io/posts/git-commit: How to Write a Git Commit Message
          • https://www.conventionalcommits.org: Conventional Commits: A specification for adding human and machine readable meaning to commit messages
          • https://github.com/googleapis/release-please: Release Please automates CHANGELOG generation, the creation of GitHub releases, and version bumps for your projects.
          • https://diziet.dreamwidth.org/14666.html: Never use git submodules
          • https://forgejo.org: git hosting software with organization and user management in a web UI
          • https://github.blog/2023-10-16-measuring-git-performance-with-opentelemetry
          • https://glasskube.dev/guides/git/: \"The guide to Git I never had.\"
          • https://pre-commit.com: I use this in nearly every git repo I create, and I suggest everybody else do the same.
          • https://stefaniemolin.com/tags/pre-commit%20hooks/: Stefanie Molin's pre-commit articles
          • https://www.golinuxcloud.com/git-head-caret-vs-tilde-at-sign-examples: \"Understanding git HEAD~ vs HEAD^ vs HEAD@{}\"
          • https://blog.izissise.net/posts/gitconfig
          "},{"location":"git/#examples","title":"Examples","text":""},{"location":"git/#git-init","title":"git init","text":""},{"location":"git/#create-a-git-repository-for-the-cwd","title":"Create a git repository for the CWD","text":"
          git init\necho \"\" >> .gitignore\necho \"# Ignore other unneeded files.\n*.swp\n*~\n.DS_Store\" >> .gitignore\n
          "},{"location":"git/#git-clone","title":"git clone","text":""},{"location":"git/#clone-a-local-repo","title":"Clone a local repo","text":"
          git clone /path/to/repo\n
          "},{"location":"git/#clone-a-remote-git-repo-via-ssh","title":"Clone a remote git repo via ssh","text":"
          git clone user@ssh_server:/opt/git/project\n
          "},{"location":"git/#clone-and-specify-a-key-when-using-ssh-agent","title":"Clone and specify a key when using ssh-agent","text":"

          When using ssh-agent, having several keys loaded can present problems. One of those problems is having multiple keys loaded that have different authorizations on a remote git server like GitHub. For instance, say you have ~/.ssh/id_ed25519_home as your personal private key and ~/.ssh/id_ed25519_work as your private work key. If you try to clone a work repo and git tries to authenticate with your home identity first, it will be unauthorized and the clone will fail, even though you have a second identity loaded that could have succeeded. To work around this, do something like:

          export GIT_SSH_COMMAND=\"ssh -o IdentitiesOnly=yes -i $HOME/.ssh/id_ed25519_work\"\ngit clone git@github.com:your-work-gh-org/super-secret-repo.git\n

          This works fine when one key has authorization to all private repos. It becomes difficult when you have multiple repos, each with a key that is only authorized for that single repo, such as when using deploy keys tied to a single repo. If you try that when doing something like yarn install, which clones multiple private repos, it will fail 100% of the time. In that case, you can follow the next example. You may have to read it twice, because setting up the configuration makes more sense if you go ssh then git, but logically it makes more sense to go from git into ssh, which is how the process actually flows.

          Construct an ssh_config file that configures a unique Host for each private repo you need to clone and private key file that has access to it:

          Host secret_repo_1\n   Hostname github.com\n   IdentityFile ~/.ssh/private_key_for_repo_1\n\nHost secret_repo_2\n   Hostname github.com\n   IdentityFile ~/.ssh/private_key_for_repo_2\n\nIdentitiesOnly yes\n

          The ssh_configs above allow ssh access to what is essentially a host alias where the key that is authorized for the repo is used when ssh tries to connect to the correlated Host entry. In the next step, we do the same thing in reverse by crafting a gitconfig file with one stanza for each of the ssh Host entries from your ssh_config, pointing it back to github:

          [url \"git@secret_repo_1:your-work-gh-org/super-secret-repo-1.git\"]\n    insteadOf = git@github.com:your-work-gh-org/super-secret-repo-1.git\n\n[url \"git@secret_repo_2:your-work-gh-org/super-secret-repo-2.git\"]\n    insteadOf = git@github.com:your-work-gh-org/super-secret-repo-2.git\n

          We then export two variables in the shell:

          export GIT_SSH_COMMAND=\"ssh -F $PWD/your_crafted_ssh_config_file\"\nexport GIT_CONFIG_GLOBAL=\"$PWD/your_crafted_gitconfig_file\"\n

          What happens next is that when you execute git clone git@github.com:your-work-gh-org/super-secret-repo-1.git, which is the original source git URL, your git config rewrites the URL to git@secret_repo_1:your-work-gh-org/super-secret-repo-1.git. The server name is passed into ssh, which uses your custom ssh_config file to connect to github.com using the options for that Host entry, including the identity file that is unique to that git repository. The same series of steps happens for secret_repo_2. The result is that each of these git repositories can be cloned using its original GitHub URL, while these custom ssh configs are used in the process, which allows the right authentication mechanism to be used for each individual git repository. This all happens without altering the source code of the repo we are building, EG: without modifying the package.json that yarn uses. Using these techniques, we can set up CI to build software from private repositories using deploy keys, where we would otherwise hit ssh authentication errors that would not affect somebody with a single ssh key authorized to clone all of the repositories.
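
          With the example names above, you can sanity-check the setup before handing it to CI by listing refs, which exercises the whole rewrite-then-ssh chain without cloning anything:

          export GIT_SSH_COMMAND=\"ssh -F $PWD/your_crafted_ssh_config_file\"\nexport GIT_CONFIG_GLOBAL=\"$PWD/your_crafted_gitconfig_file\"\n# Should list refs without prompting if the per-repo deploy key was used\ngit ls-remote git@github.com:your-work-gh-org/super-secret-repo-1.git\n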

          "},{"location":"git/#git-filesystem-operations","title":"git filesystem operations","text":""},{"location":"git/#add-everything-in-the-cwd-to-the-git-repo","title":"Add everything in the CWD to the git repo","text":"
          git add .\n
          "},{"location":"git/#rename-a-file-in-the-git-repo","title":"Rename a file in the git repo","text":"

          This also renames the filesystem file.

          git mv README.rdoc README.md\n
          "},{"location":"git/#delete-a-file-from-the-repo","title":"Delete a file from the repo","text":"
          git rm filename\n
          "},{"location":"git/#git-status","title":"git status","text":""},{"location":"git/#check-the-status-of-git","title":"Check the status of git","text":"
          git status\n
          "},{"location":"git/#git-commit","title":"git commit","text":""},{"location":"git/#commit-the-current-changes","title":"Commit the current changes","text":"
          git commit -m \"Initial commit\"\n
          "},{"location":"git/#commit-all-changes-with-commit-a","title":"Commit all changes with commit -a","text":"
          git commit -a -m \"Improve the README file\"\n
          "},{"location":"git/#skip-git-commit-hooks","title":"Skip git commit hooks","text":"
          git commit --no-verify\n
          "},{"location":"git/#git-tag","title":"git tag","text":"

          https://git-scm.com/book/en/v2/Git-Basics-Tagging

          Git supports two types of tags: lightweight and annotated.

          "},{"location":"git/#create-an-annotated-tag","title":"Create an annotated tag","text":"

          Annotated tags are stored as full objects in the Git database.

          git tag -m \"Improve X and Y.\" v0.5.3\n
          "},{"location":"git/#create-a-light-tag","title":"Create a light tag","text":"

          This is basically a DNS A record for a git SHA: the SHA is referenced by the tag, and no other info is stored. Using them is generally frowned upon because tags tend to be used where context is important, so the annotations that go with an annotated tag are more suitable.

          git tag v0.5.3\n
          "},{"location":"git/#delete-a-local-tag","title":"Delete a local tag","text":"
          git tag -d v0.5.3\n
          "},{"location":"git/#delete-a-remote-tag","title":"Delete a remote tag","text":"
          git push --delete origin v0.5.3\n
          "},{"location":"git/#show-what-tags-contain-a-given-sha","title":"Show what tags contain a given sha","text":"
          git tag --contains abc123\n
          "},{"location":"git/#git-config","title":"git config","text":"

          git config interacts with configs. There are three scopes: --local, --global, --system.

          • Local = per-repo settings. IE: stored in the repo's .git/config file
          • Global = per-user settings. IE: stored in ~/.gitconfig
          • System = per-system settings, found in /etc/ or wherever git is looking for system settings.
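
          For example, the same key can be set or read at each scope (the addresses are placeholders):

          git config --local user.email work@example.com    # only this repo's .git/config\ngit config --global user.email home@example.com   # your ~/.gitconfig\ngit config --global --get user.email              # read a value back\n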
          "},{"location":"git/#always-use-ssh-for-githubcom","title":"Always use ssh for github.com","text":"
          git config --global url.\"git@github.com:\".insteadOf \"https://github.com/\"\n
          "},{"location":"git/#git-client-setup","title":"Git client setup","text":"

          This creates and modifies ~/.gitconfig with some parameters:

          git config --global user.name \"Daniel Hoherd\"\ngit config --global user.email daniel.hoherd@gmail.com\ngit config --global alias.co checkout\ngit config --global core.editor \"vim\"\ngit config --global merge.tool vimdiff\ngit config --global log.date iso\n
          "},{"location":"git/#edit-a-gitconfig-file-with-some-params","title":"Edit a .git/config file with some params","text":"
          git config --replace-all svn-remote.svn.url https://svn.example.com/ops/\ngit config --replace-all svn-remote.svn.fetch ops:refs/remotes/trunk\ngit config --add svn-remote.svn.preserve-empty-dirs true\ngit config --unset svn-remote.svn.branches\ngit config --unset svn-remote.svn.tags\ngit config --add svn.authorsfile /srv-cluster/git-svn/git/author.txt\n
          "},{"location":"git/#show-your-configs-in-a-dotted-one-one-per-option-format","title":"Show your configs in a dotted one-one-per-option format","text":"
          git config --list\n

          Also include the file that each configuration setting is defined in:

          git config --list --show-origin\n
          "},{"location":"git/#git-diff","title":"git diff","text":"

          Show differences between objects and stuff.

          "},{"location":"git/#diff-between-staged-and-committed","title":"diff between staged and committed","text":"

          This is useful when you're adding files that were not previously in the repo alongside changes to existing files, since a bare git diff before adding the files will only show changes to files that were already in the repo.

          git diff --staged\n
          "},{"location":"git/#diff-that-shows-per-word-colored-differences","title":"diff that shows per-word colored differences","text":"
          git diff --color-words\n
          "},{"location":"git/#machine-readable-word-diff","title":"Machine readable word diff","text":"
          git diff --word-diff\n
          "},{"location":"git/#diff-and-ignore-whitespace","title":"Diff and ignore whitespace","text":"

          This does not ignore line-ending changes or blank-line insertions and removals.

          git diff -w\n
          "},{"location":"git/#show-diffs-between-master-and-a-given-date","title":"Show diffs between master and a given date","text":"
          git diff $(git rev-list -n1 --before=\"1 month ago\" master)\n
          "},{"location":"git/#show-what-has-changed-since-a-point-in-time","title":"Show what has changed since a point in time","text":"
          git whatchanged --since=\"18 hours ago\" -p\n

          or...

          git whatchanged --since=\"18 hours ago\" --until=\"6 hours ago\" -p\n
          "},{"location":"git/#git-blame","title":"git blame","text":"

          git blame shows information about the commit associated with each line of a file.

          "},{"location":"git/#simple-usage","title":"Simple usage","text":"
          git blame <filename>\n
          "},{"location":"git/#show-non-whitespace-changes-in-blame","title":"Show non-whitespace changes in blame","text":"

          When somebody has reformatted code but didn't make any code changes, this will show the prior commits where something more than whitespace changed.

          git blame -w <filename>\n
          "},{"location":"git/#git-log","title":"git log","text":"

          Shows commit history.

          "},{"location":"git/#view-the-commit-history","title":"View the commit history","text":"
          git log\n
          "},{"location":"git/#show-one-log-entry","title":"Show one log entry","text":"
          git log -1\n
          "},{"location":"git/#show-git-commits-that-contain-a-given-string","title":"Show git commits that contain a given string","text":"

          This searches the content of the diff, not the commit message.

          git log -S search_string\n
          "},{"location":"git/#show-commit-messages-that-match-a-given-regex","title":"Show commit messages that match a given regex","text":"
          git log --grep='[Ww]hitespace'\n
          "},{"location":"git/#show-logs-for-a-given-dir-in-the-last-3-days","title":"Show logs for a given dir in the last 3 days","text":"
          git log --since=3.days modules/profile_sensu\n
          "},{"location":"git/#show-raw-log-history-for-5-most-recent-commits","title":"Show raw log history for 5 most recent commits","text":"

          Useful for seeing TZ settings.

          git log --format=raw -5\n
          "},{"location":"git/#really-pretty-logs","title":"Really pretty logs","text":"
          git log --graph --oneline --decorate --all\n
          "},{"location":"git/#git-shortlog","title":"git shortlog","text":""},{"location":"git/#show-number-of-commits-by-user-including-e-mail","title":"Show number of commits by user, including e-mail","text":"

          Using the -e flag includes e-mail address. The list is unique per entry, so if you use a different name along with the same e-mail address, that shows up as two entries in the list.

          git shortlog -ens\n

          Keep in mind this is commits, not lines within the current codebase. If the repo is old, this information may not be useful for finding people who are in-the-know about the current contents of the repo. This is useful for preparing a user list for a git filter-repo operation.
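
          As a sketch of that workflow, you can turn the shortlog output into a mailmap file that maps each stray identity to a canonical one, then rewrite history with it via git filter-repo's --mailmap option. The names below are placeholders:

          # mailmap format: Canonical Name <canonical@email> Old Name <old@email>\ncat > my-mailmap <<'EOF'\nJane Doe <jane@example.com> jdoe <jane@old-laptop.local>\nEOF\ngit filter-repo --mailmap my-mailmap\n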

          "},{"location":"git/#git-show","title":"git show","text":""},{"location":"git/#show-the-changes-from-a-specific-sha","title":"Show the changes from a specific SHA","text":"
          git show f73f9ec7c07e\n
          "},{"location":"git/#show-a-complete-file-as-of-a-given-sha","title":"Show a complete file as of a given SHA","text":"

          This is an absolute path from the git root, not relative to CWD. This command will show the whole file as of the given SHA.

          git show f73f9ec7c07e:dir/filename.yaml\n
          "},{"location":"git/#git-branches","title":"git branches","text":"

          Branches are an integral part of git. They allow you to work on distinct changes without mixing them all up together.

          "},{"location":"git/#create-a-branch","title":"Create a branch","text":"
          git checkout -b readme-fix\n
          "},{"location":"git/#check-which-branch-youre-in","title":"Check which branch you're in","text":"
          git branch\n
          "},{"location":"git/#rename-move-a-branch","title":"Rename (move) a branch","text":"
          git branch -m oldname newname\n
          "},{"location":"git/#show-what-branches-contain-a-given-sha","title":"Show what branches contain a given sha","text":"
          git branch --contains abc123\n
          "},{"location":"git/#git-merge","title":"git merge","text":"

          This lets you merge two branches.

          "},{"location":"git/#merge-branch-with-master","title":"Merge branch with master","text":"
          git checkout master\ngit merge readme-fix-branch\ngit branch -d readme-fix-branch\n
          "},{"location":"git/#disable-fast-forward-merges","title":"disable fast-forward merges","text":"

          You can control how history is kept when merging. By default, a fast-forward merge occurs when possible, which simply moves the branch pointer ahead so the merged commits appear as if they were made directly on the target branch, with no merge commit. By disabling this you get an explicit merge commit for every merge, so you can see that several commits were merged from one branch into another, making it easier to roll back that whole series of commits without digging through the history to see where each commit from the branch came from.

          git config --global merge.ff false\n
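
          You can also do this for a single merge without touching your config:

          git merge --no-ff readme-fix-branch\n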
          "},{"location":"git/#git-filter-repo","title":"git filter-repo","text":"

          git filter-repo is not a standard tool that ships with git, but can be installed separately. It is used for rewriting the history of a git repo, allowing move, rename, merge, and trim operations, rewriting commit IDs, and more. This is a destructive tool, so run it against a fresh clone of the repo while you iterate on finding the right sequence of operations to get to your desired state. It is destructive in that it rewrites the entire history of the repo, so if your repo depends on specific SHAs or any other specific history, you probably need to take a harder look at how to solve your problem.

          "},{"location":"git/#extract-one-subdir-into-its-own-repo-renaming-some-files","title":"Extract one subdir into its own repo, renaming some files","text":"

          This is great if you want to extract part of a repo for public release, or just for organizational purposes.

          Extract the path scratch-work/scripts into bin, removing all other history in the repo.

          git-filter-repo --path scratch-work/scripts --path-rename scratch-work/scripts:bin\n
          "},{"location":"git/#remotes","title":"remotes","text":""},{"location":"git/#add-a-remote","title":"Add a remote","text":"
          git remote add upstream https://github.com/danielhoherd/homepass\n
          "},{"location":"git/#push-to-a-specific-remote","title":"Push to a specific remote","text":"
          # push to the master branch of the remote named upstream\ngit push upstream master\n
          "},{"location":"git/#alter-the-source-of-origin","title":"Alter the source of origin","text":"

          If you move your repo to another location, use this command to change the upstream URL:

          git remote set-url origin https://user@newhost/newpath/reponame\n
          "},{"location":"git/#git-reset","title":"git reset","text":"

          git reset allows you to reset your state to what it was at a previous point.

          "},{"location":"git/#reset-to-a-prior-state-based-on-what-has-been-done-locally","title":"Reset to a prior state based on what has been done locally","text":"

          The reflog is a log of what steps have been performed locally. You can view the reflog, then reset to a prior state.

          git reflog # show all HEAD changes\ngit reset --hard 45e0ae5 # reset all git tracked state to 45e0ae5\n

          Alternately, you can use a date:

          git reflog --date=iso # absolute date based reflog references\ngit reset \"HEAD@{2015-03-25 14:45:30 -0700}\" --hard\n
          "},{"location":"git/#reset-feature-branch-to-state-when-it-was-branched-from-master","title":"Reset feature branch to state when it was branched from master","text":"

Do this if you want to start your branch over with only the current changes. This is useful if you've been iterating through lots of bad changes that were committed and want to clean them all out. The default mixed reset keeps your changes in the working tree, effectively letting you squash your branch down to a single commit.

          git reset $(git merge-base master $(git rev-parse --abbrev-ref HEAD))\n
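
After the reset your changes remain in the working tree, so you can recommit them as one commit (the commit message here is just an example):

git add -A\ngit commit -m 'Squash feature branch work into one commit'\n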
          "},{"location":"git/#hard-reset-of-local-changes","title":"Hard reset of local changes","text":"

This will abandon all local changes, which also resolves any merge conflicts.

          git fetch origin\ngit reset --hard origin/master\n
          "},{"location":"git/#git-clean","title":"git clean","text":""},{"location":"git/#remove-all-untracked-files-and-directories","title":"Remove all untracked files and directories","text":"

This is useful after you reset to a prior state. It deletes all files and directories that show up in the untracked section of git status.

          git clean -ffdx\n
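
To preview what would be deleted before committing to it, swap in the dry-run flag:

git clean -ndx\n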
          "},{"location":"git/#miscellaneous-tricks","title":"Miscellaneous tricks","text":""},{"location":"git/#refresh-all-git-repos-in-a-path","title":"Refresh all Git repos in a path","text":"
find /var/www/html/mediawiki/ -name .git | while read -r X ; do\n  pushd \"$(dirname \"${X}\")\" || continue\n  [ \"$(git remote -v | wc -l)\" -gt 0 ] && git pull\n  popd\ndone\n
          "},{"location":"git/#show-a-numbered-list-of-remote-branches-sorted-by-last-commit-date","title":"Show a numbered list of remote branches sorted by last commit date","text":"
git branch -r | grep -v HEAD | xargs -r -n1 git log -1 \\\n--pretty=format:'%ad %h%d %an | %s %n' --date=iso | sort | nl -ba\n
          "},{"location":"git/#branch-cleanup","title":"Branch cleanup","text":"
          git gc --prune=now\ngit remote prune origin\n
          "},{"location":"git/#git-grep","title":"git grep","text":""},{"location":"git/#find-a-string-in-all-branches","title":"Find a string in all branches","text":"

          This finds the word \"hunter2\" in the tests directory of all branches.

          git grep '\\bhunter2\\b' $(git branch -a --format='%(refname:short)') tests/\n
          "},{"location":"git/#exclude-certain-directories-from-git-grep-results","title":"Exclude certain directories from git grep results","text":"

          You can accomplish this using pathspec syntax.

          git grep searchstring -- ':!excluded-dir' ':!*junk-glob*'\n

          See the pathspec definition in the git glossary for more info.

          "},{"location":"github/","title":"Github","text":"

          \"GitHub is a development platform inspired by the way you work. From open source to business, you can host and review code, manage projects, and build software alongside 50 million developers.\" - https://github.com

          \"GitHub, Inc. is a provider of Internet hosting for software development and version control using Git. It offers the distributed version control and source code management (SCM) functionality of Git, plus its own features. It provides access control and several collaboration features such as bug tracking, feature requests, task management, continuous integration and wikis for every project. Headquartered in California, it has been a subsidiary of Microsoft since 2018.\" - https://en.wikipedia.org/wiki/GitHub

          "},{"location":"github/#tips","title":"Tips","text":""},{"location":"github/#get-all-public-keys-for-a-user","title":"Get all public keys for a user","text":"

Append .keys to the user profile URL, so https://github.com/danielhoherd becomes https://github.com/danielhoherd.keys. This is useful for adding to ~/.ssh/authorized_keys.

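For example, a sketch of appending a user's keys (only do this for accounts you trust):

curl -s https://github.com/danielhoherd.keys >> ~/.ssh/authorized_keys\n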
          "},{"location":"github/#get-a-downloadable-patch-for-a-git-commit","title":"Get a downloadable patch for a git commit","text":"

Append .patch to a commit URL, so https://github.com/apache/airflow/commit/86e2ab53aff becomes https://github.com/apache/airflow/commit/86e2ab53aff.patch.

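One way to apply such a patch to a local clone is to pipe it into git am (a sketch):

curl -sL https://github.com/apache/airflow/commit/86e2ab53aff.patch | git am\n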
          "},{"location":"github/#get-a-list-of-repositories-for-a-user","title":"Get a list of repositories for a user","text":"

          This gives a list of repositories sorted by the last time they were pushed to.

          curl -s https://api.github.com/users/danielhoherd/repos?per_page=200 |\njq -r '.[] | \"\\(.pushed_at) \\(.html_url)\"' |\nsort -d |\nnl\n

          https://developer.github.com/v3/repos/

          "},{"location":"github/#add-a-collapsible-section-in-markdown","title":"Add a collapsible section in markdown","text":"

          Markdown supports adding HTML elements. One element that can be useful for hiding large chunks of data that are related to a comment but might drown it out is the <details> element. This works in markdown documents, and PR and issue descriptions and comments.

          <details>\n<summary>Clickable thing that unfolds the details</summary>\nwhatever markdown you want\n\n1. list item 1\n2. list item 2\n\n```py\nimport platform\nplatform.uname()\n```\n\n</details>\n
          "},{"location":"github/#show-your-api-rate-limits","title":"Show your API rate limits","text":"
          gh api rate_limit\n
          "},{"location":"github/#links","title":"Links","text":"
          • CLI interface: https://github.com/cli/cli
          • Python API: https://github.com/PyGithub/PyGithub
          • All issues in all repositories assigned to the logged in user
          • All issues in all repositories assigned to the logged in user that are not in a project
          "},{"location":"github/#get-a-json-file-of-the-last-2000-issues-in-a-repository","title":"Get a json file of the last 2000 issues in a repository","text":"

          Using the gh CLI:

          gh issue list --state all --limit 2000 --json author,createdAt,title > issues.json\n
          "},{"location":"gitlab/","title":"Gitlab","text":"

          \"A single application for the complete DevOps lifecycle\" - https://about.gitlab.com

          "},{"location":"gitlab/#examples","title":"Examples","text":""},{"location":"gitlab/#gitlab-cli","title":"Gitlab CLI","text":"

          First pip3 install --user python-gitlab, then add a ~/.python-gitlab.cfg file like:

          [global]\ndefault = default\nssl_verify = true\ntimeout = 5\n\n[default]\nurl = https://www.gitlab.com/\nprivate_token = <some_value>\n

Where private_token is generated at https://gitlab.com/profile/personal_access_tokens, after which you can do things like:

          $ gitlab -o yaml -f name_with_namespace,web_url project list --owned=1 --per-page=1\n- name_with_namespace: org-name / sub-group / project-name\n  web_url: https://gitlab.com/orgname/subgroup/projectname\n
          "},{"location":"gitlab/#run-ci-steps-locally","title":"Run CI steps locally","text":"

Using Gitlab Runner you can run stages of your CI pipeline locally. For example, if you have docker installed, you can run the following command to run the build step of your pipeline:

          gitlab-runner exec docker build\n

The gitlab-runner command has good inline help for its options.

          "},{"location":"gitlab/#skip-ci-via-git-push-option","title":"Skip CI via git push option","text":"
          git push -o ci.skip\n
          "},{"location":"gitlab/#skip-ci-via-commit-message","title":"Skip CI via commit message","text":"

          https://docs.gitlab.com/ce/ci/yaml/#skipping-jobs

#!/usr/bin/env bash\n# Git commit-msg hook: skip CI if all changed files are inside dir \"foo/\"\n\nset -x\nregex='^foo\\/'\n\nfiles=( $(git diff --cached --name-only --diff-filter=ACM) )\nfor X in \"${files[@]}\" ; do\n  # If any file is not inside dir foo, leave the commit message alone\n  if [[ ! \"$X\" =~ $regex ]] ; then\n    exit 0\n  fi\ndone\n\n# If we've made it here, all changed files are inside dir foo/,\n# so we append '[skip ci]' to the commit message to skip CI in Gitlab\necho \"[skip ci]\" >> \"$1\"\n
          "},{"location":"gitlab/#run-privileged-mode-gitlab-runners-in-gke","title":"Run privileged mode gitlab-runners in GKE","text":"

          https://docs.gitlab.com/runner/install/kubernetes.html#installing-gitlab-runner-using-the-helm-chart

          First init the gitlab chart repo:

          helm repo add gitlab https://charts.gitlab.io\n

Privileged mode is needed to run docker commands, which is useful for building containers, running ansible molecule, etc.

          The runners.tags includes ${ORG_NAME} which is great for making sure jobs run on your own runners instead of publicly shared runners. This is important because DOCKER_HOST is different in Kubernetes than it is on public dind runners.

          export REGISTRATION_TOKEN=\"foobar\"\nexport ORG_NAME=\"acme\"\nhelm \\\n  install gitlab/gitlab-runner \\\n  --name \"gitlab-runner-${ORG_NAME}-$(date +%s)\" \\\n  --set \"concurrent=20\" \\\n  --set \"gitlabUrl=https://gitlab.com/\" \\\n  --set \"runnerRegistrationToken=${REGISTRATION_TOKEN}\" \\\n  --set \"runners.builds.cpuRequests=1\" \\\n  --set \"runners.env.DOCKER_HOST=tcp://localhost:2375/\" \\\n  --set \"runners.env.DOCKER_TLS_CERTDIR=\" \\\n  --set \"runners.imagePullPolicy=always\" \\\n  --set \"runners.privileged=true\" \\\n  --set \"runners.request_concurrency=10\" \\\n  --set \"runners.tags=${ORG_NAME}\\,dind\\,gke\"\n

The runners.privileged=true setting is the magic that is needed to enable docker commands in your .gitlab-ci.yml files to succeed. This --set flag creates the pod environment:

          spec:\n  containers:\n    env:\n    - name: KUBERNETES_PRIVILEGED\n      value: \"true\"\n

runners.env.DOCKER_TLS_CERTDIR= is required to accommodate the changes introduced in Docker 19.03, outlined in https://about.gitlab.com/2019/07/31/docker-in-docker-with-docker-19-dot-03/ and https://gitlab.com/gitlab-org/gitlab-ce/issues/64959

          See more variables that you can set by running helm inspect gitlab/gitlab-runner

          "},{"location":"gitlab/#use-helm-from-within-the-tiller-pod","title":"Use helm from within the tiller pod","text":"

          In Gitlab managed k8s clusters there are some TLS hurdles to jump over to get access to Helm:

          kubectl exec -ti -n gitlab-managed-apps $(kubectl get pods -n gitlab-managed-apps -l app=helm,name=tiller -o name) sh\nexport HELM_HOST=:44134\nexport HELM_TLS_CA_CERT=/etc/certs/ca.crt\nexport HELM_TLS_CERT=/etc/certs/tls.crt\nexport HELM_TLS_KEY=/etc/certs/tls.key\nexport HELM_TLS_ENABLE=true\n/helm list\n
          "},{"location":"gitlab/#show-kubernetes-gitlab-runner-pods-their-age-their-job-url-and-who-started-the-job","title":"Show kubernetes gitlab runner pods, their age, their job URL, and who started the job","text":"
          kubectl get pods -o custom-columns='NAME:.metadata.name,START_TIME:.status.startTime,CI_JOB_URL:.spec.containers[0].env[?(@.name == \"CI_JOB_URL\")].value,GITLAB_USER_EMAIL:.spec.containers[0].env[?(@.name == \"GITLAB_USER_EMAIL\")].value' | grep -E 'NAME|jobs'\n

          The output of the above command looks like

          NAME                                                 START_TIME             CI_JOB_URL                                    GITLAB_USER_EMAIL\nrunner-ppzmy1zx-project-11144552-concurrent-0q2pmk   2019-10-23T17:00:56Z   https://gitlab.com/foo/bar/-/jobs/330824976   user2@example.com\nrunner-ppzmy1zx-project-11144552-concurrent-1f7nfx   2019-10-23T17:04:27Z   https://gitlab.com/foo/bar/-/jobs/330827586   user1@example.com\nrunner-ppzmy1zx-project-11144552-concurrent-2n84rv   2019-10-23T17:04:19Z   https://gitlab.com/foo/bar/-/jobs/330827587   user1@example.com\n
          "},{"location":"gitlab/#find-k8s-gitlab-runner-pods-that-are-over-1h-old","title":"Find k8s gitlab-runner pods that are over 1h old","text":"
          kubectl get pods --no-headers=true -o custom-columns=\"NAME:.metadata.name,START_TIME:.status.startTime\" |\ngrep '^runner-' |\nwhile read -r pod starttime ; do\n  if [ \"$(( $(date +%s) - $(gdate -d \"$starttime\" \"+%s\") ))\" -gt 3600 ] ; then\n    echo \"$pod\"\n  fi\ndone\n
          "},{"location":"gitlab/#host-a-private-gitlab-pages-site","title":"Host a private gitlab pages site","text":"

          This does not appear to be a documented feature, but it is quite useful. You can host a website with a static address that tracks any given branch. Normal gitlab pages export a public facing website, but this is essentially a private gitlab pages site.

          ## .gitlab-ci.yml\ndocs:\n  stage: test\n  script:\n    - mkdocs build\n\n  artifacts:\n    paths:\n      - site\n

          Then hit https://gitlab.com/${ORG}/${GROUP}/${PROJECT}/-/jobs/artifacts/master/file/site/index.html?job=docs in your browser. You will be able to browse the built website, but only if you have access to the repository.

          "},{"location":"gitlab/#pros-and-cons","title":"Pros and cons","text":""},{"location":"gitlab/#pros","title":"Pros","text":"
          • You can create a new repo by locally initializing a git repo, setting a remote_url to where you want your project to be, and pushing your code. The server gives you a notification that the project has been created and gives you a URL to it.
          • Built in docker registry for every project
          • Built in CI with on-prem runners
          "},{"location":"gitlab/#cons","title":"Cons","text":"
          • Push-button GKE is configured at the project level, not the group level, so setting up k8s runners is more involved than it could be.
          • User permissions do not have a distinct group entity, they are managed by creating a project sub-group which functions as both a place to put code and a permission level. This shows up in a variety of places, and I suspect is the reason we cannot export groups over SAML.
• There is no command line tool equivalent to github's hub command, which makes it easy to script pull requests and other operations.
          • Terraform provider for Gitlab is pretty limited compared to Github.
          "},{"location":"gitlab/#links","title":"Links","text":"
          • https://docs.gitlab.com/ce/administration/
          • https://docs.gitlab.com/ce/ci/
          • https://docs.gitlab.com/ce/workflow/gitlab_flow.html - Good git branching and review strategy for teams.
          • https://docs.gitlab.com/ee/ci/docker/using_docker_build.html
          • https://docs.gitlab.com/ee/ci/multi_project_pipelines.html
          • https://docs.gitlab.com/ee/ci/variables/
          • https://medium.com/devopslinks/gitlab-pipeline-to-run-cross-multiple-projects-3563af5d6dca
          • https://gitlab.com/help/user/project/clusters/index.md Lots of info about configuring k8s clusters. (This document does not go into enough detail about using helm with TLS secured Gitlab apps.)
          "},{"location":"gitolite/","title":"gitolite","text":"

          \"Gitolite allows you to setup git hosting on a central server, with fine-grained access control and many more powerful features.\" - http://gitolite.com

          "},{"location":"gitolite/#examples","title":"Examples","text":""},{"location":"gitolite/#get-info-about-available-repositories","title":"Get info about available repositories","text":"
          ssh git@gitserver info\n
          "},{"location":"gnu-screen/","title":"GNU Screen","text":"

GNU screen is a terminal multiplexer: a CLI tool that provides virtual terminals which you can attach to and detach from, allowing you to leave commands running on a server after you log out. When you log back in and re-attach to the screen session, it appears that you are right back at the original terminal.

          See also tmux, which is a more modern replacement. If you haven't used tmux or screen before, use tmux unless you need a feature that screen has but tmux does not.

          "},{"location":"gnu-screen/#examples","title":"Examples","text":"

These all assume that your config has ctrl-a set up as the command character, which is the default.

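From the shell, the basic session lifecycle looks like this (the session name is arbitrary):

screen -S mysession  # start a new named session\nscreen -ls           # list sessions\nscreen -r mysession  # re-attach to a detached session\n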
          "},{"location":"gnu-screen/#rename-a-screen-tab","title":"Rename a screen tab","text":"
          ctrl-a shift-a\n
          "},{"location":"gnu-screen/#show-a-list-of-sessions","title":"Show a list of sessions","text":"
          ctrl-a \"\n
          "},{"location":"gnu-screen/#detach-from-a-screen-session","title":"Detach from a screen session","text":"
          ctrl-a d\n
          "},{"location":"gnu-screen/#re-attach-to-a-specific-screen-session","title":"re-attach to a specific screen session","text":"
          screen -x \"$screen_session_id\"\n
          "},{"location":"gnu-screen/#sharing-your-screen","title":"Sharing your screen","text":"

          In order to share your screen the /usr/bin/screen binary needs to be suid, which is a significant security risk.

          ctrl-a :multiuser on\nctrl-a :acladd [username]\n
          "},{"location":"gnu-screen/#open-three-ipmi-consoles","title":"Open three IPMI consoles","text":"

          This snip opens several new tabs, each with a name and a start command including the name.

          for host in app{213..215}prod ; do\n    screen -t \"${host}\" consoleto \"${host}\"\ndone\n
          "},{"location":"gnu-screen/#open-a-series-of-new-tabs-and-run-ssh-as-root","title":"Open a series of new tabs and run ssh as root","text":"

          For some reason screen doesn't like the ssh user@host syntax, so use ssh -l

for host in app{215..222}prod ; do\n    screen -t \"${host}\" ssh -l root \"${host}\" puppetd -ov\ndone\n
          "},{"location":"gnu-screen/#terminal-emulation-for-serial-ports","title":"Terminal Emulation for Serial Ports","text":"

          You must first figure out the name of the device that is connecting to your serial port, such as a USB adapter. Then use syntax such as the following:

          screen /dev/tty.usbDeviceName 9600\n
          "},{"location":"gnu-screen/#split-screen","title":"Split Screen","text":"

ctrl-a S creates a split screen, and ctrl-a [tab] switches between the splits. The splits are destroyed when re-attaching.

          "},{"location":"gnu-screen/#screenrc","title":".screenrc","text":"

          Using ~/.screenrc you can define many variables to customize the look of your screen tool, including tabs, clock and colors. Here's an example that gives all three:

          caption always \"%{Mk}%?%-Lw%?%{km}[%n*%f %t]%?(%u)%?%{mk}%?%+Lw%? %{mk}\"\nhardstatus alwayslastline \"%{kW}%H %{kB}|%{km} %l ~ %=%{km}%c:%s %D %M/%d/%Y \"\n
          "},{"location":"gnu-screen/#bugs","title":"Bugs","text":"

In Ubuntu with a Mac keyboard connected, sometimes the backspace key functions incorrectly. Set TERM=vt100 before running screen to fix this.

          "},{"location":"gnu-screen/#see-also","title":"See also","text":"
          • tmux - similar functionality, way more resource efficient and more widely used.
          • dvtm - similar functionality.
          • reptyr - Takes over a pty, useful for moving a pid running outside of screen to running within screen.
          "},{"location":"golang/","title":"golang","text":"

          \"Go is an open source programming language that makes it easy to build simple, reliable, and efficient software.\" - https://golang.org/

          Golang keeps to a 6-month minor release cadence. See https://golang.org/doc/devel/release.html

          "},{"location":"golang/#tips-and-examples","title":"Tips and examples","text":"
          • Default GOPATH is ${HOME}/go on unix systems.
          • See a bunch of other defaults with go env
          "},{"location":"golang/#view-default-go-environment","title":"View default go environment","text":"
          go env\n
          "},{"location":"golang/#build-code-found-on-github","title":"Build code found on github","text":"
          go get github.com/solarkennedy/uq\ngo build github.com/solarkennedy/uq\n

          With newer golang versions, you can simply go install github.com/solarkennedy/uq@latest

          "},{"location":"golang/#show-all-modules-used-by-a-golang-project","title":"Show all modules used by a golang project","text":"

This will list the full module requirement graph, one edge per line, in two columns: a module on the left and one of its direct requirements on the right.

          cd \"$SOURCE_REPO\"\ngo mod graph\n
          "},{"location":"golang/#links","title":"Links","text":"
          • https://go.dev/learn
          • https://golang.org/cmd/go
          • https://labix.org/gopkg.in
          • https://play.golang.org
          • https://www.programming-books.io/essential/go/
          • https://thewhitetulip.gitbook.io/bo: \"This is an easy to understand example based tutorial aimed at those who know a little of Go and nothing of webdev and want to learn how to write a webserver in Go.\"
          • https://changelog.com/gotime: \"Your source for diverse discussions from around the Go community\"
          • https://www.youtube.com/channel/UC_BzFbxG2za3bp5NRRRXJSw: justforfunc YT series about programming in Go
          • https://algorithmswithgo.com
          • https://gobyexample.com
          • https://go.dev/wiki/
          • https://www.youtube.com/playlist?list=PLoILbKo9rG3skRCj37Kn5Zj803hhiuRK6: Golang class vids
          "},{"location":"google-cloud/","title":"Google Cloud","text":"

          \"Google Cloud SDK is a set of tools that you can use to manage resources and applications hosted on Google Cloud Platform. These include the gcloud, gsutil, and bq command line tools. The gcloud command-line tool is downloaded along with the Cloud SDK\" - https://cloud.google.com/sdk/docs/

          "},{"location":"google-cloud/#links","title":"Links","text":"
          • https://cloud.google.com/compute/docs/ssh-in-browser
          • https://cloud.google.com/container-builder/docs/build-config
          • https://cloud.google.com/container-builder/docs/create-custom-build-steps
          • https://cloud.google.com/container-registry/docs/quickstart
          • https://cloud.google.com/docs
          • https://cloud.google.com/sdk/gcloud/reference
          • https://github.com/GoogleCloudPlatform/cloud-builders
          • https://cloud.google.com/iam/docs/permissions-reference (Large download)
          • https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type
          • https://cloud.google.com/container-optimized-os/docs/how-to/toolbox
          "},{"location":"google-cloud/#gcloud-cli-examples","title":"gcloud CLI Examples","text":""},{"location":"google-cloud/#working-with-gcloud-configurations","title":"Working with gcloud configurations","text":"

          When working with several projects, it's best to use multiple configurations, with one for each project. Run gcloud init to create a new configuration. This interactive prompt will let you re-initialize the default, or create a new named config. Once you have done that, you can run gcloud config configurations list to show what you have configured. To activate a different configuration, run gcloud config configurations activate <new-config-name>.

          $ gcloud config configurations list\nNAME     IS_ACTIVE  ACCOUNT          PROJECT          COMPUTE_DEFAULT_ZONE  COMPUTE_DEFAULT_REGION\ndefault  False      user@example.io  example-prod\nstaging  True       user@example.io  example-staging  us-west4-a            us-west4\n$ gcloud config configurations activate default\nActivated [default].\n

          Alternately, you can specify the config you want to use as an environment variable.

          $ gcloud config configurations list\nNAME     IS_ACTIVE  ACCOUNT                PROJECT                  COMPUTE_DEFAULT_ZONE  COMPUTE_DEFAULT_REGION\ndev      False      danielh@example.com    example-dev-369821\nstaging  True       danielh@example.com    example-staging-550891   us-east4-c            us-east4\n$ CLOUDSDK_ACTIVE_CONFIG_NAME=dev gcloud compute instances list\nNAME                                                 ZONE           MACHINE_TYPE    PREEMPTIBLE  INTERNAL_IP  EXTERNAL_IP     STATUS\ncat-pic-download-1                                   us-central1-a  e2-medium                    10.128.0.2                   TERMINATED\ngibson-access                                        us-east4-a     e2-micro                     10.0.0.69    256.24.927.306  RUNNING\ngke-dev-cluster-terraform-20090525165-d1058ae2-c0mc  us-east4-a     n1-standard-16               10.0.0.13                    RUNNING\ngke-dev-cluster-terraform-20210624231-9d581566-1oo9  us-east4-a     n1-standard-8                10.0.0.56                    RUNNING\n
          "},{"location":"google-cloud/#launch-cloud-shell-from-cli","title":"Launch cloud shell from CLI","text":"

Google Cloud Shell is a free VM in GCP that you can use to access GCP CLI tools, or do whatever else you might want to do in a free VM. The home directory persists between sessions but is deleted after an extended period of inactivity, so configuring your shell and storing useful scripts isn't a bad idea.

          gcloud cloud-shell ssh --authorize-session\n
          "},{"location":"google-cloud/#use-vscode-with-cloud-shell","title":"Use vscode with cloud-shell","text":"

          First, make sure your cloud-shell is started by logging into it via gcloud cloud-shell ssh --authorize-session --command=uptime

          Generate an ssh_config entry for your cloudshell:

          gcloud cloud-shell ssh --authorize-session --dry-run |\nwhile read -r _ _ _ port _ _ _ _ creds _ ; do\n  printf \"Host cloud-shell\\n  User %s\\n  Port %s\\n  Hostname %s\\n\" ${creds/@*/} $port ${creds/*@/}\ndone\n

          This will print something like:

          Host cloud-shell\n  User dade_murphy\n  Port 53000\n  Hostname 5.4.3.1\n

You'll have to put this into your ssh_config somehow. I recommend using an Include statement in ~/.ssh/config and having the above command redirect to a file containing only that content so it can be updated in-place. Then use vscode like you usually do with remote ssh, or by running

          code --folder-uri \\\nvscode-remote://ssh-remote+cloud-shell/home/dade_murphy/git_workspace/garbage_file_recovery_work\n
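
A minimal sketch of that Include approach (the file path is hypothetical), where the generated entry lives in its own file that the loop above overwrites:

# top of ~/.ssh/config\nInclude config.d/cloud-shell\n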
          "},{"location":"google-cloud/#list-google-cloud-projects","title":"List google cloud projects","text":"
          gcloud projects list\n
          "},{"location":"google-cloud/#switch-to-a-different-project","title":"Switch to a different project","text":"
          gcloud config set project \"$project_name\"\n
          "},{"location":"google-cloud/#grant-a-user-permission-to-a-docker-registry","title":"Grant a user permission to a docker registry","text":"
          gsutil iam ch 'user:user@example.com:objectViewer' 'gs://artifacts.example.appspot.com/'\n
          "},{"location":"google-cloud/#list-google-compute-zones","title":"List google compute zones","text":"

gcloud compute zones list\n

          "},{"location":"google-cloud/#list-compute-nodes","title":"List compute nodes","text":"
          gcloud compute instances list\n
          "},{"location":"google-cloud/#list-all-disks","title":"List all disks","text":"
          gcloud compute disks list\n
          "},{"location":"google-cloud/#generate-ssh-commands-for-all-nodes","title":"Generate ssh commands for all nodes","text":"
          gcloud compute instances list | awk 'NR>1 {printf \"gcloud compute ssh --zone %s %s\\n\", $2, $1;}'\n
          "},{"location":"google-cloud/#ssh-to-a-compute-node","title":"ssh to a compute node","text":"

This is useful for getting system-level access to a GKE node.

          ssh-to-gke-node() {\n  [ \"$#\" -gt 0 ] || { echo \"Usage: ssh-to-gke-node <node_name> [command]\" ; return 1 ; }\n  read -r zone host < <(gcloud compute instances list --filter=\"$1\" | awk 'NR==2 {print $2, $1 ;}') ;\n  shift\n  gcloud compute ssh --tunnel-through-iap --zone \"$zone\" \"$host\" -- \"${@}\" ;\n}\n
          "},{"location":"google-cloud/#loop-through-all-gcloud-instances-and-perform-a-command","title":"Loop through all gcloud instances and perform a command","text":"
          gcloud compute instances list |\nawk 'NR>1 {printf \"gcloud compute ssh --zone %s %s\\n\", $2, $1;}' |\nwhile read -r ssh_cmd ; do\n  $ssh_cmd -- \"docker images\" </dev/null\ndone |\nsort -u\n
          "},{"location":"google-cloud/#create-a-one-off-compute-node","title":"Create a one-off compute node","text":"
          gcloud compute instances create $USER-temp-node --zone=us-west4-a --network-interface=no-address\n

          Leave off the --network-interface=no-address if you want a public IP address.

          "},{"location":"google-cloud/#delete-a-compute-node","title":"Delete a compute node","text":"

          Sometimes autoscalers have a hard time scaling down, requiring manual termination of idle nodes. The following commands are equivalent:

          gcloud compute instances delete \"projects/$project_name/zones/$zone/instances/$node_name\"\ngcloud compute instances delete --project \"$project_name\" --zone=\"$zone\" \"$node_name\"\n

          To connect to VMs that don't have a public ip address, you need to give --tunnel-through-iap on the CLI and also have IAP-secured Tunnel User permission.

          "},{"location":"google-cloud/#add-an-eks-context-to-kubectl","title":"Add an EKS context to kubectl","text":"

          https://cloud.google.com/sdk/gcloud/reference/container/clusters/get-credentials

          This adds the cluster to your .kube/config with authentication done via an access token.

          CLUSTER_NAME='foo-dev'\nPROJECT_NAME='some-project'\nREGION='us-west42'\ngcloud container clusters get-credentials \"${CLUSTER_NAME}\" \\\n  --region \"${REGION}\" \\\n  --project \"${PROJECT_NAME}\"\n
          "},{"location":"google-cloud/#add-separate-kubectl-configs-for-different-eks-clusters","title":"Add Separate kubectl configs for different EKS clusters","text":"

          This keeps each config in a different file, which is useful for requiring explicit enabling of a given environment, vs the normal behavior of inheriting the last used context.

          # Set up individual kube config files for dev, prod and staging\n\nKUBECONFIG=\"$HOME/.kube/foo-dev-config\"\ngcloud container clusters get-credentials dev-cluster --region vn-west4 --project foo-dev\nkubectl config rename-context $(kubectl config current-context) foo-dev\n\nKUBECONFIG=\"$HOME/.kube/foo-prod-config\"\ngcloud container clusters get-credentials prod-cluster --region vn-west4 --project foo-prod\nkubectl config rename-context $(kubectl config current-context) foo-prod\n\nKUBECONFIG=\"$HOME/.kube/foo-staging-config\"\ngcloud container clusters get-credentials staging-cluster --region vn-west4 --project foo-staging\nkubectl config rename-context $(kubectl config current-context) foo-staging\n

          Then setup aliases like

          # ~/.bash_aliases\nalias foo-k-dev=\"export KUBECONFIG=$HOME/.kube/foo-dev-config ; kubectl config set-context foo-dev --namespace=default ;\"\nalias foo-k-prod=\"export KUBECONFIG=$HOME/.kube/foo-prod-config ; kubectl config set-context foo-prod --namespace=default ;\"\nalias foo-k-stage=\"export KUBECONFIG=$HOME/.kube/foo-staging-config ; kubectl config set-context foo-staging --namespace=default ;\"\n
          "},{"location":"google-cloud/#list-images-available-in-google-container-registry","title":"List images available in Google Container Registry","text":"
          gcloud container images list\n
          "},{"location":"google-cloud/#pull-a-docker-container-from-google-container-registry","title":"Pull a docker container from Google Container Registry","text":"
          gcloud docker -- pull gcr.io/project-id/hello-world\n
          "},{"location":"google-cloud/#control-access-to-registries","title":"Control access to registries","text":"

          \"Container Registry uses a Cloud Storage bucket as the backend for serving container images. You can control who has access to your Container Registry images by adjusting permissions for the Cloud Storage bucket.

          Caution: Container Registry only recognizes permissions set on the Cloud Storage bucket. Container Registry will ignore permissions set on individual objects within the Cloud Storage bucket.

          You manage access control in Cloud Storage by using the GCP Console or the gsutil command-line tool. Refer to the gsutil acl and gsutil defacl documentation for more information.\" - https://cloud.google.com/container-registry/docs/access-control

          "},{"location":"google-cloud/#authenticate-a-private-gcr-registry-in-kubernetes","title":"Authenticate a private GCR registry in kubernetes","text":"

          This is likely not copy/paste material, but the flow is generally correct.

PARTNER=other_company\nPROJECT=\"our_company-$PARTNER\"\nUSER=\"service-account-user-for-$PARTNER\"\nEMAIL=\"$USER@$PROJECT.iam.gserviceaccount.com\"\ngcloud iam service-accounts create \"$USER\" --display-name \"$USER\"\ngcloud iam service-accounts keys create \\\n  --iam-account \"$EMAIL\" \\\n  key.json\ngcloud projects add-iam-policy-binding \"$PROJECT\" \\\n  --member \"serviceAccount:$EMAIL\" \\\n  --role \"roles/storage.objectAdmin\"\nkubectl create secret docker-registry \"docker-pull-$PROJECT\" \\\n  --docker-server \"https://gcr.io\" \\\n  --docker-username _json_key \\\n  --docker-email \"$EMAIL\" \\\n  --docker-password \"$(cat key.json)\"\n

Then reference the secret docker-pull-${PROJECT} as your imagePullSecret.

          "},{"location":"google-cloud/#set-cache-expiration-of-gcp-bucket-items-to-5-minutes","title":"Set cache expiration of GCP bucket items to 5 minutes","text":"

          By default, GCP bucket items have 1 hour of public cache, which means items can be cached outside of the control of the GCP admin. This means that within that cache time window, any requests for the item will have unpredictable results. Set your Cache-Control max-age to something low for files that change, like page content and indexes, but long for files that never change, like images and archives.

          gsutil setmeta -h \"Cache-Control: public, max-age=300\" gs://helm-repo.example.org/index.yaml\n

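Conversely, immutable artifacts can get a long max-age (the object name here is hypothetical):

gsutil setmeta -h \"Cache-Control: public, max-age=31536000\" gs://helm-repo.example.org/mychart-1.2.3.tgz\n
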
          More info: https://medium.com/@val_deleplace/public-cache-dont-panic-11038a073a9

          "},{"location":"google-cloud/#view-the-metadata-of-a-gcp-bucket-item","title":"View the metadata of a GCP bucket item","text":"
          gsutil stat gs://helm-repo.example.org/index.yaml\n

          The output will be something like:

          gs://helm-repo.example.org/index.yaml:\n    Creation time:          Fri, 22 Jul 2020 23:20:11 GMT\n    Update time:            Mon, 23 Jul 2020 19:17:53 GMT\n    Storage class:          MULTI_REGIONAL\n    Cache-Control:          public, max-age=300\n    Content-Length:         3195714\n    Content-Type:           application/octet-stream\n    Hash (crc32c):          vA/Awm==\n    Hash (md5):             2AJ32cECSriE0UQStsXxyw==\n    ETag:                   COP7ew7D5+CAEoI=\n    Generation:             1595460011829230\n    Metageneration:         5\n
          "},{"location":"google-cloud/#show-a-bar-chart-of-disk-usage-of-gcp-bucket-contents","title":"Show a bar chart of disk usage of gcp bucket contents","text":"

The general idea here is you run gsutil du gs://whatever/somepath, swap the first and second columns, and pipe that to termgraph. In this example I use awk to do the column swap, filter out individual files so we're only charting full directory sizes, and trim the directory name and part of the filename:

          $ gsutil du gs://cat-pic-downloader-backups/backups/*full* | awk '/\\/$/ {gsub(/.*velero-/, \"\", $2) ; print $2,$1 ;}' | termgraph\n\nfull-back-up-20190128040005/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 307.60M\nfull-back-up-20190129040006/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 305.13M\nfull-back-up-20190130040007/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 298.71M\nfull-back-up-20190201040008/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 298.80M\nfull-back-up-20190202040009/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 297.77M\nfull-back-up-20190203040010/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 297.64M\nfull-back-up-20190204040011/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 297.61M\nfull-back-up-20190205040012/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 296.78M\nfull-back-up-20190206040013/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 294.78M\nfull-back-up-20190207040014/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 292.45M\nfull-back-up-20190208040015/: 
\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 292.05M\nfull-back-up-20190209040016/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 292.74M\nfull-back-up-20190210040017/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 291.43M\nfull-back-up-20190211040018/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 291.73M\nfull-back-up-20190212040019/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 291.55M\nfull-back-up-20190213040020/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 289.23M\nfull-back-up-20190214040022/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 287.54M\nfull-back-up-20190215040023/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 286.56M\nfull-back-up-20190216040024/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 286.87M\nfull-back-up-20190217040025/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 286.58M\nfull-back-up-20190218040026/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 286.01M\nfull-back-up-20190219040027/: 
\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 286.23M\nfull-back-up-20190220040028/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 285.57M\nfull-back-up-20190221040029/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 283.66M\nfull-back-up-20190222040030/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 283.62M\nfull-back-up-20190223040031/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 283.29M\nfull-back-up-20190224040032/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 283.27M\nfull-back-up-20190225040033/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 283.31M\nfull-back-up-20190226040034/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 283.19M\nfull-back-up-20190227040035/: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 283.44M\n
          "},{"location":"google-cloud/#set-maintenance-window-on-a-gke-cluster","title":"Set maintenance window on a GKE cluster","text":"

          This syntax is a bit difficult to work with. It must be given in RFC-5545 (iCal), but GCP only supports a subset of the spec. Most frustratingly, GCP doesn't support timezones. They warn you about this, saying the \"timezone will not be stored\", so if you want to start something at Wednesday 03:00:00+0600 you have to specify that as Tuesday 21:00:00. \ud83d\ude44

N.B.: in my testing while writing this, the local TZ was used if no TZ was sent, so it looks like their docs may be wrong and maybe the conversion step can be avoided.

          "},{"location":"google-cloud/#convert-local-time-to-rfc-5545-utc","title":"Convert local time to rfc-5545 UTC","text":"

          So if you want to start your maintenance on Thursday at 5am Pacific time and end at Thursday 5pm Pacific time, the first step is to figure out what the rfc-5545 UTC format would be:

          $ TZ=Z gdate '+%Y%m%dT%H%M%SZ %A' -d 2021-05-20T05:00:00-0800\n20210520T130000Z Thursday\n$ TZ=Z gdate '+%Y%m%dT%H%M%SZ %A' -d 2021-05-20T17:00:00-0800\n20210521T010000Z Friday\n
          "},{"location":"google-cloud/#set-and-verify-the-maintenance-schedule","title":"Set and verify the maintenance schedule:","text":"
          gcloud container clusters update \\\n  --project=\"$PROJECT_NAME\" \\\n  --zone=\"$ZONE\" \\\n  \"$CLUSTER_NAME\" \\\n  --maintenance-window-start=\"20210520T130000Z\" \\\n  --maintenance-window-end=\"20210521T010000Z\" \\\n  --maintenance-window-recurrence=\"FREQ=WEEKLY;BYDAY=TH\"\n

          You should see Updating <cluster_name>...done. followed by some hyperlinks. Next you should validate that it was set right:

          gcloud container clusters describe \\\n  --format=json \\\n  --project=\"$PROJECT_NAME\" \\\n  --zone=\"$ZONE\" \\\n  \"$CLUSTER_NAME\" |\njq .maintenancePolicy.window.recurringWindow\n

          You should see a small json blob like:

          {\n  \"recurrence\": \"FREQ=WEEKLY;BYDAY=TH\",\n  \"window\": {\n    \"endTime\": \"2021-05-21T02:00:00Z\",\n    \"startTime\": \"2021-05-20T14:00:00Z\"\n  }\n}\n

          Grab the start time and feed it back into gdate to validate that your desired local time is set:

          gdate -d \"2021-05-20T14:00:00Z\"  # comes out to: Thu May 20 07:00:00 PDT 2021\n
          "},{"location":"google-cloud/#gcloud-web-console-examples","title":"gcloud web console examples","text":""},{"location":"google-cloud/#logs-explorer-examples","title":"Logs Explorer examples","text":"
          • Sample queries using the Logs Explorer
          "},{"location":"google-cloud/#show-namespace-deletions","title":"Show namespace deletions","text":"
          protoPayload.methodName=\"io.k8s.core.v1.namespaces.delete\"\n
          "},{"location":"google-cloud/#show-all-images-pulled-by-gke","title":"Show all images pulled by GKE","text":"

This will show pulls of all images except those whose pull messages match the substrings on the third line of the query. The OR appears to be case sensitive.

          resource.type=\"k8s_pod\"\njsonPayload.reason=\"Pulling\"\n-jsonPayload.message : ( \"Pulling image \\\"dade-murphy/leet-image\\\"\" OR \"Pulling image \\\"mr-the-plague/da-vinci\\\"\" )\n
          "},{"location":"google-cloud/#show-all-k8s-control-plane-upgrades","title":"Show all k8s control plane upgrades","text":"

          You can search using the same syntax at the CLI, which is great for local saving of search results so you can search them more quickly using grep and easily archive them for postmortem documentation.

          gcloud beta logging read 'timestamp>=\"2021-01-01T00\" AND protoPayload.metadata.operationType: UPGRADE_MASTER AND operation.producer: container.googleapis.com' > control-plane-upgrades.yaml\n
          • https://console.cloud.google.com/logs/query;query=%20%20resource.type%3D%22k8s_pod%22%0A%20%20jsonPayload.reason%3D%22Pulling%22
          "},{"location":"google-cloud/#cloud-sql-examples","title":"Cloud SQL examples","text":"

          Docs are here: https://cloud.google.com/sql/docs/postgres/

          "},{"location":"google-cloud/#list-instances","title":"List instances","text":"
          gcloud sql instances list\n
          "},{"location":"google-cloud/#create-a-new-postgres-db-instance","title":"Create a new postgres db instance","text":"

In the following example, the tier is custom, with 24 CPU cores and 122880 MB of memory. You can see the standard tiers that are available with gcloud sql tiers list. Also, this creates an instance of POSTGRES_14, but other versions are listed here: https://cloud.google.com/sql/docs/postgres/create-instance#gcloud

          gcloud sql instances create \\\n  --project=${GCP_PROJECT} \\\n  --region=${INSTANCE_REGION} \\\n  --database-version=POSTGRES_14 \\\n  --tier=db-custom-24-122880 \\\n  ${INSTANCE_NAME}\n
          "},{"location":"google-cloud/#upgrade-postgres-to-a-new-version-in-place","title":"Upgrade postgres to a new version in-place","text":"

          https://cloud.google.com/sql/docs/postgres/upgrade-major-db-version-inplace

          gcloud beta sql instances patch \"${INSTANCE_NAME}\" --database-version=POSTGRES_14\n
          "},{"location":"google-cloud/#list-backups-for-an-instance","title":"List backups for an instance","text":"
          gcloud sql backups list --instance=\"${INSTANCE_NAME}\"\n
          "},{"location":"google-cloud/#create-a-backup","title":"Create a backup","text":"
          gcloud sql backups create \\\n  --instance=\"${INSTANCE_NAME}\" \\\n  --description=\"Pre pg14 upgrade\"\n
          "},{"location":"google-cloud/#restore-a-backup-from-one-instance-to-another","title":"Restore a backup from one instance to another","text":"

          This has to be done within the same GCP project

          gcloud sql backups restore \\\n  --backup-instance=\"${OLD_INSTANCE}\" \\\n  --restore-instance=\"${NEW_INSTANCE}\" \\\n  \"${BACKUP_ID}\"\n
          "},{"location":"google-cloud/#restore-a-backup-from-a-sql-file-stored-in-a-bucket","title":"Restore a backup from a sql file stored in a bucket","text":"

Depending on the contents of your sql backup, you may have to create the target database first. If the db does not exist and the sql file does not contain working CREATE statements, you'll see the ambiguous error \"system error occurred pre-import\" and an unhelpful python stack trace.

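You can create the target database ahead of time (a sketch):

gcloud sql databases create \"${TARGET_DB}\" --instance=\"${INSTANCE_NAME}\"\n

Then run the import:
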
          gcloud sql import sql ${INSTANCE_NAME} \\\n  --user \"${DB_USERNAME}\" \\\n  --database \"${TARGET_DB}\" \\\n  gs://some-bucket-name/some_backup.sql \\\n  --verbosity debug\n

          Add --async if you just want it to go to the background and return you to your shell prompt.

          "},{"location":"google-earth/","title":"Google Earth","text":"

          \"Google Earth is a computer program that renders a 3D representation of Earth based on satellite imagery. The program maps the Earth by superimposing satellite images, aerial photography, and GIS data onto a 3D globe, allowing users to see cities and landscapes from various angles.\" - https://en.wikipedia.org/wiki/Google_Earth

          "},{"location":"google-earth/#links","title":"Links","text":"
          • simplekml - \"The python package simplekml was created to generate kml (or kmz). It was designed to alleviate the burden of having to study KML in order to achieve anything worthwhile with it. If you have a simple understanding of the structure of KML, then simplekml is easy to run with and create usable KML.\"
          • https://doarama.com
          "},{"location":"google-sheets/","title":"Google Sheets","text":""},{"location":"google-sheets/#links","title":"Links","text":"
          • gspread - Python module for interacting with google spreadsheets
          "},{"location":"google-sheets/#techniques","title":"Techniques","text":"
          • Function list - https://support.google.com/docs/table/25273?hl=en
          "},{"location":"google-sheets/#conditional-formatting","title":"Conditional Formatting","text":""},{"location":"google-sheets/#regex-matching-to-color","title":"Regex matching to color","text":"

          Colorize rows with conditional formatting by using an expression like this:

          =REGEXMATCH($E:$E, \"some_regex\")\n

          This regex is not anchored, so there is no need to prepend or append .*

          Cell references in this case are relative unless prepended by a \\$. So, if you want to match the cell you are working on you would use A1:A1.

          "},{"location":"google-sheets/#color-every-other-row","title":"Color every other row","text":"
          =MOD(ROW(),2)\n
          "},{"location":"google-sheets/#import-an-rss-feed","title":"Import an RSS feed","text":"
          =IMPORTFEED(\"https://api.flickr.com/services/feeds/photos_public.gne\", B2, TRUE, 10)\n
          "},{"location":"google-sheets/#sum-lines-that-match-a-string","title":"Sum lines that match a string","text":"

This uses syntax similar to a glob search, but wildcards are escaped with ~ instead of \\.

          =COUNTIF(D:D,\"3*\")\n
          "},{"location":"google-sheets/#automatically-resolve-the-dow-from-a-date","title":"Automatically resolve the DOW from a date","text":"
          =CHOOSE( weekday(A4), \"Sun\", \"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\")\n
          "},{"location":"google/","title":"Google","text":"

          \"Google LLC is an American multinational technology company that specializes in Internet-related services and products.\" - https://en.wikipedia.org/wiki/Google

          "},{"location":"google/#links","title":"Links","text":"
          • Google App Script
          • Extending Google Docs
          • Document Service
          • Web Store Developer Dashboard
          • Service Accounts
          • API Explorer
          "},{"location":"graphicsmagick/","title":"GraphicsMagick","text":"

          \"GraphicsMagick is the swiss army knife of image processing.\" - http://www.graphicsmagick.org/

          This software is said to compare favorably with ImageMagick.

          "},{"location":"graphicsmagick/#usage-tips","title":"Usage tips","text":"

          Docs on scripting: http://www.graphicsmagick.org/GraphicsMagick.html

          "},{"location":"graphicsmagick/#convert-a-bunch-of-dng-files-to-low-resolution-jpeg","title":"Convert a bunch of DNG files to low-resolution JPEG","text":"

          The early -size option here is an optimization, but the actual resizing happens in -resize.

          SRC_PATH=\"${HOME}/Pictures/\"\nDEST_PATH=\"${HOME}/Desktop/output/\"\nSIZE=\"400x400\"\nfind \"${SRC_PATH}\" -type f -iname '*.dng' -print0 |\nxargs -0 -n1 -P\"$(nproc 2>/dev/null || echo 2)\" \\\ngm mogrify \\\n  -verbose \\\n  -size \"${SIZE}\" \\\n  -format jpeg \\\n  -resize \"${SIZE}\" \\\n  -create-directories \\\n  -output-directory \"$DEST_PATH\"\n
          "},{"location":"graphicsmagick/#create-a-contact-sheet","title":"Create a contact sheet","text":"
          gm montage \\\n  -geometry 400x400+10+10 \\\n  -background \"#222\" \\\n  -tile 8 *.jpg \\\n  \"$HOME/output.jpg\"\n
          "},{"location":"graphicsmagick/#see-also","title":"See Also","text":"
          • exiftool
          • imagemagick
          • jpeginfo
          • sips
          • dcraw
          "},{"location":"graphql/","title":"GraphQL","text":"

          \"GraphQL is a query language for APIs and a runtime for fulfilling those queries with your existing data.\" - https://graphql.org

          "},{"location":"graphql/#links","title":"Links","text":"
          • Apollo Federation
          • Why not use GraphQL?
          "},{"location":"grep/","title":"grep","text":"

          grep checks for matches per-line in a file or input stream and prints out matches, and is a standard tool in the linux admin's toolbox. It's easy to use, but there are some neat things you can do with it that aren't so obvious. This doc is mostly focused on the non-obvious things.

          Unless specified, grep here means GNU grep. BSD (macOS) grep functions differently in many cases.

          "},{"location":"grep/#examples","title":"Examples","text":""},{"location":"grep/#print-only-the-matching-string","title":"Print only the matching string","text":"

          We use -E so we don't have to escape +.

          $ echo 'Yummy fooood!' | grep -Eo 'fo+'\nfoooo\n
          "},{"location":"grep/#print-only-part-of-a-matching-string","title":"Print only part of a matching string","text":"

          -P uses perl regex, which supports more features, like lookbehind. This lets us use -o but print only part of the string.

          Use \\K in place of lookbehind to trim the beginning of the match.

          $ echo 'Yummy fooood!' | grep -Po 'foo\\Ko+'\noo\n

          Use lookahead to trim the end of the match

          $ echo 'Yummy fooood!' | grep -Po 'foo(?=o+)'\nfoo\n
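
          The two can be combined, with \\K trimming the beginning of the match and lookahead trimming the end:

          $ echo 'Yummy fooood!' | grep -Po 'fo\\Ko+(?=d)'\nooo\n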

          More info: https://www.regular-expressions.info/keep.html

          "},{"location":"grub/","title":"GNU GRUB","text":"

          \"GNU GRUB is a Multiboot boot loader. It was derived from GRUB, the GRand Unified Bootloader, which was originally designed and implemented by Erich Stefan Boleyn.\" - https://www.gnu.org/software/grub/

          "},{"location":"grub/#examples","title":"Examples","text":""},{"location":"grub/#update-defaults","title":"Update defaults","text":"

          The basic workflow for updating grub is to edit /etc/default/grub then run sudo update-grub. The update-grub man page states that \"update-grub is a stub for running grub-mkconfig -o /boot/grub/grub.cfg to generate a grub2 config file.\", and thus you can run grub-mkconfig to see what would be created.
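
          A typical session might look like this; because grub-mkconfig writes to stdout when -o is omitted, you can diff it against the live config before committing:

          sudoedit /etc/default/grub\nsudo grub-mkconfig | diff /boot/grub/grub.cfg -\nsudo update-grub\n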

          "},{"location":"grub/#disable-onboard-frame-buffer","title":"Disable onboard frame buffer","text":"

          I used this configuration to get text-mode linux to boot on a Mac Mini with a bad graphics card that would not load a desktop environment. The machine was locking up at boot in OS X. Hardware test would boot with striped color anomalies, but would never finish. Ubuntu Xenial would not boot correctly even to text mode without these settings.

          In /etc/default/grub:

          GRUB_CMDLINE_LINUX_DEFAULT=\"video=vesafb:off nofb vga=normal nomodeset\"\n
          "},{"location":"grub/#force-brightness-at-boot","title":"Force brightness at boot","text":"

          On a 15\" macbook pro, ubuntu 18.04 was giving me a problem where the LCD was set very dim and the keys to change brightness were not working. I had to configure /etc/default/grub with the following:

          GRUB_CMDLINE_LINUX_DEFAULT=\"acpi_backlight=video\"\n

          An alternative was acpi_backlight=vendor, but for me this was still quite dim. You can also alter your screen brightness on the fly by sending a value between 0 and 100 like this: echo 0 > /sys/class/backlight/acpi_video0/brightness

          "},{"location":"grub/#serial-over-ipmi","title":"Serial over IPMI","text":"

          First, your BMC has to be configured for SOL. If you already see POST output in an IPMI SOL session, the BMC side is working and this is the next step. Edit /etc/default/grub and add something like the following lines:

          GRUB_CMDLINE_LINUX=\"console=tty0 console=ttyS1,115200\"\nGRUB_TERMINAL_INPUT=\"console serial\"\nGRUB_TERMINAL_OUTPUT=\"gfxterm serial\"\nGRUB_SERIAL_COMMAND=\"serial --unit=1 --speed=115200\"\n

          This was tested on a SuperMicro running Ubuntu 19.10.

          "},{"location":"grub/#notable-commands-files-and-dirs","title":"Notable commands, files and dirs","text":"
          • /boot/grub/grub.cfg - The grub config that is actually used at boot
          • /etc/grub.d - A directory with some of the configs that are combined to create /boot/grub/grub.cfg
          • /etc/default/grub - Default grub options
          • update-grub - Used to regenerate /boot/grub/grub.cfg
          • grub-set-default - Used to configure the default menu entry during reboot, only for bare metal machines
          • grub-set-default-legacy-ec2 - Used to configure the default menu entry on ec2 machines
          "},{"location":"hadoop/","title":"Hadoop","text":"

          \"The Apache Hadoop software library is a framework that allows for the distributed processing of large data sets across clusters of computers using simple programming models.\" - http://hadoop.apache.org/

          "},{"location":"hadoop/#links","title":"Links","text":"
          • Oozie - Oozie is a workflow scheduler system to manage Apache Hadoop jobs.
          "},{"location":"handbrake/","title":"Handbrake","text":"

          Handbrake is a tool for ripping DVDs into MPEG or AVI files.

          "},{"location":"handbrake/#cli-examples","title":"CLI Examples","text":"
          • https://trac.handbrake.fr/wiki/CLIGuide
          "},{"location":"handbrake/#deinterlacing-for-ipad","title":"Deinterlacing for iPad","text":"
          HandbrakeCLI -Z \"AppleTV\" --deinterlace fast --maxWidth 1024 -i infile -o outfile\n
          "},{"location":"handbrake/#show-information-about-the-source-media-for-use-with-extended-flags","title":"Show information about the source media for use with extended flags","text":"
          HandBrakeCLI -t 0 -i VIDEO_TS\n
          "},{"location":"handbrake/#generate-a-1000-frame-preview-of-the-appletv-preset","title":"Generate a 1000 frame preview of the AppleTV preset","text":"

          --stop-at is relative to the start, so it describes the number of frames in the output.

          HandBrakeCLI -i 2046/VIDEO_TS/ --start-at frame:5000 --stop-at frame:1000 -o foo.mp4 -Z AppleTV\n
          "},{"location":"handbrake/#fix-43-aspect-ratio-with-the-expanded-syntax-of-the-appletv-preset","title":"Fix 4:3 aspect ratio with the expanded syntax of the AppleTV preset","text":"
          HandBrakeCLI \\\n-e x264 \\\n-q 20.0 \\\n-a 1,1 \\\n-E faac,ac3 \\\n-B 160,160 \\\n-6 dpl2,auto \\\n-R 48,Auto -D 0.0,0.0 \\\n-f mp4 \\\n-4 \\\n-X 960 \\\n--loose-anamorphic \\\n-m \\\n-x cabac=0:ref=2:me=umh:b-adapt=2:weightb=0:trellis=0:weightp=0 \\\n--custom-anamorphic \\\n--pixel-aspect 4:3\n
          "},{"location":"hashids/","title":"Hashids","text":"

          \"Hashids is a small open-source library that generates short, unique, non-sequential ids from numbers. It converts numbers like 347 into strings like 'yr8', or array of numbers like [27, 986] into '3kTMd'. You can also decode those ids back. This is useful in bundling several parameters into one or simply using them as short UIDs.\" - https://hashids.org/

          "},{"location":"hashids/#examples","title":"Examples","text":""},{"location":"hashids/#python","title":"Python","text":"

          https://github.com/davidaurelio/hashids-python

          from hashids import Hashids\nhashids = Hashids(salt=\"this is my salt\")\nid = hashids.encode(1, 2, 3)  # str('laHquq')\nnumbers = hashids.decode(id)\n
          "},{"location":"helm/","title":"helm","text":"

          \"The Kubernetes Package Manager\" - https://github.com/kubernetes/helm

          These notes are all about helm version 3. Charts that require helm 3 should use apiVersion: v2, though helm 3 does support v1.
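
          For example, a minimal Chart.yaml for a helm 3 chart; the name and versions here are placeholders:

          apiVersion: v2\nname: example-chart\ndescription: An example chart\ntype: application\nversion: 0.1.0\nappVersion: \"1.0.0\"\n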

          "},{"location":"helm/#links","title":"Links","text":"
          • https://helm.sh/docs/intro/quickstart/
          "},{"location":"helm/#tips","title":"Tips","text":""},{"location":"helm/#list-all-versions-of-a-chart-in-a-given-repo","title":"List all versions of a chart in a given repo","text":"
          helm search repo repo_name/chart_name --devel --versions\n
          "},{"location":"helm/#include-pre-release-versions-in-all-versions","title":"Include pre-release versions in all versions","text":"

          You have to use Masterminds/semver constraints with helm, which dictate that you have to include a pre-release component if you want to match against prereleases:

          helm search repo repo_name/chart_name --devel --versions --version '^1.5-0'\n

          This shows all matching versions in the ^1.5 range, prereleases included. If you only want to show the latest version even if it is a prerelease, leave off --versions.
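
          For example, showing only the newest matching version, prerelease or not:

          helm search repo repo_name/chart_name --devel --version '^1.5-0'\n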

          "},{"location":"helm/#get-values-of-a-deployed-chart","title":"Get values of a deployed chart","text":"

          This only shows values that were passed in, not default values.

          $release_name is the NAME column in helm list

          helm get values -o yaml \"$release_name\" > values.yaml\n

          To get a list of all values, use

          helm get values --all -o yaml \"$release_name\" > values.yaml\n
          "},{"location":"helm/#show-notes-for-a-deployed-service","title":"Show notes for a deployed service","text":"

          Notes are printed when you install a service, but they can be viewed again by running helm status <release_name> where <release_name> is one of the releases from helm list.
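
          For example, for a namespaced release:

          helm status -n \"$NS\" \"$release_name\"\n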

          "},{"location":"helm/#install-the-stable-repo","title":"Install the stable repo","text":"
          helm repo add stable https://charts.helm.sh/stable\n
          "},{"location":"helm/#install-the-incubator-repo","title":"Install the incubator repo","text":"

          https://github.com/helm/charts#how-do-i-enable-the-incubator-repository

          helm repo add incubator https://charts.helm.sh/incubator\n
          "},{"location":"helm/#show-metadata-about-a-specific-release-in-json","title":"Show metadata about a specific release in json","text":"

          You can find the revision in a few places, like helm list -A. Default is to store data about the last 10 releases per release_name.

          helm history -n \"$NS\" \"$RELEASE_NAME\" -o json | jq '.[] | select(.revision == 157)'\n
          "},{"location":"helm/#show-raw-data-about-what-helm-sent-to-the-k8s-server-for-a-recent-release","title":"Show raw data about what helm sent to the k8s server for a recent release","text":"

          First, find the secret that you will want to search. You can get the release number from helm history -n \"$NS\" foo or dig in kubectl -n $NS get secret

          $ k -n \"$NS\" get secret | grep sh.helm.release | tail -n 3 | column -t\nsh.helm.release.v1.foo.v338  helm.sh/release.v1  1  14d\nsh.helm.release.v1.foo.v339  helm.sh/release.v1  1  13d\nsh.helm.release.v1.foo.v340  helm.sh/release.v1  1  4d23h\n

          Then send that secret into the following command to get the full manifest that was sent to the k8s api:

          k -n \"$NS\" get secret \"$SECRET\" -o go-template='{{ .data.release | base64decode | base64decode }}' |\n  gzip -d\n

          The result is a json blob with all the details of how the helm chart was applied, including hook manifests, app manifests, and other metadata.

          "},{"location":"helm/#splay-cron-jobs","title":"Splay cron jobs","text":"

          Splaying cron jobs avoids the thundering herd problem by spreading the jobs out over time with deterministic randomness.

          The functions available in Helm templates are not as plentiful or general-purpose as you would expect in a normal programming language, so we have to get creative for some things. It would be great if Helm provided a deterministic random feature; it does have randomness, but there is no way to seed the random number generator. To work around this, we can use other functions that do take inputs in order to generate deterministic random-ish numbers. One such example is adler32sum, which returns a base-10 hash value of its input.

          The following example splays a cron job that runs every 15 minutes across starting minutes 3-12 inclusive, which avoids the high-demand minutes at the beginning of each 0/15 interval and still gives a few minutes for work to complete before the next interval comes around.

          '{{- add 3 (regexFind \".$\" (adler32sum .Release.Name)) -}}-59/15 * * * *'\n

          This bit of code adler32sums the .Release.Name value (which is expected to be unique for every deployment, but may not be in your environment), takes only the right-most digit, which is 0-9, adds 3 to it, and uses that number as the starting minute in the cron schedule, EG: 7-59/15 * * * *.
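
          For context, here is a sketch of how that expression might appear in a chart's CronJob template; the manifest around it is illustrative:

          apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: {{ .Release.Name }}-job\nspec:\n  schedule: '{{- add 3 (regexFind \".$\" (adler32sum .Release.Name)) -}}-59/15 * * * *'\n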

          Here is a python script that shows what minute number would be generated for 235,976 inputs:

          #!/usr/bin/env python3\n\nfrom zlib import adler32\nfrom pathlib import Path\n\n_input = Path(\"/usr/share/dict/words\").read_text().split()\noutput = {x + 3: 0 for x in range(10)}\n\nfor item in _input:\n    cksum = adler32(str.encode(item))\n    splay_minute = int(str(cksum)[-1]) + 3\n    output[splay_minute] += 1\n\nfor k, v in output.items():\n    print(f\"{k:2d}: {v}\")\n

          And the output shows a pretty even splay across all minutes, with each minute containing roughly 1/10th of the input lines:

          $ ./test-adler32sum.py\n 3: 23483\n 4: 23523\n 5: 23699\n 6: 23628\n 7: 23464\n 8: 23750\n 9: 23435\n10: 23833\n11: 23605\n12: 23556\n

          Surely there is more rigorous statistical analysis needed to better understand exactly how the inputs are being spread, but if you care that much about it, you are probably better off submitting a pr with your desired behavior to https://github.com/Masterminds/sprig, which is where the helm template functions come from.

          "},{"location":"helm/#detect-helm-resources-that-do-not-have-proper-annotations","title":"Detect helm resources that do not have proper annotations","text":"

          Helm requires that certain annotations exist. This check will return all deployments that do not contain the required annotations:

          kubectl get deployments -A -o json |\njq '.items[] | select((.metadata.annotations.\"meta.helm.sh/release-name\" == null) or (.metadata.annotations.\"meta.helm.sh/release-namespace\" == null)) | .metadata.name'\n
          "},{"location":"helm/#links_1","title":"Links","text":"
          • https://github.com/databus23/helm-diff: \"This is a Helm plugin giving you a preview of what a helm upgrade would change.\"
          • https://blog.flant.com/advanced-helm-templating
          "},{"location":"home-assistant/","title":"Home Assistant","text":"

          \"Home Assistant is an open-source home automation platform running on Python 3. Track and control all devices at home and automate control.\" - https://home-assistant.io/

          \"Hass.io turns your Raspberry Pi (or another device) into the ultimate home automation hub powered by Home Assistant. With Hass.io you can focus on integrating your devices and writing automations.\" - https://home-assistant.io/hassio/

          "},{"location":"hp/","title":"HP","text":"

          Information about HP Inc and Hewlett-Packard Company hardware and software.

          "},{"location":"hp/#links","title":"Links","text":"
          • http://cciss.sourceforge.net - RAID software for linux
          • iLO firmware upgrade is done from within linux using CP012567.scexe
          • Procurve ethernet switches
          • Microserver N40L or N54L for zfs or FreeNAS
          • Custom Microserver BIOS: http://www.avforums.com/forums/networking-nas/1521657-hp-n36l-n40l-n54l-microserver-updated-ahci-bios-support.html
          "},{"location":"htmx/","title":"htmx","text":"

          \"htmx gives you access to AJAX, CSS Transitions, WebSockets and Server Sent Events directly in HTML, using attributes, so you can build modern user interfaces with the simplicity and power of hypertext\" - https://htmx.org/

          "},{"location":"htmx/#links","title":"Links","text":"
          • https://htmx.org/docs/
          • https://github.com/rajasegar/awesome-htmx
          "},{"location":"htop/","title":"htop","text":"

          \"an interactive process viewer for Unix systems.\" - http://hisham.hm/htop/

          "},{"location":"htop/#see-also","title":"See also","text":"
          • Top variant list
          "},{"location":"httpstat/","title":"httpstat","text":"

          \"curl statistics made simple\" - https://github.com/reorx/httpstat

          "},{"location":"httpstat/#usage","title":"Usage","text":""},{"location":"httpstat/#simple-usage","title":"Simple usage","text":"
          $ httpstat http://hoherd.com/\nConnected to 192.30.252.153:80 from 127.0.0.1:61646\n\nHTTP/1.1 200 OK\nServer: GitHub.com\nDate: Mon, 29 Jan 2018 23:24:52 GMT\nContent-Type: text/html; charset=utf-8\nContent-Length: 405\nVary: Accept-Encoding\nVary: Accept-Encoding\nLast-Modified: Tue, 04 Apr 2017 16:43:44 GMT\nAccess-Control-Allow-Origin: *\nX-GitHub-Request-Id: F0D0:1973:5CF2FD:846C00:5A6FAD44\nExpires: Mon, 29 Jan 2018 23:34:52 GMT\nCache-Control: max-age=600\nAccept-Ranges: bytes\n\nBody stored in: /var/folders/2t/rnzxpxd54y7832mx_xjvxl30bb2qzp/T/tmphVaBFx\n\n  DNS Lookup   TCP Connection   Server Processing   Content Transfer\n[     5ms    |       0ms      |       237ms       |        1ms       ]\n             |                |                   |                  |\n    namelookup:5ms            |                   |                  |\n                        connect:5ms               |                  |\n                                      starttransfer:242ms            |\n                                                                 total:243ms\n
          "},{"location":"httpstat/#see-also","title":"See also","text":"
          • aria2
          • curl - what httpstat wraps to get its stats
          • httpstat - download and show some useful connection information
          • wget
          "},{"location":"iTunes/","title":"iTunes","text":"
          • https://github.com/liamks/pyitunes
          "},{"location":"iTunes/#metadata","title":"Metadata","text":"
          • Skips are counted within 2-10 seconds in iTunes, 3-10(?) seconds in iOS.
          "},{"location":"image-formats/","title":"Graphical Image Formats","text":"
          • arw - Sony Alpha raw format
          • cr2 - Canon raw format
          • dng - Adobe digital negative
          • gif - Compuserve Graphics Interchange Format
          • jpg - Most common image format on the internet
          • nef - Nikon raw format
          • png - Lossless rasterized image format
          • tiff - Lossless rasterized image format
          • webp - https://en.wikipedia.org/wiki/WebP
          "},{"location":"imagemagick/","title":"ImageMagick","text":"

          \"ImageMagick is a free and open-source software suite for displaying, converting, and editing raster image and vector image files. It can read and write over 200 image file formats.\" - https://en.wikipedia.org/wiki/ImageMagick

          ImageMagick is a framework for manipulating images. convert is the CLI front-end for it, and there are other modules/libraries for php, perl, etc.

          "},{"location":"imagemagick/#limitations","title":"Limitations","text":"
          • Does NOT handle DNG files. In OS X, use sips to do this.
          "},{"location":"imagemagick/#techniques","title":"Techniques","text":""},{"location":"imagemagick/#overlay-the-date-on-the-image","title":"Overlay the date on the image","text":"
          for X in *.jpg ; do\n  convert ${X} -font Times-Roman -pointsize 70 -fill black -annotate +100+100 %[exif:DateTimeOriginal] ${X}-date.jpg\ndone\n\nfor X in *date.jpg ; do\n  convert ${X} -font Times-Roman -pointsize 70 -fill white -annotate +98+98 %[exif:DateTimeOriginal] ${X}-date2.jpg\ndone\n
          "},{"location":"imagemagick/#delete-all-tags-and-metadata","title":"Delete all tags and metadata","text":"
          convert -strip infile.jpg outfile.jpg\n
          "},{"location":"imagemagick/#generate-blurry-dark-terminal-backgrounds-from-normal-backgrounds","title":"Generate blurry, dark terminal backgrounds from normal backgrounds","text":"
          for X in * ; do\n  convert -resize 1100x1100 \"${X}\" -blur 0x4 -fill black -colorize 75% terminal.\"${X}\"\ndone\n
          "},{"location":"imagemagick/#generate-shady-versions-of-desktop-pictures-in-os-x","title":"Generate shady versions of desktop pictures in OS X","text":"
          for X in /Library/Desktop\\ Pictures/*.jpg ; do\n  IMG=$(basename \"${X}\")\n  convert -resize 1100x1100 \"${X}\" -blur 0x4 -set option:modulate:colorspace hsb -modulate 20 ~/Pictures/terminal.\"${IMG}\"\ndone\n
          "},{"location":"imagemagick/#crop-the-center-of-images-out","title":"Crop the center of images out","text":"
          for X in /Volumes/data-b/Timelapse/20120407-14* ; do\n  convert \"${X}\" -gravity Center -crop 1920x1080+0+0 $(basename ${X})\ndone\n
          "},{"location":"imagemagick/#average-many-photos-for-a-long-exposure-style-shot","title":"Average many photos for a long-exposure style shot","text":"
          convert *.jpg -average average.jpg\n
          "},{"location":"imagemagick/#multiply-several-images","title":"Multiply several images","text":"
          convert *.jpg -background white -compose multiply -flatten multiply.jpg\n
          "},{"location":"imagemagick/#combine-images-always-using-the-minimum-value","title":"Combine images always using the minimum value","text":"
          convert *.jpg -background white -compose darken -flatten minimum.jpg\n
          "},{"location":"imagemagick/#combine-images-always-using-the-maximum-value","title":"Combine images always using the maximum value","text":"
          convert *.jpg -background black -compose lighten -flatten maximum.jpg\n
          "},{"location":"imagemagick/#swap-red-and-blue-channels-for-ir-photos","title":"Swap red and blue channels (for IR photos)","text":"
          convert infile.jpg -separate -swap 0,2 -combine swapped.jpg\n
          "},{"location":"imagemagick/#animate-some-images","title":"Animate some images","text":"
          convert -delay 20 -loop 0 *.jpg animation.gif\n
          "},{"location":"imagemagick/#see-also","title":"See Also","text":"
          • exiftool
          • graphicsmagick
          • jpeginfo
          • sips
          "},{"location":"img2xterm/","title":"img2xterm","text":"

          Converts images into xterm 256 color output for viewing when there is no graphical display. Unfortunately, as of 2016-09-07 I can't find this in an easily distributable package.

          https://github.com/rossy/img2xterm

          "},{"location":"inotify/","title":"inotify","text":"
          • \"inotify - monitoring file system events\" - man inotify
          • \"inotifywatch - gather filesystem access statistics using inotify\" - man inotifywatch
          • \"The inotify cron daemon (incrond) is a daemon which monitors filesystem events and executes commands defined in system and user tables. It's (sic) use is generally similar to cron(8).\" - man incrond
          "},{"location":"inotify/#examples","title":"Examples","text":""},{"location":"inotify/#continuously-show-filesystem-events-on-a-file","title":"Continuously show filesystem events on a file","text":"

          This shows a datestamp when /var/log/syslog is modified. Theoretically we could use %N to get millisecond precision, but it doesn't work.

          sudo inotifywait -m --timefmt '%F %T.%z' --format '%T %w %e %f' /var/log/syslog\n
          "},{"location":"internet/","title":"The Internet","text":""},{"location":"internet/#history","title":"History","text":"
          • http://textfiles.com/underconstruction/: Archive of \"under construction\" banners
          "},{"location":"internet/#health","title":"Health","text":"
          • https://map.internetintel.oracle.com/
          • http://www.internettrafficreport.com/
          • https://outage.report/
          • https://www.slac.stanford.edu/comp/net/wan-mon/netmon.html: \"Interesting web sites for Internet Monitoring\"
          "},{"location":"interview/","title":"Interviews","text":"
          • https://sockpuppet.org/blog/2015/03/06/the-hiring-post/
          "},{"location":"iotop/","title":"iotop","text":"

          \"A top utility for IO\" - https://github.com/Tomas-M/iotop

          iotop tracks disk I/O by process, and prints a summary report that is refreshed every interval.

          "},{"location":"iotop/#linux-examples","title":"Linux Examples","text":""},{"location":"iotop/#show-cumulative-stats-for-processes-actually-using-io","title":"Show cumulative stats for processes actually using IO","text":"

          iotop -oa\n

          "},{"location":"iotop/#see-also","title":"See also","text":"
          • Top variant list
          "},{"location":"ip/","title":"ip","text":"

          This is about the ip command in Linux.

          "},{"location":"ip/#examples","title":"Examples","text":"

          The commands here can be abbreviated to unique prefixes, much like commands on Cisco and other network devices.
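
          For example, these abbreviated and full forms are equivalent:

          ip a    # ip addr show\nip r    # ip route show\nip n    # ip neighbor show\n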

          "},{"location":"ip/#get-the-default-interface","title":"Get the default interface","text":"
          ip route get 8.8.8.8\n
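
          Sample output, where the addresses, interface, and uid are illustrative; the route's dev field is the default interface:

          $ ip route get 8.8.8.8\n8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.100 uid 1000\n    cache\n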
          "},{"location":"ip/#show-ip-neighbors","title":"Show IP neighbors","text":"
          ip neighbor show\n
          "},{"location":"ip/#show-all-configured-ip-addresses","title":"Show all configured IP addresses","text":"

          This is more reliable than ifconfig, which sometimes omits entries.

          ip -f inet addr\n
          "},{"location":"ip/#show-information-about-eth0","title":"Show information about eth0","text":"

          This shows information about eth0 and all the virtual interfaces brought up on the physical interface.

          ip addr show eth0\n
          "},{"location":"ip/#monitor-ip-changes","title":"Monitor IP changes","text":"
          ip mon all\n
          "},{"location":"ip/#show-interfaces-that-would-route-to-a-given-network-address","title":"Show interfaces that would route to a given network address","text":"
          ip addr show to 10.1.8.0/24\n
          "},{"location":"ip/#show-negotiated-speeds-for-all-interfaces","title":"Show negotiated speeds for all interfaces","text":"
          ip -o link show | awk -F: '{print $2}' | while read -r X ; do\n  sudo ethtool \"${X}\" 2>/dev/null |\n  grep -E 'Settings|Speed' |\n  xargs echo\ndone |\nawk '{print $3, $5}' |\ncolumn -t -s:\n
          "},{"location":"ip/#add-a-static-route","title":"Add a static route","text":"
          ip route add 192.168.100.0/24 via 0.0.0.0 dev eth0\n
          "},{"location":"ip/#set-mtu-for-a-specific-route","title":"Set MTU for a specific route","text":"
          • http://lartc.org/howto/lartc.cookbook.mtu-discovery.html
          ip route add default via 10.0.0.1 mtu 296\n
          "},{"location":"ip/#show-multicast-addresses","title":"Show multicast addresses","text":"
          ip maddr show\n
          "},{"location":"iperf/","title":"iperf","text":"

          \"iperf3: A TCP, UDP, and SCTP network bandwidth measurement tool\" - https://github.com/esnet/iperf

          "},{"location":"iperf/#links","title":"Links","text":"
          • Sample usage - https://fasterdata.es.net/performance-testing/network-troubleshooting-tools/iperf/
          "},{"location":"iperf/#example-usage","title":"Example usage","text":""},{"location":"iperf/#example-server-receiving-side","title":"Example server (receiving side)","text":"
          iperf3 -s\n
          "},{"location":"iperf/#example-client-sending-side","title":"Example client (sending side)","text":"
          iperf3 -c remote-hostname --interval 0.5 --omit 2 --time 30 --set-mss 1460\n
          "},{"location":"ipmi/","title":"IPMI","text":"

          The Intelligent Platform Management Interface (IPMI) is a set of computer interface specifications for an autonomous computer subsystem that provides management and monitoring capabilities independently of the host system's CPU, firmware (BIOS or UEFI) and operating system.

          "},{"location":"ipmi/#managing-servers-with-ipmi","title":"Managing servers with IPMI","text":""},{"location":"ipmi/#default-users","title":"Default Users","text":"

          The default users are 'Administrator' for HPs, 'root' for Dells, and 'ADMIN' for Silicon Mechanics.

          "},{"location":"ipmi/#server-setup","title":"Server Setup","text":"

          IPMI uses COM2 aka ttyS1 for the serial port on Dells and HPs, COM3 aka ttyS2 on Silicon Mechanics.

          "},{"location":"ipmi/#common-remote-commands","title":"Common Remote Commands","text":""},{"location":"ipmi/#see-if-a-server-is-on","title":"See if a server is on","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power status\n
          "},{"location":"ipmi/#turn-a-server-on","title":"Turn a server on","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power on\n
          "},{"location":"ipmi/#turn-a-server-off","title":"Turn a server off","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power off\n
          "},{"location":"ipmi/#tell-a-server-to-pxeboot","title":"Tell a server to PXEBoot","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power off\nipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis bootdev pxe\nipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP chassis power on\n
          "},{"location":"ipmi/#connect-to-the-serial-console","title":"Connect to the serial console","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sol activate\n
          "},{"location":"ipmi/#display-the-system-event-log","title":"Display the system event log","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sel list\n
          "},{"location":"ipmi/#clear-the-system-event-log","title":"Clear the system event log","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sel clear\n
          "},{"location":"ipmi/#display-sensor-information","title":"Display sensor information","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sdr list\n
          "},{"location":"ipmi/#disconnect-another-serial-console-session","title":"Disconnect another serial console session","text":"
          ipmitool -I lanplus -U $USER -P $PASSWORD -H $IPMI_IP sol deactivate\n
          "},{"location":"ipmi/#show-bootdev-help","title":"Show bootdev help","text":"
          ipmitool -H 10.5.8.30 -U ADMIN -P ADMIN chassis bootdev none options=help\n
          "},{"location":"ipmi/#common-local-commands","title":"Common Local Commands","text":"

          These commands require root access in most environments.

          "},{"location":"ipmi/#view-all-configured-lan-parameters","title":"View all configured LAN parameters","text":"
          ipmitool lan print\n

          You can view individual \"channels\" which are logical interfaces by giving the number:

          ipmitool lan print 1\n
          "},{"location":"ipmi/#configure-ipmi-static-ip-information","title":"Configure IPMI static IP information","text":"
          IPMI_IP_ADDR=192.168.1.250\nIPMI_IP_NETMASK=255.255.255.0\nIPMI_IP_GW_IP_ADDR=192.168.1.1\nIPMI_IP_GW_MAC_ADDR=00:01:02:aa:bb:cc\nipmitool lan set 1 ipsrc static\nipmitool lan set 1 ipaddr \"${IPMI_IP_ADDR}\"\nipmitool lan set 1 netmask \"${IPMI_IP_NETMASK}\"\nipmitool lan set 1 defgw ipaddr \"${IPMI_IP_GW_IP_ADDR}\"\nipmitool lan set 1 defgw macaddr \"${IPMI_IP_GW_MAC_ADDR}\"\nipmitool lan set 1 arp respond on\n
          "},{"location":"ipmi/#configure-ipmi-admin-user","title":"Configure IPMI admin user","text":"
          USERNAME=admin\nPASSWORD=hunter2\nipmitool user set name 2 \"$USERNAME\"\nipmitool user set password 2 \"$PASSWORD\"\n
          "},{"location":"ipmi/#reset-the-bmc","title":"Reset the BMC","text":"

          If a host loses its IPMI (iLO, etc.) IP connectivity, issue this command from the host itself:

          ipmitool mc reset cold\n
          "},{"location":"ipmi/#how-to-fix-devipmi-errors","title":"How to fix /dev/ipmi errors","text":"

          For errors like Could not open device at /dev/ipmi0 or /dev/ipmi/0 or /dev/ipmidev/0:

          modprobe ipmi_msghandler\nmodprobe ipmi_devintf\nmodprobe ipmi_si\n
          "},{"location":"ipmi/#reset-the-admin-password-to-admin-on-a-supermicro-bmc","title":"Reset the ADMIN password to ADMIN on a Supermicro BMC","text":"
          sudo ipmitool -I open user set password 2 ADMIN\n
          "},{"location":"ipmi/#reset-all-settings-to-defaults","title":"Reset all settings to defaults","text":"

          This may not work with your BMC, but has been known to work with some supermicro BMCs.

          ipmitool raw 0x30 0x40\n
          "},{"location":"ipmi/#conigure-sol-on-a-systemd-server","title":"Conigure sol on a systemd server","text":"

          A better way to do this is via Grub.

          /lib/systemd/system/ttyS1.service should look something like:

          [Unit]\nDescription=Serial Console Service: ttyS1\n\n[Service]\nExecStart=/sbin/getty -L 115200 ttyS1 vt102\nRestart=always\n\n[Install]\nWantedBy=multi-user.target\n

          Then run:

          systemctl enable ttyS1.service\nsystemctl start ttyS1.service\n
          "},{"location":"ipmi/#see-also","title":"See Also","text":"
          • http://www.intel.com/design/servers/ipmi/
          "},{"location":"iptables/","title":"iptables","text":"

          iptables is the built-in linux firewall.

          "},{"location":"iptables/#examples","title":"Examples","text":"
          • How to simulate a slow network link: http://blogs.kde.org/node/1878
          "},{"location":"iptables/#allow-mysql","title":"Allow MySQL","text":"
          iptables -A INPUT -i eth0 -p tcp -m tcp --dport 3306 -j ACCEPT\n
          "},{"location":"iptables/#ssh-blocking","title":"SSH blocking","text":"

          Better idea: fail2ban

          Basically, it lets people connect with SSH 5 times within a minute, but with a mandatory 5 second wait before connection attempts. Once they hit 5 attempts in a minute they get banned for an hour. Several IP ranges are exceptions where access is always allowed.

          iptables -I INPUT 1 -s 172.16.0.0/16 -m state --state NEW -p tcp -m tcp --dport 22 -j ACCEPT\niptables -I INPUT 2 -s 17.1.2.0/27   -m state --state NEW -p tcp -m tcp --dport 22 -j ACCEPT\niptables -I INPUT 3 -s 18.3.4.0/27   -m state --state NEW -p tcp -m tcp --dport 22 -j ACCEPT\niptables -I INPUT 4 -s 19.5.6.0/24   -m state --state NEW -p tcp -m tcp --dport 22 -j ACCEPT\niptables -N SSH\niptables -N SSH_ABL\niptables -A SSH -m recent --name SSH_ABL --update --seconds 3600 -j REJECT\niptables -A SSH -m recent --name SSH --rcheck --seconds 60 --hitcount 5 -j SSH_ABL\niptables -A SSH_ABL -m recent --name SSH_ABL --set -j LOG --log-level warn --log-prefix \"ABL: +SSH: \"\niptables -A SSH_ABL -j REJECT\niptables -A SSH -m recent --name SSH --rcheck --seconds 5 -j LOG --log-level warn --log-prefix \"RATE: \"\niptables -A SSH -m recent --name SSH --update --seconds 5 -j REJECT\niptables -A SSH -m recent --name SSH_ABL --remove -j LOG --log-level warn --log-prefix \"ABL: -SSH: \"\niptables -A SSH -m recent --name SSH --set -j ACCEPT\niptables -A INPUT -m state --state NEW -p tcp -m tcp --dport 22 -j SSH\niptables -L\n
          "},{"location":"iptables/#show-all-tables","title":"Show all tables","text":"

          Not all tables are shown by default. To view all, issue the following commands as root:

          iptables -vL -t filter\niptables -vL -t nat\niptables -vL -t mangle\niptables -vL -t raw\niptables -vL -t security\n
          "},{"location":"irc/","title":"irc","text":"

          Internet Relay Chat

          "},{"location":"irc/#clients","title":"Clients","text":"
          • Colloquy - Mac GUI client
          • irssi - CLI client
          "},{"location":"irc/#chanserv","title":"ChanServ","text":"

          \"ChanServ allows you to register and control various aspects of channels.\" - http://www.geekshed.net/commands/chanserv

          "},{"location":"irc/#register-a-room","title":"Register a room","text":"

          /msg ChanServ REGISTER #git-cfe

          "},{"location":"irssi/","title":"irssi","text":"

          TUI irc client.

          "},{"location":"irssi/#how-to","title":"How To","text":""},{"location":"irssi/#add-an-auto-connect-ssl-server-that-requires-a-password","title":"Add an auto-connect SSL server that requires a password","text":"

          /server add -auto -ssl servername port user:pass network

          "},{"location":"irssi/#add-an-auto-connect-channel","title":"Add an auto-connect channel","text":"

          /channel add -auto #channelname network

          "},{"location":"irssi/#links","title":"Links","text":"
          • http://irssi.org/help
          • http://quadpoint.org/articles/irssi: A Guide to Efficiently Using Irssi and Screen
          • https://archive.li/TEp7p: Irssi \u2014 How to use it in a comfortable way?
          "},{"location":"jargon/","title":"jargon","text":"
          • AMoD: Autonomous Mobility on Demand
          • bikeshedding: The term was coined as a metaphor to illuminate Parkinson's Law of Triviality. Parkinson observed that a committee whose job is to approve plans for a nuclear power plant may spend the majority of its time on relatively unimportant but easy-to-grasp issues, such as what materials to use for the staff bikeshed, while neglecting the design of the power plant itself, which is far more important but also far more difficult to criticize constructively.
          • cargo cult: software containing elements that are included because of successful utilization elsewhere, unnecessary for the task at hand.
          • Conway's Law: \"organizations which design systems ... are constrained to produce designs which are copies of the communication structures of these organizations.\"
          • Cunningham's law: The best way to get the right answer on the Internet is not to ask a question, it's to post the wrong answer.
          • cybernetics: \"Cybernetics\" comes from a Greek word meaning \"the art of steering\". Cybernetics is about having a goal and taking action to achieve that goal.
          • deterministic: In mathematics and physics, a deterministic system is a system in which no randomness is involved in the development of future states of the system. A deterministic model will thus always produce the same output from a given starting condition or initial state.
          • DRY: Don't Repeat Yourself. \"Every piece of knowledge must have a single, unambiguous, authoritative representation within a system\"
          • idempotent: Idempotence is the property of certain operations in mathematics and computer science, that they can be applied multiple times without changing the result beyond the initial application.
          • KISS: Keep It Simple, Stupid!
          • martian packet: A Martian packet is an IP packet which specifies a source or destination address that is reserved for special-use by Internet Assigned Numbers Authority.
          • monotonic: A function or set of values that always increases or always decreases.
          • PRD: product requirements document
          • teleology: Teleology or finality is a reason or explanation for something in function of its end, purpose, or goal.
          • transitive dependency: a functional dependency which holds by virtue of transitivity among various software components. (EG: a dependency of a dependency)
          • warrant canary: Text on a website that states the company or person has never been served with a secret government subpoena. Once the statement is removed, the users can assume the company or person has been served and has been told not to talk about it.
          • YAGNI: a principle that states a programmer should not add functionality until deemed necessary.
          • yak shaving: Any apparently useless activity which, by allowing you to overcome intermediate difficulties, allows you to solve a larger problem.
          "},{"location":"javascript/","title":"JavaScript","text":"

          JavaScript is a scripting language that is ubiquitous in web browsers, and is found in many other places, from back-end servers to microcontrollers to lego robots.

          "},{"location":"javascript/#links","title":"Links","text":"
          • https://eloquentjavascript.net: a book about JavaScript, programming, and the wonders of the digital.
          • https://github.com/getify/You-Dont-Know-JS: a series of books diving deep into the core mechanisms of the JavaScript language.
          • https://webpack.js.org: a static module bundler for modern JavaScript applications
          • https://gruntjs.com: the JavaScript task runner
          • https://www.espruino.com: JavaScript for microcontrollers
          • https://developer.mozilla.org/en-US/docs/Learn/JavaScript
          • https://developer.mozilla.org/en-US/docs/Learn/Common_questions/Tools_and_setup/What_are_browser_developer_tools
          • https://eloquentjavascript.net
          • https://jestjs.io: \"Jest is a delightful JavaScript Testing Framework with a focus on simplicity.\"
          "},{"location":"jdupes/","title":"jdupes","text":"

          \"finds and performs actions upon duplicate files\" - man jdupes

          jdupes is based on fdupes and is not written in java as the name may lead you to assume.

          "},{"location":"jdupes/#examples","title":"Examples","text":""},{"location":"jdupes/#recursively-find-all-duplicates-in-a-dir","title":"Recursively find all duplicates in a dir","text":"
          jdupes -r /path/to/dir\n
          "},{"location":"jdupes/#create-hard-links-of-any-duplicate-files","title":"Create hard-links of any duplicate files","text":"
          jdupes -r -L /path/\n
          "},{"location":"jdupes/#delete-all-but-one-duplicate","title":"Delete all but one duplicate","text":"

          Although the flags do not indicate this, the following command keeps the first file (see man page for details), and prints a summary of what was kept and what was deleted.

          jdupes -r --delete --noprompt /path/\n
          "},{"location":"jdupes/#links","title":"Links","text":"
          • https://github.com/jbruchon/jdupes
          "},{"location":"jmespath/","title":"jmespath","text":"

          \"JMESPath is a query language for JSON.\" - http://jmespath.org

          "},{"location":"jmespath/#links","title":"Links","text":"
          • http://jmespath.org/examples.html
          • http://jmespath.org/tutorial.html
          • https://github.com/jmespath/jmespath.terminal
          • https://github.com/jmespath/jp
          "},{"location":"jmespath/#quotes-matter","title":"Quotes matter","text":"

          Something that is completely absent from the jmespath tutorial is that quotes matter. You must quote value strings with single quotes or back-ticks, which means you have to quote the python function args with double quotes, use back-ticks, or escape your single quotes:

          >>> print(l)\n[{\"name\": \"foo-name\", \"age\": \"foo-age\"}, {\"name\": \"bar-name\", \"age\": \"bar-age\"}]\n>>> print(yaml.dump(l))\n- age: foo-age\n  name: foo-name\n- age: bar-age\n  name: bar-name\n\n>>> jmespath.search(\"[?name == 'bar-name']\", l)\n[{'name': 'bar-name', 'age': 'bar-age'}]\n>>> jmespath.search('[?name == \"bar-name\"]', l)\n[]\n>>> jmespath.search('[?name == \\'bar-name\\']', l)\n[{'name': 'bar-name', 'age': 'bar-age'}]\n>>> jmespath.search(\"[?name == `bar-name`]\", l)\n[{'name': 'bar-name', 'age': 'bar-age'}]\n

          However, in jmespath you also must double-quote keys (variable names) that contain dots. (Double-quotes are optional for keys that do not contain dots.) This becomes a bit burdensome to keep track of, and also failure-prone:

          >>> l = [{\"name\": \"foo-name\", \"the.age\": \"foo-the.age\"}, {\"name\": \"bar-name\", \"the.age\": \"bar-the.age\"}]\n>>> l[1]['the.age']\n'bar-the.age'\n>>> jmespath.search(\"[?'the.age' == 'bar-the.age']\", l)\n[]\n>>> jmespath.search('[?\"the.age\" == \"bar-the.age\"]', l)\n[]\n>>> jmespath.search('[?\"the.age\" == \\'bar-the.age\\']', l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n>>> jmespath.search('[?\"the.age\" == `bar-the.age`]', l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n>>> jmespath.search(\"[?\\\"the.age\\\" == 'bar-the.age']\", l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n

          Triple quotes can help here by avoiding the need to escape both single-quotes and double-quotes:

          >>> jmespath.search(\"\"\"[?'the.age' == 'bar-the.age']\"\"\", l)\n[]\n>>> jmespath.search(\"\"\"[?\"the.age\" == \"bar-the.age\"]\"\"\", l)\n[]\n>>> jmespath.search(\"\"\"[?\"the.age\" == 'bar-the.age']\"\"\", l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n>>> jmespath.search(\"\"\"[?\"the.age\" == `bar-the.age`]\"\"\", l)\n[{'name': 'bar-name', 'the.age': 'bar-the.age'}]\n

          The TL;DR is to only double-quote variable names, and single-quote or back-tick-quote anything that is a string literal. This requirement is inconsistent with normal python comparisons. The behavior is strict and unintuitive to people unfamiliar with jmespath. The fact that the failures are silent is quite problematic and leads to low confidence that jmespath is behaving as expected. jmespath would do well to have a linter to warn about this behavior, and at least mention the different types of quotes and their behaviors in the tutorial. (FWIW, these details are buried in the jmespath spec, including the nuanced differences between single-quotes and back-ticks.)

          "},{"location":"jmespath/#examples","title":"Examples","text":""},{"location":"jmespath/#grab-some-kubernetes-fields-and-remap-them-to-be-less-deep","title":"Grab some kubernetes fields and remap them to be less deep","text":"
          kubectl get po --all-namespaces -o json |\njp \"items[*].{name: metadata.name, namespace: metadata.namespace, imagePullSecrets: spec.imagePullSecrets[*].name}\"\n

          Or filter only to non-default namespace where imagePullSecrets is populated

          kubectl get po --all-namespaces -o json |\njp \"items[?metadata.namespace != 'default' && spec.imagePullSecrets != null].{name: metadata.name, namespace: metadata.namespace, imagePullSecrets: spec.imagePullSecrets[*].name}\"\n
          "},{"location":"join/","title":"join","text":"

          \"The join utility performs an 'equality join' on the specified files and writes the result to the standard output.\" - man join

          join is a unix tool that is similar to a sql join, combining two files by joining on a column. The macOS man page has more examples than the GNU man page, but as usual the syntaxes aren't exactly the same.

          "},{"location":"join/#examples","title":"Examples","text":"

          The following examples use the following dates.txt file as file 1. Notice it is missing data between December 25 and December 31 (inclusive).

          2022-12-21 9\n2022-12-22 2\n2022-12-23 1\n2022-12-24 5\n2023-01-01 6\n2023-01-02 6\n2023-01-03 2\n

          We also use a loop that produces a date range covering the dates missing from dates.txt, but one that does not go back as far in time:

          $ for OFFSET in {10..0} ; do date -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done\n2022-12-24 offset=10\n2022-12-25 offset=9\n2022-12-26 offset=8\n2022-12-27 offset=7\n2022-12-28 offset=6\n2022-12-29 offset=5\n2022-12-30 offset=4\n2022-12-31 offset=3\n2023-01-01 offset=2\n2023-01-02 offset=1\n2023-01-03 offset=0\n
          "},{"location":"join/#show-only-lines-with-common-columns","title":"Show only lines with common columns","text":"
          $ join dates.txt <(for OFFSET in {10..0} ; do date -d \"-$OFFSET days\" \"+%F $OFFSET\" ; done)\n2022-12-24 5 10\n2023-01-01 6 2\n2023-01-02 6 1\n2023-01-03 2 0\n
          "},{"location":"join/#show-all-lines-as-long-as-data-is-present-in-file-1","title":"Show all lines, as long as data is present in file 1","text":"
          $ join -a 1 dates.txt <(for OFFSET in {10..0} ; do date -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done)\n2022-12-21 9\n2022-12-22 2\n2022-12-23 1\n2022-12-24 5 offset=10\n2023-01-01 6 offset=2\n2023-01-02 6 offset=1\n2023-01-03 2 offset=0\n
          "},{"location":"join/#show-all-lines-as-long-as-data-is-present-in-file-2","title":"Show all lines, as long as data is present in file 2","text":"
          $ join -a 2 dates.txt <(for OFFSET in {10..0} ; do date -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done)\n2022-12-24 5 offset=10\n2022-12-25 offset=9\n2022-12-26 offset=8\n2022-12-27 offset=7\n2022-12-28 offset=6\n2022-12-29 offset=5\n2022-12-30 offset=4\n2022-12-31 offset=3\n2023-01-01 6 offset=2\n2023-01-02 6 offset=1\n2023-01-03 2 offset=0\n
          "},{"location":"join/#only-show-certain-columns-in-the-output","title":"Only show certain columns in the output","text":"

          We can specify which columns we want to see in the output, which includes the ability to join on a column that is not shown in the output:

          $ join -o 1.2,2.2 dates.txt <(for OFFSET in {10..0} ; do gdate -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done)\n5 offset=10\n6 offset=2\n6 offset=1\n2 offset=0\n
          "},{"location":"join/#fill-in-missing-data-an-arbitrary-string","title":"Fill in missing data an arbitrary string","text":"

          We use -e Null to indicate we want to fill in missing values with Null. In order for -e to work right, we have to specify the output columns with -o. We also have to specify -a 2 to indicate we want to see all lines from file 2. Because we are showing all lines in file 2, we use -o 2.1 instead of -o 1.1 so that the date column is not populated with Null values.

          $ join -e Null -o 2.1,1.2,2.2 -a 2 dates.txt <(for OFFSET in {10..0} ; do gdate -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done)\n2022-12-24 5 offset=10\n2022-12-25 Null offset=9\n2022-12-26 Null offset=8\n2022-12-27 Null offset=7\n2022-12-28 Null offset=6\n2022-12-29 Null offset=5\n2022-12-30 Null offset=4\n2022-12-31 Null offset=3\n2023-01-01 6 offset=2\n2023-01-02 6 offset=1\n2023-01-03 2 offset=0\n

          A practical example of the above is making sure you have an entire sequence filled in, for example when graphing a set where no entry is created for days that have no data points. This ensures we're seeing a complete set, not just the days that have data:

          $ join -e 0 -o 2.1,1.2 -a 2 dates.txt <(for OFFSET in {13..0} ; do gdate -d \"-$OFFSET days\" \"+%F offset=$OFFSET\" ; done) | termgraph --format='{:.0f}'\n\n2022-12-21: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 9\n2022-12-22: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 2\n2022-12-23: \u2587\u2587\u2587\u2587\u2587 1\n2022-12-24: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 5\n2022-12-25:  0\n2022-12-26:  0\n2022-12-27:  0\n2022-12-28:  0\n2022-12-29:  0\n2022-12-30:  0\n2022-12-31:  0\n2023-01-01: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 6\n2023-01-02: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 6\n2023-01-03: \u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587\u2587 2\n
          "},{"location":"jot/","title":"jot","text":"

          jot is a BSD CLI tool to generate sequences or random data, usually numbers.

          "},{"location":"jot/#tricks","title":"Tricks","text":""},{"location":"jot/#generate-37-evenly-space-floating-point-numbers-with-two-significant-digits-between-10-and-10","title":"Generate 37 evenly space floating point numbers (with two significant digits) between -10 and +10","text":"
          jot 37 -10 10.00\n
          "},{"location":"jot/#generate-two-12-alphanumeric-random-character-passwords-with-rs","title":"Generate two 12 alphanumeric random character passwords with rs","text":"
          jot -rc 24 48 123 | rs -g 0 12\n
          "},{"location":"jot/#generate-5-capital-alphanumeric-strings","title":"Generate 5 capital alphanumeric strings","text":"
          jot -rc 500 48 90 | grep '[A-Z0-9]' | rs -g 5 32\n
          "},{"location":"jpeginfo/","title":"jpeginfo","text":"

          \"jpeginfo - prints information and tests integrity of JPEG/JFIF files.\" - man jpeginfo

          "},{"location":"jpeginfo/#example","title":"Example","text":""},{"location":"jpeginfo/#test-integrity-of-all-files-in-a-dir","title":"Test integrity of all files in a dir","text":"
          jpeginfo -c *\n
          "},{"location":"jpeginfo/#test-integrity-of-files-and-delete-any-corrupt-files","title":"Test integrity of files and delete any corrupt files","text":"
          jpeginfo -c -d *\n
          "},{"location":"jpeginfo/#see-also","title":"See Also","text":"
          • exiftool
          • graphicsmagick
          • imagemagick
          • sips
          "},{"location":"jq/","title":"jq","text":"

          \"jq is a lightweight and flexible command-line JSON processor.\" - https://stedolan.github.io/jq/

          "},{"location":"jq/#examples","title":"Examples","text":""},{"location":"jq/#sort-a-json-file","title":"Sort a json file","text":"
          jq -S . foo.json\n
          "},{"location":"jq/#select-key-name-with-dots","title":"Select key name with dots","text":"

          The syntax .foo.bar is the same as .[\"foo\"][\"bar\"], so to select keys that have dots, you would do .annotations[\"deployment.kubernetes.io/revision\"]
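
          For example, assuming a deployment named foo:

          kubectl get deployment foo -o json |\n  jq '.metadata.annotations[\"deployment.kubernetes.io/revision\"]'\n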

          "},{"location":"jq/#grab-first-element-of-an-array-and-print-the-value-of-timestamp-of-that-element","title":"Grab first element of an array, and print the value of 'timestamp' of that element.","text":"
          echo '\n[\n  {\n    \"foo\": \"this is foo string\",\n    \"timestamp\": \"this is the timestamp\"\n  },\n  {\n    \"second element\": \"second element value\"\n  }\n]' | jq '.[0].timestamp'\n
          "},{"location":"jq/#construct-flickr-urls-from-an-api-call","title":"Construct Flickr URLs from an API call","text":"
          curl -s \"https://api.flickr.com/services/rest/?\"\\\n\"&api_key=0123456789abcdef\"\\\n\"&format=json\"\\\n\"&method=flickr.photos.getRecent\"\\\n\"&nojsoncallback=1\" |\n  jq -S '\n    .photos.photo[] |\n    \"https://flickr.com/photos/\" + .owner + \"/\" + .id\n  '\n
          "},{"location":"jq/#use-mco-to-find-packages-of-a-certain-version-on-a-certain-os","title":"Use mco to find packages of a certain version on a certain OS","text":"

          This example could be used as an alternative to grep, where only the value of a key/value pair is matched.

          mco rpc package status package=apt -j -F lsbdistcodename=trusty |\n  jq -c '\n    .[] |\n    select(.data.ensure == \"1.0.1ubuntu2\") | {\n      version: .data.ensure, hostname: .sender\n    }\n  '\n
          "},{"location":"jq/#print-only-objects-whose-name-matches-a-string","title":"Print only objects whose name matches a string","text":"

          This example echoes some yaml, uses python to convert it to json, then filters matching data using jq. It could be used as an alternative to grep, where only the key of a key/value pair is matched.

          echo \"\ndata:\n  - This is a string, not an object, and contains the substrings foo and bar\n  - name: foo_name\n    value: foo_value\n  - name: bar_name\n    value: bar_value\" |\npython -c \"import yaml, sys, json; print(json.dumps(yaml.safe_load(sys.stdin)))\" |\njq '\n  .data[] |\n  select(type==\"object\") |\n  select (.name | . and contains(\"bar_n\"))\n'\n
          "},{"location":"jq/#build-a-json-entry-from-scratch","title":"Build a json entry from scratch","text":"

This uses bash parameter expansion and subshell syntax, and may not work in other shells.

          create_json() {\n  local user=${1:-${USER}}\n  local host=${2:-${HOSTNAME}}\n  local more_stuff=${3:-$(uname -a)}\n  json=$(\n    jq -c -n \\\n      --arg timestamp  \"$(date \"+%F %T%z\")\" \\\n      --arg host       \"${host}\" \\\n      --arg user       \"${user}\" \\\n      --arg more_stuff \"${more_stuff}\" \\\n      '{\n        timestamp:  $timestamp,\n        host:       $host,\n        user:       $user,\n        more_stuff: $more_stuff\n      }'\n  )\n  echo \"$json\"\n}\n
          "},{"location":"jq/#render-yaml-with-anchors-as-json-data","title":"Render yaml with anchors as json data","text":"

This example shows how you can use python and jq to view the result of dereferenced yaml anchors, a construct that is not supported by json. This example is less about jq syntax, and more about how jq can be used to view data that is otherwise difficult to sort through.

          echo \"\njob1: &template\n  directory: /tmp\n  extra_parameters: nosuid,noatime\n  remote_host: 10.1.1.1\n  user: nobody\njob2:\n  <<: *template\n  remote_host: 10.2.2.2\njob3:\n  <<: *template\n  remote_host: 10.3.3.3\n\" |\npython -c \"import yaml, sys, json; print(json.dumps(yaml.safe_load(sys.stdin)))\" |\njq -S .\n
          "},{"location":"jq/#select-matches-and-print-a-subset-of-values","title":"Select matches, and print a subset of values","text":"
          jq '.[] | select(.data.ensure != \"purged\") | [.sender,.data.ensure]' $*\n
          "},{"location":"jq/#output-bare-values-for-use-as-inputs","title":"Output bare values for use as inputs","text":"

This is a contrived example; a better way to get this info would be awless list instances --format tsv --columns name,privateip,launched

          $ awless list instances --format json | jq -r '.[] | \"\\(.Name) \\(.PrivateIP) \\(.Launched)\"' | column -t\nsalt-master       172.18.9.48   2015-04-10T21:28:03Z\nconsul-server-01  172.18.9.116  2015-05-15T06:13:19Z\nconsul-server-02  172.18.9.117  2015-05-15T06:13:19Z\nconsul-server-03  172.18.9.118  2015-05-15T06:13:19Z\n
          "},{"location":"jq/#show-labels-for-each-locally-stored-docker-sha","title":"Show labels for each locally stored docker SHA","text":"
          docker images --format '{{.ID}}' |\nwhile read -r X ; do\n  docker inspect $X |\n  jq '.[] | [ .RepoTags, .Config.Labels ]'\ndone\n
          "},{"location":"jq/#sort-all-json-contents","title":"Sort all JSON contents","text":"

          Be aware that sometimes JSON should not be sorted, as arrays are sometimes expected to maintain their order.

          jq -S '. | walk( if type == \"array\" then sort else . end )'\n

          Or set up a shell alias

          alias jqsort=\"jq -S '. | walk( if type == \\\"array\\\" then sort else . end )'\"\n
          "},{"location":"jq/#store-a-value-as-a-variable","title":"Store a value as a variable","text":"

          When creating long pipelines, it's useful to be able to store a deep value as a variable. In the following example we store .metadata.namespace as $namespace and .metadata.name as $podname before digging into .status where we would no longer have access to .metadata:

          kubectl get pod -A -l k8s-app=kube-dns -o=json |\njq -r '\n  .items[] |\n  .metadata.namespace as $namespace |\n  .metadata.name as $podname |\n  .status.containerStatuses[] |\n  \"\\($namespace) \\($podname) \\(.name) \\(.restartCount)\"\n' |\ncolumn -t\n

          The output of this command is something like:

          kube-system  kube-dns-66f64447b8-7tzkn  dnsmasq           5\nkube-system  kube-dns-66f64447b8-7tzkn  kubedns           0\nkube-system  kube-dns-66f64447b8-7tzkn  prometheus-to-sd  0\nkube-system  kube-dns-66f64447b8-7tzkn  sidecar           0\nkube-system  kube-dns-66f64447b8-b2jsf  dnsmasq           3\nkube-system  kube-dns-66f64447b8-b2jsf  kubedns           0\nkube-system  kube-dns-66f64447b8-b2jsf  prometheus-to-sd  0\nkube-system  kube-dns-66f64447b8-b2jsf  sidecar           0\n
          "},{"location":"jq/#default-value-for-missing-keys","title":"Default value for missing keys","text":"
          $ echo '{\"foo\": 1, \"bar\": 2}' | jq '[.foo // \"missing\", .bar // \"missing\", .baz // \"missing\"]'\n[\n  1,\n  2,\n  \"missing\"\n]\n
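
Note that // also substitutes when a value is false or null, not only when the key is missing, which can be surprising with boolean fields:

$ echo '{\"flag\": false}' | jq '.flag // \"missing\"'\n\"missing\"\n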
          "},{"location":"jq/#craft-json-data-for-use-with-curl","title":"Craft json data for use with curl","text":"

When using curl --data, in-line JSON can get out of hand very quickly. To avoid confusing quoting and escaping, use jq to create a temp file, then reference that file in curl:

          JSON_DATA=$(mktemp)\njq -n '{service_account: env.SERVICE_ACCOUNT_ID, secret_key: env.SERVICE_ACCOUNT_KEY}' > \"$JSON_DATA\"\n

The above commands create a temporary file containing valid JSON:

          $ cat \"$JSON_DATA\"\n{\n  \"service_account\": \"whoever@serviceaccount.example.com\",\n  \"secret_key\": \"abc123\"\n}\n

          Then reference that in curl:

          $ curl -s --request POST --header \"content-type: application/json\" --data \"@${JSON_DATA}\" https://httpbin.org/anything\n{\n  \"args\": {},\n  \"data\": \"{  \\\"service_account\\\": \\\"whoever@serviceaccount.example.com\\\",  \\\"secret_key\\\": \\\"abc123\\\"}\",\n  \"files\": {},\n  \"form\": {},\n  \"headers\": {\n    \"Accept\": \"*/*\",\n    \"Content-Length\": \"84\",\n...\n
          "},{"location":"jq/#get-the-most-recently-dated-blob","title":"Get the most recently dated blob","text":"
          curl -fssL https://storage.googleapis.com/updates.astronomer.io/astronomer-certified |\njq '.available_releases | sort_by(.release_date)[-1]'\n
          "},{"location":"jq/#see-also","title":"See Also","text":"
          • Tutorial
          "},{"location":"json/","title":"JSON","text":"

          \"JSON (JavaScript Object Notation) is a lightweight data-interchange format.\" - https://www.json.org/

          "},{"location":"json/#see-also","title":"See also","text":"
          • toml: \"TOML aims to be a minimal configuration file format that's easy to read due to obvious semantics.\"
          • yaml: Better human readability, more options.
          "},{"location":"json/#is-yaml-a-superset-of-json","title":"Is YAML a superset of JSON?","text":"

          Many people say that JSON is a subset of YAML, but that is not strictly true. See https://metacpan.org/pod/JSON::XS#JSON-and-YAML

          Here's an example of json that does not work as yaml:

          $ sed 's/\\t/--->/g' break-yaml.json\n--->{\n--->--->\"list\": [\n--->--->--->{},\n--->--->--->{}\n--->--->]\n--->}\n$ jq -c . break-yaml.json\n{\"list\":[{},{}]}\n$ json-to-yaml.py break-yaml.json\nlist:\n- {}\n- {}\n$ yaml-to-json.py break-yaml.json\nERROR: break-yaml.json could not be parsed\nwhile scanning for the next token\nfound character '\\t' that cannot start any token\nin \"break-yaml.json\", line 1, column 1\n$ sed 's/\\t/    /g' break-yaml.json | yaml-to-json.py\n{\"list\": [{}, {}]}\n
          "},{"location":"json/#links","title":"Links","text":"
• https://goessner.net/articles/JsonPath/index.html: JSONPath is used by kubernetes as a native way to restructure kubectl output.
          • https://stedolan.github.io/jq/: jq is generally useful for working with JSON in a shell.
          • http://jmespath.org/: jmespath is used in AWS APIs to restructure data.
          • https://json5.org/: Now with trailing comma and comment support!
          • https://github.com/antonmedv/fx: TUI with clickable expansion and search features, written in golang.
          • https://github.com/TomWright/dasel: Universal serialized data tool, supports json, yaml, csv, and more.
          • https://github.com/josephburnett/jd: \"jd is a commandline utility and Go library for diffing and patching JSON and YAML values. It supports a native jd format (similar to unified format) as well as JSON Merge Patch (RFC 7386) and a subset of JSON Patch (RFC 6902).\"
          • https://www.jsonfeed.org: \"The JSON Feed format is a pragmatic syndication format, like RSS and Atom, but with one big difference: it\u2019s JSON instead of XML.\"
          "},{"location":"jsonnet/","title":"jsonnet","text":"

          \"A data templating language for app and tool developers\" - https://jsonnet.org

          "},{"location":"jsonpath/","title":"JSONPath","text":"

          \"XPath for JSON\" - https://goessner.net/articles/JsonPath/index.html

          "},{"location":"jsonpath/#examples","title":"Examples","text":"

There are more examples on the kubernetes page.

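For instance, to print the name of every pod in the current namespace on one line:

kubectl get pods -o jsonpath='{.items[*].metadata.name}'\n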
          "},{"location":"jsonpath/#show-the-api-server-of-your-current-kubernetes-context","title":"Show the API server of your current kubernetes context","text":"
          kubectl config view --minify -o=jsonpath='{.clusters[0].cluster.server}'\n
          "},{"location":"jsonpath/#links","title":"Links","text":"
          • http://jsonpath.com/ - online expression evaluator
          • https://kubernetes.io/docs/reference/kubectl/jsonpath/
          • https://extendsclass.com/jsonpath-tester.html
          "},{"location":"juniper/","title":"Juniper","text":"

          \"We bring simplicity to networking with products, solutions, and services that connect the world.\" - https://www.juniper.net/us/en/company/

          "},{"location":"juniper/#examples","title":"Examples","text":""},{"location":"juniper/#default-credentials","title":"Default credentials","text":"
          • root:(blank)
          • admin:abc123
          "},{"location":"juniper/#load-configuration-from-terminal","title":"Load configuration from terminal","text":"

          I have experienced errors when pasting into screen /dev/tty.usbserial, but having iTerm2 paste at 256 bytes per second appears to fix them.

          Amnesiac (ttyu0)\n\nlogin: root\n\n--- JUNOS 13.2X51-D35.3 built 2015-04-09 20:48:22 UTC\nroot@:RE:0% cli\n{master:0}\nroot> configure\nEntering configuration mode\nUsers currently editing the configuration:\n  autodconfig (pid 1291) on since 2018-01-06 17:32:28 UTC\n      exclusive\n\n{master:0}[edit]\nroot# load override terminal\n[Type ^D at a new line to end input]\n

          Then:

          1. paste your config
          2. press ctrl-d
          3. run commit
          "},{"location":"juniper/#some-example-terminal-buffer","title":"Some example terminal buffer","text":"
          login: root\n\n--- JUNOS 13.2X51-D35.3 built 2015-04-09 20:48:22 UTC\nroot@:RE:0% cli\n{master:0}\nroot> set cli screen-length 75\nScreen length set to 75\n\n{master:0}\nroot> configure\nEntering configuration mode\n\n{master:0}[edit]\nroot# show ?\nPossible completions:\n  <[Enter]>            Execute this command\n> access               Network access configuration\n> access-profile       Access profile for this instance\n> accounting-options   Accounting data configuration\n+ apply-groups         Groups from which to inherit configuration data\n> chassis              Chassis configuration\n> class-of-service     Class-of-service configuration\n> diameter             Diameter protocol layer\n> event-options        Event processing configuration\n> firewall             Define a firewall configuration\n> forwarding-options   Configure options to control packet forwarding\n> groups               Configuration groups\n> interfaces           Interface configuration\n> jsrc                 JSRC partition configuration\n> jsrc-partition       JSRC partition configuration\n> multi-chassis\n> multicast-snooping-options  Multicast snooping option configuration\n> poe                  Power-over-Ethernet options\n> policy-options       Policy option configuration\n> protocols            Routing protocol configuration\n> routing-instances    Routing instance configuration\n> routing-options      Protocol-independent routing option configuration\n> security             Security configuration\n> services             System services\n> snmp                 Simple Network Management Protocol configuration\n> switch-options       Options for default routing-instance of type virtual-switch\n> system               System parameters\n> unified-edge\n> virtual-chassis      Virtual chassis configuration\n> vlans                VLAN configuration\n  |                    Pipe through a command\n{master:0}[edit]\nroot# show chassis\nauto-image-upgrade;\n\n{master:0}[edit]\n
          "},{"location":"juniper/#show-forwarding-table","title":"Show forwarding table","text":"

          This is exactly the same as typing netstat -nr at the system shell.

          show route forwarding-table detail\n
          "},{"location":"juniper/#remove-virtual-chassis-from-an-ex4300","title":"Remove virtual-chassis from an EX4300","text":"
          request virtual-chassis vc-port delete pic-slot 1 port 0\nrequest virtual-chassis vc-port delete pic-slot 1 port 1\nrequest virtual-chassis vc-port delete pic-slot 1 port 2\nrequest virtual-chassis vc-port delete pic-slot 1 port 3\n

          You can validate this by looking for the following interfaces. If you do not see the et-0/1/{0..3} interfaces then the virtual-chassis may still exist in full or in part.

          root> show interfaces terse | match et-\net-0/1/0                up    up\net-0/1/0.0              up    up   eth-switch\net-0/1/1                up    up\net-0/1/1.0              up    up   eth-switch\net-0/1/2                up    up\net-0/1/2.0              up    up   aenet    --> ae0.0\net-0/1/3                up    up\net-0/1/3.0              up    up   aenet    --> ae0.0\n
          "},{"location":"juniper/#ex4300-os-install","title":"EX4300 OS install","text":"
          • https://kb.juniper.net/InfoCenter/index?page=content&id=KB20551&cat=SWITCHING&actp=LIST#USB

          After copying the install image to a usb disk and inserting it into the EX4300

          mount_msdosfs /dev/da1s1 /mnt\ncp /mnt/jinstall-ex-4300-14.1X53-D45.3-domestic-signed.tgz /var/tmp/\ncli\nrequest system software add /var/tmp/jinstall-ex-4300-14.1X53-D45.3-domestic-signed.tgz\n
          "},{"location":"juniper/#clear-a-dhcp-client-lease","title":"Clear a DHCP client lease","text":"
          root@junos> show dhcp server binding\nIP address        Session Id  Hardware address   Expires     State      Interface\n10.8.52.6         2           00:1e:7c:f8:be:34  85166       BOUND      irb.100\n10.8.52.9         5           6c:c1:11:4e:52:8c  86299       BOUND      irb.100\n10.8.52.5         3           d8:fa:97:b8:1a:dd  85222       BOUND      irb.100\n\n{master:0}\nroot@junos> clear dhcp server binding 2\n\n{master:0}\nroot@junos> show dhcp server binding\nIP address        Session Id  Hardware address   Expires     State      Interface\n10.8.52.9         5           6c:c1:11:4e:52:8c  86275       BOUND      irb.100\n10.8.52.5         3           d8:fa:97:b8:1a:dd  85198       BOUND      irb.100\n\n{master:0}\n
          "},{"location":"juniper/#show-config-diff-before-commit","title":"Show config diff before commit","text":"

          You can view the changes that will be committed, which is useful when the commit is invalid and you need to inspect it.

          root# show | compare\n[edit access address-assignment pool p1 family inet]\n        host server1-ipmi { ... }\n+       host server2 {\n+           hardware-address 00:11:22:33:44:33;\n+           ip-address 172.17.1.6;\n+       }\n+       host server3 {\n+           hardware-address 00:11:22:33:44:35;\n+           ip-address 172.17.1.7;\n+       }\n+       host server4 {\n+           hardware-address 00:11:22:33:44:1d;\n+           ip-address 172.17.1.8;\n+       }\n
          "},{"location":"juniper/#abort-config-changes-without-committing","title":"Abort config changes without committing","text":"

If you have made changes that you want to abandon, run rollback 0 while still in configure mode:

          {master:0}[edit]\nroot@ex4300# rollback 0\nload complete\n\n{master:0}[edit]\nroot@voyage-van-3-ex4300# exit\nExiting configuration mode\n
          "},{"location":"juniper/#links","title":"Links","text":"
          • https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-cli-overview.html
          • https://www.juniper.net/documentation/en_US/junos/topics/task/configuration/virtual-chassis-mx-series-vc-ports-deleting.html
          • https://junos-ansible-modules.readthedocs.io/en/2.1.0/
          "},{"location":"jupyter/","title":"Jupyter","text":"

          \"Project Jupyter exists to develop open-source software, open-standards, and services for interactive computing across dozens of programming languages.\" - http://jupyter.org/

          "},{"location":"jupyter/#links","title":"Links","text":"
          • A gallery of interesting Jupyter Notebooks
          • Interactive coding challenges
          • Presenting Code Using Jupyter Notebook Slides
          • JupyterBook: Jupyter Book is an open source project for building beautiful, publication-quality books and documents from computational material.
          "},{"location":"jwt/","title":"jwt","text":"

          \"JSON Web Token (JWT) is an open standard (RFC 7519) that defines a compact and self-contained way for securely transmitting information between parties as a JSON object. This information can be verified and trusted because it is digitally signed. JWTs can be signed using a secret (with the HMAC algorithm) or a public/private key pair using RSA.\" - https://jwt.io/introduction/

          \"A JSON web token, or JWT (\u201cjot\u201d) for short, is a standardized, optionally validated and/or encrypted container format that is used to securely transfer information between two parties.\" - A plain English introduction to JWT

          "},{"location":"jwt/#tidbits","title":"Tidbits","text":"
          • JWT is abstract. The concrete forms are signed (JWS) or encrypted (JWE)
• Unsigned tokens have \"alg\": \"none\" in the header, but are still in JWS format.
• JWS has three sections: header.payload.signature (see the decoding sketch after this list)
          • JWE comes in two forms with either 5 or 6 sections
• Signatures can be created using a shared key (required for both signing and validating) or using a public/private key pair, where the private key is used to sign and only the public key is needed to validate. In either case, there is a piece of information that must be configured ahead of time for JWT to function, so it is not a fully self-contained mechanism.
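
Since the JWS header and payload are just base64url-encoded JSON, you can inspect them from a shell. A rough sketch (the token below is a contrived sample; python3 is used here because it handles base64url and missing padding cleanly):

JWT='eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.c2ln'\nfor i in 1 2 ; do  # field 1 is the header, field 2 is the payload\n  cut -d. -f\"$i\" <<<\"$JWT\" |\n  python3 -c 'import base64, sys; t = sys.stdin.read().strip(); print(base64.urlsafe_b64decode(t + \"=\" * (-len(t) % 4)).decode())' |\n  jq .\ndone\n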
          "},{"location":"jwt/#links","title":"Links","text":"
          • https://jwt.io
          • https://medium.com/ag-grid/a-plain-english-introduction-to-json-web-tokens-jwt-what-it-is-and-what-it-isnt-8076ca679843
          • https://docs.mongodb.com/realm/authentication/custom-jwt
          • https://hasura.io/blog/best-practices-of-using-jwt-with-graphql/
          • https://tools.ietf.org/html/rfc7519
          • https://medium.com/dataseries/public-claims-and-how-to-validate-a-jwt-1d6c81823826
          • https://auth0.com/docs/tokens/json-web-tokens
          "},{"location":"kaniko/","title":"kaniko","text":"

          \"kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster. kaniko doesn't depend on a Docker daemon and executes each command within a Dockerfile completely in userspace. This enables building container images in environments that can't easily or securely run a Docker daemon, such as a standard Kubernetes cluster.\" - https://github.com/GoogleContainerTools/kaniko

          "},{"location":"keel/","title":"keel","text":"

          \"Automated Kubernetes deployment updates\" - https://github.com/keel-hq/keel

          \"Kubectl is the new SSH. If you are using it to update production workloads, you are doing it wrong.\" - https://keel.sh/

          "},{"location":"keybase/","title":"keybase","text":"

          Keybase is a free, open source security app. It's also a public directory of people.

          • Keybase.io
          • Introducing the Keybase filesystem
          • Github.com/Keybase
          "},{"location":"keybase/#my-id","title":"My ID","text":"
          • https://keybase.io/hoherd
          • keybase id hoherd
          "},{"location":"kubernetes/","title":"kubernetes","text":"

          \"Kubernetes is an open-source platform for automating deployment, scaling, and operations of application containers across clusters of hosts, providing container-centric infrastructure.\" - https://kubernetes.io/docs/whatisk8s

          "},{"location":"kubernetes/#glossary","title":"Glossary","text":"

          More terms in the k8s glossary: https://kubernetes.io/docs/reference/glossary/

          • Container Network Interface (CNI) - https://github.com/containernetworking/cni
          • Container Runtime Interface (CRI) - https://github.com/containerd/cri
          • Container Storage Interface (CSI) - https://github.com/container-storage-interface/spec
          • Custom Resource Definition (CRD)
          • Horizontal Pod Autoscaling (HPA)
          "},{"location":"kubernetes/#cli-usage","title":"cli usage","text":""},{"location":"kubernetes/#learn-about-kubernetes","title":"Learn about kubernetes","text":"
          kubectl explain roles\n
          "},{"location":"kubernetes/#show-what-api-permissions-you-have","title":"Show what API permissions you have","text":"
          $ kubectl auth can-i --list\nResources                                       Non-Resource URLs   Resource Names   Verbs\n*.*                                             []                  []               [*]\n                                                [*]                 []               [*]\nselfsubjectaccessreviews.authorization.k8s.io   []                  []               [create]\nselfsubjectrulesreviews.authorization.k8s.io    []                  []               [create]\n                                                [/api/*]            []               [get]\n                                                [/api]              []               [get]\n                                                [/apis/*]           []               [get]\n                                                [/apis]             []               [get]\n                                                [/healthz]          []               [get]\n                                                [/healthz]          []               [get]\n                                                [/livez]            []               [get]\n                                                [/livez]            []               [get]\n                                                [/openapi/*]        []               [get]\n                                                [/openapi]          []               [get]\n                                                [/readyz]           []               [get]\n                                                [/readyz]           []               [get]\n                                                [/version/]         []               [get]\n                                                [/version/]         []               [get]\n                                                [/version]          []               [get]\n                                                [/version]          []               [get]\n
          "},{"location":"kubernetes/#multiple-kubernetes-client-configs","title":"Multiple kubernetes client configs","text":"

          The default config is ~/.kube/config, but if you want to use multiple configs you can do this:

          export KUBECONFIG=\"${HOME}/code/kubespray/artifacts/admin.conf:${HOME}/.kube/config\"\n

          I have seen weird problems when the order of configs is changed, such as certificate-authority-data and client-certificate-data being missing.

          "},{"location":"kubernetes/#kubeadm","title":"kubeadm","text":"

          \"kubeadm: easily bootstrap a secure Kubernetes cluster.\" - kubeadm --help

          • https://github.com/kubernetes/kubeadm
          "},{"location":"kubernetes/#show-your-kubeadm-tokens","title":"Show your kubeadm tokens","text":"
          $ sudo kubeadm token list\nTOKEN                     TTL       EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS\nubyc9a.1eq2ihwtnz7c7c9e   23h       2018-05-24T16:19:33-04:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token\n

          See sudo kubeadm token -h for more usage.

          "},{"location":"kubernetes/#kubectl","title":"kubectl","text":"

          \"kubectl controls the Kubernetes cluster manager.\" - kubectl --help

          • https://github.com/kubernetes/kubectl

• kubectl api-resources - show all resource types along with their short-hand versions.

          • kubectl completion -h - show how to configure completion for your shell.
          • kubectl config get-contexts - show which k8s configuration contexts you can control.
          • kubectl config use-context foo - switch to the foo context.
          • kubectl get nodes - show the nodes in the k8s cluster.
          • kubectl get pods - show deployed pods. there can be many pods per deployment.
          • kubectl get pods -n kube-system - show pods in a specific namespace.
          • kubectl get pods,hpa,deployment --all-namespaces - get several resource types at once, from all namespaces
          • kubectl describe pod foo
          • kubectl get deployment
          • kubectl describe deployment foo
          • kubectl get ns - show namespaces.
          • kubectl get pv - show physical volumes.
          • kubectl get svc -n kube-system - show a table of important details about running services in the kube-system namespace.
          • kubectl get pods -o yaml - show the yaml configs for the currently running status of every pod.
          • kubectl explain pods.spec - show documentation about pod specifications.
          • kubectl describe pods/echoserver - describe the pod whose Name is echoserver.
          • kubectl get rs - show replica sets.
          • kubectl expose deployment <deployment_name> --type=NodePort - create a service for the given deployment.
          • kubectl scale deployment <deployment_name> --replicas=5 - scale a deployment to 5 pods.
          • kubectl rollout history deployment <deployment_name>
          • kubectl get cm - get a list of config maps.
          • kubectl get apiservices - get a list of api service endpoints. Show -o yaml to view status about availability, endpoint, etc..
          "},{"location":"kubernetes/#working-with-several-configs","title":"Working with several configs","text":"

          Sometimes you want to have individual configs, such as when you are using configs that are updated by other engineers and pulled down via git, and sometimes you want to have one monolithic config, such as when you are using a tool that cannot easily work with multiple configs.

          "},{"location":"kubernetes/#use-multiple-configs-via-alias","title":"Use multiple configs via alias","text":"

This is a great method for requiring explicit selection of the environment, which is good for not accidentally operating in prod. Using KUBECONFIG also allows you to set different k8s environments per terminal session, which is great for doing comparisons across clusters.

          alias k8s-foo-prod=\"export KUBECONFIG=$HOME/.kube/foo-prod-config ; kubectl config set-context foo-prod --namespace=default ;\"\n

          See also Google Cloud for more examples like this related to GCP.

          "},{"location":"kubernetes/#merge-several-configs","title":"Merge several configs","text":"

          This produces a monolithic file named kube_config which can be moved to ~/.kube/config. It merges the contents of your existing ~/.kube/config file.

          REPO_DIR=/path/to/repo/\nexport KUBECONFIG=\"${HOME}/.kube/config\"\nfor X in $(find \"$REPO_DIR/kube_config.d\" -name '*.config') ; do\n    KUBECONFIG+=\":$X\"\ndone\nkubectl config view --flatten > kube_config\necho \"Config file successfully created at ${PWD}/kube_config\"\necho \"Run: mv -i ${PWD}/kube_config ${HOME}/.kube/config\"\n
          "},{"location":"kubernetes/#create-a-kubeconfig-env-var-from-several-config-files","title":"Create a KUBECONFIG env var from several config files","text":"

          This produces a KUBECONFIG that looks like file1:file2:file3

          REPO_DIR=/path/to/repo/\nKUBECONFIG=\"${HOME}/.kube/config\"\nfor config in $(find \"$REPO_DIR/kube_config.d\" -name '*.config') ; do\n    KUBECONFIG+=\":$config\"\ndone\necho \"KUBECONFIG=${KUBECONFIG}\" ;\n
          "},{"location":"kubernetes/#show-nodes-and-their-taints","title":"Show nodes and their taints","text":"
          kubectl get nodes --output 'jsonpath={range $.items[*]}{.metadata.name} {.spec.taints[*]}{\"\\n\"}{end}'\n
          "},{"location":"kubernetes/#drain-and-cordon-a-node","title":"Drain and cordon a node","text":"

          Do this before deleting or reloading a node.

          kubectl drain --ignore-daemonsets --force --delete-emptydir-data \"$NODE_NAME\"\n
          "},{"location":"kubernetes/#drain-all-but-the-top-20-nodes-in-some-node-pool-selected-by-most-cpu-usage","title":"Drain all but the top 20 nodes in some node-pool selected by most CPU usage","text":"
kubectl top nodes --sort-by=cpu |\nawk '/node-pool-identifiable-string/ {print $1}' |\ntail -n +21 |\nxargs kubectl drain --ignore-daemonsets --force --delete-emptydir-data\n
          "},{"location":"kubernetes/#show-namespaces-and-how-many-hours-old-they-are","title":"Show namespaces and how many hours old they are","text":"
          kubectl get namespace --sort-by=\".metadata.creationTimestamp\" -o json |\njq -r '\n  .items[] |\n  ((now - (.metadata.creationTimestamp | fromdateiso8601))/3600 | floor) as $hours_old |\n  \"\\(.metadata.name) \\($hours_old)\"\n'\n
          "},{"location":"kubernetes/#show-pods-sorted-by-creation-time","title":"Show pods, sorted by creation time","text":"

Only ascending sort (oldest first) is supported

          kubectl get pods --sort-by=.metadata.creationTimestamp\n

To sort descending you can use awk and tac (which is cat in reverse)

          kubectl get pods --sort-by=.metadata.creationTimestamp |\nawk 'NR == 1; NR > 1 {print | \"tac\"}'\n
          "},{"location":"kubernetes/#show-pods-that-are-not-running","title":"Show pods that are not running","text":"
          kubectl get pods --all-namespaces --field-selector='status.phase!=Running' --sort-by=.metadata.creationTimestamp\n
          "},{"location":"kubernetes/#show-pods-that-are-not-running-or-did-not-exit-cleanly","title":"Show pods that are not running or did not exit cleanly","text":"
          kubectl get pods --all-namespaces --field-selector='status.phase!=Running,status.phase!=Succeeded' --sort-by=.metadata.creationTimestamp\n
          "},{"location":"kubernetes/#show-pods-that-are-terminating","title":"Show pods that are terminating","text":"

          Unfortunately \"Terminating\" shows up as a status, but is not a phase, so we have to jump through some hoops to show this list. Here's one way to do this:

          kubectl get pods -A |\nawk '$4 == \"Terminating\" {print $1,$2}' |\nwhile read -r NS POD ; do\n  kubectl get pod \"$POD\" -n \"$NS\" -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,TERMINATION_GRACE_PERIOD:.spec.terminationGracePeriodSeconds\ndone |\ncolumn -t |\nsort -u\n

          And the output will be something like:

          NAMESPACE                       NAME                                                TERMINATION_GRACE_PERIOD\notv-blazing-ray-3043            blazing-ray-3043-miner-7556f86b76-8mpdj            600\notv-gravitational-century-8705  gravitational-century-8705-miner-66b6dd97cc-c2mqq  600\notv-lunar-nova-0800             lunar-nova-0800-miner-86684cd6f8-d79wm             600\n
          "},{"location":"kubernetes/#show-all-images-referenced-by-your-k8s-manifests","title":"Show all images referenced by your k8s manifests","text":"
          kubectl get pods --all-namespaces -o jsonpath=\"{..image}\" |\ntr -s '[[:space:]]' '\\n' |\nsort |\nuniq -c |\nsort -n\n
          "},{"location":"kubernetes/#show-a-list-of-containers-formatted-as-pod_name-container_name","title":"Show a list of containers formatted as pod_name container_name","text":"
          kubectl get pods -n \"$NS\" -o json |\n  jq -r '.items[] | .metadata.name as $podname | .spec.containers[] | \"\\($podname) \\(.name)\"' |\n  column -t\n
          "},{"location":"kubernetes/#show-a-list-of-containers-formatted-as-pod_managerpod_name-container_name","title":"Show a list of containers formatted as pod_manager/pod_name container_name","text":"

          When you need to check all of the containers of pods in a given pod manager (sts, ds, deployment), you need a list that is formatted in a very specific way.

          For instance, to get a list of containers inside the prometheus sts pods

          kubectl get sts -l component=prometheus -n \"$NS\" -o json |\n  jq -r '.items[] | .kind as $kind | .metadata.name as $name | .spec.template.spec.containers[] | \"\\($kind)/\\($name) \\(.name)\"' |\n  column -t\n

          Which produces the output:

          StatefulSet/demo-prometheus  configmap-reloader\nStatefulSet/demo-prometheus  prometheus\n

          This can then be fed into anything needing such syntax, for example kubectl exec to check the runtime env of these containers:

          $ kubectl get sts -l component=prometheus -n $NS -o json |\n  jq -r '.items[] | .kind as $kind | .metadata.name as $name | .spec.template.spec.containers[] | \"\\($kind)/\\($name) \\(.name)\"' |\n  while read -r p c ; do echo \"$p $c $(kubectl -n $NS exec $p -c $c -- env | grep '^HOSTNAME=')\" ; done ;\nStatefulSet/demo-prometheus configmap-reloader HOSTNAME=demo-prometheus-1\nStatefulSet/demo-prometheus prometheus HOSTNAME=demo-prometheus-1\n

That's obviously a contrived example, but the real learning here is that it is possible to iterate over deep json data while referencing values from higher levels by storing those higher levels as variables.

          "},{"location":"kubernetes/#decode-a-secret","title":"Decode a secret","text":"

Use the built-in base64 decoding like this:

          kubectl get secret -n \"${NAMESPACE}\" \"${SECRET_NAME}\" -o go-template='{{ printf \"%s\\n\" (.data.password | base64decode) }}'\n

          Things get tricky when you have a dot in the key name:

          kubectl get secret -n \"${NAMESPACE}\" \"${SECRET_NAME}\" -o go-template='{{ printf \"%s\\n\" (index .data \"pgbouncer.ini\" | base64decode) }}'\n

          Or you can use -o jsonpath with an external base64 decoder:

          kubectl get secret -n \"${NAMESPACE}\" \"${SECRET_NAME}\" -o jsonpath='{.data.pgbouncer\\.ini}' | base64 -d\n

          Alternatively you can use jq, which has the cleanest syntax when accessing keys with dots in the name:

          kubectl get secret -n \"${NAMESPACE}\" \"${SECRET_NAME}\" -o json | jq -r '.data[\"pgbouncer.ini\"]' | base64 -d\n
          "},{"location":"kubernetes/#decode-ssl-secrets-and-show-info-about-the-certificates","title":"Decode SSL secrets and show info about the certificates","text":"
          kubectl get secret -n istio-system istio.default -o json |\njq -r '.data | keys[] as $k | \"\\($k) \\(.[$k])\"' |\ngrep cert |\nwhile read -r k v ; do\n  echo \"------ $k ------\"\n  echo -n \"$v\" |\n  base64 -d |\n  openssl x509 -noout -subject -issuer -dates\ndone\n

          Example output:

          ------ cert-chain.pem ------\nsubject=\nissuer= /O=cluster.local\nnotBefore=Aug 10 13:55:50 2022 GMT\nnotAfter=Nov  8 13:55:50 2022 GMT\n------ root-cert.pem ------\nsubject= /O=cluster.local\nissuer= /O=cluster.local\nnotBefore=Sep 29 13:52:55 2021 GMT\nnotAfter=Sep 27 13:52:55 2031 GMT\n
          "},{"location":"kubernetes/#watch-whats-going-on-in-your-cluster","title":"Watch what's going on in your cluster","text":"
          watch kubectl get pods --all-namespaces -o wide\n

          or

          kubectl get pods --all-namespaces -o wide -w\n
          "},{"location":"kubernetes/#show-all-pods-and-their-containers-requests-and-limits","title":"Show all pods and their container's requests and limits","text":"
          kubectl get pods --all-namespaces -o json |\njq -r '\n  .items[] |\n  .metadata.namespace as $namespace |\n  .metadata.name as $pod_name |\n  .spec.containers[] |\n  [$namespace, $pod_name, .name, (.resources | tostring)] |\n  @tsv\n' | column -t -s$'\\t'\n

This will produce output like the following, with the columns namespace, pod, container, and resources as a JSON blob:

          development  gamehouse-nats-0                  nats                {\"limits\":{\"cpu\":\"250m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"75m\",\"memory\":\"30Mi\"}}\ndevelopment  gamehouse-nats-2                  metrics             {\"limits\":{\"cpu\":\"250m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"75m\",\"memory\":\"30Mi\"}}\ndevelopment  gamehouse-nginx-85885cbb75-m5t58  nginx               {\"limits\":{\"cpu\":\"100m\",\"memory\":\"10Mi\"},\"requests\":{\"cpu\":\"80m\",\"memory\":\"7Mi\"}}\ndevelopment  gamehouse-nginx-85885cbb75-wdmhf  nginx               {\"limits\":{\"cpu\":\"100m\",\"memory\":\"10Mi\"},\"requests\":{\"cpu\":\"80m\",\"memory\":\"7Mi\"}}\ndevelopment  gamehouse-prometheus-0            configmap-reloader  {\"limits\":{\"cpu\":\"100m\",\"memory\":\"25Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"25Mi\"}}\ndevelopment  gamehouse-prometheus-0            prometheus          {\"limits\":{\"cpu\":\"3\",\"memory\":\"20Gi\"},\"requests\":{\"cpu\":\"1\",\"memory\":\"4Gi\"}}\n
          "},{"location":"kubernetes/#show-daemonsets-that-are-not-up-to-date","title":"Show daemonsets that are not up to date","text":"
          kubectl get daemonset -A | awk '$3 != $6 {print}'\n
          "},{"location":"kubernetes/#watch-events-in-a-given-namespace","title":"Watch events in a given namespace","text":"
          kubectl -n kube-system get events --field-selector type=Warning -w\n

          Or format the event messages with more useful information (really wide output)

          kubectl get events -w -o custom-columns=FirstSeen:.firstTimestamp,LastSeen:lastTimestamp,Kind:involvedObject.kind,Name:.involvedObject.name,Count:.count,From:.source.component,Type:.type,Reason:.reason,Message:.message\n
          "},{"location":"kubernetes/#show-all-containers-for-each-pod-matching-a-label","title":"Show all containers for each pod matching a label","text":"
          kubectl -n kube-system get pod -l k8s-app=kube-dns -o=jsonpath='{range .items[*]}{\"\\n\"}{.metadata.name}{\":\\n\\t\"}{range .spec.containers[*]}{.name}{\":\\t\"}{.image}{\"\\n\\t\"}{end}{\"\\n\"}{end}'\n
          "},{"location":"kubernetes/#show-a-list-of-everything-in-a-namespace","title":"Show a list of everything in a namespace","text":"
          NS=kube-system\nkubectl get all -n \"$NS\" --output 'jsonpath={range $.items[*]}{.kind} {.metadata.name}{\"\\n\"}{end}' |\ngrep -v '^List $'  # exclude empty namespace\n
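
Note that kubectl get all only covers a small fixed set of resource types. To see truly everything in a namespace, including custom resources, one slower approach (a sketch; it makes one API call per resource type) is to enumerate every listable namespaced type:

kubectl api-resources --verbs=list --namespaced -o name |\nxargs -n 1 kubectl get -n \"$NS\" --ignore-not-found --show-kind\n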
          "},{"location":"kubernetes/#show-logs-for-a-given-pod-since-n-hours-ago","title":"Show logs for a given pod since N hours ago","text":"
          pod_name=httpbin\nkubectl logs $pod_name --since=12h\n

          The --since arg can take [s]econds, [m]inutes and [h]ours. Longer durations should use --since-time=<rfc3339 timestamp>

          "},{"location":"kubernetes/#show-logs-for-a-given-pod-since-a-given-date","title":"Show logs for a given pod since a given date","text":"

The --since-time arg takes an RFC3339 datetime. EG: 1991-08-03T13:31:46-07:00. This format requirement is strict, and is incompatible with the GNU date --rfc-3339=seconds output, which uses a space instead of a T to separate the date from the time, and with +%FT%T%z, which does not include a colon in the UTC offset.

          pod_name=httpbin\nkubectl logs $pod_name --since-time=\"$(date -Iseconds -d '-5 weeks')\"\n
          "},{"location":"kubernetes/#output-custom-column-names","title":"Output custom column names","text":"
          $ kubectl get pvc --all-namespaces -o custom-columns='NAME:metadata.name,SIZE:spec.resources.requests.storage'\nNAME                   SIZE\nfoo-logs               256Gi\ntest-volume-2          1Gi\nsome-awesome-service   5Gi\n\n$ kubectl get pods -o custom-columns='NAME:.metadata.name,START_TIME:.status.startTime,.spec.containers[0].env[?(@.name == \"GITLAB_USER_EMAIL\")].value' | grep -E 'NAME|jobs'\nNAME                                                 START_TIME             GITLAB_USER_EMAIL\nrunner-ppzmy1zx-project-11548552-concurrent-0q2pmk   2019-10-23T17:00:56Z   user2@example.com\nrunner-ppzmy1zx-project-11548552-concurrent-1f7nfx   2019-10-23T17:04:27Z   user1@example.com\nrunner-ppzmy1zx-project-11548552-concurrent-2n84rv   2019-10-23T17:04:19Z   user1@example.com\n
          "},{"location":"kubernetes/#perform-a-restart-of-a-service-daemonset-or-statefulset","title":"Perform a restart of a service, daemonset or statefulset","text":"
          DEPLOYMENT_NAME=gibson_garbagefile_seeker\nkubectl rollout restart deployment $DEPLOYMENT_NAME\n
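
You can then watch the rollout complete with:

kubectl rollout status deployment $DEPLOYMENT_NAME\n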
          "},{"location":"kubernetes/#run-a-cronjob-out-of-schedule","title":"Run a cronjob out of schedule","text":"
          kubectl create job --from=cronjob/download-cat-pix download-cat-pix-manual-run\n
          "},{"location":"kubernetes/#create-a-yaml-file-for-a-resource-type","title":"Create a yaml file for a resource type","text":"

          You can generate yaml for a variety of entities without having to create them on the server. Each entity requires different syntax, so you have to work through the error messages to get to a final solution.

          https://kubernetes.io/docs/reference/kubectl/conventions/#generators

          $ kubectl create --dry-run=client -o yaml cronjob --schedule='15 * * * *' --image=image-name:1.2.3 job-name\napiVersion: batch/v1beta1\nkind: CronJob\nmetadata:\n  creationTimestamp: null\n  name: job-name\nspec:\n  jobTemplate:\n    metadata:\n      creationTimestamp: null\n      name: job-name\n    spec:\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n          - image: image-name:1.2.3\n            name: job-name\n            resources: {}\n          restartPolicy: OnFailure\n  schedule: 15 * * * *\nstatus: {}\n
          "},{"location":"kubernetes/#installations","title":"Installations","text":"

          The standard way to install k8s by yourself is to use kubeadm.

          "},{"location":"kubernetes/#manually-on-ubuntu-16","title":"Manually on Ubuntu 16","text":"
          ## as root\nswapoff -a # https://github.com/kubernetes/kubernetes/issues/53533\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -\ncurl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -\necho \"deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable\" > /etc/apt/sources.list.d/docker.list\necho \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" > /etc/apt/sources.list.d/kubernetes.list\napt update\napt dist-upgrade -y\napt install -y apt-transport-https ca-certificates curl software-properties-common\napt install -y docker-ce\napt install -y kubelet kubeadm kubectl\nkubeadm init\n

          kubeadm init guide: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#instructions

          "},{"location":"kubernetes/#dns","title":"DNS","text":"

          Kubernetes lets you resolve resources via DNS

          • https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
          • https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/
          "},{"location":"kubernetes/#enable-k8s-dns-logging","title":"Enable k8s dns logging","text":"
          kubectl -n kube-system edit configmap coredns\n## Add 'log' to the 'Corefile' config\n
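
The Corefile ends up looking roughly like this (a sketch; your plugin list will likely differ):

.:53 {\n    log   # log every query to the coredns pod stdout\n    errors\n    health\n    kubernetes cluster.local in-addr.arpa ip6.arpa\n    forward . /etc/resolv.conf\n    cache 30\n}\n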
          "},{"location":"kubernetes/#dns-entity-map","title":"DNS Entity map","text":"
          • Kubernetes Service: <service>.<namespace>.svc.cluster.local. (eg: httpbin.default.svc.cluster.local.)
          kubectl get svc --all-namespaces -o jsonpath='{range .items[*]}{.metadata.name}{\".\"}{.metadata.namespace}{\".svc.cluster.local.\\n\"}'\n

          Or with jq:

          kubectl get svc --all-namespaces -o json |\njq -r  '.items[] | \"\\(.metadata.name).\\(.metadata.namespace).svc.cluster.local.\"'\n

          And if you want to also add port numbers:

          kubectl get svc --all-namespaces -o json |\njq -r '.items[] | \"\\(.metadata.name).\\(.metadata.namespace).svc.cluster.local.\" as $base | .spec.ports[] | \"\\($base):\\(.port)\"'\n
          • With core-dns you can run dig SRV +short *.*.svc.cluster.local. to get a list of all services.
          • Kubernetes service srv records: _${service_port_name}._${protocol}.${service}.${namespace}.svc.cluster.local. (eg: _http._tcp.httpbin.default.svc.cluster.local.)
          "},{"location":"kubernetes/#crictl","title":"crictl","text":"

          crictl is a tool to inspect the local Container Runtime Interface (CRI)

          user@k3snode:~$ sudo crictl pods\nPOD ID          CREATED       STATE  NAME                            NAMESPACE    ATTEMPT\n688ecc2d9ce4d   2 weeks ago   Ready  log-fluentd-676d9d7c9d-ghz5x    default      8\nee1d8b0593e71   2 weeks ago   Ready  tiller-deploy-677f9cb999-rx6qp  kube-system  7\n1153f4c0bd1f4   2 weeks ago   Ready  coredns-78fcdf6894-qsl74        kube-system  8\n5be9c530c8cdc   2 weeks ago   Ready  calico-node-59spv               kube-system  10\nd76d211830064   2 weeks ago   Ready  kube-proxy-cqdvn                kube-system  104\naa1679e0bfcca   2 weeks ago   Ready  kube-scheduler-s1               kube-system  10\nef64eea461bc0   2 weeks ago   Ready  kube-controller-manager-s1      kube-system  10\n14ec5abe1e3ab   2 weeks ago   Ready  kube-apiserver-s1               kube-system  11\nd4ce465a0942f   2 weeks ago   Ready  etcd-s1                         kube-system  10\n
          "},{"location":"kubernetes/#cloud-provider-versions","title":"Cloud Provider versions","text":"
          • AKS: https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions#aks-kubernetes-release-calendar
          • EKS: https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-release-calendar
          • GKE: https://cloud.google.com/kubernetes-engine/docs/release-schedule
          • Upstream: https://en.wikipedia.org/wiki/Kubernetes#History
          "},{"location":"kubernetes/#code-snips","title":"Code snips","text":""},{"location":"kubernetes/#show-all-ingresses-and-what-hostnames-they-handle","title":"Show all ingresses and what hostnames they handle","text":"

This is useful when you have a lot of ingresses or very long hostnames, which cause them to be truncated with an ellipsis in normal or even wide output.

kubectl get ingresses -n \"$NS\" --output 'jsonpath={range $.items[*]}{.metadata.name}{\":\"}{range @.spec.rules[*]}{\"\\n\\t\"}{.host}{end}{\"\\n\"}{end}'\n

          This will output a list like

          rodent-ingress:\n    mouse.example.com\n    hamster.example.com\nbird-ingress:\n    parrot.example.com\n    swallow.example.com\n    owl.example.com\n
          "},{"location":"kubernetes/#parse-swaggerjson-for-api-definitions","title":"Parse swagger.json for API definitions","text":"

          This is useful when manually writing helm chart templates to handle a range of k8s versions. (Keep an eye on https://github.com/helm/helm/issues/9765 though for hopefully a better way than manually doing this.)

          # Download a range of swagger.json files named by version. EG: v1.18.0.json\nfor X in {15..22} ;\n  do ver=\"v1.$X.0\"\n  curl -LsSo \"${ver}.json\" \"https://raw.githubusercontent.com/kubernetes/kubernetes/${ver}/api/openapi-spec/swagger.json\"\ndone\n\n# Parse these into a text list of API versions supported by the version. EG: v1.18.0-definitions.txt\nfor X in v1.* ; do\n  jq -r '.definitions | keys | .[]' $X > ${X/.json/}-definitions.txt\ndone\n\n# Then you can grep for a definition to see what versions support it\ngrep 'Ingress$' *definitions.txt | grep -vE 'LoadBalancer'\n
          "},{"location":"kubernetes/#use-jq-to-find-zombie-pods","title":"Use jq to find zombie pods","text":"

          If the base container shuts down, sometimes the istio sidecar can continue to run. You can find this condition with:

          kubectl get pods -A -o json | jq '\n  .items[] |\n  select(.status.containerStatuses[].name == \"base\" and .status.containerStatuses[].state.terminated.exitCode == 0) |\n  select(.status.containerStatuses[].name == \"istio-proxy\" and .status.containerStatuses[].state.terminated.exitCode == null) |\n  {\n    \"name\": .metadata.name,\n    \"namespace\": .metadata.namespace,\n    \"status\": [\n      .status.containerStatuses[] |\n      {\n        \"name\": .name,\n        \"exit_code\": .state.terminated.exitCode\n      }\n    ]\n  }\n'\n
          "},{"location":"kubernetes/#use-jq-to-find-all-pods-with-a-specific-container-state","title":"Use jq to find all pods with a specific container state","text":"
          kubectl get pods -A -o json | jq '\n  .items[]? |\n  select(.status.containerStatuses[]?.state.waiting.reason == \"CreateContainerConfigError\") |\n  .metadata.name\n'\n
          "},{"location":"kubernetes/#use-jq-to-find-pods-that-have-problematic-phases","title":"Use jq to find pods that have problematic phases","text":"
          kubectl get pods -A -o json |\njq -c '\n  .items[] |\n  select(.status.phase|test(\"Pending|Unknown\")) |\n  [.status.phase, .metadata.creationTimestamp, .metadata.namespace, .metadata.name]\n'\n
          "},{"location":"kubernetes/#linux-kernel-namespaces","title":"linux kernel namespaces","text":"

Linux kernel namespaces are part of the magic that allows containers to run, and kubernetes pods take this a step further by allowing multiple containers to run inside a pod while sharing only some of the namespaces. Which ones? The following diff of two containers in the same pod shows that the mnt, pid, and uts namespaces differ, while cgroup, ipc, net, and user are shared:

          diff -t -W 65 -y ns-container-1.txt ns-container-2.txt\n$ readlink /proc/$$/task/*/ns/*   $ readlink /proc/$$/task/*/ns/*\ncgroup:[4026531835]               cgroup:[4026531835]\nipc:[4026532832]                  ipc:[4026532832]\nmnt:[4026533233]                | mnt:[4026533326]\nnet:[4026532835]                  net:[4026532835]\npid:[4026533325]                | pid:[4026533328]\npid:[4026533325]                | pid:[4026533328]\nuser:[4026531837]                 user:[4026531837]\nuts:[4026533324]                | uts:[4026533327]\n
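
The two ns-container-N.txt files above can be captured from each container of a multi-container pod, roughly like this (a sketch; it assumes the containers are named container-1 and container-2 and contain a shell):

POD=some-multi-container-pod\nfor c in container-1 container-2 ; do\n  kubectl exec \"$POD\" -c \"$c\" -- sh -c 'readlink /proc/$$/task/*/ns/*' > \"ns-$c.txt\"\ndone\n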
          "},{"location":"kubernetes/#links","title":"Links","text":"
          • https://kubernetes.io/docs/reference/kubectl/quick-reference
          • https://slack.kubernetes.io
          • https://blog.hypriot.com/post/setup-kubernetes-raspberry-pi-cluster
          • https://docs.tigera.io/calico/latest/about: \"Calico is a networking and security solution that enables Kubernetes workloads and non-Kubernetes/legacy workloads to communicate seamlessly and securely.\"
          • https://github.com/kelseyhightower/kubernetes-the-hard-way: \"The target audience for this tutorial is someone planning to support a production Kubernetes cluster and wants to understand how everything fits together.\"
          • https://github.com/kinvolk/kubernetes-the-hard-way-vagrant: \"A port of Kelsey Hightower's 'Kubernetes the Hard Way' tutorial to Vagrant.\"
          • https://github.com/kubernetes/dashboard#kubernetes-dashboard
          • https://github.com/kubernetes/kompose: Compose to Kubernetes
          • https://kubernetes.io/docs/concepts/cluster-administration/addons/
          • https://kubernetes.io/docs/concepts/cluster-administration/logging/
          • https://kubernetes.io/docs/concepts/services-networking/network-policies/
          • https://kubernetes.io/docs/concepts/workloads/
          • https://kubernetes.io/docs/getting-started-guides/minikube/
          • https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm
          • https://www.cncf.io/certification/expert/CKA/
          • https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-details
          • https://github.com/ClusterHQ/flocker: Flocker is an open-source Container Data Volume Manager for your Dockerized applications.
          • https://cloudplatform.googleblog.com/2018/05/Beyond-CPU-horizontal-pod-autoscaling-comes-to-Google-Kubernetes-Engine.html
          • https://github.com/vapor-ware/ksync: Sync local filesystem with a target container
          • https://metallb.universe.tf: For those of us not running in a cloud, MetalLB can serve as a k8s native LB.
          • https://k3s.io: Lightweight Kubernetes. Easy to install. A binary of less than 40 MB. Only 512 MB of RAM required to run.
          • https://learnk8s.io/production-best-practices/: A curated checklist of best practices designed to help you release to production.
          • https://kind.sigs.k8s.io/docs/user/quick-start#multinode-clusters: Multi-node kubernetes clusters running within docker
          • https://www.stackrox.com/categories/eks-vs-gke-vs-aks: Available cloud versions of hosted k8s and notable changes
          • https://velero.io: cluster backups
          • kube-dns-autoscaler: https://gist.github.com/MrHohn/1198bccc2adbd8cf3b066ab37ccd8355 / https://github.com/kubernetes-sigs/cluster-proportional-autoscaler
          • Kubernetes pods /etc/resolv.conf ndots:5 option and why it may negatively affect your application performances: https://pracucci.com/kubernetes-dns-resolution-ndots-options-and-why-it-may-affect-application-performances.html
          • https://kubernetes.io/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/ \"Kubernetes admission controllers are plugins that govern and enforce how the cluster is used. They can be thought of as a gatekeeper that intercept (authenticated) API requests and may change the request object or deny the request altogether.\"
          • https://tunein.engineering/implementing-leader-election-for-kubernetes-pods-2477deef8f13: Leader election for Kubernetes pods
          • https://medium.com/kubernetes-tutorials/making-sense-of-taints-and-tolerations-in-kubernetes-446e75010f4e
          • https://web.archive.org/web/20190306132233/https://supergiant.io/blog/learn-how-to-assign-pods-to-nodes-in-kubernetes-using-nodeselector-and-affinity-features/
          • https://fluxcd.io: \"Open and extensible continuous delivery solution for Kubernetes. Powered by GitOps Toolkit.\"
          • https://k8slens.dev: \"Lens is the only IDE you\u2019ll ever need to take control of your Kubernetes clusters.\"
          • https://kustomize.io: \"Kubernetes native configuration management\"
          • https://github.com/kubernetes-sigs/kustomize/tree/master/examples: Kustomize examples
          • https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/: Downward API allows you to mount k8s spec information as env vars
          • https://isovalent.com/blog/post/2021-12-08-ebpf-servicemesh: How eBPF will solve Service Mesh - Goodbye Sidecars
          • https://www.kubeshark.co: Basically tcpdump and wireshark (ethereal) for k8s clusters. 2 nodes free as of 2024-10-17.
          • https://www.macchaffee.com/blog/2024/you-have-built-a-kubernetes/
          "},{"location":"launchd/","title":"launchd","text":"

          launchd is macOS's init system.

          "},{"location":"launchd/#example","title":"Example","text":""},{"location":"launchd/#watch-folder","title":"Watch Folder","text":"

          This user LaunchAgent would be placed into $HOME/Library/LaunchAgents/photo_processor.plist.

          We have to specify /bin/bash as the first ProgramArgument so OS X doesn't complain about DRM or mach-o executable shizz. This effectively limits us to bash 3.

          <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n  <dict>\n    <key>Label</key>\n    <string>photo_processor.sh</string>\n\n    <key>ProgramArguments</key><array>\n      <string>/bin/bash</string>\n      <string>/Users/hoherd/code/dho-bin/photo_processor.sh</string>\n    </array>\n\n    <key>WatchPaths</key>\n    <array>\n        <string>/Users/hoherd/Dropbox/yp/photo_queue/</string>\n    </array>\n\n  </dict>\n</plist>\n
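
          To activate the agent without logging out and back in, you can load it by hand. This is a sketch using the legacy load/unload subcommands, which still work; newer macOS versions prefer launchctl bootstrap/bootout:

          launchctl load ~/Library/LaunchAgents/photo_processor.plist\n## later, to deactivate it:\nlaunchctl unload ~/Library/LaunchAgents/photo_processor.plist\n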
          "},{"location":"launchd/#see-also","title":"See also","text":"
          • https://github.com/jordansissel/pleaserun
          "},{"location":"ldap/","title":"ldap","text":"

          Lightweight Directory Access Protocol

          "},{"location":"ldap/#acronyms-and-stuff","title":"Acronyms and stuff","text":"
          • o=organization
          • c=country
          • dn=distinguished_name
          • dc=domain_component
          • rdn=relative_distinguished_name
          • cn=common_name
          • uid=user_id
          "},{"location":"ldap/#tricks","title":"Tricks","text":""},{"location":"ldap/#show-the-whole-ldap-database","title":"Show the whole ldap database","text":"

          From the LDAP server

          slapcat\n
          "},{"location":"ldap/#show-pwdfailuretime-count-and-associated-uid","title":"Show pwdFailureTime count and associated uid","text":"
          sudo slapcat 2>/dev/null | egrep '^(pwdFailureTime|uid:)' | uniq -c -w 14 | grep -B1 pwdFailureTime\n
          "},{"location":"ldap/#show-ldap-and-local-user-account-info","title":"Show LDAP and local user account info","text":"
          getent passwd maxb\n
          "},{"location":"ldap/#search-an-ldap-tree","title":"Search an LDAP tree","text":"
          ldapsearch -x -b \"dc=example,dc=com\"\n

          This can be used when forwarding through ssh -L 3389:127.0.0.1:389 remotehost

          ldapsearch -x -h 127.0.0.1 -p 3389 -b \"dc=example,dc=com\"\n
          "},{"location":"ldap/#run-a-search-while-authenticated","title":"Run a search while authenticated","text":"

          This logs in as danielh and searches for a record with uid=robertc

          ldapsearch -x -b \"dc=example,dc=com\" -D \"uid=danielh,ou=people,dc=example,dc=com\" -W \"uid=danielh\"\n
          "},{"location":"ldap/#refresh-ldap-user-cache-on-centos-6","title":"Refresh LDAP user cache on CentOS 6","text":"
          nscd -i passwd ; nscd -i group ; /etc/init.d/lldpad restart ; /etc/init.d/nslcd restart ; /etc/init.d/nscd restart ;\n
          "},{"location":"ldap/#see-also","title":"See Also","text":"
          • Apache Directory Studio graphical interface for LDAP: https://directory.apache.org/studio/
          • A great series of articles on LDAP: http://www.ldapman.org/articles/
          • Ubuntu Server LDAP integration: https://help.ubuntu.com/community/LDAPClientAuthentication
          "},{"location":"lego/","title":"LEGO","text":"

          Lego STEM, including Mindstorms, Powered Up, etc..

          "},{"location":"lego/#robot-inventor","title":"Robot Inventor","text":"

          The 51515 is the successor to the EV3 31313. Its software is much more approachable and modern, but it appears to be less hackable than the 31313 because it has no USB port or SD slot.

          "},{"location":"lego/#mindstorms-ev3","title":"Mindstorms EV3","text":"
          • EV3 programming software is available via cask: brew cask install lego-mindstorms-ev3
          "},{"location":"lego/#micropython","title":"MicroPython","text":"

          In 2019 LEGO started shipping a microSD card for developing MicroPython using vscode. This setup is based on ev3dev, including having an ev3dev splash screen on boot and the default ev3dev ssh credentials. On the vscode side, one interesting thing is that when you install the LEGO\u00ae Mindstorms\u00ae EV3 MicroPython extension, it installs ev3dev-browser.

          "},{"location":"lego/#ev3-links","title":"EV3 links","text":"
          • Robot Operating System for EV3
          • https://education.lego.com/en-us/support/mindstorms-ev3/python-for-ev3
          • http://ev3lessons.com/en
          • ev3dev: \"ev3dev is a Debian Linux-based operating system that runs on several LEGO\u00ae MINDSTORMS compatible platforms including the LEGO\u00ae MINDSTORMS EV3 and Raspberry Pi-powered BrickPi.\"
          • LEGO MINDSTORMS EV3 source code
          • BUILD A ROBOT: \"These robots give you the full EV3 experience, complete with building instructions, programming missions and the programming tool included in the new free EV3 Programmer App.\"
          • http://flltutorials.com
          • Community Gallery: Robot designs from lego mindstorm community.
          "},{"location":"lego/#great-ball-contraption","title":"Great Ball Contraption","text":"

          \"A great ball contraption (GBC) is a machine which receives soccer balls or basketballs from one module and passes them to another module, rather like a bucket brigade. Modules built according to the GBC standard can be assembled into a collaborative display without pre-planning or modification.\" - https://www.greatballcontraption.com

          "},{"location":"lego/#ball-specs","title":"Ball specs","text":"

          From http://www.planet-gbc.com/planet-gbc-tutorial-get-your-gbc-balls

          • Diameter: 14mm
          • Weight: ~1.5g
          "},{"location":"lego/#gbc-links","title":"GBC Links","text":"
          • https://www.greatballcontraption.com
          • http://www.planet-gbc.com
          • https://www.reddit.com/r/GreatBallContraption
          "},{"location":"lego/#lego-mario","title":"Lego Mario","text":"

          Lego Mario is an electronic physical lego game device that has an accelerometer and a colored barcode reader. It interfaces with a smartphone/tablet app to enhance the physical gaming experience with instructions, a scoreboard, etc.. The barcodes that Mario reads have been reverse engineered and can be printed with a color-accurate printer.

          "},{"location":"lego/#videos","title":"Videos","text":"
          • https://www.youtube.com/playlist?list=PLz-GEJhc2xVdWHAlHnhTEK7g8Et25rgZY My (Daniel Hoherd) LEGO Robotics Youtube Playlist
          • https://www.youtube.com/playlist?list=PLA-qIpWJQgnowkY7u6TY7KLViXsyQqv6U Akiyuki LEGO GBC Modules Youtube Playlist
          "},{"location":"lego/#general-links","title":"General links","text":"
          • https://www.lego.com/en-us/mindstorms
          • BrickPi: \"This project combines the brains of a Raspberry Pi with the brawn of a LEGO MINDSTORMS NXT\"
          • https://www.raspberrypi.com/news/raspberry-pi-build-hat-lego-education: Control LEGO motors via Raspberry Pi
          • https://botbench.com
          • https://robotsquare.com
          • https://www.bricklink.com/v3/studio/download.page: \"Build, render, and create instructions\" with this CAD style LEGO software.
          • http://www.brickpile.com/wp-content/uploads/2015/10/brick-geometry-brickcon2015-compressed.pdf (935 KB): mm, Lego Draw Units, triangles and more! Lots of LEGO math for accuracy and precision.
          • https://github.com/virantha/bricknil: Library that supports programming Duplo Train Push & Go Motor (part 28743) and others.
          • https://youtu.be/I6Vnwi6oQYg: Toa Mata Band: Lego robot orchestra tribute to Depeche Mode - Everything Counts
          • https://brickset.com/buy: Deals on Lego sets
          • https://github.com/maarten-pennings/Lego-Mindstorms: technical info about Lego Mindstorms kits
          • https://www.code.pybricks.com: IDE for Powered Up lego tech. See also https://pybricks.com/install
          • https://lego.github.io/MINDSTORMS-Robot-Inventor-hub-API
          • https://pypi.org/project/mindstorms
          • https://antonsmindstorms.com/2021/01/14/advanced-undocumented-python-in-spike-prime-and-mindstorms-hubs
          • https://builderdude35.com
          "},{"location":"lego/#see-also","title":"See also","text":"
          • Robotics
          "},{"location":"lektor/","title":"Lektor","text":"

          \"A flexible and powerful static content management system for building complex and beautiful websites out of flat files\" - https://www.getlektor.com

          "},{"location":"lektor/#deploying-a-github-user-site-with-gh-pages","title":"Deploying a github user site with gh-pages","text":"

          Github user sites like danielhoherd.github.io must be served from the master branch. This means lektor must live in a feature branch. Create a feature branch, then go into the github repo settings and protect that branch. When you do a lektor deploy it will destroy all content in master and replace it with the static site. To make this deployment match a custom domain name, set up your whatever.lektorproject with something like the following configs:

          [servers.github]\nname = github\nenabled = yes\ndefault = yes\ntarget = ghpages://danielhoherd/danielhoherd.github.io?cname=danielhoherd.com\n
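
          With that config in place, a deploy is roughly the following sketch (github here is the server name from the [servers.github] section above):

          lektor build\nlektor deploy github\n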
          "},{"location":"lektor/#links","title":"Links","text":"
          • https://www.getlektor.com/docs/deployment/ghpages/
          "},{"location":"linksys/","title":"linksys","text":"

          Linksys makes a variety of networking devices. Formerly owned by Cisco, Linksys was sold to Belkin in 2013.

          "},{"location":"linksys/#ea3500","title":"ea3500","text":"
          • http://www.linksys.com/us/support-product?pid=01t80000003K7bbAAC
          "},{"location":"linksys/#1140162464","title":"1.1.40.162464","text":"
          • Bug where if 5GHz is enabled, the 2.4GHz network sometimes becomes inaccessible. The only workaround is to use 2.4GHz or 5GHz exclusively, not both. Even then the configs can end up in a broken state where neither works, and you have to re-configure the wifi settings over wired ethernet.
          "},{"location":"linux-performance-monitoring/","title":"Linux Performance Monitoring","text":"

          Notes from the Linux Performance Monitoring talk at Velocity 2015 - Part 1, Part 2

          http://www.brendangregg.com/linuxperf.html

          "},{"location":"linux-performance-monitoring/#use","title":"USE","text":"
          • Utilization
          • Saturation
          • Errors
          "},{"location":"linux-performance-monitoring/#observability-tools","title":"Observability Tools","text":"
          • atop (atop uses the linux kernel event interface rather than sampling on screen updates, so it is better for viewing systems affected by short-lived processes)
          • htop
          • vmstat -Sm 1
          • iostat -xmdz 1
          • mpstat -P ALL 1
          • free -m
          • sar -n DEV 1
          • strace -tttT # very disruptive of system performance, slows system significantly
          • tcpdump
          • pidstat -t 1
          • pidstat -d
          • swapon -s
          • lsof
          • sar -n TCP,ETCP,DEV 1
          • collectl
          • dstat
          • strace 2>&1 | head -n 100 # since there's no strace -c N
          • ss
          • iptraf
          • slabtop
          • pcstat
          • perf
          • tiptop
          • rdmsr
          • perf-tools/execsnoop
          "},{"location":"linux-performance-monitoring/#benchmarking-tools","title":"Benchmarking tools","text":"
          • unixbench
          • sysbench
          • lmbench
          • fio
          • pchar
          • iperf
          "},{"location":"linux-performance-monitoring/#tuning-tools","title":"Tuning tools","text":"
          • sysctl
          • ulimit
          • chcpu
          "},{"location":"linux-performance-monitoring/#static-tools","title":"Static tools","text":""},{"location":"linux-performance-monitoring/#tracing","title":"Tracing","text":"
          • ftrace
          • iosnoop
          • iolatency
          • opensnoop
          • tpoint
          • funccount
          • funcgraph
          • kprobe
          • bytehist
          • stap
          "},{"location":"linux/","title":"linux","text":"

          \"Linux is a family of free and open-source software operating systems built around the Linux kernel.\" - https://en.wikipedia.org/wiki/Linux

          Most linux distros are built on GNU tools, and this article explains the importance of GNU in the linux ecosystem: https://www.gnu.org/gnu/why-gnu-linux.en.html

          Linux is part of the Unix family tree.

          "},{"location":"linux/#performance-monitoring","title":"Performance monitoring","text":"
          • Linux Load Averages: Solving the Mystery
          • Brendan Gregg's Linux Performance page
          • Notes from the Linux Performance Monitoring talk at Velocity 2015
          "},{"location":"linux/#tricks","title":"Tricks","text":""},{"location":"linux/#best-way-to-see-mounts","title":"Best way to see mounts","text":"

          There are a few ways to see mounts, but most of them omit some details in certain cases. The most complete view of mounts is the /proc/self/mountinfo file.
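
          For example, dump every mount visible to the current process (the field layout is documented in proc(5)):

          cat /proc/self/mountinfo\n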

          "},{"location":"linux/#determine-if-running-kernel-is-32-or-64-bit","title":"Determine if running kernel is 32 or 64 bit","text":"

          Works on x86 or ARM.

          getconf LONG_BIT\n
          "},{"location":"linux/#configure-a-system-to-reboot-on-kernel-panic","title":"Configure a system to reboot on kernel panic","text":"

          These lines should be added to sysctl.conf

          ## Reboot after 10 seconds if kernel panics\nkernel.panic = 10\n## Treat all oopses as panics\nkernel.panic_on_oops = 1\n
          "},{"location":"linux/#force-reboot-on-corrupt-system","title":"Force reboot on corrupt system","text":"

          For times that commands like reboot and shutdown are not available.

          echo 1 > /proc/sys/kernel/sysrq\necho b > /proc/sysrq-trigger\n
          "},{"location":"linux/#show-process-signals","title":"Show process signals","text":"

          This should work on other unixes too.

          trap -l\n
          "},{"location":"linux/#kernel-namespaces","title":"Kernel namespaces","text":"

          \"A namespace wraps a global system resource in an abstraction that makes it appear to the processes within the namespace that they have their own isolated instance of the global resource. Changes to the global resource are visible to other processes that are members of the namespace, but are invisible to other processes. One use of namespaces is to implement containers.\" - man namespaces

          \"Control cgroups, usually referred to as cgroups, are a Linux kernel feature which allow processes to be organized into hierarchical groups whose usage of various types of resources can then be limited and monitored.\" - man cgroups

          cgroup is one of the linux namespaces. (see man namespaces for more info.)

          "},{"location":"linux/#tools-and-stuff","title":"Tools and stuff","text":"
          • lsns - list namespaces
          • cgcreate - create new cgroup
          • cgexec - run the task in given control group
          • cgclassify - move running task(s) to given cgroup
          • nsenter - Run a command in the namespaces of another (referenced) process
          • systemd-cgls - Recursively show control group contents
          • systemd-cgtop - Show top control groups by their resource usage
          • /proc/self/cgroup - cgroup introspection
          "},{"location":"linux/#various-namespace-aware-tool-examples","title":"Various namespace-aware tool examples","text":""},{"location":"linux/#ps-cgroup-output","title":"ps cgroup output","text":"
          ps -o pid,ppid,user,comm,flags,%cpu,sz,%mem,cgname\n
          "},{"location":"linux/#run-a-process-in-another-namespace","title":"Run a process in another namespace","text":"

          With nsenter you specify a target pid to reference, and then specify which of its namespaces you want to enter.

          On Ubuntu 18.04, udev mounts devices in a non-global namespace, which prevents normal users from viewing those mounts. You must use nsenter to enter the udevd namespaces to view the mounts, using either --all to get all namespaces of udevd, or --mount for just that one required namespace:

          root@bionic:~# lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1\n\nroot@bionic:~# nsenter --all -t $(pgrep systemd-udevd) lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1 /mnt/adea64ca-e340-4961-8a4d-75d8a5970664\n\nroot@bionic:~# nsenter --mount -t $(pgrep systemd-udevd) lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1 /mnt/adea64ca-e340-4961-8a4d-75d8a5970664\n

          See udev for one permanent fix for this.

          "},{"location":"linux/#find-the-path-to-a-namespace","title":"Find the path to a namespace","text":"

          The path to a namespace can be used in some instances instead of the pid. We can discover the path to a namespace by using lsns.

          root@bionic:~# lsns -p $(pgrep udevd) -o +PATH\n        NS TYPE   NPROCS   PID USER COMMAND                    PATH\n4026531835 cgroup    173     1 root /sbin/init                 /proc/1/ns/cgroup\n4026531836 pid       173     1 root /sbin/init                 /proc/1/ns/pid\n4026531837 user      173     1 root /sbin/init                 /proc/1/ns/user\n4026531838 uts       173     1 root /sbin/init                 /proc/1/ns/uts\n4026531839 ipc       173     1 root /sbin/init                 /proc/1/ns/ipc\n4026532009 net       173     1 root /sbin/init                 /proc/1/ns/net\n4026532286 mnt         1  5480 root /lib/systemd/systemd-udevd /proc/5480/ns/mnt\n
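
          The PATH column can be passed to nsenter directly instead of a pid. A sketch reusing the mount namespace path from the output above:

          nsenter --mount=/proc/5480/ns/mnt lsblk -o NAME,MOUNTPOINT /dev/sdc\n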
          "},{"location":"linux/#access-network-sockets-from-the-command-line","title":"Access network sockets from the command line","text":"

          This is a poor man's netcat, useful for when there is no netcat:

          echo asdf > /dev/tcp/${REMOTE_IP_ADDRESS}/${REMOTE_PORT}\n
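
          A slightly fuller sketch: because /dev/tcp is a bash construct rather than a real device node, you can also open it read-write on a file descriptor and speak a simple protocol, for example a bare HTTP/1.0 request:

          ## open a read-write TCP connection on file descriptor 3 (bash-only feature)\nexec 3<>/dev/tcp/example.com/80\n## send a minimal HTTP/1.0 request\nprintf 'GET / HTTP/1.0\\r\\nHost: example.com\\r\\n\\r\\n' >&3\n## read the response, then close the descriptor\ncat <&3\nexec 3<&-\n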
          "},{"location":"linux/#see-also","title":"See also","text":""},{"location":"linux/#distros","title":"Distros","text":"
          • rhel
          • ubuntu
          "},{"location":"linux/#init-systems","title":"Init systems","text":"
          • openrc
          • systemd
          • sysvinit
          • upstart
          "},{"location":"linux/#filesystems-and-block-devices","title":"Filesystems and block devices","text":"
          • Filesystem Hierarchy Standards: http://refspecs.linuxfoundation.org/FHS_3.0/fhs/index.html
          • LVM
          • ZFS
          "},{"location":"linux/#links","title":"Links","text":"
          • https://blog.quarkslab.com/digging-into-linux-namespaces-part-1.html
          • https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
          • https://wiki.archlinux.org/index.php/Cgroups
          • https://poor.dev/blog/terminal-anatomy
          • https://www.linusakesson.net/programming/tty
          • https://www.sobyte.net/post/2022-05/tty/
          • https://www.linuxcommand.org/tlcl.php
          • https://unix.stackexchange.com/a/367012: Linux sockets full names are limited to 107 characters
          • https://www.linuxatemyram.com
          • https://syscalls.mebeim.net: \"Linux kernel syscall tables\"
          • https://specifications.freedesktop.org/basedir-spec/: \"Various specifications specify files and file formats. This specification defines where these files should be looked for by defining one or more base directories relative to which files should be located.\"
          • https://kevinboone.me/systemd_embedded.html: \"Why systemd is a problem for embedded Linux\"
          "},{"location":"logstash/","title":"logstash","text":"

          \"Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite stash.\" - https://www.elastic.co/products/logstash

          "},{"location":"lsblk/","title":"lsblk","text":"

          \"lsblk lists information about all available or the specified block devices. The lsblk command reads the sysfs filesystem and udev db to gather information.\" - man lsblkq

          "},{"location":"lsblk/#examples","title":"Examples","text":""},{"location":"lsblk/#simple-usage","title":"Simple usage","text":"

          Here is the output of lsblk on an Ubuntu 16.04 Vagrant box:

          $ lsblk\nNAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT\nsda      8:0    0  10G  0 disk\n\u2514\u2500sda1   8:1    0  10G  0 part /\nsdb      8:16   0  10M  0 disk\n
          "},{"location":"lsblk/#show-filesystem-information","title":"Show filesystem information","text":"
          $ lsblk -f\nNAME   FSTYPE  LABEL           UUID                                 MOUNTPOINT\nsda\n\u2514\u2500sda1 ext4    cloudimg-rootfs 73ea38ed-7fcd-4871-8afa-17d36f4e4bfc /\nsdb    iso9660 cidata          2017-08-15-16-47-34-00\n
          "},{"location":"lsblk/#show-some-chosen-fields","title":"Show some chosen fields","text":"

          To see a list of available columns, run lsblk -o.

          $ lsblk -do NAME,SIZE,TYPE,VENDOR,MODEL,SERIAL\nNAME   SIZE TYPE VENDOR   MODEL            SERIAL\nsdd    1.8T disk ATA      Micron_5100_MTFD 18211C914753\nsdb    1.8T disk ATA      Micron_5100_MTFD 18211C914736\nsde    1.8T disk ATA      Micron_5100_MTFD 171216897B54\nsdc    1.8T disk ATA      Micron_5100_MTFD 171216897B63\nsda  223.6G disk ATA      INTEL SSDSC2KB24 BTYS815301VG245AGN\n
          ## Quick way to view ssd vs hdd models and mount points\n## ROTA: 1=hdd, 0=ssd\n## As of writing this method does not show zfs mount points. EG: sdb1 is mounted as zfs\n##\n$ lsblk -o name,rota,mountpoint,vendor,model,serial\nNAME   ROTA MOUNTPOINT VENDOR   MODEL            SERIAL\nsda       0            ATA      Samsung SSD 850  S39KNX0JA59421F\n\u2514\u2500sda1    0 /\nsdb       1            WD       Elements 25A1    575833314435383753393754\n\u251c\u2500sdb1    1\n\u2514\u2500sdb9    1\nsdc       1            Generic  External         002210107962\n\u251c\u2500sdc1    1\n\u2514\u2500sdc2    1 /mnt/sdc\nsr0       1            MATSHITA DVD+-RW SW830    CN0X85FC3686\n
          "},{"location":"lsblk/#show-all-available-information","title":"Show all available information","text":"

          The output here is really wide, but emitting it as JSON and piping it through jq makes it easier to read. Thankfully lsblk has a JSON output option, unlike many unix CLI tools.

          $ lsblk -O -J | jq .\n{\n  \"blockdevices\": [\n    {\n      \"name\": \"sda\",\n      \"kname\": \"sda\",\n      \"maj:min\": \"8:0\",\n      \"fstype\": null,\n      \"mountpoint\": null,\n      \"label\": null,\n      \"uuid\": null,\n      \"parttype\": null,\n      \"partlabel\": null,\n      \"partuuid\": null,\n      \"partflags\": null,\n      \"ra\": \"128\",\n      \"ro\": \"0\",\n      \"rm\": \"0\",\n      \"hotplug\": \"0\",\n      \"model\": \"HARDDISK        \",\n      \"serial\": null,\n      \"size\": \"10G\",\n      \"state\": \"running\",\n      \"owner\": \"root\",\n      \"group\": \"disk\",\n      \"mode\": \"brw-rw----\",\n      \"alignment\": \"0\",\n      \"min-io\": \"512\",\n      \"opt-io\": \"0\",\n      \"phy-sec\": \"512\",\n      \"log-sec\": \"512\",\n      \"rota\": \"1\",\n      \"sched\": \"deadline\",\n      \"rq-size\": \"128\",\n      \"type\": \"disk\",\n      \"disc-aln\": \"0\",\n      \"disc-gran\": \"0B\",\n      \"disc-max\": \"0B\",\n      \"disc-zero\": \"0\",\n      \"wsame\": \"0B\",\n      \"wwn\": null,\n      \"rand\": \"1\",\n      \"pkname\": null,\n      \"hctl\": \"2:0:0:0\",\n      \"tran\": \"spi\",\n      \"subsystems\": \"block:scsi:pci\",\n      \"rev\": \"1.0 \",\n      \"vendor\": \"VBOX    \",\n      \"children\": [\n        {\n          \"name\": \"sda1\",\n          \"kname\": \"sda1\",\n          \"maj:min\": \"8:1\",\n          \"fstype\": \"ext4\",\n          \"mountpoint\": \"/\",\n          \"label\": \"cloudimg-rootfs\",\n          \"uuid\": \"73ea38ed-7fcd-4871-8afa-17d36f4e4bfc\",\n          \"parttype\": \"0x83\",\n          \"partlabel\": null,\n          \"partuuid\": \"8d714561-01\",\n          \"partflags\": \"0x80\",\n          \"ra\": \"128\",\n          \"ro\": \"0\",\n          \"rm\": \"0\",\n          \"hotplug\": \"0\",\n          \"model\": null,\n          \"serial\": null,\n          \"size\": \"10G\",\n          \"state\": null,\n          \"owner\": \"root\",\n          \"group\": \"disk\",\n          \"mode\": \"brw-rw----\",\n          \"alignment\": \"0\",\n          \"min-io\": \"512\",\n          \"opt-io\": \"0\",\n          \"phy-sec\": \"512\",\n          \"log-sec\": \"512\",\n          \"rota\": \"1\",\n          \"sched\": \"deadline\",\n          \"rq-size\": \"128\",\n          \"type\": \"part\",\n          \"disc-aln\": \"0\",\n          \"disc-gran\": \"0B\",\n          \"disc-max\": \"0B\",\n          \"disc-zero\": \"0\",\n          \"wsame\": \"0B\",\n          \"wwn\": null,\n          \"rand\": \"1\",\n          \"pkname\": \"sda\",\n          \"hctl\": null,\n          \"tran\": null,\n          \"subsystems\": \"block:scsi:pci\",\n          \"rev\": null,\n          \"vendor\": null\n        }\n      ]\n    },\n    {\n      \"name\": \"sdb\",\n      \"kname\": \"sdb\",\n      \"maj:min\": \"8:16\",\n      \"fstype\": \"iso9660\",\n      \"mountpoint\": null,\n      \"label\": \"cidata\",\n      \"uuid\": \"2017-08-15-16-47-34-00\",\n      \"parttype\": null,\n      \"partlabel\": null,\n      \"partuuid\": null,\n      \"partflags\": null,\n      \"ra\": \"128\",\n      \"ro\": \"0\",\n      \"rm\": \"0\",\n      \"hotplug\": \"0\",\n      \"model\": \"HARDDISK        \",\n      \"serial\": null,\n      \"size\": \"10M\",\n      \"state\": \"running\",\n      \"owner\": \"root\",\n      \"group\": \"disk\",\n      \"mode\": \"brw-rw----\",\n      \"alignment\": \"0\",\n      \"min-io\": \"512\",\n      \"opt-io\": \"0\",\n      \"phy-sec\": \"512\",\n      
\"log-sec\": \"512\",\n      \"rota\": \"1\",\n      \"sched\": \"deadline\",\n      \"rq-size\": \"128\",\n      \"type\": \"disk\",\n      \"disc-aln\": \"0\",\n      \"disc-gran\": \"0B\",\n      \"disc-max\": \"0B\",\n      \"disc-zero\": \"0\",\n      \"wsame\": \"32M\",\n      \"wwn\": null,\n      \"rand\": \"1\",\n      \"pkname\": null,\n      \"hctl\": \"2:0:1:0\",\n      \"tran\": \"spi\",\n      \"subsystems\": \"block:scsi:pci\",\n      \"rev\": \"1.0 \",\n      \"vendor\": \"VBOX    \"\n    }\n  ]\n}\n
          "},{"location":"lsblk/#see-also","title":"See also","text":"
          • findmnt
          "},{"location":"lshw/","title":"lshw","text":"

          lshw is a CLI tool on linux that shows information about your hardware. It outputs several formats, both human- and machine-friendly.

          "},{"location":"lshw/#examples","title":"Examples","text":""},{"location":"lshw/#lshw-help","title":"lshw --help","text":"
          $ lshw --help\nHardware Lister (lshw) - B.02.18\nusage: lshw [-format] [-options ...]\n       lshw -version\n\n        -version        print program version (B.02.18)\n\nformat can be\n        -html           output hardware tree as HTML\n        -xml            output hardware tree as XML\n        -json           output hardware tree as a JSON object\n        -short          output hardware paths\n        -businfo        output bus information\n\noptions can be\n        -class CLASS    only show a certain class of hardware\n        -C CLASS        same as '-class CLASS'\n        -c CLASS        same as '-class CLASS'\n        -disable TEST   disable a test (like pci, isapnp, cpuid, etc. )\n        -enable TEST    enable a test (like pci, isapnp, cpuid, etc. )\n        -quiet          don't display status\n        -sanitize       sanitize output (remove sensitive information like serial numbers, etc.)\n        -numeric        output numeric IDs (for PCI, USB, etc.)\n        -notime         exclude volatile attributes (timestamps) from output\n
          "},{"location":"lshw/#example-of-short-output","title":"Example of -short output","text":"

          It's best to use sudo, otherwise you will not see all hardware.

          $ sudo lshw -short\nH/W path                   Device      Class          Description\n=================================================================\n                                       system         OptiPlex 7010 (OptiPlex 7010)\n/0                                     bus            0YXT71\n/0/0                                   memory         64KiB BIOS\n/0/5e                                  processor      Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz\n/0/5e/3a                               memory         256KiB L1 cache\n/0/5e/3b                               memory         1MiB L2 cache\n/0/5e/3c                               memory         8MiB L3 cache\n/0/3d                                  memory         16GiB System Memory\n/0/3d/0                                memory         4GiB DIMM DDR3 Synchronous 1600 MHz (0.6 ns)\n/0/3d/1                                memory         4GiB DIMM DDR3 Synchronous 1600 MHz (0.6 ns)\n/0/3d/2                                memory         4GiB DIMM DDR3 Synchronous 1600 MHz (0.6 ns)\n/0/3d/3                                memory         4GiB DIMM DDR3 Synchronous 1600 MHz (0.6 ns)\n/0/100                                 bridge         Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller\n/0/100/2                               display        Xeon E3-1200 v2/3rd Gen Core processor Graphics Controller\n/0/100/14                              bus            7 Series/C210 Series Chipset Family USB xHCI Host Controller\n/0/100/14/0                usb3        bus            xHCI Host Controller\n/0/100/14/1                usb4        bus            xHCI Host Controller\n/0/100/14/1/4              scsi6       storage        Elements 25A1\n/0/100/14/1/4/0.0.0        /dev/sdb    disk           4TB Elements 25A1\n/0/100/14/1/4/0.0.0/1      /dev/sdb1   volume         3725GiB OS X ZFS partition or Solaris /usr partition\n/0/100/14/1/4/0.0.0/9      /dev/sdb9   volume         8191KiB reserved partition\n/0/100/16                              communication  7 Series/C216 Chipset Family MEI Controller #1\n/0/100/16.3                            communication  7 Series/C210 Series Chipset Family KT Controller\n/0/100/19                  eno1        network        82579LM Gigabit Network Connection\n/0/100/1a                              bus            7 Series/C216 Chipset Family USB Enhanced Host Controller #2\n/0/100/1a/1                usb1        bus            EHCI Host Controller\n/0/100/1a/1/1                          bus            Integrated Rate Matching Hub\n/0/100/1a/1/1/2                        generic        BCM20702A0\n/0/100/1b                              multimedia     7 Series/C216 Chipset Family High Definition Audio Controller\n/0/100/1d                              bus            7 Series/C216 Chipset Family USB Enhanced Host Controller #1\n/0/100/1d/1                usb2        bus            EHCI Host Controller\n/0/100/1d/1/1                          bus            Integrated Rate Matching Hub\n/0/100/1d/1/1/5                        bus            USB2.0 Hub\n/0/100/1d/1/1/5/2          scsi7       storage        External\n/0/100/1d/1/1/5/2/0.0.0    /dev/sdc    disk           256GB External\n/0/100/1d/1/1/5/2/0.0.0/1              volume         512MiB EFI GPT partition\n/0/100/1d/1/1/5/2/0.0.0/2  /dev/sdc2   volume         221GiB EXT4 volume\n/0/100/1e                              bridge         82801 PCI Bridge\n/0/100/1f                              bridge         Q77 Express Chipset LPC Controller\n/0/100/1f.2                
            storage        7 Series/C210 Series Chipset Family 6-port SATA Controller [AHCI mode]\n/0/100/1f.3                            bus            7 Series/C216 Chipset Family SMBus Controller\n/0/1                       scsi0       storage\n/0/1/0.0.0                 /dev/sda    disk           256GB Samsung SSD 850\n/0/1/0.0.0/1               /dev/sda1   volume         238GiB EXT4 volume\n/0/2                       scsi1       storage\n/0/2/0.0.0                 /dev/cdrom  disk           DVD+-RW SW830\n/1                         docker0     network        Ethernet interface\n
          "},{"location":"lshw/#see-also","title":"See also","text":"
          • lsblk
          • lscpu
          • lslogins
          • lsmod
          • lsof
          • lspci
          • lsusb
          "},{"location":"lsof/","title":"lsof","text":"

          lsof lists open files. This CLI tool is available on most *nix OSes.

          On linux, a much quicker alternative may be to use fuser
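
          For example, to see which processes have a given file open (the path here is just an example; -v gives verbose, ps-like output):

          fuser -v /var/log/syslog\n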

          "},{"location":"lsof/#examples","title":"Examples","text":""},{"location":"lsof/#list-files-open-by-a-given-user","title":"List files open by a given user","text":"
          lsof -u username\n
          "},{"location":"lsof/#show-listening-tcp-sockets","title":"show listening TCP sockets","text":"

          Since everything in unix is a file, including network sockets, you can list open sockets and the programs that have them open. However, this is notably unreliable in Docker, so don't trust this completely. When in doubt, double check against ss -nlptu or netstat -nlptu, though this will likely only work in linux.

          lsof -iTCP -sTCP:LISTEN\n
          "},{"location":"lsof/#show-a-sorted-list-of-processes-by-listening-port","title":"Show a sorted list of processes by listening port","text":"
          lsof -iTCP -sTCP:LISTEN -P | sort -k2 -t: -n\n
          "},{"location":"lsof/#show-what-process-is-using-port-80-or-443-with-port-numbers","title":"show what process is using port 80 or 443 with port numbers","text":"
          ## -w make output wide\n## -n makes lsof not resolve hostnames from ip addresses\n## -P makes lsof not resolve service names for port numbers.\n## -iTCP shows IP TCP sockets.\n\nlsof -wnP -iTCP:80,443\n
          "},{"location":"lsof/#show-the-selinux-context-for-sockets","title":"show the selinux context for sockets","text":"
          lsof -i -Z\n

          See man page for extended syntax around Z

          "},{"location":"lsof/#see-also","title":"See Also","text":"
          • Process Explorer - LSOF type functionality for windows.
          • fuser
          "},{"location":"lua/","title":"LUA","text":"

          \"Lua is a powerful, efficient, lightweight, embeddable scripting language. It supports procedural programming, object-oriented programming, functional programming, data-driven programming, and data description.\" - https://www.lua.org/about.html

          "},{"location":"lua/#links","title":"Links","text":"
          • https://www.lua.org
          • https://learnxinyminutes.com/docs/lua
          • https://pico-8.fandom.com/wiki/Lua
          • https://www.lexaloffle.com/dl/docs/pico-8_manual.html#Lua_Syntax_Primer
          "},{"location":"lvm/","title":"lvm","text":"

          Linux Logical Volume Manager.

          • Tutorial
          • Overview
          • Managing
          "},{"location":"lvm/#general-flow","title":"General flow","text":"

          Physical volumes (pv) are grouped into volume groups (vg). Volume groups are sliced up into logical volumes (lv). Because of that, the general flow is something like:

          ## Partitioning is not necessary, so no need for fdisk or sgdisk\npvcreate /dev/sd{x..z}\nvgcreate vg_scratch /dev/sd{x..z}\nlvcreate -l 95%FREE -n lv_scratch vg_scratch\nmkfs.ext4 /dev/vg_scratch/lv_scratch\n
          "},{"location":"lvm/#examples","title":"Examples","text":""},{"location":"lvm/#show-a-bunch-of-info","title":"Show a bunch of info","text":"
          pvdisplay -v\npvs -v\npvs -a\npvs --segments\nvgdisplay -v\nvgs -v\nvgs -a -o +devices\n
          "},{"location":"lvm/#show-system-disks-and-if-they-are-in-an-lvm","title":"Show system disks and if they are in an LVM","text":"

          lvmdiskscan\n

          "},{"location":"lvm/#show-all-logical-volumes","title":"Show all logical volumes","text":"

          lvs\n

          "},{"location":"lvm/#activate-all-volume-groups","title":"Activate all volume groups","text":"

          vgchange -a y\n

          "},{"location":"lvm/#create-a-physical-volume","title":"Create a physical volume","text":"

          A physical volume is a disk or partition that has been initialized for use by LVM. Physical volumes are then grouped into volume groups, from which logical volumes are allocated.

          pvcreate /dev/sdb2 /dev/sdc2\n

          "},{"location":"lvm/#create-a-logical-volume","title":"Create a logical volume","text":"

          This creates a logical volume with a specific name in a volume group named vg_data

          lvcreate -L 10G -n lv_name vg_data\n

          "},{"location":"lvm/#show-how-each-logical-volume-is-set-up","title":"Show how each logical volume is set up","text":"

          lvdisplay\n

          "},{"location":"lvm/#show-free-extents","title":"Show free extents","text":"

          vgs -o vg_free_count\n

          "},{"location":"lvm/#extend-a-volume-group-to-1tb","title":"Extend a volume group to 1TB","text":"
          lvextend -L 1T /dev/vgroot/lv_srv && \\\nresize2fs /dev/mapper/vgroot-lv_srv && \\\ndf -h /srv\n
          "},{"location":"lvm/#extend-a-volume-group-to-its-max","title":"Extend a volume group to its max","text":"
          lvextend -l +100%FREE /dev/vgroot/lv_srv && \\\nresize2fs /dev/mapper/vgroot-lv_srv && \\\ndf -h /srv\n
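
          resize2fs only works on ext filesystems. If the logical volume holds XFS, a sketch of the equivalent (xfs_growfs operates on the mount point, assumed here to be /srv):

          lvextend -l +100%FREE /dev/vgroot/lv_srv && \\\nxfs_growfs /srv && \\\ndf -h /srv\n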
          "},{"location":"lxc/","title":"LXC","text":"

          \"LXC is a userspace interface for the Linux kernel containment features. Through a powerful API and simple tools, it lets Linux users easily create and manage system or application containers.\" - https://linuxcontainers.org

          "},{"location":"lxc/#see-also","title":"See Also","text":"
          • Docker
          "},{"location":"machine-learning/","title":"Machine Learning","text":"

          Machine learning is the subfield of computer science that, according to Arthur Samuel in 1959, gives \"computers the ability to learn without being explicitly programmed.\" - https://en.wikipedia.org/wiki/Machine_learning

          "},{"location":"machine-learning/#terminology-and-concepts","title":"Terminology and concepts","text":"
          • Supervised machine learning: The program is \"trained\" on a pre-defined set of \"training examples\", which then facilitate its ability to reach an accurate conclusion when given new data.
          • Unsupervised machine learning: The program is given a bunch of data and must find patterns and relationships therein.
          • \"The goal of ML is never to make 'perfect' guesses, because ML deals in domains where there is no such thing. The goal is to make guesses that are good enough to be useful.\"
          • Machine learning builds heavily on statistics.
          "},{"location":"machine-learning/#prerequisites","title":"Prerequisites","text":"
          • Statistics
          • Linear Algebra
          • Calculus
          "},{"location":"machine-learning/#resources","title":"Resources","text":"
          • Reddit /r/machinelearning wiki
          • Data Science From Scratch book
          • Andrew Ng's Coursera course on ML
          • Machine Learning with Python / Practical Machine Learning Tutorial with Python Introduction
          • Your First Machine Learning Project in Python Step-By-Step
          • Example Machine Learning IPython Notebook
          • FastML: Machine Learning Made Easy
          • Tensorflow
          • My Neural Network isn't working! What should I do?
          • Machine Learning Recipes with Josh Gordon - Google Developers
          "},{"location":"machine-learning/#see-also","title":"See Also","text":"
          • Life 3.0: Being Human in the Age of Artificial Intelligence: https://www.amazon.com/Life-3-0-Being-Artificial-Intelligence/dp/1101946598
          • DeepMind and Blizzard open StarCraft II as an AI research environment
          • Intuitive RL: Intro to Advantage-Actor-Critic (A2C)
          • Deep Reinforcement Learning instrumenting bettercap for WiFi pwning.
          • Creating music through image generation of spectrograms.
          "},{"location":"macos/","title":"macOS","text":"

          Apple's Unix desktop operating system.

          "},{"location":"macos/#links","title":"Links","text":"
          • Significant Changes in macOS 10.15 Catalina of Interest to Mac Admins
          • https://git.herrbischoff.com/awesome-macos-command-line/about/
          • https://support.apple.com/en-us/108900: How to revive or restore Mac firmware
          • https://weiyen.net/articles/useful-macos-cmd-line-utilities
          • https://dortania.github.io/OpenCore-Legacy-Patcher: Run newer macOS versions on older, unsupported Mac hardware.
          "},{"location":"macos/#useful-commands","title":"Useful Commands","text":"
          • caffeinate
          • mdutil
          • networksetup
          • scutil
          • serverinfo
          • sharing
          • tccutil
          "},{"location":"macos/#logout-user-from-a-shell","title":"Logout user from a shell","text":"
          sudo launchctl bootout gui/$(id -u \"$USERNAME\")\n

          or

          sudo launchctl bootout user/$(id -u \"$USERNAME\")\n
          "},{"location":"macos/#fix-sshd-client-timeout-config","title":"Fix sshd client timeout config","text":"

          macOS has an /etc/ssh/sshd_config that will never disconnect idle clients. With sketchy wifi, or for a variety of other reasons, this can cause ssh connections to pile up, causing a DoS on the ssh server. When this happens, a client that is attempting to connect may see kex_exchange_identification: read: Connection reset by peer. To fix this, set the following config values to anything other than 0. See man 5 sshd_config for more info.

          The following settings would allow an unresponsive ssh session 10 minutes before terminating it:

          ClientAliveInterval 120\nClientAliveCountMax 5\n

          Using BSD sed, you can quickly set this:

          sudo sed -i '' -E 's/^#?ClientAliveInterval [0-9]+/ClientAliveInterval 120/ ; s/^#?ClientAliveCountMax [0-9]+/ClientAliveCountMax 5/ ;' /etc/ssh/sshd_config\nsudo bash -c \"launchctl stop com.openssh.sshd ; launchctl start com.openssh.sshd ;\"\n
          "},{"location":"macos/#show-hardware-info","title":"Show hardware info","text":"
          system_profiler SPHardwareDataType\n
          "},{"location":"macos/#install-package-from-cli","title":"Install package from CLI","text":"

          Use brew.sh for most things. Otherwise:

          sudo installer -pkg /Volumes/ExifTool-9.16/ExifTool-9.16.pkg -target /\n
          "},{"location":"macos/#start-ftp-server","title":"Start FTP server","text":"
          sudo -s launchctl load -w /System/Library/LaunchDaemons/ftp.plist\n
          "},{"location":"macos/#check-swap-usage","title":"Check swap usage","text":"
          sysctl vm.swapusage\n
          "},{"location":"macos/#disable-wifi-disconnect-when-locking-screen","title":"Disable wifi disconnect when locking screen","text":"
          sudo /System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources airport en1 prefs DisconnectOnLogout=NO\n
          "},{"location":"macos/#show-some-downloaded-files","title":"Show some downloaded files","text":"

          This shows a list of all the quarantine checked downloads:

          sqlite3 ~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV* 'select distinct LSQuarantineDataURLString from LSQuarantineEvent'\n
          "},{"location":"macos/#send-notifications-from-terminal","title":"Send Notifications from Terminal","text":"
          sudo gem install terminal-notifier\nterminal-notifier -message \"Hello, this is my message\" -title \"Message Title\"\n
          "},{"location":"macos/#enable-verbose-eap-logging","title":"Enable verbose eap logging","text":"
          sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.eapolclient LogFlags -int -1\n
          "},{"location":"macos/#network","title":"Network","text":"
          • gif0 - Generic Tunnel Interface. See man gif.
          • stf0 - Six To Four tunnel. See man stf.
          "},{"location":"macos/#migration-assistant","title":"Migration Assistant","text":"

          Some files are not migrated using Migration Assistant. EG:

          • /etc/hosts
          • crontabs
          • /etc/apache2/httpd.conf
          "},{"location":"macos/#remap-caps-lock-to-escape","title":"Remap caps-lock to escape","text":"

          This remaps caps-lock using a different mechanism than the Keyboard pref pane, so the change will not be reflected there. Save both of these functions so you can reset to normal behavior when you need it.

          caps_to_esc_map(){\n    hidutil property --set \\\n    '{\"UserKeyMapping\":[{\"HIDKeyboardModifierMappingSrc\":0x700000039,\"HIDKeyboardModifierMappingDst\":0x700000029}]}'\n}\n\ncaps_to_esc_unmap(){\n    hidutil property --set '{\"UserKeyMapping\":[]}'\n}\n
          "},{"location":"make/","title":"GNU make","text":"

          GNU make

          "},{"location":"make/#automatic-variables","title":"Automatic variables","text":"

          make requires commands to begin with tabs, so copying the examples below will not work unless you replace the leading spaces with tabs. This is probably the most frustrating thing about make. In the example below, $@ expands to the target, $^ to the full list of prerequisites, and $* to the stem matched by %.

          $ cat Makefile\nall: foo_one foo_two\n\nfoo_%: bar_a bar_b\n    @echo $*: this is $@ and it requires $^\n\nbar_%: baz\n    @echo $*: this is $@ and it requires $^\n\nbaz:\n    @echo this is baz\n\n$ make\nthis is baz\na: this is bar_a and it requires baz\nb: this is bar_b and it requires baz\none: this is foo_one and it requires bar_a bar_b\ntwo: this is foo_two and it requires bar_a bar_b\n
          "},{"location":"make/#links","title":"Links","text":"
          • https://www.gnu.org/software/make/manual/html_node/Automatic-Variables.html
          • https://www.gnu.org/software/make/manual/html_node/Standard-Targets.html
          • https://nullprogram.com/blog/2017/08/20: A Tutorial on Portable Makefiles
          "},{"location":"marlin/","title":"Marlin","text":"

          \"Marlin is an open source firmware for the RepRap family of replicating rapid prototypers \u2014 popularly known as \u201c3D printers.\u201d It was derived from Sprinter and grbl, and became a standalone open source project on August 12, 2011 with its Github release. Marlin is licensed under the GPLv3 and is free for all applications.\" - https://marlinfw.org/docs/basics/introduction.html

          Marlin is used on the Ender line of printers, as well as many others.

          "},{"location":"marlin/#examples","title":"Examples","text":""},{"location":"marlin/#show-firmware-info","title":"Show firmware info","text":"

          In a serial terminal, type M115. You will see something like:

          Send: M115\nRecv: FIRMWARE_NAME:2.0.8.2 (Jul  6 2022 19:18:56) SOURCE_CODE_URL:www.creality.com PROTOCOL_VERSION:1.0 MACHINE_TYPE:Ender-3 Pro EXTRUDER_COUNT:1 UUID:cede2a2f-41a2-4748-9b12-c55c62f367ff\nRecv: Cap:SERIAL_XON_XOFF:0\nRecv: Cap:BINARY_FILE_TRANSFER:0\nRecv: Cap:EEPROM:1\n...lots of similar lines lines...\nRecv: Cap:CHAMBER_TEMPERATURE:0\nRecv: Cap:COOLER_TEMPERATURE:0\nRecv: Cap:MEATPACK:0\nRecv: ok\nSend: M155 S2\nRecv: ok\n
          "},{"location":"marlin/#perform-a-pid-autotune","title":"Perform a PID Autotune","text":"

          In octoprint or some other terminal interface:

          1. Enter M303 S215 C10 to perform a 215\u00b0C tuning test 10 times. You will get Kp, Ki, and Kd values back at the end of the test.
          2. Enter those values in the terminal as M301 Pxx.xx Ixx.xx Dxx.xx
          3. Enter M500 to save the values to the EEPROM.
          "},{"location":"marlin/#see-also","title":"See Also","text":"
          • 3D Printing
          "},{"location":"math/","title":"math","text":""},{"location":"math/#links","title":"Links","text":"
          • https://en.wikipedia.org/wiki/Category:Probability_theory_paradoxes
          • https://en.wikipedia.org/wiki/Set_(mathematics) and https://realpython.com/python-sets
          "},{"location":"mdraid/","title":"mdraid","text":"

          Linux software raid.

          "},{"location":"mdraid/#examples","title":"Examples","text":""},{"location":"mdraid/#show-details-of-an-array","title":"Show details of an array","text":"
          mdadm --detail /dev/md0\n
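
          For a quick status summary of all arrays (including rebuild progress), the kernel also exposes:

          cat /proc/mdstat\n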
          "},{"location":"mdraid/#links","title":"Links","text":"
          • http://poweredgec.dell.com/ - the Dell ldstate command is a good view into software raid and hardware raid (eg: megaraid, sas2) under one command.
          • https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/5/html/Deployment_Guide/s2-raid-manage-removing.html
          • http://tldp.org/HOWTO/Software-RAID-HOWTO.html
          • https://raid.wiki.kernel.org/index.php/Linux_Raid
          • https://raid.wiki.kernel.org/index.php/RAID_setup
          "},{"location":"mechanical-keyboards/","title":"Keyboards","text":""},{"location":"mechanical-keyboards/#links","title":"Links","text":"
          • https://fingerpunch.xyz: Custom keyboards made by my friend Sadek.
          • https://www.caniusevia.com: A QMK firmware with online programming.
          • https://ergodox-ez.com: A great lineup of programmable mechanical keyboards.
          • https://configure.zsa.io: Ergodox online keyboard configuration and training tool.
          • https://shop.pimoroni.com/products/keybow-2040: A circuitpython programmable mechanical macro keypad based on Pico 2040.
          • https://shop.pimoroni.com/products/pico-rgb-keypad-base: A Raspberry Pi Pico compatible programmable mushy macro keypad.
          • https://1upkeyboards.com/shop/controllers/usb-to-usb-converter: Turn any usb keyboard into a programmable keyboard.
          "},{"location":"metallb/","title":"MetalLB","text":"

          \"MetalLB is a load-balancer implementation for bare metal Kubernetes clusters, using standard routing protocols.\" - https://metallb.universe.tf/

          \"This is not an official Google project, it is just code that happens to be owned by Google.\" - https://github.com/google/metallb

          "},{"location":"metallb/#links","title":"Links","text":"
          • https://github.com/google/metallb
          • https://metallb.universe.tf/installation
          • https://metallb.universe.tf/configuration/#layer-2-configuration
          "},{"location":"metallb/#see-also","title":"See Also","text":"
          • kubernetes
          • minikube
          "},{"location":"microbit/","title":"micro:bit","text":"

          \"micro:bit is a tiny programmable computer, designed to make learning and teaching easy and fun!\" - https://microbit.org

          "},{"location":"microbit/#notes","title":"Notes","text":"
          • Weight: 8g
          • Weight with 2x AAA batteries in case: 37g
          • Python doesn't support BTLE, JS does
          "},{"location":"microbit/#links","title":"Links","text":"
          • Hardware Description: https://tech.microbit.org/hardware/
          • Python API docs: https://microbit-micropython.readthedocs.io
          • Microbit focused python editor: https://codewith.mu
          • Online python editor: https://python.microbit.org
          • Online drag-and-drop and javascript editor: https://makecode.microbit.org
          "},{"location":"microcontrollers/","title":"microcontrollers","text":"

          Notes about microcontrollers like the esp8266.

          "},{"location":"microcontrollers/#links","title":"Links","text":"
          • https://blog.squix.org/2016/07/esp8266-based-plane-spotter-how-to.html: esp8266 with rtl-sdr for plane spotting.
          • https://youtu.be/WlkMbNnIECM: ping pong ball lamps with neopixels.
          • https://kno.wled.ge: WLED, by FAR the easiest way to use neopixels.
          • https://docs.micropython.org/en/latest/esp8266/quickref.html: Micropython on the esp8266.
          • https://learn.adafruit.com: There is SO MUCH awesome creativity with microcontrollers in here.
          • https://diyi0t.com/esp8266-nodemcu-tutorial: Good info on the esp8266. There are some conflicting forum posts out there, and the spec table here was useful for me.
          • https://youtu.be/udo8mv5oarg / https://github.com/bertrandom/snowball-thrower: Playing Zelda Breath of the Wild Bowling minigame using Teensy
          • https://tasmota.github.io: General purpose ESP firmware for connecting to and automating various sensors and devices.
          • https://www.solder.party/docs
          • https://www.withdiode.com: Modern circuit simulator. \"Build, program, and simulate hardware in the browser.\"
          • https://vanhunteradams.com/Pico/Bootloader/Boot_sequence.html
          "},{"location":"microcontrollers/#my-personal-projects","title":"My personal projects","text":"

          These are all components I've used. They may not be the best or even the most suitable, but I've had success with all of the things listed below.

          N.B.: There are some amazon links in here. They are all smile.amazon links, not affiliate links.

          "},{"location":"microcontrollers/#led-projects","title":"LED projects","text":""},{"location":"microcontrollers/#microcontrollers_1","title":"Microcontrollers","text":"

          As of January 2022 I've only used the HiLetgo NodeMCU ESP8266 (CP2102 ESP-12E). The NodeMCU ESP8266 has a built in voltage regulator on its voltage-in pin (Vin) that tolerates over 12v input, which means you can use a single 12v power supply to power both the device and one or more 12v LED strands or strips. Here's a wiring diagram of how I used to do it. I've iterated on the wiring since I first made this diagram, but logically the circuit itself is identical to this.

          "},{"location":"microcontrollers/#led-strands","title":"LED strands","text":"
          • Alitove WS2811 12v strand requires 12v power input.
          • Alitove WS2811 5v strand is identical to the previous, but because it's 5v it can run off usb power to the microcontroller, which is relayed over the vin port. See this wiring diagram. Because you can run the microcontroller and lights off usb power, this is a portable setup that would be good for bikes, cosplay, etc..
          • Alitove 5v strip is electrically the same as the above 5v strand, but in a different form factor. This is good for making lamps, accent lighting at home, etc..
          "},{"location":"microcontrollers/#software","title":"Software","text":"

          For software I've been using the LED-focused firmware WLED, which has a bunch of pre-built animations and stuff. It doesn't easily let you do things like control individual LEDs, so if you want those features you should look at something else like circuitpython or micropython.

          Another firmware that may be worth checking out is Tasmota. I haven't used this, but it purportedly lets you animate LEDs and also connect a variety of other sensors and devices, and is more smart-home focused.

          "},{"location":"microcontrollers/#additional-hardware","title":"Additional hardware","text":"
          • Breadboard jumpers are great for avoiding having to solder things together.
          • Lever nuts also help avoid soldering.
          • A high-wattage power supply is good regardless of whether you're going with 12v or 5v. If you don't have enough amps, your LEDs will not go as bright as they potentially could.
          "},{"location":"micropython/","title":"MicroPython","text":"

          \"MicroPython is a lean and efficient implementation of the Python 3 programming language that includes a small subset of the Python standard library and is optimised to run on microcontrollers and in constrained environments.\"- https://micropython.org

          https://docs.micropython.org

          "},{"location":"micropython/#hardware","title":"Hardware","text":"
          • ESP8266
          • pyboard
          • Arduino Due
          • microbit
          "},{"location":"micropython/#tips","title":"Tips","text":"
          • screen /dev/usb.whatever sometimes fails to get a USB REPL. Try using picocom instead (example below).
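
          A minimal picocom invocation looks like this (a sketch assuming the common 115200 baud rate; your serial device path will differ):

          picocom -b 115200 /dev/tty.usbserial-0001\n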
          "},{"location":"micropython/#esp8266-workflow","title":"ESP8266 workflow","text":"
          • Info on flashing can be found here: https://docs.micropython.org/en/latest/esp8266/tutorial/intro.html (see the sketch after this list)
          • brew install picocom
          • pip install --user adafruit-ampy esptool
          • export AMPY_PORT=/dev/tty.usbserial-0001 sets up ampy with the needed serial port.
          • ampy run test.py runs test.py from the current local directory on the ESP8266.
          • ampy --port /serial/port put test.py /foo/bar.py copies a file to the board. Use get to copy the other way. Omit the destination file name to just view the file.
          • Modify boot.py for any boot setup. Run ampy get boot.py to see the defaults for the flash you have loaded.
          • Modify main.py with your main program code to auto-run.
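
          Putting the flashing steps together, here's a sketch of erasing and flashing an ESP8266 with esptool (the firmware filename is a placeholder; see the intro tutorial linked above for current builds):

          esptool.py --port /dev/tty.usbserial-0001 erase_flash\nesptool.py --port /dev/tty.usbserial-0001 --baud 460800 write_flash --flash_size=detect 0 esp8266-firmware.bin\n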
          "},{"location":"micropython/#links","title":"Links","text":"
          • My microcontrollers notes have more info about LED projects and associated hardware.
          • https://docs.micropython.org
          • https://www.digikey.com/en/maker/projects/micropython-basics-load-files-run-code/fb1fcedaf11e4547943abfdd8ad825ce
          • TalkPython.fm Episode #108: MicroPython and Open Source Hardware at Adafruit
          • https://github.com/scientifichackers/ampy
          • https://youtu.be/odffCT0aW58: Tutorial on how to use Jupyter as a micropython development interface for neopixels
          "},{"location":"micropython/#see-also","title":"See also","text":"
          • python
          • circuitpython
          "},{"location":"minikube/","title":"minikube","text":"

          \"Minikube is a tool that makes it easy to run Kubernetes locally. Minikube runs a single-node Kubernetes cluster inside a VM on your laptop for users looking to try out Kubernetes or develop with it day-to-day.\" - https://github.com/kubernetes/minikube

          "},{"location":"minikube/#examples","title":"Examples","text":""},{"location":"minikube/#list-available-addons","title":"List available addons","text":"
          $ minikube addons list\n- addon-manager: enabled\n- dashboard: disabled\n- default-storageclass: enabled\n- efk: disabled\n- freshpod: disabled\n- gvisor: disabled\n- heapster: disabled\n- ingress: enabled\n- kube-dns: disabled\n- metrics-server: disabled\n- nvidia-driver-installer: disabled\n- nvidia-gpu-device-plugin: disabled\n- registry: enabled\n- registry-creds: disabled\n- storage-provisioner: enabled\n- storage-provisioner-gluster: disabled\n
          "},{"location":"minikube/#launch-a-specific-version-of-k8s-in-minikube","title":"Launch a specific version of k8s in Minikube","text":"
          minikube start --kubernetes-version='v1.14.6'\n
          "},{"location":"minikube/#enable-the-registry-addon","title":"Enable the registry addon","text":"
          minikube addons enable registry\n
          "},{"location":"minikube/#links","title":"Links","text":"
          • https://github.com/kubernetes/minikube
          • https://minikube.sigs.k8s.io/docs
          • https://metallb.universe.tf/community/#testing-in-minikube
          "},{"location":"misc/","title":"Miscellaneous","text":"

          Things that don't have much content and don't deserve their own page.

          "},{"location":"misc/#misc","title":"Misc","text":"
          • https://developer.okta.com/blog/2019/10/21/illustrated-guide-to-oauth-and-oidc: An Illustrated Guide to OAuth and OpenID Connect
          • https://youtu.be/LPqqoOm8y5s?t=3816: Corey Quinn Scale 14x - Docker Must Die
          • https://www.youtube.com/watch?v=0T2XFSALOaU: Docker in Production: Tales From the Engine Room - Bryan Cantrill (Surge 2015)
          • https://vimeo.com/92770954: Sensu @ Yelp part 1
          • https://vimeo.com/92838680: Sensu @ Yelp part 2
          • https://www.youtube.com/watch?v=jr4zQc3g1Ts&t=416: Terrible ideas in Git
          • https://www.youtube.com/watch?v=9koJOCL8Bms: The Well Tempered API - Why can we play 400 year old music but our software only lasts a few months or years?
          • https://www.youtube.com/watch?v=ybGrm73dXow: Using Swagger to tame HTTP/JSON interfaces
          • https://www.usenix.org/conference/lisa13/working-theory-monitoring: Working Theory of Monitoring
          • https://youtu.be/YFDwdRVTg4g?t=33m11s: Yelp's Theory of PaaSes talk from Box SRE Hour
          • https://slack.engineering/introducing-nebula-the-open-source-global-overlay-network-from-slack/: Introducing Nebula, the open source global overlay network from Slack
          • https://mtlynch.io/code-review-love: How to Make Your Code Reviewer Fall in Love with You
          • https://sonic-pi.net: Sonic Pi: The \"code-based music creation and performance tool\" that DJ Dave used in the opening set of Github Universe 2020.
          • https://emojipedia.org/emoji-zwj-sequence: Emojipedia lists all emojis, including ZWJ-composed emojis, e.g. with different skin tones and hair.
          • http://www.sizecoding.org: SizeCoding.org is a wiki dedicated to the art of creating very tiny programs for most popular types of CPUs
          • http://www.ritsumei.ac.jp/~akitaoka/index-e.html: Optical Illusions: Things don't always make sense. It's good to be open to the possibility that you are wrong, even when you are certain you are right.
          • https://nautil.us/blog/12-mind_bending-perceptual-illusions: 12 Mind-Bending Perceptual Illusions. Some of these are insane. #2 shouts so many things about how flawed the human mind is and how important science is.
          • https://www.physics.smu.edu/pseudo: The Scientific Method - Critical and Creative Thinking (Debunking Pseudoscience) lots of great reading references in the bottom section
          • http://www.brendangregg.com/usemethod.html: USE method: "The Utilization Saturation and Errors (USE) Method is a methodology for analyzing the performance of any system."
          • https://martinfowler.com/articles/developer-effectiveness.html: Maximizing Developer Effectiveness
          • http://sl4.org/crocker.html: Crocker's Rules: \"other people are allowed to optimize their messages for information, not for being nice to you.\"
          • https://mkhan45.github.io/CalcuLaTeX-Web: CalcuLaTex: Online calculator
          • https://slack.engineering/a-faster-smarter-quick-switcher: A faster, smarter Quick Switcher: how slack implemented frecency in their cmd-k interface
          • https://endoflife.date: Easy reference for when software will go EOL
          • https://serr.disabilityrightsca.org: "The Federal and California special education laws give eligible students with disabilities the right to receive a free appropriate public education in the least restrictive environment. This manual explains how to become eligible for special education, how to request and advocate for special education services, and what to do if you disagree with school districts."
          • https://runyourown.social: How to run a small social network site for your friends
          • https://www.jamesrobertwatson.com: \"300 essays about design, culture, and Jim Watson.\"
          • http://blog.timhutt.co.uk/fast-inverse-square-root: Interactive fast inverse square root analysis.
          • https://wiki.c2.com/?MakeItWorkMakeItRightMakeItFast
          • https://en.wikipedia.org/wiki/SOLID: \"In software engineering, SOLID is a mnemonic acronym for five design principles intended to make software designs more understandable, flexible, and maintainable.\"
          • https://ciechanow.ski/internal-combustion-engine: Interactive animated explanation of an internal combustion engine and its components.
          • https://ciechanow.ski/mechanical-watch: Interactive animated explanation of mechanical watch design. (This blog is full of great interactive content.)
          • https://hbr.org/2022/05/managers-what-are-you-doing-about-change-exhaustion
          • https://goodresearch.dev: The Good Research Code Handbook
          • https://en.wikipedia.org/wiki/Gartner_hype_cycle
          • https://mrogalski.eu/ansi-art
          • https://tailscale.com/kb/1114/pi-hole: How to route DNS to your self-hosted pi-hole using Tailscale VPN
          • https://minitokyo3d.com
          • https://maggieappleton.com/folk-interfaces
          • https://slsa.dev: a security framework, a check-list of standards and controls to prevent tampering, improve integrity, and secure packages and infrastructure in your projects, businesses or enterprises.
          • https://brutalist-web.design
          • https://fs.blog/chestertons-fence
          • https://www.jefftk.com/p/accidentally-load-bearing: further thoughts about chesterton's fence.
          • https://en.wikipedia.org/wiki/Single_source_of_truth
          • https://lateblt.tripod.com/bit68.txt: What happens when a CPU starts
          • https://en.wikipedia.org/wiki/Expert_system
          • https://www.interaction-design.org/literature/book/the-glossary-of-human-computer-interaction/cognitive-artifacts
          • https://en.wikipedia.org/wiki/Goodhart%27s_law: \"When a measure becomes a target, it ceases to be a good measure.\"
          • https://matt.blwt.io/post/corporate-legibility-for-engineers
          • https://en.wikipedia.org/wiki/Cooperative_principle
          • https://prog21.dadgum.com/80.html: Advice to Aimless, Excited Programmers
          • https://exrx.net: Exercise and fitness website
          • https://cohost.org/mcc/post/178201-the-baseline-scene: Deep dive on the baseline scene in Blade Runner 2049
          • https://shkspr.mobi/blog/2023/06/do-open-source-licences-cover-the-ship-of-theseus
          • https://en.wikipedia.org/wiki/Scunthorpe_problem: \"the unintentional blocking of online content by a spam filter or search engine because their text contains a string (or substring) of letters that appear to have an obscene or otherwise unacceptable meaning.\"
          • https://slatestarcodex.com/2014/07/30/meditations-on-moloch
          • https://en.wikipedia.org/wiki/Law_of_Demeter: \"a design guideline for developing software\"
          • https://skunkledger.substack.com/p/escaping-high-school
          • https://blog.rfox.eu/en/Hardware/Cyberdecks.html
          • https://en.wikipedia.org/wiki/Martha_Mitchell_effect: \"when a medical professional labels a patient's accurate perception of real events as delusional, resulting in misdiagnosis.\"
          • https://en.wikipedia.org/wiki/Principle_of_least_astonishment: \"a system should behave in a way that most users will expect it to behave\"
          • https://en.wikipedia.org/wiki/Ulysses_pact: \"A Ulysses pact or Ulysses contract is a freely made decision that is designed and intended to bind oneself in the future.\"
          • https://youtu.be/rimtaSgGz_4: DEF CON 31 - An Audacious Plan to Halt the Internet's Enshittification - Cory Doctorow
          • https://dgl.cx/2023/09/ansi-terminal-security
          • https://en.wikipedia.org/wiki/Two_Generals%27_Problem
          • https://en.wikipedia.org/wiki/List_of_emerging_technologies
          • https://supabase.com/blog/why-supabase-remote: Reasoning behind a fully remote workforce, and how that culture works.
          • https://catern.com/services.html: \"Write libraries instead of services, where possible\"
          • https://luke.hsiao.dev/blog/housing-documentation: Writing Documentation for Your House
          • https://filiph.net/text/we-need-technology-that-is-less-immersive,-not-more.html
          • https://norvig.com/21-days.html: \"Teach Yourself Programming in Ten Years\"
          • https://bitbytebit.substack.com/p/the-size-of-your-backlog-is-inversely
          • https://ferd.ca/a-distributed-systems-reading-list.html
          • https://sohl-dickstein.github.io/2024/02/12/fractal.html: \"Neural network training makes beautiful fractals\"
          • https://www.pluralsight.com/blog/software-development/programming-naming-conventions-explained: Lots of examples of different variable naming schemes, what they are called, and where they are used.
          • https://effectiviology.com/shirky-principle: \"Institutions Try to Preserve the Problem to Which They Are the Solution\"
          • https://github.com/charlax/professional-programming
          • https://bost.ocks.org/mike/algorithms/: \"Visualizing Algorithms\"
          • https://www.baldurbjarnason.com/2024/facing-reality-in-the-eu-and-tech/
          • https://jacobian.org/2021/jun/8/incorrect-estimates: \"So you messed up. Now what? This is the final part of a series on estimating software project timelines.\"
          • https://the-simulation-strategists.beehiiv.com/p/being-stuck: \"The Illusion of Being Stuck\"
          • https://calculusmadeeasy.org
          • https://vorakl.com/articles/posix: \"A few facts about POSIX\"
          • https://cleankotlin.nl/blog/double-negations: \"Double negatives should not not be avoided\"
          • https://newsletter.posthog.com/p/habits-of-effective-remote-teams
          • https://www.mensurdurakovic.com/hard-to-swallow-truths-they-wont-tell-you-about-software-engineer-job
          • https://www.brightball.com/articles/story-points-are-pointless-measure-queues
          • https://www.bitsandbeing.com/p/leaving-stripe-parting-thoughts
          • https://gwern.net/complement
          • https://www.raptitude.com/2024/08/do-quests-not-goals
          • https://photonlines.substack.com/p/visual-data-structures-cheat-sheet
          • https://jdstillwater.blogspot.com/2012/05/i-put-toaster-in-dishwasher.html
          • https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions: Document architectural decisions using Alexandrian Pattern language text snippets.
          • https://clig.dev: \"Command Line Interface Guidelines\"
          • https://www.atlassian.com/devops/frameworks/dora-metrics: \"DevOps Research and Assessment (DORA) provides a standard set of DevOps metrics used for evaluating process performance and maturity.\"
          • https://en.wikipedia.org/wiki/Tuckman%27s_stages_of_group_development: \"Forming, Storming, Norming, Performing\"
          • https://bitfieldconsulting.com/posts/career: \"Where will you be when you realise that this is where you've always wanted to be?\"
          • https://calebhearth.com/dont-get-distracted: \"But don\u2019t get distracted by all this; the software was intended to kill people.\"
          • https://grep.app: Search Github with better syntax, like regular expressions.
          • https://minds.md/zakirullin/cognitive: \"We should reduce any cognitive load above and beyond what is intrinsic to the work we do.\"
          "},{"location":"misc/#kids","title":"Kids","text":"
          • https://hourofcode.com/us/learn: ~1hr coding activities
          • https://scratch.mit.edu: \"Scratch is the world's largest free coding community for kids.\"
          • https://www.redblobgames.com: \"interactive visual explanations of math and algorithms, using motivating examples from computer games.\"
          "},{"location":"misc/#finance","title":"Finance","text":"
          • https://github.com/jlevy/og-equity-compensation
          • https://www.holloway.com/g/equity-compensation
          • https://www.benkuhn.net/optopt: \"Startup options are much better than they look\"
          "},{"location":"mkdocs/","title":"MkDocs","text":"

          \"MkDocs is a fast, simple and downright gorgeous static site generator that's geared towards building project documentation. Documentation source files are written in Markdown, and configured with a single YAML configuration file.\" - http://www.mkdocs.org/

          "},{"location":"mkdocs/#links","title":"Links","text":"
          • https://www.mkdocs.org/user-guide/writing-your-docs/
          "},{"location":"molecule/","title":"Molecule","text":"

          \"Molecule is designed to aid in the development and testing of Ansible roles.\" - https://molecule.readthedocs.io

          "},{"location":"molecule/#examples","title":"Examples","text":""},{"location":"molecule/#initialize-a-new-role-to-be-tested-in-docker","title":"Initialize a new role to be tested in docker","text":"
          molecule init role ansible-role-whatever --driver-name docker\n

          The above command creates the following directory structure with boilerplate filled in, similar to what you'd expect from cookiecutter.

          ansible-role-whatever/.travis.yml\nansible-role-whatever/.yamllint\nansible-role-whatever/README.md\nansible-role-whatever/defaults/main.yml\nansible-role-whatever/handlers/main.yml\nansible-role-whatever/meta/main.yml\nansible-role-whatever/molecule/default/INSTALL.rst\nansible-role-whatever/molecule/default/converge.yml\nansible-role-whatever/molecule/default/molecule.yml\nansible-role-whatever/molecule/default/verify.yml\nansible-role-whatever/tasks/main.yml\nansible-role-whatever/tests/inventory\nansible-role-whatever/tests/test.yml\nansible-role-whatever/vars/main.yml\n

          Note: in newer versions of molecule this command is not available. It was deliberately removed because the same boilerplate can be generated with ansible-galaxy role init, though that doesn't cover the molecule test aspects, which need to be set up with molecule init scenario.
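
          For example, adding a docker-based scenario to an existing role might look like this (a sketch; the flags have changed between molecule versions, so check molecule init scenario --help for yours):

          cd ansible-role-whatever\nmolecule init scenario default --driver-name docker\n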

          "},{"location":"molecule/#profile-a-molecule-run","title":"Profile a molecule run","text":""},{"location":"molecule/#profiling-molecule-itself","title":"Profiling molecule itself","text":"

          This profiles the molecule run, but not everything is shown due to not everything being python native.

          $ python3 -m cProfile -o ansible-run.cprofile $(which molecule) test\n...normal molecule output scrolls by...\n\n$ python3 -m pstats ansible-run.cprofile\nWelcome to the profile statistics browser.\nansible-run.cprofile% sort cumulative\nansible-run.cprofile% stats 20\nMon Jan 13 08:56:45 2020    ansible-run.cprofile\n         1782927 function calls (1731081 primitive calls) in 145.057 seconds\n   Ordered by: cumulative time\n   List reduced from 6303 to 20 due to restriction <20>\n   ncalls  tottime  percall  cumtime  percall filename:lineno(function)\n   1247/1    0.005    0.000  145.060  145.060 {built-in method builtins.exec}\n        1    0.000    0.000  145.060  145.060 /Users/daniel.hoherd/Library/Python/3.7/bin/molecule:3(<module>)\n        1    0.000    0.000  144.267  144.267 /Users/daniel.hoherd/Library/Python/3.7/lib/python/site-packages/click/core.py:762(__call__)\n...\n
          "},{"location":"molecule/#profiling-testinfra-runs","title":"Profiling testinfra runs","text":"

          Install pytest-profiling, which adds the --profile flag needed in the next step

          pip3 install --user pytest-profiling\n

          Then set up your molecule/scenario/molecule.yml file with the following env contents:

          verifier:\n  name: testinfra\n  env:\n    PYTEST_ADDOPTS: \"--profile\"\n
          "},{"location":"molecule/#see-also","title":"See Also","text":"
          • Ansible
          "},{"location":"molecule/#links","title":"Links","text":"
          • Ansible Tests with Molecule - https://ansible.readthedocs.io/projects/molecule/ / https://www.digitalocean.com/community/tutorials/how-to-test-ansible-roles-with-molecule
          • Molecule sequence of scenario events - https://ansible.readthedocs.io/projects/molecule/configuration/?h=scenario#scenario
          "},{"location":"mongodb/","title":"MongoDB","text":"

          \"MongoDB is a general purpose, document-based, distributed database built for modern application developers and for the cloud era.\" - https://www.mongodb.com

          "},{"location":"mongodb/#tricks-and-usage","title":"Tricks and Usage","text":""},{"location":"mongodb/#start-mongodb-locally-using-docker","title":"Start mongodb locally using docker","text":"
          docker run -d -p 27017:27017 --name mongotest mongo:4.2\n
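
          Once it's running, you can get a shell on it by exec-ing into the container started above:

          docker exec -it mongotest mongo\n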
          "},{"location":"mongodb/#see-also","title":"See Also","text":"
          • robo-3t: mongodb GUI (brew cask install robo-3t)
          • https://hub.docker.com/_/mongo
          "},{"location":"mqtt/","title":"mqtt","text":"

          'MQTT is a machine-to-machine (M2M)/\"Internet of Things\" connectivity protocol.' - http://mqtt.org/

          "},{"location":"mutt/","title":"mutt","text":"

          CLI e-mail client

          "},{"location":"mutt/#usage","title":"Usage","text":"

          http://www.mutt.org/doc/manual/manual-2.html

          j or Down       next-entry      move to the next entry\nk or Up         previous-entry  move to the previous entry\nz or PageDn     page-down       go to the next page\nZ or PageUp     page-up         go to the previous page\n= or Home       first-entry     jump to the first entry\n* or End        last-entry      jump to the last entry\nq               quit            exit the current menu\n?               help            list all keybindings for the current menu\n
          "},{"location":"mutt/#message-deletion","title":"Message Deletion","text":"

          http://www.sendmail.org/~ca/email/mutt/manual-4.html

          • Delete e-mails older than 2012-12-01: [shift-d] ~d 1/12/12-1/1/1 # D/M/Y. This will only delete back to 2001-01-01.
          • Delete messages where the body matches a search: [shift-d] ~b search\\ pattern
          • Delete messages where the subject matches a search: [shift-d] ~s search\\ pattern
          • Delete messages older than one month: [shift-d] ~d >1m
          • Delete messages older than 14 days: [shift-d] ~d > 14d
          "},{"location":"mutt/#links","title":"Links","text":"
          • https://srobb.net/mutt.html: Not actually a quick guide.
          "},{"location":"myrepos/","title":"myrepos","text":"

          \"You have a lot of version control repositories. Sometimes you want to update them all at once. Or push out all your local changes. You use special command lines in some repositories to implement specific workflows. Myrepos provides a mr command, which is a tool to manage all your version control repositories.\" -- http://myrepos.branchable.com/

          "},{"location":"myrepos/#usage-examples","title":"Usage Examples","text":""},{"location":"myrepos/#register-a-bunch-of-repos","title":"Register a bunch of repos","text":"
          for repo in ~/code/* ; do\n  mr register \"$repo\"\ndone\n
          "},{"location":"myrepos/#update-all-of-your-registered-repos","title":"Update all of your registered repos","text":"
          mr up\n
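
          Updates can also be run in parallel, which speeds things up considerably when you have many repos:

          mr -j 5 up\n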
          "},{"location":"mysql/","title":"MySQL","text":"

          \"MySQL is an open-source relational database management system. Its name is a combination of \"My\", the name of co-founder Michael Widenius's daughter, and \"SQL\", the abbreviation for Structured Query Language. The MySQL development project has made its source code available under the terms of the GNU General Public License, as well as under a variety of proprietary agreements. MySQL was owned and sponsored by a single for-profit firm, the Swedish company MySQL AB, and is now owned by Oracle Corporation.\" - https://en.wikipedia.org/wiki/MySQL

          "},{"location":"mysql/#examples","title":"Examples","text":""},{"location":"mysql/#show-variables-of-the-running-server","title":"Show variables of the running server","text":"
          mysqladmin variables\n
          "},{"location":"mysql/#enable-bin-logging","title":"Enable bin logging","text":"

          Edit /etc/my.cnf:

          log-bin=/var/lib/mysql/mysql-bin\n
          "},{"location":"mysql/#show-how-a-table-was-created","title":"Show how a table was created","text":"
          SHOW CREATE TABLE table_name \\G\n
          "},{"location":"mysql/#create-a-table","title":"Create a table","text":"
          CREATE TABLE photo_sizes (\n  `photo_id` char(32) NOT NULL,\n  `format` mediumtext,\n  `width` mediumtext,\n  `height` mediumtext,\n  `source` mediumtext,\n  `url` mediumtext,\n  PRIMARY KEY(`photo_id`)\n) ;\n
          "},{"location":"mysql/#create-a-table-with-multiple-columns-as-the-primary-key","title":"Create a table with multiple columns as the primary key","text":"
          CREATE TABLE `photo_sizes` (\n  `photo_id` char(32) NOT NULL,\n  `format` char(32) NOT NULL DEFAULT '',\n  `width` mediumtext,\n  `height` mediumtext,\n  `source` mediumtext,\n  `url` mediumtext,\n  PRIMARY KEY (`photo_id`,`format`)\n) ENGINE=MyISAM DEFAULT CHARSET=latin1\n
          "},{"location":"mysql/#show-what-processes-are-running","title":"Show what processes are running","text":"
          show processlist;\n
          "},{"location":"mysql/#dump-databases-to-sql-files","title":"Dump databases to sql files","text":"

          All databases

          mysqldump -u root -phunter2 --all-databases | gzip -9 > ~/$(date +%F-%H%M).sql.gz\n

          Or just a single database

          mysqldump -u root -phunter2 my_favorite_db | gzip -9 > ~/my_favorite_db-$(date +%F-%H%M).sql.gz\n
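
          To restore one of these dumps, pipe the decompressed SQL back into mysql (assuming the target database already exists; the dump filename is a placeholder):

          gunzip < dump_file.sql.gz | mysql -u root -p my_favorite_db\n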
          "},{"location":"mysql/#duplicate-a-database","title":"Duplicate a database","text":"
          sudo mysqldump -v mogilefs | sudo mysql -D mogilefs_sjc\n
          "},{"location":"mysql/#dump-the-schema-of-a-database-with-no-actual-data","title":"Dump the schema of a database with no actual data","text":"
          sudo mysqldump --no-data dbname > schema.sql\n
          "},{"location":"mysql/#show-privileges","title":"Show privileges","text":"
          show GRANTS ;\n
          "},{"location":"mysql/#create-a-new-user","title":"Create a new user","text":"
          CREATE USER 'a_new_user'@'10.0.5.%' IDENTIFIED BY 'the_user_password';\nGRANT ALL PRIVILEGES ON some_database.* TO 'a_new_user'@'10.0.5.%' WITH GRANT OPTION;\n
          "},{"location":"mysql/#delete-a-user","title":"Delete a user","text":"
          DELETE from mysql.user where user = 'user_name';\n
          "},{"location":"mysql/#grant-privileges","title":"Grant Privileges","text":"
          GRANT ALL ON database.* TO 'newuser'@'localhost';\n
          "},{"location":"mysql/#change-root-password","title":"Change root password","text":"
          /usr/bin/mysqladmin -u root password 'new-password'\n/usr/bin/mysqladmin -u root -h hostname password 'new-password'\n

          or...

          UPDATE mysql.user\n  SET Password=PASSWORD('hunter2')\n  WHERE User='leroy_jenkins'\n  AND Host='localhost' ;\n
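
          Note that the PASSWORD() function was deprecated in MySQL 5.7.6 and removed in 8.0, so on modern versions use ALTER USER instead, e.g.:

          mysql -u root -p -e "ALTER USER 'leroy_jenkins'@'localhost' IDENTIFIED BY 'hunter2';"\n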
          "},{"location":"mysql/#create-statements","title":"Create statements","text":""},{"location":"mysql/#create-an-index-on-table-images-for-column-rating_count","title":"Create an index on table images for column rating_count","text":"
          create index rating_count on images (rating_count) ;\n
          "},{"location":"mysql/#drop-an-index-from-a-table","title":"Drop an index from a table","text":"
          drop index rating_count on images ;\n
          "},{"location":"mysql/#table-alters","title":"Table Alters","text":""},{"location":"mysql/#add-a-column","title":"Add a column","text":"
          alter table flixplor add o_width char(12);\n
          "},{"location":"mysql/#drop-a-column","title":"Drop a column","text":"
          alter table flixplor drop column o_width;\n
          "},{"location":"mysql/#change-the-type-of-a-column","title":"Change the type of a column","text":"
          alter table flixplor modify o_height mediumint ;\n
          "},{"location":"mysql/#add-a-current-timestamp-column","title":"Add a current timestamp column","text":"
          alter table images add last_updated timestamp not null default current_timestamp on update current_timestamp;\n
          "},{"location":"mysql/#change-the-table-engine-to-innodb","title":"Change the table engine to innodb","text":"
          ALTER TABLE images ENGINE=INNODB;\n
          "},{"location":"mysql/#change-a-tables-encoding","title":"Change a table's encoding","text":"
          alter table raw_flickr_data CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci ;\n
          "},{"location":"mysql/#table-inserts","title":"Table Inserts","text":""},{"location":"mysql/#add-a-record","title":"Add a record","text":""},{"location":"mysql/#table-updates","title":"Table Updates","text":""},{"location":"mysql/#update-if-key-exists","title":"Update if key exists","text":"

          For rows whose unique key already exists, a plain insert fails, so you need an on duplicate key section in your statement.

          INSERT INTO photo_sizes (\n  photo_id,\n  format,\n  height,\n  width,\n  url,\n  source\n) values (\n  '32704962848',\n  'Medium 640',\n  '427',\n  '640',\n  'https://www.flickr.com/photos/warzauwynn/32704962848/sizes/z/',\n  'https://farm5.staticflickr.com/4855/32704962848_3a96b4c635_z.jpg'\n) ON DUPLICATE KEY UPDATE\n  height='427',\n  width='640',\n  url='https://www.flickr.com/photos/warzauwynn/32704962848/sizes/z/',\n  source='https://farm5.staticflickr.com/4855/32704962848_3a96b4c635_z.jpg'\n
          • http://dev.mysql.com/doc/refman/5.0/en/insert-on-duplicate.html
          "},{"location":"mysql/#update-a-datetime-column-with-a-random-datetime","title":"Update a datetime column with a random datetime","text":"
          UPDATE flixplor\nSET last_retrieval = subtime(\n  concat_ws(' ','2019-01-01' - interval rand() * 10000 day ,'00:00:00'), -- create a random YYYY-MM-DD within 10k days of given datetime\n  sec_to_time(floor(0 + (rand() * 86401))) -- create a random HH:MM:SS within a 24 hour period\n)\nWHERE last_retrieval < '2019-01-01 00:00:00';\n
          "},{"location":"mysql/#table-selects","title":"Table Selects","text":""},{"location":"mysql/#select-values-and-dont-show-duplicates","title":"Select values and don't show duplicates","text":"
          SELECT col from servers group by col ;\n
          "},{"location":"mysql/#select-photo_id-and-discard-duplicates-uniq","title":"Select photo_id and discard duplicates (uniq)","text":"
          SELECT photo_id from photo_sizes group by photo_id ;\n
          "},{"location":"mysql/#select-and-count-unique-pairs-of-columns","title":"Select and count unique pairs of columns","text":"
          SELECT model, unit, count(*) as n from servers group by model, unit having n > 1 order by model asc ;\n
          "},{"location":"mysql/#select-the-count-of-rows-in-a-table","title":"Select the count of rows in a table","text":"
          SELECT count(*) from flixplor where o_height > 100 ;\n
          "},{"location":"mysql/#do-some-math-to-create-a-new-column-during-a-select","title":"Do some math to create a new column during a select","text":"
          SELECT photo_id,last_retrieval,o_height,o_width,(o_height * o_width) as pixels from flixplor\nwhere last_reposted < from_unixtime('1384268667') or last_reposted is NULL\norder by (o_height * o_width) limit 10 ;\n
          "},{"location":"mysql/#transform-datetime-into-a-date-diff","title":"Transform datetime into a date diff","text":"

          This selects the number of hours since the given datestamp instead of the datestamp itself.

          SELECT TIMESTAMPDIFF(HOUR, date_taken, NOW()) from photos ;\n

          See also DATEDIFF.
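
          For example, DATEDIFF returns the difference in days rather than hours (a sketch against the same hypothetical photos table; the database name is a placeholder):

          mysql some_database -e 'SELECT DATEDIFF(NOW(), date_taken) FROM photos;'\n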

          "},{"location":"mysql/#statement-explanations","title":"Statement explanations","text":"

          The EXPLAIN statement can give you additional info about how complex your statement is.

          "},{"location":"mysql/#explain-select","title":"Explain select","text":"
          mysql> explain SELECT *,(rating_sum / rating_count) as average from images where (rating_sum / rating_count) > 20 or rating_count=0 ORDER BY RAND() LIMIT 1 ;\n+----+-------------+--------+------+---------------+------+---------+------+--------+----------------------------------------------+\n| id | select_type | table  | type | possible_keys | key  | key_len | ref  | rows   | Extra                                        |\n+----+-------------+--------+------+---------------+------+---------+------+--------+----------------------------------------------+\n|  1 | SIMPLE      | images | ALL  | rating_count  | NULL | NULL    | NULL | 301937 | Using where; Using temporary; Using filesort |\n+----+-------------+--------+------+---------------+------+---------+------+--------+----------------------------------------------+\n1 row in set (0.00 sec)\n
          "},{"location":"mysql/#misc","title":"Misc","text":"
          • Complete statement with \\G for different output format
          • ERROR 1045 (28000) may be caused by an invalid hostname in the connect command. Replace the --host token with the full hostname of the db server. Or, restart mysql and try again.
          "},{"location":"mysql/#recommended-reading","title":"Recommended reading","text":"
          • MySQL (5th Edition) (Developer's Library)
          • High Performance MySQL: Optimization, Backups, Replication, and More
          "},{"location":"mysql/#see-also","title":"See Also","text":"
          • http://www.sqlalchemy.org/ - ORM, better for abstracting database in code
          • http://www.mycli.net/ - A better CLI for MySQL
          • https://modern-sql.com/blog/2018-04/mysql-8.0 - Good comparison of modern (as of 2018) SQL options
          "},{"location":"namei/","title":"namei","text":"

          \"follow a pathname until a terminal point is found\" - man namei

          "},{"location":"namei/#examples","title":"Examples","text":""},{"location":"namei/#simple-usage","title":"Simple usage","text":"
          ## namei /etc/systemd/system/multi-user.target.wants/ssh.service\nf: /etc/systemd/system/multi-user.target.wants/ssh.service\n d /\n d etc\n d systemd\n d system\n d multi-user.target.wants\n l ssh.service -> /lib/systemd/system/ssh.service\n   d /\n   d lib\n   d systemd\n   d system\n   - ssh.service\n
          "},{"location":"namei/#show-permissions-of-all-entries","title":"Show permissions of all entries","text":"
          ## namei -l /etc/systemd/system/multi-user.target.wants/ssh.service\nf: /etc/systemd/system/multi-user.target.wants/ssh.service\ndrwxr-xr-x root root /\ndrwxr-xr-x root root etc\ndrwxr-xr-x root root systemd\ndrwxr-xr-x root root system\ndrwxr-xr-x root root multi-user.target.wants\nlrwxrwxrwx root root ssh.service -> /lib/systemd/system/ssh.service\ndrwxr-xr-x root root   /\ndrwxr-xr-x root root   lib\ndrwxr-xr-x root root   systemd\ndrwxr-xr-x root root   system\n-rw-r--r-- root root   ssh.service\n
          "},{"location":"ncftp/","title":"ncftp","text":"

          \"NcFTP Client is a set of FREE application programs implementing the File Transfer Protocol. ... The program has been in service on UNIX systems since 1991 and is a popular alternative to the standard FTP program, /usr/bin/ftp.\" - https://www.ncftp.com/ncftp/

          "},{"location":"ncftp/#examples","title":"Examples","text":""},{"location":"ncftp/#connect-to-a-non-standard-port","title":"Connect to a non-standard port","text":"
          ncftp ftp://10.8.5.103:5000\n
          "},{"location":"ncftp/#recursively-put-a-directory","title":"Recursively put a directory","text":"
          put -r local_dir\n
          "},{"location":"ncftp/#recursively-get-a-directory","title":"Recursively get a directory","text":"
          get -T -R remote_dir\n

          Note: Without the -T option you may run into the error:

          tar: This does not look like a tar archive\ntar: Exiting with failure status due to previous errors\n
          "},{"location":"ncftp/#recursively-delete-a-remote-directory","title":"Recursively delete a remote directory","text":"

          This does not always work.

          rm -r remote_dir\n
          "},{"location":"neopixel/","title":"Adafruit Neopixel","text":""},{"location":"neopixel/#hardware","title":"Hardware","text":"

          I've used the following devices together with success:

          • https://www.amazon.com/gp/product/B01AG923EU/: Alitove WS2811 neopixel strands that I've used with success on Raspberry Pi and ESP8266.
          • https://www.amazon.com/gp/product/B081CSJV2V/: ESP8266 that I've used with the above Alitove light strands and the WLED custom firmware for really simple, awesome LEDs.
          "},{"location":"neopixel/#links","title":"Links","text":"
          • https://github.com/Aircoookie/WLED / https://kno.wled.ge: Custom microcontroller firmware for neopixel integration. This is by far the easiest way to do Neopixels.
          • https://learn.adafruit.com/adafruit-neopixel-uberguide
          • https://www.adafruit.com/category/168
          • https://learn.adafruit.com/neopixels-on-raspberry-pi/python-usage
          • https://docs.micropython.org/en/latest/esp8266/tutorial/neopixel.html
          • https://github.com/danielhoherd/stranger_things_lights: Stranger Things alphabet neopixel project for Raspberry Pi
          • https://www.youtube.com/watch?v=ciaFar8nfHc: Custom 16 x 16 x 16 (4096) neopixel cube
          "},{"location":"netgear/","title":"NETGEAR","text":""},{"location":"netgear/#netgear-r7000","title":"Netgear R7000","text":""},{"location":"netgear/#dd-wrt","title":"DD-WRT","text":"
          • https://www.myopenrouter.com/downloads/dd-wrt-r7000
          • http://www.desipro.de/ddwrt/K3-AC-Arm/

          See Also: dd-wrt

          "},{"location":"netgear/#netgear-gss116e","title":"Netgear GSS116E","text":"
          • https://www.netgear.com/support/product/GSS116E
          "},{"location":"netgear/#netgear-m4300-8x8f","title":"Netgear M4300-8X8F","text":"

          \"Stackable Managed Switch with 16x10G including 8x10GBASE-T and 8xSFP+ Layer 3\"

          • https://www.netgear.com/support/product/M4300-8X8F
          "},{"location":"netgear/#arlo","title":"Arlo","text":""},{"location":"netgear/#pros","title":"Pros","text":"
          • Completely wireless solution available but not required.
          "},{"location":"netgear/#cons","title":"Cons","text":"
          • Requires internet access to interact with, even for cameras that are accessible on the same LAN.
          "},{"location":"nethogs/","title":"nethogs","text":"

          \"Linux 'net top' tool\" - https://github.com/raboof/nethogs

          Nethogs shows you which PIDs have used or are using how much bandwidth.
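
          Basic usage is to run it as root against one or more interfaces (the interface name here is an example):

          sudo nethogs eth0\n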

          "},{"location":"nethogs/#see-also","title":"See also","text":"
          • Top variant list
          "},{"location":"networking/","title":"networking","text":""},{"location":"networking/#links","title":"Links","text":"
          • https://en.wikipedia.org/wiki/Reserved_IP_addresses
          • https://wiki.debian.org/BridgeNetworkConnectionsProxyArp
          • https://routersecurity.org
          • https://github.com/apenwarr/blip: Web based graph of network latency, hosted at https://gfblip.appspot.com
          • https://www.cloudflare.com/learning/cdn/glossary/reverse-proxy
          "},{"location":"networking/#see-also","title":"See also","text":"
          • bind - DNS server
          • ip command for linux
          • iperf - network performance testing
          • iptables - linux firewall
          • linksys - soho network hardware vendor
          • netgear - network hardware vendor
          • networksetup - Mac OSX configuration tool for network settings in System Preferences
          • ntop - network top
          • OSI model - The Open Systems Interconnection model (OSI model) is a conceptual model that characterizes and standardizes the communication functions of a telecommunication or computing system without regard to its underlying internal structure and technology.
          • pac - dynamic proxy configuration
          • procurve - HP managed networking
          "},{"location":"networksetup/","title":"networksetup","text":"

          \"networksetup -- configuration tool for network settings in System Preferences.\" - man networksetup

          networksetup is a standard tool on macOS.

          "},{"location":"networksetup/#examples","title":"Examples","text":""},{"location":"networksetup/#list-all-network-services","title":"List all network services","text":"

          Network Services are not the same as hardware devices.

          $ networksetup -listallnetworkservices\nAn asterisk (*) denotes that a network service is disabled.\nEthernet Adapter (en4)\nWi-Fi\nThunderbolt Bridge\n$ networksetup -listnetworkserviceorder\nAn asterisk (*) denotes that a network service is disabled.\n(1) Ethernet Adapter (en4)\n(Hardware Port: Thunderbolt Ethernet Slot 0, Device: en4)\n\n(2) Wi-Fi\n(Hardware Port: Wi-Fi, Device: en0)\n\n(3) Thunderbolt Bridge\n(Hardware Port: Thunderbolt Bridge, Device: bridge0)\n
          "},{"location":"networksetup/#modify-dns-for-a-device","title":"Modify DNS for a device","text":"

          Using the literal argument Empty to clear a setting is pretty unintuitive. It's used in a few other places in networksetup too.

          $ networksetup -getdnsservers Wi-Fi\nThere aren't any DNS Servers set on Wi-Fi.\n$ networksetup -setdnsservers Wi-Fi 8.8.8.8\n$ networksetup -getdnsservers Wi-Fi\n8.8.8.8\n$ networksetup -setdnsservers Wi-Fi Empty\n$ networksetup -getdnsservers Wi-Fi\nThere aren't any DNS Servers set on Wi-Fi.\n
          "},{"location":"networksetup/#show-info-for-the-device-named-wi-fi","title":"Show info for the device named Wi-Fi","text":"
          networksetup -getinfo \"Wi-Fi\"\n
          "},{"location":"networksetup/#show-all-connected-hardware-ports","title":"Show all connected hardware ports","text":"
          networksetup -listallhardwareports\n
          "},{"location":"networksetup/#show-all-search-domains","title":"Show all search domains","text":"
          networksetup -listallnetworkservices |\n  tail -n +2 |\n  xargs -I :: networksetup -getsearchdomains \"::\"\n
          "},{"location":"networksetup/#create-a-bunch-of-vlan-interfaces","title":"Create a bunch of VLAN interfaces","text":"
          for X in {1..32} ; do\n  sudo networksetup -createVLAN \"vlan${X}\" en3 \"${X}\" ;\ndone ;\n
          "},{"location":"networksetup/#delete-a-bunch-of-vlan-interfaces","title":"Delete a bunch of VLAN interfaces","text":"
          for X in {1..32} ; do\n  sudo networksetup -deleteVLAN \"vlan${X}\" en3 \"${X}\" ;\ndone ;\n
          "},{"location":"nfc/","title":"NFC","text":"

          \"Near-field communication (NFC) is a set of communication protocols that enables communication between two electronic devices over a distance of 4 centimetres (1.6 in) or less.\" - https://en.wikipedia.org/wiki/Near-field_communication

          "},{"location":"nfc/#mifare","title":"Mifare","text":"
          • FM11RF08S universal backdoor key: A396EFA4E24F via https://eprint.iacr.org/2024/1275.pdf
          "},{"location":"nfc/#links","title":"Links","text":"
          • https://gototags.com/nfc/standards/iso-14443
          • https://nfc-tools.github.io
          • https://docs.flipper.net/nfc
          "},{"location":"nfc/#see-also","title":"See also","text":"
          • My Nintendo Amiibo notes
          • My Flipper Zero notes
          "},{"location":"nfs/","title":"nfs","text":"

          nfs is the Network File System.

          • Configured in linux at /etc/exports
          • Great info here: http://nfs.sourceforge.net/
          "},{"location":"nfs/#tips-and-tricks","title":"Tips and Tricks","text":""},{"location":"nfs/#wait-for-network-to-be-online-before-mounting","title":"Wait for network to be online before mounting","text":"

          If you are using /etc/fstab for your nfs mounts, you may run into a race condition where the network target comes online and allows NFS mounts to continue, but DHCP has not yet completed. This causes the NFS mounts to fail.

          To fix this race condition, enable systemd-networkd-wait-online.service

          sudo systemctl enable systemd-networkd-wait-online.service\n

          Then edit your /etc/fstab entry to rely on that target.

          192.168.0.99:/share/media  /mnt/shared-media  nfs  x-systemd.requires=network-online.target, ... the_rest_of_the_options\n

          This solution works, but it has the negative side effect of making boot take longer due to waiting for dhclient requests to time out before continuing. On Debian 12, this adds 2 minutes to the total boot time shown by systemd-analyze plot > boot-$(date +%F).svg.

          If you know the exact interface your mount points rely on, then you can tailor the wait-online.service to only wait for that one interface:

          sudo systemctl edit systemd-networkd-wait-online.service\n

          Then add the following section to the correct location as directed by the comments in the editor window:

          [Service]\nExecStart=\nExecStart=/lib/systemd/systemd-networkd-wait-online --ipv4 --interface=your_interface_name\n

          The reason there is an empty ExecStart= is that this is how systemd is instructed to empty out the previous assignments instead of appending to them. This works with other options too. More info here: https://www.freedesktop.org/software/systemd/man/systemd.service.html
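
          You can confirm that the override took effect by viewing the merged unit definition:

          systemctl cat systemd-networkd-wait-online.service\n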

          "},{"location":"nfs/#regarding-mount-points-within-shares","title":"Regarding mount points within shares","text":"

          If you have a mount point within an NFS share, you must have a separate entry in your exports file that sets the permissions of this mount point. Currently OS X has a problem with this, but officially this is the way to do it.

          "},{"location":"nfs/#show-hosts-that-are-connected-to-this-nfs-server","title":"Show hosts that are connected to this NFS server","text":"
          showmount\n
          "},{"location":"nfs/#show-what-hosts-are-using-what-exports","title":"Show what hosts are using what exports","text":"
          showmount -a\n
          "},{"location":"nfs/#show-exported-directories","title":"Show exported directories","text":"
          showmount -e\n
          "},{"location":"nfs/#show-directories-in-use-by-nfs","title":"Show directories in use by NFS","text":"
          showmount -d\n
          "},{"location":"nfs/#add-an-nfs-mount-to-fstab","title":"Add an NFS mount to fstab","text":"
          opal:/z4  /mnt/z4   nfs  rsize=8192,wsize=8192,timeo=14,intr\n
          "},{"location":"nfs/#linux-tips-and-tricks","title":"Linux Tips and Tricks","text":""},{"location":"nfs/#show-which-versions-of-nfs-your-nfs-server-supports","title":"Show which versions of NFS your NFS server supports","text":"
          rpcinfo -p\n
          "},{"location":"nfs/#allow-an-os-x-client-to-mount-nfs4-nested-zfs-data-sets","title":"Allow an OS X client to mount nfs4 nested zfs data sets","text":"

          OS X has problems with the privileged port default requirement in nfs4, so the insecure option is required.

          The nohide option allows you to mount nested zfs datasets, instead of requiring a separate export for each dataset.

          /z4 *.local(rw,async,no_subtree_check,insecure,nohide)\n
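
          A matching mount from the OS X client might look like this (a sketch; opal is the example server name used elsewhere on this page):

          sudo mount -t nfs -o vers=4 opal:/z4 /mnt/z4\n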
          "},{"location":"nfs/#os-x-tips-and-tricks","title":"OS X Tips and Tricks","text":""},{"location":"nfs/#create-persistent-nfs-mount-in-os-x-108","title":"Create persistent NFS mount in OS X 10.8","text":"

          This is not bulletproof. Modern OS X 10.9+ versions are switching away from NFS to CIFS. The NFS client on OS X is pretty weak. For instance, it might crash your machine if the share has 0 bytes free but is mounted RW. Use at your own risk.

          sudo mkdir /mnt # OS X doesn't like you playing with /Volumes, it may delete your dirs\nsudo dscl . -create /Mounts/z4\nsudo dscl . -create /Mounts/z4 VFSLinkDir /mnt/z4\nsudo dscl . -create /Mounts/z4 VFSOpts resvport rw nosuid\nsudo dscl . -create /Mounts/z4 VFSType nfs\nsudo dscl . -create /Mounts/z4 RecordName opal:/z4\nsudo dscl . -create /Mounts/iTunes\nsudo dscl . -create /Mounts/iTunes VFSLinkDir /mnt/z4/iTunes\nsudo dscl . -create /Mounts/iTunes VFSOpts resvport rw nosuid\nsudo dscl . -create /Mounts/iTunes VFSType nfs\nsudo dscl . -create /Mounts/iTunes RecordName opal:/z4/iTunes\nsudo dscl . -read /Mounts/opal:/z4\nsudo dscl . -read /Mounts/opal:/z4/iTunes\nsudo dscl . -list /Mounts\n\nsudo dscl . -delete /Mounts opal:/z4/iTunes\n
          "},{"location":"ngrok/","title":"ngrok","text":"

          \"ngrok exposes local networked services behinds NATs and firewalls to the public internet over a secure tunnel. Share local websites, build/test webhook consumers and self-host personal services.\" - ngrok --help

          "},{"location":"ngrok/#examples","title":"Examples","text":""},{"location":"ngrok/#simple-http-usage","title":"Simple http usage","text":"
          ngrok http 8000\n
          "},{"location":"ngrok/#host-a-helm-repository","title":"Host a helm repository","text":""},{"location":"ngrok/#create-a-helm-package","title":"Create a helm package","text":"

          This will create a tgz of your chart and its dependencies in your current directory

          helm package /path/to/your-chart --dependency-update\n
          "},{"location":"ngrok/#create-a-helm-repository-index","title":"Create a helm repository index","text":"
          helm repo index .\n
          "},{"location":"ngrok/#serve-this-directory-with-the-index-and-tgz-file-over-http","title":"Serve this directory with the index and tgz file over http","text":"
          python3 -m http.server\n
          "},{"location":"ngrok/#expose-the-http-server-to-the-internet","title":"Expose the http server to the internet","text":"
          ngrok http 8000\n
          "},{"location":"ngrok/#use-it","title":"Use it","text":"
          helm repo add super-awesome-test-repo \"${your_ngrok_url}\"\nhelm repo update\nhelm search repo super-awesome-test-repo\n
          "},{"location":"ngrok/#links","title":"Links","text":"
          • https://ngrok.com/docs
          "},{"location":"nintendo-3ds/","title":"Nintendo 3DS","text":""},{"location":"nintendo-3ds/#lego-3ds-notes","title":"Lego + 3DS notes","text":"
          • 3DSXL screen is ~ 9x11 Lego units
          • 3DSXL outside is 20x12 Lego units
          "},{"location":"nintendo-3ds/#emulation","title":"Emulation","text":"
          • Citra 3DS emulator
          • Dumping cartridges
          "},{"location":"nintendo-3ds/#hacking-and-cfw","title":"Hacking and CFW","text":"
          • https://3ds.hacks.guide/
          • https://github.com/AuroraWright/Luma3DS
          • https://www.3dbrew.org/wiki/Homebrew_Applications
          • https://zoogie.github.io/web/34%E2%85%95c3/#/
          • https://web.archive.org/web/20220130042347/https://www.reddit.com/r/3dshacks/comments/6iclr8/a_technical_overview_of_the_3ds_operating_system/: \"A Technical Overview of the 3DS Operating System\"
          • https://youtu.be/ImR-TdDAIJE: How a Terrible Game Cracked the 3DS's Security - Early Days of 3DS Hacking
          • https://www.youtube.com/watch?v=bZczf57HSag: (2015 32c3 talk) 3DS console hacking deep dive
          • https://www.youtube.com/watch?v=8C5cn_Qj0G8: Nintendo Hacking 2016 33c3
          • https://courses.csail.mit.edu/6.857/2019/project/20-Chau-Ko-Tang.pdf: History of Hacking the Nintendo 3DS
          • https://www.copetti.org/writings/consoles/nintendo-3ds: \"Nintendo 3DS Architecture. A practical analysis by Rodrigo Copetti.\"
          • https://pretendo.network: \"Pretendo is a free and open source replacement for Nintendo's servers for both the 3DS and Wii U, allowing online connectivity for all, even after the original servers are discontinued\"
          "},{"location":"nintendo-amiibo/","title":"Nintendo Amiibo","text":"

          Amiibo are NFC figurines that enable in-game features on Nintendo 3DS, Wii U and Switch platforms.

          "},{"location":"nintendo-amiibo/#info","title":"Info","text":"
          • Uses NTAG215 RFID chips.
          "},{"location":"nintendo-amiibo/#links","title":"Links","text":"
          • https://amiibo.wikia.com/wiki/Amiibo_Wiki
          • https://github.com/HiddenRamblings/TagMo - TagMo is an Android app which allows for cloning Amiibos using blank NTAG215 NFC tags.
          • https://nintendo.wikia.com/wiki/List_of_Amiibo_compatible_Games
          • https://pyamiibo.readthedocs.io/ - PyAmiibo helps to read, modify and write dump files of Nintendo Amiibo figures. PyAmiibo is capable of parsing most NTAG properties, as well as some Amiibo data.
          • https://www.amiiboapi.com - An Amiibo database that holds all amiibo information in a single API.
          • https://www.codejunkies.com/powersaves-for-amiibo/
          • https://www.neurohacked.com/how-to-mimic-any-amiibo/ - How to Mimic Any Amiibo
          • https://www.nintendo.com/amiibo/games
          • https://ally.ninja: \"Collect & backup your amiibo\", can write PowerTags.
          • https://www.tagmiibo.com: \"Tagmiibo app creates backup amiibos with NFC 215 tags or devices.\"
          "},{"location":"nintendo-amiibo/#see-also","title":"See also","text":"
          • My NFC notes
          • My Flipper Zero notes
          "},{"location":"nintendo-nes/","title":"Nintendo Entertainment System","text":"

          \"The Nintendo Entertainment System (NES) is an 8-bit third-generation home video game console produced by Nintendo.\" - https://en.wikipedia.org/wiki/Nintendo_Entertainment_System

          "},{"location":"nintendo-nes/#links","title":"Links","text":"
          • https://youtu.be/TPbroUDHG0s: \"Game Development in Eight Bits\" by Kevin Zurawel
          • https://www.famicom.party/book
          "},{"location":"nintendo-switch/","title":"Nintendo Switch","text":""},{"location":"nintendo-switch/#homebrew","title":"Homebrew","text":""},{"location":"nintendo-switch/#combine-multi-part-xci","title":"Combine multi-part xci","text":"

          When dumping games to fat32, you may need to create multi-part xci files. In my experience, you can combine these files by just concatenating them together. For example:

          cat *.xc[0-9] > foo.xci\n4nxci -k prod.keys foo.xci\n
          "},{"location":"nintendo-switch/#create-a-switch-compatible-partition-from-linux","title":"Create a Switch compatible partition from linux","text":"

          Rule #1 is to avoid using exfat at all costs.

          The Switch uses MBR \ud83d\ude44 and has crummy exfat support... The following requires root permissions.

          SD_DISK=/dev/sdZ\nsfdisk \"$SD_DISK\" <<EOF\nlabel: mbr\n,\nEOF\nmkfs.exfat \"$SD_DISK\"1\n
          "},{"location":"nintendo-switch/#homebrew-glossary","title":"Homebrew Glossary","text":"

          Most of these were taken from https://github.com/XorTroll/Goldleaf

          • Atmosphere: custom firmware
          • Hekate: custom bootloader
          • NSP (Nintendo Submission Package): It's the official format used by Nintendo to provide installable content from their CDN servers.
          • NRO (Nintendo Relocatable Object): Officially treated as a dynamic library object, similar to DLLs on Windows. These are loaded dynamically by applications at runtime.
          • NCA (Nintendo Content Archive): This format is the base format used by Horizon OS to store content.
          • NACP (Nintendo Application Control Property): This is the format used by Nintendo to store several properties of installed titles, like the title's name, version, author name and other information, like if the title supports screenshots or video captures.
          • XCI: Cartridge dump file
          "},{"location":"nintendo-switch/#homebrew-links","title":"Homebrew links","text":"
          • https://switch.hacks.guide
          • https://github.com/Team-Neptune/DeepSea: Minimal CFW setup, all files included
          • https://github.com/HamletDuFromage/aio-switch-updater: All-in-one switch CFW updater
          • https://webcfw.sdsetup.com/: Online, client-side, web-USB payload injector (requires Chrome-ish browser)
          • https://nh-server.github.io/switch-guide/
          • https://github.com/nh-server/fusee-interfacee-tk: TK payload injector
          • https://github.com/DarkMatterCore/gcdumptool
          • https://github.com/mologie/nxboot: CLI payload injector. 0.2.0 has macOS builds.
          • https://github.com/XorTroll/Goldleaf
          • https://nswdb.com: DB of Nintendo dump metadata. Downloadable as NSWreleases.xml
          • https://switchbrew.github.io/nx-hbl/
          • https://switchtools.sshnuke.net/: ChoiDuJour, which lets you go between Switch firmwares
          • https://www.reddit.com/r/SwitchHaxing/top/?sort=top&t=month
          • https://sigmapatches.su: Sigpatches and other useful homebrew files
          • https://www.cheatslips.com/wiki: How to write cheat codes
          • https://github.com/The-4n/4NXCI: (repo deleted) XCI to NSP converter
          • https://github.com/AtlasNX/Kosmos: (deprecated) All in one guide
          • https://sdsetup.com: (deprecated) Create zip file with with all the needed and desired software and configs to put onto an sd card.
          "},{"location":"nintendo-wii/","title":"Nintendo Wii","text":"

          A gaming system by Nintendo. See also the Dolphin emulator.

          "},{"location":"nintendo-wii/#tips","title":"Tips","text":"
          • In Boot-Mii you can use the power and reset buttons to navigate if you don't have a GC controller.
          "},{"location":"nintendo-wii/#wii-remote-sync","title":"Wii Remote Sync","text":"

          Standard Mode:

          1. Press the Power button on the Wii console to turn it on.
          2. Remove the battery cover on the back of the Wii Remote to be synced. Open the SD Card Slot cover on the front of the Wii console.
          3. Press and release the SYNC button just below the batteries on the Wii Remote; the Player LED on the front of the Wii Remote will blink. While the lights are still blinking, quickly press and release the red SYNC button on the front of the Wii console.
          4. When the Player LED blinking stops and stays lit, the syncing is complete. The LED that is illuminated indicates the player number (1 through 4).
          "},{"location":"nintendo-wii/#hack-notes","title":"Hack notes","text":"
          • IOSes Explained - http://gwht.wikidot.com/ioses-explained
          • Backup Launcher runs ISO - http://wiihacked.com/backup-launcher-v30gamma-download-here-and-how-to-install
          • Wiibrew is the channel for loading home-brew software. - http://wiibrew.org
          • Wii Backup Manager is an app to help manage ISOs and filesystems - http://www.wiibackupmanager.co.uk/
          • Wiimms ISO Tools will modify ISOs and WBFS - http://wit.wiimm.de/
          • A list of WBFS managers - http://wiki.gbatemp.net/wiki/WBFS_Managers
          • Anti-brick and general hacking tutorial - http://www.howtogeek.com/howto/38041/set-up-anti-brick-protection-to-safeguard-and-supercharge-your-wii/
          • https://sites.google.com/site/completesg/backup-launchers/installation - Successfully installed Trucha patched IOS to enable the USB loader
          • https://sites.google.com/site/completesg/cios/hermes-cios - More software that needs to be installed to get USB loaders to work nicely
          • http://www.howtogeek.com/howto/40349/install-a-wii-game-loader-for-easy-backups-and-fast-load-times/ - How to USB Load ISOs on the Wii
          • https://code.google.com/archive/p/nusdownloader/ - NUS Downloader lets you download various official Wii software
          "},{"location":"nintendo-wiiu/","title":"Nintendo Wii U","text":"
          • https://wiiu.hacks.guide/
          • https://wiiu.hacks.guide/#/block-updates
          • https://github.com/GaryOderNichts/udpih: Best jailbreak as of 2023.
          • https://github.com/koolkdev/wfs-tools: Wii-U filesystem tools
          "},{"location":"ntop/","title":"ntop","text":"

          \"High-speed web-based traffic analysis.\" - https://www.ntop.org/

          This isn't a traditional top-style tool since it has a web interface. For a network top in a TUI, see iftop.

          "},{"location":"ntop/#see-also","title":"See also","text":"
          • Top variant list
          "},{"location":"ntp/","title":"NTP","text":"

          \"Network Time Protocol (NTP) is a networking protocol for clock synchronization between computer systems over packet-switched, variable-latency data networks. In operation since before 1985, NTP is one of the oldest Internet protocols in current use.\" - https://en.wikipedia.org/wiki/Network_Time_Protocol

          "},{"location":"ntp/#links","title":"Links","text":"
          • RFC 5905: Network Time Protocol Version 4: Protocol and Algorithms Specification
          • Understanding and mitigating NTP-based DDoS attacks
          • Google Public NTP: Leap Smear
          • Five different ways to handle leap seconds with NTP
          • The Unix leap second mess
          • ntp.org FAQ: What happens during a Leap Second?
          • The Raspberry Pi as a Stratum-1 NTP Server
          • NTP vs PTP: Network Timing Smackdown!
          "},{"location":"ntp/#ntp-daemon-in-systemd","title":"ntp daemon in systemd","text":"

          Systemd has its own time and date tools that replace classic linux tools like ntpd. See systemd-timesyncd and timedatectl. systemd-timesyncd refuses to start if the ntp package is installed.
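
          For example, on a systemd-based distro you can check sync status and enable systemd-timesyncd like this:

          timedatectl status\nsudo timedatectl set-ntp true\nsystemctl status systemd-timesyncd\n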

          "},{"location":"ntp/#see-also","title":"See Also","text":"
          • time - Notes on time technologies
          • ptp - Precision Time Protocol
          "},{"location":"nvidia/","title":"nvidia","text":"
          • Verify linux nvidia drivers are installed: nvidia-smi
          • Verify linux nvidia drivers work within docker: docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi
          • Show nvidia container info: nvidia-container-cli -k -d /dev/tty info
          • https://github.com/Syllo/nvtop: GPU top command. Works with more than just Nvidia GPUs.
          "},{"location":"onboarding/","title":"Onboarding","text":"

          Notes about what to do when joining a new team, and what to do to help people who are joining your team.

          "},{"location":"onboarding/#tips","title":"Tips","text":"
          • Try to treat every bad piece of code you find as an opportunity. You were hired to solve problems, and you are going to find problems, so think of those bad pieces of code you find as the things you were hired to find.
          • Try to be charitable when you find bad code. Sometimes people wrote it under immense pressure, with a tight deadline and poorly defined requirements, and they may have been doing the best they could with technology they were unfamiliar with.
          • Start taking your own notes. It's much easier to quickly add or modify notes you own and don't have to consult other people about. You can always copy parts of those notes out into shared resources if they are valuable enough for that.
          "},{"location":"onboarding/#links","title":"Links","text":"
          • https://www.simplermachines.com/why-you-need-a-wtf-notebook
          "},{"location":"openvpn/","title":"openvpn","text":"

          \"Your private path to access network resources and services securely\" - https://openvpn.net/

          "},{"location":"openvpn/#tips","title":"Tips","text":""},{"location":"openvpn/#check-status-of-logged-in-clients","title":"Check status of logged in clients","text":"

          kill -USR2 $OPENVPN_PID is the magic that causes the server to output its current client status. This may be logged to a different logfile, so look around if you don't see status.

          killall -USR2 /usr/sbin/openvpn ; tail /var/log/syslog ;\n

          There also may be a file called /etc/openvpn/openvpn-status.log with current status. Don't ask me why a status log file is located in /etc/... \ud83d\ude44

          "},{"location":"orbstack/","title":"orbstack","text":"

          \"OrbStack is a fast, light, and simple way to run containers and Linux machines on macOS. It's a supercharged alternative to Docker Desktop and WSL, all in one easy-to-use app.\" - https://docs.orbstack.dev

          OrbStack is a modern alternative to VirtualBox and Docker in one app. As of 2024-02-13 it only supports Linux distros, though.

          "},{"location":"osquery/","title":"osquery","text":"

          \"SQL powered operating system instrumentation, monitoring, and analytics.\"

          osquery runs locally and allows you to inspect your host using SQL queries. Tables exist for a variety of useful data, such as file hashes, the process list, last user login, etc.
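
          For example, a quick one-shot query using the interactive shell (the query itself is arbitrary):

          osqueryi \"SELECT pid, name, path FROM processes ORDER BY pid DESC LIMIT 5;\"\n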

          "},{"location":"osquery/#links","title":"Links","text":"
          • https://github.com/facebook/osquery
          • https://osquery.readthedocs.io
          • https://osquery.io/schema/
          • https://github.com/UtahDave/salt-vagrant-demo
          • https://www.digitalocean.com/community/tutorials/how-to-monitor-your-system-security-with-osquery-on-ubuntu-16-04
          • https://kolide.com/fleet - osquery fleet/cluster system
          "},{"location":"outline/","title":"outline","text":"

          \"Journalists need safe access to information to research issues, communicate with sources, and report the news. Outline lets news organizations easily provide their network safer access to the open internet.\" - https://getoutline.org/

          Outline is a self-hosted VPN that is geared toward easy setup.

          • Source code available at https://github.com/jigsaw-code
          "},{"location":"pac/","title":"pac","text":"

          Information about proxy auto-config files.

          "},{"location":"pac/#example-pac-file","title":"Example pac file","text":"

          The following pac file will:

          • Redirect all traffic destined to 192.168.1.0/24 to a proxy running on localhost:47000, but only if we do not have an ip address in that subnet
          • Redirect all traffic destined to 172.16.0.0/16 to a proxy running on localhost:33001
          • All other traffic bypasses the proxy.
          function FindProxyForURL(url, host) {\n  if ((isInNet(host, \"192.168.1.0\", \"255.255.255.0\"))\n  && (! isInNet(myIpAddress(), \"192.168.1.0\", \"255.255.255.0\"))) {\n    return \"SOCKS5 localhost:47000\" ;\n  } else if (isInNet(host, \"172.16.0.0\", \"255.255.0.0\")) {\n    return \"SOCKS5 localhost:33001\" ;\n  } else {\n    return \"DIRECT\" ;\n  }\n}\n
          "},{"location":"pac/#links","title":"Links","text":"
          • http://findproxyforurl.com/official-toolset
          • https://github.com/pacparser/pacparser
          "},{"location":"pandoc/","title":"pandoc","text":"

          Convert between document formats.

          http://pandoc.org/

          "},{"location":"pandoc/#examples","title":"Examples","text":""},{"location":"pandoc/#convert-a-doc-from-mediawiki-to-markdown","title":"Convert a doc from mediawiki to markdown","text":"
          pandoc -f mediawiki -t markdown nfs.mediawiki > nfs.md\n
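
          The same pattern works for other format pairs; for example, converting that markdown file to HTML (filenames are arbitrary):

          pandoc -f markdown -t html nfs.md > nfs.html\n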
          "},{"location":"panopticlick/","title":"panopticlick","text":"

          \"Panopticlick will analyze how well your browser and add-ons protect you against online tracking techniques. We\u2019ll also see if your system is uniquely configured\u2014and thus identifiable\u2014even if you are using privacy-protective software.\" - https://panopticlick.eff.org/

          "},{"location":"passwords/","title":"passwords","text":"

          \"A secret word or phrase that must be used to gain admission to something.\" - https://en.oxforddictionaries.com/definition/password

          "},{"location":"passwords/#generation","title":"Generation","text":""},{"location":"passwords/#pwgen","title":"pwgen","text":"
          $ pwgen 12 3\nahZielooC4ei Ielui3ahh9su aiZoa7fioy1o\n
          "},{"location":"passwords/#apg","title":"apg","text":"

          This tool can show you how to pronounce the random password:

          $ apg -a 1 -m 6 -n 3 -l\nI[hM@}]t: India-LEFT_BRACKET-hotel-Mike-AT_SIGN-RIGHT_BRACE-RIGHT_BRACKET-tango-COLON\nWoqrJ}R+ps Whiskey-oscar-quebec-romeo-Juliett-RIGHT_BRACE-Romeo-PLUS_SIGN-papa-sierra\nzni6VC3 zulu-november-india-SIX-Victor-Charlie-THREE\n
          "},{"location":"passwords/#links","title":"Links","text":"
          • https://nakedsecurity.sophos.com/2016/08/18/nists-new-password-rules-what-you-need-to-know/
          • https://pages.nist.gov/800-63-3/sp800-63-3.html
          "},{"location":"perl/","title":"perl","text":"

          Practical Extraction and Reporting Language

          "},{"location":"perl/#special-variables","title":"Special Variables","text":"
          • \"That thing\": $_
          • Record Separator: $/
          "},{"location":"perl/#techniques","title":"Techniques","text":""},{"location":"perl/#assign-an-array-to-some-matches","title":"Assign an array to some matches","text":"
          @array_of_matches = ($source_string =~ m/..pattern../g);\n
          "},{"location":"perl/#assign-several-variables-to-some-matches","title":"Assign several variables to some matches","text":"
          my ($num, $a, $t) = ($_ =~ m/([0-9]*)\\. (.*) - (.*)\\.mp3/) ;\n
          "},{"location":"perl/#iterate-a-hash","title":"Iterate a hash","text":"
          while(($key, $value) = each(%$_)){\n    print \"$value is $key\\n\" ;\n}\n
          "},{"location":"perl/#print-out-a-file-with-line-numbers","title":"Print out a file with line numbers","text":"
          cat ~/.bash_history | perl -nle 'print \"$.\\t$_\";'\n

          This should probably be done with nl -ba .bash_history instead.

          "},{"location":"perl/#edit-a-file-in-place","title":"Edit a file in-place","text":"

          To change all instances of \"foo\" to \"bar\":

          perl -i -pe 's/foo/bar/g' filename.txt\n
          "},{"location":"perl/#remove-blank-lines-from-a-file","title":"Remove blank lines from a file","text":"
          perl -pi -e \"s/^\\n//\" file.txt\n
          "},{"location":"perl/#remove-lines-from-a-file-that-match-a-certain-regex","title":"Remove lines from a file that match a certain regex","text":"
          perl -i -pe 'if ($_ =~ m/string to remove/ ){$_ = \"\";}' filename.txt\n
          "},{"location":"perl/#sort-a-line-by-spaces","title":"Sort a line by spaces","text":"

          See bash for a bash-only way.

          echo -n \"whiskey tango foxtrot \" \\\n| perl -e '\n  $/=\" \" ;\n  @foo = <STDIN> ;\n  print (sort(@foo)) ;\n  print \"\\n\" ;\n'\n
          "},{"location":"perl/#sort-records-in-a-file-that-are-separated-by-a-blank-line","title":"Sort records in a file that are separated by a blank line","text":"
          #!/usr/bin/perl\n$/ = \"\\n\\n\" ;\nmy @input = (<STDIN>) ;\n\nmy @sorted = sort { lc($a) cmp lc($b) } @input ;\n\nforeach (@sorted) {\n  if (length($_) > 10) { print \"$_\"; }\n}\n
          "},{"location":"perl/#subtract-two-from-the-last-octet-of-a-mac-address","title":"Subtract two from the last octet of a MAC address","text":"
          for X in 24:b6:fd:ff:b7:f{{a..f},{0..9}} ; do\n  echo -n \"${X} - 2 = \" ;\n  echo ${X} \\\n  | perl -ne '\n    @foo = split(\":\",$_) ;\n    $foo[5] = sprintf(\"%02x\", (hex($foo[5]) - 2)) ;\n    $new = join(\":\",@foo) ;\n    print \"$new\\n\" ;\n  ' ;\ndone ;\n
          "},{"location":"perl/#add-one-to-the-last-octet-of-a-mac-address","title":"Add one to the last octet of a MAC address","text":"
          for X in 24:b6:fd:ff:b7:c{{a..f},{0..9}} ; do\n  echo ${X} \\\n  | perl -ne '\n    @foo = split(\":\",$_) ;\n    $foo[5] = sprintf(\"%02x\", (hex($foo[5]) + 1)) ;\n    $new = join(\":\",@foo) ;\n    print \"$new\\n\";\n  ' ;\ndone ;\n
          "},{"location":"pgp/","title":"pgp","text":"

          \"Pretty Good Privacy (PGP) is an encryption program that provides cryptographic privacy and authentication for data communication. PGP is often used for signing, encrypting, and decrypting texts, e-mails, files, directories, and whole disk partitions and to increase the security of e-mail communications. It was created by Phil Zimmermann in 1991.\" - https://en.wikipedia.org/wiki/Pretty_Good_Privacy

          \"GNU Privacy Guard (GnuPG or GPG) is a free software replacement for Symantec's PGP cryptographic software suite. GnuPG is compliant with RFC 4880, which is the IETF standards track specification of OpenPGP. Modern versions of PGP and Veridis' Filecrypt are interoperable with GnuPG and other OpenPGP-compliant systems.\" - https://en.wikipedia.org/wiki/GNU_Privacy_Guard

          "},{"location":"pgp/#links","title":"Links","text":""},{"location":"pgp/#technology","title":"Technology","text":"
          • The GNU Privacy Guard
          • Creating a new GPG key
          • How to create a PGP/GPG-key free of SHA-1
          "},{"location":"pgp/#web-of-trust","title":"Web of Trust","text":"
          • A draft guide to organizing or participating in a PGP key signing party
          • OpenPGP key paper slip generator
          • PIUS: The PGP Individual UID Signer
          "},{"location":"pgp/#philosophy","title":"Philosophy","text":"
          • Op-ed: I'm throwing in the towel on PGP, and I work in security
          • Op-ed: Why I'm not giving up on PGP
          "},{"location":"philips-hue/","title":"Philips Hue","text":"

          \"Philips Hue is your personal wireless lighting system that lets you easily control your light and create the right ambiance for every moment.\" - https://www2.meethue.com

          "},{"location":"philips-hue/#siri-integration","title":"Siri integration","text":"

          Siri knows the names of all of the X11 colors.

          "},{"location":"philips-hue/#links","title":"Links","text":"
          • Enabling the hidden Wi-Fi radio on the Philips Hue Bridge 2.0: Adventures with 802.11n, ZigBee 802.15.4 and OpenWrt
          • https://arantius.github.io/web-color-wheel: Color wheel of named colors, which can all be used to set your hue light colors using Siri.
          "},{"location":"photography/","title":"photography","text":""},{"location":"photography/#software-links","title":"Software Links","text":"
          • exiftool: File metadata swiss-army knife.
          • https://github.com/photoprism/photoprism: Self hosted photo gallery
          • https://github.com/LibrePhotos/librephotos: Self hosted photo gallery
          • https://damselfly.info: \"Damselfly is a server-based Digital Asset Management system.\"
          • https://photostructure.com: \"Your new home for all your photos & videos\"
          • Ask HN: Alternatives to Google Photos?
          "},{"location":"php/","title":"php","text":"

          The PHP scripting language.

          "},{"location":"php/#code-guidelines","title":"Code Guidelines","text":"
          • The PEAR code guidelines are pretty good - https://pear.php.net/manual/en/standards.php
          • Even better coding standards - https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-1-basic-coding-standard.md
          "},{"location":"php/#examples","title":"Examples","text":""},{"location":"php/#convert-date-formats","title":"Convert date formats","text":"

          This converts mysql time to an epoch unix timestamp and back:

          $timestamp = strtotime($mysqltime);\necho date(\"Y-m-d H:i:s\", $timestamp);\n

          "},{"location":"php/#run-code-from-cli","title":"Run code from CLI","text":"
          php -r \"phpinfo();\"\n
          "},{"location":"php/#show-php-cli-env-vars","title":"Show php CLI env vars","text":"

          This shows the location of the ini file used for CLI:

          php -i\n

          "},{"location":"php/#enable-errors","title":"Enable Errors","text":"

          Set display_errors = On in php.ini, or in a php file add:

          error_reporting(E_ALL);\nini_set('display_errors', 1);\n
          "},{"location":"php/#disable-timeout","title":"Disable timeout","text":"
          set_time_limit(0);\nini_set ('max_execution_time', 0);\n
          "},{"location":"php/#random-numbers","title":"Random numbers","text":"
          rand() ; # random int\nrand(1,10) ; # random int between 1 and 10\nrand(100,1000)/100 ; # workaround for generating floats with 2 decimal points\n
          "},{"location":"php/#links","title":"Links","text":"
          • https://aloneonahill.com/blog/if-php-were-british/
          "},{"location":"plex/","title":"plex","text":"

          Plex is a media center system that runs on a variety of platforms including Linux, Roku, macOS, iOS, tvOS, and a variety of smart TVs.

          "},{"location":"plex/#links","title":"Links","text":"
          • https://support.plex.tv/articles/categories/your-media/
          "},{"location":"postgres/","title":"PostgreSQL","text":"

          \"PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance.\" - https://www.postgresql.org

          "},{"location":"postgres/#usage","title":"Usage","text":""},{"location":"postgres/#connect-to-a-database","title":"Connect to a database","text":"
          psql \"postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT:-5432}/${POSTGRES_DB}\"\n
          "},{"location":"postgres/#meta-commands","title":"Meta Commands","text":"

          The postgres shell (psql) has a lot of meta-commands. See https://www.postgresql.org/docs/15/app-psql.html#APP-PSQL-META-COMMANDS for full descriptions.

          • \l : List databases. Filter like \l foo*
          • \c : Connect to database. There are various syntaxes to accomplish this. Here is one: \c \"host=localhost port=5432 dbname=mydb connect_timeout=10 sslmode=disable\"
          • \d \"$table_name\" : Show full definition for the given table
          • \d or \d+ : Show extended table description. Show all with \d+ *
          • \df : List functions
          • \di : List indexes
          • \x : Toggle expanded display. This is the same as \G in MySQL, separating each record and showing each column as a row formatted as column_name | row value.
          "},{"location":"postgres/#show-postgres-version","title":"Show postgres version","text":"
          astronomer_houston=> SELECT version();\n                                          version\n--------------------------------------------------------------------------------------------\n PostgreSQL 9.6.18 on x86_64-pc-linux-gnu, compiled by Debian clang version 10.0.1 , 64-bit\n(1 row)\n
          "},{"location":"postgres/#show-processes","title":"Show processes","text":"

          Each process is one connection to the db. (See How Connections are Established)

          select * from pg_stat_activity ;\n

          or

          select count(*) from pg_stat_activity where usename = 'airflow' ; -- note this is usename, not useRname\n

          or a more refined view

          select pid as process_id,\n       usename as username,\n       datname as database_name,\n       client_addr as client_address,\n       application_name,\n       backend_start,\n       state,\n       state_change\nfrom pg_stat_activity\nwhere state != 'idle' ;\n
          "},{"location":"postgres/#terminate-all-processes-connected-to-a-given-database","title":"Terminate all processes connected to a given database","text":"
          select pg_terminate_backend(pg_stat_activity.pid)\nfrom pg_stat_activity\nwhere pg_stat_activity.datname = 'some_db_name'\n  and pid <> pg_backend_pid();\n
          "},{"location":"postgres/#postgres-in-docker","title":"Postgres in Docker","text":"
          • Official images: https://hub.docker.com/_/postgres

          Some of these syntaxes apply to non-docker interactions too, so long as you remove the docker-isms from them.

          "},{"location":"postgres/#example-docker-compose-file-for-local-development","title":"Example docker-compose file for local development","text":"
          ## https://docs.docker.com/compose/compose-file/\nversion: \"3.7\"\nservices:\n  postgres:\n    # https://hub.docker.com/_/postgres\n    image: \"postgres:latest\"\n    restart: \"always\"\n    env_file: .env # Should contain POSTGRES_DB, POSTGRES_USER, POSTGRES_PASSWORD\n    # Uncomment the 'command' line to enable postgres query logging to the terminal\n    # https://www.postgresql.org/docs/15/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHEN\n    # command: [ \"postgres\", \"-c\", \"log_destination=stderr\", \"-c\", \"log_min_messages=debug\", \"-c\", \"log_min_error_statement=debug\" ]\n\n    expose:\n      - \"5432\"\n    ports:\n      - \"5432:5432\"\n
          "},{"location":"postgres/#dump-a-database","title":"Dump a database","text":"

          https://www.postgresql.org/docs/15/app-pgdump.html

          docker exec \"${POSTGRES_CONTAINER}\" pg_dump -U \"${POSTGRES_USER}\" \"${POSTGRES_DB}\"\n

          Full backups should be performed with pg_dumpall.
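
          For example, following the same docker pattern as above (the output filename is arbitrary):

          docker exec \"${POSTGRES_CONTAINER}\" pg_dumpall -U \"${POSTGRES_USER}\" > all_databases.sql\n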

          "},{"location":"postgres/#load-local-data-into-a-db","title":"Load local data into a db","text":"

          https://www.postgresql.org/docs/15/app-psql.html

          cat foo.sql |\ndocker exec -i \"${POSTGRES_CONTAINER}\" \\\n  psql -U \"${POSTGRES_USER}\" \"${POSTGRES_DB}\"\n
          "},{"location":"postgres/#example-queries","title":"Example queries","text":""},{"location":"postgres/#show-sizes-for-all-dbs-in-a-given-server","title":"Show sizes for all DBs in a given server","text":"
          pokemon_collector=> SELECT\n    pg_database.datname,\n    pg_database_size(pg_database.datname) AS size,\n    pg_size_pretty(pg_database_size(pg_database.datname))\n    FROM pg_database\n    ORDER BY size ;\n     datname    |     size      | pg_size_pretty\n----------------+---------------+----------------\n postgres       |       7436824 | 7263 kB\n cloudsqladmin  |       7641624 | 7463 kB\n pokemon        |       8231448 | 8039 kB\n smeargle_4080  |      10230296 | 9991 kB\n ampharos_7008  |      10877464 | 10 MB\n litten_7803    |      12655128 | 12 MB\n silvally_9992  |   64589765144 | 60 GB\n cinderace_8705 |   86162946584 | 80 GB\n emolga_1932    |   92073171480 | 86 GB\n aegislash_4257 | 1265669151256 | 1179 GB\n(10 rows)\n
          "},{"location":"postgres/#show-all-of-the-table-sizes-for-a-given-schema","title":"Show all of the table sizes for a given schema","text":"
          sunny_moon_4257_airflow=> SELECT table_name, pg_total_relation_size(table_name), pg_size_pretty(pg_total_relation_size(table_name))\nFROM sunny_moon_4257_airflow.information_schema.tables\nwhere table_schema = 'airflow'\nORDER BY pg_total_relation_size(table_name) desc ;\n          table_name           | pg_total_relation_size | pg_size_pretty\n-------------------------------+------------------------+----------------\n xcom                          |          1269437857792 | 1182 GB\n job                           |               77586432 | 74 MB\n dag_run                       |               60440576 | 58 MB\n log                           |               58630144 | 56 MB\n task_instance                 |               31784960 | 30 MB\n serialized_dag                |                 851968 | 832 kB\n rendered_task_instance_fields |                 843776 | 824 kB\n task_fail                     |                 638976 | 624 kB\n import_error                  |                 393216 | 384 kB\n dag                           |                 122880 | 120 kB\n dag_code                      |                 122880 | 120 kB\n ab_user                       |                  98304 | 96 kB\n ab_permission_view_role       |                  90112 | 88 kB\n astro_available_version       |                  90112 | 88 kB\n slot_pool                     |                  81920 | 80 kB\n ab_user_role                  |                  73728 | 72 kB\n ab_view_menu                  |                  73728 | 72 kB\n ab_permission                 |                  73728 | 72 kB\n ab_role                       |                  73728 | 72 kB\n ab_permission_view            |                  73728 | 72 kB\n astro_version_check           |                  65536 | 64 kB\n sensor_instance               |                  57344 | 56 kB\n alembic_version               |                  57344 | 56 kB\n connection                    |                  24576 | 24 kB\n task_reschedule               |                  24576 | 24 kB\n ab_register_user              |                  24576 | 24 kB\n sla_miss                      |                  24576 | 24 kB\n variable                      |                  24576 | 24 kB\n dag_pickle                    |                  16384 | 16 kB\n known_event                   |                  16384 | 16 kB\n dag_tag                       |                   8192 | 8192 bytes\n known_event_type              |                   8192 | 8192 bytes\n(32 rows)\n
          "},{"location":"postgres/#show-the-rows-with-the-largest-values","title":"Show the rows with the largest values","text":"
          sunny_moon_4257_airflow=> select timestamp, pg_column_size(value) as size from xcom order by size desc limit 20 ;\n           timestamp           |   size\n-------------------------------+-----------\n 2021-09-25 14:23:40.0142+00   | 188149150\n 2021-09-24 14:24:39.699152+00 | 171979158\n 2021-09-23 14:24:08.201579+00 | 158880026\n 2021-09-22 14:24:03.309817+00 | 144807562\n 2021-09-21 14:24:25.052796+00 | 129923562\n(5 rows)\n
          "},{"location":"postgres/#see-also","title":"See Also","text":"
          • pgcli: \"Pgcli is a command line interface for Postgres with auto-completion and syntax highlighting.\" https://github.com/dbcli/pgcli
          • http://www.pgadmin.org - Graphical UI for postgres
          • MySQL: Another relational database
          • SQLite: File based local database that does not require a server.
          • https://hakibenita.com/postgresql-unknown-features: \"Lesser Known PostgreSQL Features. Features you already have but may not know about!\"
          • https://github.com/citusdata/citus: Horizontal scaling extension
          • https://cloud.google.com/alloydb: GCP managed postgres that has advanced clustered scaling features
          • https://www.postgresql.org/docs/current/postgres-fdw.html: module that provides sharding across multiple postgres servers
          • https://www.cybertec-postgresql.com/en/btree-vs-brin-2-options-for-indexing-in-postgresql-data-warehouses
          • https://www.amazingcto.com/postgres-for-everything
          • https://postgrest.org/en/stable: REST API directly from postgres
          • https://challahscript.com/what_i_wish_someone_told_me_about_postgres
          "},{"location":"powershell/","title":"powershell","text":"

          PowerShell is a shell for Windows operating systems, and it was ported to Linux in 2016.

          https://github.com/PowerShell/PowerShell/

          "},{"location":"powershell/#profileps1","title":"Profile.ps1","text":"

          On startup, PowerShell will run any .ps1 files it finds in the WindowsPowerShell directory under My Documents. There is allegedly a Profile.ps1 file in there by default.

          $env:Path = \"c:\\Users\\user1\\Dropbox\\Scripts;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\System32\\Wbem;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\\"\n
          "},{"location":"powershell/#examples","title":"Examples","text":""},{"location":"powershell/#restart-a-remote-computer","title":"Restart a remote computer","text":"
          Restart-Computer remotehostname -Force\n
          "},{"location":"powershell/#find-a-command-that-matches-a-substring","title":"Find a command that matches a substring","text":"
          get-command *time*\n
          "},{"location":"powershell/#get-help-on-commands-that-match-a-substring","title":"Get help on commands that match a substring","text":"
          get-help *time*\n
          "},{"location":"powershell/#show-acls-of-the-current-dir","title":"Show ACLs of the current dir","text":"
          get-acl | format-list\n
          "},{"location":"powershell/#show-system-bios-information","title":"Show system BIOS information","text":"
          Get-WmiObject -ComputerName hostname win32_bios\n
          "},{"location":"powershell/#show-object-methods","title":"Show object methods","text":"
          $foo | Get-Member\n
          "},{"location":"powershell/#browse-the-registry","title":"Browse the registry","text":"
          Set-Location HKCU:\\Software\\Microsoft\\Windows\\\nGet-ChildItem\n
          "},{"location":"powershell/#show-top-processes","title":"Show top processes","text":"
          while (1) { ps | sort -desc cpu | select -first 30; sleep -seconds 1; cls }\n
          "},{"location":"powershell/#browse-the-cert-store","title":"Browse the Cert store","text":"
          Set-Location cert:\\CurrentUser\\\nGet-ChildItem\n
          "},{"location":"powershell/#get-a-list-of-stopped-services","title":"Get a list of stopped services","text":"
          Get-Service | Where-Object { $_.Status -eq \"Stopped\" }\n
          "},{"location":"powershell/#compare-two-objects","title":"Compare two objects","text":"

          This will only show the lines that are not common:

          Compare-Object $(Get-VIPrivilege -role admin) $(Get-VIPrivilege -role member)\n
          "},{"location":"powershell/#save-object-to-a-csv","title":"Save object to a csv","text":"
          Get-Process | Export-Csv -Encoding unicode processes.csv\n
          "},{"location":"powershell/#load-object-from-a-csv-and-parse-it","title":"Load object from a csv and parse it","text":"
          Import-Csv ./processes.csv | Where-Object { $_.Name -like \"*systemd*\" } | Select-Object -last 10 | Format-Table\n
          "},{"location":"powershell/#replacement-for-unix-tail","title":"Replacement for unix tail","text":"

          tail filename

          Get-Content [filename] | Select-Object -Last 10\n

          tail -f

          Get-Content -Path \"C:\\scripts\\test.txt\" -Wait\n
          "},{"location":"powershell/#replacement-for-unix-wc","title":"Replacement for unix wc","text":"
          Get-Content test.csv | Measure-Object -line -word -character\n
          "},{"location":"powershell/#replacement-for-unix-time","title":"Replacement for unix time","text":"
          Measure-Command { Sleep 5 }\n
          "},{"location":"powershell/#replacement-for-unix-grep-b2-a1","title":"Replacement for unix grep -B2 -A1","text":"
          Get-Content test.csv | Select-String \"searchstring\" -Context 2,1 -CaseSensitive\n
          "},{"location":"powershell/#install-powershell-in-ubuntu-1804","title":"Install PowerShell in Ubuntu 18.04","text":"
          wget -q https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb && \\\n    dpkg -i packages-microsoft-prod.deb && \\\n    apt-get update && \\\n    apt-get install -y powershell\n
          "},{"location":"powershell/#see-also","title":"See Also","text":"
          • http://poshcode.org/: Used to be a great PSH site with example code. Now it looks like it's just a chat network portal, which IMHO seems much less useful.
          • https://developer.vmware.com/web/tool/12.5.0/vmware-powercli: PowerCLI: VMware PSH CLI
          "},{"location":"powertop/","title":"powertop","text":"

          \"PowerTOP is a Linux tool to diagnose issues with power consumption and power management.\" - https://github.com/fenrus75/powertop

          "},{"location":"powertop/#examples","title":"Examples","text":""},{"location":"powertop/#generate-an-html-power-report","title":"Generate an html power report","text":"
          powertop --html=powertop.html\n
          "},{"location":"powertop/#see-also","title":"See also","text":"
          • Top variant list
          "},{"location":"procurve/","title":"procurve","text":"

          Procurve switches from HP offer cheap layer 2 and layer 3 switching.

          "},{"location":"procurve/#copy-config-files","title":"Copy config files","text":"

          scp user@switch:cfg/startup-config ./\nscp user@switch:cfg/running-config ./\n

          "},{"location":"procurve/#firmware-update","title":"Firmware update","text":"
          • Via ssh: linuxclient$ scp /path/to/image user@switch-hostname:/os/primary
          • Via tftp: switch# copy tftp flash 172.28.115.151 flashfilename.swi primary. This doesn't always work; try scp if it fails.

          Then on the switch...

          system boot flash primary\n

          "},{"location":"procurve/#config-examples","title":"Config Examples","text":""},{"location":"procurve/#set-an-ip-for-the-default-vlan","title":"Set an IP# for the default VLAN","text":"
          interface vlan 1 ip address 172.28.115.234 255.255.255.0\nip default-gateway 172.28.115.1\n
          "},{"location":"procurve/#set-up-additional-vlans","title":"Set up additional VLANs","text":"
          vlan 100 untagged 2\nvlan 100 ip address 172.28.100.1\nvlan 102 untagged 3\nvlan 102 ip address 172.28.102.1\n
          "},{"location":"procurve/#enable-routing-between-connected-networks","title":"Enable routing between connected networks","text":"

          ip routing\n

          "},{"location":"procurve/#set-up-sntp-clock","title":"Set up SNTP clock","text":"
          sntp server 172.28.111.16\ntimesync sntp\nsntp 120\nsntp unicast\n
          "},{"location":"procurve/#alter-dst-settings","title":"Alter DST settings","text":"

          time daylight-time-rule User-defined begin-date 3/8 end-date 11/1\n

          "},{"location":"procurve/#enable-ssh","title":"Enable SSH","text":"
          crypto key generate ssh\nip ssh\nip ssh version 2\nip ssh filetransfer\n
          "},{"location":"procurve/#disable-telnet","title":"Disable telnet","text":"

          no telnet-server\n

          "},{"location":"procurve/#set-up-snmp","title":"Set up snmp","text":"

          snmp-server community \"foobar\" Operator\n

          "},{"location":"procurve/#set-up-a-vlan-112-port-group","title":"Set up a VLAN 112 port group","text":"

          vlan 112 untagged 6-12\n

          "},{"location":"procurve/#set-two-groups-of-ports-as-a-trunks-eg-to-use-with-vmware-in-static-lacp","title":"Set two groups of ports as a trunks (eg: to use with VMware in static LACP)","text":"
          trunk 1-4 trk1 trunk\ntrunk 5-8 trk2 trunk\n
          "},{"location":"procurve/#set-up-vlan-multiplexing","title":"Set up VLAN multiplexing","text":"
          vlan 114 tagged 24\nvlan 115 tagged 24\nvlan 114 tagged Trk1\nvlan 115 tagged Trk1\n
          "},{"location":"procurve/#example-config","title":"Example Config","text":"
          hostname \"HP-CORE-0\"\nsnmp-server location \"Cup1-Closet1\"\nmax-vlans 64\ntime timezone -480\ntime daylight-time-rule User-defined begin-date 3/8 end-date 11/1\nconsole inactivity-timer 5\nno web-management\nweb-management ssl\nno telnet-server\ninterface 2\n   name \"Load Test Cluster\"\nexit\ninterface 5\n   name \"hq-vm-1\"\nexit\ninterface 6\n   name \"hq-vm-1\"\nexit\ninterface 8\n   name \"beast\"\nexit\ninterface 10\n   name \"Winserv\"\nexit\ninterface 12\n   name \"IT\"\nexit\ninterface 13\n   name \"Services\"\nexit\ninterface 14\n   name \"IT\"\nexit\ninterface 15\n   name \"IT\"\nexit\ninterface 16\n   name \"IT\"\nexit\ninterface 17\n   name \"beast\"\nexit\ninterface 18\n   name \"VPN\"\nexit\ninterface 19\n   name \"IT\"\nexit\ninterface 20\n   name \"IT\"\nexit\ninterface 21\n   name \"Radio Station\"\nexit\ninterface 22\n   name \"AT&T Network\"\nexit\ninterface 23\n   name \"HP-CORE trunk\"\nexit\ninterface 24\n   name \"Jun1-trunk\"\nexit\nip default-gateway 10.8.100.1\nsntp server 10.8.5.220\nip routing\ntimesync sntp\nsntp unicast\nsnmp-server community \"public\" Unrestricted\nsnmp-server host 10.8.5.189 \"public\"\nvlan 1\n   name \"DEFAULT_VLAN\"\n   untagged 4,14\n   no ip address\n   tagged 23\n   no untagged 1-3,5-13,15-22,24\n   exit\nvlan 101\n   name \"Services\"\n   untagged 3,8,10,15,19\n   ip address 10.8.1.1 255.255.255.0\n   ip helper-address 10.8.5.220\n   tagged 2,5-6,23-24\n   exit\nvlan 102\n   name \"LoadTest\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 2,5-6,15,23-24\n   exit\nvlan 103\n   name \"QATest\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23\n   exit\nvlan 104\n   name \"PS\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23-24\n   exit\nvlan 105\n   name \"IT\"\n   untagged 1,5-6,9,12-13,16,20\n   ip address 10.8.5.1 255.255.255.0\n   ip helper-address 10.8.5.220\n   tagged 2,15,23-24\n   exit\nvlan 110\n   name \"Wireless\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23-24\n   exit\nvlan 111\n   name \"Eng\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23-24\n   exit\nvlan 113\n   name \"SW2\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 23\n   exit\nvlan 112\n   name \"SW1\"\n   untagged 21\n   ip address 10.8.12.1 255.255.255.0\n   ip helper-address 10.8.5.220\n   tagged 23\n   exit\nvlan 100\n   name \"Backbone\"\n   ip address 10.8.100.100 255.255.255.0\n   tagged 23-24\n   exit\nvlan 114\n   name \"Upstairs\"\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 23-24\n   exit\nvlan 106\n   name \"VPN\"\n   untagged 18\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 15,23-24\n   exit\nvlan 188\n   name \"OldNet\"\n   untagged 11,17\n   no ip address\n   ip helper-address 10.8.5.220\n   tagged 23-24\n   exit\nvlan 42\n   name \"ATT\"\n   untagged 22\n   tagged 23-24\n   exit\nvlan 107\n   name \"DMZ\"\n   untagged 7\n   ip helper-address 10.8.5.220\n   tagged 15,24\n   exit\nvlan 109\n   name \"Jail\"\n   tagged 23-24\n   exit\ndhcp-relay option 82 keep\nip route 0.0.0.0 0.0.0.0 10.8.100.1\nip route 10.8.11.0 255.255.255.0 10.8.100.101\nip route 10.8.3.0 255.255.255.0 10.8.100.101\nip route 10.172.188.0 255.255.255.0 10.8.100.1\nip route 10.8.13.0 255.255.255.0 10.8.100.101\nip route 10.8.2.0 255.255.255.0 10.8.100.1\nip route 10.8.10.0 255.255.255.0 10.8.100.1\nip route 10.8.7.0 255.255.255.0 10.8.100.1\nip route 10.8.4.0 255.255.255.0 10.8.100.1\nip route 10.8.14.0 255.255.255.0 
10.8.100.102\nip route 10.8.9.0 255.255.255.0 10.8.100.1\nstack commander \"HP-CORE\"\nstack auto-grab\nstack member 1 mac-address 0016b90b4ea0\nstack member 2 mac-address 0016b968df40\nspanning-tree\nip ssh\nip ssh filetransfer\nno tftp client\nno tftp server\npassword manager\npassword operator\n
          "},{"location":"prometheus/","title":"Prometheus","text":"

          \"Prometheus, a Cloud Native Computing Foundation project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true.\" - https://github.com/prometheus/prometheus

          "},{"location":"prometheus/#tips","title":"Tips","text":""},{"location":"prometheus/#restart-prometheus-pods-in-kubernetes","title":"Restart prometheus pods in kubernetes","text":"
          kubectl get pods -l component=prometheus -o name |\nwhile read -r pod ; do\n  echo $pod\n  kubectl port-forward \"$pod\" 9090 &\n  sleep 10  # to let the port-forward establish before using it\n  curl -X POST localhost:9090/-/reload\n  kill %%\n  sleep 5  # to let the previous process exit before starting another port forward\ndone\n
          "},{"location":"prometheus/#validate-a-prometheus-config","title":"Validate a prometheus config","text":"
          promtool check config --syntax-only prometheus-config.yaml\n
          "},{"location":"prometheus/#links","title":"Links","text":"
          • https://prometheus.io/docs/introduction/overview
          • https://prometheus.io/docs/prometheus/latest/querying/basics: Good intro to promql fundamentals.
          • https://the-zen-of-prometheus.netlify.app
          • https://www.robustperception.io/cardinality-is-key
          • https://github.com/cortexproject/cortex: \"Horizontally scalable, highly available, multi-tenant, long term storage for Prometheus.\"
          • https://github.com/thanos-io/thanos: \"Thanos is a set of components that can be composed into a highly available metric system with unlimited storage capacity, which can be added seamlessly on top of existing Prometheus deployments.\"
          • https://github.com/prometheus/prometheus/blob/release-2.42/tsdb/docs/format
          • https://www.robustperception.io/using-tsdb-analyze-to-investigate-churn-and-cardinality
          • https://fiberplane.com/blog/why-are-prometheus-queries-hard
          • https://blog.cloudflare.com/how-cloudflare-runs-prometheus-at-scale
          "},{"location":"protobuf/","title":"protobuf","text":"

          \"Protocol buffers are a language-neutral, platform-neutral extensible mechanism for serializing structured data\" - https://developers.google.com/protocol-buffers/

          \"Google's data interchange format\" - https://github.com/google/protobuf

          "},{"location":"ps/","title":"ps","text":"

          ps shows a list of processes in a unix system.

          "},{"location":"ps/#examples","title":"Examples","text":""},{"location":"ps/#show-the-exact-command-used-to-start-all-process","title":"show the exact command used to start all process","text":"
          ps axwwo command\n
          "},{"location":"ps/#show-a-process-tree-view","title":"show a process tree view","text":"
          ps auxf\n
          "},{"location":"ps/#show-only-all-running-processes","title":"show only all running processes","text":"

          This excludes sleeping processes and threads.

          ps auxr\n
          "},{"location":"ps/#show-process-list-sorted-by-process-start-time","title":"Show process list sorted by process start time","text":"
          ps hax -o lstart,pid,args |\n  while read -r a b c d e f g ; do\n    echo \"$(date -d \"$a $b $c $d $e\" \"+%F %T%z\") $f $g\" ;\n  done |\n  sort\n
          "},{"location":"ps/#show-all-processes-not-owned-by-a-user-and-no-threads-including-cgroup-name","title":"Show all processes not owned by a user, and no threads, including cgroup name","text":"
          • -N after a condition hides the results of that condition
          • All linux kernel threads have parent pid 2 (kthreadd)
          FILTERED_USER=zerocool # user must exist\nps -o pid,ppid,user,comm,flags,%cpu,sz,%mem,cgname --user \"${FILTERED_USER}\" -N --ppid 2 -N\n
          "},{"location":"ps/#show-linux-kernel-namespaces-of-all-processes","title":"Show linux kernel namespaces of all processes","text":"

          You have to use sudo to see all processes in all namespaces. The awk filters out kernel threads, which are irrelevant in this context.

          sudo ps -axe -o user,pid,ipcns,mntns,netns,pidns,userns,utsns,comm | awk '$3!=\"-\"'\n

          The output will look like:

          $ sudo ps -axe -o user,pid,ipcns,mntns,netns,pidns,userns,utsns,comm | awk '$3!=\"-\"' | grep -E \"udevd|init|MNTNS|dockerd\"\nUSER         PID      IPCNS      MNTNS      NETNS      PIDNS     USERNS      UTSNS COMMAND\nroot         477 4026531839 4026532239 4026531840 4026531836 4026531837 4026532259 systemd-udevd\nroot         748 4026531839 4026531841 4026531840 4026531836 4026531837 4026531838 dockerd\nroot       17781 4026532479 4026532477 4026531840 4026532480 4026531837 4026532478 s6-linux-init-s\n
          "},{"location":"ps_mem/","title":"ps_mem","text":"

          \"A utility to accurately report the in core memory usage for a program.\" - https://github.com/pixelb/ps_mem

          "},{"location":"ps_mem/#usage-examples","title":"Usage examples","text":""},{"location":"ps_mem/#simple-usage","title":"Simple usage","text":"
          $ sudo ps_mem\n Private  +   Shared  =  RAM used    Program\n\n144.0 KiB +  12.5 KiB = 156.5 KiB    acpid\n144.0 KiB +  31.5 KiB = 175.5 KiB    hald-addon-acpi\n160.0 KiB +  56.5 KiB = 216.5 KiB    hald-addon-input\n...snip...\n 17.9 MiB + 101.0 KiB =  18.0 MiB    mysqld [updated]\n 25.5 MiB + 516.5 KiB =  26.0 MiB    salt-minion\n 31.6 MiB + 730.0 KiB =  32.3 MiB    python (2)\n 41.0 MiB + 309.5 KiB =  41.3 MiB    ruby\n 45.5 MiB +  36.0 KiB =  45.6 MiB    init\n 48.9 MiB +   4.1 MiB =  53.0 MiB    ssh (48)\n 57.3 MiB +   2.5 MiB =  59.7 MiB    bash (114)\n115.0 MiB +  86.0 KiB = 115.1 MiB    named\n148.3 MiB + 132.5 KiB = 148.4 MiB    java\n  1.4 GiB + 449.5 KiB =   1.4 GiB    screen (15)\n---------------------------------\n                          2.0 GiB\n=================================\n
          "},{"location":"psp/","title":"psp","text":"

          Playstation Portable

          "},{"location":"psp/#links","title":"Links","text":"
          • Custom firmware and homebrew
          "},{"location":"pssh/","title":"pssh","text":"

          Parallel SSH tools for running commands on multiple systems simultaneously.

          • http://www.theether.org/pssh/
          "},{"location":"pssh/#examples","title":"Examples","text":""},{"location":"pssh/#run-a-command-on-hosts-contained-in-a-file-showing-stdin-and-stdout","title":"Run a command on hosts contained in a file, showing stdin and stdout","text":"
          pssh -h hostnames.txt -i some_command some_arg\n
          "},{"location":"pssh/#run-commands-and-view-results-on-many-hosts","title":"Run commands and view results on many hosts","text":"
          o=$(date +%F-%T)\npssh -o \"$o\" -h hosts.txt uname -a\ngrep -r . $o\n
          "},{"location":"pssh/#run-two-commands-on-many-hosts-using-bash-expansion-for-host-list","title":"Run two commands on many hosts using bash expansion for host list","text":"
          o=$(date +pssh-%T)\npssh -p 50 -t 60 {-H\\ sea-z-app00{1..9},} -o $o 'whoami ; hostname ;'\ngrep -r . $o\n
          "},{"location":"pssh/#install-a-package-on-many-hosts","title":"Install a package on many hosts","text":"
          fping < hosts.txt | awk '$3 == \"alive\" {print $1}' > alive.txt\npssh \\\n  -h alive.txt \\\n  -o out_dir \\\n  -l root \\\n  yum -y localinstall ~danielh/rpms/cfengine-community-3.6.2-1.x86_64.rpm\n

          or directly from a db query and fping...

          pssh \\\n  -h <(\n    invdb -d sjc-z-01opsdbw 'select hostname from servers where colo = \"sjc\";' |\n    sort -u |\n    egrep '[0-9]+6[^0-9]' |\n    fping 2> /dev/null |\n    awk '$3 == \"alive\" {print $1}'\n  ) \\\n  -o out_dir \\\n  -l root \\\n  yum -y localinstall ~danielh/rpms/cfengine-community-3.6.2-1.x86_64.rpm\n

          or from mco...

          o=$(date +pssh-%T) ; pssh -O GlobalKnownHostsFile=/dev/null -O UserKnownHostsFile=/dev/null -O StrictHostKeyChecking=no -t300 -p10 -h <(mco find -C role::devbox) -o \"$o\" 'sudo apt-get install -y silversearcher-ag' ; grep -r . \"$o\" ;\n
          "},{"location":"ptp/","title":"ptp","text":"

          \"The Precision Time Protocol (PTP) is a protocol used to synchronize clocks throughout a computer network. On a local area network, it achieves clock accuracy in the sub-microsecond range, making it suitable for measurement and control systems.\" - https://en.wikipedia.org/wiki/Precision_Time_Protocol

          "},{"location":"ptp/#links","title":"Links","text":"
          • RFC 8173: Precision Time Protocol Version 2 (PTPv2)
          • Precision System Synchronization with the IEEE-1588 Precision Time Protocol (PTP)
          • GigE Vision
          • NTP vs PTP: Network Timing Smackdown!
          "},{"location":"ptp/#see-also","title":"See Also","text":"
          • time - Notes on time technologies
          • ntp - Network Time Protocol
          "},{"location":"puppet/","title":"Puppet","text":"

          \"Puppet is an open-source configuration management tool. It runs on many Unix-like systems as well as on Microsoft Windows, and includes its own declarative language to describe system configuration.\" - https://en.wikipedia.org/wiki/Puppet_(software)

          "},{"location":"puppet/#videos-and-links","title":"Videos and links","text":"
          • Overview of Puppet's architecture
          • Puppet Documentation Index
          • Introduction to Puppet
          • Function Reference
          • stdlib is another good function reference.
          • Language: Basics
          • Include-like vs. resource-like class instantiation
          • Style Guide
          • Vagrant Docs - Puppet Apply Provisioner
          • Downloads
          • PuppetConf 2015
          • Designing Puppet: Roles/Profiles Pattern - based on the blog post Designing Puppet - Roles and Profiles
          • Building a Functional Puppet Workflow Part 2: Roles and Profiles
          • Configuration Management as Legos
          "},{"location":"puppet/#examples","title":"Examples","text":""},{"location":"puppet/#standalone-mode","title":"Standalone mode","text":"
          • puppet apply /path/to/manifests works, or you can specify a .pp file
          "},{"location":"puppet/#show-variables-about-the-host-that-puppet-knows-facts","title":"Show variables about the host that puppet knows (facts)","text":"
          facter\n
          "},{"location":"puppet/#show-how-puppet-interacts-with-a-resource","title":"Show how puppet interacts with a resource","text":"
          puppet describe cron\n
          "},{"location":"puppet/#show-available-puppet-types","title":"Show available puppet types","text":"
          puppet resource --types\n
          "},{"location":"puppet/#show-the-puppet-code-that-will-create-a-resource","title":"Show the puppet code that will create a resource","text":"
          $ puppet resource file /etc/hosts\nfile { '/etc/hosts':\n  ensure  => 'file',\n  content => '{md5}9ffbd726fd5b15de760cc0150d607628',\n  ctime   => 'Wed Apr 01 17:05:59 -0700 2015',\n  group   => '0',\n  mode    => '644',\n  mtime   => 'Wed Apr 01 17:05:59 -0700 2015',\n  owner   => '0',\n  type    => 'file',\n}\n
          "},{"location":"puppet/#tests","title":"Tests","text":"
          • https://rspec-puppet.com/documentation/
          "},{"location":"puppet/#marionette-collective","title":"Marionette Collective","text":"

          \"The Marionette Collective, also known as MCollective, is a framework for building server orchestration or parallel job-execution systems. Most users programmatically execute administrative tasks on clusters of servers.\" - http://docs.puppetlabs.com/mcollective/

          • Overview of MCollective Components and Configuration
          • Invoking MCollective actions
          • Cheatsheet: https://coderwall.com/p/ig9mxa/mcollective-mco-cheat-sheet
          • Vagrant demo: https://github.com/ripienaar/mcollective-vagrant
          "},{"location":"puppet/#mco","title":"mco","text":""},{"location":"puppet/#show-some-puppet-cluster-stats","title":"Show some puppet cluster stats","text":"
          mco puppet summary\nmco puppet count\nmco puppet status\n
          "},{"location":"puppet/#find-a-random-node-in-the-cluster","title":"Find a random node in the cluster","text":"
          mco find -1\n
          "},{"location":"puppet/#ping-all-nodes-in-the-puppet-cluster","title":"Ping all nodes in the puppet cluster","text":"
          mco ping\n
          "},{"location":"puppet/#show-if-a-file-exists-on-each-host-in-the-cluster","title":"Show if a file exists on each host in the cluster","text":"
          mco filemgr -f /srv/nginx status\n
          "},{"location":"puppet/#use-fstat-and-md5-to-detect-files-needing-repair","title":"Use fstat and md5 to detect files needing repair","text":"
          mco find -S \"fstat('/srv/somedir/somefile').md5=/af6db18c6dfa81c294895003e13a2eef/\" > files_needing_attention.txt\npssh -h files_needing_attention.txt) 'do_something_to_the_file'\n
          "},{"location":"puppet/#use-fstat-to-find-hosts-where-a-directory-has-not-been-modified-recently","title":"Use fstat to find hosts where a directory has not been modified recently","text":"
          mco find -S \"fstat('/srv').mtime_seconds<$(date +%s -d '-8 hours')\"\n
          "},{"location":"puppet/#show-stats-about-which-oses-you-have","title":"Show stats about which OSes you have","text":"
          mco facts lsbdistdescription\n
          "},{"location":"puppet/#show-all-ip-addresses-on-all-hosts-where-a-configured-ip-address-matches-a-regex","title":"Show all ip addresses on all hosts where a configured IP address matches a regex","text":"
          mco facts all_ipaddresses -F 'all_ipaddresses=~10\\.(56|29)\\.'\n
          "},{"location":"puppet/#show-a-report-about-uptimes-over-a-year","title":"Show a report about uptimes over a year","text":"
          mco facts uptime -F 'uptime_days>365' |\nawk '$2 == \"days\" {print}' |\nsort -n -k1 |\ncolumn -t\n
          "},{"location":"puppet/#find-machines-where-a-fact-is-true","title":"Find machines where a fact is true","text":"
          mco find is_ec2\n

          Which is the same as

          mco find -W is_ec2=true\n
          "},{"location":"puppet/#find-machines-that-have-a-certain-fact-value","title":"Find machines that have a certain fact value","text":"
          mco find --with-fact lsbdistcodename=lucid\n
          "},{"location":"puppet/#show-a-fact-on-machines-that-have-a-specific-fact-value","title":"Show a fact on machines that have a specific fact value","text":"
          mco facts role --with-fact lsbdistcodename=lucid -v\n
          "},{"location":"puppet/#find-ec2-hosts-with-low-uptime","title":"Find ec2 hosts with low uptime","text":"
          mco find -W 'is_ec2=true uptime_seconds<7200'\n
          "},{"location":"puppet/#show-detailed-info-about-a-node","title":"Show detailed info about a node","text":"
          mco inventory fqdn.example.com\n
          "},{"location":"puppet/#find-nodes-that-match-a-config-management-class","title":"Find nodes that match a config management class","text":"
          mco find -C role::awsadmin\n
          "},{"location":"puppet/#show-the-classes-for-a-given-host","title":"Show the classes for a given host","text":"
          sort /var/lib/puppet/state/classes.txt\n
          "},{"location":"puppet/#kick-off-a-puppet-run-on-all-hosts-of-a-certain-class","title":"Kick off a puppet run on all hosts of a certain class","text":"

The following two commands are essentially equivalent; both use the puppet agent of mco. The only differences are the use of runall vs runonce, and the method that performs parallel execution. I'm not sure what difference there is in the code path.

          mco rpc    -C \"class_boolean\" -F \"fact_name=fact_value\" --batch 10 --agent puppet --action runonce\nmco puppet -C \"class_boolean\" -F \"fact_name=fact_value\" runall 10\n
          "},{"location":"puppet/#show-the-status-and-puppet-policy-about-a-package-on-all-hosts","title":"Show the status and puppet policy about a package on all hosts","text":"
          mco rpc package status package=openssh-client --discovery-timeout 60 --json\n
          "},{"location":"puppet/#upgrade-an-installed-package-on-10-random-web-hosts","title":"Upgrade an installed package on 10 random web hosts","text":"

          This upgrades, but does not install if the package is not already present.

          mco package update 'nginx' -I '/web/' --limit=10\n
          "},{"location":"puppet/#show-breakdown-of-hosts-by-os-version-by-role","title":"Show breakdown of hosts by OS version by role","text":"
          mco facts -v --wc role::mon lsbdistdescription\n
          "},{"location":"puppet/#use-mco-to-find-packages-of-a-certain-version-on-a-certain-os","title":"Use mco to find packages of a certain version on a certain OS","text":"
          mco rpc package status package=apt -j -F lsbdistcodename=trusty > cache.json\njq -c '.[] | select(.data.ensure == \"1.0.1ubuntu2\") | { version: .data.ensure, hostname: .sender }' cache.json\n
          "},{"location":"puppet/#hiera","title":"Hiera","text":"

          \"Hiera is a key/value lookup tool for configuration data, built to make Puppet better and let you set node-specific data without repeating yourself.\" - http://docs.puppetlabs.com/hiera/latest/

          • https://github.com/puppetlabs/hiera
          • http://www.craigdunn.org/2011/10/puppet-configuration-variables-and-hiera/
          "},{"location":"puppet/#r10k","title":"r10k","text":"

The suggested workflow for puppet is to use r10k on a control repo to manage the modules on your puppetmaster and the environments it provides. Each module is declared in the ambiguously named Puppetfile, either as a Puppet Forge module name or as a git repo. When r10k puppetfile install -v is run, all modules listed in this file are installed according to their definitions, and all modules that are not in this file are purged. r10k will also set up environments based on the git branches of the control repo. This workflow is described in detail at Managing and deploying Puppet code. It assumes you are not using a puppet apply type setup, which makes it difficult to follow for people playing with this at home without a puppetmaster, such as in vagrant or on Raspberry Pis.
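As a minimal sketch (the module version and git URL here are hypothetical), a Puppetfile might be created and applied like this:

cat > Puppetfile <<'EOF'\nmod 'puppetlabs/stdlib', '9.4.1'\nmod 'mymodule',\n  :git => 'https://gitserver/path/mymodule.git',\n  :ref => 'v1.2.3'\nEOF\nr10k puppetfile install -v\n

With this file, exactly these two modules are installed and any other module in the module directory is purged.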

          "},{"location":"pv/","title":"pv","text":"

          pv - monitor the progress of data through a pipe

          This can be used in place of dd or dcfldd in some cases, such as copying disks or files. It's also useful for including with nc so you can see stats about the flow of that pipe.
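For example, a minimal sketch of watching the throughput of a file sent between two hosts with nc (the hostname and port are hypothetical, and nc flag syntax varies between netcat implementations):

## on the receiving host\nnc -l 9999 | pv > received.img\n## on the sending host\npv disk.img | nc receiving-host 9999\n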

          "},{"location":"pv/#examples","title":"Examples","text":""},{"location":"pv/#show-the-average-growth-rate-of-logs","title":"Show the average growth rate of logs","text":"
xtail /nail/scribe/buffer/some_service_generated | pv -a > /dev/null\n
          "},{"location":"pv/#write-a-disk-image-to-usb","title":"Write a disk image to usb","text":"

          This can be used in place of dd if=file of=/dev/disk

          ## As root\npv ~hoherd/Downloads/ubuntu-16.04.1-desktop-amd64.iso > /dev/rdisk4\n
          "},{"location":"pv/#show-the-number-of-k8s-pods-going-into-notready-per-second","title":"Show the number of k8s pods going into NotReady per second","text":"
          kubectl get pods -A --watch-only |\ngrep --line-buffered NotReady |\npv --line-mode --timer --rate --average-rate --wait --force >/dev/null\n
          "},{"location":"pv/#see-also","title":"See Also","text":"
          • dcfldd
          • dd
          • ddrescue
          "},{"location":"pxe/","title":"pxe","text":"

          \"In computing, the Preboot eXecution Environment, PXE (most often pronounced as pixie, often called PXE Boot/pixie boot.) specification describes a standardized client-server environment that boots a software assembly, retrieved from a network, on PXE-enabled clients. ... The concept behind the PXE originated in the early days of protocols like BOOTP/DHCP/TFTP, and as of 2015 it forms part of the Unified Extensible Firmware Interface (UEFI) standard. In modern data centers, PXE is the most frequent choice for operating system booting, installation and deployment.\" - https://en.wikipedia.org/wiki/Preboot_Execution_Environment

          "},{"location":"pxe/#links","title":"Links","text":"
          • https://netboot.xyz
          • https://ipxe.org
          "},{"location":"pytest/","title":"pytest","text":"

          \"The pytest framework makes it easy to write small tests, yet scales to support complex functional testing for applications and libraries.\" - https://docs.pytest.org

          "},{"location":"pytest/#common-args","title":"Common args","text":"

          pytest --help is a bit overwhelming, so here's a smaller reference:

          • --pdb drop to pdb when an exception is raised
          • --maxfail=N quit after this many test failures
          • --ff run previously failed tests first
          • --lf only run tests that previously failed
• -k searchstring only run tests that have \"searchstring\" in them (actually more complicated matches can be done with -k)
          • -s alias for --capture=no which basically means \"show output of print statements in tests\"
          "},{"location":"pytest/#usage-tips","title":"Usage Tips","text":""},{"location":"pytest/#debug-failing-test-with-pdb","title":"Debug failing test with pdb","text":"

          This will drop you into a pdb shell when a test failure occurs.

          pytest --pdb tests/test_*.py\n
          "},{"location":"pytest/#override-test-args","title":"Override test args","text":"
          export PYTEST_ADDOPTS='--maxfail=1 -v --pdb'\npytest app/tests/test_*.py\n
          "},{"location":"pytest/#run-only-tests-that-failed-on-the-last-run","title":"Run only tests that failed on the last run","text":"
          pytest --lf\n
          "},{"location":"pytest/#run-all-tests-but-put-the-last-failures-first","title":"Run all tests, but put the last failures first","text":"
          pytest --ff\n
          "},{"location":"pytest/#run-a-specific-test-case","title":"Run a specific test case","text":"

          You can use python expressions to match more than one test. Each given test is substring matched against available tests. The matching logic can get pretty complicated, so see the help docs.

          pytest -k 'test_this_specific_thing or that_specific_thing'\n
          "},{"location":"pytest/#passing-args-via-env-vars","title":"Passing args via ENV vars","text":"

          You can pass args via the PYTEST_ADDOPTS ENV var. This is useful for instance if you're using make to run tests, and the command line does additional things like source files, enter a venv, or whatever.

          PYTEST_ADDOPTS=\"--ff --maxfail=1\" make test\n
          "},{"location":"pytest/#show-your-fixtures","title":"Show your fixtures","text":"
          pytest --fixtures\n
          "},{"location":"pytest/#show-fixture-setup-and-teardown-during-run","title":"Show fixture setup and teardown during run","text":"
          pytest --setup-show\n
          "},{"location":"pytest/#plugins","title":"Plugins","text":"
          • pytest-profiling: \"Profiling plugin for pytest, with tabular and heat graph output.\"
          • pytest-sugar: improved display of test output
          • pytest-xdist: parallel runs of tests for speed improvements
          • pytest-testmon: \"selects tests affected by changed files and methods\"
          "},{"location":"python/","title":"python","text":"

          \"Python is a programming language that lets you work more quickly and integrate your systems more effectively.\" - https://www.python.org/

          "},{"location":"python/#tips-and-techniques","title":"Tips and techniques","text":"
• Don't use assert statements for regular validation. assert statements can be disabled at the interpreter level, which would vastly change the flow of your code if they were widely used.
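For example, running python with -O strips assert statements entirely:

python3 -c 'assert False'     # raises AssertionError\npython3 -O -c 'assert False'  # exits silently; -O disables assert statements\n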
          "},{"location":"python/#variable-names","title":"Variable names","text":"
          • _varname: Semi-private. Basically a convention that developers use to indicate that the scope of a variable is local, but this locality is not enforced by the interpreter.
• __varname: Private in name, but not in logic or security. The interpreter mangles the name of the var (to _ClassName__varname) to avoid collisions in subclasses, but it is still accessible, as shown in the sketch after this list.
          • var_: Used to get around shadowing built-in variable names. EG: list_ won't conflict with list()
          • __magic_method__: See https://diveintopython3.net/special-method-names.html
          • _: Temp var, pretty common entity in programming. (eg: bash and perl both support this too.)
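Here is a small demonstration of name mangling (the class and attribute names are arbitrary):

python3 -c 'class Foo: __x = 1\nprint(Foo._Foo__x)'  # prints 1 despite the \"private\" name\n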
          "},{"location":"python/#virtual-environments","title":"Virtual Environments","text":"

Virtual environments isolate your project from the system's python interpreter and modules, so you have full control over what code is available to your project. This makes it easy to develop, debug, and deploy to a new system. It's basically always a good idea to use a virtual environment. You will thank yourself later for learning this one up front.

          "},{"location":"python/#virtual-environments-using-venv","title":"Virtual environments using venv","text":""},{"location":"python/#creating-a-venv","title":"Creating a venv","text":"
          echo \"venv\" >> .gitignore  # optional\nvirtualenv venv\n. venv/bin/activate\npip install -r requirements.txt\n## write code, interact with it, whatever\ndeactivate\n
          "},{"location":"python/#use-venv-to-work-around-missing-pip","title":"Use venv to work around missing pip","text":"

          This is mostly useful for installing for your user, since if you can't install pip you won't be able to install into system-wide locations.

          virtualenv venv --system-site-packages && venv/bin/pip install --user \"$PACKAGENAME\" && rm -rf venv\n
          "},{"location":"python/#virtual-environments-with-poetry","title":"Virtual environments with poetry","text":"

          poetry is the new-school 2019 way of doing virtual environments. poetry stores its requirements in the new standard pyproject.toml file, and keeps the virtual environment stored outside of the current directory.

          • https://python-poetry.org/docs/
          • https://python-poetry.org/docs/pyproject/
          "},{"location":"python/#creating-a-virtual-environment-using-poetry","title":"Creating a virtual environment using poetry","text":"
          cd project_dir\npoetry init\n## walk through the dialogue\npoetry add bpython boto3\npoetry shell  # this spawns a subshell with the new python environment\n## interact with your python environment\nexit\n
          "},{"location":"python/#import-module-from-absolute-path","title":"Import module from absolute path","text":"
          sys.path.append('/Users/username/code/somedir')\nimport module # from somedir\n
          "},{"location":"python/#convert-between-character-number-and-string","title":"Convert between character number and string","text":"

          You can use these functions to convert ascii and unicode characters into their numeric representations and back. Technically, ord converts a unicode character into a unicode code point, and chr does the reverse.

>>> ord('🐍')\n128013\n>>> chr(128013)\n'🐍'\n
          "},{"location":"python/#benchmarking","title":"Benchmarking","text":""},{"location":"python/#links","title":"Links","text":"
          • https://youtu.be/YY7yJHo0M5I: Talk - Anthony Shaw: Write faster Python! Common performance anti patterns
          • https://pypi.org/project/scalene
          • https://pypi.org/project/austin-python
          • https://docs.python.org/3/library/profile.html
          • https://docs.python.org/3/library/timeit.html
          "},{"location":"python/#show-stats-about-call-count-and-times","title":"Show stats about call count and times","text":"

          This example shows how to profile a pytest run, and then generate a stats.txt file showing stats sorted by total time:

          python -m cProfile -o output.prof pytest -sv tests\ncat <<EOF | python -m pstats output.prof > stats.txt\nsort time\nstats\nEOF\n

          Yes, that syntax is ugly, and yes, the stats module could use a better CLI, but it works. Creating a function to make the CLI interface better is left as an exercise for the reader.
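As a minimal sketch of that exercise (the function name is hypothetical), wrap the pstats invocation in a shell function:

pstats-report() { printf 'sort %s\\nstats\\n' \"${2:-time}\" | python -m pstats \"$1\" ; }\npstats-report output.prof > stats.txt             # sort by total time (the default)\npstats-report output.prof cumulative > stats.txt  # or sort by cumulative time\n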

          "},{"location":"python/#generate-a-flame-graph","title":"Generate a flame graph","text":"

          Until https://github.com/baverman/flameprof/pull/5 is merged you may have to edit the installed flameprof shell script.

          pip3 install flameprof\npython -m cProfile -o output.prof myscript.py\nflameprof output.prof > output.svg\n
          "},{"location":"python/#_1","title":"python","text":""},{"location":"python/#debugging","title":"Debugging","text":""},{"location":"python/#verbose-environment-var","title":"Verbose environment var","text":"

          https://docs.python.org/3/using/cmdline.html#envvar-PYTHONVERBOSE

export PYTHONVERBOSE=1\n## or...\npython -v -m pip list\n
          "},{"location":"python/#follow-the-flow-of-a-python-script","title":"Follow the flow of a python script","text":"

          This is equivalent to bash -x / bash -o xtrace, but is probably even more useful because it prefixes the name of the file and the line number to what is actually being executed, which aids in debugging large projects.

          python -m trace --trace foo.py\n

          You can get the equivalent output for a single function with:

          import trace\ntracer = trace.Trace(trace=True)\ntracer.runfunc(some_func_name, 'some_func arg1', 'some_func arg2')\n
          "},{"location":"python/#enter-an-interactive-prompt-after-script-ends","title":"Enter an interactive prompt after script ends","text":"

          https://docs.python.org/3/using/cmdline.html#envvar-PYTHONINSPECT

This also triggers when your code raises an exception, but in that case the rest of your code will not have been executed; you are simply dropped into a shell at the point of failure, which may not be very useful.

          export PYTHONINSPECT=1\n## or...\nsudo python -i ./ps_mem.py\n
          "},{"location":"python/#enter-a-python-terminal-arbitrarily","title":"Enter a python terminal arbitrarily","text":"

          https://docs.python.org/3/library/pdb.html

          import pdb; pdb.set_trace()\n

In python 3.7+ you can simply insert breakpoint()

          This drops you into a pdb shell. This is not the same as a full python REPL. To get a python REPL, type interact. After you have inspected the current state, you can type continue.
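A minimal sketch of a session (the commands after the (Pdb) prompt are typed interactively):

python3 -c 'x = 41; breakpoint(); print(x + 1)'\n## (Pdb) p x        # print a variable from the current scope\n## (Pdb) interact   # drop into a full python REPL; ctrl-d returns to pdb\n## (Pdb) continue   # resume the script, which prints 42\n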

Alternatively there is the web-pdb package, which allows you to debug via a web browser using web_pdb.set_trace().

          "},{"location":"python/#print-variables-from-the-local-scope","title":"Print variables from the local scope","text":"
          for var in dir():\n    print(\"Debug: {0} = {1}\".format(var,eval(var)))\n
          "},{"location":"python/#inspect-things","title":"Inspect things","text":"
Note: inspect.getargspec was deprecated and removed in Python 3.11; on modern versions use inspect.signature instead.

>>> import inspect\n>>> inspect.getargspec(inspect.getargspec)\nArgSpec(args=['func'], varargs=None, keywords=None, defaults=None)\n
          "},{"location":"python/#create-an-http-server-using-pwd-as-document-root","title":"Create an http server using PWD as document root","text":"
          python3 -m http.server 9980\n
          "},{"location":"python/#discover-the-location-for-pip-user-installs","title":"Discover the location for pip --user installs","text":"
          echo $(python -m site --user-base)/bin\n
          "},{"location":"python/#add-pythons-pip-install-user-bin-path-to-path","title":"Add python's pip install --user bin path to PATH","text":"
          ## ~/.bash_profile\nif PYTHON3_USER_BASE=$(python3 -m site --user-base 2>/dev/null) ; then PATH=\"${PYTHON3_USER_BASE}/bin:${PATH}\" ; fi\n
          "},{"location":"python/#manage-a-requirementstxt-file-like-a-pro","title":"Manage a requirements.txt file like a pro","text":"

          Managing requirements.txt manually can lead to a variety of problems related to dependencies and package compatibility. The best way to manage the requirements.txt file is by using the pip-tools command pip-compile, which builds a requirements.in file into a requirements.txt.

In your requirements.in you define only your direct dependencies:

          pendulum\ntyper\n

          Then you run pip-compile --upgrade requirements.in, which would create a requirements.txt file like:

          #\n# This file is autogenerated by pip-compile with Python 3.10\n# by the following command:\n#\n#    pip-compile requirements.in\n#\nclick==8.1.3\n    # via typer\npendulum==2.1.2\n    # via -r requirements.in\npython-dateutil==2.8.2\n    # via pendulum\npytzdata==2020.1\n    # via pendulum\nsix==1.16.0\n    # via python-dateutil\ntyper==0.7.0\n    # via -r requirements.in\n

          You can also specify --generate-hashes to get a more reliable lockfile style result.
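For example:

pip-compile --generate-hashes requirements.in\n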

          "},{"location":"python/#show-currently-installed-versions-for-all-packages-in-requirementstxt","title":"Show currently installed versions for all packages in requirements.txt","text":"

          This can be used to update a requirements.txt file to the exact installed version.

          pip freeze | grep -f <(grep -o '^\\w\\+' requirements.txt)\n

          Or auto pin to the current major version

          pip freeze |\n  grep -f <(grep -o '^\\w\\+' requirements.txt) |\n  sed 's/==/~=/' |\n  awk -F. '{print $1 \".\" $2}'\n
          "},{"location":"python/#generate-a-tz-aware-datetime-using-only-the-standard-library","title":"Generate a TZ aware datetime using only the standard library","text":"
          from datetime import datetime, timezone\nTZ = datetime.now(timezone.utc).astimezone().tzinfo\ndatetime.now(TZ).isoformat(timespec='seconds')  # 2023-06-24T12:50:01-04:00\n
          "},{"location":"python/#common-date-operations","title":"Common date operations","text":"

          None of these examples deal with timezones.

          "},{"location":"python/#get-the-current-time-and-date","title":"Get the current time and date","text":"
>>> from datetime import datetime\n>>> datetime.now().strftime(\"%s\")\n'1572039830'  # unix timestamp as a string\n>>> datetime.now().strftime(\"%F\")\n'2019-10-25'  # arbitrarily formatted timestamp string\n>>> datetime.now()\ndatetime.datetime(2019, 10, 25, 14, 49, 49, 175165)  # as a datetime object\n
          "},{"location":"python/#convert-from-unix-timestamp","title":"Convert from unix timestamp","text":"
>>> from datetime import datetime\n>>> datetime.utcfromtimestamp(1234567890)\ndatetime.datetime(2009, 2, 13, 23, 31, 30)  # to a datetime object\n>>> datetime.utcfromtimestamp(1234567890).strftime('%F %T')\n'2009-02-13 23:31:30'  # to a string, via datetime object formatting\n

Note that utcfromtimestamp is deprecated as of Python 3.12; prefer datetime.fromtimestamp(1234567890, tz=timezone.utc), which returns an aware datetime.
          "},{"location":"python/#convert-from-datetime-string","title":"Convert from datetime string","text":"
          >>> from datetime import datetime\n>>> datetime.strptime('2019-05-01', \"%Y-%m-%d\")  # For some reason you can't use '%F' to describe inputs. Same with %s.\ndatetime.datetime(2019, 5, 1, 0, 0)  # to a datestamp object\n>>> datetime.strptime('2019-05-01 10:01:59', \"%Y-%m-%d %H:%M:%S\").strftime('%A %B %d, %Y')\n'Wednesday May 01, 2019'  # to a string, via datetime object\n
          "},{"location":"python/#install-a-package-from-git","title":"Install a package from git","text":"

          This is great for locally developed packages. This also works with pipx, which will install tools into their own virtualenv.

          pip install git+ssh://gitserver/path/repo.git@git-ref\n
          "},{"location":"python/#links_1","title":"Links","text":""},{"location":"python/#decorators","title":"Decorators","text":"
          • https://wiki.python.org/moin/PythonDecoratorLibrary
          • http://stackoverflow.com/questions/739654/how-can-i-make-a-chain-of-function-decorators-in-python/1594484#1594484
          • http://ains.co/blog/things-which-arent-magic-flask-part-1.html
          "},{"location":"python/#modules","title":"Modules","text":"
          • https://github.com/jonathanslenders/ptpython: improved python REPL
          • https://docs.python.org/3/library/sched.html: cross-platform cron-like scheduler
          • https://pypi.python.org/pypi/colorama: cross-platform colorized terminal output
          • https://pypi.python.org/pypi/begins/: Simplified CLI arguments
          • https://pypi.python.org/pypi/watchdog: cross-platform filesystem events API
          • https://github.com/giampaolo/psutil/: system information
          • https://github.com/timothycrosley/hug: simplified web API creation
          • http://python-future.org: \"python-future is the missing compatibility layer between Python 2 and Python 3. It allows you to use a single, clean Python 3.x-compatible codebase to support both Python 2 and Python 3 with minimal overhead.\"
          • https://pymotw.com/3/: Python Module of the Week has lots of useful module examples
          • https://docs.python.org/3/library/functools.html
          • https://docs.python.org/3/library/itertools.html
          • https://more-itertools.readthedocs.io/en/stable/
          "},{"location":"python/#various-links","title":"Various links","text":"
          • A gallery of interesting Jupyter and IPython Notebooks
          • Drag'n'drop Pivot Tables and Charts in Jupyter
          • Dive Into Python 3
          • Google's Python Class
          • Google Python Style Guide
          • Learn Python dot org
          • Python Cheatsheets
          • The Flask Mega-Tutorial
          • The Python IAQ: Infrequently Answered Questions
          • Why I use py.test and you probably should too
          • PyCon 2017 videos
          • PyCon 2018 videos
          • https://caremad.io/posts/2013/07/setup-vs-requirement/: Hard vs abstract dependencies in requirements.txt (and by extension Pipfile) vs setup.py
          • https://plot.ly/python/
          • https://realpython.com/factory-method-python/
          • https://pythontest.com
          • How to structure a Flask-RESTPlus web service for production builds
          • MIT Open Courseware: Introduction to Computer Science and Programming in Python
          • Documenting Python Code: A Complete Guide
          • Current Status of Python Packaging - April 2019: TL;DR: \"Create your development environment with Poetry, specifying the direct dependencies of your project with a strict version.\"
          • Brian Warner - Magic Wormhole - Simple Secure File Transfer - PyCon 2016
          • List and dict comprehension are taken from a mathematical notation https://en.wikipedia.org/wiki/Set-builder_notation#Parallels_in_programming_languages
          • My Python testing style guide
          • Film simulations from scratch using Python
          • timeit: stdlib library for testing many iterations of the same code. See also python3 -m timeit --help
          • https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html: Good walkthrough of colorizing terminals using ANSI escape codes.
          • https://guicommits.com/organize-python-code-like-a-pro: Other than some of the dir naming stuff, I agree with most of the content here.
          • https://packaging.python.org/en/latest/guides/tool-recommendations
          • https://towardsdatascience.com/12-python-decorators-to-take-your-code-to-the-next-level-a910a1ab3e99
          • https://nedbatchelder.com/blog/202312/realworld_matchcase.html
          • https://clickpy.clickhouse.com/dashboard: Show pypi stats in a nifty dashboard.
          "},{"location":"q/","title":"q","text":"

          N.B.: This tool isn't updated frequently. It's probably better to use something more broadly useful like dasel

          \"q - Text as Data\" - http://harelba.github.io/q/

          This tool is cool, but honestly you can do a lot more stuff and iterate quicker by importing your csv into sqlite using sqlite3 -csv new.db \".import some/file.csv destination_table_name\". q actually uses sqlite3 behind the scenes. See also https://til.simonwillison.net/sqlite/one-line-csv-operations

          "},{"location":"q/#examples","title":"Examples","text":""},{"location":"q/#format-the-pagerduty-incidentscsv-to-be-more-readable","title":"Format the Pagerduty incidents.csv to be more readable","text":"
          ## -d, = comma delimited input\n## -H  = use the headings found in the input csv files\n## -T  = tab delimited output\n## -f  = python 2 format strings to be applied to 1-indexed output fields\n$ q -d, -H -T -f '1=https://pagerduty.com/incidents/%s,2=alerted at %s,3=Description: %s' 'select id,created_on,description from incidents.csv order by created_on asc limit 5'\nhttps://pagerduty.com/incidents/P66XNLT    alerted at 2017-12-04T00:04:07-08:00    Description: proxy0302: 200 Status Code Proxy Log Watcher: Matches found in last run met or dropped below 0.0, dropping to 0.0 for 10 minutes at 12:00AM https://server.pingdom.com/a/3103869181\nhttps://pagerduty.com/incidents/PLUG344    alerted at 2017-12-04T04:14:05-08:00    Description: sandbox-apigateway00: API Gateway Error Watcher: Occurrences met or exceeded 10.00 /min, increasing to 15.82 /min for 10 minutes at 04:10AM https://server.pingdom.com/a/3104379391\nhttps://pagerduty.com/incidents/PT13M2B    alerted at 2017-12-04T06:48:14-08:00    Description: hadoop-r21: Hadoop Resource Monitor: Lostnodes met or exceeded 4.0, increasing to 4.0 at 06:47AM https://server.pingdom.com/a/3104686551\nhttps://pagerduty.com/incidents/P3RLOTT    alerted at 2017-12-04T08:56:07-08:00    Description: hadoop-c05: /srv Disk Usage: Disk Capacity met or exceeded 90%, increasing to 90% for 10 minutes at 08:50AM https://server.pingdom.com/a/3104929931\nhttps://pagerduty.com/incidents/PNOJZKC    alerted at 2017-12-04T09:02:21-08:00    Description: sjc-http2: HTTP 500 error Watcher: Occurrences met or exceeded 10.00 /min, increasing to 31.91 /min for 10 minutes at 09:00AM https://server.pingdom.com/a/3104941911\n
          "},{"location":"q/#format-pagerduty-events-as-html-for-pasting-into-confluence-for-issue-response-tracking","title":"Format pagerduty events as HTML for pasting into confluence for issue response tracking","text":"

          pagerduty-csv-download opens your browser and downloads the csv file for the last week of events. You'll have to change companyname to whatever your company URL is.

          pagerduty-csv-to-html uses q to reformat the csv into HTML lists you can paste into the source editor of your HTML friendly CMS like Confluence.

This uses BSD relative date syntax; you'll have to change it for linux.

          pagerduty-csv-download() {\n  rm -f incidents.csv\n  TZ=America/Los_Angeles\n  past=\"$(date -v-7d \"+%FT00:00:00\")\"\n  present=\"$(date \"+%FT00:00:00\")\"\n  open \"$(date \"+https://companyname.pagerduty.com/api/v1/reports/raw/incidents.csv?since=${past}&until=${present}&time_zone=${TZ}\")\"\n}\npagerduty-csv-to-html() {\n  q \\\n    -H \\\n    -d',' \\\n    -D' ' \\\n    -f '1=<li>%s,2=<a href \\\"https://companyname.pagerduty.com/incidents/%s\\\">,3=%s</a>,4=%s<ul><li>...</li></ul></li>' \\\n    'select substr(created_on,12,5),id,id,description from incidents.csv order by created_on asc' | tail -n 50 | sed 's/href /href=/;s/> />/'\n}\n
          "},{"location":"q/#select-count-of-daily-alerts-by-date-from-pagerduty-incidentscsv","title":"Select count of daily alerts by date from PagerDuty incidents.csv","text":"
          q -H --delimiter=',' -O --output-delimiter=',' 'select substr(created_on,0,11) as date,count(substr(created_on,0,11)) as count from incidents.csv group by date'\n
          "},{"location":"raspberry-pi/","title":"raspberry-pi","text":"

          A small computer, good for running linux.

          • http://www.raspberrypi.org

The standard OS used to be called Raspbian, but it is now called Raspberry Pi OS.

          "},{"location":"raspberry-pi/#tips","title":"Tips","text":""},{"location":"raspberry-pi/#show-what-firmware-you-have-running","title":"Show what firmware you have running","text":"
          sudo /opt/vc/bin/vcgencmd version\n

          Check vcgencmd commands for more usage.

          "},{"location":"raspberry-pi/#update-raspberry-pi-os-and-firmware-versions","title":"Update Raspberry Pi OS and firmware versions","text":"
sudo apt update\nsudo apt full-upgrade -y\n
          "},{"location":"raspberry-pi/#software","title":"Software","text":""},{"location":"raspberry-pi/#raspberry-pi-os","title":"Raspberry Pi OS","text":"
          dpkg-reconfigure locales\n
          "},{"location":"raspberry-pi/#hassio","title":"Hass.io","text":"

          \"Hass.io turns your Raspberry Pi (or another device) into the ultimate home automation hub powered by Home Assistant. With Hass.io you can focus on integrating your devices and writing automations.\" - https://home-assistant.io/hassio/

          "},{"location":"raspberry-pi/#spillpasspi","title":"SpillPassPi","text":"

SpillPassPi is deprecated because Nintendo shut down the StreetPass relay servers.

          "},{"location":"raspberry-pi/#v1","title":"V1","text":"

          Retired.

          'A Simple Homebrew Plug and Play 3DS HomePass Relay and Fake \"Nintendo Zone\" Hotspot' - http://www.spillmonkey.com/?page_id=5

          "},{"location":"raspberry-pi/#v2","title":"V2","text":"

          'A Simple Homebrew Plug and Play 2DS/3DS/N3DS StreetPass Relay and Fake \"Nintendo Zone\" Hotspot' - http://www.spillmonkey.com/?page_id=169

          "},{"location":"raspberry-pi/#homepass","title":"Homepass","text":"

          \"Nintendo 3DS homepass resources and software.\" - https://github.com/danielhoherd/homepass/tree/master/RaspberryPi

          "},{"location":"raspberry-pi/#links","title":"Links","text":"
          • https://www.raspberrypi.com/software/operating-systems
          • https://www.raspberrypi.com/documentation/computers/os.html Firmware and OS update instructions
          • https://www.jeffgeerling.com/blog/2018/raspberry-pi-microsd-card-performance-comparison-2018
          • https://techcrunch.com/2019/07/09/the-raspberry-pi-4-doesnt-work-with-all-usb-c-cables/
          • https://curriculum.raspberrypi.org
          • https://www.jeffgeerling.com/blog/2020/uasp-makes-raspberry-pi-4-disk-io-50-faster: Useful USB info in general, regarding UASP
          • https://ptx2.net/posts/unbricking-a-bike-with-a-raspberry-pi/
          • https://www.raspberrypi.com/news/raspberry-pi-build-hat-lego-education/: Control LEGO motors via Raspberry Pi
          "},{"location":"redis/","title":"redis","text":"

          \"Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache and message broker.\" - https://redis.io

          "},{"location":"redis/#tips-and-examples","title":"Tips and Examples","text":""},{"location":"redis/#solve-memory-allocation-problems","title":"Solve memory allocation problems","text":"

          Errors like this can cause the disk to fill up over long periods of time:

          [2535] 02 Jan 19:58:52.376 * Starting automatic rewriting of AOF on 7885% growth\n[2535] 02 Jan 19:58:52.376 # Can't rewrite append only file in background: fork: Cannot allocate memory\n

          This problem can be solved without restarting anything:

          ## df -h .\nFilesystem      Size  Used Avail Use% Mounted on\n/dev/xvdf       250G  135G  116G  54% /srv\n## cat /proc/sys/vm/overcommit_memory\n0\n## echo 1 > /proc/sys/vm/overcommit_memory\n## tail  redis.log\n[2535] 02 Jan 22:03:23.707 * Starting automatic rewriting of AOF on 7885% growth\n[2535] 02 Jan 22:03:23.707 # Can't rewrite append only file in background: fork: Cannot allocate memory\n[2535] 02 Jan 22:03:23.807 * Starting automatic rewriting of AOF on 7885% growth\n[2535] 02 Jan 22:03:23.807 # Can't rewrite append only file in background: fork: Cannot allocate memory\n[2535] 02 Jan 22:03:23.907 * Starting automatic rewriting of AOF on 7885% growth\n[2535] 02 Jan 22:03:23.926 * Background append only file rewriting started by pid 27302\n[27302] 02 Jan 22:04:05.337 * SYNC append only file rewrite performed\n[27302] 02 Jan 22:04:05.379 * AOF rewrite: 36 MB of memory used by copy-on-write\n[2535] 02 Jan 22:04:05.406 * Background AOF rewrite terminated with success\n[2535] 02 Jan 22:04:05.406 * Parent diff successfully flushed to the rewritten AOF (42 bytes)\n[2535] 02 Jan 22:04:05.406 * Background AOF rewrite finished successfully\n## df -h .\nFilesystem      Size  Used Avail Use% Mounted on\n/dev/xvdf       250G  4.5G  246G   2% /srv\n
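To make that setting survive a reboot, it can also be persisted via sysctl (a minimal sketch; the drop-in filename is arbitrary):

echo 'vm.overcommit_memory = 1' | sudo tee /etc/sysctl.d/99-redis.conf\nsudo sysctl --system\n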
          "},{"location":"redis/#find-what-is-using-the-most-memory","title":"Find what is using the most memory","text":"
          redis-cli --bigkeys\n
          "},{"location":"redis/#links","title":"Links","text":"
          • https://redis.io/topics/faq
          • https://redis.io/commands
          "},{"location":"retropie/","title":"retropie","text":"

          \"RetroPie allows you to turn your Raspberry Pi, ODroid C1/C2, or PC into a retro-gaming machine. It builds upon Raspbian, EmulationStation, RetroArch and many other projects to enable you to play your favourite Arcade, home-console, and classic PC games with the minimum set-up.\" - https://retropie.org.uk/

          "},{"location":"retropie/#links","title":"Links","text":""},{"location":"retropie/#-httpsgithubcomretropieretropie-docsblob3719d6docsretroarch-configurationmddefault-joypad-hotkeys-httpsretropieorgukdocsretroarch-configurationhotkeys","title":"- https://github.com/RetroPie/RetroPie-Docs/blob/3719d6/docs/RetroArch-Configuration.md#default-joypad-hotkeys / https://retropie.org.uk/docs/RetroArch-Configuration/#hotkeys","text":""},{"location":"retropie/#tips","title":"Tips","text":""},{"location":"retropie/#pair-bluetooth-controller","title":"Pair bluetooth controller","text":"

Pairing game controllers can be difficult. One way to be sure you get the right controller is to pair the bluetooth controller to your computer, look for its MAC address there, then unpair it. Then go to the terminal, run sudo ~/RetroPie-Setup/retropie_setup.sh, and walk through pairing the controller there now that you know the MAC address. https://retropie.org.uk/docs/Bluetooth-Controller/
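On a linux computer, a minimal sketch of finding the MAC address with bluetoothctl looks like:

bluetoothctl\n## at the [bluetooth] prompt:\n## scan on    # put the controller in pairing mode and watch for its MAC address\n## devices    # list discovered devices with their MAC addresses\n## scan off\n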

          "},{"location":"retropie/#enable-rewind","title":"Enable rewind","text":"

          I don't know why rewind isn't enabled by default, since it's one of the best features.

          1. Start a game
          2. Enter the quick menu (Share + Triangle on PS4 controller)
          3. Go up one menu (X on PS4 controller)
          4. Navigate to Settings -> Input -> Hotkeys, then configure Rewind and Fast Forward to whatever you want.
          5. Go back up to the top menu, then go to Settings -> Frame Throttle -> Rewind, and toggle Rewind Support on.
          6. Go back up to the top menu, then go to Configuration File -> Save Current Configuration
          "},{"location":"rhel/","title":"rhel","text":"

          \"Red Hat, Inc. is an American multinational software company providing open-source software products to the enterprise community.\" - https://en.wikipedia.org/wiki/Red_Hat

          "},{"location":"rhel/#see-also","title":"See Also","text":"
          • CentOS is now owned by RedHat
          • Inline with Upstream Stable Community Project
          "},{"location":"robotics/","title":"robotics","text":""},{"location":"robotics/#links","title":"Links","text":"
          • http://botbench.com
          • http://www.andymark.com
          • http://www.vexrobotics.com
          • https://www.adafruit.com
          • https://www.reddit.com/r/robotics/wiki/index
          • https://www.sparkfun.com/
          "},{"location":"robotics/#see-also","title":"See also","text":"
          • Lego Robotics
          • Wonder Workshop
          "},{"location":"roku/","title":"Roku","text":""},{"location":"roku/#links","title":"Links","text":"
          • https://sdkdocs.roku.com/display/sdkdoc/BrightScript+Language+Reference
          "},{"location":"rook/","title":"rook","text":"

          \"File, Block, and Object Storage Services for your Cloud-Native Environments\" - https://rook.io/

          Rook is based on ceph.

          "},{"location":"ros/","title":"ros","text":"

          \"The Robot Operating System (ROS) is a set of software libraries and tools that help you build robot applications. From drivers to state-of-the-art algorithms, and with powerful developer tools, ROS has what you need for your next robotics project. And it's all open source.\": http://www.ros.org

          "},{"location":"ros/#links","title":"Links","text":"
          • https://wiki.ros.org/Distributions: ROS Distributions
          • http://wiki.ros.org/ROS/Tutorials: ROS tutorials
          • http://wiki.ros.org/turtlesim/Tutorials: ROS Turtle Example
          • https://github.com/CPFL/Autoware: Autoware built on top of ROS for Self-driving cars
          • http://wiki.ros.org/Robots/EV3: Robot Operating System for ev3
          • https://www.cse.sc.edu/~jokane/agitr/: A Gentle Introduction to ROS
          • https://github.com/svautosarusersgroup/meetupslides/: ROS2 / Apex.AI slides
          "},{"location":"rpm/","title":"rpm","text":"

Red Hat Package Manager. \"rpm is a powerful Package Manager, which can be used to build, install, query, verify, update, and erase individual software packages.\" - man rpm

          "},{"location":"rpm/#tricks","title":"Tricks","text":""},{"location":"rpm/#show-installed-keys","title":"Show installed keys","text":"
          rpm -qa gpg-pubkey\n
          "},{"location":"rpm/#show-extended-info-about-all-keys","title":"Show extended info about all keys","text":"
          rpm -qa gpg-pubkey | xargs -n1 -P1 rpm -qi\n
          "},{"location":"rpm/#show-information-about-an-rpm-file","title":"Show information about an rpm file","text":"
rpm -qpi filename.rpm\n
          "},{"location":"rpm/#show-all-installed-packages-and-when-they-were-installed","title":"Show all installed packages and when they were installed","text":"
          rpm -qa --last\n
          "},{"location":"rpm/#show-information-about-the-installed-wget-package","title":"Show information about the installed wget package","text":"
          rpm -qi wget\n
          "},{"location":"rpm/#output-formatted-information-about-packages","title":"Output formatted information about packages","text":"
          rpm -qa --queryformat \"%{NAME} %{PACKAGER} %{URL}\\n\" tomcat7\n

          More info on queryformat: http://www.rpm.org/max-rpm/ch-queryformat-tags.html

          "},{"location":"rpm/#show-which-package-installed-a-file","title":"Show which package installed a file","text":"
          rpm -qf /usr/bin/wget\n
          "},{"location":"rpm/#show-all-files-that-were-installed-by-package-wget","title":"Show all files that were installed by package wget","text":"
          rpm -ql wget\n
          "},{"location":"rpm/#show-all-files-in-a-package-that-is-not-yet-installed","title":"Show all files in a package that is not yet installed","text":"
          rpm -qpl ~/downloads/wget-1.10.2-78.i586.rpm\n
          "},{"location":"rpm/#show-which-documentation-files-get-installed-with-a-package","title":"Show which documentation files get installed with a package","text":"
          rpm -qd wget\n
          "},{"location":"rpm/#show-what-has-changed-on-the-system-since-installing-a-package","title":"Show what has changed on the system since installing a package","text":"

          This will verify file integrity and show you what has changed for each file.

          rpm -V openssl\n
          "},{"location":"rpm/#show-installation-and-uninstallation-scripts","title":"Show installation and uninstallation scripts","text":"
          rpm -qp --scripts foo.rpm\n
          "},{"location":"rpm/#check-the-integrity-of-an-rpm","title":"Check the integrity of an RPM","text":"
          rpm -K ~/downloads/filename.rpm\n
          "},{"location":"rpm/#show-which-packages-are-hogging-all-the-space","title":"Show which packages are hogging all the space","text":"
          rpm -qa --queryformat \"%{SIZE} %{NAME}\\n\" |sort -rn |head -n50 | column -t\n
          "},{"location":"rpm/#show-a-table-about-rpm-files-versions-and-creators-in-a-directory","title":"Show a table about RPM files versions and creators in a directory","text":"
          rpm -qp --queryformat \"%{NAME},%{VERSION},%{PACKAGER}\\n\" * | column -s, -t\n
          "},{"location":"rpm/#show-what-files-were-installed-into-varlog","title":"Show what files were installed into /var/log","text":"
          rpm -qa --filesbypkg | grep \" /var/log\" # space before /var is necessary to weed out things like /usr/var\n
          "},{"location":"rpm/#rebuild-a-corrupt-rpm-db","title":"Rebuild a corrupt rpm db","text":"
          rm -rf /var/lib/rpm/__db*\nrpm --rebuilddb\n
          "},{"location":"rpm/#see-also","title":"See Also","text":"
          • How to create RPMs - http://fedoraproject.org/wiki/How_to_create_an_RPM_package
          • yum - supplement to rpm command
          "},{"location":"rrd/","title":"rrd","text":"

          \"RRDtool is the OpenSource industry standard, high performance data logging and graphing system for time series data. RRDtool can be easily integrated in shell scripts, perl, python, ruby, lua or tcl applications.\" - https://oss.oetiker.ch/rrdtool/index.en.html

          "},{"location":"rrd/#acronyms","title":"Acronyms","text":"
• cf = consolidation function
          • ds = data source
          • dst = data source type
          • rra = round robin archive
          "},{"location":"rrd/#examples","title":"Examples","text":""},{"location":"rrd/#reconfigure-the-x-axis-precision-of-an-rrd","title":"Reconfigure the X-axis precision of an RRD","text":"

          Assuming the first value (eg: 5856) is the value you want and 244 is the value you currently have, reconfigure data index 0,1,2:

          sudo rrdtool tune coral/pkts_in.rrd \"RRA#0:+$((5856-244))\" \"RRA#1:+$((20160-244))\" \"RRA#2:+$((52704-244))\"\n
          "},{"location":"rrd/#links","title":"Links","text":"
          • https://oss.oetiker.ch/rrdtool/tut/rrd-beginners.en.html
          "},{"location":"rst/","title":"reStructuredText","text":"

          \"reStructuredText is an easy-to-read, what-you-see-is-what-you-get plaintext markup syntax and parser system.\" - http://docutils.sourceforge.net/rst.html

          • http://rst.ninjs.org/
          "},{"location":"rsync/","title":"rsync","text":"

Great way to sync one location to another, local or remote. Note that this does not mean full synchronization; two commands with reversed source and destination are required to accomplish that.

          "},{"location":"rsync/#syntax-examples","title":"Syntax Examples","text":""},{"location":"rsync/#giving-additional-ssh-options","title":"Giving additional ssh options","text":"
          rsync -e 'ssh -o ConnectTimeout=10 -o PasswordAuthentication=no' -Rai /home target:/\n
          "},{"location":"rsync/#exclude-filters","title":"Exclude Filters","text":"

          Exclude filters are kinda weird.

          • They're case sensitive and there's no way to be case insensitive.
          • They are relative to the root of the source URI. EG, rsync --exclude=\"Desktop/\" ~/ remotehost:~/

          Here is an example of what to use in --exclude-from=file.txt

          **Cache\n**Caches\n**cache\n**caches\n**/.dropbox\n**Previews.lrdata\n**/Library/Application\\ Support/Google/Chrome\n
          "},{"location":"rsync/#include-filters","title":"Include Filters","text":"

          Include filters should be specified before exclude filters if you have very broad exclude filters. For instance, to only transfer mp4 files:

          rsync -ai --include='*.mp4' --exclude=\"*\" remote-server:/media/movies/ ~/Movies/\n

          If you exclude first, nothing will be transferred.

          "},{"location":"rsync/#long-and-partial-transfers","title":"Long and Partial Transfers","text":"

If you're doing transfers which you'd like to monitor, and which risk being cut off, use this syntax:

          rsync -e ssh -az --partial --progress ./foo remotehost:~/bar/\n

          This will resume broken file transfers where they were left off, and give you completion statistics with transfer rate, percent complete and estimated time left.

          "},{"location":"rsync/#recursively-link-src-to-dst","title":"Recursively link src to dst","text":"

rsync can be used to create a hard linked local copy of a whole tree. This is useful if you don't have GNU cp, where the same could be done simply with cp -lrp. On OS X with homebrew, GNU cp can be installed via brew install coreutils and accessed via gcp. See also ls -la /usr/local/opt/coreutils/bin/.

          Slashes are really really important here; this won't work if you get them wrong. Absolute paths must be given, thus ${PWD} and ${HOME} vs ~

          rsync -aP --link-dest=\"${PWD}/src\" ./src/ dst #recursively hard link ./src to dst\n

          For example:

          rsync -aivv --link-dest=\"${HOME}/Dropbox\" ${HOME}/Dropbox/some_dir ${HOME}/temp/\n

          This will create the directory ${HOME}/temp/some_dir and hard link all the files from the source into the destination. It should only take a few seconds. Lines with 'hf' indicate a hard linked file. Lines with 'cd' indicate 'created directory'.

rsync can copy not only data but also filesystem attributes. If these attributes differ between the link-dest and the src, a hard link may not be created; instead, a copy of the file from the local filesystem is made and the correct metadata is applied from the source.

          "},{"location":"rsync/#backup-to-remote-host-with-timestamp","title":"Backup to remote host with timestamp","text":"

          The following example copies files from /local/host/src into /remote/host/path-2 but hard links those files against the data in /remote/host/path-1 on the receiving side if any files are identical. This avoids transferring data, is an efficient use of disk space for files that will be archived (IE: not changed in-place), and allows deletion of older copies of the data while keeping newer copies.

          rsync -aP --link-dest=\"/remote/host/path-1\" \"/local/host/src/\" \"$REMOTE_HOST\":/remote/host/path-2/\n

          Or for a daily datestamped backup using GNU date (this example will not work with BSD date like macOS has):

          rsync -aivv --link-dest=\"/archive/path/$(date -d \"-1 day\" \"+%F\")/\" /src/data/ \"${REMOTE_HOST}:/archive/path/$(date \"+%F\")/\"\n
          "},{"location":"rsync/#move-files-to-another-server-in-small-batches","title":"Move files to another server in small batches","text":"

          This is useful if you want to gradually clear up disk space rather than waiting until the end of a transfer of a large number of files to clear up disk space in one large operation.

while date ;\nfiles=$(find /srv/backups/scribe/./ -type f -mtime +400 | head -n 500) ;\necho \"md5 of file list is $(echo ${files} | md5sum)\" ;\n[ -n \"${files}\" ] ; do\n  sudo rsync --bwlimit 20000 -RaPi --remove-source-files ${files} root@10.2.17.7:/srv/backups/scribe-sea/ ; echo sleeping ;\n  sleep 10 ;\ndone ;\n
          "},{"location":"rsync/#move-all-datestamped-files-older-than-the-beginning-of-the-previous-month-excluding-symlinks","title":"Move all datestamped files older than the beginning of the previous month, excluding symlinks","text":"

          This relies on gnu date, so use gdate if used on OS X.

          rsync -aPiv \\\n  --remove-source-files \\\n  --bwlimit 20000 \\\n  --exclude=\"**$(date -d \"1 month ago\" \"+%Y-%m\")**\" \\\n  --exclude=\"**$(date \"+%Y-%m\")**\" \\\n  --no-links \\\n  /srv/backups/scribe/* \\\n  root@10.2.17.7:/srv/backups/scribe-sea/\n
          "},{"location":"rsync/#reduce-time-precision-during-comparison","title":"Reduce time precision during comparison","text":"

          This is useful for rsyncing to FAT filesystems where time precision is 2 seconds.

          rsync --modify-window=1 # allow 1 second of difference in timestamps\n
          "},{"location":"rsync/#connect-as-a-normal-user-and-escalate-using-sudo","title":"Connect as a normal user and escalate using sudo","text":"

          Many times you have to copy files that your remote user does not have access to without sudo. You can perform this hoop-jump with ease using the following syntax:

          rsync --rsync-path='sudo rsync' $REMOTE_HOSTNAME:/etc/kubernetes/admin.conf \"$HOME/.kube/config\"\n
          "},{"location":"rsync/#use-rsync-with-find","title":"Use rsync with find","text":"

          You can use rsync --files-from=- to read files from stdin. You have to make sure the path used in find matches the source path for rsync.

          cd /tank/photos/ && find . -type f -print0 | rsync -ia0 --files-from=- . remote-host:/dest/\n
          "},{"location":"rsync/#rsync-to-fat32","title":"rsync to fat32","text":"

fat32 has a 4GiB - 1 byte (0xFFFFFFFF, or 4294967295 bytes) file size limit. It also has some other shortcomings that I have not worked through yet, so this example is far from perfect.

          rsync --max-size=4GiB-1\n
          "},{"location":"rsync/#see-also","title":"See Also","text":"
          • https://github.com/lilydjwg/pssh: parallel ssh (pssh), rsync (prsync), and scp (pscp)
          • https://github.com/rsnapshot/rsnapshot: \"a tool for backing up your data using rsync\"
          "},{"location":"rust/","title":"Rust","text":"

          \"A language empowering everyone to build reliable and efficient software.\" - https://www.rust-lang.org

          "},{"location":"rust/#links","title":"Links","text":"
          • https://www.youtube.com/playlist?list=PLJbE2Yu2zumDF6BX6_RdPisRVHgzV02NW: Intro to rust programming videos
          • https://github.com/rust-lang/rustup: the Rust toolchain installer - https://rustup.rs
          • https://github.com/rust-lang/rust-by-example
          • https://github.com/esp-rs: Rust on ESP microcontrollers
          • https://github.com/joaocarvalhoopen/How_to_learn_modern_Rust
          • https://github.com/rochacbruno/py2rs: From Python into Rust
          • https://rauljordan.com/rust-concepts-i-wish-i-learned-earlier
          • https://doc.rust-lang.org/book: \"The Rust Programming Language\"
          • https://lokathor.github.io/gba-from-scratch: \"GBA From Scratch With Rust ... a book about programming for the Game Boy Advance using the Rust programming language.\"
          "},{"location":"rust/#books","title":"Books","text":"
          • https://www.manning.com/books/rust-in-action
          • https://www.oreilly.com/library/view/command-line-rust/9781098109424
          • https://hands-on-rust.com
          "},{"location":"saltstack/","title":"saltstack","text":"

          \"Software to automate the management and configuration of any infrastructure or application at scale.\" - http://github.com/saltstack/salt

          • https://github.com/saltstack/salt
          • https://docs.saltstack.com/en/latest/topics/tutorials
          • https://docs.saltstack.com/en/latest/topics/development/index.html
          "},{"location":"saltstack/#design-characteristics","title":"Design characteristics","text":"
          • Glossary: https://docs.saltstack.com/en/latest/glossary.html
          "},{"location":"saltstack/#common-commands","title":"Common commands","text":"

          All salt* commands require root access, so use sudo or log in as root.

          • salt: Salt allows for commands to be executed across a swath of remote systems in parallel. This means that remote systems can be both controlled and queried with ease.
          • salt-call: The salt-call command is used to run module functions locally on a minion instead of executing them from the master. Salt-call is used to run a Standalone Minion, and was originally created for troubleshooting.
          • salt-cloud: Salt Cloud is the system used to provision virtual machines on various public clouds via a cleanly controlled profile and mapping system.
          • salt-cp: Salt copy copies a local file out to all of the Salt minions matched by the given target.
          • salt-key: Salt-key executes simple management of Salt server public keys used for authentication.
          • salt-minion: The Salt minion daemon, receives commands from a remote Salt master.
          • salt-run: salt-run is the frontend command for executing Salt Runners. Salt runners are simple modules used to execute convenience functions on the master.
          • salt-ssh: Salt SSH allows for salt routines to be executed using only SSH for transport.
          "},{"location":"saltstack/#state-files","title":"State files","text":"

These are desired state files, not a view of the current state. This is where you describe how you want the system to be.
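A minimal sketch of a state file (the file path, target glob, and package name are arbitrary):

cat > /srv/salt/nginx.sls <<'EOF'\nnginx:\n  pkg.installed: []\n  service.running:\n    - require:\n      - pkg: nginx\nEOF\nsalt '*web*' state.sls nginx\n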

          "},{"location":"saltstack/#grains","title":"Grains","text":"

          Facts about a system. Similar to facter in puppet land.

          "},{"location":"saltstack/#pillar","title":"Pillar","text":"

          Hierarchical data to be interpolated into variables in state files. Similar to hiera in puppet land.

          "},{"location":"saltstack/#examples","title":"Examples","text":""},{"location":"saltstack/#configure-output-options","title":"Configure output options","text":"

Unfortunately this only applies to the salt command, not salt-run, salt-key, etc.

          $ cat ~/.saltrc\noutput: yaml\n
          "},{"location":"saltstack/#view-salt-versions","title":"View salt versions","text":"

          For simple salt version:

          salt --version\n

          For more specific versions:

          salt --versions\n
          "},{"location":"saltstack/#show-all-minions","title":"Show all minions","text":""},{"location":"saltstack/#show-all-responding-minions","title":"Show all responding minions","text":"
          salt-run manage.up\n
          "},{"location":"saltstack/#show-all-minions-listed-by-hostst-that-are-up-and-hosts-that-are-down","title":"Show all minions, listed by hostst that are up and hosts that are down","text":"
          salt-run manage.status\n

          Example output:

          down:\n    - hadoop4.chn1.example\nup:\n    - appserver1.chn1.example\n    - backups1.chn1.example\n
          "},{"location":"saltstack/#show-any-host-that-has-had-salt-applied-at-some-point","title":"Show any host that has had salt applied at some point","text":"

          This shows only accepted keys. Without the jq part, rejected and denied keys would also show up in this list.

          salt-key --out json | jq '.minions[]'\n
          "},{"location":"saltstack/#accept-a-key-that-has-not-yet-been-accepted","title":"Accept a key that has not yet been accepted","text":"

          After finding the hostname in the Unaccepted list returned by salt-key:

          salt-key -a hostname.example.com\n
          "},{"location":"saltstack/#show-the-version-of-an-installed-package-on-all-hosts","title":"Show the version of an installed package on all hosts","text":"
          salt '*' pkg.version bash\n
          "},{"location":"saltstack/#targeting-hosts","title":"Targeting hosts","text":"
          • https://docs.saltstack.com/en/latest/topics/targeting/#advanced-targeting-methods
          "},{"location":"saltstack/#target-using-globs","title":"Target using globs","text":"
          salt '*dev*' pkg.install pre-commit\n
          "},{"location":"saltstack/#target-using-regular-expressions","title":"Target using regular expressions","text":"
          salt -b1 -E 'miner..-aws' cmd.run 'service miner restart'\n
          "},{"location":"saltstack/#target-an-ip-subnet","title":"Target an IP subnet","text":"
          salt -t 15 -S '172.21.5.0/24' cmd.run 'dpkg -l linux-image'\n
          "},{"location":"saltstack/#target-a-specific-os","title":"Target a specific OS","text":"

          https://docs.saltstack.com/en/latest/topics/targeting/compound.html

          salt -C 'G@lsb_distrib_codename:trusty' pkg.install cmatrix\n
          "},{"location":"saltstack/#run-a-command-on-a-subset-of-hosts","title":"Run a command on a subset of hosts","text":"

          Check ntp stats on hadoop hosts.

          salt \"*hadoop*\" cmd.run \"ntpq -p\"\n

          Many more complicated examples of remote command execution: https://docs.saltstack.com/en/latest/topics/execution/remote_execution.html

          "},{"location":"saltstack/#show-ip-addresses","title":"Show IP addresses","text":"

          https://docs.saltstack.com/en/latest/ref/modules/all/salt.modules.network.html

          salt '*itni*' network.ip_addrs\n
          "},{"location":"saltstack/#show-available-grains","title":"Show available grains","text":"

This just lists the grain keys, not the values.

          salt '*minecraft*' grains.ls\n
          "},{"location":"saltstack/#show-grain-data-for-a-subset-of-hosts","title":"Show grain data for a subset of hosts","text":"

This lists the keys and values.

          salt '*dorks*' grains.items\n
          "},{"location":"saltstack/#show-one-grain-for-a-subset-of-hosts","title":"Show one grain for a subset of hosts","text":"
          salt '*elk*' grains.fetch lsb_distrib_release\n

          or...

          salt '*elk*' grains.item os\n
          "},{"location":"saltstack/#look-up-grain-data-while-logged-into-a-minion","title":"Look up grain data while logged into a minion","text":"

While logged into a minion, you can use salt-call to look up the grain or pillar data that would be applied:

salt-call grains.get os\nsalt-call pillar.get users\n
          "},{"location":"saltstack/#append-a-username-to-the-accounts-grain-and-apply-the-users-saltstate","title":"Append a username to the accounts grain and apply the users saltstate","text":"
          salt '*searchstring*' grains.append accounts user-to-add\nsalt '*searchstring*' state.sls users\nsalt '*searchstring*' user.list_users --out yaml > list_users.yaml\n

          Or as a function to run locally

          add_user_via_salt_grains() {\n  new_user=$1\n  id \"${new_user}\" && return 0\n  salt-call grains.append accounts \"$new_user\" && \\\n  salt-call state.sls users\n  id \"$new_user\"\n}\n
          "},{"location":"samba/","title":"Samba","text":"

          Samba is the unix SMB daemon of choice.

          "},{"location":"samba/#commands","title":"Commands","text":""},{"location":"samba/#usrbinnet","title":"/usr/bin/net","text":"

Samba has a generically named binary called net, which matches the Windows command of the same name. It's used to manage Samba and CIFS servers.

          "},{"location":"samba/#testparm","title":"testparm","text":"

          testparm - check an smb.conf configuration file for internal correctness. This is great for having a heavily commented main file, like smb.conf.master, then generating the bare smb.conf from that file using testparm -s smb.conf.master > smb.conf.

          "},{"location":"samba/#smbtree","title":"smbtree","text":"

          The smbtree tool will print out a tree list of all the reachable samba shares.

          "},{"location":"samba/#tips","title":"Tips","text":""},{"location":"samba/#get-info-from-winbind","title":"get info from winbind","text":"

wbinfo\n

          "},{"location":"samba/#clear-name-resolution-cache","title":"Clear name resolution cache","text":"

net cache flush\n

          "},{"location":"samba/#get-a-remote-rpc-shell","title":"Get a remote rpc shell","text":"

net rpc shell -U $user_name -S $host_name\n

          "},{"location":"samba/#show-open-sessions-on-local-server","title":"Show open sessions on local server","text":"

net status shares\n

          "},{"location":"samba/#show-open-files-on-remote-server","title":"Show open files on remote server","text":"

net rpc file -S $server_name\n

          "},{"location":"samba/#mount-a-samba-share-on-a-linux-client","title":"Mount a samba share on a linux client","text":"

The legacy smbfs filesystem type was removed from Linux long ago; on modern kernels use cifs:

mount -t cifs -o username=$user_name //$server_name/C$ $local_share_name\n

          "},{"location":"samba/#mount-a-remote-share","title":"Mount a remote share","text":"

mount_smbfs \"//domain_name;username@hostname/groups\" asdf\n
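
On macOS, you can list the shares a server offers before mounting:

smbutil view //username@hostname\n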

          "},{"location":"samba/#kill-all-samba-sessions-for-a-given-user-hence-forcing-refresh-of-their-group-memberships","title":"Kill all Samba sessions for a given user, hence forcing refresh of their group memberships","text":"

net status sessions | grep johndoe | awk '{print $1}' | xargs sudo kill\n

          "},{"location":"samba/#join-domain-in-ads-security-mode","title":"Join domain in ads security mode","text":"

net ads join -U dhoherd@DOMAIN.EXAMPLE.COM\n

          "},{"location":"samba/#leave-domain","title":"Leave domain:","text":"

net ads leave -U dhoherd@DOMAIN.EXAMPLE.COM\n

          "},{"location":"samba/#upgrading-supermicro-firmware","title":"Upgrading Supermicro firmware","text":"

Supermicro iKVM can only mount ISOs that are hosted on Samba shares... \ud83d\ude44 In my experience they also only support the old and vulnerable SMBv1 protocol. (WannaCry exploited SMBv1.) In order to host ISOs for use with iKVM, it's useful to run Samba in Docker so it is isolated and only running while you are using it.

          This example uses the docker image dperson/samba to start a Samba server sharing one passwordless /public share which mounts $HOME/isos from outside the container.

docker run \\\n  --name \"$USER-samba\" \\\n  --rm \\\n  -d \\\n  -p 139:139 \\\n  -p 445:445  \\\n  -v $HOME/isos:/public  \\\n  dperson/samba \\\n  -s \"public;/public\" \\\n  -g \"server min protocol = NT1\" \\\n  -g \"log level = 3\"\n
          "},{"location":"samsung/","title":"Samsung","text":""},{"location":"samsung/#android-links-relevant-to-samsung","title":"Android links relevant to Samsung","text":"
          • https://www.sammobile.com/firmwares/database/SM-T700/
          • https://www.sammobile.com/samsung/galaxy-s6/firmware/
          • https://en.wikipedia.org/wiki/Android_version_history
          • https://en.wikipedia.org/wiki/Samsung_Galaxy_S6
          • https://en.wikipedia.org/wiki/Samsung_Galaxy_Tab_S_8.4
          • https://www.kingoapp.com/help/samsung-knox-counter.htm
          • https://wiki.lineageos.org/devices/klimtwifi/install: Galaxy Tab S 8.4 WiFi
          • https://wiki.lineageos.org/devices/zerofltexx/: Galaxy S6
          • https://github.com/LineageOS/lineage_wiki/blob/57994e983af4e7/_data/devices/klimtwifi.yml
          "},{"location":"samsung/#tv-links","title":"TV links","text":"
          • UN55D6000 specs
          "},{"location":"samsung/#frame-info","title":"Frame info","text":"
          • 2022 versions of the frame removed an API that allows you to manage artwork that is available to the TV for display.
          "},{"location":"science/","title":"Science","text":""},{"location":"science/#links","title":"Links","text":"
          • https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5862244/: \"Getting rigorous with scientific rigor\"
          "},{"location":"screenshot/","title":"Screenshots","text":"

Different methods for grabbing a graphical image of what is being displayed on a physical or virtual device.

          "},{"location":"screenshot/#linux","title":"Linux","text":""},{"location":"screenshot/#grab-all-vt-screenshots","title":"Grab all vt screenshots","text":"
          for X in {0..10} ; do\n  sudo DISPLAY=:0 fbgrab -c${X} fbgrab_vt${X}_screenshot.png ;\ndone ;\n
          "},{"location":"screenshot/#screenshot-x-using-scrot","title":"Screenshot X using scrot","text":"
          sudo DISPLAY=:0 scrot -b -d 5 'scrot_%F-%T.png'\n
          "},{"location":"screenshot/#screenshot-x-using-imagemagick","title":"Screenshot X using imagemagick","text":"
          sudo DISPLAY=:0 import -window root imagemagick_screenshot.png\n
          "},{"location":"screenshot/#macos","title":"macOS","text":"
          screencapture screenshot.png\n

          There are a lot of command line args for this tool.

          "},{"location":"sdr/","title":"sdr","text":"

          Software Defined Radio

          "},{"location":"sdr/#overview","title":"Overview","text":"

SDR is dominated by Windows software, so this page leaves all that out and deals with Linux and macOS software.

          "},{"location":"sdr/#links","title":"Links","text":"
          • https://www.weather.gov/nwr: NOAA Weather Radio
          • http://gqrx.dk: GUI SDR software for macOS and Linux (including Raspberry Pi).
          • https://cubicsdr.com: GUI SDR software for macOS, Linux (including Raspberry Pi), and Windows.
          • https://www.radioreference.com
          • http://www.sigidwiki.com
          • http://www.baudline.com: \"Baudline is a time-frequency browser designed for scientific visualization of the spectral domain.\"
          • http://gnuradio.org: \"GNU Radio is a free & open-source software development toolkit that provides signal processing blocks to implement software radios.\"
          • https://github.com/miek/inspectrum: \"inspectrum is a tool for analysing captured signals, primarily from software-defined radio receivers.\"
          • https://www.sigidwiki.com/wiki/Signal_Identification_Guide
          • https://www.rail.watch/rwpi.html: \"Rail Watch Raspberry Pi Monitoring Software\"
          "},{"location":"security/","title":"Security","text":""},{"location":"security/#internet","title":"Internet","text":"
          • https://www.cisecurity.org
          • https://csrc.nist.gov/glossary: \"This Glossary includes terminology from the final version of NIST's cybersecurity and privacy publications\"
          "},{"location":"security/#physical","title":"Physical","text":"
          • https://www.youtube.com/@lockpickinglawyer
          "},{"location":"sed/","title":"sed","text":"

          sed is the stream editor.

          "},{"location":"sed/#tips","title":"Tips","text":""},{"location":"sed/#osx-pitfalls","title":"OSX Pitfalls","text":"

          Beware that BSD sed -i requires a mandatory flag for the backup file. You can use -i '' to have no backup file.

          Also, OS X sed doesn't support case insensitivity! WTF?! We have to use perl -pe 's/foo/bar/i' foo.txt or homebrew's gsed.

          "},{"location":"sed/#only-print-a-specific-line","title":"Only print a specific line","text":"

          This will print only the second line of the file

sed -n '2{p;q;}' foo.txt\n
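
A range can be printed by giving two addresses. This prints lines 2 through 4:

sed -n '2,4p' foo.txt\n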
          "},{"location":"sed/#only-print-if-match","title":"Only print if match","text":"

          This will perform a replacement and print the result. Use -i (with caution!) to edit the file at the same time.

sed -n 's/\\(127\\.[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}\\)/\\1 localhost localhost4/p' /etc/hosts\n
          "},{"location":"sed/#add-a-new-line-with-content-after-a-match","title":"Add a new line with content after a match","text":"

Since BSD sed can't insert escape sequences like \\n in the replacement text, the inserted line has to appear literally on its own line, so it's a bit funky looking but still functional.

          sed -i \"\" -e '/respawn/a\\\nrespawn limit 10 5' app_worker_*.conf\n
          "},{"location":"sed/#print-file-starting-with-first-string-match","title":"Print file starting with first string match","text":"
          sed -n '/ROUTING TABLE/,$p' /etc/openvpn/openvpn-status.log\n
          "},{"location":"sed/#print-only-lines-after-a-match","title":"Print only lines after a match","text":"

          The syntax in both of these is <sed_address_one>,<sed_address_two> <action>

          $ k describe pod -n moms-iot-oven cookie-baker-2022-proxy |\nsed -ne '/^Events/,$ p;'\nEvents:\n  Type    Reason   Age                       From     Message\n  ----    ------   ----                      ----     -------\n  Normal  Pulling  56m (x1281 over 4d13h)    kubelet  Pulling image \"ubuntu:2004\"\n  Normal  BackOff  118s (x29062 over 4d13h)  kubelet  Back-off pulling image \"ubuntu:2004\"\n

          That says \"Do not print each line by default (-n). Start at the sed address which is a 'context address' regex /^Events/, and end at the special sed address $ which means the last line of input, and print those lines. (All of this info is in the man page.)

          Or if you don't want to include the match:

          sed -e '1,/^Events/ d'\n

          This says \"Start at line one and delete every line up to and including the match ^Events.\"

          "},{"location":"sed/#print-file-contents-between-two-string-matches","title":"Print file contents between two string matches","text":"

          This will print the contents of the log file between ROUTING TABLE and GLOBAL STATS inclusive.

          sed -n '/^ROUTING TABLE/,/^GLOBAL STATS/p;/^GLOBAL STATS/q' /etc/openvpn/openvpn-status.log\n

          Or as a bash function

          show-contents-between() { sed -n \"/$1/,/$2/p;/$2/q\"; }\n
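
The function reads stdin, so the above example becomes:

show-contents-between '^ROUTING TABLE' '^GLOBAL STATS' < /etc/openvpn/openvpn-status.log\n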
          "},{"location":"sed/#uncomment-a-line-that-matches-a-regex","title":"Uncomment a line that matches a regex","text":"

          This removes the comment and adds wheel to the sudoers list

          /bin/sed -i '/^#\\s\\+%wheel\\s\\+ALL=(ALL)\\s\\+ALL$/s/^#\\s*//' /etc/sudoers\n
          "},{"location":"sed/#delete-lines-containing-a-string","title":"Delete lines containing a string","text":"
          sed -i -e '/root/d' asdf.txt\n
          "},{"location":"sed/#delete-lines-not-containing-a-string","title":"Delete lines not containing a string","text":"
          sed -i '/foo/!d' wy.txt\n

          Or not containing a MAC address:

          sed -i '' -E '/[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}/!d' *\n
          "},{"location":"sed/#do-a-replacement-on-all-files-in-a-dir","title":"Do a replacement on all files in a dir","text":"
          sed -i \"s/foo/bar/g\" /etc/apache2/sites-available/*\n
          "},{"location":"sed/#switch-all-github-urls-from-http-to-ssh","title":"Switch all github urls from http to ssh","text":"
sed -E '/url = /s%https?://github.com/([^/]*/[^/]*)%git@github.com:\\1%' ~/code/*/.git/config\n
          "},{"location":"sed/#word-boundaries","title":"Word boundaries","text":"

          Normally, word boundaries look like this:

          /\\bMyWord\\b/\n

          or

          /\\<myword\\>/\n

          But in OS X, you have to do them like this:

          /[[:<:]]MyWord[[:>:]]/\n

          Which is just ridiculous, so use homebrew's gsed if you can.

          "},{"location":"sed/#add-a-bell-to-tail","title":"Add a bell to tail","text":"
tail -n 0 -f /var/log/messages | sed 's/$/\\a/'\n
          "},{"location":"sed/#see-also","title":"See Also","text":"
          • Some great sed tips - http://www-rohan.sdsu.edu/doc/sed.html
          "},{"location":"selinux/","title":"selinux","text":"

Security-Enhanced Linux

          "},{"location":"selinux/#notes","title":"Notes","text":"
          • Tutorial Video: https://www.youtube.com/watch?v=MxjenQ31b70
          • CentOS HowTo: http://wiki.centos.org/HowTos/SELinux
          • Labels are in user:role:type:level(optional)
          • Logs go in /var/log/audit/audit.log and /var/log/messages
          • Additional tools:

          • semanage and more are included in CentOS package policycoreutils

          • setroubleshoot has a bunch of tools included. Lots of prerequisites
          • setroubleshoot-server has a bunch of tools included. Lots of prerequisites
          "},{"location":"selinux/#examples","title":"Examples","text":""},{"location":"selinux/#show-status-of-selinux","title":"Show status of selinux","text":"
          sestatus\ngetenforce\n
          "},{"location":"selinux/#disable-without-rebooting","title":"Disable without rebooting","text":"
          echo 0 >/selinux/enforce\n

          or...

          setenforce 0\n
          "},{"location":"selinux/#list-selinux-contexts-for-processes","title":"List selinux contexts for processes","text":"
          ps auxZ\n
          "},{"location":"selinux/#list-selinux-contexts-for-processes-that-have-open-sockets","title":"List selinux contexts for processes that have open sockets","text":"
          lsof -i -Z # See `man lsof` for more specific selinux syntaxes\n
          "},{"location":"selinux/#list-selinux-contexts-for-the-current-user","title":"List selinux contexts for the current user","text":"
          id -Z\n
          "},{"location":"selinux/#list-selinux-contexts-for-files","title":"List selinux contexts for files","text":"
          ls -lZ\n
          "},{"location":"selinux/#recursively-set-a-context-type","title":"Recursively set a context type","text":"
          chcon -R -t httpd_sys_content_t sole\n
          "},{"location":"selinux/#copy-the-selinux-context-from-another-file-or-directory","title":"Copy the selinux context from another file or directory","text":"
          chcon --reference /file/or/dir/to/reference /target/file\n
          "},{"location":"selinux/#restore-default-contexts","title":"Restore default contexts","text":"

          This command restores the contexts as referenced in /etc/selinux/targeted/contexts/files/file_contexts

          restorecon /path/to/broken/file\nrestorecon -vR /path/to/broken/dir\n
          "},{"location":"selinux/#restore-defaults-context-automatically-at-system-reboot","title":"Restore defaults context automatically at system reboot","text":"

          This should take roughly the same amount of time as a fsck would.

          touch /.autorelabel\n
          "},{"location":"selinux/#define-a-default-context-for-a-directory","title":"Define a default context for a directory","text":"
semanage fcontext -a -t httpd_sys_content_t '/z5/sole(/.*)?'\n
          "},{"location":"selinux/#define-a-default-context-for-a-directory-using-a-reference-from-the-original-policy","title":"Define a default context for a directory, using a reference from the original policy","text":"
          semanage fcontext -a -e /var/www /z5/sole\ncat /etc/selinux/targeted/contexts/files/file_contexts.subs # view the result\n
          "},{"location":"selinux/#list-policies","title":"List policies","text":"
          semanage port -l\nsemanage user -l\n
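
To allow a confined service to use a non-default port, add a label for it (the port and type here are illustrative):

semanage port -a -t http_port_t -p tcp 8090\n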
          "},{"location":"selinux/#show-selinux-booleans","title":"Show selinux booleans","text":"
          getsebool -a\n
          "},{"location":"selinux/#permanetnly-set-an-selinux-boolean","title":"Permanetnly set an selinux boolean","text":"
          setsebool -P booleanname 1\n
          "},{"location":"semver/","title":"semver","text":"

          Semantic Versioning

          "},{"location":"semver/#links","title":"Links","text":"
          • Online semver checking tool: https://jubianchi.github.io/semver-check
• Semantic Versioning Will Not Save You: https://hynek.me/articles/semver-will-not-save-you/
          • BNF checking tool
          • Example valid and invalid semvers
          • Semver cheatsheet
          "},{"location":"sensu/","title":"sensu","text":"

          \"Monitor servers, services, application health, and business KPIs. Get notified about failures before your users do. Collect and analyze custom metrics. Give your business the competitive advantage it deserves.\" - https://sensuapp.org

          "},{"location":"sensu/#overview","title":"Overview","text":"
          • Checks - used to monitor services or measure resources
          • Handlers - for taking action on Sensu events, which are produced by checks
          • Filters - for filtering (removing) events destined for one or more event handlers
          • Mutators - transform event data for handlers
          "},{"location":"sensu/#checks","title":"Checks","text":"
          • standalone checks are scheduled to run periodically on the client (eg: all hosts need to check disks every 15 minutes)
          • subscription checks are requested by the server to hosts with a given tag (eg: all web hosts need to run check_http)
          "},{"location":"sensu/#see-also","title":"See Also","text":"
          • Uchiwa - Open source dashboard for Sensu.
          • Puppet + Sensu = Love; Infrastructure as Code and Monitoring, Sharing the Same Development Workflow
          • SF DevOps Meetup: Kyle Anderson, Sensu @ Yelp Part 1, Part 2
          "},{"location":"serverless/","title":"serverless","text":"

          \"Serverless is your toolkit for deploying and operating serverless architectures. Focus on your application, not your infrastructure.\" - https://serverless.com/

          "},{"location":"sgdisk/","title":"sgdisk","text":"

          \"sgdisk - Command-line GUID partition table (GPT) manipulator for Linux and Unix\" - man sgdisk

          "},{"location":"sgdisk/#see-also","title":"See also","text":"
          • gdisk is an interactive prompt interface.
          • cgdisk is a curses interface to gdisk, similar to fdisk in MS Windows of yore.
          "},{"location":"sgdisk/#examples","title":"Examples","text":""},{"location":"sgdisk/#delete-all-gpt-and-mbr-entries-and-create-a-new-gpt","title":"Delete all GPT and MBR entries and create a new GPT","text":"
          sgdisk -Z /dev/sdz\n
          "},{"location":"sgdisk/#create-a-new-partition","title":"Create a new partition","text":"
          • Partition numbers start at 1
          • The syntax here is --new <partition_number>[:<start>[:<end>]]
          • See the man page for a variety of ways to reference start and end.
          sgdisk --new 1:2048:732566636 /dev/sdz\n
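
The type code and name can also be set at creation time (the values here are illustrative; 8200 is Linux swap):

sgdisk --new 2:0:+8G --typecode 2:8200 --change-name 2:'swap' /dev/sdz\n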
          "},{"location":"sgdisk/#randomize-guids-to-ensure-uniqueness-after-cloning","title":"Randomize GUIDs to ensure uniqueness after cloning","text":"
          sgdisk --randomize-guids /dev/sdz\n
          "},{"location":"sgdisk/#print-info-about-partitions-on-a-disk","title":"Print info about partitions on a disk","text":"
          sgdisk -p /dev/sdz\n
          "},{"location":"sgdisk/#destroy-gpt-partition-table","title":"Destroy GPT partition table","text":"
          sgdisk -z /dev/sdz\n
          "},{"location":"sgdisk/#print-last-block-number-of-the-largest-available-section-of-the-disk","title":"Print last block number of the largest available section of the disk","text":"
          sgdisk -E /dev/sdz\n
          "},{"location":"sgdisk/#print-extended-info-about-the-first-partition-on-devsda","title":"Print extended info about the first partition on /dev/sda","text":"
          sgdisk -i 1 /dev/sdz\n
          "},{"location":"sgdisk/#backup-a-guid-partition-table","title":"Backup a GUID partition table","text":"
          sgdisk -b ~/sdz_partition_backup /dev/sdz\n
          "},{"location":"sgdisk/#restore-a-guid-partition-table","title":"Restore a GUID partition table","text":"
          sgdisk -l ~/sdz_partition_backup /dev/sdz\n
          "},{"location":"sgdisk/#create-a-new-partition_1","title":"Create a new partition","text":"

This creates a 4th partition that is 50G, using the default starting point (0 means the first available sector).

          sgdisk /dev/sdz --new=4:0:+50G\n
          "},{"location":"sgdisk/#delete-the-4th-partition","title":"Delete the 4th partition","text":"
          sgdisk /dev/sdz -d 4\n
          "},{"location":"sgdisk/#create-a-new-partition-number-4-that-fills-the-biggest-available-section-of-the-disk","title":"Create a new partition number 4 that fills the biggest available section of the disk","text":"
          sgdisk /dev/sdz -N 4\n
          "},{"location":"sgdisk/#grab-the-name-of-a-partition","title":"Grab the name of a partition","text":"
## extracts the partition name, handling spaces and quotes\nsgdisk /dev/sdk -i 1 | sed -n \"s/^Partition name: '\\(.*\\)'$/\\1/p\"\n
          "},{"location":"shairport-sync/","title":"shairport-sync","text":"

          \"AirPlay audio player. Shairport Sync adds multi-room capability with Audio Synchronisation\" - https://github.com/mikebrady/shairport-sync

          "},{"location":"shairport-sync/#example","title":"Example","text":""},{"location":"shairport-sync/#shairport-sync-in-docker","title":"shairport-sync in docker","text":"

This is a really easy way to get shairport-sync running on Linux. The container image is only 12MB.

          docker run \\\n  --rm \\\n  --detach \\\n  --network=host \\\n  --device /dev/snd \\\n  -e AIRPLAY_NAME=\"shairport ${HOSTNAME}\" \\\n  kevineye/shairport-sync\n
          "},{"location":"shred/","title":"Shred","text":"

          \"shred - overwrite a file to hide its contents, and optionally delete it.\" - man shred

          "},{"location":"shred/#examples","title":"Examples","text":""},{"location":"shred/#write-random-data-to-a-hard-disk-5-times-then-once-with-zeroes","title":"Write random data to a hard disk 5 times, then once with zeroes","text":"

shred -n 5 -z /dev/disk/by-id/usb-Seagate_External_2HC015KJ-0:0\n

          "},{"location":"shred/#write-zeroes-to-a-disk-5-times-and-report-progress","title":"Write zeroes to a disk 5 times and report progress","text":"
          for _ in {1..5} ; do shred -n 0 -z -v /dev/disk/by-id/usb-Seagate_External_2HC015KJ-0:0 ; done ;\n
          "},{"location":"shutdown/","title":"shutdown","text":"

A command to shut down or reboot a *nix server.

          "},{"location":"shutdown/#linux-examples","title":"Linux Examples","text":""},{"location":"shutdown/#reboot-a-server-in-5-minutes-with-a-message","title":"Reboot a server in 5 minutes with a message","text":"
          shutdown -r +5 \"Please announce in #dev if you would like to cancel this reboot\"\n
          "},{"location":"shutdown/#power-off-a-server-in-5-minutes","title":"Power off a server in 5 minutes","text":"
          shutdown -P +5\n
          "},{"location":"shutdown/#cancel-a-scheduled-shutdown","title":"Cancel a scheduled shutdown","text":"
          shutdown -c\n
          "},{"location":"shutdown/#show-when-a-shutdown-will-occur","title":"Show when a shutdown will occur","text":"
awk -F= 'NR == 1 {print substr($2,0,length($2)-6)}' /run/systemd/shutdown/scheduled |\nxargs -I{} date -d @{}\n
          "},{"location":"shutdown/#macos-examples","title":"MacOS examples","text":""},{"location":"shutdown/#power-off-a-machine-in-5-minutes","title":"Power off a machine in 5 minutes","text":"
          shutdown -h +5 \"Shutting down in 5 minutes\"\n

          or...

          echo \"Shutting down in 5 minutes\" | shutdown -h $(date -v +5M +%y%m%d%H%M) -\n
          "},{"location":"shutdown/#other-functions","title":"Other functions","text":"

Using the above shutdown syntax, you can use -s for sleep, -r for reboot, and others. See man shutdown for more.

          "},{"location":"sips/","title":"sips","text":"

          \"scriptable image processing system.\" - sip --help

          "},{"location":"sips/#examples","title":"Examples","text":""},{"location":"sips/#resize-a-dng-and-save-the-output-as-jpg","title":"Resize a DNG and save the output as JPG","text":"
          SOURCE=foo.dng\nsips --resampleHeightWidthMax 1024  --setProperty format jpeg \"$SOURCE\" --out \"${SOURCE%.dng}.jpg\"\n
          "},{"location":"sips/#resize-all-images-in-the-cwd-that-were-taken-by-the-d5100","title":"Resize all images in the CWD that were taken by the D5100","text":"
          mdfind -onlyin \"$PWD\" 'kMDItemAcquisitionModel == \"NIKON D5100\"' |\nwhile read -r file ; do\n  sips --resampleHeightWidthMax 1600 --setProperty format jpeg \"${file}\" --out \"${file%.*}.jpg\"\ndone\n
          "},{"location":"sips/#resize-all-images-in-a-dir-tree-convert-them-to-jpg-and-output-them-to-a-different-folder","title":"Resize all images in a dir tree, convert them to jpg and output them to a different folder","text":"

          In the following example it is important to leave off the trailing slash on the target dir:

SRC_DIR=\"${HOME}/Pictures/photo_queue\"\nOUT_DIR=\"${HOME}/Desktop/Stuff\"\nMAX_WIDTH=1600\nfind \"${SRC_DIR}\" \\\n  -type f \\\n  -exec sips \\\n        --resampleHeightWidthMax \"${MAX_WIDTH}\" \\\n        --setProperty format jpeg {} \\\n        --out \"${OUT_DIR}\" \\;\n
          "},{"location":"sips/#see-also","title":"See Also","text":"
          • exiftool
          • graphicsmagick
          • imagemagick
          • jpeginfo
          "},{"location":"slides/","title":"slides","text":"

          Technologies used to create slide decks

          "},{"location":"slides/#links","title":"Links","text":"
          • https://github.com/hakimel/reveal.js
          • https://github.com/mikepea/awk_tawk
          • https://rise.readthedocs.io
          "},{"location":"smartctl/","title":"smartctl","text":"

          Linux interface to SMART data for hard disks.

          "},{"location":"smartctl/#examples","title":"Examples","text":""},{"location":"smartctl/#show-identifying-information-about-a-device","title":"Show identifying information about a device","text":"
          smartctl -i /dev/sda\n
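
Show the overall health self-assessment verdict:

smartctl -H /dev/sda\n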
          "},{"location":"smartctl/#show-drive-attributes","title":"Show drive attributes","text":"

          This shows a bunch of recorded information that is updated over the life of the drive.

          smartctl -A /dev/sda\n

Show the same information in a brief format that includes vendor flags:

          smartctl -A -f brief /dev/sda\n
          "},{"location":"smartctl/#show-all-data","title":"Show all data","text":"
          smartctl -x /dev/sda\n
          "},{"location":"smartctl/#perform-a-self-test","title":"Perform a self test","text":"
          smartctl -t short /dev/sda\n
          "},{"location":"smartctl/#view-the-results-of-a-self-test","title":"View the results of a self test","text":"

          Make sure to check for the presence of a \"Self-test routine in progress\" line.

          smartctl -l selftest /dev/sda\n
          "},{"location":"smartctl/#show-how-many-hours-each-drive-has-been-powered-on","title":"Show how many hours each drive has been powered on","text":"
          $ lsblk -I8 -pdo NAME,SIZE,MODEL,SERIAL -n |\nwhile read -r drive size extras ; do\n  echo \"$drive|$(sudo smartctl -A \"$drive\" | awk '/Power_On_Hours/ {print $NF}')|$size|$extras\"\ndone | column -s'|' -t\n/dev/sda    35265    238.5G    Samsung SSD 850  S33KNX0JB59421F\n/dev/sdb    41261    7.3T      HGST HUH728080AL VKHAVT1X\n/dev/sdc    41305    7.3T      HGST HUH728080AL VKH46K7X\n/dev/sdd    41320    7.3T      HGST HUH728080AL VKH82NNX\n/dev/sdf    73       10.9T     HGST HUH721212AL 8CK26DWK\n
          "},{"location":"smartctl/#see-also","title":"See Also","text":"
          • https://www.backblaze.com/blog/what-smart-stats-indicate-hard-drive-failures/
          "},{"location":"smartstack/","title":"smartstack","text":"

          SmartStack is an automated service discovery and registration framework.

          "},{"location":"smartstack/#components","title":"Components","text":"
• Synapse: Queries ZooKeeper for healthy services to connect to, then configures HAProxy
• Nerve: Runs health checks against local services and announces their state in ZooKeeper
• ZooKeeper: Service registry
• HAProxy: Load balancing
          "},{"location":"smartstack/#links","title":"Links","text":"
          • DockerCon 14: Tomas Doran - Building a smarter application stack
          • Smartstack ( HAProxy + Serf ) Automated service discovery without rewriting apps
          • GetYourGuide's SmartStack handbook
          "},{"location":"snap/","title":"snap","text":"

          \"Package any app for every Linux desktop, server, cloud or device, and deliver updates directly.\" - http://snapcraft.io/

          A snap is a fancy zip file containing an application together with its dependencies, and a description of how it should safely be run on your system, especially the different ways it should talk to other software.

          Most importantly snaps are designed to be secure, sandboxed, containerised applications isolated from the underlying system and from other applications. Snaps allow the safe installation of apps from any vendor on mission critical devices and desktops.

          "},{"location":"snap/#links","title":"Links","text":"
          • https://www.ubuntu.com/internet-of-things
          • https://developer.ubuntu.com/en/snappy/
          • http://snapcraft.io/
          "},{"location":"snmp/","title":"snmp","text":"

          Simple Network Management Protocol

          "},{"location":"snmp/#links","title":"Links","text":"
          • Third party MIBs: https://github.com/trevoro/snmp-mibs/tree/master/mibs
          • How-To: https://web.archive.org/web/20070122063239/http://www.linuxhomenetworking.com/wiki/index.php/Quick_HOWTO_:_Ch22_:_Monitoring_Server_Performance
          • APC has some snmp tricks specific to those devices.
          "},{"location":"snmp/#examples","title":"Examples","text":""},{"location":"snmp/#install-snmp-utils-on-redhat-centos","title":"Install snmp utils on redhat / centos","text":"
          yum install net-snmp-utils\n
          "},{"location":"snmp/#show-the-system-description-of-a-host","title":"Show the system description of a host","text":"
          snmpwalk -v 1 -c public 192.168.9.1 SNMPv2-MIB::sysDescr.0\n
          "},{"location":"snmp/#walk-1722811110-with-community-string-itgwrk","title":"Walk 172.28.111.10 with community string itgwrk","text":"
          snmpwalk -v 1 -c \"public\" 172.28.111.10\n

          IPV6 is different...

          snmpwalk -v 1 -c public udp6:fe80::a2e:5fff:feba:f586%en0 enterprises\n
          "},{"location":"snmp/#show-network-info","title":"Show Network Info","text":"
          snmpwalk -c public 192.168.9.1 1.3.6.1.2.1\n
          "},{"location":"snmp/#show-airport-upload-and-download-bytes","title":"Show Airport Upload and Download bytes","text":"
          snmpwalk -c public 192.168.9.1 IF-MIB::ifOutOctets\nsnmpwalk -c public 192.168.9.1 IF-MIB::ifInOctets\n
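
To map those interface counters to interface names:

snmpwalk -c public 192.168.9.1 IF-MIB::ifDescr\n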
          "},{"location":"snmp/#show-configured-ip-addresses","title":"Show configured IP addresses","text":"

Show IPv4 addresses by querying over IPv6

          snmpwalk -v 1 -c public udp6:fe80::a2e:5fff:feba:f586%en0 ipAdEntAddr\n
          "},{"location":"solo/","title":"solo","text":"

          solo is a command that uses a per-user loopback IP# and a designated port to ensure that multiple copies of a command are not run. This takes the place of pid files and process tracking, and has the benefit of never leaving around a false positive. It also lets you skip building trap, pid file and process checking into every shell script you write.

          https://github.com/timkay/solo

          "},{"location":"sort/","title":"sort","text":"

          sort is a command to sort lines of data.

          "},{"location":"sort/#gnu-syntax-examples","title":"GNU Syntax Examples","text":"

On OS X, GNU sort is available as gsort (install coreutils from homebrew).

          "},{"location":"sort/#avoid-leading-space-caveat","title":"Avoid leading space caveat","text":"

          There is a big caveat to using GNU sort with data that has inconsistent whitespace.

          $ cat test-data.txt\nfoo     5\nbar  2\nbaz         9\n$ gsort -k2 --debug test-data.txt\ngsort: text ordering performed using \u2018en_US.UTF-8\u2019 sorting rules\ngsort: leading blanks are significant in key 1; consider also specifying 'b'\nbaz         9\n   __________\n_____________\nfoo     5\n   ______\n_________\nbar  2\n   ___\n______\n

          As the debug text indicates, using -b can avoid this, though I'm not sure why this isn't the default behavior:

          $ gsort -b -k2 --debug test-data.txt\ngsort: text ordering performed using \u2018en_US.UTF-8\u2019 sorting rules\nbar  2\n     _\n______\nfoo     5\n        _\n_________\nbaz         9\n            _\n_____________\n
          "},{"location":"sort/#randomly-sort-a-file-in-place","title":"Randomly sort a file in place","text":"

By giving -o the same output file as the input file we can shuffle in place without errors. Trying the same thing with a pipe or a redirect will usually produce an empty file.

          Beware that this will put duplicate lines right next to each other. If you need better file content shuffling use shuf.

          sort -o foo -R foo\n
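
With GNU coreutils, shuf can also shuffle a file in place safely (it reads all input before opening the output) and does not group duplicate lines together:

shuf -o foo < foo\n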
          "},{"location":"sort/#sort-by-multiple-fields-with-different-sort-requirements","title":"Sort by multiple fields with different sort requirements","text":"

          When sorting by multiple fields, it's important to specify the start and end of where you want the sort to occur. If you do not do this, you may get too short of a comparison, or too long of a comparison. Check the output of --debug if you don't get the right sort order.

          # -k defines the sort key as starting position, sort style, ending position\n# -r is included in the second key to reverse numeric sort\n\ngsort -k1d,1 -k2nr,2\n

          Another practical example of this is sorting authorized_keys files by their comment, putting commented keys at the bottom, which keeps the columns nicely aligned. For instance, if you have this file:

          #ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBDhB9kjaireRsJgPASR2jJqU0o8UvIVIPunKNQmS+mw user@a-key-we-want-to-manually-enable\n#ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKeUlnQ8TVgYkI1/DvPMhHJoujYarUvdBx3/BA1mlZLs another-user@some-other-commented-key\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMWu8gF1qT19FtikCMoIBnmEJH1nKyrcC/pRCnvWzoSa bastion-01\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICY9ScGyTpXOLnYnUfqGDfdwMf4kRIPey1xvPRJ8CsAX root@some-old-box\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFSoe1Ho3PjOrS4Hz+B+ILHh40Xi2kbN2f7qb2tNKb1d admin@some-other-box\n

A plain sort authorized_keys would put the commented keys at the top and would not sort by the third column, which is the human-readable comment on each key. A better view reverse sorts the first column so the comments land at the bottom, then sorts by the third column so it's easy to glance through:

          $ gsort -b -k1,1r -k3 authorized_keys\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFSoe1Ho3PjOrS4Hz+B+ILHh40Xi2kbN2f7qb2tNKb1d admin@some-other-box\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMWu8gF1qT19FtikCMoIBnmEJH1nKyrcC/pRCnvWzoSa bastion-01\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICY9ScGyTpXOLnYnUfqGDfdwMf4kRIPey1xvPRJ8CsAX root@some-old-box\n#ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKeUlnQ8TVgYkI1/DvPMhHJoujYarUvdBx3/BA1mlZLs another-user@some-other-commented-key\n#ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBDhB9kjaireRsJgPASR2jJqU0o8UvIVIPunKNQmS+mw user@a-key-we-want-to-manually-enable\n
          "},{"location":"sort/#sort-ip-addresses-by-first-octet-then-last-octet-showing-which-fields-are-sorted","title":"Sort IP Addresses by first octet then last octet, showing which fields are sorted","text":"
          ip neigh show | sort -k1,1n -k4,4n -t. --debug\n

          Console example:

          $ ip neigh show | sort -k1,1n -k4,4n -t. --debug\nsort: using \u2018en_US.UTF-8\u2019 sorting rules\n10.0.2.2 dev eth0 lladdr 52:54:00:12:35:02 REACHABLE\n__\n       _\n____________________________________________________\n10.0.2.3 dev eth0 lladdr 52:54:00:12:35:03 STALE\n__\n       _\n________________________________________________\n192.16.35.10 dev eth1 lladdr 08:00:27:7a:50:42 STALE\n___\n          __\n____________________________________________________\n192.16.35.11 dev eth1 lladdr 08:00:27:56:64:2f STALE\n___\n          __\n____________________________________________________\n
          "},{"location":"sort/#bsd-syntax-examples","title":"BSD Syntax Examples","text":"

          GNU sort and BSD sort behave differently, which is mostly lame.

          "},{"location":"sort/#sort-by-the-third-column","title":"Sort by the third column","text":"
          sort -k 3 filename\n
          "},{"location":"sort/#sort-dates-by-the-day","title":"Sort dates by the day","text":"

This example shows how to sort ISO format dates (eg: 2017-01-19) by the day. Assumes bash 4 to generate the example dates.

          ## -n for numeric sort\n## -k3 for column 3\n## -t- to use - as a column delimiter\n\nfor X in {2016..2017}-{01..12..03}-{01..19..06} ; do echo ${X} ; done |\n\nsort -n -k3 -t-\n
          "},{"location":"sort/#sort-the-etcpasswd-by-uid","title":"Sort the /etc/passwd by UID","text":"

This also works on the /etc/group file and GID.

          sort -n -t: -k 3 /etc/passwd\n
          "},{"location":"sound-and-music/","title":"Sound and Music","text":""},{"location":"sound-and-music/#links","title":"Links","text":"
          • https://muted.io/major-minor-scales/
          • https://youtu.be/JcjT7zgs6cs: Music Theory for Techno
          • https://www.jezzamon.com/fourier/: An Interactive Introduction to Fourier Transforms
          • https://github.com/stemrollerapp/stemroller: OSS audio track separation tool. Spits out drums, vocals, bass, and lead tracks for input songs.
          • https://www.riffusion.com/about: Use Stable Diffusion to generate spectrograms, then play them back.
          "},{"location":"sphinx/","title":"sphinx","text":"

Sphinx is how many open source projects generate sites for their documentation.

          • http://sphinx-doc.org/contents.html
          "},{"location":"split/","title":"split","text":"

          \"split - split a file into pieces\" - man split

          split is a common unix command.

          "},{"location":"split/#usage-examples","title":"Usage Examples","text":""},{"location":"split/#split-into-dvd-sized-chunks","title":"Split into DVD sized chunks","text":"

This example isn't entirely practical: the size needs to be a bit smaller than specified because of DVD filesystem overhead, so use 4700000000 or similar if you want to actually burn the data.

          split -b 4707319808 source\n
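
The pieces can be reassembled with cat, since the default x?? suffixes sort in order (the output filename here is illustrative):

cat x?? > reassembled_source\n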
          "},{"location":"split/#split-a-big-file-and-compress-it-with-gzip-before-writing-to-disk","title":"Split a big file and compress it with gzip before writing to disk","text":"

          This is useful for splitting up large uncompressed logs. This command is background safe.

          split -a4 --additional-suffix='-redis.log.1' -l500000 --filter='gzip -9 > $FILE.gz' redis.log.1\n

          And if you want to see read stats, throw pv in the mix:

          $ split -a4 --additional-suffix='-redis.log.1' -l500000 --filter='pv | gzip -9 > $FILE.gz' redis.log.1\n1.94GB 0:00:58 [21.2MB/s] [                                         <=>                              ]\n
          "},{"location":"splunk/","title":"splunk","text":"

          Enterprise log consumption and analysis.

          • http://www.splunk.com/
          • http://docs.splunk.com/Documentation/Splunk/latest/SearchReference/Sort
          "},{"location":"splunk/#examples","title":"Examples","text":""},{"location":"splunk/#get-a-list-of-indexes","title":"Get a list of indexes","text":"
          | REST /services/data/indexes | dedup title | table title\n
          "},{"location":"splunk/#get-a-list-of-sourcetypes","title":"Get a list of sourcetypes","text":"
          | metadata type=sourcetypes index=* OR index=_*\n
          "},{"location":"splunk/#cfengine-runs-per-hour-by-version","title":"CFEngine runs per hour by version","text":"
          source=\"/var/log/messages\" OR source=\"udp:514\" \"Starting CFEngine\" earliest=\"-1w\" | rex \"Starting CFEngine (?<version>3.[0-9]+.[0-9]+).*\" | timechart span=4h usenull=0 dc(host) by version\n
          "},{"location":"splunk/#strip-domain-from-hostname-for-consistent-uqdns","title":"Strip domain from hostname for consistent UQDNs","text":"
          rex mode=sed field=host \"s/\\.foo\\.example\\.com//\"\n
          "},{"location":"splunk/#count-of-records-per-hour-by-host","title":"Count of records per hour by host","text":"
          earliest=\"-7d\" | timechart span=1h count(_raw) by host\n
          "},{"location":"splunk/#count-of-records-per-source-by-5m-with-no-limit-on-list","title":"Count of records per source by 5m with no limit on list","text":"
          earliest=\"-8h\" | timechart span=5m count(_raw) by source limit=0\n
          "},{"location":"splunk/#count-of-records-per-source-with-a-given-list","title":"Count of records per source with a given list","text":"
          earliest=\"-1d\" source=\"/var/log/messages\" OR source=\"udp:10514\" OR source=\"udp:514\" | timechart count by source\n
          "},{"location":"splunk/#count-of-records-per-splunk-server-for-a-given-time-period","title":"Count of records per splunk server for a given time period","text":"

          Stupidly, splunk doesn't support ISO date format by default (in the version I'm using).

          earliest=\"06/19/2015:3:0:0\" latest=\"06/19/2015:3:3:0\" | timechart count(_raw) by splunk_server\n
          "},{"location":"splunk/#order-number-of-hits-for-a-given-string-by-an-extracted-ip-address","title":"Order number of hits for a given string by an extracted IP address","text":"
          earliest=\"7/6/2015:9:30:0\" \"verifying pingback from\" | rex \"verifying pingback from (?<pingback_source_ip>[0-9\\.]*)\\\"\" | stats count(_raw) as pingback_source_ip_total by pingback_source_ip | sort pingback_source_ip_total desc\n
          "},{"location":"splunk/#order-an-rpm-report","title":"Order an RPM report","text":"

          Given a report where RPM fields are exported as field=\"value\", such as:

          rpm -qa --queryformat 'report=\"rpm\", name=\"%{NAME}\", release=\"%{RELEASE}\", version=\"%{VERSION}\", packager=\"%{PACKAGER}\", url=\"%{URL}\", installtime=\"%{INSTALLTIME}\"\\n'\n

          This search in splunk will show a useful table:

          earliest=\"-1d\" report=\"rpm\" | dedup name | eval install_timestamp = strftime(installtime, \"%F %T.%3N\") | sort installtime desc | table host,name,version,release,install_timestamp\n

          See also:

          • http://docs.splunk.com/Documentation/Splunk/latest/Admin/Propsconf
          • http://answers.splunk.com/answers/140493/timestamp-contain-t-between-date-and-time.html#answer-140495
          "},{"location":"splunk/#count-of-kernel-versions","title":"Count of kernel versions","text":"

          Assuming you have a report that sends kernel_version=$(uname -r):

          kernel_version | stats count(kernel_version) by kernel_version, host\n
          "},{"location":"spotlight/","title":"Spotlight","text":"

          Spotlight is the Apple metadata database.

          "},{"location":"spotlight/#indexing","title":"Indexing","text":"

          You can edit the file /.Spotlight-V100/_rules.plist to add or deny indexing of specific folders. Use mdutil to edit per-disk indexing behavior. More info at MacOSXHints.com: configure spotlight to index excluded directories

          "},{"location":"spotlight/#spotlight-search-bar-examples","title":"Spotlight Search Bar Examples","text":"
          • name:nikon kind:pdf
          • kind:image modified:>3/25/2011 # does not support iso-8601 format \ud83d\ude1e
          "},{"location":"spotlight/#mdls","title":"mdls","text":"

          \"mdls -- lists the metadata attributes for the specified file\" - man mdls

          "},{"location":"spotlight/#show-gps-date-for-all-jpg-files","title":"Show GPS date for all JPG files","text":"
          mdls -name kMDItemGPSDateStamp *.jpg\n
          "},{"location":"spotlight/#show-name-and-version-of-an-app","title":"Show name and version of an app","text":"
          mdls -name kMDItemVersion -name kMDItemDisplayName /Applications/Alacritty.app\n
          "},{"location":"spotlight/#mdutil","title":"mdutil","text":"

          \"mdutil -- manage the metadata stores used by Spotlight\" - man mdutil

          "},{"location":"spotlight/#disable-indexing-on-a-volume","title":"Disable indexing on a volume","text":"
          sudo mdutil -i off /Volumes/volume_name\n
          "},{"location":"spotlight/#delete-local-indexes","title":"Delete local indexes","text":"

          This flag will cause each local store for the volumes indicated to be erased. The stores will be rebuilt if appropriate.

          sudo mdutil -E /\n
          "},{"location":"spotlight/#mdimport","title":"mdimport","text":"

          A tool to manage the way things are imported.

          "},{"location":"spotlight/#show-the-schema","title":"Show the schema","text":"
          mdimport -X\n
          "},{"location":"spotlight/#mdfind","title":"mdfind","text":"

          The terminal search tool for spotlight.

          \"mdfind -- finds files matching a given query\" - man mdfind

          "},{"location":"spotlight/#find-all-landscape-oriented-photos","title":"Find all landscape oriented photos","text":"
          mdfind -onlyin \"$PWD\" \"kMDItemOrientation = 0\"\n
          "},{"location":"spotlight/#find-all-portrait-oriented-photos","title":"Find all portrait oriented photos","text":"
          mdfind -onlyin \"$PWD\" \"kMDItemOrientation = 1\"\n
          "},{"location":"spotlight/#spotlight-search-by-filename-function-for-bash","title":"Spotlight search by filename function for bash","text":"

function sl { mdfind \"kMDItemFSName == '$*'wc\" ; }\n

The trailing wc modifiers make the match word-based and case-insensitive.

          "},{"location":"spotlight/#find-music-files-modified-since-yesterday","title":"Find music files modified since yesterday","text":"

You must use single quotes for the spotlight $time variable so bash does not attempt to interpolate it as a bash variable. You could also use double quotes and escape it.

          mdfind -onlyin \"/Volumes/Peter/Music/\" 'kMDItemContentModificationDate >= $time.yesterday'

          "},{"location":"spotlight/#mdgrep","title":"mdgrep","text":"
#!/bin/bash\n#\n# Spotlight metadata find and grep by Daniel.Hoherd at gmail dot com\n\n## Check for at least two arguments, print usage if not\nif [ $# -lt 2 ] ; then\n    echo \"usage: $0 searchstring [dir or file] [dir2 or file2]\"\n    exit 1 ;\nfi\n\nss=$1;\nshift;\n\n## Build up one -onlyin option per given path\nuntil [ -z \"$1\" ] ; do\n    thisitem=$1\n    onlyin=\"-onlyin '$thisitem' $onlyin\"\n    shift\ndone;\neval mdfind -0 $onlyin \"$ss\" | xargs -0 grep -Hi \"$ss\"\n
          "},{"location":"spotlight/#xattr","title":"xattr","text":"

          xattr can be used to set arbitrary spotlight metadata:

          dho@tro:~/temp$ touch foo\ndho@tro:~/temp$ xattr -w com.apple.metadata:kMDItemStarRating 10 foo\ndho@tro:~/temp$ mdls -name kMDItemStarRating foo\nkMDItemStarRating = \"10\"\n
          "},{"location":"spotlight/#see-also","title":"See Also","text":"
          • Extensive article on OS X metadata: http://code.google.com/p/understand/wiki/MacOSMetadata
          • macOS User Guide: Search with Spotlight on Mac: https://support.apple.com/guide/mac-help/spotlight-mchlp1008/mac
          "},{"location":"sqlite/","title":"sqlite","text":""},{"location":"sqlite/#links","title":"Links","text":"
          • CLI Shell info: http://www.sqlite.org/sqlite.html
          • Better CLI tutorial: http://souptonuts.sourceforge.net/readme_sqlite_tutorial.html
          • FAQ - http://www.sqlite.org/faq.html
          • When to use SQLite - https://www.sqlite.org/whentouse.html
          • SpatiaLite - \"SpatiaLite is an open source library intended to extend the SQLite core to support fully fledged Spatial SQL capabilities.\"
          • Improved CLI - https://github.com/dbcli/litecli
          • GUI tool with sqlite support - https://dbeaver.io
          • \"An open source multi-tool for exploring and publishing data\" https://docs.datasette.io/en/stable/ / https://simonwillison.net/2018/Aug/19/instantly-publish-datasette/
          • https://www.sqlite.org/appfileformat.html
          • https://sqlite.org/src4/doc/trunk/www/design.wiki The Design of SQLite4
          • https://til.simonwillison.net/sqlite/one-line-csv-operations Simon Willison has a lot of great sqlite knowledge, tools, and examples.
          • https://sqlitestudio.pl: GUI app to \"Create, edit, browse SQLite databases.\"
          "},{"location":"sqlite/#syntax-examples","title":"Syntax Examples","text":""},{"location":"sqlite/#open-a-db-read-only","title":"Open a db read-only","text":"
          sqlite3 \"file:///absolute/path/to/file/datasette.db?mode=ro\"\n
          "},{"location":"sqlite/#import-a-csv-file","title":"Import a csv file","text":"

          This method does not appear to support ~/filename or $HOME/filename, but does support relative and absolute paths. The sqlite3 help text says that -csv will \"set output mode to 'csv'\", but it also affects .import statements.

          sqlite3 -csv filename.db \".import path/to/some_file.csv destination_table_name\"\n
          "},{"location":"sqlite/#export-to-a-csv-file-including-headers","title":"Export to a csv file, including headers","text":"

          This would export the cards database table as a csv:

          sqlite3 -csv pokemon-card-collection.db -cmd '.headers on' 'select * from cards' > pokemon-cards.csv\n
          "},{"location":"sqlite/#create-a-table","title":"Create a table","text":"
          CREATE TABLE servers (\n  id INTEGER NOT NULL,\n  hostname VARCHAR(255),\n  ip_addr VARCHAR(32),\nPRIMARY KEY (id), UNIQUE (id,hostname));\n

          Or from a unix shell

          sqlite3 foo.db \"CREATE TABLE servers (\n  id INTEGER NOT NULL,\n  hostname VARCHAR(255),\n  ip_addr VARCHAR(32),\nPRIMARY KEY (id),\nUNIQUE (id,hostname));\"\n
          "},{"location":"sqlite/#add-a-column-to-the-table","title":"Add a column to the table","text":"
          ALTER TABLE servers ADD os varchar(255);\n
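
SQLite 3.35.0 and later can also drop a column:

ALTER TABLE servers DROP COLUMN os;\n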
          "},{"location":"sqlite/#create-a-view","title":"Create a view","text":"

          \"a view is the result set of a stored query, which can be queried in the same manner as a persistent database collection object\" - https://en.wikipedia.org/wiki/View_(SQL)

          https://www.sqlite.org/lang_createview.html

The following view would show only rows from servers where the ip_addr starts with 192.168. This is an effective way to move logic into the database, potentially reducing app complexity:

          CREATE VIEW local_servers AS\nSELECT hostname,ip_addr FROM servers WHERE ip_addr like '192.168.%' ;\n
          "},{"location":"sqlite/#add-rows-to-the-table-from-unix-shell","title":"Add rows to the table from unix shell","text":"
          sqlite3 foo.db \"insert into baz values ('50','some text');\"\n
          "},{"location":"sqlite/#add-rows-or-update-if-the-row-already-exists","title":"Add rows or update if the row already exists","text":"

This syntax differs from other SQL implementations.

          insert or replace into tablename(filename, hash) values\n  ('/etc/hosts', 'somehash'),\n  ('/etc/resolv.conf', 'someotherhash');\n
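
SQLite 3.24.0 and later also support standard UPSERT syntax, which updates the existing row rather than replacing it. This assumes a unique index on filename:

insert into tablename(filename, hash) values\n  ('/etc/hosts', 'somehash')\non conflict(filename) do update set hash=excluded.hash;\n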
          "},{"location":"sqlite/#output-rows-via-the-unix-shell","title":"Output rows via the unix shell","text":"

This outputs as columns, but csv, html, line, and list modes exist too.

          sqlite3 -column foo.db \"SELECT * FROM baz LIMIT 5;\"\n

          If -column is truncating your output, instead use -list -separator ' '

          -line is the equivalent of mysql's \\G or postgres's \\x

          "},{"location":"sqlite/#select-a-field-where-its-value-occurs-more-than-n-times","title":"Select a field where its value occurs more than N times","text":"
          select DateTimeOriginal\nfrom photos\ngroup by DateTimeOriginal\nhaving count(DateTimeOriginal) > 1 ;\n
          "},{"location":"sqlite/#select-field-a-where-the-value-field-b-occurs-more-than-n-times","title":"Select field A where the value Field B occurs more than N times","text":"

          This selects all values for field A (SourceFile), including where duplicates exist for field B (DateTimeOriginal). The prior example would not have shown this if we had added SourceFile to the select.

          select SourceFile from photos\nwhere DateTimeOriginal in (\n  select DateTimeOriginal from photos\n  group by DateTimeOriginal\n  having count(DateTimeOriginal) > 1\n) order by SourceFile ;\n
          "},{"location":"sqlite/#select-a-random-row-from-a-table","title":"Select a random row from a table","text":"
          TABLE='costreport'\nsqlite3 -line CostReport-1.db \"SELECT * FROM $TABLE\nWHERE _ROWID_ >= (abs(random()) % (SELECT max(_ROWID_) FROM $TABLE))\nLIMIT 1 ;\"\n
          "},{"location":"sqlite/#select-data-from-a-table-and-include-an-incrementing-id-column","title":"Select data from a table and include an incrementing id column","text":"

          Given the following data:

          sqlite> .schema\nCREATE TABLE IF NOT EXISTS \"people\"(\n\"name\" TEXT, \"age\" TEXT);\nsqlite> .mode box\nsqlite> select * from people ;\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 name  \u2502 age \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 brad  \u2502 18  \u2502\n\u2502 david \u2502 9   \u2502\n\u2502 grace \u2502 29  \u2502\n\u2502 john  \u2502 51  \u2502\n\u2502 katie \u2502 23  \u2502\n\u2502 nora  \u2502 33  \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2518\n

          You can sort by age and add an id column to show the numeric ordering of their age:

          sqlite> select row_number() over (order by cast(age as integer)) as id, * from people ;\n\u250c\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 id \u2502 name  \u2502 age \u2502\n\u251c\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 1  \u2502 david \u2502 9   \u2502\n\u2502 2  \u2502 brad  \u2502 18  \u2502\n\u2502 3  \u2502 katie \u2502 23  \u2502\n\u2502 4  \u2502 grace \u2502 29  \u2502\n\u2502 5  \u2502 nora  \u2502 33  \u2502\n\u2502 6  \u2502 john  \u2502 51  \u2502\n\u2514\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2518\n

          The row_number() over (order by cast(age as integer)) as id adds the extra column. Because the age column is a string, we have to cast it to an integer for the sort to work correctly.

          "},{"location":"sqlite/#sum-one-column-ordered-by-another-column","title":"Sum one column ordered by another column","text":"

          This does a quick cost analysis on an AWS Cost-Report-1.csv file.

          ## First sanitize the column names\nsed -i '1 s#[/: ]#_#g' CostReport-1.csv  # linux sed, macos use gsed\n## Next import the csv into a sqlite db\nsqlite3 -csv CostReport-1.db \".import CostReport-1.csv costreport\"\n## Then start a sqlite shell\nsqlite3 CostReport-1.db\n
          -- Output Usage Type, ARN or ID, and summed cost as columns\nSELECT lineItem_UsageType, lineItem_ResourceId, sum(lineItem_BlendedCost) cost\nFROM costreport\nGROUP BY lineItem_ResourceId\nORDER BY cost ;\n
          "},{"location":"sqlite/#dump-a-db-from-cli","title":"Dump a db from CLI","text":"
          sqlite3 foo.db \".dump\" > foo.sql\n
          "},{"location":"sqlite/#search-skype-chat-history","title":"Search Skype chat history","text":"
          sqlite3 \"$HOME/Library/Application Support/Skype/daniel.austin/main.db\" \\\n  \"SELECT author, timestamp, body_xml FROM messages WHERE body_xml LIKE '%music%' ;\"\n
          "},{"location":"sqlite/#expanded-functionality-skype-history-search","title":"Expanded functionality skype history search","text":"
          function skypesearch(){\n  skypeusername=$1\n  searchstring=$2\n  /usr/bin/env sqlite3 \"$HOME/Library/Application Support/Skype/${skypeusername}/main.db\" \\\n  \"SELECT author, datetime(timestamp,'unixepoch','localtime'), body_xml\n  FROM messages\n  WHERE body_xml\n  LIKE '%${searchstring}%' ;\"\n}\nalias ss=\"skypesearch john.doe\"\n
          "},{"location":"sqlite/#quickly-create-an-image-database","title":"Quickly create an image database","text":"

          A better way to do this is to use sqlite-utils, which is part of the Datasette ecosystem; an example below shows how.

          ## Create the database\nsqlite3 images.db \"create table images (filename varchar(255), createdate timestamp, unique(filename))\"\n## Populate the database. This can be blindly re-run when new files are added.\nexiftool -d \"%s\" -p 'insert into images values (\"$filename\", \"$DateTimeOriginal\");' -q -f -r . | sqlite3 images.db 2> /dev/null\n## Query the database\nsqlite3 images.db \"SELECT filename,datetime(createdate,'unixepoch','localtime') as date FROM images WHERE date LIKE '2014-08-02%';\"\n
          "},{"location":"sqlite/#use-exiftool-and-sqlite-utils-to-find-duplicate-photos","title":"Use exiftool and sqlite-utils to find duplicate photos","text":"

          This example shows how to delete JPG files where a DNG also exists, for photos taken with a camera that keeps track of the ImageNumber, like most DSLRs.

          First, ingest all the exif data from your photos into a sqlite db. We specify only the fields we need from exiftool, and output the data as a json blob, then use jq to reformat the json as one record per line, then send that into sqlite-utils. The sqlite-utils --nl option tells it to expect one record per line, --pk SourceFile specifies that field (which is always present in exiftool json output) as the primary key to ensure uniqueness, --replace will update any rows where the primary key already exists (so we can re-run this on an existing database). If we were not specifying exiftool fields, we would need an --alter flag here to tell sqlite-utils to add columns to the table for exif fields that did not exist in any previously imported photos.

          SOURCE_DIR=/some/absolute/path/with/photos\nfind \"${SOURCE_DIR}\" -type f -iname '*.jpg' -print0 -or -iname '*.dng' -print0 |\nxargs -0 exiftool -SerialNumber -ImageNumber -FileType -json |\njq -c '.[]' |\nsqlite-utils insert --nl --pk SourceFile --replace ~/photos.db photos -\n

          Now do a sql select for all JPG files that finds the duplicates. The idea here is to find distinct shots from a specific camera body which are identified by concatenating the SerialNumber and the ImageNumber together, and only select rows with FileType = JPEG where that same shot also has a FileType = DNG entry. This avoids deletion of JPG files where there is no DNG for the same shot.

          This example uses echo rm so nothing is actually deleted; remove echo to perform the deletion. It also shows how to interpolate fields in sqlite3 by using || to concatenate fields and strings into a single string, which can then be compared against or output as a result.

          sqlite3 ~/photos.db \"\nselect SourceFile from photos\nwhere FileType = 'JPEG'\nand SerialNumber || ImageNumber in (\n  select SerialNumber || ImageNumber from photos where FileType = 'DNG'\n) ;\n\" |\nxargs -r echo rm -fv\n
          "},{"location":"sqlite/#vacuum-a-database-file","title":"Vacuum a database file","text":"

          \"The VACUUM command rebuilds the database file, repacking it into a minimal amount of disk space.\" - https://www.sqlite.org/lang_vacuum.html

          sqlite3 filename.db \"VACUUM;\"\n
          "},{"location":"ss/","title":"ss","text":"

          \"ss - another utility to investigate sockets\" - man ss

          This tool shows all sockets, not just networking sockets.

          "},{"location":"ss/#examples","title":"Examples","text":"

          Options can be concatenated, so ss -t -n -l -p can be ss -tnlp

          "},{"location":"ss/#show-all-established-connections-dont-resolve-service-names","title":"Show all established connections, don't resolve service names","text":"
          ss -n\n
          "},{"location":"ss/#show-all-listening-sockets","title":"Show all listening sockets","text":"
          ss -l\n
          "},{"location":"ss/#display-all-tcp-sockets","title":"Display all TCP sockets","text":"
          ss -t -a\n
          "},{"location":"ss/#show-ipv4-listening-sockets-sorted-by-port","title":"Show ipv4 listening sockets sorted by port","text":"
          ss -4 -ltn | sort -k2 -t: -n\n
          "},{"location":"ss/#show-ssh-connections","title":"Show ssh connections","text":"
          ss -at '( sport = :ssh or dport = :ssh )'\n
          "},{"location":"ss/#show-ipv4-sockets-in-a-particular-state","title":"Show ipv4 sockets in a particular state","text":"
          ss -t4 state time-wait\n
          "},{"location":"ss/#show-the-processes-for-listening-ipv4-sockets","title":"Show the processes for listening ipv4 sockets","text":"
          ss -lt4p\n
          "},{"location":"ssh/","title":"ssh","text":"

          ssh is the secure shell, an encrypted version of telnet and a whole lot more

          "},{"location":"ssh/#ssh_1","title":"ssh","text":"

          The secure shell itself, very useful for administering remote systems, tunneling arbitrary ports, tunneling X sessions, and a whole lot more.

          "},{"location":"ssh/#scp","title":"scp","text":"

          scp is like cp, but it happens securely and allows host-to-host transfers over ssh. Very handy when used with ssh_config and key-based authentication.

          "},{"location":"ssh/#sftp","title":"sftp","text":"

          A secure FTP client built into ssh. The native client sucks; try lftp or rsync if it's available.

          "},{"location":"ssh/#sshd","title":"sshd","text":""},{"location":"ssh/#output-effective-server-configuration-variables","title":"Output effective server configuration variables","text":"

          This is useful for troubleshooting ssh_config matching.

          sshd -T # requires root\n
          "},{"location":"ssh/#ssh_2","title":"ssh","text":""},{"location":"ssh/#output-effective-client-configuration-variables","title":"Output effective client configuration variables","text":"
          ssh -G user@host\n
          "},{"location":"ssh/#tunnel-local-port-to-the-destination-through-the-ssh-connection","title":"tunnel local port to the destination through the SSH connection","text":"

          This allows you to hit remote services as if they were running on your own machine on the given port.

          This will only listen on localhost, not ethernet interfaces. Use -g to listen on all interfaces.

          local_port=9980\nremote_port=80\ndestination_host=some_other_remote_server\nssh -L \"${local_port}:${destination_host}:${remote_port}\" user@host\n
          "},{"location":"ssh/#tunnel-remote-port-through-the-ssh-connection-to-the-local-machine","title":"Tunnel remote port through the ssh connection to the local machine","text":"

          This allows remote hosts to connect to a server running on your local network.

          local_port=80\nremote_port=9980\ndestination_host=some_other_local_server\nssh -R \"${remote_port}:${destination_host}:${local_port}\" user@host\n
          "},{"location":"ssh/#create-a-socks-5-proxy-on-a-local-port","title":"Create a socks 5 proxy on a local port","text":"
          local_port=5555\nssh -D \"$local_port\" user@host\n
          "},{"location":"ssh/#loop-through-some-ssh-hosts-and-execute-a-command","title":"Loop through some ssh hosts and execute a command","text":"

          -n is required so ssh does not consume stdin; without it, the loop would stop after the first host.

          cat hostnames.txt | while read -r host ; do\n  ssh -o ConnectTimeout=10 -o PasswordAuthentication=no -n \"$host\" 'some_command ; another_command ;'\ndone\n
          "},{"location":"ssh/#be-really-verbose-about-not-wanting-to-use-an-interactive-login","title":"Be really verbose about not wanting to use an interactive login","text":"

          Some ssh servers (EG: macOS 11) need even more options to not ask for a password:

          ssh \\\n  -o PasswordAuthentication=no \\\n  -o KbdInteractiveAuthentication=no \\\n  -o KbdInteractiveDevices=no\n

          This is also really handy for putting into GIT_SSH_COMMAND to avoid password prompts in scripts.
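
          For example, a sketch of using those same options in GIT_SSH_COMMAND so a scripted git call fails fast instead of prompting:

          ## GIT_SSH_COMMAND is respected by git 2.3+\nGIT_SSH_COMMAND='ssh -o PasswordAuthentication=no -o KbdInteractiveAuthentication=no -o KbdInteractiveDevices=no' git fetch\n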

          "},{"location":"ssh/#prefer-password-auth","title":"Prefer password auth","text":"

          Sometimes you need to prefer password auth over key based auth. For example, if you have lots of keys and you are trying to connect to a host that only allows one failure, you will expire your failures before you ever reach a password dialogue.

          ssh -o PreferredAuthentications=password root@libreelec.local\n
          "},{"location":"ssh/#ssh_config","title":"ssh_config","text":"

          The user ssh config file, ~/.ssh/config, lets you override default options. This makes it handy for command line stuff where the syntax is funky, such as using non-standard ports.

          Notably, ssh uses the first obtained value for each option, so global Host * defaults need to come at the end of the file, not the beginning!
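
          A minimal sketch of that ordering (hostnames are hypothetical): Port 2222 takes effect for build-box because it appears before the Host * default.

          Host build-box\n  Port 2222\n\nHost *\n  Port 22\n  ServerAliveInterval 60\n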

          "},{"location":"ssh/#simple-host-aliasing","title":"Simple host aliasing","text":"

          The following example will let you simply run ssh sugarbeast to log in as the specified user on the non-standard port at the proper IP address.

          Host sugarbeast\n  HostName 66.134.66.42\n  User daniel\n  Port 888\n
          "},{"location":"ssh/#multiplexed-connections","title":"Multiplexed connections","text":"

          After running mkdir -p -m 700 ~/.ssh/sockets add this to your ~/.ssh/config

          Host *\n  ControlPersist yes\n  ControlMaster auto\n  ControlPath ~/.ssh/sockets/%r@%h:%p\n

          To kill a multiplexed connection, run ssh -O exit user@host
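
          You can also check whether a master connection is currently active for a given host:

          ssh -O check user@host\n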

          "},{"location":"ssh/#proxycommand","title":"ProxyCommand","text":"

          This config option lets you execute an arbitrary series of commands to connect with.

          SSH proxy through ssh host for openSSH v4 and earlier (Ubuntu 8):

          ProxyCommand ssh -q bastion nc -q 0 %h %p\n

          SSH proxy through ssh host for openSSH v5 and later:

          ProxyCommand ssh -W %h:%p bastion\n

          HTTP proxy (from man ssh_config):

          ProxyCommand nc -X connect -x 192.0.2.0:8080 %h %p\n
          "},{"location":"ssh/#key-based-authentication","title":"key-based authentication","text":"

          Key-based authentication lets you log in without specifying a password. This is useful for rsync, scp and just plain old ssh shell. Adding comments to the public key makes it easy to sort through the keys in the authorized_keys file. The $HOME/.ssh/authorized_keys file is the default list of public keys which are allowed password-less login. See also man authorized_keys for more info.
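
          To install your public key into a remote host's authorized_keys in one step, ssh-copy-id is the usual shortcut (the hostname here is hypothetical):

          ssh-copy-id -i ~/.ssh/id_ed25519.pub user@remote-host\n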

          "},{"location":"ssh/#key-based-auth-permissions","title":"Key-based auth Permissions","text":"

          Permissions on this file need to be set like this:

          #!/bin/sh\n# This will repair permissions for the current user's ssh key-pair authentication.\nmkdir -p ~/.ssh/\ntouch ~/.ssh/authorized_keys\nchmod go-w ~          && \\\nchmod 700 ~/.ssh      && \\\nchmod 600 ~/.ssh/*    && \\\necho \"Successfully fixed ssh authentication files permissions.\"\n
          "},{"location":"ssh/#ssh-keygen","title":"ssh-keygen","text":""},{"location":"ssh/#validate-each-entry-of-authorized_keys","title":"Validate each entry of authorized_keys","text":"
          ssh-keygen -lvf ~/.ssh/authorized_keys\n
          "},{"location":"ssh/#generate-keys","title":"Generate Keys","text":"

          Not all systems support ed25519, but as of 2016 it is the most secure key type.

          ssh-keygen -t ed25519 -C \"Daniel Hoherd: ${USER}@${HOSTNAME} -- $(date +%F)\"\n

          If you require backwards compatibility, use 4096 bit RSA keys.

          ssh-keygen -b 4096 -t rsa -C \"Daniel Hoherd: ${USER}@${HOSTNAME} -- $(date +%F)\"\n
          "},{"location":"ssh/#create-or-change-a-password-for-an-ssh-identity","title":"Create or change a password for an ssh identity","text":"

          This will update the password used to unlock an ssh identity.

          ssh-keygen -p -f ~/.ssh/id_ed25519\n
          "},{"location":"ssh/#generate-a-public-key-from-a-given-private-key","title":"Generate a public key from a given private key","text":"

          This outputs the pub key, including the comment that is stored in the private key.

          ssh-keygen -y -f id_rsa_bar\n
          "},{"location":"ssh/#change-the-comment-in-an-ssh-key-pair","title":"Change the comment in an ssh key pair","text":"

          The old comment will be printed when changing the comment:

          ssh-keygen -c -f ~/.ssh/id_ed25519 -C 'this is the new comment'\n
          "},{"location":"ssh/#show-the-fingerprints-for-the-given-key-file","title":"Show the fingerprints for the given key file","text":"

          This works with both private and public keys

          ssh-keygen -E MD5 -l -f id_rsa_baz\n
          "},{"location":"ssh/#ssh-add","title":"ssh-add","text":""},{"location":"ssh/#show-fingerprints-for-all-keys-that-are-loaded-into-ssh-agent","title":"Show fingerprints for all keys that are loaded into ssh-agent","text":"
          # ssh-add -l\n2048 SHA256:aFAG8RjEr+mvqNyFR10kwCF9LP5ttJR3vI85qPDHDbo  (RSA)\n4096 SHA256:8K5XkmSFyAUgA6DLhQTbmTDnkh1kPc7GTdg5EYP7C8s  (RSA)\n4096 SHA256:7Bmhh1TGQkY7RfT9gmShNb1Eaq7erRkDphcOsQH0jaE  (RSA)\n

          Or if you need to show the hash as the older MD5 hash, EG to use with CircleCI

          # ssh-add -l -E md5\n2048 MD5:65:fd:c2:05:1e:b2:a6:32:15:37:3d:e6:98:81:a9:ab  (RSA)\n4096 MD5:db:af:71:c0:44:06:33:5f:63:b0:cb:8f:8a:59:0b:46  (RSA)\n4096 MD5:8e:f3:02:1c:bb:39:8e:b2:5e:27:5a:48:c4:d1:0c:4b  (RSA)\n
          "},{"location":"ssh/#delete-keys-from-the-agent","title":"Delete keys from the agent","text":"

          Delete all keys with

          ssh-add -D\n

          Delete the key for the given filename from ssh-agent

          ssh-add -d ~/.ssh/id_rsa\n
          "},{"location":"ssh/#fetch-pub-keys-from-ssh-agent","title":"Fetch pub keys from ssh-agent","text":"

          These keys will show the comment contained within the key

          ssh-add -L\n
          "},{"location":"ssh/#limit-root-login-to-key-based-auth","title":"Limit root login to key based auth","text":"

          In /etc/ssh/sshd_config (newer OpenSSH releases call this value prohibit-password)

          PermitRootLogin without-password\n
          "},{"location":"ssh/#see-also","title":"See Also","text":"
          • sshuttle - IP network router over ssh
          • sslh - lets one accept both HTTPS and SSH connections on the same port. It makes it possible to connect to an SSH server on port 443 (e.g. from inside a corporate firewall)
          • Corkscrew - a tool for tunneling SSH through HTTP proxies
          • PuTTY - An SSH (and telnet) client for Windows.
          • Passwordless SSH logins
          • SSH server for Windows
          • SSH jump hosts
          • The Secure Shell (SSH) Connection Protocol - https://www.ietf.org/rfc/rfc4254.txt
          • The Secure Shell (SSH) Authentication Protocol - https://www.ietf.org/rfc/rfc4252.txt
          • The Secure Shell (SSH) Transport Layer Protocol - https://www.ietf.org/rfc/rfc4253.txt
          • https://www.agwa.name/blog/post/ssh_signatures
          • lwn.net: Restricting SSH agent keys
          "},{"location":"sshuttle/","title":"sshuttle","text":"

          sshuttle is an SSH-powered, IPv4-routed VPN that doesn't require admin rights on the target host.

          • https://github.com/apenwarr/sshuttle
          "},{"location":"sshuttle/#usage","title":"Usage","text":"

          Tunnel DNS queries and create a route through the given host to the given subnet, and be verbose about it.

          sshuttle --dns -vr user@host:port 192.168.1.0/24\n
          "},{"location":"stat/","title":"stat","text":"

          show filesystem metadata about a file

          "},{"location":"stat/#gnu-stat-examples","title":"GNU stat examples","text":""},{"location":"stat/#show-permissions-modify-date-ownership-and-long-filename","title":"Show permissions, modify date, ownership and long filename","text":"
          stat -c \"%a/%A  %y %G(%g):%U(%u) %N\" /srv/log/apache2/\n
          "},{"location":"stat/#sum-file-sizes","title":"Sum file sizes","text":"
          stat -c '%s' *2016* | awk '{sum += $1} END {print sum}'\n
          "},{"location":"stat/#gnu-stat-c-variables","title":"GNU stat -c variables","text":"

          This section is taken from man stat

          "},{"location":"stat/#the-valid-format-sequences-for-files-without-file-system","title":"The valid format sequences for files (without --file-system):","text":"
          • %A - access rights in human readable form
          • %a - access rights in octal (note '#' and '0' printf flags)
          • %b - number of blocks allocated (see %B)
          • %B - the size in bytes of each block reported by %b
          • %C - SELinux security context string
          • %d - device number in decimal
          • %D - device number in hex
          • %F - file type
          • %f - raw mode in hex
          • %g - group ID of owner
          • %G - group name of owner
          • %h - number of hard links
          • %i - inode number
          • %m - mount point
          • %n - file name
          • %N - quoted file name with dereference if symbolic link
          • %o - optimal I/O transfer size hint
          • %s - total size, in bytes
          • %t - major device type in hex, for character/block device special files
          • %T - minor device type in hex, for character/block device special files
          • %u - user ID of owner
          • %U - user name of owner
          • %w - time of file birth, human-readable; - if unknown
          • %W - time of file birth, seconds since Epoch; 0 if unknown
          • %x - time of last access, human-readable
          • %X - time of last access, seconds since Epoch
          • %y - time of last data modification, human-readable
          • %Y - time of last data modification, seconds since Epoch
          • %z - time of last status change, human-readable
          • %Z - time of last status change, seconds since Epoch
          "},{"location":"stat/#valid-format-sequences-for-file-systems","title":"Valid format sequences for file systems:","text":"
          • %a - free blocks available to non-superuser
          • %b - total data blocks in file system
          • %c - total file nodes in file system
          • %d - free file nodes in file system
          • %f - free blocks in file system
          • %i - file system ID in hex
          • %l - maximum length of filenames
          • %n - file name
          • %s - block size (for faster transfers)
          • %S - fundamental block size (for block counts)
          • %t - file system type in hex
          • %T - file system type in human readable form
          "},{"location":"strace/","title":"strace","text":"

          strace is a tool to trace system calls and signals in Linux.

          "},{"location":"strace/#examples","title":"Examples","text":""},{"location":"strace/#trace-a-running-process","title":"Trace a running process","text":"
          strace -p 5789\n
          "},{"location":"strace/#trace-only-exec-calls-of-a-command-and-all-child-processes","title":"Trace only exec calls of a command and all child processes","text":"
          strace -f -eexecve cf-agent -K\n
          "},{"location":"sudo/","title":"sudo","text":"

          super user do

          "},{"location":"sudo/#examples","title":"Examples","text":""},{"location":"sudo/#includedir-etcsudoersd","title":"#includedir /etc/sudoers.d","text":"
          #includedir /etc/sudoers.d\n

          This line appears in some sudoers files and is not a comment: #includedir is a configuration directive. sudo skips files in this directory whose names contain a . (so no .conf suffix); name them flatly, eg: 00_default
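
          To safely create or edit a file in that directory, use visudo -f, which syntax-checks the file before saving (the filename here is just an example):

          visudo -f /etc/sudoers.d/00_default\n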

          "},{"location":"sudo/#check-sudoers-for-errors","title":"Check sudoers for errors","text":"
          visudo -c\n
          "},{"location":"sudo/#show-sudo-rules-that-match-for-the-given-user","title":"Show sudo rules that match for the given user","text":"
          sudo -l -U username\n

          The order shown is important: sudo picks the last matching rule. Rules are parsed in order from /etc/sudoers and all included files. Because of this, #includedir /etc/sudoers.d should be the last line in /etc/sudoers, and the ordering of files within /etc/sudoers.d/ matters when fine-tuning rules.
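
          A minimal sketch of last-match-wins, using a hypothetical pair of rules: with both lines present, members of %admin are prompted for a password, because the second rule matches last.

          %admin ALL=(ALL) NOPASSWD: /usr/local/sbin/mtr\n%admin ALL=(ALL) PASSWD: /usr/local/sbin/mtr\n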

          "},{"location":"sudo/#allow-admins-to-run-mtr-without-a-password","title":"Allow admins to run mtr without a password","text":"

          Add the following line to /etc/sudoers

          %admin ALL=(ALL) NOPASSWD: /usr/local/sbin/mtr\n
          "},{"location":"sudo/#allow-several-groups-and-users-to-execute-upstart-commands","title":"Allow several groups and users to execute upstart commands","text":"
          User_Alias UPSTART_USERS = %wheel, %sysadmin, %adm\nUPSTART_USERS  ALL=(ALL)     NOPASSWD: /sbin/start\nUPSTART_USERS  ALL=(ALL)     NOPASSWD: /sbin/stop\nUPSTART_USERS  ALL=(ALL)     NOPASSWD: /sbin/initctl\nUPSTART_USERS  ALL=(ALL)     NOPASSWD: /sbin/restart\n
          "},{"location":"sudo/#run-several-commands-with-one-sudo-command","title":"Run several commands with one sudo command","text":"
          sudo -s -- <<EOF\nwhoami\nwhoami\nEOF\n

          or

          sudo bash -c \"whoami ; whoami ;\"\n
          "},{"location":"sudo/#links","title":"Links","text":"
          • sudoers config - http://ubuntuforums.org/showthread.php?t=1132821
          "},{"location":"swagger/","title":"swagger","text":"

          \"Swagger is the world\u2019s largest framework of API developer tools for the OpenAPI Specification(OAS), enabling development across the entire API lifecycle, from design and documentation, to test and deployment.\" - https://swagger.io/

          "},{"location":"swagger/#links","title":"Links","text":"
          • Hello World with Swagger - https://swagger.io/blog/getting-started-with-swagger-i-what-is-swagger/
          • Swagger 101 - https://app.swaggerhub.com/help/tutorials/writing-swagger-definitions
          • Online editor with example app definition https://editor.swagger.io/
          • https://swagger.io/tools/
          "},{"location":"swift/","title":"Swift","text":"

          \"Swift is a powerful and intuitive programming language for macOS, iOS, watchOS and tvOS. Writing Swift code is interactive and fun, the syntax is concise yet expressive, and Swift includes modern features developers love. Swift code is safe by design, yet also produces software that runs lightning-fast.\"

          • https://developer.apple.com/swift/
          • https://www.appcoda.com/learnswift/
          "},{"location":"sysctl/","title":"sysctl","text":"

          \"sysctl - configure kernel parameters at runtime\" - man sysctl

          /etc/sysctl.conf is for storing permanent changes; the sysctl command makes changes to the running system.

          "},{"location":"sysctl/#containers-caveat","title":"Containers caveat","text":"

          Because sysctl is a kernel-level feature, non-namespaced values are shared between all containers running on a given node. This means that if a container in kubernetes modifies such a sysctl, it alters the behavior of every other container in every pod on that same node. It also means you cannot rely on those values being consistent, because they are not managed in one central place but are modified by whatever pods get scheduled on the node. Because of this, it's a good idea to avoid this if at all possible, and if you do need to tweak node-level sysctls for pods, account for that in your pod affinities.
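
          For namespaced sysctls, kubernetes lets you declare values per-pod via the pod securityContext instead of mutating the node. A minimal sketch (names and values are only examples; net.core.somaxconn is namespaced but considered unsafe, so the kubelet must allowlist it via --allowed-unsafe-sysctls):

          apiVersion: v1\nkind: Pod\nmetadata:\n  name: sysctl-example\nspec:\n  securityContext:\n    sysctls:\n      # applied only within this pod's network namespace\n      - name: net.core.somaxconn\n        value: \"1024\"\n  containers:\n    - name: app\n      image: nginx\n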

          "},{"location":"sysctl/#sysctl-command-examples","title":"sysctl command examples","text":""},{"location":"sysctl/#show-all-kernel-variables-for-the-in-memory-kernel","title":"Show all kernel variables for the in-memory kernel","text":"
          sysctl -a\n
          "},{"location":"sysctl/#assign-a-new-variable-for-the-running-kernel-to-use","title":"Assign a new variable for the running kernel to use","text":"
          sysctl -w variable=value\n
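
          For example, to enable IPv4 forwarding on the running kernel:

          sysctl -w net.ipv4.ip_forward=1\n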
          "},{"location":"sysctl/#load-values-from-etcsysctlconf","title":"Load values from /etc/sysctl.conf","text":"
          sysctl -p\n
          "},{"location":"sysctl/#sysctlconf-examples","title":"sysctl.conf examples","text":""},{"location":"sysctl/#reboot-after-10-seconds-if-kernel-panics","title":"Reboot after 10 seconds if kernel panics","text":"
          kernel.panic = 10\n
          "},{"location":"sysctl/#treat-all-oopses-as-panics","title":"Treat all oopses as panics","text":"
          kernel.panic_on_oops = 1\n
          "},{"location":"sysdig/","title":"sysdig","text":"

          An awesome host inspection tool with tcpdump-like capture abilities and an interface similar to top et al. - http://www.sysdig.org/

          "},{"location":"sysdig/#installation-centos","title":"Installation - CentOS","text":"
          • https://github.com/draios/sysdig/wiki/How-to-Install-Sysdig-for-Linux

          Their shell script installs epel from a 3rd party source, so it's best to use this method instead:

          rpm --import https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public && \\\ncurl -s -o /etc/yum.repos.d/draios.repo http://download.draios.com/stable/rpm/draios.repo && \\\nyum -y install kernel-devel-$(uname -r) && \\\nyum -y install sysdig\n
          "},{"location":"sysdig/#examples","title":"Examples","text":""},{"location":"sysdig/#simple-usage","title":"Simple usage","text":"
          sysdig\n
          "},{"location":"sysdig/#write-a-system-trace-file","title":"Write a system trace file","text":"
          sysdig -w tracefile.scap\n
          "},{"location":"sysdig/#replay-a-tracefile","title":"Replay a tracefile","text":"
          sysdig -r tracefile.scap\n
          "},{"location":"sysdig/#show-filters","title":"Show filters","text":"
          sysdig -l\n
          "},{"location":"sysdig/#show-activity-for-access-to-a-given-file","title":"Show activity for access to a given file","text":"
          sysdig fd.name=/etc/hosts\n
          "},{"location":"sysdig/#show-shell-commands-for-all-users","title":"Show shell commands for all users","text":"
          sysdig -pc -c spy_users\n
          "},{"location":"sysdig/#spy-on-a-user-and-exclude-a-process","title":"Spy on a user and exclude a process","text":"
          sysdig -pc -c spy_users proc.name!=gmetric\n

          Or exclude multiple processes

          sysdig -pc -c spy_users \"not proc.name in ( gmetric, awk, sed, grep )\"\n
          "},{"location":"sysdig/#show-a-top-like-interface","title":"Show a top like interface","text":"
          csysdig\n
          "},{"location":"sysdig/#links","title":"Links","text":"
          • https://github.com/draios/sysdig/wiki
          • https://github.com/draios/sysdig/wiki/Sysdig%20Examples
          • http://man7.org/linux/man-pages/man8/sysdig.8.html
          • Getting Started With Sysdig
          • Getting Started With Csysdig
          "},{"location":"systemd-resolved/","title":"systemd-resolved","text":"

          \"systemd-resolved - Network Name Resolution manager\" - man systemd-resolved

          systemd-resolved enhances (ie: interferes with) old school simple methods of managing DNS on a linux system. Whether /etc/resolv.conf is a regular file or a symlink changes the behavior of the service, which is a new mechanic that can have unexpected effects if you're not familiar with it.
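
          To see which mode you are in, check whether /etc/resolv.conf is a regular file or a symlink, and where it points:

          ls -l /etc/resolv.conf\nreadlink -f /etc/resolv.conf\n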

          "},{"location":"systemd-resolved/#examples","title":"Examples","text":""},{"location":"systemd-resolved/#resolve-a-hostname","title":"Resolve a hostname","text":"
          $ systemd-resolve google.com\ngoogle.com: 216.58.192.238\n\n-- Information acquired via protocol DNS in 23.9ms.\n-- Data is authenticated: no\n
          "},{"location":"systemd-resolved/#check-name-resolution-statistics","title":"Check name resolution statistics","text":"
          $ systemd-resolve --statistics\nDNSSEC supported by current servers: no\n\nTransactions\nCurrent Transactions: 0\n  Total Transactions: 36\n\nCache\n  Current Cache Size: 2\n          Cache Hits: 2\n        Cache Misses: 103\n\nDNSSEC Verdicts\n              Secure: 0\n            Insecure: 0\n               Bogus: 0\n       Indeterminate: 0\n
          "},{"location":"systemd-resolved/#see-also","title":"See Also","text":"
          • http://man7.org/linux/man-pages/man1/systemd-resolve.1.html
          • http://manpages.ubuntu.com/manpages/xenial/man8/systemd-resolved.service.8.html
          • http://manpages.ubuntu.com/manpages/bionic/man8/systemd-resolved.service.8.html
          "},{"location":"systemd/","title":"systemd","text":"

          \"systemd is a system and service manager for Linux, compatible with SysV and LSB init scripts. systemd provides aggressive parallelization capabilities, uses socket and D-Bus activation for starting services, offers on-demand starting of daemons, keeps track of processes using Linux control groups, supports snapshotting and restoring of the system state, maintains mount and automount points and implements an elaborate transactional dependency-based service control logic. It can work as a drop-in replacement for sysvinit.\" - http://www.freedesktop.org/wiki/Software/systemd/

          "},{"location":"systemd/#links","title":"Links","text":"
          • https://www.freedesktop.org/wiki/Software/systemd/TipsAndTricks: Tips and tricks
          • https://man7.org/linux/man-pages/man5/systemd.resource-control.5.html: Man page for control groups
          • https://www.freedesktop.org/wiki/Software/systemd/FrequentlyAskedQuestions: FAQ
          • https://fedoraproject.org/wiki/SysVinit_to_Systemd_Cheatsheet: sysvinit to chkconfig
          • https://wiki.ubuntu.com/SystemdForUpstartUsers: systemd for upstart users
          • https://github.com/joehillen/sysz: fzf interface for systemd
          "},{"location":"systemd/#tips","title":"Tips","text":""},{"location":"systemd/#init-file-locations","title":"Init file locations","text":"
          /usr/lib/systemd/system\n/etc/systemd/system # has precedence\n
          "},{"location":"systemd/#show-the-full-systemd-journal","title":"Show the full systemd journal","text":"

          The systemd journal is syslog and more.

          journalctl --full\n
          "},{"location":"systemd/#show-logs-for-the-last-15-minutes","title":"Show logs for the last 15 minutes","text":"
          journalctl --since \"-15 minutes\"\n
          "},{"location":"systemd/#show-logs-for-one-service","title":"Show logs for one service","text":"

          Find your service with systemctl list-units

          journalctl --unit=\"docker.service\" --since -5m\n
          "},{"location":"systemd/#show-only-logs-that-match-a-certain-pattern","title":"Show only logs that match a certain pattern","text":"

          \"PERL-compatible regular expressions are used. \u2026 If the pattern is all lowercase, matching is case insensitive. Otherwise, matching is case sensitive. This can be overridden with the --case-sensitive option\" - man journalctl

          journalctl -g '\\b(foo|bar)\\b\n
          "},{"location":"systemd/#show-the-nginx-journal-for-today","title":"Show the nginx journal for today","text":"

          The -u here is \"unit\", not \"user\".

          journalctl -u nginx.service --since today\n
          "},{"location":"systemd/#list-journals-by-boot","title":"List journals by boot","text":"
          journalctl --list-boots\n

          The above command produces the following style of output:

           -5 e6fcef265a164688b5f9aad999a9b1d2 Mon 2019-09-09 08:55:47 PDT\u2014Tue 2019-09-17 17:41:25 PDT\n -4 1e402042ad0a48bebe17298fd80dfb66 Tue 2019-09-17 17:42:06 PDT\u2014Tue 2019-09-17 18:26:28 PDT\n -3 1b36653fa7b64b1a808f10a894a0e303 Tue 2019-09-17 18:27:11 PDT\u2014Sun 2019-09-22 13:34:59 PDT\n -2 be854ba422934cf2a9e7952dc052461a Sun 2019-09-22 16:23:43 PDT\u2014Mon 2019-09-30 07:55:08 PDT\n -1 0454f1208e5e49c59fabf95cf4f68346 Mon 2019-09-30 07:55:51 PDT\u2014Fri 2019-10-04 08:54:38 PDT\n  0 f8c09c85ed9f4976987121a345b6f446 Fri 2019-10-04 08:55:22 PDT\u2014Wed 2019-10-09 15:34:01 PDT\n
          "},{"location":"systemd/#show-journal-for-previous-boot","title":"Show journal for previous boot","text":"

          Using the info from the previous --list-boots example, we can view the system log for the previous boot. This gives all system logs from the time the system booted to the time it shut down.

          journalctl -b -1\njournalctl -b 0454f1208e5e49c59fabf95cf4f68346\n
          "},{"location":"systemd/#show-timers","title":"Show timers","text":"

          \"A unit configuration file whose name ends in \".timer\" encodes information about a timer controlled and supervised by systemd, for timer-based activation.\" - man systemd.timer

          systemctl list-timers\n
          "},{"location":"systemd/#show-units","title":"Show units","text":"

          Units are things that are handled by systemd, including services.

          systemctl list-units\n
          "},{"location":"systemd/#show-dependencies","title":"Show dependencies","text":"

          This works on any .target or .service

          systemctl list-dependencies network.service\n
          "},{"location":"systemd/#enable-a-service","title":"Enable a service","text":"

          This behavior replaces chkconfig

          systemctl enable docker.service\n
          "},{"location":"systemd/#check-the-status-of-a-service-and-show-20-lines","title":"Check the status of a service and show 20 lines","text":"
          systemctl -n 20 status nodejs\n
          "},{"location":"systemd/#per-user-services","title":"Per-user services","text":"

          https://wiki.archlinux.org/index.php/Systemd/User

          /usr/lib/systemd/user/ # where services provided by installed packages go.\n/etc/systemd/user/ # where system-wide user services are placed by the system administrator.\n~/.config/systemd/user/ # where the user puts its own services.\n
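
          A minimal sketch of a per-user service; the unit name and command are hypothetical. Save it as ~/.config/systemd/user/myapp.service, then run systemctl --user daemon-reload and systemctl --user enable --now myapp.service

          [Unit]\nDescription=Example per-user service\n\n[Service]\nExecStart=/usr/bin/env python3 -m http.server 8080\nRestart=on-failure\n\n[Install]\n# user units hang off default.target, not multi-user.target\nWantedBy=default.target\n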
          "},{"location":"systemd/#alter-power-sleep-hibernate-button-behaviors","title":"Alter power / sleep / hibernate button behaviors","text":"
          /etc/systemd/logind.conf\n
          "},{"location":"systemd/#show-name-resolution-status","title":"Show name resolution status","text":"
          systemd-resolve --status\n
          "},{"location":"systemd/#show-boot-performance","title":"Show boot performance","text":"
          systemd-analyze blame\nsystemd-analyze critical-chain\n
          "},{"location":"systemd/#view-a-time-chart-of-the-boot-sequence","title":"View a time-chart of the boot sequence","text":"
          sudo systemd-analyze plot > systemd.svg\n
          "},{"location":"systemd/#show-cgroup-contexts","title":"Show cgroup contexts","text":"
          systemd-cgls\n
          "},{"location":"systemd/#show-top-control-groups-by-their-resource-usage","title":"Show top control groups by their resource usage","text":"
          systemd-cgtop\n
          "},{"location":"systemd/#detect-if-you-are-running-in-a-container-or-on-a-vm","title":"Detect if you are running in a container or on a VM","text":"
          systemd-detect-virt\n
          "},{"location":"tar/","title":"tar","text":"

          \"GNU 'tar' saves many files together into a single tape or disk archive, and can restore individual files from the archive.\" - man tar on linux

          \"tar - manipulate tape archives\" - man tar on macOS

          "},{"location":"tar/#examples","title":"Examples","text":""},{"location":"tar/#deal-with-leading-slash-quirks","title":"Deal with leading slash quirks","text":"

          Some tar archives have a leading ./ stored for every filename:

          $ curl -fsSL https://github.com/vectordotdev/vector/releases/download/v0.20.1/vector-0.20.1-x86_64-unknown-linux-gnu.tar.gz | tar -tzf -\n./vector-x86_64-unknown-linux-gnu/\n./vector-x86_64-unknown-linux-gnu/README.md\n./vector-x86_64-unknown-linux-gnu/bin/\n./vector-x86_64-unknown-linux-gnu/bin/vector\n

          and some do not

          $ curl -fsSL \"https://get.helm.sh/helm-v3.8.2-linux-amd64.tar.gz\" | tar -tzf -\nlinux-amd64/\nlinux-amd64/helm\nlinux-amd64/LICENSE\nlinux-amd64/README.md\n

          This alters the syntax when you want to extract a single file. You need to give the exact filename seen in tar -t to extract a single file. If you want to never have to deal with that leading ./, you can add --no-anchored

          danielh@cs-462709900404-default:~/temp/2022-04-20$ curl -fsSL https://github.com/vectordotdev/vector/releases/download/v0.20.1/vector-0.20.1-x86_64-unknown-linux-gnu.tar.gz | tar -tzf - vector-x86_64-unknown-linux-gnu/bin/\ntar: vector-x86_64-unknown-linux-gnu/bin: Not found in archive\ntar: Exiting with failure status due to previous errors\ndanielh@cs-462709900404-default:~/temp/2022-04-20$ curl -fsSL https://github.com/vectordotdev/vector/releases/download/v0.20.1/vector-0.20.1-x86_64-unknown-linux-gnu.tar.gz | tar -tzf - --no-anchored vector-x86_64-unknown-linux-gnu/bin/\n./vector-x86_64-unknown-linux-gnu/bin/\n./vector-x86_64-unknown-linux-gnu/bin/vector\n
          "},{"location":"tcl/","title":"TCL","text":"

          \"TCL Corporation is a Chinese multinational electronics company headquartered in Huizhou, Guangdong Province.\" - https://en.wikipedia.org/wiki/Tcl

          "},{"location":"tcl/#tcl-65c807","title":"TCL 65C807","text":"
          • https://www.tclusa.com/products/home-theater/c-series/tcl-65-class-c-series-4k-uhd-hdr-roku-smart-tv-65c807
          • Product Size (WxHxD) with Stand: 57.2\" x 36.0\" x 12.8\"
          • Product Size (WxHxD) without Stand: 57.2\" x 34.4\" x 2.3\"
          • Stand Separation Distance: 55.6\"
          • Product Weight with Stand: 53 lbs
          • Product Weight without Stand: 52 lbs
          "},{"location":"tcpdump/","title":"tcpdump","text":"

          Network sniffing tool.

          "},{"location":"tcpdump/#syntax-examples","title":"Syntax Examples","text":""},{"location":"tcpdump/#capture-packets-to-and-from-an-ip-address","title":"Capture packets to and from an IP address","text":"

          Captures all data that includes 1.2.3.4 as the source or destination address, but no other traffic.

          tcpdump host 1.2.3.4\n
          "},{"location":"tcpdump/#capture-traffic-that-contains-a-given-mac-address","title":"Capture traffic that contains a given mac address","text":"

          Writes capfile.cap containing all traffic to or from the specified mac address on the network attached to eth1.

          tcpdump -w capfile.cap -i eth1 ether host 00:03:fa:46:2c:08\n
          "},{"location":"tcpdump/#filter-packets-from-an-existing-capture","title":"Filter packets from an existing capture","text":"

          Filters port 53 packets out of the old capfile into the new

          tcpdump -r oldcapfile.cap -w newcapfile.cap port 53\n
          "},{"location":"tcpdump/#capture-all-pop3-traffic-and-all-traffic-from-a-particular-host","title":"Capture all pop3 traffic and all traffic from a particular host","text":"

          Captures all pop3 traffic and all traffic to or from the specified host on the first interface of a Mac OS X computer

          tcpdump -w foo.cap -i en0 ether host 00:03:9a:28:44:01 or port 110\n
          "},{"location":"tcpdump/#capture-all-traffic-not-a-mac-address","title":"Capture all traffic not a mac address","text":"

          Captures all traffic not from the host 00:1b:63:ce:83:2e, useful for filtering out your own traffic.

          tcpdump -i en1 not ether src 00:1b:63:ce:83:2e\n
          "},{"location":"tcpdump/#capture-lldp-traffic","title":"Capture LLDP traffic","text":"

          This matches the 2 bytes starting at byte offset 12 (the ethertype field) against 0x88cc

          tcpdump -v -s 1500 -c 1 '(ether[12:2]=0x88cc)'\n
          "},{"location":"tcpdump/#capture-syn-packets","title":"Capture SYN packets","text":"
          tcpdump -n 'tcp[13] & 2!=0'\n
          "},{"location":"tcpdump/#capture-synack-packets","title":"Capture SYN/ACK packets","text":"
          tcpdump -n 'tcp[13]=18'\n

          Or another way

          tcpdump 'tcp[tcpflags] && tcp-syn != 0'\n

          Or capture all SYN packets going only to two ethernet destinations:

          tcpdump 'tcp[13] & 2!=0 && (ether dst 00:22:64:f4:d0:70 or ether dst 00:22:64:f4:d0:6e)'\n
          "},{"location":"tcpdump/#write-capture-to-file-and-replay-it-at-the-same-time","title":"Write capture to file and replay it at the same time","text":"
          sudo tcpdump -n 'host 216.200.102.84' -s 1500 -l -w - | tee logcopy.pcap | tcpdump -r -\n
          "},{"location":"tcpdump/#write-a-circular-buffer-of-traffic","title":"Write a circular buffer of traffic","text":"

          This will write 5 files of 1 MB each and loop through them as the destination for writing traffic. That is, the filenames do not indicate chronology. The files will be named foo.cap[0-4]. You can reassemble them chronologically with mergecap -w merged.cap foo.cap*

          sudo tcpdump -C 1 -W 5 -w foo.cap\n
          "},{"location":"tcpdump/#show-how-many-bytes-were-captured-in-a-cap-file","title":"Show how many bytes were captured in a cap file","text":"

          This prints out some stats about captured packets, then adds up all the bytes. The size is from layer 3 up, so it excludes ethernet frame data.

          tcpdump -q -n -r hillary-clintons-email.cap | awk '{sum+=$NF} END {print sum}'\n
          "},{"location":"tcpdump/#print-out-a-list-of-observed-src-ip-addresses-every-5-seconds","title":"Print out a list of observed src ip addresses every 5 seconds","text":"

          This is limited to source addresses matching the awk pattern 10.8

          while true ; do\n  date '+%F %T%z'\n  sudo timeout 5 tcpdump -n 2>/dev/null |\n  awk '$3 ~ /10.8/ {\n    print gensub(/([0-9]*\\.[0-9]*\\.[0-9]*\\.[0-9]*)(\\.[0-9]*)?/, \"ip address: \\\\1\", \"g\", $3) ;\n  }' |\n  sort -t. -k4n |\n  uniq -c\ndone\n

          "},{"location":"tcpdump/#show-wpa-4-way-handshakes","title":"Show WPA 4-way handshakes","text":"
          tcpdump -n -i en0 \"ether proto 0x888e\"\n
          "},{"location":"tcpdump/#links","title":"Links","text":"
          • http://www.danielmiessler.com/study/tcpdump/
          • https://github.com/mozillazg/ptcpdump: \"Process-aware, eBPF-based tcpdump\" that can sniff k8s namespaces, pods, containers, etc..
          "},{"location":"tcpflow/","title":"tcpflow","text":"

          tcpflow uses the tcpdump libraries to reconstruct full TCP streams. It uses much of tcpdump's syntax.

          "},{"location":"tcpflow/#examples","title":"Examples","text":""},{"location":"tcpflow/#sniff-http-into-the-console","title":"Sniff HTTP into the console","text":"
          sudo tcpflow -c port 80\n
          "},{"location":"tcpflow/#see-also","title":"See Also","text":"
          • ngrep
          • tcpdump
          "},{"location":"terminal-emulator/","title":"Terminal Emulator","text":""},{"location":"terminal-emulator/#comparison","title":"Comparison","text":"

          Tracking things I care about in some terminal emulators I use.

          • Select a rectangle: iTerm2 cmd-opt, kitty N, alacritty ctrl-opt, cool-retro-term N
          • Tabs: iTerm2 cmd-t, kitty N, alacritty N, cool-retro-term N
          • Panes: iTerm2 cmd-d / cmd-shift-d, kitty N, alacritty N, cool-retro-term N
          • Broadcast input: iTerm2 opt-cmd-i, kitty N, alacritty N, cool-retro-term N
          • Speed: iTerm2 medium, kitty fast, alacritty fast, cool-retro-term slow
          • Easily export prefs: iTerm2 Decent, kitty Y, alacritty Y, cool-retro-term N
          "},{"location":"terminal-emulator/#links","title":"Links","text":"
          • https://en.wikipedia.org/wiki/List_of_terminal_emulators
          • https://sw.kovidgoyal.net/kitty/performance: Performance comparison of several popular terminal emulators comopared to kitty
          • https://jvns.ca/blog/2024/10/01/terminal-colours: \"Terminal colours are tricky\"
          "},{"location":"terraform/","title":"Terraform","text":"

          \"Write, Plan, and Create Infrastructure as Code\" - https://www.terraform.io/

          "},{"location":"terraform/#links","title":"Links","text":"
          • https://www.terraform.io/docs/providers/github/index.html
          • https://www.terraform.io/docs/providers/gitlab/index.html
          • https://github.com/28mm/blast-radius - Blast Radius is a tool for reasoning about Terraform dependency graphs with interactive visualizations.
          • https://terragrunt.gruntwork.io - \"Terragrunt is a thin wrapper that provides extra tools for keeping your configurations DRY, working with multiple Terraform modules, and managing remote state.\"
          "},{"location":"terraform/#examples","title":"Examples","text":""},{"location":"terraform/#generate-a-graph-of-module-dependencies","title":"Generate a graph of module dependencies","text":"
          terraform init # must succeed\nterraform graph | dot -Tsvg > graph.svg\n
          "},{"location":"time/","title":"time","text":"

          Notes about time technologies.

          "},{"location":"time/#iso-8601","title":"ISO 8601","text":"

          ISO 8601 Data elements and interchange formats - Information interchange - Representation of dates and times is an international standard covering the exchange of date and time-related data.

          "},{"location":"time/#iso-8601-format-examples","title":"ISO 8601 format examples","text":"

          See the ISO 8601 wikipedia page for many examples. Much of the content in this section was taken from that article.

          There are a ton of examples on this page: https://ijmacd.github.io/rfc3339-iso8601

          One notable syntax is that the letter T should always precede times. This aids in parsing, and distinguishes between month and minute, which are both shortened to M.

          Another notable syntax is the use of Z to mean a timezone offset of 0 hours, or GMT.
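
          For example, the date command can emit a Z-suffixed UTC timestamp directly:

          date -u +\"%Y-%m-%dT%H:%M:%SZ\"  ## eg: 2016-08-09T21:58:48Z\n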

          "},{"location":"time/#single-points-in-time","title":"Single points in time","text":"
          $ for fmt in date hours minutes seconds ns  ; do\n    bash -x -c \"\n      TZ=$(\n        awk '$1 !~ /^#/ {print $3}' /usr/share/zoneinfo/zone.tab |\n        sort -R |\n        head -n 1\n      ) \\\n      date --iso-8601=${fmt}\n    \" ;\n  done ;\n+ TZ=America/Paramaribo\n+ date --iso-8601=date\n2016-08-09\n+ TZ=Africa/Dakar\n+ date --iso-8601=hours\n2016-08-09T21+00:00\n+ TZ=Indian/Kerguelen\n+ date --iso-8601=minutes\n2016-08-10T02:58+05:00\n+ TZ=Pacific/Saipan\n+ date -Iseconds\n2016-08-10T07:58:48+10:00\n+ TZ=Pacific/Midway\n+ date --iso-8601=ns\n2016-08-09T10:58:48,503878101-11:00\n
          • Week: 2016-W32
          • Date with week number: 2016-W32-2
          • Month and day without year: -12-31
          "},{"location":"time/#durations-or-ranges-of-time","title":"Durations, or ranges of time","text":"

          Durations are a component of time intervals and define the amount of intervening time in a time interval.

          "},{"location":"time/#examples","title":"Examples","text":"
          • P10Y - a duration of ten years.
          • P5DT12H - a duration of five days and twelve hours.
          • P3Y6M4DT12H30M5S - a duration of three years, six months, four days, twelve hours, thirty minutes, and five seconds.
          • P1M - one month.
          • PT1M - one minute.
          "},{"location":"time/#time-intervals","title":"Time intervals","text":"

          A time interval is the intervening time between two time points. There are four ways to express a time interval:

          • Start and end, such as 2007-03-01T13:00:00Z/2008-05-11T15:30:00Z
          • Start and duration, such as 2007-03-01T13:00:00Z/P1Y2M10DT2H30M
          • Duration and end, such as P1Y2M10DT2H30M/2008-05-11T15:30:00Z
          • Duration only, such as P1Y2M10DT2H30M, with additional context information
          "},{"location":"time/#repeating-intervals","title":"Repeating intervals","text":"

          Repeating intervals are formed by adding R[n]/ to the beginning of an interval expression, such as R5/2007-03-01T13:00:00Z/2008-05-11T15:30:00Z. The n can be omitted if the interval should repeat forever.

          "},{"location":"time/#rfc-3339","title":"RFC 3339","text":"

          RFC 3339 is considered a profile of ISO 8601. It defines a profile of ISO 8601 for use in Internet protocols and standards. It explicitly excludes durations and dates before the common era. The more complex formats such as week numbers and ordinal days are not permitted.

          • https://tools.ietf.org/html/rfc3339
          "},{"location":"time/#leap-seconds","title":"Leap Seconds","text":"

          \"A leap second is a one-second adjustment that is occasionally applied to Coordinated Universal Time (UTC) in order to keep its time of day close to the mean solar time, or UT1.\" - https://en.wikipedia.org/wiki/Leap_second

          Leap seconds are scheduled by the International Earth Rotation and Reference Systems Service (see also: https://en.wikipedia.org/wiki/International_Earth_Rotation_and_Reference_Systems_Service). Leap seconds cause a variety of problems in computer systems, and complicate time tracking in general.

          "},{"location":"time/#public-time-server-handling-of-leap-seconds","title":"Public time server handling of leap seconds","text":"
          • https://developers.google.com/time/: Google time servers do leap second smearing
          • https://aws.amazon.com/about-aws/whats-new/2017/11/introducing-the-amazon-time-sync-service: AWS time servers do leap second smearing
          • https://docs.ntpsec.org/latest/leapsmear.html: ntp.org servers do not leap smear: \"Leap Second Smearing MUST NOT be used for public servers, e.g. servers provided by metrology institutes, or servers participating in the NTP pool project.\"
          "},{"location":"time/#leap-second-links","title":"Leap Second Links","text":"
          • https://www.nature.com/articles/d41586-022-03783-5: \"The leap second\u2019s time is up: world votes to stop pausing clocks\"
          • https://datacenter.iers.org/data/latestVersion/bulletinC.txt: When is the next leap second?
          • https://access.redhat.com/articles/15145: Resolve Leap Second Issues in Red Hat Enterprise Linux 4-8
          • https://developers.google.com/time/smear: Google Public NTP: Leap Smear
          • https://developers.redhat.com/blog/2015/06/01/five-different-ways-handle-leap-seconds-ntp/: Five different ways to handle leap seconds with NTP
          • http://www.madore.org/~david/computers/unix-leap-seconds.html: The Unix leap second mess
          • http://www.ntp.org/ntpfaq/NTP-s-algo-real.htm#AEN2499: ntp.org FAQ: What happens during a Leap Second?
          "},{"location":"time/#code-snips-and-examples","title":"Code snips and examples","text":""},{"location":"time/#quick-and-dirty-time-sync-in-linux-for-when-ntp-is-blocked","title":"Quick and dirty time sync in Linux for when NTP is blocked.","text":"
          date -s \"$(curl -s -D - google.com | sed '/Date:/s/.*Date: //p ; d')\"\n
          "},{"location":"time/#links","title":"Links","text":""},{"location":"time/#reading","title":"Reading","text":"
          • https://en.wikipedia.org/wiki/ISO_8601: \"ISO 8601 is an international standard covering the worldwide exchange and communication of date and time related data.\"
          • https://tools.ietf.org/html/rfc3339: Date and Time on the Internet: Timestamps - RFC 3339
          • https://www.gnu.org/software/coreutils/manual/html_node/Examples-of-date.html: Examples of date (GNU)
          • https://man7.org/linux/man-pages/man1/date.1.html: man date (linux)
          • https://man.freebsd.org/cgi/man.cgi?date: man date (freebsd)
          • https://infiniteundo.com/post/25326999628/falsehoods-programmers-believe-about-time: Falsehoods programmers believe about time
          • https://infiniteundo.com/post/25509354022/more-falsehoods-programmers-believe-about-time: More falsehoods programmers believe about time; \"wisdom of the crowd\" edition
          • https://www.iers.org: International Earth Rotation and Reference Systems Service
          • https://ijmacd.github.io/rfc3339-iso8601/: RFC 3339 vs ISO 8601 format visualizer
          • https://blog.healthchecks.io/2021/10/how-debian-cron-handles-dst-transitions/
          • https://everytimezone.com: Tool for coordinating across many time zones. Great for coordinating shift coverage across geographically distributed teams.
          • http://timesched.pocoo.org: A simpler tool for coordinating across many time zones.
          • https://blog.poormansmath.net/the-time-it-takes-to-change-the-time: Contains a map showing the difference in solar time and local time.
          "},{"location":"time/#videos","title":"Videos","text":"
          • https://youtu.be/-5wpm-gesOY: The Problem with Time & Timezones - Computerphile
          "},{"location":"time/#see-also","title":"See Also","text":"
          • ntp - Network Time Protocol
          • ptp - Precision Time Protocol
          "},{"location":"tls/","title":"tls","text":"

          TLS is Transport Layer Security. It used to be called SSL: the Secure Sockets Layer. It provides encryption for network traffic.

          "},{"location":"tls/#apache-ssl-steps","title":"Apache SSL steps","text":"
          1. Generate a host key: openssl genrsa -out foo.com.key 2048
          2. Generate a CSR from that key: openssl req -new -key foo.com.key -out foo.com.csr

          To set up VirtualHosts, follow this template: https://cwiki.apache.org/confluence/display/HTTPD/NameBasedSSLVHosts

          "},{"location":"tls/#examples","title":"Examples","text":""},{"location":"tls/#download-a-certificate-from-an-https-server","title":"Download a certificate from an https server","text":"
          get_certificate_from_server() {\n  hostname=\"$1\"\n  port=\"${2:-443}\"\n  ip_address=\"$(dig +short \"$hostname\")\"\n  echo |\n    openssl s_client -servername \"$hostname\" -connect \"${ip_address}:${port}\" 2>/dev/null |\n    sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p'\n}\n\nget_certificate_from_server github.com\n
          "},{"location":"tls/#show-info-about-a-certificate-file","title":"Show info about a certificate file","text":"
          openssl x509 -noout -text -in foo.pem\n
          "},{"location":"tls/#validate-a-keys-cert-pair","title":"Validate a keys / cert pair","text":"

To validate that a particular key was used to generate a certificate, which is useful for testing https key/crt files, do the following and make sure the modulus sections match:

          openssl rsa  -noout -modulus -in server.key\nopenssl x509 -noout -modulus -in server.crt # or server.pem\n

          Or as a function:

          function crt-key-compare {\n  if [ ! -f \"$1\" ] || [ ! -f \"$2\" ] ; then\n    echo \"ERROR: check that both files exist.\"\n    return 1\n  fi\n\n  if [[ \"$1\" != *crt* ]] || [[ \"$2\" != *key* ]] ; then\n    echo \"usage: crt-key-compare <server.crt> <server.key>\" ;\n  else\n    crt_modulus=$(openssl x509 -in \"$1\" -modulus | grep Modulus) || return 1\n    key_modulus=$(openssl rsa  -in \"$2\" -modulus | grep Modulus) || return 1\n\n    if diff <(echo \"$crt_modulus\") <(echo \"$key_modulus\") ; then\n      echo \"key and crt match.\"\n    else\n      echo \"key and crt do not match\"\n    fi\n  fi\n}\n
          "},{"location":"tls/#see-some-information-about-a-servers-certificate","title":"See some information about a server's certificate","text":"
SERVER_NAME=linuxforums.org\nSERVER_PORT=443\nopenssl s_client -connect \"${SERVER_NAME}:${SERVER_PORT:-443}\" -servername \"${SERVER_NAME}\"\n
          "},{"location":"tls/#see-just-the-dates-of-a-webservers-ssl-certificate","title":"See just the dates of a webserver's SSL certificate","text":"
          check-server-cert-dates() {\n    test -z \"$1\" && { echo \"Usage: check-server-cert-dates <servername> [port]\" ; return 1 ; }\n    openssl s_client -connect \"${1}:${2:-443}\" -servername \"$1\" 2>/dev/null </dev/null |\n    openssl x509 -noout -dates\n}\n
          "},{"location":"tls/#show-the-issuer-and-dates-of-a-certificate","title":"Show the issuer and dates of a certificate","text":"

          This is useful when you're moving certs between issuers, for instance if you moved from letsencrypt to something else, then later get an expiration notice from letsencrypt, and want to verify that you're not using that certificate anymore:

          openssl s_client -connect \"${REMOTE_HOST}:443\" -servername \"$REMOTE_HOST\" 2>/dev/null </dev/null |\nopenssl x509 -noout -issuer -dates\n

          The output will be something like:

          issuer= /C=US/O=DigiCert Inc/CN=DigiCert TLS Hybrid ECC SHA384 2020 CA1\nnotBefore=Feb 18 00:00:00 2022 GMT\nnotAfter=Oct  5 23:59:59 2022 GMT\n
          "},{"location":"tls/#encrypt-a-file","title":"Encrypt a file","text":"
openssl enc -aes-256-cbc -salt -pbkdf2 -in yourfile -out yourfile.enc\n
          "},{"location":"tls/#decrypt-a-file","title":"Decrypt a file","text":"
openssl enc -aes-256-cbc -d -pbkdf2 -in encryptedfile.enc -out decryptedfile\n
          "},{"location":"tls/#encrypt-decrypt-bash-functions","title":"Encrypt / Decrypt bash functions","text":"
function encrypt_file() { openssl enc -aes-256-cbc -salt -pbkdf2 -in \"$1\" -out \"$1.enc\" ; }\nfunction decrypt_file() { openssl enc -aes-256-cbc -d -pbkdf2 -in \"$1\" -out \"$1.dec\" ; }\n
          "},{"location":"tls/#perform-a-benchmark","title":"Perform a benchmark","text":"

          You can run benchmarks on one or more ciphers or digests using openssl speed.

          openssl speed -seconds 5 -evp sha256 sha512\n
          "},{"location":"tls/#generate-random-data","title":"Generate random data","text":"

openssl can generate pseudo-random data faster than /dev/urandom. This generates a 1 megabyte random data file at over 3x the speed of using /dev/urandom.

          openssl enc -aes-256-ctr -pbkdf2 -pass pass:\"foo\" < /dev/zero | dd bs=1024 count=1024 of=1_megabyte_random.dat\n
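openssl also has a dedicated rand subcommand; a simpler alternative for generating the same 1 megabyte file, though I haven't benchmarked it against the enc approach above:

openssl rand -out 1_megabyte_random.dat 1048576\n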
          "},{"location":"tls/#see-also","title":"See Also","text":"
          • https://datatracker.ietf.org/doc/html/rfc5246 - The Transport Layer Security (TLS) Protocol Version 1.2
          • https://github.com/cloudflare/cfssl - CFSSL: Cloudflare's PKI and TLS toolkit https://cfssl.org
          • https://tls12.xargs.org - Every byte of a TLS connection explained and reproduced
          • https://github.com/FiloSottile/mkcert - simple local dev Certificate Authority
          • https://www.tutorialsteacher.com/https/ssl-certificate-format - Good overview of certificate formats, including diagrams
          • https://shkspr.mobi/blog/2022/01/should-you-use-lets-encrypt-for-internal-hostnames
          • https://badssl.com: Hosting a variety of ssl failure modes, this site is a great resource when writing code that needs to handle all of these different scenarios.
          "},{"location":"tmux/","title":"tmux","text":"

          \"tmux is a terminal multiplexer: it enables a number of terminals to be created, accessed, and controlled from a single screen.\" - man tmux

You can think of it as a more modern replacement for GNU Screen.

          "},{"location":"tmux/#examples","title":"Examples","text":""},{"location":"tmux/#attach-to-a-new-session-or-open-a-new-session-if-there-is-none","title":"Attach to a new session, or open a new session if there is none","text":"

          Use this shell alias:

          alias t=\"tmux attach || tmux new-session\"\n
          "},{"location":"tmux/#open-several-named-tabs","title":"Open several named tabs","text":"

This example opens named windows (tmux's equivalent of tabs) for several hosts and connects to them via ssh. I use a shell script called ssh-stay-connected, which attempts to reconnect to any lost session; otherwise the window will be closed when the ssh command terminates. A sketch of such a script is shown after the example:

          for node in n1 n2 n3 ; do tmux new-window -n \"$node\" ssh-stay-connected \"$node\" ; done ;\n
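ssh-stay-connected itself is not shown here; a minimal sketch of what such a script might look like:

#!/bin/bash\n# Hypothetical ssh-stay-connected: retry ssh until it exits cleanly\nwhile ! ssh \"$1\" ; do\n  sleep 5\ndone\n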
          "},{"location":"tmux/#links","title":"Links","text":"
          • https://github.com/tmux/tmux/wiki
          • https://github.com/rothgar/awesome-tmux
          "},{"location":"top-variant-list/","title":"Top Variant List","text":"

          The top interface is a common pattern in the CLI tool world. Here are some top style tools.

          "},{"location":"top-variant-list/#top-style-tools","title":"top style tools","text":"
          • atop: Linux top tool that catches short-lived processes. Written in C.
          • bottom: \"Yet another cross-platform graphical process/system monitor.\" Written in rust.
          • btop: \"Resource monitor that shows usage and stats for processor, memory, disks, network and processes.\" Written in C++.
          • glances: \"Glances is an open-source system cross-platform monitoring tool. It allows real-time monitoring of various aspects of your system such as CPU, memory, disk, network usage etc. It also allows monitoring of running processes, logged in users, temperatures, voltages, fan speeds etc.\" Written in python.
          • htop: \"htop is a cross-platform interactive process viewer.\" Written in C.
          • iftop: Top for network interfaces. Hasn't been updated since 2014 (as of 2024-02-05)
          • innotop: \"A realtime terminal-based top-like monitor for MySQL\" Written in perl.
          • iotop: \"A top utility for IO\" Written in C.
          • nethogs: Network top that shows usage by pid.
          • ngxtop: top like view of nginx logs.
          • ntop: Top for networking.
          • nvtop: \"Nvtop stands for Neat Videocard TOP, a (h)top like task monitor for AMD, Intel and NVIDIA GPUs.\"
          • powertop: Top for power usage.
          • top: The original.
          "},{"location":"top/","title":"top","text":"

top is a standard unix CLI tool to show running processes. This is one of those tools that behaves differently on BSD (macOS) and Linux, and may behave differently on other unixes.

          "},{"location":"top/#gnu-top-usage","title":"GNU top Usage","text":"
          • Change the number of displayed processes with n
          • Display all CPUs with 1
          • Kill a process with k
          • Renice a process with r
          • Save current display as default in ~/.toprc with W
          • Show or hide idle processes with i
          • Sort output with O
          "},{"location":"top/#bsd-top-usage","title":"BSD top Usage","text":""},{"location":"top/#start-top-sorted-by-cpu","title":"Start top sorted by cpu","text":"
          top -u\n
          "},{"location":"top/#see-also","title":"See also","text":"
          • Top variant list
          "},{"location":"touch/","title":"touch","text":"

touch is a command to modify the timestamp metadata of a given file. By default it updates the access and modification times of the given files, creating any files that do not exist.

          "},{"location":"touch/#examples","title":"Examples","text":""},{"location":"touch/#create-an-empty-file","title":"Create an empty file","text":"
          touch somefile\n
          "},{"location":"touch/#update-a-files-modify-time-to-a-specific-date","title":"Update a file's modify time to a specific date","text":"
          • -m to set modify time
          • -t to specify a timestamp in the format [[CC]YY]MMDDhhmm[.SS]
          touch -m -t 199902030405.06 1999-02-03-04-05-06.gpx\n
          "},{"location":"touch/#update-access-time-on-several-files","title":"Update access time on several files","text":"

Not all filesystems support access time updates (for example, filesystems mounted with noatime).

          touch -a -t 199509150102 GARBAGE/FILE\\ [1234]\n
          "},{"location":"touch/#randomize-the-mtime-for-a-given-file","title":"Randomize the mtime for a given file","text":"

bash's RANDOM only goes up to 32767, which is about 9 hours in 1-second increments. With RANDOM * 32767 + RANDOM we can get this up to just over 34 years.

          randomize-mtime() {\n  seconds=\"$(( $(date +%s) - $(( RANDOM * 32767 )) - RANDOM))\"\n  new_mtime=\"$(gdate -d @\"${seconds}\" \"+%Y%m%d%H%M.%S\")\"\n  echo \"${new_mtime} $*\" 1>&2\n  touch -m -t \"${new_mtime}\" \"$@\"\n}\n\n## change mtime of all files to the same random mtime\nrandomize-mtime test-foo{1..3} ;\n\n## change mtime of each file to a different random mtime\nfor F in test-bar{1..3} ; do\n  randomize-mtime \"$F\"\ndone\n
          "},{"location":"tr/","title":"tr","text":"

          \"The tr utility copies the standard input to the standard output with substitution or deletion of selected characters.\" - man tr

          "},{"location":"tr/#examples","title":"Examples","text":"

Interestingly, tr does not have any features to operate on files; it operates only on stdin. To use it on files you must use input redirection like tr ... < filename.txt or pipes like cat filename.txt | tr ...
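For example, to delete carriage returns from a DOS-formatted file (hypothetical filenames):

tr -d '\\r' < dosfile.txt > unixfile.txt\n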

          "},{"location":"tr/#replace-all-non-letters-with-a-carriage-return","title":"Replace all non-letters with a carriage return","text":"

-s squeezes adjacent matches into a single occurrence.

          $ echo abcdefghijklmnopqrstuvwxyz | tr g-t '_'\nabcdef______________uvwxyz\n$ echo abcdefghijklmnopqrstuvwxyz | tr -c g-t '_'\n______ghijklmnopqrst_______\n$ echo abcdefghijklmnopqrstuvwxyz | tr -s g-t '_'\nabcdef_uvwxyz\n$ echo abcdefghijklmnopqrstuvwxyz | tr -cs g-t '_'\n_ghijklmnopqrst_$\n

In Doug McIlroy's critique of Donald Knuth's word frequency program, tr was used twice. Here's a somewhat modified version:

          $ man tr | tr -cs A-Za-z '\\n' | tr A-Z a-z | sort | uniq -c | sort -rn | head\n  96 the\n  45 in\n  44 characters\n  38 string\n  30 of\n  29 a\n  25 to\n  23 tr\n  22 character\n  21 is\n
          "},{"location":"tr/#see-also","title":"See also","text":"
          • tr is often used with cut, though I prefer awk most of the time.
          "},{"location":"ubuntu/","title":"Ubuntu Linux","text":"

          \"Ubuntu is an open source software operating system that runs from the desktop, to the cloud, to all your internet connected things.\" - https://www.ubuntu.com

          I used Ubuntu for a long time but finally switched back to Debian because of snapd, so this doc is mostly historical.

          "},{"location":"ubuntu/#versions-overview","title":"Versions overview","text":"Codename Version EOS Kernel Python bash zfs Jammy 22.04 LTS 2027-04 5.15 3.10 5.1 2.1.5 Impish 21.10 2022-07 5.13 3.9.7 5.1.8 2.0.6 Focal 20.04 LTS 2025-04 5.4 3.8 5.0.17 0.8.3 Eoan 19.10 2010-07 5.3 2.7.17, 3.7 5.0.3 0.8.1 Disco 19.04 2020-01 5.0 2.7.16, 3.7 5.0.3 0.7.12 Bionic 18.04 LTS 2023-04 4.15 2.7.15, 3.6 4.4.20 0.7.5 Xenial 16.04 LTS 2021-04 4.4 2.7.12, 3.5 4.3.46 0.6.5 Trusty 14.04 LTS 2019-04 3.13 2.7.6 4.3.11"},{"location":"ubuntu/#links","title":"Links","text":"
          • Ubuntu Kernel Support and Schedules
          • List of releases
          • Table of Versions
          • https://endoflife.date/ubuntu
          "},{"location":"udev/","title":"udev","text":"

          \"udev supplies the system software with device events, manages permissions of device nodes and may create additional symlinks in the /dev directory, or renames network interfaces.\" - man udev

          "},{"location":"udev/#tips","title":"Tips","text":"
          • Default rules are in /lib/udev/rules.d
          "},{"location":"udev/#monitor-udev-events-in-real-time","title":"Monitor udev events in real time","text":"
          udevadm monitor\n
          "},{"location":"udev/#view-udev-environment-for-a-given-disk","title":"View udev environment for a given disk","text":"

          This is helpful when writing udev rules.

          udevadm info /dev/sda\n
          "},{"location":"udev/#view-human-readable-information-about-a-disk","title":"View human readable information about a disk","text":"
          udisksctl info -b /dev/sda\n
          "},{"location":"udev/#mount-namespace-problems","title":"Mount namespace problems","text":"

In Ubuntu, and probably other Linuxes, udevd runs in its own mount namespace. This means that if you mount things using udev rules, by default the mounts will be in an isolated namespace where users and other processes cannot access them. You can view this with:

          root@bionic:~# lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1\n\nroot@bionic:~# nsenter --all -t $(pgrep systemd-udevd) lsblk -o NAME,MOUNTPOINT /dev/sdc\nNAME   MOUNTPOINT\nsdc\n\u2514\u2500sdc1 /mnt/adea64ca-e340-4961-8a4d-75d8a5970664\n

          To solve this, run systemctl edit systemd-udevd and input the following config:

          [Service]\nMountFlags=shared\n

Then run systemctl daemon-reload ; service systemd-udevd --full-restart. See man 7 mount_namespaces for more info.

          "},{"location":"udev/#trigger-a-disk-add-event-for-a-disk-that-has-already-been-inserted","title":"Trigger a disk add event for a disk that has already been inserted","text":"
          udevadm trigger -t devices --name /dev/sda1 --action=add\n
          "},{"location":"udev/#list-builtin-modules","title":"List builtin modules","text":"
          udevadm test-builtin --help\n
          "},{"location":"udev/#links","title":"Links","text":"
          • http://www.reactivated.net/writing_udev_rules.html
          "},{"location":"upstart/","title":"Upstart","text":"

          \"Upstart is an event-based replacement for the /sbin/init daemon which handles starting of tasks and services during boot, stopping them during shutdown and supervising them while the system is running.\" - http://upstart.ubuntu.com

          "},{"location":"upstart/#examples","title":"Examples","text":""},{"location":"upstart/#start-multiple-instances-of-the-same-services","title":"Start multiple instances of the same services","text":"

          http://upstart.ubuntu.com/cookbook/#instance

          "},{"location":"upstart/#master","title":"Master","text":"
          start on runlevel [2345]\nstop on runlevel [^2345]\nrespawn\n\nenv job_count=6\n\npre-start script\n  for i in $(seq -w 1 ${job_count}) ; do\n    start photoworker N=${i}\n  done\nend script\n\npost-stop script\n  for i in $(seq -w 1 ${job_count}) ; do\n    stop photoworker N=${i}\n  done\nend script\n\n
          "},{"location":"upstart/#child","title":"Child","text":"
          respawn\nrespawn limit 10 5\ninstance $N\nenv logfile=\"/var/log/worker_photoworker.log\"\nchdir /srv/photoworkers/current/web/services/jobworkers\n\npre-start exec bash -c \"echo $(date --rfc-3339=seconds) beginning worker run >> ${logfile}\"\nexec su -s /bin/sh -c 'exec \"$0\" \"$@\"' php-worker -- php photoworker.php >> ${logfile} 2>&1\npost-stop exec bash -c \"echo $(date --rfc-3339=seconds) ended worker run >> ${logfile}\"\n
          "},{"location":"upstart/#redirect-all-output-of-an-upstart-script-to-syslog","title":"Redirect all output of an upstart script to syslog","text":"

          Found at http://serverfault.com/questions/114052/logging-a-daemons-output-with-upstart

          script\n  FIFO=fifo.temp\n  mkfifo $FIFO\n\n  ( logger -t myservice <$FIFO & )\n\n  exec > $FIFO\n  rm $FIFO\n\n  exec /usr/local/bin/myservice 2>&1\nend script\n
          "},{"location":"upstart/#links","title":"Links","text":"
          • http://upstart.ubuntu.com/cookbook
          "},{"location":"utm/","title":"utm","text":"

          \"UTM employs Apple's Hypervisor virtualization framework to run ARM64 operating systems on Apple Silicon at near native speeds. On Intel Macs, x86/x64 operating system can be virtualized. In addition, lower performance emulation is available to run x86/x64 on Apple Silicon as well as ARM64 on Intel.\" - https://mac.getutm.app

          "},{"location":"vagrant/","title":"vagrant","text":"

          \"Vagrant is a tool for building and managing virtual machine environments in a single workflow.\" - https://www.vagrantup.com/intro/index.html

          "},{"location":"vagrant/#links","title":"Links","text":"
          • https://www.vagrantup.com
          • https://docs.vagrantup.com/v2
          • https://atlas.hashicorp.com
          • Examples: https://github.com/patrickdlee/vagrant-examples
          "},{"location":"vagrant/#os-x-shell-tweaks","title":"OS X Shell tweaks","text":"
          brew tap homebrew/completions\nbrew install vagrant-completion\n

          Then in .bash_profile:

          if [ -f $(brew --prefix)/etc/bash_completion ]; then\n  . $(brew --prefix)/etc/bash_completion\nfi\n
          "},{"location":"vagrant/#plugins","title":"Plugins","text":"
          vagrant plugin install vagrant-vbguest\nvagrant plugin install vagrant-hosts\n
          "},{"location":"vagrant/#usage-examples","title":"Usage Examples","text":""},{"location":"vagrant/#list-which-boxes-you-have-stored-locally","title":"List which boxes you have stored locally","text":"
vagrant box list\n
          "},{"location":"vagrant/#remove-an-old-version-of-a-vagrant-box","title":"Remove an old version of a vagrant box","text":"
          vagrant box remove ubuntu/trusty64 --box-version 20151201.0.0\n
          "},{"location":"vagrant/#script-box-updates","title":"Script box updates","text":"

This may fail in some circumstances; I haven't tested it exhaustively.

          vagrant box outdated --machine-readable --global |\n  awk -F, '$4 == \"warn\" {print $5; exit 1}' |\n  awk -F\"'\" '{print $2}' |\n  xargs -n1 vagrant box update --box\n
          "},{"location":"vagrant/#show-status-of-all-running-vagrant-boxes-not-just-the-one-in-the-cwd","title":"Show status of all running Vagrant boxes, not just the one in the CWD","text":"
          vagrant global-status\n
          "},{"location":"vector/","title":"vector","text":"

          \"A lightweight, ultra-fast tool for building observability pipelines\" - https://vector.dev

You can think of vector as a replacement for fluentd or fluentbit. It is great for reading inputs, transforming those inputs, and sending those inputs elsewhere. EG: for reading logs and shipping them.

          "},{"location":"vector/#links","title":"Links","text":"
          • https://github.com/vectordotdev/vector
          • https://vector.dev/docs/reference
          "},{"location":"vector/#examples","title":"Examples","text":""},{"location":"vector/#show-the-supported-sources-transforms-sinks","title":"Show the supported sources, transforms, sinks","text":"

          I'm not going to paste them here because the list is long and likely would be different depending on your version, but you can view them via:

          vector list\n

          The list as of vector 0.22.0 includes things from aws, gcp, splunk, prometheus, kafka, influxdb, elasticsearch, azure, and more.

          "},{"location":"vector/#spawn-a-process-and-handle-its-stdout-and-stderr","title":"Spawn a process and handle its stdout and stderr","text":"

          One problem with reading stdout and stderr in linux is that those are two different file handles, so you have to handle them as such. Having a tool to aggregate them back into a single stream with annotations about what stream they were taken from is great. This example shows how to use vector to spawn a subprocess, remove some fields, and print to stdout:

          #!/bin/bash\n# Filename: /tmp/stream-test.sh\n\nfor _ in {1..5} ; do\n  echo \"This is stdout\"\n  echo \"This is stderr\" >&2\n  sleep 0.$(( RANDOM ))\ndone\n

          The default config file format is toml, but the below example uses yaml because it is my preference. You can convert between them with dasel.

          # Filename: vector.yaml\n---\n# https://vector.dev/docs/reference/configuration/sources/exec\nsources:\n  exec:\n    command:\n      - /tmp/stream-test.sh\n    decoding:\n      codec: bytes\n    mode: streaming\n    streaming:\n      respawn_on_exit: False\n    type: exec\n\n# https://vector.dev/docs/reference/configuration/transforms\ntransforms:\n  remove_exec_fields:\n    inputs:\n      - exec\n    # https://vector.dev/docs/reference/vrl/\n    source: |-\n      del(.command)\n      del(.host)\n      del(.source_type)\n    type: remap\n\n# https://vector.dev/docs/reference/configuration/sinks/console\nsinks:\n  print:\n    encoding:\n      codec: json\n    inputs:\n      - remove_exec_fields\n    type: console\n
          $ vector --config vector.yaml\n2022-06-01T21:29:35.914895Z  INFO vector::app: Log level is enabled. level=\"vector=info,codec=info,vrl=info,file_source=info,tower_limit=trace,rdkafka=info,buffers=info,kube=info\"\n2022-06-01T21:29:35.915019Z  INFO vector::app: Loading configs. paths=[\"vector.yaml\"]\n2022-06-01T21:29:35.916968Z  INFO vector::topology::running: Running healthchecks.\n2022-06-01T21:29:35.917095Z  INFO vector: Vector has started. debug=\"false\" version=\"0.22.0\" arch=\"x86_64\" build_id=\"5e937e3 2022-06-01\"\n2022-06-01T21:29:35.917138Z  INFO vector::app: API is disabled, enable by setting `api.enabled` to `true` and use commands like `vector top`.\n2022-06-01T21:29:35.917152Z  INFO vector::topology::builder: Healthcheck: Passed.\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:35.918778044Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:35.918821210Z\"}\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:36.679150968Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:36.679193905Z\"}\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:36.959284295Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:36.959315187Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:37.124459926Z\"}\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:37.124598441Z\"}\n{\"message\":\"This is stderr\",\"pid\":2470931,\"stream\":\"stderr\",\"timestamp\":\"2022-06-01T21:29:37.241035793Z\"}\n{\"message\":\"This is stdout\",\"pid\":2470931,\"stream\":\"stdout\",\"timestamp\":\"2022-06-01T21:29:37.241074381Z\"}\n2022-06-01T21:29:37.484711Z  INFO vector::shutdown: All sources have finished.\n2022-06-01T21:29:37.484751Z  INFO vector: Vector has stopped.\n

Even in the above example you can see how difficult it is to aggregate stdout and stderr with accurate ordering. In the script, stderr always comes second, but in all but one of these iterations stderr was handled before stdout. This is not a problem with vector; it is a fundamental posix problem, due to stderr and stdout being separate streams. However, vector seems to have a method for handling this when a timestamp shows up in the stream. If I replace echo with date \"+%FT%T%z.%N foo\" in both streams, the lines are consistently in order (see the sketch below). Of course, another way to handle this is to output logs as structured data with the timestamp right from the source, but you will not always have control over the source log format.
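A sketch of that change to the test script above:

for _ in {1..5} ; do\n  date \"+%FT%T%z.%N this is stdout\"\n  date \"+%FT%T%z.%N this is stderr\" >&2\n  sleep 0.$(( RANDOM ))\ndone\n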

          Another aspect of this setup is you can use vector as a type of init system, because you can set sources.exec.streaming.respawn_on_exit = true which will re-launch the process if it dies for some reason.
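A sketch of that option in the yaml form used above, showing only the source:

sources:\n  exec:\n    type: exec\n    mode: streaming\n    streaming:\n      respawn_on_exit: true  # re-launch the command if it exits\n    command:\n      - /tmp/stream-test.sh\n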

          "},{"location":"vector/#tap-a-running-vector-instance","title":"Tap a running vector instance","text":"

          https://vector.dev/guides/level-up/vector-tap-guide/

Vector has a feature called tap that lets you hook into a running instance and see what is coming through. You can enable this in your vector config via:

          # Filename: vector.toml\n[api]\nenabled = true\n

Then simply run:

          vector tap\n

This shows pre-transform inputs and outputs, which is useful when you are not seeing the output you expect, because you can see the before and after right next to each other. There are also some further arguments you can pass to vector tap that let you filter for specific inputs or outputs. See vector tap --help for those syntaxes.

          "},{"location":"vector/#debug-syntax-using-a-repl","title":"Debug syntax using a repl","text":"

          https://vector.dev/docs/reference/vrl/

Vector has a repl feature that can be used for developing configs and debugging. Launch it with vector vrl. Once inside, type help to get guidance on how to proceed.

          "},{"location":"velero/","title":"velero","text":"

          \"Velero is a tool for managing disaster recovery, specifically for Kubernetes cluster resources. It provides a simple, configurable, and operationally robust way to back up your application state and associated data.\" - velero --help

          "},{"location":"velero/#examples","title":"Examples","text":""},{"location":"velero/#list-backups","title":"List backups","text":"
          $ velero backup get\nNAME                             STATUS            ERRORS   WARNINGS   CREATED                         EXPIRES   STORAGE LOCATION   SELECTOR\nvelero-somename-20210916020049   PartiallyFailed   1        0          2021-09-15 18:00:49 -0800 PST   26d       default            <none>\nvelero-somename-20210915020048   PartiallyFailed   1        0          2021-09-14 18:00:48 -0800 PST   25d       default            <none>\nvelero-somename-20210914020048   Completed         0        0          2021-09-13 18:00:48 -0800 PST   24d       default            <none>\nvelero-somename-20210913020026   Completed         0        0          2021-09-12 18:00:26 -0800 PST   23d       default            <none>\n

          Or as yaml

          velero backup get -o yaml\n
          "},{"location":"velero/#list-backup-schedules","title":"List backup schedules","text":"
          velero schedule get\n
          "},{"location":"velero/#get-logs-for-a-specific-backup","title":"Get logs for a specific backup","text":"

This looks like the same thing that comes out of kubectl logs or stern, but it lets you see the entire history, which is likely not available any other way.

          velero logs \"${backup_name}\"\n
          "},{"location":"vim/","title":"vim","text":"

          \"Vim is a text editor that is upwards compatible to Vi.\" - man vim

          "},{"location":"vim/#modelines","title":"Modelines","text":"

          modelines are commented lines in files that set vim settings to use when editing that file.

          http://vim.wikia.com/wiki/Modeline_magic

          "},{"location":"vim/#modeline-example","title":"modeline example:","text":"
## vim: set expandtab ts=2 :\n
          "},{"location":"vim/#links","title":"Links","text":"
          • https://www.barbarianmeetscoding.com/boost-your-coding-fu-with-vscode-and-vim/moving-blazingly-fast-with-the-core-vim-motions
          "},{"location":"virtual-reality/","title":"Virtual Realtiy","text":"

          \"Virtual reality (VR) is a simulated experience that employs pose tracking and 3D near-eye displays to give the user an immersive feel of a virtual world.\" - https://en.wikipedia.org/wiki/Virtual_reality

          \"Virtual reality will grow, just as the telegraph grew to the telephone - as the radio to the TV - it will be everywhere.\" - Jobe Smith

          "},{"location":"virtual-reality/#links","title":"Links","text":"
          • https://vr-compare.com
          • https://hugo.blog/2024/03/11/vision-pro: \"What we got wrong at Oculus that Apple got right\"
          "},{"location":"virtualbox/","title":"VirtualBox","text":""},{"location":"virtualbox/#vboxmanage-examples","title":"VBoxManage Examples","text":"

          VBoxManage is the command line tool used to interact with VirtualBox.

          "},{"location":"virtualbox/#create-a-new-virtual-disk-image","title":"Create a new virtual disk image","text":"

          https://www.virtualbox.org/manual/ch08.html#vboxmanage-createmedium

          VBoxManage createmedium --format vdi --filename $DISK_NAME --size $DISK_SIZE_IN_MB\n
          "},{"location":"virtualbox/#attach-storage-to-a-vm","title":"Attach storage to a vm","text":"

          https://www.virtualbox.org/manual/ch08.html#vboxmanage-storageattach

VBoxManage storageattach $VM_NAME --storagectl $STORAGE_CONTROLLER --port 0 --device 0 --type hdd --medium $DISK_NAME\n
          "},{"location":"vpn/","title":"VPN","text":""},{"location":"vpn/#links","title":"Links","text":"
          • Nebula: \"Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security. It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, and Windows.\"
          • OpenVPN: \"OpenVPN is an open-source commercial software that implements virtual private network (VPN) techniques to create secure point-to-point or site-to-site connections in routed or bridged configurations and remote access facilities. It uses a custom security protocol that utilizes SSL/TLS for key exchange. It is capable of traversing network address translators (NATs) and firewalls. It was written by James Yonan and is published under the GNU General Public License (GPL)\" - OpenVPN Wikipedia entry
          • Tailscale: \"A secure network that just works\"
          • Tinc: \"tinc is a Virtual Private Network (VPN) daemon that uses tunnelling and encryption to create a secure private network between hosts on the Internet.\"
          • WireGuard: \"WireGuard is a free and open-source software application and communication protocol that implements virtual private network (VPN) techniques to create secure point-to-point connections in routed or bridged configurations. It is run as a module inside the Linux kernel and aims for better performance than the IPsec and OpenVPN tunneling protocols. It was written by Jason A. Donenfeld and is published under the second version of the GNU General Public License (GPL).\" - WireGuard Wikipedia entry
          • ZeroTier: \"ZeroTier, Inc is a software company with a Freemium business model based in Irvine, California. ZeroTier provides open-source tools and commercial products in the SDWAN sector that enable developers, operators and security professionals to create and manage geographically-agnostic virtual data centers. The company's flagship product ZeroTier One is a Private peer-to-peer client that enables devices such as laptops, desktops, phones, embedded devices, cloud resources, and services to securely connect to virtual networks.\" - ZeroTier Wikipedia entry
          "},{"location":"wasm/","title":"web assembly","text":"

          \"WebAssembly (abbreviated Wasm) is a binary instruction format for a stack-based virtual machine. Wasm is designed as a portable compilation target for programming languages, enabling deployment on the web for client and server applications.\" - https://webassembly.org

          "},{"location":"wasm/#links","title":"Links","text":"
          • https://webassembly.org
          • https://pyscript.net
          • https://github.com/webrcade/webrcade: Multi-game-system app in-browser
          • https://pythondev.readthedocs.io/wasm.html
          • https://github.com/sagemathinc/python-wasm
          • https://bytecodealliance.org/articles/wasmtime-1-0-fast-safe-and-production-ready
          • https://system7.app: Mac OS 7 in wasm
          • https://copy.sh/v86/: A variety of x86 operating systems running in-browser
          • https://github.com/psf/webassembly: \"A repo to track the progress of Python on WebAssembly (WASM)\"
          "},{"location":"webgl/","title":"webgl","text":"

          \"WebGL (short for Web Graphics Library) is a JavaScript API for rendering interactive 2D and 3D graphics within any compatible web browser without the use of plug-ins.\" - https://en.wikipedia.org/wiki/WebGL

          "},{"location":"webgl/#links","title":"Links","text":"
          • https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API
          • https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API/Tutorial/Getting_started_with_WebGL
          • https://threejs.org/examples/#webgl_animation_skinning_ik
          • https://ciechanow.ski: Really cool educational blog using webgl to teach about physical objects.
          "},{"location":"wget/","title":"Wget","text":"

          \"GNU Wget is a free software package for retrieving files using HTTP, HTTPS, FTP and FTPS the most widely-used Internet protocols. It is a non-interactive commandline tool, so it may easily be called from scripts, cron jobs, terminals without X-Windows support, etc.\" - https://www.gnu.org/software/wget

          "},{"location":"wget/#examples","title":"Examples","text":""},{"location":"wget/#mirror-site-for-local-viewing","title":"Mirror site for local viewing","text":"

          From the man page: to download a single page and all its requisites (even if they exist on separate websites), and make sure the lot displays properly locally, this author likes to use a few options in addition to -p:

          wget -E -H -k -K -p \"https://$site/$dir\"\n
          "},{"location":"wget/#download-all-images-from-a-site","title":"Download all images from a site","text":"

          To politely download all images from within a current remote directory:

          wget \\\n  --accept \".jpg\" \\\n  --adjust-extension \\\n  --continue \\\n  --no-parent \\\n  --random-wait \\\n  --recursive \\\n  --timestamping \\\n  --tries=0 \\\n  --wait=2 \\\n  --waitretry=30 \\\n  \"https://$site/$dir/\"\n
          "},{"location":"wget/#simple-use-of-cookies","title":"Simple use of cookies","text":"

          Some servers that need referrers and cookies can be accessed by doing:

          wget --save-cookies=\"cookies.txt\" \"foo.html\"\nwget --load-cookies=\"cookies.txt\" --referer=\"foo.html\" \"foo.mp3\"\n
          "},{"location":"wget/#set-default-behavior","title":"Set default behavior","text":"

          ~/.wgetrc sets default parameter values

          tries=0\ncontinue=1\n
          "},{"location":"wget/#see-also","title":"See Also","text":"
          • aria2
          • curl
          • httpstat - download and show some useful connection information
          "},{"location":"winbind/","title":"winbind","text":"

          These examples may only work on Samba 3. See info about Winbindd here: https://wiki.samba.org/index.php/Configuring_Winbindd_on_a_Samba_AD_DC

          "},{"location":"winbind/#examples","title":"Examples","text":""},{"location":"winbind/#ping-the-winbind-servers","title":"Ping the winbind servers","text":"

wbinfo -p\n

          "},{"location":"winbind/#list-the-domain-users","title":"list the domain users","text":"

wbinfo -u\n

          "},{"location":"winbind/#try-authenticating-the-user-against-winbind","title":"try authenticating the user against winbind","text":"

wbinfo -a dhoherd\n

          "},{"location":"wireshark/","title":"Wireshark","text":"

Wireshark is the new name for what was Ethereal. It is a graphical packet sniffer that uses the same libraries as tcpdump.

          "},{"location":"wireshark/#display-filters","title":"Display Filters","text":"

          Display filters have their own syntax, whereas capture filters use tcpdump syntax.

          See also: https://www.wireshark.org/docs/man-pages/wireshark-filter.html

          "},{"location":"wireshark/#filter-by-packet-data-content","title":"Filter by packet data content","text":"

          Display all packets that contain \"foo\" in the data section:

          data contains foo\n

contains is a simple substring match, whereas matches is a Perl-compatible regex match.
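For example, a hypothetical regex match on packet data:

data matches \"^foo[0-9]+\"\n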

          "},{"location":"wireshark/#display-hosts-within-a-given-subnet","title":"Display hosts within a given subnet","text":"
          ip.addr == 10.57.8.244/30\n
          "},{"location":"wireshark/#display-data-within-a-port-range","title":"Display data within a port range","text":"

To see all ceph-osd data:

          tcp.port >= 6800 and tcp.port <= 7300\n
          "},{"location":"wireshark/#show-only-dns-traffic-about-a-certain-host","title":"Show only dns traffic about a certain host","text":"
          dns.qry.name contains www.rmi.net || dns.resp.name contains www.rmi.net\n
          "},{"location":"wireshark/#show-all-dns-queries-that-do-not-have-a-response","title":"Show all dns queries that do not have a response","text":"

          In order for this to work you must perform the capture on the client side, or capture traffic from all DNS servers and combine it.

          dns && (dns.flags.response == 0) && ! dns.response_in\n
          "},{"location":"wireshark/#io-graphing","title":"IO graphing","text":"

Within the Statistics -> IO Graph window, you can create graphs that illustrate trends in traffic.

          "},{"location":"wireshark/#dns-response-time-stats","title":"DNS response time stats","text":"

          Create graphs that have the following data:

Graph Name   | Display Filter | Style | Y Axis       | Y Field  | SMA Period
AVG DNS Time | dns            | line  | AVG(Y Field) | dns.time | 10 interval SMA
MAX DNS Time | dns            | line  | MAX(Y Field) | dns.time | 10 interval SMA
MIN DNS Time | dns            | line  | MIN(Y Field) | dns.time | 10 interval SMA
"},{"location":"wireshark/#see-also","title":"See also","text":"
          • tcpdump
          "},{"location":"wonder-workshop/","title":"wonder-workshop","text":"

          Wonder Workshop

          "},{"location":"wonder-workshop/#dash","title":"Dash","text":"

          Dash is a tripod robot with two wheels and a caster, with IR I/O, bluetooth, microphone, speaker, LEDs, and proximity sensors.

          "},{"location":"wonder-workshop/#links","title":"Links","text":"
          • https://github.com/playi/WonderPy - Python API for Wonder Dash, Dot and Cue.
          • https://www.makewonder.com/robots/dash/
          "},{"location":"wuzz/","title":"wuzz","text":"

          \"Interactive cli tool for HTTP inspection\" - https://github.com/asciimoo/wuzz

          "},{"location":"xargs/","title":"xargs","text":"

xargs builds and runs commands using STDIN as arguments, which by default are appended to the end of the command.

          "},{"location":"xargs/#examples","title":"Examples","text":""},{"location":"xargs/#handle-spaces-and-use-the-arg-as-something-other-than-the-last-token","title":"Handle spaces and use the arg as something other than the last token","text":"

The -I argument takes a placeholder string that is replaced with each input item, which lets you use the arg as something other than the last token. The -print0 arg causes find to terminate each result with a null byte, which allows it to handle filenames with characters that might not play nicely with the shell. We then have to use xargs -0 to make xargs also handle the null terminated input. Lots of commands have a feature like this, so be on the lookout for it.

          find . -maxdepth 1 -type f -print0 |\nxargs -0 -I {} mv \"{}\" ~/some/dir/\n
          "},{"location":"xargs/#tun-3-concurrent-processes-each-consuming-5-results","title":"Tun 3 concurrent processes, each consuming 5 results","text":"
          find /dir/with/large/files -type f -print0 |\nxargs -0 -n5 -P3 sha256sum\n

          This would run 3 instances of sha256sum with each instance operating on 5 files. Since sha256sum is single-threaded, this would speed things up by using multiple CPU cores instead of being bound to a single CPU core.

          "},{"location":"xargs/#use-sed-to-change-git-files-containing-a-certain-string","title":"use sed to change git files containing a certain string","text":"

This uses GNU sed -i; on macOS you should use sed -i '' or gsed -i. The -z on git grep causes it to null terminate the entries so xargs -0 will work.

          git grep -z -l 192.168.5 |\nxargs -0 sed -i 's/192.168.5/172.18.0/g'\n
          "},{"location":"xargs/#issue-the-same-command-several-times-in-parallel","title":"Issue the same command several times in parallel","text":"

This takes one directory as input and starts a sub-shell that cd's to the directory and runs a command. Up to 4 subshells are run in parallel. This is very similar to the GNU parallel command.

          find ~/code/ -mindepth 1 -maxdepth 1 -type d -print0 |\nxargs -0 -I {} -n1 -P4 bash -c \"cd {} ; make install-hooks ;\"\n
          "},{"location":"yaml/","title":"yaml","text":"

          \"YAML Ain't Markup Language\" - https://yaml.org/

          "},{"location":"yaml/#links","title":"Links","text":"
          • https://www.yaml.org/
          • https://www.yaml.org/refcard.html
          • https://en.wikipedia.org/wiki/YAML
          • http://www.yamllint.com/
          • https://yaml-multiline.info: Good reference of different multi-line syntaxes.
          • https://play.yaml.io/main/parser: See how different parsers treat the same snip of yaml.
          • https://matrix.yaml.info: Table of yaml 1.2 test results for various parsers. Surprisingly, as of 2022-02 only two pass 100%.
          • https://github.com/crdoconnor/strictyaml: \"StrictYAML is a type-safe YAML parser that parses and validates a restricted subset of the YAML specification.\"
          • https://hitchdev.com/strictyaml/why-not/toml: Why StrictYAML is better than TOML.
          "},{"location":"youtube-dl/","title":"youtube-dl","text":"

          \"Command-line program to download videos from YouTube.com and other video sites\" - https://github.com/rg3/youtube-dl/

          "},{"location":"youtube-dl/#examples","title":"Examples","text":""},{"location":"youtube-dl/#show-available-media-formats","title":"Show available media formats","text":"
          youtube-dl -F 'https://youtu.be/LdCq6y1Uu5Y'\n
          "},{"location":"youtube-dl/#download-the-best-quality-within-resolution-bounds","title":"Download the best quality within resolution bounds","text":"
          youtube-dl -f 'bestvideo[height<=480]+bestaudio' 'https://youtu.be/-kgTCpv_W64'\n
          "},{"location":"youtube-dl/#download-the-best-quality-m4a-version","title":"Download the best quality m4a version","text":"
          youtube-dl -f 'bestaudio[ext=m4a]' 'https://youtu.be/0ZII3Cu_Uo4'\n
          "},{"location":"youtube-dl/#download-songs-longer-than-30-minutes","title":"Download songs longer than 30 minutes","text":"

The --match-filter only alters what gets downloaded, not what is displayed when using options like -j.

          youtube-dl -f bestaudio --match-filter 'duration > 1800' 'https://www.youtube.com/user/boyceavenue'\n
          "},{"location":"youtube-dl/#reformat-a-youtube-channel-into-lines-as-url-title","title":"Reformat a youtube channel into lines as \"URL - Title\"","text":"
          youtube-dl -j --flat-playlist 'https://www.youtube.com/watch?v=CHMZW9kLpg0&list=PL1B627337ED6F55F0' |\n  jq -S -r '\"https://youtu.be/\" + .id + \" - \" + .title'\n
          "},{"location":"yum/","title":"yum","text":"

          \"yum - Yellowdog Updater Modified\" - man yum

          \"yum is an interactive, rpm based, package manager.\" - man yum

          "},{"location":"yum/#examples","title":"Examples","text":""},{"location":"yum/#show-a-list-of-enabled-repositories","title":"Show a list of enabled repositories","text":"
          yum repolist\n
          "},{"location":"yum/#show-a-list-of-available-repositories","title":"Show a list of available repositories","text":"
          yum repolist all\n
          "},{"location":"yum/#show-all-installed-packages-their-versions-and-their-source-repo","title":"Show all installed packages, their versions and their source repo","text":"
          yum list installed\n
          "},{"location":"yum/#list-available-packages-and-the-repo-they-come-from","title":"List available packages and the repo they come from","text":"
          yum list available | grep jre\n
          "},{"location":"yum/#show-all-duplicates-in-a-search","title":"Show all duplicates in a search","text":"

This is a good way to get a complete list of available packages that match a certain string.

          yum --showduplicates search thrift\n
          "},{"location":"yum/#query-available-packages-in-a-given-repository","title":"Query available packages in a given repository","text":"
          yum --disablerepo=\"*\" --enablerepo=\"epel\" list available\n
          "},{"location":"yum/#upgrade-and-skip-broken-dependencies","title":"Upgrade and skip broken dependencies","text":"
          yum upgrade -y --skip-broken\n
          "},{"location":"yum/#upgrade-and-skip-certain-packages","title":"Upgrade and skip certain packages","text":"
          yum upgrade --exclude=*rabbitmq*\n
          "},{"location":"yum/#check-for-package-conflicts","title":"Check for package conflicts","text":"

Install yum-utils, then run package-cleanup.

          "},{"location":"yum/#find-a-package-that-includes-a-specific-command-or-file","title":"Find a package that includes a specific command or file","text":"
          yum whatprovides \"*/filename\"\n
          "},{"location":"yum/#check-for-groups-of-packages","title":"Check for groups of packages","text":"

yum grouplist\n

          "},{"location":"yum/#enable-optional-installs-in-groups","title":"Enable optional installs in groups","text":"

          Add group_package_types=mandatory,default,optional in /etc/yum.conf
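A sketch of where that line lives in /etc/yum.conf:

[main]\ngroup_package_types=mandatory,default,optional\n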

          "},{"location":"yum/#download-but-do-not-install-packages-for-update","title":"Download but do not install packages for update","text":"
          yum upgrade --downloadonly --skip-broken\n
          "},{"location":"yum/#install-a-local-file-using-yum","title":"Install a local file using yum","text":"
          yum localinstall whatever.rpm\n
          "},{"location":"yum/#auto-updates-for-centos5","title":"Auto-updates for Centos5","text":"
          yum install yum-updatesd\n
          "},{"location":"yum/#auto-updates-for-centos6","title":"Auto-updates for Centos6","text":"
          yum install yum-cron\n
          "},{"location":"yum/#see-also","title":"See Also","text":"
          • rpm - interact with rpms directly
          • https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/sec-Managing_Yum_Repositories.html
          "},{"location":"zerotier/","title":"ZeroTier","text":"

          \"ZeroTier delivers the capabilities of VPNs, SDN, and SD-WAN with a single system. Manage all your connected resources across both local and wide area networks as if the whole world is a single data center.\" - https://www.zerotier.com/

          "},{"location":"zfs/","title":"ZFS","text":"

ZFS is the Zettabyte File System.

          "},{"location":"zfs/#links","title":"Links","text":"
          • OpenZFS - http://open-zfs.org
          • Tuning Guide - https://web.archive.org/web/20161223004915/http://www.solarisinternals.com/wiki/index.php/ZFS_Evil_Tuning_Guide
          • Hardware recommendations - http://blog.zorinaq.com/?e=10
          • Mac ZFS - http://code.google.com/p/maczfs/
          • Shadow migration feature - http://docs.oracle.com/cd/E23824_01/html/821-1448/gkkud.html
          • Speed tuning - http://icesquare.com/wordpress/how-to-improve-zfs-performance/
          • ZFS RAID levels - https://web.archive.org/web/20201120053331/http://www.zfsbuild.com/2010/05/26/zfs-raid-levels/
          • http://en.wikipedia.org/wiki/ZFS
          • http://wiki.freebsd.org/ZFSQuickStartGuide
          • http://www.solarisinternals.com/wiki/index.php/ZFS_Best_Practices_Guide
          • http://zfsguru.com
          • http://zfsonlinux.org/faq.html
          • https://web.archive.org/web/20190603150811/http://www.oracle.com/technetwork/articles/servers-storage-admin/o11-113-size-zfs-dedup-1354231.html
          • http://wiki.freebsd.org/ZFSTuningGuide#Deduplication
          • Corruption / failure to import - https://github.com/zfsonlinux/zfs/issues/2457
          • https://www.percona.com/blog/2018/05/15/about-zfs-performance/
          • https://wiki.freebsd.org/ZFSTuningGuide
          • https://freebsdfoundation.org/blog/raid-z-expansion-feature-for-zfs
          • https://www.binwang.me/2023-12-14-ZFS-Profiling-on-Arch-Linux.html
          • https://despairlabs.com/blog/posts/2024-10-27-openzfs-dedup-is-good-dont-use-it
          "},{"location":"zfs/#tips","title":"Tips","text":""},{"location":"zfs/#memory","title":"Memory","text":"
          • For normal operation, 1 GB of memory per TB of disk space is suitable.
          • For dedup operation, 5 GB of memory per TB of addressable disk space is suitable (see the worked example below).
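For example, by these rules of thumb a hypothetical 10 TB pool would want about 10 GB of RAM for normal operation, or about 50 GB with dedup enabled.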
          "},{"location":"zfs/#log-devices","title":"Log devices","text":"
          • Use a log device if you have lots of writes.
          • Mirror it, because if you lose it you lose the whole volume.
          • Speed and latency are most important, not size. Log flushes every 5 seconds.
          • Get SLC if possible, otherwise MLC
          "},{"location":"zfs/#l2arc-cache-devices","title":"l2arc Cache devices","text":"
          • Use if you have lots of reads.
          • Size does matter: bigger devices can cache more data for faster reads.
          • Speed and latency matter.
          • Mirroring l2arc does not matter because if it fails, reads come from the spinning disks.
          • Too big of a device can suck up resources and cause poor performance. See https://wiki.freebsd.org/ZFSTuningGuide

          Good explanation: https://web.archive.org/web/20160324170916/https://blogs.oracle.com/brendan/entry/test

          "},{"location":"zfs/#zdb","title":"zdb","text":""},{"location":"zfs/#show-the-potential-savings-of-turning-on-dedupe-on-zpool-tank","title":"Show the potential savings of turning on dedupe on zpool tank","text":"

          https://web.archive.org/web/20130217052412/http://hub.opensolaris.org/bin/view/Community+Group+zfs/dedup

          zdb -S tank\n
          "},{"location":"zfs/#show-transactions-and-human-readable-dates-in-the-zdb-history","title":"Show transactions and human readable dates in the zdb history","text":"

          Use zdb -e for pools that are not mounted.

          zdb -hh tank \\\n| egrep 'txg|time' \\\n| while read -r _ a b ; do\n  if [ \"$a\" == \"time:\" ] ; then\n    date -d @$b \"+$a %F %T\" ;\n  else\n    echo \"$a  $b\" ;\n  fi ;\ndone\n
          "},{"location":"zfs/#zpool","title":"zpool","text":""},{"location":"zfs/#create-a-zpool-and-its-base-filesystem","title":"Create a zpool and its base filesystem","text":"
          zpool create -f -o cachefile=/tmp/zpool.cache zpoolname /dev/ada1 #create a zpool\n
          "},{"location":"zfs/#add-a-cache-device-to-a-pool","title":"Add a cache device to a pool","text":"
          ## add ada0p3 as a cache device to the tank zpool\nzpool add tank cache ada0p3\n
          "},{"location":"zfs/#show-all-configured-zpool-options-for-a-given-zpool","title":"Show all configured zpool options for a given zpool","text":"
          zpool get all tank\n
          "},{"location":"zfs/#show-history-of-all-operations-on-a-given-pool","title":"Show history of all operations on a given pool","text":"
          ## show history of operations on the pool, eg: snapshots, attribute changes\nzpool history\n
          "},{"location":"zfs/#show-real-time-statistics-on-a-given-zpool","title":"Show real time statistics on a given zpool","text":"
          ## show per-device statistics every 1 second\nzpool iostat -v 1\n
          "},{"location":"zfs/#show-basic-information-about-all-imported-zpools","title":"Show basic information about all imported zpools","text":"
          ## show zpool space info, deduplication ratio and health\nzpool list\n
          "},{"location":"zfs/#show-deduplication-tables","title":"Show deduplication tables","text":"
          ## show deduplication table entries. Take entries * size / 1024 / 1024 to calculate DDT consumption\nzpool status -D z2\n
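For example, with hypothetical numbers, 1048576 DDT entries at 320 bytes each would consume 1048576 * 320 / 1024 / 1024 = 320 MB of RAM.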
          "},{"location":"zfs/#import-a-pool-by-different-disk-path","title":"Import a pool by different disk path","text":"

          You can change the paths your pool is imported from. This is useful if you created your zpool using /dev/sdN when you should have used /dev/disk/by-id/, which is deterministic. The -d option lets you specify a directory to look within for the given pool's devices.

          zpool import -d /dev/disk/by-id/ \"$ZPOOL_NAME\"\n

          You may find that your pool was imported using links from this path that are not desirable, because there are several options available. For instance, you may find that your pool was imported using wwn links (EG: wwn-0x5000cca22eca1056) that are not very user friendly compared to a link that shows the model and serial number (EG: scsi-SATA_HGST_HMS5C4141BM_PM1302LAGR5A0F). Because these links are managed by udev and are created when the disk is seen by the system, either at boot or at insertion, and because nothing else should be referencing these symlinks, they are safe to delete. Export your pool, then delete unwanted symlinks for the devices related to your pool, leaving only the symlinks you want to use, then run zpool import -d once again.
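A sketch of that procedure, using the example wwn link above:

zpool export \"$ZPOOL_NAME\"\n## udev will recreate deleted links at the next boot or disk insertion\nrm /dev/disk/by-id/wwn-0x5000cca22eca1056\nzpool import -d /dev/disk/by-id/ \"$ZPOOL_NAME\"\n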

          "},{"location":"zfs/#replace-a-disk-in-a-zpool","title":"Replace a disk in a zpool","text":"
          ## Replace the first disk with the second in the tank pool\nzpool replace -f tank /dev/disk/by-id/ata-ST3000DM001-9YN166_W1F09CW9 /dev/disk/by-id/ata-ST3000DM001-9YN166_Z1F0N9S7\n
          "},{"location":"zfs/#real-example","title":"Real example","text":"
          $ zpool replace -f tank /dev/disk/by-id/ata-HGST_HDN724040ALE640_PK1334PCJY9ASS /dev/disk/by-id/ata-HGST_HUH728080ALE600_VKHA6YDX\n$ zpool status\n  pool: home\n state: ONLINE\n  scan: scrub repaired 0 in 0h0m with 0 errors on Sun Dec 10 00:24:07 2017\nconfig:\n\n        NAME                                             STATE     READ WRITE CKSUM\n        home                                             ONLINE       0     0     0\n          ata-M4-CT064M4SSD2_0000000012170908F759-part4  ONLINE       0     0     0\n\nerrors: No known data errors\n\n  pool: tank\n state: DEGRADED\nstatus: One or more devices is currently being resilvered.  The pool will\n        continue to function, possibly in a degraded state.\naction: Wait for the resilver to complete.\n  scan: resilver in progress since Mon Jan  8 19:57:45 2018\n    47.1M scanned out of 13.7T at 6.72M/s, 592h39m to go\n    11.5M resilvered, 0.00% done\nconfig:\n\n        NAME                                           STATE     READ WRITE CKSUM\n        tank                                           DEGRADED     0     0     0\n          raidz1-0                                     DEGRADED     0     0     0\n            replacing-0                                UNAVAIL      0     0     0\n              ata-HGST_HDN724040ALE640_PK1334PCJY9ASS  UNAVAIL      0     1     0  corrupted data\n              ata-HGST_HUH728080ALE600_VKHA6YDX        ONLINE       0     0     0  (resilvering)\n            ata-HGST_HDN724040ALE640_PK2334PEHG8LAT    ONLINE       0     0     0\n            ata-HGST_HDN724040ALE640_PK2334PEHGD37T    ONLINE       0     0     0\n            ata-HGST_HDN724040ALE640_PK2338P4H3TJPC    ONLINE       0     0     0\n\nerrors: No known data errors\n
          "},{"location":"zfs/#expand-a-zpool-in-place-after-replacing-disks-with-larger-disks","title":"Expand a zpool in place after replacing disks with larger disks","text":"

          Expansion happens automatically if you have done zpool set autoexpand=on tank. If you did not do that and you find your pool has not expanded, you can perform the following:

          List the absolute paths of your devices with something like:

          zpool list -v -PH | awk '$1 ~ \"^\\/dev\\/\" {gsub(\"-part1\",\"\",$1) ; print $1 ;}'\n

          Then go through your device list and run:

          zpool online -e tank <disk-name> # do the expansion\nzpool list -v tank # check the EXPANDSZ column for the disk\n

          After doing this for each device, your pool should be expanded.
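
          The two steps can be combined into a loop (a sketch, assuming the pool is named tank and the data devices use a -part1 partition suffix as above):

          pool=tank\nzpool list -v -PH \"$pool\" | awk '$1 ~ \"^\\/dev\\/\" {gsub(\"-part1\",\"\",$1) ; print $1 ;}' | while read -r disk ; do zpool online -e \"$pool\" \"$disk\" ; done\nzpool list -v \"$pool\"   ## verify the new size\n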

          "},{"location":"zfs/#zfs_1","title":"zfs","text":""},{"location":"zfs/#show-differences-between-current-filesystem-state-and-snapshot-state","title":"show differences between current filesystem state and snapshot state","text":"
          zfs diff tank@snap tank\n
          "},{"location":"zfs/#show-configured-properties-for-a-filesystem","title":"Show configured properties for a filesystem","text":"
          zfs get all\n
          "},{"location":"zfs/#show-custom-filesystem-attributes","title":"Show custom filesystem attributes","text":"
          ## show custom attributes that override inherited attributes\nzfs get all -s local tank\n
          "},{"location":"zfs/#show-an-overview-of-all-mounted-zfs-filesystems","title":"Show an overview of all mounted zfs filesystems","text":"
          ## show disk space including free physical disk space and mount info\nzfs list\n
          "},{"location":"zfs/#show-specified-fields-of-each-filesystem","title":"Show specified fields of each filesystem","text":"
          ## show the listed fields of all filesystems\nzfs list -t all -o name,referenced,used,written,creation,userused@root\n
          "},{"location":"zfs/#show-only-snapshots","title":"Show only snapshots","text":"
          zfs list -t snapshot\n
          "},{"location":"zfs/#show-space-consumed-by-file-owner","title":"Show space consumed by file owner","text":"
          zfs userspace tank\n
          "},{"location":"zfs/#disable-atime-updates-for-a-filesystem","title":"Disable atime updates for a filesystem","text":"
          zfs set atime=off tank\n
          "},{"location":"zfs/#set-compression-to-lz4-for-a-filesystem","title":"Set compression to lz4 for a filesystem","text":"
          zfs set compression=lz4 tank\n
          "},{"location":"zfs/#set-deduplication-to-enabled-for-a-filesystem","title":"Set deduplication to enabled for a filesystem","text":"
          zfs set dedup=on tank\n
          "},{"location":"zfs/#set-a-filesystem-to-readonly","title":"Set a filesystem to readonly","text":"
          zfs set readonly=on zpoolname/dataset\n
          "},{"location":"zfs/#set-a-filesystem-to-allow-nfs-sharing","title":"Set a filesystem to allow NFS sharing","text":"
          zfs set sharenfs=on tank\n
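          sharenfs also accepts NFS export options directly, e.g. to grant read-write access to a hypothetical subnet:
          zfs set sharenfs='rw=@10.0.0.0/24' tank\n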
          "},{"location":"zfs/#create-a-dataset","title":"Create a dataset","text":"
          ## create a dataset 'sole' on zpool 'tank'\nzfs create tank/sole\n
          "},{"location":"zfs/#destroy-multiple-snapshots","title":"Destroy multiple snapshots","text":"
          zfs destroy tank@20130413-weekly,20130420-weekly,20130428-weekly,20130505-weekly\n
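          Newer ZFS can also destroy a contiguous range of snapshots with the % syntax (shown here with the same hypothetical snapshot names):
          zfs destroy tank@20130413-weekly%20130505-weekly\n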
          "},{"location":"zfs/#zfs-send-receive","title":"zfs send / receive","text":"

          Replicate a zpool (use the latest snapshot name as the source) to a blank zpool:

          zfs send -v -D -R tank@20120907-oldest | zfs receive -F -v z2\n
          • -D enables a deduplicated stream.
          • -R enables a recursive send of all snapshots and filesystems up to that point.
          • -F enables deletion of any snapshots on the target that don't exist on the sender.
          • -v enables verbose mode.
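          After the initial full replication, subsequent runs only need to send the changes between two snapshots; a sketch, assuming a newer hypothetical snapshot tank@20121001-monthly exists on the sender:
          zfs send -v -R -i tank@20120907-oldest tank@20121001-monthly | zfs receive -F -v z2\n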
          "},{"location":"zfs/#recursively-zfs-send-a-filesystem-to-a-remote-host-and-receive-it-as-a-new-dataset","title":"recursively zfs send a filesystem to a remote host and receive it as a new dataset","text":"
          zfs send -v -D -R z1@20120907-oldest | ssh otherhost zfs receive -v z2/z1\n
          "},{"location":"zfs/#show-summary-of-what-would-be-sent","title":"Show summary of what would be sent","text":"

          This shows an entire dataset up to the given snapshot.

          zfs send -n -v -D -R tank@20140531-monthly\n
          "},{"location":"zfs/#show-the-space-differences-between-two-snapshots","title":"Show the space differences between two snapshots","text":"
          zfs send -n -v -D -i tank@20140531-monthly tank@20141031-monthly\n
          "},{"location":"zfs/#show-the-amount-of-new-space-consumed-by-each-monthly","title":"Show the amount of new space consumed by each monthly","text":"
          zfs list -t snapshot -o name | grep 'tank@.*monthly' | while read -r X ; do [[ ! $a =~ .*monthly ]] && a=$X || zfs send -n -v -D -i \"$a\" \"$X\" && a=$X ; done 2>&1 | grep send\n
          "},{"location":"zfs/#complex-examples","title":"Complex examples","text":""},{"location":"zfs/#create-a-raidz-called-tank","title":"Create a raidz called tank","text":"

          Create a raidz pool from 4 disks and set some properties:

          pool=tank\nzpool create -f -o ashift=12 \"${pool}\" raidz /dev/disk/by-id/scsi-SATA_HGST_HDN724040A_PK2338P4H*-part1\nzfs set dedup=on \"${pool}\"\nzpool set listsnapshots=on \"${pool}\"\nzfs set atime=off \"${pool}\"\nzfs set compression=lz4 \"${pool}\"\n
          "},{"location":"zfs/#create-a-case-insensitive-raidz3-out-of-50-files","title":"Create a case insensitive raidz3 out of 50 files","text":"
          pool=tank\nfor X in {1..50} ; do mkfile -n 2g ${pool}.$X ; done ;\nsudo zpool create -O casesensitivity=insensitive ${pool} raidz3 \"${PWD}/${pool}\".{1..50}\n
          "},{"location":"zfs/#troubleshooting","title":"Troubleshooting","text":""},{"location":"zfs/#mount-a-pool-that-is-giving-you-trouble","title":"Mount a pool that is giving you Trouble","text":"
          zpool import -o failmode=continue -o readonly=on zpool_name\n

          This helped me get read access to a pool that was kernel panicking with the following error when I tried to import it normally:

          Dec  7 14:48:40 localhost kernel: PANIC: blkptr at ffff8803fddb4200 DVA 0 has invalid OFFSET 294940902907904\n
          "},{"location":"zfs/#zfs-on-mac-os-x","title":"ZFS on Mac OS X","text":"
          • http://openzfsonosx.org
          "},{"location":"zfs/#create-a-zfs-partition-on-devdisk3","title":"Create a ZFS partition on /dev/disk3","text":"
          ## Must eject the device in Disk Utility first\ndiskutil partitiondisk /dev/disk3 GPTFormat ZFS %noformat% 100% # strange syntax, but works\nzpool create backups1 /dev/disk3s2 # create the zpool\nmdutil -i off /Volumes/backups1 # required on MacZFS since Spotlight does not function\n
          "},{"location":"zfs/#zfs-on-linux","title":"ZFS on Linux","text":"
          • If you get module errors: modprobe zfs ; ldconfig
          • If you get permission denied, check SELinux settings
          "},{"location":"zfs/#centos-6-repository","title":"CentOS 6 Repository","text":"
          sudo yum install -y epel-release # assumes later CentOS 6 where EPEL is provided upstream\nsudo yum localinstall --nogpgcheck http://archive.zfsonlinux.org/epel/zfs-release.el6.noarch.rpm\nsudo yum install zfs -y\n
          "},{"location":"zfs/#reinstalling-when-things-fail","title":"Reinstalling when things fail","text":"
          ##!/bin/bash -x\nyum install -y kernel-devel-$(uname -r)\nzfs_version=0.6.5.4\ndkms remove  -m zfs -v \"${zfs_version}\" --all\ndkms remove  -m spl -v \"${zfs_version}\" --all\ndkms add     -m spl -v \"${zfs_version}\" --force\ndkms add     -m zfs -v \"${zfs_version}\" --force\ndkms install -m spl -v \"${zfs_version}\" --force\ndkms install -m zfs -v \"${zfs_version}\" --force\n
          "},{"location":"zfs/#inspect-the-rpm-for-what-scripts-it-runs","title":"Inspect the rpm for what scripts it runs","text":"

          This is useful for debugging failures after a kernel upgrade.

          rpm -q --scripts zfs-dkms\n
          "},{"location":"zfs/#building-on-centos-6","title":"Building on CentOS 6","text":"
          yum groupinstall \"Development tools\" && yum install -y libuuid-devel zlib-devel bc lsscsi mdadm parted kernel-debug\n## For spl, then again for zfs:\n./configure && make && make rpm && rpm -i *64.rpm\n
          "},{"location":"zookeeper/","title":"zookeeper","text":"

          ZooKeeper is a high-performance coordination service for distributed applications. - https://zookeeper.apache.org/doc/current/

          "},{"location":"zookeeper/#examples","title":"Examples","text":""},{"location":"zookeeper/#the-four-letter-words","title":"The four letter words","text":"

          \"ZooKeeper responds to a small set of commands. Each command is composed of four letters. You issue the commands to ZooKeeper via telnet or nc, at the client port.\" - https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands

          "},{"location":"zookeeper/#lists-brief-details-for-the-server-and-connected-clients","title":"Lists brief details for the server and connected clients","text":"

          echo 'stat' | nc localhost 2181

          "},{"location":"zookeeper/#view-a-list-of-variables-that-could-be-used-for-monitoring-the-health-of-the-cluster","title":"View a list of variables that could be used for monitoring the health of the cluster","text":"

          echo 'mntr' | nc localhost 2181

          "},{"location":"zookeeper/#list-full-details-for-the-server","title":"List full details for the server","text":"

          echo 'srvr' | nc localhost 2181
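
          Note that newer ZooKeeper releases (3.4.10+) can restrict the four letter words with a whitelist, and 3.5+ enables only srvr by default, so you may need a line like the following in zoo.cfg before the commands above will work:

          4lw.commands.whitelist=stat, mntr, srvr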

          "},{"location":"zookeeper/#links","title":"Links","text":"
          • https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html
          "},{"location":"zsh/","title":"zsh","text":"

          \"Zsh is a UNIX command interpreter (shell) usable as an interactive login shell and as a shell script command processor.\" - man zsh

          One big caveat to using zsh is that it has no syntax linter. There is an open shellcheck GitHub issue about this.
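
          As a partial workaround, zsh can parse a script without executing it, which catches syntax errors but not the style and correctness issues a real linter would (some-script.zsh is a hypothetical filename):

          zsh -n some-script.zsh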

          "},{"location":"zsh/#links","title":"Links","text":"
          • https://ohmyz.sh
          • https://github.com/unixorn/awesome-zsh-plugins
          • https://github.com/unixorn/zsh-quickstart-kit
          "}]} \ No newline at end of file diff --git a/security/index.html b/security/index.html index 758049281..6328847c3 100644 --- a/security/index.html +++ b/security/index.html @@ -7151,6 +7151,18 @@
