Compare commits: bc45e0df27...sepia
27 Commits
efaefe12e8
5d7fc719a6
1854e15c62
3ace2a993f
f2e1c4cd62
da288fc5d4
6208982bde
7c70cc7760
d59ed1c749
fb7cf5a3a5
7d8188f668
40ca1222f5
8164db5549
1f4be5334b
0dc9f4e2ad
4cb4793a09
12c45e6c15
02d8e689cf
de1504c70c
c07138bf1a
c9d1e57f2f
e4f0ecb807
648f5b62d7
f7941e8ef5
02495dde81
89f726317b
8760b31de9
.gitignore (vendored, 2 changed lines)
@@ -9,6 +9,7 @@
.idea/
.ipynb_checkpoints/
.ruff_cache/
current_ip
log/
logs/
borgmatic/borg/
@@ -36,6 +37,7 @@ openvpn-server/pki/
openvpn-server/*.ovpn
pgadmin/
rsnapshot/var/
seafile/ccnet/
seafile/seafile-data/
seafile/seafile-server-*
seafile/seahub-data/
@@ -12,13 +12,22 @@
}
}

uitgeest.veenboer.xyz \
herderin.veenboer.xyz \
peter.veenboer.xyz {
    reverse_proxy nginx
}
import unprotected esp host:6052
import unprotected grafana host:3333
import unprotected ha host:8123
import unprotected seafile host:8082

(protected) {
    {args[0]}.{$SUBDOMAIN}.{$DOMAIN} {
        log {
            output file /var/log/{args[0]}.log
        }
        basic_auth {
            user $2a$14$iNMen9TulKubYW8iXz5nFO./gQm0BzkaASYdhc48mSSGIc8GA4bxG
        }
        reverse_proxy {args[1]}
    }
}

import protected esp host:6052
import protected grafana host:3333

import sites/auth.caddy
collectd/docker/Dockerfile (new file, 38 lines)
@@ -0,0 +1,38 @@
FROM debian:bookworm

ENV DEBIAN_FRONTEND=noninteractive
ENV TIMEZONE=Europe/Amsterdam

RUN apt update --fix-missing
RUN apt dist-upgrade -y --no-install-recommends

RUN echo $TIMEZONE > /etc/timezone
RUN dpkg-reconfigure -f noninteractive tzdata

RUN apt install -y libsensors5 liblzo2-2 collectd btrfs-progs libatasmart4 speedtest-cli

RUN apt install -y smartmontools

RUN apt install -y wget git

ENV HDDTEMP_VERSION=0.3.1
RUN wget https://github.com/slowpeek/hddtemp/archive/refs/tags/${HDDTEMP_VERSION}.tar.gz \
    && tar xvf ${HDDTEMP_VERSION}.tar.gz && mv hddtemp-${HDDTEMP_VERSION}/hddtemp-lt /usr/sbin/hddtemp

RUN apt -y install make g++ python3 python3-dev python3-pybind11 cmake
COPY PMT /pmt
#RUN git clone https://git.astron.nl:/RD/pmt
RUN cmake -Spmt -Bpmt/build -DPMT_BUILD_RAPL=1 -DPMT_BUILD_BINARY=1 -DPMT_BUILD_PYTHON=1 -DCMAKE_INSTALL_PREFIX=/opt/pmt
RUN make -Cpmt/build -j install
ENV LD_LIBRARY_PATH="/opt/pmt/lib"
ENV PYTHONPATH="/opt/pmt/lib/python3.11/site-packages"
ENV PATH="$PATH:/opt/pmt/bin"
RUN apt -y remove make g++ python3-dev python3-pybind11 cmake
RUN apt autoremove -y

RUN apt -y install sudo
RUN adduser collectd
RUN usermod -aG sudo collectd
RUN echo 'collectd ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/collectd

CMD ["/usr/sbin/collectd", "-f"]
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
#LoadPlugin exec
#
#<Plugin "exec">
#    Exec nobody "/host/usr/local/bin/btrfs-data"
#</Plugin>
LoadPlugin exec

<Plugin "exec">
    Exec collectd "/host/usr/local/bin/btrfs-data"
</Plugin>
collectd/etc/collectd.conf.d/cpufreq-data.conf (new file, 5 lines)
@@ -0,0 +1,5 @@
LoadPlugin exec

<Plugin "exec">
    Exec collectd "/host/usr/local/bin/cpufreq-data"
</Plugin>
@@ -1,6 +1,5 @@
<Plugin df>
    MountPoint "/media/docker"
    MountPoint "/media/scratch"
    FSType "ext4"
    IgnoreSelected false
</Plugin>
@@ -1,6 +1,5 @@
LoadPlugin exec

<Plugin "exec">
    Exec nobody "/host/usr/local/bin/du-data"
    Exec collectd "/host/usr/local/bin/du-data"
</Plugin>
@@ -3,4 +3,3 @@ LoadPlugin exec
<Plugin "exec">
    Exec nobody "/host/usr/local/bin/speedtest-data"
</Plugin>
@@ -1,18 +1,18 @@
#!/usr/bin/python
#!/usr/bin/python3

#
# Imports
#
import sys
import time
import commands
import subprocess
import argparse


#
# Misc
#
#sys.tracebacklimit = 0
# sys.tracebacklimit = 0


#
@@ -28,21 +28,42 @@ size_snapshot_exclusive = 0
# Methods
#
def get_subvol_list(path):
    command = "btrfs subvolume list -t %s" % (path)
    status, output = commands.getstatusoutput(command)
    command = "sudo btrfs subvolume list -t %s" % (path)
    status, output = subprocess.getstatusoutput(command)

    if status is not 0:
    if status != 0:
        raise Exception(command)

    # Every line contains the following values: subvol_id, gen, toplevel, path
    return output.splitlines()[2:]


def get_filesystem_size(path):
    command = "sudo btrfs filesystem show --raw %s" % (path)
    status, output = subprocess.getstatusoutput(command)

    if status != 0:
        # This command fails when running inside Docker container
        # return maximum size of any filesystem instead
        command = "sudo btrfs filesystem show --raw"
        status, output = subprocess.getstatusoutput(command)
        lines = output.splitlines()
        lines = [x for x in lines if "devid" in x]
        sizes = [int(line.split()[3]) for line in lines]
        return max(sizes)

    # The sizes are on the third line
    line = output.splitlines()[2]

    # Element 3 and 5 respectively contain total and used sizes
    return int(line.split()[3])


def get_id_root(name, path):
    lines = get_subvol_list(path)

    # Filter lines where toplevel == 5
    subvol_ids = filter(lambda x: int(x.split()[2]) == 5, lines)
    subvol_ids = [x for x in lines if int(x.split()[2]) == 5]

    # Try to retrieve the subvol_id for the root subvolume (if any)
    if len(subvol_ids) == 1:
@@ -51,28 +72,30 @@ def get_id_root(name, path):
    else:
        # The path contains a btrfs filesystem with multiple subvolumes for data
        try:
            return int(filter(lambda x: x.split()[3] == name, subvol_ids)[0].split()[0])
            return int(list(filter(lambda x: x.split()[3] == name, subvol_ids))[0].split()[0])
        except IndexError:
            pass

    # Volume not found, root is probably the btrfs default (5)
    return 5


def get_id_subvolumes(path, subvol_id):
    lines = get_subvol_list(path)
    lines = filter(lambda x: int(x.split()[2]) == subvol_id, lines)
    return list(map(lambda x: int(x.split()[0]), lines))
    lines = [x for x in lines if int(x.split()[2]) == subvol_id]
    return list([int(x.split()[0]) for x in lines])


def get_disk_usage(name, path):
    id_root = get_id_root(name, path)
    id_subvolumes = get_id_subvolumes(path, id_root)
    size_filesystem = get_filesystem_size(path)

    command = "btrfs qgroup show --raw %s" % (path)
    status, output = commands.getstatusoutput(command)
    # Get disk usage from quota
    command = "sudo btrfs qgroup show --raw %s" % (path)
    status, output = subprocess.getstatusoutput(command)

    if status is not 0:
    if status != 0:
        raise Exception(command)

    lines = output.splitlines()[2:]
@@ -94,16 +117,28 @@ def get_disk_usage(name, path):
    # -> data that is not (yet) incorporated in a snapshot
    size_data_exclusive = 0

    # Data exclusively available in snapshots
    # -> data that was removed from volume
    size_snapshot_exclusive = 0

    for line in lines:
        split = line.split()
        subvol_id = int(split[0].split("/")[1])
        size_total = float(split[1])
        size_exclusive = float(split[2])
        subvol_id = 0
        size_total = 0
        size_exclusive = 0
        try:
            subvol_id = int(split[0].split("/")[1])
            size_total = float(split[1])
            size_exclusive = float(split[2])
        except IndexError:
            # ignore "WARNING: Quota disabled"
            pass

        # size_exclusive is incorrect when snapshot is
        # removed and qgroups are not updated yet,
        # ignore the value when it seems unrealistic
        if size_exclusive > size_filesystem:
            size_exclusive = 0

        if subvol_id == id_root:
            size_data_total = size_total
@@ -112,27 +147,57 @@ def get_disk_usage(name, path):
            size_snapshot_total += size_total
            size_snapshot_exclusive += size_exclusive


def rescan_quota(path):
    command = "btrfs quota rescan %s" % (path)
    status, output = commands.getstatusoutput(command)
    if status is not 0:
    command = "sudo btrfs quota rescan %s" % (path)
    status, output = subprocess.getstatusoutput(command)
    if status != 0:
        Exception(command)


def print_human_readable(name):
    global size_data_total
    global size_data_exclusive
    global size_snapshot_exclusive
    size_data_total = size_data_total / (1024*1e6)
    size_data_exclusive = size_data_exclusive / (1024*1e6)
    size_snapshot_exclusive = size_snapshot_exclusive / (1024*1e6)
    print "%10s: %6.1f Gb, %6.1f Gb, %6.1f Gb" % (name, size_data_total, size_data_exclusive, size_snapshot_exclusive)
    size_data_total = size_data_total / (1024 * 1e6)
    size_data_exclusive = size_data_exclusive / (1024 * 1e6)
    size_snapshot_exclusive = size_snapshot_exclusive / (1024 * 1e6)
    print(
        "%10s: %6.1f Gb, %6.1f Gb, %6.1f Gb"
        % (name, size_data_total, size_data_exclusive, size_snapshot_exclusive)
    )


def print_rrd(name):
    timestamp = int(time.time())
    print("PUTVAL {}/exec-btrfs_{}/gauge-data_total {}:{:.1f}".format(hostname, name, timestamp, size_data_total))
    print("PUTVAL {}/exec-btrfs_{}/gauge-data_exclusive {}:{:.1f}".format(hostname, name, timestamp, size_data_exclusive))
    print("PUTVAL {}/exec-btrfs_{}/gauge-snapshot_total {}:{:.1f}".format(hostname, name, timestamp, size_snapshot_total))
    print("PUTVAL {}/exec-btrfs_{}/gauge-snapshot_exclusive {}:{:.1f}".format(hostname, name, timestamp, size_snapshot_exclusive))
    print(
        (
            "PUTVAL {}/exec-btrfs_{}/gauge-data_total {}:{:.1f}".format(
                hostname, name, timestamp, size_data_total
            )
        )
    )
    print(
        (
            "PUTVAL {}/exec-btrfs_{}/gauge-data_exclusive {}:{:.1f}".format(
                hostname, name, timestamp, size_data_exclusive
            )
        )
    )
    print(
        (
            "PUTVAL {}/exec-btrfs_{}/gauge-snapshot_total {}:{:.1f}".format(
                hostname, name, timestamp, size_snapshot_total
            )
        )
    )
    print(
        (
            "PUTVAL {}/exec-btrfs_{}/gauge-snapshot_exclusive {}:{:.1f}".format(
                hostname, name, timestamp, size_snapshot_exclusive
            )
        )
    )


#
@@ -143,19 +208,18 @@ interval = 10
volumes = list()

# 275 GB SSD
volumes.append(["@", "/host/root/"])
volumes.append(["@home", "/host/root/home"])
volumes.append(["opt", "/host/root/opt"])
volumes.append(["home", "/host/root/home"])

# 2x 4TB HDD
volumes.append(["data", "/host/root/media/data"])

volumes.append(["backup", "/host/root/media/backup"])
volumes.append(["seafile", "/host/root/media/seafile"])

#
# Command line arguments
#
parser = argparse.ArgumentParser(description='Get BTRFS disk usage')
parser.add_argument('-s', action='store_true', help='print in human readable format')
parser = argparse.ArgumentParser(description="Get BTRFS disk usage")
parser.add_argument("-s", action="store_true", help="print in human readable format")
args = parser.parse_args()
human_readable = args.s

@@ -163,7 +227,7 @@ human_readable = args.s
#
# Main
#
if (human_readable):
if human_readable:
    for (name, path) in volumes:
        get_disk_usage(name, path)
        print_human_readable(name)
@@ -172,9 +236,9 @@ else:
    while True:
        for (name, path) in volumes:
            get_disk_usage(name, path)


            print_rrd(name)

        sys.stdout.flush()
        time.sleep(interval)
        #rescan_quota(path)
        # rescan_quota(path)
collectd/usr/local/bin/cpufreq-data (new executable file, 57 lines)
@@ -0,0 +1,57 @@
#!/usr/bin/python3
import argparse
import time
import sys
import os

hostname = "sepia"
measurement_interval = 5


def get_cpu_frequencies():
    frequencies = []
    try:
        cpu_dirs = [
            d
            for d in os.listdir("/sys/devices/system/cpu/")
            if d.startswith("cpu") and d[3:].isdigit()
        ]
        for cpu_dir in cpu_dirs:
            with open(
                f"/sys/devices/system/cpu/{cpu_dir}/cpufreq/scaling_cur_freq", "r"
            ) as f:
                frequency = int(f.read().strip()) / 1000  # Convert Hz to MHz
                frequencies.append((int(cpu_dir[3:]), frequency))
    except Exception as e:
        print("Error:", e)
    return frequencies


def main():
    parser = argparse.ArgumentParser(description="Query CPU frequencies.")
    parser.add_argument(
        "-s",
        "--human-readable",
        action="store_true",
        help="Print frequencies in human-readable format",
    )
    args = parser.parse_args()

    if args.human_readable:
        frequencies = get_cpu_frequencies()
        for cpu, frequency in frequencies:
            print(f"CPU{cpu} Frequency: {frequency:.2f} MHz")
    else:
        while True:
            frequencies = get_cpu_frequencies()
            timestamp = int(time.time())
            for cpu, frequency in frequencies:
                print(
                    f"PUTVAL {hostname}/cpu-frequency/gauge-cpu{cpu} {timestamp}:{frequency:.0f}"
                )
            sys.stdout.flush()
            time.sleep(measurement_interval)


if __name__ == "__main__":
    main()
@@ -1,25 +1,77 @@
#!/bin/bash
COLLECTION=sepia
INTERVAL=90
#!/usr/bin/python3

DIRS=$(cat <<LIST
/host/root/media/data/Inverter
/host/root/media/data/Monique
/host/root/media/data/Music
/host/root/media/data/Peter
/host/root/media/data/Photographs
/host/root/media/data/Raw
/host/root/media/data/Sanne
/host/root/media/data/Wii
LIST
)
#
# Imports
#
import sys
import time
import subprocess
import argparse

while :; do
    SECONDS=0
    for DIR in $DIRS; do
        SIZE=$(du -cs $DIR | tail -1 | awk '{print $1}')
        NAME=$(echo $DIR | sed 's/.//' | tr / - )
        echo "PUTVAL $COLLECTION/exec-du-$NAME/gauge-size interval=$INTERVAL N:$SIZE"
    done
    sleep $((INTERVAL-$SECONDS))
done

#
# Methods
#
def get_disk_usage(path, human_readable):
    """disk usage in human readable format (e.g. '2,1GB')"""
    arguments = "-sh" if human_readable else "-s"
    command = "du %s %s" % (arguments, path)
    status, output = subprocess.getstatusoutput(command)

    if status != 0:
        raise Exception(command)

    disk_usage = output.split()[0]
    if not human_readable:
        # du reports in units of 1024 bytes, convert to plain number of bytes
        disk_usage = int(disk_usage) * 1024
    return disk_usage


#
# Directories to scan
#
hostname = "sepia"
interval = 10
directories = list()

directories.append(["inverter", "/host/root/media/data/Inverter"])
directories.append(["monique", "/host/root/media/data/Monique"])
directories.append(["music", "/host/root/media/data/Music"])
directories.append(["peter", "/host/root/media/data/Peter"])
directories.append(["photographs", "/host/root/media/data/Photographs"])
directories.append(["sanne", "/host/root/media/data/Sanne"])
directories.append(["wii", "/host/root/media/data/Wii"])

#
# Command line arguments
#
parser = argparse.ArgumentParser(description="Get DU disk usage")
parser.add_argument("-s", action="store_true", help="print in human readable format")
args = parser.parse_args()
human_readable = args.s


#
# Main
#
if human_readable:
    for (name, path) in directories:
        disk_usage = get_disk_usage(path, human_readable)
        print(("%s: %s" % (name, disk_usage)))
else:
    # RRD mode
    while True:
        for (name, path) in directories:
            disk_usage = get_disk_usage(path, human_readable)
            timestamp = int(time.time())
            size = float(disk_usage)
            print(
                (
                    "PUTVAL {}/exec-du_{}/gauge-size {}:{:.1f}".format(
                        hostname, name, timestamp, size
                    )
                )
            )
            sys.stdout.flush()
            time.sleep(interval)
@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/python3

#
# Imports
@@ -6,61 +6,58 @@
import sys
import time
import argparse
import pylikwid
import pmt

#
# Configuration
#
hostname = "sepia"
cpuid = 0
pinfo = pylikwid.getpowerinfo()
domainid = pinfo.get("domains").get("PKG").get("ID")
measurement_duration = 5
measurement_interval = 15
dinfo = pinfo.get("domains")
domain_names = dinfo.keys()
domain_ids = [domain['ID'] for domain in dinfo.values()]
pm = pmt.create("rapl")

#
# Command line arguments
#
parser = argparse.ArgumentParser(description='Get CPU power consumption')
parser.add_argument('-s', action='store_true', help='print in human readable format')
parser = argparse.ArgumentParser(description="Get CPU power consumption")
parser.add_argument("-s", action="store_true", help="print in human readable format")
args = parser.parse_args()
human_readable = args.s

#
# Methods
#
def get_power():
    start = list()
    end = list()
    power = list()
    for domain_id in domain_ids:
        e_start = pylikwid.startpower(cpuid, domain_id)
        start.append(e_start)
    time.sleep(measurement_duration)
    for domain_id in domain_ids:
        e_stop = pylikwid.stoppower(cpuid, domain_id)
        end.append(e_stop)
    for events in zip(start, end, domain_ids):
        joules = pylikwid.getpower(events[0], events[1], events[2])
        power.append(joules / measurement_duration)

    return dict(zip(domain_names, power))
#
# Methods
#
def get_power():
    time.sleep(measurement_duration)
    measurements = dict()
    state = pm.read()
    for i in range(state.nr_measurements()):
        name = state.name(i)
        watts = state.watts(i)
        measurements[name] = watts
    return measurements


def print_rrd(measurements):
    timestamp = int(time.time())
    for measurement in measurements.items():
    for measurement in list(measurements.items()):
        name = measurement[0].lower()
        power = measurement[1]
        print("PUTVAL {}/exec-power/gauge-{} {}:{:.1f}".format(hostname, name, timestamp, power))
        print(
            (
                "PUTVAL {}/exec-power/gauge-{} {}:{:.1f}".format(
                    hostname, name, timestamp, power
                )
            )
        )


#
# Main
#
if (human_readable):
    print get_power()
if human_readable:
    print(get_power())
else:
    while True:
        power = get_power()
@@ -1,12 +1,14 @@
#!/bin/bash
SPEEDTEST=/sbin/speedtest-cli

SPEEDTEST="/usr/bin/speedtest-cli --secure"
COLLECTION=sepia
INTERVAL=900

while :; do
    SECONDS=0
    SECONDS=0
    RESULT=($($SPEEDTEST | grep Mbit | cut -d' ' -f 2))
    echo "PUTVAL $COLLECTION/exec-speedtest/gauge-download interval=$INTERVAL N:${RESULT[0]}"
    echo "PUTVAL $COLLECTION/exec-speedtest/gauge-upload interval=$INTERVAL N:${RESULT[1]}"
    TIMESTAMP=$(date +%s)
    echo "PUTVAL $COLLECTION/exec-speedtest/gauge-download ${TIMESTAMP}:${RESULT[0]}"
    echo "PUTVAL $COLLECTION/exec-speedtest/gauge-upload ${TIMESTAMP}:${RESULT[1]}"
    sleep $((INTERVAL-$SECONDS))
done
@@ -5,17 +5,17 @@ services:
    container_name: caddy
    environment:
      - DOMAIN=veenboer.xyz
      - SUBDOMAIN=herderin
      - SUBDOMAIN=uitgeest
      - AWS_REGION=eu-west-1
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:?}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:?}
      - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID:?}
      - OAUTH_CLIENT_SECRET=${OAUTH_CLIENT_SECRET:?}
    image: caddy
    links:
      - nginx
    # links:
    #   - nginx
    ports:
      - 444:443
      - 443:443
    restart: unless-stopped
    volumes:
      - /opt/caddy/Caddyfile:/etc/caddy/Caddyfile
compose.collectd.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
services:
  collectd:
    build:
      context: /opt/collectd/docker
      dockerfile: Dockerfile
    container_name: collectd
    image: collectd:bookworm
    privileged: true
    restart: unless-stopped
    volumes:
      - /opt/collectd/etc:/etc/collectd
      - /opt/collectd/var:/var/lib/collectd
      - /opt/collectd/usr:/host/usr
      - /:/host/root
      - /media:/host/media
      - /media/jupiter/borg:/host/media/borg
      - /media/jupiter/rsnapshot:/host/media/rsnapshot
      - /var/lib/docker:/media/docker
      - /dev/mapper:/dev/mapper
@@ -1,9 +1,10 @@
services:
  grafana:
    container_name: grafana
    image: grafana/grafana:9.0.2
    image: grafana/grafana:11.4.0
    ports:
      - 3333:3000
    restart: unless-stopped
    volumes:
      - /opt/grafana:/var/lib/grafana
      - /opt/grafana/grafana.ini:/etc/grafana/grafana.ini
@@ -1,7 +1,7 @@
services:
  homeassistant:
    container_name: homeassistant
    image: homeassistant/home-assistant:2024.11
    image: homeassistant/home-assistant:2025.1.2
    network_mode: host
    privileged: true
    restart: unless-stopped
compose.seafile.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
services:
  seafile-mysql:
    image: mariadb:11.8.5
    container_name: seafile-mysql
    environment:
      - MYSQL_ROOT_PASSWORD=ROOT_PASSWORD
      - MYSQL_LOG_CONSOLE=true
      - MARIADB_AUTO_UPGRADE=1
    volumes:
      - /opt/seafile/database:/var/lib/mysql
    healthcheck:
      test:
        [
          "CMD",
          "/usr/local/bin/healthcheck.sh",
          "--connect",
          "--mariadbupgrade",
          "--innodb_initialized",
        ]
      interval: 20s
      start_period: 30s
      timeout: 5s
      retries: 10

  seafile-redis:
    image: redis:8.4.0
    container_name: seafile-redis
    ports:
      - "6379:6379"
    restart: unless-stopped
    command:
      - /bin/sh
      - -c
      - redis-server --requirepass "$$REDIS_PASSWORD"
    environment:
      - REDIS_PASSWORD=PASSWORD

  seafile-server:
    image: seafileltd/seafile-mc:13.0-latest
    container_name: seafile-server
    ports:
      - "8082:80"
    volumes:
      - /opt/seafile/server:/shared/seafile
      - /media/seafile:/shared/seafile/seafile-data
    environment:
      - DB_HOST=seafile-mysql
      - DB_PORT=3306
      - DB_ROOT_PASSWD=ROOT_PASSWORD
      - DB_PASSWORD=PASSWORD
      - TIME_ZONE=Europe/Amsterdam
      - JWT_PRIVATE_KEY=F23HhfNSNxPsDNB9UaGFLbfNku4F7DPB
      - SEAFILE_SERVER_HOSTNAME=seafile.uitgeest.veenboer.xyz
      - SEAFILE_SERVER_PROTOCOL=https
      - SEAFILE_MYSQL_DB_PORT=3306
      - SEAFILE_MYSQL_DB_SEAFILE_DB_NAME=seafile_db
      - SEAFILE_MYSQL_DB_SEAHUB_DB_NAME=seahub_db
      - SEAFILE_MYSQL_DB_CCNET_DB_NAME=ccnet_db
      - SEAFILE_MYSQL_DB_HOST=seafile-mysql
      - SEAFILE_MYSQL_DB_USER=seafile
      - SEAFILE_MYSQL_DB_PASSWORD=seafile
      - REDIS_HOST=seafile-redis
      - REDIS_PORT=6379
      - REDIS_PASSWORD=PASSWORD
    depends_on:
      - seafile-mysql
      - seafile-redis
@@ -9,4 +9,4 @@ services:
      - 6543:5432
    restart: unless-stopped
    volumes:
      - /opt/timescaledb:/var/lib/postgresql/data
      - /media/scratch/timescaledb:/var/lib/postgresql/data
compose.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
include:
  # Web
  - compose.caddy.yaml

  # Networking
  - compose.dns-ad-blocker.yaml

  # Backup
  - compose.borgmatic.yaml

  # Storage
  - compose.seafile.yaml
  - compose.collectd.yaml

  # Sensors
  - compose.esphome.yaml

  # Other
  - compose.homeassistant.yaml
  - compose.timescaledb.yaml
  - compose.grafana.yaml
@@ -1,15 +0,0 @@
services:
  collectd:
    container_name: collectd
    image: collectd:latest
    privileged: true
    restart: unless-stopped
    volumes:
      - /opt/collectd/etc:/etc/collectd
      - /opt/collectd/var:/var/lib/collectd
      - /opt/collectd/usr:/host/usr
      - /root/scripts/speedtest-cli:/sbin/speedtest-cli
      - /:/host/root
      - /media:/host/media
      - /var/lib/docker:/media/docker
      - /dev/mapper:/dev/mapper
@@ -1,13 +0,0 @@
services:
  nginx:
    container_name: nginx
    extra_hosts:
      - host:192.168.2.150
    image: nginx:1.25.4
    restart: unless-stopped
    volumes:
      - /opt/nginx/etc/.htpasswd:/host/etc/.htpasswd
      - /opt/nginx/etc/nginx/conf:/etc/nginx/conf
      - /opt/nginx/etc/nginx/conf.d:/etc/nginx/conf.d
      - /opt/nginx/var/log/nginx:/var/log/nginx
      - /opt/seafile/seafile-server-latest/seahub/media:/host/var/www/seafile
@@ -1,15 +0,0 @@
services:
  openvpn-server:
    cap_add:
      - NET_ADMIN
    container_name: openvpn-server
    extra_hosts:
      - host:192.168.2.150
    image: kylemanna/openvpn:2.4
    network_mode: bridge
    ports:
      - 443:443
    privileged: true
    restart: unless-stopped
    volumes:
      - /opt/openvpn-server:/etc/openvpn
@@ -1,13 +0,0 @@
services:
  seafile:
    container_name: seafile
    environment:
      - MODE=autorun
    image: gronis/seafile:9.0.4
    ports:
      - 8000:8000
      - 8082:8082
    restart: unless-stopped
    volumes:
      - /opt/seafile:/seafile:rw
      - /media/seafile:/seafile/seafile-data:rw
@@ -1,24 +0,0 @@
include:
  # Web
  - docker-compose.nginx.yaml
  - docker-compose.caddy.yaml

  # Networking
  - docker-compose.dns-ad-blocker.yaml
  - docker-compose.openvpn-server.yaml

  # Backup
  - docker-compose.borgmatic.yaml

  # Storage
  - docker-compose.seafile.yaml

  - docker-compose.collectd.yaml

  # Sensors
  - docker-compose.esphome.yaml

  # Other
  - docker-compose.homeassistant.yaml
  - docker-compose.timescaledb.yaml
  - docker-compose.grafana.yaml
@@ -21,8 +21,8 @@ wifi:
  networks:
    - ssid: "DD-WRT"
      password: "qwerty123"
    - ssid: "H369AC68342 2.4"
      password: "6A6277E455C9"
    - ssid: "Odido-2F24FB 2.4"
      password: "5WHSUXG7MP7WFQHM"


  # Enable fallback hotspot (captive portal) in case wifi connection fails
grafana/grafana.ini (new file, 1946 lines)
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
<?php
define('KEY', 'e8f868de4eb21a7c6a877f8197cc3ed3');
define('KEY', '1c0450532b2b8d6e7bc3480cb795de6a');
define('LIMIT_MINUTE', 10);
define('LIMIT_DAY', 500);
define('LIMIT_FILE', '/opt/inverter/data/openweathermap.json');
@@ -62,8 +62,6 @@ function openweathermap($iCity, $bDebug = false) {

    /* Perform actual call */
    $sUrl = sprintf('https://api.openweathermap.org/data/2.5/weather?id=%d&appid=%s', $iCity, KEY);
    $sUrl = sprintf('https://api.openweathermap.org/data/2.5/weather?id=%d&appid=%s', $iCity, KEY);
    // $sUrl = 'https://samples.openweathermap.org/data/2.5/weather?q=Uitgeeddst&appid=5fc7ebf9168bfbe9745920438e3b1';
    $sJSON = file_get_contents($sUrl);
    return json_decode($sJSON, true);
}
@@ -1 +0,0 @@
user:$apr1$PUlYgSSM$cZbvFGsKVt4SsAsB5Bifr1
@@ -1,16 +0,0 @@
server {
    listen 80;
    location /seafile {
        proxy_pass http://host:8000;
        include /etc/nginx/conf/proxy.conf;
    }
    location /seafhttp {
        rewrite ^/seafhttp(.*)$ $1 break;
        proxy_pass http://host:8082;
        client_max_body_size 0;
    }
    location /seafmedia {
        rewrite ^/seafmedia(.*)$ $1 break;
        root /host/var/www/seafile;
    }
}
@@ -1,10 +0,0 @@
# Proxy
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Ssl on;

# Websocket
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
@@ -1,21 +0,0 @@
# Enable SSL
ssl_certificate /host/etc/certs/certificate+intermediate.pem;
ssl_certificate_key /host/etc/certs/key.pem;
ssl_dhparam /host/etc/certs/dh.pem;

# Increased security, from https://cipherli.st/
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
ssl_ecdh_curve secp384r1;
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off;
ssl_stapling on;
ssl_stapling_verify on;
resolver_timeout 5s;
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload";
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;

# Redirect HTTP trafic
error_page 497 https://$host:$server_port$request_uri;
@@ -1,31 +0,0 @@
server 192.168.255.0 255.255.255.0
verb 3
key /etc/openvpn/pki/private/herderin.veenboer.xyz.key
ca /etc/openvpn/pki/ca.crt
cert /etc/openvpn/pki/issued/herderin.veenboer.xyz.crt
dh /etc/openvpn/pki/dh.pem
tls-auth /etc/openvpn/pki/ta.key
key-direction 0
keepalive 10 60
persist-key
persist-tun

proto tcp
# Rely on Docker to do port mapping, internally always 1194
port 443
dev tun0
status /tmp/openvpn-status.log

user nobody
group nogroup
comp-lzo no

### Route Configurations Below
route 192.168.254.0 255.255.255.0

### Push Configurations Below
push "block-outside-dns"
push "dhcp-option DNS 192.168.2.150"
push "comp-lzo no"

port-share host 444
@@ -1,25 +0,0 @@
declare -x OVPN_AUTH=
declare -x OVPN_CIPHER=
declare -x OVPN_CLIENT_TO_CLIENT=
declare -x OVPN_CN=herderin.veenboer.xyz
declare -x OVPN_COMP_LZO=0
declare -x OVPN_DEFROUTE=1
declare -x OVPN_DEVICE=tun
declare -x OVPN_DEVICEN=0
declare -x OVPN_DISABLE_PUSH_BLOCK_DNS=0
declare -x OVPN_DNS=1
declare -x OVPN_DNS_SERVERS=([0]="8.8.8.8" [1]="8.8.4.4")
declare -x OVPN_ENV=/etc/openvpn/ovpn_env.sh
declare -x OVPN_EXTRA_CLIENT_CONFIG=()
declare -x OVPN_EXTRA_SERVER_CONFIG=()
declare -x OVPN_FRAGMENT=
declare -x OVPN_KEEPALIVE='10 60'
declare -x OVPN_MTU=
declare -x OVPN_NAT=0
declare -x OVPN_PORT=1194
declare -x OVPN_PROTO=tcp
declare -x OVPN_PUSH=()
declare -x OVPN_ROUTES=([0]="192.168.254.0/24")
declare -x OVPN_SERVER=192.168.255.0/24
declare -x OVPN_SERVER_URL=tcp://herderin.veenboer.xyz
declare -x OVPN_TLS_CIPHER=
@@ -1,9 +0,0 @@
#!/usr/bin/bash
OVPN_DATA=/opt/openvpn-server
VERSION=2.4
CLIENTNAME=herderin
docker run -v /$OVPN_DATA:/etc/openvpn --rm kylemanna/openvpn:$VERSION ovpn_genconfig -u tcp://$CLIENTNAME.veenboer.xyz
docker run -v /$OVPN_DATA:/etc/openvpn --rm -it kylemanna/openvpn:$VERSION ovpn_initpki
docker run -v /$OVPN_DATA:/etc/openvpn -d -p 443:443/tcp --cap-add=NET_ADMIN kylemanna/openvpn:$VERSION
docker run -v /$OVPN_DATA:/etc/openvpn --rm -it kylemanna/openvpn easyrsa build-client-full $CLIENTNAME nopass
docker run -v /$OVPN_DATA:/etc/openvpn --rm kylemanna/openvpn:$VERSION ovpn_getclient $CLIENTNAME > $CLIENTNAME.ovpn
@@ -1,17 +0,0 @@
config_version 1.2

cmd_cp /bin/cp
cmd_rsync /usr/bin/rsync
cmd_ssh /usr/bin/ssh

retain daily 7
retain weekly 4
retain monthly 3

loglevel 3

link_dest 1

stop_on_stale_lockfile 0

exclude *.snapshot
@@ -1,10 +0,0 @@
include_conf /etc/rsnapshot/common.conf

logfile /var/log/docker.log
lockfile /var/run/rsnapshot/docker.pid

snapshot_root /host/scratch/docker

backup root@shuttle:/opt shuttle
backup root@sepia:/opt sepia
backup root@server:/opt server
@@ -1,37 +0,0 @@
include_conf /etc/rsnapshot/common.conf

logfile /var/log/os.log
lockfile /var/run/rsnapshot/os.pid

snapshot_root /host/scratch/os

backup root@shuttle:/bin shuttle
backup root@shuttle:/boot shuttle
backup root@shuttle:/etc shuttle
backup root@shuttle:/lib shuttle
backup root@shuttle:/lib64 shuttle
backup root@shuttle:/usr shuttle
backup root@shuttle:/var shuttle

backup root@sepia:/bin sepia
backup root@sepia:/boot sepia
backup root@sepia:/etc sepia
backup root@sepia:/lib sepia
backup root@sepia:/lib64 sepia
backup root@sepia:/usr sepia
backup root@sepia:/var sepia

backup root@server:/bin server
backup root@server:/boot server
backup root@server:/etc server
backup root@server:/lib server
backup root@server:/lib64 server
backup root@server:/usr server
backup root@server:/var server

exclude /var/cache/apt
exclude /var/lib/apt
exclude /var/lib/docker
exclude /var/lib/mlocate
exclude /usr/lib/debug
exclude /usr/share/locale
@@ -1,15 +0,0 @@
include_conf /etc/rsnapshot/common.conf

logfile /var/log/users.log
lockfile /var/run/rsnapshot/users.pid

snapshot_root /host/scratch/users

backup root@shuttle:/home shuttle
backup root@shuttle:/root shuttle

backup root@sepia:/home sepia
backup root@sepia:/root sepia

backup root@server:/home server
backup root@server:/root server
@@ -1,3 +0,0 @@
#! /bin/bash
. /etc/container_environment.sh
/usr/bin/nice -n 19 /usr/bin/ionice -c2 -n7 /usr/bin/rsnapshot -c /etc/rsnapshot/$1.conf $2
@@ -1,3 +0,0 @@
[General]
SERVICE_URL = https://herderin.veenboer.xyz/seafile
@@ -1 +0,0 @@
/seafile/conf
@@ -1,16 +0,0 @@
import os

daemon = True
workers = 5

# default localhost:8000
bind = "0.0.0.0:8000"

# Pid
pids_dir = '/opt/haiwen/pids'
pidfile = os.path.join(pids_dir, 'seahub.pid')

# for file upload, we need a longer timeout value (default is only 30s, too short)
timeout = 1200

limit_request_line = 8190
@@ -1,3 +0,0 @@
[fileserver]
port=8082
max_download_dir_size=8192
@@ -1,10 +0,0 @@
# -*- coding: utf-8 -*-
SECRET_KEY = "by-t3pizx0b7b^x#4q8be4b@4n1g8vxle#bl@+gq8*x!2jt*g7"
SERVE_STATIC = False
MEDIA_URL = '/seafmedia/'
COMPRESS_URL = MEDIA_URL
STATIC_URL = MEDIA_URL + 'assets/'
SITE_ROOT = '/seafile/'
LOGIN_URL = '/seafile/accounts/login/'
FILE_SERVER_ROOT = 'https://herderin.veenboer.xyz/seafhttp'
SERVICE_URL = 'https://herderin.veenboer.xyz/seafile'
@@ -1 +0,0 @@
9.0.4