Compare commits

128 Commits

| SHA1 |
|------|
| 6a54c05fbb |
| 1d0631417d |
| 847d3589c5 |
| ca53a4e084 |
| 006d3a1833 |
| ad3b622c23 |
| e51d6bd391 |
| 78c1978dd5 |
| 4ebb4d790e |
| 11f1456538 |
| be6797efc7 |
| 42fe066e1a |
| 9eb91cbe1a |
| 395884f643 |
| ef542ec821 |
| 254e4ee08c |
| 07273c3ebd |
| e04723d128 |
| 8f498f4960 |
| dd69f60fd0 |
| a5d599a52c |
| 66d203e72a |
| 7800bf73a8 |
| 5b39598487 |
| ed59260a10 |
| 7c70d121be |
| 213e768708 |
| 0696900d67 |
| 4fb90a22e3 |
| 6aa197dcae |
| 46f6c9fe99 |
| 2baaeb561b |
| 867cd7018a |
| 9c686913dd |
| d3c4dc3fb7 |
| af8b1c2387 |
| d37d96dc34 |
| 21d052523f |
| 22110df791 |
| c7f354d50c |
| 3bdfc94527 |
| 9473f10653 |
| e9ad1d7791 |
| f97ee9c8fc |
| 236948e483 |
| 948934ad0e |
| 3ef14e5522 |
| ee50ede2ea |
| b11579de98 |
| dc33c29158 |
| 6f9107087b |
| 01fd954252 |
| 995dbdac80 |
| 65de4d0534 |
| 51b28dcdc3 |
| 9512e29ed9 |
| 713e33eed1 |
| 4b16b02a7d |
| 3e9d7c0061 |
| 7480cb4cf7 |
| 58ffa752f3 |
| 2d7909546d |
| fec099ae81 |
| fd4fa84dc5 |
| 5ff3295658 |
| ca38ebe3fd |
| df5cb3c9c3 |
| eca64fb1d5 |
| 7ae1201c8f |
| a4e98dae46 |
| d4267f3ee6 |
| c02ca762ea |
| 10caf2310c |
| 7380e4328e |
| c1eaa33d9e |
| 45545953ed |
| 4c8c2ed3d6 |
| 6483af4ba9 |
| e6595c05d5 |
| fb1dbea1ee |
| f576ded038 |
| 54eaf55ee8 |
| 3119d2c7ea |
| e40f4ce41f |
| 576f22333e |
| 0f53436700 |
| c79636b1c2 |
| 8de9a89e56 |
| 801d1786ef |
| 7e5f8b6c8a |
| 98da86fc2e |
| 14cf6a953f |
| 6a5d3cb2f7 |
| 774a1c21a1 |
| d1a77df0e1 |
| c9c73f6cf2 |
| b216dcb3dd |
| d2e4042932 |
| 04a1ee0948 |
| c1b5f83f44 |
| 9d1422cbdf |
| 7b7f14bd82 |
| f72c0f7db3 |
| 76d8753f28 |
| 593db0e789 |
| 3d9e3cb88f |
| 995b944135 |
| e200c84345 |
| 04ff36c94d |
| 89704bc2a1 |
| 6395214fa0 |
| c8e000eb3e |
| c8e5676c01 |
| 6f11bc414c |
| 1fc5ac8378 |
| 87334fbfad |
| 0fac033657 |
| 7cd4e6fde0 |
| a22b56edbe |
| e42dc6aa86 |
| 86f703a36c |
| 71795cc7a2 |
| 4ef041bb54 |
| e1bbd42b50 |
| b905ab9dd9 |
| e3745d7fa8 |
| 164b5ebc44 |
| 70250f1fca |
````diff
@@ -2,7 +2,7 @@ Package: fastapi-dls
 Version: 0.0
 Architecture: all
 Maintainer: Oscar Krause oscar.krause@collinwebdesigns.de
-Depends: python3, python3-fastapi, python3-uvicorn, python3-dotenv, python3-dateutil, python3-jose, python3-sqlalchemy, python3-pycryptodome, python3-markdown, python3-httpx, uvicorn, openssl
+Depends: python3, python3-fastapi, python3-uvicorn, python3-dotenv, python3-dateutil, python3-jose, python3-sqlalchemy, python3-pycryptodome, python3-markdown, uvicorn, openssl
 Recommends: curl
 Installed-Size: 10240
 Homepage: https://git.collinwebdesigns.de/oscar.krause/fastapi-dls
````
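The dropped `python3-httpx` dependency can be double-checked on a built package; a quick sketch, assuming the `build.deb` artifact produced by the `build:apt` job further down:

```shell
# Inspect the control metadata of the built package (build.deb per the CI job)
dpkg-deb --info build.deb | grep Depends
```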
.DEBIAN/requirements-bookworm-12.txt (new file, 11 lines)

```
# https://packages.debian.org/hu/
fastapi==0.92.0
uvicorn[standard]==0.17.6
python-jose[pycryptodome]==3.3.0
pycryptodome==3.11.0
python-dateutil==2.8.2
sqlalchemy==1.4.46
markdown==3.4.1
python-dotenv==0.21.0
jinja2==3.1.2
httpx==0.23.3
```
.DEBIAN/requirements-ubuntu-23.04.txt (new file, 10 lines)

```
# https://packages.ubuntu.com
fastapi==0.91.0
uvicorn[standard]==0.15.0
python-jose[pycryptodome]==3.3.0
pycryptodome==3.11.0
python-dateutil==2.8.2
sqlalchemy==1.4.46
markdown==3.4.3
python-dotenv==0.21.0
jinja2==3.1.2
```
.DEBIAN/requirements-ubuntu-23.10.txt (new file, 10 lines)

```
# https://packages.ubuntu.com
fastapi==0.101.0
uvicorn[standard]==0.23.2
python-jose[pycryptodome]==3.3.0
pycryptodome==3.11.0
python-dateutil==2.8.2
sqlalchemy==1.4.47
markdown==3.4.4
python-dotenv==1.0.0
jinja2==3.1.2
```
.DEBIAN/requirements-ubuntu-24.04.txt (new file, 10 lines)

```
# https://packages.ubuntu.com
fastapi==0.101.0
uvicorn[standard]==0.27.1
python-jose[pycryptodome]==3.3.0
pycryptodome==3.20.0
python-dateutil==2.8.2
sqlalchemy==1.4.50
markdown==3.5.2
python-dotenv==1.0.1
jinja2==3.1.2
```
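Each of these distro-pinned sets can be smoke-tested outside CI; a minimal sketch, assuming a throwaway virtualenv (the path and the chosen file are arbitrary):

```shell
# Smoke-test one pinned requirement set in a throwaway venv
python3 -m venv /tmp/dls-venv
/tmp/dls-venv/bin/pip install -r .DEBIAN/requirements-ubuntu-24.04.txt
/tmp/dls-venv/bin/python -c "import fastapi, sqlalchemy; print(fastapi.__version__, sqlalchemy.__version__)"
```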
````diff
@@ -12,7 +12,7 @@ depends=('python' 'python-jose' 'python-starlette' 'python-httpx' 'python-fastap
 provider=("$pkgname")
 install="$pkgname.install"
 backup=('etc/default/fastapi-dls')
-source=('git+file:///builds/oscar.krause/fastapi-dls' # https://gitea.publichub.eu/oscar.krause/fastapi-dls.git
+source=("git+file://${CI_PROJECT_DIR}"
        "$pkgname.default"
        "$pkgname.service"
        "$pkgname.tmpfiles")
@@ -22,8 +22,9 @@ sha256sums=('SKIP'
            '3dc60140c08122a8ec0e7fa7f0937eb8c1288058890ba09478420fc30ce9e30c')

 pkgver() {
+  echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > $srcdir/$pkgname/version.env
   source $srcdir/$pkgname/version.env
-  echo ${VERSION}
+  echo $VERSION
 }

 check() {
````
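`pkgver()` now writes `version.env` itself from the CI variables before sourcing it; a sketch of what it effectively evaluates, with made-up values standing in for `$VERSION` and `$CI_COMMIT_SHA`:

```shell
# Hypothetical values standing in for the CI-provided variables
VERSION=1.3.5
CI_COMMIT_SHA=6a54c05fbb
printf 'VERSION=%s\nCOMMIT=%s\n' "$VERSION" "$CI_COMMIT_SHA" > version.env
source version.env
echo "$VERSION"   # -> 1.3.5, which makepkg uses as the package version
```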
48
.UNRAID/FastAPI-DLS.xml
Normal file
48
.UNRAID/FastAPI-DLS.xml
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
<?xml version="1.0"?>
|
||||||
|
<Container version="2">
|
||||||
|
<Name>FastAPI-DLS</Name>
|
||||||
|
<Repository>collinwebdesigns/fastapi-dls:latest</Repository>
|
||||||
|
<Registry>https://hub.docker.com/r/collinwebdesigns/fastapi-dls</Registry>
|
||||||
|
<Network>br0</Network>
|
||||||
|
<MyIP></MyIP>
|
||||||
|
<Shell>sh</Shell>
|
||||||
|
<Privileged>false</Privileged>
|
||||||
|
<Support/>
|
||||||
|
<Project/>
|
||||||
|
<Overview>Source:
|
||||||
|
https://git.collinwebdesigns.de/oscar.krause/fastapi-dls#docker
|
||||||
|

|
||||||
|
Make sure you create these certificates before starting the container for the first time:
|
||||||
|
```
|
||||||
|
# Check https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/main/#docker for more information:
|
||||||
|
WORKING_DIR=/mnt/user/appdata/fastapi-dls/cert
|
||||||
|
mkdir -p $WORKING_DIR
|
||||||
|
cd $WORKING_DIR
|
||||||
|
# create instance private and public key for singing JWT's
|
||||||
|
openssl genrsa -out $WORKING_DIR/instance.private.pem 2048 
|
||||||
|
openssl rsa -in $WORKING_DIR/instance.private.pem -outform PEM -pubout -out $WORKING_DIR/instance.public.pem
|
||||||
|
# create ssl certificate for integrated webserver (uvicorn) - because clients rely on ssl
|
||||||
|
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout $WORKING_DIR/webserver.key -out $WORKING_DIR/webserver.crt
|
||||||
|
```
|
||||||
|
</Overview>
|
||||||
|
<Category/>
|
||||||
|
<WebUI>https://[IP]:[PORT:443]</WebUI>
|
||||||
|
<TemplateURL/>
|
||||||
|
<Icon>https://git.collinwebdesigns.de/uploads/-/system/project/avatar/106/png-transparent-nvidia-grid-logo-business-nvidia-electronics-text-trademark.png?width=64</Icon>
|
||||||
|
<ExtraParams>--restart always</ExtraParams>
|
||||||
|
<PostArgs/>
|
||||||
|
<CPUset/>
|
||||||
|
<DateInstalled>1679161568</DateInstalled>
|
||||||
|
<DonateText/>
|
||||||
|
<DonateLink/>
|
||||||
|
<Requires/>
|
||||||
|
<Config Name="HTTPS Port" Target="" Default="443" Mode="tcp" Description="Same as DLS Port below." Type="Port" Display="always-hide" Required="true" Mask="false">443</Config>
|
||||||
|
<Config Name="App Cert" Target="/app/cert" Default="/mnt/user/appdata/fastapi-dls/cert" Mode="rw" Description="[REQUIRED] Read the description above to make this folder. You do not need to change the path." Type="Path" Display="always-hide" Required="true" Mask="false">/mnt/user/appdata/fastapi-dls/cert</Config>
|
||||||
|
<Config Name="DLS Port" Target="DSL_PORT" Default="443" Mode="" Description="Choose port you want to use. Make sure to change the HTTPS port above to match it." Type="Variable" Display="always-hide" Required="true" Mask="false">443</Config>
|
||||||
|
<Config Name="App database" Target="/app/database" Default="/mnt/user/appdata/fastapi-dls/data" Mode="rw" Description="[REQUIRED] Read the description above to make this folder. You do not need to change the path." Type="Path" Display="always-hide" Required="true" Mask="false">/mnt/user/appdata/fastapi-dls/data</Config>
|
||||||
|
<Config Name="DSL IP" Target="DLS_URL" Default="localhost" Mode="" Description="Put your container's IP (or your host's IP if it's shared)." Type="Variable" Display="always-hide" Required="true" Mask="false"></Config>
|
||||||
|
<Config Name="Time Zone" Target="TZ" Default="" Mode="" Description="Format example: America/New_York. MUST MATCH YOUR CURRENT TIMEZONE AND THE GUEST VMS TIMEZONE! Otherwise you'll get into issues, read the guide above." Type="Variable" Display="always-hide" Required="true" Mask="false"></Config>
|
||||||
|
<Config Name="Database" Target="DATABASE" Default="sqlite:////app/database/db.sqlite" Mode="" Description="Set to sqlite:////app/database/db.sqlite" Type="Variable" Display="advanced-hide" Required="true" Mask="false">sqlite:////app/database/db.sqlite</Config>
|
||||||
|
<Config Name="Debug" Target="DEBUG" Default="true" Mode="" Description="true to enable debugging, false to disable them." Type="Variable" Display="advanced-hide" Required="false" Mask="false">true</Config>
|
||||||
|
<Config Name="Lease" Target="LEASE_EXPIRE_DAYS" Default="90" Mode="" Description="90 days is the maximum value." Type="Variable" Display="advanced" Required="false" Mask="false">90</Config>
|
||||||
|
</Container>
|
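The template boils down to a plain `docker run`; a rough sketch of the equivalent, where the IP, timezone and host paths are example values rather than template defaults:

```shell
# Sketch: approximate docker-run equivalent of the unRAID template above (values are examples)
docker run -d --restart always --network br0 \
  -e DLS_URL=192.168.0.123 -e DLS_PORT=443 -e TZ=Europe/Berlin \
  -e DATABASE=sqlite:////app/database/db.sqlite \
  -p 443:443 \
  -v /mnt/user/appdata/fastapi-dls/cert:/app/cert \
  -v /mnt/user/appdata/fastapi-dls/data:/app/database \
  collinwebdesigns/fastapi-dls:latest
```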
.UNRAID/setup_vgpu_license.sh (new file, 197 lines)

```shell
#!/bin/bash

# This script automates the licensing of the vGPU guest driver
# on Unraid boot. Set the Schedule to: "At Startup of Array".
#
# Relies on FastAPI-DLS for the licensing.
# It assumes FeatureType=1 (vGPU), change it as you see fit in line <114>
#
# Requires `elfutils` to be installed in the system for `nvidia-gridd` to run
# To Install it:
# 1) You might find it here: https://packages.slackware.com/ (choose the 64bit version of Slackware)
# 2) Download the package and put it in /boot/extra to be installed on boot
# 3) a. Reboot to install it, OR
#    b. Run `upgradepkg --install-new /boot/extra/elfutils*`
# [i]: Make sure to have only one version of elfutils, otherwise you might run into issues

# Sources and docs:
# https://docs.nvidia.com/grid/15.0/grid-vgpu-user-guide/index.html#configuring-nls-licensed-client-on-linux
#

################################################
# MAKE SURE YOU CHANGE THESE VARIABLES         #
################################################

###### CHANGE ME!
# IP and PORT of FastAPI-DLS
DLS_IP=192.168.0.123
DLS_PORT=443
# Token folder, must be on a filesystem that supports
# linux filesystem permissions (eg: ext4,xfs,btrfs...)
TOKEN_PATH=/mnt/user/system/nvidia
PING=$(which ping)

# Check if the License is applied
if [[ "$(nvidia-smi -q | grep "Expiry")" == *Expiry* ]]; then
  echo " [i] Your vGPU Guest drivers are already licensed."
  echo " [i] $(nvidia-smi -q | grep "Expiry")"
  echo " [<] Exiting..."
  exit 0
fi

# Check if the FastAPI-DLS server is reachable
MAX_RETRIES=30
for i in $(seq 1 $MAX_RETRIES); do
  echo -ne "\r [>] Attempt $i to connect to $DLS_IP."
  if ping -c 1 $DLS_IP >/dev/null 2>&1; then
    echo -e "\n [*] Connection successful."
    break
  fi
  if [ $i -eq $MAX_RETRIES ]; then
    echo -e "\n [!] Connection failed after $MAX_RETRIES attempts."
    echo -e "\n [<] Exiting..."
    exit 1
  fi
  sleep 1
done

# Check if the token folder exists
if [ -d "${TOKEN_PATH}" ]; then
  echo " [*] Token Folder exists. Proceeding..."
else
  echo " [!] Token Folder does not exist or is not ready yet. Exiting."
  echo " [!] Token Folder Specified: ${TOKEN_PATH}"
  exit 1
fi

# Check if elfutils is installed, otherwise the nvidia-gridd service
# won't start
if [ "$(grep -R "elfutils" /var/log/packages/* | wc -l)" != 0 ]; then
  echo " [*] Elfutils is installed, proceeding..."
else
  echo " [!] Elfutils is not installed, downloading and installing..."
  echo " [!] Downloading elfutils to /boot/extra"
  echo " [i] This script will download elfutils from slackware64-15.0 repository."
  echo " [i] If you have a different version of Unraid (6.11.5), you might want to"
  echo " [i] download and install a suitable version manually from the slackware"
  echo " [i] repository, and put it in /boot/extra to be installed on boot."
  echo " [i] You may also install it by running: "
  echo " [i]   upgradepkg --install-new /path/to/elfutils-*.txz"
  echo ""
  echo " [>] Downloading elfutils from slackware64-15.0 repository:"
  wget -q -nc --show-progress --progress=bar:force:noscroll -P /boot/extra https://slackware.uk/slackware/slackware64-15.0/slackware64/l/elfutils-0.186-x86_64-1.txz 2>/dev/null \
    || { echo " [!] Error while downloading elfutils, please download it and install it manually."; exit 1; }
  echo ""
  if upgradepkg --install-new /boot/extra/elfutils-0.186-x86_64-1.txz
  then
    echo " [*] Elfutils installed and will be installed automatically on boot"
  else
    echo " [!] Error while installing, check logs..."
    exit 1
  fi
fi

echo " [~] Sleeping for 60 seconds before continuing..."
echo " [i] The script is waiting until the boot process settles down."

for i in {60..1}; do
  printf "\r [~] %d seconds remaining" "$i"
  sleep 1
done

printf "\n"

create_token () {
  echo " [>] Creating new token..."
  if ${PING} -c1 ${DLS_IP} > /dev/null 2>&1
  then
    # curl --insecure -L -X GET https://${DLS_IP}:${DLS_PORT}/-/client-token -o ${TOKEN_PATH}/client_configuration_token_"$(date '+%d-%m-%Y-%H-%M-%S')".tok || { echo " [!] Could not get the token, please check the server."; exit 1;}
    wget -q -nc -4c --no-check-certificate --show-progress --progress=bar:force:noscroll -O "${TOKEN_PATH}"/client_configuration_token_"$(date '+%d-%m-%Y-%H-%M-%S')".tok https://${DLS_IP}:${DLS_PORT}/-/client-token \
      || { echo " [!] Could not get the token, please check the server."; exit 1;}
    chmod 744 "${TOKEN_PATH}"/*.tok || { echo " [!] Could not chmod the tokens."; exit 1; }
    echo ""
    echo " [*] Token downloaded and stored in ${TOKEN_PATH}."
  else
    echo " [!] Could not get token, DLS server unavailable."
    exit 1
  fi
}

setup_run () {
  echo " [>] Setting up gridd.conf"
  cp /etc/nvidia/gridd.conf.template /etc/nvidia/gridd.conf || { echo " [!] Error configuring gridd.conf, did you install the drivers correctly?"; exit 1; }
  sed -i 's/FeatureType=0/FeatureType=1/g' /etc/nvidia/gridd.conf
  echo "ClientConfigTokenPath=${TOKEN_PATH}" >> /etc/nvidia/gridd.conf
  echo " [>] Creating /var/lib/nvidia folder structure"
  mkdir -p /var/lib/nvidia/GridLicensing
  echo " [>] Starting nvidia-gridd"
  if pgrep nvidia-gridd >/dev/null 2>&1; then
    echo " [!] nvidia-gridd service is running. Closing."
    sh /usr/lib/nvidia/sysv/nvidia-gridd stop
    stop_exit_code=$?
    if [ $stop_exit_code -eq 0 ]; then
      echo " [*] nvidia-gridd service stopped successfully."
    else
      echo " [!] Error while stopping nvidia-gridd service."
      exit 1
    fi

    # Kill the service if it does not close
    if pgrep nvidia-gridd >/dev/null 2>&1; then
      kill -9 "$(pgrep nvidia-gridd)" || {
        echo " [!] Error while closing nvidia-gridd service"
        exit 1
      }
    fi

    echo " [*] Restarting nvidia-gridd service."
    sh /usr/lib/nvidia/sysv/nvidia-gridd start

    if pgrep nvidia-gridd >/dev/null 2>&1; then
      echo " [*] Service started, PID: $(pgrep nvidia-gridd)"
    else
      echo -e " [!] Error while starting nvidia-gridd service. Use strace -f nvidia-gridd to debug.\n [i] Check if elfutils is installed.\n [i] strace is not installed by default."
      exit 1
    fi
  else
    sh /usr/lib/nvidia/sysv/nvidia-gridd start

    if pgrep nvidia-gridd >/dev/null 2>&1; then
      echo " [*] Service started, PID: $(pgrep nvidia-gridd)"
    else
      echo -e " [!] Error while starting nvidia-gridd service. Use strace -f nvidia-gridd to debug.\n [i] Check if elfutils is installed.\n [i] strace is not installed by default."
      exit 1
    fi
  fi
}

for token in "${TOKEN_PATH}"/*; do
  if [ "${token: -4}" == ".tok" ]
  then
    echo " [*] Tokens found..."
    setup_run
  else
    echo " [!] No Tokens found..."
    create_token
    setup_run
  fi
done

while true; do
  if nvidia-smi -q | grep "Expiry" >/dev/null 2>&1; then
    echo " [>] vGPU licensed!"
    echo " [i] $(nvidia-smi -q | grep "Expiry")"
    break
  else
    echo -ne " [>] vGPU not licensed yet... Checking again in 5 seconds\c"
    for i in {1..5}; do
      sleep 1
      echo -ne ".\c"
    done
    echo -ne "\r\c"
  fi
done

echo " [>] Done..."
exit 0
```
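Once the script has run, the licensing state can be confirmed manually inside the guest with the same check the script loops on:

```shell
# Shows the license expiry reported by the guest driver
nvidia-smi -q | grep "Expiry"
```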
````diff
@@ -1,7 +1,9 @@
+version: "2"
 plugins:
   bandit:
     enabled: true
   sonar-python:
     enabled: true
-  pylint:
-    enabled: true
+config:
+  tests_patterns:
+    - test/**
````
.gitlab-ci.yml (139 changed lines)

````diff
@@ -8,6 +8,9 @@ include:
 cache:
   key: one-key-to-rule-them-all

+variables:
+  DOCKER_BUILDX_PLATFORM: "linux/amd64,linux/arm64"
+
 build:docker:
   image: docker:dind
   interruptible: true
@@ -20,26 +23,38 @@ build:docker:
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
   tags: [ docker ]
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env # COMMIT=`git rev-parse HEAD`
+    - docker buildx inspect
+    - docker buildx create --use
   script:
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${CI_BUILD_REF}
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${CI_BUILD_REF}
+    - IMAGE=$CI_REGISTRY/$CI_PROJECT_PATH/$CI_COMMIT_REF_NAME:$CI_COMMIT_SHA
+    - docker buildx build --progress=plain --platform $DOCKER_BUILDX_PLATFORM --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE --push .
+    - docker buildx imagetools inspect $IMAGE
+    - echo "CS_IMAGE=$IMAGE" > container_scanning.env
+  artifacts:
+    reports:
+      dotenv: container_scanning.env

 build:apt:
   image: debian:bookworm-slim
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+    - if: $CI_COMMIT_TAG
+      variables:
+        VERSION: $CI_COMMIT_REF_NAME
     - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
       changes:
         - app/**/*
         - .DEBIAN/**/*
+        - .gitlab-ci.yml
+      variables:
+        VERSION: "0.0.1"
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+      variables:
+        VERSION: "0.0.1"
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
-    - source version.env
+    - echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
     # install build dependencies
     - apt-get update -qq && apt-get install -qq -y build-essential
     # create build directory for .deb sources
@@ -60,7 +75,7 @@ build:apt:
     # cd into "build/"
     - cd build/
   script:
-    # set version based on value in "$VERSION" (which is set above from version.env)
+    # set version based on value in "$CI_COMMIT_REF_NAME"
     - sed -i -E 's/(Version\:\s)0.0/\1'"$VERSION"'/g' DEBIAN/control
     # build
     - dpkg -b . build.deb
@@ -75,14 +90,21 @@ build:pacman:
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+    - if: $CI_COMMIT_TAG
+      variables:
+        VERSION: $CI_COMMIT_REF_NAME
     - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
       changes:
         - app/**/*
         - .PKGBUILD/**/*
+        - .gitlab-ci.yml
+      variables:
+        VERSION: "0.0.1"
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+      variables:
+        VERSION: "0.0.1"
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
+    #- echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
     # install build dependencies
     - pacman -Syu --noconfirm git
     # create a build-user because "makepkg" don't like root user
@@ -97,22 +119,37 @@ build:pacman:
     # download dependencies
     - source PKGBUILD && pacman -Syu --noconfirm --needed --asdeps "${makedepends[@]}" "${depends[@]}"
     # build
-    - sudo -u build makepkg -s
+    - sudo --preserve-env -u build makepkg -s
   artifacts:
     expire_in: 1 week
     paths:
       - "*.pkg.tar.zst"

 test:
-  image: python:3.11-slim-bullseye
+  image: $IMAGE
   stage: test
+  interruptible: true
   rules:
-    - if: $CI_COMMIT_BRANCH
+    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+    - if: $CI_COMMIT_TAG
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+    - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
+      changes:
+        - app/**/*
+        - test/**/*
   variables:
     DATABASE: sqlite:///../app/db.sqlite
+  parallel:
+    matrix:
+      - IMAGE: [ 'python:3.11-slim-bookworm', 'python:3.12-slim-bullseye' ]
+        REQUIREMENTS:
+          - requirements.txt
+          - .DEBIAN/requirements-bookworm-12.txt
+          - .DEBIAN/requirements-ubuntu-23.10.txt
+          - .DEBIAN/requirements-ubuntu-24.04.txt
   before_script:
-    - pip install -r requirements.txt
+    - apt-get update && apt-get install -y python3-dev gcc
+    - pip install -r $REQUIREMENTS
     - pip install pytest httpx
     - mkdir -p app/cert
     - openssl genrsa -out app/cert/instance.private.pem 2048
````
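A single cell of the new test matrix can be reproduced locally; a sketch mirroring the job's `before_script` (the image/requirements pairing is one example, and `openssl` is installed explicitly since slim images may lack it):

```shell
# Sketch: run one matrix cell (python:3.11-slim-bookworm × requirements-bookworm-12.txt) locally
docker run --rm -v "$PWD":/src -w /src python:3.11-slim-bookworm sh -c '
  apt-get update && apt-get install -y python3-dev gcc openssl \
  && pip install -r .DEBIAN/requirements-bookworm-12.txt pytest httpx \
  && mkdir -p app/cert \
  && openssl genrsa -out app/cert/instance.private.pem 2048 \
  && openssl rsa -in app/cert/instance.private.pem -outform PEM -pubout -out app/cert/instance.public.pem \
  && cd test && DATABASE=sqlite:///../app/db.sqlite pytest main.py'
```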
````diff
@@ -170,7 +207,7 @@ test:debian:

 test:ubuntu:
   extends: .test:linux
-  image: ubuntu:22.10
+  image: ubuntu:24.04

 test:archlinux:
   image: archlinux:base
@@ -188,6 +225,8 @@ test:archlinux:
     - pacman -U --noconfirm *.pkg.tar.zst

 code_quality:
+  variables:
+    SOURCE_CODE: app
   rules:
     - if: $CODE_QUALITY_DISABLED
       when: never
@@ -199,7 +238,8 @@ secret_detection:
     - if: $SECRET_DETECTION_DISABLED
       when: never
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+  before_script:
+    - git config --global --add safe.directory $CI_PROJECT_DIR

 semgrep-sast:
   rules:
@@ -209,11 +249,22 @@ semgrep-sast:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

 test_coverage:
-  extends: test
+  # extends: test
+  image: python:3.11-slim-bookworm
   allow_failure: true
+  stage: test
   rules:
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+  variables:
+    DATABASE: sqlite:///../app/db.sqlite
+  before_script:
+    - apt-get update && apt-get install -y python3-dev gcc
+    - pip install -r requirements.txt
+    - pip install pytest httpx
+    - mkdir -p app/cert
+    - openssl genrsa -out app/cert/instance.private.pem 2048
+    - openssl rsa -in app/cert/instance.private.pem -outform PEM -pubout -out app/cert/instance.public.pem
+    - cd test
   script:
     - pip install pytest pytest-cov
     - coverage run -m pytest main.py
@@ -232,7 +283,6 @@ container_scanning:
     - if: $CONTAINER_SCANNING_DISABLED
       when: never
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

 gemnasium-python-dependency_scanning:
   rules:
@@ -243,40 +293,34 @@ gemnasium-python-dependency_scanning:

 .deploy:
   rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
     - if: $CI_COMMIT_TAG
-      when: never

 deploy:docker:
   extends: .deploy
+  image: docker:dind
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+  tags: [ docker ]
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
-    - source version.env
-    - echo "Building docker image for commit ${COMMIT} with version ${VERSION}"
+    - echo "Building docker image for commit $CI_COMMIT_SHA with version $CI_COMMIT_REF_NAME"
+    - docker buildx inspect
+    - docker buildx create --use
   script:
-    - echo "GitLab-Registry"
+    - echo "========== GitLab-Registry =========="
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${VERSION}
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:latest
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${VERSION}
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:latest
-    - echo "Docker-Hub"
+    - IMAGE=$CI_REGISTRY/$CI_PROJECT_PATH
+    - docker buildx build --progress=plain --platform $DOCKER_BUILDX_PLATFORM --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:$CI_COMMIT_REF_NAME --push .
+    - docker buildx build --progress=plain --platform $DOCKER_BUILDX_PLATFORM --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:latest --push .
+    - echo "========== Docker-Hub =========="
     - docker login -u $PUBLIC_REGISTRY_USER -p $PUBLIC_REGISTRY_TOKEN
-    - docker build . --tag $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:${VERSION}
-    - docker build . --tag $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:latest
-    - docker push $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:${VERSION}
-    - docker push $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:latest
+    - IMAGE=$PUBLIC_REGISTRY_USER/$CI_PROJECT_NAME
+    - docker buildx build --progress=plain --platform $DOCKER_BUILDX_PLATFORM --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:$CI_COMMIT_REF_NAME --push .
+    - docker buildx build --progress=plain --platform $DOCKER_BUILDX_PLATFORM --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:latest --push .

 deploy:apt:
   # doc: https://git.collinwebdesigns.de/help/user/packages/debian_repository/index.md#install-a-package
   extends: .deploy
   image: debian:bookworm-slim
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   needs:
     - job: build:apt
       artifacts: true
@@ -316,18 +360,15 @@ deploy:pacman:
   extends: .deploy
   image: archlinux:base-devel
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   needs:
     - job: build:pacman
       artifacts: true
   script:
     - source .PKGBUILD/PKGBUILD
-    - source version.env
     # fastapi-dls-1.0-1-any.pkg.tar.zst
-    - BUILD_NAME=${pkgname}-${VERSION}-${pkgrel}-any.pkg.tar.zst
+    - BUILD_NAME=${pkgname}-${CI_COMMIT_REF_NAME}-${pkgrel}-any.pkg.tar.zst
     - PACKAGE_NAME=${pkgname}
-    - PACKAGE_VERSION=${VERSION}
+    - PACKAGE_VERSION=${CI_COMMIT_REF_NAME}
     - PACKAGE_ARCH=any
     - EXPORT_NAME=${BUILD_NAME}
     - 'echo "PACKAGE_NAME: ${PACKAGE_NAME}"'
@@ -339,19 +380,15 @@ deploy:pacman:
 release:
   image: registry.gitlab.com/gitlab-org/release-cli:latest
   stage: .post
-  needs:
-    - job: test
-      artifacts: true
+  needs: [ test ]
   rules:
     - if: $CI_COMMIT_TAG
-      when: never
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   script:
-    - echo "Running release-job for $VERSION"
+    - echo "Running release-job for $CI_COMMIT_TAG"
   release:
-    name: $CI_PROJECT_TITLE $VERSION
+    name: $CI_PROJECT_TITLE $CI_COMMIT_TAG
-    description: Release of $CI_PROJECT_TITLE version $VERSION
+    description: Release of $CI_PROJECT_TITLE version $CI_COMMIT_TAG
-    tag_name: $VERSION
+    tag_name: $CI_COMMIT_TAG
     ref: $CI_COMMIT_SHA
     assets:
       links:
````
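The multi-arch build/push that replaces the old `docker build`/`docker push` pairs can be exercised outside CI as well; a sketch with placeholder registry and version values:

```shell
# Sketch: multi-arch build as the pipeline now does it (registry path and tag are placeholders)
docker buildx create --use
docker buildx build --progress=plain \
  --platform linux/amd64,linux/arm64 \
  --build-arg VERSION=1.3.5 --build-arg COMMIT=$(git rev-parse HEAD) \
  --tag registry.example.com/oscar.krause/fastapi-dls:1.3.5 \
  --push .
docker buildx imagetools inspect registry.example.com/oscar.krause/fastapi-dls:1.3.5
```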
Dockerfile (11 changed lines)

````diff
@@ -1,17 +1,20 @@
 FROM python:3.11-alpine

+ARG VERSION
+ARG COMMIT=""
+RUN echo -e "VERSION=$VERSION\nCOMMIT=$COMMIT" > /version.env
+
 COPY requirements.txt /tmp/requirements.txt

 RUN apk update \
-  && apk add --no-cache --virtual build-deps gcc g++ python3-dev musl-dev \
-  && apk add --no-cache curl postgresql postgresql-dev mariadb-connector-c-dev sqlite-dev \
+  && apk add --no-cache --virtual build-deps gcc g++ python3-dev musl-dev pkgconfig \
+  && apk add --no-cache curl postgresql postgresql-dev mariadb-dev sqlite-dev \
   && pip install --no-cache-dir --upgrade uvicorn \
-  && pip install --no-cache-dir psycopg2==2.9.5 mysqlclient==2.1.1 pysqlite3==0.5.0 \
+  && pip install --no-cache-dir psycopg2==2.9.9 mysqlclient==2.2.4 pysqlite3==0.5.2 \
   && pip install --no-cache-dir -r /tmp/requirements.txt \
   && apk del build-deps

 COPY app /app
-COPY version.env /version.env
 COPY README.md /README.md

 HEALTHCHECK --start-period=30s --interval=10s --timeout=5s --retries=3 CMD curl --insecure --fail https://localhost/-/health || exit 1
````
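With `VERSION`/`COMMIT` now baked in at build time instead of copied from a pre-built `version.env`, a local image can be checked quickly; a sketch (tag and version value are placeholders):

```shell
# Build locally with the new build args, then print the baked-in version file
docker build --build-arg VERSION=dev --build-arg COMMIT=$(git rev-parse HEAD) -t fastapi-dls:dev .
docker run --rm --entrypoint cat fastapi-dls:dev /version.env
```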
README.md (192 changed lines)

````diff
@@ -2,19 +2,28 @@

 Minimal Delegated License Service (DLS).

-Compatibility tested with official DLS 2.0.1.
+Compatibility tested with official NLS 2.0.1, 2.1.0, 3.1.0. For Driver compatibility see [here](#setup-client).

 This service can be used without internet connection.
 Only the clients need a connection to this service on configured port.

 **Official Links**

-- https://git.collinwebdesigns.de/oscar.krause/fastapi-dls
-- https://gitea.publichub.eu/oscar.krause/fastapi-dls
-- Docker Image `collinwebdesigns/fastapi-dls:latest`
+* https://git.collinwebdesigns.de/oscar.krause/fastapi-dls (Private Git)
+* https://gitea.publichub.eu/oscar.krause/fastapi-dls (Public Git)
+* https://hub.docker.com/r/collinwebdesigns/fastapi-dls (Docker-Hub `collinwebdesigns/fastapi-dls:latest`)

 *All other repositories are forks! (which is not bad - just for information and bug reports)*

+[Releases & Release Notes](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/releases)
+
+**Further Reading**
+
+* [NVIDIA vGPU Guide](https://gitlab.com/polloloco/vgpu-proxmox) - This document serves as a guide to install NVIDIA vGPU host drivers on the latest Proxmox VE version
+* [vgpu_unlock](https://github.com/DualCoder/vgpu_unlock) - Unlock vGPU functionality for consumer-grade Nvidia GPUs.
+* [vGPU_Unlock Wiki](https://docs.google.com/document/d/1pzrWJ9h-zANCtyqRgS7Vzla0Y8Ea2-5z2HEi4X75d2Q) - Guide for `vgpu_unlock`
+* [Proxmox All-In-One Installer Script](https://wvthoog.nl/proxmox-vgpu-v3/) - Also known as `proxmox-installer.sh`
+
 ---

 [[_TOC_]]
@@ -25,32 +34,22 @@ Only the clients need a connection to this service on configured port.

 - 256mb ram
 - 4gb hdd
+- *maybe IPv6 must be disabled*

-Tested with Ubuntu 22.10 (from Proxmox templates), actually it's consuming 100mb ram and 750mb hdd.
+Tested with Ubuntu 22.10 (EOL!) (from Proxmox templates), actually it's consuming 100mb ram and 750mb hdd.

 **Prepare your system**

 - Make sure your timezone is set correctly on your fastapi-dls server and your client

-**HA Setup Notes**
-
-- only *failover mode* is supported by team-green (see *high availability* in official user guide)
-- make sure you're using same configuration on each node
-- use same `instance.private.pem` and `instance.private.key` on each node
-- add `cronjob` on each node with `curl -X GET --insecure https://localhost/-/ha/replicate`
-
-If you want to use *real* HA, you should use a proxy in front of this service and use a clustered database in backend.
-This is not documented and supported by me, but it *can* work. Please ask the community for help.
-Maybe the simplest solution for HA-ing this service is to use a Docker-Swarm with redundant storage and database.
-
 ## Docker

-Docker-Images are available here:
+Docker-Images are available here for Intel (x86), AMD (amd64) and ARM (arm64):

 - [Docker-Hub](https://hub.docker.com/repository/docker/collinwebdesigns/fastapi-dls): `collinwebdesigns/fastapi-dls:latest`
-- [GitLab-Registry](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/container_registry): `registry.git.collinwebdesigns.de/oscar.krause/fastapi-dls/main:latest`
+- [GitLab-Registry](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/container_registry): `registry.git.collinwebdesigns.de/oscar.krause/fastapi-dls:latest`

-The images include database drivers for `postgres`, `mysql`, `mariadb` and `sqlite`.
+The images include database drivers for `postgres`, `mariadb` and `sqlite`.

 **Run this on the Docker-Host**

@@ -76,7 +75,9 @@ docker run -e DLS_URL=`hostname -i` -e DLS_PORT=443 -p 443:443 -v $WORKING_DIR:/

 **Docker-Compose / Deploy stack**

-Goto [`docker-compose.yml`](docker-compose.yml) for more advanced example (with reverse proxy usage).
+See [`examples`](examples) directory for more advanced examples (with reverse proxy usage).
+
+> Adjust *REQUIRED* variables as needed

 ```yaml
 version: '3.9'
@@ -110,9 +111,10 @@ volumes:
   dls-db:
 ```

-## Debian/Ubuntu (manual method using `git clone` and python virtual environment)
+## Debian / Ubuntu / macOS (manual method using `git clone` and python virtual environment)

-Tested on `Debian 11 (bullseye)`, Ubuntu may also work.
+Tested on `Debian 11 (bullseye)`, `Debian 12 (bookworm)` and `macOS Ventura (13.6)`, Ubuntu may also work.
+**Please note that setup on macOS differs from Debian based systems.**

 **Make sure you are logged in as root.**

@@ -156,13 +158,15 @@ This is only to test whether the service starts successfully.

 ```shell
 cd /opt/fastapi-dls/app
-su - www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
+sudo -u www-data /opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app
 # or
-sudo -u www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
+su - www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
 ```

 **Create config file**

+> Adjust `DLS_URL` as needed (accessing from LAN won't work with 127.0.0.1)
+
 ```shell
 mkdir /etc/fastapi-dls
 cat <<EOF >/etc/fastapi-dls/env
````
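Whether a test instance actually came up can be verified against the health endpoint, the same one the Dockerfile `HEALTHCHECK` polls:

```shell
curl --insecure --fail https://localhost/-/health
```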
````diff
@@ -258,15 +262,18 @@ This is only to test whether the service starts successfully.
 BASE_DIR=/opt/fastapi-dls
 SERVICE_USER=dls
 cd ${BASE_DIR}
+sudo -u ${SERVICE_USER} ${BASE_DIR}/venv/bin/uvicorn main:app --app-dir=${BASE_DIR}/app
+# or
 su - ${SERVICE_USER} -c "${BASE_DIR}/venv/bin/uvicorn main:app --app-dir=${BASE_DIR}/app"
 ```

 **Create config file**

+> Adjust `DLS_URL` as needed (accessing from LAN won't work with 127.0.0.1)
+
 ```shell
 BASE_DIR=/opt/fastapi-dls
 cat <<EOF >/etc/fastapi-dls/env
-# Adjust DSL_URL as needed (accessing from LAN won't work with 127.0.0.1)
 DLS_URL=127.0.0.1
 DLS_PORT=443
 LEASE_EXPIRE_DAYS=90
@@ -311,7 +318,7 @@ EOF
 Now you have to run `systemctl daemon-reload`. After that you can start service
 with `systemctl start fastapi-dls.service` and enable autostart with `systemctl enable fastapi-dls.service`.

-## Debian/Ubuntu (using `dpkg`)
+## Debian / Ubuntu (using `dpkg` / `apt`)

 Packages are available here:

@@ -319,8 +326,11 @@ Packages are available here:

 Successfully tested with:

-- Debian 12 (Bookworm) (works but not recommended because it is currently in *testing* state)
-- Ubuntu 22.10 (Kinetic Kudu)
+- Debian 12 (Bookworm) (EOL: tba.)
+- Ubuntu 22.10 (Kinetic Kudu) (EOL: July 20, 2023)
+- Ubuntu 23.04 (Lunar Lobster) (EOL: January 2024)
+- Ubuntu 23.10 (Mantic Minotaur) (EOL: July 2024)
+- Ubuntu 24.04 (Noble Numbat) (EOL: April 2036)

 Not working with:

@@ -341,6 +351,7 @@ apt-get install -f --fix-missing
 ```

 Start with `systemctl start fastapi-dls.service` and enable autostart with `systemctl enable fastapi-dls.service`.
+Now you have to edit `/etc/fastapi-dls/env` as needed.

 ## ArchLinux (using `pacman`)

@@ -362,6 +373,20 @@ pacman -U --noconfirm fastapi-dls.pkg.tar.zst
 ```

 Start with `systemctl start fastapi-dls.service` and enable autostart with `systemctl enable fastapi-dls.service`.
+Now you have to edit `/etc/default/fastapi-dls` as needed.
+
+## unRAID
+
+1. Download [this xml file](.UNRAID/FastAPI-DLS.xml)
+2. Put it in /boot/config/plugins/dockerMan/templates-user/
+3. Go to Docker page, scroll down to `Add Container`, click on Template list and choose `FastAPI-DLS`
+4. Open terminal/ssh, follow the instructions in overview description
+5. Setup your container `IP`, `Port`, `DLS_URL` and `DLS_PORT`
+6. Apply and let it boot up
+
+*Unraid users must also make sure they have Host access to custom networks enabled if unraid is the vgpu guest*.
+
+Continue [here](#unraid-guest) for docker guest setup.

 ## Let's Encrypt Certificate (optional)

@@ -381,34 +406,29 @@ After first success you have to replace `--issue` with `--renew`.

 # Configuration

 | Variable | Default | Usage |
 |----------|---------|-------|
 | `DEBUG` | `false` | Toggles `fastapi` debug mode |
 | `DLS_URL` | `localhost` | Used in client-token to tell guest driver where dls instance is reachable |
 | `DLS_PORT` | `443` | Used in client-token to tell guest driver where dls instance is reachable |
-| `HA_REPLICATE` | | `DLS_URL` + `DLS_PORT` of primary DLS instance, e.g. `dls-node:443` (for HA only **two** nodes are supported!) \*1 |
-| `HA_ROLE` | | `PRIMARY` or `SECONDARY` |
 | `TOKEN_EXPIRE_DAYS` | `1` | Client auth-token validity (used for authenticate client against api, **not `.tok` file!**) |
 | `LEASE_EXPIRE_DAYS` | `90` | Lease time in days |
-| `LEASE_RENEWAL_PERIOD` | `0.15` | The percentage of the lease period that must elapse before a licensed client can renew a license \*2 |
+| `LEASE_RENEWAL_PERIOD` | `0.15` | The percentage of the lease period that must elapse before a licensed client can renew a license \*1 |
 | `DATABASE` | `sqlite:///db.sqlite` | See [official SQLAlchemy docs](https://docs.sqlalchemy.org/en/14/core/engines.html) |
-| `CORS_ORIGINS` | `https://{DLS_URL}` | Sets `Access-Control-Allow-Origin` header (comma separated string) \*3 |
+| `CORS_ORIGINS` | `https://{DLS_URL}` | Sets `Access-Control-Allow-Origin` header (comma separated string) \*2 |
 | `SITE_KEY_XID` | `00000000-0000-0000-0000-000000000000` | Site identification uuid |
 | `INSTANCE_REF` | `10000000-0000-0000-0000-000000000001` | Instance identification uuid |
 | `ALLOTMENT_REF` | `20000000-0000-0000-0000-000000000001` | Allotment identification uuid |
-| `INSTANCE_KEY_RSA` | `<app-dir>/cert/instance.private.pem` | Site-wide private RSA key for signing JWTs \*4 |
-| `INSTANCE_KEY_PUB` | `<app-dir>/cert/instance.public.pem` | Site-wide public key \*4 |
+| `INSTANCE_KEY_RSA` | `<app-dir>/cert/instance.private.pem` | Site-wide private RSA key for signing JWTs \*3 |
+| `INSTANCE_KEY_PUB` | `<app-dir>/cert/instance.public.pem` | Site-wide public key \*3 |

-\*1 If you want to use HA, this value should point to `secondary` on `primary` and `primary` on `secondary`. Don't
-use same database for both instances!
-
-\*2 For example, if the lease period is one day and the renewal period is 20%, the client attempts to renew its license
+\*1 For example, if the lease period is one day and the renewal period is 20%, the client attempts to renew its license
 every 4.8 hours. If network connectivity is lost, the loss of connectivity is detected during license renewal and the
 client has 19.2 hours in which to re-establish connectivity before its license expires.

-\*3 Always use `https`, since guest-drivers only support secure connections!
+\*2 Always use `https`, since guest-drivers only support secure connections!

-\*4 If you recreate instance keys you need to **recreate client-token for each guest**!
+\*3 If you recreate your instance keys you need to **recreate client-token for each guest**!

 # Setup (Client)

````
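The renewal arithmetic in footnote \*1 can be checked quickly; a one-day lease with a 20% renewal period gives:

```shell
# 20% of 24h until first renewal attempt, 80% of 24h grace to reconnect
python3 -c "lease_h = 24; p = 0.20; print(p * lease_h, (1 - p) * lease_h)"   # -> 4.8 19.2
```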
@ -416,9 +436,30 @@ client has 19.2 hours in which to re-establish connectivity before its license e
|
|||||||
|
|
||||||
Successfully tested with this package versions:
|
Successfully tested with this package versions:
|
||||||
|
|
||||||
- `14.3` (Linux-Host: `510.108.03`, Linux-Guest: `510.108.03`, Windows-Guest: `513.91`)
|
| vGPU Suftware | Driver Branch | Linux vGPU Manager | Linux Driver | Windows Driver | Release Date | EOL Date |
|
||||||
- `14.4` (Linux-Host: `510.108.03`, Linux-Guest: `510.108.03`, Windows-Guest: `514.08`)
|
|:-------------:|:-------------:|--------------------|--------------|----------------|--------------:|--------------:|
|
||||||
- `15.0` (Linux-Host: `525.60.12`, Linux-Guest: `525.60.13`, Windows-Guest: `527.41`)
|
| `17.2` | R550 | `550.90.05` | `550.90.07` | `552.55` | June 2024 | February 2025 |
|
||||||
|
| `17.1` | R550 | `550.54.16` | `550.54.15` | `551.78` | March 2024 | |
|
||||||
|
| `17.0` | R550 | `550.54.10` | `550.54.14` | `551.61` | February 2024 | |
|
||||||
|
| `16.6` | R535 | `535.183.04` | `535.183.01` | `538.67` | June 2024 | July 2026 |
|
||||||
|
| `16.5` | R535 | `535.161.05` | `535.161.08` | `538.46` | February 2024 | |
|
||||||
|
| `16.4` | R535 | `535.161.05` | `535.161.07` | `538.33` | February 2024 | |
|
||||||
|
| `16.3` | R535 | `535.154.02` | `535.154.05` | `538.15` | January 2024 | |
|
||||||
|
| `16.2` | R535 | `535.129.03` | `535.129.03` | `537.70` | October 2023 | |
|
||||||
|
| `16.1` | R535 | `535.104.06` | `535.104.05` | `537.13` | August 2023 | |
|
||||||
|
| `16.0` | R535 | `535.54.06` | `535.54.03` | `536.22` | July 2023 | |
|
||||||
|
| `15.4` | R525 | `525.147.01` | `525.147.05` | `529.19` | June 2023 | October 2023 |
|
||||||
|
| `15.3` | R525 | `525.125.03` | `525.125.06` | `529.11` | June 2023 | |
|
||||||
|
| `15.2` | R525 | `525.105.14` | `525.105.17` | `528.89` | March 2023 | |
|
||||||
|
| `15.1` | R525 | `525.85.07` | `525.85.05` | `528.24` | January 2023 | |
|
||||||
|
| `15.0` | R525 | `525.60.12` | `525.60.13` | `527.41` | December 2022 | |
|
||||||
|
| `14.4` | R510 | `510.108.03` | `510.108.03` | `514.08` | December 2022 | February 2023 |
|
||||||
|
| `14.3` | R510 | `510.108.03` | `510.108.03` | `513.91` | November 2022 | |
|
||||||
|
|
||||||
|
- https://docs.nvidia.com/grid/index.html
|
||||||
|
- https://docs.nvidia.com/grid/gpus-supported-by-vgpu.html
|
||||||
|
|
||||||
|
*To get the latest drivers, visit Nvidia or search in Discord-Channel `GPU Unlocking` (Server-ID: `829786927829745685`) on channel `licensing` `biggerthanshit`
|
||||||
|
|
||||||
## Linux
|
## Linux
|
||||||
|
|
||||||
@ -470,7 +511,7 @@ Restart-Service NVDisplay.ContainerLocalSystem
|
|||||||
Check licensing status:
|
Check licensing status:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
& 'C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe' -q | Select-String "License"
|
& 'nvidia-smi' -q | Select-String "License"
|
||||||
```
|
```
|
||||||
|
|
||||||
Output should be something like:
|
Output should be something like:
|
||||||
@ -482,8 +523,24 @@ vGPU Software Licensed Product
|
|||||||
|
|
||||||
Done. For more information check [troubleshoot section](#troubleshoot).
|
Done. For more information check [troubleshoot section](#troubleshoot).
|
||||||
|
|
||||||
|
## unRAID Guest
|
||||||
|
|
||||||
|
1. Make sure you create a folder in a linux filesystem (BTRFS/XFS/EXT4...), I recommend `/mnt/user/system/nvidia` (this is where docker and libvirt preferences are saved, so it's a good place to have that)
|
||||||
|
2. Edit the script to put your `DLS_IP`, `DLS_PORT` and `TOKEN_PATH`, properly
|
||||||
|
3. Install `User Scripts` plugin from *Community Apps* (the Apps page, or google User Scripts Unraid if you're not using CA)
|
||||||
|
4. Go to `Settings > Users Scripts > Add New Script`
|
||||||
|
5. Give it a name (the name must not contain spaces preferably)
|
||||||
|
6. Click on the *gear icon* to the left of the script name then edit script
|
||||||
|
7. Paste the script and save
|
||||||
|
8. Set schedule to `At First Array Start Only`
|
||||||
|
9. Click on Apply
|
||||||
|
|
||||||
|
|
||||||

# Endpoints

<details>
<summary>show</summary>

### `GET /`

Redirect to `/-/readme`.

@ -535,11 +592,18 @@ Generate client token, (see [installation](#installation)).

### Others

There are many other internal api endpoints for handling authentication and the lease process.

</details>

-# Troubleshoot
+# Troubleshoot / Debug

**Please make sure that fastapi-dls and your guests use the same timezone!**

Maybe you have to disable IPv6 on the machine you are running FastAPI-DLS on.

## Docker

Logs are available with `docker logs <container>`. To get the correct container-id, use `docker container ls` or `docker ps`.
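
For example (the image name is the one used in the compose files; adjust if yours differs):

```shell
docker ps --filter "ancestor=collinwebdesigns/fastapi-dls"   # find the container-id
docker logs -f <container>                                   # follow the logs
```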

## Linux

Logs are available with `journalctl -u nvidia-gridd -f`.

@ -597,7 +661,7 @@ only

gets a valid local license.

<details>
-<summary>Log</summary>
+<summary>Log example</summary>

**Display-Container-LS**

@ -663,7 +727,7 @@ The error message can safely be ignored (since we have no license limitation :P)

<0>:End Logging
```

-#### log with nginx as reverse proxy (see [docker-compose.yml](docker-compose.yml))
+#### log with nginx as reverse proxy (see [docker-compose-http-and-https.yml](examples/docker-compose-http-and-https.yml))

```
<1>:NLS initialized
@ -684,4 +748,14 @@ The error message can safely be ignored (since we have no license limitation :P)

Thanks to the vGPU community and to everyone who uses this project and reports bugs.

-Special thanks to @samicrusader who created build file for ArchLinux and @cyrus who wrote the section for openSUSE.
+Special thanks to
+
+- @samicrusader who created the build file for **ArchLinux**
+- @cyrus who wrote the section for **openSUSE**
+- @midi who wrote the section for **unRAID**
+- @polloloco who wrote the *[NVIDIA vGPU Guide](https://gitlab.com/polloloco/vgpu-proxmox)*
+- @DualCoder who created the `vgpu_unlock` functionality [vgpu_unlock](https://github.com/DualCoder/vgpu_unlock)
+- Krutav Shah who wrote the [vGPU_Unlock Wiki](https://docs.google.com/document/d/1pzrWJ9h-zANCtyqRgS7Vzla0Y8Ea2-5z2HEi4X75d2Q/)
+- Wim van 't Hoog for the [Proxmox All-In-One Installer Script](https://wvthoog.nl/proxmox-vgpu-v3/)
+
+And thanks to all the people who contributed to all these libraries!

27
ROADMAP.md
Normal file

@ -0,0 +1,27 @@

# Roadmap

I am planning to implement the following features in the future.

## HA - High Availability

Support Failover-Mode (secondary ip address) as in the official DLS.

**Note**: There is no Load-Balancing / Round-Robin HA mode supported! If you want that, consider using
Docker-Swarm with a shared/cluster database (e.g. postgres).

*See [ha branch](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/ha) for current status.*

## UI - User Interface

Add a user interface to manage origins and leases.

*See [ui branch](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/ui) for current status.*

## Config Database

Instead of using environment variables and configuration files and creating certificates manually, store configs and
certificates in the database (like origins and leases). Also, a startup assistant should be provided to prefill
required attributes and create instance certificates. This is more user-friendly and should improve the first setup.
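
For the Docker-Swarm note above: fastapi-dls already reads its connection string from the `DATABASE` variable, so pointing several replicas at one shared database is purely configuration. A hypothetical postgres URL (host, credentials and database name are placeholders; a postgres driver such as psycopg2 must be installed):

```shell
export DATABASE="postgresql://dls:changeme@db-host:5432/dls"
```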
132
app/main.py

@ -6,7 +6,7 @@ from os.path import join, dirname

from os import getenv as env

from dotenv import load_dotenv
-from fastapi import FastAPI, BackgroundTasks
+from fastapi import FastAPI
from fastapi.requests import Request
from json import loads as json_loads
from datetime import datetime, timedelta

@ -19,7 +19,7 @@ from starlette.responses import StreamingResponse, JSONResponse as JSONr, HTMLRe

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

-from util import load_key, load_file, ha_replicate
+from util import load_key, load_file
from orm import Origin, Lease, init as db_init, migrate

load_dotenv('../version.env')

@ -36,7 +36,6 @@ db_init(db), migrate(db)

# everything prefixed with "INSTANCE_*" is used as "SERVICE_INSTANCE_*" or "SI_*" in official dls service
DLS_URL = str(env('DLS_URL', 'localhost'))
DLS_PORT = int(env('DLS_PORT', '443'))
-HA_REPLICATE, HA_ROLE = env('HA_REPLICATE', None), env('HA_ROLE', None)  # only failover is supported
SITE_KEY_XID = str(env('SITE_KEY_XID', '00000000-0000-0000-0000-000000000000'))
INSTANCE_REF = str(env('INSTANCE_REF', '10000000-0000-0000-0000-000000000001'))
ALLOTMENT_REF = str(env('ALLOTMENT_REF', '20000000-0000-0000-0000-000000000001'))

@ -83,7 +82,7 @@ async def _index():

@app.get('/-/health', summary='* Health')
-async def _health(request: Request):
+async def _health():
    return JSONr({'status': 'up'})
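
A quick way to exercise the health route above (assuming fastapi-dls answers on localhost:443 with a self-signed certificate):

```shell
curl --insecure https://localhost/-/health
# expected: {"status":"up"}
```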

@ -187,6 +186,12 @@ async def _leases(request: Request, origin: bool = False):

    return JSONr(response)


+@app.delete('/-/leases/expired', summary='* Leases')
+async def _lease_delete_expired(request: Request):
+    Lease.delete_expired(db)
+    return Response(status_code=201)


@app.delete('/-/lease/{lease_ref}', summary='* Lease')
async def _lease_delete(request: Request, lease_ref: str):
    if Lease.delete(db, lease_ref) == 1:
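
The new endpoint can be called from a cron job or a maintenance shell to prune expired leases; it answers with HTTP 201, as in the handler above (localhost and the self-signed certificate are assumptions):

```shell
curl --insecure -X DELETE https://localhost/-/leases/expired
```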

@ -200,36 +205,6 @@ async def _client_token():

    cur_time = datetime.utcnow()
    exp_time = cur_time + CLIENT_TOKEN_EXPIRE_DELTA

-    if HA_REPLICATE is not None and HA_ROLE.lower() == "secondary":
-        return RedirectResponse(f'https://{HA_REPLICATE}/-/client-token')
-
-    idx_port, idx_node = 0, 0
-
-    def create_svc_port_set(port: int):
-        idx = idx_port
-        return {
-            "idx": idx,
-            "d_name": "DLS",
-            "svc_port_map": [{"service": "auth", "port": port}, {"service": "lease", "port": port}]
-        }
-
-    def create_node_url(url: str, svc_port_set_idx: int):
-        idx = idx_node
-        return {"idx": idx, "url": url, "url_qr": url, "svc_port_set_idx": svc_port_set_idx}
-
-    service_instance_configuration = {
-        "nls_service_instance_ref": INSTANCE_REF,
-        "svc_port_set_list": [create_svc_port_set(DLS_PORT)],
-        "node_url_list": [create_node_url(DLS_URL, idx_port)]
-    }
-    idx_port += 1
-    idx_node += 1
-
-    if HA_REPLICATE is not None and HA_ROLE.lower() == "primary":
-        SEC_URL, SEC_PORT, *invalid = HA_REPLICATE.split(':')
-        service_instance_configuration['svc_port_set_list'].append(create_svc_port_set(SEC_PORT))
-        service_instance_configuration['node_url_list'].append(create_node_url(SEC_URL, idx_port))

    payload = {
        "jti": str(uuid4()),
        "iss": "NLS Service Instance",

@ -240,7 +215,17 @@ async def _client_token():

        "update_mode": "ABSOLUTE",
        "scope_ref_list": [ALLOTMENT_REF],
        "fulfillment_class_ref_list": [],
-        "service_instance_configuration": service_instance_configuration,
+        "service_instance_configuration": {
+            "nls_service_instance_ref": INSTANCE_REF,
+            "svc_port_set_list": [
+                {
+                    "idx": 0,
+                    "d_name": "DLS",
+                    "svc_port_map": [{"service": "auth", "port": DLS_PORT}, {"service": "lease", "port": DLS_PORT}]
+                }
+            ],
+            "node_url_list": [{"idx": 0, "url": DLS_URL, "url_qr": DLS_URL, "svc_port_set_idx": 0}]
+        },
        "service_instance_public_key_configuration": {
            "service_instance_public_key_me": {
                "mod": hex(INSTANCE_KEY_PUB.public_key().n)[2:],

@ -260,67 +245,6 @@ async def _client_token():

    return response


-@app.get('/-/ha/replicate', summary='* HA replicate - trigger')
-async def _ha_replicate_to_ha(request: Request, background_tasks: BackgroundTasks):
-    if HA_REPLICATE is None or HA_ROLE is None:
-        logger.warning('HA replicate endpoint triggered, but no value for "HA_REPLICATE" or "HA_ROLE" is set!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'no value for "HA_REPLICATE" or "HA_ROLE" set'})
-
-    session = sessionmaker(bind=db)()
-    origins = [origin.serialize() for origin in session.query(Origin).all()]
-    leases = [lease.serialize(renewal_period=LEASE_RENEWAL_PERIOD, renewal_delta=LEASE_RENEWAL_DELTA) for lease in session.query(Lease).all()]
-
-    background_tasks.add_task(ha_replicate, logger, HA_REPLICATE, HA_ROLE, VERSION, DLS_URL, DLS_PORT, SITE_KEY_XID, INSTANCE_REF, origins, leases)
-    return JSONr(status_code=202, content=None)
-
-
-@app.put('/-/ha/replicate', summary='* HA replicate')
-async def _ha_replicate_by_ha(request: Request):
-    j, cur_time = json_loads((await request.body()).decode('utf-8')), datetime.utcnow()
-
-    if HA_REPLICATE is None:
-        logger.warning(f'HA replicate endpoint triggered, but no value for "HA_REPLICATE" is set!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'no value for "HA_REPLICATE" set'})
-
-    version = j.get('VERSION')
-    if version != VERSION:
-        logger.error(f'Version mismatch on HA replication task!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'Mismatch for "VERSION"'})
-
-    site_key_xid = j.get('SITE_KEY_XID')
-    if site_key_xid != SITE_KEY_XID:
-        logger.error(f'Site-Key mismatch on HA replication task!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'Mismatch for "SITE_KEY_XID"'})
-
-    instance_ref = j.get('INSTANCE_REF')
-    if instance_ref != INSTANCE_REF:
-        logger.error(f'Instance-Ref mismatch on HA replication task!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'Mismatch for "INSTANCE_REF"'})
-
-    sync_timestamp, max_seconds_behind = datetime.fromisoformat(j.get('sync_timestamp')), 30
-    if sync_timestamp <= cur_time - timedelta(seconds=max_seconds_behind):
-        logger.error(f'Request time more than {max_seconds_behind}s behind!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'Request time behind'})
-
-    origins, leases = j.get('origins'), j.get('leases')
-    for origin in origins:
-        origin_ref = origin.get('origin_ref')
-        logging.info(f'> [ ha ]: origin {origin_ref}')
-        data = Origin.deserialize(origin)
-        Origin.create_or_update(db, data)
-
-    for lease in leases:
-        lease_ref = lease.get('lease_ref')
-        x = Lease.find_by_lease_ref(db, lease_ref)
-        if x is not None and x.lease_updated > sync_timestamp:
-            continue
-        logging.info(f'> [ ha ]: lease {lease_ref}')
-        data = Lease.deserialize(lease)
-        Lease.create_or_update(db, data)
-
-    return JSONr(status_code=202, content=None)


# venv/lib/python3.9/site-packages/nls_services_auth/test/test_origins_controller.py
@app.post('/auth/v1/origin', description='find or create an origin')
async def auth_v1_origin(request: Request):

@ -627,22 +551,6 @@ async def app_on_startup():

    Your client-token file (.tok) is valid for {str(CLIENT_TOKEN_EXPIRE_DELTA)}.
    ''')

-    if HA_REPLICATE is not None and HA_ROLE is not None:
-        from hashlib import sha1
-
-        sha1digest = sha1(INSTANCE_KEY_RSA.export_key()).hexdigest()
-        fingerprint_key = ':'.join(sha1digest[i: i + 2] for i in range(0, len(sha1digest), 2))
-        sha1digest = sha1(INSTANCE_KEY_PUB.export_key()).hexdigest()
-        fingerprint_pub = ':'.join(sha1digest[i: i + 2] for i in range(0, len(sha1digest), 2))
-
-        logger.info(f'''
-        HA mode is enabled. Make sure these fingerprints match on all your nodes:
-        - INSTANCE_KEY_RSA: "{str(fingerprint_key)}"
-        - INSTANCE_KEY_PUB: "{str(fingerprint_pub)}"
-
-        This node ({HA_ROLE}) listens to "https://{DLS_URL}:{DLS_PORT}" and replicates to "https://{HA_REPLICATE}".
-        ''')


if __name__ == '__main__':
    import uvicorn

28
app/orm.py

@ -32,16 +32,6 @@ class Origin(Base):

            'os_version': self.os_version,
        }

-    @staticmethod
-    def deserialize(j) -> "Origin":
-        return Origin(
-            origin_ref=j.get('origin_ref'),
-            hostname=j.get('hostname'),
-            guest_driver_version=j.get('guest_driver_version'),
-            os_platform=j.get('os_platform'),
-            os_version=j.get('os_version'),
-        )

    @staticmethod
    def create_statement(engine: Engine):
        from sqlalchemy.schema import CreateTable

@ -105,16 +95,6 @@ class Lease(Base):

            'lease_renewal': lease_renewal.isoformat(),
        }

-    @staticmethod
-    def deserialize(j) -> "Lease":
-        return Lease(
-            lease_ref=j.get('lease_ref'),
-            origin_ref=j.get('origin_ref'),
-            lease_created=datetime.fromisoformat(j.get('lease_created')),
-            lease_expires=datetime.fromisoformat(j.get('lease_expires')),
-            lease_updated=datetime.fromisoformat(j.get('lease_updated')),
-        )

    @staticmethod
    def create_statement(engine: Engine):
        from sqlalchemy.schema import CreateTable

@ -180,6 +160,14 @@ class Lease(Base):

        session.close()
        return deletions

+    @staticmethod
+    def delete_expired(engine: Engine) -> int:
+        session = sessionmaker(bind=engine)()
+        deletions = session.query(Lease).filter(Lease.lease_expires <= datetime.utcnow()).delete()
+        session.commit()
+        session.close()
+        return deletions

    @staticmethod
    def calculate_renewal(renewal_period: float, delta: timedelta) -> timedelta:
        """
26
app/util.py

@ -26,29 +26,3 @@ def generate_key() -> "RsaKey":

    from Cryptodome.PublicKey.RSA import RsaKey

    return RSA.generate(bits=2048)

-
-def ha_replicate(logger: "logging.Logger", ha_replicate: str, ha_role: str, version: str, dls_url: str, dls_port: int, site_key_xid: str, instance_ref: str, origins: list, leases: list) -> bool:
-    from datetime import datetime
-    import httpx
-
-    if f'{dls_url}:{dls_port}' == ha_replicate:
-        logger.error(f'Failed to replicate this node ({ha_role}) to "{ha_replicate}": can\'t replicate to itself')
-        return False
-
-    data = {
-        'VERSION': str(version),
-        'HA_REPLICATE': f'{dls_url}:{dls_port}',
-        'SITE_KEY_XID': str(site_key_xid),
-        'INSTANCE_REF': str(instance_ref),
-        'origins': origins,
-        'leases': leases,
-        'sync_timestamp': datetime.utcnow().isoformat(),
-    }
-
-    r = httpx.put(f'https://{ha_replicate}/-/ha/replicate', json=data, verify=False)
-    if r.status_code == 202:
-        logger.info(f'Successfully replicated this node ({ha_role}) to "{ha_replicate}".')
-        return True
-
-    logger.error(f'Failed to replicate this node ({ha_role}) to "{ha_replicate}": {r.status_code} - {r.content}')
-    return False
docker-compose.yml

@ -1,9 +1,10 @@

version: '3.9'

x-dls-variables: &dls-variables
+  TZ: Europe/Berlin # REQUIRED, set your timezone correctly on fastapi-dls AND YOUR CLIENTS !!!
  DLS_URL: localhost # REQUIRED, change to your ip or hostname
-  DLS_PORT: 443 # must match nginx listen & exposed port
-  LEASE_EXPIRE_DAYS: 90
+  DLS_PORT: 443
+  LEASE_EXPIRE_DAYS: 90 # 90 days is maximum
  DATABASE: sqlite:////app/database/db.sqlite
  DEBUG: false

@ -13,108 +14,16 @@ services:

    restart: always
    environment:
      <<: *dls-variables
-    volumes:
-      - /etc/timezone:/etc/timezone:ro
-      - /opt/docker/fastapi-dls/cert:/app/cert # instance.private.pem, instance.public.pem
-      - db:/app/database
-    entrypoint: ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--app-dir", "/app", "--proxy-headers"]
-    healthcheck:
-      test: ["CMD", "curl", "--fail", "http://localhost:8000/-/health"]
-      interval: 10s
-      timeout: 5s
-      retries: 3
-      start_period: 30s
-  proxy:
-    image: nginx
    ports:
-      # these are the ports nginx (!) listens on
-      - "80:80" # for "/leasing/v1/lessor/shutdown" used by windows guests, can't be changed!
-      - "443:443" # first part must match "DLS_PORT"
+      - "443:443"
    volumes:
-      - /etc/timezone:/etc/timezone:ro
-      - /opt/docker/fastapi-dls/cert:/opt/cert
+      - /opt/docker/fastapi-dls/cert:/app/cert
+      - dls-db:/app/database
-    healthcheck:
-      test: ["CMD", "curl", "--insecure", "--fail", "https://localhost/-/health"]
-      interval: 10s
-      timeout: 5s
-      retries: 3
-      start_period: 30s
-    command: |
-      [inline nginx.conf heredoc removed; it moves verbatim into examples/docker-compose-http-and-https.yml, shown in full below]
+    logging: # optional, for those who do not need logs
+      driver: "json-file"
+      options:
+        max-file: 5
+        max-size: 10m

volumes:
-  db:
+  dls-db:
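
A minimal smoke test for the simplified compose file (assuming the certificates already exist under `/opt/docker/fastapi-dls/cert` and `DLS_URL`/`TZ` are set):

```shell
docker compose up -d
curl --insecure https://localhost/-/health   # expected: {"status":"up"}
```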

120
examples/docker-compose-http-and-https.yml
Normal file

@ -0,0 +1,120 @@

version: '3.9'

x-dls-variables: &dls-variables
  DLS_URL: localhost # REQUIRED, change to your ip or hostname
  DLS_PORT: 443 # must match nginx listen & exposed port
  LEASE_EXPIRE_DAYS: 90
  DATABASE: sqlite:////app/database/db.sqlite
  DEBUG: false

services:
  dls:
    image: collinwebdesigns/fastapi-dls:latest
    restart: always
    environment:
      <<: *dls-variables
    volumes:
      - /etc/timezone:/etc/timezone:ro
      - /opt/docker/fastapi-dls/cert:/app/cert # instance.private.pem, instance.public.pem
      - db:/app/database
    entrypoint: ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--app-dir", "/app", "--proxy-headers"]
    healthcheck:
      test: ["CMD", "curl", "--fail", "http://localhost:8000/-/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
  proxy:
    image: nginx
    ports:
      # these are the ports nginx (!) listens on
      - "80:80" # for "/leasing/v1/lessor/shutdown" used by windows guests, can't be changed!
      - "443:443" # first part must match "DLS_PORT"
    volumes:
      - /etc/timezone:/etc/timezone:ro
      - /opt/docker/fastapi-dls/cert:/opt/cert
    healthcheck:
      test: ["CMD", "curl", "--insecure", "--fail", "https://localhost/-/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
    command: |
      bash -c "bash -s <<\"EOF\"
      cat > /etc/nginx/nginx.conf <<\"EON\"
      daemon off;
      user root;
      worker_processes auto;

      events {
        worker_connections 1024;
      }

      http {
        gzip on;
        gzip_disable "msie6";
        include /etc/nginx/mime.types;

        upstream dls-backend {
          server dls:8000; # must match dls listen port
        }

        server {
          listen 443 ssl http2 default_server;
          listen [::]:443 ssl http2 default_server;

          root /var/www/html;
          index index.html;
          server_name _;

          ssl_certificate "/opt/cert/webserver.crt";
          ssl_certificate_key "/opt/cert/webserver.key";
          ssl_session_cache shared:SSL:1m;
          ssl_session_timeout 10m;
          ssl_protocols TLSv1.3 TLSv1.2;
          # ssl_ciphers "ECDHE-ECDSA-CHACHA20-POLY1305";
          # ssl_ciphers PROFILE=SYSTEM;
          ssl_prefer_server_ciphers on;

          location / {
            proxy_set_header Host $$http_host;
            proxy_set_header X-Real-IP $$remote_addr;
            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $$scheme;
            proxy_pass http://dls-backend$$request_uri;
          }

          location = /-/health {
            access_log off;
            add_header 'Content-Type' 'application/json';
            return 200 '{\"status\":\"up\",\"service\":\"nginx\"}';
          }
        }

        server {
          listen 80;
          listen [::]:80;

          root /var/www/html;
          index index.html;
          server_name _;

          location /leasing/v1/lessor/shutdown {
            proxy_set_header Host $$http_host;
            proxy_set_header X-Real-IP $$remote_addr;
            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $$scheme;
            proxy_pass http://dls-backend/leasing/v1/lessor/shutdown;
          }

          location / {
            return 301 https://$$host$$request_uri;
          }
        }
      }
      EON
      nginx
      EOF"

volumes:
  db:
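
The proxy expects `webserver.crt`/`webserver.key` in the mounted cert folder; a self-signed pair can be generated like this (paths match the volume mapping above, the validity period is an arbitrary choice):

```shell
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 \
  -keyout /opt/docker/fastapi-dls/cert/webserver.key \
  -out /opt/docker/fastapi-dls/cert/webserver.crt
```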
requirements.txt

@ -1,9 +1,8 @@

-fastapi==0.92.0
+fastapi==0.111.0
-uvicorn[standard]==0.20.0
+uvicorn[standard]==0.29.0
python-jose==3.3.0
-pycryptodome==3.17
+pycryptodome==3.20.0
python-dateutil==2.8.2
-sqlalchemy==2.0.3
+sqlalchemy==2.0.30
-markdown==3.4.1
+markdown==3.6
-python-dotenv==0.21.1
+python-dotenv==1.0.1
-httpx==0.23.3
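
If you run fastapi-dls from source instead of the Docker image, apply the bumped pins in your virtualenv as well (a sketch; the venv location is up to you):

```shell
source venv/bin/activate
pip install --upgrade -r requirements.txt
```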

version.env

@ -1 +0,0 @@

-VERSION=1.3.5