forked from oscar.krause/fastapi-dls

# Compare commits

37 commits (SHA1):
- 14cf6a953f
- 6a5d3cb2f7
- 774a1c21a1
- d1a77df0e1
- c9c73f6cf2
- b216dcb3dd
- d2e4042932
- 04a1ee0948
- c1b5f83f44
- 9d1422cbdf
- 7b7f14bd82
- f72c0f7db3
- 76d8753f28
- 593db0e789
- 3d9e3cb88f
- 995b944135
- e200c84345
- 04ff36c94d
- 89704bc2a1
- 6395214fa0
- c8e000eb3e
- c8e5676c01
- 6f11bc414c
- 1fc5ac8378
- 87334fbfad
- 0fac033657
- 7cd4e6fde0
- a22b56edbe
- e42dc6aa86
- 86f703a36c
- 71795cc7a2
- 4ef041bb54
- e1bbd42b50
- b905ab9dd9
- e3745d7fa8
- 164b5ebc44
- 70250f1fca
## .DEBIAN/control

```diff
@@ -2,7 +2,7 @@ Package: fastapi-dls
 Version: 0.0
 Architecture: all
 Maintainer: Oscar Krause oscar.krause@collinwebdesigns.de
-Depends: python3, python3-fastapi, python3-uvicorn, python3-dotenv, python3-dateutil, python3-jose, python3-sqlalchemy, python3-pycryptodome, python3-markdown, python3-httpx, uvicorn, openssl
+Depends: python3, python3-fastapi, python3-uvicorn, python3-dotenv, python3-dateutil, python3-jose, python3-sqlalchemy, python3-pycryptodome, python3-markdown, uvicorn, openssl
 Recommends: curl
 Installed-Size: 10240
 Homepage: https://git.collinwebdesigns.de/oscar.krause/fastapi-dls
```

## .PKGBUILD/PKGBUILD

```diff
@@ -22,8 +22,9 @@ sha256sums=('SKIP'
             '3dc60140c08122a8ec0e7fa7f0937eb8c1288058890ba09478420fc30ce9e30c')

 pkgver() {
+    echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > $srcdir/$pkgname/version.env
     source $srcdir/$pkgname/version.env
-    echo ${VERSION}
+    echo $VERSION
 }

 check() {
```
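The reworked `pkgver()` now writes `version.env` itself before sourcing it, so the package version no longer depends on a pre-existing file. A minimal sketch of the same pattern, runnable outside `makepkg`; the `VERSION` and `CI_COMMIT_SHA` values are stand-ins for what GitLab CI would provide:

```shell
#!/bin/bash
# Stand-in values that GitLab CI would normally provide.
VERSION=1.3.5
CI_COMMIT_SHA=70250f1fca

# Write the version file, read it back, and print the version -
# this printed value is what makepkg uses as the package version.
echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
. ./version.env
echo "$VERSION"
```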
## .UNRAID/FastAPI-DLS.xml (new file, 48 lines)
````xml
<?xml version="1.0"?>
<Container version="2">
  <Name>FastAPI-DLS</Name>
  <Repository>collinwebdesigns/fastapi-dls:latest</Repository>
  <Registry>https://hub.docker.com/r/collinwebdesigns/fastapi-dls</Registry>
  <Network>br0</Network>
  <MyIP></MyIP>
  <Shell>sh</Shell>
  <Privileged>false</Privileged>
  <Support/>
  <Project/>
  <Overview>Source:
https://git.collinwebdesigns.de/oscar.krause/fastapi-dls#docker

Make sure you create these certificates before starting the container for the first time:
```
# Check https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/main/#docker for more information:
WORKING_DIR=/mnt/user/appdata/fastapi-dls/cert
mkdir -p $WORKING_DIR
cd $WORKING_DIR
# create instance private and public key for signing JWTs
openssl genrsa -out $WORKING_DIR/instance.private.pem 2048
openssl rsa -in $WORKING_DIR/instance.private.pem -outform PEM -pubout -out $WORKING_DIR/instance.public.pem
# create ssl certificate for integrated webserver (uvicorn) - because clients rely on ssl
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout $WORKING_DIR/webserver.key -out $WORKING_DIR/webserver.crt
```
</Overview>
  <Category/>
  <WebUI>https://[IP]:[PORT:443]</WebUI>
  <TemplateURL/>
  <Icon>https://git.collinwebdesigns.de/uploads/-/system/project/avatar/106/png-transparent-nvidia-grid-logo-business-nvidia-electronics-text-trademark.png?width=64</Icon>
  <ExtraParams>--restart always</ExtraParams>
  <PostArgs/>
  <CPUset/>
  <DateInstalled>1679161568</DateInstalled>
  <DonateText/>
  <DonateLink/>
  <Requires/>
  <Config Name="HTTPS Port" Target="" Default="443" Mode="tcp" Description="Same as DLS Port below." Type="Port" Display="always-hide" Required="true" Mask="false">443</Config>
  <Config Name="App Cert" Target="/app/cert" Default="/mnt/user/appdata/fastapi-dls/cert" Mode="rw" Description="[REQUIRED] Read the description above to make this folder. You do not need to change the path." Type="Path" Display="always-hide" Required="true" Mask="false">/mnt/user/appdata/fastapi-dls/cert</Config>
  <Config Name="DLS Port" Target="DLS_PORT" Default="443" Mode="" Description="Choose the port you want to use. Make sure to change the HTTPS port above to match it." Type="Variable" Display="always-hide" Required="true" Mask="false">443</Config>
  <Config Name="App database" Target="/app/database" Default="/mnt/user/appdata/fastapi-dls/data" Mode="rw" Description="[REQUIRED] Read the description above to make this folder. You do not need to change the path." Type="Path" Display="always-hide" Required="true" Mask="false">/mnt/user/appdata/fastapi-dls/data</Config>
  <Config Name="DLS IP" Target="DLS_URL" Default="localhost" Mode="" Description="Put your container's IP (or your host's IP if it's shared)." Type="Variable" Display="always-hide" Required="true" Mask="false"></Config>
  <Config Name="Time Zone" Target="TZ" Default="" Mode="" Description="Format example: America/New_York. MUST MATCH YOUR CURRENT TIMEZONE AND THE GUEST VMS TIMEZONE! Otherwise you'll run into issues, read the guide above." Type="Variable" Display="always-hide" Required="true" Mask="false"></Config>
  <Config Name="Database" Target="DATABASE" Default="sqlite:////app/database/db.sqlite" Mode="" Description="Set to sqlite:////app/database/db.sqlite" Type="Variable" Display="advanced-hide" Required="true" Mask="false">sqlite:////app/database/db.sqlite</Config>
  <Config Name="Debug" Target="DEBUG" Default="true" Mode="" Description="true to enable debugging, false to disable it." Type="Variable" Display="advanced-hide" Required="false" Mask="false">true</Config>
  <Config Name="Lease" Target="LEASE_EXPIRE_DAYS" Default="90" Mode="" Description="90 days is the maximum value." Type="Variable" Display="advanced" Required="false" Mask="false">90</Config>
</Container>
````
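For readers who don't use unRAID's template system, the settings above translate roughly to a plain `docker run` invocation. This is an illustrative sketch, not part of the template: the IP, timezone and paths are placeholders, and on unRAID the container is attached to `br0` with its own IP instead of a port mapping:

```shell
docker run -d --restart always --name fastapi-dls \
  -p 443:443 \
  -e DLS_URL=192.168.0.123 \
  -e DLS_PORT=443 \
  -e TZ=America/New_York \
  -e DATABASE=sqlite:////app/database/db.sqlite \
  -e LEASE_EXPIRE_DAYS=90 \
  -v /mnt/user/appdata/fastapi-dls/cert:/app/cert \
  -v /mnt/user/appdata/fastapi-dls/data:/app/database \
  collinwebdesigns/fastapi-dls:latest
```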
## .UNRAID/setup_vgpu_license.sh (new file, 197 lines)
```bash
#!/bin/bash

# This script automates the licensing of the vGPU guest driver
# on Unraid boot. Set the Schedule to: "At Startup of Array".
#
# Relies on FastAPI-DLS for the licensing.
# It assumes FeatureType=1 (vGPU), change it as you see fit in line <114>
#
# Requires `elfutils` to be installed in the system for `nvidia-gridd` to run
# To install it:
# 1) You might find it here: https://packages.slackware.com/ (choose the 64bit version of Slackware)
# 2) Download the package and put it in /boot/extra to be installed on boot
# 3) a. Reboot to install it, OR
#    b. Run `upgradepkg --install-new /boot/extra/elfutils*`
# [i]: Make sure to have only one version of elfutils, otherwise you might run into issues

# Sources and docs:
# https://docs.nvidia.com/grid/15.0/grid-vgpu-user-guide/index.html#configuring-nls-licensed-client-on-linux
#

################################################
#     MAKE SURE YOU CHANGE THESE VARIABLES     #
################################################

###### CHANGE ME!
# IP and PORT of FastAPI-DLS
DLS_IP=192.168.0.123
DLS_PORT=443
# Token folder, must be on a filesystem that supports
# linux filesystem permissions (eg: ext4,xfs,btrfs...)
TOKEN_PATH=/mnt/user/system/nvidia
PING=$(which ping)

# Check if the license is applied
if [[ "$(nvidia-smi -q | grep "Expiry")" == *Expiry* ]]; then
  echo " [i] Your vGPU guest drivers are already licensed."
  echo " [i] $(nvidia-smi -q | grep "Expiry")"
  echo " [<] Exiting..."
  exit 0
fi

# Check if the FastAPI-DLS server is reachable
MAX_RETRIES=30
for i in $(seq 1 $MAX_RETRIES); do
  echo -ne "\r [>] Attempt $i to connect to $DLS_IP."
  if ping -c 1 $DLS_IP >/dev/null 2>&1; then
    echo -e "\n [*] Connection successful."
    break
  fi
  if [ $i -eq $MAX_RETRIES ]; then
    echo -e "\n [!] Connection failed after $MAX_RETRIES attempts."
    echo -e "\n [<] Exiting..."
    exit 1
  fi
  sleep 1
done

# Check if the token folder exists
if [ -d "${TOKEN_PATH}" ]; then
  echo " [*] Token folder exists. Proceeding..."
else
  echo " [!] Token folder does not exist or is not ready yet. Exiting."
  echo " [!] Token folder specified: ${TOKEN_PATH}"
  exit 1
fi

# Check if elfutils is installed, otherwise the nvidia-gridd service
# won't start
if [ "$(grep -R "elfutils" /var/log/packages/* | wc -l)" != 0 ]; then
  echo " [*] Elfutils is installed, proceeding..."
else
  echo " [!] Elfutils is not installed, downloading and installing..."
  echo " [!] Downloading elfutils to /boot/extra"
  echo " [i] This script will download elfutils from the slackware64-15.0 repository."
  echo " [i] If you have a different version of Unraid (6.11.5), you might want to"
  echo " [i] download and install a suitable version manually from the slackware"
  echo " [i] repository, and put it in /boot/extra to be installed on boot."
  echo " [i] You may also install it by running: "
  echo " [i]   upgradepkg --install-new /path/to/elfutils-*.txz"
  echo ""
  echo " [>] Downloading elfutils from the slackware64-15.0 repository:"
  wget -q -nc --show-progress --progress=bar:force:noscroll -P /boot/extra https://slackware.uk/slackware/slackware64-15.0/slackware64/l/elfutils-0.186-x86_64-1.txz 2>/dev/null \
    || { echo " [!] Error while downloading elfutils, please download and install it manually."; exit 1; }
  echo ""
  if upgradepkg --install-new /boot/extra/elfutils-0.186-x86_64-1.txz
  then
    echo " [*] Elfutils installed and will be installed automatically on boot"
  else
    echo " [!] Error while installing, check logs..."
    exit 1
  fi
fi

echo " [~] Sleeping for 60 seconds before continuing..."
echo " [i] The script is waiting until the boot process settles down."

for i in {60..1}; do
  printf "\r [~] %d seconds remaining" "$i"
  sleep 1
done

printf "\n"

create_token () {
  echo " [>] Creating new token..."
  if ${PING} -c1 ${DLS_IP} > /dev/null 2>&1
  then
    # curl --insecure -L -X GET https://${DLS_IP}:${DLS_PORT}/-/client-token -o ${TOKEN_PATH}/client_configuration_token_"$(date '+%d-%m-%Y-%H-%M-%S')".tok || { echo " [!] Could not get the token, please check the server."; exit 1;}
    wget -q -nc -4c --no-check-certificate --show-progress --progress=bar:force:noscroll -O "${TOKEN_PATH}"/client_configuration_token_"$(date '+%d-%m-%Y-%H-%M-%S')".tok https://${DLS_IP}:${DLS_PORT}/-/client-token \
      || { echo " [!] Could not get the token, please check the server."; exit 1;}
    chmod 744 "${TOKEN_PATH}"/*.tok || { echo " [!] Could not chmod the tokens."; exit 1; }
    echo ""
    echo " [*] Token downloaded and stored in ${TOKEN_PATH}."
  else
    echo " [!] Could not get token, DLS server unavailable."
    exit 1
  fi
}

setup_run () {
  echo " [>] Setting up gridd.conf"
  cp /etc/nvidia/gridd.conf.template /etc/nvidia/gridd.conf || { echo " [!] Error configuring gridd.conf, did you install the drivers correctly?"; exit 1; }
  sed -i 's/FeatureType=0/FeatureType=1/g' /etc/nvidia/gridd.conf
  echo "ClientConfigTokenPath=${TOKEN_PATH}" >> /etc/nvidia/gridd.conf
  echo " [>] Creating /var/lib/nvidia folder structure"
  mkdir -p /var/lib/nvidia/GridLicensing
  echo " [>] Starting nvidia-gridd"
  if pgrep nvidia-gridd >/dev/null 2>&1; then
    echo " [!] nvidia-gridd service is running. Closing."
    sh /usr/lib/nvidia/sysv/nvidia-gridd stop
    stop_exit_code=$?
    if [ $stop_exit_code -eq 0 ]; then
      echo " [*] nvidia-gridd service stopped successfully."
    else
      echo " [!] Error while stopping nvidia-gridd service."
      exit 1
    fi

    # Kill the service if it does not close
    if pgrep nvidia-gridd >/dev/null 2>&1; then
      kill -9 "$(pgrep nvidia-gridd)" || {
        echo " [!] Error while closing nvidia-gridd service"
        exit 1
      }
    fi

    echo " [*] Restarting nvidia-gridd service."
    sh /usr/lib/nvidia/sysv/nvidia-gridd start

    if pgrep nvidia-gridd >/dev/null 2>&1; then
      echo " [*] Service started, PID: $(pgrep nvidia-gridd)"
    else
      echo -e " [!] Error while starting nvidia-gridd service. Use strace -f nvidia-gridd to debug.\n [i] Check if elfutils is installed.\n [i] strace is not installed by default."
      exit 1
    fi
  else
    sh /usr/lib/nvidia/sysv/nvidia-gridd start

    if pgrep nvidia-gridd >/dev/null 2>&1; then
      echo " [*] Service started, PID: $(pgrep nvidia-gridd)"
    else
      echo -e " [!] Error while starting nvidia-gridd service. Use strace -f nvidia-gridd to debug.\n [i] Check if elfutils is installed.\n [i] strace is not installed by default."
      exit 1
    fi
  fi
}

for token in "${TOKEN_PATH}"/*; do
  if [ "${token: -4}" == ".tok" ]
  then
    echo " [*] Tokens found..."
    setup_run
  else
    echo " [!] No tokens found..."
    create_token
    setup_run
  fi
done

while true; do
  if nvidia-smi -q | grep "Expiry" >/dev/null 2>&1; then
    echo " [>] vGPU licensed!"
    echo " [i] $(nvidia-smi -q | grep "Expiry")"
    break
  else
    echo -ne " [>] vGPU not licensed yet... Checking again in 5 seconds\c"
    for i in {1..5}; do
      sleep 1
      echo -ne ".\c"
    done
    echo -ne "\r\c"
  fi
done

echo " [>] Done..."
exit 0
```
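The commented-out `curl` line inside `create_token` shows the equivalent one-liner. If you prefer to fetch a token by hand, something like this should work, assuming the same `DLS_IP`, `DLS_PORT` and `TOKEN_PATH` values as in the script:

```shell
# Standalone token download, mirroring what create_token does via wget.
DLS_IP=192.168.0.123; DLS_PORT=443; TOKEN_PATH=/mnt/user/system/nvidia
curl --insecure -L -X GET "https://${DLS_IP}:${DLS_PORT}/-/client-token" \
  -o "${TOKEN_PATH}/client_configuration_token_$(date '+%d-%m-%Y-%H-%M-%S').tok"
```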
## .gitlab-ci.yml
```diff
@@ -20,26 +20,38 @@ build:docker:
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
   tags: [ docker ]
   before_script:
     - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env # COMMIT=`git rev-parse HEAD`
+    - docker buildx inspect
+    - docker buildx create --use
   script:
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${CI_BUILD_REF}
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${CI_BUILD_REF}
+    - IMAGE=$CI_REGISTRY/$CI_PROJECT_PATH/$CI_BUILD_REF_NAME:$CI_BUILD_REF
+    - docker buildx build --progress=plain --platform linux/amd64,linux/arm64 --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE --push .
+    - docker buildx imagetools inspect $IMAGE
+    - echo "CS_IMAGE=$IMAGE" > container_scanning.env
   artifacts:
     reports:
       dotenv: container_scanning.env
```
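The switch to `docker buildx` is what enables the multi-arch (amd64 + arm64) images. Outside CI, the same build can be reproduced roughly like this; the registry path and version tag are placeholders:

```shell
# One-time builder setup, as in the CI before_script.
docker buildx inspect
docker buildx create --use

# Multi-platform build and push in a single step.
IMAGE=registry.example.com/oscar.krause/fastapi-dls:main
docker buildx build --progress=plain \
  --platform linux/amd64,linux/arm64 \
  --build-arg VERSION=main --build-arg COMMIT=$(git rev-parse HEAD) \
  --tag "$IMAGE" --push .

# Verify both architectures ended up in the manifest.
docker buildx imagetools inspect "$IMAGE"
```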
```diff
 build:apt:
   image: debian:bookworm-slim
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+    - if: $CI_COMMIT_TAG
+      variables:
+        VERSION: $CI_BUILD_REF_NAME
     - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
       changes:
         - app/**/*
         - .DEBIAN/**/*
         - .gitlab-ci.yml
+      variables:
+        VERSION: "0.0.1"
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+      variables:
+        VERSION: "0.0.1"
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
-    - source version.env
+    - echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
     # install build dependencies
     - apt-get update -qq && apt-get install -qq -y build-essential
     # create build directory for .deb sources
@@ -60,7 +72,7 @@ build:apt:
     # cd into "build/"
     - cd build/
   script:
-    # set version based on value in "$VERSION" (which is set above from version.env)
+    # set version based on value in "$CI_BUILD_REF_NAME"
     - sed -i -E 's/(Version\:\s)0.0/\1'"$VERSION"'/g' DEBIAN/control
     # build
     - dpkg -b . build.deb
@@ -75,14 +87,21 @@ build:pacman:
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+    - if: $CI_COMMIT_TAG
+      variables:
+        VERSION: $CI_BUILD_REF_NAME
     - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
       changes:
         - app/**/*
         - .PKGBUILD/**/*
         - .gitlab-ci.yml
+      variables:
+        VERSION: "0.0.1"
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+      variables:
+        VERSION: "0.0.1"
   before_script:
     - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
+    #- echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
     # install build dependencies
     - pacman -Syu --noconfirm git
     # create a build-user because "makepkg" don't like root user
@@ -97,7 +116,7 @@ build:pacman:
     # download dependencies
     - source PKGBUILD && pacman -Syu --noconfirm --needed --asdeps "${makedepends[@]}" "${depends[@]}"
     # build
-    - sudo -u build makepkg -s
+    - sudo --preserve-env -u build makepkg -s
   artifacts:
     expire_in: 1 week
     paths:
@@ -108,6 +127,7 @@ test:
   stage: test
   rules:
     - if: $CI_COMMIT_BRANCH
+    - if: $CI_COMMIT_TAG
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
   variables:
     DATABASE: sqlite:///../app/db.sqlite
@@ -192,28 +212,26 @@ code_quality:
     - if: $CODE_QUALITY_DISABLED
       when: never
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

 secret_detection:
   rules:
     - if: $SECRET_DETECTION_DISABLED
       when: never
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   before_script:
     - git config --global --add safe.directory $CI_PROJECT_DIR

 semgrep-sast:
   rules:
     - if: $SAST_DISABLED
       when: never
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

 test_coverage:
   extends: test
   allow_failure: true
   rules:
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   script:
     - pip install pytest pytest-cov
     - coverage run -m pytest main.py
@@ -232,51 +250,43 @@ container_scanning:
     - if: $CONTAINER_SCANNING_DISABLED
       when: never
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

 gemnasium-python-dependency_scanning:
   rules:
     - if: $DEPENDENCY_SCANNING_DISABLED
       when: never
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

 .deploy:
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+    - if: $CI_COMMIT_TAG
+      when: never

 deploy:docker:
   extends: .deploy
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
-    - source version.env
-    - echo "Building docker image for commit ${COMMIT} with version ${VERSION}"
+    - echo "Building docker image for commit $CI_COMMIT_SHA with version $CI_BUILD_REF_NAME"
   script:
-    - echo "GitLab-Registry"
+    - echo "========== GitLab-Registry =========="
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${VERSION}
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:latest
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${VERSION}
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:latest
-    - echo "Docker-Hub"
+    - IMAGE=$CI_REGISTRY/$CI_PROJECT_PATH/$CI_BUILD_REF_NAME
+    - docker build . --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:$CI_BUILD_REF_NAME
+    - docker build . --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:latest
+    - docker push $IMAGE:$CI_BUILD_REF_NAME
+    - docker push $IMAGE:latest
+    - echo "========== Docker-Hub =========="
     - docker login -u $PUBLIC_REGISTRY_USER -p $PUBLIC_REGISTRY_TOKEN
-    - docker build . --tag $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:${VERSION}
-    - docker build . --tag $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:latest
-    - docker push $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:${VERSION}
-    - docker push $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:latest
+    - IMAGE=$PUBLIC_REGISTRY_USER/$CI_PROJECT_NAME
+    - docker build . --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:$CI_BUILD_REF_NAME
+    - docker build . --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:latest
+    - docker push $IMAGE:$CI_BUILD_REF_NAME
+    - docker push $IMAGE:latest

 deploy:apt:
   # doc: https://git.collinwebdesigns.de/help/user/packages/debian_repository/index.md#install-a-package
   extends: .deploy
   image: debian:bookworm-slim
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   needs:
     - job: build:apt
       artifacts: true
@@ -316,8 +326,6 @@ deploy:pacman:
   extends: .deploy
   image: archlinux:base-devel
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   needs:
     - job: build:pacman
       artifacts: true
@@ -325,9 +333,9 @@ deploy:pacman:
     - source .PKGBUILD/PKGBUILD
     - source version.env
     # fastapi-dls-1.0-1-any.pkg.tar.zst
-    - BUILD_NAME=${pkgname}-${VERSION}-${pkgrel}-any.pkg.tar.zst
+    - BUILD_NAME=${pkgname}-${CI_BUILD_REF_NAME}-${pkgrel}-any.pkg.tar.zst
     - PACKAGE_NAME=${pkgname}
-    - PACKAGE_VERSION=${VERSION}
+    - PACKAGE_VERSION=${CI_BUILD_REF_NAME}
     - PACKAGE_ARCH=any
     - EXPORT_NAME=${BUILD_NAME}
    - 'echo "PACKAGE_NAME: ${PACKAGE_NAME}"'
@@ -339,19 +347,15 @@ deploy:pacman:
 release:
   image: registry.gitlab.com/gitlab-org/release-cli:latest
   stage: .post
-  needs:
-    - job: test
-      artifacts: true
+  needs: [ test ]
   rules:
     - if: $CI_COMMIT_TAG
-      when: never
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   script:
-    - echo "Running release-job for $VERSION"
+    - echo "Running release-job for $CI_COMMIT_TAG"
   release:
-    name: $CI_PROJECT_TITLE $VERSION
-    description: Release of $CI_PROJECT_TITLE version $VERSION
-    tag_name: $VERSION
+    name: $CI_PROJECT_TITLE $CI_COMMIT_TAG
+    description: Release of $CI_PROJECT_TITLE version $CI_COMMIT_TAG
+    tag_name: $CI_COMMIT_TAG
     ref: $CI_COMMIT_SHA
     assets:
       links:
```
## Dockerfile

```diff
@@ -1,5 +1,9 @@
 FROM python:3.11-alpine

+ARG VERSION
+ARG COMMIT=""
+RUN echo -e "VERSION=$VERSION\nCOMMIT=$COMMIT" > /version.env
+
 COPY requirements.txt /tmp/requirements.txt

 RUN apk update \
@@ -11,7 +15,6 @@ RUN apk update \
     && apk del build-deps

 COPY app /app
-COPY version.env /version.env
 COPY README.md /README.md

 HEALTHCHECK --start-period=30s --interval=10s --timeout=5s --retries=3 CMD curl --insecure --fail https://localhost/-/health || exit 1
```
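With the new `ARG VERSION` / `ARG COMMIT` lines, version information is baked in at build time instead of being copied in as a `version.env` file. A minimal local build would look like this; the tag name and version value are placeholders:

```shell
# Pass version metadata into the image at build time.
docker build . \
  --build-arg VERSION=1.3.5 \
  --build-arg COMMIT=$(git rev-parse HEAD) \
  --tag fastapi-dls:local

# The build arguments end up in /version.env inside the image.
docker run --rm --entrypoint cat fastapi-dls:local /version.env
```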
## README.md
````diff
@@ -9,9 +9,9 @@ Only the clients need a connection to this service on configured port.

 **Official Links**

-- https://git.collinwebdesigns.de/oscar.krause/fastapi-dls
-- https://gitea.publichub.eu/oscar.krause/fastapi-dls
-- Docker Image `collinwebdesigns/fastapi-dls:latest`
+- https://git.collinwebdesigns.de/oscar.krause/fastapi-dls (Private Git)
+- https://gitea.publichub.eu/oscar.krause/fastapi-dls (Public Git)
+- https://hub.docker.com/r/collinwebdesigns/fastapi-dls (Docker-Hub `collinwebdesigns/fastapi-dls:latest`)

 *All other repositories are forks! (which is not bad - just for information and bug reports)*

@@ -32,17 +32,6 @@ Tested with Ubuntu 22.10 (from Proxmox templates), actually its consuming 100mb

 - Make sure your timezone is set correctly on your fastapi-dls server and your client

-**HA Setup Notes**
-
-- only *failover mode* is supported by team-green (see *high availability* in official user guide)
-- make sure you're using same configuration on each node
-- use same `instance.private.pem` and `instance.private.key` on each node
-- add `cronjob` on each node with `curl -X GET --insecure https://localhost/-/ha/replicate`
-
-If you want to use *real* HA, you should use a proxy in front of this service and use a clustered database in backend.
-This is not documented or supported by me, but it *can* work. Please ask the community for help.
-Maybe the simplest solution for HA-ing this service is to use a Docker-Swarm with redundant storage and database.

 ## Docker

 Docker-Images are available here:

@@ -156,9 +145,9 @@ This is only to test whether the service starts successfully.

 ```shell
 cd /opt/fastapi-dls/app
-su - www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
+sudo -u www-data /opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app
 # or
-sudo -u www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
+su - www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
 ```

 **Create config file**

@@ -258,6 +247,8 @@ This is only to test whether the service starts successfully.

 ```shell
 BASE_DIR=/opt/fastapi-dls
 SERVICE_USER=dls
 cd ${BASE_DIR}
+sudo -u ${SERVICE_USER} ${BASE_DIR}/venv/bin/uvicorn main:app --app-dir=${BASE_DIR}/app
+# or
 su - ${SERVICE_USER} -c "${BASE_DIR}/venv/bin/uvicorn main:app --app-dir=${BASE_DIR}/app"
 ```

@@ -363,6 +354,19 @@ pacman -U --noconfirm fastapi-dls.pkg.tar.zst

 Start with `systemctl start fastapi-dls.service` and enable autostart with `systemctl enable fastapi-dls.service`.

+## unRAID
+
+1. Download [this xml file](.UNRAID/FastAPI-DLS.xml)
+2. Put it in /boot/config/plugins/dockerMan/templates-user/
+3. Go to the Docker page, scroll down to `Add Container`, click on the Template list and choose `FastAPI-DLS`
+4. Open a terminal/ssh session and follow the instructions in the overview description
+5. Set up your container `IP`, `Port`, `DLS_URL` and `DLS_PORT`
+6. Apply and let it boot up
+
+*Unraid users must also make sure they have Host access to custom networks enabled if unraid is the vgpu guest.*
+
+Continue [here](#unraid-guest) for docker guest setup.

 ## Let's Encrypt Certificate (optional)

 If you're using installation via docker, you can use `traefik`. Please refer to their documentation.
````
```diff
@@ -381,28 +385,23 @@ After first success you have to replace `--issue` with `--renew`.

 # Configuration

-| Variable               | Default                                | Usage                                                                                                              |
-|------------------------|----------------------------------------|--------------------------------------------------------------------------------------------------------------------|
-| `DEBUG`                | `false`                                | Toggles `fastapi` debug mode                                                                                       |
-| `DLS_URL`              | `localhost`                            | Used in client-token to tell guest driver where dls instance is reachable                                          |
-| `DLS_PORT`             | `443`                                  | Used in client-token to tell guest driver where dls instance is reachable                                          |
-| `HA_REPLICATE`         |                                        | `DLS_URL` + `DLS_PORT` of primary DLS instance, e.g. `dls-node:443` (for HA only **two** nodes are supported!) \*1 |
-| `HA_ROLE`              |                                        | `PRIMARY` or `SECONDARY`                                                                                           |
-| `TOKEN_EXPIRE_DAYS`    | `1`                                    | Client auth-token validity (used to authenticate client against api, **not `.tok` file!**)                         |
-| `LEASE_EXPIRE_DAYS`    | `90`                                   | Lease time in days                                                                                                 |
-| `LEASE_RENEWAL_PERIOD` | `0.15`                                 | The percentage of the lease period that must elapse before a licensed client can renew a license \*2               |
-| `DATABASE`             | `sqlite:///db.sqlite`                  | See [official SQLAlchemy docs](https://docs.sqlalchemy.org/en/14/core/engines.html)                                |
-| `CORS_ORIGINS`         | `https://{DLS_URL}`                    | Sets `Access-Control-Allow-Origin` header (comma separated string) \*3                                             |
-| `SITE_KEY_XID`         | `00000000-0000-0000-0000-000000000000` | Site identification uuid                                                                                           |
-| `INSTANCE_REF`         | `10000000-0000-0000-0000-000000000001` | Instance identification uuid                                                                                       |
-| `ALLOTMENT_REF`        | `20000000-0000-0000-0000-000000000001` | Allotment identification uuid                                                                                      |
-| `INSTANCE_KEY_RSA`     | `<app-dir>/cert/instance.private.pem`  | Site-wide private RSA key for signing JWTs \*4                                                                     |
-| `INSTANCE_KEY_PUB`     | `<app-dir>/cert/instance.public.pem`   | Site-wide public key \*4                                                                                           |
+| Variable               | Default                                | Usage                                                                                                |
+|------------------------|----------------------------------------|------------------------------------------------------------------------------------------------------|
+| `DEBUG`                | `false`                                | Toggles `fastapi` debug mode                                                                         |
+| `DLS_URL`              | `localhost`                            | Used in client-token to tell guest driver where dls instance is reachable                            |
+| `DLS_PORT`             | `443`                                  | Used in client-token to tell guest driver where dls instance is reachable                            |
+| `TOKEN_EXPIRE_DAYS`    | `1`                                    | Client auth-token validity (used to authenticate client against api, **not `.tok` file!**)           |
+| `LEASE_EXPIRE_DAYS`    | `90`                                   | Lease time in days                                                                                   |
+| `LEASE_RENEWAL_PERIOD` | `0.15`                                 | The percentage of the lease period that must elapse before a licensed client can renew a license \*1 |
+| `DATABASE`             | `sqlite:///db.sqlite`                  | See [official SQLAlchemy docs](https://docs.sqlalchemy.org/en/14/core/engines.html)                  |
+| `CORS_ORIGINS`         | `https://{DLS_URL}`                    | Sets `Access-Control-Allow-Origin` header (comma separated string) \*2                               |
+| `SITE_KEY_XID`         | `00000000-0000-0000-0000-000000000000` | Site identification uuid                                                                             |
+| `INSTANCE_REF`         | `10000000-0000-0000-0000-000000000001` | Instance identification uuid                                                                         |
+| `ALLOTMENT_REF`        | `20000000-0000-0000-0000-000000000001` | Allotment identification uuid                                                                        |
+| `INSTANCE_KEY_RSA`     | `<app-dir>/cert/instance.private.pem`  | Site-wide private RSA key for signing JWTs \*3                                                       |
+| `INSTANCE_KEY_PUB`     | `<app-dir>/cert/instance.public.pem`   | Site-wide public key \*3                                                                             |

-\*1 If you want to use HA, this value should point to `secondary` on `primary` and `primary` on `secondary`. Don't
-use the same database for both instances!
-
-\*2 For example, if the lease period is one day and the renewal period is 20%, the client attempts to renew its license
+\*1 For example, if the lease period is one day and the renewal period is 20%, the client attempts to renew its license
 every 4.8 hours. If network connectivity is lost, the loss of connectivity is detected during license renewal and the
 client has 19.2 hours in which to re-establish connectivity before its license expires.
```
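A quick way to sanity-check the footnote's arithmetic for any `LEASE_RENEWAL_PERIOD` value; the numbers below are the footnote's example (one-day lease, 20% renewal period), not the defaults:

```shell
awk 'BEGIN {
  lease_h = 24; period = 0.20
  # Renewal interval and the remaining grace window before expiry.
  printf "renews every %.1f h, grace window %.1f h\n", lease_h * period, lease_h * (1 - period)
}'
# -> renews every 4.8 h, grace window 19.2 h
```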
````diff
@@ -416,9 +415,15 @@ client has 19.2 hours in which to re-establish connectivity before its license expires.

 Successfully tested with these package versions:

-- `14.3` (Linux-Host: `510.108.03`, Linux-Guest: `510.108.03`, Windows-Guest: `513.91`)
-- `14.4` (Linux-Host: `510.108.03`, Linux-Guest: `510.108.03`, Windows-Guest: `514.08`)
-- `15.0` (Linux-Host: `525.60.12`, Linux-Guest: `525.60.13`, Windows-Guest: `527.41`)
+| vGPU Software | vGPU Manager | Linux Driver | Windows Driver | Release Date  |
+|---------------|--------------|--------------|----------------|---------------|
+| `15.2`        | `525.105.14` | `525.105.17` | `528.89`       | March 2023    |
+| `15.1`        | `525.85.07`  | `525.85.05`  | `528.24`       | January 2023  |
+| `15.0`        | `525.60.12`  | `525.60.13`  | `527.41`       | December 2022 |
+| `14.4`        | `510.108.03` | `510.108.03` | `514.08`       | December 2022 |
+| `14.3`        | `510.108.03` | `510.108.03` | `513.91`       | November 2022 |
+
+- https://docs.nvidia.com/grid/index.html

 ## Linux

@@ -470,7 +475,7 @@ Restart-Service NVDisplay.ContainerLocalSystem
 Check licensing status:

 ```shell
-& 'C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe' -q | Select-String "License"
+& 'nvidia-smi' -q | Select-String "License"
 ```

 Output should be something like:

@@ -482,6 +487,19 @@ vGPU Software Licensed Product

 Done. For more information check [troubleshoot section](#troubleshoot).

+## unRAID Guest
+
+1. Make sure you create a folder on a Linux filesystem (BTRFS/XFS/EXT4...); I recommend `/mnt/user/system/nvidia` (this is where docker and libvirt preferences are saved, so it's a good place to have that)
+2. Edit the script to set your `DLS_IP`, `DLS_PORT` and `TOKEN_PATH` properly
+3. Install the `User Scripts` plugin from *Community Apps* (the Apps page, or google User Scripts Unraid if you're not using CA)
+4. Go to `Settings > User Scripts > Add New Script`
+5. Give it a name (preferably without spaces)
+6. Click on the *gear icon* to the left of the script name, then edit the script
+7. Paste the script and save
+8. Set the schedule to `At First Array Start Only`
+9. Click on Apply
+

 # Endpoints

 ### `GET /`

@@ -684,4 +702,8 @@ The error message can safely be ignored (since we have no license limitation :P)

 Thanks to the vGPU community and all who use this project and report bugs.

-Special thanks to @samicrusader who created the build file for ArchLinux and @cyrus who wrote the section for openSUSE.
+Special thanks to
+
+- @samicrusader who created the build file for ArchLinux
+- @cyrus who wrote the section for openSUSE
+- @midi who wrote the section for unRAID
````
## ROADMAP.md (new file, 27 lines)
```markdown
# Roadmap

I am planning to implement the following features in the future.


## HA - High Availability

Support Failover-Mode (secondary ip address) as in the official DLS.

**Note**: There is no Load-Balancing / Round-Robin HA mode supported! If you want that, consider using
Docker-Swarm with a shared/clustered database (e.g. postgres).

*See [ha branch](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/ha) for current status.*


## UI - User Interface

Add a user interface to manage origins and leases.

*See [ui branch](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/ui) for current status.*


## Config Database

Instead of using environment variables and configuration files and manually creating certificates, store configs and
certificates in the database (like origins and leases). Also, a startup assistant should be provided to prefill
required attributes and create instance certificates. This is more user-friendly and should improve first setup.
```
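The Docker-Swarm suggestion leans on the fact that the `DATABASE` setting is a plain SQLAlchemy engine URL (see the configuration table in README.md). A speculative sketch of pointing a node at a shared Postgres instance; the hostname and credentials are placeholders, and this assumes a suitable driver (e.g. psycopg2) is available in the image:

```shell
# Each node in the swarm would point at the same shared database.
docker run -d --name fastapi-dls \
  -e DLS_URL=dls.example.org -e DLS_PORT=443 \
  -e DATABASE="postgresql://dls:secret@postgres.example.org:5432/dls" \
  -v /opt/fastapi-dls/cert:/app/cert \
  -p 443:443 collinwebdesigns/fastapi-dls:latest
```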
## app/main.py
```diff
@@ -6,7 +6,7 @@ from os.path import join, dirname
 from os import getenv as env

 from dotenv import load_dotenv
-from fastapi import FastAPI, BackgroundTasks
+from fastapi import FastAPI
 from fastapi.requests import Request
 from json import loads as json_loads
 from datetime import datetime, timedelta
@@ -19,7 +19,7 @@ from starlette.responses import StreamingResponse, JSONResponse as JSONr, HTMLRe
 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker

-from util import load_key, load_file, ha_replicate
+from util import load_key, load_file
 from orm import Origin, Lease, init as db_init, migrate

 load_dotenv('../version.env')
@@ -36,7 +36,6 @@ db_init(db), migrate(db)
 # everything prefixed with "INSTANCE_*" is used as "SERVICE_INSTANCE_*" or "SI_*" in official dls service
 DLS_URL = str(env('DLS_URL', 'localhost'))
 DLS_PORT = int(env('DLS_PORT', '443'))
-HA_REPLICATE, HA_ROLE = env('HA_REPLICATE', None), env('HA_ROLE', None)  # only failover is supported
 SITE_KEY_XID = str(env('SITE_KEY_XID', '00000000-0000-0000-0000-000000000000'))
 INSTANCE_REF = str(env('INSTANCE_REF', '10000000-0000-0000-0000-000000000001'))
 ALLOTMENT_REF = str(env('ALLOTMENT_REF', '20000000-0000-0000-0000-000000000001'))
@@ -83,7 +82,7 @@ async def _index():


 @app.get('/-/health', summary='* Health')
-async def _health(request: Request):
+async def _health():
     return JSONr({'status': 'up'})

@@ -200,36 +199,6 @@ async def _client_token():
     cur_time = datetime.utcnow()
     exp_time = cur_time + CLIENT_TOKEN_EXPIRE_DELTA

-    if HA_REPLICATE is not None and HA_ROLE.lower() == "secondary":
-        return RedirectResponse(f'https://{HA_REPLICATE}/-/client-token')
-
-    idx_port, idx_node = 0, 0
-
-    def create_svc_port_set(port: int):
-        idx = idx_port
-        return {
-            "idx": idx,
-            "d_name": "DLS",
-            "svc_port_map": [{"service": "auth", "port": port}, {"service": "lease", "port": port}]
-        }
-
-    def create_node_url(url: str, svc_port_set_idx: int):
-        idx = idx_node
-        return {"idx": idx, "url": url, "url_qr": url, "svc_port_set_idx": svc_port_set_idx}
-
-    service_instance_configuration = {
-        "nls_service_instance_ref": INSTANCE_REF,
-        "svc_port_set_list": [create_svc_port_set(DLS_PORT)],
-        "node_url_list": [create_node_url(DLS_URL, idx_port)]
-    }
-    idx_port += 1
-    idx_node += 1
-
-    if HA_REPLICATE is not None and HA_ROLE.lower() == "primary":
-        SEC_URL, SEC_PORT, *invalid = HA_REPLICATE.split(':')
-        service_instance_configuration['svc_port_set_list'].append(create_svc_port_set(SEC_PORT))
-        service_instance_configuration['node_url_list'].append(create_node_url(SEC_URL, idx_port))
-
     payload = {
         "jti": str(uuid4()),
         "iss": "NLS Service Instance",
@@ -240,7 +209,17 @@ async def _client_token():
         "update_mode": "ABSOLUTE",
         "scope_ref_list": [ALLOTMENT_REF],
         "fulfillment_class_ref_list": [],
-        "service_instance_configuration": service_instance_configuration,
+        "service_instance_configuration": {
+            "nls_service_instance_ref": INSTANCE_REF,
+            "svc_port_set_list": [
+                {
+                    "idx": 0,
+                    "d_name": "DLS",
+                    "svc_port_map": [{"service": "auth", "port": DLS_PORT}, {"service": "lease", "port": DLS_PORT}]
+                }
+            ],
+            "node_url_list": [{"idx": 0, "url": DLS_URL, "url_qr": DLS_URL, "svc_port_set_idx": 0}]
+        },
         "service_instance_public_key_configuration": {
             "service_instance_public_key_me": {
                 "mod": hex(INSTANCE_KEY_PUB.public_key().n)[2:],
@@ -260,67 +239,6 @@ async def _client_token():
     return response


-@app.get('/-/ha/replicate', summary='* HA replicate - trigger')
-async def _ha_replicate_to_ha(request: Request, background_tasks: BackgroundTasks):
-    if HA_REPLICATE is None or HA_ROLE is None:
-        logger.warning('HA replicate endpoint triggerd, but no value for "HA_REPLICATE" or "HA_ROLE" is set!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'no value for "HA_REPLICATE" or "HA_ROLE" set'})
-
-    session = sessionmaker(bind=db)()
-    origins = [origin.serialize() for origin in session.query(Origin).all()]
-    leases = [lease.serialize(renewal_period=LEASE_RENEWAL_PERIOD, renewal_delta=LEASE_RENEWAL_DELTA) for lease in session.query(Lease).all()]
-
-    background_tasks.add_task(ha_replicate, logger, HA_REPLICATE, HA_ROLE, VERSION, DLS_URL, DLS_PORT, SITE_KEY_XID, INSTANCE_REF, origins, leases)
-    return JSONr(status_code=202, content=None)
-
-
-@app.put('/-/ha/replicate', summary='* HA replicate')
-async def _ha_replicate_by_ha(request: Request):
-    j, cur_time = json_loads((await request.body()).decode('utf-8')), datetime.utcnow()
-
-    if HA_REPLICATE is None:
-        logger.warning(f'HA replicate endpoint triggerd, but no value for "HA_REPLICATE" is set!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'no value for "HA_REPLICATE" set'})
-
-    version = j.get('VERSION')
-    if version != VERSION:
-        logger.error(f'Version missmatch on HA replication task!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'Missmatch for "VERSION"'})
-
-    site_key_xid = j.get('SITE_KEY_XID')
-    if site_key_xid != SITE_KEY_XID:
-        logger.error(f'Site-Key missmatch on HA replication task!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'Missmatch for "SITE_KEY_XID"'})
-
-    instance_ref = j.get('INSTANCE_REF')
-    if instance_ref != INSTANCE_REF:
-        logger.error(f'Version missmatch on HA replication task!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'Missmatch for "INSTANCE_REF"'})
-
-    sync_timestamp, max_seconds_behind = datetime.fromisoformat(j.get('sync_timestamp')), 30
-    if sync_timestamp <= cur_time - timedelta(seconds=max_seconds_behind):
-        logger.error(f'Request time more than {max_seconds_behind}s behind!')
-        return JSONr(status_code=503, content={'status': 503, 'detail': 'Request time behind'})
-
-    origins, leases = j.get('origins'), j.get('leases')
-    for origin in origins:
-        origin_ref = origin.get('origin_ref')
-        logging.info(f'> [ ha ]: origin {origin_ref}')
-        data = Origin.deserialize(origin)
-        Origin.create_or_update(db, data)
-
-    for lease in leases:
-        lease_ref = lease.get('lease_ref')
-        x = Lease.find_by_lease_ref(db, lease_ref)
-        if x is not None and x.lease_updated > sync_timestamp:
-            continue
-        logging.info(f'> [ ha ]: lease {lease_ref}')
-        data = Lease.deserialize(lease)
-        Lease.create_or_update(db, data)
-
-    return JSONr(status_code=202, content=None)
-
-
 # venv/lib/python3.9/site-packages/nls_services_auth/test/test_origins_controller.py
 @app.post('/auth/v1/origin', description='find or create an origin')
 async def auth_v1_origin(request: Request):
@@ -627,22 +545,6 @@ async def app_on_startup():
     Your client-token file (.tok) is valid for {str(CLIENT_TOKEN_EXPIRE_DELTA)}.
 ''')

-    if HA_REPLICATE is not None and HA_ROLE is not None:
-        from hashlib import sha1
-
-        sha1digest = sha1(INSTANCE_KEY_RSA.export_key()).hexdigest()
-        fingerprint_key = ':'.join(sha1digest[i: i + 2] for i in range(0, len(sha1digest), 2))
-        sha1digest = sha1(INSTANCE_KEY_PUB.export_key()).hexdigest()
-        fingerprint_pub = ':'.join(sha1digest[i: i + 2] for i in range(0, len(sha1digest), 2))
-
-        logger.info(f'''
-        HA mode is enabled. Make sure theses fingerprints matches on all your nodes:
-            - INSTANCE_KEY_RSA: "{str(fingerprint_key)}"
-            - INSTANCE_KEY_PUB: "{str(fingerprint_pub)}"
-
-        This node ({HA_ROLE}) listens to "https://{DLS_URL}:{DLS_PORT}" and replicates to "https://{HA_REPLICATE}".
-''')
-

 if __name__ == '__main__':
     import uvicorn
```
## app/orm.py
```diff
@@ -32,16 +32,6 @@ class Origin(Base):
             'os_version': self.os_version,
         }

-    @staticmethod
-    def deserialize(j) -> "Origin":
-        return Origin(
-            origin_ref=j.get('origin_ref'),
-            hostname=j.get('hostname'),
-            guest_driver_version=j.get('guest_driver_version'),
-            os_platform=j.get('os_platform'),
-            os_version=j.get('os_version'),
-        )
-
     @staticmethod
     def create_statement(engine: Engine):
         from sqlalchemy.schema import CreateTable
@@ -105,16 +95,6 @@ class Lease(Base):
             'lease_renewal': lease_renewal.isoformat(),
         }

-    @staticmethod
-    def deserialize(j) -> "Lease":
-        return Lease(
-            lease_ref=j.get('lease_ref'),
-            origin_ref=j.get('origin_ref'),
-            lease_created=datetime.fromisoformat(j.get('lease_created')),
-            lease_expires=datetime.fromisoformat(j.get('lease_expires')),
-            lease_updated=datetime.fromisoformat(j.get('lease_updated')),
-        )
-
     @staticmethod
     def create_statement(engine: Engine):
         from sqlalchemy.schema import CreateTable
```
## app/util.py

```diff
@@ -26,29 +26,3 @@ def generate_key() -> "RsaKey":
     from Cryptodome.PublicKey.RSA import RsaKey

     return RSA.generate(bits=2048)
-
-
-def ha_replicate(logger: "logging.Logger", ha_replicate: str, ha_role: str, version: str, dls_url: str, dls_port: int, site_key_xid: str, instance_ref: str, origins: list, leases: list) -> bool:
-    from datetime import datetime
-    import httpx
-
-    if f'{dls_url}:{dls_port}' == ha_replicate:
-        logger.error(f'Failed to replicate this node ({ha_role}) to "{ha_replicate}": can\'t replicate to itself')
-        return False
-
-    data = {
-        'VERSION': str(version),
-        'HA_REPLICATE': f'{dls_url}:{dls_port}',
-        'SITE_KEY_XID': str(site_key_xid),
-        'INSTANCE_REF': str(instance_ref),
-        'origins': origins,
-        'leases': leases,
-        'sync_timestamp': datetime.utcnow().isoformat(),
-    }
-
-    r = httpx.put(f'https://{ha_replicate}/-/ha/replicate', json=data, verify=False)
-    if r.status_code == 202:
-        logger.info(f'Successfully replicated this node ({ha_role}) to "{ha_replicate}".')
-        return True
-    logger.error(f'Failed to replicate this node ({ha_role}) to "{ha_replicate}": {r.status_code} - {r.content}')
-    return False
```
## requirements.txt

```diff
@@ -1,9 +1,8 @@
-fastapi==0.92.0
-uvicorn[standard]==0.20.0
+fastapi==0.95.1
+uvicorn[standard]==0.22.0
 python-jose==3.3.0
 pycryptodome==3.17
 python-dateutil==2.8.2
-sqlalchemy==2.0.3
-markdown==3.4.1
-python-dotenv==0.21.1
-httpx==0.23.3
+sqlalchemy==2.0.12
+markdown==3.4.3
+python-dotenv==1.0.0
```
## version.env (deleted)

```diff
@@ -1 +0,0 @@
-VERSION=1.3.5
```