diff --git a/.PKGBUILD/PKGBUILD b/.PKGBUILD/PKGBUILD
index 883845e..9e8330f 100644
--- a/.PKGBUILD/PKGBUILD
+++ b/.PKGBUILD/PKGBUILD
@@ -11,6 +11,7 @@ license=('MIT')
 depends=('python' 'python-jose' 'python-starlette' 'python-httpx' 'python-fastapi' 'python-dotenv' 'python-dateutil' 'python-sqlalchemy' 'python-pycryptodome' 'python-jinja' 'uvicorn' 'python-markdown' 'openssl')
 provider=("$pkgname")
 install="$pkgname.install"
+backup=('etc/default/fastapi-dls')
 source=('git+file:///builds/oscar.krause/fastapi-dls' # https://gitea.publichub.eu/oscar.krause/fastapi-dls.git
         "$pkgname.default"
         "$pkgname.service"
@@ -21,8 +22,9 @@ sha256sums=('SKIP'
             '3dc60140c08122a8ec0e7fa7f0937eb8c1288058890ba09478420fc30ce9e30c')

 pkgver() {
+  echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > $srcdir/$pkgname/version.env
   source $srcdir/$pkgname/version.env
-  echo ${VERSION}
+  echo $VERSION
 }

 check() {
diff --git a/.UNRAID/FastAPI-DLS.xml b/.UNRAID/FastAPI-DLS.xml
new file mode 100644
index 0000000..f6bf52b
--- /dev/null
+++ b/.UNRAID/FastAPI-DLS.xml
@@ -0,0 +1,48 @@
+
+
+  FastAPI-DLS
+  collinwebdesigns/fastapi-dls:latest
+  https://hub.docker.com/r/collinwebdesigns/fastapi-dls
+  br0
+
+  sh
+  false
+
+
+  Source:
+https://git.collinwebdesigns.de/oscar.krause/fastapi-dls#docker
+
+Make sure you create these certificates before starting the container for the first time:
+```
+# Check https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/main/#docker for more information:
+WORKING_DIR=/mnt/user/appdata/fastapi-dls/cert
+mkdir -p $WORKING_DIR
+cd $WORKING_DIR
+# create instance private and public key for signing JWTs
+openssl genrsa -out $WORKING_DIR/instance.private.pem 2048
+openssl rsa -in $WORKING_DIR/instance.private.pem -outform PEM -pubout -out $WORKING_DIR/instance.public.pem
+# create ssl certificate for integrated webserver (uvicorn) - because clients rely on ssl
+openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout $WORKING_DIR/webserver.key -out $WORKING_DIR/webserver.crt
+```
+
+
+  https://[IP]:[PORT:443]
+
+  https://git.collinwebdesigns.de/uploads/-/system/project/avatar/106/png-transparent-nvidia-grid-logo-business-nvidia-electronics-text-trademark.png?width=64
+  --restart always
+
+
+  1679161568
+
+
+
+  443
+  /mnt/user/appdata/fastapi-dls/cert
+  443
+  /mnt/user/appdata/fastapi-dls/data
+
+
+  sqlite:////app/database/db.sqlite
+  true
+  90
+
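For Unraid users who prefer the command line, the template's values map roughly onto a plain `docker run` call. This is only a sketch under assumptions (container paths `/app/cert` and `/app/database` as in the project's docker-compose example, and a hypothetical `DLS_URL`); the XML template remains the supported path:

```shell
# Hypothetical docker run equivalent of the template values above (adjust IP/paths to your setup)
docker run -d --name fastapi-dls --restart always \
  --network br0 -p 443:443 \
  -e DLS_URL=192.168.0.123 -e DLS_PORT=443 \
  -e LEASE_EXPIRE_DAYS=90 \
  -e DATABASE=sqlite:////app/database/db.sqlite \
  -v /mnt/user/appdata/fastapi-dls/cert:/app/cert \
  -v /mnt/user/appdata/fastapi-dls/data:/app/database \
  collinwebdesigns/fastapi-dls:latest
```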
diff --git a/.UNRAID/setup_vgpu_license.sh b/.UNRAID/setup_vgpu_license.sh
new file mode 100644
index 0000000..c3c93a0
--- /dev/null
+++ b/.UNRAID/setup_vgpu_license.sh
@@ -0,0 +1,197 @@
+#!/bin/bash
+
+# This script automates the licensing of the vGPU guest driver
+# on Unraid boot. Set the Schedule to: "At Startup of Array".
+#
+# Relies on FastAPI-DLS for the licensing.
+# It assumes FeatureType=1 (vGPU), change it as you see fit in line <114>
+#
+# Requires `elfutils` to be installed in the system for `nvidia-gridd` to run
+# To install it:
+# 1) You might find it here: https://packages.slackware.com/ (choose the 64bit version of Slackware)
+# 2) Download the package and put it in /boot/extra to be installed on boot
+# 3) a. Reboot to install it, OR
+#    b. Run `upgradepkg --install-new /boot/extra/elfutils*`
+#       [i]: Make sure to have only one version of elfutils, otherwise you might run into issues
+
+# Sources and docs:
+# https://docs.nvidia.com/grid/15.0/grid-vgpu-user-guide/index.html#configuring-nls-licensed-client-on-linux
+#
+
+################################################
+# MAKE SURE YOU CHANGE THESE VARIABLES         #
+################################################
+
+###### CHANGE ME!
+# IP and PORT of FastAPI-DLS
+DLS_IP=192.168.0.123
+DLS_PORT=443
+# Token folder, must be on a filesystem that supports
+# linux filesystem permissions (eg: ext4, xfs, btrfs...)
+TOKEN_PATH=/mnt/user/system/nvidia
+PING=$(which ping)
+
+# Check if the license is already applied
+if [[ "$(nvidia-smi -q | grep "Expiry")" == *Expiry* ]]; then
+  echo " [i] Your vGPU Guest drivers are already licensed."
+  echo " [i] $(nvidia-smi -q | grep "Expiry")"
+  echo " [<] Exiting..."
+  exit 0
+fi
+
+# Check if the FastAPI-DLS server is reachable
+MAX_RETRIES=30
+for i in $(seq 1 $MAX_RETRIES); do
+  echo -ne "\r [>] Attempt $i to connect to $DLS_IP."
+  if ping -c 1 $DLS_IP >/dev/null 2>&1; then
+    echo -e "\n [*] Connection successful."
+    break
+  fi
+  if [ $i -eq $MAX_RETRIES ]; then
+    echo -e "\n [!] Connection failed after $MAX_RETRIES attempts."
+    echo -e "\n [<] Exiting..."
+    exit 1
+  fi
+  sleep 1
+done
+
+# Check if the token folder exists
+if [ -d "${TOKEN_PATH}" ]; then
+  echo " [*] Token Folder exists. Proceeding..."
+else
+  echo " [!] Token Folder does not exist or is not ready yet. Exiting."
+  echo " [!] Token Folder Specified: ${TOKEN_PATH}"
+  exit 1
+fi
+
+# Check if elfutils is installed, otherwise the nvidia-gridd service
+# won't start
+if [ "$(grep -R "elfutils" /var/log/packages/* | wc -l)" != 0 ]; then
+  echo " [*] Elfutils is installed, proceeding..."
+else
+  echo " [!] Elfutils is not installed, downloading and installing..."
+  echo " [!] Downloading elfutils to /boot/extra"
+  echo " [i] This script will download elfutils from the slackware64-15.0 repository."
+  echo " [i] If you have a different version of Unraid (6.11.5), you might want to"
+  echo " [i] download and install a suitable version manually from the slackware"
+  echo " [i] repository, and put it in /boot/extra to be installed on boot."
+  echo " [i] You may also install it by running: "
+  echo " [i]   upgradepkg --install-new /path/to/elfutils-*.txz"
+  echo ""
+  echo " [>] Downloading elfutils from the slackware64-15.0 repository:"
+  wget -q -nc --show-progress --progress=bar:force:noscroll -P /boot/extra https://slackware.uk/slackware/slackware64-15.0/slackware64/l/elfutils-0.186-x86_64-1.txz 2>/dev/null \
+    || { echo " [!] Error while downloading elfutils, please download it and install it manually."; exit 1; }
+  echo ""
+  if upgradepkg --install-new /boot/extra/elfutils-0.186-x86_64-1.txz
+  then
+    echo " [*] Elfutils installed and will be installed automatically on boot"
+  else
+    echo " [!] Error while installing, check logs..."
+    exit 1
+  fi
+fi
+
+echo " [~] Sleeping for 60 seconds before continuing..."
+echo " [i] The script is waiting until the boot process settles down."
+
+for i in {60..1}; do
+  printf "\r [~] %d seconds remaining" "$i"
+  sleep 1
+done
+
+printf "\n"
+
+create_token () {
+  echo " [>] Creating new token..."
+  if ${PING} -c1 ${DLS_IP} > /dev/null 2>&1
+  then
+    # curl --insecure -L -X GET https://${DLS_IP}:${DLS_PORT}/-/client-token -o ${TOKEN_PATH}/client_configuration_token_"$(date '+%d-%m-%Y-%H-%M-%S')".tok || { echo " [!] Could not get the token, please check the server."; exit 1;}
+    wget -q -nc -4c --no-check-certificate --show-progress --progress=bar:force:noscroll -O "${TOKEN_PATH}"/client_configuration_token_"$(date '+%d-%m-%Y-%H-%M-%S')".tok https://${DLS_IP}:${DLS_PORT}/-/client-token \
+      || { echo " [!] Could not get the token, please check the server."; exit 1;}
+    chmod 744 "${TOKEN_PATH}"/*.tok || { echo " [!] Could not chmod the tokens."; exit 1; }
+    echo ""
+    echo " [*] Token downloaded and stored in ${TOKEN_PATH}."
+  else
+    echo " [!] Could not get token, DLS server unavailable."
+    exit 1
+  fi
+}
+
+setup_run () {
+  echo " [>] Setting up gridd.conf"
+  cp /etc/nvidia/gridd.conf.template /etc/nvidia/gridd.conf || { echo " [!] Error configuring gridd.conf, did you install the drivers correctly?"; exit 1; }
+  sed -i 's/FeatureType=0/FeatureType=1/g' /etc/nvidia/gridd.conf
+  echo "ClientConfigTokenPath=${TOKEN_PATH}" >> /etc/nvidia/gridd.conf
+  echo " [>] Creating /var/lib/nvidia folder structure"
+  mkdir -p /var/lib/nvidia/GridLicensing
+  echo " [>] Starting nvidia-gridd"
+  if pgrep nvidia-gridd >/dev/null 2>&1; then
+    echo " [!] nvidia-gridd service is running. Closing."
+    sh /usr/lib/nvidia/sysv/nvidia-gridd stop
+    stop_exit_code=$?
+    if [ $stop_exit_code -eq 0 ]; then
+      echo " [*] nvidia-gridd service stopped successfully."
+    else
+      echo " [!] Error while stopping nvidia-gridd service."
+      exit 1
+    fi
+
+    # Kill the service if it does not close
+    if pgrep nvidia-gridd >/dev/null 2>&1; then
+      kill -9 "$(pgrep nvidia-gridd)" || {
+        echo " [!] Error while closing nvidia-gridd service"
+        exit 1
+      }
+    fi
+
+    echo " [*] Restarting nvidia-gridd service."
+    sh /usr/lib/nvidia/sysv/nvidia-gridd start
+
+    if pgrep nvidia-gridd >/dev/null 2>&1; then
+      echo " [*] Service started, PID: $(pgrep nvidia-gridd)"
+    else
+      echo -e " [!] Error while starting nvidia-gridd service. Use strace -f nvidia-gridd to debug.\n [i] Check if elfutils is installed.\n [i] strace is not installed by default."
+      exit 1
+    fi
+  else
+    sh /usr/lib/nvidia/sysv/nvidia-gridd start
+
+    if pgrep nvidia-gridd >/dev/null 2>&1; then
+      echo " [*] Service started, PID: $(pgrep nvidia-gridd)"
+    else
+      echo -e " [!] Error while starting nvidia-gridd service. Use strace -f nvidia-gridd to debug.\n [i] Check if elfutils is installed.\n [i] strace is not installed by default."
+      exit 1
+    fi
+  fi
+}
+
+for token in "${TOKEN_PATH}"/*; do
+  if [ "${token: -4}" == ".tok" ]
+  then
+    echo " [*] Tokens found..."
+    setup_run
+  else
+    echo " [!] No Tokens found..."
+    create_token
+    setup_run
+  fi
+done
+
+while true; do
+  if nvidia-smi -q | grep "Expiry" >/dev/null 2>&1; then
+    echo " [>] vGPU licensed!"
+    echo " [i] $(nvidia-smi -q | grep "Expiry")"
+    break
+  else
+    echo -ne " [>] vGPU not licensed yet... Checking again in 5 seconds\c"
+    for i in {1..5}; do
+      sleep 1
+      echo -ne ".\c"
+    done
+    echo -ne "\r\c"
+  fi
+done
+
+echo " [>] Done..."
+exit 0
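For orientation, once `setup_run` has finished, the two settings it touches in `/etc/nvidia/gridd.conf` should look roughly like the excerpt below (illustrative only; the rest of the file comes from the driver's own template):

```shell
# Illustrative check of what setup_run writes into gridd.conf
grep -E '^(FeatureType|ClientConfigTokenPath)=' /etc/nvidia/gridd.conf
# FeatureType=1
# ClientConfigTokenPath=/mnt/user/system/nvidia
```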
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 360fe9a..8298580 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,3 +1,10 @@
+include:
+  - template: Jobs/Code-Quality.gitlab-ci.yml
+  - template: Jobs/Secret-Detection.gitlab-ci.yml
+  - template: Jobs/SAST.gitlab-ci.yml
+  - template: Jobs/Container-Scanning.gitlab-ci.yml
+  - template: Jobs/Dependency-Scanning.gitlab-ci.yml
+
 cache:
   key: one-key-to-rule-them-all

@@ -14,26 +21,38 @@ build:docker:
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
   tags: [ docker ]
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env # COMMIT=`git rev-parse HEAD`
+    - docker buildx inspect
+    - docker buildx create --use
   script:
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${CI_BUILD_REF}
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${CI_BUILD_REF}
+    - IMAGE=$CI_REGISTRY/$CI_PROJECT_PATH/$CI_COMMIT_REF_NAME:$CI_COMMIT_SHA
+    - docker buildx build --progress=plain --platform linux/amd64,linux/arm64 --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE --push .
+    - docker buildx imagetools inspect $IMAGE
+    - echo "CS_IMAGE=$IMAGE" > container_scanning.env
+  artifacts:
+    reports:
+      dotenv: container_scanning.env

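If you want to reproduce the multi-arch build outside CI, the job above boils down to a few buildx calls. A rough local sketch, assuming Docker with buildx and QEMU emulation is available, and using placeholder tag/version values instead of the project registry:

```shell
# Local approximation of the build:docker job (tag and version values are placeholders)
docker buildx create --use
IMAGE=fastapi-dls:local
docker buildx build --platform linux/amd64,linux/arm64 \
  --build-arg VERSION=local --build-arg COMMIT=$(git rev-parse HEAD) \
  --tag $IMAGE .
# note: without --push (as in CI) or --load for a single platform, the result stays in the build cache
```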
 build:apt:
   image: debian:bookworm-slim
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+    - if: $CI_COMMIT_TAG
+      variables:
+        VERSION: $CI_COMMIT_REF_NAME
     - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
       changes:
         - app/**/*
         - .DEBIAN/**/*
+        - .gitlab-ci.yml
+      variables:
+        VERSION: "0.0.1"
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+      variables:
+        VERSION: "0.0.1"
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
-    - source version.env
+    - echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
     # install build dependencies
     - apt-get update -qq && apt-get install -qq -y build-essential
     # create build directory for .deb sources
@@ -54,7 +73,7 @@ build:apt:
     # cd into "build/"
     - cd build/
   script:
-    # set version based on value in "$VERSION" (which is set above from version.env)
+    # set version based on value in "$CI_COMMIT_REF_NAME"
     - sed -i -E 's/(Version\:\s)0.0/\1'"$VERSION"'/g' DEBIAN/control
     # build
     - dpkg -b . build.deb
@@ -69,14 +88,21 @@ build:pacman:
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+    - if: $CI_COMMIT_TAG
+      variables:
+        VERSION: $CI_COMMIT_REF_NAME
     - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
       changes:
         - app/**/*
         - .PKGBUILD/**/*
+        - .gitlab-ci.yml
+      variables:
+        VERSION: "0.0.1"
     - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+      variables:
+        VERSION: "0.0.1"
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
+    #- echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
     # install build dependencies
     - pacman -Syu --noconfirm git
     # create a build-user because "makepkg" don't like root user
@@ -91,7 +117,7 @@ build:pacman:
     # download dependencies
     - source PKGBUILD && pacman -Syu --noconfirm --needed --asdeps "${makedepends[@]}" "${depends[@]}"
     # build
-    - sudo -u build makepkg -s
+    - sudo --preserve-env -u build makepkg -s
   artifacts:
     expire_in: 1 week
     paths:
@@ -102,6 +128,7 @@ test:
   stage: test
   rules:
     - if: $CI_COMMIT_BRANCH
+    - if: $CI_COMMIT_TAG
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
   variables:
     DATABASE: sqlite:///../app/db.sqlite
@@ -113,10 +140,11 @@ test:
     - openssl rsa -in app/cert/instance.private.pem -outform PEM -pubout -out app/cert/instance.public.pem
     - cd test
   script:
-    - pytest main.py
+    - python -m pytest main.py --junitxml=report.xml
   artifacts:
     reports:
       dotenv: version.env
+      junit: ['**/report.xml']

 .test:linux:
   stage: test
@@ -180,42 +208,86 @@ test:archlinux:
     - pacman -Sy
     - pacman -U --noconfirm *.pkg.tar.zst

+code_quality:
+  rules:
+    - if: $CODE_QUALITY_DISABLED
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+
+secret_detection:
+  rules:
+    - if: $SECRET_DETECTION_DISABLED
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+  before_script:
+    - git config --global --add safe.directory $CI_PROJECT_DIR
+
+semgrep-sast:
+  rules:
+    - if: $SAST_DISABLED
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+
+test_coverage:
+  extends: test
+  allow_failure: true
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+  script:
+    - pip install pytest pytest-cov
+    - coverage run -m pytest main.py
+    - coverage report
+    - coverage xml
+  coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
+  artifacts:
+    reports:
+      coverage_report:
+        coverage_format: cobertura
+        path: '**/coverage.xml'
+
+container_scanning:
+  dependencies: [ build:docker ]
+  rules:
+    - if: $CONTAINER_SCANNING_DISABLED
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+
+gemnasium-python-dependency_scanning:
+  rules:
+    - if: $DEPENDENCY_SCANNING_DISABLED
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+
 .deploy:
   rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
     - if: $CI_COMMIT_TAG
-      when: never

 deploy:docker:
   extends: .deploy
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   before_script:
-    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
-    - source version.env
-    - echo "Building docker image for commit ${COMMIT} with version ${VERSION}"
+    - echo "Building docker image for commit $CI_COMMIT_SHA with version $CI_COMMIT_REF_NAME"
   script:
-    - echo "GitLab-Registry"
+    - echo "========== GitLab-Registry =========="
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${VERSION}
-    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:latest
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${VERSION}
-    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:latest
-    - echo "Docker-Hub"
+    - IMAGE=$CI_REGISTRY/$CI_PROJECT_PATH/$CI_COMMIT_REF_NAME
+    - docker build . --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:$CI_COMMIT_REF_NAME
+    - docker build . --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:latest
+    - docker push $IMAGE:$CI_COMMIT_REF_NAME
+    - docker push $IMAGE:latest
+    - echo "========== Docker-Hub =========="
     - docker login -u $PUBLIC_REGISTRY_USER -p $PUBLIC_REGISTRY_TOKEN
-    - docker build . --tag $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:${VERSION}
-    - docker build . --tag $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:latest
-    - docker push $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:${VERSION}
-    - docker push $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:latest
+    - IMAGE=$PUBLIC_REGISTRY_USER/$CI_PROJECT_NAME
+    - docker build . --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:$CI_COMMIT_REF_NAME
+    - docker build . --build-arg VERSION=$CI_COMMIT_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:latest
+    - docker push $IMAGE:$CI_COMMIT_REF_NAME
+    - docker push $IMAGE:latest

 deploy:apt:
   # doc: https://git.collinwebdesigns.de/help/user/packages/debian_repository/index.md#install-a-package
   extends: .deploy
   image: debian:bookworm-slim
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   needs:
     - job: build:apt
       artifacts: true
@@ -255,8 +327,6 @@ deploy:pacman:
   extends: .deploy
   image: archlinux:base-devel
   stage: deploy
-  rules:
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   needs:
     - job: build:pacman
       artifacts: true
@@ -264,9 +334,9 @@
     - source .PKGBUILD/PKGBUILD
     - source version.env
     # fastapi-dls-1.0-1-any.pkg.tar.zst
-    - BUILD_NAME=${pkgname}-${VERSION}-${pkgrel}-any.pkg.tar.zst
+    - BUILD_NAME=${pkgname}-${CI_COMMIT_REF_NAME}-${pkgrel}-any.pkg.tar.zst
     - PACKAGE_NAME=${pkgname}
-    - PACKAGE_VERSION=${VERSION}
+    - PACKAGE_VERSION=${CI_COMMIT_REF_NAME}
     - PACKAGE_ARCH=any
     - EXPORT_NAME=${BUILD_NAME}
     - 'echo "PACKAGE_NAME: ${PACKAGE_NAME}"'
@@ -278,19 +348,15 @@ release:
   image: registry.gitlab.com/gitlab-org/release-cli:latest
   stage: .post
-  needs:
-    - job: test
-      artifacts: true
+  needs: [ test ]
   rules:
     - if: $CI_COMMIT_TAG
-      when: never
-    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
   script:
-    - echo "Running release-job for $VERSION"
+    - echo "Running release-job for $CI_COMMIT_TAG"
   release:
-    name: $CI_PROJECT_TITLE $VERSION
-    description: Release of $CI_PROJECT_TITLE version $VERSION
-    tag_name: $VERSION
+    name: $CI_PROJECT_TITLE $CI_COMMIT_TAG
+    description: Release of $CI_PROJECT_TITLE version $CI_COMMIT_TAG
+    tag_name: $CI_COMMIT_TAG
     ref: $CI_COMMIT_SHA
     assets:
       links:
diff --git a/Dockerfile b/Dockerfile
index e92f5dc..99c76bd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,9 @@
 FROM python:3.11-alpine

+ARG VERSION
+ARG COMMIT=""
+RUN echo -e "VERSION=$VERSION\nCOMMIT=$COMMIT" > /version.env
+
 COPY requirements.txt /tmp/requirements.txt

 RUN apk update \
@@ -11,7 +15,6 @@ RUN apk update \
   && apk del build-deps

 COPY app /app
-COPY version.env /version.env
 COPY README.md /README.md

 HEALTHCHECK --start-period=30s --interval=10s --timeout=5s --retries=3 CMD curl --insecure --fail https://localhost/-/health || exit 1
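Since `version.env` is no longer copied into the image, the version metadata now has to come in via build args. A quick sanity check that the values were baked in (the tag and values below are just examples):

```shell
# Build with example metadata and confirm it landed in /version.env
docker build . --build-arg VERSION=1.3.4 --build-arg COMMIT=deadbeef --tag fastapi-dls:smoke
docker run --rm --entrypoint cat fastapi-dls:smoke /version.env
# VERSION=1.3.4
# COMMIT=deadbeef
```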
diff --git a/README.md b/README.md
index bb590f0..4d78b1e 100644
--- a/README.md
+++ b/README.md
@@ -9,11 +9,11 @@ Only the clients need a connection to this service on configured port.

 **Official Links**

-- https://git.collinwebdesigns.de/oscar.krause/fastapi-dls
-- https://gitea.publichub.eu/oscar.krause/fastapi-dls
-- Docker Image `collinwebdesigns/fastapi-dls:latest`
+- https://git.collinwebdesigns.de/oscar.krause/fastapi-dls (Private Git)
+- https://gitea.publichub.eu/oscar.krause/fastapi-dls (Public Git)
+- https://hub.docker.com/r/collinwebdesigns/fastapi-dls (Docker-Hub `collinwebdesigns/fastapi-dls:latest`)

-*All other repositories are forks!*
+*All other repositories are forks! (which is not bad - just for information and bug reports)*

 ---

@@ -21,14 +21,14 @@ Only the clients need a connection to this service on configured port.

 # Setup (Service)

-**System requirements**:
+**System requirements**

 - 256mb ram
 - 4gb hdd

 Tested with Ubuntu 22.10 (from Proxmox templates), actually its consuming 100mb ram and 750mb hdd.

-**Prepare your system**:
+**Prepare your system**

 - Make sure your timezone is set correct on you fastapi-dls server and your client

@@ -39,6 +39,8 @@ Docker-Images are available here:
 - [Docker-Hub](https://hub.docker.com/repository/docker/collinwebdesigns/fastapi-dls): `collinwebdesigns/fastapi-dls:latest`
 - [GitLab-Registry](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/container_registry): `registry.git.collinwebdesigns.de/oscar.krause/fastapi-dls/main:latest`

+The images include database drivers for `postgres`, `mysql`, `mariadb` and `sqlite`.
+
 **Run this on the Docker-Host**

 ```shell
@@ -69,10 +71,12 @@ Goto [`docker-compose.yml`](docker-compose.yml) for more advanced example (with
 version: '3.9'

 x-dls-variables: &dls-variables
+  TZ: Europe/Berlin # REQUIRED, set your timezone correctly on fastapi-dls AND YOUR CLIENTS !!!
   DLS_URL: localhost # REQUIRED, change to your ip or hostname
   DLS_PORT: 443
-  LEASE_EXPIRE_DAYS: 90
+  LEASE_EXPIRE_DAYS: 90 # 90 days is maximum
   DATABASE: sqlite:////app/database/db.sqlite
+  DEBUG: false

 services:
   dls:
@@ -85,7 +89,12 @@ services:
     volumes:
       - /opt/docker/fastapi-dls/cert:/app/cert
       - dls-db:/app/database
-
+    logging:  # optional, for those who do not need logs
+      driver: "json-file"
+      options:
+        max-file: 5
+        max-size: 10m
+
 volumes:
   dls-db:
 ```
@@ -94,6 +103,8 @@ volumes:

 Tested on `Debian 11 (bullseye)`, Ubuntu may also work.

+**Make sure you are logged in as root.**
+
 **Install requirements**

 ```shell
@@ -118,7 +129,7 @@ chown -R www-data:www-data $WORKING_DIR

 ```shell
 WORKING_DIR=/opt/fastapi-dls/app/cert
-mkdir $WORKING_DIR
+mkdir -p $WORKING_DIR
 cd $WORKING_DIR
 # create instance private and public key for singing JWT's
 openssl genrsa -out $WORKING_DIR/instance.private.pem 2048
@@ -134,12 +145,15 @@ This is only to test whether the service starts successfully.

 ```shell
 cd /opt/fastapi-dls/app
+sudo -u www-data /opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app
+# or
 su - www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
 ```

 **Create config file**

 ```shell
+mkdir /etc/fastapi-dls
 cat <<EOF >/etc/fastapi-dls/env
 DLS_URL=127.0.0.1
 DLS_PORT=443
@@ -184,6 +198,110 @@ EOF

 Now you have to run `systemctl daemon-reload`. After that you can start service with `systemctl start fastapi-dls.service` and enable autostart with `systemctl enable fastapi-dls.service`.

+## openSUSE Leap (manual method using `git clone` and python virtual environment)
+
+Tested on `openSUSE Leap 15.4`, openSUSE Tumbleweed may also work.
+
+**Install requirements**
+
+```shell
+zypper in -y python310 python3-virtualenv python3-pip
+```
+
+**Install FastAPI-DLS**
+
+```shell
+BASE_DIR=/opt/fastapi-dls
+SERVICE_USER=dls
+mkdir -p ${BASE_DIR}
+cd ${BASE_DIR}
+git clone https://git.collinwebdesigns.de/oscar.krause/fastapi-dls .
+python3.10 -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt
+deactivate
+useradd -r ${SERVICE_USER} -M -d /opt/fastapi-dls
+chown -R ${SERVICE_USER} ${BASE_DIR}
+```
+
+**Create keypair and webserver certificate**
+
+```shell
+CERT_DIR=${BASE_DIR}/app/cert
+SERVICE_USER=dls
+mkdir ${CERT_DIR}
+cd ${CERT_DIR}
+# create instance private and public key for signing JWTs
+openssl genrsa -out ${CERT_DIR}/instance.private.pem 2048
+openssl rsa -in ${CERT_DIR}/instance.private.pem -outform PEM -pubout -out ${CERT_DIR}/instance.public.pem
+# create ssl certificate for integrated webserver (uvicorn) - because clients rely on ssl
+openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout ${CERT_DIR}/webserver.key -out ${CERT_DIR}/webserver.crt
+chown -R ${SERVICE_USER} ${CERT_DIR}
+```
+
+**Test Service**
+
+This is only to test whether the service starts successfully.
+
+```shell
+BASE_DIR=/opt/fastapi-dls
+SERVICE_USER=dls
+cd ${BASE_DIR}
+sudo -u ${SERVICE_USER} ${BASE_DIR}/venv/bin/uvicorn main:app --app-dir=${BASE_DIR}/app
+# or
+su - ${SERVICE_USER} -c "${BASE_DIR}/venv/bin/uvicorn main:app --app-dir=${BASE_DIR}/app"
+```
+
+**Create config file**
+
+```shell
+BASE_DIR=/opt/fastapi-dls
+cat <<EOF >/etc/fastapi-dls/env
+# Adjust DLS_URL as needed (accessing from LAN won't work with 127.0.0.1)
+DLS_URL=127.0.0.1
+DLS_PORT=443
+LEASE_EXPIRE_DAYS=90
+DATABASE=sqlite:///${BASE_DIR}/app/db.sqlite
+
+EOF
+```
+
+**Create service**
+
+```shell
+BASE_DIR=/opt/fastapi-dls
+SERVICE_USER=dls
+cat <<EOF >/etc/systemd/system/fastapi-dls.service
+[Unit]
+Description=Service for fastapi-dls vGPU licensing service
+After=network.target
+
+[Service]
+User=${SERVICE_USER}
+AmbientCapabilities=CAP_NET_BIND_SERVICE
+WorkingDirectory=${BASE_DIR}/app
+EnvironmentFile=/etc/fastapi-dls/env
+ExecStart=${BASE_DIR}/venv/bin/uvicorn main:app \\
+  --env-file /etc/fastapi-dls/env \\
+  --host \$DLS_URL --port \$DLS_PORT \\
+  --app-dir ${BASE_DIR}/app \\
+  --ssl-keyfile ${BASE_DIR}/app/cert/webserver.key \\
+  --ssl-certfile ${BASE_DIR}/app/cert/webserver.crt \\
+  --proxy-headers
+Restart=always
+KillSignal=SIGQUIT
+Type=simple
+NotifyAccess=all
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+Now you have to run `systemctl daemon-reload`. After that you can start the service
+with `systemctl start fastapi-dls.service` and enable autostart with `systemctl enable fastapi-dls.service`.
+
 ## Debian/Ubuntu (using `dpkg`)

 Packages are available here:
@@ -192,7 +310,7 @@ Packages are available here:

 Successful tested with:

-- Debian 12 (Bookworm) (works but not recommended because it is currently in *testing* state)
+- Debian 12 (Bookworm)
 - Ubuntu 22.10 (Kinetic Kudu)

 Not working with:
@@ -236,6 +354,19 @@ pacman -U --noconfirm fastapi-dls.pkg.tar.zst

 Start with `systemctl start fastapi-dls.service` and enable autostart with `systemctl enable fastapi-dls.service`.

+## unRAID
+
+1. Download [this xml file](.UNRAID/FastAPI-DLS.xml)
+2. Put it in /boot/config/plugins/dockerMan/templates-user/
+3. Go to the Docker page, scroll down to `Add Container`, click on the Template list and choose `FastAPI-DLS`
+4. Open terminal/ssh, follow the instructions in the overview description
+5. Setup your container `IP`, `Port`, `DLS_URL` and `DLS_PORT`
+6. Apply and let it boot up
+
+*Unraid users must also make sure they have Host access to custom networks enabled if unraid is the vgpu guest.*
+
+Continue [here](#unraid-guest) for docker guest setup.
+
 ## Let's Encrypt Certificate (optional)

 If you're using installation via docker, you can use `traefik`. Please refer to their documentation.
@@ -274,9 +405,9 @@ After first success you have to replace `--issue` with `--renew`.
   every 4.8 hours. If network connectivity is lost, the loss of connectivity is detected during license renewal and the
   client has 19.2 hours in which to re-establish connectivity before its license expires.

-\*2 Always use `https`, since guest-drivers only support secure connections!
+\*3 Always use `https`, since guest-drivers only support secure connections!

-\*3 If you recreate instance keys you need to **recreate client-token for each guest**!
+\*4 If you recreate instance keys you need to **recreate client-token for each guest**!

 # Setup (Client)

@@ -284,9 +415,15 @@ client has 19.2 hours in which to re-establish connectivity before its license e
 Successfully tested with this package versions:

-- `14.3` (Linux-Host: `510.108.03`, Linux-Guest: `510.108.03`, Windows-Guest: `513.91`)
-- `14.4` (Linux-Host: `510.108.03`, Linux-Guest: `510.108.03`, Windows-Guest: `514.08`)
-- `15.0` (Linux-Host: `525.60.12`, Linux-Guest: `525.60.13`, Windows-Guest: `527.41`)
+| vGPU Software | vGPU Manager | Linux Driver | Windows Driver | Release Date  |
+|---------------|--------------|--------------|----------------|---------------|
+| `15.2`        | `525.105.14` | `525.105.17` | `528.89`       | March 2023    |
+| `15.1`        | `525.85.07`  | `525.85.05`  | `528.24`       | January 2023  |
+| `15.0`        | `525.60.12`  | `525.60.13`  | `527.41`       | December 2022 |
+| `14.4`        | `510.108.03` | `510.108.03` | `514.08`       | December 2022 |
+| `14.3`        | `510.108.03` | `510.108.03` | `513.91`       | November 2022 |
+
+- https://docs.nvidia.com/grid/index.html

 ## Linux

@@ -338,7 +475,7 @@ Restart-Service NVDisplay.ContainerLocalSystem

 Check licensing status:

 ```shell
-& 'C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe' -q | Select-String "License"
+& 'nvidia-smi' -q | Select-String "License"
 ```

 Output should be something like:

@@ -350,6 +487,19 @@ vGPU Software Licensed Product

 Done. For more information check [troubleshoot section](#troubleshoot).

+## unRAID Guest
+
+1. Make sure you create a folder in a linux filesystem (BTRFS/XFS/EXT4...), I recommend `/mnt/user/system/nvidia` (this is where docker and libvirt preferences are saved, so it's a good place to have that)
+2. Edit the script to set your `DLS_IP`, `DLS_PORT` and `TOKEN_PATH` properly
+3. Install the `User Scripts` plugin from *Community Apps* (the Apps page, or google "User Scripts Unraid" if you're not using CA)
+4. Go to `Settings > User Scripts > Add New Script`
+5. Give it a name (preferably without spaces)
+6. Click on the *gear icon* to the left of the script name, then edit the script
+7. Paste the script and save
+8. Set the schedule to `At First Array Start Only`
+9. Click on Apply
+
+
 # Endpoints

 ### `GET /`

@@ -368,10 +518,6 @@ Shows current runtime environment variables and their values.

 HTML rendered README.md.

-### `GET /-/docs`, `GET /-/redoc`
-
-OpenAPI specifications rendered from `GET /-/openapi.json`.
-
 ### `GET /-/manage`

 Shows a very basic UI to delete origins or leases.
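The endpoints above are plain HTTPS routes, so a quick smoke test from the host only needs curl; `--insecure` is required with the self-signed webserver certificate. The expected body for the health route comes straight from the service:

```shell
# Smoke test of a running instance (same call the Docker healthcheck uses)
curl --insecure https://localhost/-/health
# {"status":"up"}
```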
@@ -556,5 +702,8 @@ The error message can safely be ignored (since we have no license limitation :P)

 Thanks to vGPU community and all who uses this project and report bugs.

-Special thanks to @samicrusader who created build file for ArchLinux.
+Special thanks to
+- @samicrusader who created build file for ArchLinux
+- @cyrus who wrote the section for openSUSE
+- @midi who wrote the section for unRAID
diff --git a/ROADMAP.md b/ROADMAP.md
new file mode 100644
index 0000000..60e0a87
--- /dev/null
+++ b/ROADMAP.md
@@ -0,0 +1,27 @@
+# Roadmap
+
+I am planning to implement the following features in the future.
+
+
+## HA - High Availability
+
+Support Failover-Mode (secondary ip address) as in official DLS.
+
+**Note**: There is no Load-Balancing / Round-Robin HA mode supported! If you want to use that, consider using
+Docker-Swarm with a shared/clustered database (e.g. postgres).
+
+*See [ha branch](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/ha) for current status.*
+
+
+## UI - User Interface
+
+Add a user interface to manage origins and leases.
+
+*See [ui branch](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/ui) for current status.*
+
+
+## Config Database
+
+Instead of using environment variables, configuration files and manually created certificates, store configs and
+certificates in the database (like origins and leases). Also, a startup assistant should be provided to prefill
+required attributes and create instance certificates. This is more user-friendly and should improve first setup.
diff --git a/app/main.py b/app/main.py
index 5a31820..93c6212 100644
--- a/app/main.py
+++ b/app/main.py
@@ -30,7 +30,7 @@ TZ = datetime.now().astimezone().tzinfo

 VERSION, COMMIT, DEBUG = env('VERSION', 'unknown'), env('COMMIT', 'unknown'), bool(env('DEBUG', False))

-config = dict(openapi_url='/-/openapi.json', docs_url='/-/docs', redoc_url='/-/redoc')
+config = dict(openapi_url=None, docs_url=None, redoc_url=None)  # dict(openapi_url='/-/openapi.json', docs_url='/-/docs', redoc_url='/-/redoc')
 app = FastAPI(title='FastAPI-DLS', description='Minimal Delegated License Service (DLS).', version=VERSION, **config)
 app.mount('/static', StaticFiles(directory=join(dirname(__file__), 'static'), html=True), name='static'),
 templates = Jinja2Templates(directory=join(dirname(__file__), 'templates'))
@@ -87,7 +87,7 @@ async def _index(request: Request):


 @app.get('/-/health', summary='* Health')
-async def _health(request: Request):
+async def _health():
     return JSONr({'status': 'up'})


@@ -182,6 +182,12 @@ async def _leases(request: Request, origin: bool = False):
     return JSONr(response)


+@app.delete('/-/leases/expired', summary='* Leases')
+async def _lease_delete_expired(request: Request):
+    Lease.delete_expired(db)
+    return Response(status_code=201)
+
+
 @app.delete('/-/lease/{lease_ref}', summary='* Lease')
 async def _lease_delete(request: Request, lease_ref: str):
     if Lease.delete(db, lease_ref) == 1:
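The new cleanup route added above can be exercised manually; per the handler it responds with HTTP 201 once expired leases have been removed:

```shell
# Remove all expired leases via the new endpoint
curl --insecure -X DELETE https://localhost/-/leases/expired
```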
diff --git a/app/orm.py b/app/orm.py
index a6e5974..25e641b 100644
--- a/app/orm.py
+++ b/app/orm.py
@@ -1,10 +1,9 @@
 from datetime import datetime, timedelta, timezone
 from dateutil.relativedelta import relativedelta

-from sqlalchemy import Column, VARCHAR, CHAR, ForeignKey, DATETIME, update, and_, inspect
-from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import Column, VARCHAR, CHAR, ForeignKey, DATETIME, update, and_, inspect, text
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import sessionmaker
+from sqlalchemy.orm import sessionmaker, declarative_base

 Base = declarative_base()
@@ -161,6 +160,14 @@ class Lease(Base):
         session.close()
         return deletions

+    @staticmethod
+    def delete_expired(engine: Engine) -> int:
+        session = sessionmaker(bind=engine)()
+        deletions = session.query(Lease).filter(Lease.lease_expires <= datetime.utcnow()).delete()
+        session.commit()
+        session.close()
+        return deletions
+
     @staticmethod
     def calculate_renewal(renewal_period: float, delta: timedelta) -> timedelta:
         """
@@ -190,7 +197,7 @@ def init(engine: Engine):
     session = sessionmaker(bind=engine)()
     for table in tables:
         if not db.dialect.has_table(engine.connect(), table.__tablename__):
-            session.execute(str(table.create_statement(engine)))
+            session.execute(text(str(table.create_statement(engine))))
     session.commit()
     session.close()
diff --git a/docker-compose.yml b/docker-compose.yml
index b52a58a..3f02cdc 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -14,6 +14,7 @@ services:
     environment:
       <<: *dls-variables
     volumes:
+      - /etc/timezone:/etc/timezone:ro
      - /opt/docker/fastapi-dls/cert:/app/cert  # instance.private.pem, instance.public.pem
      - db:/app/database
     entrypoint: ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--app-dir", "/app", "--proxy-headers"]
@@ -30,6 +31,7 @@ services:
       - "80:80"  # for "/leasing/v1/lessor/shutdown" used by windows guests, can't be changed!
       - "443:443"  # first part must match "DLS_PORT"
     volumes:
+      - /etc/timezone:/etc/timezone:ro
       - /opt/docker/fastapi-dls/cert:/opt/cert
     healthcheck:
       test: ["CMD", "curl", "--insecure", "--fail", "https://localhost/-/health"]
diff --git a/requirements.txt b/requirements.txt
index 086a68a..74eee56 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,9 @@
-fastapi==0.89.1
-uvicorn[standard]==0.20.0
+fastapi==0.97.0
+uvicorn[standard]==0.22.0
 python-jose==3.3.0
-pycryptodome==3.16.0
+pycryptodome==3.18.0
 python-dateutil==2.8.2
-sqlalchemy==1.4.46
-markdown==3.4.1
-python-dotenv==0.21.0
+sqlalchemy==2.0.16
+markdown==3.4.3
+python-dotenv==1.0.0
 jinja2==3.1.2
diff --git a/version.env b/version.env
deleted file mode 100644
index 93176fc..0000000
--- a/version.env
+++ /dev/null
@@ -1 +0,0 @@
-VERSION=1.3.3