Forked from oscar.krause/fastapi-dls

Compare commits: main...set-depend (1 commit)

Commit: cb5dc6a70d
.PKGBUILD/PKGBUILD

@@ -22,9 +22,8 @@ sha256sums=('SKIP'
             '3dc60140c08122a8ec0e7fa7f0937eb8c1288058890ba09478420fc30ce9e30c')

 pkgver() {
-  echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > $srcdir/$pkgname/version.env
   source $srcdir/$pkgname/version.env
-  echo $VERSION
+  echo ${VERSION}
 }

 check() {
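With this change the package version is no longer generated during the build; `pkgver()` only reads the committed `version.env` (added as a new file at the bottom of this compare). A minimal sketch of what that amounts to, run from a repository checkout — the path layout is an assumption, not part of the diff:

```shell
# sketch: what the new pkgver() effectively does, assuming version.env is
# committed next to the PKGBUILD on this branch (VERSION=1.3.5)
source version.env
echo "${VERSION}"   # makepkg uses this output as the package version
```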
.UNRAID/FastAPI-DLS.xml (deleted)

@@ -1,48 +0,0 @@
The unRAID container template is removed on this branch. Its full content was:

<?xml version="1.0"?>
<Container version="2">
  <Name>FastAPI-DLS</Name>
  <Repository>collinwebdesigns/fastapi-dls:latest</Repository>
  <Registry>https://hub.docker.com/r/collinwebdesigns/fastapi-dls</Registry>
  <Network>br0</Network>
  <MyIP></MyIP>
  <Shell>sh</Shell>
  <Privileged>false</Privileged>
  <Support/>
  <Project/>
  <Overview>Source:
https://git.collinwebdesigns.de/oscar.krause/fastapi-dls#docker

Make sure you create these certificates before starting the container for the first time:
```
# Check https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/main/#docker for more information:
WORKING_DIR=/mnt/user/appdata/fastapi-dls/cert
mkdir -p $WORKING_DIR
cd $WORKING_DIR
# create instance private and public key for singing JWT's
openssl genrsa -out $WORKING_DIR/instance.private.pem 2048
openssl rsa -in $WORKING_DIR/instance.private.pem -outform PEM -pubout -out $WORKING_DIR/instance.public.pem
# create ssl certificate for integrated webserver (uvicorn) - because clients rely on ssl
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout $WORKING_DIR/webserver.key -out $WORKING_DIR/webserver.crt
```
</Overview>
  <Category/>
  <WebUI>https://[IP]:[PORT:443]</WebUI>
  <TemplateURL/>
  <Icon>https://git.collinwebdesigns.de/uploads/-/system/project/avatar/106/png-transparent-nvidia-grid-logo-business-nvidia-electronics-text-trademark.png?width=64</Icon>
  <ExtraParams>--restart always</ExtraParams>
  <PostArgs/>
  <CPUset/>
  <DateInstalled>1679161568</DateInstalled>
  <DonateText/>
  <DonateLink/>
  <Requires/>
  <Config Name="HTTPS Port" Target="" Default="443" Mode="tcp" Description="Same as DLS Port below." Type="Port" Display="always-hide" Required="true" Mask="false">443</Config>
  <Config Name="App Cert" Target="/app/cert" Default="/mnt/user/appdata/fastapi-dls/cert" Mode="rw" Description="[REQUIRED] Read the description above to make this folder. You do not need to change the path." Type="Path" Display="always-hide" Required="true" Mask="false">/mnt/user/appdata/fastapi-dls/cert</Config>
  <Config Name="DLS Port" Target="DSL_PORT" Default="443" Mode="" Description="Choose port you want to use. Make sure to change the HTTPS port above to match it." Type="Variable" Display="always-hide" Required="true" Mask="false">443</Config>
  <Config Name="App database" Target="/app/database" Default="/mnt/user/appdata/fastapi-dls/data" Mode="rw" Description="[REQUIRED] Read the description above to make this folder. You do not need to change the path." Type="Path" Display="always-hide" Required="true" Mask="false">/mnt/user/appdata/fastapi-dls/data</Config>
  <Config Name="DSL IP" Target="DLS_URL" Default="localhost" Mode="" Description="Put your container's IP (or your host's IP if it's shared)." Type="Variable" Display="always-hide" Required="true" Mask="false"></Config>
  <Config Name="Time Zone" Target="TZ" Default="" Mode="" Description="Format example: America/New_York. MUST MATCH YOUR CURRENT TIMEZONE AND THE GUEST VMS TIMEZONE! Otherwise you'll get into issues, read the guide above." Type="Variable" Display="always-hide" Required="true" Mask="false"></Config>
  <Config Name="Database" Target="DATABASE" Default="sqlite:////app/database/db.sqlite" Mode="" Description="Set to sqlite:////app/database/db.sqlite" Type="Variable" Display="advanced-hide" Required="true" Mask="false">sqlite:////app/database/db.sqlite</Config>
  <Config Name="Debug" Target="DEBUG" Default="true" Mode="" Description="true to enable debugging, false to disable them." Type="Variable" Display="advanced-hide" Required="false" Mask="false">true</Config>
  <Config Name="Lease" Target="LEASE_EXPIRE_DAYS" Default="90" Mode="" Description="90 days is the maximum value." Type="Variable" Display="advanced" Required="false" Mask="false">90</Config>
</Container>
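For orientation, the removed template corresponds roughly to a plain `docker run` invocation. This is a sketch, not a command from the repository: the flag-to-Config mapping is an assumption, the variable names follow the README, and the IP/timezone values are placeholders:

```shell
# approximate docker run equivalent of the removed unRAID template (assumed mapping)
docker run -d --restart always --name FastAPI-DLS \
  --network br0 \
  -p 443:443 \
  -v /mnt/user/appdata/fastapi-dls/cert:/app/cert \
  -v /mnt/user/appdata/fastapi-dls/data:/app/database \
  -e DLS_URL=<container-ip> \
  -e DLS_PORT=443 \
  -e TZ=<your-timezone> \
  -e DATABASE=sqlite:////app/database/db.sqlite \
  -e DEBUG=true \
  -e LEASE_EXPIRE_DAYS=90 \
  collinwebdesigns/fastapi-dls:latest
```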
unRAID licensing user script (deleted; this is the script referenced by the removed "unRAID Guest" section of the README)

@@ -1,197 +0,0 @@
The script is removed on this branch. Its full content was:

#!/bin/bash

# This script automates the licensing of the vGPU guest driver
# on Unraid boot. Set the Schedule to: "At Startup of Array".
#
# Relies on FastAPI-DLS for the licensing.
# It assumes FeatureType=1 (vGPU), change it as you see fit in line <114>
#
# Requires `eflutils` to be installed in the system for `nvidia-gridd` to run
# To Install it:
# 1) You might find it here: https://packages.slackware.com/ (choose the 64bit version of Slackware)
# 2) Download the package and put it in /boot/extra to be installed on boot
# 3) a. Reboot to install it, OR
#    b. Run `upgradepkg --install-new /boot/extra/elfutils*`
# [i]: Make sure to have only one version of elfutils, otherwise you might run into issues

# Sources and docs:
# https://docs.nvidia.com/grid/15.0/grid-vgpu-user-guide/index.html#configuring-nls-licensed-client-on-linux
#

################################################
# MAKE SURE YOU CHANGE THESE VARIABLES          #
################################################

###### CHANGE ME!
# IP and PORT of FastAPI-DLS
DLS_IP=192.168.0.123
DLS_PORT=443
# Token folder, must be on a filesystem that supports
# linux filesystem permissions (eg: ext4,xfs,btrfs...)
TOKEN_PATH=/mnt/user/system/nvidia
PING=$(which ping)

# Check if the License is applied
if [[ "$(nvidia-smi -q | grep "Expiry")" == *Expiry* ]]; then
  echo " [i] Your vGPU Guest drivers are already licensed."
  echo " [i] $(nvidia-smi -q | grep "Expiry")"
  echo " [<] Exiting..."
  exit 0
fi

# Check if the FastAPI-DLS server is reachable
# Check if the License is applied
MAX_RETRIES=30
for i in $(seq 1 $MAX_RETRIES); do
  echo -ne "\r [>] Attempt $i to connect to $DLS_IP."
  if ping -c 1 $DLS_IP >/dev/null 2>&1; then
    echo -e "\n [*] Connection successful."
    break
  fi
  if [ $i -eq $MAX_RETRIES ]; then
    echo -e "\n [!] Connection failed after $MAX_RETRIES attempts."
    echo -e "\n [<] Exiting..."
    exit 1
  fi
  sleep 1
done

# Check if the token folder exists
if [ -d "${TOKEN_PATH}" ]; then
  echo " [*] Token Folder exists. Proceeding..."
else
  echo " [!] Token Folder does not exists or not ready yet. Exiting."
  echo " [!] Token Folder Specified: ${TOKEN_PATH}"
  exit 1
fi

# Check if elfutils are installed, otherwise nvidia-gridd service
# wont start
if [ "$(grep -R "elfutils" /var/log/packages/* | wc -l)" != 0 ]; then
  echo " [*] Elfutils is installed, proceeding..."
else
  echo " [!] Elfutils is not installed, downloading and installing..."
  echo " [!] Downloading elfutils to /boot/extra"
  echo " [i] This script will download elfutils from slackware64-15.0 repository."
  echo " [i] If you have a different version of Unraid (6.11.5), you might want to"
  echo " [i] download and install a suitable version manually from the slackware"
  echo " [i] repository, and put it in /boot/extra to be install on boot."
  echo " [i] You may also install it by running: "
  echo " [i] upgradepkg --install-new /path/to/elfutils-*.txz"
  echo ""
  echo " [>] Downloading elfutils from slackware64-15.0 repository:"
  wget -q -nc --show-progress --progress=bar:force:noscroll -P /boot/extra https://slackware.uk/slackware/slackware64-15.0/slackware64/l/elfutils-0.186-x86_64-1.txz 2>/dev/null \
    || { echo " [!] Error while downloading elfutils, please download it and install it manually."; exit 1; }
  echo ""
  if upgradepkg --install-new /boot/extra/elfutils-0.186-x86_64-1.txz
  then
    echo " [*] Elfutils installed and will be installed automatically on boot"
  else
    echo " [!] Error while installing, check logs..."
    exit 1
  fi
fi

echo " [~] Sleeping for 60 seconds before continuing..."
echo " [i] The script is waiting until the boot process settles down."

for i in {60..1}; do
  printf "\r [~] %d seconds remaining" "$i"
  sleep 1
done

printf "\n"

create_token () {
  echo " [>] Creating new token..."
  if ${PING} -c1 ${DLS_IP} > /dev/null 2>&1
  then
    # curl --insecure -L -X GET https://${DLS_IP}:${DLS_PORT}/-/client-token -o ${TOKEN_PATH}/client_configuration_token_"$(date '+%d-%m-%Y-%H-%M-%S')".tok || { echo " [!] Could not get the token, please check the server."; exit 1;}
    wget -q -nc -4c --no-check-certificate --show-progress --progress=bar:force:noscroll -O "${TOKEN_PATH}"/client_configuration_token_"$(date '+%d-%m-%Y-%H-%M-%S')".tok https://${DLS_IP}:${DLS_PORT}/-/client-token \
      || { echo " [!] Could not get the token, please check the server."; exit 1;}
    chmod 744 "${TOKEN_PATH}"/*.tok || { echo " [!] Could not chmod the tokens."; exit 1; }
    echo ""
    echo " [*] Token downloaded and stored in ${TOKEN_PATH}."
  else
    echo " [!] Could not get token, DLS server unavailable ."
    exit 1
  fi
}

setup_run () {
  echo " [>] Setting up gridd.conf"
  cp /etc/nvidia/gridd.conf.template /etc/nvidia/gridd.conf || { echo " [!] Error configuring gridd.conf, did you install the drivers correctly?"; exit 1; }
  sed -i 's/FeatureType=0/FeatureType=1/g' /etc/nvidia/gridd.conf
  echo "ClientConfigTokenPath=${TOKEN_PATH}" >> /etc/nvidia/gridd.conf
  echo " [>] Creating /var/lib/nvidia folder structure"
  mkdir -p /var/lib/nvidia/GridLicensing
  echo " [>] Starting nvidia-gridd"
  if pgrep nvidia-gridd >/dev/null 2>&1; then
    echo " [!] nvidia-gridd service is running. Closing."
    sh /usr/lib/nvidia/sysv/nvidia-gridd stop
    stop_exit_code=$?
    if [ $stop_exit_code -eq 0 ]; then
      echo " [*] nvidia-gridd service stopped successfully."
    else
      echo " [!] Error while stopping nvidia-gridd service."
      exit 1
    fi

    # Kill the service if it does not close
    if pgrep nvidia-gridd >/dev/null 2>&1; then
      kill -9 "$(pgrep nvidia-gridd)" || {
        echo " [!] Error while closing nvidia-gridd service"
        exit 1
      }
    fi

    echo " [*] Restarting nvidia-gridd service."
    sh /usr/lib/nvidia/sysv/nvidia-gridd start

    if pgrep nvidia-gridd >/dev/null 2>&1; then
      echo " [*] Service started, PID: $(pgrep nvidia-gridd)"
    else
      echo -e " [!] Error while starting nvidia-gridd service. Use strace -f nvidia-gridd to debug.\n [i] Check if elfutils is installed.\n [i] strace is not installed by default."
      exit 1
    fi
  else
    sh /usr/lib/nvidia/sysv/nvidia-gridd start

    if pgrep nvidia-gridd >/dev/null 2>&1; then
      echo " [*] Service started, PID: $(pgrep nvidia-gridd)"
    else
      echo -e " [!] Error while starting nvidia-gridd service. Use strace -f nvidia-gridd to debug.\n [i] Check if elfutils is installed.\n [i] strace is not installed by default."
      exit 1
    fi
  fi
}

for token in "${TOKEN_PATH}"/*; do
  if [ "${token: -4}" == ".tok" ]
  then
    echo " [*] Tokens found..."
    setup_run
  else
    echo " [!] No Tokens found..."
    create_token
    setup_run
  fi
done

while true; do
  if nvidia-smi -q | grep "Expiry" >/dev/null 2>&1; then
    echo " [>] vGPU licensed!"
    echo " [i] $(nvidia-smi -q | grep "Expiry")"
    break
  else
    echo -ne " [>] vGPU not licensed yet... Checking again in 5 seconds\c"
    for i in {1..5}; do
      sleep 1
      echo -ne ".\c"
    done
    echo -ne "\r\c"
  fi
done

echo " [>] Done..."
exit 0
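The script keeps a commented-out `curl` alternative to its `wget` token download. Shown standalone as a minimal sketch — the IP, port and token folder are the script's own example defaults, adjust them to your setup:

```shell
# sketch: fetch a client configuration token from FastAPI-DLS
# (same call the deleted script keeps as a commented-out alternative to wget)
DLS_IP=192.168.0.123
DLS_PORT=443
TOKEN_PATH=/mnt/user/system/nvidia
curl --insecure -L -X GET "https://${DLS_IP}:${DLS_PORT}/-/client-token" \
  -o "${TOKEN_PATH}/client_configuration_token_$(date '+%d-%m-%Y-%H-%M-%S').tok"
```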
.gitlab-ci.yml (291 lines changed)

@@ -1,133 +1,97 @@
-include:
-  - template: Jobs/Code-Quality.gitlab-ci.yml
-  - template: Jobs/Secret-Detection.gitlab-ci.yml
-  - template: Jobs/SAST.gitlab-ci.yml
-  - template: Jobs/Container-Scanning.gitlab-ci.yml
-  - template: Jobs/Dependency-Scanning.gitlab-ci.yml
-
+# You can override the included template(s) by including variable overrides
+# SAST customization: https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings
+# Secret Detection customization: https://docs.gitlab.com/ee/user/application_security/secret_detection/#customizing-settings
+# Dependency Scanning customization: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings
+# Container Scanning customization: https://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings
+# Note that environment variables can be set in several places
+# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence
 cache:
   key: one-key-to-rule-them-all

 build:docker:
   image: docker:dind
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
+    - if: "$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH"
       changes:
         - app/**/*
         - Dockerfile
-    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
-  tags: [ docker ]
+    - if: "$CI_PIPELINE_SOURCE == 'merge_request_event'"
+  tags:
+    - docker
   before_script:
-    - docker buildx inspect
-    - docker buildx create --use
+    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
   script:
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - IMAGE=$CI_REGISTRY/$CI_PROJECT_PATH/$CI_BUILD_REF_NAME:$CI_BUILD_REF
-    - docker buildx build --progress=plain --platform linux/amd64,linux/arm64 --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE --push .
-    - docker buildx imagetools inspect $IMAGE
-    - echo "CS_IMAGE=$IMAGE" > container_scanning.env
-  artifacts:
-    reports:
-      dotenv: container_scanning.env
+    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${CI_BUILD_REF}
+    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${CI_BUILD_REF}

 build:apt:
   image: debian:bookworm-slim
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_TAG
-      variables:
-        VERSION: $CI_BUILD_REF_NAME
-    - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
+    - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH"
+    - if: "$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH"
       changes:
         - app/**/*
-        - .DEBIAN/**/*
-        - .gitlab-ci.yml
-      variables:
-        VERSION: "0.0.1"
-    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
-      variables:
-        VERSION: "0.0.1"
+        - ".DEBIAN/**/*"
+    - if: "$CI_PIPELINE_SOURCE == 'merge_request_event'"
   before_script:
-    - echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
-    # install build dependencies
+    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
+    - source version.env
     - apt-get update -qq && apt-get install -qq -y build-essential
-    # create build directory for .deb sources
     - mkdir build
-    # copy install instructions
     - cp -r .DEBIAN build/DEBIAN
     - chmod -R 0775 build/DEBIAN
-    # copy app into "/usr/share/fastapi-dls" as "/usr/share/fastapi-dls/app" & copy README.md and version.env
     - mkdir -p build/usr/share/fastapi-dls
     - cp -r app build/usr/share/fastapi-dls
     - cp README.md version.env build/usr/share/fastapi-dls
-    # create conf file
     - mkdir -p build/etc/fastapi-dls
     - cp .DEBIAN/env.default build/etc/fastapi-dls/env
-    # create service file
     - mkdir -p build/etc/systemd/system
     - cp .DEBIAN/fastapi-dls.service build/etc/systemd/system/fastapi-dls.service
-    # cd into "build/"
     - cd build/
   script:
-    # set version based on value in "$CI_BUILD_REF_NAME"
     - sed -i -E 's/(Version\:\s)0.0/\1'"$VERSION"'/g' DEBIAN/control
-    # build
     - dpkg -b . build.deb
     - dpkg -I build.deb
   artifacts:
     expire_in: 1 week
     paths:
       - build/build.deb

 build:pacman:
   image: archlinux:base-devel
   interruptible: true
   stage: build
   rules:
-    - if: $CI_COMMIT_TAG
-      variables:
-        VERSION: $CI_BUILD_REF_NAME
-    - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
+    - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH"
+    - if: "$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH"
       changes:
         - app/**/*
-        - .PKGBUILD/**/*
-        - .gitlab-ci.yml
-      variables:
-        VERSION: "0.0.1"
-    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
-      variables:
-        VERSION: "0.0.1"
+        - ".PKGBUILD/**/*"
+    - if: "$CI_PIPELINE_SOURCE == 'merge_request_event'"
   before_script:
-    #- echo -e "VERSION=$VERSION\nCOMMIT=$CI_COMMIT_SHA" > version.env
-    # install build dependencies
+    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
    - pacman -Syu --noconfirm git
-    # create a build-user because "makepkg" don't like root user
     - useradd --no-create-home --shell=/bin/false build && usermod -L build
     - 'echo "build ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers'
     - 'echo "root ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers'
     - chown -R build:build .
-    # move .PKGBUILD contents to root directory
     - mv .PKGBUILD/* .
   script:
     - pwd
-    # download dependencies
     - source PKGBUILD && pacman -Syu --noconfirm --needed --asdeps "${makedepends[@]}" "${depends[@]}"
-    # build
-    - sudo --preserve-env -u build makepkg -s
+    - sudo -u build makepkg -s
   artifacts:
     expire_in: 1 week
     paths:
       - "*.pkg.tar.zst"

 test:
   image: python:3.11-slim-bullseye
   stage: test
   rules:
-    - if: $CI_COMMIT_BRANCH
-    - if: $CI_COMMIT_TAG
+    - if: "$CI_COMMIT_BRANCH"
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
   variables:
     DATABASE: sqlite:///../app/db.sqlite
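The reworked `build:apt` job is plain shell, so it can be reproduced outside CI. A sketch under the assumption that you run it from a repository checkout on Debian and that `VERSION` comes from the committed `version.env`; every command below is taken from the job itself:

```shell
# sketch: build the .deb the way build:apt does, outside CI
apt-get update -qq && apt-get install -qq -y build-essential
source version.env                       # provides VERSION (e.g. 1.3.5)
mkdir build && cp -r .DEBIAN build/DEBIAN && chmod -R 0775 build/DEBIAN
mkdir -p build/usr/share/fastapi-dls build/etc/fastapi-dls build/etc/systemd/system
cp -r app build/usr/share/fastapi-dls
cp README.md version.env build/usr/share/fastapi-dls
cp .DEBIAN/env.default build/etc/fastapi-dls/env
cp .DEBIAN/fastapi-dls.service build/etc/systemd/system/fastapi-dls.service
cd build/
sed -i -E 's/(Version\:\s)0.0/\1'"$VERSION"'/g' DEBIAN/control
dpkg -b . build.deb && dpkg -I build.deb
```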
@@ -139,20 +103,18 @@ test:
     - openssl rsa -in app/cert/instance.private.pem -outform PEM -pubout -out app/cert/instance.public.pem
     - cd test
   script:
-    - python -m pytest main.py --junitxml=report.xml
+    - pytest main.py
   artifacts:
     reports:
       dotenv: version.env
-      junit: ['**/report.xml']

-.test:linux:
+".test:linux":
   stage: test
   rules:
-    - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
+    - if: "$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH"
       changes:
         - app/**/*
-        - .DEBIAN/**/*
-    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+        - ".DEBIAN/**/*"
+    - if: "$CI_PIPELINE_SOURCE == 'merge_request_event'"
   needs:
     - job: build:apt
       artifacts: true
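The `test` job boils down to a few commands that can be run locally. A sketch assuming Python 3.11 and a repository checkout; the key-generation step is an assumption modelled on the job's public-key export line, the `DATABASE` value is the job's own:

```shell
# sketch: run the test suite roughly the way the CI test job does
pip install -r requirements.txt pytest
openssl genrsa -out app/cert/instance.private.pem 2048   # assumption: same key layout as CI
openssl rsa -in app/cert/instance.private.pem -outform PEM -pubout -out app/cert/instance.public.pem
cd test
DATABASE=sqlite:///../app/db.sqlite pytest main.py
```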
@@ -161,181 +123,113 @@ test:
   before_script:
     - apt-get update -qq && apt-get install -qq -y jq curl
   script:
-    # test installation
     - apt-get install -q -y ./build/build.deb --fix-missing
     - openssl req -x509 -newkey rsa:2048 -nodes -out /etc/fastapi-dls/webserver.crt -keyout /etc/fastapi-dls/webserver.key -days 7 -subj "/C=DE/O=GitLab-CI/OU=Test/CN=localhost"
-    # copy example config from GitLab-CI-Variables
-    #- cat ${EXAMPLE_CONFIG} > /etc/fastapi-dls/env
-    # start service in background
     - cd /usr/share/fastapi-dls/app
     - uvicorn main:app
       --host 127.0.0.1 --port 443
       --app-dir /usr/share/fastapi-dls/app
       --ssl-keyfile /etc/fastapi-dls/webserver.key
       --ssl-certfile /etc/fastapi-dls/webserver.crt
       --proxy-headers &
     - FASTAPI_DLS_PID=$!
     - echo "Started service with pid $FASTAPI_DLS_PID"
     - cat /etc/fastapi-dls/env
-    # testing service
     - if [ "`curl --insecure -s https://127.0.0.1/-/health | jq .status`" != "up" ]; then echo "Success"; else "Error"; fi
-    # cleanup
     - kill $FASTAPI_DLS_PID
     - apt-get purge -qq -y fastapi-dls
     - apt-get autoremove -qq -y && apt-get clean -qq

 test:debian:
-  extends: .test:linux
+  extends: ".test:linux"
   image: debian:bookworm-slim

 test:ubuntu:
-  extends: .test:linux
+  extends: ".test:linux"
   image: ubuntu:22.10

 test:archlinux:
   image: archlinux:base
   rules:
-    - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
+    - if: "$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH"
       changes:
         - app/**/*
-        - .PKGBUILD/**/*
-    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+        - ".PKGBUILD/**/*"
+    - if: "$CI_PIPELINE_SOURCE == 'merge_request_event'"
   needs:
     - job: build:pacman
       artifacts: true
   script:
     - pacman -Sy
     - pacman -U --noconfirm *.pkg.tar.zst

-code_quality:
-  rules:
-    - if: $CODE_QUALITY_DISABLED
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-
-secret_detection:
-  rules:
-    - if: $SECRET_DETECTION_DISABLED
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-  before_script:
-    - git config --global --add safe.directory $CI_PROJECT_DIR
-
-semgrep-sast:
-  rules:
-    - if: $SAST_DISABLED
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-
-test_coverage:
-  extends: test
-  allow_failure: true
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-  script:
-    - pip install pytest pytest-cov
-    - coverage run -m pytest main.py
-    - coverage report
-    - coverage xml
-  coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
-  artifacts:
-    reports:
-      coverage_report:
-        coverage_format: cobertura
-        path: '**/coverage.xml'
-
-container_scanning:
-  dependencies: [ build:docker ]
-  rules:
-    - if: $CONTAINER_SCANNING_DISABLED
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-
-gemnasium-python-dependency_scanning:
-  rules:
-    - if: $DEPENDENCY_SCANNING_DISABLED
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-
-.deploy:
-  rules:
-    - if: $CI_COMMIT_TAG
+".deploy":
+  rules:
+    - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH"
+    - if: "$CI_COMMIT_TAG"
+      when: never

 deploy:docker:
-  extends: .deploy
+  extends: ".deploy"
   stage: deploy
+  rules:
+    - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH"
   before_script:
-    - echo "Building docker image for commit $CI_COMMIT_SHA with version $CI_BUILD_REF_NAME"
+    - echo "COMMIT=${CI_COMMIT_SHA}" >> version.env
+    - source version.env
+    - echo "Building docker image for commit ${COMMIT} with version ${VERSION}"
   script:
-    - echo "========== GitLab-Registry =========="
+    - echo "GitLab-Registry"
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - IMAGE=$CI_REGISTRY/$CI_PROJECT_PATH/$CI_BUILD_REF_NAME
-    - docker build . --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:$CI_BUILD_REF_NAME
-    - docker build . --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:latest
-    - docker push $IMAGE:$CI_BUILD_REF_NAME
-    - docker push $IMAGE:latest
-    - echo "========== Docker-Hub =========="
+    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${VERSION}
+    - docker build . --tag ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:latest
+    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:${VERSION}
+    - docker push ${CI_REGISTRY}/${CI_PROJECT_PATH}/${CI_BUILD_REF_NAME}:latest
+    - echo "Docker-Hub"
     - docker login -u $PUBLIC_REGISTRY_USER -p $PUBLIC_REGISTRY_TOKEN
-    - IMAGE=$PUBLIC_REGISTRY_USER/$CI_PROJECT_NAME
-    - docker build . --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:$CI_BUILD_REF_NAME
-    - docker build . --build-arg VERSION=$CI_BUILD_REF_NAME --build-arg COMMIT=$CI_COMMIT_SHA --tag $IMAGE:latest
-    - docker push $IMAGE:$CI_BUILD_REF_NAME
-    - docker push $IMAGE:latest
+    - docker build . --tag $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:${VERSION}
+    - docker build . --tag $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:latest
+    - docker push $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:${VERSION}
+    - docker push $PUBLIC_REGISTRY_USER/${CI_PROJECT_NAME}:latest

 deploy:apt:
-  # doc: https://git.collinwebdesigns.de/help/user/packages/debian_repository/index.md#install-a-package
-  extends: .deploy
+  extends: ".deploy"
   image: debian:bookworm-slim
   stage: deploy
+  rules:
+    - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH"
   needs:
     - job: build:apt
       artifacts: true
   before_script:
     - apt-get update -qq && apt-get install -qq -y curl lsb-release
-    # create distribution initial
     - CODENAME=`lsb_release -cs`
-    # create repo if not exists
     - 'if [ "`curl -s -o /dev/null -w "%{http_code}" --header "JOB-TOKEN: $CI_JOB_TOKEN" -s ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/debian_distributions/${CODENAME}/key.asc`" != "200" ]; then curl --request POST --header "JOB-TOKEN: $CI_JOB_TOKEN" "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/debian_distributions?codename=${CODENAME}"; fi'
   script:
-    # Naming format: <name>_<version>-<release>_<arch>.deb
-    # Version is the version number of the app being packaged
-    # Release number is the version number of the *packaging* itself.
-    # The release number might increment if the package maintainer
-    # updated the packaging, while the version number of the application
-    # being packaged did not change.
-    - BUILD_NAME=build/build.deb # inherited by build-stage
+    - BUILD_NAME=build/build.deb
     - PACKAGE_NAME=`dpkg -I ${BUILD_NAME} | grep "Package:" | awk '{ print $2 }'`
     - PACKAGE_VERSION=`dpkg -I ${BUILD_NAME} | grep "Version:" | awk '{ print $2 }'`
     - PACKAGE_ARCH=amd64
-    #- EXPORT_NAME="${PACKAGE_NAME}_${PACKAGE_VERSION}-0_${PACKAGE_ARCH}.deb"
     - EXPORT_NAME="${PACKAGE_NAME}_${PACKAGE_VERSION}_${PACKAGE_ARCH}.deb"
     - mv ${BUILD_NAME} ${EXPORT_NAME}
     - 'echo "PACKAGE_NAME: ${PACKAGE_NAME}"'
     - 'echo "PACKAGE_VERSION: ${PACKAGE_VERSION}"'
     - 'echo "PACKAGE_ARCH: ${PACKAGE_ARCH}"'
     - 'echo "EXPORT_NAME: ${EXPORT_NAME}"'
-    # https://docs.gitlab.com/14.3/ee/user/packages/debian_repository/index.html
     - URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/debian/${EXPORT_NAME}"
     - 'echo "URL: ${URL}"'
-    #- 'curl --request PUT --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file ${EXPORT_NAME} ${URL}'
-    # using generic-package-registry until debian-registry is GA
-    # https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#publish-a-generic-package-by-using-cicd
     - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file ${EXPORT_NAME} "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${PACKAGE_NAME}/${PACKAGE_VERSION}/${EXPORT_NAME}"'

 deploy:pacman:
-  extends: .deploy
+  extends: ".deploy"
   image: archlinux:base-devel
   stage: deploy
+  rules:
+    - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH"
   needs:
     - job: build:pacman
       artifacts: true
   script:
     - source .PKGBUILD/PKGBUILD
     - source version.env
-    # fastapi-dls-1.0-1-any.pkg.tar.zst
-    - BUILD_NAME=${pkgname}-${CI_BUILD_REF_NAME}-${pkgrel}-any.pkg.tar.zst
+    - BUILD_NAME=${pkgname}-${VERSION}-${pkgrel}-any.pkg.tar.zst
     - PACKAGE_NAME=${pkgname}
-    - PACKAGE_VERSION=${CI_BUILD_REF_NAME}
+    - PACKAGE_VERSION=${VERSION}
     - PACKAGE_ARCH=any
     - EXPORT_NAME=${BUILD_NAME}
     - 'echo "PACKAGE_NAME: ${PACKAGE_NAME}"'
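Both deploy jobs publish their artifact to GitLab's generic package registry with a single `curl` call. Spelled out with the CI variables made explicit — the package name and version below are placeholders, the call itself is the one quoted in the job:

```shell
# sketch: the upload step of deploy:apt / deploy:pacman, run inside a CI job
PACKAGE_NAME=fastapi-dls        # taken from dpkg -I / the PKGBUILD in the real job
PACKAGE_VERSION=1.3.5           # placeholder version
EXPORT_NAME=${PACKAGE_NAME}_${PACKAGE_VERSION}_amd64.deb
curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file "${EXPORT_NAME}" \
  "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${PACKAGE_NAME}/${PACKAGE_VERSION}/${EXPORT_NAME}"
```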
@@ -343,23 +237,28 @@ deploy:pacman:
     - 'echo "PACKAGE_ARCH: ${PACKAGE_ARCH}"'
     - 'echo "EXPORT_NAME: ${EXPORT_NAME}"'
     - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file ${EXPORT_NAME} "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${PACKAGE_NAME}/${PACKAGE_VERSION}/${EXPORT_NAME}"'

 release:
   image: registry.gitlab.com/gitlab-org/release-cli:latest
-  stage: .post
-  needs: [ test ]
+  stage: ".post"
+  needs:
+    - job: test
+      artifacts: true
   rules:
-    - if: $CI_COMMIT_TAG
+    - if: "$CI_COMMIT_TAG"
+      when: never
+    - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH"
   script:
-    - echo "Running release-job for $CI_COMMIT_TAG"
+    - echo "Running release-job for $VERSION"
   release:
-    name: $CI_PROJECT_TITLE $CI_COMMIT_TAG
-    description: Release of $CI_PROJECT_TITLE version $CI_COMMIT_TAG
-    tag_name: $CI_COMMIT_TAG
-    ref: $CI_COMMIT_SHA
+    name: "$CI_PROJECT_TITLE $VERSION"
+    description: Release of $CI_PROJECT_TITLE version $VERSION
+    tag_name: "$VERSION"
+    ref: "$CI_COMMIT_SHA"
     assets:
       links:
-        - name: 'Package Registry'
-          url: 'https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/packages'
-        - name: 'Container Registry'
-          url: 'https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/container_registry/40'
+        - name: Package Registry
+          url: https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/packages
+        - name: Container Registry
+          url: https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/container_registry/40
+
+include:
+  - template: Auto-DevOps.gitlab-ci.yml
Dockerfile

@@ -1,9 +1,5 @@
 FROM python:3.11-alpine

-ARG VERSION
-ARG COMMIT=""
-RUN echo -e "VERSION=$VERSION\nCOMMIT=$COMMIT" > /version.env
-
 COPY requirements.txt /tmp/requirements.txt

 RUN apk update \
@@ -15,6 +11,7 @@ RUN apk update \
     && apk del build-deps

 COPY app /app
+COPY version.env /version.env
 COPY README.md /README.md

 HEALTHCHECK --start-period=30s --interval=10s --timeout=5s --retries=3 CMD curl --insecure --fail https://localhost/-/health || exit 1
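Because the `ARG`-based version injection is gone, the build context now has to contain `version.env` before `docker build` runs. A minimal sketch of building the image locally under that assumption (the tag name is a placeholder):

```shell
# sketch: the image now COPYs version.env, so it must exist in the build context
test -f version.env || echo "VERSION=1.3.5" > version.env   # committed on this branch
echo "COMMIT=$(git rev-parse HEAD)" >> version.env           # mirrors the CI before_script
docker build -t fastapi-dls:local .
```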
README.md (64 lines changed)

@@ -9,9 +9,9 @@ Only the clients need a connection to this service on configured port.

 **Official Links**

-- https://git.collinwebdesigns.de/oscar.krause/fastapi-dls (Private Git)
-- https://gitea.publichub.eu/oscar.krause/fastapi-dls (Public Git)
-- https://hub.docker.com/r/collinwebdesigns/fastapi-dls (Docker-Hub `collinwebdesigns/fastapi-dls:latest`)
+- https://git.collinwebdesigns.de/oscar.krause/fastapi-dls
+- https://gitea.publichub.eu/oscar.krause/fastapi-dls
+- Docker Image `collinwebdesigns/fastapi-dls:latest`

 *All other repositories are forks! (which is no bad - just for information and bug reports)*

@@ -39,8 +39,6 @@ Docker-Images are available here:
 - [Docker-Hub](https://hub.docker.com/repository/docker/collinwebdesigns/fastapi-dls): `collinwebdesigns/fastapi-dls:latest`
 - [GitLab-Registry](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/container_registry): `registry.git.collinwebdesigns.de/oscar.krause/fastapi-dls/main:latest`

-The images include database drivers for `postgres`, `mysql`, `mariadb` and `sqlite`.
-
 **Run this on the Docker-Host**

 ```shell
@@ -145,9 +143,9 @@ This is only to test whether the service starts successfully.

 ```shell
 cd /opt/fastapi-dls/app
-sudo -u www-data /opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app
-# or
 su - www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
+# or
+sudo -u www-data -c "/opt/fastapi-dls/venv/bin/uvicorn main:app --app-dir=/opt/fastapi-dls/app"
 ```

 **Create config file**

@@ -247,8 +245,6 @@ This is only to test whether the service starts successfully.
 BASE_DIR=/opt/fastapi-dls
 SERVICE_USER=dls
 cd ${BASE_DIR}
-sudo -u ${SERVICE_USER} ${BASE_DIR}/venv/bin/uvicorn main:app --app-dir=${BASE_DIR}/app
-# or
 su - ${SERVICE_USER} -c "${BASE_DIR}/venv/bin/uvicorn main:app --app-dir=${BASE_DIR}/app"
 ```
@@ -354,19 +350,6 @@ pacman -U --noconfirm fastapi-dls.pkg.tar.zst

 Start with `systemctl start fastapi-dls.service` and enable autostart with `systemctl enable fastapi-dls.service`.

-## unRAID
-
-1. Download [this xml file](.UNRAID/FastAPI-DLS.xml)
-2. Put it in /boot/config/plugins/dockerMan/templates-user/
-3. Go to Docker page, scroll down to `Add Container`, click on Template list and choose `FastAPI-DLS`
-4. Open terminal/ssh, follow the instructions in overview description
-5. Setup your container `IP`, `Port`, `DLS_URL` and `DLS_PORT`
-6. Apply and let it boot up
-
-*Unraid users must also make sure they have Host access to custom networks enabled if unraid is the vgpu guest*.
-
-Continue [here](#unraid-guest) for docker guest setup.
-
 ## Let's Encrypt Certificate (optional)

 If you're using installation via docker, you can use `traefik`. Please refer to their documentation.

@@ -405,9 +388,9 @@ After first success you have to replace `--issue` with `--renew`.
 every 4.8 hours. If network connectivity is lost, the loss of connectivity is detected during license renewal and the
 client has 19.2 hours in which to re-establish connectivity before its license expires.

-\*3 Always use `https`, since guest-drivers only support secure connections!
+\*2 Always use `https`, since guest-drivers only support secure connections!

-\*4 If you recreate instance keys you need to **recreate client-token for each guest**!
+\*3 If you recreate instance keys you need to **recreate client-token for each guest**!

 # Setup (Client)

@@ -415,15 +398,9 @@ client has 19.2 hours in which to re-establish connectivity before its license e

 Successfully tested with this package versions:

-| vGPU Suftware | vGPU Manager | Linux Driver | Windows Driver | Release Date  |
-|---------------|--------------|--------------|----------------|---------------|
-| `15.2`        | `525.105.14` | `525.105.17` | `528.89`       | March 2023    |
-| `15.1`        | `525.85.07`  | `525.85.05`  | `528.24`       | January 2023  |
-| `15.0`        | `525.60.12`  | `525.60.13`  | `527.41`       | December 2022 |
-| `14.4`        | `510.108.03` | `510.108.03` | `514.08`       | December 2022 |
-| `14.3`        | `510.108.03` | `510.108.03` | `513.91`       | November 2022 |
-
-- https://docs.nvidia.com/grid/index.html
+- `14.3` (Linux-Host: `510.108.03`, Linux-Guest: `510.108.03`, Windows-Guest: `513.91`)
+- `14.4` (Linux-Host: `510.108.03`, Linux-Guest: `510.108.03`, Windows-Guest: `514.08`)
+- `15.0` (Linux-Host: `525.60.12`, Linux-Guest: `525.60.13`, Windows-Guest: `527.41`)

 ## Linux
@@ -475,7 +452,7 @@ Restart-Service NVDisplay.ContainerLocalSystem
 Check licensing status:

 ```shell
-& 'nvidia-smi' -q | Select-String "License"
+& 'C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe' -q | Select-String "License"
 ```

 Output should be something like:

@@ -487,19 +464,6 @@ vGPU Software Licensed Product

 Done. For more information check [troubleshoot section](#troubleshoot).

-## unRAID Guest
-
-1. Make sure you create a folder in a linux filesystem (BTRFS/XFS/EXT4...), I recommend `/mnt/user/system/nvidia` (this is where docker and libvirt preferences are saved, so it's a good place to have that)
-2. Edit the script to put your `DLS_IP`, `DLS_PORT` and `TOKEN_PATH`, properly
-3. Install `User Scripts` plugin from *Community Apps* (the Apps page, or google User Scripts Unraid if you're not using CA)
-4. Go to `Settings > Users Scripts > Add New Script`
-5. Give it a name (the name must not contain spaces preferably)
-6. Click on the *gear icon* to the left of the script name then edit script
-7. Paste the script and save
-8. Set schedule to `At First Array Start Only`
-9. Click on Apply
-
-
 # Endpoints

 ### `GET /`

@@ -702,8 +666,4 @@ The error message can safely be ignored (since we have no license limitation :P)

 Thanks to vGPU community and all who uses this project and report bugs.

-Special thanks to
-
-- @samicrusader who created build file for ArchLinux
-- @cyrus who wrote the section for openSUSE
-- @midi who wrote the section for unRAID
+Special thanks to @samicrusader who created build file for ArchLinux and @cyrus who wrote the section for openSUSE.
ROADMAP.md (deleted, 27 lines)

@@ -1,27 +0,0 @@
The roadmap file is removed on this branch. Its full content was:

# Roadmap

I am planning to implement the following features in the future.


## HA - High Availability

Support Failover-Mode (secondary ip address) as in official DLS.

**Note**: There is no Load-Balancing / Round-Robin HA Mode supported! If you want to use that, consider to use
Docker-Swarm with shared/cluster database (e.g. postgres).

*See [ha branch](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/ha) for current status.*


## UI - User Interface

Add a user interface to manage origins and leases.

*See [ui branch](https://git.collinwebdesigns.de/oscar.krause/fastapi-dls/-/tree/ui) for current status.*


## Config Database

Instead of using environment variables, configuration files and manually create certificates, store configs and
certificates in database (like origins and leases). Also, there should be provided a startup assistant to prefill
required attributes and create instance-certificates. This is more user-friendly and should improve fist setup.
app/main.py

@@ -82,7 +82,7 @@ async def _index():


 @app.get('/-/health', summary='* Health')
-async def _health():
+async def _health(request: Request):
     return JSONr({'status': 'up'})

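The touched health endpoint can be probed directly, the same way the Dockerfile `HEALTHCHECK` and the CI test job do. A minimal sketch against a locally running instance (host and port are assumptions):

```shell
# sketch: query the health endpoint of a running FastAPI-DLS instance
curl --insecure -s https://localhost/-/health
# expected response: {"status":"up"}
```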
requirements.txt

@@ -1,8 +1,8 @@
-fastapi==0.95.1
-uvicorn[standard]==0.22.0
+fastapi==0.89.1
+uvicorn[standard]==0.20.0
 python-jose==3.3.0
 pycryptodome==3.17
 python-dateutil==2.8.2
-sqlalchemy==2.0.12
-markdown==3.4.3
-python-dotenv==1.0.0
+sqlalchemy==2.0.0
+markdown==3.4.1
+python-dotenv==0.21.1
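To reproduce this branch's pinned environment locally, roughly as follows; the virtualenv path is an assumption, the versions come from the file above:

```shell
# sketch: install the branch's pinned dependencies into a fresh virtualenv
python3 -m venv venv
venv/bin/pip install -r requirements.txt
venv/bin/python -c "import fastapi; print(fastapi.__version__)"   # should print 0.89.1
```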
version.env (new file)

@@ -0,0 +1 @@
+VERSION=1.3.5