diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 3647122..35778e3 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -28,7 +28,6 @@ jobs: python: - "3.10" pytorch: - - "2.1.1" - "2.1.2" - "2.2.0" steps: @@ -100,7 +99,6 @@ jobs: python: - "3.10" pytorch: - - "2.1.1" - "2.1.2" - "2.2.0" cuda: diff --git a/README.md b/README.md index 6ccc551..cd45c19 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ You can also [build from source](#building-images) by editing `.env` and running Supported Python versions: `3.10` -Supported Pytorch versions: `2.2.0`, `2.1.2` `2.1.1` +Supported Pytorch versions: `2.2.0`, `2.1.2` Supported Platforms: `NVIDIA CUDA`, `AMD ROCm`, `CPU` diff --git a/config/provisioning/default.sh b/config/provisioning/default.sh index b14c155..a672eb6 100755 --- a/config/provisioning/default.sh +++ b/config/provisioning/default.sh @@ -1,4 +1,4 @@ -#!/bin/false +#!/bin/bash # This file will be sourced in init.sh # Namespace functions with provisioning_ @@ -33,8 +33,8 @@ EXTENSIONS=( CHECKPOINT_MODELS=( "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt" #"https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt" - "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors" - "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors" + #"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors" + #"https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors" ) LORA_MODELS=( diff --git a/docker-compose.yaml b/docker-compose.yaml index 3e1eb5b..3ee5fae 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -2,14 +2,15 @@ version: "3.8" # Compose file build variables set in .env services: 
supervisor: + platform: linux/amd64 build: context: ./build args: - IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/jupyter-pytorch:2.1.1-py3.10-cuda-11.8.0-runtime-22.04} + IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/jupyter-pytorch:2.2.0-py3.10-cuda-11.8.0-runtime-22.04} tags: - - "ghcr.io/ai-dock/stable-diffusion-webui:${IMAGE_TAG:-jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-runtime-22.04}" + - "ghcr.io/ai-dock/stable-diffusion-webui:${IMAGE_TAG:-jupyter-pytorch-2.2.0-py3.10-cuda-11.8.0-runtime-22.04}" - image: ghcr.io/ai-dock/stable-diffusion-webui:${IMAGE_TAG:-jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-runtime-22.04} + image: ghcr.io/ai-dock/stable-diffusion-webui:${IMAGE_TAG:-jupyter-pytorch-2.2.0-py3.10-cuda-11.8.0-runtime-22.04} devices: - "/dev/dri:/dev/dri" @@ -53,6 +54,7 @@ services: - SSH_PORT_LOCAL=${SSH_PORT_LOCAL:-22} - SERVICEPORTAL_PORT_HOST=${SERVICEPORTAL_PORT_HOST:-1111} - SERVICEPORTAL_METRICS_PORT=${SERVICEPORTAL_METRICS_PORT:-21111} + - WEBUI_BRANCH=${WEBUI_BRANCH:-} - WEBUI_FLAGS=${WEBUI_FLAGS:-} - WEBUI_PORT_HOST=${WEBUI_PORT_HOST:-7860} - WEBUI_PORT_LOCAL=${WEBUI_PORT_LOCAL:-17860} @@ -60,4 +62,4 @@ services: - JUPYTER_PORT_HOST=${JUPYTER_PORT_HOST:-8888} - JUPYTER_METRICS_PORT=${JUPYTER_METRICS_PORT:-28888} - SERVERLESS=${SERVERLESS:-false} - # - PROVISIONING_SCRIPT=${PROVISIONING_SCRIPT:-} + #- PROVISIONING_SCRIPT=${PROVISIONING_SCRIPT:-}