diff --git a/.github/workflows/backend_build_darwin.yml b/.github/workflows/backend_build_darwin.yml
index e6a2b4d388e4..5ec2fa4a1153 100644
--- a/.github/workflows/backend_build_darwin.yml
+++ b/.github/workflows/backend_build_darwin.yml
@@ -12,6 +12,10 @@ on:
         description: 'Build type (e.g., mps)'
         default: ''
         type: string
+      use-pip:
+        description: 'Use pip to install dependencies'
+        default: false
+        type: boolean
       go-version:
         description: 'Go version to use'
         default: '1.24.x'
@@ -63,7 +67,7 @@ jobs:
       - name: Build ${{ inputs.backend }}-darwin
         run: |
           make protogen-go
-          BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} make build-darwin-python-backend
+          BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-python-backend
 
       - name: Upload ${{ inputs.backend }}.tar
         uses: actions/upload-artifact@v4
diff --git a/Makefile b/Makefile
index a491a26cf938..e9197fd3b0d3 100644
--- a/Makefile
+++ b/Makefile
@@ -369,6 +369,10 @@ backends/mlx:
 	BACKEND=mlx BUILD_TYPE=mps $(MAKE) build-darwin-python-backend
 	./local-ai backends install "ocifile://$(abspath ./backend-images/mlx.tar)"
 
+backends/diffusers-darwin:
+	USE_PIP=true BACKEND=diffusers BUILD_TYPE=mps $(MAKE) build-darwin-python-backend
+	./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
+
 backend-images:
 	mkdir -p backend-images
 
diff --git a/backend/index.yaml b/backend/index.yaml
index 3fed08f275d4..dc8036e1cc93 100644
--- a/backend/index.yaml
+++ b/backend/index.yaml
@@ -184,6 +184,7 @@
     intel: "intel-diffusers"
     amd: "rocm-diffusers"
     nvidia-l4t: "nvidia-l4t-diffusers"
+    metal: "metal-diffusers"
 - &exllama2
   name: "exllama2"
   urls:
@@ -875,6 +876,7 @@
     intel: "intel-diffusers-development"
     amd: "rocm-diffusers-development"
     nvidia-l4t: "nvidia-l4t-diffusers-development"
+    metal: "metal-diffusers-development"
 - !!merge <<: *diffusers
   name: "nvidia-l4t-diffusers"
   uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-l4t-diffusers"
@@ -925,6 +927,16 @@
   uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-diffusers"
   mirrors:
   - localai/localai-backends:master-gpu-intel-diffusers
+- !!merge <<: *diffusers
+  name: "metal-diffusers"
+  uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-diffusers"
+  mirrors:
+  - localai/localai-backends:latest-metal-darwin-arm64-diffusers
+- !!merge <<: *diffusers
+  name: "metal-diffusers-development"
+  uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
+  mirrors:
+  - localai/localai-backends:master-metal-darwin-arm64-diffusers
 ## exllama2
 - !!merge <<: *exllama2
   name: "exllama2-development"
diff --git a/backend/python/diffusers/requirements-mps.txt b/backend/python/diffusers/requirements-mps.txt
new file mode 100644
index 000000000000..77dd54b73a34
--- /dev/null
+++ b/backend/python/diffusers/requirements-mps.txt
@@ -0,0 +1,10 @@
+torch==2.7.1
+torchvision==0.22.1
+git+https://github.com/huggingface/diffusers
+opencv-python
+transformers
+accelerate
+compel
+peft
+sentencepiece
+optimum-quanto
diff --git a/backend/python/diffusers/run.sh b/backend/python/diffusers/run.sh
index ee730f21f5a9..74367c99f332 100755
--- a/backend/python/diffusers/run.sh
+++ b/backend/python/diffusers/run.sh
@@ -12,4 +12,6 @@ if [ -d "/opt/intel" ]; then
     export XPU=1
 fi
 
+export PYTORCH_ENABLE_MPS_FALLBACK=1
+
 startBackend $@