chuanli11 commited on
Commit
b5794ad
1 Parent(s): 1909dbb

Use ONNX as backend for CPU inference

Browse files
Files changed (2) hide show
  1. app.py +9 -2
  2. requirements.txt +7 -6
app.py CHANGED
@@ -2,14 +2,21 @@ from contextlib import nullcontext
2
  import gradio as gr
3
  import torch
4
  from torch import autocast
5
- from diffusers import StableDiffusionPipeline
6
 
7
 
8
  device = "cuda" if torch.cuda.is_available() else "cpu"
9
  context = autocast if device == "cuda" else nullcontext
10
  dtype = torch.float16 if device == "cuda" else torch.float32
11
 
12
- pipe = StableDiffusionPipeline.from_pretrained("lambdalabs/sd-pokemon-diffusers", torch_dtype=dtype)
 
 
 
 
 
 
 
13
  pipe = pipe.to(device)
14
 
15
 
 
2
  import gradio as gr
3
  import torch
4
  from torch import autocast
5
+ from diffusers import StableDiffusionPipeline, StableDiffusionOnnxPipeline
6
 
7
 
8
  device = "cuda" if torch.cuda.is_available() else "cpu"
9
  context = autocast if device == "cuda" else nullcontext
10
  dtype = torch.float16 if device == "cuda" else torch.float32
11
 
12
+ if device == "cuda":
13
+ pipe = StableDiffusionPipeline.from_pretrained("lambdalabs/sd-pokemon-diffusers", torch_dtype=dtype)
14
+ else:
15
+ pipe = StableDiffusionOnnxPipeline.from_pretrained(
16
+ "lambdalabs/sd-pokemon-diffusers",
17
+ revision="onnx",
18
+ provider="CPUExecutionProvider"
19
+ )
20
  pipe = pipe.to(device)
21
 
22
 
requirements.txt CHANGED
@@ -1,7 +1,8 @@
1
  --extra-index-url https://download.pytorch.org/whl/cu113
2
- torch
3
- diffusers
4
- transformers
5
- scipy
6
- ftfy
7
- datasets
 
 
1
  --extra-index-url https://download.pytorch.org/whl/cu113
2
+ torch==1.12.1+cu113
3
+ diffusers==0.3.0
4
+ transformers==4.22.2
5
+ scipy==1.9.1
6
+ ftfy==6.1.1
7
+ datasets==2.5.1
8
+ onnxruntime==1.12.1