generated from AIKU-Official/aiku-23-2-project-guideline
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathinference.py
28 lines (23 loc) · 830 Bytes
/
inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
import PIL
import requests
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image
import os
#os.environ["HF_HOME"] = "/d1/hyomin/.cache3"
def run_inference(
    model_id: str = "pwnhyo/instruct-pix2pix-model",
    image_url: str = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_1.png",
    prompt: str = "Add a santa hat",
    output_path: str = "edited_image_test11.png",
    seed: int = 422,
    num_inference_steps: int = 20,
    image_guidance_scale: float = 1.5,
    guidance_scale: float = 10,
    device: str = "cuda",
):
    """Edit an image with a fine-tuned InstructPix2Pix pipeline.

    Downloads the image at ``image_url``, applies ``prompt`` with the
    InstructPix2Pix model ``model_id``, saves the result to
    ``output_path``, and returns the edited PIL image.

    All defaults reproduce the original hard-coded run exactly.

    Parameters:
        model_id: Hugging Face Hub id of the fine-tuned pipeline.
        image_url: URL of the source image to edit.
        prompt: Edit instruction passed to the pipeline.
        output_path: Where the edited image is written.
        seed: Seed for the CUDA RNG so the edit is reproducible.
        num_inference_steps: Number of denoising steps.
        image_guidance_scale: How closely to follow the source image.
        guidance_scale: How closely to follow the text prompt.
        device: Torch device string the pipeline and RNG run on.
    """
    image = load_image(image_url)
    # fp16 weights halve memory; requires a device with half support (CUDA).
    pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        model_id, torch_dtype=torch.float16
    ).to(device)
    # Disable the NSFW safety checker — matches the original script's behavior.
    pipeline.safety_checker = None
    generator = torch.Generator(device).manual_seed(seed)
    edited_image = pipeline(
        prompt,
        image=image,
        num_inference_steps=num_inference_steps,
        image_guidance_scale=image_guidance_scale,
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    edited_image.save(output_path)
    return edited_image


if __name__ == "__main__":
    # Usage: CUDA_VISIBLE_DEVICES=1 python inference.py
    run_inference()