模型简介
模型特点
模型能力
使用案例
标签:
- 文本到图像
- 稳定扩散
语言:
- 英文
库名称: diffusers
IP-Adapter-FaceID 模型卡片
项目页面 | 论文 (ArXiv) | 代码
简介
IP-Adapter-FaceID 的实验版本:我们使用来自人脸识别模型的面部 ID 嵌入代替 CLIP 图像嵌入,此外,我们使用 LoRA 来提高 ID 一致性。IP-Adapter-FaceID 可以根据一张人脸仅通过文本提示生成各种风格的图像。
2023/12/27 更新:
IP-Adapter-FaceID-Plus:面部 ID 嵌入(用于面部 ID)+ CLIP 图像嵌入(用于面部结构)
2023/12/28 更新:
IP-Adapter-FaceID-PlusV2:面部 ID 嵌入(用于面部 ID)+ 可控 CLIP 图像嵌入(用于面部结构)
您可以调整面部结构的权重以获得不同的生成效果!
2024/01/04 更新:
IP-Adapter-FaceID-SDXL:IP-Adapter-FaceID 的实验性 SDXL 版本
2024/01/17 更新:
IP-Adapter-FaceID-PlusV2-SDXL:IP-Adapter-FaceID-PlusV2 的实验性 SDXL 版本
2024/01/19 更新:
IP-Adapter-FaceID-Portrait:与 IP-Adapter-FaceID 相同,但用于肖像生成(无需 LoRA!无需 ControlNet!)。特别地,它接受多张面部图像以增强相似性(默认为 5 张)。
使用方法
IP-Adapter-FaceID
首先,您应使用 insightface 提取面部 ID 嵌入:
# Extract a face ID embedding from a single photo with insightface.
import cv2
import torch
from insightface.app import FaceAnalysis

face_app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
face_app.prepare(ctx_id=0, det_size=(640, 640))

img = cv2.imread("person.jpg")
detected_faces = face_app.get(img)
# Take the first detected face; add a batch dimension for the adapter.
faceid_embeds = torch.from_numpy(detected_faces[0].normed_embedding).unsqueeze(0)
然后,您可以根据面部嵌入生成图像:
# Generate images conditioned on the face ID embedding with IP-Adapter-FaceID (SD 1.5).
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
from PIL import Image
from ip_adapter.ip_adapter_faceid import IPAdapterFaceID

base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
vae_model_path = "stabilityai/sd-vae-ft-mse"
ip_ckpt = "ip-adapter-faceid_sd15.bin"
device = "cuda"

# DDIM scheduler settings used by the original IP-Adapter examples.
noise_scheduler = DDIMScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
    steps_offset=1,
)

vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    scheduler=noise_scheduler,
    vae=vae,
    feature_extractor=None,
    safety_checker=None,
)

# Load the IP-Adapter weights onto the pipeline.
ip_model = IPAdapterFaceID(pipe, ip_ckpt, device)

# Generate images from a text prompt plus the face embedding.
prompt = "花园中穿红裙子的女性照片"
negative_prompt = "单色、低分辨率、解剖结构不良、最差质量、低质量、模糊"
images = ip_model.generate(
    prompt=prompt,
    negative_prompt=negative_prompt,
    faceid_embeds=faceid_embeds,
    num_samples=4,
    width=512,
    height=768,
    num_inference_steps=30,
    seed=2023,
)
您也可以使用普通的 IP-Adapter 和普通的 LoRA 加载模型:
# Alternative loading path: a plain IP-Adapter checkpoint plus a separate LoRA.
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
from PIL import Image
from ip_adapter.ip_adapter_faceid_separate import IPAdapterFaceID

base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
vae_model_path = "stabilityai/sd-vae-ft-mse"
ip_ckpt = "ip-adapter-faceid_sd15.bin"
lora_ckpt = "ip-adapter-faceid_sd15_lora.safetensors"
device = "cuda"

# DDIM scheduler settings used by the original IP-Adapter examples.
noise_scheduler = DDIMScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
    steps_offset=1,
)

vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    scheduler=noise_scheduler,
    vae=vae,
    feature_extractor=None,
    safety_checker=None,
)

# Load the LoRA weights and fuse them into the base model.
pipe.load_lora_weights(lora_ckpt)
pipe.fuse_lora()

# Load the IP-Adapter weights onto the pipeline.
ip_model = IPAdapterFaceID(pipe, ip_ckpt, device)

# Generate images from a text prompt plus the face embedding.
prompt = "花园中穿红裙子的女性照片"
negative_prompt = "单色、低分辨率、解剖结构不良、最差质量、低质量、模糊"
images = ip_model.generate(
    prompt=prompt,
    negative_prompt=negative_prompt,
    faceid_embeds=faceid_embeds,
    num_samples=4,
    width=512,
    height=768,
    num_inference_steps=30,
    seed=2023,
)
IP-Adapter-FaceID-SDXL
首先,您应使用 insightface 提取面部 ID 嵌入:
# Extract a face ID embedding from a single photo with insightface.
import cv2
import torch
from insightface.app import FaceAnalysis

face_app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
face_app.prepare(ctx_id=0, det_size=(640, 640))

img = cv2.imread("person.jpg")
detected_faces = face_app.get(img)
# Take the first detected face; add a batch dimension for the adapter.
faceid_embeds = torch.from_numpy(detected_faces[0].normed_embedding).unsqueeze(0)
然后,您可以根据面部嵌入生成图像:
# Generate images conditioned on the face ID embedding with IP-Adapter-FaceID (SDXL).
import torch
from diffusers import StableDiffusionXLPipeline, DDIMScheduler
from PIL import Image
from ip_adapter.ip_adapter_faceid import IPAdapterFaceIDXL

base_model_path = "SG161222/RealVisXL_V3.0"
ip_ckpt = "ip-adapter-faceid_sdxl.bin"
device = "cuda"

# DDIM scheduler settings used by the original IP-Adapter examples.
noise_scheduler = DDIMScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
    steps_offset=1,
)

pipe = StableDiffusionXLPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    scheduler=noise_scheduler,
    add_watermarker=False,
)

# Load the IP-Adapter weights onto the pipeline.
ip_model = IPAdapterFaceIDXL(pipe, ip_ckpt, device)

# Generate images from a text prompt plus the face embedding.
prompt = "花园中穿白裙子的美丽亚洲少女特写,戴着银色小耳环,在柔和的晨光下"
negative_prompt = "单色、低分辨率、解剖结构不良、最差质量、低质量、模糊"
images = ip_model.generate(
    prompt=prompt,
    negative_prompt=negative_prompt,
    faceid_embeds=faceid_embeds,
    num_samples=2,
    width=1024,
    height=1024,
    num_inference_steps=30,
    guidance_scale=7.5,
    seed=2023,
)
IP-Adapter-FaceID-Plus
首先,您应使用 insightface 提取面部 ID 嵌入和面部图像:
# Extract both the face ID embedding and an aligned face crop (Plus variants
# additionally condition on a CLIP embedding of the face image).
import cv2
import torch
from insightface.app import FaceAnalysis
from insightface.utils import face_align

face_app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
face_app.prepare(ctx_id=0, det_size=(640, 640))

img = cv2.imread("person.jpg")
detected_faces = face_app.get(img)
faceid_embeds = torch.from_numpy(detected_faces[0].normed_embedding).unsqueeze(0)
# Align and crop the face to 224x224 using the detected keypoints.
# You could also use a segmented face here.
face_image = face_align.norm_crop(img, landmark=detected_faces[0].kps, image_size=224)
然后,您可以根据面部嵌入生成图像:
# Generate images with IP-Adapter-FaceID-Plus / PlusV2 (face ID embedding +
# CLIP image embedding of the face crop).
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
from PIL import Image
from ip_adapter.ip_adapter_faceid import IPAdapterFaceIDPlus

v2 = False  # set True to use the PlusV2 checkpoint
base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
vae_model_path = "stabilityai/sd-vae-ft-mse"
image_encoder_path = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
ip_ckpt = "ip-adapter-faceid-plus_sd15.bin" if not v2 else "ip-adapter-faceid-plusv2_sd15.bin"
device = "cuda"

# DDIM scheduler settings used by the original IP-Adapter examples.
noise_scheduler = DDIMScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
    steps_offset=1,
)

vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    scheduler=noise_scheduler,
    vae=vae,
    feature_extractor=None,
    safety_checker=None,
)

# Load the IP-Adapter weights onto the pipeline.
ip_model = IPAdapterFaceIDPlus(pipe, image_encoder_path, ip_ckpt, device)

# Generate images; `shortcut` enables the V2 conditioning path and
# `s_scale` weights the face-structure (CLIP) branch.
prompt = "花园中穿红裙子的女性照片"
negative_prompt = "单色、低分辨率、解剖结构不良、最差质量、低质量、模糊"
images = ip_model.generate(
    prompt=prompt,
    negative_prompt=negative_prompt,
    face_image=face_image,
    faceid_embeds=faceid_embeds,
    shortcut=v2,
    s_scale=1.0,
    num_samples=4,
    width=512,
    height=768,
    num_inference_steps=30,
    seed=2023,
)
IP-Adapter-FaceID-Portrait
# Extract face ID embeddings from multiple reference images for the
# Portrait variant (it conditions on several faces, 5 by default).
import cv2
from insightface.app import FaceAnalysis
import torch

app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))

images = ["1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg"]

faceid_embeds = []
for image in images:
    # BUG FIX: read the current file from the list. The original called
    # cv2.imread("person.jpg") here, so all five "reference" embeddings
    # came from the same photo and ignored the `images` list entirely.
    frame = cv2.imread(image)
    faces = app.get(frame)
    # Two unsqueezes: one batch axis plus one axis to stack faces along.
    faceid_embeds.append(torch.from_numpy(faces[0].normed_embedding).unsqueeze(0).unsqueeze(0))
# Concatenate the per-image embeddings along the face axis.
faceid_embeds = torch.cat(faceid_embeds, dim=1)
# Generate images with IP-Adapter-FaceID-Portrait (multi-face conditioning,
# no LoRA and no ControlNet required).
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
from PIL import Image
from ip_adapter.ip_adapter_faceid_separate import IPAdapterFaceID

base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
vae_model_path = "stabilityai/sd-vae-ft-mse"
ip_ckpt = "ip-adapter-faceid-portrait_sd15.bin"
device = "cuda"

# DDIM scheduler settings used by the original IP-Adapter examples.
noise_scheduler = DDIMScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
    steps_offset=1,
)

vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    scheduler=noise_scheduler,
    vae=vae,
    feature_extractor=None,
    safety_checker=None,
)

# Load the IP-Adapter; `n_cond=5` matches the five conditioning face images.
ip_model = IPAdapterFaceID(pipe, ip_ckpt, device, num_tokens=16, n_cond=5)

# Generate images from a text prompt plus the stacked face embeddings.
prompt = "花园中穿红裙子的女性照片"
negative_prompt = "单色、低分辨率、解剖结构不良、最差质量、低质量、模糊"
images = ip_model.generate(
    prompt=prompt,
    negative_prompt=negative_prompt,
    faceid_embeds=faceid_embeds,
    num_samples=4,
    width=512,
    height=512,
    num_inference_steps=30,
    seed=2023,
)

