import cv2
import numpy as np

import diffusers
import torch
import torch.nn.functional as F
from diffusers.models import ControlNetModel
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.utils import load_image
from PIL import Image
from torchvision.transforms import Compose

from controlnet_aux import OpenposeDetector
from depth_anything.dpt import DepthAnything
from depth_anything.util.transform import NormalizeImage, PrepareForNet, Resize
from insightface.app import FaceAnalysis

from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps
from style_template import styles

STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = "Mars"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != "cuda":
    raise ValueError("This handler requires a GPU to run")

# fp16 halves memory use on GPU; fall back to fp32 on CPU.
dtype = torch.float16 if device.type == "cuda" else torch.float32


class EndpointHandler:
    def __init__(self, model_dir):
        print("Loading FaceAnalysis", model_dir)

        # InsightFace face detector + ArcFace recognizer; supplies the
        # identity embeddings and facial keypoints used below.
        self.app = FaceAnalysis(
            name="buffalo_l",
            root="./",
            providers=["CPUExecutionProvider"],
        )
        self.app.prepare(ctx_id=0, det_size=(640, 640))
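
        # Auxiliary condition extractors: OpenPose for body pose and
        # Depth-Anything (ViT-L/14) for monocular depth maps.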
        openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
        depth_anything = DepthAnything.from_pretrained("LiheYoung/depth_anything_vitl14").to(device).eval()
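
        # Depth-Anything preprocessing: resize the short side to at least
        # 518 px (lower_bound) with both sides multiples of 14, the
        # ViT-L/14 patch size, then apply ImageNet normalization.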
        transform = Compose([
            Resize(
                width=518,
                height=518,
                resize_target=False,
                keep_aspect_ratio=True,
                ensure_multiple_of=14,
                resize_method="lower_bound",
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            PrepareForNet(),
        ])

        face_adapter = "/repository/checkpoints/ip-adapter.bin"
        controlnet_path = "/repository/checkpoints/ControlNetModel"

        # IdentityNet: the InstantID ControlNet conditioned on facial keypoints.
        self.controlnet_identitynet = ControlNetModel.from_pretrained(
            controlnet_path, torch_dtype=dtype
        )

        controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
        controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
        controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"

        controlnet_pose = ControlNetModel.from_pretrained(
            controlnet_pose_model, torch_dtype=dtype
        ).to(device)
        controlnet_canny = ControlNetModel.from_pretrained(
            controlnet_canny_model, torch_dtype=dtype
        ).to(device)
        controlnet_depth = ControlNetModel.from_pretrained(
            controlnet_depth_model, torch_dtype=dtype
        ).to(device)
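
        # Condition-image generators for the auxiliary ControlNets; each
        # maps a PIL image to the corresponding conditioning map and is
        # registered in self.controlnet_map_fn below.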
        def get_depth_map(image):
            # Normalize to [0, 1], run Depth-Anything, then rescale the
            # prediction to the input resolution as an 8-bit grayscale image.
            image = np.array(image) / 255.0
            h, w = image.shape[:2]

            image = transform({"image": image})["image"]
            image = torch.from_numpy(image).unsqueeze(0).to(device)

            with torch.no_grad():
                depth = depth_anything(image)

            depth = F.interpolate(depth[None], (h, w), mode="bilinear", align_corners=False)[0, 0]
            depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
            depth = depth.cpu().numpy().astype(np.uint8)

            return Image.fromarray(depth)

        def get_canny_image(image, t1=100, t2=200):
            # Canny edge map returned as a single-channel PIL image.
            image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
            edges = cv2.Canny(image, t1, t2)
            return Image.fromarray(edges, "L")

        self.controlnet_map = {
            "pose": controlnet_pose,
            "canny": controlnet_canny,
            "depth": controlnet_depth,
        }
        self.controlnet_map_fn = {
            "pose": openpose,
            "canny": get_canny_image,
            "depth": get_depth_map,
        }
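
        # Base SDXL checkpoint wrapped in the InstantID pipeline; the
        # ControlNet list is re-wrapped with the auxiliary nets further down.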
        pretrained_model_name_or_path = "wangqixun/YamerMIX_v8"

        self.pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
            pretrained_model_name_or_path,
            controlnet=[self.controlnet_identitynet],
            torch_dtype=dtype,
            safety_checker=None,
            feature_extractor=None,
        ).to(device)

        self.pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(
            self.pipe.scheduler.config
        )

        # LCM-LoRA stays resident but disabled; it can be re-enabled for
        # low-step sampling without reloading the pipeline.
        self.pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
        self.pipe.disable_lora()

        self.pipe.cuda()
        self.pipe.load_ip_adapter_instantid(face_adapter)
        self.pipe.image_proj_model.to(device)
        self.pipe.unet.to(device)

        identitynet_strength_ratio = 0.8
        pose_strength = 0.5
        canny_strength = 0.3
        depth_strength = 0.5

        self.my_controlnet_selection = ["pose", "canny"]

        controlnet_scales = {
            "pose": pose_strength,
            "canny": canny_strength,
            "depth": depth_strength,
        }

        # IdentityNet always comes first; self.control_scales must line up
        # one-to-one with the ControlNets passed to MultiControlNetModel.
        self.pipe.controlnet = MultiControlNetModel(
            [self.controlnet_identitynet]
            + [self.controlnet_map[s] for s in self.my_controlnet_selection]
        )
        self.control_scales = [float(identitynet_strength_ratio)] + [
            controlnet_scales[s] for s in self.my_controlnet_selection
        ]

    def __call__(self, data):
        def apply_style(style_name: str, positive: str) -> str:
            # Styles are (positive, negative) template pairs; only the
            # positive template is applied here.
            p, _ = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
            return p.replace("{prompt}", positive)

        default_negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, anime, photorealistic, 35mm film, deformed, glitch, low contrast, noisy"

        # Request payload, with defaults for every field.
        face_image_path = data.pop("face_image_path", "https://i.ibb.co/GQzm527/examples-musk-resize.jpg")
        pose_image_path = data.pop("pose_image_path", "https://i.ibb.co/TRCK4MS/examples-poses-pose2.jpg")
        prompt_input = data.pop("inputs", "a man flying in the sky in Mars")
        num_inference_steps = data.pop("num_inference_steps", 20)
        guidance_scale = data.pop("guidance_scale", 5.0)
        negative_prompt = data.pop("negative_prompt", default_negative_prompt)
        style_name = data.pop("style_name", DEFAULT_STYLE_NAME)

        prompt = apply_style(style_name, prompt_input)

        adapter_strength_ratio = 0.8

        def convert_from_cv2_to_image(img: np.ndarray) -> Image.Image:
            return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

        def convert_from_image_to_cv2(img: Image.Image) -> np.ndarray:
            return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)

        def resize_img(
            input_image,
            max_side=1280,
            min_side=1024,
            size=None,
            pad_to_max_side=False,
            mode=Image.BILINEAR,
            base_pixel_number=64,
        ):
            # Scale so the short side reaches min_side, cap the long side at
            # max_side, then snap both dimensions down to a multiple of
            # base_pixel_number (SDXL expects multiples of 64).
            if size is not None:
                w_resize_new, h_resize_new = size
            else:
                w, h = input_image.size
                ratio_min = min_side / min(w, h)
                w_min, h_min = round(ratio_min * w), round(ratio_min * h)
                ratio_max = min(1.0, max_side / max(w_min, h_min))
                w_final, h_final = round(ratio_max * w_min), round(ratio_max * h_min)
                w_resize_new = (w_final // base_pixel_number) * base_pixel_number
                h_resize_new = (h_final // base_pixel_number) * base_pixel_number

            input_image = input_image.resize([w_resize_new, h_resize_new], mode)

            if pad_to_max_side:
                # Center the resized image on a white max_side x max_side canvas.
                res = Image.new("RGB", (max_side, max_side), (255, 255, 255))
                offset_x = (max_side - w_resize_new) // 2
                offset_y = (max_side - h_resize_new) // 2
                res.paste(input_image, (offset_x, offset_y))
                return res

            return input_image
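
        # The face image provides only identity: its ArcFace embedding feeds
        # the IP-adapter, while composition comes from the pose image below.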
        face_image = load_image(face_image_path)
        face_image = resize_img(face_image, max_side=1024)
        face_image_cv2 = convert_from_image_to_cv2(face_image)
        height, width, _ = face_image_cv2.shape

        # Detect faces and keep the largest one by bounding-box area.
        face_info = self.app.get(face_image_cv2)
        if len(face_info) == 0:
            raise ValueError("No face detected in the face image")

        face_info = sorted(
            face_info,
            key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]),
        )[-1]

        face_emb = face_info["embedding"]
        face_kps = draw_kps(convert_from_cv2_to_image(face_image_cv2), face_info["kps"])
        img_controlnet = face_image

        # The pose image drives composition: its face keypoints replace those
        # of the face image, and it becomes the input for the auxiliary
        # conditioning maps.
        pose_image = load_image(pose_image_path)
        pose_image = resize_img(pose_image, max_side=1024)
        img_controlnet = pose_image
        pose_image_cv2 = convert_from_image_to_cv2(pose_image)

        face_info = self.app.get(pose_image_cv2)
        if len(face_info) == 0:
            raise ValueError("No face detected in the pose image")

        face_info = face_info[-1]
        face_kps = draw_kps(pose_image, face_info["kps"])

        width, height = face_kps.size

        # White rectangle over the detected face: the pipeline uses this mask
        # to confine the identity conditioning to the face region.
        control_mask = np.zeros([height, width, 3])
        x1, y1, x2, y2 = face_info["bbox"]
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        control_mask[y1:y2, x1:x2] = 255
        control_mask = Image.fromarray(control_mask.astype(np.uint8))

        # Order must match self.control_scales: identity keypoints first,
        # then the selected auxiliary conditions.
        control_images = [face_kps] + [
            self.controlnet_map_fn[s](img_controlnet).resize((width, height))
            for s in self.my_controlnet_selection
        ]

        print("Start inference...")

        self.generator = torch.Generator(device=device).manual_seed(42)

        self.pipe.set_ip_adapter_scale(adapter_strength_ratio)
        images = self.pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image_embeds=face_emb,
            image=control_images,
            control_mask=control_mask,
            controlnet_conditioning_scale=self.control_scales,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            height=height,
            width=width,
            generator=self.generator,
        ).images

        return images[0]
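

# Minimal local smoke test, a sketch rather than part of the serving contract.
# It assumes the InstantID checkpoints exist under /repository/checkpoints and
# that the default example image URLs are reachable; in production the hosting
# framework constructs EndpointHandler once and calls it per request.
if __name__ == "__main__":
    handler = EndpointHandler(model_dir="/repository")
    result = handler({
        "inputs": "a man flying in the sky in Mars",
        "style_name": "Mars",
        "num_inference_steps": 20,
        "guidance_scale": 5.0,
    })
    result.save("output.png")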