import os
import torch
import gradio as gr
from tqdm import tqdm
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms as tfms
from transformers import CLIPTextModel, CLIPTokenizer, logging
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel

# Silence the benign "some weights were not used" warnings from transformers.
logging.set_verbosity_error()

# Pick the best available device: CUDA, then Apple MPS, then CPU.
torch_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
if torch_device == "mps":
    os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = "1"


# The VAE, used to encode images into latents and decode latents back into images.
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")

# The CLIP tokenizer and text encoder that condition the UNet on the prompt.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

# The UNet that predicts the noise residual in latent space.
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")

# The noise scheduler; these betas match the schedule Stable Diffusion v1 was trained with.
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
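
# Any diffusers scheduler with the same API could be swapped in here; a minimal,
# untested sketch (assumes the scheduler config shipped with the checkpoint):
# from diffusers import EulerDiscreteScheduler
# scheduler = EulerDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")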


# Map UI style names to the placeholder tokens of the textual-inversion embeddings.
style_token_dict = {'Concept': '<concept-art>', 'Realistic': '<doose-realistic>', 'Line': '<line-art>',
                    'Ricky': '<RickyArt>', 'Plane Scape': '<tony-diterlizzi-planescape>'}

# Move the models to the chosen device.
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)

# Grab the token and position embedding layers from the CLIP text model, and
# precompute the position embeddings for the full 77-token context.
token_emb_layer = text_encoder.text_model.embeddings.token_embedding
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
position_embeddings = pos_emb_layer(position_ids)
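
# Sanity check (assumption: CLIP ViT-L/14 uses a 77-token context with 768-dim embeddings).
assert position_embeddings.shape == (1, 77, 768)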


# Load the learned textual-inversion embeddings (one .bin per style, each mapping
# a placeholder token to a 768-dim embedding vector).
concept_art_embed = torch.load('concept-art.bin', map_location=torch_device)
doose_s_realistic_art_style_embed = torch.load('doose-s-realistic-art-style.bin', map_location=torch_device)
line_art_embed = torch.load('line-art.bin', map_location=torch_device)
rickyart_embed = torch.load('rickyart.bin', map_location=torch_device)
tony_diterlizzi_s_planescape_art_embed = torch.load('tony-diterlizzi-s-planescape-art.bin', map_location=torch_device)

# Register the placeholder tokens; they are appended to the vocabulary in this order.
tokenizer.add_tokens(['<concept-art>', '<doose-realistic>', '<line-art>', '<RickyArt>', '<tony-diterlizzi-planescape>'])
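
# Sanity check (assumption: the base CLIP vocabulary has 49408 entries, so the
# five new tokens land at ids 49408-49412 in the order they were added above).
assert tokenizer.convert_tokens_to_ids('<concept-art>') == 49408
assert len(tokenizer) == 49413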


# Build an enlarged token embedding layer: the 49408 original CLIP rows plus the
# five style embeddings, concatenated in the same order the tokens were added
# (49408 + 5 = 49413).
token_emb_layer_with_art = torch.nn.Embedding(49413, 768)
token_emb_layer_with_art.load_state_dict({'weight': torch.cat((token_emb_layer.state_dict()['weight'],
                                                               concept_art_embed['<concept-art>'].unsqueeze(0).to(torch_device),
                                                               doose_s_realistic_art_style_embed['<doose-realistic>'].unsqueeze(0).to(torch_device),
                                                               line_art_embed['<line-art>'].unsqueeze(0).to(torch_device),
                                                               rickyart_embed['<RickyArt>'].unsqueeze(0).to(torch_device),
                                                               tony_diterlizzi_s_planescape_art_embed['<tony-diterlizzi-planescape>'].unsqueeze(0).to(torch_device)))})
token_emb_layer_with_art = token_emb_layer_with_art.to(torch_device)

# Used by the 'Grayscale' guidance loss below.
grayscale_transformer = tfms.Grayscale(num_output_channels=3)
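
# Minimal sanity check: row 49408 of the merged layer should now hold the
# '<concept-art>' embedding that was concatenated on above.
with torch.no_grad():
    probe = token_emb_layer_with_art(torch.tensor([49408], device=torch_device))
assert probe.shape == (1, 768)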


def set_timesteps(scheduler, num_inference_steps):
    # Wrapper that also casts timesteps to float32 (avoids a float64 issue on MPS).
    scheduler.set_timesteps(num_inference_steps)
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)


def pil_to_latent(input_im):
    # Encode a single PIL image into scaled Stable Diffusion latents.
    with torch.no_grad():
        # ToTensor gives [0, 1]; the VAE expects inputs in [-1, 1].
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)
    return 0.18215 * latent.latent_dist.sample()  # SD v1 latent scaling factor


def latents_to_pil(latents):
    # Decode scaled SD latents back into a list of PIL images.
    latents = (1 / 0.18215) * latents  # undo the latent scaling factor
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)  # [-1, 1] -> [0, 1]
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(img) for img in images]
    return pil_images


def build_causal_attention_mask(bsz, seq_len, dtype):
    # Upper-triangular mask of the most negative representable value, so each
    # position can attend only to itself and earlier tokens.
    mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
    mask.fill_(torch.finfo(dtype).min)
    mask = mask.triu_(1)
    return mask.unsqueeze(1)  # (bsz, 1, seq_len, seq_len)
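
# For example, with seq_len = 4 each (seq_len, seq_len) slice looks like
#   [[0,  min, min, min],
#    [0,   0,  min, min],
#    [0,   0,   0,  min],
#    [0,   0,   0,   0]]
# where min is torch.finfo(dtype).min.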


def get_output_embeds(input_embeddings):
    # CLIP's text encoder is causal, so build the causal attention mask first.
    bsz, seq_len = input_embeddings.shape[:2]
    causal_attention_mask = build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)

    # Feed the pre-computed embeddings through the transformer encoder directly,
    # bypassing the embedding layers we have replaced.
    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True,
        return_dict=None,
    )

    # The last hidden state is the first element of the returned tuple.
    output = encoder_outputs[0]

    # Apply the final layer norm, exactly as CLIPTextModel's forward pass would.
    output = text_encoder.text_model.final_layer_norm(output)

    # The pooled output is skipped; the UNet only needs the per-token embeddings.
    return output
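
# For a prompt with no added tokens, get_output_embeds(token + position embeddings)
# should match text_encoder(input_ids)[0] up to numerical precision -- a handy
# equivalence check when modifying this function.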


def generate_with_embs(num_inference_steps, guidance_scale, seed, text_input, text_embeddings):
    height = 512
    width = 512
    generator = torch.manual_seed(seed)
    batch_size = 1

    # Build the unconditional (empty-prompt) embeddings for classifier-free guidance.
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prepare the scheduler.
    set_timesteps(scheduler, num_inference_steps)

    # Start from random latents, scaled to the scheduler's initial noise level.
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Denoising loop.
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Duplicate the latents so both guidance passes share one UNet call.
        latent_model_input = torch.cat([latents] * 2)
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual.
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Classifier-free guidance: move the prediction away from the unconditional one.
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Step the scheduler: x_t -> x_{t-1}.
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]


def guide_loss(images, loss_type='Grayscale'):
    # 'Grayscale': penalize distance from the grayscale version of the image.
    if loss_type == 'Grayscale':
        transformed_imgs = grayscale_transformer(images)
        error = torch.abs(transformed_imgs - images).mean()

    # 'Bright': penalize distance from a brightened version.
    elif loss_type == 'Bright':
        transformed_imgs = tfms.functional.adjust_brightness(images, brightness_factor=3)
        error = torch.abs(transformed_imgs - images).mean()

    # 'Contrast': penalize distance from a high-contrast version.
    elif loss_type == 'Contrast':
        transformed_imgs = tfms.functional.adjust_contrast(images, contrast_factor=10)
        error = torch.abs(transformed_imgs - images).mean()

    # 'Symmetry': penalize distance from the horizontally flipped image.
    elif loss_type == "Symmetry":
        flipped_image = torch.flip(images, [3])
        error = F.mse_loss(images, flipped_image)

    # 'Saturation': penalize distance from a heavily saturated version.
    elif loss_type == 'Saturation':
        transformed_imgs = tfms.functional.adjust_saturation(images, saturation_factor=10)
        error = torch.abs(transformed_imgs - images).mean()

    else:
        raise ValueError(f"Unknown loss_type: {loss_type}")

    return error
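

# Further losses can be slotted into guide_loss the same way. A minimal sketch
# (hypothetical 'Blue' loss, not wired into the UI): pull the blue channel
# toward 0.9 to tint generations blue.
def blue_loss(images):
    # images: (B, 3, H, W) in [0, 1]; channel index 2 is blue.
    return torch.abs(images[:, 2] - 0.9).mean()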


def generate_with_guide_loss(num_inference_steps, guidance_scale, seed, text_input, text_embeddings, loss_type, loss_scale):
    height = 512
    width = 512
    generator = torch.manual_seed(seed)
    batch_size = 1

    # Build the unconditional (empty-prompt) embeddings for classifier-free guidance.
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prepare the scheduler.
    set_timesteps(scheduler, num_inference_steps)

    # Start from random latents, scaled to the scheduler's initial noise level.
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Denoising loop.
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Duplicate the latents so both guidance passes share one UNet call.
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual.
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Classifier-free guidance.
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Every 5 steps, additionally nudge the latents down the gradient of the guidance loss.
        if i % 5 == 0:
            # Re-enable gradients on the latents.
            latents = latents.detach().requires_grad_()

            # Estimate the fully denoised image x0 from the current noisy latents.
            latents_x0 = latents - sigma * noise_pred

            # Decode x0 to image space, mapping [-1, 1] to [0, 1].
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5

            # Compute the guidance loss on the decoded image.
            loss = guide_loss(denoised_images, loss_type) * loss_scale
            print(i, 'loss:', loss.item())

            # Gradient of the loss with respect to the latents.
            cond_grad = torch.autograd.grad(loss, latents)[0]

            # Gradient step, scaled by sigma^2 to match the current noise level.
            latents = latents.detach() - cond_grad * sigma**2

        # Step the scheduler: x_t -> x_{t-1}.
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]


def inference(text, style, inference_step, guidance_scale, seed, guidance_method, loss_scale):
    # Append the style's placeholder token to the prompt.
    prompt = text + " in the style of " + style_token_dict[style]

    # Tokenize the prompt (padded/truncated to the 77-token context).
    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)

    # Look the tokens up in the enlarged embedding layer, so the style token
    # resolves to its learned textual-inversion embedding.
    token_embeddings = token_emb_layer_with_art(input_ids)

    # Add the position embeddings to get the full input embeddings.
    input_embeddings = token_embeddings + position_embeddings

    # Push the combined embeddings through the rest of the text encoder.
    modified_output_embeddings = get_output_embeds(input_embeddings)

    # Generate once with plain classifier-free guidance...
    image_embs = generate_with_embs(inference_step, guidance_scale, seed, text_input, modified_output_embeddings)

    # ...and once more with the extra guidance loss applied during sampling.
    image_guide = generate_with_guide_loss(inference_step, guidance_scale, seed, text_input,
                                           modified_output_embeddings, guidance_method, loss_scale)

    return image_embs, image_guide
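
# Example call outside the Gradio UI (assumed values mirroring the examples
# below; commented out because it runs the full pipeline twice):
# img_plain, img_guided = inference("A sweet potato farm", "Concept", 10, 7.5, 1, "Grayscale", 200)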


title = "Stable Diffusion with Textual Inversion"
description = "A simple Gradio interface for running Stable Diffusion and generating images in different art styles"
examples = [["A sweet potato farm", 'Concept', 10, 4.5, 1, 'Grayscale', 100],
            ["Sky full of cotton candy", 'Realistic', 10, 9.5, 2, 'Bright', 200]]


demo = gr.Interface(inference,
                    inputs=[gr.Textbox(label="Prompt", type="text"),
                            gr.Dropdown(label="Style", choices=['Concept', 'Realistic', 'Line',
                                                                'Ricky', 'Plane Scape'], value="Concept"),
                            gr.Slider(10, 30, 10, step=1, label="Inference steps"),
                            gr.Slider(1, 10, 7.5, step=0.1, label="Guidance scale"),
                            gr.Slider(0, 10000, 1, step=1, label="Seed"),
                            gr.Dropdown(label="Guidance method", choices=['Grayscale', 'Bright', 'Contrast',
                                                                          'Symmetry', 'Saturation'], value="Grayscale"),
                            gr.Slider(100, 10000, 200, step=100, label="Loss scale")],
                    outputs=[gr.Image(width=320, height=320, label="Generated art"),
                             gr.Image(width=320, height=320, label="Generated art with guidance")],
                    title=title,
                    description=description,
                    examples=examples)

demo.launch()