"""Gradio demo for CLIP Interrogator 2.1.

Generates Stable Diffusion 2.0 text prompts from images using the
ViT-H-14 OpenCLIP model. On startup it clones the BLIP and
clip-interrogator repositories, downloads preprocessed cache files,
builds the interrogator, and serves a Gradio Blocks UI.
"""

import os
import subprocess
import sys

import torch


def clone_if_missing(path, repo, branch=None):
    """Clone ``repo`` into ``path`` unless ``path`` already exists.

    Args:
        path: Destination directory for the clone.
        repo: Git URL to clone from.
        branch: Optional branch name to check out (``git clone -b``).

    Raises:
        subprocess.CalledProcessError: If the ``git clone`` command fails.
    """
    if os.path.exists(path):
        return
    cmd = ["git", "clone"]
    if branch:
        cmd += ["-b", branch]
    cmd += [repo, path]
    subprocess.run(cmd, check=True)


# These packages are vendored at startup rather than pip-installed; they must
# be cloned and put on sys.path before the imports further below can succeed.
clone_if_missing(
    "src/blip",
    "https://github.com/pharmapsychotic/BLIP.git",
    branch="lib",
)
clone_if_missing(
    "clip-interrogator",
    "https://github.com/pharmapsychotic/clip-interrogator.git",
    branch="open-clip",
)
sys.path.append("src/blip")
sys.path.append("clip-interrogator")

print("Download preprocessed cache files...")
CACHE_URLS = [
    "https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl",
    "https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl",
    "https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl",
    "https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl",
    "https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl",
]
os.makedirs("cache", exist_ok=True)
for url in CACHE_URLS:
    # -nc ("no clobber") skips files that were already downloaded; check=False
    # keeps startup going even if a single download fails.
    subprocess.run(["wget", "-nc", url, "-P", "cache"], check=False)

# Imported only now, after the cloned repositories are on sys.path.
import gradio as gr
from clip_interrogator import Config, Interrogator

config = Config()
config.device = "cuda" if torch.cuda.is_available() else "cpu"
# Offload BLIP to CPU only when no GPU is available.
config.blip_offload = not torch.cuda.is_available()
config.chunk_size = 2048
config.flavor_intermediate_count = 512
config.blip_num_beams = 64

ci = Interrogator(config)


def generate_image_prompt(input_image, interrogation_mode: str, best_mode_max_flavors):
    """
    Generate a Stable Diffusion 2.0 prompt description from an input image.
    Use this tool when you need to interrogate an image with CLIP Interrogator 2.1
    and produce a text prompt.

    Args:
        input_image: PIL image to describe.
        interrogation_mode (str): Interrogation mode to use: best, classic, or fast.
        best_mode_max_flavors: Maximum number of flavors used only when
            interrogation_mode is best.

    Returns:
        str: Generated prompt description, or an upload prompt when no image
        is provided.
    """
    if input_image is None:
        return "Please upload an image."
    # The interrogator expects 3-channel RGB input.
    input_image = input_image.convert("RGB")
    if interrogation_mode == "best":
        result = ci.interrogate(input_image, max_flavors=int(best_mode_max_flavors))
    elif interrogation_mode == "classic":
        result = ci.interrogate_classic(input_image)
    else:
        result = ci.interrogate_fast(input_image)
    print(f"mode {interrogation_mode}: {result}")
    return result


title = """ # CLIP Interrogator 2.1 Want to figure out what a good prompt might be to create new images like an existing one? The CLIP Interrogator is here to get you answers! This version is specialized for Stable Diffusion 2.0 using the ViT-H-14 OpenCLIP model. """
article = """ Server busy? You can also run on [Google Colab](https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/open-clip/clip_interrogator.ipynb) Follow Pharma on twitter [@pharmapsychotic](https://twitter.com/pharmapsychotic) and check out more tools at [Ai generative art tools list](https://pharmapsychotic.com/tools.html) """
css = """ #col-container { max-width: 700px; margin-left: auto; margin-right: auto; } a { text-decoration-line: underline; font-weight: 600; } """

# BUGFIX: css was previously passed to launch(), which has no `css` parameter
# and would raise TypeError; custom CSS belongs on the Blocks constructor.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        input_image = gr.Image(type="pil", elem_id="input-img")
        with gr.Row():
            mode_input = gr.Radio(
                ["best", "classic", "fast"],
                label="Select mode",
                value="best",
            )
            flavor_input = gr.Slider(
                minimum=2,
                maximum=24,
                step=2,
                value=4,
                label="best mode max flavors",
            )
        submit_btn = gr.Button("Submit")
        output_text = gr.Textbox(
            label="Description Output",
            elem_id="output-txt",
        )
        examples = [
            ["27E894C4-9375-48A1-A95D-CB2425416B4B.png", "best", 4],
            ["DB362F56-BA98-4CA1-A999-A25AA94B723B.png", "fast", 4],
        ]
        gr.Examples(
            examples=examples,
            fn=generate_image_prompt,
            inputs=[input_image, mode_input, flavor_input],
            outputs=[output_text],
            cache_examples=False,
            run_on_click=True,
        )
        gr.HTML(article)

    submit_btn.click(
        fn=generate_image_prompt,
        inputs=[input_image, mode_input, flavor_input],
        outputs=output_text,
        api_name="clipi2",
    )

demo.queue(max_size=32).launch(
    footer_links=["api"],
    ssr_mode=False,
    mcp_server=True,
)