import os

from PIL import Image
import gradio as gr
from google import genai

# Shared Gemini client, configured from the environment.
# NOTE(review): if GEMINI_API_KEY is unset, os.environ.get returns None and
# the first API call will fail — confirm the deployment always sets it.
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
client = genai.Client(api_key=GEMINI_API_KEY)
|
|
| |
def explain_image(image):
    """Describe an uploaded image via the Gemini API.

    The prompt asks the model (gemini-2.0-flash) to describe the picture
    in French; the UI labels around it are in Traditional Chinese.

    Args:
        image: PIL image from the Gradio upload widget, or None when the
            user clicks the button without uploading anything.

    Returns:
        str: the model's textual description, or a short notice asking
        the user to upload an image first.
    """
    # Robustness fix: Gradio passes None when no image was uploaded;
    # forwarding None to generate_content fails deep inside the SDK with
    # a confusing error, so return a clear user-facing message instead.
    if image is None:
        return "請先上傳圖片"

    response = client.models.generate_content(
        model="gemini-2.0-flash",
        contents=[image, "使用法語描述這張圖片"],
    )
    return response.text
|
|
| |
# Gradio UI: upload an image, press the button, read the generated text.
with gr.Blocks() as demo:
    gr.Markdown("## 🧠B11090070Gemini 圖片解釋器(圖 ➜ 文)")
    image_input = gr.Image(type="pil", label="上傳圖片")
    explain_button = gr.Button("解釋圖片")
    output_text = gr.Textbox(label="圖片說明", lines=5)

    # Button click feeds the uploaded image into explain_image and shows
    # the returned description in the textbox.
    explain_button.click(
        fn=explain_image,
        inputs=image_input,
        outputs=output_text,
    )

if __name__ == "__main__":
    demo.launch()
|
|