import difflib
import json
import sys
import time
from datetime import datetime

import requests
|
|
|
|
|
# Names of the Ollama models queried for every question.
MODELS = ["gemmapro", "gemmapro-r", "gemmapro-20kctx"]

# Ollama text-generation endpoint (non-streaming requests are sent here);
# assumes a locally running Ollama server on the default port.
OLLAMA_URL = "http://localhost:11434/api/generate"
|
|
|
def send_request_to_ollama(prompt, model, timeout=600):
    """Send a prompt to the given Ollama model and return its response text.

    Args:
        prompt: The prompt text to submit.
        model: Name of the Ollama model to query.
        timeout: Seconds to wait for the HTTP response (new parameter;
            defaults to 600 so existing callers are unaffected).

    Returns:
        The model's response text on success, otherwise an error message
        string prefixed with "[錯誤]".
    """
    data = {
        "model": model,
        "prompt": prompt,
        "stream": False,
    }

    try:
        # Without an explicit timeout, requests blocks forever if the
        # Ollama server hangs mid-inference.
        response = requests.post(OLLAMA_URL, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()["response"]
    except json.JSONDecodeError:
        # Must come before RequestException: since requests 2.27,
        # Response.json() raises requests.exceptions.JSONDecodeError, which
        # subclasses BOTH json.JSONDecodeError and RequestException — the
        # original ordering made this branch unreachable.
        print(f"[錯誤] 模型 {model} 回應非有效的 JSON 格式: {response.text}")
        return f"[錯誤] 無法解析來自 {model} 的回應。"
    except requests.exceptions.RequestException as e:
        print(f"[錯誤] 模型 {model} 請求失敗: {e}")
        return f"[錯誤] 向 {model} 發送請求時發生錯誤: {str(e)}"
    except KeyError:
        print(f"[錯誤] 模型 {model} 回應格式不符預期,找不到 'response' 鍵。")
        return f"[錯誤] 模型 {model} 回應格式錯誤。"
|
|
|
def initialize_markdown_file():
    """Create output_moremodels.md with a YAML front-matter header.

    Writes the metadata block (title, date, model list, author,
    description) followed by a report heading. Terminates the process
    with exit status 1 if the file cannot be written.
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    metadata = {
        "title": "多模型推理彙整報告",
        "date": timestamp,
        "models": MODELS,
        "author": "自動化程式",
        "description": "本報告整合多個模型對多個問題的回應,進行去蕪存菁後的彙整。",
    }

    try:
        with open("output_moremodels.md", "w", encoding="utf-8") as file:
            file.write("---\n")
            for key, value in metadata.items():
                if isinstance(value, list):
                    # Emit lists as a YAML sequence, one "- item" per line.
                    file.write(f"{key}:\n")
                    for item in value:
                        file.write(f"  - {item}\n")
                else:
                    file.write(f"{key}: {value}\n")
            file.write("---\n\n")
            file.write(f"# {metadata['title']}\n\n")
            file.write(f"產出時間: {timestamp}\n\n")
            file.write(f"使用模型: {', '.join(MODELS)}\n\n---\n\n")
        print("[初始化] 已建立 output_moremodels.md")
    except IOError as e:
        print(f"[錯誤] 無法寫入檔案 output_moremodels.md: {e}")
        # Fix: the bare exit() builtin is injected by the site module and is
        # not guaranteed in all run modes; sys.exit() is the reliable way to
        # terminate with a failure status.
        sys.exit(1)
|
|
|
def append_to_markdown(index, prompt, responses):
    """Append one question, every model's answer, and a combined summary
    to output_moremodels.md.

    Args:
        index: 1-based question number used in the section heading.
        prompt: The question text.
        responses: Mapping of model name -> response (string or error value).
    """
    try:
        with open("output_moremodels.md", "a", encoding="utf-8") as report:
            report.write(f"## 問題 {index}\n\n")
            report.write(f"### 提問\n\n```\n{prompt}\n```\n\n")

            for name, answer in responses.items():
                body = "[無回應]" if answer is None else str(answer)
                report.write(f"### 模型:{name}\n\n{body.strip()}\n\n")

            # Only string answers can be summarized; drop anything else.
            usable = {
                name: answer
                for name, answer in responses.items()
                if isinstance(answer, str)
            }
            digest = summarize_responses(prompt, usable)
            report.write(f"### 彙整摘要\n\n{digest}\n\n---\n\n")
    except IOError as e:
        print(f"[錯誤] 無法附加內容至檔案 output_moremodels.md: {e}")
|
|
|
|
|
def _split_sentences(text):
    """Split *text* into trimmed sentences on CJK/ASCII end punctuation.

    Newlines are flattened to spaces first; a trailing fragment with no
    terminator is kept as its own sentence.
    """
    sentences = []
    current = ""
    for char in text.replace('\n', ' '):
        current += char
        if char in '。!?.!?':
            trimmed = current.strip()
            if trimmed:
                sentences.append(trimmed)
            current = ""
    # Flush whatever remains after the last terminator.
    trimmed = current.strip()
    if trimmed:
        sentences.append(trimmed)
    return sentences


def summarize_responses(prompt, responses):
    """Compare the models' responses and merge similar sentences into a
    summary.

    Note: the comparison is purely syntactic (difflib character
    similarity) and may not capture semantics. The *prompt* argument is
    currently unused but kept for interface compatibility with callers.

    Args:
        prompt: The original question (unused).
        responses: Mapping of model name -> response string.

    Returns:
        A Markdown-formatted summary string; a fallback preview of each
        response when no sufficiently similar sentences are found.
    """
    if not responses:
        return "沒有從任何模型收到有效回應可供摘要。"

    sentence_lists = [_split_sentences(text) for text in responses.values()]

    # Flatten to (model index, sentence index, sentence) triples so every
    # cross-model pair can be compared once.
    all_sentences = [
        (i, j, sent)
        for i, sentences in enumerate(sentence_lists)
        for j, sent in enumerate(sentences)
    ]

    processed = set()
    summary_sentences = []

    for idx1, (model1, pos1, sent1) in enumerate(all_sentences):
        if (model1, pos1) in processed:
            continue

        best_match = None
        best_ratio = 0.7  # minimum similarity to count as "the same point"

        for model2, pos2, sent2 in all_sentences[idx1 + 1:]:
            # Only compare sentences from different, still-unused sources.
            if model2 == model1 or (model2, pos2) in processed:
                continue
            ratio = difflib.SequenceMatcher(None, sent1, sent2).ratio()
            if ratio > best_ratio:
                best_ratio = ratio
                # Prefer the shorter phrasing of the shared point.
                shorter = sent1 if len(sent1) <= len(sent2) else sent2
                best_match = ((model1, pos1), (model2, pos2), shorter)

        if best_match:
            pair1, pair2, chosen = best_match
            processed.add(pair1)
            processed.add(pair2)
            summary_sentences.append(chosen)

    if not summary_sentences:
        summary = "各模型提供了不同的觀點,未偵測到足夠相似的核心內容可供直接彙整。重點預覽如下:\n\n"
        for model, response in responses.items():
            preview = response.strip().replace('\n', ' ')[:100]
            summary += f"- **{model}**: {preview}...\n"
    else:
        summary = "綜合各模型的相似觀點,摘要如下:\n\n"
        # Order-preserving de-duplication.
        for sentence in dict.fromkeys(summary_sentences):
            summary += f"- {sentence}\n"

    return summary
|
|
|
def main():
    """Run every question through every configured model and write the
    combined Markdown report."""
    questions = [
        "介紹台灣的夜市文化",
        "台灣人工智慧發展的現況與挑戰為何?",
    ]

    initialize_markdown_file()
    print("[開始] 向模型發送請求...")

    total = len(questions)
    for number, question in enumerate(questions, 1):
        print(f"[處理中] 問題 {number}/{total}: {question[:30]}...")
        answers = {}

        for model in MODELS:
            print(f" └▶ 模型 {model} 推理中...")
            started = time.time()
            answers[model] = send_request_to_ollama(question, model)
            print(f" 回應耗時: {time.time() - started:.2f} 秒")

        append_to_markdown(number, question, answers)
        print(f"[完成] 問題 {number} 已處理並寫入檔案。")

    print("[完成] 所有問題已處理完畢,結果保存在 output_moremodels.md")


if __name__ == "__main__":
    main()