import sys

from transformers import pipeline
| |
|
| | |
def load_model(model_name):
    """Load a text-classification pipeline for *model_name*.

    Args:
        model_name: Hugging Face model identifier or local path.

    Returns:
        The loaded ``pipeline`` object, or ``None`` if loading failed
        (the error is reported on stderr).
    """
    try:
        return pipeline("text-classification", model=model_name)
    except Exception as e:
        # Diagnostics belong on stderr, not stdout, so they don't mix
        # with the script's normal output.
        print(f"Error loading model: {e}", file=sys.stderr)
        return None
| |
|
| | |
def run_inference(user_input, selected_model, prompt=None, *, model=None):
    """Run text classification on *user_input* and return the top label.

    Args:
        user_input: Text to classify.
        selected_model: Model identifier passed to ``load_model`` when no
            ``model`` is injected.
        prompt: Optional prompt prepended to the input (separated by a
            newline).
        model: Optional pre-loaded pipeline/callable; when given,
            ``load_model`` is skipped (useful for testing and reuse).

    Returns:
        The predicted label string, or an ``"Error: ..."`` message on
        failure.
    """
    if model is None:
        model = load_model(selected_model)
    if model is None:
        return f"Error: Model '{selected_model}' failed to load."

    input_text = f"{prompt}\n{user_input}" if prompt else user_input

    try:
        result = model(input_text)
        # Guard against an empty result list: indexing it blindly would
        # raise IndexError and surface as a confusing generic message.
        if not result:
            return "Error: No label in output"
        return result[0].get('label', "Error: No label in output")
    except Exception as e:
        return f"Error during inference: {e}"
| |
|
| | |
def main():
    """Run a demo classification with the CySec exploit-analyzer model."""
    selected_model = "Canstralian/CySec_Known_Exploit_Analyzer"
    user_input = "Sample exploit description"
    prompt = "Classify the following cybersecurity exploit:"

    result = run_inference(user_input, selected_model, prompt)
    print(f"Inference Result: {result}")


# Guard the demo so importing this module has no side effects
# (previously the inference ran unconditionally at import time).
if __name__ == "__main__":
    main()
| |
|