try:
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = "HuggingFaceTB/SmolLM3-3B"
    device = "cuda"  # for GPU usage or "cpu" for CPU usage

    # load the tokenizer and the model
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
    ).to(device)
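
    # (Optional) Hedged sanity check, not part of the original snippet: uncomment to
    # verify that the loaded model can actually generate a few tokens on `device`.
    # inputs = tokenizer("The capital of France is", return_tensors="pt").to(device)
    # outputs = model.generate(**inputs, max_new_tokens=20)
    # print(tokenizer.decode(outputs[0], skip_special_tokens=True))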
    # record success so the uploaded report shows the snippet ran cleanly
    with open('HuggingFaceTB_SmolLM3-3B_3.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in HuggingFaceTB_SmolLM3-3B_3.txt')
except Exception as e:
    # on failure, notify the sprint channel on Slack with a link to the report file
    import os
    from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_TOKEN'])
    client.chat_postMessage(
        channel='#hub-model-metadata-snippets-sprint',
        text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/HuggingFaceTB_SmolLM3-3B_3.txt|HuggingFaceTB_SmolLM3-3B_3.txt>',
    )

    # append the snippet that was executed and the full traceback to the report file
    with open('HuggingFaceTB_SmolLM3-3B_3.txt', 'a', encoding='utf-8') as f:
        import traceback
        f.write('''```CODE:
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "HuggingFaceTB/SmolLM3-3B"
device = "cuda"  # for GPU usage or "cpu" for CPU usage

# load the tokenizer and the model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
).to(device)
```

ERROR:
''')
        traceback.print_exc(file=f)

finally:
    # always upload the report file to the Hub dataset, whether the snippet succeeded or failed
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='HuggingFaceTB_SmolLM3-3B_3.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='HuggingFaceTB_SmolLM3-3B_3.txt',
        repo_type='dataset',
    )