# Required libraries for running the tiny-gpt2 model (CPU and GPU compatible)
torch==2.3.0  # CPU version; for GPU, use torch==2.3.0+cu121 (see notes below)
transformers==4.38.2
huggingface_hub==0.22.2
datasets==2.21.0
numpy==1.26.4
matplotlib==3.8.3
flask==3.0.3
# Notes:
# - For CPU-only systems (e.g., 16 GB or 8 GB RAM, no GPU), the versions above work directly.
# - For GPU-equipped systems (e.g., NVIDIA GTX cards), install the CUDA-enabled PyTorch build:
#   1. Uninstall torch: pip uninstall torch -y
#   2. Install the GPU version: pip install torch==2.3.0+cu121 --index-url https://download.pytorch.org/whl/cu121
#      (the +cu121 wheels are hosted on the PyTorch index, not PyPI)
#   3. Verify CUDA: python -c "import torch; print(torch.cuda.is_available())"
# - To force CPU execution on GPU systems, scripts set os.environ["CUDA_VISIBLE_DEVICES"] = ""
# - Compatible with Python 3.10.9 or 3.9.10