Buckets:

download
raw
7.75 kB
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "cCdVZlzUhePK",
"outputId": "36c02f5d-c72b-4f3f-b370-3775e8340d91",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Cloning into 'New_World'...\n",
"remote: Enumerating objects: 125, done.\u001b[K\n",
"remote: Counting objects: 100% (125/125), done.\u001b[K\n",
"remote: Compressing objects: 100% (122/122), done.\u001b[K\n",
"Receiving objects: 100% (125/125), 51.73 KiB | 7.39 MiB/s, done.\n",
"remote: Total 125 (delta 85), reused 0 (delta 0), pack-reused 0 (from 0)\u001b[K\n",
"Resolving deltas: 100% (85/85), done.\n"
]
}
],
"source": [
"!git clone https://github.com/dvssajay/New_World.git"
]
},
{
"cell_type": "code",
"source": [
"%cd New_World/"
],
"metadata": {
"id": "4OhZbKcyhkgl",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "a626a392-ebb5-474c-c2e4-695b684abd3a"
},
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/New_World\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!python /content/New_World/mainAB2GR0_10_1.py"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "tQzPOzUYxMtl",
"outputId": "53a792d0-4e3f-4bb2-b886-8a70a8dafa21"
},
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"==> Preparing data..\n",
"100% 170M/170M [00:20<00:00, 8.34MB/s]\n",
"==> Building model..\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: (1) Create a W&B account\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: (2) Use an existing W&B account\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: (3) Don't visualize my results\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: Enter your choice: 3\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: You chose \"Don't visualize my results\"\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: Tracking run with wandb version 0.21.0\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: W&B syncing is set to \u001b[1m`offline`\u001b[0m in this directory. Run \u001b[1m`wandb online`\u001b[0m or set \u001b[1mWANDB_MODE=online\u001b[0m to enable cloud syncing.\n",
"\n",
"Epoch: 0\n",
" [================================================================>] Step: 123ms | Tot: 1m15s | Loss: 1.446 | Acc: 47.174% (23587/50000) 391/391 \n",
" [================================================================>] Step: 120ms | Tot: 5s14ms | Loss: 1.278 | Acc: 55.940% (5594/10000) 79/79 \n",
"Saving..\n",
"\n",
"Epoch: 1\n",
" [================================================================>] Step: 123ms | Tot: 1m20s | Loss: 1.030 | Acc: 63.754% (31877/50000) 391/391 \n",
" [================================================================>] Step: 18ms | Tot: 5s605ms | Loss: 1.036 | Acc: 64.390% (6439/10000) 79/79 \n",
"Saving..\n",
"\n",
"Epoch: 2\n",
" [================================================================>] Step: 128ms | Tot: 1m18s | Loss: 0.837 | Acc: 71.200% (35600/50000) 391/391 \n",
" [================================================================>] Step: 18ms | Tot: 4s893ms | Loss: 0.982 | Acc: 68.750% (6875/10000) 79/79 \n",
"Saving..\n",
"\n",
"Epoch: 3\n",
"Traceback (most recent call last):\n",
" File \"/content/New_World/mainAB2GR0_10_1.py\", line 231, in <module>\n",
" train(epoch)\n",
" File \"/content/New_World/mainAB2GR0_10_1.py\", line 121, in train\n",
" train_loss += loss.item()\n",
" ^^^^^^^^^^^\n",
"KeyboardInterrupt\n",
"\u001b[1;34mwandb\u001b[0m: \n",
"\u001b[1;34mwandb\u001b[0m: You can sync this run to the cloud by running:\n",
"\u001b[1;34mwandb\u001b[0m: \u001b[1mwandb sync /content/wandb/offline-run-20250803_072743-pvviusl0\u001b[0m\n",
"\u001b[1;34mwandb\u001b[0m: Find logs at: \u001b[1;35mwandb/offline-run-20250803_072743-pvviusl0/logs\u001b[0m\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"%cd examples/CIFAR100/"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "nis69XVJxpSO",
"outputId": "dc1f0bb5-9bd5-46ce-c1ef-722e699434d3"
},
"execution_count": 4,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/FSGDM/examples/CIFAR100\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!python main.py"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "PYsENziixteU",
"outputId": "26e697b2-e04f-42dc-96fc-e9ca31a05d05"
},
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Using device: cuda\n",
"100% 169M/169M [00:05<00:00, 30.8MB/s]\n",
"/usr/local/lib/python3.11/dist-packages/torch/utils/data/dataloader.py:624: UserWarning: This DataLoader will create 4 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
" warnings.warn(\n",
"Epoch 1/300\n",
"Test set: Average loss: 4.1882, Accuracy: 738/10000 (7.38%)\n",
"Epoch 2/300\n",
"Traceback (most recent call last):\n",
" File \"/content/FSGDM/examples/CIFAR100/main.py\", line 208, in <module>\n",
" main(args)\n",
" File \"/content/FSGDM/examples/CIFAR100/main.py\", line 161, in main\n",
" train_loss, train_acc = train_one_epoch(model, train_loader, criterion, optimizer, device)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/content/FSGDM/examples/CIFAR100/main.py\", line 102, in train_one_epoch\n",
" running_loss += loss.item() * inputs.size(0)\n",
" ^^^^^^^^^^^\n",
"KeyboardInterrupt\n"
]
}
]
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "05BwserLxwTm"
},
"execution_count": null,
"outputs": []
}
]
}

Xet Storage Details

Size:
7.75 kB
·
Xet hash:
4c81a992a92da333fed97880d71000934de19c98aef48baa7ec1685d080dabf4

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.