{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "CkZsS-w4atkF", "outputId": "f3a78987-dbd2-4771-92ca-69cdf97d0571" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Mounted at /content/drive\n" ] } ], "source": [ "from google.colab import drive\n", "drive.mount('/content/drive')" ], "id": "CkZsS-w4atkF" }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "zzWoPgRpd1xn", "outputId": "daa8edca-5ddf-4ac8-cb91-b4e77f9cc858" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", "Collecting wandb\n", " Downloading wandb-0.14.0-py3-none-any.whl (2.0 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m27.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting sentry-sdk>=1.0.0\n", " Downloading sentry_sdk-1.19.0-py2.py3-none-any.whl (199 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m199.2/199.2 KB\u001b[0m \u001b[31m25.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: setuptools in /usr/local/lib/python3.9/dist-packages (from wandb) (67.6.1)\n", "Requirement already satisfied: protobuf!=4.21.0,<5,>=3.15.0 in /usr/local/lib/python3.9/dist-packages (from wandb) (3.20.3)\n", "Requirement already satisfied: psutil>=5.0.0 in /usr/local/lib/python3.9/dist-packages (from wandb) (5.9.4)\n", "Requirement already satisfied: requests<3,>=2.0.0 in /usr/local/lib/python3.9/dist-packages (from wandb) (2.27.1)\n", "Collecting GitPython!=3.1.29,>=1.0.0\n", " Downloading GitPython-3.1.31-py3-none-any.whl (184 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m184.3/184.3 KB\u001b[0m \u001b[31m22.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: appdirs>=1.4.3 in /usr/local/lib/python3.9/dist-packages (from wandb) (1.4.4)\n", "Collecting docker-pycreds>=0.4.0\n", " Downloading docker_pycreds-0.4.0-py2.py3-none-any.whl (9.0 kB)\n", "Collecting setproctitle\n", " Downloading setproctitle-1.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (30 kB)\n", "Requirement already satisfied: Click!=8.0.0,>=7.0 in /usr/local/lib/python3.9/dist-packages (from wandb) (8.1.3)\n", "Requirement already satisfied: PyYAML in /usr/local/lib/python3.9/dist-packages (from wandb) (6.0)\n", "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.9/dist-packages (from wandb) (4.5.0)\n", "Collecting pathtools\n", " Downloading pathtools-0.1.2.tar.gz (11 kB)\n", " Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", "Requirement already satisfied: six>=1.4.0 in /usr/local/lib/python3.9/dist-packages (from docker-pycreds>=0.4.0->wandb) (1.16.0)\n", "Collecting gitdb<5,>=4.0.1\n", " Downloading gitdb-4.0.10-py3-none-any.whl (62 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.7/62.7 KB\u001b[0m \u001b[31m8.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests<3,>=2.0.0->wandb) (3.4)\n", "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests<3,>=2.0.0->wandb) (2.0.12)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests<3,>=2.0.0->wandb) (2022.12.7)\n", "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests<3,>=2.0.0->wandb) (1.26.15)\n", "Collecting smmap<6,>=3.0.1\n", " Downloading smmap-5.0.0-py3-none-any.whl (24 kB)\n", "Building wheels for collected packages: pathtools\n", " Building wheel for pathtools (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Created wheel for pathtools: filename=pathtools-0.1.2-py3-none-any.whl size=8807 sha256=e0132d67db355152a3925f1b0367a996c977716084c67122838eac002d321662\n", " Stored in directory: /root/.cache/pip/wheels/b7/0a/67/ada2a22079218c75a88361c0782855cc72aebc4d18d0289d05\n", "Successfully built pathtools\n", "Installing collected packages: pathtools, smmap, setproctitle, sentry-sdk, docker-pycreds, gitdb, GitPython, wandb\n", "Successfully installed GitPython-3.1.31 docker-pycreds-0.4.0 gitdb-4.0.10 pathtools-0.1.2 sentry-sdk-1.19.0 setproctitle-1.3.2 smmap-5.0.0 wandb-0.14.0\n" ] } ], "source": [ "!pip install wandb" ], "id": "zzWoPgRpd1xn" }, { "cell_type": "code", "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 121 }, "id": "747ddcf2", "outputId": "ebf3b723-ac7b-41ba-a9a6-e2e82e907879" }, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "" ], "application/javascript": [ "\n", " window._wandbApiKey = new Promise((resolve, reject) => {\n", " function loadScript(url) {\n", " return new Promise(function(resolve, reject) {\n", " let newScript = document.createElement(\"script\");\n", " newScript.onerror = reject;\n", " newScript.onload = resolve;\n", " document.body.appendChild(newScript);\n", " newScript.src = url;\n", " });\n", " }\n", " loadScript(\"https://cdn.jsdelivr.net/npm/postmate/build/postmate.min.js\").then(() => {\n", " const iframe = document.createElement('iframe')\n", " iframe.style.cssText = \"width:0;height:0;border:none\"\n", " document.body.appendChild(iframe)\n", " const handshake = new Postmate({\n", " container: iframe,\n", " url: 'https://wandb.ai/authorize'\n", " });\n", " const timeout = setTimeout(() => reject(\"Couldn't auto authenticate\"), 5000)\n", " handshake.then(function(child) {\n", " child.on('authorize', data => {\n", " clearTimeout(timeout)\n", " resolve(data)\n", " });\n", " });\n", " })\n", " });\n", " " ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Logging into wandb.ai. 
(Learn how to deploy a W&B server locally: https://wandb.me/wandb-server)\n", "\u001b[34m\u001b[1mwandb\u001b[0m: You can find your API key in your browser here: https://wandb.ai/authorize\n", "wandb: Paste an API key from your profile and hit enter, or press ctrl+c to quit:" ] }, { "name": "stdout", "output_type": "stream", "text": [ " ··········\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Appending key for api.wandb.ai to your netrc file: /root/.netrc\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "True" ] }, "metadata": {}, "execution_count": 3 } ], "source": [ "import wandb\n", "\n", "wandb.login()" ], "id": "747ddcf2" }, { "cell_type": "code", "execution_count": 4, "metadata": { "id": "c37343d6" }, "outputs": [], "source": [ "import torch\n", "import torch.optim as optim\n", "import torch.nn.functional as F\n", "import torch.nn as nn\n", "from torchvision import datasets, transforms\n", "from torchvision.models import resnet50, ResNet50_Weights\n", "from torch.utils.data import Dataset, DataLoader, random_split, SubsetRandomSampler\n", "import numpy as np\n", "import os\n", "import time\n", "import copy\n", "import random\n", "from sklearn import metrics\n", "\n", "torch.manual_seed(42)\n", "np.random.seed(42)\n", "\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")" ], "id": "c37343d6" }, { "cell_type": "code", "execution_count": 5, "metadata": { "id": "17b25dc7" }, "outputs": [], "source": [ "def build_dataset(batch_size): \n", " data_transforms = {\n", " 'train': transforms.Compose([\n", " transforms.RandomResizedCrop(224),\n", " transforms.RandomHorizontalFlip(),\n", " transforms.ToTensor(),\n", " transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n", " ]),\n", " 'test': transforms.Compose([\n", " transforms.Resize(256),\n", " transforms.CenterCrop(224),\n", " transforms.ToTensor(),\n", " transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n", " ]),\n", " }\n", "\n", " data_dir = '/content/drive/MyDrive/plantsdata'\n", " dataset = datasets.ImageFolder(os.path.join(data_dir))\n", "\n", " # 90/10 split\n", " train_dataset, test_dataset = random_split(dataset, [0.9, 0.1])\n", "\n", " train_dataset.dataset.transform = data_transforms['train']\n", " test_dataset.dataset.transform = data_transforms['test']\n", "\n", " train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,\n", " shuffle=True, num_workers=4)\n", " test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,\n", " shuffle=True, num_workers=4)\n", "\n", " dataloaders = {'train': train_loader, 'test': test_loader}\n", " dataset_size = len(dataset)\n", " dataset_sizes = {'train': len(train_dataset), 'test': len(test_dataset)}\n", " class_names = dataset.classes\n", "\n", " return (dataloaders, dataset_sizes)\n", "\n", "def build_network():\n", " network = resnet50(weights=ResNet50_Weights.DEFAULT)\n", " num_ftrs = network.fc.in_features\n", "\n", " # Add linear layer with number of classes\n", " network.fc = nn.Linear(num_ftrs, 2)\n", "\n", " return network.to(device)\n", "\n", "def build_optimizer(network, optimizer, learning_rate, beta_one, beta_two, eps):\n", " if optimizer == \"sgd\":\n", " optimizer = optim.SGD(network.parameters(),\n", " lr=learning_rate, momentum=0.9)\n", " elif optimizer == \"adam\":\n", " optimizer = optim.Adam(network.parameters(),\n", " lr=learning_rate,\n", " betas=(beta_one, beta_two),\n", " eps=eps)\n", " return 
optimizer\n", "\n", "def train_epoch(network, loader, optimizer, criterion, scheduler, dataset_sizes):\n", " network.train()\n", " running_loss = 0.0\n", " running_corrects = 0\n", " for _, (data, target) in enumerate(loader):\n", " data, target = data.to(device), target.to(device)\n", " optimizer.zero_grad()\n", "\n", " # ➡ Forward pass\n", " #loss = F.nll_loss(network(data), target)\n", " with torch.set_grad_enabled(True):\n", " outputs = network(data)\n", " _, preds = torch.max(outputs, 1)\n", " loss = criterion(outputs, target)\n", " \n", " #cumu_loss += loss.item()\n", " \n", " running_loss += loss.item() * data.size(0)\n", " running_corrects += torch.sum(preds == target.data)\n", "\n", " # ⬅ Backward pass + weight update\n", " loss.backward()\n", " optimizer.step()\n", "\n", " wandb.log({'train/batch_loss': loss.item()})\n", "\n", " scheduler.step()\n", "\n", " epoch_loss = running_loss / dataset_sizes['train']\n", " epoch_acc = running_corrects.double() / dataset_sizes['train']\n", " \n", " return (epoch_loss, epoch_acc)\n", "\n", "def test(network, loader, optimizer, criterion, dataset_sizes):\n", " network.eval()\n", " confusion = torch.empty([0, 1])\n", " confusion = confusion.to(device)\n", " running_loss = 0.0\n", " test_corrects = 0\n", " for _, (data, target) in enumerate(loader):\n", " data, target = data.to(device), target.to(device)\n", " optimizer.zero_grad()\n", "\n", " # ➡ Forward pass\n", " with torch.set_grad_enabled(False):\n", " outputs = network(data)\n", " _, preds = torch.max(outputs, 1)\n", " loss = criterion(outputs, target)\n", "\n", " running_loss += loss.item() * data.size(0)\n", " test_corrects += torch.sum(preds == target.data)\n", " \n", " confusion = torch.cat((confusion, preds[:, None] / target.data[:, None]))\n", "\n", " tp = torch.sum(confusion == 1).item()\n", " fp = torch.sum(confusion == float('inf')).item()\n", " tn = torch.sum(torch.isnan(confusion)).item()\n", " fn = torch.sum(confusion == 0).item()\n", " \n", " precision = tp / (tp + fp)\n", " recall = tp / (tp + fn)\n", " f = 2 * ((precision * recall) / (precision + recall))\n", " \n", " epoch_loss = running_loss / dataset_sizes['test']\n", " epoch_acc = test_corrects.double() / dataset_sizes['test']\n", " \n", " return (epoch_loss, epoch_acc, precision, recall, f)" ], "id": "17b25dc7" }, { "cell_type": "code", "execution_count": 6, "metadata": { "id": "5eff68bf" }, "outputs": [], "source": [ "def train(config=None):\n", " # Initialize a new wandb run\n", " with wandb.init(config=config):\n", " # If called by wandb.agent, as below,\n", " # this config will be set by Sweep Controller\n", " config = wandb.config\n", "\n", " (dataloaders, dataset_sizes) = build_dataset(config.batch_size)\n", " network = build_network()\n", " optimizer = build_optimizer(network, config.optimizer, config.learning_rate, config.beta_one,\n", " config.beta_two, config.eps)\n", " criterion = nn.CrossEntropyLoss()\n", " # Decay LR by a factor of 0.1 every 7 epochs\n", " exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, config.step_size, config.gamma)\n", "\n", " for epoch in range(config.epochs): \n", " (epoch_loss, epoch_acc) = train_epoch(network, dataloaders['train'], optimizer,\n", " criterion, exp_lr_scheduler,\n", " dataset_sizes)\n", " wandb.log({\"epoch\": epoch, 'train/epoch_loss': epoch_loss, 'train/epoch_acc': epoch_acc})\n", " \n", " (test_loss, test_acc, test_precision, test_recall, test_f) = test(network, dataloaders['test'],\n", " optimizer, criterion,\n", " dataset_sizes)\n", " 
wandb.log({'test/epoch_loss': test_loss, 'test/epoch_acc': test_acc,\n", " 'test/precision': test_precision, 'test/recall': test_recall,\n", " 'test/f1-score': test_f})" ], "id": "5eff68bf" }, { "cell_type": "code", "execution_count": 7, "metadata": { "id": "732a83df" }, "outputs": [], "source": [ "sweep_config = {\n", " 'method': 'random'\n", "}\n", "\n", "metric = {\n", " 'name': 'test/epoch_acc',\n", " 'goal': 'maximize' \n", "}\n", "\n", "sweep_config['metric'] = metric\n", "\n", "parameters_dict = {\n", " 'optimizer': {\n", " 'values': ['adam', 'sgd']\n", " },\n", "}\n", "\n", "sweep_config['parameters'] = parameters_dict\n", "\n", "parameters_dict.update({\n", " 'epochs': {\n", " 'value': 10},\n", " 'batch_size': {\n", " 'values': [4, 8, 16, 32, 64]},\n", " 'learning_rate': {\n", " 'values': [0.1, 0.01, 0.003, 0.001, 0.0003, 0.0001]},\n", " 'step_size': {\n", " 'values': [2, 3, 5, 7]},\n", " 'gamma': {\n", " 'values': [0.1, 0.5]},\n", " 'beta_one': {\n", " 'values': [0.9, 0.99]},\n", " 'beta_two': {\n", " 'values': [0.5, 0.9, 0.99, 0.999]},\n", " 'eps': {\n", " 'values': [1e-08, 0.1, 1]}\n", "})" ], "id": "732a83df" }, { "cell_type": "code", "execution_count": 8, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "9a01fef6", "outputId": "dd802f5b-fd67-4e16-c042-f7fdbe65d568" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Create sweep with ID: 9681wnh0\n", "Sweep URL: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0\n" ] } ], "source": [ "sweep_id = wandb.sweep(sweep_config, project=\"pytorch-sweeps-demo\")" ], "id": "9a01fef6" }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": [ "e840ed026b3342718c0aa068f81d93f3", "d510d413136c4231bc720200145a5d77", "a6a0d4738d434aa1b734c8407dde4e74", "9e4d93cf62094092809fee70ba7885f5", "32a491d3031c476da2d8687861ccbf7d", "ff00a24840224f8d9cce9ade4e77ac0c", "d8ec9c75b1f14686a6734b86eea24bb7", "220d541b7b4347b08a7fc9b8feb09f98" ] }, "id": "e80d1730", "outputId": "fb105ba8-6c50-4e19-9d02-88a9e8357bc0" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: pw52k3j3 with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33me1527193\u001b[0m (\u001b[33mflower-classification\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_135456-pw52k3j3" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run snowy-sweep-1 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/pw52k3j3" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.9/dist-packages/torch/utils/data/dataloader.py:561: UserWarning: This DataLoader will create 4 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n", " warnings.warn(_create_warning_msg(\n", "Downloading: \"https://download.pytorch.org/models/resnet50-11ad3fa6.pth\" to /root/.cache/torch/hub/checkpoints/resnet50-11ad3fa6.pth\n", "100%|██████████| 97.8M/97.8M [00:00<00:00, 236MB/s]\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "VBox(children=(Label(value='0.001 MB of 0.001 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" ], "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, "model_id": "e840ed026b3342718c0aa068f81d93f3" } }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▅▆▆██▇▇▇█
test/epoch_loss█▅▂▂▂▂▁▁▂▂
test/f1-score▁▆▇▇███▇▇█
test/precision▁▄▅▅█▇▆▇▅▇
test/recall▁▇▇▇▆▇█▆█▇
train/batch_loss▇▆█▅▅▆▆▆▅▃▄▅▄▄▄▅▃█▄█▃▄▂▁▄▃▁▆▅▁▄▆▂▄▂▂▃▄▆▄
train/epoch_acc▁▆▆▇█▇██▇█
train/epoch_loss█▅▃▂▁▂▁▁▁▁

Run summary:


epoch             9
test/epoch_acc    0.81111
test/epoch_loss   0.60187
test/f1-score     0.8172
test/precision    0.77551
test/recall       0.86364
train/batch_loss  0.5635
train/epoch_acc   0.77273
train/epoch_loss  0.59496
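The sweep runs logged from here on are produced by a W&B agent calling the `train` function above once per sampled configuration. The launching cell itself is not shown among these outputs; below is a minimal sketch of how such an agent is typically started, with the run count assumed to match the fifteen runs recorded in this notebook.

```python
# Hedged sketch (the actual launch cell is not shown here): start a sweep agent
# that calls train() once per configuration sampled by the random-search sweep.
# count=15 is an assumption matching the number of runs logged in this notebook.
import wandb

wandb.agent(sweep_id, function=train, count=15)
```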

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run snowy-sweep-1 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/pw52k3j3
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_135456-pw52k3j3/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: ea718wsd with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 32\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_140117-ea718wsd" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run splendid-sweep-2 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/ea718wsd" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▃▆█
test/epoch_acc▁▁▁
test/epoch_loss█▁▁
test/f1-score▁▁▁
test/precision▁▁▁
test/recall▁▁▁
train/batch_loss▁▁█▁▁▂▁▁▁▁▂▂▁▁▃▁▁▂▁▁▁▁▂▂▁▁▁▂▁▁▂▁▁▁▁▁▁▁▁▁
train/epoch_acc▃▁▇█
train/epoch_loss█▆▃▁

Run summary:


epoch             3
test/epoch_acc    0.42222
test/epoch_loss   109.2288
test/f1-score     0.59375
test/precision    0.42222
test/recall       1.0
train/batch_loss  1.26954
train/epoch_acc   0.51474
train/epoch_loss  3.22592
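The stderr that follows reports `Run ea718wsd errored: ZeroDivisionError('float division by zero')`. This comes from the manual `precision = tp / (tp + fp)` step in the `test` helper when an epoch yields no positive predictions, which the degenerate precision/recall in this summary already hints at. A minimal zero-safe alternative using `sklearn.metrics` (imported in the setup cell but otherwise unused) is sketched below; `safe_metrics` and its arguments are illustrative names, not the notebook's own code.

```python
# Sketch: zero-safe precision/recall/F1 over predictions accumulated from the
# test loader. zero_division=0 returns 0 instead of raising when tp + fp == 0.
from sklearn.metrics import precision_recall_fscore_support

def safe_metrics(all_preds, all_targets):
    precision, recall, f1, _ = precision_recall_fscore_support(
        all_targets.cpu().numpy(),
        all_preds.cpu().numpy(),
        average='binary',   # binary task: network.fc = nn.Linear(num_ftrs, 2)
        zero_division=0,
    )
    return precision, recall, f1
```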

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run splendid-sweep-2 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/ea718wsd
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_140117-ea718wsd/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "Run ea718wsd errored: ZeroDivisionError('float division by zero')\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run ea718wsd errored: ZeroDivisionError('float division by zero')\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Sweep Agent: Waiting for job.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Job received.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 2igypsdg with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 64\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_140346-2igypsdg" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run visionary-sweep-3 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/2igypsdg" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc█▅▁▁▁▁▃▁▃▃
test/epoch_loss▁▆█▄▄▄▆▅▃▅
test/f1-score▁█▅▅▅▅▆▅▆▆
test/precision▂█▁▁▁▁▃▁▃▃
test/recall▁█▇▇▇▇▇▇▇▇
train/batch_loss█▇██▄█▆▆▁▂▂▁▃▅▆▄▅▂▇▃▄▁▆▆▁▆▆▄▄▃▃▆▇▂▇█▅▅▁▄
train/epoch_acc▁▄▇█▇█▇▇▆▇
train/epoch_loss█▅▂▂▂▁▁▂▂▁

Run summary:


epoch             9
test/epoch_acc    0.34444
test/epoch_loss   0.72334
test/f1-score     0.47788
test/precision    0.35065
test/recall       0.75
train/batch_loss  0.67509
train/epoch_acc   0.56265
train/epoch_loss  0.67967
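One detail in `build_dataset` further up is worth flagging: `train_dataset` and `test_dataset` returned by `random_split` share the same underlying `ImageFolder`, so assigning `train_dataset.dataset.transform` and then `test_dataset.dataset.transform` leaves both splits using the 'test' transform. A hedged sketch of giving each split its own pipeline follows; `TransformedSubset` is a hypothetical helper, not part of the original notebook.

```python
# Hypothetical wrapper: random_split returns Subset views over one ImageFolder,
# so writing .dataset.transform twice keeps only the last value for both splits.
# Wrapping each Subset lets train and test apply separate transforms.
from torch.utils.data import Dataset

class TransformedSubset(Dataset):
    def __init__(self, subset, transform):
        self.subset = subset        # a torch.utils.data.Subset
        self.transform = transform  # per-split torchvision transform

    def __len__(self):
        return len(self.subset)

    def __getitem__(self, idx):
        image, label = self.subset[idx]  # PIL image, since the ImageFolder has no transform
        return self.transform(image), label

# Usage sketch inside build_dataset, replacing the two .dataset.transform lines:
# train_dataset = TransformedSubset(train_dataset, data_transforms['train'])
# test_dataset  = TransformedSubset(test_dataset,  data_transforms['test'])
```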

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run visionary-sweep-3 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/2igypsdg
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_140346-2igypsdg/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 37tqne1y with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 64\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_140933-37tqne1y" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run proud-sweep-4 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/37tqne1y" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▆▅▁▅▃▇▇███
test/epoch_loss▂▁█▁▅▁▁▁▁▁
test/f1-score▅▆▄▄▁▇▇███
test/precision█▅▁█▆▅▅▆▇▇
test/recall▃▆▇▃▁▇█▇▇▇
train/batch_loss█▆▅▄▃▂▁▃▇▃▄▄▂▂▅▃▂▄▂▄▂▄▃▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
train/epoch_acc▁▆▅▆▆▆████
train/epoch_loss█▄▆▄▄▃▁▁▁▁

Run summary:


epoch             9
test/epoch_acc    0.84444
test/epoch_loss   0.62389
test/f1-score     0.84091
test/precision    0.84091
test/recall       0.84091
train/batch_loss  0.00493
train/epoch_acc   1.0
train/epoch_loss  0.00446
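The first run's output also shows a DataLoader warning that the hard-coded `num_workers=4` exceeds the two CPU workers this Colab runtime suggests. A tiny sketch of deriving the worker count from the machine instead; the resulting value would be passed to the DataLoader calls in `build_dataset`.

```python
import os

# Cap DataLoader workers at the CPUs actually available (2 on this runtime,
# per the warning above) rather than the hard-coded num_workers=4.
num_workers = min(4, os.cpu_count() or 1)
```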

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run proud-sweep-4 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/37tqne1y
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_140933-37tqne1y/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 3co2jpxp with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_141514-3co2jpxp" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run restful-sweep-5 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/3co2jpxp" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▄█▁▃▃▅▆▇▇█
test/epoch_loss▃▁▆▄█▃▄▂▃▃
test/f1-score▂▇▁▂▃▆▅▆▆█
test/precision▅█▁▃▂▄▆▆▆▇
test/recall▁▂█▄██▂▃▄▄
train/batch_loss▄▂▃▄▂▂▃█▂▁▃▁▂▂▁▁▃▁▁▂▁▁▁▂▁▁▁▃▁▁▁▁▁▁▅▁▁▁▁▁
train/epoch_acc▁▄▅▆▆▆▇███
train/epoch_loss█▆▄▄▃▃▂▂▁▁

Run summary:


epoch             9
test/epoch_acc    0.87778
test/epoch_loss   0.53854
test/f1-score     0.86747
test/precision    0.85714
test/recall       0.87805
train/batch_loss  0.00185
train/epoch_acc   0.99631
train/epoch_loss  0.01069

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run restful-sweep-5 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/3co2jpxp
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_141514-3co2jpxp/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: ppthue5q with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.999\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.01\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_142101-ppthue5q" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run charmed-sweep-6 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/ppthue5q" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▄▃▇▆▇███▇
test/epoch_loss█▄▃▅▃▁▁▁▂▂
test/f1-score▁▄▃▆▆▇███▆
test/precision▁▂▂▆▅▅▆▇█▆
test/recall▁█▃▅▃▆▆▅▃▅
train/batch_loss█▃▆▅▇▆▄▃▄▁▄▃▁▁▂▂▁▁▁▁▃▁▁▃▁▁▁▁▁▁▂▁▁▁▂▂▁▃▁▁
train/epoch_acc▁▃▆▆▇▇████
train/epoch_loss█▆▄▄▂▂▁▁▁▁

Run summary:


epoch             9
test/epoch_acc    0.87778
test/epoch_loss   0.24035
test/f1-score     0.86076
test/precision    0.85
test/recall       0.87179
train/batch_loss  0.03008
train/epoch_acc   0.99386
train/epoch_loss  0.02099

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run charmed-sweep-6 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/ppthue5q
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_142101-ppthue5q/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: eakg0nsy with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.003\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_142642-eakg0nsy" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run still-sweep-7 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/eakg0nsy" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▆▅▃█▆▅▆▆▆
test/epoch_loss▆▄▃▆▁▇█▇██
test/f1-score▁▆▅▃█▅▄▆▆▅
test/precision▁▆▁▆▇█▅▇▇▆
test/recall▃▅█▁▆▃▃▄▄▄
train/batch_loss▄▅▃▄▅▆▆█▄▂▂▃▂▁▁▂▁▁▂▁▃▁▁▁▁▁▂▃▁▁▁▁▁▁▁▁▁▁▁▁
train/epoch_acc▁▄▅▆▇▇▇███
train/epoch_loss█▆▅▃▂▂▂▁▁▁

Run summary:


epoch             9
test/epoch_acc    0.85556
test/epoch_loss   0.44089
test/f1-score     0.87379
test/precision    0.9375
test/recall       0.81818
train/batch_loss  0.01161
train/epoch_acc   0.99386
train/epoch_loss  0.02177

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run still-sweep-7 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/eakg0nsy
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_142642-eakg0nsy/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: jucrzfat with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 16\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_143240-jucrzfat" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run crimson-sweep-8 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/jucrzfat" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▆▅▆▇█████
test/epoch_loss█▂▁▁▁▁▁▁▁▁
test/f1-score▁▂▄▅▆▇▇███
test/precision▁▇▅▆▆█████
test/recall█▁▄▅▅▅▅▅▅▅
train/batch_loss▆▅▄▇▄▄▅█▆▄▃▅▄▃▅▂▄▄▄▃▃▃▄▂▄▃▄▅▁▃▃▂▂▂▂▃▃▂▁▃
train/epoch_acc▁▄▅▅▇▇▇▇██
train/epoch_loss█▅▄▃▂▂▂▂▁▁

Run summary:


epoch             9
test/epoch_acc    0.78889
test/epoch_loss   0.46169
test/f1-score     0.7957
test/precision    0.84091
test/recall       0.7551
train/batch_loss  0.63008
train/epoch_acc   0.77396
train/epoch_loss  0.4697

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run crimson-sweep-8 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/jucrzfat
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_143240-jucrzfat/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: bhks7msu with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 32\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.999\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.003\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_143816-bhks7msu" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run lilac-sweep-9 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/bhks7msu" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▃▃█▁▆▆▃▃█
test/epoch_loss█▅▂▂▁▁▁▁▁▁
test/f1-score▁▄▄█▂▆▆▄▄█
test/precision▂▃▃█▁▆▆▃▃█
test/recall▁█████████
train/batch_loss██▇▅▅▄▄▅▅▂▅▂▃▄▃▅▂▃▃▆▂▃▂▃▃▂▂▅▃▂▁▂▂▅▄▄▃▃▁▅
train/epoch_acc▁▅▇███▇███
train/epoch_loss█▅▃▂▁▁▁▁▁▁

Run summary:


epoch             9
test/epoch_acc    0.85556
test/epoch_loss   0.54705
test/f1-score     0.86022
test/precision    0.78431
test/recall       0.95238
train/batch_loss  0.61833
train/epoch_acc   0.8059
train/epoch_loss  0.558

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run lilac-sweep-9 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/bhks7msu
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_143816-bhks7msu/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: ezctslju with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 32\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.003\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_144350-ezctslju" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run different-sweep-10 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/ezctslju" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▂▇███▇▇▄▇
test/epoch_loss█▆▅▃▂▂▁▁▁▁
test/f1-score▁▅▆███▆▆▁▆
test/precision▂▁██████▇█
test/recall▅█▃▅▅▅▃▃▁▃
train/batch_loss███▇▇▆▆▇▆▆▆▆▅▅▅▅▃▅▃▅▃▃▂▄▃▂▄▂▂▁▂▃▃▃▄▄▃▅▃▄
train/epoch_acc▁▅▇▇▇█▇███
train/epoch_loss█▆▅▄▂▂▁▁▁▁

Run summary:


epoch             9
test/epoch_acc    0.75556
test/epoch_loss   0.53376
test/f1-score     0.76596
test/precision    0.76596
test/recall       0.76596
train/batch_loss  0.52814
train/epoch_acc   0.86118
train/epoch_loss  0.46213

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run different-sweep-10 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/ezctslju
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_144350-ezctslju/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: gxvcwlwu with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 64\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_144922-gxvcwlwu" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run glowing-sweep-11 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/gxvcwlwu" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▂▅▆▆▆▇███
test/epoch_loss█▇▅▄▄▃▂▂▂▁
test/f1-score▁▂▅▆▇▇▇███
test/precision▁▂▄▅▆▆▇███
test/recall▁▄▇███████
train/batch_loss▇█▆▇▇▆▆▆▆▅▅▆▅▅▃▅▅▄▅▅▃▄▄▂▃▃▃▃▃▃▃▂▁▃▃▃▂▃▁▁
train/epoch_acc▁▄▅▆▇▇▇███
train/epoch_loss█▇▆▄▄▃▂▂▁▁

Run summary:


epoch             9
test/epoch_acc    0.86667
test/epoch_loss   0.53756
test/f1-score     0.86364
test/precision    0.80851
test/recall       0.92683
train/batch_loss  0.52993
train/epoch_acc   0.80098
train/epoch_loss  0.55775

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run glowing-sweep-11 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/gxvcwlwu
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_144922-gxvcwlwu/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: o4ceynjw with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 64\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.999\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.01\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_145500-o4ceynjw" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run chocolate-sweep-12 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/o4ceynjw" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▆▄▇▇█████
test/epoch_loss█▄▄▁▁▁▁▁▁▁
test/f1-score▁▆▄▇▇█████
test/precision▂▁█▆██████
test/recall▁█▃█▇█████
train/batch_loss██▇▆▅▅▄▄▃▃▂▂▂▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
train/epoch_acc▁▅▇███████
train/epoch_loss█▅▃▁▁▁▁▁▁▁

Run summary:


epoch             9
test/epoch_acc    0.97778
test/epoch_loss   0.13521
test/f1-score     0.97826
test/precision    1.0
test/recall       0.95745
train/batch_loss  0.00408
train/epoch_acc   1.0
train/epoch_loss  0.00712

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run chocolate-sweep-12 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/o4ceynjw
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_145500-o4ceynjw/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: w0els6yx with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.999\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_150038-w0els6yx" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run glorious-sweep-13 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/w0els6yx" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▂▁▄▄▇▆▆▇█▇
test/epoch_loss█▆▅▆▃▂▂▂▂▁
test/f1-score▁▁▅▄▇▇▇███
test/precision▂▁▄▃▆▆▆▆█▇
test/recall▁▂▅▄▆▆▇█▆█
train/batch_loss█▆▆▆▆▆▆▃▇█▄▄▇█▆▅▄▇▇▃▄▄▅▂▃▄▆▆▁▆▂▄▄▅▅▅▄▆▄▄
train/epoch_acc▁▃▄▄▆▆▇▇▇█
train/epoch_loss█▇▆▅▃▃▂▂▂▁

Run summary:


epoch             9
test/epoch_acc    0.68889
test/epoch_loss   0.66123
test/f1-score     0.65854
test/precision    0.64286
test/recall       0.675
train/batch_loss  0.60239
train/epoch_acc   0.65233
train/epoch_loss  0.66732

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run glorious-sweep-13 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/w0els6yx
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_150038-w0els6yx/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: sn7rpzsv with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_150639-sn7rpzsv" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run stoic-sweep-14 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/sn7rpzsv" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:

epoch             ▁▂▃▃▄▅▆▆▇█
test/epoch_acc    ▃▁▃▆▆▃▆▆▄█
test/epoch_loss   █▆▄▁▃▆▃▃▄▁
test/f1-score     ▄▁▃▇▆▃▆▆▅█
test/precision    ▃▆▁▂██▂▁▃▇
test/recall       ▄▁▄█▄▂▇█▅▇
train/batch_loss  █▆▇▆▇█▂▆▂▄▃▁▁▁▄▃▄▂▁▁▁▁▁▃▁▁▁▁▁▁▁▁▁▁▁▂▁▁▁▁
train/epoch_acc   ▁▄▆▇▇▇▇███
train/epoch_loss  █▆▄▃▂▂▂▁▁▁

Run summary:

epoch             9
test/epoch_acc    0.92222
test/epoch_loss   0.22225
test/f1-score     0.91358
test/precision    0.94872
test/recall       0.88095
train/batch_loss  0.01037
train/epoch_acc   0.98649
train/epoch_loss  0.04606

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run stoic-sweep-14 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/sn7rpzsv
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_150639-sn7rpzsv/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Sweep Agent: Waiting for job.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Job received.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: m64aehal with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.999\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_151243-m64aehal" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run rare-sweep-15 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/m64aehal" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:

epoch             ▁▂▃▃▄▅▆▆▇█
test/epoch_acc    ▁█▃▅█▇▄▇██
test/epoch_loss   █▁▆▃▂▄█▄▃▃
test/f1-score     ▃█▁▃██▃▇██
test/precision    ▁▅██▆▄▅▆▆▆
test/recall       ▅█▁▂▇█▃▆▆▇
train/batch_loss  ▄▅▃▅▂▇▂▄▂▃█▄▂▃▁▃▄▁▂▁▁▁▃▁▂▁▁▂▁▂▁▁▁▁▁▁▁▅▁▂
train/epoch_acc   ▁▃▄▆▆▇▇▇██
train/epoch_loss  █▆▅▄▃▂▂▂▁▁

Run summary:

epoch             9
test/epoch_acc    0.86667
test/epoch_loss   0.52816
test/f1-score     0.85
test/precision    0.85
test/recall       0.85
train/batch_loss  0.0016
train/epoch_acc   0.99509
train/epoch_loss  0.02902

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run rare-sweep-15 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/m64aehal
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_151243-m64aehal/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Sweep Agent: Waiting for job.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Job received.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 71er7icc with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 32\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_151846-71er7icc" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run winter-sweep-16 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/71er7icc" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:

epoch             ▁▂▃▃▄▅▆▆▇█
test/epoch_acc    ▁▃▅▆▇▇▇███
test/epoch_loss   █▇▅▄▄▃▃▂▂▁
test/f1-score     ▁▃▆▆▇▇▇███
test/precision    ▁▃▆▅▆▇▇███
test/recall       ▁▃▆▇▇▇▇███
train/batch_loss  ▇█▆▆▆▆▅▇▅▇▅▅▅▅▅▄▄▅▅▄▃▃▄▃▂▃▄▃▂▄▂▃▁▁▃▄▃▂▂▃
train/epoch_acc   ▁▃▄▆▇▇▇█▇█
train/epoch_loss  █▇▆▅▄▃▃▂▂▁

Run summary:

epoch             9
test/epoch_acc    0.83333
test/epoch_loss   0.5844
test/f1-score     0.85437
test/precision    0.88
test/recall       0.83019
train/batch_loss  0.60478
train/epoch_acc   0.82801
train/epoch_loss  0.58084

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run winter-sweep-16 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/71er7icc
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_151846-71er7icc/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: k0hwgfjk with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.003\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_152419-k0hwgfjk" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run sleek-sweep-17 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/k0hwgfjk" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:

epoch             ▁▂▃▃▄▅▆▆▇█
test/epoch_acc    ▃▁█▆▆▆█▅▅█
test/epoch_loss   ▅█▂▂▁▁▁▁▁▁
test/f1-score     ▂▁█▇▇▇█▅▅█
test/precision    ▇▃█▄▁▁█▁▁█
test/recall       ▁▁▆▆██▆▆▆▆
train/batch_loss  ▆█▄▅▃▃▇▁▁▁▁▁▁▁▂▁▂▂▁▁▁▁▁▂▁▄▃▁▁▂▂▁▁▃▁▁▁▁▁▁
train/epoch_acc   ▁▅██████▇█
train/epoch_loss  █▄▁▁▁▁▁▁▁▁

Run summary:

epoch             9
test/epoch_acc    0.91111
test/epoch_loss   0.2015
test/f1-score     0.89744
test/precision    0.94595
test/recall       0.85366
train/batch_loss  0.00723
train/epoch_acc   0.98157
train/epoch_loss  0.07856

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run sleek-sweep-17 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/k0hwgfjk
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_152419-k0hwgfjk/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: hb00vz7w with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.01\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Tracking run with wandb version 0.14.0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Run data is saved locally in /content/wandb/run-20230404_152956-hb00vz7w" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Syncing run smart-sweep-18 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/9681wnh0" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/hb00vz7w" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Waiting for W&B process to finish... (success)." ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "\n", "

Run history:

epoch             ▁▂▃▃▄▅▆▆▇█
test/epoch_acc    ▁▅▆▇▆▇▆▆▆█
test/epoch_loss   █▅▅▃▃▁▃▄▂▁
test/f1-score     ▁▄▆▇▆▇▆▆▆█
test/precision    ▁█▆▆▂▄▆█▆▄
test/recall       ▁▂▅▆▇▇▅▄▅█
train/batch_loss  ▅▅▄▆▅▅▂▂▂▃▂▅▃▂▂▁▂▃▂▁█▁▁▂▁▁▁▁▁▁▁▁▁▂▂▁▁▁▁▁
train/epoch_acc   ▁▄▅▇▇▇▇▇██
train/epoch_loss  █▆▄▃▂▂▂▂▁▁

Run summary:

epoch             9
test/epoch_acc    0.9
test/epoch_loss   0.24883
test/f1-score     0.89888
test/precision    0.93023
test/recall       0.86957
train/batch_loss  0.01547
train/epoch_acc   0.98771
train/epoch_loss  0.04667

" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ " View run smart-sweep-18 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/hb00vz7w
Synced 4 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "text/html": [ "Find logs at: ./wandb/run-20230404_152956-hb00vz7w/logs" ] }, "metadata": {} }, { "output_type": "stream", "name": "stderr", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 0bg49if5 with config:\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" ] } ], "source": [ "wandb.agent(sweep_id, train, count=60)" ], "id": "e80d1730" }, { "cell_type": "code", "source": [], "metadata": { "id": "0p3H2-jRjJIG" }, "id": "0p3H2-jRjJIG", "execution_count": null, "outputs": [] } ], "metadata": { "accelerator": "GPU", "colab": { "provenance": [] }, "gpuClass": "standard", "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.15" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "e840ed026b3342718c0aa068f81d93f3": { "model_module": "@jupyter-widgets/controls", "model_name": "VBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "VBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "VBoxView", "box_style": "", "children": [ "IPY_MODEL_d510d413136c4231bc720200145a5d77", "IPY_MODEL_a6a0d4738d434aa1b734c8407dde4e74" ], "layout": "IPY_MODEL_9e4d93cf62094092809fee70ba7885f5" } }, "d510d413136c4231bc720200145a5d77": { "model_module": "@jupyter-widgets/controls", "model_name": "LabelModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "LabelModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "LabelView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_32a491d3031c476da2d8687861ccbf7d", "placeholder": "​", "style": "IPY_MODEL_ff00a24840224f8d9cce9ade4e77ac0c", "value": "0.001 MB of 0.001 MB uploaded (0.000 MB deduped)\r" } }, "a6a0d4738d434aa1b734c8407dde4e74": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_d8ec9c75b1f14686a6734b86eea24bb7", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_220d541b7b4347b08a7fc9b8feb09f98", "value": 1 } }, 
"9e4d93cf62094092809fee70ba7885f5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "32a491d3031c476da2d8687861ccbf7d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ff00a24840224f8d9cce9ade4e77ac0c": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "d8ec9c75b1f14686a6734b86eea24bb7": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": 
null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "220d541b7b4347b08a7fc9b8feb09f98": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } } } } }, "nbformat": 4, "nbformat_minor": 5 }