diff --git a/classification/classifier/hyp-metrics.csv b/classification/classifier/hyp-metrics.csv new file mode 100644 index 0000000..60c41c8 --- /dev/null +++ b/classification/classifier/hyp-metrics.csv @@ -0,0 +1,139 @@ +,summary,config,name +0,"{'test/epoch_loss': 0.5664619127909343, 'train/epoch_acc': 0.8230958230958231, 'train/batch_loss': 0.33577921986579895, 'epoch': 9, '_wandb': {'runtime': 363}, '_timestamp': 1680692970.2016854, 'test/recall': 0.6170212765957447, 'test/precision': 0.8285714285714286, '_step': 2059, '_runtime': 367.13677954673767, 'test/f1-score': 0.7073170731707318, 'test/epoch_acc': 0.7333333333333334, 'train/epoch_loss': 0.4241055610431793}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 'learning_rate': 0.0003}",fiery-sweep-26 +1,"{'test/recall': 0.8222222222222222, 'test/precision': 0.6851851851851852, '_runtime': 341.8420207500458, '_timestamp': 1680692589.503975, '_wandb': {'runtime': 338}, 'test/f1-score': 0.7474747474747475, 'test/epoch_acc': 0.7222222222222222, 'test/epoch_loss': 0.6454579922888014, 'train/epoch_acc': 0.7125307125307125, 'train/batch_loss': 0.7014500498771667, '_step': 1039, 'epoch': 9, 'train/epoch_loss': 0.649790015355375}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.0003}",radiant-sweep-25 +2,"{'test/recall': 0.7837837837837838, 'test/epoch_acc': 0.888888888888889, 'test/precision': 0.935483870967742, 'train/batch_loss': 0.01956617273390293, '_step': 1039, 'epoch': 9, '_wandb': {'runtime': 333}, '_runtime': 336.8275649547577, 'train/epoch_loss': 0.01614290558709019, '_timestamp': 1680692234.39516, 'test/f1-score': 0.8529411764705881, 'test/epoch_loss': 0.34812947780333664, 'train/epoch_acc': 0.9987714987714988}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 5, 
'batch_size': 8, 'learning_rate': 0.003}",blooming-sweep-24 +3,"{'test/epoch_acc': 0.8, 'train/batch_loss': 0.5222326517105103, 'train/epoch_loss': 0.5324229019572753, 'epoch': 9, '_wandb': {'runtime': 327}, '_runtime': 331.57809829711914, 'test/f1-score': 0.7954545454545455, 'test/epoch_loss': 0.5553177932898203, 'train/epoch_acc': 0.8353808353808354, '_step': 529, '_timestamp': 1680691883.3877182, 'test/recall': 0.8333333333333334, 'test/precision': 0.7608695652173914}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.0003}",visionary-sweep-23 +4,"{'test/f1-score': 0.7076923076923076, 'train/epoch_acc': 0.5577395577395577, '_step': 410, 'epoch': 1, 'test/recall': 0.8846153846153846, 'test/epoch_acc': 0.5777777777777778, 'test/precision': 0.5897435897435898, 'test/epoch_loss': 1.5602711306677923, 'train/batch_loss': 0.5083656311035156, 'train/epoch_loss': 0.7508098256090057, '_wandb': {'runtime': 70}, '_runtime': 71.64615154266357, '_timestamp': 1680691538.7247725}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.01}",ancient-sweep-22 +5,"{'test/precision': 0.6885245901639344, 'test/epoch_loss': 0.4844042791260613, 'train/epoch_loss': 0.49390909720111537, '_step': 529, 'epoch': 9, '_timestamp': 1680691453.5148375, 'test/f1-score': 0.8, 'test/epoch_acc': 0.7666666666666667, 'train/epoch_acc': 0.769041769041769, 'train/batch_loss': 0.4559023082256317, '_wandb': {'runtime': 328}, '_runtime': 331.44886469841003, 'test/recall': 0.9545454545454546}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 16, 'learning_rate': 0.003}",fresh-sweep-22 +6,"{'test/epoch_loss': 0.26263883135527266, 'train/epoch_acc': 0.9975429975429976, 'train/batch_loss': 0.0031523401848971844, 'train/epoch_loss': 
0.018423480946079804, '_wandb': {'runtime': 355}, '_runtime': 358.66950702667236, '_timestamp': 1680691110.042932, 'test/recall': 0.8867924528301887, 'test/f1-score': 0.9306930693069309, 'test/epoch_acc': 0.9222222222222224, 'test/precision': 0.9791666666666666, '_step': 2059, 'epoch': 9}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 4, 'learning_rate': 0.01}",pleasant-sweep-21 +7,"{'train/batch_loss': 0.003317732596769929, 'epoch': 9, '_wandb': {'runtime': 329}, '_runtime': 332.6156196594238, 'test/f1-score': 0.8865979381443299, 'test/epoch_loss': 0.3669874522421095, 'train/epoch_acc': 1, 'train/epoch_loss': 0.0014873178028192654, '_step': 279, '_timestamp': 1680690741.3215847, 'test/recall': 0.9148936170212766, 'test/epoch_acc': 0.8777777777777778, 'test/precision': 0.86}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 32, 'learning_rate': 0.01}",fragrant-sweep-20 +8,"{'test/recall': 0.82, 'test/precision': 0.7592592592592593, 'test/epoch_loss': 0.5786970999505785, 'train/epoch_acc': 0.8206388206388207, '_step': 149, 'epoch': 9, '_runtime': 342.05230498313904, 'test/epoch_acc': 0.7555555555555555, 'train/batch_loss': 0.58731609582901, 'train/epoch_loss': 0.5623220165765842, '_wandb': {'runtime': 338}, '_timestamp': 1680690397.165603, 'test/f1-score': 0.7884615384615384}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 64, 'learning_rate': 0.001}",treasured-sweep-19 +9,"{'test/precision': 0.8536585365853658, 'test/epoch_loss': 0.6037532766660054, 'train/epoch_acc': 0.7788697788697788, 'epoch': 9, '_wandb': {'runtime': 357}, '_runtime': 360.5366156101227, 'test/f1-score': 0.7865168539325843, 'test/epoch_acc': 0.788888888888889, 'train/batch_loss': 0.5736206769943237, '_step': 2059, '_timestamp': 1680690042.488695, 'test/recall': 
0.7291666666666666, 'train/epoch_loss': 0.5984062318134074}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 4, 'learning_rate': 0.0001}",desert-sweep-18 +10,"{'_wandb': {'runtime': 362}, '_runtime': 365.3367943763733, '_timestamp': 1680689670.8310964, 'test/f1-score': 0.8333333333333334, 'test/precision': 0.945945945945946, 'train/epoch_loss': 0.3086323318522451, '_step': 2059, 'epoch': 9, 'test/recall': 0.7446808510638298, 'test/epoch_acc': 0.8444444444444444, 'test/epoch_loss': 0.3740654948684904, 'train/epoch_acc': 0.8697788697788698, 'train/batch_loss': 0.5778521299362183}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 4, 'learning_rate': 0.003}",celestial-sweep-17 +11,"{'train/epoch_acc': 1, 'train/batch_loss': 0.004256190732121468, '_step': 149, '_runtime': 340.39124369621277, '_timestamp': 1680689237.7951498, 'test/precision': 0.9069767441860463, 'test/epoch_loss': 0.18080708616309696, 'train/epoch_loss': 0.0053219743558098115, 'epoch': 9, '_wandb': {'runtime': 337}, 'test/recall': 0.9285714285714286, 'test/f1-score': 0.9176470588235294, 'test/epoch_acc': 0.9222222222222224}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 64, 'learning_rate': 0.01}",cosmic-sweep-15 +12,"{'_step': 2059, '_runtime': 359.0396990776062, '_timestamp': 1680688886.363035, 'test/recall': 0.8222222222222222, 'test/f1-score': 0.8705882352941177, 'test/precision': 0.925, 'train/batch_loss': 0.21692615747451785, 'epoch': 9, '_wandb': {'runtime': 356}, 'test/epoch_acc': 0.8777777777777778, 'test/epoch_loss': 0.23811448697621623, 'train/epoch_acc': 0.968058968058968, 'train/epoch_loss': 0.09628425111664636}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 'learning_rate': 
0.001}",stilted-sweep-14 +13,"{'epoch': 9, '_runtime': 336.5640392303467, '_timestamp': 1680688517.0028613, 'test/recall': 0.9, 'test/precision': 0.9574468085106383, 'train/epoch_acc': 1, 'train/batch_loss': 0.007201554253697395, 'train/epoch_loss': 0.007631345846546077, '_step': 149, '_wandb': {'runtime': 333}, 'test/f1-score': 0.9278350515463918, 'test/epoch_acc': 0.9222222222222224, 'test/epoch_loss': 0.16714997291564945}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.01}",frosty-sweep-13 +14,"{'test/f1-score': 0.8674698795180724, 'test/precision': 0.9230769230769232, 'train/batch_loss': 0.27152174711227417, '_step': 529, 'epoch': 9, '_wandb': {'runtime': 328}, 'test/epoch_acc': 0.8777777777777778, 'test/epoch_loss': 0.32556109494633145, 'train/epoch_acc': 0.9496314496314496, 'train/epoch_loss': 0.17368088453934877, '_runtime': 331.98337984085083, '_timestamp': 1680688162.2054858, 'test/recall': 0.8181818181818182}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 16, 'learning_rate': 0.001}",young-sweep-12 +15,"{'test/recall': 0.8292682926829268, 'test/epoch_acc': 0.7222222222222222, 'test/epoch_loss': 0.5193446947468652, 'train/batch_loss': 0.3307788372039795, '_wandb': {'runtime': 332}, '_timestamp': 1680687816.5057352, '_runtime': 335.6552822589874, 'test/f1-score': 0.7311827956989247, 'test/precision': 0.6538461538461539, 'train/epoch_acc': 0.7469287469287469, 'train/epoch_loss': 0.5277571982775039, '_step': 1039, 'epoch': 9}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 8, 'learning_rate': 0.1}",sandy-sweep-11 +16,"{'test/precision': 0.8085106382978723, 'epoch': 9, '_wandb': {'runtime': 334}, '_runtime': 336.80703043937683, 'test/recall': 0.9047619047619048, 'test/f1-score': 0.853932584269663, 
'test/epoch_acc': 0.8555555555555556, '_step': 149, '_timestamp': 1680687470.9289024, 'test/epoch_loss': 0.4616309046745301, 'train/epoch_acc': 1, 'train/batch_loss': 0.0030224076472222805, 'train/epoch_loss': 0.003708146820279612}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.1}",laced-sweep-10 +17,"{'_step': 422, 'epoch': 7, '_runtime': 265.48077392578125, '_timestamp': 1680687113.1220188, 'test/recall': 0.08888888888888889, 'test/f1-score': 0.14035087719298245, 'test/precision': 0.3333333333333333, 'test/epoch_loss': 11610.708938450283, 'train/batch_loss': 9.74098777770996, '_wandb': {'runtime': 265}, 'test/epoch_acc': 0.45555555555555555, 'train/epoch_acc': 0.5331695331695332, 'train/epoch_loss': 9.16968992828444}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.1}",jumping-sweep-9 +18,"{'test/recall': 0.803921568627451, 'test/f1-score': 0.845360824742268, 'test/epoch_acc': 0.8333333333333334, 'test/precision': 0.8913043478260869, 'test/epoch_loss': 0.3831123087141249, '_step': 529, '_runtime': 330.36346793174744, '_timestamp': 1680686834.80723, 'train/batch_loss': 0.34334877133369446, 'train/epoch_loss': 0.3055295220024756, 'epoch': 9, '_wandb': {'runtime': 327}, 'train/epoch_acc': 0.8955773955773956}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 16, 'learning_rate': 0.0003}",dutiful-sweep-8 +19,"{'train/epoch_acc': 0.484029484029484, 'train/epoch_loss': 'NaN', 'epoch': 2, '_wandb': {'runtime': 99}, '_runtime': 99.40804982185364, '_timestamp': 1680686491.634724, 'test/recall': 1, 'test/f1-score': 0.6259541984732825, '_step': 157, 'test/epoch_acc': 0.45555555555555555, 'test/precision': 0.45555555555555555, 'test/epoch_loss': 6.554853016439314e+29, 'train/batch_loss': 
'NaN'}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 16, 'learning_rate': 0.1}",olive-sweep-7 +20,"{'_step': 279, '_timestamp': 1680686383.3591404, 'test/f1-score': 0.8695652173913044, 'test/epoch_acc': 0.8666666666666667, 'test/precision': 0.851063829787234, 'train/batch_loss': 0.3707323968410492, 'epoch': 9, '_wandb': {'runtime': 334}, '_runtime': 337.17863941192627, 'test/recall': 0.8888888888888888, 'test/epoch_loss': 0.35141510632303025, 'train/epoch_acc': 0.9103194103194104, 'train/epoch_loss': 0.3219767680771521}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 32, 'learning_rate': 0.001}",good-sweep-6 +21,"{'test/f1-score': 0.6601941747572815, 'test/epoch_acc': 0.6111111111111112, 'test/precision': 0.6296296296296297, 'train/batch_loss': 0.7027227878570557, 'train/epoch_acc': 0.5196560196560196, '_step': 149, 'epoch': 9, '_wandb': {'runtime': 342}, '_runtime': 344.80718994140625, '_timestamp': 1680686028.304971, 'test/recall': 0.6938775510204082, 'test/epoch_loss': 0.6818753732575311, 'train/epoch_loss': 0.6907664721955246}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 64, 'learning_rate': 0.0003}",summer-sweep-5 +22,"{'_step': 529, '_wandb': {'runtime': 331}, '_runtime': 333.9663326740265, '_timestamp': 1680685671.7387648, 'test/f1-score': 0.9066666666666668, 'test/epoch_acc': 0.9222222222222224, 'test/precision': 0.9444444444444444, 'train/epoch_acc': 0.9864864864864864, 'train/batch_loss': 0.15035715699195862, 'train/epoch_loss': 0.10497688309859292, 'epoch': 9, 'test/recall': 0.8717948717948718, 'test/epoch_loss': 0.22382020586066775}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 16, 'learning_rate': 0.001}",firm-sweep-4 +23,"{'_step': 
149, 'test/recall': 0.925, 'test/f1-score': 0.6379310344827587, 'train/epoch_loss': 0.6564877619028677, 'test/epoch_loss': 0.6597137530644734, 'train/epoch_acc': 0.5909090909090909, 'epoch': 9, '_wandb': {'runtime': 333}, '_runtime': 335.79468297958374, '_timestamp': 1680685319.453976, 'test/epoch_acc': 0.5333333333333333, 'test/precision': 0.4868421052631579, 'train/batch_loss': 0.652446985244751}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 64, 'learning_rate': 0.0001}",genial-sweep-3 +24,"{'test/epoch_acc': 0.7444444444444445, 'test/precision': 0.6271186440677966, 'test/epoch_loss': 0.5467572536733415, '_step': 529, 'epoch': 9, '_wandb': {'runtime': 329}, '_runtime': 331.50625491142273, 'test/f1-score': 0.7628865979381443, '_timestamp': 1680684975.004809, 'test/recall': 0.9736842105263158, 'train/epoch_acc': 0.7899262899262899, 'train/batch_loss': 0.5583129525184631, 'train/epoch_loss': 0.4703364581675143}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 16, 'learning_rate': 0.1}",fine-sweep-2 +25,"{'test/epoch_acc': 0.9, 'train/epoch_acc': 0.9987714987714988, '_step': 529, 'epoch': 9, '_wandb': {'runtime': 447}, '_runtime': 450.5545320510864, 'test/recall': 0.8863636363636364, 'train/epoch_loss': 0.007131033717467008, '_timestamp': 1680684633.811369, 'test/f1-score': 0.896551724137931, 'test/precision': 0.9069767441860463, 'test/epoch_loss': 0.30911533037821454, 'train/batch_loss': 0.005764181260019541}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 16, 'learning_rate': 0.01}",visionary-sweep-1 +26,"{'_wandb': {'runtime': 83}, '_timestamp': 1680629962.8990817, 'train/epoch_acc': 0.8931203931203932, 'train/epoch_loss': 0.2428556958016658, 'test/epoch_acc': 0.8777777777777778, 'test/precision': 0.8444444444444444, 
'test/epoch_loss': 0.29840316110187104, '_step': 239, 'epoch': 1, '_runtime': 83.58446168899536, 'test/recall': 0.9047619047619048, 'test/f1-score': 0.8735632183908046, 'train/batch_loss': 0.08615076541900635}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.1}",stoic-sweep-14 +27,"{'epoch': 9, '_wandb': {'runtime': 347}, '_runtime': 348.9410927295685, '_timestamp': 1680629872.8401277, 'test/recall': 0.975, 'test/f1-score': 0.951219512195122, 'test/epoch_acc': 0.9555555555555556, '_step': 149, 'train/batch_loss': 0.10338585078716278, 'train/epoch_loss': 0.1163152276517718, 'train/epoch_acc': 0.9803439803439804, 'test/epoch_loss': 0.20102048052681817, 'test/precision': 0.9285714285714286}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 64, 'learning_rate': 0.01}",rich-sweep-13 +28,"{'train/batch_loss': 82027960, '_step': 210, 'epoch': 3, '_wandb': {'runtime': 135}, '_runtime': 132.22715950012207, '_timestamp': 1680629513.1781075, 'test/f1-score': 0.6721311475409836, 'test/epoch_acc': 0.5555555555555556, 'test/recall': 0.9111111111111112, 'test/precision': 0.5324675324675324, 'test/epoch_loss': 3.395405118153546e+20, 'train/epoch_acc': 0.5282555282555282, 'train/epoch_loss': 60563307.6520902}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 16, 'learning_rate': 0.003}",smooth-sweep-12 +29,"{'_wandb': {'runtime': 326}, 'test/recall': 0.8888888888888888, 'test/epoch_acc': 0.6333333333333333, 'test/precision': 0.5245901639344263, 'train/batch_loss': 0.5836847424507141, 'train/epoch_loss': 0.6072891213970044, '_step': 279, 'epoch': 9, 'test/f1-score': 0.6597938144329897, 'test/epoch_loss': 0.6240786300765143, 'train/epoch_acc': 0.7469287469287469, '_runtime': 327.2181556224823, '_timestamp': 
1680629374.0562296}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 32, 'learning_rate': 0.0003}",resilient-sweep-11 +30,"{'train/epoch_acc': 0.9717444717444718, 'epoch': 9, 'test/f1-score': 0.8958333333333334, 'test/precision': 0.9772727272727272, 'test/epoch_loss': 0.2657569663392173, 'test/recall': 0.8269230769230769, 'test/epoch_acc': 0.888888888888889, 'train/batch_loss': 0.13025684654712677, 'train/epoch_loss': 0.12745249926751018, '_step': 529, '_wandb': {'runtime': 330}, '_runtime': 332.23273372650146, '_timestamp': 1680629038.456323}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 16, 'learning_rate': 0.001}",serene-sweep-10 +31,"{'_step': 1039, '_wandb': {'runtime': 334}, '_timestamp': 1680628699.1189623, 'test/recall': 0.8372093023255814, 'test/epoch_loss': 0.23338710864384968, 'train/batch_loss': 0.11391787976026536, 'train/epoch_loss': 0.2116023584907412, 'epoch': 9, '_runtime': 335.94198656082153, 'test/f1-score': 0.9, 'test/epoch_acc': 0.9111111111111112, 'test/precision': 0.972972972972973, 'train/epoch_acc': 0.9275184275184276}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 8, 'learning_rate': 0.0003}",cool-sweep-9 +32,"{'_runtime': 327.29265093803406, '_timestamp': 1680628351.790065, 'test/f1-score': 0.7959183673469388, 'train/epoch_loss': 0.6034659886828805, '_step': 529, '_wandb': {'runtime': 326}, 'test/recall': 0.8863636363636364, 'test/epoch_acc': 0.7777777777777778, 'test/precision': 0.7222222222222222, 'test/epoch_loss': 0.5824494547314114, 'train/epoch_acc': 0.7702702702702703, 'train/batch_loss': 0.5777762532234192, 'epoch': 9}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.0001}",lilac-sweep-8 
+33,"{'_wandb': {'runtime': 335}, '_runtime': 337.11313247680664, 'test/recall': 0.8048780487804879, 'test/f1-score': 0.717391304347826, 'test/epoch_acc': 0.7111111111111111, 'test/epoch_loss': 0.6369305915302701, '_step': 149, 'epoch': 9, 'train/epoch_loss': 0.618001790392311, 'train/epoch_acc': 0.7199017199017199, 'train/batch_loss': 0.5935282111167908, '_timestamp': 1680628016.5942774, 'test/precision': 0.6470588235294118}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 64, 'learning_rate': 0.001}",warm-sweep-7 +34,"{'train/epoch_acc': 0.6498771498771498, 'train/epoch_loss': 0.6663250732773353, '_wandb': {'runtime': 354}, '_runtime': 355.7423675060272, 'test/recall': 0.8, 'test/f1-score': 0.6857142857142857, 'test/precision': 0.6, 'test/epoch_loss': 0.6619265423880683, '_step': 2059, 'epoch': 9, '_timestamp': 1680627667.6215644, 'test/epoch_acc': 0.6333333333333333, 'train/batch_loss': 0.6662057638168335}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.0001}",giddy-sweep-6 +35,"{'test/recall': 0.8163265306122449, 'test/f1-score': 0.7766990291262137, 'test/epoch_acc': 0.7444444444444445, 'test/epoch_loss': 0.6307997491624621, 'train/epoch_acc': 0.7125307125307125, 'epoch': 9, '_runtime': 344.59358406066895, '_timestamp': 1680627305.434523, 'train/batch_loss': 0.6531811356544495, 'train/epoch_loss': 0.6398702088093582, '_step': 149, '_wandb': {'runtime': 343}, 'test/precision': 0.7407407407407407}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 64, 'learning_rate': 0.0001}",stellar-sweep-5 +36,"{'_runtime': 335.76391553878784, '_timestamp': 1680626951.0603056, 'test/recall': 0.8461538461538461, 'test/f1-score': 0.9041095890410958, 'test/precision': 0.9705882352941176, 'test/epoch_loss': 0.1906787835785912, 
'epoch': 9, '_wandb': {'runtime': 334}, 'train/epoch_loss': 0.02095988139033052, 'train/epoch_acc': 0.9975429975429976, 'train/batch_loss': 0.0006497434806078672, '_step': 1039, 'test/epoch_acc': 0.9222222222222224}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 8, 'learning_rate': 0.003}",olive-sweep-4 +37,"{'_wandb': {'runtime': 332}, '_timestamp': 1680626608.419389, 'test/f1-score': 0.8705882352941177, 'test/epoch_acc': 0.8777777777777778, 'test/precision': 0.8222222222222222, 'train/epoch_acc': 0.984029484029484, 'train/batch_loss': 0.12675245106220245, '_step': 149, 'epoch': 9, '_runtime': 333.64992809295654, 'test/recall': 0.925, 'test/epoch_loss': 0.27919367684258356, 'train/epoch_loss': 0.11751884335528429}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.003}",dazzling-sweep-3 +38,"{'_runtime': 337.19885444641113, '_timestamp': 1680626264.5954974, 'test/f1-score': 0.5977011494252874, 'test/epoch_acc': 0.6111111111111112, 'test/precision': 0.5306122448979592, 'train/epoch_acc': 0.6547911547911548, 'epoch': 9, '_wandb': {'runtime': 336}, 'train/epoch_loss': 0.6389284106085868, 'test/epoch_loss': 0.6708752089076572, 'train/batch_loss': 0.5270536541938782, '_step': 1039, 'test/recall': 0.6842105263157895}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.01}",kind-sweep-2 +39,"{'epoch': 9, '_wandb': {'runtime': 337}, '_runtime': 337.9836483001709, 'test/recall': 0.8636363636363636, 'test/f1-score': 0.853932584269663, '_step': 529, 'test/epoch_acc': 0.8555555555555556, 'test/precision': 0.8444444444444444, 'test/epoch_loss': 0.38614972366227046, 'train/epoch_acc': 0.8746928746928747, 'train/batch_loss': 0.3848239779472351, 'train/epoch_loss': 0.3516608065117782, '_timestamp': 
1680625919.9645753}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 16, 'learning_rate': 0.003}",morning-sweep-1 +40,"{'test/recall': 0.8653846153846154, 'test/f1-score': 0.9, 'train/batch_loss': 0.05631007254123688, '_step': 2059, '_timestamp': 1680624250.2654595, '_runtime': 347.9354045391083, 'test/epoch_acc': 0.888888888888889, 'test/precision': 0.9375, 'test/epoch_loss': 0.25786760796585845, 'train/epoch_acc': 0.9975429975429976, 'train/epoch_loss': 0.02368298517580857, 'epoch': 9, '_wandb': {'runtime': 346}}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 4, 'learning_rate': 0.1}",valiant-sweep-23 +41,"{'_runtime': 329.4802031517029, '_timestamp': 1680623895.362503, 'test/recall': 0.8936170212765957, 'test/f1-score': 0.8571428571428571, 'test/epoch_loss': 0.490613665845659, 'train/epoch_acc': 0.8243243243243243, 'epoch': 9, '_wandb': {'runtime': 327}, 'test/epoch_acc': 0.8444444444444444, 'test/precision': 0.8235294117647058, 'train/batch_loss': 0.5639374256134033, 'train/epoch_loss': 0.48581602795996887, '_step': 1039}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.0003}",earnest-sweep-22 +42,"{'_step': 149, 'epoch': 9, '_timestamp': 1680623556.4586525, 'test/recall': 0.9148936170212766, 'test/epoch_loss': 0.2318242397573259, 'train/epoch_acc': 0.995085995085995, 'train/batch_loss': 0.06110217794775963, 'train/epoch_loss': 0.05107141801451289, '_wandb': {'runtime': 326}, '_runtime': 328.0050995349884, 'test/f1-score': 0.9052631578947368, 'test/epoch_acc': 0.9, 'test/precision': 0.8958333333333334}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 64, 'learning_rate': 0.003}",genial-sweep-21 +43,"{'_wandb': {'runtime': 
325}, '_runtime': 327.10622239112854, '_timestamp': 1680623221.0825984, 'test/recall': 0.8723404255319149, 'train/epoch_acc': 0.7911547911547911, '_step': 149, 'epoch': 9, 'test/f1-score': 0.780952380952381, 'test/epoch_acc': 0.7444444444444445, 'test/precision': 0.7068965517241379, 'test/epoch_loss': 0.5943129923608568, 'train/batch_loss': 0.6166229844093323, 'train/epoch_loss': 0.5714027147914034}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 64, 'learning_rate': 0.001}",lemon-sweep-20 +44,"{'train/epoch_acc': 0.6277641277641277, 'train/epoch_loss': 0.6722187732302879, 'epoch': 9, '_wandb': {'runtime': 330}, '_runtime': 331.60892701148987, 'test/recall': 0.7021276595744681, 'test/f1-score': 0.6470588235294118, 'train/batch_loss': 0.7205827236175537, '_step': 1039, '_timestamp': 1680622885.059607, 'test/epoch_acc': 0.6, 'test/precision': 0.6, 'test/epoch_loss': 0.6746161646313138}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.0001}",ancient-sweep-19 +45,"{'test/epoch_loss': 0.24883262103216516, 'train/epoch_acc': 0.9877149877149876, 'train/batch_loss': 0.015468262135982512, '_wandb': {'runtime': 347}, '_runtime': 348.9979507923126, '_timestamp': 1680622545.2735748, 'test/recall': 0.8695652173913043, 'test/f1-score': 0.898876404494382, 'test/epoch_acc': 0.9, 'test/precision': 0.9302325581395348, 'train/epoch_loss': 0.0466749508011656, '_step': 2059, 'epoch': 9}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 4, 'learning_rate': 0.01}",smart-sweep-18 +46,"{'epoch': 9, '_runtime': 329.3028633594513, '_timestamp': 1680622188.8210304, 'test/epoch_loss': 0.2015038196825319, 'train/epoch_loss': 0.07856258183731457, '_step': 1039, 'test/recall': 0.8536585365853658, 'test/f1-score': 0.8974358974358975, 
'test/epoch_acc': 0.9111111111111112, 'test/precision': 0.945945945945946, 'train/epoch_acc': 0.9815724815724816, 'train/batch_loss': 0.007225348148494959, '_wandb': {'runtime': 328}}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.003}",sleek-sweep-17 +47,"{'test/epoch_acc': 0.8333333333333334, 'train/epoch_acc': 0.828009828009828, 'train/epoch_loss': 0.5808350268101516, 'test/recall': 0.8301886792452831, 'epoch': 9, '_wandb': {'runtime': 321}, '_runtime': 323.3842430114746, '_timestamp': 1680621849.979658, 'test/f1-score': 0.8543689320388349, 'test/precision': 0.88, 'test/epoch_loss': 0.5843977000978258, '_step': 279, 'train/batch_loss': 0.6047794222831726}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 32, 'learning_rate': 0.0001}",winter-sweep-16 +48,"{'epoch': 9, '_wandb': {'runtime': 346}, '_runtime': 347.8050694465637, 'test/recall': 0.85, 'test/epoch_acc': 0.8666666666666667, 'test/precision': 0.85, '_step': 2059, 'test/f1-score': 0.85, 'test/epoch_loss': 0.5281610590923164, 'train/epoch_acc': 0.995085995085995, 'train/batch_loss': 0.001602485659532249, 'train/epoch_loss': 0.029015880939893934, '_timestamp': 1680621511.323635}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.1}",rare-sweep-15 +49,"{'_wandb': {'runtime': 346}, '_runtime': 347.7671456336975, '_timestamp': 1680621147.5604067, 'test/f1-score': 0.9135802469135802, 'test/epoch_acc': 0.9222222222222224, 'train/epoch_acc': 0.9864864864864864, '_step': 2059, 'test/recall': 0.8809523809523809, 'test/precision': 0.9487179487179488, 'test/epoch_loss': 0.22225395898438163, 'train/batch_loss': 0.010366588830947876, 'train/epoch_loss': 0.04606454834343147, 'epoch': 9}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 
'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.001}",stoic-sweep-14 +50,"{'_timestamp': 1680620790.920825, 'test/f1-score': 0.6585365853658537, 'train/epoch_acc': 0.6523341523341524, 'train/batch_loss': 0.6023905277252197, 'train/epoch_loss': 0.6673213337211703, '_step': 2059, '_wandb': {'runtime': 351}, '_runtime': 352.6435329914093, 'test/precision': 0.6428571428571429, 'test/epoch_loss': 0.661226307021247, 'epoch': 9, 'test/recall': 0.675, 'test/epoch_acc': 0.6888888888888889}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 4, 'learning_rate': 0.0001}",glorious-sweep-13 +51,"{'_step': 149, '_wandb': {'runtime': 329}, 'test/recall': 0.9574468085106383, 'test/f1-score': 0.9782608695652174, 'test/epoch_acc': 0.977777777777778, 'test/precision': 1, 'train/epoch_acc': 1, 'epoch': 9, '_runtime': 330.7649688720703, '_timestamp': 1680620431.024078, 'test/epoch_loss': 0.1352142873737547, 'train/batch_loss': 0.004083937965333462, 'train/epoch_loss': 0.0071195896911716286}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.01}",chocolate-sweep-12 +52,"{'test/precision': 0.8085106382978723, 'train/epoch_loss': 0.5577488642652731, '_step': 149, 'epoch': 9, '_wandb': {'runtime': 328}, '_timestamp': 1680620092.0697718, 'test/f1-score': 0.8636363636363636, 'test/epoch_acc': 0.8666666666666667, '_runtime': 329.12984681129456, 'test/recall': 0.926829268292683, 'test/epoch_loss': 0.5375637359089321, 'train/epoch_acc': 0.800982800982801, 'train/batch_loss': 0.5299303531646729}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.0003}",glowing-sweep-11 +53,"{'_runtime': 324.3058567047119, 'test/recall': 0.7659574468085106, 'test/epoch_acc': 0.7555555555555555, 
'test/precision': 0.7659574468085106, 'train/epoch_acc': 0.8611793611793611, 'train/epoch_loss': 0.46212616409072127, '_step': 279, 'epoch': 9, '_wandb': {'runtime': 322}, '_timestamp': 1680619755.0191748, 'test/f1-score': 0.7659574468085105, 'test/epoch_loss': 0.5337554746203952, 'train/batch_loss': 0.5281365513801575}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 32, 'learning_rate': 0.003}",different-sweep-10 +54,"{'_runtime': 327.0705659389496, '_timestamp': 1680619423.656795, 'test/f1-score': 0.8602150537634408, 'test/epoch_acc': 0.8555555555555556, 'test/precision': 0.7843137254901961, '_step': 279, 'epoch': 9, '_wandb': {'runtime': 325}, 'test/epoch_loss': 0.5470490535100301, 'train/epoch_acc': 0.8058968058968059, 'train/epoch_loss': 0.5580001385557564, 'test/recall': 0.9523809523809524, 'train/batch_loss': 0.6183260083198547}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 32, 'learning_rate': 0.003}",lilac-sweep-9 +55,"{'train/epoch_loss': 0.46969629490990605, '_wandb': {'runtime': 327}, 'test/recall': 0.7551020408163265, 'test/epoch_acc': 0.788888888888889, 'test/precision': 0.8409090909090909, 'test/f1-score': 0.7956989247311828, 'test/epoch_loss': 0.46168507006433274, 'train/epoch_acc': 0.773955773955774, 'train/batch_loss': 0.6300776600837708, '_step': 529, 'epoch': 9, '_runtime': 328.68579959869385, '_timestamp': 1680619089.5332966}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 16, 'learning_rate': 0.1}",crimson-sweep-8 +56,"{'_step': 2059, '_runtime': 350.2308712005615, '_timestamp': 1680618753.2361271, 'test/epoch_loss': 0.44089303129391433, 'train/epoch_acc': 0.9938574938574938, 'train/batch_loss': 0.011611333116889, 'epoch': 9, '_wandb': {'runtime': 349}, 'test/recall': 0.8181818181818182, 'test/f1-score': 
0.8737864077669902, 'test/epoch_acc': 0.8555555555555556, 'test/precision': 0.9375, 'train/epoch_loss': 0.02176519967463292}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.003}",still-sweep-7 +57,"{'test/f1-score': 0.8607594936708861, 'test/epoch_acc': 0.8777777777777778, 'test/precision': 0.85, 'train/epoch_acc': 0.9938574938574938, 'train/epoch_loss': 0.02099113287724536, '_wandb': {'runtime': 333}, 'test/recall': 0.8717948717948718, '_runtime': 334.69481587409973, '_timestamp': 1680618396.0194488, 'test/epoch_loss': 0.24035142682841976, 'train/batch_loss': 0.030084805563092232, '_step': 1039, 'epoch': 9}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 8, 'learning_rate': 0.01}",charmed-sweep-6 +58,"{'epoch': 9, '_timestamp': 1680618051.044084, 'test/recall': 0.8780487804878049, 'test/f1-score': 0.8674698795180722, 'test/precision': 0.8571428571428571, 'test/epoch_loss': 0.5385394818252988, 'train/epoch_acc': 0.9963144963144964, 'train/batch_loss': 0.001848929445259273, '_step': 1039, '_wandb': {'runtime': 335}, '_runtime': 336.1621870994568, 'test/epoch_acc': 0.8777777777777778, 'train/epoch_loss': 0.010693324584853135}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 8, 'learning_rate': 0.0003}",restful-sweep-5 +59,"{'_step': 149, 'epoch': 9, 'test/recall': 0.8409090909090909, 'test/epoch_acc': 0.8444444444444444, 'test/epoch_loss': 0.6238909363746643, 'train/epoch_loss': 0.004462716538065481, '_wandb': {'runtime': 333}, '_runtime': 334.4848310947418, '_timestamp': 1680617708.075962, 'test/f1-score': 0.8409090909090909, 'test/precision': 0.8409090909090909, 'train/epoch_acc': 1, 'train/batch_loss': 0.004928763955831528}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 
'optimizer': 'sgd', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.1}",proud-sweep-4 +60,"{'train/epoch_acc': 0.5626535626535626, 'train/batch_loss': 0.6750851273536682, '_step': 149, '_wandb': {'runtime': 337}, '_timestamp': 1680617365.2791553, 'test/recall': 0.75, 'test/epoch_acc': 0.34444444444444444, 'test/epoch_loss': 0.7233364171451993, 'train/epoch_loss': 0.6796711432845938, 'epoch': 9, '_runtime': 338.4922821521759, 'test/f1-score': 0.4778761061946903, 'test/precision': 0.35064935064935066}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 64, 'learning_rate': 0.0001}",visionary-sweep-3 +61,"{'_wandb': {'runtime': 132}, 'test/recall': 1, 'test/f1-score': 0.59375, 'test/epoch_acc': 0.4222222222222222, 'test/precision': 0.4222222222222222, 'train/batch_loss': 1.2695436477661133, '_step': 110, 'epoch': 3, '_runtime': 129.48883533477783, '_timestamp': 1680617007.4126654, 'test/epoch_loss': 109.22879723442924, 'train/epoch_acc': 0.5147420147420148, 'train/epoch_loss': 3.225923076601521}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 32, 'learning_rate': 0.1}",splendid-sweep-2 +62,"{'_runtime': 373.84231185913086, 'test/recall': 0.8636363636363636, 'train/batch_loss': 0.563504695892334, 'test/epoch_loss': 0.6018742865986294, '_step': 1039, 'epoch': 9, '_wandb': {'runtime': 372}, '_timestamp': 1680616870.0621138, 'test/f1-score': 0.8172043010752688, 'test/epoch_acc': 0.8111111111111111, 'test/precision': 0.7755102040816326, 'train/epoch_acc': 0.7727272727272727, 'train/epoch_loss': 0.5949591096554693}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.0001}",snowy-sweep-1 +63,"{'test/epoch_acc': 0.6333333333333333, 'test/precision': 0.625, 'train/epoch_acc': 0.5552825552825553, 'train/batch_loss': 
0.7118003964424133, 'epoch': 9, '_timestamp': 1678798635.5359335, 'test/f1-score': 0.6024096385542168, 'test/recall': 0.5813953488372093, 'test/epoch_loss': 0.6787986318270366, 'train/epoch_loss': 0.684732110699506, '_step': 529, '_wandb': {'runtime': 327}, '_runtime': 333.6077947616577}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 16, 'learning_rate': 0.0001}",comic-sweep-38 +64,"{'_step': 149, 'epoch': 9, '_timestamp': 1678798288.876002, 'test/recall': 1, 'test/epoch_loss': 0.5120628664890925, 'train/epoch_acc': 1, 'train/epoch_loss': 0.001254009526264133, '_wandb': {'runtime': 337}, '_runtime': 342.7867271900177, 'test/f1-score': 0.888888888888889, 'test/epoch_acc': 0.888888888888889, 'test/precision': 0.8, 'train/batch_loss': 0.0015535189304500818}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.1}",magic-sweep-37 +65,"{'test/recall': 0.6341463414634146, 'test/epoch_acc': 0.6444444444444445, 'test/precision': 0.6046511627906976, 'train/epoch_acc': 0.6572481572481572, 'train/epoch_loss': 0.659313001562395, '_step': 279, 'epoch': 9, '_wandb': {'runtime': 332}, 'test/epoch_loss': 0.6593369828330146, 'train/batch_loss': 0.6705241203308105, '_runtime': 338.4290623664856, '_timestamp': 1678797929.8979273, 'test/f1-score': 0.6190476190476191}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 32, 'learning_rate': 0.0003}",azure-sweep-36 +66,"{'_step': 1039, 'test/precision': 0.9591836734693876, 'test/epoch_loss': 0.5167779392666287, 'train/epoch_acc': 0.7911547911547911, 'epoch': 9, '_wandb': {'runtime': 343}, '_runtime': 349.1018385887146, '_timestamp': 1678797575.4461255, 'test/recall': 0.8703703703703703, 'test/f1-score': 0.912621359223301, 'test/epoch_acc': 0.9, 'train/batch_loss': 0.5475739240646362, 
'train/epoch_loss': 0.542006236622316}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.001}",easy-sweep-35 +67,"{'_wandb': {'runtime': 362}, '_timestamp': 1678797212.2311337, 'test/f1-score': 0.8611111111111112, 'test/precision': 0.8611111111111112, 'test/epoch_loss': 0.27850865055532065, 'train/epoch_acc': 0.9987714987714988, 'train/batch_loss': 4.9947026127483696e-05, '_step': 2059, 'train/epoch_loss': 0.012833298822080874, '_runtime': 367.9372293949127, 'test/recall': 0.8611111111111112, 'test/epoch_acc': 0.888888888888889, 'epoch': 9}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 4, 'learning_rate': 0.003}",usual-sweep-34 +68,"{'test/epoch_loss': 0.6554473309053315, 'epoch': 9, '_wandb': {'runtime': 330}, '_runtime': 335.99687933921814, '_timestamp': 1678796827.8409674, 'test/recall': 0.9791666666666666, 'test/f1-score': 0.903846153846154, 'test/epoch_acc': 0.888888888888889, 'train/epoch_acc': 0.9742014742014742, 'train/batch_loss': 0.17918632924556732, 'train/epoch_loss': 0.07036763163974523, '_step': 529, 'test/precision': 0.8392857142857143}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 16, 'learning_rate': 0.0003}",polar-sweep-33 +69,"{'test/f1-score': 0.7356321839080459, 'test/epoch_acc': 0.7444444444444445, 'train/epoch_acc': 0.8660933660933661, 'train/epoch_loss': 0.47513497564072105, 'epoch': 9, '_runtime': 336.63737440109253, '_timestamp': 1678796468.9253614, 'test/recall': 0.8648648648648649, 'test/precision': 0.64, 'test/epoch_loss': 0.5271965821584066, 'train/batch_loss': 0.4695126414299011, '_step': 149, '_wandb': {'runtime': 330}}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 64, 'learning_rate': 
0.001}",still-sweep-32 +70,"{'test/epoch_acc': 0.888888888888889, 'test/precision': 0.9428571428571428, 'test/epoch_loss': 0.2378266812198692, 'train/batch_loss': 0.711412787437439, '_step': 2059, '_wandb': {'runtime': 372}, '_runtime': 378.4032835960388, '_timestamp': 1678796117.3062005, 'test/recall': 0.8048780487804879, 'test/f1-score': 0.868421052631579, 'train/epoch_acc': 0.9705159705159704, 'train/epoch_loss': 0.09577267487700432, 'epoch': 9}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 'learning_rate': 0.001}",misty-sweep-31 +71,"{'_wandb': {'runtime': 333}, '_runtime': 336.8808288574219, '_timestamp': 1678795725.918603, 'test/f1-score': 0.8636363636363636, 'train/epoch_acc': 0.9926289926289926, 'train/epoch_loss': 0.05967479737370254, '_step': 529, 'epoch': 9, 'test/recall': 0.8260869565217391, 'test/epoch_acc': 0.8666666666666667, 'test/precision': 0.9047619047619048, 'test/epoch_loss': 0.27924135790930854, 'train/batch_loss': 0.04936826974153519}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 16, 'learning_rate': 0.001}",flowing-sweep-30 +72,"{'_runtime': 339.73244285583496, '_timestamp': 1678795319.518895, 'test/recall': 0.851063829787234, 'test/f1-score': 0.898876404494382, 'test/epoch_acc': 0.9, 'test/precision': 0.9523809523809524, '_step': 279, 'epoch': 9, 'train/epoch_acc': 0.8722358722358722, 'train/epoch_loss': 0.3784469199122024, 'train/batch_loss': 0.4592914581298828, '_wandb': {'runtime': 336}, 'test/epoch_loss': 0.37525106337335373}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 32, 'learning_rate': 0.001}",deep-sweep-28 +73,"{'test/recall': 0.625, 'test/f1-score': 0.6849315068493151, 'train/epoch_acc': 0.7899262899262899, 'train/batch_loss': 0.6763702630996704, 'train/epoch_loss': 
0.5319552311733255, '_wandb': {'runtime': 377}, '_runtime': 381.0768678188324, '_timestamp': 1678794965.2675128, 'test/precision': 0.7575757575757576, 'test/epoch_loss': 0.5484810524516636, '_step': 2059, 'epoch': 9, 'test/epoch_acc': 0.7444444444444445}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 4, 'learning_rate': 0.0001}",glorious-sweep-27 +74,"{'epoch': 9, '_wandb': {'runtime': 334}, 'test/epoch_acc': 0.7555555555555555, 'train/batch_loss': 0.4391788542270661, '_step': 529, '_timestamp': 1678794572.9156363, 'test/recall': 0.813953488372093, 'test/f1-score': 0.7608695652173914, 'test/precision': 0.7142857142857143, 'test/epoch_loss': 0.5729872869120703, 'train/epoch_acc': 0.8968058968058967, 'train/epoch_loss': 0.2699748155379471, '_runtime': 338.11463618278503}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.1}",stoic-sweep-26 +75,"{'_step': 2059, 'epoch': 9, '_wandb': {'runtime': 377}, '_timestamp': 1678794222.848524, 'test/precision': 0.8478260869565217, 'train/epoch_acc': 0.9877149877149876, 'train/batch_loss': 0.025906365364789963, '_runtime': 380.8983037471771, 'test/recall': 0.8863636363636364, 'test/f1-score': 0.8666666666666666, 'test/epoch_acc': 0.8666666666666667, 'test/epoch_loss': 0.3083995895563728, 'train/epoch_loss': 0.04955068614813831}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.01}",vibrant-sweep-25 +76,"{'train/epoch_acc': 1, 'train/batch_loss': 0.0010389955714344978, '_step': 149, 'epoch': 9, '_timestamp': 1678793829.5489533, 'test/recall': 0.9215686274509804, 'test/f1-score': 0.8867924528301887, 'test/precision': 0.8545454545454545, '_wandb': {'runtime': 340}, '_runtime': 343.4739582538605, 'test/epoch_acc': 0.8666666666666667, 'test/epoch_loss': 
0.7976957665549385, 'train/epoch_loss': 0.002287556243378495}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.1}",valiant-sweep-24 +77,"{'epoch': 9, 'test/precision': 0.8666666666666667, 'train/epoch_acc': 0.8857493857493858, 'train/epoch_loss': 0.3862068348493272, '_step': 149, '_runtime': 344.0598545074463, '_timestamp': 1678793464.5180786, 'test/recall': 0.8478260869565217, 'test/f1-score': 0.8571428571428571, 'test/epoch_acc': 0.8555555555555556, 'test/epoch_loss': 0.4112878143787384, 'train/batch_loss': 0.3762533664703369, '_wandb': {'runtime': 340}}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.001}",polished-sweep-23 +78,"{'train/epoch_acc': 0.6756756756756757, 'train/batch_loss': 0.7007869482040405, 'train/epoch_loss': 0.6115244123215171, '_step': 529, 'epoch': 9, '_runtime': 339.41979336738586, '_timestamp': 1678793108.7606344, 'test/epoch_loss': 0.6097042110231188, '_wandb': {'runtime': 336}, 'test/recall': 0.8837209302325582, 'test/f1-score': 0.7102803738317758, 'test/epoch_acc': 0.6555555555555556, 'test/precision': 0.59375}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.01}",clear-sweep-22 +79,"{'_wandb': {'runtime': 377}, '_runtime': 381.0477261543274, '_timestamp': 1678792758.596286, 'test/recall': 0.8157894736842105, 'test/precision': 0.9393939393939394, 'train/epoch_acc': 0.9815724815724816, 'epoch': 9, 'test/f1-score': 0.8732394366197183, 'test/epoch_acc': 0.9, 'test/epoch_loss': 0.23743902287549443, 'train/batch_loss': 0.5061427354812622, 'train/epoch_loss': 0.07462231436439994, '_step': 2059}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 
'learning_rate': 0.01}",sage-sweep-21 +80,"{'test/precision': 0.902439024390244, 'train/batch_loss': 0.24579545855522156, 'train/epoch_loss': 0.12095561367287976, '_step': 529, 'epoch': 9, '_runtime': 335.3731348514557, 'test/epoch_acc': 0.8555555555555556, 'test/epoch_loss': 0.28035063776705, 'train/epoch_acc': 0.9791154791154792, '_wandb': {'runtime': 331}, '_timestamp': 1678792364.5292609, 'test/recall': 0.8043478260869565, 'test/f1-score': 0.8505747126436782}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 16, 'learning_rate': 0.001}",olive-sweep-20 +81,"{'_step': 1039, 'epoch': 9, '_runtime': 340.5063774585724, 'test/precision': 0.9534883720930232, 'train/epoch_acc': 0.995085995085995, 'train/batch_loss': 0.0077079650945961475, 'train/epoch_loss': 0.018187719287696302, '_wandb': {'runtime': 337}, '_timestamp': 1678792015.2579195, 'test/recall': 0.9111111111111112, 'test/f1-score': 0.931818181818182, 'test/epoch_acc': 0.9333333333333332, 'test/epoch_loss': 0.17397157057291932}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 8, 'learning_rate': 0.003}",autumn-sweep-19 +82,"{'train/batch_loss': 0.4317986071109772, '_step': 1039, 'epoch': 9, 'test/recall': 0.8205128205128205, 'test/f1-score': 0.7804878048780488, 'test/epoch_acc': 0.8, 'test/epoch_loss': 0.4940012666914198, 'train/epoch_acc': 0.8218673218673218, 'train/epoch_loss': 0.4784781006542412, '_wandb': {'runtime': 344}, '_runtime': 347.40152740478516, '_timestamp': 1678791661.9692383, 'test/precision': 0.7441860465116279}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 8, 'learning_rate': 0.0001}",crisp-sweep-18 +83,"{'test/recall': 0.9090909090909092, 'test/f1-score': 0.9090909090909092, 'test/epoch_acc': 0.9111111111111112, 'test/precision': 0.9090909090909092, 
'test/epoch_loss': 0.19624250796106127, '_step': 279, '_wandb': {'runtime': 335}, '_timestamp': 1678791236.6172178, 'train/epoch_acc': 0.9828009828009828, 'train/batch_loss': 0.15555259585380554, 'epoch': 9, '_runtime': 337.956387758255, 'train/epoch_loss': 0.08830470366618558}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 32, 'learning_rate': 0.003}",deep-sweep-16 +84,"{'test/epoch_acc': 0.7333333333333334, 'test/precision': 0.7049180327868853, 'test/epoch_loss': 0.6228035251299541, 'train/batch_loss': 0.6377201080322266, '_runtime': 334.2993712425232, '_timestamp': 1678790886.952144, 'test/f1-score': 0.7818181818181819, 'test/recall': 0.8775510204081632, 'train/epoch_acc': 0.7493857493857494, 'train/epoch_loss': 0.6127705679478751, '_step': 279, 'epoch': 9, '_wandb': {'runtime': 331}}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 32, 'learning_rate': 0.0003}",confused-sweep-15 +85,"{'_step': 529, '_wandb': {'runtime': 342}, '_timestamp': 1678790542.286384, 'test/precision': 0.7192982456140351, 'train/epoch_acc': 0.8415233415233415, 'train/batch_loss': 0.1340156048536301, 'train/epoch_loss': 0.3545121966840594, 'epoch': 9, '_runtime': 345.0617377758026, 'test/recall': 0.8541666666666666, 'test/f1-score': 0.7809523809523811, 'test/epoch_acc': 0.7444444444444445, 'test/epoch_loss': 0.6144241677390204}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 16, 'learning_rate': 0.1}",ancient-sweep-14 +86,"{'train/epoch_acc': 0.7457002457002457, '_step': 529, 'epoch': 9, '_wandb': {'runtime': 344}, '_runtime': 346.86587953567505, '_timestamp': 1678790183.7024884, 'test/epoch_acc': 0.7222222222222222, 'test/recall': 0.782608695652174, 'test/f1-score': 0.7422680412371134, 'test/precision': 0.7058823529411765, 'test/epoch_loss': 
0.6392196734746297, 'train/batch_loss': 0.6280461549758911, 'train/epoch_loss': 0.6374555861334836}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 16, 'learning_rate': 0.0003}",revived-sweep-13 +87,"{'train/epoch_acc': 0.9987714987714988, 'train/batch_loss': 0.04231283441185951, '_step': 149, 'test/f1-score': 0.9010989010989012, 'test/epoch_acc': 0.9, 'test/epoch_loss': 0.24115624560250176, 'test/recall': 0.9111111111111112, 'test/precision': 0.8913043478260869, 'train/epoch_loss': 0.02119528235872196, 'epoch': 9, '_wandb': {'runtime': 348}, '_runtime': 350.9660577774048, '_timestamp': 1678789826.0085878}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 64, 'learning_rate': 0.0003}",swift-sweep-12 +88,"{'_step': 2059, '_runtime': 397.1281135082245, 'test/recall': 0.8333333333333334, 'test/f1-score': 0.7894736842105262, 'test/epoch_acc': 0.8222222222222223, 'test/precision': 0.75, 'test/epoch_loss': 0.5769641452365452, 'epoch': 9, '_wandb': {'runtime': 393}, '_timestamp': 1678789464.8040044, 'train/epoch_acc': 0.757985257985258, 'train/batch_loss': 0.6127220392227173, 'train/epoch_loss': 0.5840219159676929}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 'learning_rate': 0.0001}",rosy-sweep-11 +89,"{'train/epoch_acc': 0.9938574938574938, '_wandb': {'runtime': 352}, '_runtime': 355.46944642066956, '_timestamp': 1678789057.5684297, 'test/recall': 0.8076923076923077, 'test/f1-score': 0.8842105263157894, 'test/epoch_acc': 0.8777777777777778, 'test/precision': 0.9767441860465116, 'train/epoch_loss': 0.06967324825777176, '_step': 149, 'epoch': 9, 'test/epoch_loss': 0.2696530275874668, 'train/batch_loss': 0.11590295284986496}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 
'step_size': 7, 'batch_size': 64, 'learning_rate': 0.003}",deft-sweep-10 +90,"{'train/epoch_loss': 0.6400203514450599, '_runtime': 342.3234579563141, '_timestamp': 1678788683.006292, 'test/f1-score': 0.7959183673469388, 'test/precision': 0.7090909090909091, 'test/epoch_loss': 0.6248881856600443, 'train/epoch_acc': 0.7014742014742015, 'train/batch_loss': 0.5820533037185669, '_step': 279, 'epoch': 9, '_wandb': {'runtime': 340}, 'test/recall': 0.9069767441860463, 'test/epoch_acc': 0.7777777777777778}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 32, 'learning_rate': 0.0001}",atomic-sweep-9 +91,"{'train/epoch_acc': 0.7432432432432432, 'train/batch_loss': 0.3377891480922699, 'epoch': 9, '_wandb': {'runtime': 351}, 'test/epoch_acc': 0.6555555555555556, 'test/recall': 0.7954545454545454, 'test/f1-score': 0.693069306930693, 'test/precision': 0.6140350877192983, 'test/epoch_loss': 0.6175267219543457, 'train/epoch_loss': 0.5329857344855841, '_step': 1039, '_runtime': 353.4816448688507, '_timestamp': 1678788328.1196988}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 8, 'learning_rate': 0.1}",cosmic-sweep-8 +92,"{'_timestamp': 1678787961.3400052, 'test/recall': 0.8536585365853658, 'test/f1-score': 0.6999999999999998, 'test/epoch_acc': 0.6666666666666667, 'test/precision': 0.5932203389830508, '_step': 2059, '_wandb': {'runtime': 390}, '_runtime': 392.4064960479736, 'train/batch_loss': 0.17200787365436554, 'train/epoch_loss': 0.5631518808058498, 'epoch': 9, 'test/epoch_loss': 0.6419186863634322, 'train/epoch_acc': 0.7186732186732187}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.01}",lunar-sweep-7 +93,"{'test/epoch_acc': 0.9, 'test/precision': 0.9090909090909092, 'test/epoch_loss': 0.24278527200222016, 
'train/epoch_acc': 0.9975429975429976, 'train/epoch_loss': 0.03237721893286529, 'epoch': 9, '_timestamp': 1678787557.992564, 'test/f1-score': 0.8988764044943819, 'test/recall': 0.8888888888888888, 'train/batch_loss': 0.04353119805455208, '_step': 529, '_wandb': {'runtime': 343}, '_runtime': 345.9260220527649}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.01}",zany-sweep-6 +94,"{'test/precision': 0.9767441860465116, 'test/epoch_loss': 0.32114719019995797, '_step': 529, '_wandb': {'runtime': 344}, '_runtime': 346.5414688587189, '_timestamp': 1678787192.9954038, 'test/recall': 0.8571428571428571, 'test/epoch_acc': 0.9111111111111112, 'train/batch_loss': 0.21811823546886444, 'train/epoch_loss': 0.2347587838000103, 'epoch': 9, 'test/f1-score': 0.9130434782608696, 'train/epoch_acc': 0.9336609336609336}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 16, 'learning_rate': 0.001}",absurd-sweep-5 +95,"{'_wandb': {'runtime': 344}, '_timestamp': 1678786835.7254088, 'test/epoch_loss': 0.22436124781767527, '_step': 279, 'epoch': 9, '_runtime': 345.9469966888428, 'test/recall': 0.8461538461538461, 'test/f1-score': 0.8799999999999999, 'test/epoch_acc': 0.9, 'test/precision': 0.9166666666666666, 'train/epoch_acc': 1, 'train/batch_loss': 0.06225413456559181, 'train/epoch_loss': 0.02646600444977348}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 32, 'learning_rate': 0.003}",radiant-sweep-4 +96,"{'test/epoch_acc': 0.8111111111111111, 'test/precision': 0.7446808510638298, 'train/epoch_loss': 0.45506354690476775, 'epoch': 9, '_wandb': {'runtime': 353}, '_runtime': 355.012455701828, '_timestamp': 1678786479.0865147, 'test/recall': 0.875, 'test/f1-score': 0.8045977011494252, 'test/epoch_loss': 0.4459853092829386, 
'train/epoch_acc': 0.8341523341523341, '_step': 1039, 'train/batch_loss': 0.5456343293190002}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.0003}",sandy-sweep-3 +97,"{'train/batch_loss': 0.026765840128064156, '_step': 529, '_runtime': 344.01046657562256, 'test/epoch_loss': 0.31915653232071134, 'train/epoch_acc': 0.9926289926289926, 'test/f1-score': 0.8450704225352113, 'test/epoch_acc': 0.8777777777777778, 'test/precision': 0.9090909090909092, 'train/epoch_loss': 0.045762457081668206, 'epoch': 9, '_wandb': {'runtime': 342}, '_timestamp': 1678786112.108075, 'test/recall': 0.7894736842105263}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.01}",pretty-sweep-2 +98,"{'test/f1-score': 0.379746835443038, 'test/precision': 0.42857142857142855, 'test/epoch_loss': 0.7006691349877252, 'train/epoch_acc': 0.4815724815724816, 'train/epoch_loss': 0.7011552195291262, 'epoch': 9, '_wandb': {'runtime': 357}, '_runtime': 359.66486382484436, '_timestamp': 1678785758.376562, 'test/recall': 0.3409090909090909, 'test/epoch_acc': 0.45555555555555555, 'train/batch_loss': 0.7150550484657288, '_step': 149}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.0003}",rose-sweep-1 +99,"{'_timestamp': 1678785370.5563953, 'test/recall': 0.9090909090909092, 'test/f1-score': 0.8791208791208791, 'test/precision': 0.851063829787234, 'test/epoch_loss': 0.5091631063156657, '_step': 74, '_wandb': {'runtime': 181}, '_runtime': 180.05384421348572, 'train/epoch_acc': 0.995085995085995, 'train/batch_loss': 0.0016211483161896467, 'train/epoch_loss': 0.023103852647056927, 'epoch': 4, 'test/epoch_acc': 0.8777777777777778}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 
'sgd', 'step_size': 3, 'batch_size': 64, 'learning_rate': 0.1}",cosmic-sweep-2 +100,"{'test/recall': 0.9166666666666666, 'test/precision': 0.9166666666666666, 'train/batch_loss': 0.0724378228187561, '_step': 279, 'epoch': 9, '_wandb': {'runtime': 344}, '_timestamp': 1678743707.9633043, 'train/epoch_acc': 0.9828009828009828, 'train/epoch_loss': 0.11044558714297244, '_runtime': 347.11417746543884, 'test/f1-score': 0.9166666666666666, 'test/epoch_acc': 0.9111111111111112, 'test/epoch_loss': 0.2461573594146305}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 32, 'learning_rate': 0.003}",ethereal-sweep-14 +101,"{'_step': 149, 'epoch': 9, '_timestamp': 1678743349.8008895, 'test/epoch_acc': 0.9333333333333332, 'test/precision': 0.9545454545454546, 'test/epoch_loss': 0.16449517243438297, 'train/batch_loss': 0.05796322599053383, '_wandb': {'runtime': 346}, '_runtime': 349.69085454940796, 'test/recall': 0.9130434782608696, 'test/f1-score': 0.9333333333333332, 'train/epoch_acc': 1, 'train/epoch_loss': 0.043383844352398226}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 64, 'learning_rate': 0.003}",northern-sweep-13 +102,"{'test/epoch_acc': 0.788888888888889, '_wandb': {'runtime': 559}, '_runtime': 560.5539684295654, '_timestamp': 1678743376.8770983, 'test/recall': 0.85, 'test/f1-score': 0.7816091954022989, 'test/precision': 0.723404255319149, 'test/epoch_loss': 0.5102662573258082, 'train/epoch_acc': 0.8255528255528255, '_step': 2059, 'epoch': 9, 'train/batch_loss': 0.42048144340515137, 'train/epoch_loss': 0.40511614706651}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 4, 'learning_rate': 0.001}",faithful-sweep-12 +103,"{'_wandb': {'runtime': 355}, 'test/epoch_acc': 0.8666666666666667, 'train/epoch_acc': 0.8955773955773956, 
'train/epoch_loss': 0.27216847456936755, 'test/epoch_loss': 0.3378064884079827, '_step': 1039, 'epoch': 9, '_runtime': 358.3485324382782, '_timestamp': 1678742986.9751594, 'test/recall': 0.7777777777777778, 'test/f1-score': 0.8536585365853658, 'test/precision': 0.945945945945946, 'train/batch_loss': 0.5923706889152527}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 8, 'learning_rate': 0.0003}",zany-sweep-12 +104,"{'test/recall': 0.9166666666666666, 'test/f1-score': 0.7415730337078651, 'test/epoch_acc': 0.7444444444444445, 'test/epoch_loss': 0.615033131175571, 'train/batch_loss': 0.6421169638633728, '_step': 1039, '_wandb': {'runtime': 358}, '_runtime': 362.78373169898987, '_timestamp': 1678742619.1453717, 'test/precision': 0.6226415094339622, 'train/epoch_acc': 0.7481572481572482, 'train/epoch_loss': 0.613342459283824, 'epoch': 9}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.0001}",ruby-sweep-11 +105,"{'train/epoch_loss': 0.09796744051757808, '_step': 2059, 'epoch': 9, '_runtime': 531.6082515716553, '_timestamp': 1678742643.2100165, 'test/recall': 0.8076923076923077, 'test/f1-score': 0.875, 'train/epoch_acc': 0.9656019656019657, '_wandb': {'runtime': 531}, 'test/epoch_acc': 0.8666666666666667, 'test/precision': 0.9545454545454546, 'test/epoch_loss': 0.3795760815549228, 'train/batch_loss': 0.07699991017580032}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 4, 'learning_rate': 0.001}",fallen-sweep-10 +106,"{'_step': 1039, '_wandb': {'runtime': 359}, 'test/epoch_loss': 0.2956610471010208, 'train/batch_loss': 0.1150113120675087, 'epoch': 9, '_runtime': 361.6978232860565, '_timestamp': 1678742242.6362762, 'test/recall': 0.8076923076923077, 'test/f1-score': 0.875, 'test/epoch_acc': 0.8666666666666667, 
'test/precision': 0.9545454545454546, 'train/epoch_acc': 0.9103194103194104, 'train/epoch_loss': 0.24495647845821825}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 7, 'batch_size': 8, 'learning_rate': 0.003}",rare-sweep-10 +107,"{'train/epoch_loss': 0.310643073711407, '_step': 1039, 'epoch': 9, '_wandb': {'runtime': 471}, '_runtime': 471.6707801818848, 'train/epoch_acc': 0.8869778869778869, 'train/batch_loss': 0.14859537780284882, '_timestamp': 1678742103.7627492, 'test/recall': 0.7906976744186046, 'test/f1-score': 0.8717948717948717, 'test/epoch_acc': 0.888888888888889, 'test/precision': 0.9714285714285714, 'test/epoch_loss': 0.26282389760017394}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.003}",major-sweep-9 +108,"{'test/recall': 0.6976744186046512, 'test/f1-score': 0.6451612903225806, 'test/epoch_acc': 0.6333333333333333, 'test/precision': 0.6, 'epoch': 9, '_wandb': {'runtime': 341}, '_runtime': 344.49258494377136, '_timestamp': 1678741869.828495, 'test/epoch_loss': 0.6676742302046882, 'train/epoch_acc': 0.5921375921375921, '_step': 279, 'train/batch_loss': 0.6228023767471313, 'train/epoch_loss': 0.6766868150204932}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 32, 'learning_rate': 0.0001}",spring-sweep-9 +109,"{'_runtime': 452.4322986602783, '_timestamp': 1678741623.0662856, 'test/recall': 0.9318181818181818, 'test/f1-score': 0.9213483146067416, 'test/precision': 0.9111111111111112, 'test/epoch_loss': 0.16872049139605627, 'train/batch_loss': 0.0022799931466579437, '_step': 1039, 'train/epoch_loss': 0.02303326028314504, '_wandb': {'runtime': 451}, 'test/epoch_acc': 0.9222222222222224, 'train/epoch_acc': 0.9987714987714988, 'epoch': 9}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 
'optimizer': 'sgd', 'step_size': 5, 'batch_size': 8, 'learning_rate': 0.003}",elated-sweep-8 +110,"{'_runtime': 345.3405177593231, '_timestamp': 1678741511.9070578, 'test/epoch_acc': 0.9555555555555556, 'test/precision': 0.9761904761904762, 'test/epoch_loss': 0.2148759490913815, 'train/epoch_acc': 0.9606879606879608, 'train/batch_loss': 0.11643347889184952, 'epoch': 9, 'train/epoch_loss': 0.1359616077759049, '_wandb': {'runtime': 342}, 'test/recall': 0.9318181818181818, 'test/f1-score': 0.9534883720930232, '_step': 149}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.003}",hardy-sweep-8 +111,"{'_step': 279, 'test/epoch_loss': 0.2181672462158733, 'train/epoch_acc': 1, 'train/batch_loss': 0.042314428836107254, 'test/recall': 0.8048780487804879, 'test/f1-score': 0.868421052631579, 'test/epoch_acc': 0.888888888888889, 'test/precision': 0.9428571428571428, 'epoch': 9, '_wandb': {'runtime': 342}, '_runtime': 345.1732180118561, '_timestamp': 1678741156.130327, 'train/epoch_loss': 0.008645273717600824}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 32, 'learning_rate': 0.1}",sweepy-sweep-7 +112,"{'train/batch_loss': 0.3791900873184204, '_step': 1039, '_wandb': {'runtime': 453}, '_runtime': 454.0593776702881, 'test/recall': 0.6341463414634146, 'test/precision': 0.8387096774193549, 'test/epoch_loss': 0.4768455002042982, 'train/epoch_acc': 0.8292383292383292, 'epoch': 9, '_timestamp': 1678741159.4683807, 'test/f1-score': 0.7222222222222222, 'test/epoch_acc': 0.7777777777777778, 'train/epoch_loss': 0.45283343838825274}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 8, 'learning_rate': 0.0001}",glorious-sweep-7 +113,"{'test/epoch_acc': 0.9333333333333332, 'test/precision': 0.9333333333333332, 'train/batch_loss': 
0.001889266073703766, 'train/epoch_loss': 0.0030514685945077376, 'epoch': 9, '_timestamp': 1678740798.1400597, '_runtime': 348.53755164146423, 'test/recall': 0.9333333333333332, 'test/f1-score': 0.9333333333333332, 'test/epoch_loss': 0.1931780371401045, 'train/epoch_acc': 1, '_step': 149, '_wandb': {'runtime': 346}}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 64, 'learning_rate': 0.01}",rural-sweep-6 +114,"{'_step': 2059, 'epoch': 9, 'test/f1-score': 0.896551724137931, 'test/precision': 0.9285714285714286, '_wandb': {'runtime': 560}, '_runtime': 560.7404127120972, '_timestamp': 1678740696.0305526, 'test/recall': 0.8666666666666667, 'test/epoch_acc': 0.9, 'test/epoch_loss': 0.22745563416845269, 'train/epoch_acc': 0.984029484029484, 'train/batch_loss': 0.1385842263698578, 'train/epoch_loss': 0.07075482415817952}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 'learning_rate': 0.01}",smart-sweep-6 +115,"{'epoch': 9, '_wandb': {'runtime': 342}, '_runtime': 345.5716743469238, 'test/epoch_acc': 0.8111111111111111, 'test/precision': 0.8636363636363636, 'train/batch_loss': 0.44296249747276306, 'train/epoch_loss': 0.5191410552225183, '_step': 529, '_timestamp': 1678740438.4959724, 'test/recall': 0.7755102040816326, 'test/f1-score': 0.8172043010752688, 'test/epoch_loss': 0.507676590151257, 'train/epoch_acc': 0.7616707616707616}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 16, 'learning_rate': 0.1}",giddy-sweep-5 +116,"{'_wandb': {'runtime': 342}, '_runtime': 345.28623247146606, 'test/f1-score': 0.6842105263157895, 'train/epoch_acc': 0.8538083538083537, 'train/batch_loss': 0.4066888689994812, 'test/precision': 0.7027027027027027, 'test/epoch_loss': 0.6657861550649007, 'train/epoch_loss': 0.32492415251837314, '_step': 529, 'epoch': 
9, '_timestamp': 1678740073.5443084, 'test/recall': 0.6666666666666666, 'test/epoch_acc': 0.7333333333333334}","{'eps': 1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.1}",lilac-sweep-4 +117,"{'test/recall': 0.8367346938775511, 'test/f1-score': 0.8913043478260869, 'test/epoch_acc': 0.888888888888889, 'test/precision': 0.9534883720930232, 'train/epoch_acc': 0.9803439803439804, 'train/batch_loss': 0.01167443674057722, '_step': 1039, 'epoch': 9, '_timestamp': 1678740126.212114, 'test/epoch_loss': 0.2600655794143677, 'train/epoch_loss': 0.08152788232426166, '_wandb': {'runtime': 454}, '_runtime': 454.98564982414246}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.001}",hearty-sweep-5 +118,"{'epoch': 9, '_wandb': {'runtime': 354}, '_runtime': 356.9382667541504, 'test/epoch_acc': 0.788888888888889, 'train/epoch_loss': 0.5079173609724209, '_step': 1039, '_timestamp': 1678739717.8250418, 'test/recall': 0.875, 'test/f1-score': 0.7865168539325842, 'test/precision': 0.7142857142857143, 'test/epoch_loss': 0.4899995631641812, 'train/epoch_acc': 0.8144963144963144, 'train/batch_loss': 0.6180618405342102}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.99, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 8, 'learning_rate': 0.0001}",silvery-sweep-3 +119,"{'test/epoch_acc': 0.888888888888889, 'test/precision': 0.9142857142857144, 'train/batch_loss': 0.2711101472377777, 'train/epoch_loss': 0.28549219298128414, '_wandb': {'runtime': 453}, '_runtime': 454.2519624233246, '_timestamp': 1678739662.5458224, 'test/f1-score': 0.8648648648648648, 'test/epoch_loss': 0.3028925802972582, 'train/epoch_acc': 0.8968058968058967, '_step': 1039, 'epoch': 9, 'test/recall': 0.8205128205128205}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.99, 
'optimizer': 'sgd', 'step_size': 5, 'batch_size': 8, 'learning_rate': 0.0003}",dulcet-sweep-4 +120,"{'_timestamp': 1678739351.1315958, 'test/f1-score': 0.6451612903225806, 'test/epoch_acc': 0.6333333333333333, 'test/epoch_loss': 0.6651701913939582, 'train/epoch_acc': 0.6928746928746928, 'train/batch_loss': 0.6685948967933655, '_step': 529, 'epoch': 9, 'test/recall': 0.7894736842105263, 'test/precision': 0.5454545454545454, 'train/epoch_loss': 0.6479796424544707, '_wandb': {'runtime': 341}, '_runtime': 343.88807487487793}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.999, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.001}",glamorous-sweep-2 +121,"{'train/batch_loss': 0.6510805487632751, '_step': 1039, 'epoch': 9, '_wandb': {'runtime': 469}, 'test/f1-score': 0.7608695652173914, 'test/epoch_loss': 0.6144020875295003, 'train/epoch_acc': 0.7542997542997543, '_runtime': 469.65283608436584, '_timestamp': 1678739200.083605, 'test/recall': 0.875, 'test/epoch_acc': 0.7555555555555555, 'test/precision': 0.6730769230769231, 'train/epoch_loss': 0.6267796501480684}","{'eps': 0.1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.0001}",hopeful-sweep-3 +122,"{'_wandb': {'runtime': 353}, '_timestamp': 1678738994.027642, 'test/recall': 0.8409090909090909, 'test/precision': 0.8409090909090909, 'test/epoch_loss': 0.3028163850307465, 'train/batch_loss': 0.0980801358819008, '_step': 279, '_runtime': 357.5890119075775, 'test/f1-score': 0.8409090909090909, 'test/epoch_acc': 0.8444444444444444, 'train/epoch_acc': 0.9975429975429976, 'train/epoch_loss': 0.03763626415181805, 'epoch': 9}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 5, 'batch_size': 32, 'learning_rate': 0.003}",lunar-sweep-1 +123,"{'test/f1-score': 0.7157894736842105, 'test/precision': 0.5964912280701754, 'train/epoch_acc': 
0.6658476658476659, '_step': 2059, '_runtime': 529.6096863746643, '_timestamp': 1678738720.9443874, 'test/epoch_acc': 0.7000000000000001, 'test/epoch_loss': 0.5541173484590318, 'train/batch_loss': 0.7896618843078613, 'train/epoch_loss': 0.618659178367118, 'epoch': 9, '_wandb': {'runtime': 529}, 'test/recall': 0.8947368421052632}","{'eps': 1e-08, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.9, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 4, 'learning_rate': 0.1}",stoic-sweep-2 +124,"{'test/recall': 0.6578947368421053, 'test/f1-score': 0.7575757575757577, 'test/epoch_acc': 0.8222222222222223, 'test/precision': 0.8928571428571429, 'test/epoch_loss': 0.4269479903909895, 'train/epoch_loss': 0.016353931551580648, '_step': 529, 'epoch': 9, '_wandb': {'runtime': 353}, '_runtime': 355.4184715747833, '_timestamp': 1678738469.1834886, 'train/epoch_acc': 0.995085995085995, 'train/batch_loss': 0.0014543599681928754}","{'eps': 1e-08, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 16, 'learning_rate': 0.0001}",dark-sweep-2 +125,"{'test/epoch_acc': 0.8555555555555556, 'test/precision': 0.8780487804878049, 'test/epoch_loss': 0.40116495291392007, '_step': 1039, 'epoch': 9, '_runtime': 384.5172441005707, '_timestamp': 1678738101.018471, 'test/f1-score': 0.8470588235294119, 'train/batch_loss': 0.31195682287216187, 'train/epoch_loss': 0.3623260387038716, '_wandb': {'runtime': 381}, 'test/recall': 0.8181818181818182, 'train/epoch_acc': 0.8673218673218673}","{'eps': 0.1, 'gamma': 0.5, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.0003}",trim-sweep-1 +126,"{'train/batch_loss': 0.6653294563293457, '_wandb': {'runtime': 560}, 'test/recall': 0.9090909090909092, 'test/f1-score': 0.8602150537634408, 'test/precision': 0.8163265306122449, 'train/epoch_acc': 0.7567567567567567, 'test/epoch_loss': 0.6165981186760796, 'train/epoch_loss': 
0.6107166709712448, '_step': 2059, 'epoch': 9, '_runtime': 560.7235152721405, '_timestamp': 1678738182.1088202, 'test/epoch_acc': 0.8555555555555556}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.9, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 2, 'batch_size': 4, 'learning_rate': 0.001}",sparkling-sweep-1 +127,"{'_step': 555, '_wandb': {'runtime': 118}, '_runtime': 122.13349413871764, '_timestamp': 1678737059.0375042, 'test/recall': 0.6818181818181818, 'test/epoch_acc': 0.6555555555555556, 'test/precision': 0.6382978723404256, 'test/epoch_loss': 0.6796493821673923, 'train/epoch_acc': 0.5515970515970516, 'train/batch_loss': 0.6759337782859802, 'epoch': 1, 'test/f1-score': 0.6593406593406593, 'train/epoch_loss': 0.6851893525744539}","{'eps': 1, 'gamma': 0.1, 'epochs': 10, 'beta_one': 0.99, 'beta_two': 0.5, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 'learning_rate': 0.0003}",serene-sweep-1 +128,"{'_runtime': 456.3002746105194, 'test/f1-score': 0.8888888888888888, 'test/epoch_loss': 0.45068282733360926, 'train/batch_loss': 0.003167948452755809, '_wandb': {'runtime': 455}, 'epoch': 9, 'test/epoch_acc': 0.8777777777777778, '_step': 1159, 'test/recall': 0.8461538461538461, 'test/batch_loss': 0.1311825066804886, 'train/epoch_loss': 0.032788554922144414, '_timestamp': 1678734250.8076646, 'train/epoch_acc': 0.9914004914004914, 'test/precision': 0.9361702127659576}","{'gamma': 0.5, 'epochs': 10, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.003}",super-sweep-10 +129,"{'train/epoch_acc': 0.687960687960688, 'train/epoch_loss': 0.5984233345387902, '_runtime': 564.230875492096, '_timestamp': 1678733784.6976814, 'test/epoch_acc': 0.7111111111111111, 'test/epoch_loss': 0.5302444166607327, 'epoch': 9, '_wandb': {'runtime': 563}, 'test/recall': 0.7674418604651163, '_step': 2289, 'train/batch_loss': 0.3260266184806824, 'test/f1-score': 0.7173913043478259, 'test/precision': 0.673469387755102, 'test/batch_loss': 
0.9658783674240112}","{'gamma': 0.1, 'epochs': 10, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 'learning_rate': 0.01}",distinctive-sweep-9 +130,"{'train/batch_loss': 0.007875862531363964, 'train/epoch_loss': 0.1743801347293527, 'test/epoch_acc': 0.9333333333333332, 'test/precision': 1, 'test/batch_loss': 0.1419784128665924, '_step': 2289, '_runtime': 527.6160025596619, 'test/recall': 0.8636363636363636, 'test/f1-score': 0.9268292682926828, 'test/epoch_loss': 0.17092165086004468, 'train/epoch_acc': 0.9496314496314496, 'epoch': 9, '_wandb': {'runtime': 527}, '_timestamp': 1678733210.1129615}","{'gamma': 0.5, 'epochs': 10, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.0003}",winter-sweep-8 +131,"{'epoch': 9, 'test/precision': 1, 'train/batch_loss': 0.04383014515042305, 'test/batch_loss': 0.27116066217422485, 'train/epoch_loss': 0.07730489082323246, 'test/epoch_acc': 0.9222222222222224, 'test/epoch_loss': 0.21558621691332924, 'train/epoch_acc': 0.9791154791154792, '_step': 1159, '_wandb': {'runtime': 452}, '_runtime': 453.52900218963623, '_timestamp': 1678732673.1225052, 'test/recall': 0.8292682926829268, 'test/f1-score': 0.9066666666666668}","{'gamma': 0.5, 'epochs': 10, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.001}",stilted-sweep-7 +132,"{'_step': 2289, 'test/batch_loss': 0.4716488718986511, 'test/epoch_loss': 0.6190193812052409, 'test/precision': 0.6538461538461539, 'train/epoch_acc': 0.7272727272727273, 'train/epoch_loss': 0.5549268187263967, '_runtime': 561.7993631362915, 'test/recall': 0.7555555555555555, 'test/f1-score': 0.7010309278350516, 'test/epoch_acc': 0.6777777777777778, 'epoch': 9, '_wandb': {'runtime': 561}, 'train/batch_loss': 0.48304444551467896, '_timestamp': 1678732212.5530572}","{'gamma': 0.5, 'epochs': 10, 'optimizer': 'adam', 'step_size': 3, 'batch_size': 4, 'learning_rate': 0.01}",summer-sweep-6 +133,"{'_step': 1159, '_wandb': {'runtime': 453}, '_timestamp': 1678731639.156168, 
'test/precision': 0.945945945945946, 'test/f1-score': 0.813953488372093, 'epoch': 9, '_runtime': 454.3645238876343, 'test/recall': 0.7142857142857143, 'test/epoch_acc': 0.8222222222222223, 'test/batch_loss': 0.5068956017494202, 'test/epoch_loss': 0.4936415394147237, 'train/epoch_loss': 0.5186349417126442, 'train/epoch_acc': 0.8218673218673218, 'train/batch_loss': 0.4434223175048828}","{'gamma': 0.5, 'epochs': 10, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.0001}",different-sweep-5 +134,"{'_step': 1159, '_wandb': {'runtime': 453}, '_runtime': 454.26038885116577, 'test/epoch_loss': 0.5482642173767089, 'test/precision': 0.825, 'test/batch_loss': 0.5159374475479126, 'train/epoch_acc': 0.812039312039312, 'train/batch_loss': 0.5655931830406189, 'test/f1-score': 0.8354430379746836, 'test/epoch_acc': 0.8555555555555556, 'train/epoch_loss': 0.5429200196149016, 'epoch': 9, '_timestamp': 1678731176.111379, 'test/recall': 0.8461538461538461}","{'gamma': 0.5, 'epochs': 10, 'optimizer': 'sgd', 'step_size': 2, 'batch_size': 8, 'learning_rate': 0.0001}",wise-sweep-4 +135,"{'epoch': 9, '_wandb': {'runtime': 528}, 'test/recall': 0.775, 'test/epoch_acc': 0.8777777777777778, 'test/precision': 0.9393939393939394, 'test/batch_loss': 1.7588363885879517, 'train/epoch_loss': 0.02060394324720534, '_step': 2289, '_runtime': 528.9760706424713, 'test/f1-score': 0.8493150684931509, '_timestamp': 1678730714.7711067, 'train/epoch_acc': 0.9963144963144964, 'train/batch_loss': 0.00470334617421031, 'test/epoch_loss': 0.24194780117250048}","{'gamma': 0.5, 'epochs': 10, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 4, 'learning_rate': 0.003}",misty-sweep-3 +136,"{'test/f1-score': 0.7536231884057972, 'test/epoch_acc': 0.8111111111111111, '_step': 1159, '_wandb': {'runtime': 454}, 'test/batch_loss': 0.455120325088501, 'test/epoch_loss': 0.4792341656155056, 'train/batch_loss': 0.5347514748573303, 'epoch': 9, 'train/epoch_acc': 0.8329238329238329, 'test/recall': 
0.6842105263157895, '_timestamp': 1678730177.1362092, 'test/precision': 0.8387096774193549, 'train/epoch_loss': 0.42904984072326735, '_runtime': 455.41485929489136}","{'gamma': 0.1, 'epochs': 10, 'optimizer': 'sgd', 'step_size': 3, 'batch_size': 8, 'learning_rate': 0.0003}",unique-sweep-2 +137,"{'test/precision': 0.9047619047619048, 'train/epoch_acc': 0.9901719901719902, 'test/recall': 0.8636363636363636, 'test/epoch_acc': 0.888888888888889, 'test/batch_loss': 2.5320074558258057, 'test/epoch_loss': 0.5442472649919283, 'train/epoch_loss': 0.024021292951151657, '_wandb': {'runtime': 527}, 'test/f1-score': 0.8837209302325582, 'epoch': 9, '_runtime': 528.4356484413147, '_timestamp': 1678729705.2001765, 'train/batch_loss': 0.005740344058722258, '_step': 2289}","{'gamma': 0.5, 'epochs': 10, 'optimizer': 'sgd', 'step_size': 7, 'batch_size': 4, 'learning_rate': 0.003}",polar-sweep-1 diff --git a/classification/classifier/hyp-metrics.ipynb b/classification/classifier/hyp-metrics.ipynb new file mode 100644 index 0000000..45f1c0a --- /dev/null +++ b/classification/classifier/hyp-metrics.ipynb @@ -0,0 +1,916 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "747ddcf2", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/zenon/.local/share/miniconda3/lib/python3.7/site-packages/requests/__init__.py:104: RequestsDependencyWarning: urllib3 (1.26.13) or chardet (5.1.0)/charset_normalizer (2.0.4) doesn't match a supported version!\n", + " RequestsDependencyWarning)\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33me1527193\u001b[0m (\u001b[33mflower-classification\u001b[0m). 
Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" + ] + } + ], + "source": [ + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import seaborn as sns\n", + "import os\n", + "import time\n", + "import random\n", + "import wandb\n", + "import torch\n", + "wandb.login()\n", + "\n", + "from evaluation.helpers import set_size\n", + "\n", + "torch.manual_seed(42)\n", + "np.random.seed(42)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "76cc2ca7", + "metadata": {}, + "outputs": [], + "source": [ + "api = wandb.Api()\n", + "\n", + "# Project is specified by \n", + "runs = api.runs(\"flower-classification/pytorch-sweeps-demo\")\n", + "\n", + "summary_list, config_list, name_list = [], [], []\n", + "for run in runs: \n", + " # .summary contains the output keys/values for metrics like accuracy.\n", + " # We call ._json_dict to omit large files \n", + " summary_list.append(run.summary._json_dict)\n", + "\n", + " # .config contains the hyperparameters.\n", + " # We remove special values that start with _.\n", + " config_list.append(\n", + " {k: v for k,v in run.config.items()\n", + " if not k.startswith('_')})\n", + "\n", + " # .name is the human-readable name of the run.\n", + " name_list.append(run.name)\n", + "\n", + "runs_df = pd.DataFrame({\n", + " \"summary\": summary_list,\n", + " \"config\": config_list,\n", + " \"name\": name_list\n", + " })\n", + "\n", + "runs_df.to_csv(\"hyp-metrics.csv\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "353f9082", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
Unnamed: 0nametest/epoch_losstrain/epoch_acctrain/batch_lossepoch_timestamptest/recalltest/precision_step...test/batch_lossepsgammaepochsbeta_onebeta_twooptimizerstep_sizebatch_sizelearning_rate
00fiery-sweep-260.5664620.8230960.33577991.680693e+090.6170210.8285712059...NaN1.000000e-010.1100.990.900adam340.0003
11radiant-sweep-250.6454580.7125310.7014591.680693e+090.8222220.6851851039...NaN1.000000e+000.5100.990.900adam280.0003
22blooming-sweep-240.3481290.9987710.01956691.680692e+090.7837840.9354841039...NaN1.000000e-080.5100.900.999sgd580.0030
33visionary-sweep-230.5553180.8353810.52223391.680692e+090.8333330.760870529...NaN1.000000e+000.1100.900.900sgd2160.0003
44ancient-sweep-221.5602710.5577400.50836611.680692e+090.8846150.589744410...NaN1.000000e-080.5100.900.990adam740.0100
..................................................................
133133different-sweep-50.4936420.8218670.44342291.678732e+090.7142860.9459461159...0.506896NaN0.510NaNNaNsgd380.0001
134134wise-sweep-40.5482640.8120390.56559391.678731e+090.8461540.8250001159...0.515937NaN0.510NaNNaNsgd280.0001
135135misty-sweep-30.2419480.9963140.00470391.678731e+090.7750000.9393942289...1.758836NaN0.510NaNNaNsgd340.0030
136136unique-sweep-20.4792340.8329240.53475191.678730e+090.6842110.8387101159...0.455120NaN0.110NaNNaNsgd380.0003
137137polar-sweep-10.5442470.9901720.0057491.678730e+090.8636360.9047622289...2.532007NaN0.510NaNNaNsgd740.0030
\n", + "

138 rows × 25 columns

\n", + "
" + ], + "text/plain": [ + " Unnamed: 0 name test/epoch_loss train/epoch_acc \\\n", + "0 0 fiery-sweep-26 0.566462 0.823096 \n", + "1 1 radiant-sweep-25 0.645458 0.712531 \n", + "2 2 blooming-sweep-24 0.348129 0.998771 \n", + "3 3 visionary-sweep-23 0.555318 0.835381 \n", + "4 4 ancient-sweep-22 1.560271 0.557740 \n", + ".. ... ... ... ... \n", + "133 133 different-sweep-5 0.493642 0.821867 \n", + "134 134 wise-sweep-4 0.548264 0.812039 \n", + "135 135 misty-sweep-3 0.241948 0.996314 \n", + "136 136 unique-sweep-2 0.479234 0.832924 \n", + "137 137 polar-sweep-1 0.544247 0.990172 \n", + "\n", + " train/batch_loss epoch _timestamp test/recall test/precision _step \\\n", + "0 0.335779 9 1.680693e+09 0.617021 0.828571 2059 \n", + "1 0.70145 9 1.680693e+09 0.822222 0.685185 1039 \n", + "2 0.019566 9 1.680692e+09 0.783784 0.935484 1039 \n", + "3 0.522233 9 1.680692e+09 0.833333 0.760870 529 \n", + "4 0.508366 1 1.680692e+09 0.884615 0.589744 410 \n", + ".. ... ... ... ... ... ... \n", + "133 0.443422 9 1.678732e+09 0.714286 0.945946 1159 \n", + "134 0.565593 9 1.678731e+09 0.846154 0.825000 1159 \n", + "135 0.004703 9 1.678731e+09 0.775000 0.939394 2289 \n", + "136 0.534751 9 1.678730e+09 0.684211 0.838710 1159 \n", + "137 0.00574 9 1.678730e+09 0.863636 0.904762 2289 \n", + "\n", + " ... test/batch_loss eps gamma epochs beta_one beta_two \\\n", + "0 ... NaN 1.000000e-01 0.1 10 0.99 0.900 \n", + "1 ... NaN 1.000000e+00 0.5 10 0.99 0.900 \n", + "2 ... NaN 1.000000e-08 0.5 10 0.90 0.999 \n", + "3 ... NaN 1.000000e+00 0.1 10 0.90 0.900 \n", + "4 ... NaN 1.000000e-08 0.5 10 0.90 0.990 \n", + ".. ... ... ... ... ... ... ... \n", + "133 ... 0.506896 NaN 0.5 10 NaN NaN \n", + "134 ... 0.515937 NaN 0.5 10 NaN NaN \n", + "135 ... 1.758836 NaN 0.5 10 NaN NaN \n", + "136 ... 0.455120 NaN 0.1 10 NaN NaN \n", + "137 ... 
2.532007 NaN 0.5 10 NaN NaN \n", + "\n", + " optimizer step_size batch_size learning_rate \n", + "0 adam 3 4 0.0003 \n", + "1 adam 2 8 0.0003 \n", + "2 sgd 5 8 0.0030 \n", + "3 sgd 2 16 0.0003 \n", + "4 adam 7 4 0.0100 \n", + ".. ... ... ... ... \n", + "133 sgd 3 8 0.0001 \n", + "134 sgd 2 8 0.0001 \n", + "135 sgd 3 4 0.0030 \n", + "136 sgd 3 8 0.0003 \n", + "137 sgd 7 4 0.0030 \n", + "\n", + "[138 rows x 25 columns]" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df = pd.read_csv('hyp-metrics.csv',\n", + " delimiter=',')\n", + "df['summary'] = df['summary'].map(eval)\n", + "df['config'] = df['config'].map(eval)\n", + "df = df.join(pd.json_normalize(df['summary'])).drop('summary', axis='columns')\n", + "df = df.join(pd.json_normalize(df['config'])).drop('config', axis='columns')\n", + "df" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "4679b2f8", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/zenon/.local/share/miniconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: In a future version of pandas all arguments of Series.sort_values will be keyword-only\n", + " \"\"\"Entry point for launching an IPython kernel.\n" + ] + }, + { + "data": { + "text/plain": [ + "0.0100 21\n", + "0.1000 21\n", + "0.0003 23\n", + "0.0010 23\n", + "0.0001 23\n", + "0.0030 27\n", + "Name: learning_rate, dtype: int64" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df['learning_rate'].value_counts().sort_values(0)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "1b1a54fc", + "metadata": {}, + "outputs": [], + "source": [ + "# Style the plots (with grid this time)\n", + "width = 418\n", + "sns.set_theme(style='whitegrid',\n", + " rc={'text.usetex': True, 'font.family': 'serif', 'axes.labelsize': 10,\n", + " 'font.size': 
10, 'legend.fontsize': 8,\n", + " 'xtick.labelsize': 8, 'ytick.labelsize': 8})\n", + "\n", + "fig_save_dir = '../../thesis/graphics/'" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "00efa25b", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjgAAAFbCAYAAADY/fSfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAABX2UlEQVR4nO3de3yT9d0//teVNOkxh5ZCOTTlVMA2lAmo0KDzxKSg27ROgtvuDdTCDvctTnGH+1bc0HvfTYoTt7kbOmXz/k2Nbt3uqTQgOk9NQRSUNhTkVJpybGmbpE0PaXL9/qi5JLSFkia9kvT1fDx80OuTK9f17ser7TufoyCKoggiIiKiOKKQOwAiIiKicGOCQ0RERHGHCQ4RERHFHSY4REREFHeY4BAREVHcYYJDREREcYcJDhEREcUdJjhEREQUdxLkDmA47N27F6IoQqVSyR0KERERhcjr9UIQBMyePfuS546IFhxRFBGJBZtFUUR3d3dErh0vWEehY91dGusodKw7iqRIPV+X8/d8RLTgBFpuCgoKwnpdj8eD2tpa5ObmIiUlJazXjheso9Cx7i6NdRQ61h1FUqSer+rq6kGfK0uC43K5YLFYAAAlJSX9nmO1WgEATqcTBoMBJpNp2OIjIiKi2CZLF5XNZkNra+uArzscDthsNhQVFcFsNqOsrGz4giMiIqKYJ0uCU1RUhJycnAFft9ls0Gg00rFGo4HNZhuO0IiIiCgOROUYnPr6euj1eulYr9fD5XIN6ZqiKMLj8QwxsmAdHR1B/1JfrKPQse4ujXUUOtYdRVKkni9RFCEIwqDOjcoEpz9Op3NI7/d6vaitrQ1TNMHq6uoict14wjoKHevu0lhHoWPdUSRF4vlSq9WDOi8qE5ycnJygFpvW1lYYDIYhXVOlUiE3N3eooQXp6OhAXV0dJk2ahOTk5LBeO16wjkLHurs01lHoWHcUSZF6vg4fPjzoc6MqwXG5XNBqtTCZTFi/fr1U3tDQMORZVIIgRGwqZHJyMqdZXgLrKHSsu0tjHYWOdUeRFO7na7DdU4BMCY7NZkNlZSXcbjcMBgOKiooAAMXFxSgvL4fBYMCSJUtgtVrhdDqxcuVKOcIkIopLPT09/X5NFE9kSXBMJlO/LTI7duyQvg4kPUREFD4H9n+G7y1/AP/vNz/H2zvewbrK9fjfv5ZBq9Vc+s1EMSSquqiIiCiydlj/hbpj9bjnmz9EZ0cnElQJqPl0P0zXzZM7NKKwGhF7URERUa9/f3AVvnzjAnR4OiCKIn74QAmTG4pLTHCIiEaQF194Fe+/Y4NC0fvr/7n/eQGf7h38/j5EsYIJDhHRCDJpcg7SNGnYuOnXuH3prdCn6zEma7TcYRGFHcfgEBGNIKbr5uGDPdugUiVgvGEMfvrIgxg7LkvusIjCji04REQjjE6nlb7W6jh7iuITW3CIKCbV2g+iqvJDpGqSkZeXJ3c4RBRlmOAQUcz5n98+j1+te0o6PvrZcfzssYdkjCh29PT0oOzZP2PZt4tx9nQj7J8cxHfu/abcYRGFHRMcIoopbncbnl7/bFDZc//zAu77/ncxekymTFHFjl8//huUPftnvPv2Bzhy6BgazzZhfPZ4LFx0g9yhEYUVx+AQUUxpOdeCzo7OoLKeHh/OnmmSKaLYUvKDFZiSOxk7K3ej8WwTbll8E264+Vq5wyIKOyY4RBRTciYZcIVxelCZIWc
C8i4oo/51d3eju7tbOvZ4OuDz+WWMiCgymOAQUczZ/KeNuOHm66DRpqHgynz8/rkN0sJ1dHF/+ZMFDfUncNMtX8b47HGofG8nbO/vkjssorDjGBwiijk5kwzY/MIz6OnxYt++auROnyJ3SDHj4f9ajaxxWSheehs+3LUb5xpbcePC6+QOiyjs+JGHiGLOxv/3OxQvXIrGs0144se/wpM/3yB3SDFjf/UBbP7dFnz8TiWq3rLhz2UvobXFKXdYRGHHFhyKGNH/Rb++KLKPn8Kj09OJ117+Pxw+eQq3LPg6urq9cDa14MGf/ge0GXq5w4t677z5Lk6eOIVV3/sZur09UCUoUVtzAIXccJPiDFtwKCI6m8/h9Pvvoqe9Haltbrir90H0+eQOi+JAUkoSfvvME1Aplejq9kIhCPj1Ez9lcjNIqk4PAKDb2/NFYZtLpmiIIocJDkVEx+lT6PF44NzzMVI97ehxueBtb5c7LIoD3d1e/PyXGzFKkwaFICBTo8Gvn94ET7tH7tBiwpEjdUHHPT4/zpw4JU8wRBHELiqKCH2eEV3NLfC6ez8ZpuROg1qrvcS7iC5NIYq4M38mtHPmQp2VgY5TTfB0d0PpF+UOLSZ85ZYv429b34Xv8y5kTUoS5i5g9xTFH7bgUES4Dh+SkhsA8Bw9Cm97m4wRUbxISFRjbO5kpCQmIqG1HZrkZGRMGA91aorcocWEHbuqpeQGAFztHbAfqZcxIqLIYIJDEaHSaKBQqaC9cjY6kpKhSEyEUp0od1gxQfT5sO2Xf8Qnz/8D7ZV2WH/xR3iaW+UOK2r0eHtgfXsPej5fnM7vF/Hm+9Xo6uySObLY0NHR0afM7eaHD4o/7KKiiEgZOw5JozLR6fXCrdFiwvTpUKhUcocVE/a9/h4OfHQMSqUAlUqJzs4evLXRgq/+YpXcoUUFhSDgtsI89LR50Oz2IEOTglvnzYBKyc9rg1F3tG9rzWcHDssQCVFk8TcCRYyU0AgChATm0oOl16Vi6uQMJKoT4PX6MHliOsaO5filAEGpwHufHoX92Bm89Nan+OTwSby/7xifsUGav+DqPmWFC66RIRKiyOJvBIqI7o4uvPPH17H3Xx8iRZMC9XdFFNx0ldxhxYSTNYeh1SQiJVkFb48PyUkquE+fkzusqNF4qhF7q49Jx5U1xwEAdZ/VYUoeVzS+lPvXfB/H6xx4s+JfUKkSsGLlt3HTLdfLHRZR2DHBoYj459MvYcMfn8cpVzMSFErs3F+D3/79WWRkj5Y7tKiXWTAD+17dgdGZqUhIUKC9vRuqsay3AAWEAcppMNLSUvHzn/4Ij60pwemzZzF6/CS5Q6I44vf54D5yGJ7Gs9B0e+Hr7ARS5JkAwN8JFBHPvWjBKVczAKDH70NlXS3e+FO5zFHFBt2k8VAlKaVjlVoJzaQJMkYUXTLHj8bU6dlBZePGj8Iktt4Myp6/v4P3/+evaNlXC/+RU3jnaQtO1dbJHRbFiZaaariOHEaPy4Xkzg64Pt0btKr9cGKCQxFx8vPk5nwNba3DH0gMevOP/0Bqkgonmpz45PBJqFVK7Nn6ntxhRY2ONg++NE6DudMnICs9DbNzx2PBjLFobez7zFFfgqsRhYvyoE1PQVa2HtcuMcJ57IjcYVEcEH0+eE6dDCrzd3aiq1men00mOBQReXnT+pRde8u1MkQSe3xpqWhp68BnjiYcdDTC1d6Jsx1eucOKGkkpyfCla3HVjGx84/oCzM83wKVSQMetGgYlc2xa0HGCSgm9PkmmaCiuCAIEhQJnP1/Wwt3ugaezS7YJAByDQxGx6htfxZFDR9DockMAcNOsmZidN13usGLC7LnT0fReK66/cgq8PT4kqhIwby7rLkAU/chN16OzpXchSYVCgfwxo+H3+aDkVPFLanN1Ii1NHVzm5hpCNHSCQoFn//EOyv9vG2ZMzobjdBPGj83EK7cUyRIPExyKiBvuWYrXrrkSx9vdOHf6FOYajUgbx4G
yg5Gi1eKooxnTJmYiUZWAs81tGDVhotxhRQ+/iPZOD3Sj09DW2IbUUalwtnXI1s8faw4ebMScOeMhCL2Dtb1dPWho70KuzHFRfNi7/zN0dnvx6cHemY4dXm/QytnDiQkORYSgEDB21gxoPR7UqgVossfKHVLMGGMYjWmTxwCf/1IYnZGGqbOY4AR4fT7oDTpkpmuRrE1G6qhUJLZ1oLOrC2lJXC37UuZ9+6v463+XYWy2Dt5uHxqbu/C9px6UOyyKA93dXhw9XBdU1uHpRG3NQVxTOHfY42GCQxRlEpISkahJhS7XACe88O47hpR0LvQXIABwNbgwSqdBWmYaRFGEq8EJQex/+jgFS9Wm4nhLOz7YfRAAMP+Wa6BSc5VxGjq/34+uru4+5WfONMoQDQcZE0WdJG0qJs6bjqxZU5E4Ro+JphnQGsbIHVbUUCclwuX3IbAcjiAIcHq7karXyBtYjPjH8/+E88w5FN91LRZcewX2/Otj7Hl/r9xhURxITFRjQva4oDJlghLX3VAoSzxswSGKMp3N5+B1NqO7tRlavx9QKtB2/Dj0V+TJHVpU6Gj34Irc3i5P5ykntFlaTJ8+Hq5zzdCOypA5uuj3jRWL8eU8PZQKAUAOFi+ajakLZskdFsUBQRDwh8f/E3etfBBd3t6Znz/7zt3Q63WyxMMWHIqYptqjOPraO+iy16HLxd2KB02dhKN1ZyEIgEqpgNPpgdPTI3dUUcPT7MLZg2dw9tBZNNc348xnZ3DmwGk4TzbJHVpM8Bw/9nly0yspQUD7iQYZI6J44evpwU8e+TUmaibg6vEFMI6ehrJX38CxT2tliYcJDkVE/fsfY79lK5prj6Ln6CnYnytHTwenog7Grle3YpIhUzrWapNxrOojGSOKLonqBPi8PnhaPACADmcHerp6kJaaLHNksaHH4+lb1tEhQyQUbwQRyE4bh/RkHRSCAmnqFEzW5SApQZ4xXkxwKCKOvbMr6Njv7cGxd3fLFE1s6TjXiu7uHrQ2taOlsQ2iXwS8otxhRY0knQYQ+g4oTslMlyGa2NPvk9RPfRJdru6eHrjaggcZiyJw6pxLlniY4FBEtLj7dkkdaTghQySxR5WcCm+3D/rMVKSPTkOPzw+f3yd3WFHDByBpTGpQWUqWBl6R6+AMhtfdtwWnq9UtQyQUb9SJauhH9R1vMyY7S4ZomOCErOFIA459vkHdnnf3oqOdTbznqz0RnMz0+HxoaG2RKZrYMqMwHxrdF90tanUCRudwkcQAX48Pb+08hPEFEzA6dzTGF4zHOx8fRk8PW7kGI3VC341bdVMmDX8gFHcUCgXuXm2GcN4Yr3mLrkH2FHk2C2aCE4LWplasW/lLlD7wG7z3tw/wu//8A359fylEkb9gA/Im9S5M19zWhh6fDwqFAtdeM0fmqGJD7Sc1fcqcztbhDyRKuVvbIPr9SEhMQNqoNKiS1BAgoLWRCfRgaKZORbuzCz3dPeju6EZHlwJJo5lA09CJoohX/vF/ONh6FPNuuRJOlQuvv/smms/J87PJBCcE+kw9rrt1ATxuD3Zu/RAKhQKLv1kkLX1OwJxbb4QoishIS0OCUgm/UsCUOTPlDismNDT17S44VndWhkiikyYtGbd/dS4UCgFVuw4BEPHVJbOhS+WGkYPheP9jnD1wEs2nO3Hy4Fmc/uQIzn2+rD7RUAiCgJuvuwov/OoB3HVjPp5dex++/bUbodXJs0YVE5wQjZ80XvpanaRG5thRMkYTfc4dOBqU8CX4gY5mp4wRxQ5j4VV4b3sN3M4OdHq6UfuJAx3q1Eu/cYTwdnYD/t5fpsb8bCgUCoh+Ed5uTqUfjIk3XIPOFB2mFi8EjJPhHz0GmVdMkTssigOiKOKanNHQa3p/X6kSErDwyiuAz9fEGW5McELQ0tiCPz/5v1Aqlci9cio62juw+fHn2EV1Hmf96T5lJ/YekCGS2DPvhvnwaXV485W92PqXj2A/cBr3/GSl3GFFDU2
mHnveO4KeHh+0mmT4fX58+NZnyMjmas+DUWV5G7U7D2DHb/8O+99349O3PsXR3fKsU0Lxxd/TA39nJ46dOAMAaGpxwdXWjsZ6edZZ4krGIUgfnY6HnnoAzhYnUsem4MjuY7jxa9ezi+o86pRE9Hg6g8o0o/XyBBNjju6uRUu1A0p1AhKSVOhydaDqL2/iyyuWyB1aVPD5/RifPwYJCUoAgEKpgKEgCz09PiRwr81LmvPVBTj20QGcqOntlpp09QxMmjNd5qgoHigSEvDL5/6G9z7ch5/c+w08V74diWoVXtn6ojzxyHLXODBpQiamTeyd+nbNzEnQalJkjii6JGcFD1oUAWhzxvd/MgWZNGcG8m6cg1t+dBdmffd65MzOxdw7vix3WFEjQZWAtHQtWlvb8ezmHWhsciNZkwpVIjeMHAxfjw++ni+WHfB198DvZ+szDZ0gCJgx90p0dHXj58++CMfpJow1TEAqt2qIHV3udtT85TUc/vtb6NxzCPU7dsL+0hvsojpPi8ePVmcnRAA9Pj9OnHaj28u1XAZDoVTglv+4E9kFU5CoTcGih8xI1afJHVbUEAQBWQUz8WzZDhw9dhZ/KNuBlImToExgg/RgVG//EK4zLZg4ZxqSR6XBse8IGqqPyB0WxYlv3ftNpGfopePl3/8uEhPVssTC3wghSNSkYuKN83B02wfwnWiCQpWAqYuuZRfVeW5c9XUc+bAWY4052PfRJyhMz8LoyWzBoaHzdnvxqweeQnNzO0ZnatDY5MbG/9qE31bMRHIaW1IvxfTNr0CTqceUBUZUf/wJ0vxJmDRnhtxhURwQRRH3ffvf0dLciklTclB3tB73r/oJ3tn5BsaMHf6lCJjghKin84t9lUS/CF+3PKPEo5WgUCB3vhEejweJ2hRkTc+WOySKF95u3H7bXNhrG3Dn7Vfjn2/swcScTCh9/BkcDEGhwKyiefB4PFBrkjE1j7vUU3gIgoCf//Jn2FL2Fzzx5CP49RNPYfKUybIkNwATnJB0udtxctc+KFQJUEzIRE/dadS/uxvpuTlsxSGKMGVSEowFE5Gf17s66u1fvQpQKJCQwtYbIrnNvWY25l4zGx6PB9/45teRJ2MCzQQnBImaVMz67u1od7vR4G6BwTgDY43TmNwQDQOFUon0PCOaa/YBoggRQOqUqVCoOMiYKFr0RMG6VExwQtS4/wg629ogZmnQZD+GFG0aRs2YLHdYRCNCanY2kkaPhvvsGRw7dRqZE9gFShQNWk+fw/Zn/oZTB44jSZ+KtBIVZhTOkiUWJjgh6Ha3o6FqL8QeH6BSAl4fjr5pQ8b0SWzFIRomysREqEdlwn+2Ue5QiOhz2zf+FacO1gMAOlvbseOZckyaNQ2JqcmXeGf4cZp4iBTK3kXG8PnUZ6WazeNERDRydXd0SclNQE+XFycP1A/wjshighMChVqFRG3wuiRpWaPYetOP7jZPb0sXERHFNVWSGppMfXChAKRPyJQlHtm6qKxWKwDA6XTCYDDAZDINeE5AUVHRsMR2KX5vD7pcbUFlbafYTH6+Lnc7al+xwuU4BSgVONXuw9SbCuUOi4iIIkQQBNxw323Y+tTL8H0+yPjK20zQy7QZtSwJjsPhgM1mw7p16wAAK1as6JPguFwuOBwOlJSUAADWrl0bNQmOUpWA1DEZ8HZ2QcwdB++ew9BNmiB3WFHlqPWD3uQGAHx+nHjvY4yePgXa7Cx5A6O40NPtxSdvVKFh/zGIaQnInTwV4DRxItlNuSYP95b9BHWffIZzHa2Yc+01ssUiSxeVzWaDRqORjjUaDWw2W9A5Wq0WFosFdrtdOidaKBPVyP36QnQZJmDfvmNQfykfk7+yQO6wooqz/mSfMlc/ZUSh2Pb0q6j83204/vFnqH93P958+q9yh0REn+v2dGLi3OlISFaj29N16TdEiCwtOPX19dDr9dKxXq+Hy+Xqc96aNWtQXFwMo9GIP/3pT0O6pyiK8Hg8Q7pGQGd
bB375nXVwnG4CALyJt2F6zYb7Sn/IcTif8/ezL5AvURW2/wcjQUdHR9C/1MvT2obDu+xBZSdqjuHkEQf04+RpCo9FfL4oEir/ZMVhmx0L7luM6pc/wBHNHtz6k29BnZIYluuLojjov7NRM03c6XT2KauurkZ5eTlKS0uxfPlylJeXh3x9r9eL2traoYQoOetolJKbgI/31sK0fz8UCo7bBoBzooCkrh4kJiZAFEW0uLuhbGlGU6182Xws8fn8UCp7n6UjR45KXxPQ3dbZuz39BY4ePYrk1rPDH1CMq6urkzsEiiPONhe6PZ341zN/BwAkJKnw2eHPoFSFL91Qqwe3eacsCU5OTk5Qi01raysMBkPQOVarFQsWLIDRaMSWLVuwdu1a2Gy2fgcjD4ZKpUJubu6Q4g7ocfqQlqyGUqGAs70TYzM0aGxtQ+7UXCQmhSdLjXUzZszAPx59Hh31TfD2+DH3mzejwHSV3GHFhDZ3G1Z99wEs+dpXoNWn4dnfPI9nyp7EtOlT5Q4tajTNr8PRnful4/EzJ2HOgqtljCj2dHR0oK6uDpMmTUJy8vCvUULxaXL2RLxq34ROd29r/bylN2LSrOlhu/7hw4cHfa4sCY7JZML69eul44aGBilxcblc0Gq10uyq89+j0+lCvqcgCEgJ0yDECempuOM6IxSCAnsOncCCmRNxrq0DGq0GCf10zYxEb/3hHzh3/AzUKUnwer3Y/dLbmDwzF2OmcjD2pXzycTVq9u3HJ3uqkZiUiM6OTryz4wN86coCuUOLGkt+ZMa+bR+iYf8x+FMUuPFbS8L28z3SJCcns+4oLERRxGu/eAGdbg9S0zVob3Hj3Wdfw8RnViM1QxuWe1zOMBBZ/hobDAYsWbIEVqsVTqcTK1eulF4rLi5GeXk5zGYzysrKUFNTAwDQ6XQwGo1yhNtHxvhM6EZpIHZ48eVZkyGKIiZPHwcFx99IdFPHYf+LjVi94X68vuWfaGw4h/TsMXKHFRNM183D6nvNmDfdgNTkJHzw6QH82w+Wyx1WVFGqEjD7NhNm3HQlamtrkZDIhTaJ5CYIAgoWz0Nnmwe3P34P/vnr/0VG1uiwJTeXS7bmhoGmfO/YsUP6OjBFPNokZmQgc7wejUd6175RqpTQjtF9sboxYXflp2h1t+OpH/8O7lY3EtQJqD/swFTjFLlDi3p/fe4v+Lrpi71bFptm48UNv8O9jz4sY1RERBcniiI+ea0SrafO4e3f/wMtR87AVX8O7c0uWZIcjlwMwfGP9knJjTpVDX+PHyf3OeDt7pY5suhR8sg9mDRjItytbgDAt390N5ObQZpX0Le/+tq50dF6SUQ0EEEQsPAHd0CdkoT6vYcg+kUU/tstsrXgMMEJgXb0F8tOd7f3JjUiAKWCLTgBb/71LdQdPC4dv/G/VjSdPidjRLEj5bwlFAISNWl9TyQiijLOMy3o6friw35Lg3yr/DPBCcGoKQaIycGzpXQzJkGRwAQnQKVSQZWown/8vx9g2uypUCYoOIV+kA6cbMSBYw3ScaurDTs+Ds8SB/HE7/ejpbEVfr9f7lCICL1dVJ9W7ITfL+LKr5mQkKTCkSo72lvcssTDKT8haDx7Dj/e8gJuviIPUyeMxceHjsDx/nv457JbudDf5xZ+4ybMvu5LSNYkQ52hQs6EHGSMSZc7rJiw4AYT/uXtgcIwBs1NjTjU2o37Hlgld1hR5cDeg/j9I3/A2RON0KSnYdVjJbj6Bi5DQCQnQRDwtf/8NxzfewgTrpwCcZQaudOnIzVdnp0I+JE6BGPGjsYD/3k/8gtn44r5X8L4vFw8Ufook5sLjMrqXVVWmaCEVqY+2Fh141e+jPScHHQnp6Do64vkDieq+H1+PPOz3+Psid6mb3dLG/7nsTJ0d3EMHJHcat/9BO//aSvazrlw/F927Pn7exBlamVlghOimYpEaE61ocH2GeaodcjweOUOiWhEOHviLM5dMJ6rzdmGhiMnZIqIiID
ebuOdL++Au8mJl3/0e7TWNaLuo4M455BnHA4TnBC4T57Fxzs+QV19K06facOhI+ew+9W3Ifo4FuB83vbePW5EUURPJ7dooPDIyMpAmi540HViciLGGrhTPZGcFAoF5ptvBgCp1cbwpVyMypFnDTQmOCE4c8gBpyv4D/bJE63o8bIVJ+DMvoP4cOMLcNadQNfewzj40lZ0t3NTPxo6daIa9/3XPdK2KAkqJb71o7uRouFqvERyEkURRz8MnhDReOyUtG3DcOMg4xAkpPWdsuvzixAE5osBXc42+Lq9OPTKNgBAjyYVvq5uIJV73tDQFd4yD7Pmz8Rn1YfQ3tOGOVfPkTskIgJwztG74W3m5HFoOnYKnc529HTL8+GfCU4IsmdNRYo+DZ7WNqlsylVXcLn48+hyxgOCAIi92z4nJKmRpJdnJD3Fp1RtKmbMno7aWk6hJ4oGgiCg+Of3oHrbLlxlvhH/emkrJk6dDE2mXpZ42OQQggRVAm57yAzDrClIzkhD7rwrsPAHd8gdVlSpfbUCEEX4Ph+X5DnbjBO79skcFRERRVJG9mhcf+9tEBQCxs2Zgqnz82WLhQlOCLrbPKjf/j7GpgKzbpkJbZcLh197G+LnrRUENJ5xoaPTiwOHmnDylAtud5ds/bBERDTyMMEJgSo1Gfop2ehp70D3/uMQ/SJG5+dyHZzznG5sx4HPmtDd7cOZxnYcqWtBq4szqQbr76++htOnzsDr7cELz70En88nd0hERDGFY3BCIAgCMqbm4Mze3r5/ZZIaWsNYmaOKLkm6NHhcwS02yelc7G8w/vXme3jwh/8Jw8RsZI7OwJ7dn6KluRU/e+whuUMjIrqk7jYPoABEbw/83h7Z4mALTgi63O04+I8dAABFhga+zm7U/m07u6jOM/f264KO0zJ1mGaaKVM0sWXB9YW4edENqK9zYM/uT2GYmI3lJd+SOywioks6tqMKHz/7EtpOnEHnrlocKn8TPpmSHLbghCBRk4oZd3wFHe42nE3ogeZMGwzXzGIX1Xnyb5yDtFE61L63Fx5vJ65bdgvUF2xQSkRE8UMURXg7OuH1dODAX94AAPgSkyD6fIBq+NMNtuCESDMhC2kTsiAIAvS5OUhK18kdUtTJmTUV192zBDlfzkOKru/aQdS/D96x4a1t78AwMRuzr54Fx/EGbNn8/8kdFhHRRQmCgClfMUGVkiSVGW6ah4QkeT7csgUnBN1tHuz709/h83ohTMrCZ7W70DjVAOM3b2MrDg3ZTbdcjw2/+29cObcAZxvP4pPdNSj5wXK5wyIiuihRFFHz4hvwejqhTEqEr7MLR/7xFtK/fzfUmtRhj4cJTghUKcnQTRyPM58eAOx1AIBRV0xhckNh42x14a7bvgu3y42vf+M29PT4oFQq5Q6L4oDo86Gldj/aTzQgUwQ6NBqkTJ8ud1gUBwRBwIT5BUhQATm33ogDr+2AfnSmLMkNwC6qkAgKARkzJknHyqRE6CaOly8giiu2Dz7EL/7rVzjX1Izubi9effHv+P1vNssdFsUJ19EjaHfUA34/FKIfniOH0HmuSe6wKA6Iogh4WqEbnQRv01mMGZuG1GQ/fF3yLBHCBCcEXe52HCz/fBZVuga+zi7U/nUbZ1FRWLz71vt9yt7pp4woFJ2NZ/spa5QhEoo3giAgOSsLEEV4jhyG0u+HSqeHQq2WJR52UYUgUZOK6bffjA6XG41qP9JOuWGYx1lU5xP9fjgPf4b2kyeh7+mBt6UFSOFuz4MxacrEQZURhaLN1YkL/9x0dXEhSQqPxIwMKFQq+L29G2yqRo2S7W8jW3BCNGbmNIy+8goIgoCcm+chdcwouUOKKs5Dn8F95Aj8HR1Qe71wVe9DTwe3ahiMO75xG666ZrZ0nDl6FFY//H0ZI6J4su+Dz9Dp6ZaOm0678NnuozJGRPFCFEWc27sXfq8XCVotRABtB2rh6+yUJR624FBEeE6dDC4Q/eg4cwaaSZPlCSiGJCU
n4ZXX/ox33nofBw4chPmb38CoURlyh0VxwtvRjdOOFhimZsLnE9FwpAnpOSq5w6I4IAgCRn3pSrQ1OKDOmYi6vXswPjsbyqSkS785ApjgUEQo1YnwdXT0KaPBUSgUmL/gaugy0pCcLM8vB4pPc2+YDnVC73hBZQLwJdNkqMeMkzkqihcqjQbpefnweDzoTE5B4pgs2WJhFxVFhDZ3GnBev6syTdM7+IyIZJWYGPxrXxAECF5uhEvhIfr9aDvVO2jd39EFb3vHJd4ROUxwKCKSx4zB2Ou+jJQpU+HU6qCbPRsC13Ehkp2g6PtrX5HAn00aOlEUceBvb+KT5/6GpupD6LTZcfDlCng98iQ5THCGQPT5oOzp4fTwAahS05BsyEFXUjIEBX+BEkWDtEmT+pRpp80Y/kAo7giCgLTxo+Hv6UFdxfsQPV1IytBBmSjPNHEmOCFyHjqI5g/ew6jmJjR/8B46m7iOBBFFP13udOjzjVCmpKA7QQXNl65Eol4vd1gUJ9IM49Hs7p2l53J3QUhPh0Km1nsmOCHwetrhOnz4iwK/H017PpYvICKiy6CZOAn6q+ehNWMU1Pp0ucOhOOH3+1G+9o84fuwcGho9OFrXjHeet6LZcUaWeJjghMBz4kSfMtHnQ49My1ETERHJTaFQ4Lrli6FUKtB4yglRBPKum4kMgzwTTJjghECd3v+aJEqZlqMmIiKKBol6XdCEkuQx8q3hxQQnBMmZmVDpdEFlmsncTZxouDjPNOP//vsFvLBqA2pe/ACtJ7lZJJHcRFHEzpd2oKfbiytunA2FSol9b+xCe7NLlni40F+IsgoXoNVRjzNHj2JcXj50XOOFaNi8/uu/oKnuNACg61gnrKUWrHj2oX6nQBPR8BAEAV/7r3/D0d0HMLkwDwkT0pA7LRepGVpZ4hnSb4M//vGPeOCBBwAAVVVVaGtrC0dMMUEQBCRmjka7RguVRiN3OEQjhvNMs5TcBLjPtqLp+OkB3kFEwyVZmwrjzXMBAPqJozF66njZYgk5wSktLYVWq4XJZAIAFBYWwmazhS0win093h7Yq2vR1HgOfp9f7nAoTiRpUpCgDt47SVAqkJIuz6dEIopOISc4BQUFWLp0KQwGQzjjiRm+ri60Hz0CXWsLOhocEP38A36+d994F3/75e+Q6jgG7cnTePbh9fB2e+UOi+JAYkoSrll6Y1DZrCXzkapPkykiIopGIY/BaWho6FNWXV2NW265ZUgBxQJRFNH44S5429xIBOA5chhCdzcyZhbIHVrUOPPJpzBdMw0AoElNxtLb0rHj79uw2HybzJFRPLi6+HpMvHIa6qsPo03owjULF8gdEhFFmZATnPz8fBQXFyM9PR02mw02mw1r1qwJZ2xRq6u5Gd42d1BZ+4kGpOflc7+lz02bMjroWKlUQOkdOWO0KPLGTBmPtLF61NbWyh0KEUWhkLuoCgsLsXHjRuTl5UEURTz++OMoLCwMZ2xRq7/p4ELvC8MeS7Rq9/TtjtKlj5IhEiIiGomGNE3cYDCMmFab86nT0+ETErDno89w5qwT06aOxawFszlF9TxeTSa6urqRmNg7GPRoXSNmLyuWOarY4XG2Y9/2XThZ34Cx2kykTJsod0hERDEl5ATnzjvvxKpVq0bEmJsLeds78Pvf/BP1Z1oBAO+8V4vrak7i36+aK29gUeTGpUvgamzGOcdxNLW2Ir/4Vmg5y2VQPM52vLTm92g75wQAnNh5CHc8tgLZM6fIHBkRUewIucnBbDb3SW6qqqqGHFAsqH5vj5TcBOzaexjd7R3yBBSltKMzkHXFDCh0WqRqUuUOJ2bU/muPlNwAgN/nx0f/eF/GiIiIYk/ILTiCIOCxxx5DTk4ODAYDnE4nrFbriBiH4xX65oVenw9I4ABjGrouT2efsu72vmVERDSwkFtwNm/eDFEU0dLSgn379uH48eNobW0NY2jRa/q8fKhUwVWXnqWF6oLFx4hCMX3BLCguSJbzbpw
tUzRERLEp5BacdevW9WmtGSldVBkZeix/bAXWr3kKqYkpaPN14Ld/epqbbVJYZE7Mwu2PLseHf/sXnM2t+NJX5qHglmvkDouIKKaEnOAUFhaira0NFRUVAIDFixePiO4pADh7pgk/efjnOHeuGRptGtyuNvzwvofwymt/ZpJDYWEomIJRU8eitrYWeXl5codDRDRoRz+sxdE9B9Gp8GLa1FwgRZ44Qk5wHA4HVq9eLW3VUFZWJq2LE+/GZGXirrvvQHNzM2674yt4+tebUPLD5UxuiIhoRNtpeQu7LG9Lx576Fix9YqUssYSc4Gzfvh3l5eVBZRs2bBgRCQ4A/HTtj9De3o4DBw7gf1/djNRUzhK60Lkz57Bzxy64O9yYPm263OEQEVEE+Xp82PN/HwSVndp/HKc/c2Ds9OHftzLkBCc7O7tP2cyZM4cUTKwJtNiw5aavmt12/Orf18Pb1buicc37+/HzPz6KBNWQ1pYkIqIoJfr98Hl7+pR7u7pliGYIs6gcDkefsv424KSRyfK7V6XkBgAOfXoYH7+7R8aIiIgokhLUKky/dlZQmXZMOibkT5InnlDfaDKZcM8998BoNALAiNpsky6t+Wxzn7Km0+dkiISIiIbLzd+/HdqsdBzfewhIUeKmFV+FQqZNqENuwcnPz8cvfvELiKI44jbbpEu7+sarg46VCUrMvX6OTNEQEdFwSFCrULhsIb722Hcx7ba5SMvUyRdLqG90u93Yvn07vve97yEtLQ1VVVVoa2tDWlraoN5vtVoBAE6nEwaDASaTqd/zysrKpJlaRUVFoYYbEZ4WN5zHG+GdPBVIkWkeXJT65mozRNGPqu27kJiaiLv/YynGGrLkDouIiEaIkBOciooKtLS0SMeFhYXYvn37oDbfdDgcsNlsWLduHQBgxYoV/SY4K1aswMaNG6HValFcXBxVCc5H5e/B9tKbEH1+HCz/ELc+/E3kfClX7rCihjpRjRU/+S7M/3EX13KJcT6fD16v99InyqCrq0v6V6EIuUFaFiqVCkqZmu6JRoKQExy9Xo+lS5eG9F6bzQaNRiMdazQa2Gy2oCTHbrdL59jt9j5T0uXkOtsC24vbIfpFAEC3pwtvb/o/fPf3D3JGFcWVtrY2NDQ0QBRFuUPpl9/vR0JCAk6ePBlzCY4gCMjOzh50q3c4ndqzH6f22NHp7YY7LR0pM7hTPcWfkBOcffv2wWQyBf1wVldXD6oFp76+Hnq9XjrW6/VwuVxB59TU1KChoUGarbV27VqpxScUoijC4/GE/P7znTxULyU3Ac7TzXC1OKFKUoflHvGio6Mj6F8aPLnrzu/3o76+HqmpqRg1alRUJu+iKKK7uxtqtToq4xuIKIo4d+4c6uvrkZOTM6zJWeO+z3Dc+sVaJZ9ZrEhY/nUkZ6YPWwwU3z61VWNf1T6o0lSYMH5CWK8tiuKgf9ZDTnDMZjPuuOMO5OTkQKPRYP/+/fjFL34R6uXgdDqDjl0uF3Q6nTRLq6amBna7XTq+XF6vF7W1tSHHF3QtfxcEpQKizy+VpY7V4/CxI2G5fjyqq6uTO4SYJWfdKZVKZGVlRW3yIAgCEhMT5Q7jsgmCgLS0NLS0tODgwYPDeu+OD2uCjkW/H5+9/yHUV+QMaxwUn2yv7cQH/7BJxwd2H4T5oW+E9R5q9eAaEkJOcAwGA8rLy1FRUQG32401a9b0u/hff3JycoJabFpbW6WBxOdf//wynU4Hh8MRcoKjUqmQmxu+MTJpP0hE5Z+t6HR5oJ8wCjf/x53IyB4dtuvHi46ODtTV1WHSpElITk6WO5yYInfddXV14eTJk0hKSkJSUtKw338wRFFEV1cXEhMTozYJuxiVSoWJEycOa5L2md0BV7M7qGzM+HEYx3FyNEQ9PT14ZvuzQWXH99cjUUzClPzJYbnH4cOHB31uyAlOaWkpJk6ciMWLF2P16tWorq7GkiVLBtVFZTKZsH79eum4oaFBGn/jcrmg1WphMpl
gsVikcxwOx4AzrQZDEASkhHGm08wb52LS3Omo3rsPX7p6dlivHY+Sk5NZRyGSq+4UCgUUCgWUSmXUDob1+XwAen++ozXGgSiVSigUCiQnJw9rAjnx2qtQU3cSor+3BVqVmgzD1bOg5s8nDVF3Vze83X0nJIg+MWy/wy7ng0zIHb8FBQW466678Morr8BoNOLpp59Ga2vroN5rMBiwZMkSWK1WWCwWrFz5xUZcxcXFUpJjNpthsVhQVlaGNWvWQKvVhhpuRCgSlFCnRecnWyK52Gw2FBcXY8WKFcN6X7vdjhUrVqC4uHhY7xtr0qdkY86qpRg7fxZUV+Qgf/ntUKcxuaGhUyeqUXjL/KCy0eMzkTfnClniCbkFJ5BsbN26FU888QSA3m6kwRpoyveOHTsueQ4RRS+TyYSVK1di8+bNQ76WxWKB2Wwe1LlGoxElJSVYu3btkO8bsH79ejgcDjzzzDNhu2Y0SM3KRPaXr4K7thaqVHYdU/isWnsfRo/PxKdV+5CiT8F3H/y2bHsQhnzXwOwmh8OBvLw8OByOPjOhiGhkCldra2Vl5aATHODyPmQNxoIFC/h7jegyqJPUWPbvS/G1e25DbW0tMsdlyhZLyAnO4sWLYbFY8Le//Q1utxsWiwXp6ZxmSEThYbFYZN/Adyjj/ohIXiEnOBqNBvfdd590zI02iehC52/J4nK5UFJSIr3mcrlgsVhgMBhgs9lgNpuDNu+trKyEw+FAWVkZAAS9NzABwe/3w+v14lvf+lbQfe12OxwOBxwOB1pbW/Hwww9fNM5AHC6XCw6HA1qtFjNnzkRpaSkcDofUde5wOFBcXIyVK1fCYDDA6XRi7dq12LJli5QMBbaXcTgcMBgM7GonkklYOsYeeOABPP300+G4FBHFCbvdDpPJJHVXWSyWoAU7N23ahGXLlklJwMKFC1FeXi7NogR6Z1ien9gAvQlEIGnx+Xx4/fXXsW3bNixZsgRAbzLldDqlxGLhwoVYsmTJgEtMBJKwwD0DW8kYjUasWbMGq1evls51uVzYuHGjdO769euxaNEi6fj+++/HkiVLpHuvWLECBoMh5OUtiCh0YVk+MzAeh4gowGg0Bo3FCcyKPH/8ns32xYJggZaci3G5XCgtLcWqVauksu3btwd1ZblcrqCupUBrysVYrVZprI3BYMDMmTP7Pc/pdEqv2e12vPLKK9IkC4fDgW3btgW12BQVFQUtd0FEw0eeoc1ENCIZDAbY7XYYDAZpZlKgWyjQ8nIxNTU10Gq1QYnTk08+GbSOzIWLhmo0moteN5CEXH311TAajVi8eHGfVqOA8xOn1atXBy1fYbPZoNVqg5K0+vp6fgAkkklYEpxwz1wgovh0fqJht9uxadMmLFiwAIsXL77kSuiRnKm5ZcsW2O122Gw2qcVloCQH+GKcTWCGVyA2g8EQlARxkDKRfMLSRfX888+H4zJEFOdcLheMRiNcLheWL1+OVatWwWw2Q6vVwu3u3T5goBaPwF50/SU5Q0l8AglNYB2dwBY0A3E4HCgtLQ3a/DcwZqe/2DnNnEgeYd3Cdvv27eG8HBHFsAtbXAKL9gXGxASSnYBA647dbgcQPHYmsA+dwWDAokWLpJlVAOB2u6WBwv0JJE4DCczmOt/FWpMCXVOBrjC73Q6dTgeTyYSZM2f2ieViyRIRRU7YEpy2tjZUV1eH63JEFMN0Oh02btwIm80mbcnicrmkVg+j0Yj77rsP69evh81mg81mw8aNG1FZWSldw2AwYOnSpdI5gYTimWeeQWtrK8rKyrBt2zZs374dS5culbq8zp9aXlZWhpqaGlgslgGToMAYGqvVKsX6xBNP9Hs9i8UCu90OrVYLq9WKsrIyLF++XLrGli1bUF1dLd3ParVe1kKFRBQ+giiK4mBOLC4uRm1t7YCvi6IIQRAueo5cAolXQUFBWK/r8XhQW1uLvLw8biQ5ANZR6OSuu87OThw7dgyTJ0+O2t3EfT4fOjs7kZS
UFHObbcpdv3I/XxTfIvV8Xc7f80EPMg5MhczPzx/wnNLS0sFejoiIiChiBt1FlZ+ff8ll0xcsWDDkgIiIiIiGalAJjtvtxp133nnJKY+FhYVhCYqIiIhoKAaV4NTU1GDjxo1IS0uTyl599dU+53EWFREREUWDQY3BmTlzJh555BHMmjUraMbBhes72Gw23HLLLeGPkoiIiOgyDKoFR6PR4IknnkB2dra0nLooin3+a2lpiXS8RERERJc06FlUGo0GixYtko5NJlOfGVVclpyIiIiiQch7UfU3XfxiU8iJaOQQRRFdzc3wdXVCmZiExIwMCIIQ1ntYrVakpqZizpw5Yb0uEcWHQSU4brcb69evh06nw5IlS5CXlxfpuIgoRnlOn0Jr7X74OjulMmVSEvR5+UgZOy4s93C5XNi8eTPuu+++sFyPiOLPoBIcjUYjLbH+yiuv4KWXXsLEiRNhNpuDZlYR0cjmOX0K5/bu6VPu6+zsLZ89JyxJTkVFBRYvXjzk6xBR/LrsLqqlS5di6dKlcLvdePnll+FwOLBgwQLOniIa4URRRGvt/oue01q7H8lZY4fUXWW322EymS66wSYRUchjcDQajdQ8vH//fpSWlkIQBJhMJi74RzQCdTU3B3VL9cfX2Ymu5mYkjRoV8n0cDgeKiopCfj8RjQwhJzjny8/PlwYYb9u2DWvXrsXEiRNx7733huPyRBQDfF0XT24u97z+lJWVwWAwwGq1orq6GsePH8eYMWMwe/bskK9JRPEpLAnO+RYtWoRFixbB7XaH+9JEFMWUiYPbEXuw5/WnpKRE+rq6uhpGo5GTHoioX4PebPNC52+86Xa7sW3btqAyjUYztMiIKKYkZmRAmXTx5EWZ1DtlfKhsNhuqqqpQUVFxyU2AiWhkCjnBqaqqkr4OLAJ4fhkRjSyCIECfd/G1sPR5+WFZD8dkMqG8vBxPP/00srOzh3w9Ioo/l9VF5Xa7UVFRAUEQUFlZ2ef1mpoa3HXXXWELjohiS8rYccDsORFfB4eI6FIuK8HRaDQoLCxEWVkZ6uvr+3xy4qJbRJQydhySs8ZGfCVjIqKLuexBxgaDAevWrUNVVRWngxNRvwRBGNJUcCKioQp5DE5lZSVeffVVtLW14d5778UDDzyA7du3hzO2qFZf58Chz44AAKo++BCdHaFPfSUiIqLwCjnBKSgowF133YWXX34ZeXl5ePrpp9Ha2hrG0KJX49km3H3HPbjn7h/i76+8gZXfWY2S79wPURTlDo2IiIgwhARHq9UC6N0T5tZbbwUA6HS68EQV5TJHj8KXb1yAc03N+P+efwWiKOLrd97KMQZERERRIuQEx+FwoKqqCg6HA3l5eXA4HHC5XOGMLWoJgoDC666RjtM0aZh1pVHGiIiii+j3o/VYA85Wf4bWYw0Q/X65QyKiESbklYwXL16MV155BeXl5XC73bBYLEhPTw9nbFHr7OlG/Hj1WigUCuTNnA77vgN44Ps/xRtvv8pWHBrxmvYfwWHr++h2tUllam0acouuQ2b+1CFf//xNNv1+P66//vohX5OI4s+QNtsURRGlpaV4+umnsWDBAhQUFIQztqg1ZuxoPP2HX6H5XDOm50+B9Z9v4+7v3MXkhka8pv1HsP+Vij7l3a427H+lAvlLFw8pyXG5XHA4HNKWDY888ggTHCLqV8hdVKWlpdBqtTCZTACAwsJC2Gy2sAUW7YpuXYjbv3EbBEHAgz/9d0zNnSx3SESyEv1+HLa+f9FzjljfH1J3lVarhcVigd1ul46JiPoTcgtOQUEBt2cgIonz+Mmgbqn+dLna4Dx+EvrJoW+vsGbNGhQXF8NoNOK5554L+TpEFN/CstlmQHV19ZCCIaLY1d3mCet5A6murkZ5eTl0Oh3uueeeIV2LiOJXyAlOfn4+iouL8cc//hEbNmzAnXfeKXVXEdHIo05LCet5/bFarViwYAGMRiO2bNkCo9GIXbt2hXw9IopfISc4hYWF2Lh
xI/Ly8iCKIh5//HFu3UA0gukmjodam3bRcxK1adBNHB/yPZxOZ9B6WyaTieNwiKhfIY/BaWhogMFgwJo1a+B2u2Gz2aDVavtswElEI4OgUCC36Lp+Z1EFTC26DoIi5M9VMJvNKCsrQ01NDYDe2Zx5eXkhX4+I4lfIv2nOH1ys0Wg44JiIkJk/FflLF/dpyUnUpg15inhASUkJzGYzzGYzFi1aNOTrEVF8uqwWHLfbjYqKCgiCgMrKyj6v19TU4K677gpbcEQUezLzp2LUFZN7Z1W1eaBOS4Fu4vghtdwQEV2uy0pwNBoNCgsLUVZWhvr6+j7dUffdd19YgyOi2CQoFEOaCk5ENFSXPQbHYDBg3bp1qKqq4qBiIiIiikpDmkVFREREFI3YKU5ERERxhwkOERERxZ2Q18EhIhqI3+fHydo6tLe4kZquwfi8SVAoh/55yuVywWKxAIC0o3hAWVkZDAYDAKCoqGjI9yKi2MYEh4jC6vBOO9597nW0nXNJZWmjtLj+3tuQO984pGvbbDa0trZCr9cHld9777145plnoNVqUVxczASHiNhFRUThc3inHW88+WJQcgMAbedceOPJF3F4p31I1y8qKkJOTk5QWW1tLTQaDQDAbrejvLx8SPcgovjABIeIwsLv8+Pd516/6DnvPvcG/D5/WO+7f/9+NDQ0wOFwAADWrl0b1usTUWxigkNEYXGytq5Py82F2s45cbK2Lqz3dbvd0Ol0MBqNMBqNqKmpgd0+tJYiIop9so3BsVqtAHp3BzYYDDCZTBc9V6vVXvQcIpJXe4s7rOcNVnZ2Nk6dOiUd63Q6OBwOGI1DG+9DRLFNlhYch8MBm82GoqIiaXfggbhcLmzevBku18U/GRKRvFLTNWE9b7DmzZuHhoYG6djhcPDDEBHJ04Jjs9mkQYFA7x5XNput319KFRUVWLx48ZDvKYoiPB7PkK9zvo6OjqB/qS/WUejkrruuri74/X74fD74fL5Lnp813YC0UdqLdlOljdIia7phUNfrj81mwwcffAC3240JEybglltugUajwV133YWXXnoJbrcbDz74IFJTU0O+x3Dx+Xzw+/3o6OiA3x/ecUmDIffzRfEtUs+XKIoQBGFQ58qS4NTX1wdN89Tr9f220NjtdphMJqk7ayi8Xi9qa2uHfJ3+1NXVReS68YR1FDo56y4hIQFdXV2DPn/etxbirWcGnsU071sL0e3tBryhxTNnzhzMmTNHOg7EdsMNNwSd19nZGdoNhlFXVxd6enpw9OhRWePgzyZFUiSeL7VaPajzomYdHKfT2afM4XCEbT0LlUqF3NzcsFwroKOjA3V1dZg0aRKSk5PDeu14wToKndx119XVhZMnTyIxMRFJSUmDek/edVdCrVbj/S1b+6yDc92KJZg6Lz+sMYqiiK6uLiQmJg76U100SUhIQE5ODhITE4f93nI/XxTfIvV8HT58eNDnypLg5OTkBLXYtLa2SiuQBgRWJbVaraiurobD4YDBYAh54KAgCEhJSRlS3ANJTk6O2LXjBesodHLVnUKhgEKhgFKphFKpHPT7ppsKkDvPGJGVjC8U6IYSBOGyYowGSqUSCoUCycnJg04gI4E/mxRJ4X6+LueDjCwJjslkwvr166XjhoYGafyNy+WCVqsNWoa9uroaBQUFnBURg3p6euQOgWSgUCqQPXOK3GEQ0Qgmyywqg8GAJUuWwGq1wmKxYOXKldJrxcXFQa07NpsNVVVV2Lp1q7SQF0U/p9OF76/4EWZPvw733n0//mb5p9whERHRCCLbGJyBxtbs2LEj6NhkMnHp9Rj0y8dKUfH6mwCA1hYn1v7kv3H1vDm4In+6zJEREdFIwJWMKSLe+1dl0LEoivjg3SqZoiEiopEmamZRUXyZPHUSTp08E1Q2ZeokeYKhYef3+VG75wBam1qhz9Qjb84VERlkTEQ0ECY4FBE/e+xB/NtdK9Ha0jv9f+GiG3DDwutkjoqGw663duNPT76
A5jPNUllGVgaW//g7mHfz1UO+vtVqhcFgQE1NDfx+P772ta9J5U6nE3a7HUVFRVzNmGiEY4JDEVHwJSMq97yJf731Htrb3fjq7bdCoeAn+Hi3663deGrN04AYXN58thlPrXkaD5Y+MKQkJ7B1S3l5OQwGA66++mp87Wtfw/79+wEAZrMZLpcLN998M3bv3j2E74SIYh3/4lDEpKal4KavfBm5MzhdeCTw+/z405Mv9EluAEhlf37yBfh9oW9LoNVqpUkHDocDhYWFAHrX0rLZbNI5Op2OO4oTjXBswSGisKjdcyCoW6oPETh3phm1ew7AePXQVjS2WCyorKzEb37zGwC9sy2vu+6LLlCn08l1s4hGOLbgEFFYtDa1hvW8izGbzVi2bBk2bNjQ57W1a9fi8ccfH/I9iCi2McEhorDQZ+rDet5AAguBmkwmbNu2Dbt27ZJes1qtMJlMYdvDjohiFxMcIgqLvDlXICMrAxhoqxgBGJWVgbw5V4R8D4vFgk2bNknHOp0OWq0WQO+q51qtFkVFRbDb7Vz5nGiE4xgcIgoLhVKB5T/+Tu8sKgHBg40/T3q+++PvDGk9nMWLF8Nms8Fms6GyshJLly5FXl4eHA4HVq9eLZ3ncrlw8ODBkO9DRLGPCQ4Rhc28m6/Gg6UP9FkHZ9SYDHw3DOvgBFpogN4uKp/Ph87OThgMBk4LJ6IgTHCIKKzm3Xw1rr5hLlcyJiJZMcEhorBTKBVDngpORDQU/EhFREREcYcJDhEREcUdJjhEREQUd5jgEBERUdzhIGMiCjufz4cPd36Ms2eaMCYrE9fMnwulUhm265eVlcFgMMDv9+P6668Pes1qtUKr1cJkMoXtfkQUe5jgEFFYWV9/E7/4r1/h1MkzUtm48Vl47L9/iqLbvjLk669YsQIbN26EVqvFHXfcEZTguFwubN68GStXrhzyfYgotrGLiojCxvr6m/j+PQ8GJTcAcPrUWXz/ngdhff3NIV3fbrdDo9FIX//1r38Ner2iogKLFy8e0j2IKD4wwSGisPD5fPjFf/0Koij2eS1Q9otHfg2fzxfyPWpqatDQ0CDtM/XYY49Jr9ntdnZLEZGECQ4RhcWHOz/u03JzPlEUcerEaXy48+OQ7+FyuaDT6WA0GmE0GmG321FbWwsAcDgcMBgMIV+biOILx+AQUVicPdMU1vP6YzAYgpIYnU6HEydOYM+ePcjJyYHVakV1dbWU7BiNxpDvRUSxjQkOEYXFmKzMsJ7XH5PJBIvFIh03NDRg3rx5GD16tDRLq7q6GgUFBUxuiEY4JjhEFBbXzJ+LceOzcPrU2X7H4QiCgLHjs3DN/Lkh30Or1cJsNsNiscDlcuHBBx+UBh0DgM1mQ1VVFRwOB4xGI7usiEYwJjhEFBZKpRKP/fdP8f17HoQgCEFJjiAIAIDHnvjJkNfDKSoqkr72+Xzo7OyUjk0mE8rLy4d0fSKKDxxkTERhU3TbV/CH55/C2HFjgsrHjs/CH55/Kizr4BARDQZbcIgorIpu+wq+svimiK5kTER0KUxwiCjslEolChdcI3cYRDSCsYuKiIiI4g4THCIiIoo7THCIiIgo7jDBISIiorjDBIeIotb5qxYHrF+/Hvfff39Yrh/OaxFRdOEsKiKKWpWVlTCbzUFlCxYsgMvlCsv1w3ktIoouTHCIKCpZLBY0NDT0KTeZTGG7RzivRUTRhQkOEUVEWVmZtBeUw+FASUkJgN79okpLS6HT6aRtF1wuF1pbW/Hwww9L51RWVsLhcKCsrAwAUFJSArvdjtLSUjgcDuzYsQMAsGvXLvz2t7+FIAh4/PHH4XA44HQ6YbfbsW7dOlgsFuh0OmzduhWrVq2SNuHs71oOhwPFxcVYuXIlDAYDnE4n1q5diy1btkjJUOD7CuxYXlRUJH1P2dnZWLZsGSorKwFA+n6IaPhxDA4Rhd39998Po9GIoqIi6b8VK1YA6G01WblyJWw2G0wmE8xms5T8rF27Vjp
n2bJlMBgMKCkpkV43Go1Ys2ZN0L3mzZuHBx98EC6XC06nE0VFRTCbzbDZbFi/fj3MZjOKioqwZMkSPProo9L7+ruWy+XCxo0bUVJSgqKiItTX12PRokVScnP//fdLSU1JSQksFgvsdrv0PQVanJYsWRKBWiWiy8EEh4jCym63o6qqKqj7J9AaYrPZAPTuCn7hbt+rVq2CxWKBw+G47Hvq9Xo4HI4+9zyf0Wi85LWdTidmzpwpfR+vvPIKnnjiCQC9rTvbtm0L2uyzqKhIGgit1WqlZMdoNLL1hkhm7KIiorCqqanpk1wAQHZ2NiorKwcc96LVaqUkob/3X8qF79FoNMjJybmsa5wf2+rVq7FmzRpotVoAvd1mWq1WStIAoL6+PihpCiVuIooMJjhEFFaRmpUUGPMyHALjbAIzuBwOB1wuFwwGQ1ASdGGyFkiGiEh+7KIiorAymUz9dgU1NDSgoKBgwPe5XC64XC5pEPCF7HZ72GK8GIfDgdLSUqxbt04qs9lsA3ZxcZo5UXRigkNEYWU0GlFYWBjUlRNITs4fv2K324OSg02bNsFsNkutNIGZSkBv0jFQ4jMQt9sdUvyBrqlAHHa7HTqdDiaTCTNnzoTVag06v6KiIqT7EFFkMcEhorB75plnUFlZCYvFAovFgq1bt6K8vDzoHKPRCJvNBpvNhrKyMuj1+qBWE4PBgKVLl2L9+vWw2WwwGAyw2+3YtGlT0PTx2tpabN68OaisrKwMNTU1sFqtsNlssNvtWL9+PVwul/Rvf9cKzIrSarWwWq0oKyvD8uXLpa6nLVu2oLq6GhaLBVarFVarVZqxVVZWJl0rlIHSRBRegiiKotxBRFp1dTUAXLR5PBQejwe1tbXIy8tDSkpKWK8dL1hHoZO77jo7O3Hs2DFMnjwZSUlJYb12YN2YC5Oey+Xz+dDZ2YmkpCQolcowRTc8Ilm/gyH380XxLVLP1+X8PWcLDhEREcUdJjhEREQUd5jgENGwCoxXsdvt0tgXIqJw4zo4RDSsTCYTN7kkoohjCw4RERHFHSY4REREFHeY4BAREVHcYYJDREREcUe2QcaB5c6dTmefDezOP8fpdMJut6OoqIgDE4lihM/nw969e9HU1ITMzEzMnj172Bbiczgc0nYL/J1BNHLJkuA4HA7YbDZpWfYVK1b0+UUU2LvGbDbD5XLh5ptvxu7du4c9ViK6PG+//TZKS0tx9uxZqWzMmDFYs2YNbrrppojf32AwoLCwMOL3IaLoJksXlc1mg0ajkY41Gk3QxnxAb8tOoEyr1UKn0w3bbsJEFJq3334bP/7xj4OSGwA4e/YsfvzjH+Ptt9+WKTIiGmlkacGpr6+HXq+XjvV6fdCuwkDftTKcTudl7yZ8PlEU4fF4Qn5/fzo6OoL+pb5YR6GTu+66urrg9/vh8/ng8/kueb7P50NpaelFz9mwYQOuvfbaIXVXbdiwAYWFhaiqqsJdd92FMWPGQBRFbN68GRqNRvowNH/+fCnu89+zdOlSGAwG7N+/H/fccw+eeuopuN1uVFRUYOXKlWhtbcW2bdtgNpuRn58fcpyX4vP54Pf70dHRAb/fH7H7DETu54viW6SeL1EUIQjCoM6NmoX+nE7ngK+tXbsWjz/++JCu7/V6UVtbO6RrDKSuri4i140nrKPQyVl3CQkJ6OrqGtS5e/fu7dNyc6EzZ87gww8/xOzZs0OOKTU1FXPmzIHX68XmzZvxyCOP4I033sCxY8fwyCOPAADef/99eL1edHZ29nnPpk2b8Mgjj2DKlCnIy8tDcnIy5syZg2PHjuG9997D8uXLkZycjBdffFG6XiR0dXWhp6cHR48ejdg9BoM/mxRJkXi+1Gr1oM6TJcHJyckJarFpbW2FwWDo91yr1QqTyYSioqIh3VOlUiE3N3dI17hQR0cH6urqMGnSJCQnJ4f12vGCdRQ6ueuuq6sLJ0+eRGJi4qB2u76wFfZi5w1l92yVSoV//vOfcLvdaGt
rAwDs3r0bs2bNkq6bnp4OlUolHV/4nkC5QqFAZmYmkpKSoFKpoNFokJSUhMTERCiVyojv8p2QkICcnBwkJiZG9D79kfv5ovgWqefr8OHDgz5XlgTHZDJh/fr10nFDQ4PUHeVyuaDVagH0jtXRarUwmUyw2+3QarUDJkKXIghCWLdsP19ycnLErh0vWEehk6vuFAoFFAoFlErloLqUxowZM6jrjhkzJuQuKovFAqfTiZUrV8Jut6O6uhq1tbXS74jAddva2qS4L3xPTU0NDhw4AKPRCEEQkJGRAaVSCUEQkJ6eDqVSGfS9R0rgPsnJyRFPpC6GP5sUSeF+vgbbPQXIlOAYDAYsWbJEmga+cuVK6bXi4mKUl5fD6XRi9erVUrnL5cLBgwflCJeIBmH27NkYM2bMRbupsrKyhtQ9NXPmTNjt9qBJCSdOnMBtt92GEydOSMtPOBwOvPzyyzCZTP2+x+FwSP9aLBaYzWZp8oPJZMLWrVtRU1MDh8MR8ocqIpKXIIqiKHcQkVZdXQ0AKCgoCOt1PR4PamtrkZeXx09AA2AdhU7uuuvs7MSxY8cwefLkQbcwBGZRDeTJJ58M61Rxn8+Hzs5OJCUlDds6O+ESSv2Gk9zPF8W3SD1fl/P3nCsZE1HY3HTTTXjyySf7dFdlZWWFPbkhIrqYqJlFRUTx4aabbsL1118v20rGREQAExwiigClUomrrrpK7jCIaARjFxURERHFHSY4REREFHeY4BAREVHcYYJDRGHV3d2Njz76CIEVKERRxEcffYTu7m7ZYnI4HCguLu6zqS8RxS8mOEQUNt3d3XjooYfwve99D0899RT8fj82bNiA733ve3jooYdkS3IMBgMKCwtluTcRyYMJDhGFRSC52blzJwDgpZdewre+9S28/PLLAICdO3fKmuQQ0cjCaeJEFBb79u1DVVVVUNmhQ4ekr0VRRFVVFfbt2xfyFHK73Q6HwwGtVgur1YrHHnsMALBhwwZMnDgR9fX1KCgogNPphNlsRllZGbRaLXQ6Hfbv348FCxaE/g0SUUxhCw4RhcXcuXOxbNmyi55z9913Y+7cuSHfY+vWrXA6nZg5cybMZjMAoLa2Fg6HA2azGTk5OVJyY7VapfKioiLuKUU0TA7vtOODLRU4ufsIerq8ssXBBIeIwkIQBDz44IOYNm1av69PmzYNP/rRjy5rN+ALrVq1Cna7HcXFxdi0aRMAIDs7G263Gy6XC3a7HTNnzgQA2Gw2GI1G6b0ajSbk+xLR4FS9+CbeePJF1L61B0e3f4qK9S/LFgsTHCIKC1EU8dRTTwV1S53v0KFD+M1vfoOh7O9bUVGBdevWYceOHdDr9XA4HNBoNLj33nvhcDiwbt06KakxmUyor6+X3ut2u0O+LxFdmq/Hh72vB89UPH2gHqcO1g/wjshigkNEYfHxxx9LA4oH8tJLL+Hjjz8O+R719fWwWq2wWq0wGAxSt1NVVVVQaw0AFBUVQa/XS+c7HI5LxkdEoRP9fvi8PX3Ke7rl6abiIGMiCotZs2ahsLAQO3fulFpppk2bJrXoCIKA+fPnY9asWSHf4+GHHw469vl8AHoTnBUrVkjlGzduhFarRUlJiVRWVFQU8n2J6NIS1CrM+PKXUPuvvVKZdmwGJuRPkiUetuAQUVio1Wps2LAB8+fPB9A7oPgvf/mLNPB4/vz52LBhA9RqdVjvu3HjRjz//PPYsmULtmzZgjVr1sBisYT1HkQ0ODd973YU3r0Q467Iwdg5k3Hrz74FhVIpSyxswSGisAkkOfv27cPcuXMhCAIeeugh3HDDDZg1a1bYkxsAuPPOO2G1WpGeng4A0swpIhp+CaoEXHPXjZh56zzU1tYibZRWvlhkuzMRxSW1Wh20zo0gCCGvezMY2dnZyM3NhVKmT4lEFJ3YRUVEFzWUWU80MNYrUWQxwSGifgVaRLi1QmQE6pUtT0SRwS4qIupXQkICUlJS0NjYCJVKBYUi+j4P+Xw+dHV
1AYitRMHv96OxsREpKSlISOCvYaJI4E8WEfVLEASMGzcOx44dw/Hjx+UOp19+vx89PT1ISEiIygTsYhQKBXJycoa0sjMRDYwJDhENSK1WY9q0aVHbTdXR0YGjR48iJycHycnJcodzWdRqdcwlZUSxhAkOEV2UQqFAUlKS3GH0y+/3AwASExOjNkYikgc/PhAREVHcEcQRMFdxz549EEUx7IuMiaIIr9cLlUrFfvQBsI5Cx7q7NNZR6Fh3FEmRer66u7shCALmzJlzyXNHRBdVpH54BUGIyMqs8YR1FDrW3aWxjkLHuqNIitTzJQjCoP+mj4gWHCIiIhpZOAaHiIiI4g4THCIiIoo7THCIiIgo7jDBISIiorjDBIeIiIjiDhMcIiIiijtMcIiIiCjuMMEhIiKiuMMEh4iIiOIOExwiIiKKO0xwiIiIKO6MiM02L8VqtQIAnE4nDAYDTCbToM+53HKXywWLxQIAKCkpieB3FT7DWT9WqxUGgwE1NTUAALPZHMHvbPhFoi5j8ZkaSKSetXh+pvozlHqMp+eJIm8wz5psz5Q4wtXX14uPPvqodLx8+fJBn3O55aIoihUVFeKTTz4pbt68OXzfRAQNZ/04nU7xjjvukL6ePn16GL8T+UWiLkUx9p6pgUSifuL9merPUOpRFOPneaLIG8yzJoryPVMjvovKZrNBo9FIxxqNBjabbVDnXG45ABQVFSEnJydS307YDWf9aLValJeXAwAcDke/nwRiWSTqEoi9Z2ogkaifeH+m+jOUegTi53miyBvMswbI90yN+C6q+vp66PV66Viv18Plcg3qnMstj0Vy1I/FYkFlZSU2btwY7m9HVpGoy3gSyfqJ12eqP0OpR6LLEe3P0YhvwemP0+kM+ZzLLY9Fka4fs9mMZcuWobS0NLQAY0gk6jKehKt+RtIz1Z+h1CPR5Yim52jEJzgXNpu1trbCYDAM6pzLLY9Fw10/gezfZDKhoqKi3+bOWBWJuownkaqfeH6m+jOUeiS6HNH+HI34BMdkMqG6ulo6bmhoCJpNcLFzLrc8Fg1n/VgsFmzatEkq1+l00Ol0Efvehlsk6jKeRKJ+4v2Z6s9Q6pHocgzmWZOTIIqiKHcQcjt/mptOp0NRUREAYOHChSgvL4dWqx3wnMstt9lsePnll+F2u2E2m6XyaDZc9eNyuaSBoZWVldDr9XE3TTUSdRmLz9RAwl0/I+GZ6s9Q6jGenieKvME8a3I9U0xwiIiIKO6M+C4qIiIiij9McIiIiCjuMMEhIiKiuMMEh4iIiOIOExwiIiKKO0xwiIiIKO4wwSGiy2az2VBcXAyLxTLs9y4uLpbW3iAiGggTHCK6bCaTCYsXL5bl3mvWrImqVXejYcVWIuqLCQ4RxRSTyQStVit3GAAAh8OBiooKucMgon4wwSEiClFZWZncIRDRABLkDoCI4oPNZoPdbofBYEB1dTUefvhhAL171Wi1WjgcDtTX10vlNpsNa9eulfaGslgsWLNmDUpLS2E2m2EwGOBwOFBZWYlnnnkGAGC32/Hoo4/CbDbDbDbDZrNd9PzA/R0OB7RaLex2O4qKimC32/vsSdVfPOXl5ReNv6amBq2trQB6W5YMBsOA9UBEw4stOEQ0ZA6HA6WlpSgpKUFRURFycnKk1o3Vq1fDYDDAbDbD7XZLA4QDu4Db7XaYzWasXLkSJpMJhYWFqKyshMlkkt5jt9sBAEajMWjsz6XOd7lcePTRR1FSUiIlRAaDod8NN/uL51LxFxYWYsGCBUEJ1kD1QETDiy04RDRkL7/8MnQ6HWw2m1RWXV0NANi9e7fUAtLa2gqHwyGdo9FooNfrAUDaYViv10tlgXOcTueA977c8y+mv3guFv+FLlYPRDS8mOAQUVjk5+cHzW4ym80AgE2bNkGv16OoqAgGg6HP+/orCxetVoulS5eirKwMWq1Wamm5mAtfv1T8AYHZVAPVAxENL3ZREVHIAn/UlyxZgqqqqqDXbDY
bbDYb9u/fj5KSEhgMBrjdbum1gFBaWy5narZer5e6qPrrmrrQ+fEMJv7zzx2oHoho+LEFh4gum91ul6ZHm0wmGI1GrFmzBuvXr0dBQYFUDvR2+wT+yBcVFeHll1+WBuNWVVVh//79MBgM0viX86/rcDiwf/9+6T0ulwsVFRXQ6XQoKiqSjgc6PzAuZuHChdBqtdL7+mtV6S+emTNnDhg/ACxbtgxlZWWwWCzSIOOB6oGIhpcgiqIodxBERJEQmNEUaLlxOBxYv349li1bxsSDKM6xi4qI4lZgdlWAwWDAkiVLLjpQmIjiA1twiCiuBaZpB7qVnE4nB/4SjQBMcIiIiCjusIuKiIiI4g4THCIiIoo7THCIiIgo7jDBISIiorjDBIeIiIjiDhMcIiIiijtMcIiIiCjuMMEhIiKiuMMEh4iIiOLO/w/CPpvSRBC6tQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "df_prepared = df.rename(columns={'learning_rate': 'learning rate', 'batch_size': 'batch size'})\n", + "fig, ax = plt.subplots(1, 1, figsize=set_size(width, subplots=(1,1)))\n", + "sns.scatterplot(x=\"learning rate\", y=\"test/f1-score\",\n", + " style=\"optimizer\", hue=\"batch size\",\n", + " palette=sns.cubehelix_palette(5, light=0.8, gamma=1.2),\n", + " sizes=(5, 30), linewidth=0, s=15,\n", + " data=df_prepared, ax=ax)\n", + "ax.set_xscale('log')\n", + "ax.set_xticks([0.0001, 0.0003, 0.001, 0.003, 0.01, 0.1])\n", + "ax.set_xticklabels(labels = ['0.0001', '0.0003', '0.001', '0.003', '0.01', '0.1'])\n", + "fig.tight_layout()\n", + "fig.savefig(fig_save_dir + 'classifier-hyp-metrics.pdf', format='pdf', bbox_inches='tight')" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "44e275ab", + "metadata": {}, + "outputs": [], + "source": [ + "parameters_dict = {\n", + " 'optimizer': {\n", + " 'values': ['adam', 'sgd']\n", + " },\n", + "}\n", + "\n", + "parameters_dict.update({\n", + " 'batch_size': {\n", + " 'values': [4, 8, 16, 32, 64]},\n", + " 'learning_rate': {\n", + " 'values': [0.0001, 0.0003, 0.001, 0.003, 0.01, 0.1]},\n", + " 'step_size': {\n", + " 'values': [2, 3, 5, 7]},\n", + " 'gamma': {\n", + " 'values': [0.1, 0.5]},\n", + " 'beta_one': {\n", + " 'values': [0.9, 0.99]},\n", + " 'beta_two': {\n", + " 'values': [0.5, 0.9, 0.99, 0.999]},\n", + " 'eps': {\n", + " 'values': [1e-08, 0.1, 1]}\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "7d3c2860", + "metadata": {}, + "outputs": [], + "source": [ + "params = pd.DataFrame.from_dict(parameters_dict)\n", + "params = params.transpose()\n", + "params['values_string'] = [', '.join(map(str, l)) for l in params['values']]\n", + "params['values'] = params['values_string']\n", + "params = params.drop(['values_string'], axis=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + 
"id": "acc3a77e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
values
optimizeradam, sgd
batch_size4, 8, 16, 32, 64
learning_rate0.0001, 0.0003, 0.001, 0.003, 0.01, 0.1
step_size2, 3, 5, 7
gamma0.1, 0.5
beta_one0.9, 0.99
beta_two0.5, 0.9, 0.99, 0.999
eps1e-08, 0.1, 1
\n", + "
" + ], + "text/plain": [ + " values\n", + "optimizer adam, sgd\n", + "batch_size 4, 8, 16, 32, 64\n", + "learning_rate 0.0001, 0.0003, 0.001, 0.003, 0.01, 0.1\n", + "step_size 2, 3, 5, 7\n", + "gamma 0.1, 0.5\n", + "beta_one 0.9, 0.99\n", + "beta_two 0.5, 0.9, 0.99, 0.999\n", + "eps 1e-08, 0.1, 1" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "params" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "73a26951", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
optimizerbatch_sizelearning_ratestep_sizegammabeta_onebeta_twoeps
valuesadam40.000120.10.90.50.0
valuesadam40.000120.10.90.50.1
valuesadam40.000120.10.90.51
valuesadam40.000120.10.90.90.0
valuesadam40.000120.10.90.90.1
...........................
valuessgd640.170.50.990.990.1
valuessgd640.170.50.990.991
valuessgd640.170.50.990.9990.0
valuessgd640.170.50.990.9990.1
valuessgd640.170.50.990.9991
\n", + "

11520 rows × 8 columns

\n", + "
" + ], + "text/plain": [ + " optimizer batch_size learning_rate step_size gamma beta_one beta_two \\\n", + "values adam 4 0.0001 2 0.1 0.9 0.5 \n", + "values adam 4 0.0001 2 0.1 0.9 0.5 \n", + "values adam 4 0.0001 2 0.1 0.9 0.5 \n", + "values adam 4 0.0001 2 0.1 0.9 0.9 \n", + "values adam 4 0.0001 2 0.1 0.9 0.9 \n", + "... ... ... ... ... ... ... ... \n", + "values sgd 64 0.1 7 0.5 0.99 0.99 \n", + "values sgd 64 0.1 7 0.5 0.99 0.99 \n", + "values sgd 64 0.1 7 0.5 0.99 0.999 \n", + "values sgd 64 0.1 7 0.5 0.99 0.999 \n", + "values sgd 64 0.1 7 0.5 0.99 0.999 \n", + "\n", + " eps \n", + "values 0.0 \n", + "values 0.1 \n", + "values 1 \n", + "values 0.0 \n", + "values 0.1 \n", + "... ... \n", + "values 0.1 \n", + "values 1 \n", + "values 0.0 \n", + "values 0.1 \n", + "values 1 \n", + "\n", + "[11520 rows x 8 columns]" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pd.DataFrame.from_dict(parameters_dict).explode('optimizer').explode('batch_size').explode('learning_rate').explode('step_size').explode('gamma').explode('beta_one').explode('beta_two').explode('eps')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f163a6c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/classification/classifier/hyp.ipynb b/classification/classifier/hyp.ipynb new file mode 100644 index 0000000..246b7d2 --- /dev/null +++ b/classification/classifier/hyp.ipynb @@ -0,0 +1,4644 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": 
import wandb

wandb.login()

import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from torchvision import datasets, transforms
from torchvision.models import resnet50, ResNet50_Weights
from torch.utils.data import Dataset, DataLoader, random_split, SubsetRandomSampler
import numpy as np
import os
import time
import copy
import random
from sklearn import metrics

# Reproducibility for the 90/10 random_split and augmentation sampling.
torch.manual_seed(42)
np.random.seed(42)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def build_dataset(batch_size):
    """Build train/test dataloaders over the 'plantsdata' ImageFolder.

    Returns a tuple (dataloaders, dataset_sizes), each a dict keyed by
    'train' / 'test'.
    """
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # ImageNet channel statistics, required by the pretrained ResNet-50.
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'test': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    data_dir = 'plantsdata'
    dataset = datasets.ImageFolder(data_dir)

    # 90/10 split
    train_dataset, test_dataset = random_split(dataset, [0.9, 0.1])

    # BUG FIX: random_split returns two Subset views of the SAME underlying
    # dataset object, so the original pair of `.dataset.transform = ...`
    # assignments made the second one win — the train split silently used the
    # deterministic *test* transform and lost its augmentation.  Give each
    # split its own shallow copy so the transforms stay distinct.
    train_dataset.dataset = copy.copy(dataset)
    train_dataset.dataset.transform = data_transforms['train']
    test_dataset.dataset = copy.copy(dataset)
    test_dataset.dataset.transform = data_transforms['test']

    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=batch_size,
                             shuffle=True, num_workers=4)

    dataloaders = {'train': train_loader, 'test': test_loader}
    dataset_sizes = {'train': len(train_dataset), 'test': len(test_dataset)}

    return (dataloaders, dataset_sizes)


def build_network():
    """ResNet-50 pretrained on ImageNet with a fresh 2-class head, moved to `device`."""
    network = resnet50(weights=ResNet50_Weights.DEFAULT)
    num_ftrs = network.fc.in_features

    # Replace the classifier head with a linear layer for the 2 classes.
    network.fc = nn.Linear(num_ftrs, 2)

    return network.to(device)


def build_optimizer(network, optimizer, learning_rate, beta_one, beta_two, eps):
    """Construct the optimizer named by `optimizer` ('sgd' or 'adam').

    beta_one / beta_two / eps apply only to Adam; SGD uses a fixed 0.9
    momentum.  Raises ValueError for an unknown name (the original silently
    returned the name string, which only failed much later in training).
    """
    if optimizer == "sgd":
        return optim.SGD(network.parameters(),
                         lr=learning_rate, momentum=0.9)
    if optimizer == "adam":
        return optim.Adam(network.parameters(),
                          lr=learning_rate,
                          betas=(beta_one, beta_two),
                          eps=eps)
    raise ValueError(f"unknown optimizer: {optimizer!r}")


def train_epoch(network, loader, optimizer, criterion, scheduler, dataset_sizes):
    """Run one training epoch and return (epoch_loss, epoch_acc).

    Logs the per-batch loss to wandb and steps the LR scheduler once at the
    end of the epoch.
    """
    network.train()
    running_loss = 0.0
    running_corrects = 0
    for data, target in loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()

        # Forward pass (grad is already enabled in train mode).
        outputs = network(data)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, target)

        # Weight by batch size so epoch_loss is a proper per-sample mean.
        running_loss += loss.item() * data.size(0)
        running_corrects += torch.sum(preds == target.data)

        # Backward pass + weight update.
        loss.backward()
        optimizer.step()

        wandb.log({'train/batch_loss': loss.item()})

    scheduler.step()

    epoch_loss = running_loss / dataset_sizes['train']
    epoch_acc = running_corrects.double() / dataset_sizes['train']

    return (epoch_loss, epoch_acc)


def test(network, loader, optimizer, criterion, dataset_sizes):
    """Evaluate on the test loader.

    Returns (epoch_loss, epoch_acc, precision, recall, f1), treating class 1
    as the positive class.  `optimizer` is unused (no update happens in eval)
    but is kept in the signature for compatibility with existing callers.
    """
    network.eval()
    running_loss = 0.0
    test_corrects = 0
    tp = fp = fn = 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)

            outputs = network(data)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, target)

            running_loss += loss.item() * data.size(0)
            test_corrects += torch.sum(preds == target.data)

            # Explicit binary confusion counts.  (The original encoded them
            # via preds/target division: TP->1, FP->inf, TN->nan, FN->0 —
            # equivalent for 0/1 labels but much harder to read.)
            tp += torch.sum((preds == 1) & (target == 1)).item()
            fp += torch.sum((preds == 1) & (target == 0)).item()
            fn += torch.sum((preds == 0) & (target == 1)).item()

    # BUG FIX: guard the metric denominators — an epoch with no positive
    # predictions (or no positives in the split) raised ZeroDivisionError
    # in the original.
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0

    epoch_loss = running_loss / dataset_sizes['test']
    epoch_acc = test_corrects.double() / dataset_sizes['test']

    return (epoch_loss, epoch_acc, precision, recall, f)
def train(config=None):
    """Run one sweep trial: build data/model/optimizer from the wandb config,
    then train for config.epochs epochs, logging train and test metrics
    every epoch.
    """
    # Initialize a new wandb run.  When launched via wandb.agent, the config
    # is supplied by the Sweep Controller.
    with wandb.init(config=config):
        config = wandb.config

        dataloaders, dataset_sizes = build_dataset(config.batch_size)
        network = build_network()
        optimizer = build_optimizer(network, config.optimizer,
                                    config.learning_rate, config.beta_one,
                                    config.beta_two, config.eps)
        criterion = nn.CrossEntropyLoss()
        # Decay the LR by config.gamma every config.step_size epochs.
        exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                     config.step_size,
                                                     config.gamma)

        for epoch in range(config.epochs):
            epoch_loss, epoch_acc = train_epoch(network, dataloaders['train'],
                                                optimizer, criterion,
                                                exp_lr_scheduler, dataset_sizes)
            wandb.log({"epoch": epoch,
                       'train/epoch_loss': epoch_loss,
                       'train/epoch_acc': epoch_acc})

            test_loss, test_acc, test_precision, test_recall, test_f = test(
                network, dataloaders['test'], optimizer, criterion,
                dataset_sizes)
            wandb.log({'test/epoch_loss': test_loss,
                       'test/epoch_acc': test_acc,
                       'test/precision': test_precision,
                       'test/recall': test_recall,
                       'test/f1-score': test_f})


# Random-search sweep that maximizes test-epoch accuracy.
sweep_config = {
    'method': 'random'
}

metric = {
    'name': 'test/epoch_acc',
    'goal': 'maximize'
}
sweep_config['metric'] = metric

# Hyperparameter search space (one dict literal instead of the original
# two-step dict + update()).
parameters_dict = {
    'optimizer': {'values': ['adam', 'sgd']},
    'epochs': {'value': 10},
    'batch_size': {'values': [4, 8]},
    'learning_rate': {'values': [0.1, 0.01, 0.003, 0.001, 0.0003, 0.0001]},
    'step_size': {'values': [2, 3, 5, 7]},
    'gamma': {'values': [0.1, 0.5]},
    'beta_one': {'values': [0.9, 0.99]},
    'beta_two': {'values': [0.5, 0.9, 0.99, 0.999]},
    'eps': {'values': [1e-08, 0.1, 1]}
}
sweep_config['parameters'] = parameters_dict

# NOTE(review): in the notebook the sweep is then registered with
# sweep_id = wandb.sweep(sweep_config, project="pytorch-sweeps-demo")
"text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run sparkling-sweep-1 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/znahtehx" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "bb4b99390e384bd5912f1133277e4a65", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.127552…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▂▅▆▅▇▄▇▃█
test/epoch_loss█▃▂▂▃▂▂▁▂▂
test/f1-score▁▃▅▆▄▆▄▇▃█
test/precision▁▁▃▅▆▇▄█▃▇
test/recall▁▆▇▆▁▄▃▄▃█
train/batch_loss█▇▆▆▆▆▅▄▄▃▅▅█▆▃█▆▇▂▆▅▅▁▃▆▄▃██▅▆▄▆▅▄▂▂▇▇▆
train/epoch_acc▁▆▇▇▇███▇█
train/epoch_loss█▄▂▂▂▁▁▁▂▁

Run summary:


epoch9
test/epoch_acc0.85556
test/epoch_loss0.6166
test/f1-score0.86022
test/precision0.81633
test/recall0.90909
train/batch_loss0.66533
train/epoch_acc0.75676
train/epoch_loss0.61072

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run sparkling-sweep-1 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/znahtehx
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_210021-znahtehx/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: qutqx8ux with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_210951-qutqx8ux" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run stoic-sweep-2 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/qutqx8ux" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "acea58027ff945afa7cd0132f556317c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.004 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.129798…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▂▁▆▆█▇▆▃▅
test/epoch_loss█▅▃▁▁▁▁▂▂▂
test/f1-score▄▁▆█▇███▆▇
test/precision▁▃▁▅▆█▆▅▃▄
test/recall▅▁██▆▆██▆█
train/batch_loss▄▁▃█▃▇▅▁▄▄▅▃▃▂▂▅▅▂▂▅▃▃▅▄▂▃▃▂▄▃▃▆▅▂▂▄▅▁▂▂
train/epoch_acc▁▃▄▇▆▇█▇▇█
train/epoch_loss█▄▃▂▂▁▁▁▁▁

Run summary:


epoch9
test/epoch_acc0.7
test/epoch_loss0.55412
test/f1-score0.71579
test/precision0.59649
test/recall0.89474
train/batch_loss0.78966
train/epoch_acc0.66585
train/epoch_loss0.61866

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run stoic-sweep-2 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/qutqx8ux
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_210951-qutqx8ux/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 9j8etw77 with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_211850-9j8etw77" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run hopeful-sweep-3 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/9j8etw77" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "221cf6c293624e52a4c9c607f0f81dec", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.127348…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁█▇▇▇▆▆▆▆▆
test/epoch_loss█▂▃▁▂▂▂▁▁▁
test/f1-score▁█▆▇▇▅▆▅▅▅
test/precision▁█▇█▇▆▆▇▇▇
test/recall▄█▄▁█▁▄▁▁▁
train/batch_loss▄▆██▄▃▃▅▂▃▄▅▃▄▂▃▁▁▁▄▂▃▄▁▄▂▂▁▁▃▃▄▂▂▂▅▂▃▃▄
train/epoch_acc▁▅█▇▇███▇▇
train/epoch_loss█▄▂▂▁▁▁▁▂▂

Run summary:


epoch9
test/epoch_acc0.75556
test/epoch_loss0.6144
test/f1-score0.76087
test/precision0.67308
test/recall0.875
train/batch_loss0.65108
train/epoch_acc0.7543
train/epoch_loss0.62678

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run hopeful-sweep-3 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/9j8etw77
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_211850-9j8etw77/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: k23a02gb with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_212648-k23a02gb" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run dulcet-sweep-4 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/k23a02gb" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▅▁▂▅▇▄▅▇█▇
test/epoch_loss█▆▄▂▁▂▂▁▁▁
test/f1-score▆▁▂▅▇▃▅▇█▇
test/precision▁▁▄▅▇▆█▇█▆
test/recall█▁▁▅▆▂▃▆▆▅
train/batch_loss█▇▇▇▇▇▇▆▆▄▆▇▆▆▅▆▄▃▃▃▁▄▃▃▄▃▂▄▂▁▂▃▇▁▃▄▃▄▆▄
train/epoch_acc▁▅▆▆▇█▇█▇█
train/epoch_loss█▆▄▃▂▁▂▁▂▁

Run summary:


epoch9
test/epoch_acc0.88889
test/epoch_loss0.30289
test/f1-score0.86486
test/precision0.91429
test/recall0.82051
train/batch_loss0.27111
train/epoch_acc0.89681
train/epoch_loss0.28549

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run dulcet-sweep-4 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/k23a02gb
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_212648-k23a02gb/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 265qnj0c with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_213431-265qnj0c" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run hearty-sweep-5 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/265qnj0c" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4f68d0e4fc994f12bdfba68bd75d2d3f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.010 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.369539…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▅▄▆▇█▇▇▆▆
test/epoch_loss█▅▄▃▃▁▂▃▂▁
test/f1-score▁▅▄▆▇██▇▆▇
test/precision███▄▁▂▁▅▁▁
test/recall▁▄▄▅▇██▇▆▇
train/batch_loss██▇▇▅▅▃▆▂▃▂▂▂▂▄▄▂▂▂▂▄▁▂▅▃▁▁▁▂▆▂▃▃▁▁▁▂▂▁▂
train/epoch_acc▁▅▆▇▇█████
train/epoch_loss█▅▄▂▂▁▁▁▁▁

Run summary:


epoch9
test/epoch_acc0.88889
test/epoch_loss0.26007
test/f1-score0.8913
test/precision0.95349
test/recall0.83673
train/batch_loss0.01167
train/epoch_acc0.98034
train/epoch_loss0.08153

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run hearty-sweep-5 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/265qnj0c
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_213431-265qnj0c/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: eg199ue9 with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.01\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_214215-eg199ue9" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run smart-sweep-6 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/eg199ue9" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "77222c4a821846208ce165385b4c6092", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.127718…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▇▆▆▁▅▆▃▃▃█
test/epoch_loss█▄▃▄▄▅▂▄▃▁
test/f1-score▇▆▆▁▅▆▄▃▄█
test/precision▅▂█▃▁▂▁▄▁▅
test/recall▇▇▅▁▆▇▅▃▅█
train/batch_loss▆▆▆▆▃▂▂▄▂▂▁▃▂█▂▁▂▅▄▁▁▂▁▁▅▁▁▃▂▂▄▁▁▁▁▁▁▁▁▁
train/epoch_acc▁▄▆▇▇▇████
train/epoch_loss█▅▄▃▂▂▁▁▁▁

Run summary:


epoch9
test/epoch_acc0.9
test/epoch_loss0.22746
test/f1-score0.89655
test/precision0.92857
test/recall0.86667
train/batch_loss0.13858
train/epoch_acc0.98403
train/epoch_loss0.07075

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run smart-sweep-6 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/eg199ue9
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_214215-eg199ue9/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: vdaaitvt with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_215145-vdaaitvt" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run glorious-sweep-7 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/vdaaitvt" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "16d2f2fc96774dcbaba181d48d444fc9", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.003 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁▄▆██▇▇▇▇▆
test/epoch_loss█▇▆▄▃▂▁▁▁▁
test/f1-score▁▄▆█▇▆▆▆▆▃
test/precision▁▃▄▆▇▇▇███
test/recall▇▇██▆▅▅▄▄▁
train/batch_loss▇▇▇▆▇▆▆▆▆▅▆▅▆▆▆▇▅▅▄▅█▅▃▄▅▃▅▇▃▅▅▅▅▂▄▁▅▄▄▅
train/epoch_acc▁▄▆▇▇███▇█
train/epoch_loss█▇▆▅▄▃▂▁▂▁

Run summary:


epoch9
test/epoch_acc0.77778
test/epoch_loss0.47685
test/f1-score0.72222
test/precision0.83871
test/recall0.63415
train/batch_loss0.37919
train/epoch_acc0.82924
train/epoch_loss0.45283

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run glorious-sweep-7 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/vdaaitvt
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_215145-vdaaitvt/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 16v61zix with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_215930-16v61zix" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run elated-sweep-8 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/16v61zix" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "275f61520cf1419c9104636f4bd34994", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.127347…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▅▅▅██▁▅▅▁▁
test/epoch_loss█▁▂▅▃▅▄▄▄▂
test/f1-score▄▄▄█▇▁▄▄▁▁
test/precision▃▅▃▁█▃▃▃▁▁
test/recall▃▁▃█▁▁▃▃▃▃
train/batch_loss█▆▇▇▂▂▅▆▄▆▂▁▁▁▁▁▁▁▂▁▁▁▁▁▁▁▁▁▁▁▁▁▂▁▁▁▂▁▁▁
train/epoch_acc▁▅▇▇▇█████
train/epoch_loss█▅▂▂▂▁▁▁▁▁

Run summary:


epoch9
test/epoch_acc0.92222
test/epoch_loss0.16872
test/f1-score0.92135
test/precision0.91111
test/recall0.93182
train/batch_loss0.00228
train/epoch_acc0.99877
train/epoch_loss0.02303

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run elated-sweep-8 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/16v61zix
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_215930-16v61zix/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: gy76rrgz with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.999\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_220712-gy76rrgz" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run major-sweep-9 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/gy76rrgz" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "859091dc42c94f2eb2732a64c9afc414", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.127052…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▃▃▁▃▃▆▆█▆▃
test/epoch_loss█▆▄▃▂▂▁▁▁▁
test/f1-score▄▃▁▃▃▆▆█▆▃
test/precision▁█▇███████
test/recall▆▃▁▃▃▆▆█▆▃
train/batch_loss█▇██▇▆▇▃▅▃▇▄▄▄▃▆▃▅▃▅▃▁▇▅▃▄▄▆▂▅▂▂▃▁▁▂▂▁▃▁
train/epoch_acc▁▅▅▆▇▇▇▇██
train/epoch_loss█▆▅▃▃▂▂▂▁▁

Run summary:


epoch9
test/epoch_acc0.88889
test/epoch_loss0.26282
test/f1-score0.87179
test/precision0.97143
test/recall0.7907
train/batch_loss0.1486
train/epoch_acc0.88698
train/epoch_loss0.31064

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run major-sweep-9 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/gy76rrgz
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_220712-gy76rrgz/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 4dx2f0j8 with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_221511-4dx2f0j8" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run fallen-sweep-10 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/4dx2f0j8" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "93aeee346a4849d88f08125cde60a1ef", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.129230…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▁█▅▇▇▇▇▇▇█
test/epoch_loss█▁▄▃▆▅▆▅▅▄
test/f1-score▁█▅▇▇▇▇▇▇█
test/precision▆▅▇▄█▄█▄▁▅
test/recall▁█▅▇▇▇▇▇▇█
train/batch_loss▅▅▆▃▄▃▃▂▂▂▂▂▁▃█▁▅▁▂▂▂▁▁▂▃▁▁▁▁▃▁▁▁▁▁▂▁▁▄▁
train/epoch_acc▁▄▆▆▇█████
train/epoch_loss█▆▄▃▂▂▂▁▁▁

Run summary:


epoch9
test/epoch_acc0.86667
test/epoch_loss0.37958
test/f1-score0.875
test/precision0.95455
test/recall0.80769
train/batch_loss0.077
train/epoch_acc0.9656
train/epoch_loss0.09797

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run fallen-sweep-10 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/4dx2f0j8
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_221511-4dx2f0j8/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Sweep Agent: Waiting for job.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Job received.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: j93p9uxm with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_222419-j93p9uxm" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run revived-sweep-11 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/j93p9uxm" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "46df1ce0b91447d0a2e0d047a3864afd", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.010 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.369874…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▅█
test/epoch_acc▁█
test/epoch_loss█▁
test/f1-score▁█
test/precision▁█
test/recall█▁
train/batch_loss▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▂▁█▁▁▆▅▁▁
train/epoch_acc█▃▁
train/epoch_loss▁▁█

Run summary:


epoch2
test/epoch_acc0.56667
test/epoch_loss92431672.3021
test/f1-score0.62136
test/precision0.47761
test/recall0.88889
train/batch_loss7666.14648
train/epoch_acc0.46929
train/epoch_loss4618.08651

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run revived-sweep-11 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/j93p9uxm
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_222419-j93p9uxm/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run j93p9uxm errored: ZeroDivisionError('division by zero')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run j93p9uxm errored: ZeroDivisionError('division by zero')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: pb5m44k2 with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.999\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_222656-pb5m44k2" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run faithful-sweep-12 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/pb5m44k2" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (success)." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b54a0c709df84839b6a094fd08e2696d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.003 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


epoch▁▂▃▃▄▅▆▆▇█
test/epoch_acc▅▁▆▃▆▆▇▇▇█
test/epoch_loss▅█▄▅▃▃▂▁▁▁
test/f1-score▄▄▄▁▅▆▇▇▇█
test/precision▆▁█▃▆▆█▇██
test/recall▃█▂▁▄▆▅▆▅▇
train/batch_loss▆▄▂▅▅▄▃▆█▃▂▄▂▁▃▂▄▁▂▂▄▃▅▂▂▅▂▂▃▄▁▄▃▁▂▄▂▂▃▄
train/epoch_acc▁▁▃▃▄▄▆▆▇█
train/epoch_loss█▇▇▆▅▅▃▃▂▁

Run summary:


epoch9
test/epoch_acc0.78889
test/epoch_loss0.51027
test/f1-score0.78161
test/precision0.7234
test/recall0.85
train/batch_loss0.42048
train/epoch_acc0.82555
train/epoch_loss0.40512

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run faithful-sweep-12 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/pb5m44k2
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_222656-pb5m44k2/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: q8m1yt6d with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223624-q8m1yt6d" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run fine-sweep-13 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/q8m1yt6d" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "

Run history:


train/batch_loss

Run summary:


train/batch_loss0.67379

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run fine-sweep-13 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/q8m1yt6d
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223624-q8m1yt6d/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run q8m1yt6d errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 20.00 MiB (GPU 0; 1.95 GiB total capacity; 1.30 GiB already allocated; 11.31 MiB free; 1.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run q8m1yt6d errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 20.00 MiB (GPU 0; 1.95 GiB total capacity; 1.30 GiB already allocated; 11.31 MiB free; 1.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Sweep Agent: Waiting for job.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Job received.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: f3kiw40d with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223651-f3kiw40d" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run devout-sweep-14 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/f3kiw40d" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5b4942d4dbd04d5baa953dbdfe4608de", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.027 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run devout-sweep-14 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/f3kiw40d
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223651-f3kiw40d/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run f3kiw40d errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 20.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 3.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run f3kiw40d errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 20.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 3.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: i0xsie8j with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223710-i0xsie8j" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run restful-sweep-15 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/i0xsie8j" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run restful-sweep-15 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/i0xsie8j
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223710-i0xsie8j/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run i0xsie8j errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run i0xsie8j errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Sweep Agent: Waiting for job.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Job received.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: bi477kch with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223736-bi477kch" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run pretty-sweep-16 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/bi477kch" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run pretty-sweep-16 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/bi477kch
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223736-bi477kch/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run bi477kch errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run bi477kch errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 7jmkpkmh with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223752-7jmkpkmh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run daily-sweep-17 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/7jmkpkmh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run daily-sweep-17 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/7jmkpkmh
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223752-7jmkpkmh/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run 7jmkpkmh errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run 7jmkpkmh errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: pc0kaw45 with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223812-pc0kaw45" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run dutiful-sweep-18 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/pc0kaw45" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "202c31bd32e34897b89b9f828ad6301e", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.027 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run dutiful-sweep-18 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/pc0kaw45
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223812-pc0kaw45/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run pc0kaw45 errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run pc0kaw45 errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: o04kggii with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.999\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223833-o04kggii" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run glad-sweep-19 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/o04kggii" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ebfa8b7ad18e48efb4c7a99963364387", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.129182…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run glad-sweep-19 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/o04kggii
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223833-o04kggii/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run o04kggii errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run o04kggii errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: mr7zxx8m with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223854-mr7zxx8m" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run dazzling-sweep-20 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/mr7zxx8m" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b1e66d66721840b391339ee3201e55fb", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.129362…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run dazzling-sweep-20 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/mr7zxx8m
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223854-mr7zxx8m/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run mr7zxx8m errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run mr7zxx8m errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 292ds63r with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223916-292ds63r" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run misunderstood-sweep-21 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/292ds63r" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2bd23ebb420c420f8f23ed3bc12e993f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.129560…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run misunderstood-sweep-21 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/292ds63r
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223916-292ds63r/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run 292ds63r errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run 292ds63r errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: fdlwffsj with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.01\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_223937-fdlwffsj" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run glorious-sweep-22 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/fdlwffsj" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ff7c63d38a9a470b92b70583ec7dfbe2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.026 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.132928…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run glorious-sweep-22 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/fdlwffsj
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_223937-fdlwffsj/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run fdlwffsj errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run fdlwffsj errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 3s4wltdw with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_224003-3s4wltdw" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run absurd-sweep-23 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/3s4wltdw" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run absurd-sweep-23 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/3s4wltdw
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_224003-3s4wltdw/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run 3s4wltdw errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run 3s4wltdw errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: kv0nxhmk with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_224028-kv0nxhmk" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run devout-sweep-24 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/kv0nxhmk" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "f47be2ec47854346b5d3306559f94b91", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.010 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.375132…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run devout-sweep-24 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/kv0nxhmk
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_224028-kv0nxhmk/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run kv0nxhmk errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run kv0nxhmk errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: ixbulpc8 with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.01\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 5\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_224049-ixbulpc8" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run silver-sweep-25 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/ixbulpc8" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "bfd33251a13841f7a8b32b6145a2fdfe", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.003 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=0.129490…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run silver-sweep-25 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/ixbulpc8
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_224049-ixbulpc8/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run ixbulpc8 errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run ixbulpc8 errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: lfi2onyo with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_224110-lfi2onyo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run winter-sweep-26 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/lfi2onyo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run winter-sweep-26 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/lfi2onyo
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_224110-lfi2onyo/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run lfi2onyo errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run lfi2onyo errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: 4uvn2tnq with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.001\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: adam\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_224131-4uvn2tnq" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run expert-sweep-27 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/4uvn2tnq" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "224e6063b56942e8bad3124fe35c96a6", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.027 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run expert-sweep-27 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/4uvn2tnq
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_224131-4uvn2tnq/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run 4uvn2tnq errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run 4uvn2tnq errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: y4niwbym with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 4\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.01\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 2\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_224154-y4niwbym" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run tough-sweep-28 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/y4niwbym" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "cc40ab25a9354028bd95ee1802eb53d0", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.027 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run tough-sweep-28 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/y4niwbym
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_224154-y4niwbym/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run y4niwbym errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run y4niwbym errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: hxampiva with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.5\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.0003\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 3\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_224215-hxampiva" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run misunderstood-sweep-29 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/hxampiva" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "18a4b9d577dc4678bc02fc829b527858", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.027 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run misunderstood-sweep-29 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/hxampiva
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_224215-hxampiva/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run hxampiva errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run hxampiva errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Sweep Agent: Waiting for job.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Job received.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Agent Starting Run: q1v8qruc with config:\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbatch_size: 8\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_one: 0.99\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tbeta_two: 0.9\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tepochs: 10\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \teps: 1e-08\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tgamma: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tlearning_rate: 0.1\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \toptimizer: sgd\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \tstep_size: 7\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.11" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/zenon/Documents/master-thesis/classification/classifier/wandb/run-20230313_224241-q1v8qruc" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run cosmic-sweep-30 to Weights & Biases (docs)
Sweep page: https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/flower-classification/pytorch-sweeps-demo" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View sweep at https://wandb.ai/flower-classification/pytorch-sweeps-demo/sweeps/eqwnoagh" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/q1v8qruc" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5504f3fc68844494809c30c364a93525", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='0.027 MB of 0.027 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run cosmic-sweep-30 at: https://wandb.ai/flower-classification/pytorch-sweeps-demo/runs/q1v8qruc
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20230313_224241-q1v8qruc/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Run q1v8qruc errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[32m\u001b[41mERROR\u001b[0m Run q1v8qruc errored: OutOfMemoryError('CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 1.95 GiB total capacity; 1.32 GiB already allocated; 1.31 MiB free; 1.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF')\n" + ] + } + ], + "source": [ + "wandb.agent(sweep_id, train, count=30)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/thesis/graphics/classifier-hyp-metrics.pdf b/thesis/graphics/classifier-hyp-metrics.pdf new file mode 100644 index 0000000..f93f28c Binary files /dev/null and b/thesis/graphics/classifier-hyp-metrics.pdf differ diff --git a/thesis/references.bib b/thesis/references.bib index fdc56ee..2956556 100644 --- a/thesis/references.bib +++ b/thesis/references.bib @@ -99,6 +99,19 @@ keywords = {artificial intelligence,crop management,livestock management,machine learning,precision agriculture,precision livestock farming,soil management,water management} } +@article{bergstra2012, + title = {Random Search for Hyper-Parameter Optimization}, + author = {Bergstra, James and Bengio, Yoshua}, + date = {2012-02-01}, + journaltitle = {The Journal of Machine Learning Research}, + shortjournal = {J. Mach. Learn. 
Res.}, + volume = {13}, + pages = {281--305}, + issn = {1532-4435}, + issue = {null}, + keywords = {deep learning,global optimization,model selection,neural networks,response surface modeling} +} + @online{bochkovskiy2020, title = {{{YOLOv4}}: {{Optimal Speed}} and {{Accuracy}} of {{Object Detection}}}, shorttitle = {{{YOLOv4}}}, @@ -164,6 +177,19 @@ keywords = {Complexity theory,Degradation,Image recognition,Image segmentation,Neural networks,Training,Visualization} } +@online{kingma2017, + title = {Adam: {{A Method}} for {{Stochastic Optimization}}}, + shorttitle = {Adam}, + author = {Kingma, Diederik P. and Ba, Jimmy}, + date = {2017-01-29}, + number = {arXiv:1412.6980}, + eprint = {arXiv:1412.6980}, + eprinttype = {arxiv}, + doi = {10.48550/arXiv.1412.6980}, + pubstate = {preprint}, + keywords = {Computer Science - Machine Learning} +} + @article{kuznetsova2020, title = {The {{Open Images Dataset V4}}: {{Unified}} Image Classification, Object Detection, and Visual Relationship Detection at Scale}, shorttitle = {The {{Open Images Dataset V4}}}, diff --git a/thesis/thesis.pdf b/thesis/thesis.pdf index 1d301d5..34d8e4c 100644 Binary files a/thesis/thesis.pdf and b/thesis/thesis.pdf differ diff --git a/thesis/thesis.tex b/thesis/thesis.tex index 84b6b4b..0bdb94b 100644 --- a/thesis/thesis.tex +++ b/thesis/thesis.tex @@ -78,6 +78,7 @@ \newacronym{map}{mAP}{mean average precision} \newacronym{resnet}{ResNet}{Residual Neural Network} \newacronym{cnn}{CNN}{Convolutional Neural Network} +\newacronym{sgd}{SGD}{Stochastic Gradient Descent} \begin{document} @@ -348,6 +349,78 @@ feature extraction capabilities. \label{fig:classifier-training-metrics} \end{figure} +\subsection{Hyper-parameter Optimization} +\label{ssec:resnet-hyp-opt} + +In order to improve the aforementioned accuracy values, we perform +hyper-parameter optimization across a wide range of +parameters. Table~\ref{tab:resnet-hyps} lists the hyper-parameters and +their possible values. 
Since the number of all combinations of values +is 11520 and each combination is trained for 10 epochs with a training +time of approximately six minutes per combination, exhausting the +search space would take 48 days. Due to time limitations, we have +chosen to not search exhaustively but to pick random combinations +instead. Random search works surprisingly well---especially compared to +grid search---in a number of domains, one of which is hyper-parameter +optimization~\cite{bergstra2012}. + +\begin{table}[h] + \centering + \begin{tabular}{lr} + \toprule + Parameter & Values \\ + \midrule + optimizer & adam, sgd \\ + batch size & 4, 8, 16, 32, 64 \\ + learning rate & 0.0001, 0.0003, 0.001, 0.003, 0.01, 0.1 \\ + step size & 2, 3, 5, 7 \\ + gamma & 0.1, 0.5 \\ + beta one & 0.9, 0.99 \\ + beta two & 0.5, 0.9, 0.99, 0.999 \\ + eps & 0.00000001, 0.1, 1 \\ + \bottomrule + \end{tabular} + \caption{Hyper-parameters and their possible values during + optimization.} + \label{tab:resnet-hyps} +\end{table} + +The random search was run for 138 iterations which equates to a 75\% +probability that the best solution lies within 1\% of the theoretical +maximum~\eqref{eq:opt-prob}. Figure~\ref{fig:resnet-hyp-results} shows +three of the eight parameters and their impact on a high +F1-score. \gls{sgd} has less variation in its results than +Adam~\cite{kingma2017} and manages to provide eight out of the ten +best results. The number of epochs to train for was chosen based on +the observation that almost all configurations converge well before +reaching the tenth epoch. The assumption that a training run with ten +epochs provides a good proxy for final performance is supported by the +quick convergence of validation accuracy and loss in +figure~\ref{fig:classifier-training-metrics}. 
+ +\begin{equation}\label{eq:opt-prob} + 1 - (1 - 0.01)^{138} \approx 0.75 +\end{equation} + +\begin{figure} + \centering + \includegraphics{graphics/classifier-hyp-metrics.pdf} + \caption[Classifier hyper-parameter optimization results.]{This + figure shows three of the eight hyper-parameters and their + performance measured by the F1-score during 138 + trials. Differently colored markers show the batch size with + darker colors representing a larger batch size. The type of marker + (circle or cross) shows which optimizer was used. The x-axis shows + the learning rate on a logarithmic scale. In general, a learning + rate between 0.003 and 0.01 results in more robust and better + F1-scores. Larger batch sizes more often lead to better + performance as well. As for the type of optimizer, \gls{sgd} + produced the best iteration with an F1-score of 0.9783. Adam tends + to require more customization of its parameters than \gls{sgd} to + achieve good results.} + \label{fig:resnet-hyp-results} +\end{figure} + \subsection{Class Activation Maps} \label{ssec:resnet-cam}