Skip to content
This repository has been archived by the owner on Oct 19, 2023. It is now read-only.

Commit

Permalink
Merge pull request #16 from keon/1.3
Browse files Browse the repository at this point in the history
Do not use the deprecated `size_average` argument during evaluation; use `reduction='sum'` instead
  • Loading branch information
keon authored Oct 14, 2019
2 parents ef2374f + 5425f7f commit 21650bd
Show file tree
Hide file tree
Showing 5 changed files with 94 additions and 110 deletions.
10 changes: 5 additions & 5 deletions 04-패션_아이템을_구분하는_DNN/fashion_mnist.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -230,21 +230,21 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"display_name": "Python 3",
"language": "python",
"name": "python2"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.16"
"pygments_lexer": "ipython3",
"version": "3.7.0"
}
},
"nbformat": 4,
Expand Down
80 changes: 36 additions & 44 deletions 04-패션_아이템을_구분하는_DNN/neural_network.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@
"\n",
" # 모든 오차 더하기\n",
" test_loss += F.cross_entropy(output, target,\n",
" size_average=False).item()\n",
" reduction='sum').item()\n",
" \n",
" # 가장 큰 값을 가진 클래스가 모델의 예측입니다.\n",
" # 예측과 정답을 비교하여 일치할 경우 correct에 1을 더합니다.\n",
Expand All @@ -224,48 +224,40 @@
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/keon/3-min-pytorch/env/local/lib/python3.7/site-packages/torch/nn/_reduction.py:43: UserWarning: size_average and reduce args will be deprecated, please use reduction='sum' instead.\n",
" warnings.warn(warning.format(ret))\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1] Test Loss: 0.8590, Accuracy: 66.35%\n",
"[2] Test Loss: 0.6650, Accuracy: 76.13%\n",
"[3] Test Loss: 0.5793, Accuracy: 79.65%\n",
"[4] Test Loss: 0.5551, Accuracy: 79.98%\n",
"[5] Test Loss: 0.5078, Accuracy: 81.94%\n",
"[6] Test Loss: 0.5034, Accuracy: 81.98%\n",
"[7] Test Loss: 0.4809, Accuracy: 82.69%\n",
"[8] Test Loss: 0.4705, Accuracy: 83.18%\n",
"[9] Test Loss: 0.4763, Accuracy: 82.70%\n",
"[10] Test Loss: 0.4589, Accuracy: 83.51%\n",
"[11] Test Loss: 0.4757, Accuracy: 83.19%\n",
"[12] Test Loss: 0.4604, Accuracy: 83.60%\n",
"[13] Test Loss: 0.4404, Accuracy: 84.24%\n",
"[14] Test Loss: 0.4320, Accuracy: 84.77%\n",
"[15] Test Loss: 0.4221, Accuracy: 85.01%\n",
"[16] Test Loss: 0.4199, Accuracy: 84.99%\n",
"[17] Test Loss: 0.4195, Accuracy: 84.92%\n",
"[18] Test Loss: 0.4150, Accuracy: 85.56%\n",
"[19] Test Loss: 0.4073, Accuracy: 85.79%\n",
"[20] Test Loss: 0.4183, Accuracy: 84.82%\n",
"[21] Test Loss: 0.4107, Accuracy: 85.39%\n",
"[22] Test Loss: 0.3920, Accuracy: 86.17%\n",
"[23] Test Loss: 0.4022, Accuracy: 85.70%\n",
"[24] Test Loss: 0.3948, Accuracy: 86.06%\n",
"[25] Test Loss: 0.3875, Accuracy: 86.23%\n",
"[26] Test Loss: 0.3788, Accuracy: 86.66%\n",
"[27] Test Loss: 0.3930, Accuracy: 86.23%\n",
"[28] Test Loss: 0.3810, Accuracy: 86.25%\n",
"[29] Test Loss: 0.3768, Accuracy: 86.73%\n",
"[30] Test Loss: 0.3706, Accuracy: 86.79%\n"
"[1] Test Loss: 0.8419, Accuracy: 67.97%\n",
"[2] Test Loss: 0.6651, Accuracy: 76.44%\n",
"[3] Test Loss: 0.5845, Accuracy: 79.20%\n",
"[4] Test Loss: 0.5463, Accuracy: 80.69%\n",
"[5] Test Loss: 0.5213, Accuracy: 81.86%\n",
"[6] Test Loss: 0.4973, Accuracy: 82.26%\n",
"[7] Test Loss: 0.4911, Accuracy: 82.66%\n",
"[8] Test Loss: 0.5134, Accuracy: 81.31%\n",
"[9] Test Loss: 0.4628, Accuracy: 83.50%\n",
"[10] Test Loss: 0.4546, Accuracy: 83.81%\n",
"[11] Test Loss: 0.4541, Accuracy: 83.78%\n",
"[12] Test Loss: 0.4366, Accuracy: 84.45%\n",
"[13] Test Loss: 0.4486, Accuracy: 83.66%\n",
"[14] Test Loss: 0.4312, Accuracy: 84.81%\n",
"[15] Test Loss: 0.4228, Accuracy: 85.18%\n",
"[16] Test Loss: 0.4332, Accuracy: 84.61%\n",
"[17] Test Loss: 0.4132, Accuracy: 85.38%\n",
"[18] Test Loss: 0.4072, Accuracy: 85.84%\n",
"[19] Test Loss: 0.4054, Accuracy: 85.52%\n",
"[20] Test Loss: 0.4459, Accuracy: 84.59%\n",
"[21] Test Loss: 0.4092, Accuracy: 85.73%\n",
"[22] Test Loss: 0.3908, Accuracy: 86.23%\n",
"[23] Test Loss: 0.4023, Accuracy: 85.48%\n",
"[24] Test Loss: 0.3913, Accuracy: 86.12%\n",
"[25] Test Loss: 0.4025, Accuracy: 85.70%\n",
"[26] Test Loss: 0.3844, Accuracy: 86.35%\n",
"[27] Test Loss: 0.3772, Accuracy: 86.73%\n",
"[28] Test Loss: 0.3823, Accuracy: 86.34%\n",
"[29] Test Loss: 0.3743, Accuracy: 86.85%\n",
"[30] Test Loss: 0.3764, Accuracy: 86.57%\n"
]
}
],
Expand All @@ -281,21 +273,21 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"display_name": "Python 3",
"language": "python",
"name": "python2"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.16"
"pygments_lexer": "ipython3",
"version": "3.7.0"
}
},
"nbformat": 4,
Expand Down
2 changes: 1 addition & 1 deletion 04-패션_아이템을_구분하는_DNN/neural_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ def evaluate(model, test_loader):

# 모든 오차 더하기
test_loss += F.cross_entropy(output, target,
size_average=False).item()
reduction='sum').item()

# 가장 큰 값을 가진 클래스가 모델의 예측입니다.
# 예측과 정답을 비교하여 일치할 경우 correct에 1을 더합니다.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,7 @@
" data, target = data.to(DEVICE), target.to(DEVICE)\n",
" output = model(data)\n",
" test_loss += F.cross_entropy(output, target,\n",
" size_average=False).item()\n",
" reduction='sum').item()\n",
" \n",
" # 맞춘 갯수 계산\n",
" pred = output.max(1, keepdim=True)[1]\n",
Expand All @@ -218,68 +218,60 @@
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/keon/3-min-pytorch/env/local/lib/python3.7/site-packages/torch/nn/_reduction.py:43: UserWarning: size_average and reduce args will be deprecated, please use reduction='sum' instead.\n",
" warnings.warn(warning.format(ret))\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1] Test Loss: 0.5441, Accuracy: 82.98%\n",
"[2] Test Loss: 0.4254, Accuracy: 86.30%\n",
"[3] Test Loss: 0.3493, Accuracy: 89.21%\n",
"[4] Test Loss: 0.2960, Accuracy: 90.66%\n",
"[5] Test Loss: 0.2499, Accuracy: 92.24%\n",
"[6] Test Loss: 0.2214, Accuracy: 93.27%\n",
"[7] Test Loss: 0.2045, Accuracy: 93.77%\n",
"[8] Test Loss: 0.1904, Accuracy: 94.15%\n",
"[9] Test Loss: 0.1760, Accuracy: 94.69%\n",
"[10] Test Loss: 0.1656, Accuracy: 94.73%\n",
"[11] Test Loss: 0.1608, Accuracy: 94.81%\n",
"[12] Test Loss: 0.1546, Accuracy: 95.16%\n",
"[13] Test Loss: 0.1480, Accuracy: 95.20%\n",
"[14] Test Loss: 0.1424, Accuracy: 95.57%\n",
"[15] Test Loss: 0.1373, Accuracy: 95.60%\n",
"[16] Test Loss: 0.1352, Accuracy: 95.72%\n",
"[17] Test Loss: 0.1281, Accuracy: 95.93%\n",
"[18] Test Loss: 0.1245, Accuracy: 96.08%\n",
"[19] Test Loss: 0.1244, Accuracy: 96.18%\n",
"[20] Test Loss: 0.1183, Accuracy: 96.29%\n",
"[21] Test Loss: 0.1194, Accuracy: 96.23%\n",
"[22] Test Loss: 0.1147, Accuracy: 96.38%\n",
"[23] Test Loss: 0.1107, Accuracy: 96.51%\n",
"[24] Test Loss: 0.1110, Accuracy: 96.46%\n",
"[25] Test Loss: 0.1087, Accuracy: 96.58%\n",
"[26] Test Loss: 0.1056, Accuracy: 96.72%\n",
"[27] Test Loss: 0.1051, Accuracy: 96.62%\n",
"[28] Test Loss: 0.1030, Accuracy: 96.74%\n",
"[29] Test Loss: 0.0998, Accuracy: 96.82%\n",
"[30] Test Loss: 0.1027, Accuracy: 96.84%\n",
"[31] Test Loss: 0.0992, Accuracy: 96.80%\n",
"[32] Test Loss: 0.0974, Accuracy: 96.88%\n",
"[33] Test Loss: 0.0971, Accuracy: 96.91%\n",
"[34] Test Loss: 0.0946, Accuracy: 96.97%\n",
"[35] Test Loss: 0.0974, Accuracy: 96.90%\n",
"[36] Test Loss: 0.0934, Accuracy: 97.04%\n",
"[37] Test Loss: 0.0952, Accuracy: 96.93%\n",
"[38] Test Loss: 0.0923, Accuracy: 97.17%\n",
"[39] Test Loss: 0.0906, Accuracy: 97.23%\n",
"[40] Test Loss: 0.0898, Accuracy: 97.20%\n",
"[41] Test Loss: 0.0888, Accuracy: 97.26%\n",
"[42] Test Loss: 0.0894, Accuracy: 97.16%\n",
"[43] Test Loss: 0.0915, Accuracy: 97.13%\n",
"[44] Test Loss: 0.0871, Accuracy: 97.27%\n",
"[45] Test Loss: 0.0867, Accuracy: 97.30%\n",
"[46] Test Loss: 0.0872, Accuracy: 97.45%\n",
"[47] Test Loss: 0.0854, Accuracy: 97.34%\n",
"[48] Test Loss: 0.0867, Accuracy: 97.33%\n",
"[49] Test Loss: 0.0856, Accuracy: 97.36%\n",
"[50] Test Loss: 0.0851, Accuracy: 97.43%\n"
"[1] Test Loss: 0.5471, Accuracy: 82.80%\n",
"[2] Test Loss: 0.4169, Accuracy: 86.73%\n",
"[3] Test Loss: 0.3393, Accuracy: 89.36%\n",
"[4] Test Loss: 0.2775, Accuracy: 91.60%\n",
"[5] Test Loss: 0.2420, Accuracy: 92.51%\n",
"[6] Test Loss: 0.2205, Accuracy: 93.24%\n",
"[7] Test Loss: 0.1990, Accuracy: 93.90%\n",
"[8] Test Loss: 0.1828, Accuracy: 94.26%\n",
"[9] Test Loss: 0.1786, Accuracy: 94.53%\n",
"[10] Test Loss: 0.1625, Accuracy: 95.13%\n",
"[11] Test Loss: 0.1589, Accuracy: 95.22%\n",
"[12] Test Loss: 0.1493, Accuracy: 95.46%\n",
"[13] Test Loss: 0.1448, Accuracy: 95.60%\n",
"[14] Test Loss: 0.1388, Accuracy: 95.67%\n",
"[15] Test Loss: 0.1326, Accuracy: 95.97%\n",
"[16] Test Loss: 0.1292, Accuracy: 96.01%\n",
"[17] Test Loss: 0.1257, Accuracy: 96.06%\n",
"[18] Test Loss: 0.1234, Accuracy: 96.17%\n",
"[19] Test Loss: 0.1174, Accuracy: 96.36%\n",
"[20] Test Loss: 0.1149, Accuracy: 96.55%\n",
"[21] Test Loss: 0.1133, Accuracy: 96.46%\n",
"[22] Test Loss: 0.1121, Accuracy: 96.50%\n",
"[23] Test Loss: 0.1099, Accuracy: 96.63%\n",
"[24] Test Loss: 0.1071, Accuracy: 96.69%\n",
"[25] Test Loss: 0.1055, Accuracy: 96.76%\n",
"[26] Test Loss: 0.1039, Accuracy: 96.83%\n",
"[27] Test Loss: 0.1036, Accuracy: 96.73%\n",
"[28] Test Loss: 0.1011, Accuracy: 96.77%\n",
"[29] Test Loss: 0.0973, Accuracy: 96.91%\n",
"[30] Test Loss: 0.0973, Accuracy: 96.99%\n",
"[31] Test Loss: 0.0970, Accuracy: 96.97%\n",
"[32] Test Loss: 0.0949, Accuracy: 97.00%\n",
"[33] Test Loss: 0.0931, Accuracy: 97.19%\n",
"[34] Test Loss: 0.0918, Accuracy: 97.14%\n",
"[35] Test Loss: 0.0911, Accuracy: 97.20%\n",
"[36] Test Loss: 0.0927, Accuracy: 97.04%\n",
"[37] Test Loss: 0.0900, Accuracy: 97.18%\n",
"[38] Test Loss: 0.0923, Accuracy: 97.16%\n",
"[39] Test Loss: 0.0899, Accuracy: 97.13%\n",
"[40] Test Loss: 0.0896, Accuracy: 97.10%\n",
"[41] Test Loss: 0.0879, Accuracy: 97.23%\n",
"[42] Test Loss: 0.0872, Accuracy: 97.43%\n",
"[43] Test Loss: 0.0867, Accuracy: 97.24%\n",
"[44] Test Loss: 0.0854, Accuracy: 97.25%\n",
"[45] Test Loss: 0.0863, Accuracy: 97.36%\n",
"[46] Test Loss: 0.0838, Accuracy: 97.42%\n",
"[47] Test Loss: 0.0829, Accuracy: 97.47%\n",
"[48] Test Loss: 0.0834, Accuracy: 97.38%\n",
"[49] Test Loss: 0.0860, Accuracy: 97.34%\n",
"[50] Test Loss: 0.0816, Accuracy: 97.36%\n"
]
}
],
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ def evaluate(model, test_loader):
data, target = data.to(DEVICE), target.to(DEVICE)
output = model(data)
test_loss += F.cross_entropy(output, target,
size_average=False).item()
reduction='sum').item()

# 맞춘 갯수 계산
pred = output.max(1, keepdim=True)[1]
Expand Down

0 comments on commit 21650bd

Please sign in to comment.