
Commit 26583b0

Fix typos and update instructions
1 parent 7ce4cad commit 26583b0

3 files changed, +22 -21 lines changed


README.md

+1 -1

@@ -21,7 +21,7 @@ To check if you have it installed, open your terminal and type:
 git --version
 ```
 
-#### Git installation in MacOS
+#### Git installation in macOS
 
 ``` bash
 brew update

config.py

-1

@@ -9,7 +9,6 @@
 import matplotlib
 import matplotlib.pyplot as plt
 import numpy as np
-import seaborn as sns
 import torch
 from botorch.acquisition import (
     ExpectedImprovement,

tutorial.ipynb

+21 -19

@@ -69,16 +69,16 @@
 "Once you have Git installed open your terminal, go to your desired directory, and type:\n",
 "\n",
 "``` bash\n",
-"git clone https://github.com/machine-learning-tutorial/neural-networks\n",
-"cd neural-networks\n",
+"git clone https://github.com/machine-learning-tutorial/bayesian-optimization\n",
+"cd bayesian-optimization\n",
 "```\n",
 "\n",
 "Or get the repository with direct download:\n",
 "\n",
 "``` bash\n",
-"wget https://github.com/machine-learning-tutorial/neural_networks/archive/refs/heads/main.zip\n",
+"wget https://github.com/machine-learning-tutorial/bayesian-optimization/archive/refs/heads/main.zip\n",
 "unzip main.zip\n",
-"cd neural-networks\n",
+"cd bayesian-optimization\n",
 "```"
 ]
 },
@@ -102,14 +102,14 @@
 "Then run the following commands:\n",
 "\n",
 "```bash\n",
-"conda create -n nn-tutorial python=3.10\n",
-"conda activate nn-tutorial\n",
+"conda create -n bo-tutorial python=3.10\n",
+"conda activate bo-tutorial\n",
 "pip install -r requirements.txt\n",
 "jupyter contrib nbextension install --user\n",
 "jupyter nbextension enable varInspector/main\n",
 "```\n",
 "\n",
-"- After the tutorial you can remove your environment with `conda remove -n nn-tutorial --all`"
+"- **After** the tutorial you can remove your environment with `conda remove -n bo-tutorial --all`"
 ]
 },
 {
@@ -132,7 +132,7 @@
 "Alternatively, you can create the virtual env with `venv` in the standard library\n",
 "\n",
 "```bash\n",
-"python -m venv nn-tutorial\n",
+"python -m venv bo-tutorial\n",
 "```\n",
 "\n",
 "and activate the env with $ source <venv>/bin/activate (bash) or C:> <venv>/Scripts/activate.bat (Windows)\n",
@@ -181,7 +181,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": null,
 "id": "e538eebc",
 "metadata": {
 "slideshow": {
@@ -665,7 +665,6 @@
 ]
 },
 {
-"attachments": {},
 "cell_type": "markdown",
 "id": "52408173",
 "metadata": {
@@ -830,6 +829,7 @@
 "execution_count": null,
 "id": "e12804c8",
 "metadata": {
+"scrolled": true,
 "slideshow": {
 "slide_type": "slide"
 }
@@ -1012,16 +1012,17 @@
 "outputs": [],
 "source": [
 "# You can change the GP hyperparameters here again\n",
-"model.covar_module.base_kernel.lengthscale = 0.5\n",
+"model.covar_module.base_kernel.lengthscale = 0.1\n",
 "model.covar_module.outputscale = 0.5 # signal variance\n",
-"model.likelihood.noise_covar.noise = 0.5"
+"model.likelihood.noise_covar.noise = 0.02"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "id": "910e9471",
 "metadata": {
+"scrolled": true,
 "slideshow": {
 "slide_type": "-"
 }
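
For context on the hyperparameter values in the hunk above: a minimal sketch of how those attributes behave, assuming a BoTorch `SingleTaskGP` whose covariance is a `ScaleKernel` around a Matérn kernel (the toy data, imports, and variable names below are illustrative and not taken from the notebook).

```python
# Minimal sketch (assumptions: 1-D toy data, ScaleKernel(Matern) covariance) showing
# the effect of the values set in the changed cell.
import torch
from botorch.models import SingleTaskGP
from gpytorch.kernels import MaternKernel, ScaleKernel

train_X = torch.rand(10, 1, dtype=torch.double)   # toy inputs
train_Y = torch.sin(6.0 * train_X)                # toy observations
model = SingleTaskGP(train_X, train_Y, covar_module=ScaleKernel(MaternKernel(nu=2.5)))

# Same assignments as in the diff: a shorter lengthscale lets the posterior mean vary
# faster, and a smaller likelihood noise makes the GP track the observations more tightly.
model.covar_module.base_kernel.lengthscale = 0.1
model.covar_module.outputscale = 0.5   # signal variance
model.likelihood.noise_covar.noise = 0.02
```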
@@ -1254,7 +1255,7 @@
 },
 "outputs": [],
 "source": [
-"acq_UCB = UpperConfidenceBound(model, beta=4)\n",
+"acq_UCB = UpperConfidenceBound(model, beta=100)\n",
 "plot_acq_with_gp(model, observations_x, observations_y, acq_UCB, test_X, show_true_f=True, \n",
 " true_f_x= objective_x, true_f_y=objective_y)"
 ]
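
On the `beta=4` to `beta=100` change above: BoTorch's analytic `UpperConfidenceBound` scores a point as posterior mean plus `sqrt(beta)` times posterior standard deviation, so `beta=100` weights the uncertainty term by a factor of 10 rather than 2 and makes the acquisition far more exploratory. A rough, self-contained sketch with hypothetical toy data:

```python
# Rough sketch (toy data, not the tutorial's model): comparing UCB values for the
# two beta settings on a handful of candidate points.
import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import UpperConfidenceBound

train_X = torch.rand(8, 1, dtype=torch.double)
train_Y = torch.sin(6.0 * train_X)
model = SingleTaskGP(train_X, train_Y)

candidates = torch.linspace(0, 1, 5, dtype=torch.double).reshape(5, 1, 1)  # 5 points, q=1, d=1
ucb_moderate = UpperConfidenceBound(model, beta=4.0)(candidates)    # ~ mean + 2 * std
ucb_explore = UpperConfidenceBound(model, beta=100.0)(candidates)   # ~ mean + 10 * std
```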
@@ -1293,6 +1294,7 @@
 "execution_count": null,
 "id": "ccb09c56",
 "metadata": {
+"scrolled": true,
 "slideshow": {
 "slide_type": "slide"
 }
@@ -1412,7 +1414,7 @@
 "source": [
 "<h3 style=\"color:#e6541a;\">Get familiar with the Gym environment</h3>\n",
 "<p style=\"color:#e6541a;\">$\\implies$ Change the magnet values, i.e. the actions</p>\n",
-"<p style=\"color:#e6541a;\">$\\implies$ The actions are normalized to 1, so valid values are in the [0, 1] interval</p>\n",
+"<p style=\"color:#e6541a;\">$\\implies$ The actions are normalized to 1, so valid values are in the [-1, 1] interval</p>\n",
 "<p style=\"color:#e6541a;\">$\\implies$ The values of the <code>action</code> list in the cell below follows this magnet order: [Q1, Q2, CV, Q3, CH]</p>\n",
 "<p style=\"color:#e6541a;\">$\\implies$ Observe the plot below, what beam does that magnet configuration yield? can you center and focus the beam by hand?</p>"
 ]
@@ -1428,8 +1430,8 @@
 },
 "outputs": [],
 "source": [
-"action = [0.5, 0.5, 0.5, 0.5, 0.5]\n",
-"action = np.array(action)"
+"action = # fill here\n",
+"action = np.array(action) # [Q1, Q2, CV, Q3, CH]"
 ]
 },
 {
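
The two hunks above turn the hard-coded action into a fill-in exercise; the markdown cell in the same diff states that valid actions are five values in the [-1, 1] interval, ordered [Q1, Q2, CV, Q3, CH]. One hypothetical way a participant could fill it in (the specific numbers are arbitrary, not a suggested solution):

```python
# Hypothetical answer to the "fill here" exercise: any five values in [-1, 1],
# ordered as [Q1, Q2, CV, Q3, CH].
import numpy as np

action = [0.3, -0.2, 0.0, 0.4, 0.1]  # arbitrary illustrative values
action = np.array(action)
```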
@@ -1528,7 +1530,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"target_beam = [#fill here!]"
+"target_beam = [0, 1e-4, 0, 1e-4]#fill here!]"
 ]
 },
 {
@@ -1705,7 +1707,7 @@
 "<h2>Let's apply Bayesian optimization to this problem</h2>\n",
 "\n",
 "- We will use the loop implemented in the cell above\n",
-"- In order to quantify how the algorithm is performing, we will use the __log maximum aboslute error (L1 error)__ as metric:\n",
+"- In order to quantify how the algorithm is performing, we will use the __log mean aboslute error (L1 error)__ as metric:\n",
 "\n",
 "$$\\begin{aligned}\n",
 "f(x) &= -\\log(\\mathrm{MAE}(b_\\mathrm{current},b_\\mathrm{target})) \\\\\n",
@@ -1791,7 +1793,7 @@
 "beta = 2.0\n",
 "acquisition = \"UCB\"\n",
 "\n",
-"opt_info = bayesian_optimize(env, observation, n_steps=40, acquisition=acquisition, beta=beta,\n",
+"opt_info = bayesian_optimize(env, observation, n_steps=50, acquisition=acquisition, beta=beta,\n",
 " max_step_size=0.3, show_plot=True, time_sleep=0.05) "
 ]
 },
