From 723f8719d1f54c8d806d7f9e6ea02542bcc0b48a Mon Sep 17 00:00:00 2001
From: Jon <vitale.jonathan@ymail.com>
Date: Tue, 16 May 2023 13:18:48 +1000
Subject: [PATCH] Add solution code for week 2

---
 week2/solution/code/agent_programs.py         | 305 ++++++++++++++++++
 week2/solution/code/vacuum_world.py           |   7 +
 2 files changed, 312 insertions(+)
 create mode 100644 week2/solution/code/agent_programs.py
 create mode 100644 week2/solution/code/vacuum_world.py

diff --git a/week2/solution/code/agent_programs.py b/week2/solution/code/agent_programs.py
new file mode 100644
index 0000000..c268416
--- /dev/null
+++ b/week2/solution/code/agent_programs.py
@@ -0,0 +1,305 @@
+import math
+import random
+import numpy as np
+
+from une_ai.vacuum import VacuumAgent, DISPLAY_HEIGHT, DISPLAY_WIDTH, TILE_SIZE
+from une_ai.models import GridMap
+
+DIRECTIONS = VacuumAgent.WHEELS_DIRECTIONS
+
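+# WHEELS_DIRECTIONS presumably enumerates the four labels 'north', 'south',
+# 'west' and 'east'; movement_offsets below relies on exactly these names.
+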
+"""
+Simple reflex agent: 
+- If the vacuum is on, turn it on
+- If there is dirt on the current tile, activate the suction mechanism
+- If the agent hit a wall, change the direction of the wheels randomly
+- If the agent senses dirt on the surrounding tiles, change the direction of the wheels towards the dirt
+"""
+def simple_reflex_behaviour(percepts, actuators):
+    actions = []
+
+    # if the vacuum is off, turn it on
+    if actuators['vacuum-power'] == 0:
+        actions.append('start-cleaning')
+    
+    # if there is dirt on the current tile, clean it
+    if percepts['dirt-sensor-center']:
+        actions.append('activate-suction-mechanism')
+    else:
+        actions.append('deactivate-suction-mechanism')
+    
+    # if the agent hit a wall, change direction
+    for direction in DIRECTIONS:
+        if percepts['bumper-sensor-{0}'.format(direction)]:
+            new_direction = actuators['wheels-direction']
+            while new_direction == actuators['wheels-direction']:
+                new_direction = random.choice(DIRECTIONS)
+            actions.append('change-direction-{0}'.format(new_direction))
+    
+    # if there is dirt on a surrounding tile, move in that direction
+    for direction in DIRECTIONS:
+        if percepts['dirt-sensor-{0}'.format(direction)]:
+            actions.append('change-direction-{0}'.format(direction))
+            break
+
+    return actions
+
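+# Illustrative sketch (not part of the solution): an agent program maps the
+# percepts dict and the actuators dict to a list of action labels. For
+# example, assuming every bumper and surrounding dirt sensor reads False:
+#   percepts = {'dirt-sensor-center': True, 'bumper-sensor-north': False, ...}
+#   actuators = {'vacuum-power': 0, 'wheels-direction': 'north'}
+#   simple_reflex_behaviour(percepts, actuators)
+#   # -> ['start-cleaning', 'activate-suction-mechanism']
+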
+"""
+Model-based reflex agent: 
+- The agent keeps track of the explored tiles
+- If the environment is fully explored, then turn off
+- ELSE
+- behave like the simple-reflex agent
+"""
+w_env = math.floor(DISPLAY_WIDTH / TILE_SIZE)
+h_env = math.floor(DISPLAY_HEIGHT / TILE_SIZE)
+explored_map = GridMap(w_env, h_env)
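+
+# GridMap is assumed to start with every cell set to False (unexplored);
+# the agent then marks visited cells True, e.g.:
+#   explored_map.set_item_value(0, 0, True)
+#   explored_map.get_item_value(0, 0)  # -> True
+#   explored_map.find_value(False)     # -> locations still unexplored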
+
+# compute offsets for the next movement
+movement_offsets = {
+    'north': (0, -1),
+    'south': (0, 1),
+    'west': (-1, 0),
+    'east': (1, 0)
+}
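+
+# Worked example (screen coordinates, so y grows downward): an agent at
+# (3, 4) whose wheels point 'north' is predicted to move onto tile
+# (3 + 0, 4 - 1) = (3, 3).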
+
+def model_based_reflex_behaviour(percepts, actuators):
+
+    # stopping when the whole environment is explored
+    n_unexplored_tiles = len(explored_map.find_value(False))
+    if n_unexplored_tiles == 0:
+        return ['stop-cleaning']
+    
+    # reaching this point means there are still unexplored tiles
+
+    # the actions are the same as for the simple reflex agent
+    actions = simple_reflex_behaviour(percepts, actuators)
+    
+    # we also need to update the model of the environment
+    agent_x, agent_y = percepts['location-sensor']
+    try:
+        explored_map.set_item_value(agent_x, agent_y, True)
+    except Exception:
+        # out of bounds, no recordings on the map
+        pass
+
+    # checking if the agent bumped into a wall and updating
+    # the map accordingly (as the agent did not move there)
+    present_direction = actuators['wheels-direction']
+    offset_x, offset_y = movement_offsets[present_direction]
+    next_x, next_y = (agent_x + offset_x, agent_y + offset_y)
+    for direction in DIRECTIONS:
+        if percepts['bumper-sensor-{0}'.format(direction)]:
+            try:
+                explored_map.set_item_value(next_x, next_y, True)
+            except Exception:
+                # out of bounds, that's ok
+                pass
+
+    return actions
+
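+# For example, an agent at (5, 2) heading 'east' that reports a bumper hit
+# did not actually move, so the blocked tile (6, 2) is marked as explored
+# and the agent will not keep targeting it.
+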
+"""
+Goal-based agent: 
+- We keep track of the explored tiles
+- The maps are used to predict if the next movement will lead to a previously explored tile and...
+... If so, the agent changes direction towards an unexplored tile
+ELSE
+- the agent behaves like the model-based reflex agent
+"""
+def goal_based_reflex_behaviour(percepts, actuators):
+    # start with the actions from the model-based reflex agent
+    # this will also update the model of the environment
+    actions = model_based_reflex_behaviour(percepts, actuators)
+
+    # if there is the action stop-cleaning
+    # it means we visited the whole environment
+    if 'stop-cleaning' in actions:
+        return actions
+
+    # else, we check if we are going in a direction with an unexplored tile
+    chosen_direction = None
+    for action in actions:
+        if action.startswith('change-direction-'):
+            chosen_direction = action.split('-')[2]
+    
+    if chosen_direction is None:
+        chosen_direction = actuators['wheels-direction']
+    
+    # making predictions about the future
+    # to check if it aligns with our goal of cleaning
+    # the whole environment
+    first_option = chosen_direction
+    direction_found = False
+    i = 0
+    directions = DIRECTIONS.copy()
+    # shuffle the directions so as to simulate a random choice
+    random.shuffle(directions)
+    agent_x, agent_y = percepts['location-sensor']
+    while not direction_found:
+        offset_x, offset_y = movement_offsets[chosen_direction]
+        new_x, new_y = (agent_x + offset_x, agent_y + offset_y)
+
+        try:
+            is_explored = explored_map.get_item_value(new_x, new_y)
+        except Exception:
+            # out of bounds, set it as explored
+            is_explored = True
+    
+        if is_explored:
+            # we already visited the next tile
+            # change direction with the next one
+            if i < len(directions):
+                chosen_direction = directions[i]
+                i += 1
+                # and try again
+            else:
+                # it seems that everything was visited
+                # we go with the first option we got
+                chosen_direction = first_option
+                break
+        else:
+            # we found an unvisited tile
+            direction_found = True
+    
+    # append the action; if several change-direction actions are present, only the last takes effect
+    actions.append('change-direction-{0}'.format(chosen_direction))
+
+    return actions
+
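+# Illustrative trace: if the model-based actions steer the agent towards an
+# already-explored tile, the loop above retries the shuffled directions one
+# by one until it finds one leading to an unexplored tile, falling back to
+# the first option when every neighbouring tile was already visited.
+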
+"""
+Utility-based reflex agent:
+We behave exactly as the goal-based agent but we try to work more efficiently, so
+- Behave as the goal-based agent
+- IF the chosen direction leads to a previously explored tile it means that the agent explored all the surroundings...
+... so we choose a direction leading to the closer unexplored tile that is not obstructed by a wall
+"""
+
+# for this agent we also need to keep track of the walls
+walls_map = GridMap(w_env, h_env)
+
+def find_best_direction(x, y):
+    r = explored_map.get_row(y)
+    c = explored_map.get_column(x)
+    rw = walls_map.get_row(y)
+    cw = walls_map.get_column(x)
+    tiles_by_dir = {
+        'east': (r[x+1:], rw[x+1:]),
+        'west': (np.flip(r[:x]), np.flip(rw[:x])),
+        'north': (np.flip(c[:y]), np.flip(cw[:y])),
+        'south': (c[y+1:], cw[y+1:])
+    }
+    min_dist = None
+    min_dist_dir = None
+    for direction in DIRECTIONS:
+        cur_dist = None
+        # check if there is an unexplored tile towards this direction
+        try:
+            cur_dist = min(np.argwhere(tiles_by_dir[direction][0] == False))[0]
+        except Exception:
+            # no unexplored tiles in this direction, skip
+            continue
+
+        # if we are here it means that there is an unexplored tile
+        # towards this direction, let's see if it is unobstructed
+        wall_dist = None
+        try:
+            wall_dist = min(np.argwhere(tiles_by_dir[direction][1] == True))[0]
+        except Exception:
+            # there are no walls, wall_dist remains None
+            pass
+        
+        if wall_dist is not None and cur_dist > wall_dist:
+            # unexplored tile not directly reachable, skip
+            continue
+            
+        # computing the min distance
+        if min_dist is None or cur_dist < min_dist:
+            min_dist = cur_dist
+            min_dist_dir = direction
+
+    return min_dist_dir
+
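+# Worked example (hypothetical row): with the agent at x=2 on a row whose
+# explored flags are [True, True, True, False, True] and no walls recorded,
+# the slice r[x+1:] towards 'east' is [False, True], so the closest
+# unexplored tile lies at distance 0 and 'east' is a candidate direction.
+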
+def utility_based_reflex_behaviour(percepts, actuators):
+    agent_x, agent_y = percepts['location-sensor']
+
+    # we start behaving like the goal-based agent
+    actions = goal_based_reflex_behaviour(percepts, actuators)
+
+    # checking if the agent bumped into a wall during the previous
+    # iteration and updating the walls map accordingly
+    present_direction = actuators['wheels-direction']
+    offset_x, offset_y = movement_offsets[present_direction]
+    next_x, next_y = (agent_x + offset_x, agent_y + offset_y)
+    for direction in DIRECTIONS:
+        if percepts['bumper-sensor-{0}'.format(direction)]:
+            try:
+                walls_map.set_item_value(next_x, next_y, True)
+            except Exception:
+                # out of bounds, nothing to record
+                pass
+    
+    # now we check the chosen new direction
+    # if it leads to a previously explored tile,
+    # it means that the goal-based agent policy did not find
+    # any new surrounding tile to explore
+    chosen_direction = actuators['wheels-direction']
+    for action in actions:
+        if action.startswith('change-direction-'):
+            chosen_direction = action.split('-')[2]
+
+    offset_x, offset_y = movement_offsets[chosen_direction]
+    next_x, next_y = (agent_x + offset_x, agent_y + offset_y)
+
+    try:
+        is_explored = explored_map.get_item_value(next_x, next_y)
+    except Exception:
+        # out of bounds, set it as explored
+        is_explored = True
+    
+    new_best_dir = chosen_direction
+    if is_explored:
+        # we did not find any unvisited tile in the surroundings;
+        # find the best direction, leading to the closest unobstructed unexplored tile
+        new_best_dir = find_best_direction(agent_x, agent_y)
+        if new_best_dir is None:
+            # this may happen in a fully cleaned area; reuse the direction chosen by the goal-based agent
+            new_best_dir = chosen_direction
+    
+    # append it; if there is more than one change-direction action, only the last in the list takes effect
+    actions.append('change-direction-{0}'.format(new_best_dir))
+
+    return actions
diff --git a/week2/solution/code/vacuum_world.py b/week2/solution/code/vacuum_world.py
new file mode 100644
index 0000000..3d9656e
--- /dev/null
+++ b/week2/solution/code/vacuum_world.py
@@ -0,0 +1,7 @@
+from une_ai.vacuum import VacuumGame, DISPLAY_HEIGHT, DISPLAY_WIDTH
+from agent_programs import simple_reflex_behaviour, model_based_reflex_behaviour, goal_based_reflex_behaviour, utility_based_reflex_behaviour
+
+# To test the different agent programs, change the function passed as the first argument when creating the VacuumGame instance
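+# e.g., to try the simple reflex agent instead:
+# game = VacuumGame(simple_reflex_behaviour, DISPLAY_WIDTH, DISPLAY_HEIGHT)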
+game = VacuumGame(utility_based_reflex_behaviour, DISPLAY_WIDTH, DISPLAY_HEIGHT)
\ No newline at end of file
-- 
GitLab