diff --git a/week2/solution/code/agent_programs.py b/week2/solution/code/agent_programs.py
new file mode 100644
index 0000000000000000000000000000000000000000..c268416e121087832258085b705caa75d175fa9c
--- /dev/null
+++ b/week2/solution/code/agent_programs.py
@@ -0,0 +1,271 @@
+import math
+import random
+import numpy as np
+
+from une_ai.vacuum import VacuumAgent, DISPLAY_HEIGHT, DISPLAY_WIDTH, TILE_SIZE
+from une_ai.models import GridMap
+
+DIRECTIONS = VacuumAgent.WHEELS_DIRECTIONS
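+# DIRECTIONS is expected to contain the four cardinal directions
+# ('north', 'south', 'west', 'east'), matching the keys of the
+# movement_offsets dictionary defined below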
+
+"""
+Simple reflex agent: 
+- If the vacuum is on, turn it on
+- If there is dirt on the current tile, activate the suction mechanism
+- If the agent hit a wall, change the direction of the wheels randomly
+- If the agent senses dirt on the surrounding tiles, change the direction of the wheels towards the dirt
+"""
+def simple_reflex_behaviour(percepts, actuators):
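+    # percepts and actuators are assumed to be dicts keyed by sensor and
+    # actuator names, e.g. percepts['dirt-sensor-center'] is a boolean and
+    # actuators['wheels-direction'] is one of DIRECTIONS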
+    actions = []
+
+    # if the vacuum is off, turn it on
+    if actuators['vacuum-power'] == 0:
+        actions.append('start-cleaning')
+    
+    # if there is dirt on the current tile, clean it
+    if percepts['dirt-sensor-center']:
+        actions.append('activate-suction-mechanism')
+    else:
+        actions.append('deactivate-suction-mechanism')
+    
+    # if the agent hit a wall, change direction randomly
+    for direction in DIRECTIONS:
+        if percepts['bumper-sensor-{0}'.format(direction)]:
+            new_direction = actuators['wheels-direction']
+            while new_direction == actuators['wheels-direction']:
+                new_direction = random.choice(DIRECTIONS)
+            actions.append('change-direction-{0}'.format(new_direction))
+    
+    # if there is dirt on a surrounding tile, move in that direction
+    for direction in DIRECTIONS:
+        if percepts['dirt-sensor-{0}'.format(direction)]:
+            actions.append('change-direction-{0}'.format(direction))
+            break
+
+    return actions
+
+"""
+Model-based reflex agent: 
+- The agent keeps track of the explored tiles
+- If the environment is fully explored, then turn off
+- ELSE
+- behave like the simple-reflex agent
+"""
+w_env = math.floor(DISPLAY_WIDTH / TILE_SIZE)
+h_env = math.floor(DISPLAY_HEIGHT / TILE_SIZE)
+explored_map = GridMap(w_env, h_env)
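+# the GridMap is assumed to be initialised with False everywhere,
+# so find_value(False) below returns the still-unexplored tiles
+# and set_item_value(x, y, True) marks a tile as explored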
+
+# compute offsets for the next movement
+movement_offsets = {
+    'north': (0, -1),
+    'south': (0, 1),
+    'west': (-1, 0),
+    'east': (1, 0)
+}
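+# e.g. moving 'north' decreases y by one tile: the offsets assume the
+# grid origin (0, 0) is at the top-left corner of the environment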
+
+def model_based_reflex_behaviour(percepts, actuators):
+
+    # stopping when the whole environment is explored
+    n_unexplored_tiles = len(explored_map.find_value(False))
+    if n_unexplored_tiles == 0:
+        return ['stop-cleaning']
+    
+    # if here, it means that there are unexplored tiles
+
+    # the actions are the same as for the simple reflex agent
+    actions = simple_reflex_behaviour(percepts, actuators)
+    
+    # we also need to update the model of the environment
+    agent_x, agent_y = percepts['location-sensor']
+    try:
+        explored_map.set_item_value(agent_x, agent_y, True)
+    except Exception:
+        # out of bounds, nothing to record on the map
+        pass
+
+    # check if the agent bumped into a wall and update
+    # the map accordingly (as the agent did not move there)
+    present_direction = actuators['wheels-direction']
+    offset_x, offset_y = movement_offsets[present_direction]
+    next_x, next_y = (agent_x + offset_x, agent_y + offset_y)
+    for direction in DIRECTIONS:
+        if percepts['bumper-sensor-{0}'.format(direction)]:
+            try:
+                explored_map.set_item_value(next_x, next_y, True)
+            except Exception:
+                # out of bounds, that's ok
+                pass
+
+    return actions
+
+"""
+Goal-based agent: 
+- We keep track of the explored tiles
+- The maps are used to predict if the next movement will lead to a previously explored tile and...
+... If so, the agent changes direction towards an unexplored tile
+ELSE
+- the agent behaves like the model-based reflex agent
+"""
+def goal_based_reflex_behaviour(percepts, actuators):
+    # start with the actions from the model-based reflex agent
+    # this will also update the model of the environment
+    actions = model_based_reflex_behaviour(percepts, actuators)
+
+    # if there is the action stop-cleaning
+    # it means we visited the whole environment
+    if 'stop-cleaning' in actions:
+        return actions
+
+    # otherwise, check whether the chosen direction leads to an unexplored tile
+    chosen_direction = None
+    for action in actions:
+        if action.startswith('change-direction-'):
+            chosen_direction = action.split('-')[2]
+    
+    if chosen_direction is None:
+        chosen_direction = actuators['wheels-direction']
+    
+    # making predictions about the future
+    # to check if it aligns with our goal of cleaning
+    # the whole environment
+    first_option = chosen_direction
+    direction_found = False
+    i = 0
+    directions = DIRECTIONS.copy()
+    # shuffle the directions so as to simulate a random choice
+    random.shuffle(directions)
+    agent_x, agent_y = percepts['location-sensor']
+    while not direction_found:
+        offset_x, offset_y = movement_offsets[chosen_direction]
+        new_x, new_y = (agent_x + offset_x, agent_y + offset_y)
+
+        try:
+            is_explored = explored_map.get_item_value(new_x, new_y)
+        except Exception:
+            # out of bounds, treat it as explored
+            is_explored = True
+    
+        if is_explored:
+            # we already visited the next tile
+            # change direction with the next one
+            if i < len(directions):
+                chosen_direction = directions[i]
+                i += 1
+                # and try again
+            else:
+                # it seems that everything was visited
+                # we go with the first option we got
+                chosen_direction = first_option
+                break
+        else:
+            # we found an unvisited tile
+            direction_found = True
+    
+    # append the action; if there are multiple change-direction actions, only the last one in the list will take effect
+    actions.append('change-direction-{0}'.format(chosen_direction))
+
+    return actions
+
+"""
+Utility-based reflex agent:
+We behave exactly as the goal-based agent but we try to work more efficiently, so
+- Behave as the goal-based agent
+- IF the chosen direction leads to a previously explored tile it means that the agent explored all the surroundings...
+... so we choose a direction leading to the closer unexplored tile that is not obstructed by a wall
+"""
+
+# for this agent we also need to keep track of the walls
+walls_map = GridMap(w_env, h_env)
+
+def find_best_direction(x, y):
+    r = explored_map.get_row(y)
+    c = explored_map.get_column(x)
+    rw = walls_map.get_row(y)
+    cw = walls_map.get_column(x)
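+    # r/c: explored flags for the agent's row and column;
+    # rw/cw: wall flags for the same row and column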
+    tiles_by_dir = {
+        'east': (r[x+1:], rw[x+1:]),
+        'west': (np.flip(r[:x]), np.flip(rw[:x])),
+        'north': (np.flip(c[:y]), np.flip(cw[:y])),
+        'south': (c[y+1:], cw[y+1:])
+    }
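+    # each pair lists the (explored, wall) flags of the tiles in that
+    # direction, ordered by increasing distance from the agent,
+    # so index i corresponds to the tile at distance i + 1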
+    min_dist = None
+    min_dist_dir = None
+    for direction in DIRECTIONS:
+        cur_dist = None
+        # check if there is an unexplored tile towards this direction
+        try:
+            cur_dist = min(np.argwhere(tiles_by_dir[direction][0] == False))[0]
+        except Exception:
+            # no unexplored tiles in this direction, skip it
+            continue
+
+        # if we are here it means that there is an unexplored tile
+        # towards this direction, let's see if it is unobstructed
+        wall_dist = None
+        try:
+            wall_dist = min(np.argwhere(tiles_by_dir[direction][1] == True))[0]
+        except Exception:
+            # there are no walls in this direction, wall_dist remains None
+            pass
+        
+        if wall_dist is not None and cur_dist > wall_dist:
+            # the unexplored tile is behind a wall and not directly reachable, skip
+            continue
+            
+        # computing the min distance
+        if min_dist is None or cur_dist < min_dist:
+            min_dist = cur_dist
+            min_dist_dir = direction
+
+    return min_dist_dir
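+# e.g. find_best_direction(3, 5) would return 'east' if, from tile (3, 5),
+# the closest unexplored tile not blocked by a known wall lies to the east,
+# or None if no direction leads to a reachable unexplored tile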
+
+def utility_based_reflex_behaviour(percepts, actuators):
+    agent_x, agent_y = percepts['location-sensor']
+
+    # we start behaving like the goal-based agent
+    actions = goal_based_reflex_behaviour(percepts, actuators)
+
+    # checking if the agent bumped into a wall during the previous
+    # iteration and update the walls map accordingly
+    present_direction = actuators['wheels-direction']
+    offset_x, offset_y = movement_offsets[present_direction]
+    next_x, next_y = (agent_x + offset_x, agent_y + offset_y)
+    for direction in DIRECTIONS:
+        if percepts['bumper-sensor-{0}'.format(direction)]:
+            try:
+                walls_map.set_item_value(next_x, next_y, True)
+            except Exception:
+                # out of bounds, nothing to record
+                pass
+    
+    # now we check the chosen new direction
+    # if it leads to a previously explored tile,
+    # it means that the goal-based agent policy did not find
+    # any new surrounding tile to explore
+    chosen_direction = actuators['wheels-direction']
+    for action in actions:
+        if action.startswith('change-direction-'):
+            chosen_direction = action.split('-')[2]
+
+    offset_x, offset_y = movement_offsets[chosen_direction]
+    next_x, next_y = (agent_x + offset_x, agent_y + offset_y)
+
+    try:
+        is_explored = explored_map.get_item_value(next_x, next_y)
+    except Exception:
+        # out of bounds, treat it as explored
+        is_explored = True
+    
+    new_best_dir = chosen_direction
+    if is_explored:
+        # no unvisited tile in the surroundings:
+        # find the direction leading to the closest unobstructed unexplored tile
+        new_best_dir = find_best_direction(agent_x, agent_y)
+        if new_best_dir is None:
+            # this may happen in a fully cleaned area,
+            # re-use the direction chosen by the goal-based agent
+            new_best_dir = chosen_direction
+    
+    # append it; if there is more than one change-direction action, only the last one in the list will take effect
+    actions.append('change-direction-{0}'.format(new_best_dir))
+
+    return actions
diff --git a/week2/solution/code/vacuum_world.py b/week2/solution/code/vacuum_world.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d9656efb05248a784801735e40abbaa6ce43b4f
--- /dev/null
+++ b/week2/solution/code/vacuum_world.py
@@ -0,0 +1,5 @@
+from une_ai.vacuum import VacuumGame, DISPLAY_HEIGHT, DISPLAY_WIDTH
+from agent_programs import simple_reflex_behaviour, model_based_reflex_behaviour, goal_based_reflex_behaviour, utility_based_reflex_behaviour
+
+# To test the different agent programs, change the function passed as a parameter when creating the VacuumGame instance
+game = VacuumGame(utility_based_reflex_behaviour, DISPLAY_WIDTH, DISPLAY_HEIGHT)
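+# note: the VacuumGame constructor is assumed to start the game loop itself,
+# so no further calls are needed here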