
python - Reinforcement learning algorithm using turtle graphics not working


I'm currently trying to implement a Q-table algorithm in an environment I created with turtle graphics. When I try to run the algorithm, which uses Q-learning, I get an error message:

  File "<ipython-input-1-cf5669494f75>", line 304, in <module>
rl()

File "<ipython-input-1-cf5669494f75>", line 282, in rl
A = choose_action(S, q_table)

File "<ipython-input-1-cf5669494f75>", line 162, in choose_action
state_actions = q_table.iloc[state, :]

File "/Users/himansuodedra/anaconda3/lib/python3.6/site-packages/pandas/core/indexing.py", line 1367, in __getitem__
return self._getitem_tuple(key)

File "/Users/himansuodedra/anaconda3/lib/python3.6/site-packages/pandas/core/indexing.py", line 1737, in _getitem_tuple
self._has_valid_tuple(tup)

File "/Users/himansuodedra/anaconda3/lib/python3.6/site-packages/pandas/core/indexing.py", line 204, in _has_valid_tuple
if not self._has_valid_type(k, i):

File "/Users/himansuodedra/anaconda3/lib/python3.6/site-packages/pandas/core/indexing.py", line 1674, in _has_valid_type
return self._is_valid_list_like(key, axis)

File "/Users/himansuodedra/anaconda3/lib/python3.6/site-packages/pandas/core/indexing.py", line 1723, in _is_valid_list_like
raise IndexingError('Too many indexers')

IndexingError: Too many indexers

I can't seem to pinpoint the problem. My logic looks fine to me. I'm also able to build the environment, after which the script gets stuck and I'm forced to terminate it. Any help would be great. The code is below:

"""
Reinforcement Learning using table lookup Q-learning method.
An agent "Blue circle" is positioned in a grid and must make its way to the
green square. This is the end goal. Each time the agent should improve its
strategy to reach the final Square. There are two traps the red and the wall
which will reset the agent.
"""
import turtle
import pandas as pd
import numpy as np
import time

np.random.seed(2)

""" Setting Parameters """

#N_STATES = 12 # the size of the 2D world
ACTIONS = ['left', 'right', 'down','up'] # available actions
EPSILON = 0.9 # greedy policy (randomness factor)
ALPHA = 0.1 # learning rate
GAMMA = 0.9 # discount factor
MAX_EPISODES = 13 # maximum episodes
FRESH_TIME = 0.3 # fresh time for one move


def isGoal():
    if player.xcor() == -25 and player.ycor() == 225:
        player.goto(-175, 125)
        status_func(1)
        S_ = 'terminal'
        R = 1
        interaction = 'Episode %s: total_steps = %s' % (episode+1, step_counter)
        print('\r{}'.format(interaction), end='')
        time.sleep(2)
        print('\r', end='')
        return S_, R
    else:
        pass


def isFire():
    if player.xcor() == -25 and player.ycor() == 175:
        player.goto(-175, 125)
        status_func(3)
        S_ = 'terminal'
        R = -1
        interaction = 'Episode %s: total_steps = %s' % (episode+1, step_counter)
        print('\r{}'.format(interaction), end='')
        time.sleep(2)
        print('\r', end='')
        return S_, R
    else:
        pass


def isWall():
    if player.xcor() == -125 and player.ycor() == 175:
        player.goto(-175, 125)
        status_func(2)
        S_ = 'terminal'
        R = -1
        interaction = 'Episode %s: total_steps = %s' % (episode+1, step_counter)
        print('\r{}'.format(interaction), end='')
        time.sleep(2)
        print('\r', end='')
        return S_, R
    else:
        pass


""" Player Movement """

playerspeed = 50

""" Create the token object """

player = turtle.Turtle()
player.color("blue")
player.shape("circle")
player.penup()
player.speed(0)
player.setposition(-175,125)
player.setheading(90)



#Move the player left and right
def move_left():
    x = player.xcor()
    x -= playerspeed
    if x < -175:
        x = -175
    player.setx(x)
    isGoal()
    isFire()
    isWall()
    S_ = player.pos()
    R = 0

def move_right():
    x = player.xcor()
    x += playerspeed
    if x > -25:
        x = -25
    player.setx(x)
    isGoal()
    isFire()
    isWall()
    S_ = player.pos()
    R = 0

def move_up():
    y = player.ycor()
    y += playerspeed
    if y > 225:
        y = 225
    player.sety(y)
    isGoal()
    isFire()
    isWall()
    S_ = player.pos()
    R = 0

def move_down():
    y = player.ycor()
    y -= playerspeed
    if y < 125:
        y = 125
    player.sety(y)
    isGoal()
    isFire()
    isWall()
    S_ = player.pos()
    R = 0

#Create Keyboard Bindings
turtle.listen()
turtle.onkey(move_left, "Left")
turtle.onkey(move_right, "Right")
turtle.onkey(move_up, "Up")
turtle.onkey(move_down, "Down")

def build_q_table(n_states, actions):
    table = pd.DataFrame(
        np.zeros((n_states, len(actions))),  # q_table initial values
        columns=actions,                     # action names
    )
    # print(table)  # show table
    return table


def choose_action(state, q_table):
    # This is how to choose an action
    state_actions = q_table.iloc[state, :]
    # act non-greedily, or randomly when this state's actions all have no value
    if (np.random.uniform() > EPSILON) or ((state_actions == 0).all()):
        action_name = np.random.choice(ACTIONS)
    else:  # act greedily
        # use idxmax instead of argmax, as argmax means a different function
        action_name = state_actions.idxmax()
    return action_name



def get_env_feedback(S, A):
    if A == 'right':
        move_right()
    elif A == 'left':
        move_left()
    elif A == 'up':
        move_up()
    else:  # down
        move_down()
    return S_, R



def update_env(S, episode, step_counter):
    wn = turtle.Screen()
    wn.bgcolor("white")
    wn.title("test")

    """ Create the Grid """

    greg = turtle.Turtle()
    greg.speed(0)

    def create_square(size, color="black"):
        greg.color(color)
        greg.pd()
        for i in range(4):
            greg.fd(size)
            greg.lt(90)
        greg.pu()
        greg.fd(size)

    def row(size, color="black"):
        for i in range(4):
            create_square(size)

    def board(size, color="black"):
        greg.pu()
        greg.goto(-(size*4), (size*4))
        for i in range(3):
            row(size)
            greg.bk(size*4)
            greg.rt(90)
            greg.fd(size)
            greg.lt(90)

    def color_square(start_pos, distance_sq, sq_width, color):
        greg.pu()
        greg.goto(start_pos)
        greg.fd(distance_sq)
        greg.color(color)
        greg.begin_fill()
        for i in range(4):
            greg.fd(sq_width)
            greg.lt(90)
        greg.end_fill()
        greg.pu()

    def initiate_grid():
        board(50)
        color_square((-200, 200), 150, 50, color="green")
        color_square((-200, 150), 50, 50, color="black")
        color_square((-200, 150), 150, 50, color="red")
        greg.hideturtle()

    initiate_grid()

    """ Create the token object """

    player = turtle.Turtle()
    player.color("blue")
    player.shape("circle")
    player.penup()
    player.speed(0)
    player.setposition(S)
    player.setheading(90)




def rl():
    possible_states = {0: (-175, 125),
                       1: (-175, 175),
                       2: (-175, 225),
                       3: (-125, 125),
                       4: (-125, 175),
                       5: (-125, 225),
                       6: (-75, 125),
                       7: (-75, 175),
                       8: (-75, 225),
                       9: (-25, 125),
                       10: (-25, 175),
                       11: (-25, 225)}

    inv_possible_states = {v: k for k, v in possible_states.items()}

    # build the q_table
    q_table = build_q_table(len(possible_states), ACTIONS)
    for episode in range(MAX_EPISODES):
        step_counter = 0
        which_state = 0
        S = possible_states[which_state]
        is_terminated = False
        update_env(S, episode, step_counter)
        while not is_terminated:

            A = choose_action(S, q_table)
            # take action & get next state and reward
            S_, R = get_env_feedback(S, A)
            q_predict = q_table.loc[S, A]
            if S_ != 'terminal':
                S_ = inv_possible_states[S_]
                # next state is not terminal
                q_target = R + GAMMA * q_table.iloc[S_, :].max()
            else:
                q_target = R  # next state is terminal
                is_terminated = True  # terminate this episode

            q_table.loc[S, A] += ALPHA * (q_target - q_predict)  # update
            S = S_  # move to next state

            update_env(S, episode, step_counter+1)
            step_counter += 1
    return q_table



rl()

Best Answer

Short answer: you are mixing up screen coordinates with the environment's 12 states.

Long answer: the problem occurs when A = choose_action(S, q_table) is called and the choose_action method runs this line:

state_actions = q_table.iloc[state, :]

The IndexingError: Too many indexers is pandas telling you that the key passed to q_table.iloc has more indexers than the table has axes: the coordinate pair is treated as two indexers where a single integer row position is expected.

If you print out the state variable being passed into the choose_action function, you get:

(-175, 125)

But that makes no sense. If you print the entire Q-table just before the error occurs, you will see these values:

    left  right  down   up
0    0.0    0.0   0.0  0.0
1    0.0    0.0   0.0  0.0
2    0.0    0.0   0.0  0.0
3    0.0    0.0   0.0  0.0
4    0.0    0.0   0.0  0.0
5    0.0    0.0   0.0  0.0
6    0.0    0.0   0.0  0.0
7    0.0    0.0   0.0  0.0
8    0.0    0.0   0.0  0.0
9    0.0    0.0   0.0  0.0
10   0.0    0.0   0.0  0.0
11   0.0    0.0   0.0  0.0

The values are all zero because nothing has been learned yet. But when state equals (-175, 125), your code tries to access q_table.iloc[state, :], which makes no sense!
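You can reproduce the failure in isolation, away from the turtle code. The snippet below is a minimal reproduction I wrote for illustration (it is not part of your program); with the pandas version shown in your traceback it raises the same error:

import numpy as np
import pandas as pd

q_table = pd.DataFrame(np.zeros((12, 4)),
                       columns=['left', 'right', 'down', 'up'])

print(q_table.iloc[0, :])     # fine: row 0 (an integer state index), all actions
q_table.iloc[(-175, 125), :]  # IndexingError: Too many indexers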

The value you pass into the choose_action method should correspond to one of the twelve states of the environment, which are represented in the q_table by the integers 0 through 11.

The problem appears to be caused by this line:

S = possible_states[which_state]

☝️ That line in the rl method changes the value of S to (-175, 125). If S is meant to represent which environment state the agent is in, then S should always be an integer between 0 and 11 (inclusive).

You need to keep the screen positions that turtle graphics draws cleanly separated from the 12 environment states the agent explores. turtle knows nothing about the environment states stored in the q_table, and the q_table knows nothing about which screen coordinates turtle uses to draw the squares.
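As a rough sketch of one way to keep them separated (my own illustration, not a drop-in fix; the helper names state_to_pos and pos_to_state are hypothetical): let the learning loop work only with integer state indices, and translate to and from coordinates only at the drawing boundary.

POSSIBLE_STATES = {0: (-175, 125), 1: (-175, 175), 2: (-175, 225),
                   3: (-125, 125), 4: (-125, 175), 5: (-125, 225),
                   6: (-75, 125),  7: (-75, 175),  8: (-75, 225),
                   9: (-25, 125), 10: (-25, 175), 11: (-25, 225)}
POS_TO_STATE = {pos: s for s, pos in POSSIBLE_STATES.items()}

def state_to_pos(s):
    # only the turtle-drawing code ever needs coordinates
    return POSSIBLE_STATES[s]

def pos_to_state(pos):
    # turtle's pos() returns floats, so round before looking up
    return POS_TO_STATE[(round(pos[0]), round(pos[1]))]

# Inside the episode loop, S stays an integer the whole time:
#     S = 0                            # start state index
#     A = choose_action(S, q_table)    # q_table.iloc[S, :] is now valid
#     ...move the player with turtle, then map back:
#     S_ = pos_to_state(player.pos())
#     q_predict = q_table.loc[S, A]

With that split, choose_action and the Q-update only ever see integers, and the drawing code only ever sees coordinates via state_to_pos(S).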

Regarding python - Reinforcement learning algorithm using turtle graphics not working, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/50392231/
