
python - TypeError: apply_gradients() got an unexpected keyword argument 'global_step'


After several days of trying to build an RL agent, I finally managed to create its experience, but when I try to train it I get the error below. I have tried everything I could think of: different experiences, changing the step parameters... I am simply out of ideas.

import pyxinput
import time
import cv2
from PIL import ImageGrab
import numpy as np
import keyboard
import tensorflow
import tf_agents
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import torch
#from tf_agents.networks import actor_distribution_network

from tf_agents.policies import random_py_policy

Tensod_spec = tf_agents.specs.BoundedArraySpec(
    (15,),
    dtype=np.float32,
    name="XimputSpecs",
    minimum=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    maximum=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
)

Tensod_spec2 = tf_agents.specs.TensorSpec(
    [440, 600, 1], dtype=tf.int32, name="ScreenSpecs"
)

Tensor_reward_spec = tf_agents.specs.TensorSpec(
    [1, 1], dtype=tf.int32, name="Reward"
)

FromEnv = tf_agents.specs.BoundedTensorSpec(
    shape=(440, 600, 1),
    dtype='uint8',
    name='observation',
    minimum=0,
    maximum=255
)
FromEnv2 = tf_agents.specs.BoundedTensorSpec(
    shape=(1, 440, 600, 1),
    dtype=tf.int32,
    name='observation',
    minimum=0,
    maximum=255
)

fullscreen = [110, 130, 710, 570]

# Grab a 600x440 region of the screen and convert it to a
# single-channel grayscale observation of shape (440, 600, 1).
screenpil = ImageGrab.grab(bbox=fullscreen)
showprint = np.array(screenpil)
grayscreen = cv2.cvtColor(showprint, cv2.COLOR_BGR2GRAY)
screenrect = cv2.cvtColor(grayscreen, cv2.COLOR_GRAY2BGR)
grayscreen = grayscreen.reshape(440, 600, 1)

time_step_spec2 = tf_agents.trajectories.time_step.time_step_spec(
    observation_spec=FromEnv,
    #reward_spec=Tensor_reward_spec
)

time_step_spec = tf_agents.trajectories.time_step.time_step_spec(
    observation_spec=FromEnv,
    #reward_spec=Tensor_reward_spec
)

actor_net = tf_agents.networks.actor_distribution_network.ActorDistributionNetwork(
    input_tensor_spec=FromEnv,
    output_tensor_spec=tf_agents.specs.tensor_spec.from_spec(Tensod_spec),
    activation_fn='relu',
    #conv_layer_params=[(25, 40, 2)],
    fc_layer_params=(50, 25, 15),
    #dtype='int32'
)
print(actor_net)

train_step_counter = tf.dtypes.cast(1, tf.int32)  # note: a constant tensor, not a tf.Variable, and never passed to the agent

optimizer = tf.keras.optimizers.Adam(learning_rate=0.003)

tf_agent = tf_agents.agents.ReinforceAgent(
    time_step_spec=time_step_spec,
    action_spec=tf_agents.specs.tensor_spec.from_spec(Tensod_spec),
    actor_network=actor_net,
    optimizer=optimizer,
    normalize_returns=True,
    #train_step_counter=tf.Variable(1, name="global_step")
)
tf_agent.initialize()

# Add a batch dimension to the observation and build an initial TimeStep.
grayscreen2 = grayscreen
grayscreen2 = grayscreen2.reshape(1, 440, 600, 1)
time_step2 = tf_agents.trajectories.time_step.TimeStep(
    step_type=tf_agents.trajectories.time_step.StepType.FIRST,
    reward=tf.dtypes.cast(1, tf.float32),
    discount=tf.dtypes.cast(1, tf.float32),
    observation=grayscreen2
)

policy_state = tf_agent.policy.get_initial_state(batch_size=1)

policy_step = tf_agent.policy.action(time_step2, policy_state)
print(policy_step)

observe = time_step2.observation
#print(observe.dtype)
#observe = observe.astype(int)
#print(observe.shape)

experience = tf_agents.trajectories.trajectory.Trajectory(
    action=tf.compat.v2.Variable([
        tf.compat.v2.Variable(policy_step.action),
        tf.compat.v2.Variable(policy_step.action),
        tf.compat.v2.Variable(policy_step.action)
    ]),
    reward=tf.compat.v2.Variable([[
        tf.compat.v2.Variable(time_step2.reward),
        tf.compat.v2.Variable(time_step2.reward),
        tf.compat.v2.Variable(time_step2.reward)
    ]]),
    step_type=tf.compat.v2.Variable([[
        tf.compat.v2.Variable(tf_agents.trajectories.time_step.StepType.FIRST),
        tf.compat.v2.Variable(tf_agents.trajectories.time_step.StepType.MID),
        tf.compat.v2.Variable(tf_agents.trajectories.time_step.StepType.LAST)
    ]]),
    observation=tf.compat.v2.Variable([
        tf.compat.v2.Variable(observe),
        tf.compat.v2.Variable(observe),
        tf.compat.v2.Variable(observe)
    ]),
    policy_info=tf_agent.policy.info_spec,
    next_step_type=tf.compat.v2.Variable([[
        tf.compat.v2.Variable(tf_agents.trajectories.time_step.StepType.MID),
        tf.compat.v2.Variable(tf_agents.trajectories.time_step.StepType.LAST),
        tf.compat.v2.Variable(tf_agents.trajectories.time_step.StepType.LAST)
    ]]),
    discount=tf.compat.v2.Variable([[
        tf.dtypes.cast(1, tf.float32),
        tf.dtypes.cast(1, tf.float32),
        tf.dtypes.cast(1, tf.float32)
    ]]),
)

train_loss = tf_agent.train(experience)
print(train_loss)
I get this error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-4dd3966a32b6> in <module>
1 #
----> 2 train_loss = tf_agent.train(experience)
3 print(train_loss)

~\AppData\Local\Programs\Python\Python38\lib\site-packages\tf_agents\agents\tf_agent.py in train(self, experience, weights, **kwargs)
516
517 if self._enable_functions:
--> 518 loss_info = self._train_fn(
519 experience=experience, weights=weights, **kwargs)
520 else:

~\AppData\Local\Programs\Python\Python38\lib\site-packages\tf_agents\utils\common.py in with_check_resource_vars(*fn_args, **fn_kwargs)
183 # We're either in eager mode or in tf.function mode (no in-between); so
184 # autodep-like behavior is already expected of fn.
--> 185 return fn(*fn_args, **fn_kwargs)
186 if not resource_variables_enabled():
187 raise RuntimeError(MISSING_RESOURCE_VARIABLES_ERROR)

~\AppData\Local\Programs\Python\Python38\lib\site-packages\tf_agents\agents\reinforce\reinforce_agent.py in _train(self, experience, weights)
286 self.train_step_counter)
287
--> 288 self._optimizer.apply_gradients(
289 grads_and_vars, global_step=0)
290

TypeError: apply_gradients() got an unexpected keyword argument 'global_step'
What is this global step, where does this error come from, and why can't I train my agent?
Specs:
  • Python 3.8
  • TensorFlow 2.4 (GPU and non-GPU)
  • Windows 10 / Ubuntu

Let me know if you need more information.

EDIT: I tried other agents and they run fine. I have filed this issue on the TensorFlow GitHub, in case anyone runs into the same problem later:
https://github.com/tensorflow/tensorflow/issues/48424

Best Answer

You should try a different optimizer. The optimizers in tf.keras.optimizers do not take global_step as an argument to their apply_gradients function. Instead, use one from tf.compat.v1.train, for example:

    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.003)

Note that this passes the runtime check, but it leaves the step counting without effect: global_step is supposed to receive a Variable whose value gets incremented by 1 each time apply_gradients is called. Here, however, global_step=0 is passed, which has no effect at all, so the train_step_counter you defined above will stay at 0.
Also note that a fix is on the way.
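
To make the difference concrete, here is a minimal sketch (TF 2.x; the variable names are illustrative, not taken from the question) contrasting the two apply_gradients signatures:

    import tensorflow as tf

    w = tf.Variable(1.0)
    with tf.GradientTape() as tape:
        loss = w * w
    grads = tape.gradient(loss, [w])

    # v1 optimizer: accepts global_step and increments it on every call.
    step = tf.Variable(0, dtype=tf.int64, name="global_step")
    v1_opt = tf.compat.v1.train.AdamOptimizer(learning_rate=0.003)
    v1_opt.apply_gradients(zip(grads, [w]), global_step=step)
    print(step.numpy())  # 1

    # Keras optimizer: no global_step parameter; it keeps its own counter
    # in optimizer.iterations instead.
    keras_opt = tf.keras.optimizers.Adam(learning_rate=0.003)
    keras_opt.apply_gradients(zip(grads, [w]))  # fine
    # keras_opt.apply_gradients(zip(grads, [w]), global_step=step)
    # ^ raises the same TypeError as in the question
    print(keras_opt.iterations.numpy())  # 1

This is why the agent's internal call apply_gradients(grads_and_vars, global_step=...) fails with a tf.keras optimizer but is accepted by the tf.compat.v1.train one.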

About python - TypeError: apply_gradients() got an unexpected keyword argument 'global_step', we found a similar question on Stack Overflow: https://stackoverflow.com/questions/67014270/
