I'm trying to apply Q-learning to my custom reinforcement learning environment, which represents energy storage arbitrage (trading electricity with a battery: charging when the price is low and discharging when it rises). The environment works, but I can't get Q-learning applied to it. Below the environment is a script that runs it, but I'm not sure what the state variable should be. Any ideas on how to apply Q-learning to optimize the charge/discharge cycles? The reset function starts on the next day of a dataset of hourly electricity prices. The dataframe is pictured below (the image is not reproduced here).
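As a stand-in for the missing picture, here is a hypothetical dataframe with the two columns the code below relies on, prices and Daynum; the values are made up purely for illustration:

import numpy as np
import pandas as pd

# Hypothetical stand-in for the pictured dataframe: one row per hour,
# with an hourly electricity price and a day counter used to detect
# episode boundaries. All values are fabricated for illustration.
hours = pd.date_range("2021-01-01", periods=7 * 24, freq="H")
df = pd.DataFrame({
    "prices": np.random.uniform(10, 60, size=len(hours)),  # made-up prices
    "Daynum": hours.dayofyear,                             # increments once per day
})
print(df.head())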
import gym
import numpy as np
from gym import spaces

class BatteryEnv(gym.Env):
    def __init__(self, df):
        self.dict_actions = {0: 'discharge', 1: 'charge', 2: 'wait'}
        self.df = df
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(low=0, high=100, shape=(1, 1))
        self.reward_list = []
        self.actual_load_list = []  # observations
        self.SOE_list = []          # state of energy
        self.state_idx = 0          # iteration (hour of the day)
        self.SOE = 0                # state of energy
        self.MAX_charge = 20        # C-rate, roughly
        self.Capacity = 100

    def step(self, action):
        # map the integer action to its name for the load calculation
        str_action = self.dict_actions[action]
        # advance the state index within the episode (1 step = 1 hour)
        self.state_idx += 1
        # calculate the actual load
        if str_action == 'charge' and self.SOE < self.Capacity:
            SOE_charge = np.clip(self.Capacity - self.SOE, 0, self.MAX_charge)
            self.SOE += SOE_charge
            obs = SOE_charge * self.df['prices'][self.state_idx]
        elif str_action == 'discharge' and self.SOE > 0:
            SOE_discharge = np.clip(self.SOE, 0, self.MAX_charge)
            self.SOE -= SOE_discharge
            obs = -SOE_discharge * self.df['prices'][self.state_idx]
        else:
            self.SOE += 0
            obs = 0 * self.df['prices'][self.state_idx]
        # append the actual load for monitoring and comparison
        self.actual_load_list.append(obs)
        self.SOE_list.append(self.SOE)
        # reward system: a positive observation means we spent money,
        # a negative one means we earned money
        if obs < 0:
            reward = 1
        else:
            reward = -1
        # append the current reward for monitoring and comparison
        self.reward_list.append(reward)
        # check whether the episode (one-day interval) has ended
        if self.df.iloc[self.state_idx, :].Daynum != self.df.iloc[self.state_idx - 1].Daynum:
            done = True
        else:
            done = False
        return obs, reward, done

    def reset(self):
        return self.df.iloc[self.state_idx, :]

    def render(self):
        pass
The following code shows that the environment runs.
env = BatteryEnv(df)  # instantiating the environment (assumed; the original snippet omits this step)
for episode in range(7):
    observation = env.reset()
    for t in range(24):  # can't be smaller than 24, as 24 time points equal 1 episode (1 day)
        #print(observation)
        action = env.action_space.sample()  # random actions
        observation, reward, done = env.step(action)
        if done:
            print("Episode finished after {} timesteps".format(t + 1))
            print(observation)
            print(reward)
            break
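The lists the environment accumulates can be inspected after a rollout to sanity-check its behavior. A minimal sketch, assuming matplotlib is available:

import matplotlib.pyplot as plt

# Plot the quantities the environment records for monitoring.
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(env.SOE_list)
ax1.set_ylabel("state of energy")
ax2.plot(env.actual_load_list)
ax2.set_ylabel("actual load (cash flow)")
ax2.set_xlabel("hour")
plt.show()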
Best answer
I think I was able to get the code working with Q-learning. However, the reward and reset functions need some work to perform better.
class BatteryEnv(gym.Env):
    def __init__(self, prices=np.array(df.prices), daynum=np.array(df.Daynum)):
        #self.df = df
        self.prices = prices
        self.daynum = daynum
        self.dict_actions = {0: 'discharge', 1: 'charge', 2: 'wait'}
        self.action_space = spaces.Discrete(3)
        # the observation space is just one float value: the load
        self.observation_space = spaces.Box(low=0, high=100, shape=(1, 1))
        # reward list for monitoring
        self.reward_list = []
        # lists for monitoring
        self.actual_load_list = []
        self.SOE_list = []   # state of energy
        self.chargio = []    # charge & discharge
        self.SOEe = []       # state of energy
        # index of the current state within the current episode
        self.state_idx = 0   # iteration
        self.SOE = 0         # state of energy
        self.MAX_charge = 20 # C-rate, roughly
        self.Capacity = 100
        self.state = 0

    def step(self, action):
        # map the integer action to its name for the load calculation
        str_action = self.dict_actions[action]
        # advance the state index within the episode (day)
        self.state_idx += 1
        # calculate the actual load
        if str_action == 'charge' and self.SOE < self.Capacity:
            SOE_charge = np.clip(self.Capacity - self.SOE, 0, self.MAX_charge)
            self.SOE += SOE_charge      # keep SOE in sync with the tracked state
            self.state += SOE_charge
            self.SOEe.append(self.SOE)
            self.chargio.append(SOE_charge)
            obs = SOE_charge * self.prices[self.state_idx]
        elif str_action == 'discharge' and self.SOE > 0:
            SOE_discharge = np.clip(self.SOE, 0, self.MAX_charge)
            self.SOE -= SOE_discharge   # keep SOE in sync with the tracked state
            self.state -= SOE_discharge
            self.SOEe.append(self.SOE)
            self.chargio.append(-SOE_discharge)
            obs = -SOE_discharge * self.prices[self.state_idx]
        else:
            self.state += 0
            self.chargio.append(0)
            self.SOEe.append(self.SOE)
            obs = 0
        # append the actual load for monitoring and comparison
        self.actual_load_list.append(obs)
        self.SOE_list.append(self.SOE)
        # reward system: a positive observation means we spent money,
        # a negative one means we earned money
        if obs < 0:
            reward = 1
        else:
            reward = -1
        # append the current reward for monitoring and comparison
        self.reward_list.append(reward)
        # check whether the episode (one-day interval) has ended
        if self.daynum[self.state_idx] != self.daynum[self.state_idx - 1]:
            done = True
        else:
            done = False
        info = {
            #'step': self.state_idx,
            'SOE': self.SOE,
            #'reward': reward,
            'chargio': self.chargio,
        }
        return obs, reward, done, info

    def reset(self):
        self.state = 0
        return self.state

    def render(self):
        pass
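As noted above, reset() still needs work: it zeroes self.state but never rewinds state_idx or the battery's SOE, so successive episodes keep walking forward through the price series and can run off its end. One possible fix, a sketch of my own rather than part of the original answer:

# Sketch of a fuller reset (an assumption, not part of the original answer):
# empty the battery and wrap the index around once the price series ends.
def reset(self):
    self.SOE = 0
    self.state = 0
    if self.state_idx >= len(self.prices) - 1:
        self.state_idx = 0  # start over from the first hour of the dataset
    return self.state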
Applying Q-learning:
env = BatteryEnv()
env.reset()
discrete_os_size = [20] * len(env.observation_space.high)
discrete_os_win_size = (env.observation_space.high -
                        env.observation_space.low) / discrete_os_size
discrete_os_win_size  # 20 buckets, each 5 wide

learning_rate = 0.1
discount = 0.95  # measure of how important future rewards are
episodes = 25000

q_table = np.random.uniform(low=-2, high=2, size=(discrete_os_size + [env.action_space.n]))

def get_discrete_state(state):  # change SOE for other states
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    return tuple(discrete_state.astype(int))

discrete_state = get_discrete_state(env.reset())
SOE = []
for episode in range(episodes):
    if episode % 5000 == 0:
        print(episode)
    discrete_state = get_discrete_state(env.reset())
    done = False
    while not done:
        action = np.argmax(q_table[discrete_state])
        new_state, reward, done, _ = env.step(action)
        new_discrete_state = get_discrete_state(new_state)
        if not done:
            max_future_q = np.max(q_table[new_discrete_state])
            current_q = q_table[discrete_state + (action,)]
            new_q = (1 - learning_rate) * current_q + learning_rate * (reward + discount * max_future_q)
            q_table[discrete_state + (action,)] = new_q
        #elif new_state[0] >= env.go:
        discrete_state = new_discrete_state
        SOE.append(new_state)
    print(reward, new_state)
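One more refinement worth trying (my addition, not part of the original answer): the loop above always picks np.argmax, so actions whose randomly initialized Q-values start low may never be explored. An epsilon-greedy variant of the same loop, with epsilon decaying linearly over the first half of training, looks like this:

import numpy as np

# Epsilon-greedy variant of the training loop above (a sketch; the
# original answer always exploits with argmax and never explores).
epsilon = 1.0
epsilon_min = 0.05
epsilon_decay = (epsilon - epsilon_min) / (episodes // 2)  # linear decay

for episode in range(episodes):
    discrete_state = get_discrete_state(env.reset())
    done = False
    while not done:
        if np.random.random() < epsilon:
            action = env.action_space.sample()           # explore
        else:
            action = np.argmax(q_table[discrete_state])  # exploit
        new_state, reward, done, _ = env.step(action)
        new_discrete_state = get_discrete_state(new_state)
        if not done:
            max_future_q = np.max(q_table[new_discrete_state])
            current_q = q_table[discrete_state + (action,)]
            new_q = (1 - learning_rate) * current_q \
                    + learning_rate * (reward + discount * max_future_q)
            q_table[discrete_state + (action,)] = new_q
        discrete_state = new_discrete_state
    epsilon = max(epsilon_min, epsilon - epsilon_decay)  # decay once per episode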
For this question about python - applying Q-learning in a custom environment (python, reinforcement learning, openai), we found a similar question on Stack Overflow: https://stackoverflow.com/questions/68227563/