Basically, pygame.surfarray.pixels3d returns an array of shape (672, 672, 3), and with it I get the error "Resources exhausted", but when I pass a (6, 30, 30) array it works. Any help would be appreciated.
import numpy
import random
from DeepRTS import PyDeepRTS
from Algorithms.DQN2.DQN import DQN
# Start the game
g = PyDeepRTS('21x21-2v2.json')
# Add players
player1 = g.add_player()
player2 = g.add_player()
#player3 = g.add_player()
#player4 = g.add_player()
# Set FPS and UPS limits
g.set_max_fps(10000000)
g.set_max_ups(10000000)
# How often the state should be drawn
g.render_every(20)
# How often the capture function should return a state
g.capture_every(20)
# How often the game image should be drawn to the screen
g.view_every(20)
# Start the game (flag)
g.start()
actions = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
observation = numpy.ndarray(shape=(6,30,30), dtype=float)
flag = 0
player1.do_action(13)
player2.do_action(13)
player1.get_Score()
while flag == 0:
    g.tick()  # Update the game clock
    g.update()  # Process the game state
    g.render()  # Draw the game state to graphics
    state2 = g.capture()
    if state2 is not None:
        dqn = DQN(state2, len(actions))
        flag = 1
# Run forever
i = 0
while True:
    g.tick()  # Update the game clock
    g.update()  # Process the game state
    g.render()  # Draw the game state to graphics
    state2 = g.capture()  # Captures current state (returns None if .capture_every skips this iteration)
    g.caption()  # Show window caption
    g.view()  # View the game state in the pygame window
    if state2 is not None and flag == 1:
        actionID = dqn.act()
        # If the game is in a terminal state
        terminal = g.is_terminal()
        reward_ = player1.get_Score()
        player1.do_action(actionID)
        player2.do_action(numpy.random.randint(0, 19))
        dqn.train(actionID, reward_, terminal, state2)
        if g.is_terminal():
            print("finished")
            g.reset()
        print(actionID, " Reward", reward_)
    i += 1
# This is the DQN algorithm
import os
import random
import numpy as np
import tensorflow as tf
from collections import deque
from skimage.color import rgb2gray
from skimage.transform import resize
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense
from keras import backend as K
K.set_image_dim_ordering('th')
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
class DQN:
    def __init__(self,
                 initial_state,
                 num_actions,
                 initial_epsilon=1.0,
                 final_epsilon=0.1,
                 exploration_steps=10000,
                 initial_replay_size=10,
                 memory_size=400000,
                 batch_size=9,  # 32
                 learning_rate=0.0025,
                 momentum=0.95,
                 min_grad=0.01,
                 env_name="DeepRTS",
                 save_network_path="dqn2/saved_networks/",
                 save_summary_path="dqn2/summary/",
                 load_network=False,
                 gamma=0.99,
                 train_interval=40,
                 target_update_interval=1000,
                 save_interval=30000
                 ):
        self.state = initial_state
        self.sshape = initial_state.shape  # Shape of the state
        self.num_actions = num_actions  # Action space
        self.epsilon = initial_epsilon  # Epsilon-greedy start
        self.final_epsilon = final_epsilon  # Epsilon-greedy end
        self.epsilon_step = (self.epsilon - self.final_epsilon) / exploration_steps  # Epsilon decrease step
        self.initial_replay_size = initial_replay_size
        self.memory_size = memory_size
        self.exploration_steps = exploration_steps
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.min_grad = min_grad
        self.batch_size = batch_size
        self.gamma = gamma
        self.target_update_interval = target_update_interval
        self.save_interval = save_interval
        self.env_name = env_name
        self.save_network_path = save_network_path + self.env_name
        self.save_summary_path = save_summary_path + self.env_name
        self.load_network = load_network
        self.train_interval = train_interval
        self.t = 0  # TODO
        # Summary parameters
        self.total_reward = 0
        self.total_q_max = 0
        self.total_loss = 0
        self.duration = 0
        self.episode = 0
        # Replay memory
        self.replay_memory = deque()
        # Create Q network
        self.s, self.q_values, q_network = self.build_model()
        q_network_weights = q_network.trainable_weights
        # Create target network
        self.st, self.target_q_values, target_network = self.build_model()
        target_network_weights = target_network.trainable_weights
        # Define target network update operation
        self.update_target_network = [target_network_weights[i].assign(q_network_weights[i]) for i in range(len(target_network_weights))]
        # Define loss and gradient update operation
        self.a, self.y, self.loss, self.grads_update = self.build_functions(q_network_weights)
        self.sess = tf.InteractiveSession()
        self.saver = tf.train.Saver(q_network_weights)
        self.summary_placeholders, self.update_ops, self.summary_op = self.setup_summary()
        self.summary_writer = tf.summary.FileWriter(self.save_summary_path, self.sess.graph)
        if not os.path.exists(self.save_network_path):
            os.makedirs(self.save_network_path)
        self.sess.run(tf.global_variables_initializer())
        # Load network
        self.load()
        # Initialize target network
        self.sess.run(self.update_target_network)
    def build_model(self):
        model = Sequential()
        model.add(Conv2D(32, (1, 1), strides=(1, 1), activation='relu', input_shape=self.sshape))
        model.add(Conv2D(64, (1, 1), activation="relu", strides=(1, 1)))
        model.add(Conv2D(64, (1, 1), activation="relu", strides=(1, 1)))
        model.add(Flatten())
        model.add(Dense(512, activation='relu'))
        model.add(Dense(self.num_actions))
        s = tf.placeholder(tf.float32, [None, *self.sshape])
        q_values = model(s)
        return s, q_values, model
    def build_functions(self, q_network_weights):
        a = tf.placeholder(tf.int64, [None])
        y = tf.placeholder(tf.float32, [None])
        # Convert action to one-hot vector
        a_one_hot = tf.one_hot(a, self.num_actions, 1.0, 0.0)
        q_value = tf.reduce_sum(tf.multiply(self.q_values, a_one_hot), reduction_indices=1)
        # Clip the error: the loss is quadratic when the error is in (-1, 1) and linear outside of that region
        error = tf.abs(y - q_value)
        quadratic_part = tf.clip_by_value(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = tf.reduce_mean(0.5 * tf.square(quadratic_part) + linear_part)
        optimizer = tf.train.RMSPropOptimizer(self.learning_rate, momentum=self.momentum, epsilon=self.min_grad)
        grads_update = optimizer.minimize(loss, var_list=q_network_weights)
        return a, y, loss, grads_update
    def new_episode(self):
        pass
    def end_episode(self):
        pass
    def act(self):
        if self.epsilon >= random.random() or self.t < self.initial_replay_size:
            action = random.randrange(self.num_actions)
        else:
            action = np.argmax(self.q_values.eval(feed_dict={self.s: [np.float32(self.state)]}))
        # Anneal epsilon linearly over time
        if self.epsilon > self.final_epsilon and self.t >= self.initial_replay_size:
            self.epsilon -= self.epsilon_step
        return action
    def train_network(self):
        state_batch = []
        action_batch = []
        reward_batch = []
        next_state_batch = []
        terminal_batch = []
        y_batch = []
        # Sample a random minibatch of transitions from replay memory
        minibatch = random.sample(self.replay_memory, self.batch_size)
        for data in minibatch:
            state_batch.append(data[0])
            action_batch.append(data[1])
            reward_batch.append(data[2])
            next_state_batch.append(data[3])
            terminal_batch.append(data[4])
        # Convert True to 1, False to 0
        terminal_batch = np.array(terminal_batch) + 0
        target_q_values_batch = self.target_q_values.eval(feed_dict={self.st: np.float32(np.array(next_state_batch))})
        y_batch = reward_batch + (1 - terminal_batch) * self.gamma * np.max(target_q_values_batch, axis=1)
        loss, _ = self.sess.run([self.loss, self.grads_update], feed_dict={
            self.s: np.float32(np.array(state_batch)),
            self.a: action_batch,
            self.y: y_batch
        })
        self.total_loss += loss
    def train(self, action, reward, terminal, observation):
        """
        action      - The performed action which led to this state
        reward      - The reward given for the state transition
        terminal    - Is the state terminal? (loss / victory)
        observation - New state observation after the action
        """
        next_state = np.append(self.state[1:, :, :], observation, axis=0)
        # Clip all positive rewards at 1 and all negative rewards at -1, leaving 0 rewards unchanged
        reward = np.clip(reward, -1, 1)
        # Store the transition in replay memory
        self.replay_memory.append((self.state, action, reward, self.state, terminal))
        if len(self.replay_memory) > self.memory_size:
            self.replay_memory.popleft()
        if self.t >= self.initial_replay_size:
            # Train network
            if self.t % self.train_interval == 0:
                self.train_network()
            # Update target network
            if self.t % self.target_update_interval == 0:
                self.sess.run(self.update_target_network)
            # Save network
            if self.t % self.save_interval == 0:
                save_path = self.saver.save(self.sess, self.save_network_path + '/' + self.env_name, global_step=self.t)
                print('Successfully saved: ' + save_path)
        self.total_reward += reward
        self.total_q_max += np.max(self.q_values.eval(feed_dict={self.s: [np.float32(self.state)]}))
        self.duration += 1
        if terminal:
            # Write summary
            if self.t >= self.initial_replay_size:
                stats = [self.total_reward, self.total_q_max / float(self.duration), self.duration, self.total_loss / (float(self.duration) / float(self.train_interval))]
                for i in range(len(stats)):
                    self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(stats[i])})
                summary_str = self.sess.run(self.summary_op)
                self.summary_writer.add_summary(summary_str, self.episode + 1)
            # Debug
            if self.t < self.initial_replay_size:
                mode = 'random'
            elif self.initial_replay_size <= self.t < self.initial_replay_size + self.exploration_steps:
                mode = 'explore'
            else:
                mode = 'exploit'
            print('EPISODE: {0:6d} / TIMESTEP: {1:8d} / DURATION: {2:5d} / EPSILON: {3:.5f} / TOTAL_REWARD: {4:3.0f} / AVG_MAX_Q: {5:2.4f} / AVG_LOSS: {6:.5f} / MODE: {7}'.format(self.episode + 1, self.t, self.duration, self.epsilon, self.total_reward, self.total_q_max / float(self.duration), self.total_loss / (float(self.duration) / float(self.train_interval)), mode))
            self.total_reward = 0
            self.total_q_max = 0
            self.total_loss = 0
            self.duration = 0
            self.episode += 1
        self.t += 1
    def iterate(self):
        pass
    def load(self):
        checkpoint = tf.train.get_checkpoint_state(self.save_network_path)
        if self.load_network and checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
            print('Successfully loaded: ' + checkpoint.model_checkpoint_path)
        else:
            print('Training new network...')
    def setup_summary(self):
        episode_total_reward = tf.Variable(0.)
        tf.summary.scalar(self.env_name + '/Total Reward/Episode', episode_total_reward)
        episode_avg_max_q = tf.Variable(0.)
        tf.summary.scalar(self.env_name + '/Average Max Q/Episode', episode_avg_max_q)
        episode_duration = tf.Variable(0.)
        tf.summary.scalar(self.env_name + '/Duration/Episode', episode_duration)
        episode_avg_loss = tf.Variable(0.)
        tf.summary.scalar(self.env_name + '/Average Loss/Episode', episode_avg_loss)
        summary_vars = [episode_total_reward, episode_avg_max_q, episode_duration, episode_avg_loss]
        summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]
        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
        summary_op = tf.summary.merge_all()
        return summary_placeholders, update_ops, summary_op
Error:
2019-07-07 02:58:55.652029: W tensorflow/core/common_runtime/bfc_allocator.cc:319] *******************************************************************************____________________*
2019-07-07 02:58:55.652085: W tensorflow/core/framework/op_kernel.cc:1502] OP_REQUIRES failed at assign_op.h:117 : Resource exhausted: OOM when allocating tensor with shape[409600,512] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
Traceback (most recent call last):
Best answer

When working with CNNs, if you run into an Out Of Memory (OOM) error, you can try the following steps:

1. Reduce the size of the mini-batch, as Priyanka Chaudhary mentioned above.
2. Replace 32-bit floats with 16-bit floats (if the values fit within that range).
3. Reduce the image dimensions/shape, and with it the number of parameters, which in turn reduces the RAM consumed.
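For a sense of scale (my own back-of-the-envelope arithmetic, not part of the original answer): a single float32 tensor with the shape reported in the OOM message already needs roughly 0.78 GiB, and the weight matrix of a fully connected layer such as the Dense(512) after Flatten() grows with the flattened feature-map size, i.e. with the input area.

# One float32 tensor of shape [409600, 512], as in the OOM message above:
print(409600 * 512 * 4 / 2**30, "GiB")  # 0.78125 GiB for this single allocation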
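A minimal, standalone sketch of the 16-bit float suggestion, using the Keras backend API that the code above already imports. This is an illustration rather than a drop-in fix: in the asker's build_model the tf.placeholder dtypes would also have to be changed to tf.float16 so the graph type-checks.

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
K.set_floatx('float16')  # newly created weights/activations default to 16-bit floats (half the memory)
K.set_epsilon(1e-4)      # a larger epsilon helps avoid underflow in half precision
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(100,)))
model.add(Dense(16))
print(model.layers[0].kernel.dtype)  # reports a float16 variable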
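And a sketch of the dimensions/shape suggestion: downscale the captured frame before it ever reaches the DQN. The preprocess helper and the 84x84 target are my own hypothetical choices (rgb2gray and resize are already imported in the DQN file); the point is only that a smaller, single-channel state shrinks the Flatten output and hence the Dense(512) weight matrix.

import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize
def preprocess(frame, size=(84, 84)):
    # (H, W, 3) RGB frame -> (1, 84, 84) grayscale plane, channels-first to match the 'th' ordering above
    gray = rgb2gray(frame)      # (H, W) floats in [0, 1]
    small = resize(gray, size)  # downscale to the target size
    return np.expand_dims(small, axis=0).astype(np.float32)
# Hypothetical usage in the game loop above:
# state2 = g.capture()
# if state2 is not None:
#     dqn = DQN(preprocess(state2), len(actions))  # and likewise pass preprocess(state2) to dqn.train(...)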
Regarding "python - How to change the image dimensions so that my convolution algorithm works", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/56916896/