- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover and :active? 上具有不同效果的 CSS 动画
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
我在尝试开始训练我的模型(DCGAN)时遇到了很多麻烦。它给了我错误:
'tuple' object has no attribute 'layer'
我读到这可能是因为同时拥有 TensorFlow 1.14.0 版和 Keras 2.2 版或更高版本。
from google.colab import drive
drive.mount('/mntDrive')
import os,sys #os es para gestionar directorios (trabajar con archivos); sys es para trabajar con variables del sistema
# numpy
import numpy as np
# data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from sklearn.model_selection import train_test_split
# Charts
import matplotlib.pyplot as plt
from matplotlib.pyplot import imread
#process
from tqdm import tqdm
# Image IO
from PIL import Image
import skimage.io
import skimage.transform
from skimage.transform import resize
# Deep learning
import tensorflow as tf
from tensorflow.keras import layers
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout, Input
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras import initializers
from __future__ import absolute_import, division, print_function, unicode_literals
# To make sure that we can reproduce the experiment and get the same results
np.random.seed(21)
# The dimension of our random noise vector.
random_dim = 100
from skimage.color import rgb2gray
import scipy.ndimage
import scipy.misc
import re
images = []
for root, dirnames, filenames in os.walk("/mntDrive/My Drive/Colab Notebooks/cubism"):
for filename in filenames:
if re.search("\.(jpg|jpeg|png)$", filename):
filepath = os.path.join(root, filename)
image = plt.imread(filepath, )
image = (image.astype(np.float32) - 127.5)/127.5
image_resized = resize(image, (112, 112))
images.append(image_resized)
images = np.array(images)
print('Original image shape: {}'.format(images.shape))
im_gray = rgb2gray(images)
print('New image shape: {}'.format(im_gray.shape))
images_resized = im_gray.reshape(320,12544)
def get_optimizer():
optimizer=tf.keras.optimizers.Adam(0.001)
return optimizer
def make_generator_model(optimizer):
generator = tf.keras.Sequential()
generator.add(layers.Dense(7*7*256, use_bias=False, input_shape=(random_dim,)))
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
generator.add(layers.Reshape((7, 7, 256)))
assert generator.output_shape == (None, 7, 7, 256) # Note: None is the batch size
generator.add(layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert generator.output_shape == (None, 14, 14, 128)
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
generator.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert generator.output_shape == (None, 28, 28, 64)
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
generator.add(layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert generator.output_shape == (None, 56, 56, 32)
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
generator.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
assert generator.output_shape == (None, 112, 112, 1)
generator.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001))
return generator
def make_discriminator_model(optimizer):
discriminator = tf.keras.Sequential()
discriminator.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[112, 112, 1]))
discriminator.add(layers.LeakyReLU())
discriminator.add(layers.Dropout(0.3))
discriminator.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
discriminator.add(layers.LeakyReLU())
discriminator.add(layers.Dropout(0.3))
discriminator.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same'))
discriminator.add(layers.LeakyReLU())
discriminator.add(layers.Dropout(0.3))
discriminator.add(layers.Flatten())
discriminator.add(layers.Dense(1, activation='sigmoid' ))
discriminator.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001))
return discriminator
def get_gan_network(discriminator, random_dim, generator, optimizer):
# We initially set trainable to False since we only want to train either the
# generator or discriminator at a time
discriminator.trainable = False
# gan input (noise) will be 100-dimensional vectors
gan_input = Input(shape=(random_dim,))
# the output of the generator (an image)
x = generator(gan_input)
# get the output of the discriminator (probability if the image is real or not)
gan_output = discriminator(x)
gan = Model(inputs=gan_input, outputs=gan_output)
gan.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001))
return gan
def plot_generated_images(epoch, generator, examples=64, dim=(10, 10), figsize=(100, 100)):
noise = np.random.normal(0, 1, size=[examples, random_dim]) #mean, std deviation, size
generated_images = generator.predict(noise)
generated_images = generated_images.reshape(examples, 112, 112)
plt.figure(figsize=figsize)
for i in range(generated_images.shape[0]):
plt.subplot(dim[0], dim[1], i+1)
plt.imshow(generated_images[i], interpolation='nearest', cmap='gray_r')
plt.axis('off')
plt.tight_layout()
plt.savefig('dcgan_generated_Originals_2_epoch_%d.png' % epoch)
#you can create a function which will save your generated images every 20 epochs
# Create a wall of generated MNIST images
def train(epochs=15000, batch_size=80):
# Get the training and testing data
images_resized
# Split the training data into batches of size 80
batch_count = images_resized.shape[0] // batch_size
# Build our GAN network
optimizer=get_optimizer()
generator = make_generator_model(optimizer)
discriminator = make_discriminator_model(optimizer)
gan = get_gan_network(discriminator, random_dim, generator, optimizer)
for e in range(1, epochs+1):
print ('-'*15, 'Epoch %d' % e, '-'*15)
for _ in tqdm(range(batch_count)):
# Get a random set of input noise and images
noise = np.random.normal(0, 1, size=[batch_size, random_dim])
image_batch = images_resized[np.random.randint(0, images_resized.shape[0], size=batch_size)]
# Generate fake MNIST images
generated_images = generator.predict(noise)
X = np.concatenate([image_batch, generated_images]) #128x4096 --- 'a' x 4096
# Labels for generated and real data
y_dis = np.zeros(2*batch_size)
y_dis[:batch_size] = 0.9 # One-sided label smoothing. Se refiere a que la mitad de y_dis seran 0.9 (reales) y la otra mitad seran 0 (falsos)
# Train discriminator
discriminator.trainable = True
discriminator.train_on_batch(X, y_dis) #Se entrena el Discriminador con dos vectores: X(tiene un batch(128) imagenes reales y un batch de imagenes generadas); y_dis(tiene un batch de 128 0.9(serian las reales) y otro batch de 128 ceros(serian las generadas). De esta manera, el algoritmo generador, aunque las genere bien, va a seguir optimizandose el numero de epochs que haga falta(seguimos en la fase de entrenamiento), ya que sus imagenes son continuamente rechazadas, comparandose con las reales y sacando continuamente diferencias. )
#Entra X(imagenes reales, imagenes generadas)--->y_dis(0.9 , 0)
#train_on_batch(x,y)---> realiza una actualizacion del gradiente en un batch----> x=array de training data (si el modelo tiene varias entradas pueden ser varios); y= array de target data (si el modelo tiene varias salidas pueden ser varios)
# Train generator
noise = np.random.normal(0, 1, size=[batch_size, random_dim])
y_gen = np.ones(batch_size)
discriminator.trainable = False
gan.train_on_batch(noise, y_gen) #Entra noise(pixeles desordenados)---->y_gen(todo son 1)
if e == 1 or e % 50 == 0:
plot_generated_images(e, generator)
if __name__ == '__main__':
train(30000, 80)
这是输出:
Drive already mounted at /mntDrive; to attempt to forcibly remount, call drive.mount("/mntDrive", force_remount=True).
Original image shape: (320, 112, 112, 3)
New image shape: (320, 112, 112)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_impl.py:180: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:68: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:507: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-18-6e3c9ece87ff> in <module>()
196
197 if __name__ == '__main__':
--> 198 train(30000, 80)
11 frames
<ipython-input-18-6e3c9ece87ff> in train(epochs, batch_size)
161 generator = make_generator_model(optimizer)
162 discriminator = make_discriminator_model(optimizer)
--> 163 gan = get_gan_network(discriminator, random_dim, generator, optimizer)
164
165 for e in range(1, epochs+1):
<ipython-input-18-6e3c9ece87ff> in get_gan_network(discriminator, random_dim, generator, optimizer)
128 gan_input = Input(shape=(random_dim,))
129 # the output of the generator (an image)
--> 130 x = generator(gan_input)
131 # get the output of the discriminator (probability if the image is real or not)
132 gan_output = discriminator(x)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
632 outputs = base_layer_utils.mark_as_return(outputs, acd)
633 else:
--> 634 outputs = call_fn(inputs, *args, **kwargs)
635
636 except TypeError as e:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/sequential.py in call(self, inputs, training, mask)
245 if not self.built:
246 self._init_graph_network(self.inputs, self.outputs, name=self.name)
--> 247 return super(Sequential, self).call(inputs, training=training, mask=mask)
248
249 outputs = inputs # handle the corner case where self.layers is empty
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py in call(self, inputs, training, mask)
749 ' implement a `call` method.')
750
--> 751 return self._run_internal_graph(inputs, training=training, mask=mask)
752
753 def compute_output_shape(self, input_shape):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py in _run_internal_graph(self, inputs, training, mask)
891
892 # Compute outputs.
--> 893 output_tensors = layer(computed_tensors, **kwargs)
894
895 # Update tensor_dict.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
661 kwargs.pop('training')
662 inputs, outputs = self._set_connectivity_metadata_(
--> 663 inputs, outputs, args, kwargs)
664 self._handle_activity_regularization(inputs, outputs)
665 self._set_mask_metadata(inputs, outputs, previous_mask)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in _set_connectivity_metadata_(self, inputs, outputs, args, kwargs)
1706 kwargs.pop('mask', None) # `mask` should not be serialized.
1707 self._add_inbound_node(
-> 1708 input_tensors=inputs, output_tensors=outputs, arguments=kwargs)
1709 return inputs, outputs
1710
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in _add_inbound_node(self, input_tensors, output_tensors, arguments)
1793 """
1794 inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
-> 1795 input_tensors)
1796 node_indices = nest.map_structure(lambda t: t._keras_history.node_index,
1797 input_tensors)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
513
514 return pack_sequence_as(
--> 515 structure[0], [func(*x) for x in entries],
516 expand_composites=expand_composites)
517
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
513
514 return pack_sequence_as(
--> 515 structure[0], [func(*x) for x in entries],
516 expand_composites=expand_composites)
517
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in <lambda>(t)
1792 `call` method of the layer at the call that created the node.
1793 """
-> 1794 inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
1795 input_tensors)
1796 node_indices = nest.map_structure(lambda t: t._keras_history.node_index,
AttributeError: 'tuple' object has no attribute 'layer'
AttributeError: 'tuple' object has no attribute 'layer'
最佳答案
您已从 导入图层tensorflow.keras 而您从 导入的其他函数keras .您可以从 keras 导入图层,也可以尝试从 tensorflow.keras 导入其他可能有效的函数。
关于python - “元组”对象没有属性 'layer',我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/58138071/
你能比较一下属性吗 我想禁用文本框“txtName”。有两种方式 使用javascript,txtName.disabled = true 使用 ASP.NET, 哪种方法更好,为什么? 最佳答案 我
Count 属性 返回一个集合或 Dictionary 对象包含的项目数。只读。 object.Count object 可以是“应用于”列表中列出的任何集合或对
CompareMode 属性 设置并返回在 Dictionary 对象中比较字符串关键字的比较模式。 object.CompareMode[ = compare] 参数
Column 属性 只读属性,返回 TextStream 文件中当前字符位置的列号。 object.Column object 通常是 TextStream 对象的名称。
AvailableSpace 属性 返回指定的驱动器或网络共享对于用户的可用空间大小。 object.AvailableSpace object 应为 Drive 
Attributes 属性 设置或返回文件或文件夹的属性。可读写或只读(与属性有关)。 object.Attributes [= newattributes] 参数 object
AtEndOfStream 属性 如果文件指针位于 TextStream 文件末,则返回 True;否则如果不为只读则返回 False。 object.A
AtEndOfLine 属性 TextStream 文件中,如果文件指针指向行末标记,就返回 True;否则如果不是只读则返回 False。 object.AtEn
RootFolder 属性 返回一个 Folder 对象,表示指定驱动器的根文件夹。只读。 object.RootFolder object 应为 Dr
Path 属性 返回指定文件、文件夹或驱动器的路径。 object.Path object 应为 File、Folder 或 Drive 对象的名称。 说明 对于驱动器,路径不包含根目录。
ParentFolder 属性 返回指定文件或文件夹的父文件夹。只读。 object.ParentFolder object 应为 File 或 Folder 对象的名称。 说明 以下代码
Name 属性 设置或返回指定的文件或文件夹的名称。可读写。 object.Name [= newname] 参数 object 必选项。应为 File 或&
Line 属性 只读属性,返回 TextStream 文件中的当前行号。 object.Line object 通常是 TextStream 对象的名称。 说明 文件刚
Key 属性 在 Dictionary 对象中设置 key。 object.Key(key) = newkey 参数 object 必选项。通常是 Dictionary 
Item 属性 设置或返回 Dictionary 对象中指定的 key 对应的 item,或返回集合中基于指定的 key 的&
IsRootFolder 属性 如果指定的文件夹是根文件夹,返回 True;否则返回 False。 object.IsRootFolder object 应为&n
IsReady 属性 如果指定的驱动器就绪,返回 True;否则返回 False。 object.IsReady object 应为 Drive&nbs
FreeSpace 属性 返回指定的驱动器或网络共享对于用户的可用空间大小。只读。 object.FreeSpace object 应为 Drive 对象的名称。
FileSystem 属性 返回指定的驱动器使用的文件系统的类型。 object.FileSystem object 应为 Drive 对象的名称。 说明 可
Files 属性 返回由指定文件夹中所有 File 对象(包括隐藏文件和系统文件)组成的 Files 集合。 object.Files object&n
我是一名优秀的程序员,十分优秀!