- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover and :active? 上具有不同效果的 CSS 动画
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
我有这段代码,但应用预测时出现错误?
import pandas as pd
import numpy as np
import sklearn
import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.layers import Conv2D,Conv1D, MaxPooling2D,MaxPooling1D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout,BatchNormalization
dataset=pd.read_csv("C:/Users/User/Desktop/data.csv",encoding='cp1252')
dataset.shape
#output:(53480, 37)
array = dataset.values
X = array[:,0:36]
Y = array[:,36]
kf = KFold(n_splits=10)
kf.get_n_splits(X)
ACC_array = np.array([])
sensitivity_array = np.array([])
specificity_array = np.array([])
for trainindex, testindex in kf.split(X):
Xtrain, Xtest = X[trainindex], X[testindex]
Ytrain, Ytest = Y[trainindex], Y[testindex]
Xtrain = np.expand_dims(np.random.normal(size=(53480, 36)),axis=-1)
Ytrain = np.random.choice([0,1], size=(53480,10))
n_timesteps, n_features, n_outputs =Xtrain.shape[0], Xtrain.shape[1], Ytrain.shape[1]
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=1,
activation='relu',input_shape=(n_features,1)))
model.add(Conv1D(filters=64, kernel_size=1, activation='relu'))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(n_outputs, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit network
model.fit(Xtrain, Ytrain, epochs=10, batch_size=128, verbose=1)
model.summary()
#output:
Model: "sequential"
.
.
Predictions = model.predict(Xtest,batch_size =1024)
rounded = [round(x[0]) for x in Predictions]
Y_predection = pd.DataFrame(rounded)
Y_predection = Y_predection.iloc[:, 0]
错误消息:
TypeError Traceback (most recent call last)
<ipython-input-16-67624699b454> in <module>
----> 1 Predictions = model.predict(Xtest,batch_size =1024)
2 rounded = [round(x[0]) for x in Predictions]
3 Y_predection = pd.DataFrame(rounded)
4 Y_predection = Y_predection.iloc[:, 0]
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
907 max_queue_size=max_queue_size,
908 workers=workers,
--> 909 use_multiprocessing=use_multiprocessing)
910
911 def reset_metrics(self):
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in predict(self, model, x, batch_size, verbose, steps, callbacks, **kwargs)
460 return self._model_iteration(
461 model, ModeKeys.PREDICT, x=x, batch_size=batch_size,
verbose=verbose,
--> 462 steps=steps, callbacks=callbacks, **kwargs)
463
464
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _model_iteration(self, model, mode, x, y, batch_size, verbose, sample_weight, steps, callbacks, **kwargs)
442 mode=mode,
443 training_context=training_context,
--> 444 total_epochs=1)
445 cbks.make_logs(model, epoch_logs, result, mode)
446
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
121 step=step, mode=mode, size=current_batch_size) as batch_logs:
122 try:
--> 123 batch_outs = execution_function(iterator)
124 except (StopIteration, errors.OutOfRangeError):
125 # TODO(kaftan): File bug about tf function and
errors.OutOfRangeError?
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in execution_function(input_fn)
84 # `numpy` translates Tensors to values in Eager mode.
85 return nest.map_structure(_non_none_constant_value,
---> 86 distributed_function(input_fn))
87
88 return execution_function
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\def_function.py in __call__(self, *args, **kwds)
455
456 tracing_count = self._get_tracing_count()
--> 457 result = self._call(*args, **kwds)
458 if tracing_count == self._get_tracing_count():
459 self._call_counter.called_without_tracing()
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\def_function.py in _call(self, *args, **kwds)
501 # This is the first call of __call__, so we have to initialize.
502 initializer_map = object_identity.ObjectIdentityDictionary()
--> 503 self._initialize(args, kwds, add_initializers_to=initializer_map)
504 finally:
505 # At this point we know that the initialization is complete (or
less
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
406 self._concrete_stateful_fn = (
407
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 408 *args, **kwds))
409
410 def invalid_creator_scope(*unused_args, **unused_kwds):
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1846 if self.input_signature:
1847 args, kwargs = None, None
-> 1848 graph_function, _, _ = self._maybe_define_function(args, kwargs)
1849 return graph_function
1850
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\eager\function.py in
_maybe_define_function(self, args, kwargs)
2148 graph_function = self._function_cache.primary.get(cache_key,
None)
2149 if graph_function is None:
-> 2150 graph_function = self._create_graph_function(args, kwargs)
2151 self._function_cache.primary[cache_key] = graph_function
2152 return graph_function, args, kwargs
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\eager\function.py in
_create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2039 arg_names=arg_names,
2040 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2041 capture_by_value=self._capture_by_value),
2042 self._function_attributes,
2043 # Tell the ConcreteFunction to clean up its graph once it goes
out of
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
913 converted_func)
914
--> 915 func_outputs = python_func(*func_args, **func_kwargs)
916
917 # invariant: `func_outputs` contains only Tensors,
CompositeTensors,
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\def_function.py in wrapped_fn(*args, **kwds)
356 # __wrapped__ allows AutoGraph to swap in a converted function. We give
357 # the function a weak reference to itself to avoid a reference
cycle.
--> 358 return weak_wrapped_fn().__wrapped__(*args, **kwds)
359 weak_wrapped_fn = weakref.ref(wrapped_fn)
360
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in distributed_function(input_iterator)
71 strategy = distribution_strategy_context.get_strategy()
72 outputs = strategy.experimental_run_v2(
---> 73 per_replica_function, args=(model, x, y, sample_weights))
74 # Out of PerReplica outputs reduce or pick values to return.
75 all_outputs = dist_utils.unwrap_output_dict(
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
758 fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
759 convert_by_default=False)
--> 760 return self._extended.call_for_each_replica(fn, args=args,
kwargs=kwargs)
761
762 def reduce(self, reduce_op, value, axis):
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\distribute\distribute_lib.py in
call_for_each_replica(self, fn, args, kwargs)
1785 kwargs = {}
1786 with self._container_strategy().scope():
-> 1787 return self._call_for_each_replica(fn, args, kwargs)
1788
1789 def _call_for_each_replica(self, fn, args, kwargs):
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in
_call_for_each_replica(self, fn, args, kwargs)
2130 self._container_strategy(),
2131 replica_id_in_sync_group=constant_op.constant(0,
dtypes.int32)):
-> 2132 return fn(*args, **kwargs)
2133
2134 def _reduce_to(self, reduce_op, value, destinations):
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\autograph\impl\api.py in wrapper(*args, **kwargs)
290 def wrapper(*args, **kwargs):
291 with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292 return func(*args, **kwargs)
293
294 if inspect.isfunction(func) or inspect.ismethod(func):
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in _predict_on_batch(***failed resolving arguments***)
160 def _predict_on_batch(model, x, y=None, sample_weights=None):
161 del y, sample_weights
--> 162 return predict_on_batch(model, x)
163
164 func = _predict_on_batch
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in predict_on_batch(model, x)
368
369 with backend.eager_learning_phase_scope(0):
--> 370 return model(inputs) # pylint: disable=not-callable
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
845 outputs = base_layer_utils.mark_as_return(outputs, acd)
846 else:
--> 847 outputs = call_fn(cast_inputs, *args, **kwargs)
848
849 except errors.OperatorNotAllowedInGraphError as e:
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\sequential.py in call(self, inputs, training, mask)
254 if not self.built:
255 self._init_graph_network(self.inputs, self.outputs, name=self.name)
--> 256 return super(Sequential, self).call(inputs, training=training, mask=mask)
257
258 outputs = inputs # handle the corner case where self.layers is empty
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\network.py in call(self, inputs, training, mask)
706 return self._run_internal_graph(
707 inputs, training=training, mask=mask,
--> 708 convert_kwargs_to_constants=base_layer_utils.call_context().saving)
709
710 def compute_output_shape(self, input_shape):
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\network.py in _run_internal_graph(self, inputs, training, mask, convert_kwargs_to_constants)
858
859 # Compute outputs.
--> 860 output_tensors = layer(computed_tensors, **kwargs)
861
862 # Update tensor_dict.
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
845 outputs = base_layer_utils.mark_as_return(outputs, acd)
846 else:
--> 847 outputs = call_fn(cast_inputs, *args, **kwargs)
848
849 except errors.OperatorNotAllowedInGraphError as e:
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\layers\convolutional.py in call(self, inputs)
385 if self.padding == 'causal':
386 inputs = array_ops.pad(inputs, self._compute_causal_padding())
--> 387 return super(Conv1D, self).call(inputs)
388
389
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\layers\convolutional.py in call(self, inputs)
195
196 def call(self, inputs):
--> 197 outputs = self._convolution_op(inputs, self.kernel)
198
199 if self.use_bias:
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\ops\nn_ops.py in __call__(self, inp,
filter)
1132 call_from_convolution=False)
1133 else:
-> 1134 return self.conv_op(inp, filter)
1135 # copybara:strip_end
1136 # copybara:insert return self.conv_op(inp, filter)
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\nn_ops.py in __call__(self, inp, filter)
637
638 def __call__(self, inp, filter): # pylint: disable=redefined-
builtin
--> 639 return self.call(inp, filter)
640
641
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\nn_ops.py in __call__(self, inp, filter)
236 padding=self.padding,
237 data_format=self.data_format,
--> 238 name=self.name)
239
240
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\nn_ops.py in _conv1d(self, input, filter, strides, padding, data_format, name)
225 padding=padding,
226 data_format=data_format,
--> 227 name=name)
228
229 # pylint: enable=redefined-builtin
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
572 func.__module__, arg_name, arg_value, 'in a future version'
573 if date is None else ('after %s' % date), instructions)
--> 574 return func(*args, **kwargs)
575
576 doc = _add_deprecated_arg_value_notice_to_docstring(
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\util\deprecation.py in new_func(*args,
**kwargs)
572 func.__module__, arg_name, arg_value, 'in a future
version'
573 if date is None else ('after %s' % date),
instructions)
--> 574 return func(*args, **kwargs)
575
576 doc = _add_deprecated_arg_value_notice_to_docstring(
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\ops\nn_ops.py in conv1d(value, filters,
stride, padding, use_cudnn_on_gpu, data_format, name, input, dilations)
1679 data_format=data_format,
1680 dilations=dilations,
-> 1681 name=name)
1682 return array_ops.squeeze(result, [spatial_start_dim])
1683
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\ops\gen_nn_ops.py in conv2d(input, filter,
strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format,
dilations, name)
1068 padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu,
1069 explicit_paddings=explicit_paddings,
-> 1070 data_format=data_format, dilations=dilations,
name=name)
1071 _result = _op.outputs[:]
1072 _inputs_flat = _op.inputs
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\framework\op_def_library.py in
_apply_op_helper(self, op_type_name, name, **keywords)
629 _SatisfiesTypeConstraint(base_type,
630 _Attr(op_def,
input_arg.type_attr),
--> 631 param_name=input_name)
632 attrs[input_arg.type_attr] = attr_value
633 inferred_from[input_arg.type_attr] = input_name
~\.conda\envs\tensorflow\lib\site-
packages\tensorflow_core\python\framework\op_def_library.py in
_SatisfiesTypeConstraint(dtype, attr_def, param_name)
58 "allowed values: %s" %
59 (param_name, dtypes.as_dtype(dtype).name,
---> 60 ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
61
62
TypeError: Value passed to parameter 'input' has DataType int64 not in list of allowed values: float16, bfloat16, float32, float64
我想将一维CNN应用于表格数据(数值),如何通过交叉验证来评估模型来计算(准确度、灵敏度、特异性) ,如何修复上面的错误? 或如何通过使用 70% 进行训练和 30% 进行测试来计算混淆指标?
最佳答案
错误发生在以下行:
Predictions = model.predict(Xtest,batch_size =1024)
因为在你定义之后:
Xtrain, Xtest = X[trainindex], X[testindex]
你没有对 Xtest
进行维度扩展就像Xtrain
的随机重新定义(我猜这只是为了一些测试目的):
Xtrain = np.expand_dims(np.random.normal(size=(213412, 36)),axis=-1)
所以Xtrain
具有正确的 3D 形状 (?, 36, 1),与您使用 input_shape=(n_features,1)
定义的输入大小完全相同在您的代码中:
model.add(Conv1D(filters=64, kernel_size=1,
activation='relu',input_shape=(n_features,1)))
同时Xtest
仍然是二维数据,即形状为(?, 36)或完全(5348, 36)。这就是您的模型提示输入形状的原因。
因此做 np.expand_dims()
上Xtest
也有:
Xtest = np.expand_dims(Xtest, axis=-1)
更新:
正如我在您后来的评论中看到的,您不明白为什么“准确率逐渐下降,并且损失函数不断增加,差异显着”。这就是为什么你重新定义了你的 X_train
如上所述:np.random.normal(size=(53480, 36))
行中:
Xtrain = np.expand_dims(np.random.normal(size=(53480, 36)),axis=-1)
所以你的模型试图适应随机数据。
关于python - 错误 : Value passed to parameter 'input' has DataType int64 not in list of allowed values: float16, bfloat16、float32、float64?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/59506070/
或者存在像 这样的指针和引用C ? 我正在尝试开始使用 vala,但很高兴知道 vala 是“按引用传递”还是“按值传递” 最佳答案 首先你应该明白默认的vala编译器valac编译为 C(作为一种中
就目前而言,这个问题不适合我们的问答形式。我们希望答案得到事实、引用资料或专业知识的支持,但这个问题可能会引发辩论、争论、投票或扩展讨论。如果您觉得这个问题可以改进并可能重新打开,visit the
我确实对 crypt() PHP 函数感到困惑。 当第二个 crypt 显然使用不同的第二个参数时,以下两个 crypt 函数如何给出相同的输出?差异盐意味着差异哈希对吗? echo crypt("p
我正在尝试在方案中模拟堆栈。我正在使用 DrScheme 并选择语言 R5RS。我需要创建 pop、push 和 peek 的函数。但我无法弄清楚如何通过引用传递。我已经阅读了一些关于盒子的信息,但是
我已经查过维基百科并用 google 搜索过,但我仍然无法理解 ALGOL 60 中的按名称传递的工作原理。 最佳答案 我在 Pass-By-Name Parameter Passing 找到了很好的
问题:我想知道在 Excel 2003 VBA 中处理数组的最佳解决方案是什么 背景:我在 Excel 2003 中有一个超过 5000 行的宏。我在过去 2 年中构建了它,将新功能添加为新过程,这有
我正在尝试反转位图数组,而不修改源数组。但问题是源数组也被颠倒了。我做错了什么还是我应该以其他方式做?感谢您的帮助。 private GalleryAdapter galleryAdapter; pr
因此,关于按引用传递/按值传递,方法如何处理参数传递已有详细记录,但是变量赋值又如何呢? 例如,我刚刚编写了一些如下所示的代码: TreeNode parent = null; TreeNode cu
我正在编写一个脚本,它将从我们的星号系统中回填调用详细记录到我们的 MySQL 日志数据库中。在下面的代码中,我试图忽略重复键并继续到下一行,但是当这段代码执行时,我所看到的只是第一行欺骗警告,然后脚
我从一个 Action (executeProcess)重定向到另一个(executeIndex)。我希望能够不使用GET传递参数/变量(例如$this->redirect('index', arra
我经常看到方法接口(interface)的两种相互冲突的策略,大致概括如下: // Form 1: Pass in an object. double calculateTaxesOwed(TaxFo
目前正在学习回调在 JavaScript 中的含义,我正在努力理解回调是如何工作的,以及术语“passing this”或“passing that”的来源以及它如何与回调一起工作?我想展示一些示例代
我通过 URL 传递参数并且页面被正确重定向。如何访问下一个(重定向的)页面上(从上一页)传递并在 URL 中可用的参数?谁能告诉我该怎么做? 或者有没有其他的方式在页面之间传递参数? 是否有类似 A
我通过 URL 传递参数并且页面被正确重定向。如何访问下一个(重定向的)页面上(从上一页)传递并在 URL 中可用的参数?谁能告诉我该怎么做? 或者有没有其他的方式在页面之间传递参数? 是否有类似 A
按引用传递和按名称传递的参数传递模式有什么区别这里是 Python 中的一个示例,但假设我们不使用任何 Python 规则: def P(x,y) global i y=1 prin
我正在编写自己的 LLVM pass,它修改了 LLVM 位码。在生成位码时,我想禁用函数内联,但是当我完成对位码的修改后,我想调用执行函数内联的传递。这个可以吗。如果是,如何? 为了更好地理解我在说
我有一个模板函数,负责将模板值写入流。它看起来像这样: template void Write( T value, std::ostream& stream, endianness_t endian
我一直在使用两个程序 llvm 的 opt 和 clifford wolf 的 yosys两者都有类似的通行证接口(interface)。(他们使用共享库作为优化 passes ) 我想根据我的 ll
在我们使用引用传递的 C++ 中,我们引用了我们从参数传递给函数参数的地址,它本质上是一个指针,对吗?因此,虽然它们本质上是相同的东西,别名和所有,但指针不也需要内存空间吗?因此,无论我们在参数函数中
这是一个关于 64 位整数的效率问题。假设我不需要修改“int”参数的值,我应该通过值还是引用传递它。 假设是 32 位机: 1) 32 位 int:我猜答案是“按值传递”,因为“按引用传递”会产生额
我是一名优秀的程序员,十分优秀!