
python - Storing reaction times with event.getKeys while playing a sound with sounddevice


I wrote an experiment that presents participants with a series of visual stimuli (stimulus duration: 100 ms; trial duration: 500 ms). At the onset of each visual stimulus, a 100 ms sound is played.
Some of the visual stimuli are targets, and participants should press the spacebar whenever they detect one.
I want to know the participants' reaction times to the targets. So I use event.getKeys to store the global time at which the spacebar was pressed. I store global timestamps so that I can compare the time a trial started with the time the spacebar was pressed. I do this because my inter-trial intervals are short, and a participant may respond to a target during the following trial.
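
(In code, the idea is that both timestamps come from the same clock, so the reaction time is just their difference; a minimal sketch using the variable names from the code below:)

l_trial_start = globalClock.getTime()  # trial onset on the global clock
key = event.getKeys(keyList=['space'], timeStamped=globalClock)
if key:  # key[0] is a ('space', press_time) pair
    rt = key[0][1] - l_trial_start  # can exceed the trial duration if the press arrives late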

The code seems to work when I comment out the sd.play call for the sound, but as soon as a sound is played, the reaction times seem to get lost: the keypress is always stored in the trial after the target trial (even though I know I pressed the spacebar during the target trial).

Has anyone run into this problem before?

Here is the code for the procedure:

def response_check(key):
    """
    Checks if a key was pressed.

    Keyword arguments:
    key -- list containing either a (keypress, time) tuple or nothing

    Return:
    time -- 'nan' if not pressed, or time of press if pressed
    """
    if len(key) > 0 and 'space' in key[0]:
        pressed = 1
        time = key[0][1]  # global timestamp of the keypress
    else:
        pressed = 0
        time = 'nan'

    return str(time), pressed


for t in range(n_trials):  # n_trials is the total number of trials
    show_target_crosses(pauses, t, trial_paradigm[t], hi_targets, low_targets)  # show target
    l_trial_start = globalClock.getTime()
    check4esc()  # check for esc
    # set stimuli according to condition
    standing = visual.Rect(win=win, name='up_cross_hor', width=(dimentions[1]),
                           height=(dimentions[0]), ori=0, pos=(0, 0), lineWidth=1,
                           lineColor=colors[all_crosses[trial_paradigm[t]][t]],
                           lineColorSpace='rgb', fillColor=colors[all_crosses[trial_paradigm[t]][t]],
                           fillColorSpace='rgb', opacity=1, depth=0.0, interpolate=True)
    laying = visual.Rect(win=win, name='up_cross_hor', width=(dimentions[0]),
                         height=(dimentions[1]), ori=0, pos=(0, position[all_crosses[trial_paradigm[t]][t]]), lineWidth=1,
                         lineColor=colors[all_crosses[trial_paradigm[t]][t]],
                         lineColorSpace='rgb', fillColor=colors[all_crosses[trial_paradigm[t]][t]],
                         fillColorSpace='rgb', opacity=1, depth=0.0, interpolate=True)

    sd.play(all_sounds[all_paradigms[trial_paradigm[t]][t]], fs)  # play sound
    if first_seven[t] == 0:
        if all_responses[trial_paradigm[t]][t] == 0:
            trigger(trig_list[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]], 0.01)  # send sound trigger
        elif all_responses[trial_paradigm[t]][t] == 1:
            trigger(trig_list_targets[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]], 0.01)
    core.wait(0.06)  # adjust diode to sound delay
    standing.draw()  # vertical bar
    laying.draw()  # horizontal bar
    whiteOn.draw()  # square

    win.flip()  # show cross and white square for photodiode

    core.wait(0.1)  # show cross 100 ms
    win.flip()  # turn visual stuff off
    core.wait(0.032)  # adjust ITI

    l_fp = int(ok_data[0])
    l_block_nr = blocks[t] + 1
    l_trial_nr = (list(range(367)) * n_blocks)[t] + 1
    l_condition = trial_paradigm[t]
    l_sound = all_sounds_names[all_paradigms[trial_paradigm[t]][t]]
    if first_seven[t] == 0:
        if all_responses[trial_paradigm[t]][t] == 0:
            l_trigger = trig_list[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]]  # sound trigger code
        elif all_responses[trial_paradigm[t]][t] == 1:
            l_trigger = trig_list_targets[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]]
    elif first_seven[t] == 1:
        l_trigger = 999
    l_target = all_responses[trial_paradigm[t]][t]
    l_cross_condition = all_crosses[trial_paradigm[t]][t]

    key = event.getKeys(keyList=['space'], timeStamped=globalClock)

    l_response_time = response_check(key)[0]
    # Save data to file
    # 'fp\tblock_nr\ttrial_nr\tcondition\tsound\ttrigger\ttarget\tcross_cond\ttrial_start\tresponse_time\n'
    dataFile.write('%i\t%i\t%i\t%i\t%s\t%i\t%i\t%i\t%f\t%s\n' % (
        l_fp, l_block_nr, l_trial_nr, l_condition, l_sound, l_trigger,
        l_target, l_cross_condition, l_trial_start, l_response_time))
    paus(t, pauses, blocks, trig=192)  # check for pauses

=========== EDIT ============
Below is an MCVE version of the whole experiment:
from psychopy import visual
from psychopy import core, gui, data, event, parallel
import sounddevice as sd
import time, random, math, sys
import numpy as np

# Functions --------------------------------------------------------------------

def response_check(key):
    """
    Checks if a key was pressed.

    Keyword arguments:
    key -- list containing either a (keypress, time) tuple or nothing

    Return:
    time -- 'nan' if not pressed, or time of press if pressed
    """
    if len(key) > 0 and 'space' in key[0]:
        pressed = 1
        time = key[0][1]  # global timestamp of the keypress
    else:
        pressed = 0
        time = 'nan'

    return str(time), pressed

def create_sinusoid(freq=1000, phase=0, fs=48000, dur=1):
    '''Create a sinusoid of specified length with amplitude -1 to 1. Use
    set_gain() and fade() to set amplitude and fade-in-out.

    Keyword arguments:
    freq -- frequency in Hz (float)
    phase -- phase in radians (float)
    fs -- sampling frequency (int)
    dur -- duration of signal in seconds (float)

    Return:
    sinusoid -- mono signal of sinusoid (1xn numpy array)
    '''
    t = np.arange(0, dur, 1.0/fs)              # time vector
    sinusoid = np.sin(phase + 2*np.pi*freq*t)  # sinusoid (mono signal)
    return sinusoid

def fade(monosignal, samples):
    '''Apply a raised cosine to the start and end of a mono signal.

    Keyword arguments:
    monosignal -- vector (1xn numpy array).
    samples -- number of samples of the fade (integer). Make sure that:
               2*samples < len(monosignal)

    Return:
    out -- faded monosignal (1xn numpy array)
    '''
    ramps = 0.5*(1 - np.cos(2*np.pi*(np.arange(2*samples))/(2*samples - 1)))
    fadein = ramps[0:samples]
    fadeout = ramps[samples:len(ramps)+1]
    plateau = np.ones(len(monosignal) - 2*samples)
    weight = np.concatenate((fadein, plateau, fadeout))
    out = weight*monosignal
    return out

def set_gain(mono, gaindb):
    '''Set gain of mono signal, to get dB(rms) to specified gaindb.

    Keyword arguments:
    mono -- vector (numpy array).
    gaindb -- gain of mono in dB re max = 0 dB (float).

    Return:
    gained -- monosignal (numpy array)
    '''
    rms = np.sqrt(np.mean(mono**2))
    adjust = gaindb - 20 * np.log10(rms)
    gained = 10**(adjust/20.0) * mono  # don't forget to make 20 a float (20.0)

    # Print warning if overload, that is, if any abs(sample-value) > 1
    if np.max(np.abs(gained)) > 1:
        message1 = "WARNING: set_gain() generated overloaded signal!"
        message2 = "max(abs(signal)) = " + str(np.max(np.abs(gained)))
        message3 = ("number of samples > 1 = " +
                    str(np.sum(1 * (np.abs(gained) > 1))))
        print(message1)
        print(message2)
        print(message3)

    return gained

# Screen
win = visual.Window([800, 600], allowGUI=False,  # [1920, 1080]
                    monitor='testMonitor', units='height', color='gray')

# ==============================================================================
# TONE ORDER AND RESPONSES ----------------------------------------------------
# 1 - 500 Hz
# 0 - 550 Hz
# 2 - 605 Hz
# 3 - 666 Hz
# 4 - 732 Hz
# 5 - 805 Hz
# 6 - 886 Hz
# 7 - 974 Hz

tone_order = np.random.choice([0,1,2,3,4,5,6,7], 20, replace = True)
targets = np.random.choice([1,0,0,0,0]*4, 20, replace = False)

# ==============================================================================
# CREATE SOUNDS ----------------------------------------------------------------

#sd.default.device = "ASIO Fireface USB"
print('Sound device ------------------------------------------------------------')
print(sd.query_devices())  # device = "ASIO Fireface USB"
print('-------------------------------------------------------------------------')

# Set the gain and sampling frequency (fs)
gain = -30
fs = 44100

frequencies = [500, 550, 605, 666, 732, 805, 886, 974]
tones = [0]*8

for t in range(len(frequencies)):
    tones[t] = set_gain(fade(create_sinusoid(
        freq=frequencies[t], phase=0, fs=fs, dur=0.1), 441), gain)  # 100 ms, 10 ms fade in/out

f_500 = np.transpose(np.array([tones[0],tones[0]])) # deviant, control
f_550 = np.transpose(np.array([tones[1],tones[1]])) # standard
f_605 = np.transpose(np.array([tones[2],tones[2]]))
f_666 = np.transpose(np.array([tones[3],tones[3]]))
f_732 = np.transpose(np.array([tones[4],tones[4]]))
f_805 = np.transpose(np.array([tones[5],tones[5]]))
f_886 = np.transpose(np.array([tones[6],tones[6]]))
f_974 = np.transpose(np.array([tones[7],tones[7]]))

all_tones = [f_500, f_550, f_605, f_666, f_732, f_805, f_886, f_974]

# ==============================================================================
# CREATE VISUALS ---------------------------------------------------------------
stimulus = visual.TextStim(
    win, color='white', height=0.03, pos=(0, 0), text='')

# ==============================================================================
# Make a text file to save data ------------------------------------------------
fileName = 'test'
dataFile = open(fileName+'.txt', 'w')
dataFile.write('soundCond\ttarget\ttrial_start\tresponse_time\n')

# ==============================================================================
# Keep track of time -----------------------------------------------------------
globalClock = core.Clock()
respClock = core.Clock()

# ==============================================================================
# Experimental procedure -------------------------------------------------------

# Trial loop
for t in range(len(tone_order)):
    l_trial_start = globalClock.getTime()
    # set stimuli according to condition
    if targets[t] == 0:
        stimulus.text = '+'
    else:
        stimulus.text = 'o'
    sd.play(all_tones[tone_order[t]], fs)  # play sound for current trial

    core.wait(0.08)  # adjust visual to sound delay
    stimulus.draw()  # draw cross or circle
    win.flip()       # show stimulus
    core.wait(0.1)   # show stimulus 100 ms
    win.flip()       # turn visual stuff off
    core.wait(0.26)  # adjust ITI

    l_sound = tone_order[t]
    l_target = targets[t]

    key = event.getKeys(keyList=['space'], timeStamped=globalClock)

    l_response_time = response_check(key)[0]
    # Save data to file
    # 'soundCond\ttarget\ttrial_start\tresponse_time\n'
    dataFile.write('%i\t%i\t%f\t%s\n' % (
        l_sound, l_target, l_trial_start, l_response_time))

dataFile.close()

Best answer

Your second code example shows that you are using PsychoPy.
Why don't you use its audio functionality?
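
For example, a minimal sketch (not taken from the question; note that depending on the PsychoPy version, the audio-backend preference lives under prefs.general or prefs.hardware):

from psychopy import prefs
prefs.hardware['audioLib'] = ['sounddevice']  # ask PsychoPy to use sounddevice as backend
from psychopy import sound, core  # import sound only after setting prefs

beep = sound.Sound(500, secs=0.1)  # 100 ms, 500 Hz tone
beep.play()                        # returns immediately; playback runs in the background
core.wait(0.5)                     # drawing and key polling can happen while the tone plays

Because play() does not block, event.getKeys() can be called while the tone is still playing.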

By the way, the sounddevice module can be used as an audio backend in PsychoPy; internally it uses an sd.OutputStream and a callback function, just as I would suggest.
But if you use PsychoPy's audio functionality, you don't have to worry about any of this.
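
For illustration, here is a minimal non-blocking playback sketch built on that idea (this is not PsychoPy's actual implementation; the global buffer is a simplification, and a real program should hand samples to the callback through a thread-safe queue):

import numpy as np
import sounddevice as sd

fs = 44100
tone = 0.1 * np.sin(2 * np.pi * 440 * np.arange(int(0.1 * fs)) / fs)  # 100 ms beep
pending = np.zeros(0)  # samples waiting to be played

def callback(outdata, frames, time, status):
    # Runs on the audio thread: copy the next chunk of pending samples
    # into the output block and pad the rest with silence.
    global pending
    chunk, pending = pending[:frames], pending[frames:]
    outdata.fill(0)
    outdata[:len(chunk), 0] = chunk

stream = sd.OutputStream(samplerate=fs, channels=1, callback=callback)
stream.start()  # the stream keeps running for the whole experiment

# ... inside the trial loop, this starts playback without blocking:
pending = tone

The main loop never blocks on audio, so key timestamps are unaffected by playback.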

By the way, the PsychoPy community is really helpful; check out their forum: https://discourse.psychopy.org/.

Regarding this comment:

Our program is extremely simple



Playing back audio with accurate timing is by no means a trivial task.
There are big platform-dependent differences, and if you want to be sure your timing is right, you should always measure it.
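
As a starting point, sounddevice can at least report the nominal latency of a device or stream; this is a useful sanity check but no substitute for a physical measurement with a photodiode or microphone (a small sketch):

import sounddevice as sd

print(sd.query_devices(kind='output'))  # default output device, incl. its nominal latencies

stream = sd.OutputStream(samplerate=44100, channels=2)
stream.start()
print('reported output latency: %.1f ms' % (stream.latency * 1000))
stream.close()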

Regarding python - storing reaction times with event.getKeys while playing a sound with sounddevice, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/49632764/
