| id (int64, 0–458k) | file_name (string, len 4–119) | file_path (string, len 14–227) | content (string, len 24–9.96M) | size (int64, 24–9.96M) | language (string, 1 class) | extension (string, 14 classes) | total_lines (int64, 1–219k) | avg_line_length (float64, 2.52–4.63M) | max_line_length (int64, 5–9.91M) | alphanum_fraction (float64, 0–1) | repo_name (string, len 7–101) | repo_stars (int64, 100–139k) | repo_forks (int64, 0–26.4k) | repo_open_issues (int64, 0–2.27k) | repo_license (string, 12 classes) | repo_extraction_date (string, 433 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
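A minimal sketch of iterating rows with this schema via the Hugging Face datasets library; the dataset identifier below is a placeholder, not the real path:

from datasets import load_dataset

# placeholder identifier; substitute the actual dataset path
ds = load_dataset("someuser/python-source-files", split="train", streaming=True)
for row in ds:
    # each row pairs one source file with file- and repo-level metadata
    print(row["id"], row["file_path"], row["repo_name"], row["repo_license"])
    print(row["content"][:200])  # peek at the first 200 characters
    break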
| 5,900 | GUI.py | psychopy_psychopy/psychopy/demos/coder/input/GUI.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo to illustrate Dialog (Dlg) classes and usage.
"""
from psychopy import gui # Fetch default gui handler (qt if available)
from psychopy import __version__ # Get the PsychoPy version currently in use
## You can explicitly choose one of the qt/wx backends like this:
## from psychopy.gui import wxgui as gui
## from psychopy.gui import qtgui as gui
# Specify fields for dlg as a dict
info = {'Observer':'jwp',
'Condition':['A', 'B'],
'Grating Orientation': 45,
'PsychoPy Version': __version__,
'Debug Mode': True}
# Use this dict to create the dlg
infoDlg = gui.DlgFromDict(dictionary=info,
title='TestExperiment',
order=['PsychoPy Version', 'Observer'],
tip={'Observer': 'trained visual observer, initials'},
fixed=['PsychoPy Version']) # This attribute can't be changed by the user
# Script will now wait for the dlg to close...
if infoDlg.OK: # This will be True if user hit OK...
print(info)
else: # ...or False, if they hit Cancel
print('User Cancelled')
## You could also use a gui.Dlg and extract the data manually. This approach
## gives more control, e.g. over text color.
# Create dlg
dlg = gui.Dlg(title="My experiment", pos=(200, 400))
# Add each field manually
dlg.addText('Subject Info', color='Blue')
dlg.addField('Name:', tip='or subject code')
dlg.addField('Age:', 21)
dlg.addText('Experiment Info', color='Blue')
dlg.addField('Grating Ori:', 45)
# Call show() to show the dlg and wait for it to close (this was automatic with DlgFromDict)
thisInfo = dlg.show()
if dlg.OK: # This will be True if user hit OK...
print(thisInfo)
else:
print('User cancelled') # ...or False, if they hit Cancel
## The contents of this file are in the public domain.
| 1,763 | Python | .py | 44 | 37.704545 | 92 | 0.710111 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,901 | customMouse.py | psychopy_psychopy/psychopy/demos/coder/input/customMouse.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of CustomMouse(), showing movement limits, click detected upon release,
and ability to change the pointer.
"""
# authors Jeremy Gray & Todd Parsons
from psychopy import visual, event
# Create window
win = visual.Window(units="height")
# Create a virtual mouse
vm = visual.CustomMouse(win,
leftLimit=-0.2, topLimit=0, rightLimit=0.2, bottomLimit=-0.4,
showLimitBox=True, clickOnUp=True)
# Textbox for instructions
instr = visual.TextBox2(win,
text="Move the mouse around. Click to give the mouse more room to move.",
font="Open Sans", letterHeight=0.08,
pos=(0, .3))
# Create a character to use as mouse
new_pointer = visual.TextStim(win,
text=u'\u265e',
font="Gothic A1")
print("[getPos] [getWheelRel] click time")
# Listen for clicks
while not event.getKeys():
# Draw components
instr.draw()
vm.draw()
win.flip()
# Check for clicks
if vm.getClicks():
vm.resetClicks()
# Print click details
print("click at [%.2f, %.2f]" % (vm.getPos()[0], vm.getPos()[1]))
print(vm.getWheelRel())
print("%.3f sec"%vm.mouseMoveTime())
# can set some limits, others are unchanged:
vm.setLimit(leftLimit=-0.5, topLimit=0.1, rightLimit=0.5, bottomLimit=-0.4,)
instr.text = "Room to gallop! Press any key to quit."
# can switch the pointer to anything with a .draw() and setPos() method
vm.pointer = new_pointer
win.close()
# The contents of this file are in the public domain.
| 1,554 | Python | .py | 44 | 31.090909 | 84 | 0.677312 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,902 | joystick_universal.py | psychopy_psychopy/psychopy/demos/coder/input/joystick_universal.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
There are two ways to retrieve info from the first 3 joystick axes. You can use:
joy.getAxis(0)
joy.getX()
Beyond those 3 axes you need to use the getAxis(id) form, though the two
naming schemes may not always align fully. This demo should help you find
out which physical axis maps to which number for your device.
Known issue: Pygame 1.9.1 unfortunately prints a debug message every time the
joystick is accessed and there doesn't seem to be a way to suppress those
messages.
"""
from psychopy import visual, core, event
from psychopy.hardware import joystick
joystick.backend = 'pyglet'
# As of v1.72.00, you need the winType and joystick.backend to match:
win = visual.Window((800.0, 800.0), allowGUI=False, winType=joystick.backend)
nJoysticks = joystick.getNumJoysticks()
if nJoysticks > 0:
joy = joystick.Joystick(0)
print('found ', joy.getName(), ' with:')
print('...', joy.getNumButtons(), ' buttons')
print('...', joy.getNumHats(), ' hats')
print('...', joy.getNumAxes(), ' analogue axes')
else:
print("You don't have a joystick connected!?")
win.close()
core.quit()
nAxes = joy.getNumAxes()
fixSpot = visual.GratingStim(win, pos=(0, 0),
tex="none", mask="gauss",
size=(0.05, 0.05), color='black')
grating = visual.GratingStim(win, pos=(0.5, 0),
tex="sin", mask="gauss",
color=[1.0, 0.5, -1.0],
size=(0.2, .2), sf=(2, 0))
message = visual.TextStim(win, pos=(0, -0.95), text='Hit "q" to quit')
trialClock = core.Clock()
t = 0
while not event.getKeys():
# update stim from joystick
xx = joy.getX()
yy = joy.getY()
grating.setPos((xx, -yy))
# change SF
if nAxes > 3:
        sf = (joy.getZ() + 1) * 2.0  # maps the axis range [-1, 1] onto sf 0 to 4
grating.setSF(sf)
# change ori
if nAxes > 6:
ori = joy.getAxis(5) * 90
grating.setOri(ori)
# if any button is pressed then make the stimulus colored
if sum(joy.getAllButtons()):
grating.setColor('red')
else:
grating.setColor('white')
# drift the grating
t = trialClock.getTime()
grating.setPhase(t * 2)
grating.draw()
fixSpot.draw()
message.draw()
print(joy.getAllAxes()) # to see what your axes are doing!
event.clearEvents() # do this each frame to avoid a backlog of mouse events
win.flip() # redraw the buffer
win.close()
core.quit()
# The contents of this file are in the public domain.
| 2,489 | Python | .py | 70 | 31.671429 | 80 | 0.669439 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,903 | mouse.py | psychopy_psychopy/psychopy/demos/coder/input/mouse.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of mouse handling.
As of version 1.51 the mouse coordinates for
myMouse.getPos()
myMouse.setPos()
myMouse.getRel()
are in the same units as the window.
You can also check the motion of the wheel with myMouse.getWheelRel()
(in two directions for the mac mighty mouse or equivalent!)
"""
from psychopy import visual, core, event
# Create a window to draw in
win = visual.Window((600.0, 600.0), allowGUI=True)
# Initialize some stimuli
fixSpot = visual.GratingStim(win, tex="none", mask="gauss",
pos=(0, 0), size=(0.05, 0.05), color='black', autoLog=False)
grating = visual.GratingStim(win, pos=(0.5, 0),
tex="sin", mask="gauss",
color=[1.0, 0.5, -1.0],
size=(1.0, 1.0), sf=(3, 0),
autoLog=False) # autologging not useful for dynamic stimuli
myMouse = event.Mouse() # will use win by default
message = visual.TextStim(win, pos=(-0.95, -0.9), height=0.08,
alignText='left', anchorHoriz='left',
text='left-drag=SF, right-drag=pos, scroll=ori',
autoLog=False)
# Continue until keypress
while not event.getKeys():
# get mouse events
mouse_dX, mouse_dY = myMouse.getRel()
mouse1, mouse2, mouse3 = myMouse.getPressed()
    if mouse1:
grating.setSF(mouse_dX, '+')
    elif mouse3:
grating.setPos([mouse_dX, mouse_dY], '+')
else:
fixSpot.setPos(myMouse.getPos())
# Handle the wheel(s):
# dY is the normal mouse wheel, but some have a dX as well
wheel_dX, wheel_dY = myMouse.getWheelRel()
grating.setOri(wheel_dY * 5, '+')
# get rid of other, unprocessed events
event.clearEvents()
# Do the drawing
fixSpot.draw()
grating.setPhase(0.05, '+') # advance 0.05 cycles per frame
grating.draw()
message.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,888 | Python | .py | 54 | 31.148148 | 69 | 0.675631 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,904 | elementArrays.py | psychopy_psychopy/psychopy/demos/coder/stimuli/elementArrays.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of the ElementArrayStim, a highly optimised stimulus for generating
arrays of similar (but not identical) elements, such as in global form
arrays or random dot stimuli.
Elements must have the same basic texture and mask, but can differ in any
other way (ori, sf, rgb...).
This demo relies on numpy arrays to manipulate stimulus characteristics.
Working with array vectors is fast, much faster than python for-loops, which
would be too slow for a large array of stimuli like this.
See also the starField demo.
"""
from psychopy import visual, core, event
from psychopy.tools.coordinatetools import cart2pol
# We only need these two commands from numpy.random:
from numpy.random import random, shuffle
win = visual.Window([1024, 768], units='pix', monitor='testMonitor')
N = 500
fieldSize = 500
elemSize = 40
coherence = 0.5
# build a standard (but dynamic!) global form stimulus
xys = random([N, 2]) * fieldSize - fieldSize / 2.0 # numpy vector
globForm = visual.ElementArrayStim(win,
nElements=N, sizes=elemSize, sfs=3,
xys=xys, colors=[180, 1, 1], colorSpace='hsv')
# calculate the orientations for global form stimulus
def makeCoherentOris(XYs, coherence, formAngle):
# length along the first dimension:
nNew = XYs.shape[0]
# random orientations:
newOris = random(nNew) * 180
# select some elements to be coherent
possibleIndices = list(range(nNew)) # create an array of indices
shuffle(possibleIndices) # shuffle it 'in-place' (no new array)
coherentIndices = possibleIndices[0: int(nNew * coherence)]
# use polar coordinates; set the ori of the coherent elements
theta, radius = cart2pol(XYs[: , 0], XYs[: , 1])
newOris[coherentIndices] = formAngle - theta[coherentIndices]
return newOris
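## For comparison, the same per-element update as an explicit Python loop (a
## sketch, kept commented out; the docstring warns this would be far too slow
## for large N, and it picks coherent elements probabilistically, not exactly):
# def makeCoherentOrisSlow(XYs, coherence, formAngle):
#     newOris = []
#     for x, y in XYs:
#         theta, radius = cart2pol(x, y)
#         if random(1)[0] < coherence:
#             newOris.append(formAngle - theta)
#         else:
#             newOris.append(random(1)[0] * 180)
#     return newOris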
globForm.oris = makeCoherentOris(globForm.xys, coherence, 45)
# Give each element a life of 10 frames, and give it a new position after that
lives = random(N) * 10 # this will be the current life of each element
while not event.getKeys():
# take a copy of the current xy and ori values
newXYs = globForm.xys
newOris = globForm.oris
# find the dead elements and reset their life
deadElements = (lives > 10) # numpy vector, not standard python
lives[deadElements] = 0
# for the dead elements update the xy and ori
# random array same shape as dead elements
newXYs[deadElements, : ] = random(newXYs[deadElements, : ].shape) * fieldSize - fieldSize/2.0
# for new elements we still want same % coherent:
new = makeCoherentOris(newXYs[deadElements, : ], coherence, 45)
newOris[deadElements] = new
# update the oris and xys of the new elements
globForm.xys = newXYs
    globForm.oris = newOris
globForm.draw()
win.flip()
lives = lives + 1
event.clearEvents('mouse') # only really needed for pygame windows
win.close()
core.quit()
# The contents of this file are in the public domain.
| 2,984 | Python | .py | 67 | 41.19403 | 97 | 0.737552 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,905 | rotatingFlashingWedge.py | psychopy_psychopy/psychopy/demos/coder/stimuli/rotatingFlashingWedge.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo: Rotate flashing wedge
"""
from psychopy import visual, event, core
win = visual.Window([800, 800])
globalClock = core.Clock()
# Make two wedges (in opposite contrast) and alternate them for flashing
wedge1 = visual.RadialStim(win, tex='sqrXsqr', color=1, size=1,
visibleWedge=[0, 45], radialCycles=4, angularCycles=8, interpolate=False,
autoLog=False) # this stim changes too much for autologging to be useful
wedge2 = visual.RadialStim(win, tex='sqrXsqr', color=-1, size=1,
visibleWedge=[0, 45], radialCycles=4, angularCycles=8, interpolate=False,
autoLog=False) # this stim changes too much for autologging to be useful
t = 0
rotationRate = 0.1 # revs per sec
flashPeriod = 0.1 # seconds for one B-W cycle (ie 1/Hz)
while not event.getKeys():
t = globalClock.getTime()
if t % flashPeriod < flashPeriod / 2.0: # more accurate to count frames
stim = wedge1
else:
stim = wedge2
stim.ori = t * rotationRate * 360.0 # set new rotation
stim.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,158 | Python | .py | 30 | 35.5 | 77 | 0.707404 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,906 | bufferImageStim.py | psychopy_psychopy/psychopy/demos/coder/stimuli/bufferImageStim.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of class psychopy.visual.BufferImageStim()
- take a snapshot of a multi-item screen image
- save and draw as a BufferImageStim
- compare the speed of drawing a BufferImageStim with drawing each item separately
"""
from psychopy import visual, event, core
# need a window and clock:
win = visual.Window(fullscr=False, monitor='testMonitor')
clock = core.Clock()
# first define a list of various slow, static stim
imageList = ['face.jpg', 'beach.jpg']
imageStim = visual.SimpleImageStim(win, imageList[0])
imageStim2 = visual.SimpleImageStim(win, imageList[1], pos=(.300, .20))
wordStim = visual.TextStim(win,
text='Press < escape > to quit.\n\nThere should be no change after 3 seconds.\n\n' +
'This is a text stim that is kinda verbose and long, so if it ' +
'were actually really long it would take a while to render completely.',
pos=(0, -.2))
stimlist = [imageStim, imageStim2, wordStim]
# Get and save a "screen shot" of everything in stimlist:
rect = (-1, 1, 1, -1)
t0 = clock.getTime()
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
# rect is the screen rectangle to grab, (-1, 1, 1, -1) is whole-screen
# as a list of the edges: Left Top Right Bottom, in norm units.
captureTime = clock.getTime() - t0
instr_buffer = visual.TextStim(win, text='BufferImageStim', pos=(0, .8))
drawTimeBuffer = [] # accumulate draw times of the screenshot
for frameCounter in range(200):
t0 = clock.getTime()
screenshot.draw() # draw the BufferImageStim, fast
drawTimeBuffer.append(clock.getTime() - t0)
instr_buffer.draw()
win.flip()
if len(event.getKeys(['escape'])):
core.quit()
# Just for the demo: Time things when drawn individually:
instr_multi = visual.TextStim(win, text='TextStim and ImageStim', pos=(0, .8))
drawTimeMulti = [] # draw times of the pieces, as drawn separately
for frameCounter in range(200):
t0 = clock.getTime()
for s in stimlist:
s.draw() # draw all individual stim, slow
drawTimeMulti.append(clock.getTime() - t0)
instr_multi.draw()
win.flip()
if len(event.getKeys(['escape'])):
core.quit()
# Report timing:
firstFrameTime = drawTimeBuffer.pop(0)
bufferAvg = 1000. * sum(drawTimeBuffer) / len(drawTimeBuffer)
multiAvg = 1000. * sum(drawTimeMulti) / len(drawTimeMulti)
msg = "\nBufferImageStim\nrect=%s norm units, becomes %s pix"
print(msg % (str(rect), str(screenshot.size)))
print("initial set-up / screen capture: %.0fms total" % (1000. * captureTime))
print("first frame: %.2fms (typically slow)" % (1000. * firstFrameTime))
msg = "BufferImage: %.2fms avg, %.2fms max draw-time (%d frames)"
print(msg % (bufferAvg, max(drawTimeBuffer) * 1000., len(drawTimeBuffer)))
msg = "Text & Image: %.2fms avg, %.2fms max draw-time"
print(msg % (multiAvg, max(drawTimeMulti) * 1000.))
win.close()
core.quit()
# The contents of this file are in the public domain.
| 2,984 | Python | .py | 66 | 42.212121 | 96 | 0.705781 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,907 | plaid.py | psychopy_psychopy/psychopy/demos/coder/stimuli/plaid.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo creating a drifting visual plaid stimulus.
For combining stimuli (e.g. to create a plaid) it's best to use blendMode='add'
rather than blendMode='avg'. In this blendMode the background is not overwritten
but added to, which is ideal in this instance.
On the other hand, in this mode the opacity attribute is a slight
misnomer; setting a high 'opacity' doesn't cause the background to be
obscured; it just acts as a multiplier for the contrast of the stimulus being drawn.
"""
from psychopy import visual, logging, event, core
# create a window to draw in
win = visual.Window((600, 600), allowGUI=False, blendMode='add', useFBO=True)
logging.console.setLevel(logging.DEBUG)
# Initialize some stimuli, note contrast, opacity, ori
grating1 = visual.GratingStim(win, mask="circle", color='white', contrast=0.5,
size=(1.0, 1.0), sf=(4, 0), ori = 45, autoLog=False)
grating2 = visual.GratingStim(win, mask="circle", color='white', opacity=0.5,
size=(1.0, 1.0), sf=(4, 0), ori = -45, autoLog=False,
pos=(0.1,0.1))
trialClock = core.Clock()
t = 0
while not event.getKeys() and t < 20:
t = trialClock.getTime()
grating1.phase = 1 * t # drift at 1Hz
grating1.draw() # redraw it
grating2.phase = 2 * t # drift at 2Hz
grating2.draw() # redraw it
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,430 | Python | .py | 33 | 40.939394 | 84 | 0.716655 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,908 | MovieStim.py | psychopy_psychopy/psychopy/demos/coder/stimuli/MovieStim.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of MovieStim
MovieStim opens a video file and displays it on a window.
"""
from psychopy import visual, core, constants
from psychopy.hardware import keyboard
# window to present the video
win = visual.Window((800, 600), fullscr=False)
# keyboard to listen for keys
kb = keyboard.Keyboard()
# create a new movie stimulus instance
mov = visual.MovieStim(
win,
'default.mp4', # path to video file
size=(256, 256),
flipVert=False,
flipHoriz=False,
loop=False,
noAudio=False,
volume=0.1,
autoStart=False)
# print some information about the movie
print('orig movie size={}'.format(mov.frameSize))
print('orig movie duration={}'.format(mov.duration))
# instructions
instrText = "`r` Play/Resume\n`p` Pause\n`s` Stop\n`q` Stop and Close"
instr = visual.TextStim(win, instrText, pos=(0.0, -0.75))
# main loop, exit when the status is finished
while not mov.isFinished:
# draw the movie
mov.draw()
# draw the instruction text
instr.draw()
# flip buffers so they appear on the window
win.flip()
# process keyboard input
if kb.getKeys('q'): # quit
break
elif kb.getKeys('r'): # play/start
mov.play()
elif kb.getKeys('p'): # pause
mov.pause()
elif kb.getKeys('s'): # stop the movie
mov.stop()
# stop the movie, this frees resources too
mov.unload() # unloads when `mov.status == constants.FINISHED`
# clean up and exit
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,574 | Python | .py | 52 | 26.865385 | 70 | 0.692512 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,909 | textStimuli.py | psychopy_psychopy/psychopy/demos/coder/stimuli/textStimuli.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of text rendering in pyglet, including:
- how to specify fonts
- unicode
- rotating text
- mirror-image
- bidirectional and reshaped Arabic/Farsi text
"""
from psychopy import visual, core, event
from numpy import sin
# Create a window to draw in
win = visual.Window(units="height", size=(800, 800))
win.recordFrameIntervals = True
# Initialize some stimuli.
## Note that in Python 3 we no longer need to create special unicode strings
## with a u'' prefix, as all strings are unicode. For the time being, we
## retain the prefix in this demo, for backwards compatibility for people
## running PsychoPy under Python 2.7
fpsText = visual.TextBox2(win,
text="fps",
color="red", fillColor="black",
font="Share Tech Mono", letterHeight=0.04,
size=(0.2, 0.1), pos=(0, 0.1))
psychopyTxt = visual.TextBox2(win,
text=u"PsychoPy \u00A9Jon Peirce",
color="white",
font="Indie Flower", letterHeight=0.05,
size=(0.6, 0.2), pos=(0, 0))
unicodeStuff = visual.TextBox2(win,
text = u"unicode (eg \u03A8 \u040A \u03A3)", # You can find the unicode character value by searching online
color="black",
font="EB Garamond", letterHeight=0.05,
size=(0.5, 0.2), pos=(-0.5, -0.5), anchor="bottom-left")
longSentence = visual.TextBox2(win,
text = u"Text wraps automatically! Just keep typing a long sentence that is very long and also it is entirely unnecessary how long the sentence is, it will wrap neatly.",
color='DarkSlateBlue', borderColor="DarkSlateBlue",
font="Open Sans", letterHeight=0.025,
size=(0.4, 0.3), pos=(0.45, -0.45), anchor='bottom-right')
mirror = visual.TextBox2(win,
text="mirror mirror",
color='silver',
font="Josefin Sans", letterHeight=0.05,
size=(0.2, 0.2), pos=(0, -0.1),
flipHoriz=True)
google = visual.TextBox2(win,
text="Now supporting Google fonts!",
color="blue",
font="Josefin Sans", letterHeight=0.03,
size=(0.4, 0.2), pos=(0.5, 0.5), anchor="top-right")
## By default, right-to-left languages like Hebrew are often shown in
## reversed order. Additionally, Arabic-based text by default is shown
## with characters in their isolated form, rather than flowing correctly
## into their neighbours. We can use the invisible \u200E left-to-right
## control character to resolve ambiguous transitions between text
## directions (for example, to determine in which directional run a
## punctuation character belongs).
## We correct these issues by setting the languageStyle to be
## 'bidirectional' (sufficient for Hebrew, for example) or 'Arabic'
## (which additionally does the reshaping of individual characters
## needed for languages based on the Arabic alphabet):
farsi = visual.TextBox2(win,
text = u'Farsi text: \n \u200E خوش آمدید 1999',
color = 'FireBrick',
font="Cairo", letterHeight = 0.03,
size=(0.5, 0.1), pos = (-0.5, 0.4), anchor="top-left")
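## The languageStyle mentioned above is an argument of the text stimuli; a
## sketch (hedged: exact support depends on your PsychoPy version):
# farsiLegacy = visual.TextStim(win, text=u'\u200E خوش آمدید',
#     languageStyle='Arabic', pos=(-0.5, 0.3))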
# Start a clock ticking
trialClock = core.Clock()
t = lastFPSupdate = 0
# Continues the loop until any key is pressed
while not event.getKeys():
# Get current time from clock
t = trialClock.getTime()
# Draw stimuli
mirror.draw()
fpsText.draw()
psychopyTxt.draw()
unicodeStuff.draw()
longSentence.draw()
farsi.draw()
google.draw()
win.flip()
# Update the fps text every second
if t - lastFPSupdate > 1:
fps = win.fps()
fpsText.text = "%i fps" % fps
lastFPSupdate += 1
if fps > 50:
fpsText.color = "green"
print(fpsText.color)
else:
fpsText.color = "red"
# Move PsychoPy text around
psychopyTxt.pos = (sin(t)/2, sin(t)/2)
#
#
win.close()
core.quit()
#
# The contents of this file are in the public domain.
| 3,809 | Python | .py | 101 | 33.930693 | 174 | 0.694934 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,910 | textbox_editable.py | psychopy_psychopy/psychopy/demos/coder/stimuli/textbox_editable.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from psychopy import visual, core, event, logging
import numpy as np
logging.console.setLevel(logging.EXP)
c = core.Clock()
from psychopy.visual.textbox2 import TextBox2, allFonts
win = visual.Window([800, 800], monitor='testMonitor')
logging.exp("{:.3f}: created window".format(c.getTime()))
psychopyInfo = u"<b>PsychoPy</b> is an <i>open-source</i> Python application allowing you to run a supercali-fragilisticexpeilidocious wide range of neuroscience, psychology and psychophysics experiments. It’s a free, powerful alternative to Presentation™ or e-Prime™, written in Python (a free alternative to Matlab™ g)."
# preload some chars into a font to see how long it takes
fontSize = 16
arial = allFonts.getFont("Arial", fontSize)
logging.exp("{:.3f}: created font".format(c.getTime()))
nChars = 256
arial.preload(nChars) # or set to preload specific string of chars
logging.exp("{:.3f}: preloaded {} chars".format(c.getTime(), nChars))
txt1 = TextBox2(win, text="Type here, it's toptastic", font='Times',
color='black', colorSpace='named',
pos=(0, 0.4), letterHeight=0.05, units='height',
size=[0.8, 0.2],
anchor='center-top',
borderColor='lightgrey',
fillColor='slategrey',
editable=True)
txt2 = TextBox2(win, text=psychopyInfo, font='Arial',
pos=(0, -5), anchor='middle', size=(20, None), units='cm',
lineSpacing=1.1,
letterHeight=1.,
color='LightGrey', borderColor='Moccasin', fillColor=None,
editable=True)
txt3 = TextBox2(win, text='Good for non-editable text (Esc to quit)',
font='Arial',
borderColor=None, fillColor=None,
pos=(-0.5,-0.5), units='height', anchor='bottom-left',
letterHeight=0.02,
editable=False)
txt1.autoDraw = True
txt2.autoDraw = True
txt3.autoDraw = True
clock = core.Clock()
t = 0
while t < 30:
    t = clock.getTime()
txt2.pos = (0.2*np.sin(t), 0.2*np.cos(t))
if 'escape' in event.getKeys():
core.quit()
win.flip()
logging.flush()
| 2,190 | Python | .py | 49 | 37.653061 | 322 | 0.655566 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,911 | stim3d.py | psychopy_psychopy/psychopy/demos/coder/stimuli/stim3d.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Demo for 3D stimulus classes.
This demonstrates how to render 3D stimuli, set lighting and adjust materials.
"""
from psychopy import core
import psychopy.visual as visual
from psychopy.visual import LightSource, BlinnPhongMaterial, BoxStim, SphereStim
from psychopy.tools.gltools import createTexImage2dFromFile
from psychopy import event
# open a window to render the shape
win = visual.Window((600, 600), monitor='testMonitor')
# create the stimulus object, try other classes like SphereStim and PlaneStim
boxStim = BoxStim(win, size=(.2, .2, .2))
# set the position of the object by editing the associated rigid body pose
boxStim.thePose.pos = (0, 0, -3)
# create a white material and assign it
boxStim.material = BlinnPhongMaterial(
win, diffuseColor=(1, 1, 1), specularColor=(0, 0, 0), shininess=125.0)
# load a diffuse texture
boxStim.material.diffuseTexture = createTexImage2dFromFile('face.jpg')
# move the box 1 unit away from the observer by editing the stimulus's rigid
# body pose (this overrides the position set above)
boxStim.thePose.pos = (0, 0, -1)
# setup scene lights
redLight = LightSource(
win,
pos=(0, 0.5, -1),
diffuseColor='red',
specularColor='red',
lightType='point')
greenLight = LightSource(
win,
pos=(-0.5, -0.5, -1),
diffuseColor='lightgreen',
specularColor='lightgreen',
lightType='point')
blueLight = LightSource(
win,
pos=(0.5, -0.5, -1),
diffuseColor='blue',
specularColor='blue',
lightType='point')
# assign the lights to the scene
win.lights = [redLight, greenLight, blueLight]
# Draw spheres at the positions of the light sources to show them. Note that the
# spheres themselves are not emitting light, just made to appear so.
redSphere = SphereStim(win, radius=0.1)
redSphere.thePose.pos = redLight.pos
redSphere.material = BlinnPhongMaterial(win, emissionColor='red')
greenSphere = SphereStim(win, radius=0.1)
greenSphere.thePose.pos = greenLight.pos
greenSphere.material = BlinnPhongMaterial(win, emissionColor='green')
blueSphere = SphereStim(win, radius=0.1)
blueSphere.thePose.pos = blueLight.pos
blueSphere.material = BlinnPhongMaterial(win, emissionColor='blue')
# text to overlay
message = visual.TextStim(
win, text='Any key to quit', pos=(0, -0.8), units='norm')
angle = 0.0 # box angle
while not event.getKeys():
win.setPerspectiveView() # set the projection, must be done every frame
win.useLights = True # enable lighting
# spin the stimulus by angle
boxStim.thePose.setOriAxisAngle((0, 1, 1), angle)
# draw the stimulus
boxStim.draw()
# Disable lights for 2D stimuli and light source shapes, or else colors will
# be modulated.
win.useLights = False
# Disabling lighting will cause these not to appear shaded by other light
# sources in the scene.
redSphere.draw()
greenSphere.draw()
blueSphere.draw()
win.resetEyeTransform() # reset the transformation to draw 2D stimuli
message.draw()
win.flip()
angle += 0.5
win.close()
core.quit()
| 3,067 | Python | .py | 81 | 34.814815 | 80 | 0.742568 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,912 | screensAndWindows.py | psychopy_psychopy/psychopy/demos/coder/stimuli/screensAndWindows.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Using multiple screens and windows with PsychoPy
"""
from psychopy import visual, event
from numpy import sin, pi # numeric python
if True: # use two positions on one screen
winL = visual.Window(size=[400, 300], pos=[100, 200], screen=0,
allowGUI=False) # , fullscr=True)
winR = visual.Window(size=[400, 300], pos=[400, 200], screen=0,
allowGUI=False) # , fullscr=True) # same screen diff place
else:
winL = visual.Window(size=[400, 300], pos=[100, 200], screen=0,
allowGUI=False, fullscr=False)
winR = visual.Window(size=[400, 300], pos=[100, 200], screen=1,
allowGUI=False, fullscr=False) # same place diff screen
# create some stimuli
# NB. even if the windows have the same characteristics, a stimulus is bound
# to the window it was created in, so create stimuli separately per window
# left screen
contextPatchL = visual.GratingStim(winL, tex='sin', mask='circle',
size=1.0, sf=3.0, texRes=512)
targetStimL = visual.GratingStim(winL, ori=20, tex='sin', mask='circle',
size=0.4, sf=3.0, texRes=512, autoLog=False)
# right screen
contextPatchR = visual.GratingStim(winR, tex='sin', mask='circle',
size=1.0, sf=3.0, texRes=512)
targetStimR = visual.GratingStim(winR, ori=20, tex='sin', mask='circle',
size=0.4, sf=3.0, texRes=512, autoLog=False)
t = 0.0
while not event.getKeys():
t = t + 0.01
# don't let it go behind the context (looks weird if it switches):
newX = sin(t * pi * 2) * 0.05 + 0.05
contextPatchR.draw()
targetStimR.pos = [newX, 0] # make this patch move the opposite way
targetStimR.draw()
contextPatchL.draw()
targetStimL.pos = [-newX, 0]
targetStimL.draw()
winL.flip()
winR.flip()
# Close windows
winR.close()
winL.close()
# The contents of this file are in the public domain.
| 1,846 | Python | .py | 46 | 35.043478 | 85 | 0.654922 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,913 | variousVisualStims.py | psychopy_psychopy/psychopy/demos/coder/stimuli/variousVisualStims.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of several visual stimuli depending on the mouse position.
"""
from psychopy import visual, event, core
import numpy
win = visual.Window([600, 600], color='black')
gabor = visual.GratingStim(win, mask='gauss', pos=[-0.5, -0.5], color=[0, 0, 1], sf=5, ori=30)
movie = visual.MovieStim3(win, 'jwpIntro.mp4', units='pix', pos=[100, 100], size=[160, 120])
txt = u"unicode (eg \u03A8 \u040A \u03A3)"
text = visual.TextStim(win, pos=[0.5, -0.5], text=txt, font=['Times New Roman'])
faceRGB = visual.ImageStim(win, image='face.jpg', pos=[-0.5, 0.5])
mouse = event.Mouse()
instr = visual.TextStim(win, text='move the mouse around')
t = 0.0
while not event.getKeys() and not mouse.getPressed()[0]:
# get mouse events
mouse_dX, mouse_dY = mouse.getRel()
gabor.ori -= mouse_dY * 10
text.ori += mouse_dY * 10
faceRGB.ori += mouse_dY * 10
movie.ori -= mouse_dY * 10
t += 1/60.0
gabor.phase = t * 2.0
gabor.draw()
text.color = [numpy.sin(t * 2), 0, 1]
text.draw()
faceRGB.draw()
movie.draw()
instr.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,207 | Python | .py | 35 | 31.428571 | 94 | 0.652586 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,914 | soundStimuli.py | psychopy_psychopy/psychopy/demos/coder/stimuli/soundStimuli.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sound stimuli are currently an area of development in PsychoPy.
Previously we used pygame. Now the pyo library is also supported.
On macOS this is an improvement (using CoreAudio rather than SDL).
On Windows this should help on systems with good sound cards,
but this is yet to be confirmed.
See the demo hardware > testSoundLatency.py
"""
import sys
from psychopy import logging, prefs
logging.console.setLevel(logging.DEBUG) # get messages about the sound lib as it loads
from psychopy import sound, core
print('Using %s (with %s) for sounds' % (sound.audioLib, sound.audioDriver))
highA = sound.Sound('A', octave=3, sampleRate=44100, secs=0.8, stereo=True)
highA.setVolume(0.8)
tick = sound.Sound(800, secs=0.01, sampleRate=44100, stereo=True) # sample rate ignored because already set
tock = sound.Sound('600', secs=0.01, sampleRate=44100, stereo=True)
highA.play()
core.wait(0.8)
tick.play()
core.wait(0.4)
tock.play()
core.wait(0.6)
if sys.platform == 'win32':
ding = sound.Sound('ding')
ding.play()
core.wait(1)
tada = sound.Sound('tada.wav')
tada.play()
core.wait(2)
print('done')
core.quit()
# The contents of this file are in the public domain.
| 1,242 | Python | .py | 35 | 33.4 | 108 | 0.74518 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,915 | maskReveal.py | psychopy_psychopy/psychopy/demos/coder/stimuli/maskReveal.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of an efficient visual mask, by modulating its opacity.
When you want to reveal an image gradually from behind a mask,
the tempting thing to do is to alter a stimulus mask using .setMask()
That will actually be very slow because of the overhead in sending
textures to the graphics card on each change. Instead, the more
efficient way of doing this is to create an element array and alter the
opacity of each element of the array to reveal what's behind it.
"""
from psychopy import core, visual, event
from psychopy.tools.arraytools import createXYs
import numpy
win = visual.Window((600, 600), allowGUI=False, color=0,
monitor='testMonitor', winType='pyglet', units='norm')
# Initialize some stimuli
gabor = visual.GratingStim(win, tex='sin', mask='gauss', size=1, sf=5)
# create a grid of xy vals
xys = createXYs(numpy.linspace(-0.5, 0.5, 11)) # 11 entries from -0.5 to 0.5
# create opacity for each square in mask
opacs = numpy.ones(len(xys)) # all opaque to start
# create mask
elSize = xys[1, 0] - xys[0, 0]
mask = visual.ElementArrayStim(win, elementTex=None, elementMask=None,
nElements=len(xys),
colors=win.color, # i.e., same as background
xys=xys, opacities=opacs,
sizes=elSize)
trialClock = core.Clock()
t = 0
maskIndices = numpy.arange(len(xys))
numpy.random.shuffle(maskIndices)
frameN = 0
while not event.getKeys():
t = trialClock.getTime()
gabor.ori += 1 # advance ori by 1 degree
gabor.draw()
# update mask by making one element transparent, selected by index
if frameN < len(maskIndices):
ii = maskIndices[frameN]
opacs[ii] = 0
mask.opacities = opacs
mask.draw()
win.flip()
frameN += 1
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,834 | Python | .py | 49 | 34.510204 | 77 | 0.726708 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,916 | counterphase.py | psychopy_psychopy/psychopy/demos/coder/stimuli/counterphase.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
There are many ways to generate counter-phase, e.g. vary the contrast of
a grating sinusoidally between 1 and -1, take 2 gratings in opposite phase
overlaid and vary the opacity of the upper one between 1 and 0, or take two
gratings overlaid with the upper one at 0.5 opacity and drift them
in opposite directions.
This script takes the first approach, as a test of how fast
contrast textures can be rewritten to the graphics card.
from psychopy import core, visual, event
from numpy import sin, pi
# Create a window to draw in
win = visual.Window((600, 600), allowGUI=False, monitor='testMonitor', units='deg')
# Initialize some stimuli
grating1 = visual.GratingStim(
win, tex="sin", mask="circle", texRes=128,
color='white', size=5, sf=2, ori=45, depth=0.5, autoLog=False)
message = visual.TextStim(
win, text='Any key to quit',
pos=(-0.95, -0.95), units='norm',
anchorVert='bottom', anchorHoriz='left')
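## A sketch of the second approach from the docstring (commented out): overlay
## a second grating in opposite phase and modulate its opacity between 1 and 0,
## leaving the contrast textures untouched:
# grating2 = visual.GratingStim(win, tex="sin", mask="circle", texRes=128,
#     color='white', size=5, sf=2, ori=45, phase=0.5)  # opposite phase
# ...then each frame: grating2.opacity = (sin(t * pi * 2) + 1) / 2; grating2.draw()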
trialClock = core.Clock()
t = 0
while not event.getKeys() and t < 20: # quits after 20 secs
t = trialClock.getTime()
grating1.contrast = sin(t * pi * 2)
grating1.draw()
message.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,281 | Python | .py | 34 | 35.235294 | 83 | 0.726171 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,917 | visual_noise.py | psychopy_psychopy/psychopy/demos/coder/stimuli/visual_noise.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo: Using an arbitrary numpy array as a visual stimulus.
Also illustrates logging DEBUG level details to the console.
"""
from psychopy import visual, event, core, logging
import numpy
logging.console.setLevel(logging.DEBUG)
win = visual.Window([600, 600], allowGUI=False)
noiseTexture = numpy.random.rand(128, 128) * 2.0 - 1
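## Any square, power-of-two-sized array scaled to [-1, 1] works as a texture;
## e.g. binary noise instead of uniform noise (a sketch):
# noiseTexture = numpy.random.randint(0, 2, (128, 128)) * 2 - 1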
patch = visual.GratingStim(win, tex=noiseTexture,
size=(128, 128), units='pix',
interpolate=False, autoLog=False)
while not event.getKeys():
# increment by (1, 0.5) pixels per frame:
patch.phase += (1 / 128.0, 0.5 / 128.0)
patch.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 736 | Python | .py | 22 | 30.954545 | 60 | 0.71773 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,918 | clockface.py | psychopy_psychopy/psychopy/demos/coder/stimuli/clockface.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of using ShapeStim to make a functioning visual clock.
"""
from psychopy import visual, core, event
import numpy, time
win = visual.Window([800, 800], monitor='testMonitor')
# vertices (using numpy means we can scale them easily)
handVerts = numpy.array([ [0, 0.8], [-0.05, 0], [0, -0.05], [0.05, 0] ])
second = visual.ShapeStim(win, vertices=[[0, -0.1], [0.1, 0.8]],
lineColor=[1, -1, -1], fillColor=None, lineWidth=2, autoLog=False)
minute = visual.ShapeStim(win, vertices=handVerts,
lineColor='white', fillColor=[0.8, 0.8, 0.8], autoLog=False)
hour = visual.ShapeStim(win, vertices=handVerts/2.0,
lineColor='black', fillColor=[-0.8, -0.8, -0.8], autoLog=False)
clock = core.Clock()
while not event.getKeys():
t = time.localtime()
minPos = numpy.floor(t[4]) * 360 / 60 # NB floor will round down
minute.ori = minPos
minute.draw()
    hourPos = (t[3] + t[4] / 60.0) * 360 / 12  # this one can be smooth
hour.ori = hourPos
hour.draw()
secPos = numpy.floor(t[5]) * 360 / 60 # NB floor will round down
second.ori = secPos
second.draw()
win.flip()
event.clearEvents('mouse') # only really needed for pygame windows
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,300 | Python | .py | 33 | 36.272727 | 72 | 0.670644 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,919 | secondOrderGratings.py | psychopy_psychopy/psychopy/demos/coder/stimuli/secondOrderGratings.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from psychopy import visual, core, event
from psychopy.visual.secondorder import EnvelopeGrating
from psychopy.visual.noise import NoiseStim
import numpy as np
win = visual.Window([512, 512], blendMode='add', screen=1, useFBO=True)
# comment in to test life without shaders
# win._haveShaders=False
# Make a noise stimulus to use as a carrier for a 2nd-order envelope
noise = NoiseStim(win=win, name='noise',units='pix',
ori=1.0, pos=(1000, 1000), size=(512, 512), sf=1/512, phase=0,
color=[1,1,1], colorSpace='rgb', opacity=1, blendmode='add', contrast=1.0,
texRes=512,
noiseType='binary', noiseElementSize=2, interpolate=0)
# env1 Bottom right: unmodulated rotating carrier, sf=8, ori=45; moddepth=0.
# env2 Bottom left: 100% modulated binary noise, envelope drifting and rotating, envsf=8.
# env3 Top right: 100% modulated sin carrier, envelope and carrier rotate in
# opposite directions (the envelope orientation appears slower than the grating
# below, but if you track the orientation it's not).
# env4 Top left: 100% beat, envsf=4 but, being a beat, it looks like 8. The
# envelope drifts at the same speed as env1.
env1 = EnvelopeGrating(win, ori=0, units='norm', carrier='sin', envelope='sin',
mask='gauss', sf=4, envsf=8, size=1, contrast=1.0, moddepth=0.0, envori=0,
pos=[-.5, -.5], interpolate=0)
env2 = EnvelopeGrating(win, ori=0, units='norm', carrier='sin', envelope='sin',
mask='gauss', sf=1, envsf=8, size=1, contrast=0.5, moddepth=1.0, envori=0, texRes=512,
pos=[.5, -.5], interpolate=0)
env3 = EnvelopeGrating(win, ori=0, units='norm', carrier='sin', envelope='sin',
mask='gauss', sf=24, envsf=4, size=1, contrast=0.5, moddepth=1.0, envori=0,
pos=[-.5, .5], interpolate=0)
env4 = EnvelopeGrating(win, ori=90, units='norm', carrier='sin', envelope='sin',
mask='gauss', sf=24, envsf=4, size=1, contrast=0.5, moddepth=1.0, envori=0,
pos=[0.5, 0.5], beat=True, interpolate=0)
# Set the carrier for env2 to the noise texture.
env2.setCarrier(noise.tex)
while not event.getKeys():
# contMod.phase += 0.01
env1.ori += 0.1
env2.envori += 0.1
env2.envphase += 0.01
env3.envori += 0.1
env3.ori -= 0.1
env4.envphase += 0.01
env4.phase += 0.01
# env1.phase += 0.01
# env1.ori += 0.1
env1.draw()
env2.draw()
env3.draw()
env4.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 2,535 | Python | .py | 55 | 41.890909 | 92 | 0.677602 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,920 | customTextures.py | psychopy_psychopy/psychopy/demos/coder/stimuli/customTextures.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Textures (e.g. for a GratingStim) can be created from custom numpy arrays.
For this they should be square arrays, with size in powers of two (e.g. 64, 128, 256, 512)
A 256x256 array can then be given color efficiently using the normal stimulus methods.
A 256x256x3 array has its color defined by the array (obviously).
This demo creates a radial array as a patch stimulus, using helper functions
from psychopy.filters, and then creates a second sub-stimulus from a section
of the original. Both are masked simply by circles.
"""
from psychopy import visual, event, core
from psychopy.visual import filters
import numpy as np
win = visual.Window([800, 600], units='pix')
# Generate the radial textures
cycles = 6
res = 512
radius = filters.makeRadialMatrix(res)
radialTexture = np.sin(radius * 2 * np.pi * cycles)
mainMask = filters.makeMask(res)
# Select the upper left quadrant of our radial stimulus
radialTexture_sub = radialTexture[256:, 0:256]
# and create an appropriate mask for it
subMask = filters.makeMask(res, radius=0.5, center=[-0, 0])
bigStim = visual.GratingStim(win, tex=radialTexture, mask=mainMask,
color='white', size=512, sf=1.0 / 512, interpolate=True)
# draw the quadrant stimulus centered in the top left quadrant of the 'base' stimulus (so they're aligned)
subStim = visual.GratingStim(win, tex=radialTexture_sub, pos=(-128, 128), mask=subMask,
color=[1, 1, 1], size=256, sf=1.0 / 256, interpolate=True, autoLog=False)
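## As the docstring notes, an NxNx3 array defines its own color; a sketch that
## puts the same radial pattern in the red channel only:
# rgbTexture = np.zeros((256, 256, 3))
# rgbTexture[..., 0] = radialTexture_sub
# redStim = visual.GratingStim(win, tex=rgbTexture, mask=subMask,
#     size=256, sf=1.0 / 256, interpolate=True)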
bigStim.draw()
subStim.draw()
globalClock = core.Clock()
while not event.getKeys():
# clockwise rotation of sub-patch
t = globalClock.getTime()
bigStim.draw()
subStim.ori = np.sin(t * 2 * np.pi) * 20 # control speed
subStim.draw()
win.flip()
event.clearEvents('mouse') # only really needed for pygame windows
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,935 | Python | .py | 44 | 41.909091 | 106 | 0.751864 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,921 | shapeContains.py | psychopy_psychopy/psychopy/demos/coder/stimuli/shapeContains.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo for psychopy.visual.ShapeStim.contains() and .overlaps()
Also inherited by various other stimulus types.
"""
from psychopy import visual, event, core
win = visual.Window(size=(500, 500), monitor='testMonitor', units='norm')
mouse = event.Mouse()
txt = 'click the shape to quit\nscroll to adjust circle'
instr = visual.TextStim(win, text=txt, pos=(0, -.7), opacity=0.5)
msg = visual.TextStim(win, text=' ', pos=(0, -.4))
# a target polygon (strange shape):
shape = visual.ShapeStim(win, fillColor='darkblue', lineColor=None,
vertices=[(-0.02, -0.0), (-.8, .2), (0, .6), (.1, 0.06), (.8, .3), (.6, -.4)])
# define a buffer zone around the mouse for proximity detection:
# use pix units just to show that it works to mix (shape and mouse use norm units)
bufzone = visual.Circle(win, radius=30, edges=13, units='pix')
# loop until detect a click inside the shape:
while not mouse.isPressedIn(shape):
instr.draw()
# dynamic buffer zone around mouse pointer:
bufzone.pos = mouse.getPos() * win.size / 2 # follow the mouse
bufzone.size += mouse.getWheelRel()[1] / 20.0 # vert scroll adjusts radius
# is the mouse inside the shape (hovering over it)?
if shape.contains(mouse):
msg.text = 'inside'
shape.opacity = bufzone.opacity = 1
elif shape.overlaps(bufzone):
msg.text = 'near'
shape.opacity = bufzone.opacity = 0.6
else:
msg.text = 'far away'
shape.opacity = bufzone.opacity = 0.2
bufzone.draw() # drawing helps visualize the mechanics
msg.draw()
shape.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,712 | Python | .py | 41 | 38.097561 | 82 | 0.6787 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,922 | dot_gabors.py | psychopy_psychopy/psychopy/demos/coder/stimuli/dot_gabors.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of gabor dots, using autodraw.
"""
from psychopy import visual, core, event
# Create a window to draw in
win = visual.Window((600, 600), allowGUI=False,
monitor='testMonitor', units='deg')
# Initialize
gabor_shape = visual.GratingStim(win, mask='gauss', sf=3)
dotPatch = visual.DotStim(win, color='black',
dotLife=5, # lifetime of a dot in frames (if this is long density artifacts can occur in the stimulus)
signalDots='different', # are the signal and noise dots 'different' or 'same' popns (see Scase et al)
noiseDots='direction', # do the noise dots follow random- 'walk', 'direction', or 'position'
fieldPos=[0.0, 0.0], nDots=40, fieldSize=3,
speed=0.05, fieldShape='circle', coherence=0.5,
element=gabor_shape, name='dotPatch')
message = visual.TextStim(win, text='Any key to quit', pos=(0, -5))
# always draw
dotPatch.autoDraw = True
message.autoDraw = True
while not event.getKeys():
win.flip() # redraw the buffer, autodraw does the rest
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,142 | Python | .py | 27 | 39.148148 | 107 | 0.703704 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,923 | embeddedOpenGL.py | psychopy_psychopy/psychopy/demos/coder/stimuli/embeddedOpenGL.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This demo shows how you can make standard opengl calls within a psychopy
script, allowing you to draw anything that OpenGL can draw (i.e. anything).
"""
from psychopy import visual, core
from pyglet import gl
win = visual.Window([600, 600], units='norm', monitor='testMonitor')
a_blob = visual.GratingStim(win, pos = [0.5, 0], mask='gauss', sf=3)
def drawStuff():
gl.glBegin(gl.GL_TRIANGLES)
gl.glColor3f(1.0, 0.0, 1)
gl.glVertex3f(0.0, 0.5, 1)
gl.glColor3f(0.0, 1.0, 0.0)
gl.glVertex3f(-0.5, -0.5, 1)
gl.glColor3f(0.0, 0.0, 1.0)
gl.glVertex3f(0.5, -0.5, -1)
gl.glEnd()
a_blob.draw()
win.flip()
drawStuff()
core.wait(2)
win.close()
core.quit()
# The contents of this file are in the public domain.
| 798 | Python | .py | 26 | 27.769231 | 75 | 0.672346 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,924 | gabor.py | psychopy_psychopy/psychopy/demos/coder/stimuli/gabor.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from psychopy import core, visual, event
# create a window to draw in
win = visual.Window([400, 400.0], allowGUI=False)
# INITIALISE SOME STIMULI
gabor = visual.GratingStim(win, tex="sin", mask="gauss", texRes=256,
size=[1.0, 1.0], sf=[4, 0], ori = 0, name='gabor1')
gabor.autoDraw = True
message = visual.TextStim(win, pos=(0.0, -0.9), text='Hit Q to quit')
trialClock = core.Clock()
# repeat drawing for each frame
while trialClock.getTime() < 20:
gabor.phase += 0.01
message.draw()
# handle key presses each frame
if event.getKeys(keyList=['escape', 'q']):
win.close()
core.quit()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 775 | Python | .py | 23 | 30.26087 | 69 | 0.667114 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,925 | colorPalette.py | psychopy_psychopy/psychopy/demos/coder/stimuli/colorPalette.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from psychopy import locale_setup, visual, core
import numpy as np
from psychopy.hardware import keyboard
from psychopy import misc
def createPalette(size):
"""
Creates the color palette array in HSV and returns as RGB
"""
# Create array
hsv = np.ones([size,size,3], dtype=float)
# Set hue
hsv[:,:,0] = np.linspace(0,360, size, endpoint=False)
# Set saturation
for i in range(size):
hsv[:,i, 1] = np.linspace(0, 1, size, endpoint=False)
# Convert to RGB
rgb = misc.hsv2rgb(hsv)
    # map from the range [-1, 1] to [0, 1] for the image stim
    rgb = (rgb + 1) / 2
return rgb
def createValue(size):
"""
Creates the value palette array in HSV and returns as RGB
"""
# Create array
hsv = np.zeros([20,size,3], dtype=float)
# Set value
hsv[:,:,2] = np.linspace(0,1, size, endpoint=False)
# Convert to RGB
rgb = misc.hsv2rgb(hsv)
    # map from the range [-1, 1] to [0, 1] for the image stim
    rgb = (rgb + 1) / 2
return rgb
# Setup the Window
win = visual.Window(size=[1920, 1080], fullscr=False, units='height')
colorPalette = visual.ImageStim(win=win,name='colorPalette', units='pix',
image=None, mask=None,
texRes=64, depth=0.0)
valuePalette = visual.ImageStim(win=win, name='valuePalette', units='pix',
pos=(0, -250), depth=-1.0)
hueSlider = visual.Slider(win=win, name='hueSlider',
size=(.37, .02), pos=(0, 0.2),
labels=None, ticks=(0, 360), style=['rating'])
satSlider = visual.Slider(win=win, name='satSlider',
size=(.02, .37), pos=(0.2, 0),
labels=None, ticks=(0, 1), style=['rating'])
valSlider = visual.Slider(win=win, name='valSlider',
size=(.37, .02), pos=(0, -0.25),
labels=None, ticks=(0,1), style=['rating'])
visualFeedback = visual.Rect(win=win, name='visualFeedback',
    width=0.15, height=0.15,
pos=(0, 0.35),fillColor=[0,0,0], fillColorSpace='hsv',
depth=-6.0)
hsvText = visual.TextStim(win=win, name='hsvText',
text=None, font='Arial',
pos=(.4, 0), height=0.03)
instText = visual.TextStim(win=win, name='instText',
text=("Use the sliders to change:\n---hue (top)\n---"
"saturation (right)\n---value (bottom)"),
font='Arial',
pos=(-.3, 0), height=0.03, wrapWidth=.4,
alignText='left', anchorHoriz='right')
quitText = visual.TextStim(win=win, name='quitText',
    text='Press escape to quit',
font='Arial',
pos=(0, -.35), height=0.025, depth=-8.0,
wrapWidth=.4)
paletteSize = 400 # in pixels
valRGB = createValue(paletteSize)
colPalRGB = createPalette(paletteSize)
hueSlider.reset()
satSlider.reset()
valSlider.reset()
colorPalette.setSize([paletteSize,paletteSize])
colorPalette.setImage(colPalRGB)
valuePalette.setSize((paletteSize, 20))
valuePalette.setImage(valRGB)
key_resp = keyboard.Keyboard()
while True:
h = hueSlider.getRating() or 0
s = satSlider.getRating() or 0
v = valSlider.getRating() or 0.5
visualFeedback.fillColor = [h,s,v]
hsvText.text = "Hue: {h:.0f}\nSat: {s:.2f}\nVal: {v:.2f}".format(h=h, s=s, v=v)
colorPalette.draw()
valuePalette.draw()
hueSlider.draw()
satSlider.draw()
valSlider.draw()
visualFeedback.draw()
instText.draw()
hsvText.draw()
quitText.draw()
theseKeys = key_resp.getKeys(keyList=['escape'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
win.close()
core.quit()
win.flip()
| 4,240 | Python | .py | 103 | 30.757282 | 83 | 0.561247 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,926 | dots.py | psychopy_psychopy/psychopy/demos/coder/stimuli/dots.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of dot kinematogram
"""
from psychopy import visual, event, core
win = visual.Window((600, 600), allowGUI=False, winType='pyglet')
# Initialize some stimuli
dotPatch = visual.DotStim(win, color=(1.0, 1.0, 1.0), dir=270,
nDots=500, fieldShape='circle', fieldPos=(0.0, 0.0), fieldSize=1,
dotLife=5, # number of frames for each dot to be drawn
signalDots='same', # are signal dots 'same' on each frame? (see Scase et al)
noiseDots='direction', # do the noise dots follow random- 'walk', 'direction', or 'position'
speed=0.01, coherence=0.9)
print(dotPatch)
message = visual.TextStim(win, text='Any key to quit', pos=(0, -0.5))
trialClock = core.Clock()
while not event.getKeys():
dotPatch.draw()
message.draw()
win.flip() # make the drawn things visible
event.clearEvents('mouse') # only really needed for pygame windows
print(win.fps())
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,017 | Python | .py | 26 | 36.384615 | 97 | 0.701629 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,927 | face_jpg.py | psychopy_psychopy/psychopy/demos/coder/stimuli/face_jpg.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This demo shows different ways of presenting images, using visual.ImageStim and
visual.GratingStim. It introduces some of the many attributes of these stimulus
types.
"""
# Import the modules that we need in this script
from psychopy import core, visual, event
# Create a window to draw in
win = visual.Window(size=(600, 600), color='black')
# An image using ImageStim.
image = visual.ImageStim(win, image='face.jpg')
# We can also use the image as a mask (mask="face.jpg") for other stimuli!
grating = visual.GratingStim(win,
pos=(-0.5, 0),
tex='sin',
mask='face.jpg',
color='green')
grating.size = (0.5, 0.5) # attributes can be changed after initialization
grating.sf = 1.0
# Initiate clock to keep track of time
clock = core.Clock()
while clock.getTime() < 12 and not event.getKeys():
# Set dynamic attributes. There's a lot of different possibilities.
# so look at the documentation and try playing around here.
grating.phase += 0.01 # Advance phase by 1/100th of a cycle
grating.pos += (0.001, 0) # Advance on x but not y
image.ori *= 1.01 # Accelerating orientation (1% on every frame)
image.size -= 0.001 # Decrease size uniformly on x and y
if image.opacity >= 0: # attributes can be referenced
image.opacity -= 0.001 # Decrease opacity
# Show the result of all the above
image.draw()
grating.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,530 | Python | .py | 39 | 36.25641 | 79 | 0.709177 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,928 | aperture.py | psychopy_psychopy/psychopy/demos/coder/stimuli/aperture.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo for the class psychopy.visual.Aperture().
Draw two gabor circles, one with an irregular aperture and one with no aperture.
"""
from psychopy import visual, event
# Need to allowStencil=True for a window with an Aperture:
win = visual.Window([400, 400], allowStencil=True, units='norm')
instr = visual.TextStim(win, text="Any key to quit", pos=(0, -.7))
gabor1 = visual.GratingStim(win, mask='circle', sf=4, size=1.2, color=[0.5, -0.5, 1])
gabor2 = visual.GratingStim(win, mask='circle', sf=4, size=1.2, color=[-0.5, -0.5, -1])
vertices = [(-0.02, -0.0), (-.8, .2), (0, .6), (.1, 0.06), (.8, .3), (.6, -.4)]
# `size` in Aperture refers to the diameter when shape='circle';
# vertices or other shapes are scaled accordingly
aperture = visual.Aperture(win, size=0.9, shape=vertices) # try shape='square'
aperture.enabled = False # enabled by default when created
gabor1.draw()
instr.draw()
# drawing will now only be done within the aperture shape:
aperture.enabled = True
gabor2.draw()
win.flip()
event.waitKeys()
win.close()
# The contents of this file are in the public domain.
| 1,146 | Python | .py | 26 | 42.653846 | 87 | 0.706943 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,929 | ratingScale.py | psychopy_psychopy/psychopy/demos/coder/stimuli/ratingScale.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo for the class psychopy.visual.RatingScale()
author: Jeremy Gray, Example 4 by Henrik Singmann
"""
from psychopy import visual, event, core, logging
import os
# create a window before creating your rating scale, whatever units you like:
win = visual.Window(fullscr=False, size=[1100, 800], units='pix', monitor='testMonitor')
instr = visual.TextStim(win, text="""This is a demo of visual.RatingScale(). There are four examples.
Example 1 is on the next screen. Use the mouse to indicate a rating: click somewhere on the line. You can also use the arrow keys to move left or right.
To accept your rating, either press 'enter' or click the glowing button. Pressing 'tab' will skip the rating.
Press any key to start Example 1 (or escape to quit).""")
event.clearEvents()
instr.draw()
win.flip()
if 'escape' in event.waitKeys():
core.quit()
# Example 1 --------(basic choices)--------
# create a RatingScale object:
myRatingScale = visual.RatingScale(win, choices=['cold', 'cool', 'hot'])
# Or try this one:
# myRatingScale = visual.RatingScale(win, choices=list(map(str, range(1, 8))), marker='hover')
# the item to-be-rated or respond to:
myItem = visual.TextStim(win, text="How cool was that?", height=.12, units='norm')
# anything with a frame-by-frame .draw() method will work, e.g.:
# myItem = visual.MovieStim(win, 'jwpIntro.mp4')
event.clearEvents()
while myRatingScale.noResponse: # show & update until a response has been made
myItem.draw()
myRatingScale.draw()
win.flip()
if event.getKeys(['escape']):
core.quit()
print('Example 1: rating =', myRatingScale.getRating())
print('history =', myRatingScale.getHistory())
# Example 2 --------(multiple items, multiple dimensions for each)--------
instr = visual.TextStim(win, text="""Example 2. This example uses non-default settings for the visual display, skipping a rating is not possible, and it uses a list of images (two) to be rated on several dimensions (valence and arousal).
Try this: Place a marker, then drag it along the line using the mouse. In this example, you cannot use numeric keys to respond because the scale is 0 to 50.
Press any key to start Example 2 (or escape to quit).""")
event.clearEvents()
instr.draw()
win.flip()
if 'escape' in event.waitKeys():
core.quit()
# create a scale for Example 2, using quite a few non-default options:
myRatingScale = visual.RatingScale(win, low=0, high=50, precision=10, skipKeys=None,
marker='glow', markerExpansion=10, showValue=False, pos=[0, -200], name='Example2')
# using a list is handy if you have a lot of items to rate on the same scale, e.g. personality adjectives or images:
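# e.g. a hypothetical word list rated on the same scale: itemList = ['warm', 'honest', 'calm']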
imageList = ['beach.jpg', 'face.jpg']
data = []
for image in imageList:
x, y = myRatingScale.win.size
myItem = visual.SimpleImageStim(win=win, image=image, units='pix', pos=[0, y//7])
# rate each image on two dimensions
for dimension in ['0=very negative . . . 50=very positive',
'0=very boring . . . 50=very energizing']:
myRatingScale.reset() # reset between repeated uses of the same scale
myRatingScale.setDescription(dimension) # reset the instructions
event.clearEvents()
while myRatingScale.noResponse:
myItem.draw()
myRatingScale.draw()
win.flip()
if event.getKeys(['escape']):
core.quit()
data.append([image, myRatingScale.scaleDescription.text,
myRatingScale.getRating(), myRatingScale.getRT()]) # save for later
# clear the screen & pause between ratings
win.flip()
core.wait(0.35) # brief pause, slightly smoother for the subject
print('Example 2 (data from 2 images, each rated on 2 dimensions, reporting rating & RT):')
for d in data:
print(' ', d)
# Example 3 --------(two simultaneous ratings)--------
instr = visual.TextStim(win, text="""Example 3. This example shows how one could obtain two ratings at the same time, e.g., to allow explicit comparison between images during ratings.
In such a situation, the subject will have to use the mouse (and not the keyboard) to respond. The subject has to respond on both scales in order to go on to the next screen. Both of these considerations mean that it's best not to allow the subject to skip a rating.
Press any key to start Example 3 (or escape to quit).""")
event.clearEvents()
instr.draw()
win.flip()
if 'escape' in event.waitKeys():
core.quit()
x, y = win.size # for converting norm units to pix
leftward = -0.35 * x / 2 # use pix units, because the drawing window's units are pix
rightward = -1 * leftward
# for logging, it's useful to give names, especially when there are 2 on-screen
myRatingScaleLeft = visual.RatingScale(win, mouseOnly=True, pos=(leftward, -y/6),
marker='circle', size=0.85, name='left')
myRatingScaleRight = visual.RatingScale(win, mouseOnly=True, pos=(rightward, -y/6),
markerColor='DarkGreen', size=0.85, name='right')
myItemLeft = visual.SimpleImageStim(win=win, image=imageList[0], pos=[leftward, y/6.])
myItemRight = visual.SimpleImageStim(win=win, image=imageList[1], pos=[rightward, y/6.])
event.clearEvents()
while myRatingScaleLeft.noResponse or myRatingScaleRight.noResponse:
    # you could hide the item if it's been rated:
# if myRatingScaleLeft.noResponse: myItemLeft.draw()
# or easier: just initialize it with the disappear=True option
# but lets just draw it every frame:
myItemLeft.draw()
myItemRight.draw()
myRatingScaleLeft.draw()
myRatingScaleRight.draw()
win.flip()
if event.getKeys(['escape']):
core.quit()
# just for fun: briefly show the two scales with the markers in the 'down' position
myItemLeft.draw()
myItemRight.draw()
myRatingScaleLeft.draw()
myRatingScaleRight.draw()
win.flip()
core.wait(1)
print('Example 3:\n rating left=', myRatingScaleLeft.getRating(), ' rt=%.3f' % myRatingScaleLeft.getRT())
print(' rating right=', myRatingScaleRight.getRating(), ' rt=%.3f' % myRatingScaleRight.getRT())
# Example 4 --------(using tickMarks argument)--------
instr = visual.TextStim(win, text="""Example 4.
In this example we will use custom tick marks, a slider, and custom labels for a scale from 0 to 100.
Press any key to start Example 4 (or escape to quit).""")
event.clearEvents()
instr.draw()
win.flip()
if 'escape' in event.waitKeys():
core.quit()
myRatingScale = visual.RatingScale(win, low=0, high=100, marker='slider',
tickMarks=[0, 50, 82, 100], stretch=1.5, tickHeight=1.5, # singleClick=True,
labels=["0%", "half/half", "kinda", "100%"])
txt = "How probable is it that you will use this functionality in your next experiment?"
myItem = visual.TextStim(win, text=txt, height=.08, units='norm')
# show & update until a response has been made
while myRatingScale.noResponse:
myItem.draw()
myRatingScale.draw()
win.flip()
if event.getKeys(['escape']):
core.quit()
print('Example 4: rating =', myRatingScale.getRating())
win.close()
core.quit()
# The contents of this file are in the public domain.
| 7,136
|
Python
|
.py
| 141
| 46.992908
| 261
| 0.715375
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,930
|
shapes.py
|
psychopy_psychopy/psychopy/demos/coder/stimuli/shapes.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of psychopy.visual.ShapeStim: lines and arbitrary fillable shapes
See shapeContains.py for dynamic interaction of ShapeStim and Mouse.
"""
from psychopy import visual, event, core
from psychopy.visual import ShapeStim
win = visual.Window(size=(500, 400), units='height')
# some shapes:
arrowVert = [(-0.4,0.05),(-0.4,-0.05),(-.2,-0.05),(-.2,-0.1),(0,0),(-.2,0.1),(-.2,0.05)]
arrow = ShapeStim(win, vertices=arrowVert, fillColor='darkred', size=.5, lineColor='red')
star7Vert = [(0.0,0.5),(0.09,0.18),(0.39,0.31),(0.19,0.04),(0.49,-0.11),(0.16,-0.12),(0.22,-0.45),(0.0,-0.2),(-0.22,-0.45),(-0.16,-0.12),(-0.49,-0.11),(-0.19,0.04),(-0.39,0.31),(-0.09,0.18)]
star7 = ShapeStim(win, vertices=star7Vert, fillColor='green', lineWidth=2, lineColor='white')
# self-crossings are fine:
selfxVert = [(0, 0), (0, .2), (.2, 0), (.2, .2)]
selfx = ShapeStim(win, vertices=selfxVert, fillColor='darkmagenta', lineColor='yellow', opacity=.6, pos=(.2, -.3), size=2)
# `thing` has a fake hole and discontinuity (as the border will reveal):
thingVert = [(0,0),(0,.4),(.4,.4),(.4,0),(.1,0),(.1,.1),(.3,.1),(.3,.3),(.1,.3),(.1,0),(0,0),(.1,-.1),(.3,-.1),(.3,-.3),(.1,-.3),(.1,-.1)]
thing = ShapeStim(win, vertices=thingVert, fillColor='blue', lineWidth=0, opacity=.3, size=.7)
# `donut` has a true hole, using two loops of vertices:
donutVert = [[(-.2,-.2),(-.2,.2),(.2,.2),(.2,-.2)],[(-.15,-.15),(-.15,.15),(.15,.15),(.15,-.15)]]
donut = ShapeStim(win, vertices=donutVert, fillColor='orange', lineWidth=0, size=.75, pos=(-.2, -.25))
# lines are ok; use closeShape=False
lineAVert = [(0, 0), (.1, .1), (.1, .2), (.1, .1), (.1, -.1), (0, .1)]
lineA = ShapeStim(win, vertices=lineAVert, closeShape=False, lineWidth=2, pos=(-.4, .2), ori=180)
# a complex shape, many vertices:
coastVert = [(-23,230),(-3,223),(32,233),(43,230),(46,236),(34,240),(31,248),(31,267),(45,260),(52,266),(43,274),(47,279),(53,268),(65,282),(65,273),(56,266),(59,265),(53,261),(47,237),(43,230),(39,225),(43,219),(39,209),(29,206),(12,189),(9,183),(-2,183),(18,179),(-2,165),(10,169),(2,162),(29,177),(40,169),(74,170),(80,169),(86,153),(77,145),(76,132),(61,107),(61,100),(33,86),(51,91),(57,84),(27,63),(36,63),(51,70),(71,60),(87,42),(100,4),(97,-9),(125,-28),(139,-46),(138,-56),(148,-73),(118,-66),(149,-82),(157,-98),(157,-106),(151,-109),(148,-114),(154,-120),(158,-120),(159,-111),(168,-110),(188,-114),(205,-131),(203,-144),(200,-160),(188,-170),(164,-180),(179,-180),(179,-188),(157,-193),(172,-196),(165,-197),(176,-202),(193,-200),(193,-211),(181,-217),(180,-229),(172,-220),(155,-234),(139,-227),(118,-233),(99,-227),(94,-232),(91,-237),(101,-243),(106,-242),(107,-237),(103,-232),(94,-238),(90,-233),(81,-233),(81,-240),(61,-243),(50,-234),(27,-240),(21,-262),(15,-262),(15,-260),(-2,-253),(-13,-256),(-26,-264),(-26,-272),(-31,-275),(-31,-269),(-38,-267),(-41,-268),(-46,-271),(-46,-267),(-41,-262),(-28,-257),(-8,-226),(-8,-219),(1,-219),(3,-210),(25,-205),(30,-210),(35,-210),(35,-204),(29,-205),(29,-200),(15,-185),(0,-191),(0,-187),(3,-183),(-4,-180),(-24,-187),(-32,-178),(-29,-178),(-29,-174),(-35,-174),(-26,-164),(4,-149),(8,-139),(6,-118),(3,-117),(-4,-118),(-5,-122),(-16,-122),(-11,-115),(-2,-107),(-2,-100),(-11,-93),(-11,-85),(0,-84),(7,-93),(14,-88),(32,-89),(40,-96),(39,-85),(47,-90),(41,-79),(42,-55),(48,-53),(44,-41),(35,-48),(22,-21),(23,-3),(15,0),(4,-6),(-5,0),(-3,-14),(-20,-2),(-20,-16),(-31,2),(-13,36),(-18,48),(-18,65),(-21,50),(-35,65),(-25,76),(-39,64),(-37,56),(-37,44),(-28,30),(-26,37),(-32,49),(-39,45),(-39,29),(-52,25),(-47,32),(-45,50),(-45,65),(-54,57),(-61,43),(-69,43),(-73,50),(-73,57),(-72,57),(-71,57),(-68,57),(-66,57),(-64,57),(-62,57),(-62,58),(-60,58),(-59,59),(-58,59),(-58,66),(-47,76),(-46,71),(-44,80),(-44,89),(-29,120),(-48,99),(-48,91),(-59,87),(-71,87),(-63,92),(-66,99),(-89,93),(-76,108),(-64,105),(-52,96),(-64,116),(-53,120),(-53,130),(-83,158),(-95,163),(-102,130),(-116,113),(-105,133),(-105,166),(-96,172),(-95,169),(-93,175),(-94,181),(-94,206),(-66,227),(-66,215),(-66,202),(-67,188),(-89,173),(-94,164),(-81,158),(-67,171),(-55,141),(-50,143),(-52,161),(-50,181),(-43,186),(-30,186),(-38,197),(-26,230)]
coast = ShapeStim(win, vertices=coastVert, fillColor='darkgray', lineColor=None, size=.0007, pos=(.4, .2))
while not event.getKeys():
donut.draw()
coast.draw()
star7.setOri(1, '-') # rotate
star7.setSize(star7.ori % 360 / 360) # shrink
star7.draw()
thing.setOri(-star7.ori / 7) # rotate slowly
thing.draw()
arrow.draw()
lineA.draw()
# dynamic vertices:
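    # (star7.size is a length-2 numpy array, so the lines below broadcast to (x, y) pairs)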
selfxVert[0] = star7.size / 5
selfxVert[3] = star7.size / 5 * (0, .9)
selfx.vertices = selfxVert # can be slow with many vertices
selfx.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 4,854
|
Python
|
.py
| 48
| 98.5625
| 2,381
| 0.55646
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,931
|
kanizsa.py
|
psychopy_psychopy/psychopy/demos/coder/stimuli/kanizsa.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Demo for the class psychopy.visual.Pie().
Use the `Pie` class to create a Kanizsa figure which produces illusory
contours.
"""
from psychopy import core
import psychopy.visual as visual
from psychopy.visual import Pie
from psychopy import event
# open a window to render the shape
win = visual.Window((600, 600), allowGUI=False, monitor='testMonitor')
# create the stimulus object
pieStim = Pie(
win, radius=50, start=0., end=270., fillColor=(-1., -1., -1.), units='pix')
message = visual.TextStim(
win, text='Any key to quit', pos=(0, -0.8), units='norm')
# positions of the corners of the shapes
pos = [(-100, 100), (100, 100), (-100, -100), (100, -100)]
# orientations of the shapes
ori = [180., 270., 90., 0.]
while not event.getKeys():
for i in range(4):
pieStim.pos = pos[i]
pieStim.ori = ori[i]
pieStim.draw()
message.draw()
win.flip()
win.close()
core.quit()
| 971
|
Python
|
.py
| 30
| 29.533333
| 79
| 0.678495
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,932
|
imagesAndPatches.py
|
psychopy_psychopy/psychopy/demos/coder/stimuli/imagesAndPatches.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of ImageStim and GratingStim with image contents.
"""
from psychopy import core, visual, event
# Create a window to draw in
win = visual.Window((800, 800), monitor='testMonitor', allowGUI=False, color='black')
# Initialize some stimuli
beach = visual.ImageStim(win, image='beach.jpg', flipHoriz=True, pos=(0, 4.50), units='deg')
faceRGB = visual.ImageStim(win, image='face.jpg', mask=None,
pos=(50, -50), size=None, # will be the size of the original image in pixels
units='pix', interpolate=True, autoLog=False)
print("original image size:", faceRGB.size)
faceALPHA = visual.GratingStim(win, pos=(-0.7, -0.2),
tex="sin", mask="face.jpg", color=[1.0, 1.0, -1.0],
size=(0.5, 0.5), units="norm", autoLog=False)
message = visual.TextStim(win, pos=(-0.95, -0.95),
text='[Esc] to quit', color='white',
anchorVert='bottom', anchorHoriz='left')
trialClock = core.Clock()
t = lastFPSupdate = 0
win.recordFrameIntervals = True
while not event.getKeys():
t = trialClock.getTime()
# Images can be manipulated on the fly
faceRGB.ori += 1 # advance ori by 1 degree
faceRGB.draw()
faceALPHA.phase += 0.01 # advance phase by 1/100th of a cycle
faceALPHA.draw()
beach.draw()
# update fps once per second
if t - lastFPSupdate > 1.0:
lastFPS = win.fps()
lastFPSupdate = t
message.text = "%ifps, [Esc] to quit" % lastFPS
message.draw()
win.flip()
event.clearEvents('mouse') # only really needed for pygame windows
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,637
|
Python
|
.py
| 42
| 35.47619
| 92
| 0.682219
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,933
|
starField.py
|
psychopy_psychopy/psychopy/demos/coder/stimuli/starField.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ElementArray demo.
This demo requires a graphics card that supports OpenGL2 extensions.
It shows how to manipulate an arbitrary set of elements using numpy arrays
and avoiding for loops in your code for optimised performance.
See also the elementArrayStim demo.
"""
from psychopy import visual, event, core
from psychopy.tools.coordinatetools import pol2cart
import numpy
nDots = 500
maxSpeed = 0.02
dotSize = .0075
dotsTheta = numpy.random.rand(nDots) * 360
dotsRadius = (numpy.random.rand(nDots) ** 0.5) * 2
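# (taking the square root of the uniform sample gives a uniform dot density over the field's area)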
speed = numpy.random.rand(nDots) * maxSpeed
win = visual.Window([800, 600], color=[-1, -1, -1])
dots = visual.ElementArrayStim(win, elementTex=None, elementMask='circle',
nElements=nDots, sizes=dotSize)
while not event.getKeys():
# update radius
dotsRadius = (dotsRadius + speed)
# random radius where radius too large
outFieldDots = (dotsRadius >= 2.0)
dotsRadius[outFieldDots] = numpy.random.rand(sum(outFieldDots)) * 2.0
dotsX, dotsY = pol2cart(dotsTheta, dotsRadius)
dotsX *= 0.75 # to account for wider aspect ratio
dots.xys = numpy.array([dotsX, dotsY]).transpose()
dots.draw()
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,292
|
Python
|
.py
| 35
| 34.285714
| 74
| 0.741961
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,934
|
compare_text_timing.py
|
psychopy_psychopy/psychopy/demos/coder/stimuli/compare_text_timing.py
|
# -*- coding: utf-8 -*-
"""
Tests the timing of the TextBox, TextBox2, and TextStim:
* Time to create and perform the first build() of each stim type.
* Time to change the stim text to be displayed and call draw().
* Time to do a draw() call when the stim text content has not changed.
At the end of the test, a text report is printed to the console giving the
various timing measures collected.
"""
import string
import random
from psychopy import visual, core, event
from psychopy.visual import textbox
from psychopy.iohub.util import NumPyRingBuffer
import pyglet.gl as gl
# Variables to control text string length etc.
text_length=160
chng_txt_each_flips=5
max_flip_count=60*10
text_stim_types = [visual.TextBox, visual.TextBox2, visual.TextStim]
text_stim = []
stim_init_durations={}
txt_change_draw_times={}
no_change_draw_times={}
for stype in text_stim_types:
# Circular buffers to store timing measures
cname = stype.__name__
txt_change_draw_times[cname]=NumPyRingBuffer(max_flip_count)
no_change_draw_times[cname]=NumPyRingBuffer(max_flip_count)
# Some utility functions >>>
#
char_choices=string.ascii_uppercase+"ùéèàç^ùèàç髼±£¢¤¬¦²³½¾°µ¯±√∞≤≥±≠"
def getRandomString(slength):
"""
    Create a random text string of length 'slength' from the unicode characters
    in char_choices; then split the random text into 'words' of random length
    from [1, 3, 5, 7, 9].
"""
s=u''.join(random.choice(char_choices) for i in range(slength))
ns=u''
lns=len(ns)
while lns<slength:
ns+=s[lns:lns+random.choice([1,3,5,7,9])]+' '
lns=len(ns)
return ns[:slength]
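# e.g. getRandomString(20) returns a 20-character unicode string broken into short 'words'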
text=getRandomString(text_length)
### Main text Script logic ###
# Create Window
window=visual.Window((1920,1080),
units='pix',
fullscr=True, allowGUI=False,
screen=0
)
# Find a font that is available on the system.
fm = textbox.getFontManager()
font_names=fm.getFontFamilyNames()
font_name=font_names[0]
prefered_fonts=[fn for fn in font_names if fn in ['Courier New',
'Consolas',
'Lucida Sans Typewriter',
'Ubuntu Mono',
'DejaVu Sans Mono',
'Bitstream Vera Sans Mono']]
if prefered_fonts:
font_name=prefered_fonts[0]
print("Using font: ", font_name)
text_class_params=dict()
text_class_params['TextBox']=dict(window=window,
text=text,
font_name=font_name,
font_size=28,
font_color=[255,255,255],
size=(1.5,.5),
pos=(0.0,.5),
units='norm',
grid_horz_justification='left',
grid_vert_justification='center',
color_space='rgb255')
text_class_params['TextBox2']=dict(win=window,
text=text,
font=font_name,
borderColor=None,
fillColor=[0,0,0],
pos=(0.0,-0.1),
units='height',
anchor='center',
letterHeight=0.03,
editable=False,
size=[1.5,.33])
text_class_params['TextStim']=dict(win=window,
pos=(0.0,-0.5),
font=font_name,
units='norm',
height=0.06,
text=text,
autoLog=False,
wrapWidth=1.5)
# Create each stim type and perform draw on it. Time how long it takes
# to create the initial stim and do the initial draw.
for ttype in text_stim_types:
cname = ttype.__name__
stime=core.getTime()
text_stim.append(ttype(**text_class_params[cname]))
text_stim[-1].draw()
etime=core.getTime()
stim_init_durations[cname]=etime-stime
# Start the draw duration tests, for text change and no text change conditions.
demo_start=window.flip()
event.clearEvents()
fcount=0
while True:
# For the textBox and TextStim resource, change the text every
# chng_txt_each_flips, and record the time it takes to update the text
# and redraw() each resource type.
#
# Make sure timing of stim is for the time taken for that stim alone. ;)
gl.glFlush()
gl.glFinish()
if fcount==0 or fcount%chng_txt_each_flips==0:
t=getRandomString(text_length)
for tstim in text_stim:
cname = tstim.__class__.__name__
stime=core.getTime()*1000.0
tstim.setText(tstim.__class__.__name__+t)
tstim.draw()
gl.glFinish()
etime=core.getTime()*1000.0
txt_change_draw_times[cname].append(etime-stime)
else:
for tstim in text_stim:
cname = tstim.__class__.__name__
stime=core.getTime()*1000.0
tstim.draw()
gl.glFinish()
etime=core.getTime()*1000.0
no_change_draw_times[cname].append(etime-stime)
# Update the display to show stim changes
flip_time=window.flip()
fcount+=1
# End the test when a keyboard event is detected or when max_flip_count
# win.flip() calls have been made.
#
kb_events=event.getKeys()
if kb_events:
break
if fcount>=max_flip_count:
break
print()
print('-------Text Draw Duration Test---------')
print()
print('+ Draw Order: {}\t'.format([c.__name__ for c in text_stim_types]))
print('+ Text Stim Char Length:\t',text_length)
print()
for stim_type,init_dur in stim_init_durations.items():
print('+ {} INIT Dur (sec):\t{}'.format(stim_type, init_dur))
print()
print('+ Text Change Flip Perc:\t%.2f'%((1.0/chng_txt_each_flips)*100.0))
print('+ Total Flip Count:\t\t',fcount)
print('+ Test Duration (secs):\t\t%.3f'%(flip_time-demo_start))
print('+ FPS:\t\t\t\t%.3f'%(fcount/(flip_time-demo_start)))
print()
print('+ Average Draw Call Durations (msec):')
print()
print(' Text Object\t\tNo Txt Change\tTxt Change')
for stim_type in text_stim_types:
cname = stim_type.__name__
print(' %s\t\t%.3f\t\t%.3f'%(cname,
no_change_draw_times[cname].mean(),
txt_change_draw_times[cname].mean()))
print()
core.quit()
| 6,917
|
Python
|
.py
| 171
| 28.877193
| 79
| 0.555339
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,935
|
textbox_simple.py
|
psychopy_psychopy/psychopy/demos/coder/stimuli/textBoxStim/textbox_simple.py
|
# -*- coding: utf-8 -*-
"""
Shows how to create two textBox stim and present them. The first textbox
simply displays the text provided, centered. The second textbox shows more
of the configuration options available for the stim type.
Created on Thu Mar 21 18:37:10 2013
@author: Sol
"""
from psychopy import visual, core, event
from psychopy.visual import textbox
fm = textbox.getFontManager()
print("available_font_names:",fm.getFontFamilyNames())
# Create Window
window=visual.Window((800,600),
units='norm',
fullscr=False, allowGUI=True,
screen=0
)
sometext='PRESS ANY KEY TO QUIT DEMO.'
textbox1=visual.TextBox(window=window,
text=sometext,
font_name=fm.getFontFamilyNames()[0],
font_size=21,
font_color=[-1,-1,1],
size=(1.9,.3),
pos=(0.0,0.25),
grid_horz_justification='center',
units='norm',
)
textbox2=visual.TextBox(window=window,
text='This TextBox illustrates many of the different UX elements.',
font_size=32,
font_color=[1,-1,-1],
background_color=[-1,-1,-1,1],
border_color=[-1,-1,1,1],
border_stroke_width=4,
textgrid_shape=[20,4], # 20 cols (20 chars wide)
# by 4 rows (4 lines of text)
pos=(0.0,-0.25),
grid_color=(-1,1,-1,1)
)
textbox1.draw()
textbox2.draw()
demo_start=window.flip()
event.clearEvents()
last_attrib_change_time=demo_start
while True:
    if core.getTime() - last_attrib_change_time > 2.5:
last_attrib_change_time=core.getTime()
textbox1.draw()
textbox2.draw()
# Update the display to show any stim changes
flip_time=window.flip()
# End the test when a keyboard event is detected
#
kb_events=event.getKeys()
if kb_events:
break
core.quit()
| 2,248
|
Python
|
.py
| 59
| 25.542373
| 92
| 0.529385
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,936
|
textbox_glyph_placement.py
|
psychopy_psychopy/psychopy/demos/coder/stimuli/textBoxStim/textbox_glyph_placement.py
|
# -*- coding: utf-8 -*-
"""
Shows use of getGlyphPositionForTextIndex() to get the exact bounding box for a
given glyph, based on an index into the text string being shown.
Displays the mouse position in display coords so the reported glyph position
can be validated.
Created on Thu Mar 21 18:37:10 2013
@author: Sol
"""
from psychopy import visual, core, event
display_resolution=800,600
# Create Window
window=visual.Window(display_resolution,
units='norm',
fullscr=False, allowGUI=True,
screen=0
)
myMouse = event.Mouse()
# Create two textBox stim, each using different parameters supported by
# Textbox. Note that since no font_name is provided when creating the
# textbox stim, a default font is selected by TextBox stim automatically.
#
sometext=u'PRESS ANY KEY TO QUIT DEMO.'
textbox=visual.TextBox(window=window,
text=sometext,
bold=False,
italic=False,
font_size=21,
font_color=[-1,-1,1],
size=(1.9,.3),
grid_color=[-1,1,-1,1],
grid_stroke_width=1,
pos=(0.0,0.5),
units='norm',
grid_horz_justification='center',
grid_vert_justification='center',
)
if textbox.getDisplayedText()!=textbox.getText():
print('**Note: Text provided to TextBox does not fit within the TextBox bounds.')
# print(textbox.getTextGridCellPlacement())
print('Char Index 0 glyph box:',textbox.getGlyphPositionForTextIndex(0))
print('Char Index 7 glyph box:',textbox.getGlyphPositionForTextIndex(7))
disp_txt_len=len(textbox.getDisplayedText())-1
print('Char Index %d glyph box:'%(disp_txt_len),textbox.getGlyphPositionForTextIndex(disp_txt_len))
mouse_position=visual.TextBox(window=window,
text='(123456,123456)',
bold=False,
italic=False,
font_size=14,
font_color=[1,1,1],
textgrid_shape=(20,1),
pos=(0.0,0.5),
units='norm',
align_horz='left',
align_vert='bottom',
grid_horz_justification='left',
grid_vert_justification='left',
)
textbox.draw()
demo_start=window.flip()
event.clearEvents()
last_attrib_change_time=demo_start
while True:
    if core.getTime() - last_attrib_change_time > 2.5:
last_attrib_change_time=core.getTime()
textbox.draw()
mp=myMouse.getPos()
mouse_position.setText("%.3f,%.3f"%(mp[0], mp[1]))
mouse_position.setPosition(mp)
mouse_position.draw()
# Update the display to show any stim changes
flip_time=window.flip()
# End the test when a keyboard event is detected
#
kb_events=event.getKeys()
if kb_events:
break
core.quit()
| 3,233
|
Python
|
.py
| 78
| 28.717949
| 99
| 0.56114
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,937
|
keyboard.py
|
psychopy_psychopy/psychopy/demos/coder/iohub/keyboard.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example of how to access Keyboard events using iohub.
Displays information from ioHub Keyboard Events vs. psychopy.event.getKeys().
"""
from __future__ import absolute_import, division, print_function
from builtins import str
from psychopy import core, visual, event
from psychopy.hardware import keyboard as ptb_keyboard
from psychopy.iohub import launchHubServer
WINDOW_SIZE = 1024, 768
dw = WINDOW_SIZE[0] / 2
dh = WINDOW_SIZE[1] / 2
unit_type = 'pix'
win = visual.Window(WINDOW_SIZE, units=unit_type,
color=[128, 128, 128], colorSpace='rgb255')
# Create a Keyboard class with ptb as the backend
ptb_keyboard = ptb_keyboard.Keyboard(backend='ptb')
# Start iohub process. The iohub process can be accessed using `io`.
io = launchHubServer(window=win, Keyboard=dict(use_keymap='psychopy'))
# A `keyboard` variable is used to access the iohub Keyboard device.
keyboard = io.devices.keyboard
# constants for text element spacing:
ROW_COUNT = 10
TEXT_ROW_HEIGHT = (dh * 2) / ROW_COUNT
TEXT_STIM_HEIGHT = int(TEXT_ROW_HEIGHT / 2)
MARGIN = 25
LABEL_COLUMN_X = -dw + MARGIN
VALUE_COLUMN_X = MARGIN
LABEL_WRAP_LENGTH = dw - MARGIN / 2
VALUE_WRAP_LENGTH = dw - MARGIN / 2
TEXT_ROWS_START_Y = dh - MARGIN
# Create some psychoPy stim to display the keyboard events received...
# field labels:
title_label = visual.TextStim(win, units=unit_type,
    text=u'Press and Release Keys for ioHub KB Event Details',
pos=[0, TEXT_ROWS_START_Y],
height=TEXT_STIM_HEIGHT,
color='black', wrapWidth=dw * 2)
title2_label = visual.TextStim(win, units=unit_type,
text=u'Press "Q" Key to Exit Demo',
pos=[0, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT],
height=TEXT_STIM_HEIGHT,
color=[0.25, 0.2, 1],
alignText='center', anchorHoriz='center', anchorVert='top',
wrapWidth=dw * 2)
key_text_label = visual.TextStim(win, units=unit_type, text=u'iohub .key:',
pos=[LABEL_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 2],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
char_label = visual.TextStim(win, units=unit_type, text=u'iohub .char:',
pos=[LABEL_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 3],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
modifiers_label = visual.TextStim(win, units=unit_type,
text=u'iohub .modifiers',
pos=[LABEL_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 4],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
keypress_duration_label = visual.TextStim(win, units=unit_type,
text=u'Last Pressed Duration:',
pos=[LABEL_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 5],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
all_pressed_label = visual.TextStim(win, units=unit_type,
text=u'All Pressed Keys:',
pos=[LABEL_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 6],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
event_type_label = visual.TextStim(win, units=unit_type,
text=u'Last Event Type:',
pos=[LABEL_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 7],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
psychopy_key_label = visual.TextStim(win, units=unit_type,
text=u'vs. event.getKeys():',
pos=[LABEL_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 8],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
ptb_key_label = visual.TextStim(win, units=unit_type,
text=u'vs. ptb_kb.getKeys():',
pos=[LABEL_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 9],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
# Dynamic stim:
key_text_stim = visual.TextStim(win, units=unit_type, text=u'',
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 2],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
char_stim = visual.TextStim(win, units=unit_type, text=u'',
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 3],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
modifiers_stim = visual.TextStim(win, units=unit_type, text=u'',
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 4],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
keypress_duration_stim = visual.TextStim(win, units=unit_type, text=u'',
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 5],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
all_pressed_stim = visual.TextStim(win, units=unit_type, text=u'',
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 6],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
event_type_stim = visual.TextStim(win, units=unit_type, text=u'',
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 7],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
psychopy_key_stim = visual.TextStim(win, units=unit_type, text=u'',
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 8],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=dw * 2)
ptb_key_stim = visual.TextStim(win, units=unit_type, text=u'',
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 9],
color='black', alignText='left', anchorHoriz='left',
height=TEXT_STIM_HEIGHT, wrapWidth=dw * 2)
# Having all the stim to update / draw in a list makes drawing code
# more compact and reusable
STIM_LIST = [title_label, title2_label, key_text_label, char_label,
             modifiers_label, keypress_duration_label, all_pressed_label,
event_type_label, psychopy_key_label, ptb_key_label,
key_text_stim, char_stim, modifiers_stim, keypress_duration_stim,
all_pressed_stim, event_type_stim, psychopy_key_stim, ptb_key_stim]
# Clear all events from the global and device level ioHub Event Buffers.
io.clearEvents('all')
QUIT_EXP = False
demo_timeout_start = core.getTime()
# Loop until the 'q' key is pressed,
# or until 15 seconds pass since the last keyboard event.
# Note that keyboard events can be compared to a string, matching when
# the event's .key or .char equals that string.
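# e.g. after pressing the Q key, ('q' in events) evaluates True.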
events = []
flip_time = demo_timeout_start = 0
while 'q' not in events and flip_time - demo_timeout_start < 15.0:
for s in STIM_LIST:
s.draw()
flip_time = win.flip()
events = keyboard.getKeys()
for kbe in events:
key_text_stim.text = kbe.key
char_stim.text = kbe.char
modifiers_stim.text = str(kbe.modifiers)
psychopy_keys = event.getKeys()
if psychopy_keys:
psychopy_key_stim.text = psychopy_keys[0]
elif kbe.type == "KEYBOARD_PRESS":
psychopy_key_stim.text = ''
ptb_keys = ptb_keyboard.getKeys(waitRelease=False)
if ptb_keys:
ptb_key_stim.text = ptb_keys[0].name
all_pressed_stim.text = str(list(keyboard.state.keys()))
if kbe.type == "KEYBOARD_PRESS":
keypress_duration_stim.text = ''
else:
keypress_duration_stim.text = "%.6f" % kbe.duration
event_type_stim.text = kbe.type
demo_timeout_start = kbe.time
win.close()
core.quit()
# The contents of this file are in the public domain.
| 7,861
|
Python
|
.py
| 162
| 44.493827
| 76
| 0.706986
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,938
|
delaytest.py
|
psychopy_psychopy/psychopy/demos/coder/iohub/delaytest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests the round trip delay from when the experiment runtime requests
new events from the ioHub server to when a response with >=1 new
event is received and ready for use within the experiment script.
Only getEvents requests that return at least one new event are included in
the calculated statistics, so that the reported delay reflects the higher
processing load of new events being returned rather than the case of no
new events being available.
At the end of the test, a MatPlotLib figure is displayed showing a
histogram of the round trip event request delays as well as two figures
representing the retrace onset detection stability of PsychoPy.
"""
from numpy import zeros
from scipy.stats import norm
from psychopy import visual
from psychopy.iohub import Computer, launchHubServer
from psychopy.iohub.constants import EventConstants
from collections import OrderedDict
totalEventRequestsForTest = 1000
numEventRequests = 0
def run():
global numEventRequests
# create fullscreen pyglet window at current resolution, as well as required resources / drawings
psychoWindow, psychoStim = createPsychoGraphicsWindow()
io = launchHubServer(window=psychoWindow, experiment_code='delay_test')
io.devices.mouse.setPosition((0, 0))
lastFlipTime = 0.0
    # Create the numpy results array used to store the collected timing stats.
results = zeros((totalEventRequestsForTest, 3), dtype='f4')
numEventRequests = 0
# clear the ioHub event Buffer before starting the test.
# This is VERY IMPORTANT, given an existing bug in ioHub.
# You would want to do this before each trial started until the bug is fixed.
io.clearEvents('all')
# draw and flip to the updated graphics state.
flipTime = drawAndFlipPsychoWindow(psychoStim, psychoWindow, io, None)
ifi = flipTime - lastFlipTime
lastFlipTime = flipTime
# START TEST LOOP >>>>>>>>>>>>>>>>>>>>>>>>>>
while numEventRequests < totalEventRequestsForTest:
# send an Experiment Event to the ioHub server process
io.sendMessageEvent("This is a test message %.3f" % flipTime)
# check for any new events from any of the devices, and return the events list and the time it took to
# request the events and receive the reply
events, callDuration = checkForEvents(io)
if events:
# events were available
            results[numEventRequests][0] = callDuration  # time it took to get events from ioHub
            results[numEventRequests][1] = len(events)  # number of events returned
            results[numEventRequests][2] = ifi * 1000.0  # inter-flip interval, in msec
            numEventRequests += 1  # increment the tally counter
# draw and flip to the updated graphics state.
flipTime = drawAndFlipPsychoWindow(psychoStim, psychoWindow, io, events)
ifi = flipTime - lastFlipTime
lastFlipTime = flipTime
# END TEST LOOP <<<<<<<<<<<<<<<<<<<<<<<<<<
psychoWindow.close()
# plot collected delay and retrace detection results.
plotResults(results)
printResults(results)
def createPsychoGraphicsWindow():
# create a window
psychoStim = OrderedDict()
psychoWindow = visual.Window((1920, 1080),
monitor='default',
units='pix',
color=[128, 128, 128], colorSpace='rgb255',
fullscr=True, allowGUI=False,
screen=0
)
psychoWindow.setMouseVisible(False)
fixation = visual.PatchStim(psychoWindow, size=25, pos=[0, 0], sf=0,
color=[-1, -1, -1], colorSpace='rgb')
title = visual.TextStim(win=psychoWindow,
text="ioHub getEvents Delay Test", pos=[0, 125],
height=36, color=[1, .5, 0], colorSpace='rgb',
wrapWidth=800.0)
instr = visual.TextStim(win=psychoWindow,
text='Move the mouse around, press keyboard keys and mouse buttons',
pos=[0, -125], height=32, color=[-1, -1, -1],
colorSpace='rgb', wrapWidth=800.0)
psychoStim['static'] = visual.BufferImageStim(win=psychoWindow,
stim=(fixation, title, instr))
psychoStim['grating'] = visual.PatchStim(psychoWindow,
mask="circle", size=75, pos=[-100, 0],
sf=.075)
psychoStim['keytext'] = visual.TextStim(win=psychoWindow,
text='key', pos=[0, 300], height=48,
color=[-1, -1, -1], colorSpace='rgb',
wrapWidth=800.0)
psychoStim['mouseDot'] = visual.GratingStim(win=psychoWindow,
tex=None, mask="gauss",
pos=(0,0), size=(50, 50),
color='purple')
psychoStim['progress'] = visual.ShapeStim(win=psychoWindow,
vertices=[(0, 0), (0, 0), (0, 0), (0, 0)],
pos=(400, -300))
return psychoWindow, psychoStim
def drawAndFlipPsychoWindow(psychoStim, psychoWindow, io, events):
psychoStim['grating'].setPhase(0.05, '+') # advance phase by 0.05 of a cycle
currentPosition, currentDisplayIndex = io.devices.mouse.getPosition(return_display_index=True)
if currentDisplayIndex == 0:
currentPosition = (float(currentPosition[0]), float(currentPosition[1]))
psychoStim['mouseDot'].setPos(currentPosition)
if events:
diff = totalEventRequestsForTest - numEventRequests
v = psychoWindow.size[1] / 2.0 * diff / totalEventRequestsForTest
vert = [[0, 0], [0, v], [2, v], [2, 0]]
psychoStim['progress'].setVertices(vert)
for r in events:
            if r.type == EventConstants.KEYBOARD_PRESS:  # keypress event
psychoStim['keytext'].setText(r.key)
[psychoStim[skey].draw() for skey in psychoStim]
flipTime = psychoWindow.flip()
return flipTime
def checkForEvents(io):
# get the time we request events from the ioHub
stime = Computer.getTime()
r = io.getEvents()
if r and len(r) > 0:
# so there were events returned in the request, so include this getEvent request in the tally
etime = Computer.getTime()
dur = etime - stime
return r, dur * 1000.0
return None, None
def plotResults(results):
#### calculate stats on collected data and draw some plots ####
from matplotlib.pyplot import axis, title, xlabel, hist, grid, show, ylabel, plot
import pylab
durations = results[:, 0]
flips = results[1:, 2]
dmean = durations.mean()
dstd = durations.std()
fmean = flips.mean()
fstd = flips.std()
pylab.figure(figsize=(7, 5))
pylab.subplot(1, 3, 1)
# the histogram of the delay data
n, bins, patches = hist(durations, 50, facecolor='blue', alpha=0.75)
# add a 'best fit' line
y = norm.pdf(bins, dmean, dstd)
plot(bins, y, 'r--', linewidth=1)
xlabel('ioHub getEvents Delay')
ylabel('Percentage')
title('ioHub Event Delays (msec):\n' + r'$\ \mu={0:.3f},\ \sigma={1:.3f}$'.format(dmean, dstd))
axis([0, durations.max() + 1.0, 0, 25.0])
grid(True)
# graphs of the retrace data ( taken from retrace example in psychopy demos folder)
intervalsMS = flips
m = fmean
sd = fstd
distString = "Mean={0:.1f}ms, s.d.={1:.1f}, 99%CI={2:.1f}-{3:.1f}".format(
m, sd, m - 3 * sd, m + 3 * sd)
nTotal = len(intervalsMS)
nDropped = sum(intervalsMS > (1.5 * m))
droppedString = "Dropped/Frames = {0:d}/{1:d} = {2:0.2f}%".format(
        nDropped, nTotal, 100.0 * nDropped / nTotal)
pylab.subplot(1, 3, 2)
# plot the frameintervals
pylab.plot(intervalsMS, '-')
pylab.ylabel('t (ms)')
pylab.xlabel('frame N')
pylab.title(droppedString)
pylab.subplot(1, 3, 3)
pylab.hist(intervalsMS, 50, histtype='stepfilled')
pylab.xlabel('t (ms)')
pylab.ylabel('n frames')
pylab.title(distString)
show()
def printResults(results):
durations = results[:, 0]
dmean = durations.mean()
dstd = durations.std()
print("ioHub getEvent Delays:")
print("\tMEAN: ", dmean)
print("\tSDEV: ", dstd)
if __name__ == "__main__":
run()
| 8,759
|
Python
|
.py
| 181
| 38.41989
| 110
| 0.61773
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,939
|
keyboardreactiontime.py
|
psychopy_psychopy/psychopy/demos/coder/iohub/keyboardreactiontime.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
keyboard_rt.run.py
Keyboard Reaction Time Calculation shown within a line length matching task.
Initial Version: May 6th, 2013, Sol Simpson
"""
from psychopy import core, visual
from psychopy.iohub import launchHubServer
from math import fabs
win = visual.Window((1920, 1080), monitor='default', units='pix', fullscr=True, allowGUI=False)
io = launchHubServer(window=win)
# cache the keyboard device, saving some attribute lookups ('dots') during the trial loop
keyboard = io.devices.keyboard
# constants for use in example
line_size_match_delay = 5 + int(core.getTime() * 1000) % 5
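# (i.e. a pseudo-random delay of 5-9 seconds for the line to reach full length)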
full_length = win.size[0] / 2
latest_length = 0
static_bar = visual.ShapeStim(win=win, lineColor='Firebrick',
fillColor='Firebrick',
vertices=[[0, 0], [full_length, 0], [full_length, 5], [0, 5]],
pos=(-win.size[0] / 4, win.size[1] / 24))
expanding_line = visual.ShapeStim(win=win, lineColor='Firebrick',
fillColor='Firebrick',
vertices=[[0, 0], [0, 0], [1, 0], [0, 0]],
pos=(-win.size[0] / 4, -win.size[1] / 24))
text = visual.TextStim(win, text='Press Spacebar When Line Lengths Match',
pos=[0, 0], height=24,
alignText='center', anchorHoriz='center', anchorVert='center',
wrapWidth=win.size[0] * .8)
stim = [static_bar, expanding_line, text]
# Draw and Display first frame of screen
for s in stim:
s.draw()
flip_time = win.flip()
# Clear all events from all ioHub event buffers.
io.clearEvents('all')
# Run until the spacebar is pressed, or the line grows wider than the window
spacebar_rt = last_len = 0.0
while spacebar_rt == 0.0 and last_len < win.size[0]:
# check for RT
for kb_event in keyboard.getEvents():
if kb_event.char == ' ':
spacebar_rt = kb_event.time - flip_time
break
# Update visual stim as needed
time_passed = core.getTime() - flip_time
last_len = time_passed / line_size_match_delay * full_length
expanding_line.setPos((-last_len / 2, -win.size[1] / 24))
expanding_line.setVertices([[0, 0], [last_len, 0], [last_len, 5], [0, 5]])
for s in stim:
s.draw()
win.flip()
results = "Did you Forget to Press the Spacebar?\n"
if spacebar_rt > 0.0:
msg = "RT: %.4f sec || Perc. Length Diff: %.2f || RT Error: %.4f sec\n"
results = msg % (spacebar_rt,
fabs(last_len - full_length) / full_length * 100.0,
spacebar_rt - line_size_match_delay)
exitStr = "Press Any Key To Exit"
results = results + exitStr.center(len(results))
text.setText(results)
for s in stim:
s.draw()
win.flip()
keyboard.waitForPresses(maxWait=10.0)
win.close()
core.quit()
# The contents of this file are in the public domain.
| 2,681
|
Python
|
.py
| 69
| 34.884058
| 95
| 0.668085
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,940
|
mouse_multi_window.py
|
psychopy_psychopy/psychopy/demos/coder/iohub/mouse_multi_window.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test for using iohub mouse with multiple windows on multiple monitors.
To enable multi window support for the iohub Mouse device:
launchHubServer(window=win, Mouse=dict(enable_multi_window=True))
In this mode, if the mouse is over a psychopy window, mouse position is returned
as the pix position within the window, with origin (0, 0) at the window center. The
mouse event's window_id will equal a psychopy window handle (pyglet only).
Note: If testing using 'cm' or 'deg' units, use your actual monitor configurations
or update the `monitors.Monitor` created below to match your setup.
If the mouse is not over a psychopy window, desktop mouse position is returned with
window_id = 0.
"""
import sys
from psychopy import visual, core, monitors
from psychopy.iohub import launchHubServer
from psychopy.iohub.constants import EventConstants
# True = print mouse events to stdout, False = do not
PRINT_MOUSE_EVENTS = False
# Test creating a monitor before starting iohub
mon0 = monitors.Monitor('monitor0')
mon0.setDistance(60.0)
mon0.setWidth(33.0)
mon0.setSizePix((1280, 1024))
win = visual.Window((400, 400), pos=(0, 30), units='norm', fullscr=False, allowGUI=True, screen=0, monitor=mon0,
winType='pyglet')
# start the iohub server
io = launchHubServer(window=win, Mouse=dict(enable_multi_window=True))
# Test creating a monitor after starting iohub
mon1 = monitors.Monitor('monitor1')
mon1.setDistance(60.0)
mon1.setWidth(34.5)
mon1.setSizePix((1920, 1080))
win2 = visual.Window((600, 600), pos=(500, 30), units='cm', fullscr=False, allowGUI=True, screen=1, monitor=mon1,
winType='pyglet')
# access the iohub keyboard and mouse
keyboard = io.devices.keyboard
mouse = io.devices.mouse
# TODO: How to handle setPos when mouse enable_multi_window=True
# mouse.setPosition((0.0, 0.0))
txt_proto = 'Desktop x,y: {}, {}\nWin x,y: {:.4}, {:.4}\n\nwin.units: {}\n\n\nPress Any Key to Quit.'
win_stim={}
for w in visual.window.openWindows:
win_stim[w()._hw_handle] = visual.TextStim(w(), pos=(-0.75, 0.0), units='norm', alignText='left', anchorHoriz='left',
anchorVert='center', height=.1, autoLog=False, wrapWidth=1.5,
text=txt_proto.format('?', '?', '?', '?', w().units))
io.clearEvents('all')
demo_timeout_start = core.getTime()
# Run the example until a keyboard event is received.
kb_events = None
while not kb_events:
for stim in win_stim.values():
stim.draw()
win.flip() # redraw the buffer
flip_time = win2.flip() # redraw the buffer
# Check for Mouse events
mouse_events = mouse.getEvents()
if mouse_events:
# Simple example of handling different mouse event types.
if PRINT_MOUSE_EVENTS:
for me in mouse_events:
if me.type == EventConstants.MOUSE_MOVE:
print(me)
elif me.type == EventConstants.MOUSE_DRAG:
print(me)
elif me.type == EventConstants.MOUSE_BUTTON_PRESS:
print(me)
elif me.type == EventConstants.MOUSE_BUTTON_RELEASE:
print(me)
elif me.type == EventConstants.MOUSE_SCROLL:
print(me)
else:
print("Unhandled event type:", me.type, me)
sys.exit()
# Only update display based on last received event
me = mouse_events[-1]
#print("display: ", me.display_id)
if me.window_id > 0:
for win_handle, stim in win_stim.items():
if win_handle != me.window_id:
stim.text = txt_proto.format('?', '?', '?', '?', stim.win.units)
else:
stim.text = txt_proto.format('?', '?', me.x_position, me.y_position, stim.win.units)
else:
for win_handle, stim in win_stim.items():
stim.text = txt_proto.format(me.x_position, me.y_position, '?', '?', stim.win.units)
demo_timeout_start = mouse_events[-1].time
# If 15 seconds passes without receiving any kb or mouse event,
# then exit the demo
if flip_time - demo_timeout_start > 15.0:
print("Ending Demo Due to 15 Seconds of Inactivity.")
break
# Check for keyboard events.
kb_events = keyboard.getEvents()
win.close()
win2.close()
core.quit()
# End of Example
# The contents of this file are in the public domain.
| 4,547
|
Python
|
.py
| 100
| 37.92
| 121
| 0.643455
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,941
|
mouse.py
|
psychopy_psychopy/psychopy/demos/coder/iohub/mouse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of using the iohub mouse.
"""
import sys
from psychopy import visual, core
from psychopy.iohub import launchHubServer
win = visual.Window((1920, 1080), units='height', fullscr=True, allowGUI=False, screen=0)
# create the process that will run in the background polling devices
io = launchHubServer(window=win)
# some default devices have been created that can now be used
keyboard = io.devices.keyboard
mouse = io.devices.mouse
mouse.setPosition((0.0, .250))
#win.setMouseVisible(False)
# Create some psychopy visual stim
fixSpot = visual.GratingStim(win, tex="none", mask="gauss", pos=(0, 0), size=(.03, .03), color='black', autoLog=False)
grating = visual.GratingStim(win, pos=(.3, 0), tex="sin", mask="gauss", color=[1.0, .5, -1.0], size=(.15, .15),
sf=(.01, 0), autoLog=False)
message = visual.TextStim(win, pos=(0, -.2), height=.03, alignText='center', anchorHoriz='center', anchorVert='center',
wrapWidth=.7, text='move=mv-spot, left-drag=SF, right-drag=mv-grating, scroll=ori',
autoLog=False)
displayIdMsg = visual.TextStim(win, pos=(0.0, -0.3), alignText='center', anchorHoriz='center', anchorVert='center',
height=.03, text='Display X', autoLog=False, wrapWidth=0.7)
message3 = visual.TextStim(win, pos=(0.0, -0.4), alignText='center', anchorHoriz='center', anchorVert='center',
height=.03, text='Press Any Key to Quit.', autoLog=False, wrapWidth=0.7)
last_wheelPosY = 0
io.clearEvents('all')
demo_timeout_start = core.getTime()
# Run the example until a keyboard event is received.
kb_events = None
last_display_ix = -1
while not kb_events:
# Get the current mouse position
# posDelta is the change in position * since the last call *
position, posDelta, display_ix = mouse.getPositionAndDelta(return_display_index=True)
mouse_dX, mouse_dY = posDelta
if display_ix is not None and display_ix != last_display_ix:
displayIdMsg.setText("Display %d" % display_ix)
last_display_ix = display_ix
# Get the current state of each of the Mouse Buttons
left_button, middle_button, right_button = mouse.getCurrentButtonStates()
# If the left button is pressed, change the grating's spatial frequency
if left_button:
grating.setSF(mouse_dX / 5000.0, '+')
elif right_button:
grating.setPos(position)
# If no buttons are pressed on the Mouse, move the position of the mouse cursor.
    if not any((left_button, middle_button, right_button)):
fixSpot.setPos(position)
if sys.platform == 'darwin':
        # On macOS, both x and y mouse wheel events can be detected, assuming the
        # mouse being used supports 2D mouse wheel motion.
wheelPosX, wheelPosY = mouse.getScroll()
else:
# On Windows and Linux, only vertical (Y) wheel position is supported.
wheelPosY = mouse.getScroll()
# keep track of the wheel position 'delta' since the last frame.
wheel_dY = wheelPosY - last_wheelPosY
last_wheelPosY = wheelPosY
# Change the orientation of the visual grating based on any vertical mouse wheel movement.
grating.setOri(wheel_dY * 5, '+')
# Advance 0.05 cycles per frame.
grating.setPhase(0.05, '+')
# Redraw the stim for this frame.
fixSpot.draw()
grating.draw()
message.draw()
message3.draw()
displayIdMsg.draw()
flip_time = win.flip() # redraw the buffer
# Check for keyboard and mouse events.
# If 15 seconds passes without receiving any mouse events, then exit the demo
kb_events = keyboard.getEvents()
mouse_events = mouse.getEvents()
if mouse_events:
demo_timeout_start = mouse_events[-1].time
if flip_time - demo_timeout_start > 15.0:
print("Ending Demo Due to 15 Seconds of Inactivity.")
break
# Clear out events that were not accessed this frame.
io.clearEvents()
# End of Example
win.close()
core.quit()
# The contents of this file are in the public domain.
| 4,117
|
Python
|
.py
| 87
| 41.586207
| 119
| 0.685643
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,942
|
launchHub.py
|
psychopy_psychopy/psychopy/demos/coder/iohub/launchHub.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Testing the iohub.launchHubServer function
illustrating the different ways it can be used.
No PsychoPy Window is created for this demo; results are
printed to stdout.
Initial Version: May 6th, 2013, Sol Simpson
"""
from psychopy.iohub import launchHubServer
def testWithNoKwargs():
"""
testWithNoKwargs illustrates using the launchHubServer function with no
parameters at all. Considerations:
* A Keyboard, Mouse, Monitor, and Experiment device are created by default.
* All devices use their default parameter settings. Therefore,
not very useful in real studies.
* ioHub DataStore is not enabled.
"""
io = launchHubServer()
# Get the default keyboard device created.
keyboard = io.devices.keyboard
print()
print(" ** PRESS A KEY TO CONTINUE.....")
# Check for new events every 1/4 second.
# By using the io.wait() function, the ioHub Process is checked for
# events every 50 msec or so, and they are cached in the PsychoPy process
# until the next getEvents() call is made. On Windows, messagePump() is also
# called periodically so that any Window you have created does not lock up.
#
while not keyboard.getEvents():
io.wait(0.25)
print("A Keyboard Event was Detected; exiting Test.")
io.quit()
def testUsingPsychoPyMonitorConfig():
"""
testUsingPsychoPyMonitorConfig illustrates using the launchHubServer function
and providing a PsychoPy monitor configuration file name.
Considerations:
* A Keyboard, Mouse, Monitor, and Experiment device are created by default.
* If the psychopy_monitor_name is valid, the ioHub Display is updated to
use the display size and viewing distance specified in the psychopy monitor config.
* ioHub DataStore is not enabled.
"""
io = launchHubServer(psychopy_monitor_name='testMonitor')
# Get the default display device created.
display = io.devices.display
    # Print the display's physical characteristics, showing they have
    # been updated based on the settings in the PsychoPy monitor config.
print('Display Psychopy Monitor Name: ', display.getPsychopyMonitorName())
print('Display Default Eye Distance: ', display.getDefaultEyeDistance())
print('Display Physical Dimensions: ', display.getPhysicalDimensions())
# That's it, shut down the ioHub Process and exit. ;)
io.quit()
def testEnabledDataStore():
"""
testEnabledDataStore is the same as testUsingPsychoPyMonitorConfig above,
but by adding an experiment_code parameter the ioHub DataStore will
    be enabled, using an auto-generated session_code each time it is run.
Experiment and session metadata is printed at the end of the demo.
Considerations:
* A Keyboard, Mouse, Monitor, and Experiment device are created by default.
* If the psychopy_monitor_name is valid, the ioHub Display is updated to
use the display size and viewing distance specified in the psychopy monitor config.
* ioHub DataStore is enabled because experiment_code is provided.
      session_code will be auto-generated using the current time.
"""
psychopy_mon_name = 'testMonitor'
exp_code = 'gap_endo_que'
io = launchHubServer(psychopy_monitor_name=psychopy_mon_name,
experiment_code=exp_code)
display = io.devices.display
print('Display Psychopy Monitor Name: ', display.getPsychopyMonitorName())
print('Display Default Eye Distance: ', display.getDefaultEyeDistance())
print('Display Physical Dimensions: ', display.getPhysicalDimensions())
from pprint import pprint
print('Experiment Metadata: ')
pprint(io.getExperimentMetaData())
print('\nSession Metadata: ')
pprint(io.getSessionMetaData())
io.quit()
def testEnabledDataStoreAutoSessionCode():
"""
testEnabledDataStoreAutoSessionCode is the same as testEnabledDataStore
above, but session_code is provided by the script instead of being
auto-generated. The ioHub DataStore will be enabled, using the
experiment and session_code provided each time it is run. Experiment
and session metadata is printed at the end of the demo.
Considerations:
* A Keyboard, Mouse, Monitor, and Experiment device are created by
default.
* If the psychopy_monitor_name is valid, the ioHub Display is
updated to use the display size and viewing distance specified
in the psychopy monitor config.
* ioHub DataStore is enabled because experiment_code and
session_code are provided.
"""
import time
from pprint import pprint
psychopy_mon_name = 'testMonitor'
exp_code = 'gap_endo_que'
sess_code = 'S_{0}'.format(int(time.mktime(time.localtime())))
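    # (the session code embeds the current Unix timestamp, e.g. something like 'S_1368000000')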
print('Current Session Code will be: ', sess_code)
io = launchHubServer(psychopy_monitor_name=psychopy_mon_name,
experiment_code=exp_code,
session_code=sess_code)
display = io.devices.display
print('Display Psychopy Monitor Name: ', display.getPsychopyMonitorName())
print('Display Default Eye Distance: ', display.getDefaultEyeDistance())
print('Display Physical Dimensions: ', display.getPhysicalDimensions())
print('Experiment Metadata: ')
pprint(io.getExperimentMetaData())
print('\nSession Metadata: ')
pprint(io.getSessionMetaData())
io.quit()
test_list = ['testWithNoKwargs', 'testUsingPsychoPyMonitorConfig',
'testEnabledDataStore', 'testEnabledDataStoreAutoSessionCode']
if __name__ == '__main__':
for test in test_list:
print('\n------------------------------------\n')
print('Running %s Test:'%(test))
for namespace in (locals(), globals()):
if test in namespace:
result = namespace[test]()
print('Test Result: ', result)
break
# The contents of this file are in the public domain.
| 6,310 | Python | .py | 127 | 41.330709 | 97 | 0.679837 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,943 | pstbox.py | psychopy_psychopy/psychopy/demos/coder/iohub/serial/pstbox.py |
# NOTE: Example currently not working in Python 3.
import time
import numpy as np
from psychopy import core, visual
from psychopy.iohub import launchHubServer
from psychopy.iohub.devices import Computer
#####################################################################
#
# Device setup
#
# Settings for serial port (PST response box) communication.
SERIAL_PORT = 'COM5'
BAUDRATE = 19200
# ioHub configuration.
psychopy_mon_name = 'Monitor_01'
exp_code = 'pstbox'
sess_code = 'S_{0}'.format(int(time.mktime(time.localtime())))
iohubkwargs = {
'psychopy_monitor_name': psychopy_mon_name,
'experiment_code': exp_code,
'session_code': sess_code,
'serial.Pstbox': dict(name='pstbox', port=SERIAL_PORT, baud=BAUDRATE)
}
# Start the iohub server and set up devices.
io = launchHubServer(**iohubkwargs)
computer = Computer
display = io.devices.display
pstbox = io.devices.pstbox
print('Switching on lamp #3...')
pstbox.setLampState([0, 0, 1, 0, 0])
print('...done.')
# Create a window.
win = visual.Window(
display.getPixelResolution(),
units='pix', fullscr=True, allowGUI=False,
screen=0
)
#####################################################################
#
# Set up fixation and stimulus.
#
# Instruction text.
instruction = visual.TextStim(
win,
text='Push a button as soon as the colored figure appears.\n\n'
'Push any button to start.'
)
# Fixation spot.
fixSpot = visual.PatchStim(
win, tex='none', mask='gauss',
pos=(0, 0), size=(30, 30), color='black',
autoLog=False
)
# Visual stimulus.
grating = visual.PatchStim(
win, pos=(0, 0),
tex='sin', mask='gauss',
color=[1.0, 0.5, -1.0],
size=(300.0, 300.0), sf=(0.01, 0.0),
autoLog=False
)
#####################################################################
#
# Start the experiment.
#
pstbox.clearEvents()
start_time = computer.getTime()
# Display instruction and check if we collected any button events.
# If there is no button press within a 30 s period, quit.
instruction.draw()
win.flip()
while not pstbox.getEvents():
if core.getTime() - start_time > 30:
print('Timeout waiting for button event. Exiting...')
core.quit()
# Clear the screen.
win.flip()
nreps = 10
RT = np.array([])
button = np.array([])
io.wait(2)
for i in range(nreps):
print('Trial #', i)
# Raise process priorities.
computer.setPriority('high')
io.setPriority('high')
# Draw the fixation.
fixSpot.draw()
win.flip()
# Clear the PST box event buffers immediately after the
# fixation is displayed.
pstbox.clearEvents()
# Wait a variable time until the stimulus is being presented.
io.wait(1 + np.random.rand())
# Draw the stimulus and have it displayed for approx. 0.5 s.
grating.draw()
t0 = win.flip()
io.wait(0.5)
# Clear the screen and wait a little while for possible late responses.
win.flip()
io.wait(0.25)
# Check if we collected any button events.
# If we did, use the first one to determine response time.
pstevents = pstbox.getEvents()
if pstevents:
RT = np.append(RT, pstevents[0].time - t0)
button = np.append(button, pstevents[0].button)
print('RT: %f, Button: %d' % (RT[-1], button[-1]))
else:
RT = np.append(RT, np.nan)
button = np.append(button, np.nan)
print('No response.')
print('---')
# ITI
io.wait(2)
#####################################################################
#
# All data collected; print some results.
#
print('Collected %d responses.' % np.count_nonzero(~np.isnan(RT)))
print('Mean RT: %f s' % np.nanmean(RT))
print('---')
#####################################################################
#
# Shut down.
#
# Switch off all lamps.
pstbox.setLampState([0, 0, 0, 0, 0])
# Close the window and quit the program.
core.quit()
| 3,876 | Python | .py | 130 | 26.592308 | 75 | 0.619137 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,944 | _parseserial.py | psychopy_psychopy/psychopy/demos/coder/iohub/serial/_parseserial.py |
# Contains the custom serial parsing function that is used by
# the customparser.py demo script.
# IMPORTANT: Remember that this code will be called from the iohub server
# process, not the psychopy experiment process.
# When code is running in the ioHub Server Process, you can have text printed
# to the experiment process stderr by using the iohub 'print2err' function.
# Do not use the standard 'print' call; at best it does nothing, and at
# worst it can prevent the iohub server from starting.
from psychopy.iohub import print2err
def checkForSerialEvents(read_time, rx_data, parser_state, **kwargs):
"""
Must have the following signature:
evt_list = someCustomParserName(read_time, rx_data, parser_state, **kwargs)
where:
read_time: The time when the serial device read() returned
with the new rx_data.
rx_data: The new serial data received. Any buffering of data
across function calls must be done by the function
logic itself. parser_state could be used to hold
such a buffer if needed.
parser_state: A dict which can be used by the function to
store any values that need to be accessed
across multiple calls to the function. The dict
is initially empty.
kwargs: The parser_kwargs preference dict read from
the event_parser preferences; or an empty dict if
parser_kwargs was not found.
If events should be generated by iohub, the function must return a
list like object, used to provide ioHub with any new serial events that
have been found. Each element of the list must be a dict like object,
representing a single serial device event found by the parsing function.
An event dict can contain the following key, value pairs:
data: The string containing the parsed event data. (REQUIRED)
time: The timestamp for the event (Optional). If not provided,
the return time of the latest serial.read() is used.
If the function has detected no serial events, an empty list or None
can be returned.
:return: list of ioHub serial device events found. None == [] here.
"""
print2err("checkForSerialEvents called: ", (read_time, rx_data, parser_state, kwargs))
parser_state['last_time'] = read_time
serial_events = []
if rx_data:
serial_events.append({'data': rx_data})
return serial_events
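# Below is a minimal sketch (not used by the demo) of a parser that buffers
# partial lines across read() calls via parser_state, emitting one event per
# newline-terminated line. The function name is an illustrative assumption.
def checkForLineEvents(read_time, rx_data, parser_state, **kwargs):
    buf = parser_state.get('buffer', '') + rx_data
    serial_events = []
    while '\n' in buf:
        line, buf = buf.split('\n', 1)
        if line:
            # Use the read() return time for every event found in this chunk.
            serial_events.append({'data': line, 'time': read_time})
    # Keep any trailing partial line for the next call.
    parser_state['buffer'] = buf
    return serial_events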
| 2,461 | Python | .py | 45 | 47.244444 | 90 | 0.701536 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,945 | customparser.py | psychopy_psychopy/psychopy/demos/coder/iohub/serial/customparser.py |
"""
Demo using a custom serial rx parser to generate ioHub Serial Device events.
The _parseserial.py file is also required for this demo, as it contains the
custom parser function that the ioHub Serial device uses during runtime.
** This demo assumes that whatever is written to the device serial port
is simply returned back (echoed) on the device serial port.**
"""
import time
from psychopy import core, visual
from psychopy.iohub import launchHubServer
# Settings for serial port communication.
SERIAL_PORT = 'COM46'
BAUDRATE = 19200
# event_parser_info dict:
#
# parser_function key value can be a str giving the module.function path,
# or it can be the actual function object to be run by the iohub process.
#
# *Important:* The function provided should be in a file that can be imported
# as a module without causing unwanted behavior on the iohub process.
# Some options:
# 1) Put the function in a file that contains only the function,
# as is done in this example.
# 2) Ensure any script logic that will be run when the file is called by
# a user ( i.e. python.exe filewithfunc.py ) is inside a:
# if __name__ == '__main__':
# condition so it is not run when the file is only imported.
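# For example, a sketch of option 2 ('filewithfunc.py' is a hypothetical
# module name):
#
#     def checkForSerialEvents(read_time, rx_data, parser_state, **kwargs):
#         ...  # parse rx_data and return a list of event dicts
#
#     if __name__ == '__main__':
#         # Manual test code; never run when the file is merely imported
#         # by the iohub process.
#         print(checkForSerialEvents(0.0, 'abc', {}))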
event_parser_info = dict(parser_function="_parseserial.checkForSerialEvents",
parser_kwargs=dict(var1='not used', var2=1234))
# configure iohub
exp_code = 'serial_demo'
sess_code = 'S_{0}'.format(int(time.mktime(time.localtime())))
iohubkwargs = {'experiment_code': exp_code,
'session_code': sess_code,
'serial.Serial': dict(name='serial',
port=SERIAL_PORT,
baud=BAUDRATE,
parity='NONE',
bytesize=8,
event_parser=event_parser_info)}
# start the iohub server and set up display and PST box devices
io = launchHubServer(**iohubkwargs)
serial_device = io.devices.serial
keyboard = io.devices.keyboard
# Start collecting data from the PST box in the background.
serial_device.enableEventReporting(True)
# Create a window.
win = visual.Window((1024, 768), units='pix')
# Instruction text.
instruction = visual.TextStim(win, text='Monitoring for serial input events....\n\nPress any key to exit.')
# Display instruction.
instruction.draw()
win.flip()
io.clearEvents('all')
# Check for keyboard and serial events.
# Exit on keyboard press event.
# Print any serial events.
#
while not keyboard.getPresses():
serial_device.write('aaa')
io.wait(0.05)
serial_device.write('bbb')
io.wait(0.05)
serial_device.write('ccc')
io.wait(.500)
for serevt in serial_device.getEvents():
print(serevt)
# Stop recording serial events.
serial_device.enableEventReporting(False)
# Close the window and quit the program.
core.quit()
| 2,935 | Python | .py | 71 | 36.211268 | 107 | 0.689583 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,946 | _wintabgraphics.py | psychopy_psychopy/psychopy/demos/coder/iohub/wintab/_wintabgraphics.py |
# -*- coding: utf-8 -*-
"""
Wintab util objects / functions for stylus, position traces.
"""
import math
from psychopy import visual
from psychopy.visual.basevisual import MinimalStim
class PenPositionStim(MinimalStim):
"""Draws the current pen x,y position with graphics that represent the
pressure, z axis, and tilt data for the wintab sample used."""
def __init__(self, win, min_opacity=0.0, hover_color=(255, 0, 0),
touching_color=(0, 255, 0), tiltline_color=(255, 255, 0),
tiltline_width=2,
min_size=0.033, size_range=0.1666, tiltline_scalar=1.0,
name=None, autoLog=None, depth=-10000, colorSpace='rgb255'):
self.win = win
self.depth = depth
super(PenPositionStim, self).__init__(name, autoLog)
# Pen Hovering Related
        # Opacity is changed based on the pen's z axis if data for the z axis
# is available. Opacity of min_opacity is used when pen is at the
# furthest hover distance (z value) supported by the device.
# Opacity of 1.0 is used when z value == 0, meaning pen is touching
# digitizer surface.
self.min_opacity = min_opacity
# If z axis is supported, hover_color specifies the color of the pen
# position dot when z val > 0.
self.hover_color = hover_color
# Pen Pressure Related
# Smallest radius (in norm units) that the pen position gaussian blob
# will have, which occurs when pen pressure value is 0
self.min_size = min_size
# As pen pressure value increases, so does position gaussian blob
# radius (in norm units). Max radius is reached when pressure is
# at max device pressure value, and is equal to min_size+size_range
self.size_range = size_range
# Color of pen position blob when pressure > 0.
self.touching_color = touching_color
# Pen tilt Related
# Color of line graphic used to represent the pens tilt relative to
# the digitizer surface.
self.tiltline_color = tiltline_color
self.tiltline_width = tiltline_width
self.tiltline_scalar = tiltline_scalar
# Create a Gaussian blob stim to use for pen position graphic
self.pen_guass = visual.PatchStim(win, units='norm', tex='none',
mask='gauss', pos=(0, 0),
colorSpace='rgb255',
size=(self.min_size, self.min_size),
color=self.hover_color,
autoLog=False,
opacity=0.0)
# Create a line stim to use for pen position graphic
self.pen_tilt_line = visual.Line(win, units='norm', start=[0, 0],
lineWidth=self.tiltline_width,
end=[0, 0],
colorSpace='rgb255',
lineColor=self.tiltline_color,
opacity=0.0)
# self.pen_tilt_line.opacity=0.0
def updateFromEvent(self, evt):
"""Update the pen position and tilt graphics based on the data from
a wintab sample event.
:param evt: iohub wintab sample event
:return:
"""
# update the pen position stim based on
# the last tablet event's data
if evt.pressure > 0:
# pen is touching tablet surface
self.pen_guass.color = self.touching_color
else:
# pen is hovering just above tablet surface
self.pen_guass.color = self.hover_color
if evt.device.axis['pressure']['supported']:
# change size of pen position blob based on samples pressure
# value
pnorm = evt.pressure / evt.device.axis['pressure']['range']
self.pen_guass.size = self.min_size + pnorm * self.size_range
# set the position of the gauss blob to be the pen x,y value converted
# to norm screen coords.
self.pen_guass.pos = evt.getNormPos()
# if supported, update all graphics opacity based on the samples z value
# otherwise opacity is always 1.0
if evt.device.axis['z']['supported']:
z = evt.device.axis['z']['range'] - evt.z
znorm = z / evt.device.axis['z']['range']
sopacity = self.min_opacity + znorm * (1.0 - self.min_opacity)
self.pen_guass.opacity = self.pen_tilt_line.opacity = sopacity
else:
self.pen_guass.opacity = self.pen_tilt_line.opacity = 1.0
# Change the tilt line start position to == pen position
self.pen_tilt_line.start = self.pen_guass.pos
# Change the tilt line end position based on samples tilt value
# If tilt is not supported, it will always return 0,0
# so no line is drawn.
t1, t2 = evt.tilt
pen_tilt_xy = 0, 0
if t1 != t2 != 0:
pen_tilt_xy = t1 * math.sin(t2), t1 * math.cos(t2)
pen_pos = self.pen_guass.pos
tiltend = (pen_pos[0] + pen_tilt_xy[0] * self.tiltline_scalar,
pen_pos[1] + pen_tilt_xy[1] * self.tiltline_scalar)
self.pen_tilt_line.end = tiltend
def draw(self):
"""Draw the PenPositionStim to the opengl back buffer. This needs
        to be called prior to calling win.flip() if the stim is to be
displayed.
:return: None
"""
self.pen_guass.draw()
self.pen_tilt_line.draw()
def clear(self):
"""Hide the graphics on the screen, even if they are drawn, by
setting opacity to 0.
:return: None
"""
self.pen_guass.opacity = 0.0
self.pen_tilt_line.opacity = 0.0
def __del__(self):
self.win = None
class PenTracesStim(MinimalStim):
"""Graphics representing where the pen has been moved on the digitizer
surface. Positions where sample pressure > 0 are included.
Implemented as a list of visual.ShapeStim, each representing a
    single pen trace/segment (a series of pen samples with pressure >
    0). For improved performance, a single pen trace can have at most
    max_trace_len points before a new ShapeStim is created and made
    the 'current' pen trace.
"""
def __init__(self, win, lineWidth=2, lineColor=(0, 0, 0), opacity=1.0,
maxlen=256, name=None, autoLog=None, depth=-1000):
self.depth = depth
self.win = win
super(PenTracesStim, self).__init__(name, autoLog)
# A single pen trace can have at most max_trace_len points.
self.max_trace_len = maxlen
# The list of ShapeStim representing pen traces
self.pentracestim = []
# The ShapeStim state new / upcoming position points will be added to.
self.current_pentrace = None
# A list representation of the current_pentrace.vertices
self.current_points = []
# The last pen position added to a pen trace.
self.last_pos = [0, 0]
self.lineWidth = lineWidth
self.lineColor = lineColor
self.opacity = opacity
@property
def traces(self):
"""List of np arrays, each np array is the set of vertices for one
pen trace.
:return: list
"""
return [pts.vertices for pts in self.pentracestim]
def updateFromEvents(self, sample_events):
"""
Update the stim graphics based on 0 - n pen sample events.
:param sample_events:
:return: None
"""
for pevt in sample_events:
if 'FIRST_ENTER' in pevt.status:
self.end()
if pevt.pressure > 0:
lpx, lpy = self.last_pos
px, py = pevt.getPixPos(self.win)
if lpx != px or lpy != py:
if len(self.current_points) >= self.max_trace_len:
self.end()
self.append((lpx, lpy))
self.last_pos = (px, py)
self.append(self.last_pos)
else:
self.end()
def draw(self):
"""Draws each pen trace ShapeStim to the opengl back buffer. This
method must be called prior to calling win.flip() if it is to
appear on the screen.
:return: None
"""
for pts in self.pentracestim:
pts.draw()
def start(self, first_point):
"""Start a new pen trace, by creating a new ShapeStim, adding it to
the pentracestim list, and making it the current_pentrace.
        :param first_point: the first point in the ShapeStim being created.
:return: None
"""
self.end()
self.current_points.append(first_point)
self.current_pentrace = visual.ShapeStim(self.win,
units='pix',
lineWidth=self.lineWidth,
color=self.lineColor,
lineColorSpace='rgb255',
vertices=self.current_points,
closeShape=False,
pos=(0, 0),
size=1,
ori=0.0,
opacity=self.opacity,
interpolate=True)
self.pentracestim.append(self.current_pentrace)
def end(self):
"""Stop using the current_pentrace ShapeStim. Next time a pen
sample position is added to the PenTracesStim instance, a new
ShapeStim will created and added to the pentracestim list.
:return: None
"""
self.current_pentrace = None
self.current_points = []
self.last_pos = [0, 0]
def append(self, pos):
"""Add a pen position (in pix coords) to the current_pentrace
ShapeStim vertices.
:param pos: (x,y) tuple
:return: None
"""
if self.current_pentrace is None:
self.start(pos)
else:
self.current_points.append(pos)
self.current_pentrace.vertices = self.current_points
def clear(self):
"""Remove all ShapStim being used. Next time this stim is drawn, no
pen traces will exist.
:return:
"""
self.end()
del self.pentracestim[:]
def __del__(self):
self.clear()
self.win = None
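# Minimal usage sketch (assumed names; see pen_demo.py for the full pattern):
#
#     pen_traces = PenTracesStim(win)
#     pen_pos = PenPositionStim(win)
#     samples = pen.getSamples()
#     pen_traces.updateFromEvents(samples)
#     if samples:
#         pen_pos.updateFromEvent(samples[-1])
#     pen_traces.draw()
#     pen_pos.draw()
#     win.flip()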
| 10,797 | Python | .py | 234 | 33.111111 | 80 | 0.562571 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,947 | pen_demo.py | psychopy_psychopy/psychopy/demos/coder/iohub/wintab/pen_demo.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example using the iohub Wintab Pen device. Demo requires a properly
installed Wintab compatible device running on Windows.
(Wacom digital pen, MS Surface v1 or v2, ...)
"""
import os
import time
import _wintabgraphics as wintabgraphics
from psychopy import core, visual
from psychopy.gui import fileSaveDlg
from psychopy.iohub import launchHubServer
# Default session data file name
DEFAULT_SESSION_CODE = u's1234'
# RGB255 color to use for the experiment window background color. Must be a
# list or tuple of the form [r,g,b], where r,g, and b are values between 0
# (black) and 255 (white).
DEFAULT_SCREEN_COLOR = [128, 128, 128]
# The height of any text that is displayed during experiment trials. The value
# is in norm units, with a maximum value of 1.0.
DEFAULT_TEXT_STIM_HEIGHT = 0.05
# List of key values that will cause the experiment to end if detected by a
# keyboard key press event.
TERMINATE_EXP_KEYS = ['escape', ]
# Defaults for PenPositionStim
# Pen gaussian point color when hover is detected
PEN_POS_HOVER_COLOR = (0, 0, 255)
# Pen gaussian point color when pen press is detected
PEN_POS_TOUCHING_COLOR = (0, 255, 0)
# Color of the pen tilt line graphic
PEN_POS_ANGLE_COLOR = (255, 255, 0)
# Pixel width of the pen tilt line graphic
PEN_POS_ANGLE_WIDTH = 1
# Control the overall length of the pen tilt line graphic.
# 1.0 = default length. Set to < 1.0 to make line shorter, or > 1.0 to
# make line longer.
PEN_POS_TILTLINE_SCALAR = 1.0
# Minimum opacity value allowed for pen position graphics.
# 0.0 = pen position disappears when pen is not detected.
PEN_POS_GFX_MIN_OPACITY = 0.0
# Minimum pen position graphic size, in norm coord space.
PEN_POS_GFX_MIN_SIZE = 0.033
# Maximum pen position graphic size, in norm coord space, is equal to
# PEN_POS_GFX_MIN_SIZE + PEN_POS_GFX_SIZE_RANGE
PEN_POS_GFX_SIZE_RANGE = 0.033
# Defaults for PenTracesStim
# Width of pen trace line graphics (in pixels)
PEN_TRACE_LINE_WIDTH = 2
# Pen trace line color (in r,g,b 0-255)
PEN_TRACE_LINE_COLOR = (0, 0, 0)
# Pen trace line opacity. 0.0 = hidden / fully transparent, 1.0 = fully visible
PEN_TRACE_LINE_OPACITY = 1.0
draw_pen_traces = True
# if no keyboard or pen data is received for test_timeout_sec,
# the test program will exit.
test_timeout_sec = 300 # 5 minutes
# Runtime global variables
pen = None
last_evt = None
last_evt_count = 0
pen_pos_range = None
def start_iohub(myWin, sess_code=None, save_to=None):
# Create initial default session code
if sess_code is None:
sess_code = 'S_{0}'.format(int(time.mktime(time.localtime())))
# Ask for session name / hdf5 file name
if save_to is None:
save_to = fileSaveDlg(initFilePath=os.path.dirname(__file__), initFileName=sess_code,
prompt="Set Session Output File",
allowed="ioHub Data Files (*.hdf5)|*.hdf5")
if save_to:
# session code should equal results file name
fdir, sess_code = os.path.split(save_to)
sess_code = sess_code[0:min(len(sess_code), 24)]
if sess_code.endswith('.hdf5'):
sess_code = sess_code[:-5]
if save_to.endswith('.hdf5'):
save_to = save_to[:-5]
else:
save_to = sess_code
exp_code = 'wintab_evts_test'
kwargs = {'experiment_code': exp_code,
'session_code': sess_code,
'datastore_name': save_to,
'wintab.Wintab': {'name': 'pen',
'mouse_simulation': {'enable': False,
'leave_region_timeout': 2.0
}
}
}
return launchHubServer(window=myWin, **kwargs)
def createPsychopyGraphics(myWin):
#
# Initialize Graphics
#
    # center, then hide, the OS mouse pointer over the experiment window
mouse.setPosition((0, 0))
myWin.setMouseVisible(False)
# INITIALISE SOME STIMULI
evt_text = visual.TextStim(myWin, units='norm',
height=DEFAULT_TEXT_STIM_HEIGHT,
pos=(0, .9), text="")
    evt_text._txt_proto = 'pen: pos:\t{x},{y},{z}\t' \
                          'pressure: {pressure}'
    # 'orientation: {orient_azimuth},{orient_altitude}'
instruct_text = visual.TextStim(myWin, units='norm', pos=(0, -.9),
height=DEFAULT_TEXT_STIM_HEIGHT,
text="instruct_text")
instruct_text._start_rec_txt = "Press 's' to start wintab reporting. " \
"Press 'q' to exit."
instruct_text._stop_rec_txt = "Press 's' to stop wintab reporting. " \
"Press 'q' to exit."
instruct_text.text = instruct_text._start_rec_txt
pen_trace = wintabgraphics.PenTracesStim(myWin,
PEN_TRACE_LINE_WIDTH,
PEN_TRACE_LINE_COLOR,
PEN_TRACE_LINE_OPACITY)
pen_pos = wintabgraphics.PenPositionStim(myWin, PEN_POS_GFX_MIN_OPACITY,
PEN_POS_HOVER_COLOR,
PEN_POS_TOUCHING_COLOR,
PEN_POS_ANGLE_COLOR,
PEN_POS_ANGLE_WIDTH,
PEN_POS_GFX_MIN_SIZE,
PEN_POS_GFX_SIZE_RANGE,
PEN_POS_TILTLINE_SCALAR)
return evt_text, instruct_text, pen_trace, pen_pos
if __name__ == '__main__':
# Ask for session name / hdf5 file name
save_to = fileSaveDlg(initFilePath=os.path.dirname(__file__), initFileName=DEFAULT_SESSION_CODE,
prompt="Set Session Output File",
allowed="ioHub Data Files (*.hdf5)|*.hdf5")
myWin = visual.Window((1920, 1080), units='pix', color=DEFAULT_SCREEN_COLOR,
colorSpace='rgb255', fullscr=True, allowGUI=False)
# Start iohub process and create shortcut variables to the iohub devices
# used during the experiment.
io = start_iohub(myWin, DEFAULT_SESSION_CODE, save_to)
keyboard = io.devices.keyboard
mouse = io.devices.mouse
pen = io.devices.pen
# Check that the pen device was created without any errors
if pen.getInterfaceStatus() != "HW_OK":
print("Error creating Wintab device:", pen.getInterfaceStatus())
print("TABLET INIT ERROR:", pen.getLastInterfaceErrorString())
else:
# Wintab device is a go, so setup and run test runtime....
# Get Wintab device model specific hardware info and settings....
# Create the PsychoPy Window and Graphics stim used during the test....
vis_stim = createPsychopyGraphics(myWin)
# break out graphics stim list into individual variables for later use
evt_text, instruct_text, pen_trace, pen_pos_gauss = vis_stim
# Get the current reporting / recording state of the pen
is_reporting = pen.reporting
# Get x,y pen evt pos ranges for future use
pen_pos_range = (pen.axis['x']['range'],
pen.axis['y']['range'])
# remove any events iohub has already captured.
io.clearEvents()
# create a timer / clock that is used to determine if the test
# should exit due to inactivity
testTimeOutClock = core.Clock()
pen_pos_list = []
# print "Axis: ", pen.axis
# print "context: ", pen.context
# print "model: ", pen.model
while testTimeOutClock.getTime() < test_timeout_sec:
# check for keyboard press events, process as necessary
kb_events = keyboard.getPresses()
if kb_events:
testTimeOutClock.reset()
if 'q' in kb_events:
                    # End the test...
break
if 's' in kb_events:
# Toggle the recording state of the pen....
is_reporting = not is_reporting
pen.reporting = is_reporting
if is_reporting:
instruct_text.text = instruct_text._stop_rec_txt
else:
instruct_text.text = instruct_text._start_rec_txt
# check for any pen sample events, processing as necessary
wtab_evts = pen.getSamples()
last_evt_count = len(wtab_evts)
if is_reporting:
if draw_pen_traces:
pen_trace.updateFromEvents(wtab_evts)
if last_evt_count:
# for e in wtab_evts:
# print e
last_evt = wtab_evts[-1]
testTimeOutClock.reset()
pen_pos_gauss.updateFromEvent(last_evt)
# update the text that displays the event pos, pressure, etc...
evt_text.text = evt_text._txt_proto.format(**last_evt.dict)
else:
last_evt = None
if last_evt is None:
last_evt_count = 0
pen_trace.clear()
evt_text.text = ''
for stim in vis_stim:
stim.draw()
myWin.flip() # redraw the buffer
if testTimeOutClock.getTime() >= test_timeout_sec:
print("Test Time Out Occurred.")
core.quit()
| 9,638 | Python | .py | 207 | 35.082126 | 100 | 0.585413 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,948 | saveEventReport.py | psychopy_psychopy/psychopy/demos/coder/iohub/iodatastore/saveEventReport.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example for reading events from an iohub hdf5 file. Events can optionally be grouped (typically into trials) by either:
1. Reading iohub experiment messages
2. Reading the iohub .hdf5 condition variables table
3. Reading a psychopy trial-by-trial .csv data (results) file
When grouping events, use the TRIAL_START and TRIAL_END variables to specify the message text or column names to use
to find the start and end time for each trial period.
SAVE_EVENT_TYPE and SAVE_EVENT_FIELDS specify the event type, and which event fields, are saved. Set to None
to be prompted for the event type.
Each event is saved as a row in a tab delimited text file.
"""
from psychopy.iohub.datastore.util import saveEventReport
# Specify the iohub .hdf5 file to process. None will prompt for file selection when script is run.
IOHUB_DATA_FILE = None
# If True, psychopy .csv file with same path as IOHUB_DATA_FILE will be used
USE_PSYCHOPY_DATA_FILE = False
# If True, iohub .hdf5 condition variable table will be used to split events based on TRIAL_START and TRIAL_END
USE_CONDITIONS_TABLE = False
# Specify the experiment message text used to split events into trial periods.
# Set both to None to save all events.
TRIAL_START = None  # e.g. 'text.started' or 'target.started'
TRIAL_END = None  # e.g. 'fix_end_stim.started'
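# For example, to group events by the gcCursor demo's experiment messages
# (a sketch; the text must match what the experiment actually sent):
# TRIAL_START = 'TRIAL_START'
# TRIAL_END = 'TRIAL_END'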
# Specify which event type to save. Setting to None will prompt to select an event table
SAVE_EVENT_TYPE = None # 'MonocularEyeSampleEvent'
# Specify which event fields to save. Setting to None will save all event fields.
SAVE_EVENT_FIELDS = None # ['time', 'gaze_x', 'gaze_y', 'pupil_measure1', 'status']
# SAVE_EVENT_TYPE = 'BinocularEyeSampleEvent'
# SAVE_EVENT_FIELDS = ['time', 'left_gaze_x', 'left_gaze_y', 'left_pupil_measure1',
# 'right_gaze_x', 'right_gaze_y', 'right_pupil_measure1', 'status']
# SAVE_EVENT_TYPE = 'MessageEvent'
# SAVE_EVENT_FIELDS = ['time', 'text']
if __name__ == '__main__':
result = saveEventReport(hdf5FilePath=IOHUB_DATA_FILE,
eventType=SAVE_EVENT_TYPE,
eventFields=SAVE_EVENT_FIELDS,
useConditionsTable=USE_CONDITIONS_TABLE,
usePsychopyDataFile=USE_PSYCHOPY_DATA_FILE,
trialStart=TRIAL_START,
trialStop=TRIAL_END)
if result:
file_saved, events_saved = result
print("Saved %d events to %s." % (events_saved, file_saved))
else:
raise RuntimeError("saveEventReport failed.")
| 2,627 | Python | .py | 46 | 51.065217 | 119 | 0.696616 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,949 | simple.py | psychopy_psychopy/psychopy/demos/coder/iohub/eyetracking/simple.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple iohub eye tracker device demo.
Select which tracker to use by setting the TRACKER variable below.
"""
from psychopy import core, visual
from psychopy.iohub import launchHubServer
from psychopy.iohub.util import hideWindow, showWindow
# Eye tracker to use ('mouse', 'eyelink', 'gazepoint', or 'tobii')
TRACKER = 'mouse'
BACKGROUND_COLOR = [128, 128, 128]
devices_config = dict()
eyetracker_config = dict(name='tracker')
if TRACKER == 'mouse':
eyetracker_config['calibration'] = dict(screen_background_color=BACKGROUND_COLOR,
auto_pace=True,
target_attributes=dict(animate=dict(enable=True, expansion_ratio=1.5,
contract_only=False))
)
devices_config['eyetracker.hw.mouse.EyeTracker'] = eyetracker_config
elif TRACKER == 'eyelink':
eyetracker_config['model_name'] = 'EYELINK 1000 DESKTOP'
eyetracker_config['simulation_mode'] = False
eyetracker_config['runtime_settings'] = dict(sampling_rate=1000, track_eyes='RIGHT')
eyetracker_config['calibration'] = dict(screen_background_color=BACKGROUND_COLOR, auto_pace=True)
devices_config['eyetracker.hw.sr_research.eyelink.EyeTracker'] = eyetracker_config
elif TRACKER == 'gazepoint':
eyetracker_config['calibration'] = dict(use_builtin=False, screen_background_color=BACKGROUND_COLOR,
auto_pace=True,
target_attributes=dict(animate=dict(enable=True, expansion_ratio=1.5,
contract_only=False)))
devices_config['eyetracker.hw.gazepoint.gp3.EyeTracker'] = eyetracker_config
elif TRACKER == 'tobii':
eyetracker_config['calibration'] = dict(screen_background_color=BACKGROUND_COLOR,
auto_pace=True,
target_attributes=dict(animate=dict(enable=True, expansion_ratio=1.5,
contract_only=False)))
devices_config['eyetracker.hw.tobii.EyeTracker'] = eyetracker_config
else:
print("{} is not a valid TRACKER name; please use 'mouse', 'eyelink', 'gazepoint', or 'tobii'.".format(TRACKER))
core.quit()
# Number of 'trials' to run in the demo
TRIAL_COUNT = 2
# Maximum trial time / trial timeout
T_MAX = 60.0
win = visual.Window((1920, 1080),
units='pix',
fullscr=True,
allowGUI=False,
colorSpace='rgb255',
monitor='55w_60dist',
color=BACKGROUND_COLOR,
screen=0
)
win.setMouseVisible(False)
text_stim = visual.TextStim(win, text="Start of Experiment",
pos=[0, 0], height=24,
color='black', units='pix', colorSpace='named',
wrapWidth=win.size[0] * .9)
text_stim.draw()
win.flip()
io = launchHubServer(window=win, **devices_config)
# Get some iohub devices for future access.
keyboard = io.getDevice('keyboard')
tracker = io.getDevice('tracker')
# Minimize the PsychoPy window if needed
hideWindow(win)
# Display calibration gfx window and run calibration.
result = tracker.runSetupProcedure()
print("Calibration returned: ", result)
# Maximize the PsychoPy window if needed
showWindow(win)
gaze_ok_region = visual.Circle(win, lineColor='black', radius=300, units='pix', colorSpace='named')
gaze_dot = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0),
size=(40, 40), color='green', colorSpace='named', units='pix')
text_stim_str = 'Eye Position: %.2f, %.2f. In Region: %s\n'
text_stim_str += 'Press space key to start next trial.'
missing_gpos_str = 'Eye Position: MISSING. In Region: No\n'
missing_gpos_str += 'Press space key to start next trial.'
text_stim.setText(text_stim_str)
# Run Trials.....
t = 0
while t < TRIAL_COUNT:
io.clearEvents()
tracker.setRecordingState(True)
run_trial = True
tstart_time = core.getTime()
while run_trial is True:
# Get the latest gaze position in display coord space.
gpos = tracker.getLastGazePosition()
# Update stim based on gaze position
valid_gaze_pos = isinstance(gpos, (tuple, list))
gaze_in_region = valid_gaze_pos and gaze_ok_region.contains(gpos)
if valid_gaze_pos:
# If we have a gaze position from the tracker, update gc stim and text stim.
if gaze_in_region:
gaze_in_region = 'Yes'
else:
gaze_in_region = 'No'
text_stim.text = text_stim_str % (gpos[0], gpos[1], gaze_in_region)
gaze_dot.setPos(gpos)
else:
# Otherwise just update text stim
text_stim.text = missing_gpos_str
# Redraw stim
gaze_ok_region.draw()
text_stim.draw()
if valid_gaze_pos:
gaze_dot.draw()
# Display updated stim on screen.
flip_time = win.flip()
# Check any new keyboard char events for a space key.
# If one is found, set the trial end variable.
#
if keyboard.getPresses(keys=' '):
run_trial = False
elif core.getTime()-tstart_time > T_MAX:
run_trial = False
win.flip()
# Current Trial is Done
# Stop eye data recording
tracker.setRecordingState(False)
t += 1
# All Trials are done
# End experiment
win.close()
tracker.setConnectionState(False)
core.quit()
| 5,796 | Python | .py | 129 | 34.410853 | 116 | 0.60347 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,950 | validation.py | psychopy_psychopy/psychopy/demos/coder/iohub/eyetracking/validation.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Calibrate, validate, run with GC cursor demo / test.
Select which tracker to use by setting the TRACKER variable below.
"""
from psychopy import core, visual
from psychopy import iohub
from psychopy.iohub.client.eyetracker.validation import TargetStim
from psychopy.iohub.util import hideWindow, showWindow
# Eye tracker to use ('mouse', 'eyelink', 'gazepoint', or 'tobii')
TRACKER = 'mouse'
use_unit_type = 'height'
use_color_type = 'rgb'
eyetracker_config = dict(name='tracker')
devices_config = {}
if TRACKER == 'mouse':
devices_config['eyetracker.hw.mouse.EyeTracker'] = eyetracker_config
eyetracker_config['calibration'] = dict(auto_pace=True,
target_duration=1.5,
target_delay=1.0,
screen_background_color=(0, 0, 0),
type='NINE_POINTS',
unit_type=None,
color_type=None,
target_attributes=dict(outer_diameter=0.05,
inner_diameter=0.025,
outer_fill_color=[-0.5, -0.5, -0.5],
inner_fill_color=[-1, 1, -1],
outer_line_color=[1, 1, 1],
inner_line_color=[-1, -1, -1],
animate=dict(enable=True,
expansion_ratio=1.5,
contract_only=False)
)
)
elif TRACKER == 'eyelink':
eyetracker_config['model_name'] = 'EYELINK 1000 DESKTOP'
eyetracker_config['simulation_mode'] = False
eyetracker_config['runtime_settings'] = dict(sampling_rate=1000, track_eyes='RIGHT')
eyetracker_config['calibration'] = dict(auto_pace=True,
target_duration=1.5,
target_delay=1.0,
screen_background_color=(0, 0, 0),
type='NINE_POINTS',
unit_type=None,
color_type=None,
target_attributes=dict(outer_diameter=0.05,
inner_diameter=0.025,
outer_fill_color=[-0.5, -0.5, -0.5],
inner_fill_color=[-1, 1, -1],
outer_line_color=[1, 1, 1],
inner_line_color=[-1, -1, -1]
)
)
devices_config['eyetracker.hw.sr_research.eyelink.EyeTracker'] = eyetracker_config
elif TRACKER == 'gazepoint':
eyetracker_config['device_timer'] = {'interval': 0.005}
eyetracker_config['calibration'] = dict(use_builtin=False,
target_duration=1.5,
target_delay=1.0,
screen_background_color=(0,0,0),
type='NINE_POINTS',
unit_type=None,
color_type=None,
target_attributes=dict(outer_diameter=0.05,
inner_diameter=0.025,
outer_fill_color=[-0.5, -0.5, -0.5],
inner_fill_color=[-1, 1, -1],
outer_line_color=[1, 1, 1],
inner_line_color=[-1, -1, -1],
animate=dict(enable=True,
expansion_ratio=1.5,
contract_only=False)
)
)
devices_config['eyetracker.hw.gazepoint.gp3.EyeTracker'] = eyetracker_config
elif TRACKER == 'tobii':
eyetracker_config['calibration'] = dict(auto_pace=True,
target_duration=1.5,
target_delay=1.0,
screen_background_color=(0, 0, 0),
type='NINE_POINTS',
unit_type=None,
color_type=None,
target_attributes=dict(outer_diameter=0.05,
inner_diameter=0.025,
outer_fill_color=[-0.5, -0.5, -0.5],
inner_fill_color=[-1, 1, -1],
outer_line_color=[1, 1, 1],
inner_line_color=[-1, -1, -1],
animate=dict(enable=True,
expansion_ratio=1.5,
contract_only=False)
)
)
devices_config['eyetracker.hw.tobii.EyeTracker'] = eyetracker_config
else:
print("{} is not a valid TRACKER name; please use 'mouse', 'eyelink', 'gazepoint', or 'tobii'.".format(TRACKER))
core.quit()
# Number of 'trials' to run in the demo
TRIAL_COUNT = 2
# Maximum trial time / trial timeout
T_MAX = 60.0
win = visual.Window((1920, 1080),
units=use_unit_type,
fullscr=True,
allowGUI=False,
colorSpace=use_color_type,
monitor='55w_60dist',
color=[0, 0, 0]
)
win.setMouseVisible(False)
text_stim = visual.TextStim(win, text="Start of Experiment",
pos=[0, 0], height=24,
color='black', units='pix', colorSpace='named',
wrapWidth=win.size[0] * .9)
text_stim.draw()
win.flip()
# Since no experiment_code or session_code is given, no iohub hdf5 file
# will be saved, but device events are still available at runtime.
io = iohub.launchHubServer(window=win, **devices_config)
# Get some iohub devices for future access.
keyboard = io.getDevice('keyboard')
tracker = io.getDevice('tracker')
# Minimize the PsychoPy window if needed
hideWindow(win)
# Display calibration gfx window and run calibration.
result = tracker.runSetupProcedure()
print("Calibration returned: ", result)
# Maximize the PsychoPy window if needed
showWindow(win)
# Validation
# Create a target stim. iohub.client.eyetracker.validation.TargetStim provides a standard doughnut style
# target. Or use any stim that has `.setPos()`, `.radius`, `.innerRadius`, and `.draw()`.
target_stim = TargetStim(win, radius=0.025, fillcolor=[.5, .5, .5], edgecolor=[-1, -1, -1], edgewidth=2,
dotcolor=[1, -1, -1], dotradius=0.005, units=use_unit_type, colorspace=use_color_type)
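# As a minimal sketch of that interface, a custom target could look like the
# class below (an illustrative example, not part of the iohub API):
class DotTarget:
    """Bare-bones validation target exposing setPos/radius/innerRadius/draw."""
    def __init__(self, win, radius=0.025):
        self.radius = radius  # outer radius, used by the expand animation
        self.innerRadius = radius / 5.0
        self._dot = visual.Circle(win, radius=radius, fillColor='white',
                                  units=use_unit_type)

    def setPos(self, pos):
        self._dot.pos = pos

    def draw(self):
        self._dot.draw()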
# target_positions: Provide your own list of validation positions,
# target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85),
# (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)]
target_positions = 'FIVE_POINTS'
# Create a validation procedure, iohub must already be running with an
# eye tracker device, or errors will occur.
validation_proc = iohub.ValidationProcedure(win,
target=target_stim, # target stim
positions=target_positions, # string constant or list of points
randomize_positions=True, # boolean
expand_scale=1.5, # float
target_duration=1.5, # float
target_delay=1.0, # float
enable_position_animation=True,
color_space=use_color_type,
unit_type=use_unit_type,
progress_on_key="", # str or None
gaze_cursor=(-1.0, 1.0, -1.0), # None or color value
show_results_screen=True, # bool
save_results_screen=False, # bool, only used if show_results_screen == True
)
# Run the validation procedure. run() does not return until the validation is complete.
validation_proc.run()
if validation_proc.results:
results = validation_proc.results
print("++++ Validation Results ++++")
print("Passed:", results['passed'])
print("failed_pos_count:", results['positions_failed_processing'])
print("Units:", results['reporting_unit_type'])
print("min_error:", results['min_error'])
print("max_error:", results['max_error'])
print("mean_error:", results['mean_error'])
else:
print("Validation Aborted by User.")
# Run Trials.....
gaze_ok_region = visual.Circle(win, lineColor='black', radius=0.33, units=use_unit_type, colorSpace='named')
gaze_dot = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0),
size=(0.02, 0.02), color='green', colorSpace='named', units=use_unit_type)
text_stim_str = 'Eye Position: %.2f, %.2f. In Region: %s\n'
text_stim_str += 'Press space key to start next trial.'
missing_gpos_str = 'Eye Position: MISSING. In Region: No\n'
missing_gpos_str += 'Press space key to start next trial.'
text_stim.setText(text_stim_str)
t = 0
while t < TRIAL_COUNT:
io.clearEvents()
tracker.setRecordingState(True)
run_trial = True
tstart_time = core.getTime()
while run_trial is True:
# Get the latest gaze position in display coord space.
gpos = tracker.getLastGazePosition()
# Update stim based on gaze position
valid_gaze_pos = isinstance(gpos, (tuple, list))
gaze_in_region = valid_gaze_pos and gaze_ok_region.contains(gpos)
if valid_gaze_pos:
# If we have a gaze position from the tracker, update gc stim and text stim.
if gaze_in_region:
gaze_in_region = 'Yes'
else:
gaze_in_region = 'No'
text_stim.text = text_stim_str % (gpos[0], gpos[1], gaze_in_region)
gaze_dot.setPos(gpos)
else:
# Otherwise just update text stim
text_stim.text = missing_gpos_str
# Redraw stim
gaze_ok_region.draw()
text_stim.draw()
if valid_gaze_pos:
gaze_dot.draw()
# Display updated stim on screen.
flip_time = win.flip()
# Check any new keyboard char events for a space key.
# If one is found, set the trial end variable.
#
if keyboard.getPresses(keys=' '):
run_trial = False
elif core.getTime()-tstart_time > T_MAX:
run_trial = False
win.flip()
# Current Trial is Done
# Stop eye data recording
tracker.setRecordingState(False)
t += 1
# All Trials are done
# End experiment
tracker.setConnectionState(False)
core.quit()
| 12,797 | Python | .py | 226 | 33.300885 | 120 | 0.444763 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,951 | run.py | psychopy_psychopy/psychopy/demos/coder/iohub/eyetracking/gcCursor/run.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gc_cursor_demo/run.py
Demonstrates the ioHub Common EyeTracking Interface by displaying a gaze cursor
at the currently reported gaze position on an image background.
Select which tracker to use by setting the TRACKER variable below. Edit the associated
configuration dict for the eye tracker being used to modify its settings.
"""
from psychopy import core, visual
from psychopy.data import TrialHandler, importConditions
from psychopy.iohub import launchHubServer
from psychopy.iohub.util import getCurrentDateTimeString, hideWindow, showWindow
import os
# Eye tracker to use ('mouse', 'eyelink', 'gazepoint', or 'tobii')
TRACKER = 'mouse'
eyetracker_config = dict(name='tracker')
devices_config = {}
if TRACKER == 'mouse':
devices_config['eyetracker.hw.mouse.EyeTracker'] = eyetracker_config
eyetracker_config['calibration'] = dict(auto_pace=True,
target_duration=1.5,
target_delay=1.0,
screen_background_color=(0, 0, 0),
type='NINE_POINTS',
unit_type=None,
color_type=None,
target_attributes=dict(outer_diameter=0.05,
inner_diameter=0.025,
outer_fill_color=[-0.5, -0.5, -0.5],
inner_fill_color=[-1, 1, -1],
outer_line_color=[1, 1, 1],
inner_line_color=[-1, -1, -1],
animate=dict(enable=True,
expansion_ratio=1.5,
contract_only=False)
)
)
elif TRACKER == 'eyelink':
eyetracker_config['model_name'] = 'EYELINK 1000 DESKTOP'
eyetracker_config['simulation_mode'] = False
eyetracker_config['runtime_settings'] = dict(sampling_rate=1000, track_eyes='RIGHT')
eyetracker_config['calibration'] = dict(auto_pace=True,
target_duration=1.5,
target_delay=1.0,
screen_background_color=(0, 0, 0),
type='NINE_POINTS',
unit_type=None,
color_type=None,
target_attributes=dict(outer_diameter=0.05,
inner_diameter=0.025,
outer_fill_color=[-0.5, -0.5, -0.5],
inner_fill_color=[-1, 1, -1],
outer_line_color=[1, 1, 1],
inner_line_color=[-1, -1, -1]
)
)
devices_config['eyetracker.hw.sr_research.eyelink.EyeTracker'] = eyetracker_config
elif TRACKER == 'gazepoint':
eyetracker_config['device_timer'] = {'interval': 0.005}
eyetracker_config['calibration'] = dict(use_builtin=False,
target_duration=1.5,
target_delay=1.0,
screen_background_color=(0,0,0),
type='NINE_POINTS',
unit_type=None,
color_type=None,
target_attributes=dict(outer_diameter=0.05,
inner_diameter=0.025,
outer_fill_color=[-0.5, -0.5, -0.5],
inner_fill_color=[-1, 1, -1],
outer_line_color=[1, 1, 1],
inner_line_color=[-1, -1, -1],
animate=dict(enable=True,
expansion_ratio=1.5,
contract_only=False)
)
)
devices_config['eyetracker.hw.gazepoint.gp3.EyeTracker'] = eyetracker_config
elif TRACKER == 'tobii':
eyetracker_config['calibration'] = dict(auto_pace=True,
target_duration=1.5,
target_delay=1.0,
screen_background_color=(0, 0, 0),
type='NINE_POINTS',
unit_type=None,
color_type=None,
target_attributes=dict(outer_diameter=0.05,
inner_diameter=0.025,
outer_fill_color=[-0.5, -0.5, -0.5],
inner_fill_color=[-1, 1, -1],
outer_line_color=[1, 1, 1],
inner_line_color=[-1, -1, -1],
animate=dict(enable=True,
expansion_ratio=1.5,
contract_only=False)
)
)
devices_config['eyetracker.hw.tobii.EyeTracker'] = eyetracker_config
else:
print("{} is not a valid TRACKER name; please use 'mouse', 'eyelink', 'gazepoint', or 'tobii'.".format(TRACKER))
core.quit()
if __name__ == "__main__":
window = visual.Window((1920, 1080),
units='height',
fullscr=True,
allowGUI=False,
colorSpace='rgb',
color=[0, 0, 0]
)
window.setMouseVisible(False)
# Create a dict of image stim for trials and a gaze blob to show the
# reported gaze position with.
#
image_cache = dict()
image_names = ['canal.jpg', 'fall.jpg', 'party.jpg', 'swimming.jpg', 'lake.jpg']
for iname in image_names:
image_cache[iname] = visual.ImageStim(window, image=os.path.join('./images/', iname), name=iname)
    # Create a gaussian blob to use for the gaze cursor. It uses the
    # window's units ('height').
    #
gaze_dot = visual.GratingStim(window, tex=None, mask="gauss", pos=(0, 0),
size=(0.1, 0.1), color='green')
# Create a Text Stim for use on /instruction/ type screens.
# Current units assume pix.
instructions_text_stim = visual.TextStim(window, text='', pos=[0, 0], units='pix', height=24, color=[-1, -1, -1],
wrapWidth=window.size[0]*.9)
exp_conditions = importConditions('trial_conditions.xlsx')
trials = TrialHandler(exp_conditions, 1)
io_hub = launchHubServer(window=window, experiment_code='gc_cursor', **devices_config)
# Inform the ioDataStore that the experiment is using a TrialHandler. The ioDataStore will create a table
# which can be used to record the actual trial variable values (DV or IV) in the order run / collected.
#
io_hub.createTrialHandlerRecordTable(trials)
# Let's make some short-cuts to the devices we will be using in this demo.
tracker = None
try:
tracker = io_hub.devices.tracker
except Exception:
print(" No eye tracker config found in iohub_config.yaml")
io_hub.quit()
core.quit()
display = io_hub.devices.display
kb = io_hub.devices.keyboard
# Minimize the PsychoPy window if needed
hideWindow(window)
# Display calibration gfx window and run calibration.
result = tracker.runSetupProcedure()
print("Calibration returned: ", result)
# Maximize the PsychoPy window if needed
showWindow(window)
flip_time = window.flip()
io_hub.sendMessageEvent(text="EXPERIMENT_START", sec_time=flip_time)
# Send some information to the ioDataStore as experiment messages,
# including the experiment and session id's, the calculated pixels per
# degree, display resolution, etc.
#
io_hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
io_hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
io_hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(io_hub.experimentID,
io_hub.experimentSessionID))
io_hub.sendMessageEvent(text="Stimulus Screen ID: {0}, "
"Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),
display.getPixelResolution(),
display.getCoordinateType()))
io_hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
io_hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")
io_hub.clearEvents('all')
# For each trial in the set of trials within the current block.
#
t = 0
for trial in trials:
# Update the instruction screen text to indicate
# a trial is about to start.
#
        instruction_text = "Press Space Key To Start Trial %d" % t
        instructions_text_stim.setText(instruction_text)
instructions_text_stim.draw()
window.flip()
# Wait until a space key press event occurs after the
        # start trial instructions have been displayed.
#
io_hub.clearEvents('all')
kb.waitForPresses(keys=[' ', ])
# Space Key has been pressed, start the trial.
# Set the current session and trial id values to be saved
# in the ioDataStore for the upcoming trial.
#
trial['session_id'] = io_hub.getSessionID()
trial['trial_id'] = t+1
# Start Recording Eye Data
#
tracker.setRecordingState(True)
# Get the image stim for this trial.
#
imageStim = image_cache[trial['IMAGE_NAME']]
imageStim.draw()
flip_time = window.flip()
# Clear all the events received prior to the trial start.
#
io_hub.clearEvents('all')
# Send a msg to the ioHub indicating that the trial started,
# and the time of the first retrace displaying the trial stim.
#
io_hub.sendMessageEvent(text="TRIAL_START", sec_time=flip_time)
# Set the value of the trial start variable for this trial
#
trial['TRIAL_START'] = flip_time
# Loop until we get a keyboard event
#
run_trial = True
while run_trial is True:
# Get the latest gaze position in display coord space..
#
gpos = tracker.getPosition()
            if isinstance(gpos, (tuple, list)):
# If we have a gaze position from the tracker,
# redraw the background image and then the
# gaze_cursor at the current eye position.
#
gaze_dot.setPos([gpos[0], gpos[1]])
imageStim.draw()
gaze_dot.draw()
else:
# Otherwise just draw the background image.
# This will remove the gaze cursor from the screen
# when the eye tracker is not successfully
# tracking eye position.
#
imageStim.draw()
# Flip video buffers, displaying the stim we just
# updated.
#
flip_time = window.flip()
# Send an experiment message to the ioDataStore
# indicating the time the image was drawn and
# current position of gaze spot.
#
            if isinstance(gpos, (tuple, list)):
io_hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f" % (trial['IMAGE_NAME'], gpos[0], gpos[1]),
sec_time=flip_time)
else:
io_hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]" % (trial['IMAGE_NAME']),
sec_time=flip_time)
# Check any new keyboard press events by a space key.
# If one is found, set the trial end variable and break.
# from the loop
if kb.getPresses(keys=[' ', ]):
run_trial = False
break
# The trial has ended, so update the trial end time condition value,
# and send a message to the ioDataStore with the trial end time.
#
flip_time = window.flip()
trial['TRIAL_END'] = flip_time
io_hub.sendMessageEvent(text="TRIAL_END", sec_time=flip_time)
# Stop recording eye data.
# In this example, we have no use for any eye data
        # between trials, so there is no need to save it.
#
tracker.setRecordingState(False)
# Save the experiment condition variable values for this
# trial to the ioDataStore.
#
io_hub.addTrialHandlerRecord(trial)
# Clear all event buffers
#
io_hub.clearEvents('all')
t += 1
# All trials have been run, so end the experiment.
#
flip_time = window.flip()
io_hub.sendMessageEvent(text='EXPERIMENT_COMPLETE', sec_time=flip_time)
# Disconnect the eye tracking device.
#
tracker.setConnectionState(False)
# The experiment is done, all trials have been run.
# Clear the screen and show an 'experiment done' message using the
# instructionScreen text.
#
    instruction_text = "Press Any Key to Exit Demo"
    instructions_text_stim.setText(instruction_text)
instructions_text_stim.draw()
flip_time = window.flip()
io_hub.sendMessageEvent(text="SHOW_DONE_TEXT", sec_time=flip_time)
io_hub.clearEvents('all')
# wait until any key is pressed
kb.waitForPresses()
| 15,476 | Python | .py | 287 | 33.292683 | 117 | 0.481621 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,952 | readTrialEventsByMessages.py | psychopy_psychopy/psychopy/demos/coder/iohub/eyetracking/gcCursor/readTrialEventsByMessages.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example of reading events from an iohub hdf5 file, saving to a tab delimited text file.
Events are split into trials by reading the time of 'TRIAL_START' and
'TRIAL_END' experiment Message events.
SAVE_EVENT_TYPE and SAVE_EVENT_FIELDS specify the event type, and which event fields, are saved.
This example can process hdf5 files saved by running the gcCursor demo.
"""
from psychopy.iohub.datastore.util import saveEventReport
# Specify which event type and event fields to save
SAVE_EVENT_TYPE = 'MonocularEyeSampleEvent'
SAVE_EVENT_FIELDS = ['time', 'gaze_x', 'gaze_y', 'pupil_measure1', 'status']
#SAVE_EVENT_TYPE = 'BinocularEyeSampleEvent'
#SAVE_EVENT_FIELDS = ['time', 'left_gaze_x', 'left_gaze_y', 'left_pupil_measure1',
# 'right_gaze_x', 'right_gaze_y', 'right_pupil_measure1', 'status']
# Specify the experiment message text used to split events into trial periods.
# Set both to None to save all events.
TRIAL_START_MESSAGE = "TRIAL_START"
TRIAL_END_MESSAGE = "TRIAL_END"
if __name__ == '__main__':
result = saveEventReport(hdf5FilePath=None, eventType=SAVE_EVENT_TYPE, eventFields=SAVE_EVENT_FIELDS,
trialStart=TRIAL_START_MESSAGE, trialStop=TRIAL_END_MESSAGE)
if result:
file_saved, events_saved = result
print("Saved %d events to %s." % (events_saved, file_saved))
else:
raise RuntimeError("saveEventReport failed.")
| 1,472 | Python | .py | 28 | 49.035714 | 105 | 0.716968 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,953 | readTrialEventsByConditionVariables.py | psychopy_psychopy/psychopy/demos/coder/iohub/eyetracking/gcCursor/readTrialEventsByConditionVariables.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example of reading events from an iohub hdf5 file, saving to a tab delimited text file.
The condition variables table is used to split samples into trial groupings,
saving condition variable columns with each event.
SAVE_EVENT_TYPE and SAVE_EVENT_FIELDS specify the event type, and which event fields, are saved.
This example can process hdf5 files saved by running the gcCursor demo.
"""
import sys
import os
from psychopy.iohub.constants import EventConstants
from psychopy.iohub.datastore.util import displayDataFileSelectionDialog
from psychopy.iohub.datastore.util import ExperimentDataAccessUtility
SAVE_EVENT_TYPE = EventConstants.MONOCULAR_EYE_SAMPLE
SAVE_EVENT_FIELDS = ['time', 'gaze_x', 'gaze_y', 'pupil_measure1', 'status']
#SAVE_EVENT_TYPE = EventConstants.BINOCULAR_EYE_SAMPLE
#SAVE_EVENT_FIELDS = ['time', 'left_gaze_x', 'left_gaze_y', 'left_pupil_measure1',
# 'right_gaze_x', 'right_gaze_y', 'right_pupil_measure1', 'status']
if __name__ == '__main__':
# Select the hdf5 file to process.
data_file_path = displayDataFileSelectionDialog(os.path.dirname(os.path.abspath(__file__)))
if data_file_path is None:
print("File Selection Cancelled, exiting...")
sys.exit(0)
data_file_path = data_file_path[0]
dpath, dfile = os.path.split(data_file_path)
datafile = ExperimentDataAccessUtility(dpath, dfile)
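    # Note: as used below, '@TRIAL_START@' and '@TRIAL_END@' are placeholders
    # that are substituted with the values of the matching condition
    # variables table columns for each trial.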
events_by_trial = datafile.getEventAttributeValues(SAVE_EVENT_TYPE, SAVE_EVENT_FIELDS,
startConditions={'time': ('>=', '@TRIAL_START@')},
endConditions={'time': ('<=', '@TRIAL_END@')})
ecount = 0
# Open a file to save the tab delimited output to.
#
output_file_name = "%s.txt" % (dfile[:-5])
with open(output_file_name, 'w') as output_file:
print('Writing Data to %s:\n' % (output_file_name))
column_names = events_by_trial[0].condition_set._fields[2:] + events_by_trial[0]._fields[:-2]
output_file.write('\t'.join(column_names))
output_file.write('\n')
for trial_data in events_by_trial:
cv_fields = [str(cv) for cv in trial_data.condition_set[2:]]
# Convert trial_data namedtuple to list of arrays.
# len(trial_data) == len(SAVE_EVENT_FIELDS)
trial_data = trial_data[:-2]
for eix in range(len(trial_data[0])):
# Step through each event, saving condition variable and event fields
ecount += 1
event_data = [str(c[eix]) for c in trial_data]
output_file.write('\t'.join(cv_fields + event_data))
output_file.write('\n')
if eix % 100 == 0:
sys.stdout.write('.')
print("\n\nWrote %d events." % ecount)
datafile.close()
| 2,909 | Python | .py | 55 | 44.436364 | 105 | 0.636172 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,954 | parallelPortOutput.py | psychopy_psychopy/psychopy/demos/coder/hardware/parallelPortOutput.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
parallel ports demo
This is for win32 only.
"""
from psychopy import visual, core
from psychopy import parallel
nFramesOn = 5
nFramesOff = 30
nCycles = 2
parallel.setPortAddress(0x378) # address for parallel port on many machines
pinNumber = 2 # choose a pin to write to (2-9).
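# Data pins 2-9 map to bits 0-7 of the data register, so setting pin 2 high
# is equivalent to parallel.setData(1).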
# setup the stimuli and other objects we need
win = visual.Window([1280, 1024], allowGUI=False) # make a window
win.flip() # present it
myStim = visual.GratingStim(win, tex=None, mask=None, color='white', size=2)
myClock = core.Clock() # just to keep track of time
# present a stimulus for exactly nFramesOn frames per cycle, for nCycles cycles
for cycleN in range(nCycles):
for frameN in range(nFramesOff):
# don't draw, just refresh the window
win.flip()
parallel.setData(0) # sets all pins low
for frameN in range(nFramesOn):
myStim.draw()
win.flip()
# immediately *after* screen refresh set pins as desired
        parallel.setPin(pinNumber, 1)  # sets just this pin to be high
# report the mean time afterwards
print('total time=%0.6f' % myClock.getTime())
print('avg frame rate=%0.3f' % win.fps())
# set pins back to low
win.flip()
parallel.setData(0) # sets all pins low again
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,340 | Python | .py | 38 | 32.289474 | 76 | 0.712626 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,955 | labjack_u3.py | psychopy_psychopy/psychopy/demos/coder/hardware/labjack_u3.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo for using labjack DAC devices
See also
http://labjack.com/support/labjackpython
but note that the version shipped with standalone PsychoPy
has u3 (and others under an umbrella package called labjack), so the import
line is slightly different from the documentation on LabJack's website
"""
from psychopy import visual, core, event, sound
try:
from labjack import u3
except ImportError:
import u3
# sound.setAudioAPI('pyaudio')
win = visual.Window([800, 800])
stim = visual.GratingStim(win, color=-1, sf=0)
snd = sound.Sound(880)
print(snd)
# setup labjack U3
ports = u3.U3()
FIO4 = 6004 # the address of line FIO4
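# (Added note) In the LabJack Modbus register map, digital line states start
# at register 6000, so FIOn is register 6000 + n (hence FIO4 = 6004 above);
# writing 0/1 drives the line low/high, e.g.:
#     ports.writeRegister(6005, 1)  # would drive FIO5 high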
while True:
# do this repeatedly for timing tests
ports.writeRegister(FIO4, 0) # start low
# draw black square
stim.draw()
win.flip()
# wait for a key press
if 'q' in event.waitKeys():
break
    # set to white, flip window and raise the level of port FIO4
stim.setColor(1)
stim.draw()
win.flip()
ports.writeRegister(FIO4, 1)
snd.play()
for frameN in range(4):
stim.draw()
win.flip()
# set color back to black and set FIO4 to low again
stim.setColor(-1)
stim.draw()
win.flip()
ports.writeRegister(FIO4, 0)
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,357 | Python | .py | 49 | 24.163265 | 68 | 0.70216 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,956 | CRS_BitsPlusPlus.py | psychopy_psychopy/psychopy/demos/coder/hardware/CRS_BitsPlusPlus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In PsychoPy 1.81.00 the support for Bits++ has been substantially rewritten and will
require a few changes to your code. It will allow a greater range of devices to be used
in future (including BitsSharp).
From now on, rather than the device/mode being a setting of the window you now
create the bitsBox object yourself, providing it with a reference to the window
and then interact with the box directly. So instead of
win = visual.Window([1024, 768], bitsmode='fast')
win.bits.setContrast(0.5)
you would now use:
win = visual.Window([1024, 768])
bitsBox = crs.BitsSharp(win, mode='bits++')
bitsBox.setContrast(0.5)
Check your experiment still works as expected!
"""
from psychopy import visual, core, event, logging
from psychopy.hardware import crs
import numpy
# This is the new way to use the Bits++ box (or a Bits# device) with PsychoPy.
# The BitsSharp device under PsychoPy has some issues right now:
# - the shaders aren't working for mono++ and color++ modes
# - for BitsSharp, the device will try to deduce the identity LUT of the screen
# but to do that it needs to be in fullscreen mode
# logging.console.setLevel(logging.INFO)
win = visual.Window([1024, 768], useFBO=True, fullscr=True, screen = 0)
bitsBox = crs.BitsPlusPlus(win, mode='bits++',
rampType='configFile') # change this to an integer to use a specific identityLUT
# BitsSharp can check identity LUT automatically:
# bitsBox = crs.BitsSharp(win, mode='bits++', checkConfigLevel=1)
# if not bitsBox.OK:
# print('failed to connect to Bits box')
# else:
# print('found %s on %s' %(bitsBox.type, bitsBox.com.port))
grating = visual.GratingStim(win, mask = 'gauss', ori=45, sf=2)
# Using bits++ with one stimulus
globalClock = core.Clock()
while True:
# get new contrast
t = globalClock.getTime()
newContr = numpy.sin(t * numpy.pi * 2) # sinusoidally modulate contrast
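    # sin(2*pi*t) completes one full contrast cycle per second, sweeping
    # through 0 (blank screen) to +/-1 (full contrast; negative contrast
    # phase-reverses the grating)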
# set whole screen to this contrast
bitsBox.setContrast(newContr)
# draw gratings and update screen
grating.draw()
win.flip()
# check for a keypress
if event.getKeys():
break
event.clearEvents('mouse') # only really needed for pygame windows
# reset the bits++ (and update the window so that this is done properly)
bitsBox.setContrast(1)
win.flip()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 2,423 | Python | .py | 57 | 39.912281 | 89 | 0.735745 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,957 | CRS_BitsBox.py | psychopy_psychopy/psychopy/demos/coder/hardware/CRS_BitsBox.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This demo shows you how to use a CRS BitsSharp device with PsychoPy
As of version 1.81.00 PsychoPy can make use of the Bits# in any of its rendering
modes provided that your graphics card supports OpenGL framebuffer objects.
You don't need to worry about setting the high- and low-bit pixels. Just draw as
normal and PsychoPy will do the conversions for you
"""
from psychopy import visual, core, event, logging
from psychopy.hardware import crs
logging.console.setLevel(logging.INFO)
win = visual.Window([1024, 768], screen=0, useFBO=True,
fullscr=True, allowGUI=False, autoLog=False)
# Initialize BitsSharp
# you need to give this the psychopy Window so that it can override various
# window functions (e.g. to override gamma settings etc)
bits = crs.BitsSharp(win=win, mode='bits++')
print(bits.info)
if not bits.OK:
print('failed to connect to Bits box')
core.quit()
core.wait(0.1)
# Now, you can change modes using
bits.mode = 'mono++' # 'color++', 'mono++', 'bits++', 'auto++' or 'status'
# Create a stimulus and draw as normal
stim = visual.GratingStim(win, tex='sin', units='pix', size=400, sf=0.01, mask='gauss', autoLog=False)
globalClock = core.Clock()
while globalClock.getTime() < 3:
t = globalClock.getTime()
stim.phase = t * 3 # drift at 3Hz
stim.draw()
win.flip()
# You can test pixel values (going to the box) using getVideoLine();
# this requires 'status' mode and that takes a few moments to set up
bits.mode = 'status'
core.wait(3)
pixels = bits.getVideoLine(lineN=1, nPixels=5)
print(pixels)
# Check that the set up is working
# level=0 just checks that system is the same from previous config
# level=1 checks that identity LUT still works (level=2 would rewrite the config file)
bits.checkConfig(level=1)
# color++ and mono++ are super-easy. Just switch to that mode and draw as normal
# bits++ mode still needs a LUT, which means extra commands
bits.mode = "color++" # get out of status screen
core.wait(3) # wait to get back out of status mode
for frameN in range(300):
stim.draw()
bits.setContrast((frameN%50) / 50.0) # ramp up in a sawtooth
win.flip()
# Make BitsSharp go beep
# bits.beep()
# You probably don't need to but you can send BitsSharp your own messages using
bits.sendMessage('$FirmwareDate\r')
print(bits.read(timeout=0.1))
win.close()
core.quit()
# The contents of this file are in the public domain.
| 2,456 | Python | .py | 59 | 39.677966 | 102 | 0.740445 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,958 | egi_netstation.py | psychopy_psychopy/psychopy/demos/coder/hardware/egi_netstation.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This demo comes from the simple_distilled example provided with pynetstation.
Note that egi pynetstation can also be used in a multi-threaded form.
See the pynetstation documentation for further information.
"""
# Set up:
import egi.simple as egi
# import egi.threaded as egi
# ms_localtime = egi.egi_internal.ms_localtime
ms_localtime = egi.ms_localtime
ns = egi.Netstation()
# sample address and port; change according to your network settings
ns.connect('11.0.0.42', 55513)
# ns.initialize('11.0.0.42', 55513)
ns.BeginSession()
ns.sync()
ns.StartRecording()
# Send many events here:
# optionally can perform additional synchronization
# ns.sync()
ns.send_event('evt_', label="event", timestamp=egi.ms_localtime(),
table = {'fld1' : 123, 'fld2' : "abc", 'fld3' : 0.042} )
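# A sketch (not in the original demo) of sending one event per trial in a
# loop; 'trl_' and nTrials are made-up names (event keys are 4 characters):
#     for trial in range(nTrials):
#         ns.sync()  # re-sync now and then to limit clock drift
#         ns.send_event('trl_', label='trial %d' % trial,
#                       timestamp=egi.ms_localtime())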
# We have sent all we wanted, time to go home:
ns.StopRecording()
ns.EndSession()
ns.disconnect()
# ns.EndSession()
# ns.finalize()
# The contents of this file are in the public domain.
| 1,038 | Python | .py | 31 | 31.580645 | 77 | 0.740181 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,959 | camera.py | psychopy_psychopy/psychopy/demos/coder/hardware/camera.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple demo for recording a video from a camera and saving the result to
disk.
"""
import psychopy
import time
import psychopy.core as core
import psychopy.event as event
import psychopy.visual as visual
from psychopy.hardware.camera import Camera
from psychopy.sound.microphone import Microphone
# Create a microphone instance for recording audio samples. This will be passed
# off to the camera object and controlled by it for recording the audio track of
# the video. Here we just get the first available microphone.
microphones = Microphone.getDevices()
if microphones:
mic = Microphone(microphones[0])
else:
mic = None # no audio if a mic was not found
print('No mic was found on this system, no audio will be recorded!')
# get descriptions for camera devices and their available formats on this system
cameras = Camera.getCameraDescriptions(collapse=True) # collapse=True for list
print("Found cameras and formats:\n\t")
print("\n\t".join(cameras)) # print the list of possible formats
# use the very first one
myCameraFormat = cameras[0]
print('\nUsing camera/format: `{}`'.format(myCameraFormat))
# Create a new camera instance. Values for `size` and `frameRate` must be
# appropriate for the device in use.
cam = Camera(myCameraFormat, mic=mic)
# Open a camera stream. This will remain open until `close()` is called.
cam.open()
# Create a window to present the live stream from the camera on.
win = visual.Window(size=(800, 600))
# Create an ImageStim object to use as a 'viewfinder'; this will allow you to
# view the camera stream in real-time. You should only set the camera instance
# as the ImageStim's image after calling `open()` on the camera, since the
# metadata needed to set up the texture properly is not available until then.
viewer = visual.ImageStim(win, cam)
# Start recording frames to file. This needs to be called after opening the
# stream if you wish to save video frames.
cam.record()
# record for (close to) 5 seconds
while cam.recordingTime < 5.0:
frame = cam.getVideoFrame() # get video frame data
# print the current time in the recording
print('t={}s'.format(round(frame.absTime, 6)))
viewer.draw() # draw the frame to the window
win.flip()
if event.getKeys('q'):
break
# close the window, we don't need it anymore
win.close()
# Stop the camera recording. This must be called prior to saving the video to
# file. The webcam stream is still open at this point and record can be called
# again.
cam.stop() # stop the webcam recording
# Save the video to disk by calling this method. Video recordings are lost if
# this is not called prior to calling `record` again.
cam.save('myVideo.mp4', useThreads=False)  # save the video; change the path as needed
# Print the path to where the clip was saved, this allows you to pass the clip
# to a `MovieStim` object to view it afterwards if desired. Gives `None` if
# `save()` was not called previously.
if cam.lastClip is not None:
    print('Saved clip to:', cam.lastClip)
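# A playback sketch (not part of the original demo); assumes the clip saved
# successfully and that a window is still open to draw into:
#     mov = visual.MovieStim(win, cam.lastClip)
#     while not mov.isFinished:
#         mov.draw()
#         win.flip()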
# Close the camera stream. You must call this before exiting or when you're
# done with the camera.
cam.close()
core.quit()
| 3,174 | Python | .py | 70 | 43.371429 | 94 | 0.761025 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,960 | cedrusRB730.py | psychopy_psychopy/psychopy/demos/coder/hardware/cedrusRB730.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of using Cedrus RB730 hardware
"""
from psychopy.hardware import cedrus
from psychopy import core
import sys
rb730 = cedrus.RB730(7, baudrate=115200)
# Get RB info
print(rb730.getInfo())
# this is the time taken to send a signal to the unit and back via USB:
print(('roundTrip:', rb730.measureRoundTrip()))
core.wait(0.1)  # give it a chance to clear previous commands
rb730.resetBaseTimer()
rb730.resetTrialTimer()
# Test keys
print('push some keys (1 exits)')
sys.stdout.flush()
notAbort = True
while notAbort:
keyEvents = rb730.waitKeyEvents(downOnly=False)
for evt in keyEvents:
print("key=%s, rt=%.4f, up/down=%i" % (evt.key, evt.rt, evt.direction))
if evt.key == 1:
notAbort = False
print('done')
print(('baseTime:', rb730.getBaseTimer()))
core.quit()
# The contents of this file are in the public domain.
| 902 | Python | .py | 30 | 27.6 | 79 | 0.71875 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,961 | testSoundLatency.py | psychopy_psychopy/psychopy/demos/coder/hardware/testSoundLatency.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This demo allows for automated testing of the sound latency on your system.
To use it you need a labjack (or adapt for a similar device) and a cable to
connect the earphones jack to the AIN0 (and GND) pins of the labjack.
(The PsychoPy team would be interested to hear how your measurements go)
"""
import psychopy
from psychopy import visual, core, event, sound
from labjack import u3
import numpy, sys, platform
# setup window (can use for visual pulses)
win = visual.Window([800, 800], monitor='testMonitor')
win.recordFrameIntervals = False
stim = visual.GratingStim(win, color=-1, sf=0)
sound.init(rate=48000, buffer=48)
print('Using %s (with %s) for sounds' % (sound.audioLib, sound.audioDriver))
timeWithLabjack = True
maxReps = 100
# setup labjack U3
ports = u3.U3()
ports.__del__ = ports.close # try to autoclose the ports if script crashes
# get zero value of FIO6
startVal = ports.getFIOState(6) # is FIO6 high or low?
print('FIO6 is at', startVal, end=' ')
print('AIN0 is at', ports.getAIN(0))
if timeWithLabjack:
print('OS\tOSver\taudioAPI\tPsychoPy\trate\tbuffer\tmean\tsd\tmin\tmax')
snd = sound.Sound(1000, secs=0.1)
core.wait(2) # give the system time to settle?
delays = []
nReps = 0
while True: # run the repeats for this sound server
if event.getKeys('q'):
core.quit()
nReps += 1
# do this repeatedly for timing tests
ports.setFIOState(4, 0) # start FIO4 low
# draw black square
stim.draw()
win.flip()
if not timeWithLabjack:
# wait for a key press
if 'q' in event.waitKeys():
break
# set to white, flip window and raise level port FIO4
stim.setColor(1)
stim.draw()
win.flip()
startVal=ports.getAIN(0)
# print('AIN0 is at', startVal)
ports.setFIOState(4, 1)
timer=core.Clock()
snd.play()
if timeWithLabjack:
while abs(ports.getAIN(0)-startVal) < 0.1 and timer.getTime() < 1.0:
pass
t1 = timer.getTime() * 1000
if timer.getTime() > 1.0:
            print('failed to detect sound on AIN0 (either inconsistent sound or needs to be louder)')
# for n in range(5):
# core.wait(0.001)
# print('AIN0 now', ports.getAIN(0))
sys.stdout.flush()
delays.append(t1)
core.wait(0.5) # ensure sound has finished
# set color back to black and set FIO4 to low again
stim.setColor(-1)
stim.draw()
win.flip()
ports.setFIOState(4, 0) # set FIO4 to low again
if nReps >= maxReps:
break
if sys.platform == 'darwin':
sysName = 'OSX'
sysVer = platform.mac_ver()[0]
elif sys.platform == 'win32':
sysName = 'win'
sysVer = platform.win32_ver()[0]
elif sys.platform.startswith('linux'):
import distro
sysName = 'linux_' + '_'.join([distro.name(), distro.version(), distro.codename()])
sysVer = platform.release()
else:
sysName = sysVer = 'n/a'
audioLib = sound.audioLib
if audioLib == 'pyo':
    # for pyo we also want the underlying driver (ASIO, windows etc)
audioLib = "%s_%s" % (sound.audioLib, sound.audioDriver)
rate = sound.pyoSndServer.getSamplingRate()
buff_size = sound.pyoSndServer.getBufferSize()
else:
rate = sound.pygame.mixer.get_init()[0]
buff_size = 0
# print('OS\tOSver\tPsychoPy\trate\tbuffer\tmean\tsd\tmin\tmax')
if timeWithLabjack:
print("%s\t%s\t%s\t%s" % (sysName, sysVer, audioLib, psychopy.__version__), end='')
print("\t%i\t%i" % (rate, buff_size), end='')
print("\t%.3f\t%.3f" % (numpy.mean(delays), numpy.std(delays)), end='')
print("\t%.3f\t%.3f" % (numpy.min(delays), numpy.max(delays)), end='')
import pylab
pylab.plot(delays, 'o')
pylab.show()
win.close()
core.quit()
# The contents of this file are in the public domain.
| 3,821 | Python | .py | 107 | 31.607477 | 101 | 0.672442 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,962 | monitorDemo.py | psychopy_psychopy/psychopy/demos/coder/hardware/monitorDemo.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Often you can control monitors simply from the Monitor Center in the
PsychoPy application, but you can also create/control them using scripts.
This allows you to override certain values for the current run:
mon = monitors.Monitor('testMonitor') # load the testMonitor
mon.setDistance(120) # change distance in this run (don't save)
Or you can load a specific calibration of that monitor:
mon.setCurrent(-1)  # use the last (alphabetical) calibration
mon.setCurrent('2015_05_21 11:42')  # use a specific named calibration
More info is available at http://www.psychopy.org/api/monitors.html
"""
from psychopy import monitors
names = monitors.getAllMonitors()
for thisName in names:
thisMon = monitors.Monitor(thisName)
print(thisMon.getDistance())
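# A minimal sketch (not in the original demo) of creating and saving a new
# calibration from code; the name and measurements below are made up:
#     mon = monitors.Monitor('demoMonitor', width=53.0, distance=60.0)
#     mon.setSizePix([1920, 1080])
#     mon.save()  # write the calibration so Monitor Center can see it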
# The contents of this file are in the public domain.
| 883 | Python | .py | 19 | 43.736842 | 74 | 0.762573 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,963 | qmixPump.py | psychopy_psychopy/psychopy/demos/coder/hardware/qmixPump.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple example of Cetoni neMESYS syringe pump control, based on the `pyqmix`
library. The syringe pump system is described in the following publication:
CA Andersen, L Alfine, K Ohla, & R Höchenberger (2018):
"A new gustometer: Template for the construction of a portable and
modular stimulator for taste and lingual touch."
Behavior Research Methods. doi: 10.3758/s13428-018-1145-1
"""
from psychopy import event
from psychopy import core
from psychopy.visual import Window, TextStim
from psychopy.hardware import qmix
print('Supported syringe types: %s' % qmix.syringeTypes)
print('Supported volume units: %s' % qmix.volumeUnits)
print('Supported flow rate units: %s' % qmix.flowRateUnits)
# Initialize the first pump (index 0). We assume the pump is
# equipped with a 50 mL glass syringe.
pump = qmix.Pump(index=0,
volumeUnit='mL',
flowRateUnit='mL/s',
syringeType='50 mL glass')
print('Max. flow rate: %.3f %s' % (pump.maxFlowRate, pump.flowRateUnit))
win = Window()
msg = ('Press one of the following keys: \n\n'
' F – Fill Syringe at 1 mL/s\n'
' E – Empty Syringe at 1 mL/s\n'
' A – Aspirate 1 mL at 1 mL/s\n'
' D – Dispense 1 mL at 1 mL/s\n'
'\n'
' Q – Quit')
t = TextStim(win, msg)
event.clearEvents()
while True:
t.draw()
win.flip()
# Retrieve keypresses. The user can completely fill or empty the syringe,
# or aspirate or dispense a small volume (1 mL) by pressing the
# corresponding keys.
#
# When aspirating or dispensing, the code halts further script execution
# until the pump operation has finished, and then immediately switches the
# valve position (i.e., from inlet to outlet after aspiration, and from
# outlet to inlet after dispense). During an experiment, this can ensure
# a sharp(er) stimulus offset.
keys = event.getKeys(keyList=['f', 'e', 'a', 'd', 'q'])
if 'f' in keys:
pump.fill(flowRate=1, waitUntilDone=False)
elif 'e' in keys:
pump.empty(flowRate=1, waitUntilDone=False)
elif 'a' in keys:
pump.aspirate(volume=1,
flowRate=1,
waitUntilDone=True,
switchValveWhenDone=True)
elif 'd' in keys:
pump.dispense(volume=1,
flowRate=1,
waitUntilDone=True,
switchValveWhenDone=True)
elif 'q' in keys:
break
# Immediately halt all pump operation and shut down PsychoPy.
pump.stop()
core.quit()
| 2,652 | Python | .py | 66 | 33.515152 | 78 | 0.652225 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,964 | crsBitsAdvancedDemo.py | psychopy_psychopy/psychopy/demos/coder/hardware/crsBitsAdvancedDemo.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This demo was created by Andrew Schofield to show how to use the advanced input/output
functionality in the bits.py module.
It also acts as a test routine to make sure your device is working and that the
bits.py module is intact.
The bits.py module mostly provides wrapper functions for getting control of the
CRS hardware family, Bits++, Bits#, Display++ and their variants. ViSaGe devices
are not supported unless you have one with a Bits# emulation mode.
Most but not all of the bits.py commands are tested. Similarly,
most but not all of the CRS functionality is tested, often in combination. But there
are too many combinations to test everything.
There is some support for driving stereo goggles (CRS FE1) via the D25 feature connector
output. Bits# has a stereo goggles port which works differently and is not tested. Nor is
support for it currently provided in bits.py.
Stereo via frame interleaving is not recommended on a Display++ due to LCD pixel transition speeds.
CRS have a 3D version of Display++ which uses polarised glasses and line interleaving. This is not
specifically implemented in bits.py yet but you can still build a stereo image yourself.
Bits++ support is relatively minimal with, in particular, digital inputs not being supported.
To work effectively on a Bits++ box you will need an Oscilloscope and:
Connect DOUT1 (pin2) to Oscilloscope channel A
Connect DOUT5 (pin6) to Oscilloscope channel B
You will then observe changes in those outputs.
To work effectively on all other devices you will need to:
Connect DOUT1 (pin 2) to DIN7 (pin 21)
Connect DOUT5 (pin 6) to DIN8 (pin 24)
Connect DOUT6 (pin 7) to DIN9 (pin 25)
and connect Analog OUT 1 to Analog IN 1 if you have a Bits# or a Display++
with the optional Analog features. This will cunningly allow the CRS device to monitor
its own outputs, testing both output and input features at once.
You can select the CRS hardware and options in a dialog box.
Note Screen is the screen number of your CRS connected monitor = 0 or 1.
(you're working in Coder after all).
Enter None to see what happens if there is
no device and hence no serial comms.
Note that the monitor and LUT settings don't really matter much.
You will get warnings about stuff being found on the input buffer. This is because the command to
stop the CRS device from sending data expects a return message to say that data collection has stopped
but will always find the last chunk of data recorded instead.
See xxxxx for a description of the bits.py approach to CRS hardware and programmer guide.
"""
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging
from psychopy import monitors, filters, gamma
from psychopy.hardware import crs
# from scipy import misc  # unused here; scipy.misc was removed in recent SciPy
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from time import sleep, time
from time import perf_counter as clock  # time.clock was removed in Python 3.8
import os # handy system and path functions
import sys # to get file system encoding
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))#.decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
# Store info about the experiment session
expName = 'crsTest' # from the Builder filename that created this script
expInfo = {'Device': 'Display++',
'Analog': 'No',
'Touch screen': 'Yes',
'Button box': 'CB6',
'Monitor': 'Display++160',
'LUTfile': 'invGammaLUT.txt',
'Screen': '1'}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if not dlg.OK:
core.quit() # user pressed cancel
#===================================================================#
# Setup the Window
print("Open window")
win = visual.Window(
size=(800, 600), fullscr=True, screen=int(expInfo['Screen']),
allowGUI=False, allowStencil=False,
monitor=expInfo['Monitor'], color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='deg')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is not None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# Initialize components for Routine "preflight"
#preflightClock = core.Clock()
#frameRate=win.getActualFrameRate()
#print(expInfo['frameRate'])
mon=monitors.Monitor(expInfo['Monitor'],distance=56)
#=======================================================================================#
# Open the appropriate CRS class of the desired / necessary type
print("open CRS")
if expInfo['Device']=='Bits++':
bits = crs.BitsPlusPlus(win, mode='bits++',rampType=1)
if expInfo['Device']=='Bits#':
bits = crs.BitsSharp(win, mode='bits++',checkConfigLevel=1)
if expInfo['Device']=='Display++' or expInfo['Device']=='None':
if expInfo['Touch screen']=="Yes":
if expInfo['Device']=='Display++':
bits = crs.DisplayPlusPlusTouch(win, mode='bits++',checkConfigLevel=1)
else:
bits = crs.DisplayPlusPlusTouch(win, mode='bits++',checkConfigLevel=1,noComms=True)
else:
if expInfo['Device']=='Display++':
bits = crs.DisplayPlusPlus(win, mode='bits++',checkConfigLevel=1)
else:
bits = crs.DisplayPlusPlus(win, mode='bits++',checkConfigLevel=1,noComms=True)
#=======================================================================================#
# If Bits# or Display++, initialise the device.                                        #
# This can also be done via parameter setters but this illustrates the low level send #
# commands. #
if expInfo['Device'] != 'Bits++':
#bits = crs.BitsSharp(win, mode='bits++')
#gamma.setGamma(win.winHandle._dc, 1.0, 1)
bits.sendMessage('$TemporalDithering=[ON]\r')
bits.read(timeout=0.1)
bits.sendMessage('$VideoFrameRate\r')
bits.read(timeout=0.1)
lutfile = expInfo['LUTfile']
msg='$enableGammaCorrection=['+lutfile+']\r'
bits.sendMessage(msg)
bits.read(timeout=0.1)
bits.sendMessage(r'$EnableTouchScreen=[OFF]\e')
bits.read(timeout=0.1)
bits.sendMessage('$Stop\r')
# Clear the buffer
bits.flush()
bits.RTBoxDisable()
bits.flush()
print("OK I seem to have cleared the buffer now")
#=======================================================================================#
# House keeping
print("Connect DOUT1 (pin 2) to DIN7 (pin 21)")
print("Connect DOUT5 (pin 6) to DIN8 (pin 24)")
print("Connect DOUT6 (pin 7) to DIN9 (pin 25)")
if expInfo['Device'] == 'Bits#' or expInfo['Analog'] == 'Yes':
print("Connect Analog OUT 1 TO Analog IN 1")
else:
print("Connect DOUT1 (pin2) to Oscilloscope A")
print("Connect DOUT5 (pin6) to Oscilloscope B")
sleep(10)
# Make a grating
grating = visual.GratingStim(
win=win, name='grating',units='norm',
tex='sin', mask=None,
ori=0, pos=(0,0), size=(1, 1), sf=5, phase=1.0,
color=[1,1,1], colorSpace='rgb', opacity=1,blendmode='avg',
texRes=128, interpolate=True, depth=-2.0)
bits.resetClock()
bits.win.flip()
sleep(1)
#=============================================================================#
#LUT test
# Varies the contrast of a grating by setting the software LUT in bits++ mode
# via a Tlock command.
print("1: LUT test")
print("Should see pulsating grating")
frameN=-1
while frameN < bits.frameRate*5:
# get current time
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
#bits.setAnalog(AOUT1 = 5*sin(t*50), AOUT2 = 5*cos(t*50))
# *thanksMsg* updates
if frameN == 0:
grating.setAutoDraw(True)
bits.setContrast((sin(np.pi*frameN/bits.frameRate)))
win.flip()
grating.setAutoDraw(False)
bits.setContrast(1.0)
win.flip()
#=============================================================================#
# Trigger and goggle tests
print("2a: Triggers test")
# Sets up a 2ms trigger pulse and issues via TLock
# Recording Digital inputs via the status logging commands
bits.setTrigger(0b0000000010,0.002,0.002)
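# (Added note, a sketch of the semantics; check the bits.py docs) The first
# argument to setTrigger() is a binary mask of DOUT lines: 0b0000000010
# pulses DOUT1, which this demo wires to DIN7. The other two arguments are
# the pulse onset (relative to the frame) and duration, in seconds.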
bits.startTrigger()
bits.win.flip()
T=clock()
if expInfo['Device'] != 'Bits++': # Bits# / Display++ version
# Recording Digital inputs via the status logging commands
# Example of starting and stopping status log
bits.startStatusLog()
while clock()-T<5:
bits.win.flip()
bits.stopStatusLog()
#Example of reading status values
vals = bits.getAllStatusValues()
if vals:
if len(vals) > 20:
print("All status values")
for i in range(-20,0):
print(vals[i])
print("DIN7 should change lots")
#Example of reading status events
vals = bits.getAllStatusEvents()
if vals:
if len(vals) > 20:
print("All status events")
for i in range(-20,0):
print(vals[i])
print("Input 7 should have 2ms high every frame.")
else: # Bits++ version
print("Should observe 2ms pulses on Ch A")
while clock()-T<5:
bits.win.flip()
bits.stopTrigger()
bits.win.flip()
sleep(5)
if expInfo['Device'] != 'Bits++':
bits.flush()
#=============================================================================#
print("2b: Goggles test")
# Sets up alternating left / right goggle control via D25 output and the TLock
bits.startGoggles(left=0,right=1)
bits.win.flip()
T=clock()
if expInfo['Device'] != 'Bits++': # Bits# and Display++ version
# Recording Digital inputs via the status logging commands
# Example of starting and stopping status log
bits.startStatusLog()
while clock()-T<5:
bits.win.flip()
bits.stopStatusLog()
bits.flush()
#Example of reading status values
vals = bits.getAllStatusValues()
if vals:
if len(vals) > 20:
print("All status values")
for i in range(-20,0):
print(vals[i])
print("DIN8 should change lots")
#Example of reading status events
vals = bits.getAllStatusEvents()
if vals:
if len(vals) > 20:
print("All status events")
for i in range(-20,0):
print(vals[i])
print("Input 8 should change state every frame.")
else: # Bits++ version
print("Should observe square wave on Ch B")
while clock()-T<5:
bits.win.flip()
bits.stopGoggles()
bits.win.flip()
sleep(5)
if expInfo['Device'] != 'Bits++':
bits.flush()
#=============================================================================#
print("2c: Goggles with triggers")
# Drive the goggles and a trigger at the same time.
# Note uses trigger from 2a as this will have been kept
# even when the goggles were used in 2b.
bits.startTrigger()
bits.startGoggles()
bits.win.flip()
T=clock()
if expInfo['Device'] != 'Bits++':# Bits# and Display++ version
# Recording Digital inputs via the status logging commands
# Example of starting and stopping status log
bits.startStatusLog()
while clock()-T<5:
bits.win.flip()
bits.stopStatusLog()
bits.flush()
#Example of reading status values
vals = bits.getAllStatusValues()
if vals:
if len(vals) > 20:
print("All status values")
for i in range(-20,0):
print(vals[i])
print("DIN7 and DIN8 should change lots")
#Example of reading status events
vals = bits.getAllStatusEvents()
if vals:
if len(vals) > 20:
print("All status events")
for i in range(-20,0):
print(vals[i])
print("Input 7 should have 2ms high every frame.")
print("Input 8 should change state every frame.")
else: # Bits++ version
print("Should observe 2ms pulses on Ch A")
print("Should observe square wave on Ch B")
while clock()-T<5:
#sleep(0.002)
bits.win.flip()
bits.stopGoggles()
bits.stopTrigger()
bits.win.flip()
sleep(5)
if expInfo['Device'] != 'Bits++':
bits.flush()
#=============================================================================#
# If using a bits++ all available tests are now done
if expInfo['Device'] == 'Bits++':
print("All tests done")
else: # otherwise carry on
bits.flush() # Flush the com port often
print("2d: Poll status")
#=============================================================================#
# Status polling
# Example of using the pollStatus command
bits.pollStatus()
# Get the digital word form of DIN states
valD = bits.getDigitalWord()
# Example of how to print it
if valD:
print(bin(valD['DWORD']))
#=============================================================================#
#RTBox operation
# Example of RTBox Enable, here using a bespoke mapping to route DIN9 to btn1
print("2e: Single shot trigger detected by RTBox via DIN")
bits.RTBoxEnable(mode=['down'], map=[('btn1','Din9')])
# Example of using send trigger to issue a trigger that will pulse Dout6 which would be connected to DIN
bits.sendTrigger(0b1111111,0,0.004)
bits.win.flip() # win.flip needed to finish off the trigger
#Example of using RTBoxKeysPressed to detect button box events.
if not bits.noComms: # noComms safe
        # Wait for a key - it's probably already been received anyway.
while not bits.RTBoxKeysPressed(1):
continue
# Get the response
btn=bits.getRTBoxResponse()
# Example button response
if btn:
print(btn)
else:
print("No button")
bits.flush()
btn = None
# Disable the RTBox for now
bits.RTBoxDisable()
sleep(5)
#=============================================================================#
# Example for making the CRS device beep
print("3a: Beep test")
bits.beep(400,0.5)
sleep(0.5)
#=============================================================================#
# Using Tlock to reset the CRS device clock
print("3b: Clock Reset test")
bits.resetClock()
bits.win.flip()
sleep(1)
T=clock()
#=============================================================================#
# Example for using PollStatus to read the device time
bits.pollStatus()
print("Time taken to poll status = ",clock()-T)
val = bits.getStatus()
if val:
print("Time recorded by device should be about 1s. Actually = ", val.time)
sleep(5)
# Just to make sure the status log has been stopped
bits.sendMessage('$Stop\r')
bits.flush()
#=============================================================================#
# Example Using statusBox to record key presses
print("4a: Using statusBox and Din")
bits.statusBoxEnable(mode = ['up','IO10'])
bits.sendTrigger(0b1111111,0,0.004)
bits.win.flip() # win.flip needed to finish off the trigger
if not bits.noComms: # noComms safe
        # Wait for a key - it's probably already been received anyway.
while not bits.statusBoxKeysPressed(1):
continue
# Disable the status box as soon as you don't need it.
bits.statusBoxDisable()
bits.stopTrigger()
bits.win.flip() # win.flip to make sure triggers are off.
# Get the response
btn = bits.getStatusBoxResponse() # just get first response and ditch others.
# Example button response
if btn:
print(btn)
else:
print("No button")
btn = None
bits.flush()
sleep(5)
#=============================================================================#
# Example using the statusBox with different button boxes
print("4b: Using statusBox with a button box")
# Enables different statusBox defaults depending on users input
if expInfo['Button box'] == 'CB6': #IR box
bits.statusBoxEnable(mode=['CB6','Down'])
print("Press a button on the Box")
#Example statusBoxWait command - waits for a button press
button = bits.statusBoxWait()
if button:
print(button)
elif expInfo['Button box'] == 'IO6': # A wired box
bits.statusBoxEnable(mode=['IO6','Down'])
print("Press one of first 3 buttons on the Box")
#Example statusBoxWait command - waits for a button press
button = bits.statusBoxWait()
if button:
print(button)
elif expInfo['Button box'] == 'IO': # A wired box with only 3 buttons
bits.statusBoxEnable(mode=['IO','Down'])
print("Press of first 3 buttons on the Box")
#Example statusBoxWait command - waits for a button press
button = bits.statusBoxWait()
if button:
print(button)
#=============================================================================#
# Example using the statusBox to get multiple responses
    # We've left the statusBox running for this
print("4c: Multiple buttons")
print("Now press some buttons")
T = clock()
while clock()-T<10:
bits.win.flip()
bits.statusBoxDisable()
print('Some buttons presses ',bits.statusBoxKeysPressed())
res = bits.getAllStatusBoxResponses()
if res:
print(len(res))
print(res)
bits.flush()
#=============================================================================#
# More RTBox usage examples
print("4d: RTBox test")
bits.flush()
# Enables different RTBox defaults depending on users input
if expInfo['Button box'] == 'CB6': #IR box
bits.RTBoxEnable(mode=['CB6','Down'])
print("Press a button on the Box")
#Example RTBoxWait command - waits for a button press
button = bits.RTBoxWait()
if button:
print(button)
elif expInfo['Button box'] == 'IO6': # A wired box
bits.RTBoxEnable(mode=['IO6','Down'])
print("Press one of first 3 buttons on the Box")
#Example RTBoxWait command - waits for a button press
button = bits.RTBoxWait()
if button:
print(button)
elif expInfo['Button box'] == 'IO': # A wired box
bits.RTBoxEnable(mode=['IO','Down'])
print("Press of first 3 buttons on the Box")
#Example RTBoxWait command - waits for a button press
button = bits.RTBoxWait()
if button:
print(button)
#=============================================================================#
# Example of how to calibrate or charaterise the RTBox timer
# relative to the host clock
print("4e: RTBox calibration test")
res=bits.RTBoxCalibrate(5)
bits.RTBoxDisable()
bits.flush()
print('Average clock difference =', res)
#=============================================================================#
# Commands for analog outputs, used if we have a Bits# or a
    # Display++ with the analog feature
if (expInfo['Device'] == 'Bits#'
or expInfo['Device'] == 'None'
or expInfo['Analog'] == 'Yes'):
print("5: Analog tests")
#=============================================================================#
# Example of sending an analog out - will continue while we poll the status
print("5a: Analog only using pollStatus")
bits.sendAnalog(2,2)
        sleep(1) # let the output settle
bits.pollStatus()
# Example for getting analog values and displaying them
val = bits.getAnalog()
bits.win.flip()
#bits.stopAnalog
print("Analog 1 should be = 2v")
if val:
print(val['ADC'])
sleep(5)
bits.flush()
#=============================================================================#
# Example using triggers and analog outputs at the same time.
print("5b: Analog and triggers")
# Example using set analog
bits.setAnalog(3,3)
# Set a long trigger
bits.setTrigger(0b1111111101,0.0,0.0084)
#Start both outputs
bits.startTrigger()
bits.startAnalog()
bits.win.flip()
sleep(1)
bits.pollStatus()
valA = bits.getAnalog()
valD = bits.getDigitalWord()
        bits.stopAnalog()
bits.stopTrigger()
print("Analog 1 should be = 3v")
if valA:
print(valA['ADC'])
print("Digital word should have bit 8 low")
if valD:
print(bin(valD['DWORD']))
sleep(5)
bits.flush()
#=============================================================================#
# Example using triggers, goggles and analog all at once
# Also test the ability of triggers to service people messing with
# Goggles and Analog outs as these all use the same communication channel.
print("5c: Goggles, analog and triggers")
bits.setAnalog(3,3)
bits.setTrigger(0b0000000010,0.002,0.002)
        # Test to see if triggers can service lots of toing and froing
# of the analog and goggles outputs.
bits.startAnalog()
bits.win.flip()
bits.startGoggles()
bits.win.flip()
bits.stopAnalog()
bits.win.flip()
bits.stopGoggles()
bits.win.flip()
# Now set them all going
bits.startGoggles()
bits.startAnalog()
bits.startTrigger()
bits.win.flip()
T=time()
#Setting the status event parameters to determine which events
#are recorded. Now only 'up' events will be registered
bits.setStatusEventParams(DINBase=0b1111111111,
IRBase=0b111111,
TrigInBase=0,
ADCBase=0,
threshold=1.0,
mode=['up'])
bits.startStatusLog()
while time()-T<5:
bits.setAnalog(5*sin((time()-T)), 0)
bits.win.flip()
bits.stopStatusLog()
sleep(1)
bits.flush()
# Analog should show up on the status log
vals = bits.getAllStatusValues()
if vals:
if len(vals) > 1:
print("All status values")
                for i in range(1,len(vals),700):  # only read every 700th status report as
# analog inputs update slowly relative to everything else
print(vals[i])
print("Should see analog 1 changing")
#The Goggles and triggers are best detected as events
vals = bits.getAllStatusEvents()
if vals:
if len(vals) > 20:
print("All status events")
for i in range(-20,0):
print(vals[i])
print("Input 7 should go up once every frame.")
print("Input 8 should go up every other frame.")
bits.stopTrigger()
bits.stopGoggles()
bits.stopAnalog()
bits.win.flip()
#=============================================================================#
# Example for detecting changes on the analog inputs via status events
        # Note: the analog threshold is set to 1.0 volts below
print("5d: Detecting analog events via status")
bits.setStatusEventParams(DINBase=0b1111111111,
IRBase=0b111111,
TrigInBase=0,
ADCBase=0,
threshold=1.0,
mode=['Up'])
bits.startStatusLog()
bits.startAnalog()
T = time()
while time()-T<10:
bits.setAnalog(5*sin((time()-T)), 0)
bits.win.flip()
bits.stopStatusLog()
sleep(1)
bits.stopAnalog()
bits.flush()
# Analog should show up on the status log
vals = bits.getAllStatusValues()
if vals:
if len(vals) > 1:
print("Status values")
                for i in range(1,len(vals),700):  # only read every 700th status report as
# analog inputs update slowly relative to everything else
print(vals[i])
print("Should see analog 1 changing")
#Analog changes as events
vals = bits.getAllStatusEvents()
if vals:
if len(vals) > 5:
print("Status events")
for i in range(len(vals)):
print(vals[i])
print("Should see some events on Analog 1.")
print("May get spurious events on some Bits# boxes.")
bits.stopAnalog()
bits.win.flip()
#=============================================================================#
    # Example for using the statusBox to detect analog events
print("5e: Using status box to detect analog events")
bits.sendAnalog(0.5,0.5)
    bits.win.flip()
    bits.win.flip()
bits.statusBoxEnable(mode=['Analog','Up','down'], threshold = 3.25)
bits.sendAnalog(4.0,4.0)
    bits.win.flip()
sleep(2)
# need to set analog low again by more than
# threshold in order to record new event
bits.sendAnalog(0.5,0.5)
    bits.win.flip()
sleep(2)
bits.sendAnalog(4.0,4.0)
    bits.win.flip()
sleep(2)
if not bits.noComms: # noComms safe
        # Wait for a key - it's probably already been received anyway.
while not bits.statusBoxKeysPressed():
continue
# Get the response
bits.statusBoxDisable()
btn = bits.getAllStatusBoxResponses()
# Example button response
if btn:
print(btn)
else:
print("No button")
#=============================================================================#
# Touch screen tests
if ((expInfo['Device'] == 'Display++' and expInfo['Touch screen'] == 'Yes')
or expInfo['Device'] == 'None'):
print("6: Touch Screen test")
#=============================================================================#
#Example of a touch enable, wait, disable cycle
print("6a: Touch the screen")
bits.touchEnable()
val=bits.touchWait() # pause while waiting for touch
bits.touchDisable()
if val:
print(val)
sleep(5)
#=============================================================================#
# Example of a touch enable, touchPressed, disable cycle
print("6b: Touch the screen again")
bits.touchEnable()
if not bits.noComms:
while not bits.touchPressed(): # idle while waiting for touch
# But you could do stuff here
continue
# Example of getAllTouchResponses()
vals=bits.getAllTouchResponses()
bits.touchDisable()
if vals:
for i in range(len(vals)):
print(vals[i])
sleep(5)
#=============================================================================#
    # Example of using a touch log cycle to get lots of touch responses
bits.startTouchLog()
print("6c: Touch the screen lots")
sleep(10)
bits.stopTouchLog()
#=============================================================================#
# Example of getting the full touch log and then extracting events
bits.getTouchLog()
vals=bits.getTouchEvents()
if vals:
for i in range(len(vals)):
print(vals[i])
print("All tests done")
del bits
win.close()
core.quit()
| 28,454 | Python | .py | 682 | 33.870968 | 108 | 0.577515 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,965 | RiftMinimal.py | psychopy_psychopy/psychopy/demos/coder/hardware/RiftMinimal.py |
# Minimal Oculus Rift head-mounted display example. Press the 'q' key or use
# the application GUI to exit. Requires PsychXR to be installed.
#
# This file is public domain.
#
from psychopy import visual, event, core # visual imported, even if not used!
# Create a VR session, treat the returned object just like a regular window.
#
hmd = visual.Rift()
# loop until the user quits the app through the GUI menu
stopApp = False
while not stopApp:
# You must call these functions before drawing anything. First, you need to
# get the current tracking state of the head, then pass the pose to
    # `calcEyePoses` to compute the eye positions. Only then can you begin
# rendering scenes. If `calcEyePoses` is not called, the application will
# stall.
trackingState = hmd.getTrackingState()
hmd.calcEyePoses(trackingState.headPose.thePose)
for i in ('left', 'right'):
hmd.setBuffer(i) # select the eye buffer to draw to
# Setup the viewing parameters for the current buffer, this needs to be
# called every time the buffer changes.
#
# For standard PsychoPy stimuli (e.g. GratingStim, ImageStim, etc.) you
# should use 'setDefaultView' with 'mono=True' when creating a
# visual.Rift instance. This configures the headset to properly render
# 2D stimuli, treating the HMD as a monitor.
#
hmd.setDefaultView()
# send the rendered buffer to the HMD
hmd.flip()
# check if the application should exit
if event.getKeys('q') or hmd.shouldQuit:
stopApp = True
# cleanly end the session
core.quit()
| 1,626 | Python | .py | 37 | 39.081081 | 79 | 0.713654 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,966 | ioLab_bbox.py | psychopy_psychopy/psychopy/demos/coder/hardware/ioLab_bbox.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo to illustrate using ioLabs button box.
"""
__author__ = 'Jonathan Roberts (orig demo); Jeremy Gray (rewrite 2013)'
from psychopy.hardware import iolab
import random
from psychopy import core, visual, event
# set up the button box
bbox = iolab.ButtonBox()
buttons = [1, 6]
bbox.setLights(buttons) # turn on those two lights, others off
bbox.setEnabled(buttons) # ignore other buttons
# show instructions, wait for spacebar
win = visual.Window()
instructions = visual.TextStim(win, wrapWidth=1.8, height=0.08,
text = '6 trials:\nhit the left lighted button when you see the word "left".\n'
'hit the right lighted button when you see the word "right".\n'
'hit space to start... < escape > to quit')
instructions.draw()
win.flip()
if 'escape' in event.waitKeys(keyList=['space', 'escape']):
core.quit()
# loop over fixation + left/right, get response
fixation = visual.TextStim(win, text = '+')
target = visual.TextStim(win, text = 'set during trial loop')
labeledResponse = {1: 'left', 6: 'right'}
stims = list(labeledResponse.values()) * 3 # list of stims: 3 'lefts' and 3 'rights'
random.shuffle(stims)
for stim in stims:
fixation.draw()
win.flip()
core.wait(0.5 + random.random())
target.setText(stim)
target.draw()
win.flip()
if event.getKeys(['q', 'escape']):
break
bbox.resetClock() # sets RT to 0.000 on bbox internal clock
evt = bbox.waitEvents() # clears prior events, wait for response
if not evt:
break
if labeledResponse[evt.btn] == stim: # evt.btn is int, evt.key is str
print('correct', evt.btn, evt.rt) # evt.rt is sec, evt.rtc is ms
else:
print('wrong', evt.btn, evt.rt)
bbox.standby() # lights off
win.close()
core.quit()
# The contents of this file are in the public domain.
| 1,871 | Python | .py | 51 | 33.313725 | 85 | 0.683803 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,967 | VSHD_Distortion.py | psychopy_psychopy/psychopy/demos/coder/hardware/VSHD_Distortion.py |
# Barrel distortion adjustment example for the Nordic Neural Labs VisualSystemHD
# in-scanner display.
#
# This file is public domain.
#
from psychopy import visual, event, core # visual imported, even if not used!
import numpy as np
# Create a VSHD window. Multisampling is enabled to reduce artifacts around
# edge of distorted image.
win = visual.VisualSystemHD(fullscr=False, multiSample=True, numSamples=8)
# create checkerboard texture
cbTex = np.tile([[0.5, -0.5], [-0.5, 0.5]], reps=(10, 10))
# convert texture to ImageStim, make it span the whole display
cbImg = visual.ImageStim(win, image=cbTex, units='norm', size=(2, 2))
# Current value of the distortion coefficient to display
msgText = 'Distortion Coef.: {}'.format(win.distCoef)
# text to show the current distortion value
distText = visual.TextStim(win, text=msgText)
# register 'q' key to exit the app
event.globalKeys.add('q', func=core.quit)
# main loop, exits on pressing 'q'
while True:
# update the distortion coefficient text
distText.text = 'Distortion Coef.: {:.3f}'.format(win.distCoef)
# draw the checkerboard to each eye
for eye in ('left', 'right'):
win.setBuffer(eye)
cbImg.draw()
distText.draw()
# Check if keys have been pressed to update the distortion
# coefficient.
if event.getKeys(['w']):
win.distCoef += 0.001
msgText = 'Distortion Coef.: {:.3f}'.format(win.distCoef)
elif event.getKeys(['s']):
win.distCoef -= 0.001
msgText = 'Distortion Coef.: {:.3f}'.format(win.distCoef)
win.flip()
| 1,576 | Python | .py | 38 | 37.763158 | 80 | 0.709234 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,968 | RiftHeadTrackingExample.py | psychopy_psychopy/psychopy/demos/coder/hardware/RiftHeadTrackingExample.py |
# Oculus Rift head-mounted display example for rendering 3D with head tracking.
# Press the 'q' key or use the application GUI to exit. Press 'r' to recenter
# the HMD's view. Requires PsychXR 0.2+ to be installed.
#
# This file is public domain.
#
from psychopy import visual, event, core
from psychopy.tools import arraytools, rifttools
import pyglet.gl as GL
# Create a VR session, treat the returned object just like a regular window.
# Increase the number of samples for anti-aliasing, could be 2, 4, 6, 8, 16 or
# 32 depending on your hardware. The GLFW backend is preferred when using VR.
hmd = visual.Rift(samples=1, color=(0, 0, 0), colorSpace='rgb', winType='glfw')
# Create a LibOVRPose object to represent the rigid body pose of the triangle in
# the scene. The position of the triangle will be 2 meters away from the user at
# eye height which we obtain from the HMD's settings.
trianglePosition = (0., hmd.eyeHeight, -2.)
trianglePose = rifttools.LibOVRPose(trianglePosition)
# convert the pose to a view transformation matrix
translationMatrix = trianglePose.getModelMatrix()
# convert to format Pyglet's GL libraries accept
translationMatrix = arraytools.array2pointer(translationMatrix)
# uncomment the line below to show a performance HUD
# hmd.perfHudMode('PerfSummary')
# loop until the user quits the app through the GUI menu
stopApp = False
while not stopApp:
# Get the current tracking state for the HMD which contains lots of
# information about the current pose and dynamics of the user's head and
# hands, however we are only interested in head pose for now.
trackingState = hmd.getTrackingState()
headPose = trackingState.headPose.thePose
# Calculate the eye poses from the current head pose, must be done before
# drawing anything or else the application hangs.
hmd.calcEyePoses(headPose)
for i in ('left', 'right'):
hmd.setBuffer(i) # select the eye buffer to draw to
# Setup the viewing parameters for the current buffer, this needs to be
# called every time the buffer changes.
#
# Use setRiftView to setup the projection and view matrices
# automatically from data provided by the API. Take note of which eye
# buffer is active when rendering.
#
hmd.setRiftView()
# Get the yaw, pitch and roll angles of the HMD in radians, convert them
# to degrees. This is just to demonstrate how to do so using PsychXR's
# 3D types interface. For instance, hmd.headPose.rotation is a
# Quaternion type with method "getYawPitchRoll".
#
# yaw, pitch, roll = [math.degrees(i) for i in headPose.getYawPitchRoll()]
# print(yaw, pitch, roll)
# You can get the position of the HMD in the scene as follows,
# x, y, z = headPose.pos
# print(x, y, z)
# use OpenGL rendering commands here...
GL.glPushMatrix()
GL.glMultTransposeMatrixf(translationMatrix)
GL.glBegin(GL.GL_TRIANGLES)
GL.glColor3f(1, 0, 0)
GL.glVertex3f(-1.0, -1.0, 0.0)
GL.glColor3f(0, 1, 0)
GL.glVertex3f(1.0, -1.0, 0.0)
GL.glColor3f(0, 0, 1)
GL.glVertex3f(0.0, 1.0, 0.0)
GL.glEnd()
GL.glPopMatrix()
# send the rendered buffer to the HMD
hmd.flip()
# check if the application should exit
if event.getKeys('q') or hmd.shouldQuit:
stopApp = True
elif event.getKeys('r') or hmd.shouldRecenter:
hmd.recenterTrackingOrigin()
# turn off the hud
# hmd.perfHudMode('Off')
# cleanly end the session
hmd.close()
core.quit()
| 3,626 | Python | .py | 79 | 40.64557 | 82 | 0.707732 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,969 | draw grid stim.py | psychopy_psychopy/psychopy/demos/builder/Experiments/dragAndDrop/draw grid stim.py |
import os
import random
from PIL import Image, ImageDraw
# Grid parameters
grid_size = 100 # Size of each grid square
gap_size = 5 # Size of the gap between grid squares
num_rows = 3 # Number of rows in the grid
num_cols = 3 # Number of columns in the grid
total_grids = num_rows * num_cols
num_images = 10 # Number of images to create
# Specify the number of white and pink squares you want in each image
# Set a count to 0 if you don't want that colour
num_white_squares = 1
num_pink_squares = 1
# Create the "stimuli" folder if it doesn't exist
if not os.path.exists("stimuli"):
os.makedirs("stimuli")
# Calculate total image size including gaps
image_width = num_cols * (grid_size + gap_size) - gap_size
image_height = num_rows * (grid_size + gap_size) - gap_size
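# e.g. with the defaults above: 3 * (100 + 5) - 5 = 310 px in each dimension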
# Create 10 images
for image_index in range(1, num_images + 1): # Start indexing from 1
# Create a new blank image
image = Image.new("RGB", (image_width, image_height), "white")
draw = ImageDraw.Draw(image)
# Randomly choose positions for the white grids
white_grid_indices = random.sample(range(total_grids), num_white_squares)
# Randomly choose positions for the pink grids, excluding white grid positions
pink_grid_indices = random.sample([i for i in range(total_grids) if i not in white_grid_indices], num_pink_squares)
# Fill the grids with black, white, and pink colors
for row in range(num_rows):
for col in range(num_cols):
x0 = col * (grid_size + gap_size)
y0 = row * (grid_size + gap_size)
x1 = x0 + grid_size
y1 = y0 + grid_size
grid_index = row * num_cols + col
if grid_index in white_grid_indices:
color = "white"
elif grid_index in pink_grid_indices:
color = "pink"
else:
color = "black"
draw.rectangle([x0, y0, x1, y1], fill=color)
# Save the image in the "stimuli" folder
image_path = os.path.join("stimuli", f"grid_image_{image_index}.png")
image.save(image_path)
print("Images created and saved in the 'stimuli' folder.")
| 2,157 | Python | .py | 48 | 39.083333 | 119 | 0.66348 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) | 5,970 | serialdevice.py | psychopy_psychopy/psychopy/hardware/serialdevice.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Base class for serial devices. Includes some convenience methods to open
ports and check for the expected device
"""
import sys
import time
from psychopy import logging
import serial
from psychopy.tools import systemtools as st
from psychopy.tools.attributetools import AttributeGetSetMixin
from .base import BaseDevice
def _findPossiblePorts():
if sys.platform == 'win32':
# get profiles for all serial port devices
profiles = st.systemProfilerWindowsOS(classname="Ports")
# get COM port for each device
final = []
for profile in profiles:
# find "COM" in profile description
desc = profile['Device Description']
start = desc.find("COM") + 3
end = desc.find(")", start)
# skip this profile if there's no reference to a COM port
if -1 in (start, end):
continue
# get COM port number
num = desc[start:end]
# skip this profile if COM port number doesn't look numeric
if not num.isnumeric():
continue
# store COM port
final.append(f"COM{num}")
else:
# on linux and mac the options are too wide so use serial.tools
from serial.tools import list_ports
poss = list_ports.comports()
# filter out any that report 'n/a' for their hardware
final = []
for p in poss:
if p[2] != 'n/a':
final.append(p[0]) # just the port address
return final
# map out all ports on this device, to be filled as serial devices are initialised
ports = {port: None for port in _findPossiblePorts()}
class SerialDevice(BaseDevice, AttributeGetSetMixin):
"""A base class for serial devices, to be sub-classed by specific devices
If port=None then the SerialDevice.__init__() will search for the device
on known serial ports on the computer and test whether it has found the
device using isAwake() (which the sub-classes need to implement).
"""
name = b'baseSerialClass'
longName = ""
# list of supported devices (if more than one supports same protocol)
driverFor = []
def __new__(cls, *args, **kwargs):
import inspect
# convert args to kwargs
argNames = inspect.getfullargspec(cls.__init__).args
for i, arg in enumerate(args):
kwargs[argNames[i]] = arg
# iterate through existing devices
for other in ports.values():
# skip None
if other is None:
continue
# if device already represented, use existing object
if other.isSameDevice(kwargs):
return other
# if first object to represent this device, make as normal
return super(BaseDevice, cls).__new__(cls)
def __init__(self, port=None, baudrate=9600,
byteSize=8, stopBits=1,
parity="N", # 'N'one, 'E'ven, 'O'dd, 'M'ask,
eol=b"\n",
maxAttempts=1, pauseDuration=0.1,
checkAwake=True):
if not serial:
raise ImportError('The module serial is needed to connect to this'
' device. On most systems this can be installed'
                              ' with\n\t pip install pyserial')
# get a list of port names to try
if port is None:
tryPorts = self._findPossiblePorts()
elif type(port) in [int, float]:
tryPorts = ['COM%i' % port]
else:
tryPorts = [port]
self.pauseDuration = pauseDuration
self.com = None
self.OK = False
self.maxAttempts = maxAttempts
if type(eol) is bytes:
self.eol = eol
else:
self.eol = bytes(eol, 'utf-8')
self.type = self.name # for backwards compatibility
# try to open the port
for portString in tryPorts:
try:
self.com = serial.Serial(
portString,
baudrate=baudrate, bytesize=byteSize, # number of data bits
parity=parity, # enable parity checking
stopbits=stopBits, # number of stop bits
timeout=self.pauseDuration * 3, # set a timeout value, None for
# waiting forever
xonxoff=0, # enable software flow control
rtscts=0,) # enable RTS/CTS flow control
self.portString = portString
except Exception:
if port:
# the user asked for this port and we couldn't connect
logging.warn("Couldn't connect to port %s" % portString)
else: # we were trying this port on a guess
msg = "Tried and failed to connect to port %s"
logging.debug(msg % portString)
continue # try the next port
if not self.com.isOpen():
try:
self.com.open()
except Exception:
msg = ("Couldn't open port %s. Is it being used by "
"another program?")
logging.info(msg % self.portString)
continue
if checkAwake and self.com.isOpen():
# we have an open com port. try to send a command
self.com.flushInput()
awake = False # until we confirm otherwise
for repN in range(self.maxAttempts):
awake = self.isAwake()
if awake:
msg = "Opened port %s and looks like a %s"
logging.info(msg % (self.portString, self.name))
self.OK = True
self.pause()
break
if not awake:
msg = "Opened port %s but it didn't respond like a %s"
logging.info(msg % (self.portString, self.name))
self.com.close()
self.OK = False
else:
break
if self.OK: # we have successfully sent and read a command
msg = "Successfully opened %s with a %s"
logging.info(msg % (self.portString, self.name))
# store device in ports dict
global ports
            ports[self.portString] = self
else:
raise ConnectionError(
f"Failed to connect to device on {port}, this device is likely to have "
f"been disconnected, or the port is in use by another application."
)
# we aren't in a time-critical period so flush messages
logging.flush()
def isAwake(self):
"""This should be overridden by the device class
"""
# send a command to the device and check the response matches what
# you expect; then return True or False
return True
def pause(self):
"""Pause for a default period for this device
"""
time.sleep(self.pauseDuration)
def sendMessage(self, message, autoLog=True):
"""
Send a command to the device (does not wait for a reply or sleep())
Parameters
----------
message : str or bytes
Message to send to the device - if given as a string, SerialDevice will encode it
into bytes for you
autoLog : bool
If True, then the message sent will be logged at level DEBUG
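        Examples
        --------
        A sketch, assuming `dev` is a connected SerialDevice and 'READ' is a
        command its protocol understands (the EOL bytes are appended
        automatically)::

            dev.sendMessage('READ')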
"""
if self.com.inWaiting():
inStr = self.com.read(self.com.inWaiting())
msg = "Sending '%s' to %s but found '%s' on the input buffer"
logging.warning(msg % (message, self.name, inStr))
if type(message) is not bytes:
message = bytes(message, 'utf-8')
if not message.endswith(self.eol):
                message += self.eol  # append the EOL bytes if necessary
self.com.write(message)
self.com.flush()
# log
if autoLog:
logging.debug(
f"Sent {self.name} message: " + repr(message)
)
# sending isn't as time-critical as receiving, so we can flush now
logging.flush()
def getResponse(self, length=1, timeout=0.1, autoLog=True):
"""
Read the latest response from the serial port
Parameters
----------
length : int
One of:
- 1: a single-line reply (use readline())
- 2: a multiline reply (use readlines() which *requires* timeout)
- -1: may not be any EOL character; just read whatever chars are there
timeout : float
How long to wait for a response before giving up
autoLog : bool
            If True, then the response received will be logged at level DEBUG
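        Examples
        --------
        A sketch of a query/response round trip, assuming the device answers
        a hypothetical 'IDN?' command with a single line::

            dev.sendMessage('IDN?')
            reply = dev.getResponse(length=1, timeout=0.5)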
"""
# get reply (within timeout limit)
self.com.timeout = timeout
if length == 1:
retVal = self.com.readline()
elif length > 1:
retVal = self.com.readlines()
retVal = [line.decode('utf-8') for line in retVal]
else: # was -1?
retVal = self.com.read(self.com.inWaiting())
if type(retVal) is bytes:
retVal = retVal.decode('utf-8')
# log
if retVal and autoLog:
logging.debug(f"Received {self.name} message: " + repr(retVal))
return retVal
def awaitResponse(self, multiline=False, timeout=None):
"""
Repeatedly request responses until one arrives, or until a timeout is hit.
Parameters
----------
multiline : bool
Look for additional lines after the first? WARNING: May be slow if there are none.
timeout
            Time after which to give up waiting (1 second by default)
Returns
-------
str
The message eventually received
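        Examples
        --------
        A sketch of a blocking query, assuming a hypothetical 'TEMP?' command::

            dev.sendMessage('TEMP?')
            reply = dev.awaitResponse(timeout=2.0)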
"""
# default timeout
if timeout is None:
timeout = 1
# set timeout
self.com.timeout = self.pauseDuration
# get start time
start = time.time()
t = time.time() - start
# get responses until we have one
resp = b""
while not resp and t < timeout:
t = time.time() - start
resp = self.com.read()
# get remaining chars
resp += self.com.readall()
# if we timed out, return None
if t > timeout:
return
# decode to str
resp = resp.decode('utf-8')
# if multiline, split by eol
if multiline:
            resp = resp.split(self.eol.decode('utf-8'))
return resp
def isSameDevice(self, other):
"""
Determine whether this object represents the same physical device as a given other object.
Parameters
----------
other : SerialDevice, dict
Other SerialDevice to compare against, or a dict of params (which must include
`port` as a key)
Returns
-------
bool
True if the two objects represent the same physical device
"""
if isinstance(other, SerialDevice):
# if given another object, get port
portString = other.portString
elif isinstance(other, dict) and "port" in other:
# if given a dict, get port from key
portString = other['port']
# make sure port is in the correct format
if not other['port'].startswith("COM"):
portString = "COM" + other['port']
else:
# if the other object is the wrong type or doesn't have a port, it's not this
return False
return self.portString == portString
@staticmethod
def getAvailableDevices():
ports = st.getSerialPorts()
devices = []
for profile in ports:
device = {
'deviceName': profile.get('device_name', "Unknown Serial Device"),
'port': profile.get('port', None),
'baudrate': profile.get('baudrate', 9600),
'byteSize': profile.get('bytesize', 8),
'stopBits': profile.get('stopbits', 1),
'parity': profile.get('parity', "N"),
}
devices.append(device)
return devices
def close(self):
self.com.close()
def __del__(self):
if self.com is not None:
self.com.close()
@property
def isOpen(self):
if self.com is None:
return None
return self.com.isOpen()
@staticmethod
def _findPossiblePorts():
return _findPossiblePorts()
if __name__ == "__main__":
pass
| 13,102 | Python | .py | 323 | 29.108359 | 98 | 0.558812 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,971 | brainproducts.py | psychopy_psychopy/psychopy/hardware/brainproducts.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for `Brain Products GMBH <https://www.brainproducts.com>`_
hardware.
Here we have implemented support for the Remote Control Server application,
which allows you to control recordings, send annotations etc. all from Python.
"""
import psychopy.logging as logging
try:
from psychopy_brainproducts import RemoteControlServer
except (ModuleNotFoundError, ImportError):
logging.error(
"Support for Brain Products GMBH hardware is not available this "
"session. Please install `psychopy-brainproducts` and restart the "
"session to enable support.")
except Exception as e:
logging.error(
"Error encountered while loading `psychopy-brainproducts`. Check logs "
"for more information.")
if __name__ == "__main__":
pass
| 1,017 | Python | .py | 24 | 38.75 | 79 | 0.744422 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,972 | microphone.py | psychopy_psychopy/psychopy/hardware/microphone.py |
import sys
import time
import numpy as np
from psychopy import logging as logging, prefs
from psychopy.localization import _translate
from psychopy.constants import NOT_STARTED
from psychopy.hardware import BaseDevice, BaseResponse, BaseResponseDevice
from psychopy.sound.audiodevice import AudioDeviceInfo, AudioDeviceStatus
from psychopy.sound.audioclip import AudioClip
from psychopy.sound.exceptions import AudioInvalidCaptureDeviceError, AudioInvalidDeviceError, \
AudioStreamError, AudioRecordingBufferFullError
from psychopy.tools import systemtools as st
from psychopy.tools.audiotools import SAMPLE_RATE_48kHz
_hasPTB = True
try:
import psychtoolbox.audio as audio
except (ImportError, ModuleNotFoundError):
logging.warning(
"The 'psychtoolbox' library cannot be loaded but is required for audio "
"capture (use `pip install psychtoolbox` to get it). Microphone "
"recording will be unavailable this session. Note that opening a "
"microphone stream will raise an error.")
_hasPTB = False
class MicrophoneResponse(BaseResponse):
pass
class MicrophoneDevice(BaseDevice, aliases=["mic", "microphone"]):
"""Class for recording audio from a microphone or input stream.
Creating an instance of this class will open a stream using the specified
device. Streams should remain open for the duration of your session. When a
stream is opened, a buffer is allocated to store samples coming off it.
    Samples from the input stream will be written to the buffer once
:meth:`~MicrophoneDevice.start()` is called.
Parameters
----------
index : int or `~psychopy.sound.AudioDevice`
Audio capture device to use. You may specify the device either by index
(`int`) or descriptor (`AudioDevice`).
sampleRateHz : int
Sampling rate for audio recording in Hertz (Hz). By default, 48kHz
(``sampleRateHz=48000``) is used which is adequate for most consumer
grade microphones (headsets and built-in).
channels : int
Number of channels to record samples to `1=Mono` and `2=Stereo`.
streamBufferSecs : float
Stream buffer size to pre-allocate for the specified number of seconds.
The default is 2.0 seconds which is usually sufficient.
maxRecordingSize : int
Maximum recording size in kilobytes (Kb). Since audio recordings tend to
consume a large amount of system memory, one might want to limit the
size of the recording buffer to ensure that the application does not run
out of memory. By default, the recording buffer is set to 24000 KB (or
24 MB). At a sample rate of 48kHz, this will result in 62.5 seconds of
continuous audio being recorded before the buffer is full.
audioLatencyMode : int or None
Audio latency mode to use, values range between 0-4. If `None`, the
setting from preferences will be used. Using `3` (exclusive mode) is
adequate for most applications and required if using WASAPI on Windows
for other settings (such audio quality) to take effect. Symbolic
constants `psychopy.sound.audiodevice.AUDIO_PTB_LATENCY_CLASS_` can also
be used.
audioRunMode : int
Run mode for the recording device. Default is standby-mode (`0`) which
allows the system to put the device to sleep. However, when the device
is needed, waking the device results in some latency. Using a run mode
        of `1` will keep the microphone running (or 'hot') with reduced latency
        when the recording is started. Cannot be set after initialization at
        this time.
Examples
--------
Capture 10 seconds of audio from the primary microphone::
import psychopy.core as core
        from psychopy.sound import Microphone
        mic = Microphone(streamBufferSecs=10.0)  # open the microphone
mic.start() # start recording
core.wait(10.0) # wait 10 seconds
mic.stop() # stop recording
audioClip = mic.getRecording()
print(audioClip.duration) # should be ~10 seconds
audioClip.save('test.wav') # save the recorded audio as a 'wav' file
The prescribed method for making long recordings is to poll the stream once
per frame (or every n-th frame)::
        mic = Microphone(streamBufferSecs=2.0)
mic.start() # start recording
# main trial drawing loop
mic.poll()
win.flip() # calling the window flip function
mic.stop() # stop recording
audioClip = mic.getRecording()
"""
# Force the use of WASAPI for audio capture on Windows. If `True`, only
# WASAPI devices will be returned when calling static method
# `Microphone.getDevices()`
enforceWASAPI = True
# other instances of MicrophoneDevice, stored by index
_streams = {}
def __init__(self,
index=None,
sampleRateHz=None,
channels=None,
streamBufferSecs=2.0,
maxRecordingSize=24000,
policyWhenFull='roll',
audioLatencyMode=None,
audioRunMode=0):
if not _hasPTB: # fail if PTB is not installed
raise ModuleNotFoundError(
"Microphone audio capture requires package `psychtoolbox` to "
"be installed.")
from psychopy.hardware import DeviceManager
# numericise index if needed
if isinstance(index, str):
try:
index = int(index)
except ValueError:
pass
# get information about the selected device
if isinstance(index, AudioDeviceInfo):
# if already an AudioDeviceInfo object, great!
self._device = index
elif index in (-1, None):
# get all devices
_devices = MicrophoneDevice.getDevices()
# if there are none, error
if not len(_devices):
raise AudioInvalidCaptureDeviceError(_translate(
"Could not choose default recording device as no recording "
"devices are connected."
))
# Try and get the best match which are compatible with the user's
# specified settings.
if sampleRateHz is not None or channels is not None:
self._device = self.findBestDevice(
index=_devices[0].deviceIndex, # use first that shows up
sampleRateHz=sampleRateHz,
channels=channels
)
else:
self._device = _devices[0]
            # Check if the default device settings are different than the ones
# specified by the user, if so, warn them that the default device
# settings are overwriting their settings.
if channels is None:
channels = self._device.inputChannels
elif channels != self._device.inputChannels:
logging.warning(
"Number of channels specified ({}) does not match the "
"default device's number of input channels ({}).".format(
channels, self._device.inputChannels))
channels = self._device.inputChannels
if sampleRateHz is None:
sampleRateHz = self._device.defaultSampleRate
elif sampleRateHz != self._device.defaultSampleRate:
logging.warning(
"Sample rate specified ({}) does not match the default "
"device's sample rate ({}).".format(
sampleRateHz, self._device.defaultSampleRate))
sampleRateHz = self._device.defaultSampleRate
elif isinstance(index, str):
# if given a str that's a name from DeviceManager, get info from device
device = DeviceManager.getDevice(index)
# try to duplicate and fail if not found
if isinstance(device, MicrophoneDevice):
self._device = device._device
else:
# if not found, find best match
self._device = self.findBestDevice(
index=index,
sampleRateHz=sampleRateHz,
channels=channels
)
else:
# get best match
self._device = self.findBestDevice(
index=index,
sampleRateHz=sampleRateHz,
channels=channels
)
devInfoText = ('Using audio device #{} ({}) for audio capture. '
'Full spec: {}').format(
self._device.deviceIndex,
self._device.deviceName,
self._device)
logging.info(devInfoText)
# error if specified device is not suitable for capture
if not self._device.isCapture:
raise AudioInvalidCaptureDeviceError(
'Specified audio device not suitable for audio recording. '
'Has no input channels.')
# get these values from the configured device
self._channels = self._device.inputChannels
logging.debug('Set recording channels to {} ({})'.format(
self._channels, 'stereo' if self._channels > 1 else 'mono'))
self._sampleRateHz = self._device.defaultSampleRate
logging.debug('Set stream sample rate to {} Hz'.format(
self._sampleRateHz))
# set the audio latency mode
if audioLatencyMode is None:
self._audioLatencyMode = int(prefs.hardware["audioLatencyMode"])
else:
self._audioLatencyMode = audioLatencyMode
logging.debug('Set audio latency mode to {}'.format(
self._audioLatencyMode))
assert 0 <= self._audioLatencyMode <= 4 # sanity check for pref
# internal recording buffer size in seconds
assert isinstance(streamBufferSecs, (float, int))
self._streamBufferSecs = float(streamBufferSecs)
# PTB specific stuff
self._mode = 2 # open a stream in capture mode
# get audio run mode
assert isinstance(audioRunMode, (float, int)) and \
(audioRunMode == 0 or audioRunMode == 1)
self._audioRunMode = int(audioRunMode)
# open stream
self._stream = None
self._opening = self._closing = False
self.open()
# status flag for Builder
self._statusFlag = NOT_STARTED
# setup recording buffer
self._recording = RecordingBuffer(
sampleRateHz=self._sampleRateHz,
channels=self._channels,
maxRecordingSize=maxRecordingSize,
policyWhenFull=policyWhenFull
)
self._possiblyAsleep = False
self._isStarted = False # internal state
logging.debug('Audio capture device #{} ready'.format(
self._device.deviceIndex))
# list to store listeners in
self.listeners = []
@property
def maxRecordingSize(self):
"""
Until a file is saved, the audio data from a Microphone needs to be stored in RAM. To avoid
a memory leak, we limit the amount which can be stored by a single Microphone object. The
`maxRecordingSize` parameter defines what this limit is.
Parameters
----------
value : int
How much data (in kb) to allow, default is 24mb (so 24,000kb)
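        Examples
        --------
        A rough sizing sketch: 48 kHz stereo float32 audio consumes about
        384 KB per second, so::

            mic.maxRecordingSize = 48000  # roughly 125 s of stereo audio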
"""
return self._recording.maxRecordingSize
@maxRecordingSize.setter
def maxRecordingSize(self, value):
# set size
self._recording.maxRecordingSize = value
# re-allocate
self._recording._allocRecBuffer()
@property
def policyWhenFull(self):
"""
Until a file is saved, the audio data from a Microphone needs to be stored in RAM. To avoid
a memory leak, we limit the amount which can be stored by a single Microphone object. The
`policyWhenFull` parameter tells the Microphone what to do when it's reached that limit.
Parameters
----------
value : str
One of:
- "ignore": When full, just don't record any new samples
- "warn"/"warning": Same as ignore, but will log a warning
- "error": When full, will raise an error
- "roll"/"rolling": When full, clears the start of the buffer to make room for new samples
"""
return self._recording._policyWhenFull
@policyWhenFull.setter
def policyWhenFull(self, value):
self._recording._policyWhenFull = value
def findBestDevice(self, index, sampleRateHz, channels):
"""
Find the closest match among the microphone profiles listed by psychtoolbox as valid.
Parameters
----------
index : int
Index of the device
sampleRateHz : int
Sample rate of the device
channels : int
Number of audio channels in input stream
Returns
-------
AudioDeviceInfo
Device info object for the chosen configuration
Raises
------
logging.Warning
If an exact match can't be found, will use the first match to the device index and
raise a warning.
KeyError
If no match is found whatsoever, will raise a KeyError
"""
# start off with no chosen device and no fallback
fallbackDevice = None
chosenDevice = None
# iterate through device profiles
for profile in self.getDevices():
# if same index, keep as fallback
if index in (profile.deviceIndex, profile.deviceName):
fallbackDevice = profile
# if same everything, we got it!
if all((
index in (profile.deviceIndex, profile.deviceName),
profile.defaultSampleRate == sampleRateHz,
profile.inputChannels == channels,
)):
chosenDevice = profile
if chosenDevice is None and fallbackDevice is not None:
# if no exact match found, use fallback and raise warning
logging.warning(
f"Could not find exact match for specified parameters (index={index}, sampleRateHz="
f"{sampleRateHz}, channels={channels}), falling back to best approximation ("
f"index={fallbackDevice.deviceIndex}, "
f"name={fallbackDevice.deviceName},"
f"sampleRateHz={fallbackDevice.defaultSampleRate}, "
f"channels={fallbackDevice.inputChannels})"
)
chosenDevice = fallbackDevice
elif chosenDevice is None:
# if no index match found, raise error
raise KeyError(
f"Could not find any device with index {index}"
)
return chosenDevice
def isSameDevice(self, other):
"""
Determine whether this object represents the same physical microphone as a given other
object.
Parameters
----------
other : MicrophoneDevice, dict
Other MicrophoneDevice to compare against, or a dict of params (which must include
`index` as a key)
Returns
-------
bool
True if the two objects represent the same physical device
"""
if isinstance(other, type(self)):
# if given another object, get index
index = other.index
elif isinstance(other, dict) and "index" in other:
# if given a dict, get index from key
index = other['index']
else:
# if the other object is the wrong type or doesn't have an index, it's not this
return False
return index in (self.index, self._device.deviceName)
@staticmethod
def getDevices():
"""Get a `list` of audio capture device (i.e. microphones) descriptors.
On Windows, only WASAPI devices are used.
Returns
-------
list
List of `AudioDevice` descriptors for suitable capture devices. If
empty, no capture devices have been found.
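        Examples
        --------
        List the available capture devices (a sketch)::

            for dev in MicrophoneDevice.getDevices():
                print(dev.deviceIndex, dev.deviceName)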
"""
try:
MicrophoneDevice.enforceWASAPI = bool(prefs.hardware["audioForceWASAPI"])
except KeyError:
pass # use default if option not present in settings
# query PTB for devices
if MicrophoneDevice.enforceWASAPI and sys.platform == 'win32':
allDevs = audio.get_devices(device_type=13)
else:
allDevs = audio.get_devices()
# make sure we have an array of descriptors
allDevs = [allDevs] if isinstance(allDevs, dict) else allDevs
# create list of descriptors only for capture devices
devObjs = [AudioDeviceInfo.createFromPTBDesc(dev) for dev in allDevs]
inputDevices = [desc for desc in devObjs if desc.isCapture]
return inputDevices
@staticmethod
def getAvailableDevices():
devices = []
for profile in st.getAudioCaptureDevices():
# get index as a name if possible
index = profile.get('device_name', None)
if index is None:
index = profile.get('index', None)
device = {
'deviceName': profile.get('device_name', "Unknown Microphone"),
'index': index,
'sampleRateHz': profile.get('defaultSampleRate', None),
'channels': profile.get('inputChannels', None),
}
devices.append(device)
return devices
# def warmUp(self):
# """Warm-/wake-up the audio stream.
#
# On some systems the first time `start` is called incurs additional
# latency, whereas successive calls do not. To deal with this, it is
# recommended that you run this warm-up routine prior to capturing audio
# samples. By default, this routine is called when instancing a new
# microphone object.
#
# """
# # We should put an actual test here to see if timing stabilizes after
# # multiple invocations of this function.
# self._stream.start()
# self._stream.stop()
@property
def recording(self):
"""Reference to the current recording buffer (`RecordingBuffer`)."""
return self._recording
@property
def recBufferSecs(self):
"""Capacity of the recording buffer in seconds (`float`)."""
return self.recording.bufferSecs
@property
def maxRecordingSize(self):
"""Maximum recording size in kilobytes (`int`).
Since audio recordings tend to consume a large amount of system memory,
one might want to limit the size of the recording buffer to ensure that
        the application does not run out. By default, the recording buffer is
        set to 24000 KB (or 24 MB). At a sample rate of 48kHz, this will result
        in about 62.5 seconds of continuous audio. Using stereo audio
        (``nChannels == 2``) requires twice the buffer over mono
        (``nChannels == 1``) for the same length clip.
Setting this value will allocate another recording buffer of appropriate
size. Avoid doing this in any time sensitive parts of your application.
"""
return self._recording.maxRecordingSize
@maxRecordingSize.setter
def maxRecordingSize(self, value):
self._recording.maxRecordingSize = value
@property
def latencyBias(self):
"""Latency bias to add when starting the microphone (`float`).
"""
return self._stream.latency_bias
@latencyBias.setter
def latencyBias(self, value):
self._stream.latency_bias = float(value)
@property
def audioLatencyMode(self):
"""Audio latency mode in use (`int`). Cannot be set after
initialization.
"""
return self._audioLatencyMode
@property
def streamBufferSecs(self):
"""Size of the internal audio storage buffer in seconds (`float`).
To ensure all data is captured, there must be less time elapsed between
        subsequent `poll` calls than `bufferSecs`.
"""
return self._streamBufferSecs
@property
def streamStatus(self):
"""Status of the audio stream (`AudioDeviceStatus` or `None`).
See :class:`~psychopy.sound.AudioDeviceStatus` for a complete overview
of available status fields. This property has a value of `None` if
the stream is presently closed.
Examples
--------
Get the capture start time of the stream::
# assumes mic.start() was called
captureStartTime = mic.status.captureStartTime
Check if microphone recording is active::
isActive = mic.status.active
Get the number of seconds recorded up to this point::
recordedSecs = mic.status.recordedSecs
"""
currentStatus = self._stream.status
if currentStatus != -1:
return AudioDeviceStatus.createFromPTBDesc(currentStatus)
@property
def isRecBufferFull(self):
"""`True` if there is an overflow condition with the recording buffer.
If this is `True`, then `poll()` is still collecting stream samples but
is no longer writing them to anything, causing stream samples to be
lost.
"""
return self._recording.isFull
@property
def isStarted(self):
"""``True`` if stream recording has been started (`bool`)."""
return self._isStarted
@property
def isRecording(self):
"""``True`` if stream recording has been started (`bool`). Alias of
`isStarted`."""
return self.isStarted
@property
def index(self):
return self._device.deviceIndex
def testDevice(self, duration=1, testSound=None):
"""
Make a recording to test the microphone.
Parameters
----------
duration : float, int
How long to record for? In seconds.
testSound : str, AudioClip, None
Sound to play to test mic. Use "sine", "square" or "sawtooth" to generate a sound of correct
duration using AudioClip. Use None to not play a test sound.
Returns
-------
bool
True if test passed. On fail, will log the error at level "debug".
"""
# if given a string for testSound, generate
if testSound in ("sine", "square", "sawtooth"):
testSound = getattr(AudioClip, testSound)(duration=duration)
try:
# record
self.start(stopTime=duration)
# play testSound
if testSound is not None:
from psychopy.sound import Sound
snd = Sound(value=testSound)
snd.play()
# sleep for duration
time.sleep(duration)
# poll to refresh recording
self.poll()
# get new clip
clip = self.getRecording()
# check that clip matches test sound
if testSound is not None:
# todo: check the recording against testSound
pass
return True
except Exception as err:
logging.debug(f"Microphone test failed. Error: {err}")
raise err
def start(self, when=None, waitForStart=0, stopTime=None):
"""Start an audio recording.
Calling this method will begin capturing samples from the microphone and
writing them to the buffer.
Parameters
----------
when : float, int or None
When to start the stream. If the time specified is a floating point
(absolute) system time, the device will attempt to begin recording
at that time. If `None` or zero, the system will try to start
recording as soon as possible.
waitForStart : bool
Wait for sound onset if `True`.
stopTime : float, int or None
Number of seconds to record. If `None` or `-1`, recording will
continue forever until `stop` is called.
Returns
-------
float
Absolute time the stream was started.
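        Examples
        --------
        Record for exactly five seconds, assuming `mic` is an open
        MicrophoneDevice::

            mic.start(stopTime=5.0)
            # ... call poll() regularly while recording ...
            mic.stop()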
"""
        # check if the stream has already been started
if self.isStarted:
return None
if self._stream is None:
raise AudioStreamError("Stream not ready.")
# reset the writing 'head'
self._recording.seek(0, absolute=True)
# reset warnings
# self._warnedRecBufferFull = False
startTime = self._stream.start(
repetitions=0,
when=when,
wait_for_start=int(waitForStart),
stop_time=stopTime)
# recording has begun or is scheduled to do so
self._isStarted = True
logging.debug(
'Scheduled start of audio capture for device #{} at t={}.'.format(
self._device.deviceIndex, startTime))
return startTime
def record(self, when=None, waitForStart=0, stopTime=None):
"""Start an audio recording (alias of `.start()`).
Calling this method will begin capturing samples from the microphone and
writing them to the buffer.
Parameters
----------
when : float, int or None
When to start the stream. If the time specified is a floating point
(absolute) system time, the device will attempt to begin recording
at that time. If `None` or zero, the system will try to start
recording as soon as possible.
waitForStart : bool
Wait for sound onset if `True`.
stopTime : float, int or None
Number of seconds to record. If `None` or `-1`, recording will
continue forever until `stop` is called.
Returns
-------
float
Absolute time the stream was started.
"""
return self.start(
when=when,
waitForStart=waitForStart,
stopTime=stopTime)
def stop(self, blockUntilStopped=True, stopTime=None):
"""Stop recording audio.
Call this method to end an audio recording if in progress. This will
simply halt recording and not close the stream. Any remaining samples
will be polled automatically and added to the recording buffer.
Parameters
----------
blockUntilStopped : bool
Halt script execution until the stream has fully stopped.
stopTime : float or None
Scheduled stop time for the stream in system time. If `None`, the
stream will stop as soon as possible.
Returns
-------
tuple or None
Tuple containing `startTime`, `endPositionSecs`, `xruns` and
`estStopTime`. Returns `None` if `stop` or `pause` was called
previously before `start`.
"""
# This function must be idempotent since it can be invoked at any time
# whether a stream is started or not.
if not self.isStarted or self._stream._closed:
return
# poll remaining samples, if any
if not self.isRecBufferFull:
self.poll()
startTime, endPositionSecs, xruns, estStopTime = self._stream.stop(
block_until_stopped=int(blockUntilStopped),
stopTime=stopTime)
self._isStarted = False
logging.debug(
('Device #{} stopped capturing audio samples at estimated time '
't={}. Total overruns: {} Total recording time: {}').format(
self._device.deviceIndex, estStopTime, xruns, endPositionSecs))
return startTime, endPositionSecs, xruns, estStopTime
def pause(self, blockUntilStopped=True, stopTime=None):
"""Pause a recording (alias of `.stop`).
Call this method to end an audio recording if in progress. This will
simply halt recording and not close the stream. Any remaining samples
will be polled automatically and added to the recording buffer.
Parameters
----------
blockUntilStopped : bool
Halt script execution until the stream has fully stopped.
stopTime : float or None
Scheduled stop time for the stream in system time. If `None`, the
stream will stop as soon as possible.
Returns
-------
tuple or None
Tuple containing `startTime`, `endPositionSecs`, `xruns` and
`estStopTime`. Returns `None` if `stop()` or `pause()` was called
previously before `start()`.
"""
return self.stop(blockUntilStopped=blockUntilStopped, stopTime=stopTime)
def open(self):
"""
Open the audio stream.
"""
# do nothing if stream is already open
if self._stream is not None and not self._stream._closed:
return
# set flag that it's mid-open
self._opening = True
# search for open streams and if there is one, use it
if self._device.deviceIndex in MicrophoneDevice._streams:
logging.debug(
f"Assigning audio stream for device #{self._device.deviceIndex} to a new "
f"MicrophoneDevice object."
)
self._stream = MicrophoneDevice._streams[self._device.deviceIndex]
return
# if no open streams, make one
logging.debug(
f"Opening new audio stream for device #{self._device.deviceIndex}."
)
self._stream = MicrophoneDevice._streams[self._device.deviceIndex] = audio.Stream(
device_id=self._device.deviceIndex,
latency_class=self._audioLatencyMode,
mode=self._mode,
freq=self._device.defaultSampleRate,
channels=self._device.inputChannels
)
# set run mode
self._stream.run_mode = self._audioRunMode
logging.debug('Set run mode to `{}`'.format(
self._audioRunMode))
# set latency bias
self._stream.latency_bias = 0.0
logging.debug('Set stream latency bias to {} ms'.format(
self._stream.latency_bias))
# pre-allocate recording buffer, called once
self._stream.get_audio_data(self._streamBufferSecs)
logging.debug(
'Allocated stream buffer to hold {} seconds of data'.format(
self._streamBufferSecs))
# set flag that it's done opening
self._opening = False
def close(self):
"""
Close the audio stream.
"""
# clear any attached listeners
self.clearListeners()
# do nothing further if already closed
if self._stream._closed:
return
# set flag that it's mid-close
self._closing = True
# remove ref to stream
if self._device.deviceIndex in MicrophoneDevice._streams:
MicrophoneDevice._streams.pop(self._device.deviceIndex)
# close stream
self._stream.close()
logging.debug('Stream closed')
# set flag that it's done closing
self._closing = False
def reopen(self):
"""
Calls self.close() then self.open() to reopen the stream.
"""
# get status at close
status = self.isStarted
# start timer
start = time.time()
# close then open
self.close()
self.open()
# log time it took
logging.info(
f"Reopened microphone #{self.index}, took {time.time() - start:.3f}s"
)
# if mic was running beforehand, start it back up again now
if status:
self.start()
def poll(self):
"""Poll audio samples.
Calling this method adds audio samples collected from the stream buffer
to the recording buffer that have been captured since the last `poll`
call. Time between calls of this function should be less than
`bufferSecs`. You do not need to call this if you call `stop` before
the time specified by `bufferSecs` elapses since the `start` call.
        Can only be called between calls of `start` (or `record`) and `stop`
(or `pause`).
Returns
-------
int
Number of overruns in sampling.
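        Examples
        --------
        A sketch of checking for dropped samples after each poll::

            overruns = mic.poll()
            if overruns:
                print('Dropped %d samples' % overruns)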
"""
if not self.isStarted:
logging.warning(
"Attempted to poll samples from mic which hasn't started."
)
return
if self._stream._closed:
logging.warning(
"Attempted to poll samples from mic which has been closed."
)
return
if self._opening or self._closing:
action = "opening" if self._opening else "closing"
logging.warning(
f"Attempted to poll microphone while the stream was still {action}. Samples will be "
f"lost."
)
return
# figure out what to do with this other information
audioData, absRecPosition, overflow, cStartTime = \
self._stream.get_audio_data()
if len(audioData):
# if we got samples, the device is awake, so stop figuring out if it's asleep
self._possiblyAsleep = False
elif self._possiblyAsleep is False:
# if it was awake and now we've got no samples, store the time
self._possiblyAsleep = time.time()
elif self._possiblyAsleep + 1 < time.time():
# if we've not had any evidence of it being awake for 1s, reopen
logging.error(
f"Microphone device appears to have gone to sleep, reopening to wake it up."
)
# mark as stopped so we don't recursively poll forever when stopping
self._isStarted = False
# reopen
self.reopen()
# start again
self.start()
# mark as not asleep so we don't restart again if the first poll is empty
self._possiblyAsleep = False
if overflow:
logging.warning(
"Audio stream buffer overflow, some audio samples have been "
"lost! To prevent this, ensure `Microphone.poll()` is being "
"called often enough, or increase the size of the audio buffer "
"with `bufferSecs`.")
overruns = self._recording.write(audioData)
return overruns
def getRecording(self):
"""Get audio data from the last microphone recording.
Call this after `stop` to get the recording as an `AudioClip` object.
Raises an error if a recording is in progress.
Returns
-------
AudioClip
Recorded data between the last calls to `start` (or `record`) and
`stop`.
"""
if self.isStarted:
logging.warn(
"Cannot get audio clip while recording is in progress, so stopping recording now."
)
self.stop()
return self._recording.getSegment() # full recording
def getCurrentVolume(self, timeframe=0.2):
"""
Get the current volume measured by the mic.
Parameters
----------
timeframe : float
            Time frame (s) over which to take samples. Default is 0.2 s.
Returns
-------
float
Current volume registered by the mic, will depend on relative volume of the mic but
should mostly be between 0 (total silence) and 1 (very loud).
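        Examples
        --------
        A sketch of a simple level meter, assuming `mic` has been started::

            vol = mic.getCurrentVolume(timeframe=0.2)
            print('mic level: %.3f' % vol)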
"""
# if mic hasn't started yet, return 0 as it's recorded nothing
if not self.isStarted or self._stream._closed:
return 0
# poll most recent samples
self.poll()
        # get the last `timeframe` seconds as a clip
clip = self._recording.getSegment(
max(self._recording.lastSample / self._sampleRateHz - timeframe, 0)
)
# get average volume
rms = clip.rms() * 10
# round
rms = np.round(rms.astype(np.float64), decimals=3)
return rms
def addListener(self, listener, startLoop=False):
"""
Add a listener, which will receive all the same messages as this device.
Parameters
----------
listener : str or psychopy.hardware.listener.BaseListener
Either a Listener object, or use one of the following strings to create one:
- "liaison": Create a LiaisonListener with DeviceManager.liaison as the server
- "print": Create a PrintListener with default settings
- "log": Create a LoggingListener with default settings
startLoop : bool
If True, then upon adding the listener, start up an asynchronous loop to dispatch messages.
"""
# add listener as normal
listener = BaseResponseDevice.addListener(self, listener, startLoop=startLoop)
# if we're starting a listener loop, start recording
if startLoop:
self.start()
return listener
def clearListeners(self):
"""
Remove any listeners from this device.
Returns
-------
bool
True if completed successfully
"""
# clear listeners as normal
resp = BaseResponseDevice.clearListeners(self)
# stop recording
self.stop()
return resp
def dispatchMessages(self, clear=True):
"""
Dispatch current volume as a MicrophoneResponse object to any attached listeners.
Parameters
----------
clear : bool
If True, will clear the recording up until now after dispatching the volume. This is
useful if you're just sampling volume and aren't wanting to store the recording.
"""
# if mic is not recording, there's nothing to dispatch
if not self.isStarted:
return
# poll the mic now
self.poll()
# create a response object
message = MicrophoneResponse(
logging.defaultClock.getTime(),
self.getCurrentVolume(),
device=self,
)
# dispatch to listeners
for listener in self.listeners:
listener.receiveMessage(message)
return message
class RecordingBuffer:
"""Class for a storing a recording from a stream.
Think of instances of this class behaving like an audio tape whereas the
`MicrophoneDevice` class is the tape recorder. Samples taken from the stream are
written to the tape which stores the data.
Used internally by the `MicrophoneDevice` class, users usually do not create
instances of this class themselves.
Parameters
----------
sampleRateHz : int
Sampling rate for audio recording in Hertz (Hz). By default, 48kHz
(``sampleRateHz=48000``) is used which is adequate for most consumer
grade microphones (headsets and built-in).
channels : int
Number of channels to record samples to `1=Mono` and `2=Stereo`.
maxRecordingSize : int
Maximum recording size in kilobytes (Kb). Since audio recordings tend to
consume a large amount of system memory, one might want to limit the
size of the recording buffer to ensure that the application does not run
out of memory. By default, the recording buffer is set to 24000 KB (or
24 MB). At a sample rate of 48kHz, this will result in 62.5 seconds of
continuous audio being recorded before the buffer is full.
policyWhenFull : str
What to do when the recording buffer is full and cannot accept any more
samples. If 'ignore', samples will be silently dropped and the `isFull`
property will be set to `True`. If 'warn', a warning will be logged and
the `isFull` flag will be set. Finally, if 'error' the application will
raise an exception.
"""
def __init__(self, sampleRateHz=SAMPLE_RATE_48kHz, channels=2,
maxRecordingSize=24000, policyWhenFull='ignore'):
self._channels = channels
self._sampleRateHz = sampleRateHz
self._maxRecordingSize = maxRecordingSize
self._samples = None # `ndarray` created in _allocRecBuffer`
self._offset = 0 # recording offset
self._lastSample = 0 # offset of the last sample from stream
self._spaceRemaining = None # set in `_allocRecBuffer`
self._totalSamples = None # set in `_allocRecBuffer`
self._policyWhenFull = policyWhenFull
self._warnedRecBufferFull = False
self._loops = 0
self._allocRecBuffer()
def _allocRecBuffer(self):
"""Allocate the recording buffer. Called internally if properties are
changed."""
# allocate another array
nBytes = self._maxRecordingSize * 1000
recArraySize = int((nBytes / self._channels) / (np.float32()).itemsize)
self._samples = np.zeros(
(recArraySize, self._channels), dtype=np.float32, order='C')
# sanity check
assert self._samples.nbytes == nBytes
self._totalSamples = len(self._samples)
self._spaceRemaining = self._totalSamples
@property
def samples(self):
"""Reference to the actual sample buffer (`ndarray`)."""
return self._samples
@property
def bufferSecs(self):
"""Capacity of the recording buffer in seconds (`float`)."""
return self._totalSamples / self._sampleRateHz
@property
def nbytes(self):
"""Number of bytes the recording buffer occupies in memory (`int`)."""
return self._samples.nbytes
@property
def sampleBytes(self):
"""Number of bytes per sample (`int`)."""
return np.float32().itemsize
@property
def spaceRemaining(self):
"""The space remaining in the recording buffer (`int`). Indicates the
number of samples that the buffer can still add before overflowing.
"""
return self._spaceRemaining
@property
def isFull(self):
"""Is the recording buffer full (`bool`)."""
return self._spaceRemaining <= 0
@property
def totalSamples(self):
"""Total number samples the recording buffer can hold (`int`)."""
return self._totalSamples
@property
def writeOffset(self):
"""Index in the sample buffer where new samples will be written when
`write()` is called (`int`).
"""
return self._offset
@property
def lastSample(self):
"""Index of the last sample recorded (`int`). This can be used to slice
the recording buffer, only getting data from the beginning to place
where the last sample was written to.
"""
return self._lastSample
@property
def loopCount(self):
"""Number of times the recording buffer restarted (`int`). Only valid if
`loopback` is ``True``."""
return self._loops
@property
def maxRecordingSize(self):
"""Maximum recording size in kilobytes (`int`).
Since audio recordings tend to consume a large amount of system memory,
one might want to limit the size of the recording buffer to ensure that
the application does not run out of memory. By default, the recording
buffer is set to 24000 KB (or 24 MB). At a sample rate of 48kHz, this
will result in 62.5 seconds of continuous audio being recorded before
the buffer is full.
Setting this value will allocate another recording buffer of appropriate
size. Avoid doing this in any time sensitive parts of your application.
"""
return self._maxRecordingSize
@maxRecordingSize.setter
def maxRecordingSize(self, value):
value = int(value)
# don't do this unless the value changed
if value == self._maxRecordingSize:
return
# if different than last value, update the recording buffer
self._maxRecordingSize = value
self._allocRecBuffer()
def seek(self, offset, absolute=False):
"""Set the write offset.
Use this to specify where to begin writing samples the next time `write`
is called. You should call `seek(0)` when starting a new recording.
Parameters
----------
offset : int
Position in the sample buffer to set.
absolute : bool
Use absolute positioning. Use relative positioning if `False` where
the value of `offset` will be added to the current offset. Default
is `False`.
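        Examples
        --------
        Rewind the write position before starting a new recording (a sketch)::

            buffer.seek(0, absolute=True)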
"""
if not absolute:
self._offset += offset
else:
            self._offset = offset
assert 0 <= self._offset < self._totalSamples
self._spaceRemaining = self._totalSamples - self._offset
def write(self, samples):
"""Write samples to the recording buffer.
Parameters
----------
samples : ArrayLike
Samples to write to the recording buffer, usually of a stream. Must
have the same number of dimensions as the internal array.
Returns
-------
int
Number of samples overflowed. If this is zero then all samples have
been recorded, if not, the number of samples rejected is given.
"""
nSamples = len(samples)
if self.isFull:
if self._policyWhenFull in ('warn', 'warning'):
# if policy is warn, we log a warning then proceed as if ignored
if not self._warnedRecBufferFull:
logging.warning(
f"Audio recording buffer filled! This means that no "
f"samples are saved beyond {round(self.bufferSecs, 6)} "
f"seconds. Specify a larger recording buffer next time "
f"to avoid data loss.")
logging.flush()
self._warnedRecBufferFull = True
return nSamples
elif self._policyWhenFull == 'error':
# if policy is error, we fully error
raise AudioRecordingBufferFullError(
"Cannot write samples, recording buffer is full.")
            elif self._policyWhenFull in ('rolling', 'roll'):
                # if policy is rolling, drop the oldest samples to make room
                toSave = self._totalSamples - nSamples
                # keep the most recent samples so recent data (e.g. for volume
                # measurement) survives the roll
                savedSamples = self._samples[-toSave:, :]
                # log
                if not self._warnedRecBufferFull:
                    logging.warning(
                        f"Microphone buffer reached, as policy when full is 'roll'/'rolling' the "
                        f"oldest samples will be cleared to make room for new samples."
                    )
                    logging.flush()
                    self._warnedRecBufferFull = True
                # clear samples
                self.clear()
                # reassign saved samples
                self.write(savedSamples)
else:
# if policy is to ignore, we simply don't write new samples
return nSamples
if not nSamples: # no samples came out of the stream, just return
return
if self._spaceRemaining >= nSamples:
self._lastSample = self._offset + nSamples
audioData = samples[:, :]
else:
self._lastSample = self._offset + self._spaceRemaining
audioData = samples[:self._spaceRemaining, :]
self._samples[self._offset:self._lastSample, :] = audioData
self._offset += nSamples
self._spaceRemaining -= nSamples
# Check if the recording buffer is now full. Next call to `poll` will
# not record anything.
if self._spaceRemaining <= 0:
self._spaceRemaining = 0
d = nSamples - self._spaceRemaining
return 0 if d < 0 else d
def clear(self):
# reset all live attributes
self._samples = None
self._offset = 0
self._lastSample = 0
self._spaceRemaining = None
self._totalSamples = None
# reallocate buffer
self._allocRecBuffer()
def getSegment(self, start=0, end=None):
"""Get a segment of recording data as an `AudioClip`.
Parameters
----------
start : float or int
Absolute time in seconds for the start of the clip.
end : float or int
Absolute time in seconds for the end of the clip. If `None` the time
at the last sample is used.
Returns
-------
AudioClip
Audio clip object with samples between `start` and `end`.
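        Examples
        --------
        Get the last two seconds of the recording, assuming a 48 kHz buffer
        (`buffer` is a hypothetical RecordingBuffer instance)::

            dur = buffer.lastSample / 48000
            clip = buffer.getSegment(start=max(dur - 2.0, 0))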
"""
idxStart = int(start * self._sampleRateHz)
idxEnd = self._lastSample if end is None else int(
end * self._sampleRateHz)
if not len(self._samples):
raise AudioStreamError(
"Could not access recording as microphone has sent no samples."
)
return AudioClip(
np.array(self._samples[idxStart:idxEnd, :],
dtype=np.float32, order='C'),
sampleRateHz=self._sampleRateHz)
| 49,810 | Python | .py | 1,133 | 33.427184 | 104 | 0.61631 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,973 | labjacks.py | psychopy_psychopy/psychopy/hardware/labjacks.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""This provides a basic LabJack U3 class that can write a full byte of data, by
extending the labjack python library u3.U3 class.
These are optional components that can be obtained by installing the
`psychopy-labjack` extension into the current environment.
"""
import psychopy.logging as logging
try:
from psychopy_labjack import U3
except (ModuleNotFoundError, ImportError):
logging.error(
"Support for LabJack hardware is not available this session. Please "
"install `psychopy-labjack` and restart the session to enable support.")
except Exception as e:
logging.error(
"Error encountered while loading `psychopy-labjack`. Check logs for "
"more information.")
if __name__ == "__main__":
pass
| 980 | Python | .py | 23 | 39.217391 | 80 | 0.745263 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,974 | button.py | psychopy_psychopy/psychopy/hardware/button.py |
from psychopy import logging, constants, core
from psychopy.hardware import base, DeviceManager, keyboard
from psychopy.localization import _translate
class ButtonResponse(base.BaseResponse):
# list of fields known to be a part of this response type
fields = ["t", "value", "channel"]
def __init__(self, t, value, channel):
# initialise base response class
base.BaseResponse.__init__(self, t=t, value=value)
# store channel
self.channel = channel
class BaseButtonGroup(base.BaseResponseDevice):
responseClass = ButtonResponse
def __init__(self, channels=1):
base.BaseResponseDevice.__init__(self)
# store number of channels
self.channels = channels
# attribute in which to store current state
self.state = [None] * channels
# start off with a status
self.status = constants.NOT_STARTED
def resetTimer(self, clock=logging.defaultClock):
raise NotImplementedError()
@staticmethod
def getAvailableDevices():
raise NotImplementedError()
def dispatchMessages(self):
raise NotImplementedError()
def parseMessage(self, message):
raise NotImplementedError()
def receiveMessage(self, message):
# do base receiving
base.BaseResponseDevice.receiveMessage(self, message)
# update state
self.state[message.channel] = message.value
def getResponses(self, state=None, channel=None, clear=True):
"""
Get responses which match a given on/off state.
Parameters
----------
state : bool or None
True to get button "on" responses, False to get button "off" responses, None to get all
responses.
channel : int, list
Which button or buttons to get responses from? Leave as None to get all buttons.
clear : bool
Whether or not to remove responses matching `state` after retrieval.
Returns
-------
list[ButtonResponse]
List of matching responses.
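        Examples
        --------
        Get "on" presses from the first two buttons, assuming `bbox` is a
        concrete button group::

            presses = bbox.getResponses(state=True, channel=[0, 1])
            for resp in presses:
                print(resp.t, resp.channel)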
"""
# substitute empty channel param for None
if isinstance(channel, (list, tuple)) and not len(channel):
channel = None
# force channel to list
if channel is not None and not isinstance(channel, (list, tuple)):
channel = [channel]
# make sure device dispatches messages
self.dispatchMessages()
# array to store matching responses
matches = []
# check messages in chronological order
for resp in self.responses.copy():
# does this message meet the criterion?
if state is None or resp.value == state:
if channel is None or resp.channel in channel:
# if clear, remove the response
if clear:
i = self.responses.index(resp)
resp = self.responses.pop(i)
# append the response to responses array
matches.append(resp)
return matches
def getState(self, channel=None):
# dispatch messages from device
self.dispatchMessages()
# return state after update
if channel is not None:
return self.state[channel]
else:
return self.state
class KeyboardButtonBox(BaseButtonGroup):
"""
    Use a standard keyboard to imitate the functions of a button box, mostly useful for testing.
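    Examples
    --------
    A sketch mapping eight keyboard keys to button channels::

        bbox = KeyboardButtonBox(buttons=('g', 'h', 'j', 'k', 'a', 's', 'd', 'f'))
        bbox.resetTimer()
        presses = bbox.getResponses(state=True)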
"""
def __init__(self, buttons=('g', 'h', 'j', 'k', 'a', 's', 'd', 'f'), device=-1, bufferSize=10000):
# initialise base class
BaseButtonGroup.__init__(self, channels=len(buttons))
# store buttons
self.buttons = [str(btn) for btn in buttons]
# make own clock
self.clock = core.Clock()
# initialise keyboard
self.kb = keyboard.KeyboardDevice(
clock=self.clock,
device=device,
bufferSize=bufferSize,
muteOutsidePsychopy=False
)
def resetTimer(self, clock=logging.defaultClock):
self.clock.reset(clock.getTime())
@staticmethod
def getAvailableDevices():
profiles = []
for profile in keyboard.KeyboardDevice.getAvailableDevices():
# change device name to keyboard button box
profile['deviceName'] = "KeyboardButtonBox"
profiles.append(profile)
return profiles
def dispatchMessages(self):
messages = self.kb.getKeys(keyList=self.buttons, waitRelease=False, clear=True)
messages += self.kb.getKeys(keyList=self.buttons, waitRelease=True, clear=True)
for msg in messages:
resp = self.parseMessage(msg)
self.receiveMessage(resp)
def parseMessage(self, message):
        # work out time and state of the KeyPress
state = message.duration is None
t = message.tDown
# if state is a release, add duration to timestamp
if message.duration:
t += message.duration
# get channel
channel = None
if message.name in self.buttons:
channel = self.buttons.index(message.name)
elif message.code in self.buttons:
channel = self.buttons.index(message.code)
# create response
resp = ButtonResponse(
t=t,
value=state,
channel=channel
)
return resp
def isSameDevice(self, other):
# all Keyboards are the same device
return isinstance(other, (KeyboardButtonBox, dict))
class ButtonBox:
"""
Builder-friendly wrapper around BaseButtonGroup.
"""
def __init__(self, device):
if isinstance(device, BaseButtonGroup):
# if given a button group, use it
self.device = device
# if given a string, get via DeviceManager
if isinstance(device, str):
if device in DeviceManager.devices:
self.device = DeviceManager.getDevice(device)
else:
# don't use formatted string literals in _translate()
raise ValueError(_translate(
"Could not find device named '{device}', make sure it has been set up "
"in DeviceManager."
                ).format(device=device))
# starting value for status (Builder)
self.status = constants.NOT_STARTED
# arrays to store info (Builder)
self.buttons = []
self.times = []
self.corr = []
def getAvailableDevices(self):
return self.device.getAvailableDevices()
def getResponses(self, state=None, channel=None, clear=True):
return self.device.getResponses(state=state, channel=channel, clear=clear)
def resetTimer(self, clock=logging.defaultClock):
return self.device.resetTimer(clock=clock)
def getState(self, channel):
return self.device.getState(channel=channel)
def clearResponses(self):
return self.device.clearResponses()
| 7,055 | Python | .py | 173 | 31.127168 | 102 | 0.627099 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,975 | photodiode.py | psychopy_psychopy/psychopy/hardware/photodiode.py |
import json
from psychopy import core, layout, logging
from psychopy.hardware import base, DeviceManager
from psychopy.localization import _translate
from psychopy.hardware import keyboard
class PhotodiodeResponse(base.BaseResponse):
# list of fields known to be a part of this response type
fields = ["t", "value", "channel", "threshold"]
def __init__(self, t, value, channel, threshold=None):
# initialise base response class
base.BaseResponse.__init__(self, t=t, value=value)
# store channel and threshold
self.channel = channel
self.threshold = threshold
class BasePhotodiodeGroup(base.BaseResponseDevice):
responseClass = PhotodiodeResponse
def __init__(self, channels=1, threshold=None, pos=None, size=None, units=None):
base.BaseResponseDevice.__init__(self)
# store number of channels
self.channels = channels
# attribute in which to store current state
self.state = [False] * channels
# set initial threshold
self.threshold = [None] * channels
self.setThreshold(threshold, channel=list(range(channels)))
# store position params
self.units = units
self.pos = pos
self.size = size
def dispatchMessages(self):
"""
Dispatch messages - this could mean pulling them from a backend, or from a parent device
Returns
-------
bool
True if request sent successfully
"""
raise NotImplementedError()
def parseMessage(self, message):
raise NotImplementedError()
def receiveMessage(self, message):
# do base receiving
base.BaseResponseDevice.receiveMessage(self, message)
# update state
self.state[message.channel] = message.value
@staticmethod
def getAvailableDevices():
devices = []
for cls in DeviceManager.deviceClasses:
# get class from class str
cls = DeviceManager._resolveClassString(cls)
# if class is a photodiode, add its available devices
if issubclass(cls, BasePhotodiodeGroup) and cls is not BasePhotodiodeGroup:
devices += cls.getAvailableDevices()
return devices
def getResponses(self, state=None, channel=None, clear=True):
"""
Get responses which match a given on/off state.
Parameters
----------
state : bool or None
True to get photodiode "on" responses, False to get photodiode "off" responses, None to get all responses.
channel : int
Which photodiode to get responses from?
clear : bool
Whether or not to remove responses matching `state` after retrieval.
Returns
-------
list[PhotodiodeResponse]
List of matching responses.
"""
# make sure parent dispatches messages
self.dispatchMessages()
# array to store matching responses
matches = []
# check messages in chronological order
for resp in self.responses.copy():
# does this message meet the criterion?
if (state is None or resp.value == state) and (channel is None or resp.channel == channel):
# if clear, remove the response
if clear:
i = self.responses.index(resp)
resp = self.responses.pop(i)
# append the response to responses array
matches.append(resp)
return matches
def findChannels(self, win):
"""
Flash the entire window white to check which channels are detecting light from the given
window.
Parameters
----------
win : psychopy.visual.Window
Window to flash white.
"""
from psychopy import visual
# clock for timeouts
timeoutClock = core.Clock()
# box to cover screen
rect = visual.Rect(
win,
size=(2, 2), pos=(0, 0), units="norm",
autoDraw=False
)
win.flip()
# show black
rect.fillColor = "black"
rect.draw()
win.flip()
# wait 250ms for flip to happen and photodiode to catch it
timeoutClock.reset()
while timeoutClock.getTime() < 0.25:
self.dispatchMessages()
# finish dispatching any messages which are only partially received
while self.hasUnfinishedMessage():
self.dispatchMessages()
# clear caught messages so we're starting afresh
self.clearResponses()
# show white
rect.fillColor = "white"
rect.draw()
win.flip()
# wait 250ms for flip to happen and photodiode to catch it
timeoutClock.reset()
while timeoutClock.getTime() < 0.25:
self.dispatchMessages()
# finish dispatching any messages which are only partially received
while self.hasUnfinishedMessage():
self.dispatchMessages()
# start off with no channels
channels = []
# iterate through potential channels
for i, state in enumerate(self.state):
# if any detected the flash, append it
if state:
channels.append(i)
return channels
def findPhotodiode(self, win, channel=None, retryLimit=5):
"""
Draws rectangles on the screen and records photodiode responses to recursively find the location of the diode.
Returns
-------
psychopy.layout.Position
Position of the diode on the window. Essentially, the center of the last rectangle which the photodiode
was able to detect.
psychopy.layout.Size
Size of the area of certainty. Essentially, the size of the last (smallest) rectangle which the photodiode
was able to detect.
"""
# timeout clock
timeoutClock = core.Clock()
# keyboard to check for escape
kb = keyboard.Keyboard(deviceName="photodiodeValidatorKeyboard")
# stash autodraw
win.stashAutoDraw()
# import visual here - if they're using this function, it's already in the stack
from psychopy import visual
# black box to cover screen
bg = visual.Rect(
win,
size=(2, 2), pos=(0, 0), units="norm",
fillColor="black",
autoDraw=False
)
# add low opacity label
label = visual.TextBox2(
win,
text=f"Finding photodiode...",
fillColor=(0, 0, 0), color=(80, 80, 80), colorSpace="rgb255",
pos=(0, 0), size=(2, 2), units="norm",
alignment="center",
autoDraw=False
)
# make rect
rect = visual.Rect(
win,
size=(2, 2), pos=(0, 0), anchor="center", units="norm",
fillColor="white",
autoDraw=False
)
# if not given a channel, use first one which is responsive to the win
if channel is None:
# get responsive channels
responsiveChannels = self.findChannels(win=win)
# use first responsive channel
if responsiveChannels:
channel = responsiveChannels[0]
else:
# if no channels are responsive, use 0th channel and let scanQuadrants fail cleanly
channel = 0
# update label text once we have a channel
label.text = f"Finding photodiode {channel}..."
def scanQuadrants(responsive=False):
"""
Recursively shrink the rectangle around the position of the photodiode until it's too
small to detect.
Parameters
----------
responsive : bool
When calling manually, this should always be left as False! Will be set to True if
any response was received from the photodiode.
"""
# work out width and height of area
w, h = rect.size
# work out left, right, top and bottom of area
r, t = rect.pos + rect.size / 2
l, b = rect.pos - rect.size / 2
# set rect size to half of area size
rect.size /= 2
# try each corner
for x, y in [
(l + w / 4, t - h / 4), # top left
(r - w / 4, t - h / 4), # top right
(l + w / 4, b + h / 4), # bottom left
(r - w / 4, b + h / 4), # bottom right
rect.pos, # center
(l + w / 2, t - h / 4), # top center
(l + w / 2, b + h / 4), # bottom center
(l + w / 4, b + h / 2), # center left
(r - w / 4, b + h / 2), # center right
]:
# position rect
rect.pos = (x, y)
# draw
bg.draw()
label.draw()
rect.draw()
win.flip()
# wait for flip to happen and photodiode to catch it (max 250ms)
timeoutClock.reset()
self.clearResponses()
while not self.responses and timeoutClock.getTime() < 0.25:
self.dispatchMessages()
# finish dispatching any messages which are only partially received
while self.hasUnfinishedMessage():
self.dispatchMessages()
# check for escape before entering recursion
if kb.getKeys(['escape']):
return None
# poll photodiode
if self.getState(channel):
# mark that we've got a response
responsive = True
# if it detected this rectangle, recur
return scanQuadrants(responsive=responsive)
# if none of these have returned, rect is too small to cover the whole photodiode, so return
return responsive
def handleNonResponse(label, rect, timeout=5):
# log error
logging.error("Failed to find Photodiode")
# skip if retry limit hit
if retryLimit <= 0:
return None
# start a countdown
timer = core.CountdownTimer(start=timeout)
# set label text to alert user
msg = (
"Received no responses from photodiode during `findPhotodiode`. Photodiode may not "
"be connected or may be configured incorrectly.\n"
"\n"
"To manually specify the photodiode's position, press ENTER. To quit, press "
"ESCAPE. Otherwise, will retry in {:.0f}s\n"
)
label.foreColor = "red"
# start a frame loop until they press enter
keys = []
while timer.getTime() > 0 and not keys:
# get keys
keys = kb.getKeys()
# skip if escape pressed
if "escape" in keys:
return None
# specify manually if return pressed
if "return" in keys:
return specifyManually(label=label, rect=rect)
# format label
label.text = msg.format(timer.getTime())
# show label and square
label.draw()
# flip
win.flip()
# if we timed out...
logging.error("Trying to find photodiode again after failing")
# re-detect threshold
self.findThreshold(win, channel=channel)
# re-find photodiode
return self.findPhotodiode(win, channel=channel, retryLimit=retryLimit-1)
def specifyManually(label, rect):
# set label text to alert user
label.text = (
"Use the arrow keys to move the photodiode patch and use the plus/minus keys to "
"resize it. Press ENTER when finished, or press ESCAPE to quit.\n"
)
label.foreColor = "red"
# revert to defaults
self.units = rect.units = "norm"
self.size = rect.size = (0.1, 0.1)
self.pos = rect.pos = (0.9, -0.9)
# start a frame loop until they press enter
keys = []
res = 0.05
while "return" not in keys and "escape" not in keys:
# get keys
keys = kb.getKeys()
# skip if escape pressed
if "escape" in keys:
return None
# finish if return pressed
if "return" in keys:
return (
layout.Position(self.pos, units="norm", win=win),
layout.Position(self.size, units="norm", win=win),
)
# move rect according to arrow keys
pos = list(rect.pos)
if "left" in keys:
pos[0] -= res
if "right" in keys:
pos[0] += res
if "up" in keys:
pos[1] += res
if "down" in keys:
pos[1] -= res
rect.pos = self.pos = pos
# resize rect according to +- keys
size = rect.size
if "equal" in keys:
size = [sz * 2 for sz in size]
if "minus" in keys:
size = [sz / 2 for sz in size]
rect.size = self.size = size
# show label and square
label.draw()
rect.draw()
# flip
win.flip()
# reset state
self.state = [None] * self.channels
self.dispatchMessages()
self.clearResponses()
# recursively shrink rect around the photodiode
responsive = scanQuadrants()
# if cancelled, warn and continue
if responsive is None:
logging.warn(
"`findPhotodiode` procedure cancelled by user."
)
return (
layout.Position(self.pos, units="norm", win=win),
layout.Position(self.size, units="norm", win=win),
)
# if we didn't get any responses at all, prompt to try again
if not responsive:
return handleNonResponse(label=label, rect=rect)
# clear all the events created by this process
self.state = [None] * self.channels
self.dispatchMessages()
self.clearResponses()
# reinstate autodraw
win.retrieveAutoDraw()
# flip
win.flip()
# set size/pos/units
self.units = "norm"
self.size = rect.size * 2
self.pos = rect.pos + rect.size / (-2, 2)
return (
layout.Position(self.pos, units="norm", win=win),
layout.Position(self.size, units="norm", win=win),
)
def findThreshold(self, win, channel=None):
# if not given a channel, find for all channels
if channel is None:
thresholds = []
# iterate through channels
for channel in range(self.channels):
thresholds.append(
self.findThreshold(win, channel=channel)
)
# return array of thresholds
return thresholds
# keyboard to check for escape/continue
kb = keyboard.Keyboard(deviceName="photodiodeValidatorKeyboard")
# stash autodraw
win.stashAutoDraw()
# import visual here - if they're using this function, it's already in the stack
from psychopy import visual
# box to cover screen
bg = visual.Rect(
win,
size=(2, 2), pos=(0, 0), units="norm",
autoDraw=False
)
# add low opacity label
label = visual.TextBox2(
win,
text=f"Finding best threshold for photodiode {channel}...",
fillColor=None, color=(0, 0, 0), colorSpace="rgb",
pos=(0, 0), size=(2, 2), units="norm",
alignment="center",
autoDraw=False
)
def _bisectThreshold(threshRange, recursionLimit=16):
"""
Recursively narrow thresholds to approach an acceptable threshold
"""
# log
logging.debug(
f"Trying threshold range: {threshRange}"
)
# work out current
current = int(
sum(threshRange) / 2
)
# set threshold and get value
value = self._setThreshold(int(current), channel=channel)
if value:
# if expecting light and got light, we have an upper bound
threshRange[1] = current
else:
# if expecting light and got none, we have a lower bound
threshRange[0] = current
# check for escape before entering recursion
if kb.getKeys(['escape']):
return current
# check for recursion limit before reentering recursion
if recursionLimit <= 0:
return current
# return if threshold is small enough
if abs(threshRange[1] - threshRange[0]) < 4:
return current
# recur with new range
return _bisectThreshold(threshRange, recursionLimit=recursionLimit-1)
# reset state
self.dispatchMessages()
self.clearResponses()
# get black and white thresholds
thresholds = {}
for col in ("black", "white"):
# set bg color
bg.fillColor = col
bg.draw()
# make text visible, but not enough to throw off the diode
txtCol = 0.8
if col == "white":
txtCol *= -1
label.color = (txtCol, txtCol, txtCol)
# draw
label.draw()
win.flip()
# get threshold
thresholds[col] = _bisectThreshold([0, 255], recursionLimit=16)
# pick a threshold between white and black (i.e. one that's safe)
threshold = (thresholds['white'] + thresholds['black']) / 2
# clear bg rect
bg.setAutoDraw(False)
# clear all the events created by this process
self.state = [None] * self.channels
self.dispatchMessages()
self.clearResponses()
# reinstate autodraw
win.retrieveAutoDraw()
# flip
win.flip()
# set to found threshold
self._setThreshold(int(threshold), channel=channel)
return int(threshold)
def setThreshold(self, threshold, channel):
if isinstance(channel, (list, tuple)):
# if given a list of channels, iterate
if not isinstance(threshold, (list, tuple)):
threshold = [threshold] * len(channel)
# set for each value in threshold and channel
detected = []
for thisThreshold, thisChannel in zip(threshold, channel):
self.threshold[thisChannel] = thisThreshold
detected.append(
self._setThreshold(thisThreshold, channel=thisChannel)
)
return detected
else:
# otherwise, just do once
self.threshold[channel] = threshold
return self._setThreshold(threshold, channel)
def _setThreshold(self, threshold, channel):
raise NotImplementedError()
def resetTimer(self, clock=logging.defaultClock):
raise NotImplementedError()
def getThreshold(self, channel):
return self.threshold[channel]
def getState(self, channel):
# dispatch messages from parent
self.dispatchMessages()
# return state after update
return self.state[channel]
class PhotodiodeValidationError(BaseException):
pass
class ScreenBufferSampler(BasePhotodiodeGroup):
def __init__(self, win, threshold=125, pos=None, size=None, units=None):
# store win
self.win = win
# default rect
self.rect = None
# initialise base class
BasePhotodiodeGroup.__init__(
self, channels=1, threshold=threshold, pos=pos, size=size, units=units
)
# make clock
from psychopy.core import Clock
self.clock = Clock()
def _setThreshold(self, threshold, channel=None):
self._threshold = threshold
def getThreshold(self, channel=None):
return self._threshold
def dispatchMessages(self):
"""
Check the screen for changes and dispatch events as appropriate
"""
from psychopy.visual import Window
# if there's no window, skip
if not isinstance(self.win, Window):
return
# get rect
left, bottom = self._pos.pix + self.win.size / 2
w, h = self._size.pix
left = int(left - w / 2)
bottom = int(bottom - h / 2)
w = int(w)
h = int(h)
# read front buffer luminances for specified area
pixels = self.win._getPixels(
buffer="front",
rect=(left, bottom, w, h),
makeLum=True
)
# work out whether mean luminance exceeds the (inverted, 0-255) threshold
state = pixels.mean() > (255 - self.getThreshold())
# if state has changed, make an event
if state != self.state[0]:
if self.win._frameTimes:
frameT = logging.defaultClock.getTime() - self.win._frameTimes[-1]
else:
frameT = 0
resp = PhotodiodeResponse(
t=self.clock.getTime() - frameT,
value=state,
channel=0,
threshold=self._threshold
)
self.receiveMessage(resp)
def parseMessage(self, message):
"""
Events are created as PhotodiodeResponses, so parseMessage is not needed for
ScreenBufferValidator. Will return message unchanged.
"""
return message
def isSameDevice(self, other):
if isinstance(other, type(self)):
# if both objects are ScreenBufferSamplers, then compare windows
return other.win is self.win
elif isinstance(other, dict):
# if other is a dict of params and win is "Session.win", it's gotta be the same
# window as Session can only currently have one window
if other.get('win', None) == "session.win":
return True
# otherwise, compare window to the win param
return other.get('win', None) is self.win
else:
# if types don't match up, it's not the same device
return False
@staticmethod
def getAvailableDevices():
return [{
'deviceName': "Photodiode Emulator (Screen Buffer)",
'deviceClass': "psychopy.hardware.photodiode.ScreenBufferSampler",
'win': "session.win"
}]
def resetTimer(self, clock=logging.defaultClock):
self.clock._timeAtLastReset = clock._timeAtLastReset
self.clock._epochTimeAtLastReset = clock._epochTimeAtLastReset
@property
def pos(self):
if self.units and hasattr(self._pos, self.units):
return getattr(self._pos, self.units)
@pos.setter
def pos(self, value):
# retain None so value is identifiable as not set
if value is None:
self._pos = layout.Position(
(16, 16), "pix", win=self.win
)
return
# make sure we have a Position object
if not isinstance(value, layout.Position):
value = layout.Position(
value, self.units, win=self.win
)
# set
self._pos = value
@property
def size(self):
if self.units and hasattr(self._size, self.units):
return getattr(self._size, self.units)
@size.setter
def size(self, value):
# retain None so value is identifiable as not set
if value is None:
self._size = layout.Size(
(16, 16), "pix", win=self.win
)
return
# make sure we have a Size object
if not isinstance(value, layout.Size):
value = layout.Size(
value, self.units, win=self.win
)
# set
self._size = value
@property
def units(self):
units = None
if hasattr(self, "_units"):
units = self._units
return units
@units.setter
def units(self, value):
self._units = value
def findPhotodiode(self, win=None, channel=0):
if win is None:
win = self.win
else:
self.win = win
# handle None
if channel is None:
channel = 0
# there's no physical photodiode, so just pick a reasonable place for it
self._pos = layout.Position((0.95, -0.95), units="norm", win=win)
self._size = layout.Size((0.05, 0.05), units="norm", win=win)
self.units = "norm"
return self._pos, self._size
def findThreshold(self, win=None, channel=0):
if win is None:
win = self.win
else:
self.win = win
# handle None
if channel is None:
channel = 0
# there's no physical photodiode, so just pick a reasonable threshold
self.setThreshold(127, channel=channel)
return self.getThreshold(channel=channel)
class PhotodiodeValidator:
def __init__(
self, win, diode, channel=None,
variability=1/60,
report="log",
autoLog=False):
# set autolog
self.autoLog = autoLog
# store window handle
self.win = win
# store diode handle
self.diode = diode
self.channel = channel
# store method of reporting
self.report = report
# set acceptable variability
self.variability = variability
from psychopy import visual
# black rect which is always drawn on win flip
self.offRect = visual.Rect(
win,
fillColor="black",
depth=1, autoDraw=True,
autoLog=False
)
# white rect which is only drawn when target stim is, and covers black rect
self.onRect = visual.Rect(
win,
fillColor="white",
depth=0, autoDraw=False,
autoLog=False
)
# update rects to match diode
self.updateRects()
def connectStimulus(self, stim):
# store mapping of stimulus to self in window
self.win.validators[stim] = self
stim.validator = self
def draw(self):
self.onRect.draw()
def updateRects(self):
"""
Update the size and position of this validator's rectangles to match the size and position of the associated
diode.
"""
for rect in (self.onRect, self.offRect):
# set units from diode
rect.units = self.diode.units
# set pos from diode, or choose default if None
if self.diode.pos is not None:
rect.pos = self.diode.pos
else:
rect.pos = layout.Position((0.95, -0.95), units="norm", win=self.win)
# set size from diode, or choose default if None
if self.diode.size is not None:
rect.size = self.diode.size
else:
rect.size = layout.Size((0.05, 0.05), units="norm", win=self.win)
def validate(self, state, t=None):
"""
Confirm that stimulus was shown/hidden at the correct time, to within an acceptable margin of variability.
Parameters
----------
state : bool
State which the photodiode is expected to have been in
t : clock.Timestamp, visual.Window or None
Time at which the photodiode should have read the given state.
Returns
-------
bool
True if photodiode state matched requested state, False otherwise.
"""
# if there's no time to validate, return empty handed
if t is None:
return None, None
# get and clear responses
messages = self.diode.getResponses(state=state, channel=self.channel, clear=True)
# if there have been no responses yet, return empty handed
if not messages:
return None, None
# if there are responses, get most recent timestamp
lastTime = messages[-1].t
# if there's no time on the last message, return empty handed
if lastTime is None:
return None, None
# validate
valid = abs(lastTime - t) < self.variability
# construct message to report
validStr = "within acceptable variability"
if not valid:
validStr = "not " + validStr
logMsg = (
"Photodiode expected to receive {state} within {variability}s of {t}s. Actually received {state} at "
"{lastTime}. This is {validStr}."
).format(
state=state, variability=self.variability, t=t, lastTime=lastTime, validStr=validStr
)
# report as requested
if self.report in ("log",):
# if report mode is log or error, log result
logging.debug(logMsg)
if self.report in ("err", "error") and not valid:
# if report mode is error, raise error for invalid
err = PhotodiodeValidationError(logMsg)
logging.error(err)
raise err
if callable(self.report):
# if self.report is a method, call it with args state, t, valid and logMsg
self.report(state, t, valid, logMsg)
# return timestamp and validity
return lastTime, valid
def resetTimer(self, clock=logging.defaultClock):
self.diode.resetTimer(clock=clock)
def getDiodeState(self):
return self.diode.getState()
@staticmethod
def onValid(isWhite):
pass
@staticmethod
def onInvalid(isWhite):
msg = "Stimulus validation failed. "
if isWhite:
msg += "Stimulus drawn when not expected."
else:
msg += "Stimulus not drawn when expected."
raise AssertionError(msg)
| 30,750 | Python | .py | 777 | 27.976834 | 118 | 0.562981 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,976 | emotiv.py | psychopy_psychopy/psychopy/hardware/emotiv.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for EMOTIV devices such as button boxes.
These are optional components that can be obtained by installing the
`psychopy-emotiv` extension into the current environment.
"""
import psychopy.logging as logging
try:
from psychopy_emotiv import (
Cortex,
CortexApiException,
CortexNoHeadsetException,
CortexTimingException)
except (ModuleNotFoundError, ImportError, NameError):
logging.error(
"Support for Emotiv hardware is not available this session. Please "
"install `psychopy-emotiv` and restart the session to enable support.")
except Exception as e:
logging.error(
"Error encountered while loading `psychopy-emotiv`. Check logs for "
"more information.")
if __name__ == "__main__":
pass
| 1,017 | Python | .py | 26 | 34.807692 | 79 | 0.730964 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,977 | keyboard.py | psychopy_psychopy/psychopy/hardware/keyboard.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""To handle input from keyboard (supersedes event.getKeys)
The Keyboard class was new in PsychoPy 3.1 and replaces the older
`event.getKeys()` calls.
Psychtoolbox versus event.getKeys
------------------------------------
On 64-bit Python 3 installations it provides access to the
`Psychtoolbox kbQueue <http://psychtoolbox.org/docs/KbQueueCreate>`_ series of
functions using the same compiled C code (available in python-psychtoolbox lib).
On 32-bit installations and Python 2 it reverts to the older
:func:`psychopy.event.getKeys` calls.
The new calls have several advantages:
- the polling is performed and timestamped asynchronously with the main thread
so that times relate to when the key was pressed, not when the call was made
- the polling is direct to the USB HID library in C, which is faster than
waiting for the operating system to poll and interpret those same packets
- we also detect the KeyUp events and therefore provide the option of returning
keypress duration
- on Linux and Mac you can also distinguish between different keyboard devices
(see :func:`getKeyboards`)
This library makes use, where possible of the same low-level asynchronous
hardware polling as in `Psychtoolbox <http://psychtoolbox.org/>`_
.. currentmodule:: psychopy.hardware.keyboard
Example usage
------------------------------------
.. code-block:: python
from psychopy.hardware import keyboard
from psychopy import core
kb = keyboard.Keyboard()
# during your trial
kb.clock.reset() # when you want to start the timer from
keys = kb.getKeys(['right', 'left', 'quit'], waitRelease=True)
if 'quit' in keys:
core.quit()
for key in keys:
print(key.name, key.rt, key.duration)
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import absolute_import, division, print_function
import json
from collections import deque
import sys
import psychopy.clock
from psychopy import logging
from psychopy.constants import NOT_STARTED
import time
import numpy as np
from psychopy.hardware.base import BaseResponseDevice, BaseResponse
from psychopy.hardware import DeviceManager
from psychopy.tools.attributetools import AttributeGetSetMixin
from psychopy.tools import systemtools as st
try:
import psychtoolbox as ptb
from psychtoolbox import hid
havePTB = True
except ImportError as err:
logging.warning(("Import Error: "
+ err.args[0]
+ ". Using event module for keyboard component."))
from psychopy import event
havePTB = False
defaultBufferSize = 10000
# default ptb flush_type, used by macOS & linux
_ptb_flush_type = 1
# monkey-patch bug in PTB keyboard where winHandle=0 is documented but crashes.
# Also set ptb _ptb_flush_type to 0 for win32.
if havePTB and sys.platform == 'win32':
from psychtoolbox import PsychHID
# make a new function where we set default win_handle to be None instead of 0
def _replacement_create_queue(self, num_slots=10000, flags=0, win_handle=None):
PsychHID('KbQueueCreate', self.device_number,
None, 0, num_slots, flags, win_handle)
# replace the broken function with ours
hid.Keyboard._create_queue = _replacement_create_queue
# On win32, flush_type must be 0 or events can get flushed before being processed
_ptb_flush_type = 0
class KeyPress(BaseResponse):
"""Class to store key presses, as returned by `Keyboard.getKeys()`
Unlike keypresses from the old event.getKeys() which returned a list of
strings (the names of the keys) we now return several attributes for each
key:
.name: the name as a string (matching the previous pyglet name)
.rt: the reaction time (relative to last clock reset)
.tDown: the time the key went down in absolute time
.duration: the duration of the keypress (or None if not released)
Although the keypresses are a class they will test `==`, `!=` and `in`
based on their name. So you can still do::
kb = KeyBoard()
# wait for keypresses here
keys = kb.getKeys()
for thisKey in keys:
if thisKey=='q': # it is equivalent to the string 'q'
core.quit()
else:
print(thisKey.name, thisKey.tDown, thisKey.rt)
"""
fields = ["t", "value", "duration"]
def __init__(self, code, tDown, name=None):
self.code = code
self.tDown = tDown
self.duration = None
self.rt = None
if KeyboardDevice._backend == 'event': # we have event.getKeys()
self.name = name
self.rt = tDown
elif KeyboardDevice._backend == 'ptb':
self.rt = tDown
if code not in keyNames and code in keyNames.values():
i = list(keyNames.values()).index(code)
code = list(keyNames.keys())[i]
if code not in keyNames:
logging.warning('Keypress was given unknown key code ({})'.format(code))
self.name = 'unknown'
else:
self.name = keyNames[code]
elif KeyboardDevice._backend == 'iohub':
if name is None:
name = code
self.name = name
# get value
value = self.name
if value is None:
value = self.code
BaseResponse.__init__(self, t=tDown, value=value)
def __eq__(self, other):
return self.name == other
def __ne__(self, other):
return self.name != other
def getKeyboards():
"""Get info about the available keyboards.
Only really useful on Mac/Linux because on these the info can be used to
select a particular physical device when calling :class:`Keyboard`. On Win
this function does return information correctly but the :class:`Keyboard`
can't make use of it.
Returns
----------
A list of dicts
USB Info including with name, manufacturer, id, etc for each device
"""
if havePTB:
indices, names, keyboards = hid.get_keyboard_indices()
return keyboards
return []
class Keyboard(AttributeGetSetMixin):
def __init__(self, deviceName=None, device=-1, bufferSize=10000, waitForStart=False, clock=None, backend=None):
if deviceName not in DeviceManager.devices:
# if no matching device is in DeviceManager, make a new one
self.device = DeviceManager.addDevice(
deviceClass="psychopy.hardware.keyboard.KeyboardDevice", deviceName=deviceName,
backend=backend, device=device, bufferSize=bufferSize, waitForStart=waitForStart,
clock=clock
)
else:
# otherwise, use the existing device
self.device = DeviceManager.getDevice(deviceName)
# starting value for status (Builder)
self.status = NOT_STARTED
# initiate containers for storing responses
self.keys = [] # the key(s) pressed
self.corr = 0 # was the resp correct this trial? (0=no, 1=yes)
self.rt = [] # response time(s)
self.time = [] # Epoch
@property
def clock(self):
return self.device.clock
@clock.setter
def clock(self, value):
self.device.clock = value
def getBackend(self):
return self.device.getBackend()
def setBackend(self, backend):
return self.device.setBackend(backend=backend)
def start(self):
return self.device.start()
def stop(self):
return self.device.stop()
def getKeys(self, keyList=None, ignoreKeys=None, waitRelease=True, clear=True):
return self.device.getKeys(
keyList=keyList, ignoreKeys=ignoreKeys, waitRelease=waitRelease, clear=clear
)
def getState(self, keys):
"""
Get the current state of a key or set of keys
Parameters
----------
keys : str or list[str]
Either the code for a single key, or a list of key codes.
Returns
-------
keys : bool or list[bool]
True if pressed, False if not. Will be a single value if given a
single key, or a list of bools if given a list of keys.
"""
return self.device.getState(
keys=keys
)
def waitKeys(self, maxWait=float('inf'), keyList=None, waitRelease=True,
clear=True):
return self.device.waitKeys(
maxWait=maxWait, keyList=keyList, waitRelease=waitRelease,
clear=clear
)
def clearEvents(self, eventType=None):
return self.device.clearEvents(eventType=eventType)
class KeyboardDevice(BaseResponseDevice, aliases=["keyboard"]):
"""
Object representing a physical keyboard device.
"""
responseClass = KeyPress
_backend = None
_iohubKeyboard = None
_ptbOffset = 0.0
_instance = None
def __new__(cls, *args, **kwargs):
# KeyboardDevice needs to function as a "singleton" as there is only one HID input and
# multiple devices would compete for presses
if cls._instance is None:
cls._instance = super(KeyboardDevice, cls).__new__(cls)
return cls._instance
def __del__(self):
# if one instance is deleted, reset the singleton instance so that the next
# initialisation recreates it
KeyboardDevice._instance = None
def __init__(self, device=-1, bufferSize=10000, waitForStart=False, clock=None, backend=None,
muteOutsidePsychopy=sys.platform != "linux"):
"""Create the device (default keyboard or select one)
Parameters
----------
device: int or dict
On Linux/Mac this can be a device index
or a dict containing the device info (as from :func:`getKeyboards`)
or -1 for all devices acting as a unified Keyboard
bufferSize: int
How many keys to store in the buffer (before dropping older ones)
waitForStart: bool (default False)
Normally we'll start polling the Keyboard at all times but you
could choose not to do that and start/stop manually instead by
setting this to True
muteOutsidePsychopy : bool
If True, then this KeyboardDevice won't listen for keypresses unless the currently
active window is a PsychoPy window. Default is True, unless on Linux (as detecting
window focus is significantly slower on Linux, potentially affecting timing).
"""
BaseResponseDevice.__init__(self)
global havePTB
# substitute None device for default device
if device is None:
device = -1
if self._backend is None and backend in ['iohub', 'ptb', 'event', '']:
KeyboardDevice._backend = backend
if self._backend is None:
KeyboardDevice._backend = ''
if backend and self._backend != backend:
logging.warning("keyboard.Keyboard already using '%s' backend. Can not switch to '%s'" % (self._backend,
backend))
if clock:
self.clock = clock
else:
self.clock = psychopy.clock.Clock()
if KeyboardDevice._backend in ['', 'iohub']:
from psychopy.iohub.client import ioHubConnection
from psychopy.iohub.devices import Computer
if not ioHubConnection.getActiveConnection() and KeyboardDevice._backend == 'iohub':
# iohub backend was explicitly requested, but iohub is not running, so start it up
# setting keyboard to use standard psychopy key mappings
from psychopy.iohub import launchHubServer
launchHubServer(Keyboard=dict(use_keymap='psychopy'))
if ioHubConnection.getActiveConnection() and KeyboardDevice._iohubKeyboard is None:
KeyboardDevice._iohubKeyboard = ioHubConnection.getActiveConnection().getDevice('keyboard')
KeyboardDevice._backend = 'iohub'
if KeyboardDevice._backend in ['', 'ptb'] and havePTB:
KeyboardDevice._backend = 'ptb'
KeyboardDevice._ptbOffset = self.clock.getLastResetTime()
# get the necessary keyboard buffer(s)
if sys.platform == 'win32':
self._ids = [-1] # no indexing possible so get the combo keyboard
else:
allInds, allNames, allKBs = hid.get_keyboard_indices()
if device == -1:
self._ids = allInds
elif type(device) in [list, tuple]:
self._ids = device
else:
self._ids = [device]
self._buffers = {}
self._devs = {}
for devId in self._ids:
# now we have a list of device IDs to monitor
if devId == -1 or devId in allInds:
buffer = _keyBuffers.getBuffer(devId, bufferSize)
self._buffers[devId] = buffer
self._devs[devId] = buffer.dev
# unless the user asked to wait for an explicit start(), begin polling now
if not waitForStart:
self.start()
if KeyboardDevice._backend in ['', 'event']:
global event
from psychopy import event
KeyboardDevice._backend = 'event'
logging.info('keyboard.Keyboard is using %s backend.' % KeyboardDevice._backend)
# array in which to store ongoing presses
self._keysStillDown = deque()
# set whether or not to mute any keypresses which happen outside of PsychoPy
self.muteOutsidePsychopy = muteOutsidePsychopy
def isSameDevice(self, other):
"""
Determine whether this object represents the same physical keyboard as a given other
object.
Parameters
----------
other : KeyboardDevice, dict
Other KeyboardDevice to compare against, or a dict of params
Returns
-------
bool
True if the two objects represent the same physical device
"""
# all Keyboards are the same device
return isinstance(other, (KeyboardDevice, dict))
@classmethod
def getBackend(self):
"""Return backend being used."""
return self._backend
@classmethod
def setBackend(self, backend):
"""
Set backend event handler. Returns currently active handler.
:param backend: 'iohub', 'ptb', 'event', or ''
:return: str
"""
if self._backend is None:
if backend in ['iohub', 'ptb', 'event', '']:
KeyboardDevice._backend = backend
else:
logging.warning("keyboard.KeyboardDevice.setBackend failed. backend must be one of %s"
% str(['iohub', 'ptb', 'event', '']))
if backend == 'event':
global event
from psychopy import event
else:
logging.warning("keyboard.KeyboardDevice.setBackend already using '%s' backend. "
"Can not switch to '%s'" % (self._backend, backend))
return self._backend
def start(self):
"""Start recording from this keyboard """
if KeyboardDevice._backend == 'ptb':
for buffer in self._buffers.values():
buffer.start()
def stop(self):
"""Start recording from this keyboard"""
if KeyboardDevice._backend == 'ptb':
logging.warning("Stopping key buffers but this could be dangerous if"
"other keyboards rely on the same.")
for buffer in self._buffers.values():
buffer.stop()
def close(self):
self.stop()
@staticmethod
def getAvailableDevices():
devices = []
for profile in st.getKeyboards():
devices.append({
'deviceName': profile.get('device_name', "Unknown Keyboard"),
'device': profile.get('index', -1),
'bufferSize': profile.get('bufferSize', 10000),
})
return devices
def getKeys(self, keyList=None, ignoreKeys=None, waitRelease=True, clear=True):
"""
Parameters
----------
keyList: list (or other iterable)
The keys that you want to listen out for. e.g. ['left', 'right', 'q']
waitRelease: bool (default True)
If True then we won't report any "incomplete" keypress, but all
reported presses will have a `duration`. If False then all
keypresses will be returned, but only those with a
corresponding release will contain a `duration` value (others will
have `duration=None`).
clear: bool (default True)
If False then keep the keypresses for further calls (leave the
buffer untouched)
Returns
-------
A list of :class:`Keypress` objects
"""
# dispatch messages
self.dispatchMessages()
# filter
keys = []
toClear = []
for i, resp in enumerate(self.responses):
# start off assuming we want the key
wanted = True
# if we're waiting on release, only store if it has a duration
wasRelease = hasattr(resp, "duration") and resp.duration is not None
if waitRelease:
wanted = wanted and wasRelease
else:
wanted = wanted and not wasRelease
# if we're looking for a key list, only store if it's in the list
if keyList:
if resp.value not in keyList:
wanted = False
# if we're ignoring some keys, never store if ignored
if ignoreKeys:
if resp.value in ignoreKeys:
wanted = False
# if we got this far and the key is still wanted and not present, add it to output
if wanted and not any(k is resp for k in keys):
keys.append(resp)
# if clear=True, mark wanted responses as toClear
if wanted and clear:
toClear.append(i)
# pop any responses marked as to clear
for i in sorted(toClear, reverse=True):
self.responses.pop(i)
return keys
def getState(self, keys):
"""
Get the current state of a key or set of keys
Parameters
----------
keys : str or list[str]
Either the code for a single key, or a list of key codes.
Returns
-------
keys : bool or list[bool]
True if pressed, False if not. Will be a single value if given a
single key, or a list of bools if given a list of keys.
"""
# if given a string, convert to a list
if isinstance(keys, str):
keys = [keys]
# start off False
state = [False] * len(keys)
if KeyboardDevice._backend == 'ptb':
# use ptb.Keyboard.check if backend is ptb
for buffer in self._buffers.values():
# get output from ptb
anyPressed, t, mat = buffer.dev.check()
# if got any key...
if mat.any():
# convert each key index to a key name
for i in np.where(mat.flatten())[0]:
# account for ptb's 1-based indexing
i = int(i) + 1
# get key name from index (or None if not applicable)
name = keyNames.get(i, None)
# check if it's on our list
if name in keys:
state[keys.index(name)] = True
elif KeyboardDevice._backend == 'iohub':
# get current state of ioHub keyboard
ioHubState = KeyboardDevice._iohubKeyboard.getCurrentDeviceState()
# iterate through pressed keys
for i in ioHubState.get("pressed_keys", {}):
# iohub returns strings - integerise
i = int(i)
# get key name from index (or None if not applicable)
name = keyNames.get(i, None)
# check if it's on our list
if name in keys:
state[keys.index(name)] = True
else:
# make a key state handler
handler = event.pyglet.window.key.KeyStateHandler()
# iterate through our list of keys
for i, key in enumerate(keys):
# if handler has an entry for the given key, it's pressed
state[i] = handler[getattr(event.pyglet.window.key, key.upper())]
# if state is a single value, remove list wrapper
if len(state) == 1:
state = state[0]
return state
def dispatchMessages(self):
if KeyboardDevice._backend == 'ptb':
for buffer in self._buffers.values():
# flush events for the buffer
buffer._flushEvts()
evts = deque(buffer._evts)
buffer._clearEvents()
# process each event
for evt in evts:
response = self.parseMessage(evt)
# if not a key up event, receive it
if response is not None:
self.receiveMessage(response)
elif KeyboardDevice._backend == 'iohub':
# get events from backend (need to reverse order)
key_events = KeyboardDevice._iohubKeyboard.getKeys(clear=True)
# parse and receive each event
for k in key_events:
kpress = self.parseMessage(k)
if kpress is not None:
self.receiveMessage(kpress)
else:
global event
name = event.getKeys(modifiers=False, timeStamped=True)
if len(name):
thisKey = self.parseMessage(name[0])
if thisKey is not None:
self.receiveMessage(thisKey)
def parseMessage(self, message):
"""
Parse a message received from a Keyboard backend to return a KeyPress object.
Parameters
----------
message
Original raw message from the keyboard backend
Returns
-------
KeyPress
Parsed message into a KeyPress object
"""
response = None
if KeyboardDevice._backend == 'ptb':
if message['down']:
# if message is from a key down event, make a new response
response = KeyPress(
code=message['keycode'],
tDown=message['time'] - logging.defaultClock.getLastResetTime()
)
response.rt = message['time'] - self.clock.getLastResetTime()
self._keysStillDown.append(response)
else:
# if message is from a key up event, alter existing response
for key in self._keysStillDown:
if key.code == message['keycode']:
response = key
# calculate duration
key.duration = message['time'] - key.tDown - logging.defaultClock.getLastResetTime()
# remove key from stillDown
self._keysStillDown.remove(key)
# stop processing keys as we're done
break
elif KeyboardDevice._backend == 'iohub':
if message.type == "KEYBOARD_PRESS":
# if message is from a key down event, make a new response
response = KeyPress(code=message.char, tDown=message.time, name=message.key)
response.rt = response.tDown - (
self.clock.getLastResetTime() - self._iohubKeyboard.clock.getLastResetTime())
self._keysStillDown.append(response)
else:
# if message is from a key up event, alter existing response
for key in self._keysStillDown:
if key.code == message.char:
response = key
# calculate duration
key.duration = message.time - key.tDown
# remove key from stillDown
self._keysStillDown.remove(key)
# stop processing keys as we're done
break
# if no matching press, make a new KeyPress object
if response is None:
response = KeyPress(code=message.char, tDown=message.time, name=message.key)
else:
# if backend is event, just add as str with current time
rt = self.clock.getTime()
response = KeyPress(code=None, tDown=rt, name=message)
response.rt = rt
return response
def waitKeys(self, maxWait=float('inf'), keyList=None, waitRelease=True,
clear=True):
"""Same as `~psychopy.hardware.keyboard.Keyboard.getKeys`,
but halts everything (including drawing) while awaiting keyboard input.
:Parameters:
maxWait : any numeric value.
Maximum number of seconds to wait for a keypress.
Default is float('inf') which simply waits forever.
keyList : **None** or []
Allows the user to specify a set of keys to check for.
Only keypresses from this set of keys will be removed from
the keyboard buffer. If the keyList is `None`, all keys will be
checked and the key buffer will be cleared completely.
NB, pygame doesn't return timestamps (they are always 0)
waitRelease: **True** or False
If True then we won't report any "incomplete" keypress, but all
reported presses will have a `duration`. If False then all
keypresses will be returned, but only those with a
corresponding release will contain a `duration` value (others will
have `duration=None`).
clear : **True** or False
Whether to clear the keyboard event buffer (and discard preceding
keypresses) before starting to monitor for new keypresses.
Returns None if times out.
"""
timer = psychopy.clock.Clock()
if clear:
self.clearEvents()
while timer.getTime() < maxWait:
keys = self.getKeys(keyList=keyList, waitRelease=waitRelease, clear=clear)
if keys:
return keys
psychopy.clock._dispatchWindowEvents() # prevent "app is not responding"
time.sleep(0.00001)
logging.data('No keypress (maxWait exceeded)')
return None
def clearEvents(self, eventType=None):
"""Clear the events from the Keyboard such as previous key presses"""
# clear backend buffers
if KeyboardDevice._backend == 'ptb':
for buffer in self._buffers.values():
buffer.flush() # flush the device events to the soft buffer
buffer._evts.clear()
buffer._keys.clear()
buffer._keysStillDown.clear()
elif KeyboardDevice._backend == 'iohub':
KeyboardDevice._iohubKeyboard.clearEvents()
else:
global event
event.clearEvents(eventType)
# clear dispatched responses
self.responses = []
logging.info("Keyboard events cleared", obj=self)
class _KeyBuffers(dict):
"""This ensures there is only one virtual buffer per physical keyboard.
There is an option to get_event() from PTB without clearing but right
now we are clearing when we poll so we need to make sure we have a single
virtual buffer."""
def getBuffer(self, kb_id, bufferSize=defaultBufferSize):
if kb_id not in self:
try:
self[kb_id] = _KeyBuffer(bufferSize=bufferSize,
kb_id=kb_id)
except FileNotFoundError as e:
if sys.platform == 'darwin':
# this is caused by a problem with SysPrefs
raise OSError("Failed to connect to Keyboard globally. "
"You need to add PsychoPy App bundle (or the "
"terminal if you run from terminal) to the "
"System Preferences/Privacy/Accessibility "
"(macOS <= 10.14) or "
"System Preferences/Privacy/InputMonitoring "
"(macOS >= 10.15).")
else:
raise (e)
return self[kb_id]
class _KeyBuffer(object):
"""This is our own local buffer of events with more control over clearing.
The user shouldn't use this directly. It is fetched from the _keybuffers
It stores events from a single physical device
It's built on a collections.deque which is like a more efficient list
that also supports a max length
"""
def __init__(self, bufferSize, kb_id):
self.bufferSize = bufferSize
self._evts = deque()
# create the PTB keyboard object and corresponding queue
allInds, names, keyboards = hid.get_keyboard_indices()
self._keys = deque()
self._keysStillDown = deque()
if kb_id == -1:
self.dev = hid.Keyboard() # a PTB keyboard object
else:
self.dev = hid.Keyboard(kb_id) # a PTB keyboard object
self.dev._create_queue(bufferSize, win_handle=None)
def flush(self):
"""Flushes and processes events from the device to this software buffer
"""
self._processEvts()
def _flushEvts(self):
while self.dev.flush(flush_type=_ptb_flush_type):
evt, remaining = self.dev.queue_get_event()
key = {}
key['keycode'] = int(evt['Keycode'])
key['down'] = bool(evt['Pressed'])
key['time'] = evt['Time']
self._evts.append(key)
def getKeys(self, keyList=[], ignoreKeys=[], waitRelease=True, clear=True):
"""Return the KeyPress objects from the software buffer
Parameters
----------
keyList : list of key(name)s of interest
ignoreKeys : list of keys(name)s to ignore if keylist is blank
waitRelease : if True then only process keys that are also released
clear : clear any keys (that have been returned in this call)
Returns
-------
A deque (like a list) of keys
"""
self._processEvts()
# if no conditions then no need to loop through
if not keyList and not waitRelease:
keyPresses = list(self._keysStillDown)
for k in list(self._keys):
if not any(x.name == k.name and x.tDown == k.tDown for x in keyPresses):
keyPresses.append(k)
if clear:
self._keys = deque()
self._keysStillDown = deque()
keyPresses.sort(key=lambda x: x.tDown, reverse=False)
return keyPresses
# otherwise loop through and check each key
keyPresses = deque()
for keyPress in self._keys:
if waitRelease and not keyPress.duration:
continue
if keyList and keyPress.name not in keyList:
continue
if ignoreKeys and keyPress.name in ignoreKeys:
continue
keyPresses.append(keyPress)
# clear keys in a second step (not during iteration)
if clear:
for key in keyPresses:
self._keys.remove(key)
return keyPresses
def _clearEvents(self):
self._evts.clear()
def start(self):
self.dev.queue_start()
def stop(self):
self.dev.queue_stop()
def _processEvts(self):
"""Take a list of events and convert to a list of keyPresses with
tDown and duration"""
self._flushEvts()
evts = deque(self._evts)
self._clearEvents()
for evt in evts:
if evt['down']:
newKey = KeyPress(code=evt['keycode'], tDown=evt['time'])
self._keys.append(newKey)
self._keysStillDown.append(newKey)
else:
for key in self._keysStillDown:
if key.code == evt['keycode']:
key.duration = evt['time'] - key.tDown
self._keysStillDown.remove(key)
break # this key is done
else:
# we found a key that was first pressed before reading
pass
_keyBuffers = _KeyBuffers()
keyNamesWin = {
49: '1', 50: '2', 51: '3', 52: '4', 53: '5',
54: '6', 55: '7', 56: '8', 57: '9', 48: '0',
65: 'a', 66: 'b', 67: 'c', 68: 'd', 69: 'e', 70: 'f',
71: 'g', 72: 'h', 73: 'i', 74: 'j', 75: 'k', 76: 'l',
77: 'm', 78: 'n', 79: 'o', 80: 'p', 81: 'q', 82: 'r',
83: 's', 84: 't', 85: 'u', 86: 'v', 87: 'w', 88: 'x',
89: 'y', 90: 'z',
97: 'num_1', 98: 'num_2', 99: 'num_3',
100: 'num_4', 101: 'num_5', 102: 'num_6', 103: 'num_7',
104: 'num_8', 105: 'num_9', 96: 'num_0',
112: 'f1', 113: 'f2', 114: 'f3', 115: 'f4', 116: 'f5',
117: 'f6', 118: 'f7', 119: 'f8', 120: 'f9', 121: 'f10',
122: 'f11', 123: 'f12',
145: 'scrolllock', 19: 'pause', 36: 'home', 35: 'end',
45: 'insert', 33: 'pageup', 46: 'delete', 34: 'pagedown',
37: 'left', 40: 'down', 38: 'up', 39: 'right', 27: 'escape',
144: 'numlock', 111: 'num_divide', 106: 'num_multiply',
8: 'backspace', 109: 'num_subtract', 107: 'num_add',
13: 'return', 222: 'pound', 161: 'lshift', 163: 'rctrl',
92: 'rwindows', 32: 'space', 164: 'lalt', 165: 'ralt',
91: 'lwindows', 93: 'menu', 162: 'lctrl', 160: 'lshift',
20: 'capslock', 9: 'tab', 223: 'quoteleft', 220: 'backslash',
188: 'comma', 190: 'period', 191: 'slash', 186: 'semicolon',
192: 'apostrophe', 219: 'bracketleft', 221: 'bracketright',
189: 'minus', 187: 'equal'
}
keyNamesMac = {
4: 'a', 5: 'b', 6: 'c', 7: 'd', 8: 'e', 9: 'f', 10: 'g', 11: 'h', 12: 'i',
13: 'j', 14: 'k', 15: 'l', 16: 'm', 17: 'n', 18: 'o', 19: 'p', 20: 'q',
21: 'r', 22: 's', 23: 't', 24: 'u', 25: 'v', 26: 'w', 27: 'x', 28: 'y',
29: 'z',
30: '1', 31: '2', 32: '3', 33: '4', 34: '5', 35: '6', 36: '7',
37: '8', 38: '9', 39: '0',
40: 'return', 41: 'escape', 42: 'backspace', 43: 'tab', 44: 'space',
45: 'minus', 46: 'equal',
47: 'bracketleft', 48: 'bracketright', 49: 'backslash', 51: 'semicolon',
52: 'apostrophe', 53: 'grave', 54: 'comma', 55: 'period', 56: 'slash',
57: 'lshift',
58: 'f1', 59: 'f2', 60: 'f3', 61: 'f4', 62: 'f5', 63: 'f6', 64: 'f7',
65: 'f8', 66: 'f9', 67: 'f10', 68: 'f11', 69: 'f12',
104: 'f13', 105: 'f14', 106: 'f15',
107: 'f16', 108: 'f17', 109: 'f18', 110: 'f19',
79: 'right', 80: 'left', 81: 'down', 82: 'up',
224: 'lctrl', 225: 'lshift', 226: 'loption', 227: 'lcommand',
100: 'function', 229: 'rshift', 230: 'roption', 231: 'rcommand',
83: 'numlock', 103: 'num_equal', 84: 'num_divide', 85: 'num_multiply',
86: 'num_subtract', 87: 'num_add', 88: 'num_enter', 99: 'num_decimal',
98: 'num_0', 89: 'num_1', 90: 'num_2', 91: 'num_3', 92: 'num_4',
93: 'num_5', 94: 'num_6', 95: 'num_7', 96: 'num_8', 97: 'num_9',
74: 'home', 75: 'pageup', 76: 'delete', 77: 'end', 78: 'pagedown',
}
keyNamesLinux = {
66: 'space', 68: 'f1', 69: 'f2', 70: 'f3', 71: 'f4', 72: 'f5',
73: 'f6', 74: 'f7', 75: 'f8', 76: 'f9', 77: 'f10', 96: 'f11', 97: 'f12',
79: 'scrolllock', 153: 'scrolllock', 128: 'pause', 119: 'insert', 111: 'home',
120: 'delete', 116: 'end', 113: 'pageup', 118: 'pagedown', 136: 'menu', 112: 'up',
114: 'left', 117: 'down', 115: 'right', 50: 'quoteleft',
11: '1', 12: '2', 13: '3', 14: '4', 15: '5', 16: '6', 17: '7', 18: '8', 19: '9', 20: '0',
21: 'minus', 22: 'equal', 23: 'backspace', 24: 'tab', 25: 'q', 26: 'w', 27: 'e', 28: 'r',
29: 't', 30: 'y', 31: 'u', 32: 'i', 33: 'o', 34: 'p', 35: 'bracketleft', 36: 'bracketright',
37: 'return', 67: 'capslock', 39: 'a', 40: 's', 41: 'd', 42: 'f', 43: 'g', 44: 'h', 45: 'j',
46: 'k', 47: 'l', 48: 'semicolon', 49: 'apostrophe', 52: 'backslash', 51: 'lshift',
95: 'less', 53: 'z', 54: 'x', 55: 'c', 56: 'v', 57: 'b', 58: 'n', 59: 'm',
60: 'comma', 61: 'period', 62: 'slash', 63: 'rshift', 38: 'lctrl', 65: 'lalt',
109: 'ralt', 106: 'rctrl', 78: 'numlock', 107: 'num_divide', 64: 'num_multiply',
83: 'num_subtract', 80: 'num_7', 81: 'num_8', 82: 'num_9', 87: 'num_add', 84: 'num_4',
85: 'num_5', 86: 'num_6', 88: 'num_1', 89: 'num_2', 90: 'num_3',
105: 'num_enter', 91: 'num_0', 92: 'num_decimal', 10: 'escape'
}
if sys.platform == 'darwin':
keyNames = keyNamesMac
elif sys.platform == 'win32':
keyNames = keyNamesWin
else:
keyNames = keyNamesLinux
| 37,702 | Python | .py | 815 | 35.206135 | 116 | 0.578229 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,978 | eyetracker.py | psychopy_psychopy/psychopy/hardware/eyetracker.py |
from psychopy.constants import STARTED, NOT_STARTED, PAUSED, STOPPED, FINISHED
from psychopy.alerts import alert
from psychopy import logging
from psychopy.iohub.devices import importDeviceModule
from psychopy.tools.attributetools import AttributeGetSetMixin
from copy import copy
import importlib
import sys
class EyetrackerControl(AttributeGetSetMixin):
def __init__(self, tracker, actionType="Start and Stop"):
self.tracker = tracker
self.actionType = actionType
self.status = NOT_STARTED
def start(self):
"""
Start recording
"""
# if previously at a full stop, clear events
if not self.tracker.isRecordingEnabled():
logging.exp("eyetracker.clearEvents()")
self.tracker.clearEvents()
# start recording
self.tracker.setRecordingState(True)
logging.exp("eyetracker.setRecordingState(True)")
def stop(self):
"""
Stop recording
"""
self.tracker.setRecordingState(False)
logging.exp("eyetracker.setRecordingState(False)")
@property
def currentlyRecording(self):
"""
Check if the eyetracker is currently recording
added for backwards compatibility, should be removed in future
"""
return self.tracker.isRecordingEnabled()
@property
def pos(self):
"""
Get the current position of the eyetracker
"""
return self.tracker.getPos()
def getPos(self):
return self.pos
class EyetrackerCalibration:
def __init__(self, win,
eyetracker, target,
units="height", colorSpace="rgb",
progressMode="time", targetDur=1.5, expandScale=1.5,
targetLayout="NINE_POINTS", randomisePos=True,
movementAnimation=False, targetDelay=1.0, textColor='Auto'
):
# Store params
self.win = win
self.eyetracker = eyetracker
self.target = target
self.progressMode = progressMode
self.targetLayout = targetLayout
self.randomisePos = randomisePos
self.textColor = textColor
self.units = units or self.win.units
self.colorSpace = colorSpace or self.win.colorSpace
# Animation
self.movementAnimation = movementAnimation
self.targetDelay = targetDelay
self.targetDur = targetDur
self.expandScale = expandScale
# Attribute to store data from last run
self.last = None
def __iter__(self):
"""Overload dict() method to return in ioHub format"""
tracker = self.eyetracker.getIOHubDeviceClass(full=True)
# split into package and class name
pkgName = ".".join(tracker.split(".")[:-1])
clsName = tracker.split(".")[-1]
# make sure pkgName is fully qualified
if not pkgName.startswith("psychopy.iohub.devices."):
pkgName = "psychopy.iohub.devices." + pkgName
# import package
pkg = importDeviceModule(pkgName)
# get tracker class
trackerCls = getattr(pkg, clsName)
# get self as dict
asDict = trackerCls.getCalibrationDict(self)
# return
for key, value in asDict.items():
yield key, value
def run(self):
tracker = self.eyetracker.getIOHubDeviceClass(full=True)
# Deliver any alerts as needed
if tracker == 'eyetracker.hw.sr_research.eyelink.EyeTracker':
if self.movementAnimation:
# Alert user that their animation params aren't used
alert(code=4520, strFields={"brand": "EyeLink"})
elif tracker == 'eyetracker.hw.gazepoint.gp3.EyeTracker':
if not self.progressMode == "time":
# As GazePoint doesn't use auto-pace, alert user
alert(4530, strFields={"brand": "GazePoint"})
# Minimise PsychoPy window
if self.win._isFullScr and sys.platform == 'win32':
self.win.winHandle.set_fullscreen(False)
self.win.winHandle.minimize()
# Run
self.last = self.eyetracker.runSetupProcedure(dict(self))
# Bring back PsychoPy window
if self.win._isFullScr and sys.platform == 'win32':
self.win.winHandle.set_fullscreen(True)
self.win.winHandle.maximize()
# Not 100% sure activate is necessary, but does not seem to hurt.
self.win.winHandle.activate()
# SS: Flip otherwise black screen has been seen, not sure why this just started....
self.win.flip()
| 4,614 | Python | .py | 113 | 31.575221 | 91 | 0.63967 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,979 | listener.py | psychopy_psychopy/psychopy/hardware/listener.py |
import sys
import threading
import time
from psychopy import logging
class ListenerLoop(threading.Thread):
"""
Asynchronous execution loop to continuously poll a device for new messages. Not recommended if using listeners
within an experiment.
Attributes
----------
devices : list[BaseDevice]
Devices whose messages to dispatch on each iteration of the loop.
refreshRate : float
How long (s) to sleep between iterations of the loop
maxTime : float
Maximum time (s) which this loop is allowed to run for, after this time limit is reached the loop will end.
"""
def __init__(self):
self.devices = []
# placeholder values for function params
self.refreshRate = self.maxTime = None
# set initial alive and active states
self._alive = False
self._active = False
# initialise base Thread
threading.Thread.__init__(self, target=self.dispatchLoop, daemon=True)
def addDevice(self, device):
"""
Add a device to this loop.
Parameters
----------
device : BaseDevice
Device to add
"""
if device not in self.devices:
self.devices.append(device)
def removeDevice(self, device):
"""
Remove a device from this loop
Parameters
----------
device : BaseDevice
Device to remove
"""
if device in self.devices:
logging.info(f"Removed from listener loop: {device}")
i = self.devices.index(device)
self.devices.pop(i)
else:
logging.error(f"Could not remove from listener loop: {device} not in {self.devices}")
def start(self):
"""
Start the loop polling for new messages.
Returns
-------
bool
True if the loop was started successfully
"""
        # if already started, just make sure it's active and report success
        if self._alive:
            self._active = True
            return True
# set alive state
self._alive = True
self._active = True
# start the thread
threading.Thread.start(self)
# sleep so it has time to spin up
time.sleep(self.refreshRate)
# return confirmation of thread's alive status
return threading.Thread.is_alive(self)
def stop(self):
"""
Stop the loop polling for new messages.
Returns
-------
bool
True if the loop was stopped successfully
"""
# if already stopped, do nothing
if not self._alive:
return
# set alive status
self._alive = False
self._active = False
# sleep for 2 iterations so it has time to spin down
time.sleep(self.refreshRate * 2)
# return confirmation of thread's dead status
return not threading.Thread.is_alive(self)
def pause(self):
"""
Pause message polling, but continue looping.
Returns
-------
bool
True if the loop was paused successfully
"""
        self._active = False
        return True
def resume(self):
"""
Continue message polling if paused.
Returns
-------
bool
True if the loop was resumed successfully
"""
        self._active = True
        return True
def dispatchLoop(self):
"""
Function to make continuous calls to the device for responses.
"""
cont = self._alive
startTime = time.time()
logging.info("Starting listener loop.")
# until something says otherwise, continue
while cont:
# work out whether to continue
cont = self._alive
if self.maxTime is not None:
cont &= time.time() - startTime < self.maxTime
if not cont:
logging.info("Ending listener loop as max time has been reached")
# only dispatch messages if not paused
if self._active:
# dispatch messages from devices
for device in self.devices:
device.dispatchMessages()
# if there are no more devices attached, stop
if not len(self.devices):
self._active = False
            # sleep between iterations
time.sleep(self.refreshRate)
logging.info("Finished listener loop")
# make a global instance of ListenerLoop so all listeners can share the same loop
loop = ListenerLoop()
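# Illustrative sketch (assumption, not part of the original module): the shared
# loop is normally driven via BaseListener.startLoop, but it can be exercised
# directly; any object with a dispatchMessages() method will do as a "device":
#     loop.addDevice(device)
#     loop.refreshRate = 0.05  # poll every 50 ms
#     loop.maxTime = 60        # give up after a minute
#     loop.start()
#     ...
#     loop.stop()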
class BaseListener:
"""
Base class for a "Listener" object. Subclasses must implement the "receiveMessage" method.
Listeners can be attached to a node (such as a Button or Photodiode) and will receive duplicates of any messages
received by that node.
"""
def __init__(self):
# list in which to store responses (if implemented)
self.responses = []
# create threaded loop, but don't start unless asked to
global loop
self.loop = loop
def startLoop(self, device, refreshRate=0.1, maxTime=None):
"""
Start a threaded loop listening for responses
Parameters
----------
device : BaseDevice
Device whose messages to dispatch on each iteration of the loop.
refreshRate : float
            How long (s) to sleep between iterations of the loop
maxTime : float
Maximum time (s) which this loop is allowed to run for, after this time limit is reached the loop will end.
Returns
-------
bool
True if loop started successfully
"""
# set attributes of loop
self.loop.addDevice(device)
self.loop.refreshRate = refreshRate
self.loop.maxTime = maxTime
# start loop
return self.loop.start()
def stopLoop(self):
"""
Stop the dispatch loop. WARNING: Cannot be restarted.
Returns
-------
bool
            True if loop stopped successfully
"""
return self.loop.stop()
def receiveMessage(self, message):
"""
Method defining what to do when receiving a message. Must be implemented by subclasses.
Parameters
----------
message
Message received.
"""
raise NotImplementedError()
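# A minimal concrete listener, shown as an illustrative sketch (not part of
# the original module): only receiveMessage needs implementing, and the
# inherited `responses` list is a convenient place to collect messages.
class CollectingListener(BaseListener):
    """
    Listener which simply stores every message it receives (illustrative
    example only).
    """
    def receiveMessage(self, message):
        # keep the message for later inspection
        self.responses.append(message)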
class PrintListener(BaseListener):
"""
Listener which prints any responses to the given stream. Mostly useful for debugging.
Parameters
----------
stream
A file-like object to `print` responses to. Default value is sys.stdout (aka same place normal `print()`
statements are sent to).
style : str
What string format to print the output as? One of:
- "repr": Do nothing before printing - will be stringified by the object's __repr__ method (as normal)
- "str": Call str() on the output before printing
- "json": Attempt to convert output to a JSON string (first looking for a getJSON method, then using json.dumps)
"""
def __init__(self, stream=sys.stdout, style="repr"):
# init base class
BaseListener.__init__(self)
# store handle of stream
self.stream = stream
# store output style
self.style = style
def receiveMessage(self, message):
"""
On receiving a message, print it.
"""
# store message
self.responses.append(message)
# process output to desired print style
if self.style == "str":
# stringify
message = str(message)
if self.style == "json":
# convert to json
if hasattr(message, "getJSON"):
message = message.getJSON()
else:
message = {
'type': "hardware_response",
'class': "Unknown",
'data': str(message)
}
# print message
print(message, file=self.stream)
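# Usage sketch (assumption, illustrative only): attach a PrintListener to any
# response device and echo its messages as JSON; `device` stands in for a real
# device handle obtained elsewhere:
#     listener = PrintListener(style="json")
#     device.addListener(listener, startLoop=True)
#     ...
#     device.clearListeners()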
class LoggingListener(BaseListener):
"""
Listener which writes any responses to the given log file. Mostly useful for debugging.
Parameters
----------
file : logging.LogFile
Log file to write messages to. Default will be the root logger.
level : int
Logging level to log messages as, can be one of the constants from psychopy.logging. Default is logging.DEBUG.
"""
def __init__(self, file=logging.root, level=logging.DEBUG):
# init base class
BaseListener.__init__(self)
# store params
self.file = file
self.level = level
def receiveMessage(self, message):
"""
On receiving a message, log it.
"""
# append
self.responses.append(message)
self.file.logger.log(message, level=self.level)
class LiaisonListener(BaseListener):
"""
Listener which sends any messages to a Liaison server.
Parameters
----------
liaison : psychopy.liaison.WebSocketServer
Liaison server to send messages to
"""
def __init__(self, liaison):
# init base class
BaseListener.__init__(self)
# store reference to liaison
self.liaison = liaison
def receiveMessage(self, message):
"""
On receiving message, send it to Liaison.
"""
# append
self.responses.append(message)
# stringify message
if hasattr(message, "getJSON"):
message = message.getJSON()
else:
message = {
'type': "hardware_response",
'class': "Unknown",
'data': str(message)
}
# send
self.liaison.broadcastSync(message)
size: 9,931 | language: Python | extension: .py | total_lines: 286 | avg_line_length: 25.594406 | max_line_length: 120 | alphanum_fraction: 0.592253 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,980 | file_name: minolta.py | file_path: psychopy_psychopy/psychopy/hardware/minolta.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for Minolta light-measuring devices.
These are optional components that can be obtained by installing the
`psychopy-minolta` extension into the current environment.
"""
from psychopy.tools.pkgtools import PluginStub
class CS100A(PluginStub, plugin="psychopy-minolta", doclink="https://psychopy.github.io/psychopy-minolta/coder/CS100A"):
pass
class LS100(PluginStub, plugin="psychopy-minolta", doclink="https://psychopy.github.io/psychopy-minolta/coder/LS100"):
pass
size: 725 | language: Python | extension: .py | total_lines: 14 | avg_line_length: 49.571429 | max_line_length: 120 | alphanum_fraction: 0.7849 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,981 | file_name: cedrus.py | file_path: psychopy_psychopy/psychopy/hardware/cedrus.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for Cedrus Corporation devices such as button boxes.
These are optional components that can be obtained by installing the
`psychopy-cedrus` extension into the current environment.
DEPRECATED:
This sub-package is out of date. Please use the Cedrus-written `pyxid2` package
instead (bundled with Standalone PsychoPy)::
import pyxid2
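    devices = pyxid2.get_xid_devices()  # e.g. enumerate attached XID devices (assumed standard pyxid2 call)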
"""
import psychopy.logging as logging
from psychopy.tools.pkgtools import PluginStub
class RB730(
PluginStub,
plugin="psychopy-cedrus",
doclink="https://psychopy.github.io/psychopy-cedrus/coder/RB730"
):
pass
if __name__ == "__main__":
pass
size: 853 | language: Python | extension: .py | total_lines: 23 | avg_line_length: 34.434783 | max_line_length: 79 | alphanum_fraction: 0.765281 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,982 | file_name: manager.py | file_path: psychopy_psychopy/psychopy/hardware/manager.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Hardware device management.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'deviceManager',
'getDeviceManager',
'DeviceManager',
'closeAllDevices'
]
from psychopy.tools import systemtools as st
from serial.tools import list_ports
from psychopy import logging
import atexit
import traceback
import importlib
import json
from pathlib import Path
__folder__ = Path(__file__).parent
class ManagedDeviceError(BaseException):
"""
Exception arising from a managed device, which will include information about the device from
DeviceManager.
"""
def __init__(self, msg, deviceName, traceback=None):
# create exception
BaseException.__init__(self, msg)
# store device name
self.deviceName = deviceName
# store traceback
if traceback is not None:
self.traceback = traceback
else:
self.traceback = self.__traceback__
def getJSON(self, asString=True):
tb = traceback.format_exception(type(self), self, self.traceback)
message = {
'type': "hardware_error",
'device': self.deviceName,
'msg': "".join(tb),
'context': getattr(self, "userdata", None)
}
# stringify if requested
if asString:
message = json.dumps(message)
return message
class DeviceManager:
"""Class for managing hardware devices.
An instance of this class is used to manage various hardware peripherals used by PsychoPy. It
can be used to access devices such as microphones, button boxes, and cameras though a common
interface. It can also be used to get information about available devices installed on the
system, such as their settings and capabilities prior to initializing them.
It is recommended that devices are initialized through the device manager rather than
directly. The device manager is responsible for keeping track of devices and ensuring that
they are properly closed when the program exits.
    This class is implemented as a singleton, so there is only one instance of it per session
    after it is initialized. The instance can be accessed through the global variable
`deviceManager` or by calling `getDeviceManager()`.
Any subclass of BaseDevice is added to DeviceManager's `.deviceClasses` list upon import,
so devices matching that class become available through `DeviceManager.getAvailableDevices(
"*")`.
"""
_instance = None # singleton instance
deviceClasses = [] # subclasses of BaseDevice which we know about
liaison = None
ioServer = None # reference to currently running ioHub ioServer object
devices = {} # devices stored
deviceAliases = {} # aliases lut to reference devices
aliases = {} # aliases to get device classes by
with (__folder__ / "knownDevices.json").open("rb") as f:
knownDevices = json.load(f) # dict of known device classes
def __new__(cls, liaison=None):
# when making a new DeviceManager, if an instance already exists, just return it
# this means that all DeviceManager handles are the same object
if cls._instance is None:
cls._instance = super(DeviceManager, cls).__new__(cls)
return cls._instance
def __init__(self, liaison=None):
# set liaison
if liaison is not None:
DeviceManager.liaison = liaison
# --- utility ---
def _getSerialPortsInUse(self):
"""Get serial ports that are being used and the names of the devices that are using them.
This will only work if the devices have a `portString` attribute, which
requires they inherit from `SerialDevice`.
Returns
-------
dict
Mapping of serial port names to the names of the devices that are
using them as a list.
"""
        ports = {}
        for devName, devObj in DeviceManager.devices.items():
            if hasattr(devObj, 'portString'):
                ports.setdefault(devObj.portString, []).append(devName)
        return ports
@staticmethod
def registerClassAlias(alias, deviceClass):
"""
        Register an alias to refer to a particular class, for convenience.
Parameters
----------
alias : str
Short, convenient string to refer to the class by. For example, "keyboard".
deviceClass : str
Full import path for the class, in PsychoPy, of the device. For example
`psychopy.hardware.keyboard.Keyboard`
"""
# if device class is an already registered alias, get the actual class str
deviceClass = DeviceManager._resolveAlias(deviceClass)
# register alias
DeviceManager.aliases[alias] = deviceClass
@staticmethod
def _resolveAlias(alias):
"""
Get a device class string from a previously registered alias. Returns the alias unchanged
if not found.
Parameters
----------
alias : str
Short, convenient string to refer to the class by. For example, "keyboard".
Returns
-------
str
Either the device class corresponding to the given alias, or the alias if there is none.
"""
return DeviceManager.aliases.get(alias, alias)
@staticmethod
def _resolveClassString(deviceClass):
"""
Get a `class` object from an import path string.
Parameters
----------
deviceClass : str
Full import path for the class, in PsychoPy, of the device. For example
`psychopy.hardware.keyboard.Keyboard`
Returns
-------
type
Class pointed to by deviceClass
"""
if deviceClass in (None, "*"):
# resolve "any" flags to BaseDevice
deviceClass = "psychopy.hardware.base.BaseDevice"
# get package and class names from deviceClass string
parts = deviceClass.split(".")
pkgName = ".".join(parts[:-1])
clsName = parts[-1]
# import package
try:
pkg = importlib.import_module(pkgName)
except:
raise ModuleNotFoundError(
f"Could not find module: {pkgName}"
)
# get class
cls = getattr(pkg, clsName)
return cls
@staticmethod
def activatePlugins(which="all"):
"""
Activate plugins, allowing them to be imported from PsychoPy and making device backends
from plugins visible to DeviceManager.
Parameters
----------
which : str
Which set of plugins to import, the same values as would be passed to
`psychopy.plugins.listPlugins`
Returns
-------
bool
True if completed successfully
"""
from psychopy.plugins import activatePlugins
activatePlugins(which=which)
# --- device management ---
@staticmethod
def addDevice(deviceClass, deviceName, *args, **kwargs):
"""
Calls the add method for the specified device type
Parameters
----------
deviceClass : str
Full import path for the class, in PsychoPy, of the device. For example
`psychopy.hardware.keyboard.Keyboard`
deviceName : str
Arbitrary name to store device under.
args : list
Whatever arguments would be passed to the device class's init function
kwargs : dict
Whatever keyword arguments would be passed to the device class's init function
Returns
-------
BaseDevice
Device created by the linked class init
"""
# if device class is an already registered alias, get the actual class str
deviceClass = DeviceManager._resolveAlias(deviceClass)
# get device class
cls = DeviceManager._resolveClassString(deviceClass)
try:
# initialise device
device = cls(*args, **kwargs)
except Exception as err:
# if initialization fails, generate a more informative ManagedDeviceError and return it
raise ManagedDeviceError(
msg=err.args[0],
deviceName=deviceName,
traceback=err.__traceback__,
)
# store device by name
DeviceManager.devices[deviceName] = device
return device
@staticmethod
def addDeviceFromParams(params):
"""
Similar to addDevice, but rather than accepting arguments and keyword arguments,
simply accepts a dict of params. This is useful when receiving parameters from Liaison,
communicating with a keyword-less language like JavaScript. This is relatively niche and
in most cases addDevice will work fine.
Parameters
----------
params : dict
Keyword arguments to be passed to DeviceManager.addDevice
Returns
-------
BaseDevice
Device created by the linked class init
"""
return DeviceManager.addDevice(**params)
@staticmethod
def addDeviceAlias(deviceName, alias):
"""
Store an already added device by an additional name
Parameters
----------
deviceName : str
Key by which the device to alias is currently stored.
alias
Alias to create
Returns
-------
bool
True if completed successfully
"""
# if given a list, call iteratively
if isinstance(alias, (list, tuple)):
for thisAlias in alias:
DeviceManager.addDeviceAlias(deviceName, thisAlias)
return True
# don't add alias if there's no device to alias!
if DeviceManager.getDevice(deviceName) is None:
raise KeyError(
f"Cannot add alias for {deviceName} as no device by this name has been added."
)
# store same device by new handle
DeviceManager.devices[alias] = DeviceManager.getDevice(deviceName)
DeviceManager.deviceAliases[alias] = deviceName
return True
@staticmethod
def registerPID(pid):
"""
Register a given window with PsychoPy, marking it as safe to e.g. perform keylogging in.
Parameters
----------
pid : str
Process ID (PID) of the window to register
Returns
-------
bool
True if completed successfully
"""
# register PID
st.registerPID(pid)
return True
@staticmethod
def removeDeviceAlias(alias):
"""
Removes a device alias from DeviceManager, but doesn't delete the object to which the alias
corresponds.
Parameters
----------
        alias : str
            Alias to remove (the aliased device itself is kept).
Returns
-------
bool
True if completed successfully
"""
DeviceManager.devices.pop(alias)
DeviceManager.deviceAliases.pop(alias)
return True
@staticmethod
def getDeviceAliases(deviceName):
"""
Get all aliases by which a device is known to DeviceManager
Parameters
----------
deviceName : str
One name by which the device is known to DeviceManager
Returns
-------
list[str]
All names by which the device is known to DeviceManager
"""
# get device object
obj = DeviceManager.getDevice(deviceName)
# array to store aliases in
aliases = []
# iterate through devices
for alias, aliasedDeviceName in DeviceManager.deviceAliases.items():
if deviceName == aliasedDeviceName:
aliases.append(alias)
return aliases
@staticmethod
def getAllDeviceAliases():
"""
Get all aliases by which each device is known to DeviceManager
Returns
-------
        dict[str, str]
Stored device aliases
"""
return DeviceManager.deviceAliases
@staticmethod
def updateDeviceName(oldName, newName):
"""
Store an already added device by an additional name
Parameters
----------
oldName : str
Key by which the device to alias is currently stored.
newName
Key to change to.
Returns
-------
bool
True if completed successfully
"""
# store same device by new handle
DeviceManager.addDeviceAlias(oldName, alias=newName)
# remove old name
DeviceManager.devices.pop(oldName)
return True
@staticmethod
def removeDevice(deviceName):
"""
Remove the device matching a specified device type and name.
Parameters
----------
deviceName : str
Arbitrary name device is stored under.
Returns
-------
bool
True if completed successfully
"""
# get device object
device = DeviceManager.devices[deviceName]
# clear any listeners on it
DeviceManager.clearListeners(deviceName)
# close it
if hasattr(device, "close"):
device.close()
# remove any child devices
_toRemove = []
for name, poss in DeviceManager.devices.items():
if hasattr(poss, "parent") and poss.parent is device:
_toRemove.append(name)
for name in _toRemove:
DeviceManager.removeDevice(name)
# remove device aliases
for alias in list(DeviceManager.deviceAliases.keys()):
if deviceName == DeviceManager.deviceAliases[alias]:
del DeviceManager.deviceAliases[alias]
# delete object
del DeviceManager.devices[deviceName]
return True
@staticmethod
def getDevice(deviceName):
"""
Get the device matching a specified device type and name.
Parameters
----------
deviceName : str
Arbitrary name device is stored under.
Returns
-------
BaseDevice
Matching device handle
"""
return DeviceManager.devices.get(deviceName, None)
@staticmethod
def hasDevice(deviceName, deviceClass="*"):
"""
Query whether the named device exists and, if relevant, whether it is an instance of the
expected class.
Parameters
----------
deviceName : str
Arbitrary name device is stored under.
deviceClass : str
Full import path for the class, in PsychoPy, of the device. For
example `psychopy.hardware.keyboard.Keyboard`
Returns
-------
bool
True if device exists and matches deviceClass, False otherwise
"""
# try to get device
device = DeviceManager.getDevice(deviceName)
# if device is None, we don't have it
if device is None:
return False
# if device class is an already registered alias, get the actual class str
deviceClass = DeviceManager._resolveAlias(deviceClass)
# get device class
cls = DeviceManager._resolveClassString(deviceClass)
# assess whether device matches class
return isinstance(device, cls)
@staticmethod
def getRequiredDeviceNamesFromExperiments(experiments):
"""
Get a list of device names referenced in a given set of experiments.
Parameters
----------
experiments : list[str or Path]
List of paths pointing to .psyexp files to investigate
Returns
-------
        dict[str, list[str]]
            Dict mapping each device name to a list of the device classes it is used to refer to
"""
from psychopy import experiment
# dict in which to store usages
usages = {}
def _process(emt):
"""
Process an element (Component or Routine) for device names and append them to the
usages dict.
Parameters
----------
emt : Component or Routine
Element to process
"""
# if we have a device name for this element...
if "deviceLabel" in emt.params:
# get init value so it lines up with boilerplate code
inits = experiment.getInitVals(emt.params)
# get value
deviceName = inits['deviceLabel'].val
# make sure device name is in usages dict
if deviceName not in usages:
usages[deviceName] = []
# add any new usages
for cls in getattr(emt, "deviceClasses", []):
if cls not in usages[deviceName]:
usages[deviceName].append(cls)
# process each experiment
for file in experiments:
# create experiment object
exp = experiment.Experiment()
exp.loadFromXML(file)
# iterate through routines
for rt in exp.routines.values():
if isinstance(rt, experiment.routines.BaseStandaloneRoutine):
# for standalone routines, get device names from params
_process(rt)
else:
# for regular routines, get device names from each component
for comp in rt:
_process(comp)
return usages
@staticmethod
def getDeviceBy(attr, value, deviceClass="*"):
"""
Get a device by the value of a particular attribute. e.g. get a Microphone device by its
index.
Parameters
----------
attr : str
Name of the attribute to query each device for
value : *
Value which the attribute should return for the device to be a match
deviceClass : str
Filter search only to devices of a particular class (optional)
Returns
-------
BaseDevice
Device matching given parameters, or None if none found
"""
# get devices by class
devices = DeviceManager.getInitialisedDevices(deviceClass=deviceClass)
# try each matching device
for dev in devices.values():
if hasattr(dev, attr):
# if device matches attribute, return it
if getattr(dev, attr) == value:
return dev
@staticmethod
def getInitialisedDevices(deviceClass="*"):
"""
Get all devices of a given type which have been `add`ed to this DeviceManager
Parameters
----------
deviceClass : str
Full import path for the class, in PsychoPy, of the device. For example
`psychopy.hardware.keyboard.Keyboard`
Returns
-------
        dict[str, BaseDevice]
Dict of devices matching requested class against their names
"""
# if device class is an already registered alias, get the actual class str
deviceClass = DeviceManager._resolveAlias(deviceClass)
# get actual device class from class str
cls = DeviceManager._resolveClassString(deviceClass)
foundDevices = {}
# iterate through devices and names
for name, device in DeviceManager.devices.items():
# add device to array if device class matches requested
if isinstance(device, cls) or deviceClass == "*":
foundDevices[name] = device
return foundDevices
@staticmethod
def getInitialisedDeviceNames(deviceClass="*"):
"""
Get names of all devices of a given type which have been `add`ed to this DeviceManager
Parameters
----------
deviceClass : str
Full import path for the class, in PsychoPy, of the device. For
example `psychopy.hardware.keyboard.Keyboard`
Returns
-------
list[str]
List of device names
"""
return list(DeviceManager.getInitialisedDevices(deviceClass))
@staticmethod
def getAvailableDevices(deviceClass="*"):
"""
Get all devices of a given type which are known by the operating system.
Parameters
----------
deviceClass : str or list
Full import path for the class, in PsychoPy, of the device. For example
`psychopy.hardware.keyboard.Keyboard`. If given a list, will run iteratively for all
items in the list.
Returns
-------
list[dict]
List of dicts specifying parameters needed to initialise each device.
"""
# if deviceClass is *, call for all types
if deviceClass == "*":
deviceClass = DeviceManager.deviceClasses
# if given multiple types, call for each
if isinstance(deviceClass, (list, tuple)):
devices = {}
for thisClass in deviceClass:
try:
devices[thisClass] = DeviceManager.getAvailableDevices(deviceClass=thisClass)
except NotImplementedError:
# ignore any NotImplementedErrors
pass
return devices
# if device class is an already registered alias, get the actual class str
deviceClass = DeviceManager._resolveAlias(deviceClass)
# get device class
cls = DeviceManager._resolveClassString(deviceClass)
        # make sure class has a getAvailableDevices method
assert hasattr(cls, "getAvailableDevices"), (
f"Could not get available devices of type `{deviceClass}` as device class does not "
f"have a `getAvailableDevices` method."
)
# use class method
devices = []
for profile in cls.getAvailableDevices():
# add device class
profile['deviceClass'] = deviceClass
# append
devices.append(profile)
return devices
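    # Usage sketch (assumption, illustrative only): profiles returned by
    # getAvailableDevices contain exactly the keyword arguments addDevice
    # needs, so a discovered device can be initialised straight from its
    # profile:
    #     profiles = DeviceManager.getAvailableDevices(
    #         "psychopy.hardware.speaker.SpeakerDevice")
    #     if profiles:
    #         DeviceManager.addDevice(**profiles[0])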
@staticmethod
def closeAll():
"""Close all devices.
Close all devices that have been initialized. This is usually called on exit to free
resources cleanly. It is not necessary to call this method manually as it is registered
as an `atexit` handler.
The device manager will be reset after this method is called.
Returns
-------
bool
True if completed successfully
"""
# iterate through devices
for name, device in DeviceManager.devices.items():
# if it has a close method, call it
if hasattr(device, "close"):
device.close()
# delete devices
DeviceManager.devices = {}
return True
@staticmethod
def callDeviceMethod(deviceName, method, *args, **kwargs):
"""
Call the method of a known device and return its output.
Parameters
----------
deviceName : str
Arbitrary name device is stored under.
method : str
Name of the method to run.
args : list
Whatever arguments would be passed to the method
kwargs : dict
Whatever keyword arguments would be passed to the method
Returns
-------
*
The output of the requested function
"""
# get device
device = DeviceManager.getDevice(deviceName)
# call method
return getattr(device, method)(*args, **kwargs)
@staticmethod
def addListener(deviceName, listener, startLoop=False):
"""
Add a listener to a managed device.
Parameters
----------
deviceName : str
Name of the device to add a listener to
listener : str or psychopy.hardware.listener.BaseListener
Either a Listener object, or use one of the following strings to create one:
- "liaison": Create a LiaisonListener with self.liaison as the server
- "print": Create a PrintListener with default settings
- "log": Create a LoggingListener with default settings
startLoop : bool
If True, then upon adding the listener, start up an asynchronous loop to dispatch
messages.
Returns
-------
BaseListener
Listener created/added
"""
# get device
device = DeviceManager.getDevice(deviceName)
# add listener to device
if hasattr(device, "addListener"):
listener = device.addListener(listener, startLoop=startLoop)
else:
raise AttributeError(
f"Could not add a listener to device {deviceName} ({type(device).__name__}) as it "
f"does not have an `addListener` method."
)
return listener
@staticmethod
def clearListeners(deviceName):
"""
Remove any listeners attached to a particular device.
Parameters
----------
deviceName : str
Name of the device to remove listeners from
Returns
-------
bool
True if completed successfully
"""
# get device
device = DeviceManager.getDevice(deviceName)
# add listener to device
if hasattr(device, "clearListeners"):
device.clearListeners()
return True
@staticmethod
def getResponseParams(deviceClass="*"):
"""
Get the necessary params for initialising a response for the given device.
Parameters
----------
deviceClass : str
Full import path for the class, in PsychoPy, of the device. For example
`psychopy.hardware.keyboard.Keyboard`. Use "*" to get for all currently imported
classes.
Returns
-------
list[str]
List of param names for the given device's response object
OR
        dict[str, list[str]]
Lists of param names for all devices' response objects, in a dict against device class
strings
"""
from psychopy.hardware.base import BaseResponseDevice, BaseDevice
if deviceClass == "*":
# if deviceClass is *, call for all types
params = {}
for deviceClass in DeviceManager.deviceClasses:
            # skip base classes (deviceClasses stores class path strings)
            if deviceClass in (
                    "psychopy.hardware.base.BaseResponseDevice",
                    "psychopy.hardware.base.BaseDevice"):
                continue
params[deviceClass] = DeviceManager.getResponseParams(deviceClass)
return params
# resolve class string
deviceClass = DeviceManager._resolveAlias(deviceClass)
deviceClass = DeviceManager._resolveClassString(deviceClass)
# if device isn't a ResponseDevice, return None
if not issubclass(deviceClass, BaseResponseDevice):
return
        # use inspect to get input params of response class
        import inspect
        args = list(inspect.signature(deviceClass.responseClass.__init__).parameters)
# remove "self" arg
if "self" in args:
args.remove("self")
return args
@staticmethod
def getScreenCount():
"""
Get the number of currently connected screens
Returns
-------
int
Number of screens
"""
import pyglet
# get screens
display = pyglet.canvas.Display()
allScrs = display.get_screens()
return len(allScrs)
@staticmethod
def showScreenNumbers(dur=5):
"""
Spawn some PsychoPy windows to display each monitor's number.
Parameters
----------
dur : float, int
How many seconds to show each window for
"""
from psychopy import visual
import time
# make a window on each screen showing the screen number
wins = []
lbls = []
bars = []
for n in range(DeviceManager.getScreenCount()):
# create a window on the appropriate screen
win = visual.Window(
pos=(0, 0),
size=(128, 128),
units="norm",
screen=n,
color="black",
checkTiming=False
)
wins.append(win)
# create textbox with screen num
lbl = visual.TextBox2(
win, text=str(n + 1),
size=1, pos=0,
alignment="center", anchor="center",
letterHeight=0.25, bold=True,
fillColor=None, color="white"
)
lbls.append(lbl)
# progress bar to countdown dur
bar = visual.Rect(
win, anchor="bottom left",
pos=(-1, -1), size=(0, 0.1),
fillColor='white'
)
bars.append(bar)
        # start a frame loop
        start = time.time()
        t = 0
        while t < dur:
            t = time.time() - start
            # update each window
            for win, lbl, bar in zip(wins, lbls, bars):
                # update progress bar
                bar.size = (t / dur * 2, 0.1)
                # draw
                bar.draw()
                lbl.draw()
                # flip
                win.flip()
        # close windows
        for win in wins:
            win.close()
# handle to the device manager, which is a singleton
deviceManager = DeviceManager()
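# Illustrative sketch (assumption, not part of the original module): adding a
# device through the manager and retrieving it by name. The class path follows
# the convention documented in addDevice and is assumed to be importable; the
# device name "exampleKeyboard" and alias "kb" are arbitrary.
def _exampleAddDevice():
    kb = deviceManager.addDevice(
        "psychopy.hardware.keyboard.KeyboardDevice", "exampleKeyboard"
    )
    # the same object is now retrievable by its name...
    assert deviceManager.getDevice("exampleKeyboard") is kb
    # ...and can be given additional aliases
    deviceManager.addDeviceAlias("exampleKeyboard", "kb")
    return kb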
def getDeviceManager():
"""Get the device manager.
Returns an instance of the device manager.
Returns
-------
DeviceManager
The device manager.
"""
global deviceManager
if deviceManager is None:
deviceManager = DeviceManager() # initialize
return deviceManager
def closeAllDevices():
"""Close all devices.
Close all devices that have been initialized. This is usually called on exit to free
    resources cleanly. It is not necessary to call this method manually as it's registered as an
`atexit` handler.
"""
devMgr = getDeviceManager()
if devMgr is not None:
devMgr.closeAll()
# register closeAllDevices as an atexit handler
atexit.register(closeAllDevices)
if __name__ == "__main__":
pass
size: 30,801 | language: Python | extension: .py | total_lines: 821 | avg_line_length: 27.539586 | max_line_length: 100 | alphanum_fraction: 0.60167 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,983 | file_name: __init__.py | file_path: psychopy_psychopy/psychopy/hardware/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import glob
from itertools import chain
from psychopy import logging
from . import eyetracker, listener
from .manager import DeviceManager, deviceManager
from .base import BaseDevice, BaseResponse, BaseResponseDevice
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
__all__ = [
'forp',
'cedrus',
'minolta',
'gammasci',
'pr',
'crs',
'iolab',
'eyetracker',
'deviceManager',
'listener'
]
def getSerialPorts():
"""Finds the names of all (virtual) serial ports present on the system
Returns
-------
list
Iterable with all the serial ports.
"""
if sys.platform == "darwin":
ports = [
'/dev/tty.USA*', # keyspan twin adapter is usually USA28X13P1.1
'/dev/tty.Key*', # some are Keyspan.1 or Keyserial.1
'/dev/tty.modem*',
'/dev/cu.usbmodem*', # for PR650
'/dev/tty.usbserial*', # for the 'Plugable' converter,
# according to Tushar Chauhan
]
elif sys.platform.startswith("linux"):
ports = [
"/dev/ttyACM?", # USB CDC devices (virtual serial ports)
"/dev/ttyUSB?", # USB to serial adapters using the
# usb-serial kernel module
"/dev/ttyS?", # genuine serial ports usually
# /dev/ttyS0 or /dev/ttyS1
]
elif sys.platform == "cygwin":
# I don't think anyone has actually tried this
# Cygwin maps the windows serial ports like this
ports = ["/dev/ttyS?", ]
elif sys.platform == "win32":
# While PsychoPy does support using numeric values to specify
# which serial port to use, it is better in this case to
        # provide a canonical name.
return map("COM{0}".format, range(11)) # COM0-10
else:
logging.error("We don't support serial ports on {0} yet!"
.format(sys.platform))
return []
# This creates an iterator for each glob expression. The glob
# expressions are then chained together. This is more efficient
# because it means we don't perform the lookups before we actually
# need to.
return chain.from_iterable(map(glob.iglob, ports))
def getAllPhotometers():
"""Gets all available photometers.
The returned photometers may vary depending on which drivers are installed.
Standalone PsychoPy ships with libraries for all supported photometers.
Returns
-------
list
A list of all photometer classes.
"""
from .photometer import getAllPhotometerClasses
# need values returned as a list for now
return getAllPhotometerClasses()
def getPhotometerByName(name):
"""Gets a Photometer class by name.
You can use either short names like 'pr650' or a long name like 'CRS
ColorCAL'.
Parameters
----------
name : str
The name of the device.
Returns
-------
object
Returns the photometer matching the passed in device name or `None` if
we were unable to find it.
"""
for photom in getAllPhotometers():
# longName is used from the GUI and driverFor is for coders
if name.lower() in photom.driverFor or name == photom.longName:
return photom
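# Usage sketch (assumption, illustrative only): look a class up by its short
# driver name, then open it on an explicit port; 'pr650' and 'COM3' are
# example values only:
#     PhotCls = getPhotometerByName('pr650')
#     if PhotCls is not None:
#         photom = PhotCls(port='COM3')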
def findPhotometer(ports=None, device=None):
"""Try to find a connected photometer/photospectrometer!
PsychoPy will sweep a series of serial ports trying to open them.
If a port successfully opens then it will try to issue a command to
the device. If it responds with one of the expected values then it
is assumed to be the appropriate device.
Parameters
----------
ports : list
A list of ports to search. Each port can be a string (e.g. 'COM1',
'/dev/tty.Keyspan1.1') or a number (for win32 comports only). If `None`
is provided then PsychoPy will sweep COM0-10 on Win32 and search known
likely port names on MacOS and Linux.
device : str
String giving expected device (e.g. 'PR650', 'PR655', 'CS100A', 'LS100',
'LS110', 'S470'). If this is not given then an attempt will be made to
find a device of any type, but this often fails.
Returns
-------
object or None
An object representing the first photometer found, `None` if the ports
didn't yield a valid response. `None` if there were not even any valid
ports (suggesting a driver not being installed.)
Examples
--------
Sweeps ports 0 to 10 searching for a PR655::
photom = findPhotometer(device='PR655')
print(photom.getLum())
if hasattr(photom, 'getSpectrum'):
# can retrieve spectrum (e.g. a PR650)
print(photom.getSpectrum())
"""
if isinstance(device, str):
photometers = [getPhotometerByName(device)]
elif isinstance(device, Iterable):
# if we find a string assume it is a name, otherwise treat it like a
# photometer
photometers = [getPhotometerByName(d)
if isinstance(d, str) else d
for d in device]
else:
photometers = getAllPhotometers()
# determine candidate ports
if ports is None:
ports = getSerialPorts()
elif type(ports) in (int, float, str):
ports = [ports] # so that we can iterate
# go through each port in turn
photom = None
logging.info('scanning serial ports...')
logging.flush()
for thisPort in ports:
logging.info('...{}'.format(thisPort))
logging.flush()
for Photometer in photometers:
# Looks like we got an invalid photometer, carry on
if Photometer is None:
continue
try:
photom = Photometer(port=thisPort)
except Exception as ex:
msg = "Couldn't initialize photometer {0}: {1}"
logging.error(msg.format(Photometer.__name__, ex))
# We threw an exception so we should just skip ahead
continue
if photom.OK:
logging.info(' ...found a %s\n' % (photom.type))
logging.flush()
# we're now sure that this is the correct device and that
# it's configured now increase the number of attempts made
# to communicate for temperamental devices!
if hasattr(photom, 'setMaxAttempts'):
photom.setMaxAttempts(10)
# we found one so stop looking
return photom
else:
if photom.com and photom.com.isOpen:
logging.info('closing port')
photom.com.close()
# If we got here we didn't find one
logging.info('...nope!\n\t')
logging.flush()
return None
if __name__ == "__main__":
pass
size: 7,038 | language: Python | extension: .py | total_lines: 182 | avg_line_length: 30.115385 | max_line_length: 80 | alphanum_fraction: 0.615802 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,984 | file_name: emulator.py | file_path: psychopy_psychopy/psychopy/hardware/emulator.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Software fMRI machine emulator.
Idea: Run or debug an experiment script using exactly the same code, i.e., for
both testing and online data acquisition. To debug timing, you can emulate sync
pulses and user responses.
Limitations: pyglet only; keyboard events only.
These are optional components that can be obtained by installing the
`psychopy-mri-emulator` extension into the current environment.
"""
from psychopy.tools.pkgtools import PluginStub
class SyncGenerator(
PluginStub,
plugin="psychopy-mri-emulator"
):
pass
class ResponseEmulator(
PluginStub,
plugin="psychopy-mri-emulator"
):
pass
class launchScan(
PluginStub,
plugin="psychopy-mri-emulator"
):
pass
if __name__ == "__main__":
pass
size: 985 | language: Python | extension: .py | total_lines: 31 | avg_line_length: 28.967742 | max_line_length: 79 | alphanum_fraction: 0.761194 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,985 | file_name: base.py | file_path: psychopy_psychopy/psychopy/hardware/base.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Base class for hardware device interfaces.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'BaseDevice'
]
import json
import inspect
import numpy as np
from psychopy import logging
class BaseResponse:
"""
Base class for device responses.
"""
# list of fields known to be a part of this response type
fields = ["t", "value"]
def __init__(self, t, value, device=None):
self.device = device
self.t = t
self.value = value
def __repr__(self):
# make key=val strings
attrs = []
for key in self.fields:
try:
attrs.append(f"{key}={getattr(self, key)}")
except:
continue
attrs = ", ".join(attrs)
# construct
try:
return f"<{type(self).__name__} from {self.getDeviceName()}: {attrs}>"
except:
return f"<{type(self).__name__}: {attrs}>"
def getDeviceName(self):
# if device isn't a device, and this method isn't overloaded, return None
if not hasattr(self.device, "getDeviceProfile"):
return None
# get profile
deviceProfile = self.device.getDeviceProfile()
# get name from profile
if "deviceName" in deviceProfile:
return deviceProfile['deviceName']
else:
# if profile doesn't include name, use class name
return type(self.device).__name__
def getJSON(self):
import json
# get device profile
deviceProfile = None
if hasattr(self.device, "getDeviceProfile"):
deviceProfile = self.device.getDeviceProfile()
# construct message as dict
message = {
'type': "hardware_response",
'class': type(self).__name__,
'device': deviceProfile,
'data': {}
}
# add all fields to "data"
for key in self.fields:
message['data'][key] = getattr(self, key)
# sanitize numpy arrays
if isinstance(message['data'][key], np.ndarray):
message['data'][key] = message['data'][key].tolist()
return json.dumps(message)
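# Illustrative sketch (assumption, not part of the original module): a concrete
# response type only needs to extend `fields` and store the extra attributes;
# __repr__ and getJSON above pick them up automatically. The class name and the
# `code` field are invented for illustration.
class ExampleKeyResponse(BaseResponse):
    fields = ["t", "value", "code"]

    def __init__(self, t, value, code, device=None):
        BaseResponse.__init__(self, t, value, device=device)
        self.code = code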
class BaseDevice:
"""
Base class for device interfaces, includes support for DeviceManager and adding listeners.
"""
# start off with no cached profile
_profile = None
def __init_subclass__(cls, aliases=None):
from psychopy.hardware.manager import DeviceManager
# handle no aliases
if aliases is None:
aliases = []
# if given a class, get its class string
mro = inspect.getmodule(cls).__name__ + "." + cls.__name__
# register aliases
for alias in aliases:
DeviceManager.registerClassAlias(alias, mro)
# store class string
DeviceManager.deviceClasses.append(mro)
return cls
def __eq__(self, other):
"""
For BaseDevice objects, the == operator functions as shorthand for isSameDevice
"""
return self.isSameDevice(other)
def getDeviceProfile(self):
"""
Generate a dictionary describing this device by finding the profile from
getAvailableDevices which represents the same physical device as this object.
Returns
-------
dict
Dictionary representing this device
"""
# only iteratively find it if we haven't done so already
if self._profile is None:
# get class string
cls = type(self)
mro = inspect.getmodule(cls).__name__ + "." + cls.__name__
# iterate through available devices for this class
for profile in self.getAvailableDevices():
if self.isSameDevice(profile):
# if current profile is this device, add deviceClass and return it
profile['deviceClass'] = mro
self._profile = profile
break
return self._profile
def getJSON(self, asString=True):
"""
Convert the output of getDeviceProfile to a JSON string.
Parameters
----------
asString : bool
If True, then the output will be converted to a string, otherwise will simply be a
JSON-friendly dict.
Returns
-------
str or dict
JSON string (or JSON friendly dict) of getDeviceProfile.
"""
profile = self.getDeviceProfile()
if asString:
profile = json.dumps(profile)
return profile
# the following methods must be implemented by subclasses of BaseDevice
def isSameDevice(self, other):
"""
Determine whether this object represents the same physical device as a given other object.
Parameters
----------
other : BaseDevice, dict
Other device object to compare against, or a dict of params.
Returns
-------
bool
True if the two objects represent the same physical device
"""
raise NotImplementedError(
"All subclasses of BaseDevice must implement the method `isSameDevice`"
)
@staticmethod
def getAvailableDevices():
"""
Get all available devices of this type.
Returns
-------
list[dict]
List of dictionaries containing the parameters needed to initialise each device.
"""
raise NotImplementedError(
"All subclasses of BaseDevice must implement the method `getAvailableDevices`"
)
class BaseResponseDevice(BaseDevice):
responseClass = BaseResponse
def __init__(self):
# list to store listeners in
self.listeners = []
# list to store responses in
self.responses = []
# indicator to mute outside of registered apps
self.muteOutsidePsychopy = False
def dispatchMessages(self):
"""
Method to dispatch messages from the device to any nodes or listeners attached.
"""
pass
def hasUnfinishedMessage(self):
"""
If there is a message which have been partially received but not finished (e.g.
getting the start of a message from a serial device but no end of line character
yet), this will return True.
If not implemented or not relevant on a given device (e.g. Keyboard, which only
sends full messages), this will always return False.
"""
return False
def parseMessage(self, message):
raise NotImplementedError(
"All subclasses of BaseDevice must implement the method `parseMessage`"
)
def receiveMessage(self, message):
"""
Method called when a parsed message is received. Includes code to send to any listeners and store the response.
Parameters
----------
message
Parsed message, should be an instance of this Device's `responseClass`
Returns
-------
bool
True if completed successfully
"""
# disregard any messages sent while the PsychoPy window wasn't in focus (for security)
from psychopy.tools.systemtools import isRegisteredApp
if self.muteOutsidePsychopy and not isRegisteredApp():
return
# make sure response is of the correct class
assert isinstance(message, self.responseClass), (
"{ownType}.receiveMessage() can only receive messages of type {targetType}, instead received "
"{msgType}. Try parsing the message first using {ownType}.parseMessage()"
).format(ownType=type(self).__name__, targetType=self.responseClass.__name__, msgType=type(message).__name__)
# add message to responses
self.responses.append(message)
# relay message to listener
for listener in self.listeners:
listener.receiveMessage(message)
# relay to log file
try:
logging.exp(
f"Device response: {message}"
)
except Exception as err:
logging.error(
f"Received a response from a {type(self).__name__} but couldn't print it: {err}"
)
return True
def makeResponse(self, *args, **kwargs):
"""
        Programmatically make a response on this device. The device won't necessarily physically register the
        response, but it will be stored on this object just as an actual response would be.
Parameters
----------
Function takes the same inputs as the response class for this device. For example, in KeyboardDevice, inputs
are code, tDown and name.
Returns
-------
BaseResponse
The response object created
"""
# create response
resp = self.responseClass(*args, **kwargs)
# receive response
self.receiveMessage(resp)
return resp
def clearResponses(self):
"""
Clear any responses stored on this Device.
Returns
-------
bool
True if completed successfully
"""
# try to dispatch messages
try:
self.dispatchMessages()
except:
pass
# clear resp list
self.responses = []
return True
def getListenerNames(self):
return [type(lsnr).__name__ for lsnr in self.listeners]
def addListener(self, listener, startLoop=False):
"""
Add a listener, which will receive all the same messages as this device.
Parameters
----------
listener : str or psychopy.hardware.listener.BaseListener
Either a Listener object, or use one of the following strings to create one:
- "liaison": Create a LiaisonListener with DeviceManager.liaison as the server
- "print": Create a PrintListener with default settings
- "log": Create a LoggingListener with default settings
startLoop : bool
If True, then upon adding the listener, start up an asynchronous loop to dispatch messages.
"""
from . import listener as lsnr
# dispatch existing events now (so listener doesn't get a lump of historic messages)
self.dispatchMessages()
# map listener classes to names
listenerClasses = {
'liaison': lsnr.LiaisonListener,
'print': lsnr.PrintListener,
'log': lsnr.LoggingListener
}
# if device already has a listener, log warning and skip
for extantListener in self.listeners:
# get class of requested listener
listenerCls = listenerClasses.get(listener, type(listener))
# if the same type as extant listener, return it rather than duplicating
if isinstance(extantListener, listenerCls):
return extantListener
# make listener if needed
if not isinstance(listener, lsnr.BaseListener):
# if given a string rather than an object handle, make an object of correct type
if listener == "liaison":
from psychopy.hardware import DeviceManager
if DeviceManager.liaison is None:
raise AttributeError(
"Cannot create a `liaison` listener as no liaison server is connected to DeviceManager."
)
listener = lsnr.LiaisonListener(DeviceManager.liaison)
elif listener in listenerClasses:
listener = listenerClasses[listener]()
else:
raise ValueError(f"No known listener type '{listener}'")
# add listener handle
self.listeners.append(listener)
# start loop if requested
if startLoop:
listener.startLoop(self)
return listener
def clearListeners(self):
"""
Remove any listeners from this device.
Returns
-------
bool
True if completed successfully
"""
# remove self from listener loop
for listener in self.listeners:
if self in listener.loop.devices:
listener.loop.removeDevice(self)
# clear list
self.listeners = []
return True
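# Illustrative sketch (assumption, not part of the original module): the
# minimal surface a concrete response device must provide. This fake device
# drains an in-memory queue instead of polling real hardware.
class ExampleQueueDevice(BaseResponseDevice):
    def __init__(self):
        BaseResponseDevice.__init__(self)
        self._queue = []  # stand-in for a hardware buffer

    def isSameDevice(self, other):
        # there is only ever one (virtual) queue device
        return isinstance(other, ExampleQueueDevice)

    @staticmethod
    def getAvailableDevices():
        return [{'deviceName': "ExampleQueue"}]

    def parseMessage(self, message):
        # raw messages are (t, value) tuples in this sketch
        t, value = message
        return BaseResponse(t, value, device=self)

    def dispatchMessages(self):
        # drain the queue, routing each raw message through receiveMessage so
        # listeners and the responses list are updated
        while self._queue:
            self.receiveMessage(self.parseMessage(self._queue.pop(0)))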
if __name__ == "__main__":
pass
size: 12,630 | language: Python | extension: .py | total_lines: 326 | avg_line_length: 28.941718 | max_line_length: 119 | alphanum_fraction: 0.606982 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,986 | file_name: iolab.py | file_path: psychopy_psychopy/psychopy/hardware/iolab.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for ioLab Systems button boxes.
These are optional components that can be obtained by installing the
`psychopy-iolabs` extension into the current environment.
"""
import psychopy.logging as logging
try:
from psychopy_iolabs import ButtonBox
except (ModuleNotFoundError, ImportError):
logging.error(
"Support for ioLab Systems hardware is not available this session. "
"Please install `psychopy-iolabs` and restart the session to enable "
"support.")
except Exception as e:
logging.error(
"Error encountered while loading `psychopy-iolabs`. Check logs for "
"more information.")
finally:
logging.warning(
"Support for ioLabs Systems hardware has been discontinued and will "
"likely break in the future."
)
if __name__ == "__main__":
pass
size: 1,067 | language: Python | extension: .py | total_lines: 28 | avg_line_length: 34 | max_line_length: 79 | alphanum_fraction: 0.726744 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,987 | file_name: qmix.py | file_path: psychopy_psychopy/psychopy/hardware/qmix.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for Cetoni neMESYS syringe pump systems.
These are optional components that can be obtained by installing the
`psychopy-qmix` extension into the current environment.
"""
import psychopy.logging as logging
try:
from psychopy_qmix import (
Pump,
_init_all_pumps,
_init_bus,
_checkSyringeTypes,
_PumpWrapperForBuilderComponent,
volumeUnits, # don't use module level constants in plugins
flowRateUnits,
configName,
bus,
pumps,
syringeTypes)
except (ModuleNotFoundError, ImportError):
logging.error(
"Support for the Cetoni neMESYS syringe pump system is not available "
"this session. Please install `psychopy-qmix` and restart the session "
"to enable support.")
except Exception as e:
logging.error(
"Error encountered while loading `psychopy-qmix`. Check logs for more "
"information.")
if __name__ == "__main__":
pass
size: 1,212 | language: Python | extension: .py | total_lines: 34 | avg_line_length: 30.205882 | max_line_length: 79 | alphanum_fraction: 0.695132 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,988 | file_name: labhackers.py | file_path: psychopy_psychopy/psychopy/hardware/labhackers.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""This provides basic LabHackers (www.labhackers.com) device classes.
These are optional components that can be obtained by installing the
`psychopy-labhackers` extension into the current environment.
"""
import psychopy.logging as logging
try:
from psychopy_labhackers import (
getDevices, getSerialPorts, getUSB2TTL8s, USB2TTL8)
except (ModuleNotFoundError, ImportError, NameError):
logging.error(
"Support for LabHackers hardware is not available this session. "
"Please install `psychopy-labhackers` and restart the session to "
"enable support.")
if __name__ == "__main__":
pass
size: 864 | language: Python | extension: .py | total_lines: 20 | avg_line_length: 39.65 | max_line_length: 79 | alphanum_fraction: 0.746714 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,989 | file_name: pr.py | file_path: psychopy_psychopy/psychopy/hardware/pr.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for Photo Research Inc. spectroradiometers.
These are optional components that can be obtained by installing the
`psychopy-photoresearch` extension into the current environment.
"""
from psychopy.tools.pkgtools import PluginStub
class PR650(PluginStub, plugin="psychopy-photoresearch", doclink="https://psychopy.github.io/psychopy-photoresearch/coder/PR650"):
pass
class PR655(PluginStub, plugin="psychopy-photoresearch", doclink="https://psychopy.github.io/psychopy-photoresearch/coder/PR655"):
pass
size: 760 | language: Python | extension: .py | total_lines: 14 | avg_line_length: 52.071429 | max_line_length: 130 | alphanum_fraction: 0.793758 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,990 | file_name: forp.py | file_path: psychopy_psychopy/psychopy/hardware/forp.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for Current Designs Inc. devices such as button boxes.
This class is only useful when the fORP is connected via the serial port. If
you're connecting via USB, just treat it like a standard keyboard. E.g., use a
Keyboard component, and typically listen for Allowed keys ``'1', '2', '3', '4',
'5'``. Or use ``event.getKeys()``.
These are optional components that can be obtained by installing the
`psychopy-curdes` extension into the current environment.
"""
import psychopy.logging as logging
try:
from psychopy_curdes import (
ButtonBox,
BUTTON_RED,
BUTTON_BLUE,
BUTTON_GREEN,
BUTTON_YELLOW,
BUTTON_TRIGGER,
BUTTON_MAP)
except (ModuleNotFoundError, ImportError):
logging.error(
"Support for Current Designs Inc. hardware is not available this "
"session. Please install `psychopy-curdes` and restart the session "
"to enable support.")
except Exception as e:
logging.error(
"Error encountered while loading `psychopy-curdes`. Check logs for "
"more information.")
if __name__ == "__main__":
pass
size: 1,358 | language: Python | extension: .py | total_lines: 34 | avg_line_length: 35.411765 | max_line_length: 79 | alphanum_fraction: 0.705167 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,991 | file_name: speaker.py | file_path: psychopy_psychopy/psychopy/hardware/speaker.py
from psychopy.hardware import BaseDevice, DeviceManager
from psychopy.sound import setDevice, getDevices, backend
from psychopy.tools import systemtools as st
from psychopy import logging
class SpeakerDevice(BaseDevice):
def __init__(self, index, channels=2, sampleRateHz=48000):
# store channels and sample rate
self.channels = channels
self.sampleRateHz = sampleRateHz
# placeholder values, in case none set later
self.deviceName = None
self.index = None
# try simple integerisation of index
if isinstance(index, str):
try:
index = int(index)
except ValueError:
pass
# get all playback devices
profiles = st.getAudioPlaybackDevices()
# if index is default, get default
if index in (-1, None):
if hasattr(backend, 'defaultOutput'):
# check if a default device is already set and update index
defaultDevice = backend.defaultOutput
if isinstance(defaultDevice, (int, float)):
# if a default device index is set, use it
index = defaultDevice
elif isinstance(defaultDevice, str):
# if a default device is set by name, find it
for profile in profiles.values():
if profile['name'] == defaultDevice:
index = profile['index']
else:
index = profiles[0]['index']
# find profile which matches index
for profile in profiles.values():
if index in (profile['index'], profile['name']):
self.index = int(profile['index'])
self.deviceName = profile['name']
# warn if channels / sample rate don't match
if profile['outputChannels'] != self.channels:
logging.warn(
f"Initialised speaker %(name)s with {self.channels} channels, but device "
f"reports having %(outputChannels)s channels. Sounds may fail to play."
% profile
)
if profile['defaultSampleRate'] != self.sampleRateHz:
logging.warn(
f"Initialised speaker %(name)s with sample rate of {self.sampleRateHz}Hz, "
f"but device reports sample rate of %(defaultSampleRate)sHz. Sounds may "
f"fail to play."
% profile
)
if self.index is None:
logging.error("No speaker device found with index {}".format(index))
def isSameDevice(self, other):
"""
Determine whether this object represents the same physical speaker as a given other object.
Parameters
----------
other : SpeakerDevice, dict
Other SpeakerDevice to compare against, or a dict of params (which must include
`index` as a key)
Returns
-------
bool
True if the two objects represent the same physical device
"""
if isinstance(other, SpeakerDevice):
# if given another object, get index
index = other.index
elif isinstance(other, dict) and "index" in other:
# if given a dict, get index from key
index = other['index']
else:
# if the other object is the wrong type or doesn't have an index, it's not this
return False
return index in (self.index, self.deviceName)
def testDevice(self):
"""
Play a simple sound to check whether this device is working.
"""
from psychopy.sound import Sound
import time
# create a basic sound
snd = Sound(
speaker=self,
value="A",
stereo=self.channels > 1,
sampleRate=self.sampleRateHz
)
# play the sound for 1s
snd.play()
time.sleep(1)
snd.stop()
@staticmethod
def getAvailableDevices():
devices = []
for profile in getDevices(kind="output").values():
# get index as a name if possible
index = profile.get('DeviceName', None)
if index is None:
index = profile.get('DeviceIndex', None)
device = {
                'deviceName': profile.get('DeviceName', "Unknown Speaker"),
'index': index,
'channels': int(profile.get('NrOutputChannels', 2)),
'sampleRateHz': int(profile.get('DefaultSampleRate', 48000))
}
devices.append(device)
return devices
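
# Usage sketch: open the default output and play a short test tone. Passing
# index=None (or -1) selects the default device, per the constructor above.
if __name__ == "__main__":
    speaker = SpeakerDevice(index=None)
    print("Using speaker:", speaker.deviceName, "(index %s)" % speaker.index)
    speaker.testDevice()  # plays note 'A' for ~1 s via psychopy.sound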
size: 4,799 | language: Python | extension: .py | total_lines: 112 | avg_line_length: 29.8125 | max_line_length: 99 | alphanum_fraction: 0.559802 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,992 | file_name: gammasci.py | file_path: psychopy_psychopy/psychopy/hardware/gammasci.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for Gamma-Scientific light-measuring devices.
Tested with S470, but should work with S480 and S490, too.
These are optional components that can be obtained by installing the
`psychopy-gammasci` extension into the current environment.
"""
from psychopy.tools.pkgtools import PluginStub
class S470(PluginStub, plugin="psychopy-gammasci", doclink="https://psychopy.github.io/psychopy-gammasci/coder/S470"):
pass
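
# Usage sketch: with `psychopy-gammasci` installed, the real S470 class
# replaces this stub. The port name and `getLum` call below are assumptions
# for illustration; check the linked plugin docs for the actual API.
#
#     from psychopy.hardware.gammasci import S470
#     photometer = S470(port='COM3')
#     print(photometer.getLum())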
size: 663 | language: Python | extension: .py | total_lines: 13 | avg_line_length: 49.076923 | max_line_length: 118 | alphanum_fraction: 0.786604 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,993 | file_name: __init__.py | file_path: psychopy_psychopy/psychopy/hardware/buttonbox/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions for button boxes.
This module serves as the entry point for plugin classes implementing
third-party button box interfaces. All installed interfaces are discoverable
by calling the :func:`getAllButtonBoxes()` function.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'BaseButtonBox',
'getAllButtonBoxes'
]
import sys
import psychopy.logging as logging
from psychopy.hardware.base import BaseDevice
# Interfaces for button boxes will be registered here until we get a proper
# base class to identify them by type within this module's scope.
bboxInterfaces = {}
# Import from legacy namespaces to maintain compatibility. These are loaded if
# optional components are installed.
# Cedrus
try:
from ..cedrus import RB730
except Exception:
RB730 = None
# ioLabs
try:
from ..iolab import ButtonBox as ioLabsButtonBox
except Exception: # NameError from dud pkg
ioLabsButtonBox = None
# Current Designs
try:
from ..forp import ButtonBox as curdesButtonBox
except Exception:
curdesButtonBox = None
class BaseButtonBox(BaseDevice):
"""Base class for button box interfaces.
This class defines the minimal interface for button box implementations.
All button box implementations should inherit from this class and override
its methods.
"""
_deviceName = u"" # name of the button box, shows up in menus
_deviceVendor = u"" # name of the manufacturer
def __init__(self, *args, **kwargs):
"""Initialize the button box interface.
"""
pass
@property
def deviceName(self):
"""Get the name of the button box.
Returns
-------
str
Name of the button box.
"""
return self._deviceName
@property
def deviceVendor(self):
"""Get the name of the manufacturer.
Returns
-------
str
Name of the manufacturer.
"""
return self._deviceVendor
def getAllButtonBoxes():
"""Get all button box interface classes.
Returns
-------
dict
Mapping of button box classes.
"""
# build a dictionary with names
foundBBoxes = {}
# classes from extant namespaces
optionalBBoxes = ('RB730', 'ioLabsButtonBox', 'curdesButtonBox')
for bboxName in optionalBBoxes:
bboxClass = getattr(sys.modules[__name__], bboxName)
if bboxClass is None: # not loaded if `None`
continue
logging.debug('Found button box class `{}`'.format(bboxName))
foundBBoxes[bboxName] = bboxClass
# Merge with classes from plugins. Duplicate names will be overwritten by
# the plugins.
foundBBoxes.update(bboxInterfaces)
return foundBBoxes.copy()
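
# Usage sketch: enumerate whichever button box interfaces are importable in
# this environment. Kept as a comment so importing this module stays
# side-effect free:
#
#     for name, cls in getAllButtonBoxes().items():
#         print(name, '->', cls)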
if __name__ == "__main__":
pass
size: 2,987 | language: Python | extension: .py | total_lines: 90 | avg_line_length: 28.1 | max_line_length: 79 | alphanum_fraction: 0.694241 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,994 | file_name: parallel.py | file_path: psychopy_psychopy/psychopy/hardware/triggerbox/parallel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Trigger interface for the parallel port.
This module provides a simple trigger interface using the computer's parallel
port.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'ParallelPortTrigger',
'getOpenParallelPorts',
'closeAllParallelPorts'
]
import sys

from psychopy import logging

from .base import BaseTriggerBox
# used to keep track of open parallel ports to avoid multiple objects accessing
# the same port
_openParallelPorts = {}
# parallel port addresses on Windows
WIN_PPORT_ADDRESSES = {
'LPT1': 0x0378,
'LPT2': 0x0278,
'LPT3': 0x0278
}
class ParallelPortTrigger(BaseTriggerBox):
"""Class for using the computer's parallel port as a trigger interface.
Parameters
----------
portAddress : int
Address of the parallel port.
"""
_deviceName = u"Parallel Port"
_deviceVendor = u"Open Science Tools Ltd."
def __init__(self, portAddress=0x378, **kwargs):
"""Initialize the parallel port trigger interface.
Parameters
----------
portAddress : int or None
Address of the parallel port. Specify `None` if you plan to open
the port later using :func:`open`.
"""
super().__init__(**kwargs)
self._portAddress = portAddress
self._parallelPort = None # raw interface to parallel port
if portAddress is not None:
self.open()
@staticmethod
def _setupPort():
"""Setup the parallel port interface.
This method will attempt to find a usable driver depending on your
platform. If the parallel port is already open, this method will do
nothing.
Returns
-------
object
Parallel port interface object.
"""
parallelPort = None
if sys.platform.startswith('linux'):
try:
from ._linux import PParallelLinux
except ImportError:
raise RuntimeError(
"The parallel port driver for Linux is not available. "
"Please install the parallel port driver interface "
"library `PParallelLinux` to use this feature.")
parallelPort = PParallelLinux
elif sys.platform == 'win32':
drivers = dict(
inpout32=('_inpout', 'PParallelInpOut'),
inpoutx64=('_inpout', 'PParallelInpOut'),
dlportio=('_dlportio', 'PParallelDLPortIO'))
from ctypes import windll
from importlib import import_module
for key, val in drivers.items():
driver_name, class_name = val
                try:
                    if not hasattr(windll, key):
                        # this driver's DLL is not present on the system
                        continue
                    parallelPort = getattr(
                        import_module('.' + driver_name, __name__), class_name)
                    break
                except (OSError, KeyError, NameError):
                    continue
if parallelPort is None:
logging.warning(
"psychopy.parallel has been imported but no parallel port "
"driver found. Install either inpout32, inpoutx64 or dlportio")
else:
logging.warning("psychopy.parallel has been imported on a Mac "
"(which doesn't have a parallel port?)")
return parallelPort
@staticmethod
def closeAllParallelPorts():
"""Close all open parallel ports.
This function will close all open parallel ports and remove them from the
list of open ports. Any objects that were using the closed ports will
raise an exception if they attempt to use the port.
This function can be registered as an `atexit` handler to ensure that all
parallel ports are closed when the program exits.
"""
for port in _openParallelPorts.values():
port.close()
_openParallelPorts.clear()
@staticmethod
def getOpenParallelPorts():
"""Get a list of open parallel port addresses.
Returns
-------
list
"""
return list(_openParallelPorts.keys())
def __hash__(self):
"""Get the hash value of the parallel port trigger interface.
Returns
-------
int
Hash value of the parallel port trigger interface.
"""
return hash(self.portAddress)
@staticmethod
def isSupported():
"""Check if platform support parallel ports.
This doesn't check if the parallel port is available, just if the
interface is supported on this platform.
Returns
-------
bool
True if the parallel port trigger interface is supported, False
otherwise.
"""
return sys.platform != 'darwin' # macs don't have parallel ports
def setPortAddress(self, portAddress):
"""Set the address of the parallel port.
        If the desired address is not in use by another object, the port will
        be closed and the address will be changed. Otherwise a `RuntimeError`
        will be raised.
Common port addresses on Windows::
LPT1 = 0x0378 or 0x03BC
LPT2 = 0x0278 or 0x0378
LPT3 = 0x0278
        On Linux, ports are specified as files in `/dev`::
/dev/parport0
Parameters
----------
portAddress : int
Address of the parallel port.
"""
if self.isOpen:
raise RuntimeError(
"Cannot change the port address while the port is open.")
# convert u"0x0378" into 0x0378
if isinstance(address, str) and address.startswith('0x'):
address = int(address, 16)
self._portAddress = portAddress
@property
def portAddress(self):
"""Get the address of the parallel port.
Returns
-------
int
Address of the parallel port.
"""
return self._portAddress
def open(self):
"""Open the parallel port.
This method will attempt to find a usable driver depending on your
platform. If the parallel port is already open, this method will do
nothing. You must set the port address using :func:`setPortAddress`
or the constructor before opening the port.
"""
if self._parallelPort is None:
self._parallelPort = _openParallelPorts.get(self.portAddress, None)
if self._parallelPort is None:
parallelInterface = ParallelPortTrigger._setupPort()
self._parallelPort = parallelInterface(self.portAddress)
_openParallelPorts[self.portAddress] = self._parallelPort
def close(self):
"""Close the parallel port.
"""
        if self._parallelPort is not None:
            del _openParallelPorts[self.portAddress]
            self._parallelPort = None
@property
def isOpen(self):
"""Check if the parallel port is open.
Returns
-------
bool
True if the parallel port is open, False otherwise.
"""
        return self._parallelPort is not None
def setData(self, data):
"""Set the data to be presented on the parallel port (one ubyte).
Alternatively you can set the value of each pin (data pins are pins 2-9
inclusive) using :func:`~psychopy.parallel.setPin`
Examples
--------
Writing data to the port::
parallel.setData(0) # sets all pins low
parallel.setData(255) # sets all pins high
parallel.setData(2) # sets just pin 3 high (remember that pin2=bit0)
parallel.setData(3) # sets just pins 2 and 3 high
You can also convert base 2 to int very easily in Python::
parallel.setData(int("00000011", 2)) # pins 2 and 3 high
parallel.setData(int("00000101", 2)) # pins 2 and 4 high
"""
if not self.isOpen:
raise RuntimeError("The parallel port is not open.")
self._parallelPort.setData(data)
def clearData(self):
"""Clear the data to be presented on the parallel port.
This method will set all pins to low.
"""
if not self.isOpen:
raise RuntimeError("The parallel port is not open.")
self._parallelPort.setData(0)
def setPin(self, pinNumber, state):
"""Set a desired pin to be high (1) or low (0).
Only pins 2-9 (incl) are normally used for data output::
parallel.setPin(3, 1) # sets pin 3 high
parallel.setPin(3, 0) # sets pin 3 low
"""
if not self.isOpen:
raise RuntimeError("The parallel port is not open.")
self._parallelPort.setPin(pinNumber, state)
def readPin(self, pinNumber):
"""Determine whether a desired (input) pin is high(1) or low(0).
Returns
-------
int
Pin state (1 or 0).
"""
if not self.isOpen:
raise RuntimeError("The parallel port is not open.")
return self._parallelPort.readPin(pinNumber)
def getPin(self, pinNumber):
"""Determine whether a desired (input) pin is high(1) or low(0).
Returns
-------
int
Pin state (1 or 0).
"""
return self.readPin(pinNumber)
def __del__(self):
"""Delete the parallel port trigger interface.
"""
self.close()
# ------------------------------------------------------------------------------
# Utility functions for parallel ports
#
def getOpenParallelPorts():
"""Get a list of open parallel port addresses.
Returns
-------
list
"""
return ParallelPortTrigger.getOpenParallelPorts()
def closeAllParallelPorts():
"""Close all open parallel ports.
This function will close all open parallel ports and remove them from the
list of open ports. Any objects that were using the closed ports will
raise an exception if they attempt to use the port.
This function can be registered as an `atexit` handler to ensure that all
parallel ports are closed when the program exits.
"""
ParallelPortTrigger.closeAllParallelPorts()
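
# Usage sketch: send a brief trigger pulse. The address 0x0378 (LPT1) is an
# assumption; check the port address on your machine. On Linux, pass a
# '/dev/parport*' style address instead.
#
#     import time, atexit
#     atexit.register(closeAllParallelPorts)   # tidy up on exit
#     trigger = ParallelPortTrigger(portAddress=0x0378)
#     trigger.setData(1)      # raise pin 2 (bit 0)
#     time.sleep(0.005)       # hold the pulse for ~5 ms
#     trigger.clearData()     # set all data pins low again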
if __name__ == "__main__":
pass
size: 10,530 | language: Python | extension: .py | total_lines: 268 | avg_line_length: 29.716418 | max_line_length: 83 | alphanum_fraction: 0.60452 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,995 | file_name: __init__.py | file_path: psychopy_psychopy/psychopy/hardware/triggerbox/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions for using trigger boxes.
Trigger boxes are used to send electrical signals to external devices. They are
typically used to synchronize the presentation of stimuli with the recording of
physiological data. This module provides a common interface for accessing
trigger boxes from within PsychoPy.
This module serves as the entry point for plugin classes implementing
third-party trigger box interfaces. All installed interfaces are discoverable
by calling the :func:`getAllTriggerBoxes()` function. To have your trigger box
interface discovered by PsychoPy, you need to create a Python module that
defines a class inheriting from :class:`BaseTriggerBox` and set its entry point
to ``psychopy.hardware.triggerbox`` in your plugin setup script or configuration
file.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'BaseTriggerBox',
'ParallelPortTrigger',
'getAllTriggerBoxes'
]
from psychopy.hardware.triggerbox.base import BaseTriggerBox
from psychopy.hardware.triggerbox.parallel import ParallelPortTrigger
def getAllTriggerBoxes():
"""Get all trigger box interface classes.
Returns
-------
dict
Mapping of trigger box classes.
"""
foundTriggerBoxes = {}
# todo: handle legacy names
# classes from this namespace
foundTriggerBoxes.update({
name: cls for name, cls in globals().items()
if isinstance(cls, type) and issubclass(cls, BaseTriggerBox)
and cls is not BaseTriggerBox}) # exclude `BaseTriggerBox`
return foundTriggerBoxes
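
# Usage sketch: discover installed trigger box interfaces, including any
# contributed by plugins registered under `psychopy.hardware.triggerbox`:
#
#     boxes = getAllTriggerBoxes()
#     print(sorted(boxes))    # e.g. ['ParallelPortTrigger', ...]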
if __name__ == "__main__":
pass
size: 1,798 | language: Python | extension: .py | total_lines: 42 | avg_line_length: 39.071429 | max_line_length: 81 | alphanum_fraction: 0.759472 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,996 | file_name: base.py | file_path: psychopy_psychopy/psychopy/hardware/triggerbox/base.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Base classes for trigger box interfaces.
Trigger boxes are used to send electrical signals to external devices. They are
typically used to synchronize the presentation of stimuli with the recording of
physiological data. This module provides a common interface for accessing
trigger boxes from within PsychoPy.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = ['BaseTriggerBox']
from psychopy.hardware.base import BaseDevice
class BaseTriggerBox(BaseDevice):
"""Base class for trigger box interfaces.
This class defines the minimal interface for trigger box implementations.
All trigger box implementations should inherit from this class and override
its methods.
"""
_deviceName = u"" # name of the trigger box, shows up in menus
_deviceVendor = u"" # name of the manufacturer
def __init__(self, *args, **kwargs):
"""Initialize the trigger box interface.
"""
pass
@property
def deviceName(self):
"""Get the name of the trigger box.
Returns
-------
str
Name of the trigger box.
"""
return self._deviceName
@property
def deviceVendor(self):
"""Get the name of the manufacturer.
Returns
-------
str
Name of the manufacturer.
"""
return self._deviceVendor
def getCapabilities(self, **kwargs):
"""Get the capabilities of the trigger box.
The returned dictionary contains information about the capabilities of
        the trigger box. The structure of the dictionary may vary between
trigger box implementations, so it is recommended to check if a key
exists before accessing it.
Returns
-------
dict
Capabilities of the trigger box. The names of the capabilities are
            the keys of the dictionary. The values are information related to
the specified capability.
Examples
--------
Check what the required baudrate of the device is:
useBaudrate = getCapabilities()['baudrate']
"""
return {}
def open(self, **kwargs):
"""Open a connection to the trigger box."""
pass
def close(self, **kwargs):
"""Close the trigger box."""
pass
@property
def isOpen(self):
"""Check if the trigger box connection is open.
Returns
-------
bool
True if the trigger box is open, False otherwise.
"""
return False
def __enter__(self):
"""Enter the context manager."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit the context manager."""
self.close()
def setData(self, data, **kwargs):
"""Set the data to be sent.
Parameters
----------
data : int
Data to be sent.
"""
pass
def getData(self, **kwargs):
"""Get the data to be sent.
Returns
-------
int
Data to be sent.
"""
pass
def setPin(self, pin, value, **kwargs):
"""Set the value of a pin.
Parameters
----------
pin : int
Pin number.
value : int
Value to be set.
"""
pass
def getPin(self, pin, **kwargs):
"""Read the value of a pin.
Parameters
----------
pin : int
Pin number.
Returns
-------
int
Value of the pin.
"""
pass
def __del__(self):
"""Clean up the trigger box."""
pass
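
# Usage sketch: the intended override pattern and context-manager use.
# `MyTriggerBox` is hypothetical; real implementations live in plugins.
#
#     class MyTriggerBox(BaseTriggerBox):
#         _deviceName = u"My Trigger Box"
#         _deviceVendor = u"Example Vendor"
#
#         def setData(self, data, **kwargs):
#             ...  # write `data` to the device here
#
#     with MyTriggerBox() as tb:   # close() is called on exit
#         tb.setData(1)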
if __name__ == "__main__":
pass
size: 3,929 | language: Python | extension: .py | total_lines: 125 | avg_line_length: 23.4 | max_line_length: 79 | alphanum_fraction: 0.584996 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,997 | file_name: __init__.py | file_path: psychopy_psychopy/psychopy/hardware/mouse/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions for interfacing with pointing devices (i.e. mice).
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'Mouse',
'MOUSE_BUTTON_LEFT',
'MOUSE_BUTTON_MIDDLE',
'MOUSE_BUTTON_RIGHT'
]
import numpy as np
import psychopy.core as core
import psychopy.visual.window as window
from psychopy.tools.monitorunittools import pix2cm, pix2deg, cm2pix, deg2pix
from psychopy.tools.attributetools import AttributeGetSetMixin
# mouse button indices
MOUSE_BUTTON_LEFT = 0
MOUSE_BUTTON_MIDDLE = 1
MOUSE_BUTTON_RIGHT = 2
MOUSE_BUTTON_COUNT = 3
buttonNames = {
'left': MOUSE_BUTTON_LEFT,
'middle': MOUSE_BUTTON_MIDDLE,
'right': MOUSE_BUTTON_RIGHT
}
# mouse action events
MOUSE_EVENT_MOTION = 0
MOUSE_EVENT_BUTTON_PRESSED = 1
MOUSE_EVENT_BUTTON_RELEASED = 2
MOUSE_EVENT_SCROLLED = 3
MOUSE_EVENT_COUNT = 4
# offsets in storage arrays
MOUSE_POS_CURRENT = 0
MOUSE_POS_PREVIOUS = 1
class Mouse(AttributeGetSetMixin):
"""Class for using pointing devices (e.g., mice, trackballs, etc.) as input.
PsychoPy presently only supports one pointing device input at a time.
Multiple mice can be used but only one set of events will be registered at a
time as if they were all coming from the same mouse. Users should process
mouse events at least once per frame. Mouse window positions are stored
internally in 'pix' units. Coordinates are automatically converted to the
units used by the window the mouse is in.
    You can create a `Mouse` instance at any time, even before any windows are
    spawned, but you are limited to one per session; calling `Mouse()` again
    will raise an error. It is recommended that all windows are realized
    before instantiating this class.
Parameters
----------
win : :class:`~psychopy.visual.Window` or None
Initial window for the mouse. If `None`, the window created first will
be used automatically.
pos : ArrayLike or None
Initial position of the mouse cursor on the window `(x, y)` in window
        `units`. If `None`, or if `win` is `None`, the position of the cursor
        will not be set.
visible : bool
Show the mouse cursor. This applies to all windows created before
instantiating this class.
exclusive : bool
Enable exclusive mode for `win`, which makes the window take ownership
of the cursor. This should be used for fullscreen applications which
require mouse input but do not show the system cursor. When enabled, the
cursor will not be visible.
autoFocus : bool
Automatically update the `win` property to the window the cursor is
presently hovering over. If `False`, you must manually update which
window the cursor is in. This should be enabled if you plan on using
multiple windows.
Notes
-----
* This class must be instanced only by the user. It is forbidden to instance
`Mouse` outside of the scope of a user's scripts. This allows the user to
configure the mouse input system. Callbacks should only reference this
class through `Mouse.getInstance()`. If the returned value is `None`, then
the user has not instanced this class yet and events should not be
registered.
Examples
--------
Initialize the mouse, can be done at any point::
from psychopy.hardware.mouse import Mouse
mouse = Mouse()
Check if the left mouse button is being pressed down within a window::
pressed = mouse.leftPressed
Move around a stimulus when holding down the right mouse button (dragging)::
if mouse.isDragging and mouse.isRightPressed:
circle.pos = mouse.pos
Check if the `Mouse` has been initialized by the user using static method
`initialized()`::
mouseReady = Mouse.initialized()
You must get the instance of `Mouse` outside the scope of the user's script
by calling `getInstance()`. Only developers of PsychoPy or advanced users
working with mice need to concern themselves with this (see Notes)::
mouseInput = Mouse.getInstance()
if mouseInput is not None: # has been created by the user
mouseInput.win = win # set the window as an example
# never do this, only the user is allowed to!
mouseInput = Mouse()
"""
_instance = None # class instance (singleton)
_initialized = False # `True` after the user instances this class
# Sub-system used for obtaining mouse events. If 'win' the window backend is
# used for getting mouse click and motion events. If 'iohub', then iohub is
# used instead. In either case, mouse hovering events are always gathered by
# the window backend in use.
_device = None
_ioHubSrv = None
# internal clock used for timestamping all events
_clock = core.Clock()
# Window the cursor is presently in, set by the window 'on enter' type
# events.
_currentWindow = None
# Mouse button states as a length 3 boolean array. You can access the state
# for a given button using symbolic constants `MOUSE_BUTTON_*`.
_mouseButtons = np.zeros((MOUSE_BUTTON_COUNT,), dtype=bool)
# Times mouse buttons were pressed and released are stored in this array.
# The first row stores the release times and the last the pressed times. You
# can index the row by passing `int(pressed)` as a row index. Columns
# correspond to the buttons which can be indexed using symbolic constants
# `MOUSE_BUTTON_*`.
_mouseButtonsAbsTimes = np.zeros((2, MOUSE_BUTTON_COUNT), dtype=np.float32)
# Mouse motion timing, first item is when the mouse started moving, second
# is when it stopped.
_mouseMotionAbsTimes = np.zeros((2,), dtype=np.float32)
# Mouse positions during motion, press and scroll events are stored in this
# array. The first index is the event which the position is associated with.
_mousePos = np.zeros((MOUSE_EVENT_COUNT, 2, 2), dtype=np.float32)
# position the last mouse scroll event occurred
_mouseScrollPos = np.zeros((2,), dtype=np.float32)
# positions where mouse button events occurred
_mouseButtonPosPressed = np.zeros((MOUSE_BUTTON_COUNT, 2))
_mouseButtonPosReleased = np.zeros((MOUSE_BUTTON_COUNT, 2))
# velocity of the mouse cursor and direction vector
_mouseVelocity = 0.0
_mouseVector = np.zeros((2,), dtype=np.float32)
_velocityNeedsUpdate = True
# properties the user can set to configure the mouse
_visible = True
_exclusive = False
# have the window automatically be set when a cursor hovers over it
_autoFocus = True
# scaling factor for highdpi displays
_winScaleFactor = 1.0
def __init__(self, win=None, pos=(0, 0), visible=True, exclusive=False,
autoFocus=True):
# only setup if previously not instanced
if not self._initialized:
self.win = win
self.visible = visible
self.exclusive = exclusive
self.autoFocus = autoFocus
if self.win is not None:
self._winScaleFactor = self.win.getContentScaleFactor()
else:
self._winScaleFactor = 1.0 # default to 1.0
if self.win is not None and pos is not None:
self.setPos(pos)
else:
raise RuntimeError(
"Cannot create a new `psychopy.hardware.mouse.Mouse` instance. "
"Already initialized.")
self._initialized = True # we can now accept mouse events
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super(Mouse, cls).__new__(cls)
return cls._instance
@classmethod
def getInstance(cls):
"""Get the (singleton) instance of the `Mouse` class.
Getting a reference to the `Mouse` class outside of the scope of the
user's scripts should be done through this class method.
Returns
-------
Mouse or None
Instance of the `Mouse` class created by the user.
Examples
--------
Determine if the user has previously instanced the `Mouse` class::
hasMouse = mouse.Mouse.getInstance() is not None
Getting an instance of the `Mouse` class::
mouseEventHandler = mouse.Mouse.getInstance()
if mouseEventHandler is None:
return
# do something like this only if instanced
mouseEventHandler.setMouseMotionState( ... )
"""
return cls._instance
@classmethod
def initialized(cls):
"""Check if the mouse interface has been initialized by the user.
Returns
-------
bool
`True` if the `Mouse` class is ready and can accept mouse events.
Examples
--------
Checking if we can pass the mouse state to the `Mouse` instance::
from psychopy.hardware.mouse import Mouse
if Mouse.initialized():
# do something like this only if instanced
Mouse.getInstance().setMouseMotionState( ... )
"""
return cls._initialized
def setMouseMotionState(self, pos, absTime=None):
"""Set the mouse motion state.
This method is called by callback functions bound to mouse motion events
emitted by the mouse driver interface. However, the user may call this
to simulate a mouse motion event.
Parameters
----------
pos : ArrayLike
Position of the mouse (x, y).
absTime : float or None
Absolute time `pos` was obtained. If `None` a timestamp will be
created using the default clock.
"""
if absTime is None:
absTime = self._clock.getTime()
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_PREVIOUS, :] = \
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_CURRENT, :]
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_CURRENT, :] = pos
self._mouseMotionAbsTimes[MOUSE_POS_PREVIOUS] = \
self._mouseMotionAbsTimes[MOUSE_POS_CURRENT]
self._mouseMotionAbsTimes[MOUSE_POS_CURRENT] = absTime
self._velocityNeedsUpdate = True
def setMouseButtonState(self, button, pressed, pos=(0, 0), absTime=None):
"""Set a mouse button state.
This method is called by callback functions bound to mouse press events
emitted by the mouse driver interface. However, the user may call this
to simulate a mouse click events.
Parameters
----------
button : ArrayLike
Mouse button whose state is being updated. Can be one of the
following symbolic constants `MOUSE_BUTTON_LEFT`,
`MOUSE_BUTTON_MIDDLE` or `MOUSE_BUTTON_RIGHT`.
pressed : bool
`True` if the button is presently being pressed down, otherwise
`False` if released.
pos : ArrayLike
Position `(x, y)` the mouse event occurred in 'pix' units.
absTime : float or None
Absolute time the values of `buttons` was obtained. If `None` a
timestamp will be created using the default clock.
"""
if absTime is None:
absTime = self._clock.getTime()
# set the value of the button states
self._mouseButtons[button] = bool(pressed)
# set position
if pressed:
self._mouseButtonPosPressed[button] = pos
else:
self._mouseButtonPosReleased[button] = pos
# update the timing info
self._mouseButtonsAbsTimes[int(pressed), button] = absTime
def setMouseScrollState(self, pos=(0, 0), offset=(0, 0), absTime=None):
"""Set the scroll wheel state.
This method is called by callback functions bound to mouse scroll events
emitted by the mouse driver interface. However, the user may call this
to simulate a mouse scroll events.
Parameters
----------
pos : ArrayLike
Position `(x, y)` of the cursor the scroll event was registered.
offset : ArrayLike
Vertical and horizontal offset of the scroll wheel.
absTime : ArrayLike
Absolute time in seconds the event was registered. If `None`, a
timestamp will be generated automatically.
"""
# todo - figure out how to handle this better
if absTime is None:
absTime = self._clock.getTime()
self._mouseScrollPos[:] = pos
def _pixToWindowUnits(self, pos):
"""Conversion from 'pix' units to window units.
The mouse class stores mouse positions in 'pix' units. This function is
used by getter and setter methods to convert position values to the
units specified by the window.
Parameters
----------
pos : ArrayLike
Position `(x, y)` in 'pix' coordinates to convert.
Returns
-------
ndarray
Position `(x, y)` in window units.
"""
pos = np.asarray(pos, dtype=np.float32)
if self.win is None:
return pos
if self.win.units == 'pix':
if self.win.useRetina:
pos /= 2.0
else:
pos /= self._winScaleFactor
return pos
elif self.win.units == 'norm':
return pos * 2.0 / self.win.size
elif self.win.units == 'cm':
return pix2cm(pos, self.win.monitor)
elif self.win.units == 'deg':
return pix2deg(pos, self.win.monitor)
elif self.win.units == 'height':
return pos / float(self.win.size[1])
def _windowUnitsToPix(self, pos):
"""Convert user specified window units to 'pix'. This method is the
inverse of `_pixToWindowUnits`.
        Parameters
        ----------
        pos : ArrayLike
            Position `(x, y)` in window units to convert.
        Returns
        -------
        ndarray
            Position `(x, y)` in 'pix' units.
        """
pos = np.asarray(pos, dtype=np.float32)
if self.win is None:
return pos
if self.win.units == 'pix':
if self.win.useRetina:
pos *= 2.0
else:
pos *= self._winScaleFactor
return pos
elif self.win.units == 'norm':
return pos * self.win.size / 2.0
elif self.win.units == 'cm':
return cm2pix(pos, self.win.monitor)
elif self.win.units == 'deg':
return deg2pix(pos, self.win.monitor)
elif self.win.units == 'height':
return pos * float(self.win.size[1])
@property
def win(self):
"""Window the cursor is presently hovering over
(:class:`~psychopy.visual.Window` or `None`). This is usually set by the
window backend. This value cannot be updated when ``exclusive=True``.
"""
return self._currentWindow
@win.setter
def win(self, value):
if not self._exclusive:
self._currentWindow = value
@property
def units(self):
"""The units for this mouse (`str`). Will match the current units for
the Window it lives in. To change the units of the mouse, you must
change the window units.
"""
return self.win.units
@property
def visible(self):
"""Mouse visibility state (`bool`)."""
return self.getVisible()
@visible.setter
def visible(self, value):
self.setVisible(value)
def getVisible(self):
"""Get the visibility state of the mouse.
Returns
-------
bool
`True` if the pointer is set to be visible on-screen.
"""
return self._visible
def setVisible(self, visible=True):
"""Set the visibility state of the mouse.
Parameters
----------
visible : bool
Mouse visibility state to set. `True` will result in the mouse
cursor being draw when over the window. If `False`, the cursor will
be invisible. Regardless of the visibility state the mouse pointer
can still be moved in response to the user's inputs and changes in
position are still registered.
"""
if not window.openWindows:
return
for ref in window.openWindows:
win = ref() # resolve weak ref
if hasattr(win.backend, 'setMouseVisibility'):
win.backend.setMouseVisibility(visible)
self._visible = visible
@property
def exclusive(self):
"""Make the current window (at property `win`) exclusive (`bool`).
"""
return self.getExclusive()
@exclusive.setter
def exclusive(self, value):
self.setExclusive(value)
def getExclusive(self):
"""Get if the window is exclusive.
Returns
-------
bool
Window the mouse is exclusive to. If `None`, the mouse is not
exclusive to any window.
"""
return self._exclusive
def setExclusive(self, exclusive):
"""Set the current window (at property `win`) to exclusive. When a
window is in exclusive/raw mode all mouse events are captured by the
window.
Parameters
----------
exclusive : bool
If `True`, the window will be set to exclusive/raw mode.
"""
if self.win is not None:
self._exclusive = exclusive
self.win.backend.setMouseExclusive(self._exclusive)
else:
self._exclusive = False
@property
def autoFocus(self):
"""Automatically update `win` to that which the cursor is hovering over
(`bool`).
"""
return self.getAutoFocus()
@autoFocus.setter
def autoFocus(self, value):
self.setAutoFocus(value)
def getAutoFocus(self):
"""Get if auto focus is enabled.
Returns
-------
bool
`True` if auto focus is enabled.
"""
return self._autoFocus
def setAutoFocus(self, autoFocus):
"""Set cursor auto focus. If enabled, `win` will be automatically
to the window the cursor is presently hovering over.
Parameters
----------
autoFocus : bool
If `True`, auto focus will be enabled.
"""
self._autoFocus = autoFocus
@property
def buttons(self):
"""Global mouse buttons states (`ndarray`).
Is an array of three values corresponding to the left (0), middle (1),
and right (2) mouse buttons. You may use either of the symbolic
constants `MOUSE_BUTTON_LEFT`, `MOUSE_BUTTON_MIDDLE` or
`MOUSE_BUTTON_RIGHT` for indexing this array.
Examples
--------
Get the left mouse button state::
isPressed = mouseEventHandler.buttons[MOUSE_BUTTON_LEFT]
"""
return self._mouseButtons
@buttons.setter
def buttons(self, value):
assert len(value) == 3
self._mouseButtons[:] = value
def getButton(self, button):
"""Get button state.
Parameters
----------
button : str or int
Button name as a string or symbolic constant representing the button
(e.g., `MOUSE_BUTTON_LEFT`). Valid button names are 'left', 'middle'
and 'right'.
Returns
-------
tuple
Button state if pressed (`bool`) and absolute time the state of the
button last changed (`float`).
"""
if isinstance(button, str):
button = buttonNames[button]
state = self.buttons[button]
absTime = self._mouseButtonsAbsTimes[button]
return state, absTime
@property
def pos(self):
"""Current mouse position (x, y) on window (`ndarray`).
"""
return self._pixToWindowUnits(
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_CURRENT, :])
@pos.setter
def pos(self, value):
"""Current mouse position (x, y) on window (`ndarray`).
"""
assert len(value) == 2
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_CURRENT, :] = \
self._windowUnitsToPix(value)
if self.win is not None:
self.win.backend.setMousePos(
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_CURRENT, :])
self._velocityNeedsUpdate = True
def getPos(self):
"""Get the current position of the mouse pointer.
Returns
-------
ndarray
Mouse position (x, y) in window units. Is an independent copy of
the property `Mouse.pos`.
"""
return self.pos.copy() # returns a copy
def setPos(self, pos=(0, 0)):
"""Set the current position of the mouse pointer. Uses the same units as
window at `win`.
Parameters
----------
pos : ArrayLike
Position (x, y) for the mouse in window units.
"""
self.pos = pos
@property
def prevPos(self):
"""Previously reported mouse position (x, y) on window (`ndarray`).
"""
return self._pixToWindowUnits(
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_PREVIOUS, :])
@prevPos.setter
def prevPos(self, value):
assert len(value) == 2
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_PREVIOUS, :] = \
self._windowUnitsToPix(value)
def getPrevPos(self):
"""Get the previous position of the mouse pointer.
Returns
-------
ndarray
Mouse position (x, y) in window units. Is an independent copy of
the property `Mouse.prevPos`.
"""
return self.prevPos.copy() # returns a copy
@property
def relPos(self):
"""Relative change in position of the mouse between motion events
(`ndarray`).
"""
return self.getRelPos()
def getRelPos(self):
"""Get the relative change in position of the mouse.
Returns
-------
ndarray
Vector specifying the relative horizontal and vertical change in
cursor position between motion events (`x`, `y`). Normalizing this
vector will give the direction vector.
"""
relPos = self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_CURRENT, :] - \
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_PREVIOUS, :]
return self._pixToWindowUnits(relPos)
def getDistance(self):
"""Get the distance in window units the mouse moved.
Returns
-------
float
Distance in window units.
"""
distPix = np.sqrt(np.sum(np.square(self.getRelPos()), dtype=np.float32))
return self._pixToWindowUnits(distPix)
@property
def leftButtonPressed(self):
"""Is the left mouse button being pressed (`bool`)?"""
return self._mouseButtons[MOUSE_BUTTON_LEFT]
@property
def middleButtonPressed(self):
"""Is the middle mouse button being pressed (`bool`)?"""
return self._mouseButtons[MOUSE_BUTTON_MIDDLE]
@property
def rightButtonPressed(self):
"""Is the right mouse button being pressed (`bool`)?"""
return self._mouseButtons[MOUSE_BUTTON_RIGHT]
@property
def pressedTimes(self):
"""Absolute time in seconds each mouse button was last pressed
(`ndarray`).
"""
return self._mouseButtonsAbsTimes[1, :]
@property
def pressedPos(self):
"""Positions buttons were last pressed (`ndarray`).
"""
return self._pixToWindowUnits(self._mouseButtonPosPressed)
@property
def releasedTimes(self):
"""Absolute time in seconds each mouse button was last released
(`ndarray`).
"""
return self._mouseButtonsAbsTimes[0, :]
@property
def releasedPos(self):
"""Positions buttons were last released (`ndarray`).
"""
return self._pixToWindowUnits(self._mouseButtonPosReleased)
    @property
    def pressedDuration(self):
        """Time elapsed between the last press and release of each button
        (`ndarray`). Derived from the stored press/release timestamps, so
        values are only meaningful once a button has been both pressed and
        released.
        """
        # row 0 holds release times, row 1 holds press times
        return (self._mouseButtonsAbsTimes[0, :] -
                self._mouseButtonsAbsTimes[1, :])
@property
def isMoving(self):
"""`True` if the mouse is presently moving (`bool`)."""
return not np.allclose(
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_PREVIOUS, :],
self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_CURRENT, :])
@property
def isDragging(self):
"""`True` if a mouse button is being held down and is moving (`bool`).
"""
return np.any(self._mouseButtons) and self.isMoving
@property
def isHovering(self):
"""`True` if the mouse if hovering over a content window (`bool`)."""
return self.win is not None
@property
def motionAbsTime(self):
"""Absolute time in seconds the most recent mouse motion was polled
(`float`). Setting this will automatically update the previous motion
timestamp.
"""
return self._mouseMotionAbsTimes[MOUSE_POS_CURRENT]
@motionAbsTime.setter
def motionAbsTime(self, value):
self._mouseMotionAbsTimes[MOUSE_POS_CURRENT] = value
self._velocityNeedsUpdate = True
@property
def velocity(self):
"""The velocity of the mouse cursor on-screen in window units (`float`).
The velocity is calculated as the relative change in position of the
mouse cursor between motion events divided by the time elapsed between
the events.
Returns
-------
float
Velocity of the mouse cursor in window units per second.
"""
        if self._velocityNeedsUpdate:
            tdelta = self.motionAbsTime - \
                self._mouseMotionAbsTimes[MOUSE_POS_PREVIOUS]
            # work in 'pix' units here; the result is converted to window
            # units exactly once, on return
            relPix = (
                self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_CURRENT, :] -
                self._mousePos[MOUSE_EVENT_MOTION, MOUSE_POS_PREVIOUS, :])
            if tdelta > 0.0:
                self._mouseVelocity = relPix / tdelta
            else:
                self._mouseVelocity = 0.0
            self._velocityNeedsUpdate = False
        return self._pixToWindowUnits(self._mouseVelocity)
def setCursorStyle(self, cursorType='default'):
"""Change the appearance of the cursor for all windows.
Cursor types provide contextual hints about how to interact with
on-screen objects. The graphics used 'standard cursors' provided by the
operating system. They may vary in appearance and hot spot location
across platforms. The following names are valid on most platforms and
backends:
        * ``arrow`` or ``default``: Default system pointer.
* ``ibeam`` or ``text``: Indicates text can be edited.
* ``crosshair``: Crosshair with hot-spot at center.
* ``hand``: A pointing hand.
* ``hresize``: Double arrows pointing horizontally.
* ``vresize``: Double arrows pointing vertically.
These cursors are only supported when using the Pyglet window type
(``winType='pyglet'``):
* ``help``: Arrow with a question mark beside it (Windows only).
* ``no``: 'No entry' sign or circle with diagonal bar.
* ``size``: Vertical and horizontal sizing.
* ``downleft`` or ``upright``: Double arrows pointing diagonally with
positive slope (Windows only).
* ``downright`` or ``upleft``: Double arrows pointing diagonally with
negative slope (Windows only).
* ``lresize``: Arrow pointing left (Mac OS X only).
* ``rresize``: Arrow pointing right (Mac OS X only).
* ``uresize``: Arrow pointing up (Mac OS X only).
* ``dresize``: Arrow pointing down (Mac OS X only).
* ``wait``: Hourglass (Windows) or watch (Mac OS X) to indicate the
system is busy.
* ``waitarrow``: Hourglass beside a default pointer (Windows only).
In cases where a cursor is not supported on the platform, the default
for the system will be used.
Parameters
----------
cursorType : str
Type of standard cursor to use. If not specified, `'default'` is
used.
Notes
-----
* On some platforms the 'crosshair' cursor may not be visible on uniform
grey backgrounds.
"""
if not window.openWindows:
return
for ref in window.openWindows:
win = ref() # resolve weak ref
if hasattr(win.backend, 'setMouseCursor'):
win.backend.setMouseCursor(cursorType)
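
# Usage sketch: a per-frame polling pattern. Assumes a `psychopy.visual`
# window; adjust to your own drawing loop.
#
#     from psychopy import visual
#     win = visual.Window()
#     mouse = Mouse(win=win, visible=True)
#     mouse.setCursorStyle('crosshair')
#     while not mouse.rightButtonPressed:
#         if mouse.isDragging:
#             print('dragging at', mouse.pos, 'velocity', mouse.velocity)
#         win.flip()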
if __name__ == "__main__":
pass
size: 29,029 | language: Python | extension: .py | total_lines: 697 | avg_line_length: 32.837877 | max_line_length: 80 | alphanum_fraction: 0.627238 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,998 | file_name: bits.py | file_path: psychopy_psychopy/psychopy/hardware/crs/bits.py
from psychopy.tools.pkgtools import PluginStub
class BitsSharp(
PluginStub,
plugin="psychopy-crs",
doclink="https://psychopy.github.io/psychopy-crs/coder/bits/#psychopy_crs.bits.BitsSharp"
):
pass
class BitsPlusPlus(
PluginStub,
plugin="psychopy-crs",
doclink="https://psychopy.github.io/psychopy-crs/coder/bits/#psychopy_crs.bits.BitsPlusPlus"
):
pass
class DisplayPlusPlus(
PluginStub,
plugin="psychopy-crs",
doclink="https://psychopy.github.io/psychopy-crs/coder/bits/#psychopy_crs.bits.DisplayPlusPlus"
):
pass
class DisplayPlusPlusTouch(
PluginStub,
plugin="psychopy-crs",
doclink="https://psychopy.github.io/psychopy-crs/coder/bits/#psychopy_crs.bits.DisplayPlusPlusTouch"
):
pass
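
# Usage sketch: these stubs only direct users to install `psychopy-crs`;
# once that plugin is present the real classes replace them. The constructor
# arguments below are assumptions, see the linked plugin docs:
#
#     from psychopy import visual
#     from psychopy.hardware.crs import BitsSharp
#     win = visual.Window(screen=0, useFBO=True)
#     bits = BitsSharp(win=win, mode='mono++')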
size: 766 | language: Python | extension: .py | total_lines: 25 | avg_line_length: 26.48 | max_line_length: 104 | alphanum_fraction: 0.747956 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,999 | file_name: shaders.py | file_path: psychopy_psychopy/psychopy/hardware/crs/shaders.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
# Acknowledgements:
# Written by Jon Peirce
#
# Based on shader code for the mono++ and color++ modes in Psychtoolbox
# (Mario Kleiner) but does not use that code directly.
# It is, for example, Mario's idea to add the 0.01 to avoid rounding issues
from psychopy.tools.pkgtools import PluginStub
class bitsMonoModeFrag(
PluginStub,
plugin="psychopy-crs",
doclink="https://psychopy.github.io/psychopy-crs/coder/shaders"
):
pass
class bitsColorModeFrag(
PluginStub,
plugin="psychopy-crs",
doclink="https://psychopy.github.io/psychopy-crs/coder/shaders"
):
pass
size: 843 | language: Python | extension: .py | total_lines: 24 | avg_line_length: 32.291667 | max_line_length: 79 | alphanum_fraction: 0.737361 | repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)