Dataset schema (one row per source file):

| column | dtype | observed values |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | lengths 4 to 119 |
| file_path | string | lengths 14 to 227 |
| content | string | lengths 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
id: 5,300
file_name: genPOI.py
file_path: overviewer_Minecraft-Overviewer/overviewer_core/aux_files/genPOI.py
content:
#!/usr/bin/env python3
'''
genPOI.py
Scans regionsets for TileEntities and Entities, filters them, and writes out
POI/marker info.
A markerSet is a list of POIs to display on a tileset. It has a display name,
and a group name.
markersDB.js holds a list of POIs in each group
markers.js holds a list of which markerSets are attached to each tileSet
'''
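# Hedged sketch of the two output files (shapes inferred from main() below; the
# filter name "Signs_0f3a_1b2c" and render name "overworld" are made up):
#
#   var markersDB={"Signs_0f3a_1b2c": {"created": false, "name": "Signs",
#       "raw": [{"x": 10, "y": 64, "z": -3, "text": "hi", "hovertext": "hi"}]}};
#   var markers={"overworld": [{"groupName": "Signs_0f3a_1b2c",
#       "displayName": "Signs", "icon": "signpost_icon.png",
#       "createInfoWindow": true, "checked": false, "showIconInLegend": false}]};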
import datetime
import gzip
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import time
import urllib.request
import urllib.error
from collections import defaultdict
from contextlib import closing
from multiprocessing import Pool
from argparse import ArgumentParser
from overviewer_core import config_parser, logger, nbt, world
from overviewer_core.files import FileReplacer, get_fs_caps
UUID_LOOKUP_URL = 'https://sessionserver.mojang.com/session/minecraft/profile/'
DIMENSION_INT_TO_STR = {
0: "minecraft:overworld",
-1: "minecraft:the_nether",
1: "minecraft:the_end"
}
def replaceBads(s):
"Replaces bad characters with good characters!"
bads = [" ", "(", ")"]
x = s
for bad in bads:
x = x.replace(bad, "_")
return x
# If you want to keep your stomach contents do not, under any circumstance,
# read the body of the following function. You have been warned.
# All of this could be replaced by a simple json.loads if Mojang had
# introduced a TAG_JSON, but they didn't.
#
# So here are a few curiosities how 1.7 signs get seen in 1.8 in Minecraft:
# - null ->
# - "null" -> null
# - ["Hello"] -> Hello
# - [Hello] -> Hello
# - [1,2,3] -> 123
# Mojang just broke signs for everyone who ever used [, { and ". GG.
def jsonText(s):
if s is None or s == "null":
return ""
if ((s.startswith('"') and s.endswith('"')) or
(s.startswith('{') and s.endswith('}'))):
try:
js = json.loads(s)
except ValueError:
return s
def parseLevel(foo):
bar = ""
if isinstance(foo, list):
for extra in foo:
bar += parseLevel(extra)
elif isinstance(foo, dict):
if "text" in foo:
bar += foo["text"]
if "extra" in foo:
bar += parseLevel(foo["extra"])
elif isinstance(foo, str):
bar = foo
return bar
return parseLevel(js)
else:
return s
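# A few worked examples of jsonText(), derived from the branches above
# (illustrative, not an exhaustive test):
#   jsonText(None)        -> ""        (likewise for the literal string "null")
#   jsonText('"Hello"')   -> "Hello"   (quoted JSON string)
#   jsonText('{"text": "Hi", "extra": [{"text": "!"}]}') -> "Hi!"
#   jsonText('[1,2,3]')   -> "[1,2,3]" (no leading " or {, returned unchanged here)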
# Since functions are not pickleable, we send their names instead.
# Here, set up worker processes to have a name -> function map
bucketChunkFuncs = {}
def initBucketChunks(config_path):
global bucketChunkFuncs
mw_parser = config_parser.MultiWorldParser()
mw_parser.parse(config_path)
# ought not to fail since we already did it once
config = mw_parser.get_validated_config()
for name, render in config['renders'].items():
for f in render['markers']:
ff = f['filterFunction']
bucketChunkFuncs[ff.__name__] = ff
# yes there's a double parenthesis here
# see below for when this is called, and why we do this
# a smarter way would be functools.partial, but that's broken on python 2.6
# when used with multiprocessing
# TODO: Do the smarter way
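# For reference, a functools.partial sketch of the "smarter way" the TODO
# mentions (untested; assumes parseBucketChunks were re-signed as
# parseBucketChunks(bucket, rset, filters), mirroring the pool.map call in
# handleEntities below):
#
#   from functools import partial
#   work = partial(parseBucketChunks, rset=rset, filters=filters)
#   results = pool.map(work, buckets)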
def parseBucketChunks(task_tuple):
global bucketChunkFuncs
bucket, rset, filters = task_tuple
pid = multiprocessing.current_process().pid
markers = defaultdict(list)
i = 0
cnt = 0
for b in bucket:
try:
data = rset.get_chunk(b[0], b[1])
for poi in itertools.chain(data.get('TileEntities', []), data.get('Entities', []), data.get('block_entities', [])):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign':
poi = signWrangler(poi)
for name, filter_function in filters:
ff = bucketChunkFuncs[filter_function]
result = ff(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name].append(d)
except nbt.CorruptChunkError:
logging.warning("Ignoring POIs in corrupt chunk %d,%d.", b[0], b[1])
except world.ChunkDoesntExist:
pass
# Perhaps only on verbose ?
i = i + 1
if i == 250:
i = 0
cnt = 250 + cnt
logging.debug("Found %d markers in thread %d so far at %d chunks.",
sum(len(v) for v in markers.values()), pid, cnt)
return markers
def signWrangler(poi):
"""
Just does the JSON things for signs
"""
for field in ["Text1", "Text2", "Text3", "Text4"]:
poi[field] = jsonText(poi[field])
return poi
def handleEntities(rset, config, config_path, filters, markers):
"""
Add markers for Entities or TileEntities.
For this every chunk of the regionset is parsed and filtered using multiple
processes, if so configured.
This function will not return anything, but it will update the parameter
`markers`.
"""
logging.info("Looking for entities in %r...", rset)
numbuckets = config['processes']
if numbuckets < 0:
numbuckets = multiprocessing.cpu_count()
if numbuckets == 1:
for (x, z, mtime) in rset.iterate_chunks():
try:
data = rset.get_chunk(x, z)
for poi in itertools.chain(data.get('TileEntities', []), data.get('Entities', []), data.get('block_entities', [])):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign': # kill me
poi = signWrangler(poi)
for name, __, filter_function, __, __, __ in filters:
result = filter_function(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name]['raw'].append(d)
except nbt.CorruptChunkError:
logging.warning("Ignoring POIs in corrupt chunk %d,%d.", x, z)
except world.ChunkDoesntExist:
# iterate_chunks() doesn't inspect chunks and filter out
# placeholder ones. It's okay for this chunk to not exist.
pass
else:
buckets = [[] for i in range(numbuckets)]
for (x, z, mtime) in rset.iterate_chunks():
i = x // 32 + z // 32
i = i % numbuckets
buckets[i].append([x, z])
for b in buckets:
logging.info("Buckets has %d entries.", len(b))
# Create a pool of processes and run all the functions
pool = Pool(processes=numbuckets, initializer=initBucketChunks, initargs=(config_path,))
# simplify the filters dict, so pickle doesn't have to do so much
filters = [(name, filter_function.__name__) for name, __, filter_function, __, __, __
in filters]
results = pool.map(parseBucketChunks, ((buck, rset, filters) for buck in buckets))
pool.close()
pool.join()
logging.info("All the threads completed.")
for marker_dict in results:
for name, marker_list in marker_dict.items():
markers[name]['raw'].extend(marker_list)
logging.info("Done.")
class PlayerDict(dict):
use_uuid = False
_name = ''
uuid_cache = None # A cache for the UUID->profile lookups
@classmethod
def load_cache(cls, outputdir):
cache_file = os.path.join(outputdir, "uuidcache.dat")
if os.path.exists(cache_file):
try:
with closing(gzip.GzipFile(cache_file)) as gz:
cls.uuid_cache = json.loads(gz.read().decode("utf-8"))
logging.info("Loaded UUID cache from %r with %d entries.",
cache_file, len(cls.uuid_cache.keys()))
except (ValueError, IOError, EOFError):
logging.warning("Failed to load UUID cache -- it might be corrupt.")
cls.uuid_cache = {}
corrupted_cache = cache_file + ".corrupted." + datetime.datetime.now().isoformat()
try:
os.rename(cache_file, corrupted_cache)
logging.warning("If %s does not appear to contain meaningful data, you may "
"safely delete it.", corrupted_cache)
except OSError:
logging.warning("Failed to backup corrupted UUID cache.")
logging.info("Initialized an empty UUID cache.")
else:
cls.uuid_cache = {}
logging.info("Initialized an empty UUID cache.")
@classmethod
def save_cache(cls, outputdir):
cache_file = os.path.join(outputdir, "uuidcache.dat")
caps = get_fs_caps(outputdir)
with FileReplacer(cache_file, caps) as cache_file_name:
with closing(gzip.GzipFile(cache_file_name, "wb")) as gz:
gz.write(json.dumps(cls.uuid_cache).encode())
logging.info("Wrote UUID cache with %d entries.",
len(cls.uuid_cache.keys()))
def __getitem__(self, item):
if item == "EntityId":
if "EntityId" not in self:
if self.use_uuid:
super(PlayerDict, self).__setitem__("EntityId", self.get_name_from_uuid())
else:
super(PlayerDict, self).__setitem__("EntityId", self._name)
return super(PlayerDict, self).__getitem__(item)
def get_name_from_uuid(self):
sname = self._name.replace('-', '')
try:
profile = PlayerDict.uuid_cache[sname]
if profile['retrievedAt'] > time.mktime(self['time']):
return profile['name']
except (KeyError,):
pass
try:
profile = json.loads(
urllib.request.urlopen(UUID_LOOKUP_URL + sname).read().decode("utf-8")
)
if 'name' in profile:
profile['retrievedAt'] = time.mktime(time.localtime())
PlayerDict.uuid_cache[sname] = profile
return profile['name']
except (ValueError, urllib.error.URLError):
logging.warning("Unable to get player name for UUID %s.", self._name)
def handlePlayers(worldpath, filters, markers):
"""
Add markers for players to the list of markers.
For this the player files under the given `worldpath` are parsed and
filtered.
This function will not return anything, but it will update the parameter
`markers`.
"""
playerdir = os.path.join(worldpath, "playerdata")
useUUIDs = True
if not os.path.isdir(playerdir):
playerdir = os.path.join(worldpath, "players")
useUUIDs = False
if os.path.isdir(playerdir):
playerfiles = os.listdir(playerdir)
playerfiles = [x for x in playerfiles if x.endswith(".dat")]
isSinglePlayer = False
else:
playerfiles = [os.path.join(worldpath, "level.dat")]
isSinglePlayer = True
for playerfile in playerfiles:
try:
data = PlayerDict(nbt.load(os.path.join(playerdir, playerfile))[1])
data.use_uuid = useUUIDs
if isSinglePlayer:
data = data['Data']['Player']
except (IOError, TypeError, KeyError, nbt.CorruptNBTError):
logging.warning("Skipping bad player dat file %r.", playerfile)
continue
playername = playerfile.split(".")[0]
if isSinglePlayer:
playername = 'Player'
data._name = playername
if useUUIDs:
data['uuid'] = playername
# Position at last logout
data['id'] = "Player"
data['x'] = int(data['Pos'][0])
data['y'] = int(data['Pos'][1])
data['z'] = int(data['Pos'][2])
# Time at last logout, calculated from last time the player's file was modified
data['time'] = time.localtime(os.path.getmtime(os.path.join(playerdir, playerfile)))
# Spawn position (bed or main spawn)
if "SpawnX" in data:
# Spawn position (bed or main spawn)
spawn = PlayerDict()
spawn.use_uuid = useUUIDs
spawn._name = playername
spawn["id"] = "PlayerSpawn"
spawn["x"] = data['SpawnX']
spawn["y"] = data['SpawnY']
spawn["z"] = data['SpawnZ']
for name, __, filter_function, rset, __, __ in filters:
# get the dimension for the filter
# This has do be done every time, because we have filters for
# different regionsets.
if rset.get_type():
dimension = int(re.match(r"^DIM(_MYST)?(-?\d+)$", rset.get_type()).group(2))
else:
dimension = 0
dimension = DIMENSION_INT_TO_STR.get(dimension, "minecraft:overworld")
read_dim = data.get("Dimension", "minecraft:overworld")
        if isinstance(read_dim, int):
            read_dim = DIMENSION_INT_TO_STR.get(read_dim, "minecraft:overworld")
if read_dim == dimension:
result = filter_function(data)
if result:
d = create_marker_from_filter_result(data, result)
markers[name]['raw'].append(d)
if dimension == "minecraft:overworld" and "SpawnX" in data:
result = filter_function(spawn)
if result:
d = create_marker_from_filter_result(spawn, result)
markers[name]['raw'].append(d)
def handleManual(manualpois, filters, markers):
"""
Add markers for manually defined POIs to the list of markers.
This function will not return anything, but it will update the parameter
`markers`.
"""
for poi in manualpois:
for name, __, filter_function, __, __, __ in filters:
result = filter_function(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name]['raw'].append(d)
def create_marker_from_filter_result(poi, result):
"""
Takes a POI and the return value of a filter function for it and creates a
marker dict depending on the type of the returned value.
"""
# every marker has a position either directly via attributes x, y, z or
# via tuple attribute Pos
if 'Pos' in poi:
d = dict((v, poi['Pos'][i]) for i, v in enumerate('xyz'))
else:
d = dict((v, poi[v]) for v in 'xyz')
# read some Defaults from POI
if "icon" in poi:
d["icon"] = poi['icon']
if "createInfoWindow" in poi:
d["createInfoWindow"] = poi['createInfoWindow']
# Fill in the rest from result
if isinstance(result, str):
d.update(dict(text=result, hovertext=result))
elif isinstance(result, tuple):
d.update(dict(text=result[1], hovertext=result[0]))
# Dict support to allow more flexible things in the future as well as polylines on the map.
elif isinstance(result, dict):
if 'text' in result:
d['text'] = result['text']
# Use custom hovertext if provided...
if 'hovertext' in result:
d['hovertext'] = str(result['hovertext'])
else: # ...otherwise default to display text.
d['hovertext'] = result.get('text', '')
if "icon" in result:
d["icon"] = result['icon']
if "createInfoWindow" in result:
d["createInfoWindow"] = result['createInfoWindow']
# Polylines and polygons
if (('polyline' in result and hasattr(result['polyline'], '__iter__')) or
'polygon' in result and hasattr(result['polygon'], '__iter__')):
# If the points form a line or closed shape
d['isLine'] = 'polyline' in result
# Collect points
d['points'] = []
for point in (result['polyline'] if d['isLine'] else result['polygon']):
d['points'].append(dict(x=point['x'], y=point['y'], z=point['z']))
# Options and default values
if 'color' in result:
d['strokeColor'] = result['color']
else:
d['strokeColor'] = 'red'
if 'fill' in result:
d['fill'] = result['fill']
else:
d['fill'] = not d['isLine'] # fill polygons by default
if 'weight' in result:
d['strokeWeight'] = result['weight']
else:
d['strokeWeight'] = 2
else:
raise ValueError("Got an %s as result for POI with id %s"
% (type(result).__name__, poi['id']))
return d
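# Hedged examples of the three result shapes a filter function may return and
# the markers they yield (poi assumed to already carry x/y/z; values made up):
#   create_marker_from_filter_result(poi, "Home")
#       -> {'x': ..., 'text': 'Home', 'hovertext': 'Home'}
#   create_marker_from_filter_result(poi, ("Hover text", "Display text"))
#       -> {'x': ..., 'text': 'Display text', 'hovertext': 'Hover text'}
#   create_marker_from_filter_result(poi, {'text': 'Base', 'color': 'blue',
#                                          'polyline': [poi]})
#       -> {'x': ..., 'text': 'Base', 'hovertext': 'Base', 'isLine': True,
#           'strokeColor': 'blue', 'fill': False, 'strokeWeight': 2,
#           'points': [...]}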
def main():
if os.path.basename(sys.argv[0]) == "genPOI.py":
prog_name = "genPOI.py"
else:
prog_name = sys.argv[0] + " --genpoi"
logger.configure()
parser = ArgumentParser(prog=prog_name)
parser.add_argument("-c", "--config", dest="config", action="store", required=True,
help="Specify the config file to use.")
parser.add_argument("-p", "--processes", dest="procs", action="store", type=int,
help="The number of local worker processes to spawn. Defaults to the "
"number of CPU cores your computer has.")
parser.add_argument("-q", "--quiet", dest="quiet", action="count",
help="Reduce logging output")
parser.add_argument("--skip-scan", dest="skipscan", action="store_true",
help="Skip scanning for entities when using GenPOI")
parser.add_argument("--skip-players", dest="skipplayers", action="store_true",
help="Skip getting player data when using GenPOI")
args = parser.parse_args()
if args.quiet and args.quiet > 0:
logger.configure(logging.WARN, False)
# Parse the config file
mw_parser = config_parser.MultiWorldParser()
try:
mw_parser.parse(args.config)
except config_parser.MissingConfigException:
parser.error("The configuration file '{}' does not exist.".format(args.config))
if args.procs:
mw_parser.set_config_item("processes", args.procs)
try:
config = mw_parser.get_validated_config()
except Exception:
logging.exception("An error was encountered with your configuration. See the info below.")
return 1
destdir = config['outputdir']
# saves us from creating the same World object over and over again
worldcache = {}
filters = set()
marker_groups = defaultdict(list)
logging.info("Searching renders: %s", list(config['renders']))
# collect all filters and get regionsets
for rname, render in config['renders'].items():
# Convert render['world'] to the world path, and store the original
# in render['worldname_orig']
try:
worldpath = config['worlds'][render['world']]
except KeyError:
logging.error("Render %s's world is '%s', but I could not find a corresponding entry "
"in the worlds dictionary.", rname, render['world'])
return 1
render['worldname_orig'] = render['world']
render['world'] = worldpath
# find or create the world object
if (render['world'] not in worldcache):
w = world.World(render['world'])
worldcache[render['world']] = w
else:
w = worldcache[render['world']]
# get the regionset for this dimension
rset = w.get_regionset(render['dimension'][1])
if rset is None: # indicates no such dimension was found:
logging.warning("Sorry, you requested dimension '%s' for the render '%s', but I "
"couldn't find it.", render['dimension'][0], rname)
continue
# List of regionsets that should be handled
rsets = []
if "crop" in render:
for zone in render['crop']:
rsets.append(world.CroppedRegionSet(rset, *zone))
else:
rsets.append(rset)
# find filters for this render
for f in render['markers']:
# internal identifier for this filter
name = (replaceBads(f['name']) + hex(hash(f['filterFunction']))[-4:] + "_"
+ hex(hash(rname))[-4:])
# add it to the list of filters
for rset in rsets:
filters.add((name, f['name'], f['filterFunction'], rset, worldpath, rname))
# add an entry in the menu to show markers found by this filter
group = dict(
groupName=name,
displayName=f['name'],
icon=f.get('icon', 'signpost_icon.png'),
createInfoWindow=f.get('createInfoWindow', True),
checked=f.get('checked', False),
showIconInLegend=f.get('showIconInLegend', False))
marker_groups[rname].append(group)
# initialize the structure for the markers
markers = dict((name, dict(created=False, raw=[], name=filter_name))
for name, filter_name, __, __, __, __ in filters)
all_rsets = set(map(lambda f: f[3], filters))
logging.info("Will search %s region sets using %s filters", len(all_rsets), len(filters))
# apply filters to regionsets
if not args.skipscan:
for rset in all_rsets:
rset_filters = list(filter(lambda f: f[3] == rset, filters))
logging.info("Calling handleEntities for %s with %s filters", rset, len(rset_filters))
handleEntities(rset, config, args.config, rset_filters, markers)
# apply filters to players
if not args.skipplayers:
PlayerDict.load_cache(destdir)
# group filters by worldpath, so we only search for players once per
# world
def keyfunc(x):
return x[4]
sfilters = sorted(filters, key=keyfunc)
for worldpath, worldpath_filters in itertools.groupby(sfilters, keyfunc):
handlePlayers(worldpath, list(worldpath_filters), markers)
# add manual POIs
# group filters by name of the render, because only filter functions for
# the current render should be used on the current render's manualpois
def keyfunc(x):
return x[5]
sfilters = sorted(filters, key=keyfunc)
for rname, rname_filters in itertools.groupby(sfilters, keyfunc):
manualpois = config['renders'][rname]['manualpois']
handleManual(manualpois, list(rname_filters), markers)
logging.info("Done handling POIs")
logging.info("Writing out javascript files")
if not args.skipplayers:
PlayerDict.save_cache(destdir)
with open(os.path.join(destdir, "markersDB.js"), "w") as output:
output.write("var markersDB=")
json.dump(markers, output, sort_keys=True, indent=2)
output.write(";\n")
with open(os.path.join(destdir, "markers.js"), "w") as output:
output.write("var markers=")
json.dump(marker_groups, output, sort_keys=True, indent=2)
output.write(";\n")
with open(os.path.join(destdir, "baseMarkers.js"), "w") as output:
output.write("overviewer.util.injectMarkerScript('markersDB.js');\n")
output.write("overviewer.util.injectMarkerScript('markers.js');\n")
output.write("overviewer.util.injectMarkerScript('regions.js');\n")
output.write("overviewer.collections.haveSigns=true;\n")
logging.info("Done")
if __name__ == "__main__":
main()

size: 23,684 | language: Python | extension: .py | total_lines: 531 | avg_line_length: 34.581921 | max_line_length: 131 | alphanum_fraction: 0.59272
repo_name: overviewer/Minecraft-Overviewer | repo_stars: 3,348 | repo_forks: 480 | repo_open_issues: 347 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,301
file_name: cube_stack16.svg
file_path: overviewer_Minecraft-Overviewer/docs/design/cuberenderimgs/cube_stack16.svg
content:
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="436.54492"
height="484.69528"
id="svg2"
version="1.1"
inkscape:version="0.48.1 r9760"
sodipodi:docname="cube_stack16.svg"
inkscape:export-filename="/home/andrew/mc/overviewer/anvil/docs/design/cuberenderimgs/cube_stack16.png"
inkscape:export-xdpi="90.016075"
inkscape:export-ydpi="90.016075">
<defs
id="defs4" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="2.0000001"
inkscape:cx="188.74041"
inkscape:cy="142.85522"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="true"
showguides="true"
inkscape:snap-global="true"
inkscape:window-width="1920"
inkscape:window-height="1031"
inkscape:window-x="0"
inkscape:window-y="0"
inkscape:window-maximized="1"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
showborder="false"
inkscape:guide-bbox="true">
<inkscape:grid
type="xygrid"
id="grid3755"
empspacing="5"
visible="true"
enabled="true"
snapvisiblegridlinesonly="true" />
</sodipodi:namedview>
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(144.30527,-229.28993)">
<path
sodipodi:type="arc"
style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
id="path8485"
sodipodi:cx="185"
sodipodi:cy="305.95663"
sodipodi:rx="5"
sodipodi:ry="5"
d="m 190,305.95663 c 0,2.76143 -2.23858,5 -5,5 -2.76142,0 -5,-2.23857 -5,-5 0,-2.76142 2.23858,-5 5,-5 2.76142,0 5,2.23858 5,5 z"
transform="matrix(0.5,0,0,0.5,-5.055293,308.5069)" />
<path
sodipodi:type="arc"
style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
id="path8485-1"
sodipodi:cx="185"
sodipodi:cy="305.95663"
sodipodi:rx="5"
sodipodi:ry="5"
d="m 190,305.95663 c 0,2.76143 -2.23858,5 -5,5 -2.76142,0 -5,-2.23857 -5,-5 0,-2.76142 2.23858,-5 5,-5 2.76142,0 5,2.23858 5,5 z"
transform="matrix(0.5,0,0,0.5,-5.055293,323.5069)" />
<path
sodipodi:type="arc"
style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
id="path8485-1-5"
sodipodi:cx="185"
sodipodi:cy="305.95663"
sodipodi:rx="5"
sodipodi:ry="5"
d="m 190,305.95663 c 0,2.76143 -2.23858,5 -5,5 -2.76142,0 -5,-2.23857 -5,-5 0,-2.76142 2.23858,-5 5,-5 2.76142,0 5,2.23858 5,5 z"
transform="matrix(0.5,0,0,0.5,-5.055293,338.5069)" />
<g
id="g3832"
transform="translate(-198.53304,165.96697)">
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
id="rect3021-2"
d="m 417.85207,40.096489 0,59.181324 -59.18132,0"
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)" />
<path
inkscape:connector-curvature="0"
id="path3041"
d="m 232.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-4"
d="m 338.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-0"
d="m 285.47775,176.98525 0,53"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
</g>
<g
id="g4838"
transform="translate(-331.53302,124.49999)">
<g
id="g3832-0"
transform="translate(133,-11.000002)">
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
id="rect3021-8"
width="59.181324"
height="59.181324"
x="299.48941"
y="-19.084835"
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)" />
<path
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)"
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
d="m 417.85207,40.096489 0,59.181324 -59.18132,0"
id="rect3021-2-1"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccc" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 232.47775,150.98525 0,52"
id="path3041-02"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 338.47775,150.98525 0,52"
id="path3041-4-2"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 285.47775,176.98525 0,53"
id="path3041-0-9"
inkscape:connector-curvature="0" />
</g>
</g>
<g
id="g3832-5"
transform="translate(-198.53304,215.96697)">
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
id="rect3021-2-7"
d="m 417.85207,40.096489 0,59.181324 -59.18132,0"
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)" />
<path
inkscape:connector-curvature="0"
id="path3041-41"
d="m 232.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-4-8"
d="m 338.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-0-5"
d="m 285.47775,176.98525 0,53"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
</g>
<g
id="g3832-9"
transform="translate(-198.53306,433.49997)">
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
id="rect3021-2-75"
d="m 417.85207,40.096489 0,59.181324 -59.18132,0"
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)" />
<path
inkscape:connector-curvature="0"
id="path3041-3"
d="m 232.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-4-88"
d="m 338.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-0-3"
d="m 285.47775,176.98525 0,53"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
</g>
<g
id="g4838-1"
transform="translate(-331.53303,392.03299)">
<g
id="g3832-0-9"
transform="translate(133,-11.000002)">
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
id="rect3021-8-6"
width="59.181324"
height="59.181324"
x="299.48941"
y="-19.084835"
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)" />
<path
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)"
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
d="m 417.85207,40.096489 0,59.181324 -59.18132,0"
id="rect3021-2-1-4"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccc" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 232.47775,150.98525 0,52"
id="path3041-02-3"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 338.47775,150.98525 0,52"
id="path3041-4-2-3"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 285.47775,176.98525 0,53"
id="path3041-0-9-3"
inkscape:connector-curvature="0" />
</g>
</g>
<g
id="g3832-5-8"
transform="translate(-198.53306,483.49997)">
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
id="rect3021-2-7-6"
d="m 417.85207,40.096489 0,59.181324 -59.18132,0"
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)" />
<path
inkscape:connector-curvature="0"
id="path3041-41-0"
d="m 232.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-4-8-4"
d="m 338.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-0-5-8"
d="m 285.47775,176.98525 0,53"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
</g>
<path
style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:1, 2;stroke-dashoffset:0"
d="m 33.944724,419.01824 0,113"
id="path3362"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:1, 2;stroke-dashoffset:0"
d="m 139.94472,419.01824 0,113"
id="path3362-1"
inkscape:connector-curvature="0" />
<text
xml:space="preserve"
style="font-size:20px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Andale Mono"
x="145.19473"
y="473.9852"
id="text3321"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3323"
x="145.19473"
y="473.9852">12*16</tspan><tspan
sodipodi:role="line"
x="145.19473"
y="498.9852"
id="tspan3342">=192</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 170.19473,454.9852 0,-216"
id="path3325"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 170.19473,503.98521 0,209.99997"
id="path3327"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 160.19473,263.98521 20,0"
id="path3329"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 160.19473,685.98522 20,0"
id="path3331"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 15.19473,363.9852 0,-100.00001"
id="path3325-7"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 5.19473,263.98521 20,0"
id="path3329-6"
inkscape:connector-curvature="0" />
<text
xml:space="preserve"
style="font-size:18px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Andale Mono"
x="-14.80527"
y="293.9852"
id="text3203"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3205"
x="-14.80527"
y="293.9852">12</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 5.19473,313.98521 20,0"
id="path3329-6-0"
inkscape:connector-curvature="0" />
<text
xml:space="preserve"
style="font-size:18px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Andale Mono"
x="-14.805266"
y="343.9852"
id="text3203-0"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3205-3"
x="-14.805266"
y="343.9852">12</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 5.1947303,363.98522 19.9999997,0"
id="path3329-6-0-9"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 160.19473,238.98523 20,0"
id="path3329-6-1"
inkscape:connector-curvature="0" />
<text
xml:space="preserve"
style="font-size:18px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Andale Mono"
x="180.19473"
y="258.9852"
id="text3203-4"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3205-4"
x="180.19473"
y="258.9852">6</tspan></text>
<text
xml:space="preserve"
style="font-size:18px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Andale Mono"
x="180.19473"
y="703.98523"
id="text3203-4-3"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3205-4-1"
x="180.19473"
y="703.98523">6</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 160.19473,713.98521 20,0"
id="path3331-2"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 240.19473,493.98522 0,220"
id="path3327-2"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 230.19473,713.98522 20,0"
id="path3331-6"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 240.19473,463.98521 0,-226"
id="path3325-1"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 230.19473,237.98524 20,0"
id="path3329-6-1-7"
inkscape:connector-curvature="0" />
<text
xml:space="preserve"
style="font-size:20px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Andale Mono"
x="225.19473"
y="483.9852"
id="text3321-4"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
x="225.19473"
y="483.9852"
id="tspan3342-9">204</tspan></text>
</g>
</svg>

size: 19,045 | language: Python | extension: .tac | total_lines: 434 | avg_line_length: 35.658986 | max_line_length: 232 | alphanum_fraction: 0.645191
repo_name: overviewer/Minecraft-Overviewer | repo_stars: 3,348 | repo_forks: 480 | repo_open_issues: 347 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,302
file_name: cube_stacking.svg
file_path: overviewer_Minecraft-Overviewer/docs/design/cubepositionimgs/cube_stacking.svg
content:
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="198.70314"
height="159.68738"
id="svg2"
version="1.1"
inkscape:version="0.48.1 r9760"
sodipodi:docname="cube_stacking.svg"
inkscape:export-filename="/home/andrew/mc/overviewer/docs/design/cubepositionimgs/cube_stacking.png"
inkscape:export-xdpi="144.41595"
inkscape:export-ydpi="144.41595">
<defs
id="defs4" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="2.8284271"
inkscape:cx="48.782895"
inkscape:cy="101.25197"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="true"
showguides="true"
inkscape:snap-global="true"
inkscape:window-width="1920"
inkscape:window-height="1003"
inkscape:window-x="0"
inkscape:window-y="25"
inkscape:window-maximized="1"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
showborder="false"
inkscape:guide-bbox="true">
<inkscape:grid
type="xygrid"
id="grid3755"
empspacing="5"
visible="true"
enabled="true"
snapvisiblegridlinesonly="true" />
</sodipodi:namedview>
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-33.675018,-237.79787)">
<text
xml:space="preserve"
style="font-size:20px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Andale Mono"
x="179.47775"
y="272.98526"
id="text4396-7"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan4398-45"
x="179.47775"
y="272.98526">12</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 192.0422,258.29789 0,-19.99994"
id="path4400-5"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 187.0422,238.2979 10,0"
id="path4402-7"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 192.0422,290.98525 0,-16.68735"
id="path4406-4"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 187.0422,290.98523 10,0"
id="path4408-91"
inkscape:connector-curvature="0" />
<g
id="g3832"
transform="translate(-165,167)">
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
id="rect3021-2"
d="m 417.85207,40.096489 0,59.181324 -59.18132,0"
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)" />
<path
inkscape:connector-curvature="0"
id="path3041"
d="m 232.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-4"
d="m 338.47775,150.98525 0,52"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
inkscape:connector-curvature="0"
id="path3041-0"
d="m 285.47775,176.98525 0,53"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
</g>
<g
id="g4838"
transform="translate(-297.99998,125.53302)">
<path
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:1, 1;stroke-dashoffset:0;marker-start:none"
d="m 471.51075,192.25175 -53.033,-26.2665 -53.03301,26.2665"
id="rect3021-2-1-5"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccc" />
<g
id="g3832-0"
transform="translate(133,-11.000002)">
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
id="rect3021-8"
width="59.181324"
height="59.181324"
x="299.48941"
y="-19.084835"
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)" />
<path
transform="matrix(0.89611053,0.44383096,-0.89611053,0.44383096,0,0)"
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1.12123179;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
d="m 417.85207,40.096489 0,59.181324 -59.18132,0"
id="rect3021-2-1"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccc" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 232.47775,150.98525 0,52"
id="path3041-02"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 338.47775,150.98525 0,52"
id="path3041-4-2"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 285.47775,176.98525 0,53"
id="path3041-0-9"
inkscape:connector-curvature="0" />
</g>
</g>
<rect
style="fill:none;stroke:#00ff02;stroke-width:0.5;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4847"
width="106"
height="105.71874"
x="67.477753"
y="290.98526" />
</g>
</svg>

size: 7,233 | language: Python | extension: .tac | total_lines: 182 | avg_line_length: 32.093407 | max_line_length: 232 | alphanum_fraction: 0.644681
repo_name: overviewer/Minecraft-Overviewer | repo_stars: 3,348 | repo_forks: 480 | repo_open_issues: 347 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,303
file_name: installPsychoPy.py
file_path: psychopy_psychopy/installPsychoPy.py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""
Python script to install psychopy including dependencies
NB: At present, for Windows and macOS you may as well just use `pip install psychopy` but
in the future we may add some additional functionality here, like adding application
shortcuts, checking/recommending virtual envs etc.
"""
# Author: Jonathan Peirce, based on work of Flavio Bastos and Florian Osmani
import os
import sys
import pathlib
import platform
import subprocess
import requests
_linux_installer = None # will be apt-get or yum depending on system
print(
"This `install_psychopy.py` script is EXPERIMENTAL and may not work!"
" PsychoPy users have many different systems and it's hard to maintain them all. "
" Let us know how you get on!\n"
)
if sys.version_info[:2] != (3,10):
print(
"PsychoPy is designed for Python 3.10.x "
f"You are running Python {sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}. "
"PsychoPy may not work and may not even install!\n"
)
def pip_install(*packages):
"""Install packages using pip."""
print('Installing packages:', packages)
subprocess.run([sys.executable, '-m', 'pip', 'install', '--upgrade'] + list(packages))
def check_venv():
"""Check if this is a virtual environment. If not, recommend quitting and creating one.
"""
# If this is not a venv then recommend quitting to create one
if not hasattr(sys, 'real_prefix'):
print(
'You should install PsychoPy in a virtual environment,'
' to avoid conflicts with other packages, or damaging your system.'
' To create a virtual environment in the current directory, run:')
print(' python3 -m venv .')
print('Then activate the virtual environment with:')
print(' source bin/activate')
print('Then run this script again.')
response = input('Shall we QUIT now? [y]/n: ')
if response.lower() != 'n':
sys.exit(1)
def apt_install(*packages):
"""Install packages using apt, yum, or similar"""
global _linux_installer
# check if using this system has apt-get or yum
if _linux_installer is None:
for installer in ['apt', 'yum', 'dnf', 'zypper', 'apt-cyg']:
out = subprocess.run(['which', installer], stdout=subprocess.PIPE)
if out.returncode == 0:
_linux_installer = installer
break
if _linux_installer is None:
        print('On Linux systems, this script requires apt, yum, dnf, zypper, or apt-cyg.')
sys.exit(1)
def find_package(package):
        # check package name according to apt/yum
packages_lookup = {
            'python3-dev': {'apt': 'python3-dev', 'yum': 'python3-devel'},
'libgtk-3-dev': {'apt':'libgtk-3-dev', 'yum':'gtk3-devel'},
'libwebkit2gtk-4.0-dev': {'apt':'libwebkit2gtk-4.0-dev', 'yum':'webkit2gtk3-devel'},
'libxcb-xinerama0': {'apt':'libxcb-xinerama0', 'yum':'libxcb-xinerama'},
'libegl1-mesa-dev': {'apt':'libegl1-mesa-dev', 'yum':'mesa-libEGL-devel'},
}
if package in packages_lookup:
if _linux_installer in packages_lookup[package]:
return packages_lookup[package][_linux_installer]
else:
return packages_lookup[package]['yum'] # default to yum for dnf, zypper, apt-cyg
else:
return package
packages = [find_package(p) for p in packages]
print('Installing packages (will require sudo):', packages)
subprocess.run(['sudo', _linux_installer, 'update'])
subprocess.run(['sudo', _linux_installer, 'install', '-y'] + list(packages))
if __name__ == "__main__":
# Check/install builds requirements
if platform.system() == 'Linux':
# Install system dependencies
apt_install(
'python3-dev', # need dev in case of compiling C extensions
'libgtk-3-dev', 'libwebkit2gtk-4.0-dev', # for wxPython
'libxcb-xinerama0', 'libegl1-mesa-dev', # for OpenGL needs
'git', # for push/pull to Pavlovia
)
pip_install('-U', 'pip', 'setuptools', 'attrdict')
print("Next we build wxPython (from source) which takes the longest time."
" The rest of the installation will automatically continue after and be"
" much faster.")
pip_install('wxPython')
# Install PsychoPy using pip
pip_install('psychopy')
print("\nPsychoPy has been installed (or at least attempted). You can now try run it by typing:")
print(" psychopy")
print("or:")
print(" python -m psychopy.app.psychopyApp")
print("You may need to activate the virtual environment first though.")

size: 5,178 | language: Python | extension: .py | total_lines: 110 | avg_line_length: 39.890909 | max_line_length: 101 | alphanum_fraction: 0.649474
repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,304
file_name: setupApp.py
file_path: psychopy_psychopy/setupApp.py
content:
#!/usr/bin/env python
################
# see notes at bottom for requirements
import glob
import os
import sys
from sys import platform
import setuptools # noqa: setuptools complains if it isn't explicitly imported before distutils
from distutils.core import setup
from packaging.version import Version
import bdist_mpkg # noqa: needed to build bdist, even though not explicitly used here
import py2app # noqa: needed to build app bundle, even though not explicitly used here
from ctypes.util import find_library
import importlib
import building.compile_po
from building import writeVersionFiles
import psychopy
version = psychopy.__version__
building.compile_po.compilePoFiles()
writeVersionFiles.updateVersionFile()
writeVersionFiles.updateGitShaFile()
# define the extensions to compile if necessary
packageData = []
requires = []
if platform != 'darwin':
raise RuntimeError("setupApp.py is only for building Mac Standalone bundle")
resources = glob.glob('psychopy/app/Resources/*')
frameworks = [ # these installed using homebrew
find_library("libevent"),
find_library("libmp3lame"),
find_library("libglfw"),
# libffi comes in the system
"/usr/local/opt/libffi/lib/libffi.dylib",
]
opencvLibs = glob.glob(os.path.join(sys.exec_prefix, 'lib', 'libopencv*.2.4.dylib'))
frameworks.extend(opencvLibs)
import macholib
#print("~"*60 + "macholib version: "+macholib.__version__)
if Version(macholib.__version__) <= Version('1.7'):
print("Applying macholib patch...")
import macholib.dyld
import macholib.MachOGraph
dyld_find_1_7 = macholib.dyld.dyld_find
def dyld_find(name, loader=None, **kwargs):
#print("~"*60 + "calling alternate dyld_find")
if loader is not None:
kwargs['loader_path'] = loader
return dyld_find_1_7(name, **kwargs)
macholib.MachOGraph.dyld_find = dyld_find
# excludes are often because of codesign difficulties on macos
excludes=['torch', 'mediapipe',
'bsddb', 'jinja2', 'IPython','ipython_genutils','nbconvert',
'tkinter', 'Tkinter', 'tcl',
'libsz.2.dylib', 'pygame',
# 'stringprep',
'functools32',
'sympy',
'/usr/lib/libffi.dylib',
'libwebp.7.dylib',
'google',
]
includes = ['_sitebuiltins', # needed for help()
'imp', 'subprocess', 'shlex',
'shelve', # for scipy.io
'_elementtree', 'pyexpat', # for openpyxl
'pyo', 'greenlet', 'zmq', 'tornado',
'psutil', # for iohub
'tobii_research', # need tobii_research file and tobiiresearch pkg
'soundfile', 'sounddevice', 'readline',
'xlwt', # writes excel files for pandas
'msgpack_numpy',
'configparser',
'ntplib', # for egi-pynetstation
]
packages = ['pydoc', # needed for help()
'setuptools', 'wheel', # for plugin installing
'wx', 'psychopy',
'PyQt6',
'pyglet', 'pytz',
'scipy', 'matplotlib', 'openpyxl', 'pandas',
'xml', 'xmlschema',
'ffpyplayer', 'cython', 'AVFoundation',
'imageio', 'imageio_ffmpeg',
'_sounddevice_data', '_soundfile_data',
'cffi', 'pycparser',
'PIL', # 'Image',
'freetype',
'objc', 'Quartz', 'AppKit', 'Cocoa',
'Foundation', 'CoreFoundation',
'requests', 'certifi', 'cryptography',
'json_tricks', # allows saving arrays/dates in json
'git', 'gitlab',
'msgpack', 'yaml', 'gevent', # for ioHub
'astunparse', 'esprima', # for translating/adapting py/JS
'metapensiero.pj', 'dukpy',
'jedi', 'parso',
'bidi', 'arabic_reshaper', 'charset_normalizer', # for (natural) language conversions
'ujson', # faster than built-in json
'six', # needed by configobj
# hardware
'serial',
# handy science tools
'tables', # 'cython',
# these aren't needed, but liked
'pylsl',
#'smite', # https://github.com/marcus-nystrom/SMITE (not pypi!)
'cv2',
'questplus',
'psychtoolbox',
'h5py',
'markdown_it',
'zeroconf', 'ifaddr', # for pupillabs plugin (fail to build)
'websocket', # dependency for emotiv that doesn't install nicely from plugins
]
# Add packages that older PsychoPy (<=2023.1.x) shipped, for useVersion() compatibility
# In PsychoPy 2023.2.0 these packages were removed from Standalone Py3.10+ builds
if sys.version_info < (3, 9):
packages.extend(
[
'moviepy',
'OpenGL', 'glfw',
'badapted', #'darc_toolbox', # adaptive methods from Ben Vincent
'egi_pynetstation', 'pylink', 'tobiiresearch',
'pyxid2', 'ftd2xx', # ftd2xx is used by cedrus
'Phidget22',
'hid',
'macropy',
]
)
packages.append('PyQt5')
packages.remove('PyQt6') # PyQt6 is not compatible with earlier PsychoPy versions
excludes.append('PyQt6') # and explicitly exclude it
# check the includes and packages are all available
missingPkgs = []
pipInstallLines = ''
packagePipNames = { # packages that are imported as one thing but installed as another
'OpenGL': 'pyopengl',
'opencv': 'opencv-python',
'googleapiclient': 'google-api-python-client',
'google': 'google-api-python-client',
'macropy': 'macropy3',
}
for pkg in includes+packages:
try:
importlib.import_module(pkg)
except ModuleNotFoundError:
if pkg in packagePipNames:
missingPkgs.append(packagePipNames[pkg])
elif pkg == 'pylink':
pipInstallLines += 'pip install --index-url=https://pypi.sr-support.com sr-research-pylink\n'
else:
missingPkgs.append(pkg)
except OSError as err:
if 'libftd2xx.dylib' in str(err):
raise ImportError(f"Missing package: ftd2xx. Please install the FTDI D2XX drivers from "
"https://www.ftdichip.com/Drivers/D2XX.htm")
except ImportError as err:
if 'eyelink' in str(err):
raise ImportError(f"It looks like the Eyelink dev kit is not installed "
"https://www.sr-research.com/support/thread-13.html")
if missingPkgs or pipInstallLines:
helpStr = f"You're missing some packages to include in standalone. Fix with:\n"
if missingPkgs:
helpStr += f"pip install {' '.join(missingPkgs)}\n"
helpStr += pipInstallLines
raise ImportError(helpStr)
else:
print("All packages appear to be present. Proceeding to build...")
setup(
app=['psychopy/app/psychopyApp.py'],
options=dict(py2app=dict(
includes=includes,
packages=packages,
excludes=excludes,
resources=resources,
argv_emulation=False, # must be False or app bundle pauses (py2app 0.21 and 0.24 tested)
site_packages=True,
frameworks=frameworks,
iconfile='psychopy/app/Resources/psychopy.icns',
plist=dict(
CFBundleIconFile='psychopy.icns',
CFBundleName = "PsychoPy",
CFBundleShortVersionString = version, # must be in X.X.X format
CFBundleVersion = version,
CFBundleExecutable = "PsychoPy",
CFBundleIdentifier = "org.opensciencetools.psychopy",
CFBundleLicense = "GNU GPLv3+",
NSHumanReadableCopyright = "Open Science Tools Limited",
CFBundleDocumentTypes=[dict(CFBundleTypeExtensions=['*'],
CFBundleTypeRole='Editor')],
CFBundleURLTypes=[dict(CFBundleURLName='psychopy', # respond to psychopy://
CFBundleURLSchemes='psychopy',
CFBundleTypeRole='Editor')],
LSEnvironment=dict(PATH="/usr/local/git/bin:/usr/local/bin:"
"/usr/local:/usr/bin:/usr/sbin"),
),
)) # end of the options dict
)
# ugly hack for opencv2:
# As of opencv 2.4.5 the cv2.so binary used rpath to a fixed
# location to find libs and even more annoyingly it then appended
# 'lib' to the rpath as well. These were fine for the packaged
# framework python but the libs in an app bundle are different.
# So, create symlinks so they appear in the same place as in framework python
rpath = "dist/PsychoPy.app/Contents/Resources/"
for libPath in opencvLibs:
libname = os.path.split(libPath)[-1]
realPath = "../../Frameworks/"+libname # relative path (w.r.t. the fake)
fakePath = os.path.join(rpath, "lib", libname)
os.symlink(realPath, fakePath)
# they even did this for Python lib itself, which is in diff location
realPath = "../Frameworks/Python.framework/Python" # relative to the fake path
fakePath = os.path.join(rpath, "Python")
os.symlink(realPath, fakePath)
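# Resulting layout (illustrative, with one example lib name): the loop above
# leaves symlinks like
#   dist/PsychoPy.app/Contents/Resources/lib/libopencv_core.2.4.dylib
#       -> ../../Frameworks/libopencv_core.2.4.dylib
#   dist/PsychoPy.app/Contents/Resources/Python
#       -> ../Frameworks/Python.framework/Python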

size: 9,337 | language: Python | extension: .py | total_lines: 214 | avg_line_length: 34.38785 | max_line_length: 105 | alphanum_fraction: 0.609713
repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,305
file_name: updateChangelogMD.py
file_path: psychopy_psychopy/docs/updateChangelogMD.py
content:
#!/usr/bin/env python
# this script replaces hashtags with a sphinx URL string (to the github issues or pull request)
# written by Jon with regex code by Jeremy
import re
from pathlib import Path
thisFolder = Path(__file__).absolute().parent
rootFolder = thisFolder.parent
input_path = rootFolder / 'psychopy/CHANGELOG.txt'
output_path = thisFolder / 'source/changelog.md'
def repl_link(match):
    """convert sphinx-formatted links `name <url>`_ into markdown [name](url)
    """
    name = match.group('name').strip()
    url = match.group('url')
    return "[{}]({})".format(name, url)
def repl_issue(m):
g = m.group(1)
return g.replace('#', '[#') + "](https://github.com/psychopy/psychopy/issues/" + g.strip(' (#') + ")"
def repl_commit(m):
g = m.group(1)
return g.replace('#', '[commit:')[:18] + "](https://github.com/psychopy/psychopy/commit/" + g.strip(' (#') + ")"
def repl_noncompat(m):
g = m.group(1)
g = g.replace('`', "'")
return g.replace('CHANGE', '<span style="color:red">CHANGE') + "</span>\n"
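# Worked examples of the three substitutions below (inputs are made up;
# outputs follow from the regexes and replacement functions above):
#   " (#1234)"   -> " ([#1234](https://github.com/psychopy/psychopy/issues/1234))"
#   " #3fa2b4c"  -> " [commit:3fa2b4c](https://github.com/psychopy/psychopy/commit/3fa2b4c)"
#   "CHANGE x\n" -> '<span style="color:red">CHANGE x</span>\n'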
# raw .txt form of changelog:
txt = open(input_path, "r", encoding='utf8').read()  # "U" mode removed in Python 3.11
# programmatic replacements:
link = re.compile(r'`(?P<name>.*)\<(?P<url>.*)\>`_')
print("found %i links to convert" %(len(link.findall(txt))))
txt_hash = link.sub(repl_link, txt)
hashtag = re.compile(r"([ (]#\d{3,5})\b")
print("found %i issue tags" %(len(hashtag.findall(txt_hash))))
txt_hash = hashtag.sub(repl_issue, txt_hash)
hashtag = re.compile(r"([ (]#[0-9a-f]{6,})\b")
print("found %i commit tags" %(len(hashtag.findall(txt_hash))))
txt_hash = hashtag.sub(repl_commit, txt_hash)
noncompat = re.compile(r"(CHANGE.*)\n")
print("found %i CHANGE" %(len(noncompat.findall(txt_hash))))
txt_final = noncompat.sub(repl_noncompat, txt_hash)
# # one-off specific .rst directives:
# newRST = txt_hashblue.replace('.. note::', """.. raw:: html
#
# <style> .red {color:red} </style>
#
# .. note::""", 1)
# add note about blue meaning a change?
with open(output_path, "w", encoding='utf8') as doc:
doc.write(txt_final)
print(f"generated {output_path}")
#test:
#text = "yes #123\n yes (#4567)\n; none of `#123, #3, #45, #12345 #123a"
#newText = expr.sub(repl, text)
#print newText

size: 2,241 | language: Python | extension: .py | total_lines: 56 | avg_line_length: 37.642857 | max_line_length: 117 | alphanum_fraction: 0.653757
repo_name: psychopy/psychopy | repo_stars: 1,662 | repo_forks: 900 | repo_open_issues: 218 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)

id: 5,306
file_name: updateAlerts_rst.py
file_path: psychopy_psychopy/docs/updateAlerts_rst.py
content:
"""Build rst files appropriate for the **currently-installed** psychopy.alerts
"""
from pathlib import Path
from psychopy.alerts import catalog
thisFolder = Path(__file__).parent
alertDocsRoot = thisFolder / "source/alerts"
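# one .rst file is written per alert, e.g. source/alerts/1234.rst, containing
# whichever of the Synopsis / Details / versions / Solutions sections exist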
for ID in catalog.alert:
alert = catalog.alert[ID]
if 'label' in alert:
label = alert['label']
else:
label = alert['synopsis']
    with open(alertDocsRoot / (str(ID) + '.rst'), 'w') as f:
        titleStr = f"{ID}: {label}\n"
        f.write(titleStr)
        f.write("=" * len(titleStr.rstrip()) + "\n\n")  # underline matches the title length (no trailing \n counted)
        if 'synopsis' in alert:
            f.write("Synopsis\n")
            f.write("-----------\n\n")
            f.write(alert["synopsis"] + "\n\n")
        if 'details' in alert:
            f.write("Details\n")
            f.write("-----------\n\n")
            f.write(alert["details"] + "\n\n")
        if 'versions' in alert:
            f.write("PsychoPy versions affected\n")
            f.write("---------------------------\n\n")
            f.write(alert["versions"] + "\n\n")
        if 'solutions' in alert:
            f.write("Solutions\n")
            f.write("-----------\n\n")
            f.write(alert["solutions"] + "\n\n")
| 1,214 | Python | .py | 32 | 29.59375 | 78 | 0.521628 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,307 | buildEpydoc.config | psychopy_psychopy/docs/buildEpydoc.config |
[epydoc] # Epydoc section marker (required by ConfigParser)
# Information about the project.
name: PsychoPy
url: https://www.psychopy.org/
docformat: restructuredtext
# The list of modules to document. Modules can be named using
# dotted names, module filenames, or package directory names.
# This option may be repeated.
modules: psychopy monitors __init__
exclude: demos setup* _win* app* serial.serial* MonitorCenter visual.AlphaStim _parallel _shadersPygame _shadersPyglet makeMovies visual.calib ext tests* configobj* preferences
# Write html output to the directory
output: html
target: build/html/epydoc
graph: all
| 643 | INI | .config | 14 | 43.5 | 177 | 0.792605 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,308 | updateChangelog.py | psychopy_psychopy/docs/updateChangelog.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# this script replaces hashtags with a sphinx URL string (to the github issues or pull request)
# written by Jon with regex code by Jeremy
import re
from pathlib import Path
import os
thisFolder = Path(__file__).absolute().parent
rootFolder = thisFolder.parent
input_path = rootFolder / 'psychopy/CHANGELOG.txt'
output_path = thisFolder / 'source/changelog.rst'
def repl_issue(m):
g = m.group(1)
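    # e.g. g == " (#1234" -> " (`#1234 <https://github.com/psychopy/psychopy/issues/1234>`_"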
return g.replace('#', '`#') + " <https://github.com/psychopy/psychopy/issues/" + g.strip(' (#') + ">`_"
def repl_commit(m):
g = m.group(1)
return g.replace('#', '`commit:')[:18] + " <https://github.com/psychopy/psychopy/commit/" + g.strip(' (#') + ">`_"
def repl_noncompat(m):
g = m.group(1)
g = g.replace('`', "'")
return g.replace('CHANGE', ':noncompat:`CHANGE') + "`\n"
print(thisFolder)
print(f"looking in {input_path.absolute()} from {os.getcwd()}")
# raw .txt form of changelog:
txt = open(input_path, "r", encoding='utf8').read()  # "U" mode was removed in Python 3.11
# programmatic replacements:
hashtag = re.compile(r"([ (]#\d{3,5})\b")
print("found %i issue tags" %(len(hashtag.findall(txt))))
txt_hash = hashtag.sub(repl_issue, txt)
hashtag = re.compile(r"([ (]#[0-9a-f]{6,})\b")
print("found %i commit tags" %(len(hashtag.findall(txt_hash))))
txt_hash = hashtag.sub(repl_commit, txt_hash)
noncompat = re.compile(r"(CHANGE.*)\n")
print("found %i CHANGE" %(len(noncompat.findall(txt_hash))))
txt_hash_noncompat = noncompat.sub(repl_noncompat, txt_hash)
# one-off specific .rst directives:
newRST = txt_hash_noncompat.replace(
    'PsychoPy uses **major.minor.patch** version numbers', """.. raw:: html

    <style> .noncompat {color:red} </style>

.. role:: noncompat

PsychoPy uses **major.minor.patch** version numbers""", 1)
# add note about blue meaning a change?
with open(output_path, "w", encoding='utf8') as doc:
doc.write(newRST)
print(f"generated {output_path}")
#test:
#text = "yes #123\n yes (#4567)\n; none of `#123, #3, #45, #12345 #123a"
#newText = expr.sub(repl, text)
#print(newText)
| 2,060 | Python | .py | 48 | 40.770833 | 119 | 0.67988 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,309 | update_authors.py | psychopy_psychopy/docs/update_authors.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Regenerate AUTHORS.md
"""
import codecs
import warnings
from datetime import datetime
from psychopy.core import shellCall
from pathlib import Path
repo_path = Path(__file__).parent.parent
authors_path = repo_path/'AUTHORS.md'
git_command = 'git --no-pager shortlog -s HEAD %s' % repo_path
last_run = datetime.utcnow().strftime('%B %d, %Y')
authors_header = """Authors
-------
**PsychoPy is developed through community effort.**
The project was created and is maintained by Jonathan Peirce.
The following individuals have contributed code or documentation to
PsychoPy:\n
"""
do_not_edit_note = """
---
*This list was auto-generated via `update_authors.py`. Do not edit manually.*\n
*Last updated on %s (UTC).*
""" % last_run
if __name__ == '__main__':
short_log = shellCall(git_command)
authors = []
for line in short_log.splitlines():
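        # each shortlog line looks like "   42\tJane Doe" (commit count, tab, author)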
        contributions, author = line.split('\t')
if author != 'unknown':
authors.append(author)
else:
msg = ('Unknown author found and skipped; please revise the output '
'of `git shortlog -se` and consider amending .mailmap')
warnings.warn(msg)
with codecs.open(authors_path, 'w', encoding='utf-8') as f:
f.write(authors_header)
f.writelines(['* %s\n' % author for author in authors])
f.write(do_not_edit_note)
| 1,426 | Python | .py | 40 | 31.3 | 80 | 0.669825 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,310 | __init__.py | psychopy_psychopy/docs/themes/__init__.py |
"""Sphinx psychopy_bootstrap theme."""
import os
VERSION = (3, 0, 0)
__version__ = ".".join(str(v) for v in VERSION)
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
theme_path = os.path.abspath(os.path.dirname(__file__))
return [theme_path]
def setup(app):
"""Setup."""
# add_html_theme is new in Sphinx 1.6+
if hasattr(app, 'add_html_theme'):
theme_path = get_html_theme_path()[0]
app.add_html_theme('psychopy_bootstrap', os.path.join(theme_path, 'psychopy_bootstrap'))
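# conf.py activates this via html_theme_path = ["../themes"] and html_theme = 'psychopy_bootstrap'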
| 564 | Python | .py | 15 | 33.666667 | 96 | 0.645872 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,311 | psychopy.css | psychopy_psychopy/docs/themes/psychopy_bootstrap/static/psychopy.css |
/*!
 * PsychoPy Bootstrap CSS based on the Modern Business template
 * Copyright 2013-2017 Start Bootstrap
 * Licensed under MIT (https://github.com/BlackrockDigital/startbootstrap-logomodern-business-nav/blob/master/LICENSE)
 */
@import url("./bootswatch-3.3.7/readable/bootstrap.min.css");
@import url("./bootstrap-sphinx.css");
@import url("https://use.fontawesome.com/releases/v6.6.0/css/all.css");
@import url("https://use.fontawesome.com/releases/v6.6.0/css/brands.css");
@import url("https://use.fontawesome.com/releases/v6.6.0/css/fontawesome.css");
@import url("https://fonts.googleapis.com/css?family=Indie+Flower");
@import url("https://fonts.googleapis.com/css?family=Arvo");
:root {
--black: #000000;
--midgrey: #66666e;
--lightgrey: #acacb0;
--white: #f2f2f2;
--red: #f2545b;
--blue: #02a9ea;
--green: #6ccc74;
--orange: #ec9703;
--yellow: #f1d302;
--purple: #c3bef7;
    --f1: "sans-serif";
    --f2: "Arvo";
    --f3: "Indie Flower";
--gradient: linear-gradient(#4582ec 0%, #fff 100%);
--theme-color: #66666E;
}
body {
padding-top: 54px;
}
html, body {
height: 100%; /* needed for the full-height sections to work*/
/*scroll-behavior: smooth;*/ /* this requires 2 clicks for internal links! */
font-family: sans-serif;
}
h1, .h1 {
font-size: 34px;
}
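/* the h1/h2/h3 ::before rules below are invisible spacers: they offset in-page
   anchor targets so headings are not hidden behind the fixed 101px navbar */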
h1::before {
display: block;
content: " ";
margin-top: -101px;
height: 101px;
visibility: hidden;
pointer-events: none;
}
h2, .h2 {
font-size: 28px;
}
h2::before {
display: block;
content: " ";
margin-top: -101px;
height: 101px;
visibility: hidden;
pointer-events: none;
}
h3, .h3 {
font-size: 24px;
}
h3::before {
display: block;
content: " ";
margin-top: -101px;
height: 101px;
visibility: hidden;
pointer-events: none;
}
.row {
display: flex;
flex-direction: row;
flex-wrap: wrap;
width: 100%;
justify-content: center;
align-items: center;
}
.column-collapse { /* will convert to row for <768 */
display: flex;
flex-direction: column;
flex-basis: 100%;
flex: 1;
}
/* special cases */
blockquote ul {
font-size: initial;
margin: -11px -22px;
margin-left: -22px;
}
blockquote {
border-left: none;
padding: 0;
margin: 0 0 22px;
font-size: initial;
/* border-left: 5px solid #4582ec; */
}
/* logos (big and small) in top left */
.large-logo img {
position: absolute;
/*top: -84px;*/
top: 5px;
left: 10px;
width: auto;
height: 120px;
}
.small-logo img {
display: none;
height: 60px;
top: 5px;
width: auto !important;
}
/* Logo with straplines in center of landing page */
.central-logo {
object-fit: cover;
align-content: center;
position: relative;
width: 90%;
padding: 2vw;
height: auto;
}
.central-logo .img-layer {
position: absolute;
object-fit: cover;
top: 25px;
left: 0px;
width: 90%;
height: auto;
}
.central-logo .icon3-layer {
position: absolute;
font-family: 'Indie Flower';
    color: darkred;  /* CSS color keywords take no quotes */
font-size: 8vw;
top: 2vw;
left: 83vw;
}
.central-logo .text-layer {
position: absolute;
top: 35%;
left: 40%;
font-family: sans-serif;
font-size: 1.8vw;
}
.central-logo .text-layer-abbr {
position: absolute;
top: 20%;
left: 40%;
font-family: sans-serif;
font-size: 2.5vw; /*bigger font but few words*/
}
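/* staggered fade-in: each .delayN runs the same 2s fadeIn, starting 0.5s after the previous one */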
.delay0 {
opacity: 0;
animation: fadeIn 2s;
animation-fill-mode: forwards;
}
.delay1 {
opacity: 0;
animation: fadeIn 2s;
animation-delay: 0.5s;
animation-fill-mode: forwards;
}
.delay2 {
opacity: 0;
animation: fadeIn 2s;
animation-delay: 1s;
animation-fill-mode: forwards;
}
.delay3 {
opacity: 0;
animation: fadeIn 2s;
animation-delay: 1.5s;
animation-fill-mode: forwards;
}
.delay4 {
opacity: 0;
animation: fadeIn 2s;
animation-delay: 2s;
animation-fill-mode: forwards;
}
.delay5 {
opacity: 0;
animation: fadeIn 2s;
animation-delay: 2.5s;
animation-fill-mode: forwards;
}
/* to set a section to be the height of the browser window*/
.full-height {
height: 100%;
}
.full-height-with-ticker {
height: 100%;
overflow: auto;
}
.title-tagline {
    color: black;
    font-size: 150%;
}
.tagline-buttons{
font-size: 2em;
font-weight: bold
}
/* @media (min-width: 992px) {
body {
padding-top: 56px;
}
} */
.bg-primary a {
color: white;
text-decoration: underline;
}
.bg-primary a:hover {
    color: red;
    text-decoration: underline;
}
.carousel-item {
height: 65vh;
min-height: 300px;
background: no-repeat center center scroll;
-webkit-background-size: cover;
-moz-background-size: cover;
-o-background-size: cover;
background-size: cover;
}
.portfolio-item {
margin-bottom: 30px;
}
/* Fixed/sticky icon bar (vertically aligned 50% from the top of the screen) */
.social-bar {
background-color: white;
position: fixed;
right: 0;
top: 200px;
padding: 8px;
-webkit-transform: translateY(-50%);
-ms-transform: translateY(-50%);
transform: translateY(-50%);
}
/* Style the icon bar links */
.social-bar a {
display: block;
text-align: center;
transition: all 0.3s ease;
font-size: 20px;
padding: 8px;
}
/* Style the social media icons with color, if you want (the bar's class is .social-bar, not .icon-bar) */
.social-bar a:hover {
background-color: #000;
}
.facebook {
background: white;
color: #3B5998;
}
.bluesky {
/* background: white; */
color: #1185FE;
}
.linkedin {
/* background: white; */
color: #0a66c2;
}
.twitter {
/* background: white; */
color: #000000;
}
.google {
/* background: white; */
color: #dd4b39;
}
.github {
/* background: white; */
color: black;
}
.youtube {
/* background: white; */
color: #bb0000;
}
/* landing page transitions */
@keyframes fadeIn {
from {
opacity: 0;
}
to {
opacity: 1;
}
}
/* set our responsive mode controls */
@media (max-width: 768px) {
.tagline-buttons{
font-size: 2em;
font-weight: bold
}
.title-tagline{
font-size: 150%;
}
.large-logo {
display: none;
}
.small-logo img {
display: block;
}
.social-bar {
display: none;
}
.column-collapse {
flex-direction: column;
}
body {
padding-top: 80px;
}
.container {
padding-right: 15px;
padding-left: 15px;
}
}
@media (min-width: 769px) {
.large-logo {
display: block;
}
.small-logo img {
display: none;
}
.social-bar {
display: block;
}
body {
padding-top: 80px;
}
.container {
padding-right: 100px;
padding-left: 100px;
}
}
/* override the bootstrap max-widths to prevent jerky expanding */
@media (max-width: 576px) {
.tagline-buttons{
font-size: 1.25em;
font-weight: bold
}
.title-tagline{
font-size: 100%;
}
.container {
max-width: 90%
}
.text-layer {
display: none
}
.text-layer-abbr {
display: block
}
}
@media (min-width: 576px) {
.container {
max-width: 90%;
}
.text-layer {
display: block;
}
.text-layer-abbr {
display: none
}
}
@media (min-width: 768px) {
.container {
max-width: 90%;
}
}
@media (min-width: 992px) {
.container {
max-width: 90%;
}
}
@media (min-width: 1200px) {
.container {
max-width: 100%;
}
.central-logo {
padding: 80px;
}
}
/* Classes for coloured text (first used in online/status.rst) */
.darkgreen {
color: darkgreen;
font-weight: bold;
}
.darkorange {
color: darkorange;
font-weight: bold;
}
.darkred {
color: darkred;
font-weight: bold;
}
.heroheader {
font-size: 60px;
}
.stats-container {
text-align: center; /* Center the text horizontally */
font-size: 16px;
font-weight: normal;
}
.red_button {
    background-color: #f44336;  /* red */
    font-size: 16px;
    border-radius: 4px;
    border: none;
    color: white;
    padding: 12px 28px;
    text-align: center;
    text-decoration: none;
    display: inline-block;
}
.fade {
-webkit-transition: opacity 1s ease-in-out;
-moz-transition: opacity 1s ease-in-out;
-o-transition: opacity 1s ease-in-out;
transition: opacity 1s ease-in-out;
}
.fade-out {
opacity: 0;
}
.fade-in {
opacity: 1;
}
| 8,705 | CSS | .css | 428 | 16.350467 | 118 | 0.627719 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,312 | conf.py | psychopy_psychopy/docs/source/conf.py |
# -*- coding: utf-8 -*-
#
# PsychoPy documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 27 12:08:21 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import pathlib
docs_folder = pathlib.Path(__file__).parent.parent
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx_design', 'sphinx_copybutton']
autoclass_content = 'both'
autosummary_generate = True
# autosummary_imported_members = True
intersphinx_mapping = {
'pathlib': ('https://docs.python.org/3/', None),
'psychxr': ('http://psychxr.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'documentation'
# General information about the project.
project = u'PsychoPy'
copyright = u'2002-18, Jonathan Peirce; 2019-21 Open Science Tools Ltd.'
# use restructured text epilog to get around problem with not being able to use replace and superscript together
rst_epilog = """
.. role:: raw-html(raw)
:format: html
.. |PsychoPy| replace:: :raw-html:`PsychoPy<sup>®</sup>`
.. |Pavlovia| replace:: :raw-html:`<a href="https://pavlovia.org">Pavlovia<sup>®</sup></a>`
"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = (docs_folder.parent/'psychopy/VERSION').open('r').read().strip() # the full version e.g. 2022.1.0.rc2
version = '.'.join(release.split('.')[0:2]) # the major/mid version e.g 2022.1
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix =
# NumPy-style doc settings.
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme_path = ["../themes"]
html_theme = 'psychopy_bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s" %(project, release)
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "PsychoPy"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = 'psychopyLogo.gif'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
smartquotes = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PsychoPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_engine = 'pdflatex' # xelatex or pdflatex
latex_elements = {
'extraclassoptions': 'openany,oneside'
}
# latex_elements = {
# 'babel': '\\usepackage{babel}',
# 'inputenc': '\\usepackage[utf8]{inputenc}'
# }
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('documentation', 'PsychoPyManual.tex', u'PsychoPy - Psychology software for Python',
u'Open Science Tools Ltd', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'psychopyLogo.gif'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# path to mathjax. Use default to load from cdnjs content delivery network.
#mathjax_path = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/3.0.0/es5/latest?tex-mml-chtml.js'
# Enable numfig (ThP, 2021-02-07)
numfig = True
| 8,167 | Python | .py | 184 | 42.304348 | 112 | 0.721914 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,313 | psychopy.svg | psychopy_psychopy/docs/source/_static/psychopy.svg |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="333.05197"
height="164.41785"
id="svg2"
version="1.1"
inkscape:version="0.47pre4 r22446"
sodipodi:docname="psychopy.svg.svg">
<defs
id="defs4">
<linearGradient
id="linearGradient3853"
inkscape:collect="always">
<stop
id="stop3855"
offset="0"
style="stop-color:#808080;stop-opacity:1" />
<stop
id="stop3857"
offset="1"
style="stop-color:#ffffff;stop-opacity:1" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3841">
<stop
style="stop-color:#666666;stop-opacity:1"
offset="0"
id="stop3843" />
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="1"
id="stop3845" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3831">
<stop
style="stop-color:#b3b3b3;stop-opacity:1"
offset="0"
id="stop3833" />
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="1"
id="stop3835" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3772">
<stop
style="stop-color:#222222;stop-opacity:1;"
offset="0"
id="stop3774" />
<stop
style="stop-color:#222222;stop-opacity:0;"
offset="1"
id="stop3776" />
</linearGradient>
<linearGradient
id="linearGradient3750">
<stop
id="stop3752"
offset="0"
style="stop-color:#000000;stop-opacity:1;" />
<stop
style="stop-color:#141414;stop-opacity:1;"
offset="0.12876198"
id="stop3762" />
<stop
id="stop3764"
offset="0.22307248"
style="stop-color:#262626;stop-opacity:1;" />
<stop
style="stop-color:#393939;stop-opacity:1;"
offset="0.31924155"
id="stop3766" />
<stop
id="stop3768"
offset="0.42719939"
style="stop-color:#4f4f4f;stop-opacity:1;" />
<stop
style="stop-color:#bbbbbb;stop-opacity:1;"
offset="0.64108545"
id="stop3760" />
<stop
style="stop-color:#d0d0d0;stop-opacity:1;"
offset="0.72748369"
id="stop3758" />
<stop
style="stop-color:#e5e5e5;stop-opacity:1;"
offset="0.81589973"
id="stop3756" />
<stop
id="stop3754"
offset="1"
style="stop-color:#f7f7f7;stop-opacity:1;" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3714">
<stop
style="stop-color:#ffffff;stop-opacity:1;"
offset="0"
id="stop3716" />
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="1"
id="stop3718" />
</linearGradient>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3750"
id="linearGradient3643"
x1="423.71222"
y1="782.60333"
x2="441.418"
y2="793.06329"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3714"
id="linearGradient3720"
x1="365.3989"
y1="486.5683"
x2="408.96875"
y2="614.06213"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3772"
id="linearGradient3778"
x1="471.34607"
y1="873.54211"
x2="467.4519"
y2="766.62939"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3831"
id="linearGradient3829"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.98832452,0,0,0.98832452,-44.22406,-133.90409)"
spreadMethod="reflect"
x1="411.04434"
y1="691.27911"
x2="476.58185"
y2="762.19476" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3841"
id="linearGradient3847"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3853"
id="linearGradient3851"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3853"
id="linearGradient2903"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3841"
id="linearGradient2905"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3750"
id="linearGradient2907"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect"
x1="423.71222"
y1="782.60333"
x2="441.418"
y2="793.06329" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3714"
id="linearGradient2909"
gradientUnits="userSpaceOnUse"
x1="365.3989"
y1="486.5683"
x2="408.96875"
y2="614.06213" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3772"
id="linearGradient2911"
gradientUnits="userSpaceOnUse"
x1="471.34607"
y1="873.54211"
x2="467.4519"
y2="766.62939" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3831"
id="linearGradient2913"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.98832452,0,0,0.98832452,-44.22406,-133.90409)"
spreadMethod="reflect"
x1="411.04434"
y1="691.27911"
x2="476.58185"
y2="762.19476" />
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.5749948"
inkscape:cx="182.52396"
inkscape:cy="208.1425"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
inkscape:window-width="1857"
inkscape:window-height="1179"
inkscape:window-x="61"
inkscape:window-y="19"
inkscape:window-maximized="0" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-326.81408,-525.12119)">
<g
id="g2884">
<g
id="g3859">
<path
transform="matrix(1.0077834,0,0,1.0077834,-53.148013,-148.49809)"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
sodipodi:ry="81.573997"
sodipodi:rx="81.573997"
sodipodi:cy="749.99072"
sodipodi:cx="458.60153"
id="path3794"
style="fill:#999999;fill-opacity:1;stroke:none"
sodipodi:type="arc" />
<path
transform="matrix(-1.0038917,0,0,-1.0038917,869.4093,1360.2396)"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
sodipodi:ry="81.573997"
sodipodi:rx="81.573997"
sodipodi:cy="749.99072"
sodipodi:cx="458.60153"
id="path3849"
style="fill:url(#linearGradient2903);fill-opacity:1;stroke:none"
sodipodi:type="arc" />
<path
sodipodi:type="arc"
style="fill:url(#linearGradient2905);fill-opacity:1;stroke:none"
id="path3837"
sodipodi:cx="458.60153"
sodipodi:cy="749.99072"
sodipodi:rx="81.573997"
sodipodi:ry="81.573997"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
transform="matrix(0.93384114,0,0,0.93384114,-19.237984,-93.042077)" />
<path
transform="matrix(0.89674453,0,0,0.89674453,-2.22543,-65.219961)"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
sodipodi:ry="81.573997"
sodipodi:rx="81.573997"
sodipodi:cy="749.99072"
sodipodi:cx="458.60153"
id="path3627"
style="fill:url(#linearGradient2907);fill-opacity:1;stroke:#616161;stroke-width:1.11514485;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:0.65098039;stroke-dasharray:none"
sodipodi:type="arc" />
<path
style="fill:url(#linearGradient2909);fill-opacity:1;stroke:none"
d="m 409.03125,534.1875 c -40.4002,0 -73.15625,32.75605 -73.15625,73.15625 0,6.44798 0.83978,12.70002 2.40625,18.65625 19.41506,-25.1435 49.8382,-41.34375 84.0625,-41.34375 22.13372,0 42.7086,6.75694 59.71875,18.34375 -2.23978,-38.38544 -34.08407,-68.8125 -73.03125,-68.8125 z"
id="path3692" />
<path
sodipodi:type="arc"
style="fill:url(#linearGradient2911);fill-opacity:1;stroke:none"
id="path3770"
sodipodi:cx="458.60153"
sodipodi:cy="749.99072"
sodipodi:rx="81.573997"
sodipodi:ry="81.573997"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
transform="matrix(0.89674453,0,0,0.89674453,-2.22543,-65.219961)" />
<path
style="fill:url(#linearGradient2913);fill-opacity:1;stroke:none"
d="m 409.03144,526.70387 c -44.52607,0 -80.64007,36.114 -80.64007,80.64007 0,44.52607 36.114,80.60844 80.64007,80.60844 44.52607,0 80.60844,-36.08237 80.60844,-80.60844 0,-44.52607 -36.08237,-80.64007 -80.60844,-80.64007 z m 0,5.62899 c 41.41541,0 74.97945,33.59566 74.97945,75.01108 0,41.41541 -33.56404,74.97945 -74.97945,74.97945 -41.41542,0 -75.01108,-33.56404 -75.01108,-74.97945 0,-41.41542 33.59566,-75.01108 75.01108,-75.01108 z"
id="path2853" />
</g>
<g
id="text2863"
style="font-size:55.88455963px;font-style:italic;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Monotype Corsiva;-inkscape-font-specification:Monotype Corsiva Bold Italic">
<path
id="path2868"
d="m 484.8175,561.61858 0.32745,-1.22793 c 2.63776,-0.25466 4.89352,-0.61849 6.76727,-1.0915 -0.45481,1.27344 -0.94598,2.93797 -1.47352,4.99359 l -2.01926,7.66776 2.51044,0.16372 c 1.16423,0.0546 2.42855,-0.23647 3.79294,-0.87319 1.38253,-0.63669 2.61047,-1.75547 3.6838,-3.35635 1.09146,-1.60084 1.63721,-3.32904 1.63724,-5.1846 -3e-5,-1.83733 -0.66402,-3.29265 -1.99198,-4.36599 -1.30982,-1.07327 -3.31999,-1.60992 -6.03051,-1.60995 -3.09259,3e-5 -6.12149,0.77317 -9.0867,2.31943 -2.96524,1.52812 -5.25738,3.49281 -6.87642,5.89407 -1.60086,2.38312 -2.40129,4.73893 -2.40129,7.06743 0,1.14609 0.2092,2.14662 0.62761,3.00161 0.4184,0.83683 1.16426,1.66455 2.23757,2.48316 0.89138,0.6549 1.33707,1.29161 1.33708,1.91011 -10e-6,0.40023 -0.14554,0.73677 -0.4366,1.00964 -0.29107,0.27288 -0.63671,0.40931 -1.03692,0.40931 -1.07331,0 -2.11023,-0.73675 -3.11076,-2.21028 -1.00054,-1.47351 -1.50081,-3.33814 -1.50081,-5.59391 0,-4.12948 1.58267,-7.89513 4.74801,-11.29698 4.14767,-4.4751 9.71429,-6.71266 16.69988,-6.7127 2.65594,4e-5 4.82983,0.34568 6.52168,1.03692 1.70998,0.69132 2.98339,1.63728 3.82023,2.83789 0.85497,1.20067 1.28247,2.50137 1.28251,3.9021 -4e-5,1.7828 -0.60036,3.53828 -1.80097,5.26646 -1.18248,1.71003 -2.93797,3.09259 -5.26646,4.14768 -2.31036,1.05513 -4.88447,1.58269 -7.72233,1.58267 -0.43662,2e-5 -1.1188,-0.0273 -2.04655,-0.0819 l -2.15571,8.48638 c -0.36385,1.43714 -0.77316,2.85608 -1.22793,4.25683 1.41893,0.34564 2.65595,0.51846 3.71108,0.51846 1.32797,0 2.75601,-0.25468 4.28412,-0.76405 l -0.32745,1.52809 c -1.41896,0.38203 -2.61961,0.57304 -3.60193,0.57304 -0.94598,0 -2.32854,-0.10005 -4.14768,-0.30016 -1.45534,-0.18192 -2.6378,-0.27288 -3.54736,-0.27288 -1.7646,0 -3.57466,0.20921 -5.43019,0.62761 l 0.32745,-1.44623 c 1.89191,-0.30925 3.18351,-0.75495 3.87481,-1.33708 0.98233,-0.81862 1.78276,-2.46495 2.40129,-4.93901 l 4.12039,-16.34515 c 0.1819,-0.76402 0.27286,-1.30067 0.27287,-1.60995 -1e-5,-0.29104 -0.11826,-0.54572 -0.35473,-0.76405 -0.23651,-0.23646 -0.51848,-0.35471 -0.84591,-0.35473 -0.12736,2e-5 -0.30927,0.0182 -0.54575,0.0546" />
<path
id="path2870"
d="m 514.6699,564.75663 c 1.61903,2e-5 2.86515,0.31837 3.73837,0.95506 0.89137,0.61853 1.33706,1.3371 1.33708,2.1557 -2e-5,0.49119 -0.19103,0.91869 -0.57303,1.28251 -0.36386,0.36385 -0.80955,0.54576 -1.33708,0.54575 -0.34566,10e-6 -0.64582,-0.0728 -0.90049,-0.2183 -0.2547,-0.16371 -0.61853,-0.55483 -1.09149,-1.17336 -0.89141,-1.18243 -1.7737,-1.77366 -2.64688,-1.77368 -0.74587,2e-5 -1.40986,0.29109 -1.99198,0.87319 -0.56395,0.56396 -0.84592,1.27344 -0.84591,2.12842 -1e-5,0.74587 0.18191,1.47354 0.54575,2.18299 0.36382,0.6913 1.28249,1.81917 2.75603,3.38364 2.01924,2.14661 3.02888,4.18407 3.0289,6.11237 -2e-5,1.92831 -0.81864,3.62013 -2.45587,5.07545 -1.63726,1.43714 -3.74748,2.15571 -6.33067,2.15571 -2.16481,0 -3.77476,-0.4366 -4.82987,-1.3098 -1.05511,-0.89138 -1.58267,-1.98288 -1.58267,-3.27448 0,-0.6367 0.20011,-1.16426 0.60033,-1.58267 0.40021,-0.4184 0.89138,-0.62761 1.47351,-0.62761 0.50936,0 0.94596,0.17282 1.3098,0.51846 0.36382,0.32745 0.54574,0.71857 0.54575,1.17336 -1e-5,0.18192 -0.0182,0.41841 -0.0546,0.70947 -0.0182,0.23649 -0.0273,0.4366 -0.0273,0.60032 0,0.60033 0.25468,1.10969 0.76405,1.52809 0.49117,0.41841 1.16425,0.62761 2.01927,0.62761 0.98233,0 1.84643,-0.16372 2.5923,-0.49117 0.74584,-0.34564 1.32797,-0.855 1.74639,-1.52809 0.41839,-0.69128 0.6276,-1.37346 0.62761,-2.04656 -1e-5,-0.65489 -0.20012,-1.30978 -0.60032,-1.96469 -0.38204,-0.65489 -1.25523,-1.73729 -2.61959,-3.2472 -1.89194,-2.11021 -2.8379,-4.13857 -2.83789,-6.08508 -1e-5,-1.78276 0.71856,-3.33814 2.15571,-4.66614 1.43712,-1.34616 3.26537,-2.01925 5.48476,-2.01927" />
<path
id="path2872"
d="m 520.5094,570.32325 -0.8732,-1.2825 2.01927,-1.36437 c 1.96468,-1.34616 3.34724,-2.21026 4.14768,-2.5923 0.45478,-0.21828 0.87318,-0.32743 1.25522,-0.32745 0.38201,2e-5 0.74584,0.21832 1.09149,0.6549 1.60085,2.01928 2.94703,4.74802 4.03854,8.18621 1.09147,3.42003 1.63722,6.73999 1.63724,9.9599 -2e-5,0.41841 -0.0182,1.05511 -0.0546,1.91011 3.07436,-5.65757 4.61155,-10.15089 4.61157,-13.47996 -2e-5,-0.69127 -0.15465,-1.60994 -0.46388,-2.75603 -0.32747,-1.27339 -0.4912,-2.18297 -0.49118,-2.72874 -2e-5,-0.52753 0.16371,-0.96413 0.49118,-1.30979 0.34561,-0.34562 0.77312,-0.51844 1.2825,-0.51846 0.63669,2e-5 1.14605,0.24561 1.5281,0.73676 0.40019,0.49119 0.60029,1.19157 0.60032,2.10112 -3e-5,2.34674 -0.53668,5.27558 -1.60996,8.78654 -1.07332,3.49279 -2.76514,7.25845 -5.07545,11.29698 -2.29216,4.02034 -4.39328,7.00375 -6.30338,8.95026 -1.89194,1.96468 -3.72929,3.36543 -5.51206,4.20226 -1.76459,0.85499 -3.43821,1.28249 -5.02087,1.2825 -1.20065,-1e-5 -2.10113,-0.22741 -2.70146,-0.68218 -0.61851,-0.43661 -0.92776,-0.96417 -0.92777,-1.58267 10e-6,-0.54576 0.2365,-1.01874 0.70948,-1.41894 0.47298,-0.38204 1.10968,-0.57305 1.91011,-0.57304 0.61851,-10e-6 1.46442,0.11824 2.53773,0.35474 1.14606,0.25467 1.9283,0.38201 2.34671,0.38202 1.10968,-10e-6 2.28304,-0.46389 3.52008,-1.39166 1.6918,-1.27341 3.02888,-2.79241 4.01124,-4.55699 1.00052,-1.76459 1.50079,-3.54736 1.50081,-5.34833 -2e-5,-2.52862 -0.37294,-5.56662 -1.11879,-9.11398 -0.72767,-3.54735 -1.76459,-6.25789 -3.11076,-8.13164 -0.6731,-0.96414 -1.39166,-1.44621 -2.1557,-1.44623 -0.8914,2e-5 -2.16481,0.60034 -3.82023,1.80096" />
<path
id="path2874"
d="m 557.94769,581.81124 0.57303,1.58267 c -4.32961,3.40183 -7.98612,5.10274 -10.96952,5.10274 -1.58268,0 -2.73785,-0.56394 -3.4655,-1.69182 -0.70948,-1.12787 -1.06422,-2.55591 -1.06421,-4.28412 -10e-6,-2.87426 0.76404,-5.83039 2.29214,-8.86839 1.52808,-3.03798 3.38362,-5.3665 5.56663,-6.98557 1.70999,-1.2552 3.6383,-1.88281 5.78492,-1.88283 1.34616,2e-5 2.36489,0.2547 3.05619,0.76404 0.69126,0.50939 1.0369,1.1279 1.03692,1.85554 -2e-5,0.56396 -0.18194,1.02785 -0.54575,1.39166 -0.36385,0.34566 -0.87322,0.51848 -1.52809,0.51846 -0.63673,2e-5 -1.45535,-0.29105 -2.45587,-0.87319 -1.03693,-0.6185 -1.90103,-0.92776 -2.5923,-0.92778 -0.8914,2e-5 -1.78279,0.40933 -2.67416,1.22794 -0.87321,0.80044 -1.71002,2.36492 -2.51044,4.69343 -0.80044,2.32853 -1.20066,4.66615 -1.20065,7.01285 -10e-6,1.72821 0.33654,3.06529 1.00964,4.01125 0.49116,0.72767 1.18244,1.0915 2.07384,1.09149 2.1284,10e-6 4.66612,-1.24612 7.61318,-3.73837" />
<path
id="path2876"
d="m 583.8707,581.94768 0.70947,1.00963 c -1.67365,1.85555 -3.39275,3.32907 -5.15731,4.42056 -1.20067,0.74585 -2.19211,1.11878 -2.97433,1.11878 -0.63672,0 -1.15518,-0.25468 -1.55538,-0.76405 -0.38204,-0.50936 -0.57305,-1.30069 -0.57303,-2.374 -2e-5,-0.72766 0.14551,-1.78277 0.4366,-3.16533 0.30924,-1.40075 1.16424,-4.39326 2.56501,-8.97755 0.61849,-2.25574 0.92775,-3.57463 0.92777,-3.95667 -2e-5,-0.6185 -0.30928,-0.92775 -0.92777,-0.92777 -0.76407,2e-5 -1.71912,0.38204 -2.86517,1.14607 -1.60088,1.05513 -3.01982,2.4013 -4.25684,4.03853 -0.78225,1.05512 -1.40986,2.6196 -1.88283,4.69343 l -2.18299,9.49601 -4.06582,0 c 0.87319,-3.18353 1.54628,-5.78492 2.01927,-7.80419 l 4.01125,-17.5185 c 0.72765,-3.21989 1.6827,-5.69394 2.86517,-7.42217 1.20063,-1.74636 2.75601,-3.13801 4.66614,-4.17497 1.92829,-1.03688 3.82931,-1.55534 5.70307,-1.55538 0.78221,4e-5 1.40072,0.19105 1.85554,0.57303 0.45476,0.38206 0.68216,0.86414 0.68218,1.44623 -2e-5,0.49121 -0.22742,0.92781 -0.68218,1.3098 -0.43663,0.36387 -1.03695,0.54578 -1.80097,0.54575 -0.43662,3e-5 -1.00056,-0.0454 -1.69182,-0.13644 -1.56449,-0.21826 -2.64689,-0.32741 -3.2472,-0.32745 -0.83683,4e-5 -1.51901,0.33658 -2.04655,1.00963 -0.52757,0.67313 -1.07332,2.25579 -1.63724,4.74801 l -3.05619,13.47997 c 2.16479,-2.27393 4.52059,-4.19315 7.06743,-5.75764 1.49169,-0.92775 2.69234,-1.39164 3.60194,-1.39166 0.70945,2e-5 1.26429,0.21832 1.66453,0.6549 0.40019,0.41843 0.6003,1.00965 0.60032,1.77368 -2e-5,0.61853 -0.0728,1.23705 -0.2183,1.85554 -0.23651,1.07332 -0.79136,3.01982 -1.66453,5.8395 -1.18247,3.89301 -1.88285,6.31249 -2.10113,7.25844 -0.20013,0.94597 -0.30018,1.79188 -0.30016,2.53773 -2e-5,0.29107 0.0455,0.50937 0.13644,0.6549 0.10913,0.14553 0.26375,0.2183 0.46388,0.2183 0.43658,0 1.2552,-0.51846 2.45587,-1.55538 0.67306,-0.56394 1.49168,-1.23703 2.45586,-2.01927" />
<path
id="path2878"
d="m 600.05212,564.78392 c 1.78276,2e-5 3.26537,0.70949 4.44785,2.12841 1.18243,1.41897 1.77365,3.39275 1.77368,5.92136 -3e-5,4.03855 -1.41897,7.66777 -4.25684,10.88767 -2.8379,3.20172 -5.83951,4.80258 -9.00483,4.80258 -1.94651,0 -3.52008,-0.63671 -4.72072,-1.91012 -1.18246,-1.27341 -1.77368,-3.0107 -1.77368,-5.21189 0,-3.82022 1.09149,-7.34939 3.27449,-10.5875 2.74692,-4.02032 6.16693,-6.03049 10.26005,-6.03051 m -2.31942,2.04655 c -1.5463,2e-5 -3.04711,1.10971 -4.50242,3.32906 -1.43715,2.2012 -2.15571,5.24829 -2.15571,9.14127 0,2.36492 0.44569,4.22955 1.33709,5.59392 0.65488,1.00054 1.52808,1.5008 2.61958,1.5008 1.52808,0 2.91974,-0.94596 4.17497,-2.83788 1.60085,-2.45586 2.40127,-5.50295 2.40129,-9.14128 -2e-5,-2.72872 -0.37294,-4.67522 -1.11878,-5.8395 -0.72768,-1.16424 -1.64635,-1.74637 -2.75602,-1.74639" />
<path
id="path2880"
d="m 621.25442,561.61858 0.32745,-1.22793 c 2.63776,-0.25466 4.89352,-0.61849 6.76727,-1.0915 -0.45481,1.27344 -0.94598,2.93797 -1.47352,4.99359 l -2.01926,7.66776 2.51044,0.16372 c 1.16423,0.0546 2.42855,-0.23647 3.79294,-0.87319 1.38253,-0.63669 2.61047,-1.75547 3.6838,-3.35635 1.09146,-1.60084 1.63721,-3.32904 1.63724,-5.1846 -3e-5,-1.83733 -0.66402,-3.29265 -1.99198,-4.36599 -1.30982,-1.07327 -3.31999,-1.60992 -6.03051,-1.60995 -3.09259,3e-5 -6.12149,0.77317 -9.0867,2.31943 -2.96524,1.52812 -5.25738,3.49281 -6.87642,5.89407 -1.60086,2.38312 -2.40129,4.73893 -2.40129,7.06743 0,1.14609 0.2092,2.14662 0.62761,3.00161 0.4184,0.83683 1.16426,1.66455 2.23757,2.48316 0.89138,0.6549 1.33707,1.29161 1.33708,1.91011 -1e-5,0.40023 -0.14554,0.73677 -0.4366,1.00964 -0.29107,0.27288 -0.63671,0.40931 -1.03692,0.40931 -1.07331,0 -2.11023,-0.73675 -3.11076,-2.21028 -1.00054,-1.47351 -1.50081,-3.33814 -1.50081,-5.59391 0,-4.12948 1.58267,-7.89513 4.74801,-11.29698 4.14767,-4.4751 9.71429,-6.71266 16.69988,-6.7127 2.65594,4e-5 4.82983,0.34568 6.52168,1.03692 1.70998,0.69132 2.98339,1.63728 3.82023,2.83789 0.85497,1.20067 1.28247,2.50137 1.28251,3.9021 -4e-5,1.7828 -0.60036,3.53828 -1.80097,5.26646 -1.18248,1.71003 -2.93797,3.09259 -5.26646,4.14768 -2.31036,1.05513 -4.88447,1.58269 -7.72233,1.58267 -0.43662,2e-5 -1.1188,-0.0273 -2.04655,-0.0819 l -2.15571,8.48638 c -0.36385,1.43714 -0.77316,2.85608 -1.22793,4.25683 1.41893,0.34564 2.65595,0.51846 3.71108,0.51846 1.32797,0 2.75601,-0.25468 4.28412,-0.76405 l -0.32745,1.52809 c -1.41896,0.38203 -2.61961,0.57304 -3.60193,0.57304 -0.94598,0 -2.32854,-0.10005 -4.14768,-0.30016 -1.45534,-0.18192 -2.6378,-0.27288 -3.54736,-0.27288 -1.7646,0 -3.57466,0.20921 -5.43019,0.62761 l 0.32745,-1.44623 c 1.89191,-0.30925 3.18351,-0.75495 3.87481,-1.33708 0.98233,-0.81862 1.78276,-2.46495 2.40129,-4.93901 l 4.12039,-16.34515 c 0.1819,-0.76402 0.27286,-1.30067 0.27287,-1.60995 -10e-6,-0.29104 -0.11826,-0.54572 -0.35473,-0.76405 -0.23651,-0.23646 -0.51848,-0.35471 -0.84591,-0.35473 -0.12736,2e-5 -0.30927,0.0182 -0.54575,0.0546" />
<path
id="path2882"
d="m 639.0458,570.32325 -0.8732,-1.2825 2.01927,-1.36437 c 1.96468,-1.34616 3.34724,-2.21026 4.14768,-2.5923 0.45478,-0.21828 0.87318,-0.32743 1.25522,-0.32745 0.38201,2e-5 0.74584,0.21832 1.09149,0.6549 1.60085,2.01928 2.94703,4.74802 4.03854,8.18621 1.09147,3.42003 1.63722,6.73999 1.63724,9.9599 -2e-5,0.41841 -0.0182,1.05511 -0.0546,1.91011 3.07436,-5.65757 4.61155,-10.15089 4.61157,-13.47996 -2e-5,-0.69127 -0.15465,-1.60994 -0.46388,-2.75603 -0.32747,-1.27339 -0.4912,-2.18297 -0.49118,-2.72874 -2e-5,-0.52753 0.16371,-0.96413 0.49118,-1.30979 0.34561,-0.34562 0.77312,-0.51844 1.2825,-0.51846 0.63669,2e-5 1.14605,0.24561 1.5281,0.73676 0.40019,0.49119 0.60029,1.19157 0.60032,2.10112 -3e-5,2.34674 -0.53668,5.27558 -1.60996,8.78654 -1.07332,3.49279 -2.76514,7.25845 -5.07545,11.29698 -2.29216,4.02034 -4.39328,7.00375 -6.30338,8.95026 -1.89194,1.96468 -3.72929,3.36543 -5.51206,4.20226 -1.76459,0.85499 -3.43821,1.28249 -5.02087,1.2825 -1.20065,-1e-5 -2.10113,-0.22741 -2.70146,-0.68218 -0.61851,-0.43661 -0.92776,-0.96417 -0.92777,-1.58267 10e-6,-0.54576 0.2365,-1.01874 0.70948,-1.41894 0.47298,-0.38204 1.10968,-0.57305 1.91011,-0.57304 0.61851,-10e-6 1.46442,0.11824 2.53773,0.35474 1.14606,0.25467 1.9283,0.38201 2.34671,0.38202 1.10968,-10e-6 2.28304,-0.46389 3.52008,-1.39166 1.6918,-1.27341 3.02888,-2.79241 4.01124,-4.55699 1.00052,-1.76459 1.50079,-3.54736 1.50081,-5.34833 -2e-5,-2.52862 -0.37294,-5.56662 -1.11879,-9.11398 -0.72767,-3.54735 -1.76459,-6.25789 -3.11076,-8.13164 -0.6731,-0.96414 -1.39166,-1.44621 -2.1557,-1.44623 -0.8914,2e-5 -2.16481,0.60034 -3.82023,1.80096" />
</g>
</g>
</g>
</svg>
| 25,134 | SVG | .svg | 362 | 61.046961 | 2,092 | 0.647854 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,314 | psychopyIcon.svg | psychopy_psychopy/docs/source/_static/psychopyIcon.svg |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="164.41785"
height="164.41785"
id="svg2"
version="1.1"
inkscape:version="0.48.4 r9939"
sodipodi:docname="psychopy.svg">
<defs
id="defs4">
<linearGradient
id="linearGradient3853"
inkscape:collect="always">
<stop
id="stop3855"
offset="0"
style="stop-color:#808080;stop-opacity:1" />
<stop
id="stop3857"
offset="1"
style="stop-color:#ffffff;stop-opacity:1" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3841">
<stop
style="stop-color:#666666;stop-opacity:1"
offset="0"
id="stop3843" />
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="1"
id="stop3845" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3831">
<stop
style="stop-color:#b3b3b3;stop-opacity:1"
offset="0"
id="stop3833" />
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="1"
id="stop3835" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3772">
<stop
style="stop-color:#222222;stop-opacity:1;"
offset="0"
id="stop3774" />
<stop
style="stop-color:#222222;stop-opacity:0;"
offset="1"
id="stop3776" />
</linearGradient>
<linearGradient
id="linearGradient3750">
<stop
id="stop3752"
offset="0"
style="stop-color:#000000;stop-opacity:1;" />
<stop
style="stop-color:#141414;stop-opacity:1;"
offset="0.12876198"
id="stop3762" />
<stop
id="stop3764"
offset="0.22307248"
style="stop-color:#262626;stop-opacity:1;" />
<stop
style="stop-color:#393939;stop-opacity:1;"
offset="0.31924155"
id="stop3766" />
<stop
id="stop3768"
offset="0.42719939"
style="stop-color:#4f4f4f;stop-opacity:1;" />
<stop
style="stop-color:#bbbbbb;stop-opacity:1;"
offset="0.64108545"
id="stop3760" />
<stop
style="stop-color:#d0d0d0;stop-opacity:1;"
offset="0.72748369"
id="stop3758" />
<stop
style="stop-color:#e5e5e5;stop-opacity:1;"
offset="0.81589973"
id="stop3756" />
<stop
id="stop3754"
offset="1"
style="stop-color:#f7f7f7;stop-opacity:1;" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3714">
<stop
style="stop-color:#ffffff;stop-opacity:1;"
offset="0"
id="stop3716" />
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="1"
id="stop3718" />
</linearGradient>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3750"
id="linearGradient3643"
x1="423.71222"
y1="782.60333"
x2="441.418"
y2="793.06329"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3714"
id="linearGradient3720"
x1="365.3989"
y1="486.5683"
x2="408.96875"
y2="614.06213"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3772"
id="linearGradient3778"
x1="471.34607"
y1="873.54211"
x2="467.4519"
y2="766.62939"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3831"
id="linearGradient3829"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.98832452,0,0,0.98832452,-44.22406,-133.90409)"
spreadMethod="reflect"
x1="411.04434"
y1="691.27911"
x2="476.58185"
y2="762.19476" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3841"
id="linearGradient3847"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3853"
id="linearGradient3851"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3853"
id="linearGradient2903"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3841"
id="linearGradient2905"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3750"
id="linearGradient2907"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect"
x1="423.71222"
y1="782.60333"
x2="441.418"
y2="793.06329" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3714"
id="linearGradient2909"
gradientUnits="userSpaceOnUse"
x1="365.3989"
y1="486.5683"
x2="408.96875"
y2="614.06213" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3772"
id="linearGradient2911"
gradientUnits="userSpaceOnUse"
x1="471.34607"
y1="873.54211"
x2="467.4519"
y2="766.62939" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3831"
id="linearGradient2913"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.98832452,0,0,0.98832452,-44.22406,-133.90409)"
spreadMethod="reflect"
x1="411.04434"
y1="691.27911"
x2="476.58185"
y2="762.19476" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3853"
id="linearGradient3054"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3841"
id="linearGradient3056"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3750"
id="linearGradient3058"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect"
x1="423.71222"
y1="782.60333"
x2="441.418"
y2="793.06329" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3714"
id="linearGradient3060"
gradientUnits="userSpaceOnUse"
x1="365.3989"
y1="486.5683"
x2="408.96875"
y2="614.06213" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3772"
id="linearGradient3062"
gradientUnits="userSpaceOnUse"
x1="471.34607"
y1="873.54211"
x2="467.4519"
y2="766.62939" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3831"
id="linearGradient3064"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.98832452,0,0,0.98832452,-44.22406,-133.90409)"
spreadMethod="reflect"
x1="411.04434"
y1="691.27911"
x2="476.58185"
y2="762.19476" />
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.5749948"
inkscape:cx="182.52396"
inkscape:cy="208.14249"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
inkscape:window-width="1615"
inkscape:window-height="1026"
inkscape:window-x="65"
inkscape:window-y="24"
inkscape:window-maximized="1"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-326.81408,-525.12119)">
<g
id="g3859"
inkscape:export-xdpi="328.43149"
inkscape:export-ydpi="328.43149"
inkscape:export-filename="/home/lpzjwp/Dropbox/resources/logos/psychopy/main icon/psychopy600dpi.png">
<path
sodipodi:type="arc"
style="fill:#999999;fill-opacity:1;stroke:none"
id="path3794"
sodipodi:cx="458.60153"
sodipodi:cy="749.99072"
sodipodi:rx="81.573997"
sodipodi:ry="81.573997"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
transform="matrix(1.0077834,0,0,1.0077834,-53.148013,-148.49809)" />
<path
sodipodi:type="arc"
style="fill:url(#linearGradient3054);fill-opacity:1;stroke:none"
id="path3849"
sodipodi:cx="458.60153"
sodipodi:cy="749.99072"
sodipodi:rx="81.573997"
sodipodi:ry="81.573997"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
transform="matrix(-1.0038917,0,0,-1.0038917,869.4093,1360.2396)" />
<path
transform="matrix(0.93384114,0,0,0.93384114,-19.237984,-93.042077)"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
sodipodi:ry="81.573997"
sodipodi:rx="81.573997"
sodipodi:cy="749.99072"
sodipodi:cx="458.60153"
id="path3837"
style="fill:url(#linearGradient3056);fill-opacity:1;stroke:none"
sodipodi:type="arc" />
<path
sodipodi:type="arc"
style="fill:url(#linearGradient3058);fill-opacity:1;stroke:#616161;stroke-width:1.11514485;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:0.65098039;stroke-dasharray:none"
id="path3627"
sodipodi:cx="458.60153"
sodipodi:cy="749.99072"
sodipodi:rx="81.573997"
sodipodi:ry="81.573997"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
transform="matrix(0.89674453,0,0,0.89674453,-2.22543,-65.219961)" />
<path
inkscape:connector-curvature="0"
id="path3692"
d="m 409.03125,534.1875 c -40.4002,0 -73.15625,32.75605 -73.15625,73.15625 0,6.44798 0.83978,12.70002 2.40625,18.65625 19.41506,-25.1435 49.8382,-41.34375 84.0625,-41.34375 22.13372,0 42.7086,6.75694 59.71875,18.34375 -2.23978,-38.38544 -34.08407,-68.8125 -73.03125,-68.8125 z"
style="fill:url(#linearGradient3060);fill-opacity:1;stroke:none" />
<path
transform="matrix(0.89674453,0,0,0.89674453,-2.22543,-65.219961)"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
sodipodi:ry="81.573997"
sodipodi:rx="81.573997"
sodipodi:cy="749.99072"
sodipodi:cx="458.60153"
id="path3770"
style="fill:url(#linearGradient3062);fill-opacity:1;stroke:none"
sodipodi:type="arc" />
<path
inkscape:connector-curvature="0"
id="path2853"
d="m 409.03144,526.70387 c -44.52607,0 -80.64007,36.114 -80.64007,80.64007 0,44.52607 36.114,80.60844 80.64007,80.60844 44.52607,0 80.60844,-36.08237 80.60844,-80.60844 0,-44.52607 -36.08237,-80.64007 -80.60844,-80.64007 z m 0,5.62899 c 41.41541,0 74.97945,33.59566 74.97945,75.01108 0,41.41541 -33.56404,74.97945 -74.97945,74.97945 -41.41542,0 -75.01108,-33.56404 -75.01108,-74.97945 0,-41.41542 33.59566,-75.01108 75.01108,-75.01108 z"
style="fill:url(#linearGradient3064);fill-opacity:1;stroke:none" />
</g>
</g>
</svg>
| 13,716 | SVG | .svg | 397 | 26.732997 | 446 | 0.615333 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,315 | tutorial1.py | psychopy_psychopy/docs/source/coder/tutorial1.py |
from psychopy import visual, core, event #import some libraries from PsychoPy
from psychopy.hardware import keyboard
#create a window
mywin = visual.Window([800,600],monitor="testMonitor", units="deg")
#create some stimuli
grating = visual.GratingStim(win=mywin, mask='circle', size=3, pos=[-4,0], sf=3)
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0,0], sf=0, rgb=-1)
#create a keyboard component
kb = keyboard.Keyboard()
#draw the stimuli and update the window
while True: #this creates a never-ending loop
grating.setPhase(0.05, '+')#advance phase by 0.05 of a cycle
grating.draw()
fixation.draw()
mywin.flip()
if len(kb.getKeys()) > 0:
break
event.clearEvents()
#cleanup
mywin.close()
core.quit()
| 750 | Python | .py | 21 | 32.904762 | 80 | 0.731674 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,316 | tutorial3.py | psychopy_psychopy/docs/source/coder/tutorial3.py |
#This analysis script takes one or more staircase datafiles as input
#from a GUI. It then plots the staircases on top of each other on
#the left and a combined psychometric function from the same data
#on the right
from psychopy import data, gui, core
from psychopy.tools.filetools import fromFile
import pylab
#Open a dialog box to select files from
files = gui.fileOpenDlg('.')
if not files:
core.quit()
#get the data from all the files
allIntensities, allResponses = [],[]
for thisFileName in files:
thisDat = fromFile(thisFileName)
allIntensities.append( thisDat.intensities )
allResponses.append( thisDat.data )
#plot each staircase
pylab.subplot(121)
colors = 'brgkcmbrgkcm'
lines, names = [],[]
for fileN, thisStair in enumerate(allIntensities):
#lines.extend(pylab.plot(thisStair))
#names = files[fileN]
pylab.plot(thisStair, label=files[fileN])
#pylab.legend()
#get combined data
combinedInten, combinedResp, combinedN = \
data.functionFromStaircase(allIntensities, allResponses, 5)
#fit curve - in this case using a Weibull function
fit = data.FitWeibull(combinedInten, combinedResp, guess=[0.2, 0.5])
smoothInt = pylab.arange(min(combinedInten), max(combinedInten), 0.001)
smoothResp = fit.eval(smoothInt)
thresh = fit.inverse(0.8)
print(thresh)
#plot curve
pylab.subplot(122)
pylab.plot(smoothInt, smoothResp, '-')
pylab.plot([thresh, thresh],[0,0.8],'--'); pylab.plot([0, thresh],\
[0.8,0.8],'--')
pylab.title('threshold = %0.3f' %(thresh))
#plot points
pylab.plot(combinedInten, combinedResp, 'o')
pylab.ylim([0,1])
pylab.show()
| 1,591 | Python | .py | 45 | 33.288889 | 72 | 0.758285 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
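tutorial3.py above fits a Weibull function and reads the 80%-correct threshold off its inverse. A self-contained sketch of that idea, assuming one common Weibull parameterization (PsychoPy's FitWeibull may differ in detail):

import numpy as np
def weibull(x, alpha, beta, chance=0.5):
    # psychometric function rising from `chance` to 1
    return chance + (1 - chance) * (1 - np.exp(-(x / alpha) ** beta))
def weibull_inverse(p, alpha, beta, chance=0.5):
    # stimulus level at which weibull(...) returns p
    y = (p - chance) / (1 - chance)
    return alpha * (-np.log(1 - y)) ** (1 / beta)
alpha, beta = 0.2, 3.0
thresh = weibull_inverse(0.8, alpha, beta)
assert np.isclose(weibull(thresh, alpha, beta), 0.8)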
| 5,317 | tutorial2.py | psychopy_psychopy/docs/source/coder/tutorial2.py |
"""measure your JND in orientation using a staircase method"""
from psychopy import core, visual, gui, data, event
from psychopy.tools.filetools import fromFile, toFile
import numpy, random
try: # try to get a previous parameters file
expInfo = fromFile('lastParams.pickle')
except Exception:  # if not there then use a default set
expInfo = {'observer':'jwp', 'refOrientation':0}
expInfo['dateStr'] = data.getDateStr() # add the current time
# present a dialogue to change params
dlg = gui.DlgFromDict(expInfo, title='simple JND Exp', fixed=['dateStr'])
if dlg.OK:
toFile('lastParams.pickle', expInfo) # save params to file for next time
else:
core.quit() # the user hit cancel so exit
# make a text file to save data
fileName = expInfo['observer'] + expInfo['dateStr']
dataFile = open(fileName+'.csv', 'w') # a simple text file with 'comma-separated-values'
dataFile.write('targetSide,oriIncrement,correct\n')
# create the staircase handler
staircase = data.StairHandler(startVal = 20.0,
stepType = 'db', stepSizes=[8,4,4,2],
nUp=1, nDown=3, # will home in on the 80% threshold
nTrials=1)
# create window and stimuli
win = visual.Window([800,600],allowGUI=True,
monitor='testMonitor', units='deg')
foil = visual.GratingStim(win, sf=1, size=4, mask='gauss',
ori=expInfo['refOrientation'])
target = visual.GratingStim(win, sf=1, size=4, mask='gauss',
ori=expInfo['refOrientation'])
fixation = visual.GratingStim(win, color=-1, colorSpace='rgb',
tex=None, mask='circle', size=0.2)
# and some handy clocks to keep track of time
globalClock = core.Clock()
trialClock = core.Clock()
# display instructions and wait
message1 = visual.TextStim(win, pos=[0,+3],text='Hit a key when ready.')
message2 = visual.TextStim(win, pos=[0,-3],
text="Then press left or right to identify the %.1f deg probe." %expInfo['refOrientation'])
message1.draw()
message2.draw()
fixation.draw()
win.flip()#to show our newly drawn 'stimuli'
#pause until there's a keypress
event.waitKeys()
for thisIncrement in staircase: # will continue the staircase until it terminates!
# set location of stimuli
targetSide= random.choice([-1,1]) # will be either +1(right) or -1(left)
foil.setPos([-5*targetSide, 0])
target.setPos([5*targetSide, 0]) # in other location
# set orientation of probe
foil.setOri(expInfo['refOrientation'] + thisIncrement)
# draw all stimuli
foil.draw()
target.draw()
fixation.draw()
win.flip()
# wait 500ms; but use a loop of x frames for more accurate timing
core.wait(0.5)
# blank screen
fixation.draw()
win.flip()
# get response
    thisResp = None
    while thisResp is None:
allKeys=event.waitKeys()
for thisKey in allKeys:
if thisKey=='left':
if targetSide==-1: thisResp = 1 # correct
else: thisResp = -1 # incorrect
elif thisKey=='right':
if targetSide== 1: thisResp = 1 # correct
else: thisResp = -1 # incorrect
elif thisKey in ['q', 'escape']:
core.quit() # abort experiment
event.clearEvents() # clear other (eg mouse) events - they clog the buffer
# add the data to the staircase so it can calculate the next level
staircase.addData(thisResp)
dataFile.write('%i,%.3f,%i\n' %(targetSide, thisIncrement, thisResp))
core.wait(1)
# staircase has ended
dataFile.close()
staircase.saveAsPickle(fileName) # special python binary file to save all the info
# give some output to user in the command line in the output window
print('reversals:')
print(staircase.reversalIntensities)
approxThreshold = numpy.average(staircase.reversalIntensities[-6:])
print('mean of final 6 reversals = %.3f' % (approxThreshold))
# give some on-screen feedback
feedback1 = visual.TextStim(
win, pos=[0,+3],
text='mean of final 6 reversals = %.3f' % (approxThreshold))
feedback1.draw()
fixation.draw()
win.flip()
event.waitKeys() # wait for participant to respond
win.close()
core.quit()
| 4,246 | Python | .py | 99 | 37.171717 | 95 | 0.67046 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
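tutorial2.py above uses stepType='db' with stepSizes=[8,4,4,2]. A decibel step scales the stimulus multiplicatively rather than additively; a sketch of that arithmetic, assuming the amplitude convention (factor = 10 ** (dB / 20)):

def apply_db_step(value, step_db, direction):
    # direction > 0 makes the stimulus stronger, < 0 weaker
    factor = 10 ** (step_db / 20)
    return value * factor if direction > 0 else value / factor

print(apply_db_step(20.0, 8, -1))  # one 8 dB step down from startVal=20 -> ~7.96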
| 5,318 | compileTestimonials.py | psychopy_psychopy/docs/source/about/compileTestimonials.py |
import csv
from psychopy import gui
filename = gui.fileOpenDlg('.', allowed='*.csv')[0]
# use csv from python (not from numpy) due to handling newlines within quote char
with open(filename, 'r', newline='') as csvFile:
spamreader = csv.reader(csvFile, delimiter=',', quotechar='"', dialect=csv.excel)
headers = next(spamreader)
print('headers:', type(headers), headers)
entries=[]
for thisRow in spamreader:
print(thisRow)
thisEntry = {}
for fieldN, thisFieldName in enumerate(headers):
thisEntry[thisFieldName] = thisRow[fieldN]
entries.append(thisEntry)
companHead = "Your Company or Institution"
nameHead = 'Your name (or anon, but a name is nicer)'
testimHead = 'Your thoughts on PsychoPy'
posnHead = 'Your position'
with open('testimonialsText.html', 'w') as outFile:
for thisEntry in entries:
outFile.write(' <hr>%s <p>\n' %(thisEntry[testimHead].replace('\n', '<br>')))
nameStr = ' - <em>%s' %thisEntry[nameHead]
if thisEntry[posnHead]:
nameStr += ', %s' %thisEntry[posnHead]
if thisEntry[companHead]:
nameStr += ', %s' %thisEntry[companHead]
nameStr += ' </em><br>\n'
outFile.write(nameStr)
| 1,248 | Python | .py | 29 | 36.758621 | 88 | 0.655058 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
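compileTestimonials.py above builds one dict per row by zipping the header with each line; the standard library can do that directly with csv.DictReader. A minimal equivalent (the filename is illustrative):

import csv
with open('testimonials.csv', newline='') as csvFile:  # hypothetical input file
    entries = list(csv.DictReader(csvFile))  # the header row becomes the dict keys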
| 5,319 | plaid.py | psychopy_psychopy/docs/source/demos/plaid.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from psychopy import visual, core, event
#create a window to draw in
myWin = visual.Window((600,600), allowGUI=False)
#INITIALISE SOME STIMULI
grating1 = visual.GratingStim(myWin, mask="gauss",
color=[1.0, 1.0, 1.0],
opacity=1.0,
size=(1.0, 1.0),
sf=(4,0), ori=45)
grating2 = visual.GratingStim(myWin, mask="gauss",
color=[1.0, 1.0, 1.0],
opacity=0.5,
size=(1.0, 1.0),
sf=(4,0), ori=135)
trialClock = core.Clock()
t = 0
while t < 20: # quits after 20 secs
t = trialClock.getTime()
grating1.setPhase(1*t) # drift at 1Hz
grating1.draw() #redraw it
grating2.setPhase(2*t) #drift at 2Hz
grating2.draw() #redraw it
myWin.flip() #update the screen
#handle key presses each frame
for keys in event.getKeys():
if keys in ['escape','q']:
core.quit()
| 1,107 | Python | .py | 29 | 26.758621 | 52 | 0.506554 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,320 | face.py | psychopy_psychopy/docs/source/demos/face.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from psychopy import core, visual, event
#create a window to draw in
myWin = visual.Window((600, 600), allowGUI=False, color=(-1, -1, -1))
#INITIALISE SOME STIMULI
faceRGB = visual.GratingStim(myWin, tex='face.jpg',
mask=None,
pos=(0.0, 0.0),
size=(1.0, 1.0),
sf=(1.0, 1.0))
faceALPHA = visual.GratingStim(myWin, pos=(-0.5, 0),
tex="sin", mask="face.jpg",
color=[1.0, 1.0, -1.0],
size=(0.5, 0.5), sf=1.0,
units="norm")
message = visual.TextStim(myWin, pos=(-0.95, -0.95),
text='[Esc] to quit', color=1,
alignText='left', anchorHoriz='left',
alignTextVert='bottom', anchorVert='bottom')
trialClock = core.Clock()
t = lastFPSupdate = 0
while True:
t = trialClock.getTime()
faceRGB.setOri(1, '+')#advance ori by 1 degree
faceRGB.draw()
faceALPHA.setPhase(0.01, "+")#advance phase by 1/100th of a cycle
faceALPHA.draw()
#update fps every second
if t-lastFPSupdate > 1.0:
lastFPS = myWin.fps()
lastFPSupdate = t
message.setText("%ifps, [Esc] to quit" %lastFPS)
message.draw()
myWin.flip()
#handle key presses each frame
for keys in event.getKeys():
if keys in ['escape', 'q']:
print(myWin.fps())
myWin.close()
core.quit()
event.clearEvents()
| 1,626 | Python | .py | 42 | 27.166667 | 70 | 0.514286 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,321 | text.py | psychopy_psychopy/docs/source/demos/text.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from psychopy import visual, core
"""
Text rendering has changed a lot (for the better) under pyglet. This
script shows you the new way to specify fonts.
"""
#create a window to draw in
myWin = visual.Window((800.0,800.0),allowGUI=False,winType='pyglet',
monitor='testMonitor', units ='deg', screen=0)
myWin.setRecordFrameIntervals()
#choose some fonts. If a list is provided, the first font found will be used.
fancy = ['Monotype Corsiva', 'Palace Script MT', 'Edwardian Script ITC']
sans = ['Gill Sans MT', 'Arial','Helvetica','Verdana'] #use the first font found on this list
serif = ['Times','Times New Roman'] #use the first font found on this list
comic = 'Comic Sans MS' #note the full name of the font - the short name won't work
#INITIALISE SOME STIMULI
rotating = visual.TextStim(myWin,text="Fonts \nrotate!",pos=(-5, -5),#and can have line breaks
color=[-1.0,-1,1],
units='deg',
ori=0, height = 2.0,
font=comic)
unicodeStuff = visual.TextStim(myWin,
text = u"unicode (eg \u03A8 \u040A \u03A3)",#you can find the unicode character value from MS Word 'insert symbol'
color='black', font=serif,pos=(0,3), wrapWidth=20.0,
height = 2)
psychopyTxt = visual.TextStim(myWin, color='#FFFFFF',
text = u"PsychoPy \u00A9Jon Peirce",
units='norm', height=0.2,
pos=[0.95, 0.95],
alignText='right', anchorHoriz='right',
alignTextVert='top', anchorVert='top',
font=fancy)
longSentence = visual.TextStim(myWin,
text = u"Very long sentences can wrap", wrapWidth=0.8,
units='norm', height=0.15,color='DarkSlateBlue',
pos=[0.95, -0.95],
alignText='left', anchorHoriz='right',
anchorVert='bottom')
trialClock = core.Clock()
t = lastFPSupdate = 0
while t<20:#quits after 20 secs
t=trialClock.getTime()
rotating.setOri(1,"+")
rotating.draw()
unicodeStuff.draw()
longSentence.draw()
psychopyTxt.draw()
myWin.flip()
| 2,324 | Python | .py | 49 | 36.673469 | 138 | 0.587924 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,322 | webcam_demo.py | psychopy_psychopy/docs/source/tutorials/webcam_demo.py |
from psychopy import visual, event
import Image, pylab, cv  # legacy PIL ('Image') and OpenCV 1.x ('cv') bindings; a cv2-era sketch follows this file
mywin = visual.Window(allowGUI=False, monitor='testMonitor', units='norm',
colorSpace='rgb', color=[-1, -1, -1], fullscr=True)
mywin.setMouseVisible(False)
capture = cv.CaptureFromCAM(0)
img = cv.QueryFrame(capture)
pi = Image.fromstring(
"RGB", cv.GetSize(img), img.tostring(), "raw", "BGR", 0, 1)
print(pi.size)
myStim = visual.GratingStim(win=mywin, tex=pi, pos=[0, 0.5], size=[0.6, 0.6],
opacity=1.0, units='norm')
myStim.setAutoDraw(True)
while True:
img = cv.QueryFrame(capture)
pi = Image.fromstring(
"RGB", cv.GetSize(img), img.tostring(), "raw", "BGR", 0, 1)
myStim.setTex(pi)
mywin.flip()
theKey = event.getKeys()
if len(theKey) != 0:
break
| 815 | Python | .py | 22 | 31.272727 | 77 | 0.63038 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
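webcam_demo.py above targets the long-gone OpenCV 1.x `cv` module and PIL's removed Image.fromstring. A rough modern sketch of just the frame-grabbing part, using opencv-python (cv2) and numpy; untested here, with the ImageStim wiring left as in the original:

import cv2
import numpy as np
cap = cv2.VideoCapture(0)      # first attached camera
ok, frame = cap.read()         # BGR uint8 array, shape (h, w, 3)
if ok:
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    tex = np.flip(rgb.astype(np.float32) / 255.0, axis=0)  # 0-1 range, bottom-up rows
cap.release()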
| 5,323 | interleaveStaircases.py | psychopy_psychopy/docs/source/tutorials/interleaveStaircases.py |
from psychopy import visual, core, data, event
from numpy.random import shuffle
import copy, time #from the std python libs
#create some info to store with the data
info={}
info['startPoints']=[1.5,3,6]
info['nTrials']=10
info['observer']='jwp'
win=visual.Window([400,400])
#---------------------
#create the stimuli
#---------------------
#create staircases
stairs=[]
for thisStart in info['startPoints']:
#we need a COPY of the info for each staircase
#(or the changes here will be made to all the other staircases)
thisInfo = copy.copy(info)
#now add any specific info for this staircase
thisInfo['thisStart']=thisStart #we might want to keep track of this
thisStair = data.StairHandler(startVal=thisStart,
extraInfo=thisInfo,
nTrials=50, nUp=1, nDown=3,
minVal = 0.5, maxVal=8,
stepSizes=[4,4,2,2,1,1])
stairs.append(thisStair)
for trialN in range(info['nTrials']):
shuffle(stairs) #this shuffles 'in place' (ie stairs itself is changed, nothing returned)
#then loop through our randomised order of staircases for this repeat
for thisStair in stairs:
thisIntensity = next(thisStair)
print('start=%.2f, current=%.4f' %(thisStair.extraInfo['thisStart'], thisIntensity))
#---------------------
#run your trial and get an input
#---------------------
keys = event.waitKeys() #(we can simulate by pushing left for 'correct')
if 'left' in keys: wasCorrect=True
else: wasCorrect = False
thisStair.addData(wasCorrect) #so that the staircase adjusts itself
#this trial (of all staircases) has finished
#all trials finished
#save data (separate pickle and txt files for each staircase)
dateStr = time.strftime("%b_%d_%H%M", time.localtime())#add the current time
for thisStair in stairs:
#create a filename based on the subject and start value
filename = "%s start%.2f %s" %(thisStair.extraInfo['observer'], thisStair.extraInfo['thisStart'], dateStr)
thisStair.saveAsPickle(filename)
thisStair.saveAsText(filename)
| 2,123 | Python | .py | 48 | 38.770833 | 110 | 0.6839 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
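interleaveStaircases.py above leans on data.StairHandler being iterable and on shuffling the list of handlers each pass. A toy stand-in showing the same interleaving without PsychoPy (the 1-up/3-down rule here is deliberately simplified):

import random
class ToyStaircase:
    def __init__(self, start):
        self.value = start
        self.streak = 0
    def addData(self, wasCorrect):
        if wasCorrect:
            self.streak += 1
            if self.streak == 3:   # 3 correct in a row -> make it harder
                self.value /= 2
                self.streak = 0
        else:                      # any error -> make it easier
            self.value *= 2
            self.streak = 0
stairs = [ToyStaircase(s) for s in (1.5, 3, 6)]
for trialN in range(10):
    random.shuffle(stairs)                  # randomise order each pass
    for s in stairs:
        s.addData(random.random() < 0.7)    # simulated 70%-correct observer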
| 5,324 | showImages.py | psychopy_psychopy/docs/source/tutorials/showImages.py |
from pathlib import Path
import numpy as np
from PIL import Image
from psychopy import core, event, visual
# ---------------------
# Setup window
# ---------------------
win = visual.Window(
(900, 900),
screen=0,
units="pix",
allowGUI=True,
fullscr=False,
)
# ---------------------
# Example 1: load a stimulus from disk
# ---------------------
# assume we're running from the root psychopy repo.
# you could replace with path to any image:
path_to_image_file = Path() / "PsychoPy2_screenshot.png"
# simply pass the image path to ImageStim to load and display:
image_stim = visual.ImageStim(win, image=path_to_image_file)
text_stim = visual.TextStim(
win,
text="Showing image from file",
pos=(0.0, 0.8),
units="norm",
height=0.05,
wrapWidth=0.8,
)
image_stim.draw()
text_stim.draw()
win.flip()
event.waitKeys() # press space to continue
# ---------------------
# Example 2: convert image to numpy array
#
# Perhaps you want to convert an image to numpy, do some things to it,
# and then display. Here I use the Python Imaging Library for image loading,
# and a conversion function from skimage. PsychoPy has an internal
# "image2array" function but this only handles single-layer (i.e. intensity) images.
#
# ---------------------
pil_image = Image.open(path_to_image_file)
image_np = np.array(
pil_image, order="C"
)  # convert to numpy array with shape (height, width, channels)
image_np = (
    image_np.astype(np.float64) / 255.0
) # convert to float in 0--1 range, assuming image is 8-bit uint.
# Note this float conversion is "quick and dirty" and will not
# fix potential out-of-range problems if you're going
# straight from a numpy array. See the img_as_float
# function of scikit image for a more careful conversion.
# flip image (row-axis upside down so we need to reverse it):
image_np = np.flip(image_np, axis=0)
image_stim = visual.ImageStim(
win,
image=image_np,
units="pix",
size=(
image_np.shape[1],
image_np.shape[0],
), # here's a gotcha: need to pass the size (x, y) explicitly.
colorSpace="rgb1", # img_as_float converts to 0:1 range, whereas PsychoPy defaults to -1:1.
)
text_stim.text = "Showing image from numpy array"
image_stim.draw()
text_stim.draw()
win.flip()
event.waitKeys() # press space to continue
win.close()
core.quit()
| 2,355 | Python | .py | 73 | 29.863014 | 96 | 0.680018 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
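showImages.py above converts a PIL image to a 0-1 float array and flips it before handing it to ImageStim. The same steps as one helper, a hand-rolled stand-in for skimage's img_as_float that is valid only for 8-bit images:

import numpy as np
from PIL import Image
def to_float01(path):
    arr = np.array(Image.open(path).convert('RGB'), dtype=np.float64)
    arr /= 255.0                  # uint8 -> 0-1 range
    return np.flip(arr, axis=0)   # reverse rows, as the tutorial does before display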
| 5,325 | plat_custom_installs.py | psychopy_psychopy/building/plat_custom_installs.py |
import subprocess
import sys
if sys.platform == 'win32':
subprocess.call(
'pip install --extra-index-url https://www.lfd.uci.edu/~gohlke/pythonlibs pyWinhook'
)
| 183 | Python | .py | 6 | 25.833333 | 92 | 0.697143 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,326 | updateBuilderDemos.py | psychopy_psychopy/building/updateBuilderDemos.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# this script re-saves every builder demo (.psyexp) so that its XML records
# the current PsychoPy version
import os
from psychopy import experiment, __version__
from pathlib import Path
thisFolder = Path(__file__).parent
nFiles = 0
for root, dirs, files in os.walk(thisFolder.parent/"psychopy/demos/builder"):
for filename in files:
if filename.endswith('.psyexp'):
filepath = os.path.join(root, filename)
exp = experiment.Experiment()
exp.loadFromXML(filepath)
origVersion = exp.psychopyVersion
exp.psychopyVersion = __version__
exp.saveToXML(filepath)
print("switching {} from {} to {}".format(filepath, origVersion, __version__))
| 813 | Python | .py | 19 | 36.473684 | 95 | 0.674271 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
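updateBuilderDemos.py above walks the demo tree with os.walk plus an extension check; pathlib's recursive glob expresses the same scan more directly. A sketch, with the path assumed relative to the repo root:

from pathlib import Path
for filepath in Path('psychopy/demos/builder').rglob('*.psyexp'):
    print(filepath)  # each demo experiment file, found recursively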
| 5,327 | writeVersionFiles.py | psychopy_psychopy/building/writeVersionFiles.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Writes the current version, build platform etc.
"""
from subprocess import run
import pathlib
from packaging import version
root = pathlib.Path(__file__).parent.parent # root of the repo
def _call(cmd, verbose=False):
result = run(cmd, capture_output=True, text=True)
if verbose or result.returncode or result.stderr:
print(f"Call:\n {' '.join(cmd)}")
print(f"Resulted in:\n {result.stdout + result.stderr}")
return None
else:
return result.stdout.strip()
def _checkValidVersion(v):
"""Check if version string is valid and return True/False"""
try:
version.Version(v)
except version.InvalidVersion:
print(f"Invalid version: {v}")
return False
return True
def countSince(ref):
"""Get number of commits since given commit/tag"""
cmd = ['git', 'rev-list', '--count', ref + '..HEAD']
return _call(cmd)
def getLastCommit(filepath=None):
"""Get SHA of last commit that touched given file or last commit in repo"""
if filepath:
cmd = ['git', 'log', '-n', '1', '--pretty=format:%H', filepath]
else:
cmd = ['git', 'log', '-n', '1', '--pretty=format:%H']
return _call(cmd)
def getBranch():
"""Get current branch name"""
cmd = ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
resp = _call(cmd)
if resp is None:
return ''
return resp
def getTags():
"""Get list of tags"""
cmd = ['git', 'tag', '--sort=-v:refname']
resp = _call(cmd)
if resp is None:
return []
return resp.split()
def isShallowRepo():
"""Check if git repo is shallow (or not a repo)"""
cmd = ['git', 'rev-parse', '--is-shallow-repository']
resp = _call(cmd)
return resp is None or resp=='true'
def makeVersionSuffix(base):
"""Makes version suffix like post3 or dev8 given base version number
Suffix checks for a tag matching the base and then counts commits since
either that tag or the last commit that touched the VERSION file.
Choice of post/dev is based on whether we're on the release branch."""
if isShallowRepo():
print("Can't calculate good version number in shallow repo. "
"Did you fetch with `git clone --depth=1`?\n"
f"Using simple version number ({base})")
return ''
if base in getTags():
nCommits = countSince(base)
if nCommits == '0':
return '' # we're on a tag
else:
nCommits = countSince(getLastCommit(root/'psychopy/VERSION'))
branch = getBranch()
if branch=='release':
return f'post{nCommits}'
else:
return f'dev{nCommits}'
def updateVersionFile():
"""Take psychopy/VERSION, append the branch and distance to commit
and update the VERSION file accordingly"""
raw = (root/'psychopy/VERSION').read_text().strip()
try:
origVersion = version.Version(raw)
except version.InvalidVersion:
        raise version.InvalidVersion("Can't create valid version from invalid starting point:\n"
                                     f" {raw}")
base = origVersion.base_version # removing things like the dev21 or post3
suffix = makeVersionSuffix(base)
final = base + suffix
if final != raw:
with open(root/'psychopy/VERSION', 'w') as f:
f.write(final)
print(f"Updated version file to {final}")
def updateGitShaFile(sha=None):
"""Create psychopy/GIT_SHA
:param:`dist` can be:
None:
writes __version__
'sdist':
for python setup.py sdist - writes git id (__git_sha__)
'bdist':
for python setup.py bdist - writes git id (__git_sha__)
and __build_platform__
"""
shaPath = root/"psychopy/GIT_SHA"
if sha is None:
sha = getLastCommit() or 'n/a'
with open(shaPath, 'w') as f:
f.write(sha)
print(f"Created file: {shaPath.absolute()}")
if __name__ == "__main__":
updateGitShaFile()
updateVersionFile()
| 4,054 | Python | .py | 111 | 30.09009 | 96 | 0.623499 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
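writeVersionFiles.py above glues suffixes like dev8 or post3 straight onto the base version; packaging normalizes those strings and exposes the parts, which is a quick way to sanity-check the output:

from packaging import version
for v in ('2024.2.0', '2024.2.0dev8', '2024.2.0post3'):
    parsed = version.Version(v)
    print(v, '->', parsed, '| base:', parsed.base_version,
          '| dev:', parsed.dev, '| post:', parsed.post)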
| 5,328 | buildRelease.py | psychopy_psychopy/building/buildRelease.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script is used to:
- update the version numbers
- update the psychopyVersions repo:
- copy over the code
- commit, tag and push(?)
It should be run from the root of the main git repository, which should be
next to a clone of the psychopy/versions git repository
"""
import os, sys, shutil, subprocess
from os.path import join
from pathlib import Path
# MAIN is the root of the psychopy repo
MAIN = Path(__file__).parent.parent.parent.absolute()
# versions repo is next to MAIN
VERSIONS = MAIN.parent / 'versions'
print("Building release version from: ", MAIN)
print("To: ", VERSIONS)
if sys.platform == "darwin":
gitgui = ["git", "gui"]
elif sys.platform == "linux":
gitgui = ["cola"]
else:
gitgui = ["git", "gui"]
print("This script requires a unix-based terminal to run (for commands "
"like `du -sck` to work)")
sys.exit()
def getSHA(cwd='.'):
if cwd == '.':
cwd = os.getcwd()
# get the SHA of the git HEAD
SHA_string = subprocess.check_output(
['git', 'rev-parse', '--short', 'HEAD'],
cwd=cwd).split()[0].decode('utf-8')
# convert to hex from a string and return it
print('SHA:', SHA_string, 'for repo:', cwd)
return SHA_string
def buildRelease(versionStr, noCommit=False, interactive=True):
dest = VERSIONS / "psychopy"
shutil.rmtree(dest)
ignores = shutil.ignore_patterns("demos", "docs", "tests", "pylink",
"*.pyo", "*.pyc", "*.orig", "*.bak",
".DS_Store", ".coverage")
shutil.copytree("psychopy", dest, symlinks=False, ignore=ignores)
os.mkdir(dest/'tests')
shutil.copyfile("psychopy/tests/__init__.py", dest/'tests/__init__.py')
shutil.copyfile("psychopy/tests/utils.py", dest/'tests/utils.py')
# todo: would be nice to check here that we didn't accidentally add anything large (check new folder size)
Mb = float(subprocess.check_output(["du", "-sck", dest]).split()[0])/10**3
print("size for '%s' will be: %.2f Mb" %(versionStr, Mb))
if noCommit:
return False
if interactive:
ok = input("OK to continue? [n]y :")
if ok != "y":
return False
lastSHA = getSHA(cwd=VERSIONS)
print('updating: git add --all')
output = subprocess.check_output(["git", "add", "--all"], cwd=VERSIONS)
if interactive:
ok = subprocess.call(gitgui, cwd=VERSIONS)
        if lastSHA == getSHA(cwd=VERSIONS):
# we didn't commit the changes so quit
print("no git commit was made: exiting")
return False
else:
print("committing: git commit -m 'release version %s'" %versionStr)
subprocess.call(
["git", "commit", "-m", "'release version %s'" %versionStr],
cwd=VERSIONS)
print("tagging: git tag -m 'release %s'" %versionStr)
ok = subprocess.call(
["git", "tag", versionStr, "-m", "'release %s'" %versionStr],
cwd=VERSIONS)
print("'versions' tags are now:", subprocess.check_output(
["git","tag"], cwd=VERSIONS).split())
print('pushing: git push origin %s' %versionStr)
output = subprocess.check_output(["git", "push", "origin", "%s" % versionStr],
cwd=VERSIONS)
print(output)
return True # success
if __name__ == "__main__":
if "--noCommit" in sys.argv:
noCommit = True
else:
noCommit = False
if "--noInteractive" not in sys.argv:
interactive = True
else:
interactive = False
# todo: update versions first
versionStr = input("version:")
buildRelease(versionStr, noCommit=noCommit, interactive=interactive)
| 3,751 | Python | .py | 94 | 33.234043 | 110 | 0.614941 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,329 | searchCopyrightYear.py | psychopy_psychopy/building/searchCopyrightYear.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""looks for lines containing 'Copyright|(C)', <last-year>, and 'Peirce'
in all files in or below the current directory
and writes out an executable file, replaceCopyright<year>, with commands that
could be used to update last-year to the current year.
usage steps:
- run tests
- ./searchCopyrightYear.py
- review the new file, replaceCopyright<Year>.sh, edit as needed
- ./replaceCopyrightYear.sh -- this does the replacing
- run tests again -- make sure we didn't break anything
- commit
relies on: perl -pi -e 's/\\Qold\\E/new/' <file>
I couldn't figure out a pythonic way to do in-place changes to files
import fileinput # looked promising but a) don't really want to copy every line
of every file, and b) doesn't actually work easily...
for line in fileinput.input(file, inplace = 1):
print line.replace(...).strip() #--> loses initial whitespace
line.replace(....) #--> adds quote marks around line
"""
__author__ = 'Jeremy Gray'
import os, sys, time, glob
from psychopy import core
assert (sys.platform == 'darwin' or sys.platform.startswith('linux')), "This script must be run on a unix-based platform"
perlVersion = core.shellCall('perl -V').splitlines()[0]
assert perlVersion.find('perl5') > -1 # not completely sure what will happen with other perl versions...
newYear = str(time.localtime()[0]) # current year
oldYear = str(int(newYear)-1) # last year; will need to set manually if you miss a year
print("copyright %s -> %s: searching for files" % (oldYear, newYear))
#find relevant files:
files = []
toSearch = ['.txt', '.py', '.md', '.rst', '.ps1', '.nsi', ]
excludeFolders = set(['build', 'dist', '.git'])
for root, dirs, tmpfiles in os.walk('.', topdown=True):
dirs[:] = [d for d in dirs if d not in excludeFolders]
for f in tmpfiles:
file = root+'/'+f
main, ext = os.path.splitext(file)
# if ext in ['.html','.orig','.pickle','.doctree','.pyc','.pdf','.dll',
# '.pyw', '.mov', '.wav','.mp4','.mpg','.ico','.jpg','.gif',
# '.png','.DS_Store','.xlsx', '.icns','.svg',
# '.so','.mo','.h5','ttf','.dat']:
# continue
if ext in toSearch:
files.append(file)
print(len(files), 'files found, screening each')
badLines = 0 # ['$/] will mess with perl search-replace; other characters might too
targetFiles = 0 # count of files to be updated
tmpFile = './replaceCopyright'+oldYear+'_'+newYear+'.sh'
try:
del files[files.index(tmpFile)]
except ValueError:  # tmpFile wasn't in the list anyway
    pass
tmp = open(tmpFile, 'w')
tmp.write('#!/bin/sh \necho Updating...\n')
# check each line of each relevant file:
for file in files:
if os.path.isdir(file) or file.endswith(sys.argv[0]):
continue
try:
contents = open(file, 'r').readlines()
    except UnicodeDecodeError:
        print("Couldn't read file '{}'".format(file))
        continue  # skip unreadable files rather than reusing stale contents
lines = [line for line in contents if \
line.find("Peirce") > -1 and \
line.find(oldYear) > -1 and \
(line.lower().find("(c)") > -1 or line.lower().find("copyright") > -1)
]
for i,line in enumerate(lines): #allow multiple lines per file, each gets its own replace command
#print i+1, file
line = line.strip()
#print line
if line.find("'") > -1: # capture stuff in between single-quotes, hopefully including the year
line = line[line.find("'")+1:]
line = line[:line.find("'")]
if line.find(oldYear) == -1:
badLines += 1
print(file+": expected <last-year> somewhere between single-quotes:", line)
continue # skip the line
if '$' in line:
badLines += 1
print(file+": cannot handle '$' in line:", line)
continue
sep = '/' # perl search-replace separator
if sep in line:
sep = '|' # try this one instead
if sep in line:
badLines += 1
print(file+": cannot handle '"+sep+"' in line:", line)
continue
newLine = line.replace(oldYear, newYear) # should not contain characters that will mess with perl 's/oldLine/newLine/'
cmd = "echo "+file+"\n " # helps with debugging, if the perl s/// flails due to a bad character -> you know what file to look at
cmd += "perl -pi -e 's"+sep+r"\Q"+line+r"\E"+sep+newLine+sep+"' '"+file+"'\n" # only match one line, avoid s///g
tmp.write(cmd)
targetFiles += 1
tmp.write('echo Updated %d files.\n' % targetFiles)
tmp.close()
core.shellCall('chmod u+x '+tmpFile) # make executable
if targetFiles:
print('To make %d changes, inspect then run:\n '%targetFiles, tmpFile)
print('If something looks amiss, you can manually edit then run it.')
if badLines:
print("Warning: %d lines were skipped" % badLines)
else:
print('No matching files found for year', oldYear)
os.unlink(tmpFile)
| 4,974 | Python | .py | 107 | 40.626168 | 137 | 0.625541 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
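searchCopyrightYear.py above shells out to perl for the in-place edit; its docstring notes the author found no pythonic equivalent. pathlib makes a literal, first-occurrence-only in-place replacement short (a sketch, not a drop-in for the generated shell script):

from pathlib import Path
def replace_once(path, old, new):
    p = Path(path)
    text = p.read_text()
    if old not in text:
        return False
    p.write_text(text.replace(old, new, 1))  # first occurrence only, like s/// without /g
    return True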
| 5,330 | compile_po.py | psychopy_psychopy/building/compile_po.py |
import polib
import pathlib
root = pathlib.Path(__file__).absolute().parent.parent / 'psychopy/app/locale'
def compilePoFiles(root=root, errIfEmpty=True):
"""Looks for all paths matching **/*.po and compiles to a .mo file using
python polib
:param: root
"""
po_files = list(pathlib.Path(root).glob('**/*.po'))
for popath in po_files:
mopath = popath.with_suffix(".mo")
po = polib.pofile(popath)
po.save_as_mofile(mopath)
    if len(po_files) < 1 and errIfEmpty:
raise FileNotFoundError(f"Found no po files to compile to mo. Was this the right folder to search? "
f"\n {root}")
else:
print(f"compiled {len(po_files)} .po files to .mo in {root.absolute()}")
return len(po_files)
if __name__ == "__main__":
n_files = compilePoFiles(root)
| 831 | Python | .py | 21 | 33.047619 | 108 | 0.62531 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,331 | apple_sign.py | psychopy_psychopy/building/apple_sign.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from pathlib import Path
import subprocess
import re
import time, sys, os
import argparse
import shutil
import functools
unbuffered_print = functools.partial(print, flush=True)
thisFolder = Path(__file__).parent
finalDistFolder = thisFolder.parent.parent/'dist'
ENTITLEMENTS = (thisFolder / "entitlements.plist").absolute()
assert ENTITLEMENTS.exists()
BUNDLE_ID = "org.opensciencetools.psychopy"
USERNAME = "admin@opensciencetools.org"
SIGN_ALL = True
logFile = open("_lastCodeSign.log", "w")
# handy resources for info:
#
# to get a new Apple app-specific password:
# https://appleid.apple.com/account/manage NOT developer.apple.com
# why use zip file to notarize as well as dmg:
# https://deciphertools.com/blog/notarizing-dmg/
# notarize from Python:
# https://github.com/najiji/notarizer/blob/master/notarize.py
# apple entitlements:
# https://developer.apple.com/documentation/xcode/notarizing_macos_software_before_distribution/resolving_common_notarization_issues
class AppSigner:
def __init__(self, appFile, version, destination=None, verbose=False,
team_id='', apple_id='', pword=''):
self.appFile = Path(appFile)
self.version = version
self.destination = destination
self._zipFile = None #'/Users/lpzjwp/code/psychopy/git/dist/PsychoPy3_2020.2.3.zip'
self._appNotarizeUUID = None
self._dmgBuildFile = None
self._pword = pword
self.verbose = verbose
self._apple_id = apple_id
self._team_id = team_id
def signAll(self, verbose=None):
if verbose is None:
verbose = self.verbose
# remove files that we know will fail the signing:
for filename in self.appFile.glob("**/Frameworks/SDL*"):
shutil.rmtree(filename)
for filename in self.appFile.glob("**/Frameworks/eyelink*"):
shutil.rmtree(filename)
# this never really worked - probably the files signed in wrong order?
# find all the included dylibs
unbuffered_print('Signing dylibs:', end='')
files = list(self.appFile.glob('**/*.dylib'))
files.extend(self.appFile.glob('**/*.so'))
files.extend(self.appFile.glob('**/git-core/git*'))
files.extend(self.appFile.glob('**/git-core/scalar')) # for some reason it's named differently
files.extend(self.appFile.glob('**/cv2/.dylibs/*'))
# ffmpeg
files.extend(self.appFile.glob('**/imageio_ffmpeg/binaries/*'))
files.extend(self.appFile.glob('**/resources/ffmpeg/ffmpeg-osx*'))
# PyQt
files.extend(self.appFile.glob('**/Versions/5/Qt*')) # PyQt5
files.extend(self.appFile.glob('**/Versions/A/Qt*')) # PyQt6
files.extend(self.appFile.glob('**/Contents/MacOS/QtWebEngineProcess'))
files.extend(self.appFile.glob('**/Resources/lib/python3.8/lib-dynload/*.so'))
files.extend(self.appFile.glob('**/Frameworks/Python.framework'))
files.extend(self.appFile.glob('**/Contents/MacOS/python'))
# ready? Let's do this!
t0 = time.time()
unbuffered_print(f"Signing dylibs...see {logFile.name} for details. key: \n"
" . success\n"
" o already signed\n"
" - failed (deleted)\n"
" X failed (couldn't delete)")
for filename in files:
if filename.exists(): # might have been removed since glob
self.signSingleFile(filename, verbose=False, removeFailed=True)
unbuffered_print(f'\n...done signing dylibs in {time.time()-t0:.03f}s')
# then sign the outer app file
unbuffered_print('Signing app')
t0 = time.time()
self.signSingleFile(self.appFile, removeFailed=False)
unbuffered_print(f'...done signing app in {time.time()-t0:.03f}s')
def signSingleFile(self, filename, removeFailed=False, verbose=None):
"""Signs a single file (if it isn't already signed)
Returns:
True (success)
list of warnings (partial success)
False (failed)
Params:
filename
        removeFailed (bool): if True then try to remove files that don't sign
verbose: increases printing level (although you can see the logs)
"""
# " . success\n"
# " - failed (deleted)\n"
# " X failed (couldn't delete)
if verbose is None:
verbose = self.verbose
# is there already a valid signature? MUST overwrite or won't notarize
# if self.signCheck(str(filename)) is True: # check actual boolean, not list of warnings
# unbuffered_print('o', end='')
# return True
# try signing it ourselves
if not self._apple_id:
raise ValueError('No identity provided for signing')
cmd = ['codesign', str(filename),
'--sign', self._team_id,
'--entitlements', str(ENTITLEMENTS),
'--force',
'--timestamp',
# '--deep', # not recommended although used in most demos
'--options', 'runtime',
]
cmdStr = ' '.join(cmd)
logFile.write(f"{cmdStr}\n")
if verbose:
unbuffered_print(cmdStr)
exitcode, output = subprocess.getstatusoutput(cmdStr)
if verbose and output:
unbuffered_print(output)
# CODESIGN SUCCESS
if exitcode == 0 and not ('failed' in output):
# successfully signed
unbuffered_print('.', end='')
# do a detailed check and return
return self.signCheck(filename, verbose=False, removeFailed=removeFailed)
# CODESIGN FAIL. Let's see if we can remove
logFile.write(f"{output}\n")
try: # remove the file because we couldn't sign it
Path(filename).unlink()
unbuffered_print('-', end='')
logFile.write(f"FILE {filename}: failed to codesign and was removed\n")
except:
unbuffered_print('X', end='')
logFile.write(f"\nFILE {filename}: failed to codesign and failed to remove\n")
return
def signCheck(self, filepath=None, verbose=False, strict=True,
removeFailed=False):
"""Checks whether a file is signed and returns a list of warnings
Returns:
False if not signed at all
A list of warnings if signed but with concerns (and these are printed)
True if signed with no warnings found
"""
if not filepath:
filepath = self.appFile
# just check the details
strictFlag = "--strict" if strict else ""
cmdStr = f'codesign -dvvv {strictFlag} {filepath}'
# make the call
if verbose:
unbuffered_print(cmdStr)
exitcode, output = subprocess.getstatusoutput(cmdStr)
if verbose:
unbuffered_print(f"Checking that codesign worked: {output}")
if exitcode == 1: # indicates no valid signature
return False
# check for warnings
warnings=[]
for line in output.split("\n"):
if 'warning' in line.lower():
warnings.append(line)
if warnings:
unbuffered_print(filepath)
for line in warnings:
unbuffered_print(" ", line)
if removeFailed:
Path(filepath).unlink()
unbuffered_print(f"REMOVED FILE {filepath}: failed to codesign")
return warnings
else:
return True
def upload(self, fileToNotarize):
"""Uploads a file to Apple for notarizing"""
if not self._pword:
raise ValueError('No app-specific password provided for notarizing')
filename = Path(fileToNotarize).name
unbuffered_print(f'Sending {filename} to apple for notarizing')
cmdStr = (f'xcrun notarytool submit {fileToNotarize} '
f'--apple-id "{self._apple_id}" '
f'--team-id "{self._team_id}" '
f'--password "{self._pword}"')
# cmdStr = (f"xcrun altool --notarize-app -t osx -f {fileToNotarize} "
# f"--primary-bundle-id {BUNDLE_ID} -u {USERNAME} ")
# unbuffered_print(cmdStr)
t0 = time.time()
exitcode, output = subprocess.getstatusoutput(cmdStr)
m = re.findall(r"^ id: (.*)$", output, re.M)
if 'Please sign in with an app-specific password' in output:
unbuffered_print("[Error] Upload failed: You probably need a new app-specific "
"password from https://appleid.apple.com/account/manage")
exit(1)
elif exitcode != 0:
unbuffered_print(f"[Error] Upload failed with message: {output}")
exit(exitcode)
unbuffered_print(output)
uuid = m[0].strip()
self._appNotarizeUUID = uuid
unbuffered_print(f'Uploaded file {filename} in {time.time()-t0:.03f}s: {uuid}')
unbuffered_print(f'Upload to Apple completed at {time.ctime()}')
return uuid
@property
def dmgFile(self):
if not self._dmgBuildFile:
self._dmgBuildFile = self._buildDMG()
return self._dmgBuildFile
@property
def zipFile(self):
if self._zipFile:
return self._zipFile
else:
unbuffered_print("Creating zip archive to send to Apple: ", end='')
zipFilename = self.appFile.parent / (self.appFile.stem+f'_{self.version}.zip')
shutil.rmtree(zipFilename, ignore_errors=True)
# zipFilename.unlink(missing_ok=True) # remove the file if it exists
t0 = time.time()
cmdStr = f'/usr/bin/ditto -c -k --keepParent {self.appFile} {zipFilename}'
unbuffered_print(cmdStr)
exitcode, output = subprocess.getstatusoutput(cmdStr)
if exitcode == 0:
unbuffered_print(f"Done creating zip in {time.time()-t0:.03f}s")
else:
unbuffered_print(output)
self._zipFile = zipFilename
return zipFilename
def awaitNotarized(self, logFile='_notarization.json'):
unbuffered_print("Waiting for notarization to complete"); sys.stdout.flush()
# can use 'xcrun notarytool info' to check status or 'xcrun notarytool wait'
exitcode, output = subprocess.getstatusoutput(f'xcrun notarytool wait {self._appNotarizeUUID} '
f'--apple-id "{self._apple_id}" '
f'--team-id {self._team_id} '
f'--password {self._pword}')
unbuffered_print(output); sys.stdout.flush()
unbuffered_print("Fetching notarisation log"); sys.stdout.flush()
# always fetch the log file too
exitcode2, output = subprocess.getstatusoutput(f'xcrun notarytool log {self._appNotarizeUUID} '
f'--apple-id "{self._apple_id}" '
f'--team-id {self._team_id} '
f'--password {self._pword} '
f'{logFile}')
unbuffered_print(output)
if exitcode != 0:
unbuffered_print("`xcrun notarytool wait` returned exit code {exitcode}. Exiting immediately.")
exit(exitcode)
def staple(self, filepath):
cmdStr = f'xcrun stapler staple {filepath}'; sys.stdout.flush()
unbuffered_print(cmdStr)
exitcode, output = subprocess.getstatusoutput(cmdStr)
unbuffered_print(f"exitcode={exitcode}: {output}"); sys.stdout.flush()
if exitcode != 0:
unbuffered_print('*********Staple failed*************')
exit(exitcode)
else:
unbuffered_print(f"Staple successful. You can verify with\n xcrun stapler validate {filepath}"); sys.stdout.flush()
def dmgBuild(self):
import dmgbuild
dmgFilename = str(self.appFile).replace(".app", "_rw.dmg")
appName = self.appFile.name
unbuffered_print(f"building dmg file: {dmgFilename}..."); sys.stdout.flush()
# remove previous file if it's there
if Path(dmgFilename).exists():
os.remove(dmgFilename)
# then build new one
icon = (thisFolder.parent /
'psychopy/app/Resources/psychopy.icns').resolve()
background = (thisFolder / "dmg722x241.tiff").resolve()
dmgbuild.build_dmg(
filename=dmgFilename,
volume_name=f'PsychoPy-{self.version}', # avoid spaces
settings={
'format': 'UDRW',
'files': [str(self.appFile)],
'symlinks': { 'Applications': '/Applications' },
'size': '3g', # but maybe irrelevant in UDRW mode?
'badge_icon': str(icon),
'background': None, # background
'icon_size': 128,
'icon_locations': {
'PsychoPy.app': (150, 160),
'Applications': (350, 160)
},
'window_rect': ((600, 600), (500, 400)),
},
)
unbuffered_print(f"building dmg file complete")
return dmgFilename
def dmgStapleInside(self):
dmgFilename = str(self.appFile).replace(".app", "_rw.dmg")
appName = self.appFile.name
"""Staple the notarization to the app inside the r/w dmg file"""
# staple the file inside the dmg
cmdStr = f"hdiutil attach '{dmgFilename}'"
exitcode, output = subprocess.getstatusoutput(cmdStr)
# subprocess.getstatusoutput("say 'waiting' --voice=Kate")
time.sleep(10)
volName = output.split('\t')[-1]
self.staple(f"'{volName}/{appName}'")
time.sleep(10) # wait for 10s and then try more forcefully
# try to eject all volumens with PsychoPy in the name
for volume in Path("/Volumes").glob("PsychoPy*"):
# Eject the disk image
for attemptN in range(5):
exitcode, output = subprocess.getstatusoutput(f"diskutil eject {volume}")
unbuffered_print(f"Attempt {attemptN}: {output}"); sys.stdout.flush()
if exitcode == 0:
break
# have a rest and try again
time.sleep(5)
def dmgCompress(self):
dmgFilename = str(self.appFile).replace(".app", "_rw.dmg")
dmgFinalFilename = self.appFile.parent / f"StandalonePsychoPy-{self.version}-macOS.dmg"
# remove previous file if it's there
if Path(dmgFinalFilename).exists():
os.remove(dmgFinalFilename)
cmdStr = f"hdiutil convert {dmgFilename} -format UDZO -o {dmgFinalFilename}"
for attemptN in range(5):
unbuffered_print(f"Attempt {attemptN}: {cmdStr}")
exitcode, output = subprocess.getstatusoutput(cmdStr)
unbuffered_print(output)
if exitcode == 0:
return dmgFinalFilename
raise RuntimeError(f'****Failed to compress {dmgFilename} to {dmgFinalFilename} (is it not ejected?) ****')
def main():
with open(thisFolder.parent / "psychopy/VERSION") as f:
defaultVersion = f.read().strip()
parser = argparse.ArgumentParser(description="Codesigning PsychoPy.app")
parser.add_argument("--app", help=("Path to the app bundle, "
"assumed to be in dist/"),
action='store', required=False, default="PsychoPy.app")
parser.add_argument("--version", help="Version of the app",
action='store', required=False, default=defaultVersion)
parser.add_argument("--file", help="path for a single file to be signed",
action='store', required=False, default=None)
parser.add_argument("--skipNotarize", help="Include this flag only if you want to skip",
action='store', required=False, default=None)
parser.add_argument("--runPreDmgBuild", help="Runs up until dmg is built (and notarized) then exits",
action='store', required=False, default='true')
parser.add_argument("--runDmgBuild", help="Runs the dmg build itself",
action='store', required=False, default='true')
parser.add_argument("--runPostDmgBuild", help="Runs up until dmg is built (and notarized) then exits",
action='store', required=False, default='true')
parser.add_argument("--teamId", help="ost id from apple for codesigning",
action='store', required=False, default=None)
parser.add_argument("--appleId", help="apple id for codesigning",
action='store', required=False, default=None)
parser.add_argument("--pwd", help="password for app-specific password",
action='store', required=False, default=None)
args = parser.parse_args()
args.runPreDmgBuild = args.runPreDmgBuild.lower() in ['true', 'True', '1', 'y', 'yes']
args.runDmgBuild = args.runDmgBuild.lower() in ['true', 'True', '1', 'y', 'yes']
args.runPostDmgBuild = args.runPostDmgBuild.lower() in ['true', 'True', '1', 'y', 'yes']
if args.skipNotarize in ['true', 'True', '1', 'y', 'yes']:
NOTARIZE = False
else:
NOTARIZE = True
# codesigning TEAM_ID from CLI args?
if args.teamId:
TEAM_ID = args.teamId
else:
        p = Path.home() / 'keys/apple_ost_id'
        TEAM_ID = p.read_text().strip()
if args.appleId:
APPLE_ID = args.appleId
else:
        p = Path.home() / 'keys/apple_id'
        APPLE_ID = p.read_text().strip()
if args.pwd:
PWORD = args.pwd
else:
        p = Path.home() / 'keys/apple_psychopy_app_specific'
        PWORD = p.read_text().strip()
if args.file: # not the whole app - just sign one file
distFolder = (thisFolder / '../dist').resolve()
signer = AppSigner(appFile='', version=None,
pword=PWORD, team_id=TEAM_ID, apple_id=APPLE_ID)
signer.signSingleFile(args.file, removeFailed=False, verbose=True)
signer.signCheck(args.file, verbose=True)
if NOTARIZE:
signer.upload(args.file)
# notarize and staple
signer.awaitNotarized()
signer.staple(args.file)
else: # full app signing and notarization
distFolder = (thisFolder / '../dist').resolve()
signer = AppSigner(appFile=distFolder/args.app, version=args.version,
pword=PWORD, team_id=TEAM_ID, apple_id=APPLE_ID)
if args.runPreDmgBuild:
if SIGN_ALL:
signer.signAll()
signer.signCheck(verbose=False)
if args.runDmgBuild:
if NOTARIZE:
unbuffered_print(f'Signer.upload("{signer.zipFile}")'); sys.stdout.flush()
signer.upload(signer.zipFile)
# build the read/writable dmg file (while waiting for notarize)
signer.dmgBuild()
if NOTARIZE:
# notarize and staple
unbuffered_print(f'Signer.awaitNotarized()'); sys.stdout.flush()
signer.awaitNotarized()
if args.runPostDmgBuild:
unbuffered_print(f'Signer.dmgStapleInside()'); sys.stdout.flush()
signer.dmgStapleInside() # doesn't require UUID
unbuffered_print(f'Signer.dmgCompress()'); sys.stdout.flush()
dmgFile = signer.dmgCompress()
            unbuffered_print(f'Signer.signSingleFile(dmgFile)'); sys.stdout.flush()
signer.signSingleFile(dmgFile, removeFailed=False, verbose=True)
if NOTARIZE:
unbuffered_print(f'Signer.upload(dmgFile)'); sys.stdout.flush()
OK = signer.upload(dmgFile)
if not OK:
return 0
# notarize and staple
unbuffered_print(f'Signer.awaitNotarized()'); sys.stdout.flush()
signer.awaitNotarized(logFile="") # don't need the log file for the dmg
unbuffered_print(f'Signer.staple(dmgFile)'); sys.stdout.flush()
signer.staple(dmgFile)
if __name__ == "__main__":
main()
| 20,636 | Python | .py | 424 | 37.613208 | 136 | 0.600885 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
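apple_sign.py above retries flaky steps (ejecting volumes, compressing the dmg) in small loops. The recurring pattern, factored into one helper built on the same subprocess.getstatusoutput call the script already uses:

import subprocess
import time
def run_with_retries(cmdStr, attempts=5, delay=5):
    for attemptN in range(attempts):
        exitcode, output = subprocess.getstatusoutput(cmdStr)
        if exitcode == 0:
            return output
        time.sleep(delay)  # have a rest and try again
    raise RuntimeError(f'command failed after {attempts} attempts: {cmdStr}')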
| 5,332 | checksum.py | psychopy_psychopy/.github/workflows/checksum.py |
#! python3
"""Using Python to create a cross-platform function to
compare sha checksums (mostly because windows pwsh annoying) """
import hashlib
def checksum(filename, sha_file, alg='sha256'):
"""Verify a hash and raise error if fails"""
    computed_hash = getattr(hashlib, alg)()  # instantiate the requested algorithm
with open(filename, 'rb') as f:
computed_hash.update(f.read())
computed_hash = computed_hash.hexdigest()
with open(sha_file) as f:
hash_value = f.read().split()[0]
if computed_hash != hash_value:
raise ValueError(f"The hash did not match:\n"
f" - {filename}: {repr(computed_hash)}\n"
f" - {sha_file}: {repr(hash_value)}")
print(f"Confirmed checksum OK for: {filename}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
prog = 'checksum.py',
description = 'Checks a hash against a known sha '
'value and raises error if not equal')
parser.add_argument('filename')
parser.add_argument('sha_file')
    parser.add_argument('alg', nargs='?', default='sha256')
args = parser.parse_args()
checksum(args.filename, args.sha_file, args.alg)
| 1,269 | Python | .py | 29 | 35.413793 | 72 | 0.616505 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
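checksum.py above reads the whole file into memory before hashing, which is fine for small installers but wasteful for very large artifacts. A chunked variant using only hashlib (the 1 MiB chunk size is an arbitrary choice):

import hashlib
def checksum_chunked(filename, alg='sha256', chunk_size=1 << 20):
    h = hashlib.new(alg)
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)  # feed the file one block at a time
    return h.hexdigest()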
| 5,333 | psychopy-env.yml | psychopy_psychopy/conda/psychopy-env.yml |
name: psychopy
channels:
- conda-forge
dependencies:
- python>=3.8
- psychopy
- pip
- pip:
- psychtoolbox
- pygame
- pyo
- pyparallel; platform_system != "Windows"
- SoundFile; platform_system == "Windows"
- websocket_client
| 237 | Python | .py | 14 | 15.071429 | 44 | 0.721973 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,334 | compatibility.py | psychopy_psychopy/psychopy/compatibility.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from packaging.version import Version
import psychopy.data
######### Begin Compatibility Class Definitions #########
class _oldStyleBaseTrialHandler():
"""Please excuse these ugly kluges, but in order to unpickle
psydat pickled trial handlers that were created using the old-style
(pre python 2.2) class, original classes have to be defined.
"""
pass
class _oldStyleBaseStairHandler():
    """Stubbed compatibility class for BaseStairHandler"""
    pass
class _oldStyleTrialHandler():
    """Stubbed compatibility class for TrialHandler"""
    pass
class _oldStyleStairHandler():
    """Stubbed compatibility class for StairHandler"""
    pass
class _oldStyleMultiStairHandler():
    """Stubbed compatibility class for MultiStairHandler"""
    pass
######### End Compatibility Class Definitions #########
def _convertToNewStyle(newClass, oldInstance):
"""Converts un-pickled old-style compatibility classes to new-style ones
by initializing a new-style class and copying the old compatibility
instance's attributes.
"""
# if the oldInstance was an ExperimentHandler it wouldn't throw an error
# related to itself, but to the underlying loops within it. So check if we
# have that and then do imports on each loop
if oldInstance.__class__.__name__ == 'ExperimentHandler':
newHandler = psychopy.data.ExperimentHandler()
# newClass() # Init a new new-style object
else:
newHandler = newClass([], 0) # Init a new new-style object
for thisAttrib in dir(oldInstance):
# can handle each attribute differently
if 'instancemethod' in str(type(getattr(oldInstance, thisAttrib))):
# this is a method
continue
elif thisAttrib == '__weakref__':
continue
else:
value = getattr(oldInstance, thisAttrib)
setattr(newHandler, thisAttrib, value)
return newHandler
def checkCompatibility(old, new, prefs=None, fix=True):
"""Check for known compatibility issue between a pair of versions and fix
automatically if possible. (This facility, and storing the most recently
run version of the app, was added in version 1.74.00)
usage::
ok, msg = checkCompatibility(old, new, prefs, fix=True)
prefs is a standard psychopy preferences object. It isn't needed by all
checks but may be useful.
This function can be used simply to check for issues by setting fix=False
"""
if old == new:
return 1, "" # no action needed
if old > new: # switch them over
old, new = new, old
msg = "From %s to %s:" % (old, new)
warning = False
if old[0:4] < '1.74':
msg += ("\n\nThere were many changes in version 1.74.00 that will "
"break\ncompatibility with older versions. Make sure you read"
" the changelog carefully\nbefore using this version. Do not "
"upgrade to this version halfway through an experiment.\n")
if fix and 'PatchComponent' not in prefs.builder['hiddenComponents']:
prefs.builder['hiddenComponents'].append('PatchComponent')
warning = True
if not warning:
msg += "\nNo known compatibility issues"
return (not warning), msg
def checkUpdatesInfo(old, new):
"""
Checks whether we need to display information from a new update, e.g. introducing a new feature.
Parameters
----------
old : str or packaging.version.Version
Last version which was opened
new : str or packaging.version.Version
Current version
Returns
-------
list[str]
List of strings with markdown content for relevant updates
"""
from psychopy.preferences import prefs
# make sure both versions are Version objects
if isinstance(old, str):
old = Version(old)
if isinstance(new, str):
new = Version(new)
# start off with no messages
messages = []
# if not a new version, no action needed
if old >= new:
return messages
# find changes folder
changesDir = Path(prefs.paths['psychopy']) / "changes"
# if it is a new version, check for updates
for file in changesDir.glob("*.md"):
# try to Version-ise target
try:
target = Version(file.stem)
except (TypeError, ValueError):
# skip if it fails
continue
# have we just crossed the target version?
if old < target < new:
# load the markdown file
msg = file.read_text(encoding="utf-8")
# add its contents to messages array
messages.append(msg)
# reverse messages so they go from most-to-least recent
messages.reverse()
return messages
| 4,935 | Python | .py | 122 | 33.622951 | 100 | 0.666458 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
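compatibility.py above decides which update notes to show with the comparison old < target < new on packaging Version objects. The same crossing test in isolation:

from packaging.version import Version
old, new = Version('2023.2.3'), Version('2024.1.1')
targets = [Version(s) for s in ('2023.1.0', '2023.2.0', '2024.1.0')]
print([str(t) for t in targets if old < t < new])  # -> ['2024.1.0']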
| 5,335 | piloting.py | psychopy_psychopy/psychopy/piloting.py |
from psychopy import logging
# global variable used throughout PsychoPy to tell whether we're in pilot mode
PILOTING = False
def setPilotMode(mode):
"""
Set pilot mode to be True or False to enable/disable piloting features.
Parameters
----------
mode : bool
True for piloting, False otherwise.
"""
global PILOTING
# set mode
PILOTING = bool(mode)
    # log change
    if PILOTING:
        logging.exp("Running in pilot mode.")
    else:
        logging.exp("Pilot mode disabled.")
def getPilotMode():
"""
Get the current state of pilot mode.
Returns
-------
bool
True for piloting, False otherwise.
"""
global PILOTING
return PILOTING
def setPilotModeFromArgs():
"""
Set pilot mode according to the arguments passed to whatever script invoked PsychoPy.
Returns
-------
bool
True for piloting, False otherwise.
"""
import argparse
global PILOTING
# make argument parser
parser = argparse.ArgumentParser()
# define pilot arg and abbreviation
parser.add_argument('--pilot', action='store_true', dest='pilot')
parser.add_argument('--d', action='store_true', dest='pilot')
# set mode
known_args, unknownArgs = parser.parse_known_args()
setPilotMode(
known_args.pilot
)
return getPilotMode()
| 1,310 | Python | .py | 49 | 21.571429 | 89 | 0.663731 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
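piloting.py above uses parse_known_args so a --pilot flag can be picked off without choking on arguments meant for the experiment script itself. That behavior in isolation:

import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--pilot', action='store_true', dest='pilot')
known, unknown = parser.parse_known_args(['--pilot', '--screen', '1'])
print(known.pilot)   # True
print(unknown)       # ['--screen', '1'] is left for other consumers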
| 5,336 | filters.py | psychopy_psychopy/psychopy/filters.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""filters.py placeholder file for backwards compatibility; Dec 2015
"""
from psychopy import logging
logging.warning('Deprecated v1.84.00: instead of `from psychopy import '
'filters`, now do `from psychopy.visual import filters`')
from psychopy.visual.filters import *  # pylint: disable=W0401,W0614
| 366 | Python | .py | 8 | 42.375 | 73 | 0.729577 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,337 | colors.py | psychopy_psychopy/psychopy/colors.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions for working with colors.
"""
__all__ = [
"colorExamples",
"colorNames",
"colorSpaces",
"isValidColor",
"hex2rgb255",
"Color"
]
import re
from math import inf
from psychopy import logging
import psychopy.tools.colorspacetools as ct
from psychopy.tools.mathtools import infrange
import numpy as np
# Dict of examples of PsychoPy Red at 12% opacity in different formats
colorExamples = {
'named': 'crimson',
'hex': '#F2545B',
'hexa': '#F2545B1E',
'rgb': (0.89, -0.35, -0.28),
'rgba': (0.89, -0.35, -0.28, -0.76),
'rgb1': (0.95, 0.32, 0.36),
'rgba1': (0.95, 0.32, 0.36, 0.12),
'rgb255': (242, 84, 91),
'rgba255': (242, 84, 91, 30),
'hsv': (357, 0.65, 0.95),
'hsva': (357, 0.65, 0.95, 0.12),
}
# Dict of named colours
colorNames = {
"none": (0, 0, 0, 0),
"transparent": (0, 0, 0, 0),
"aliceblue": (0.882352941176471, 0.945098039215686, 1),
"antiquewhite": (0.96078431372549, 0.843137254901961, 0.686274509803922),
"aqua": (-1, 1, 1),
"aquamarine": (-0.00392156862745097, 1, 0.662745098039216),
"azure": (0.882352941176471, 1, 1),
"beige": (0.92156862745098, 0.92156862745098, 0.725490196078431),
"bisque": (1, 0.788235294117647, 0.537254901960784),
"black": (-1, -1, -1),
"blanchedalmond": (1, 0.843137254901961, 0.607843137254902),
"blue": (-1, -1, 1),
"blueviolet": (0.0823529411764705, -0.662745098039216, 0.772549019607843),
"brown": (0.294117647058824, -0.670588235294118, -0.670588235294118),
"burlywood": (0.741176470588235, 0.443137254901961, 0.0588235294117647),
"cadetblue": (-0.254901960784314, 0.23921568627451, 0.254901960784314),
"chartreuse": (-0.00392156862745097, 1, -1),
"chestnut": (0.607843137254902, -0.27843137254902, -0.27843137254902),
"chocolate": (0.647058823529412, -0.176470588235294, -0.764705882352941),
"coral": (1, -0.00392156862745097, -0.372549019607843),
"cornflowerblue": (-0.215686274509804, 0.168627450980392, 0.858823529411765),
"cornsilk": (1, 0.945098039215686, 0.725490196078431),
"crimson": (0.725490196078431, -0.843137254901961, -0.529411764705882),
"cyan": (-1, 1, 1),
"darkblue": (-1, -1, 0.0901960784313725),
"darkcyan": (-1, 0.0901960784313725, 0.0901960784313725),
"darkgoldenrod": (0.443137254901961, 0.0509803921568628, -0.913725490196078),
"darkgray": (0.325490196078431, 0.325490196078431, 0.325490196078431),
"darkgreen": (-1, -0.215686274509804, -1),
"darkgrey": (0.325490196078431, 0.325490196078431, 0.325490196078431),
"darkkhaki": (0.482352941176471, 0.435294117647059, -0.16078431372549),
"darkmagenta": (0.0901960784313725, -1, 0.0901960784313725),
"darkolivegreen": (-0.333333333333333, -0.16078431372549, -0.631372549019608),
"darkorange": (1, 0.0980392156862746, -1),
"darkorchid": (0.2, -0.607843137254902, 0.6),
"darkred": (0.0901960784313725, -1, -1),
"darksalmon": (0.827450980392157, 0.176470588235294, -0.0431372549019607),
"darkseagreen": (0.12156862745098, 0.474509803921569, 0.12156862745098),
"darkslateblue": (-0.435294117647059, -0.52156862745098, 0.0901960784313725),
"darkslategray": (-0.631372549019608, -0.380392156862745, -0.380392156862745),
"darkslategrey": (-0.631372549019608, -0.380392156862745, -0.380392156862745),
"darkturquoise": (-1, 0.615686274509804, 0.63921568627451),
"darkviolet": (0.16078431372549, -1, 0.654901960784314),
"deeppink": (1, -0.843137254901961, 0.152941176470588),
"deepskyblue": (-1, 0.498039215686275, 1),
"dimgray": (-0.176470588235294, -0.176470588235294, -0.176470588235294),
"dimgrey": (-0.176470588235294, -0.176470588235294, -0.176470588235294),
"dodgerblue": (-0.764705882352941, 0.129411764705882, 1),
"firebrick": (0.396078431372549, -0.733333333333333, -0.733333333333333),
"floralwhite": (1, 0.96078431372549, 0.882352941176471),
"forestgreen": (-0.733333333333333, 0.0901960784313725, -0.733333333333333),
"fuchsia": (1, -1, 1),
"gainsboro": (0.725490196078431, 0.725490196078431, 0.725490196078431),
"ghostwhite": (0.945098039215686, 0.945098039215686, 1),
"gold": (1, 0.686274509803922, -1),
"goldenrod": (0.709803921568627, 0.294117647058824, -0.749019607843137),
"gray": (0.00392156862745097, 0.00392156862745097, 0.00392156862745097),
"grey": (0.00392156862745097, 0.00392156862745097, 0.00392156862745097),
"green": (-1, 0.00392156862745097, -1),
"greenyellow": (0.356862745098039, 1, -0.631372549019608),
"honeydew": (0.882352941176471, 1, 0.882352941176471),
"hotpink": (1, -0.176470588235294, 0.411764705882353),
"indigo": (-0.411764705882353, -1, 0.0196078431372548),
"ivory": (1, 1, 0.882352941176471),
"khaki": (0.882352941176471, 0.803921568627451, 0.0980392156862746),
"lavender": (0.803921568627451, 0.803921568627451, 0.96078431372549),
"lavenderblush": (1, 0.882352941176471, 0.92156862745098),
"lawngreen": (-0.0274509803921569, 0.976470588235294, -1),
"lemonchiffon": (1, 0.96078431372549, 0.607843137254902),
"lightblue": (0.356862745098039, 0.694117647058824, 0.803921568627451),
"lightcoral": (0.882352941176471, 0.00392156862745097, 0.00392156862745097),
"lightcyan": (0.756862745098039, 1, 1),
"lightgoldenrodyellow": (0.96078431372549, 0.96078431372549, 0.647058823529412),
"lightgray": (0.654901960784314, 0.654901960784314, 0.654901960784314),
"lightgreen": (0.129411764705882, 0.866666666666667, 0.129411764705882),
"lightgrey": (0.654901960784314, 0.654901960784314, 0.654901960784314),
"lightpink": (1, 0.427450980392157, 0.513725490196078),
"lightsalmon": (1, 0.254901960784314, -0.0431372549019607),
"lightseagreen": (-0.749019607843137, 0.396078431372549, 0.333333333333333),
"lightskyblue": (0.0588235294117647, 0.615686274509804, 0.96078431372549),
"lightslategray": (-0.0666666666666667, 0.0666666666666667, 0.2),
"lightslategrey": (-0.0666666666666667, 0.0666666666666667, 0.2),
"lightsteelblue": (0.380392156862745, 0.537254901960784, 0.741176470588235),
"lightyellow": (1, 1, 0.756862745098039),
"lime": (-1, 1, -1),
"limegreen": (-0.607843137254902, 0.607843137254902, -0.607843137254902),
"linen": (0.96078431372549, 0.882352941176471, 0.803921568627451),
"magenta": (1, -1, 1),
"maroon": (0.00392156862745097, -1, -1),
"mediumaquamarine": (-0.2, 0.607843137254902, 0.333333333333333),
"mediumblue": (-1, -1, 0.607843137254902),
"mediumorchid": (0.458823529411765, -0.333333333333333, 0.654901960784314),
"mediumpurple": (0.152941176470588, -0.12156862745098, 0.717647058823529),
"mediumseagreen": (-0.529411764705882, 0.403921568627451, -0.113725490196078),
"mediumslateblue": (-0.0352941176470588, -0.184313725490196, 0.866666666666667),
"mediumspringgreen": (-1, 0.96078431372549, 0.207843137254902),
"mediumturquoise": (-0.435294117647059, 0.63921568627451, 0.6),
"mediumvioletred": (0.56078431372549, -0.835294117647059, 0.0431372549019609),
"midnightblue": (-0.803921568627451, -0.803921568627451, -0.12156862745098),
"mintcream": (0.92156862745098, 1, 0.96078431372549),
"mistyrose": (1, 0.788235294117647, 0.764705882352941),
"moccasin": (1, 0.788235294117647, 0.419607843137255),
"navajowhite": (1, 0.741176470588235, 0.356862745098039),
"navy": (-1, -1, 0.00392156862745097),
"oldlace": (0.984313725490196, 0.92156862745098, 0.803921568627451),
"olive": (0.00392156862745097, 0.00392156862745097, -1),
"olivedrab": (-0.16078431372549, 0.113725490196078, -0.725490196078431),
"orange": (1, 0.294117647058824, -1),
"orangered": (1, -0.458823529411765, -1),
"orchid": (0.709803921568627, -0.12156862745098, 0.67843137254902),
"palegoldenrod": (0.866666666666667, 0.819607843137255, 0.333333333333333),
"palegreen": (0.192156862745098, 0.968627450980392, 0.192156862745098),
"paleturquoise": (0.372549019607843, 0.866666666666667, 0.866666666666667),
"palevioletred": (0.717647058823529, -0.12156862745098, 0.152941176470588),
"papayawhip": (1, 0.874509803921569, 0.670588235294118),
"peachpuff": (1, 0.709803921568627, 0.450980392156863),
"peru": (0.607843137254902, 0.0431372549019609, -0.505882352941176),
"pink": (1, 0.505882352941176, 0.592156862745098),
"plum": (0.733333333333333, 0.254901960784314, 0.733333333333333),
"powderblue": (0.380392156862745, 0.756862745098039, 0.803921568627451),
"purple": (0.00392156862745097, -1, 0.00392156862745097),
"red": (1, -1, -1),
"rosybrown": (0.474509803921569, 0.12156862745098, 0.12156862745098),
"royalblue": (-0.490196078431373, -0.176470588235294, 0.764705882352941),
"saddlebrown": (0.0901960784313725, -0.458823529411765, -0.850980392156863),
"salmon": (0.96078431372549, 0.00392156862745097, -0.105882352941176),
"sandybrown": (0.913725490196079, 0.286274509803922, -0.247058823529412),
"seagreen": (-0.63921568627451, 0.0901960784313725, -0.317647058823529),
"seashell": (1, 0.92156862745098, 0.866666666666667),
"sienna": (0.254901960784314, -0.356862745098039, -0.647058823529412),
"silver": (0.505882352941176, 0.505882352941176, 0.505882352941176),
"skyblue": (0.0588235294117647, 0.615686274509804, 0.843137254901961),
"slateblue": (-0.168627450980392, -0.294117647058823, 0.607843137254902),
"slategray": (-0.12156862745098, 0.00392156862745097, 0.129411764705882),
"slategrey": (-0.12156862745098, 0.00392156862745097, 0.129411764705882),
"snow": (1, 0.96078431372549, 0.96078431372549),
"springgreen": (-1, 1, -0.00392156862745097),
"steelblue": (-0.450980392156863, 0.0196078431372548, 0.411764705882353),
"tan": (0.647058823529412, 0.411764705882353, 0.0980392156862746),
"teal": (-1, 0.00392156862745097, 0.00392156862745097),
"thistle": (0.694117647058824, 0.498039215686275, 0.694117647058824),
"tomato": (1, -0.223529411764706, -0.443137254901961),
"turquoise": (-0.498039215686275, 0.756862745098039, 0.631372549019608),
"violet": (0.866666666666667, 0.0196078431372548, 0.866666666666667),
"wheat": (0.92156862745098, 0.741176470588235, 0.403921568627451),
"white": (1, 1, 1),
"whitesmoke": (0.92156862745098, 0.92156862745098, 0.92156862745098),
"yellow": (1, 1, -1),
"yellowgreen": (0.207843137254902, 0.607843137254902, -0.607843137254902)
}
# Convert all named colors to numpy arrays
for key in colorNames:
colorNames[key] = np.array(colorNames[key])
# Dict of regular expressions / value ranges for the different formats
colorSpaces = {
'named': re.compile("|".join(list(colorNames))), # A named colour space
'hex': re.compile(r'#[\dabcdefABCDEF]{6}'), # Hex
'rgb': [infrange(-1, 1), infrange(-1, 1), infrange(-1, 1)], # RGB from -1 to 1
'rgba': [infrange(-1, 1), infrange(-1, 1), infrange(-1, 1), infrange(0, 1)], # RGB + alpha from -1 to 1
'rgb1': [infrange(0, 1), infrange(0, 1), infrange(0, 1)], # RGB from 0 to 1
'rgba1': [infrange(0, 1), infrange(0, 1), infrange(0, 1), infrange(0, 1)], # RGB + alpha from 0 to 1
'rgb255': [infrange(0, 255, 1), infrange(0, 255, 1), infrange(0, 255, 1)], # RGB from 0 to 255
'rgba255': [infrange(0, 255, 1), infrange(0, 255, 1), infrange(0, 255, 1), infrange(0, 1)], # RGB + alpha from 0 to 255
'hsv': [infrange(0, 360, 1), infrange(0, 1), infrange(0, 1)], # HSV with hue from 0 to 360 and saturation/vibrancy from 0 to 1
'hsva': [infrange(0, 360, 1), infrange(0, 1), infrange(0, 1), infrange(0, 1)], # HSV with hue from 0 to 360 and saturation/vibrancy from 0 to 1 + alpha from 0 to 1
# 'rec709TF': [infrange(-4.5, 1), infrange(-4.5, 1), infrange(-4.5, 1)], # rec709TF adjusted RGB from -4.5 to 1
# 'rec709TFa': [infrange(-4.5, 1), infrange(-4.5, 1), infrange(-4.5, 1), infrange(0, 1)], # rec709TF adjusted RGB from -4.5 to 1 + alpha from 0 to 1
'srgb': [infrange(-1, 1), infrange(-1, 1), infrange(-1, 1)], # srgb from -1 to 1
'srgba': [infrange(-1, 1), infrange(-1, 1), infrange(-1, 1), infrange(0, 1)], # srgb from -1 to 1 + alpha from 0 to 1
'lms': [infrange(-1, 1), infrange(-1, 1), infrange(-1, 1), infrange(0, 1)], # LMS from -1 to 1
'lmsa': [infrange(-1, 1), infrange(-1, 1), infrange(-1, 1), infrange(0, 1)], # LMS + alpha from 0 to 1
'dkl': [infrange(-inf, inf), infrange(-inf, inf), infrange(-inf, inf), infrange(0, 1)], # DKL placeholder: Accepts any values
'dkla': [infrange(-inf, inf), infrange(-inf, inf), infrange(-inf, inf), infrange(0, 1)], # DKLA placeholder: Accepts any values + alpha from 0 to 1
'dklCart': [infrange(-inf, inf), infrange(-inf, inf), infrange(-inf, inf), infrange(0, 1)],
# Cartesian DKL placeholder: Accepts any values
'dklaCart': [infrange(-inf, inf), infrange(-inf, inf), infrange(-inf, inf), infrange(0, 1)],
# Cartesian DKLA placeholder: Accepts any values + alpha from 0 to 1
}
# Create subgroups of spaces for easy reference
integerSpaces = []
strSpaces = []
for key, val in colorSpaces.items():
    if isinstance(val, re.Pattern):
# Add any spaces which are str to a list
strSpaces.append(key)
elif isinstance(val, (list, tuple)):
# Add any spaces which are integer-only to a list
for cell in val:
if isinstance(cell, infrange):
if cell.step == 1 and key not in integerSpaces:
integerSpaces.append(key)
alphaSpaces = [
'rgba', 'rgba1', 'rgba255', 'hsva', 'srgba', 'lmsa', 'dkla', 'dklaCart']
nonAlphaSpaces = list(colorSpaces)
for val in alphaSpaces:
nonAlphaSpaces.remove(val)
class Color:
"""A class to store color details, knows what colour space it's in and can
supply colours in any space.
Parameters
----------
color : ArrayLike or None
Color values (coordinates). Value must be in a format applicable to the
specified `space`.
space : str or None
Colorspace to interpret the value of `color` as being within.
contrast : int or float
Factor to modulate the contrast of the color.
conematrix : ArrayLike or None
Cone matrix for colorspaces which require it. Must be a 3x3 array.
"""
def __init__(self, color=None, space=None, contrast=None, conematrix=None):
self._cache = {}
self._renderCache = {}
self.contrast = contrast if isinstance(contrast, (int, float)) else 1
self.alpha = 1
self.valid = False
self.conematrix = conematrix
# defined here but set later
self._requested = None
self._requestedSpace = None
self.set(color=color, space=space)
def validate(self, color, space=None):
"""
Check that a color value is valid in the given space, or all spaces if space==None.
"""
# Treat None as a named color
if color is None:
color = "none"
if isinstance(color, str):
if color == "":
color = "none"
# Handle everything as an array
if not isinstance(color, np.ndarray):
color = np.array(color)
if color.ndim <= 1:
color = np.reshape(color, (1, -1))
# If data type is string, check against named and hex as these override other spaces
if color.dtype.char == 'U':
# Remove superfluous quotes
for i in range((len(color[:, 0]))):
color[i, 0] = color[i, 0].replace("\"", "").replace("'", "")
# If colors are all named, override color space
namedMatch = np.vectorize(
lambda col: bool(colorSpaces['named'].fullmatch(
str(col).lower()))) # match regex against named
if all(namedMatch(color[:, 0])):
space = 'named'
# If colors are all hex, override color space
hexMatch = np.vectorize(
lambda col: bool(colorSpaces['hex'].fullmatch(str(col)))) # match regex against hex
if all(hexMatch(color[:, 0])):
space = 'hex'
# If color is a string but does not match any string space, it's invalid
if space not in strSpaces:
self.valid = False
# Error if space still not set
if not space:
self.valid = False
raise ValueError("Please specify a color space.")
# Check that color space is valid
        if space not in colorSpaces:
self.valid = False
raise ValueError("{} is not a valid color space.".format(space))
# Get number of columns
if color.ndim == 1:
ncols = len(color)
else:
ncols = color.shape[1]
# Extract alpha if set
if space in strSpaces and ncols > 1:
# If color should only be one value, extract second row
self.alpha = color[:, 1]
color = color[:, 0]
ncols -= 1
elif space not in strSpaces and ncols > 3:
# If color should be triplet, extract fourth row
self.alpha = color[:, 3]
color = color[:, :3]
ncols -= 1
        elif space not in strSpaces and ncols == 2:
            # If color should be triplet but is single value + alpha,
            # split into the value (col 0) and alpha (col 1)
            self.alpha = color[:, 1]
            color = color[:, 0]
ncols -= 1
# If single value given in place of triplet, duplicate it
if space not in strSpaces and ncols == 1:
color = np.tile(color, (1, 3))
# ncols = 3 # unused?
        # If values should be integers, round them
        if space in integerSpaces:
            color = color.round()
# Finally, if array is only 1 long, remove extraneous dimension
if color.shape[0] == 1:
color = color[0]
return color, space
def set(self, color=None, space=None):
"""Set the colour of this object - essentially the same as what happens
on creation, but without having to initialise a new object.
"""
# If input is a Color object, duplicate all settings
if isinstance(color, Color):
self._requested = color._requested
self._requestedSpace = color._requestedSpace
self.valid = color.valid
if color.valid:
self.rgba = color.rgba
return
# Store requested colour and space (or defaults, if none given)
self._requested = color
self._requestedSpace = space
# Validate and prepare values
color, space = self.validate(color, space)
# Convert to lingua franca
if space in colorSpaces:
self.valid = True
setattr(self, space, color)
else:
self.valid = False
raise ValueError("{} is not a valid color space.".format(space))
def render(self, space='rgb'):
"""Apply contrast to the base color value and return the adjusted color
value.
"""
if space not in colorSpaces:
raise ValueError(f"{space} is not a valid color space")
# If value is cached, return it rather than doing calculations again
if space in self._renderCache:
return self._renderCache[space]
# Transform contrast to match rgb
contrast = self.contrast
contrast = np.reshape(contrast, (-1, 1))
contrast = np.hstack((contrast, contrast, contrast))
# Multiply
adj = np.clip(self.rgb * contrast, -1, 1)
buffer = self.copy()
buffer.rgb = adj
self._renderCache[space] = getattr(buffer, space)
return self._renderCache[space]
def __repr__(self):
"""If colour is printed, it will display its class and value.
"""
if self.valid:
if self.named:
return (f"<{self.__class__.__module__}."
f"{self.__class__.__name__}: {self.named}, "
f"alpha={self.alpha}>")
else:
return (f"<{self.__class__.__module__}."
f"{self.__class__.__name__}: "
f"{tuple(np.round(self.rgba, 2))}>")
else:
return (f"<{self.__class__.__module__}."
f"{self.__class__.__name__}: Invalid>")
def __bool__(self):
"""Determines truth value of object"""
return self.valid
def __len__(self):
"""Determines the length of object"""
if len(self.rgb.shape) > 1:
return self.rgb.shape[0]
else:
return int(bool(self.rgb.shape))
# --------------------------------------------------------------------------
# Rich comparisons
#
def __eq__(self, target):
"""`==` will compare RGBA values, rounded to 2dp"""
if isinstance(target, Color):
return np.all(np.round(target.rgba, 2) == np.round(self.rgba, 2))
        elif target is None:
return self._requested is None
else:
return False
def __ne__(self, target):
"""`!=` will return the opposite of `==`"""
return not self == target
# --------------------------------------------------------------------------
# Operators
#
def __add__(self, other):
buffer = self.copy()
# If target is a list or tuple, convert it to an array
if isinstance(other, (list, tuple)):
other = np.array(other)
# If target is a single number, add it to each rgba value
if isinstance(other, (int, float)):
buffer.rgba = self.rgba + other
# If target is an array, add the arrays provided they are viable
if isinstance(other, np.ndarray):
if other.shape in [(len(self), 1), self.rgb.shape, self.rgba.shape]:
buffer.rgba = self.rgba + other
# If target is a Color object, add together the rgba values
if isinstance(other, Color):
if len(self) == len(other):
buffer.rgba = self.rgba + other.rgba
return buffer
def __sub__(self, other):
buffer = self.copy()
# If target is a list or tuple, convert it to an array
if isinstance(other, (list, tuple)):
other = np.array(other)
# If target is a single number, subtract it from each rgba value
if isinstance(other, (int, float)):
buffer.rgba = self.rgba - other
# If target is an array, subtract the arrays provided they are viable
if isinstance(other, np.ndarray):
if other.shape in [(len(self), 1), self.rgb.shape, self.rgba.shape]:
buffer.rgba = self.rgba - other
        # If target is a Color object, subtract the rgb values
        if isinstance(other, Color):
            if len(self) == len(other):
                buffer.rgb = self.rgb - other.rgb
return buffer
# --------------------------------------------------------------------------
# Methods and properties
#
def copy(self):
"""Return a duplicate of this colour"""
return self.__copy__()
def __copy__(self):
return self.__deepcopy__()
    def __deepcopy__(self, memo=None):
dupe = self.__class__(
self._requested, self._requestedSpace, self.contrast)
dupe.rgba = self.rgba
dupe.valid = self.valid
return dupe
def getReadable(self, contrast=4.5/21):
"""
Get a color which will stand out and be easily readable against this
one. Useful for choosing text colors based on background color.
Parameters
----------
contrast : float
Desired perceived contrast between the two colors, between 0 (the
same color) and 1 (as opposite as possible). Default is the
w3c recommended minimum of 4.5/21 (dividing by 21 to adjust for
sRGB units).
Returns
-------
colors.Color
A contrasting color to this color.
"""
# adjust contrast to sRGB
contrast *= 21
# get value as rgb1
rgb = self.rgb1
# convert to srgb
srgb = rgb**2.2 * [0.2126, 0.7151, 0.0721]
# apply contrast adjustment
if np.sum(srgb) < 0.5:
srgb = (srgb + 0.05) * contrast
else:
srgb = (srgb + 0.05) / contrast
# convert back
rgb = (srgb / [0.2126, 0.7151, 0.0721])**(1/2.2)
# cap
rgb = np.clip(rgb, 0, 1)
# Return new color
return Color(rgb, "rgb1")
@property
def alpha(self):
"""How opaque (1) or transparent (0) this color is. Synonymous with
`opacity`.
"""
return self._alpha
@alpha.setter
def alpha(self, value):
# Treat 1x1 arrays as a float
if isinstance(value, np.ndarray):
if value.size == 1:
value = float(value[0])
else:
try:
value = float(value) # If coercible to float, do so
except (TypeError, ValueError) as err:
raise TypeError(
"Could not set alpha as value `{}` of type `{}`".format(
value, type(value).__name__
)
)
value = np.clip(value, 0, 1) # Clip value(s) to within range
# Set value
self._alpha = value
# Clear render cache
self._renderCache = {}
@property
def contrast(self):
if hasattr(self, "_contrast"):
return self._contrast
@contrast.setter
def contrast(self, value):
# Set value
self._contrast = value
# Clear render cache
self._renderCache = {}
@property
def opacity(self):
"""How opaque (1) or transparent (0) this color is (`float`). Synonymous
with `alpha`.
"""
return self.alpha
@opacity.setter
def opacity(self, value):
self.alpha = value
def _appendAlpha(self, space):
# Get alpha, if necessary transform to an array of same length as color
alpha = self.alpha
if isinstance(alpha, (int, float)):
if len(self) > 1:
alpha = np.tile([alpha], (len(self), 1))
else:
alpha = np.array([alpha])
if isinstance(alpha, np.ndarray) and len(self) > 1:
alpha = alpha.reshape((len(self), 1))
# Get color
color = getattr(self, space)
# Append alpha to color
return np.append(color, alpha, axis=1 if color.ndim > 1 else 0)
#---spaces---
# Lingua franca is rgb
@property
def rgba(self):
"""Color value expressed as an RGB triplet from -1 to 1, with alpha
values (0 to 1).
"""
return self._appendAlpha('rgb')
@rgba.setter
def rgba(self, color):
self.rgb = color
@property
def rgb(self):
"""Color value expressed as an RGB triplet from -1 to 1.
"""
if not self.valid:
return
if hasattr(self, '_franca'):
rgb = self._franca
return rgb
else:
return np.array([0, 0, 0])
@rgb.setter
def rgb(self, color):
# Validate
color, space = self.validate(color, space='rgb')
if space != 'rgb':
setattr(self, space, color)
return
# Set color
self._franca = color
# Clear outdated values from cache
self._cache = {'rgb': color}
self._renderCache = {}
@property
def rgba255(self):
"""Color value expressed as an RGB triplet from 0 to 255, with alpha
value (0 to 1).
"""
return self._appendAlpha('rgb255')
@rgba255.setter
def rgba255(self, color):
self.rgb255 = color
@property
def rgb255(self):
"""Color value expressed as an RGB triplet from 0 to 255.
"""
if not self.valid:
return
# Recalculate if not cached
if 'rgb255' not in self._cache:
self._cache['rgb255'] = np.round(255 * (self.rgb + 1) / 2)
return self._cache['rgb255']
@rgb255.setter
def rgb255(self, color):
# Validate
color, space = self.validate(color, space='rgb255')
if space != 'rgb255':
setattr(self, space, color)
return
# Iterate through values and do conversion
self.rgb = 2 * (color / 255 - 0.5)
# Clear outdated values from cache
self._cache = {'rgb255': color}
self._renderCache = {}
@property
def rgba1(self):
"""Color value expressed as an RGB triplet from 0 to 1, with alpha value
(0 to 1).
"""
return self._appendAlpha('rgb1')
@rgba1.setter
def rgba1(self, color):
self.rgb1 = color
@property
def rgb1(self):
"""Color value expressed as an RGB triplet from 0 to 1.
"""
if not self.valid:
return
# Recalculate if not cached
if 'rgb1' not in self._cache:
self._cache['rgb1'] = (self.rgb + 1) / 2
return self._cache['rgb1']
@rgb1.setter
def rgb1(self, color):
# Validate
color, space = self.validate(color, space='rgb1')
if space != 'rgb1':
setattr(self, space, color)
return
# Iterate through values and do conversion
self.rgb = 2 * (color - 0.5)
# Clear outdated values from cache
self._cache = {'rgb1': color}
self._renderCache = {}
@property
def hex(self):
"""Color value expressed as a hex string. Can be a '#' followed by 6
values from 0 to F (e.g. #F2545B).
"""
if not self.valid:
return
if 'hex' not in self._cache:
# Map rgb255 values to corresponding letters in hex
hexmap = {10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f'}
# Handle arrays
if self.rgb255.ndim > 1:
rgb255 = self.rgb255
# Iterate through rows of rgb255
self._cache['hex'] = np.array([])
for row in rgb255:
rowHex = '#'
# Convert each value to hex and append
for val in row:
                        dig = hex(int(val))[2:]
rowHex += dig if len(dig) == 2 else '0' + dig
# Append full hex value to new array
self._cache['hex'] = np.append(
self._cache['hex'], [rowHex], 0)
else:
rowHex = '#'
# Convert each value to hex and append
for val in self.rgb255:
dig = hex(int(val))[2:]
rowHex += dig if len(dig) == 2 else '0' + dig
# Append full hex value to new array
self._cache['hex'] = rowHex
return self._cache['hex']
@hex.setter
def hex(self, color):
# Validate
color, space = self.validate(color, space='hex')
if space != 'hex':
setattr(self, space, color)
return
        if len(color) > 1:
            # Handle arrays
            rgb255 = []
            for row in color:
                if isinstance(row, np.ndarray):
                    row = row[0]
                row = row.strip('#')
                # Convert string to list of strings
                hexList = [row[:2], row[2:4], row[4:6]]
                # Convert strings to int
                hexInt = [int(val, 16) for val in hexList]
                # Collect each converted row
                rgb255.append(hexInt)
            rgb255 = np.array(rgb255)
else:
# Handle single values
if isinstance(color, np.ndarray):
# Strip away any extraneous numpy layers
color = color[(0,)*color.ndim]
color = color.strip('#')
# Convert string to list of strings
hexList = [color[:2], color[2:4], color[4:6]]
# Convert strings to int
hexInt = [int(val, 16) for val in hexList]
# Convert to array
rgb255 = np.array(hexInt)
# Set rgb255 accordingly
self.rgb255 = rgb255
# Clear outdated values from cache
self._cache = {'hex': color}
self._renderCache = {}
@property
def named(self):
"""The name of this color, if it has one (`str`).
"""
if 'named' not in self._cache:
self._cache['named'] = None
# If alpha is 0, then we know that the color is None
if isinstance(self.alpha, np.ndarray):
invis = all(self.alpha == 0)
elif isinstance(self.alpha, (int, float)):
invis = self.alpha == 0
else:
invis = False
if invis:
self._cache['named'] = 'none'
return self._cache['named']
self._cache['named'] = np.array([])
        # Handle array
        if len(self) > 1:
            for row in self.rgb:
                for name, val in colorNames.items():
                    if name == 'none':  # skip None
                        continue
                    if all(val[:3] == row):
                        self._cache['named'] = np.append(
                            self._cache['named'], [name], 0)
                        break
            self._cache['named'] = np.reshape(self._cache['named'], (-1, 1))
        else:
            rgb = self.rgb
            for name, val in colorNames.items():
                if name == 'none':  # skip None
                    continue
                if all(val[:3] == rgb):
                    self._cache['named'] = name
                    break
return self._cache['named']
@named.setter
def named(self, color):
# Validate
color, space = self.validate(color=color, space='named')
if space != 'named':
setattr(self, space, color)
return
# Retrieve named colour
        if len(color) > 1:
            # Handle arrays: look each row up, then assign all at once
            rgbRows = []
            for row in color:
                row = str(np.reshape(row, ()))  # Enforce str
                if row.lower() in colorNames:
                    rgbRows.append(colorNames[row.lower()][:3])
                if row.lower() == 'none':
                    self.alpha = 0
            self.rgb = np.array(rgbRows)
else:
color = str(np.reshape(color, ())) # Enforce str
if color.lower() in colorNames:
self.rgb = colorNames[str(color).lower()]
if color.lower() == 'none':
self.alpha = 0
# Clear outdated values from cache
self._cache = {'named': color}
self._renderCache = {}
@property
def hsva(self):
"""Color value expressed as an HSV triplet, with alpha value (0 to 1).
"""
return self._appendAlpha('hsv')
@hsva.setter
def hsva(self, color):
self.hsv = color
@property
def hsv(self):
"""Color value expressed as an HSV triplet.
"""
        if 'hsv' not in self._cache:
self._cache['hsv'] = ct.rgb2hsv(self.rgb)
return self._cache['hsv']
@hsv.setter
def hsv(self, color):
# Validate
color, space = self.validate(color=color, space='hsv')
if space != 'hsv':
setattr(self, space, color)
return
# Apply via rgba255
self.rgb = ct.hsv2rgb(color)
# Clear outdated values from cache
self._cache = {'hsv': color}
self._renderCache = {}
@property
def lmsa(self):
"""Color value expressed as an LMS triplet, with alpha value (0 to 1).
"""
return self._appendAlpha('lms')
@lmsa.setter
def lmsa(self, color):
self.lms = color
@property
def lms(self):
"""Color value expressed as an LMS triplet.
"""
if 'lms' not in self._cache:
self._cache['lms'] = ct.rgb2lms(self.rgb)
return self._cache['lms']
@lms.setter
def lms(self, color):
# Validate
color, space = self.validate(color=color, space='lms')
if space != 'lms':
setattr(self, space, color)
return
# Apply via rgba255
self.rgb = ct.lms2rgb(color, self.conematrix)
# Clear outdated values from cache
self._cache = {'lms': color}
self._renderCache = {}
@property
def dkla(self):
"""Color value expressed as a DKL triplet, with alpha value (0 to 1).
"""
return self._appendAlpha('dkl')
@dkla.setter
def dkla(self, color):
self.dkl = color
@property
def dkl(self):
"""Color value expressed as a DKL triplet.
"""
if 'dkl' not in self._cache:
raise NotImplementedError(
"Conversion from rgb to dkl is not yet implemented.")
return self._cache['dkl']
@dkl.setter
def dkl(self, color):
# Validate
color, space = self.validate(color=color, space='dkl')
if space != 'dkl':
setattr(self, space, color)
return
# Apply via rgba255
self.rgb = ct.dkl2rgb(color, self.conematrix)
# Clear outdated values from cache
self._cache = {'dkl': color}
self._renderCache = {}
@property
def dklaCart(self):
"""Color value expressed as a cartesian DKL triplet, with alpha value
(0 to 1).
"""
        return self._appendAlpha('dklCart')
@dklaCart.setter
def dklaCart(self, color):
self.dklCart = color
@property
def dklCart(self):
"""Color value expressed as a cartesian DKL triplet.
"""
if 'dklCart' not in self._cache:
self._cache['dklCart'] = ct.rgb2dklCart(self.rgb)
return self._cache['dklCart']
@dklCart.setter
def dklCart(self, color):
# Validate
color, space = self.validate(color=color, space='dklCart')
        if space != 'dklCart':
setattr(self, space, color)
return
# Apply via rgba255
self.rgb = ct.dklCart2rgb(color, self.conematrix)
# Clear outdated values from cache
self._cache = {'dklCart': color}
self._renderCache = {}
@property
def srgb(self):
"""
Color value expressed as an sRGB triplet
"""
if 'srgb' not in self._cache:
self._cache['srgb'] = ct.srgbTF(self.rgb)
return self._cache['srgb']
@srgb.setter
def srgb(self, color):
# Validate
color, space = self.validate(color=color, space='srgb')
if space != 'srgb':
setattr(self, space, color)
return
# Apply via rgba255
self.rgb = ct.srgbTF(color, reverse=True)
# Clear outdated values from cache
self._cache = {'srgb': color}
self._renderCache = {}
# removing for now
# @property
# def rec709TF(self):
# if 'rec709TF' not in self._cache:
# self._cache['rec709TF'] = ct.rec709TF(self.rgb)
# return self._cache['rec709TF']
#
# @rec709TF.setter
# def rec709TF(self, color):
# # Validate
# color, space = self.validate(color=color, space='rec709TF')
# if space != 'rec709TF':
# setattr(self, space, color)
# return
# # Apply via rgba255
# self.rgb = ct.rec709TF(color, reverse=True)
# # Clear outdated values from cache
# self._cache = {'rec709TF': color}
# self._renderCache = {}
# ------------------------------------------------------------------------------
# Legacy functions
#
# Old reference tables
colors = colorNames
# colorsHex = {key: Color(key, 'named').hex for key in colors}
# colors255 = {key: Color(key, 'named').rgb255 for key in colors}
# Old conversion functions
def hex2rgb255(hexColor):
    """Deprecated as of 2021.0
    Converts a hex color string (e.g. "#05ff66") into an rgb triplet
    ranging from 0:255
    """
col = Color(hexColor, 'hex')
if len(hexColor.strip('#')) == 6:
return col.rgb255
elif len(hexColor.strip('#')) == 8:
return col.rgba255
def isValidColor(color, space='rgb'):
    """Deprecated as of 2021.0
    """
    logging.warning(
        "DEPRECATED: While psychopy.colors.isValidColor will still roughly "
        "work, you should use a Color object, allowing you to check its "
        "validity simply by converting it to a `bool` (e.g. `bool(myColor)` or "
        "`if myColor:`). If you use this function for colors in any space "
        "other than hex, named or rgb, please specify the color space.")
    try:
        buffer = Color(color, space)
        return bool(buffer)
    except Exception:
        return False
if __name__ == "__main__":
pass
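# Editor's illustrative sketch (not part of the original module): a quick
# round-trip of one named color through a few spaces.
if __name__ == "__main__":
    demo = Color('crimson', 'named')
    print(demo.rgb255)  # [220. 20. 60.]
    print(demo.hex)     # '#dc143c'
    print(demo.rgba)    # RGB in -1..1 plus alpha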
| 41,138 | Python | .py | 963 | 33.843198 | 167 | 0.587821 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,338 | gamma.py | psychopy_psychopy/psychopy/gamma.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""gamma.py placeholder file for backwards compatibility; Dec 2015
"""
from psychopy import logging
logging.warning('Deprecated v1.84.00: instead of `from psychopy import gamma`'
', now do `from psychopy.visual import gamma`')
| 291 | Python | .py | 7 | 38 | 78 | 0.70922 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,339 | liaison.py | psychopy_psychopy/psychopy/liaison.py |
"""
Liaison provides a simple server for Remote Process Communication (RPC),
using WebSockets as communication protocol and JSON for message packing
This is a simple, secure alternative to existing general-purpose options such as
zeroRPC
**This is a work in progress and all subject to change [2023]**
@author Alain Pitiot
@copyright (c) 2023 Open Science Tools Ltd.
"""
import inspect
import asyncio
import signal
import json
import sys
import traceback
import logging as _logging
from psychopy import logging
from psychopy.localization import _translate
try:
    import websockets
except ModuleNotFoundError as err:
    err.msg = _translate(
        "`psychopy.liaison` requires the package `websockets`, this can be installed via command line:\n"
        "`pip install websockets`."
    )
    raise err
class LiaisonJSONEncoder(json.JSONEncoder):
"""
JSON encoder which calls the `getJSON` method of an object (if it has one) to convert to a
string before JSONifying.
"""
def default(self, o):
# if object has a getJSON method, use it
if hasattr(o, "getJSON"):
return o.getJSON(asString=False)
# if object is an error, transform in standardised form
if isinstance(o, BaseException):
tb = traceback.format_exception(type(o), o, o.__traceback__)
msg = "".join(tb)
return {
'type': "error",
'msg': msg,
'context': getattr(o, "userdata", None)
}
# otherwise behave as normal
try:
return json.JSONEncoder.default(self, o=o)
except TypeError:
return str(o)
class LiaisonLogger(logging._Logger):
"""
Special logger for Liaison which logs any messages sent, and the direction they
were sent in (Python to JS or JS to Python). Logs both at level INFO.
"""
def sent(self, message):
"""
Log a message sent by Liaison
"""
self.log(
message=message,
level=logging.INFO,
levelname="LIAISON PY->JS",
)
# immediately flush - we're not in a frame loop
self.flush()
def received(self, message):
"""
Log a message received by Liaison
"""
self.log(
message=message,
level=logging.INFO,
levelname="LIAISON JS->PY",
)
# immediately flush - we're not in a frame loop
self.flush()
class WebSocketServer:
"""
A simple Liaison server, using WebSockets as communication protocol.
"""
def __init__(self):
"""
Create an instance of a Liaison WebSocket server, to which clients can connect to run the methods of class instances.
"""
# the set of currently established connections:
self._connections = set()
# setup a dedicated logger for messages
self.logger = LiaisonLogger()
# setup a base Python logger
self._logger = _logging.getLogger('liaison.WebSocketServer')
self._logger.setLevel(logging.DEBUG)
consoleHandler = _logging.StreamHandler()
consoleHandler.setLevel(_logging.DEBUG)
consoleHandler.setFormatter(_logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
self._logger.addHandler(consoleHandler)
# register the Liaison methods available to clients:
self._methods = {
'liaison': (self, ['listRegisteredMethods', 'addLogFile', 'pingPong'])
}
    def addLogFile(self, file, loggingLevel=logging.INFO):
        # actualize logging level
        if isinstance(loggingLevel, str):
            loggingLevel = getattr(logging, loggingLevel.upper())
        # if given a log file, add it at the requested level
        logFile = logging.LogFile(
            file, level=loggingLevel
        )
        self.logger.addTarget(logFile)
def registerObject(self, targetObject, referenceName):
"""
Register handle of the given target object, so it can be acted upon by clients of the server.
Parameters
----------
targetObject : object
the object whose handle will be registered as <reference name>
referenceName : string
the name used to refer to the given target object when calling its method
"""
self._methods[referenceName] = (targetObject, ['registerMethods'])
# create, log and return success message
msg = f"Registered object of class: {type(targetObject).__name__} with reference name: {referenceName}"
self._logger.info(msg)
return msg
def registerMethods(self, targetObject, referenceName):
"""
Register all public methods of the given target object, so they can be called by clients of the server.
Parameters
----------
targetObject : object
the object whose methods will be registered as <reference name>.<method name>
referenceName : string
the name used to refer to the given target object when calling its method
"""
targetCls = type(targetObject)
# choose methods
targetMethods = []
for name in dir(targetObject):
# if function is callable and not private, register it
fnct = getattr(targetObject, name)
if callable(fnct) and not name.startswith("__"):
targetMethods.append(name)
# if function is a property and not private, register its fget method
clsfnct = getattr(targetCls, name, None)
if isinstance(clsfnct, property) and not name.startswith("__"):
targetMethods.append(name)
self._methods[referenceName] = (targetObject, targetMethods)
# create, log and return success message
msg = (
f"Registered the following methods: {self._methods[referenceName][1]} for object of class: "
f"{type(targetObject).__name__} with reference name: {referenceName}"
)
self._logger.info(msg)
return msg
def registerClass(self, targetCls, referenceName):
"""
Register a given class, so that an instance can be created by clients of the server.
Parameters
----------
targetCls : class
Class to register as <reference name>
referenceName : str
Name used to refer to the target class when calling its constructor
"""
# register init
self._methods[referenceName] = (targetCls, ['init'])
# create, log and return success message
msg = f"Registered class: {targetCls} with reference name: {referenceName}"
self._logger.info(msg)
return msg
def listRegisteredMethods(self):
"""
Get the list of all registered methods for all objects.
Returns
        -------
list of str
the list of registered methods, as strings in the format: <object reference name>.<method name>
"""
registeredMethods = []
for name in self._methods:
for method in self._methods[name][1]:
registeredMethods.append(f"{name}.{method}")
return registeredMethods
def actualizeAttributes(self, arg):
"""
Convert a string pointing to an attribute of a registered object into the value of that
attribute.
Parameters
----------
arg : str
String in the format `object.attribute` pointing to the target attribute
"""
if isinstance(arg, str) and "." in arg:
_name, _attr = arg.split(".", 1)
if _name in self._methods:
_obj, _methods = self._methods[_name]
if _attr in _methods:
arg = getattr(_obj, _attr)
elif isinstance(arg, dict):
# actualize all values if given a dict of params
for key in arg:
arg[key] = self.actualizeAttributes(arg[key])
return arg
def start(self, host, port):
"""
Start the Liaison WebSocket server at the given address.
Notes
        -----
This is a blocking call.
Parameters
----------
host : string
the hostname, e.g. 'localhost'
port : int
the port number, e.g. 8001
"""
asyncio.run(self.run(host, port))
def pingPong(self):
"""
This method provides the server-side pong to the client-side ping that acts as
a keep-alive approach for the WebSocket connection.
"""
pass
async def run(self, host, port):
"""
Run a Liaison WebSocket server at the given address.
Parameters
----------
host : string
the hostname, e.g. 'localhost'
port : int
the port number, e.g. 8001
"""
# set the loop future on SIGTERM or SIGINT for clean interruptions:
loop = asyncio.get_running_loop()
loopFuture = loop.create_future()
        if sys.platform in ("linux", "linux2"):
            loop.add_signal_handler(signal.SIGINT, loopFuture.set_result, None)
            loop.add_signal_handler(signal.SIGTERM, loopFuture.set_result, None)
async with websockets.serve(self._connectionHandler, host, port):
self._logger.info(f"Liaison Server started on: {host}:{port}")
await loopFuture
# await asyncio.Future() # run forever
self._logger.info('Liaison Server terminated.')
async def broadcast(self, message):
"""
Send a message to all connected clients:
Parameters
----------
message : string
the message to be sent to all clients
"""
if not isinstance(message, str):
message = json.dumps(message, cls=LiaisonJSONEncoder)
for websocket in self._connections:
self.logger.sent(message)
await websocket.send(message)
def broadcastSync(self, message):
"""
Call Liaison.broadcast from a synchronous context.
Parameters
----------
message : string
the message to be sent to all clients
"""
try:
# try to run in new loop
asyncio.run(self.broadcast(message))
except RuntimeError:
# use existing if there's already a loop
loop = asyncio.get_event_loop()
loop.create_task(self.broadcast(message))
async def _connectionHandler(self, websocket):
"""
Handler managing all communications received from a client connected to the server.
Parameters
----------
websocket : WebSocketServerProtocol
the websocket connection established when the client connected to the server
"""
clientIP = websocket.remote_address[0]
self._logger.info(f"New connection established with client at IP: {clientIP}")
self._connections.add(websocket)
while True:
try:
message = await websocket.recv()
self._logger.debug(f"New message received from client at IP: {clientIP}: {message}")
# process the message:
await self._processMessage(websocket, message)
except websockets.ConnectionClosedOK as error:
self._logger.info(f"Connection closed cleanly with client at IP: {clientIP}: {error}")
self._connections.remove(websocket)
break
except websockets.ConnectionClosedError as error:
self._logger.info(f"Connection closed uncleanly (protocol error or network failure) with client at IP: {clientIP}: {error}")
self._connections.remove(websocket)
break
async def _processMessage(self, websocket, message):
"""
Process a message received from a client.
Currently, only method calls are processed.
They should be in the following format:
{
"object": <object reference name>,
"method": <method name>,
"args": [<arg>,<arg>,...],
"messageId": <uuid>
}
"args" and "messageId" are optional. messageId's are used to match results to messages, they enable
a single client to make multiple concurrent calls.
To instantiate a registered class, use the keyword "init" as the method name. To register the methods of
an instantiated object, use the keyword "registerMethods" as the method name.
The result of the method call is sent back to the client in the following format:
{"result": <result as string>, "messageId": <uuid>}
        If an error occurred when the method was called, the error is returned to the client in the following format:
{"error": <result as string>, "messageId": <uuid>}
Parameters
----------
websocket : WebSocketServerProtocol
the websocket connection on which the message was received
message : string
the message sent by the client to the server, as a JSON string
"""
# log message
self.logger.received(message)
# decode the message:
try:
decodedMessage = json.loads(message)
except Exception as error:
self._logger.debug(f"unable to json decode the message: {error}")
return
self._logger.debug(f"decoded message: {decodedMessage}")
# process the decoded message:
try:
# - if the message has an object and a method field, check whether a corresponding method was registered
if 'object' in decodedMessage:
# get object
queryObject = decodedMessage['object']
if queryObject not in self._methods:
raise Exception(f"No methods of the object {queryObject} have been registered with the server")
# get method
queryMethod = decodedMessage['method']
if queryMethod not in self._methods[queryObject][1]:
raise Exception(f"{queryObject}.{queryMethod} has not been registered with the server")
# extract and unpack args
rawArgs = decodedMessage['args'] if 'args' in decodedMessage else []
args = []
for arg in rawArgs:
# try to parse json string
try:
arg = json.loads(arg)
except (json.decoder.JSONDecodeError, TypeError):
pass
# if arg is a known property, get its value
arg = self.actualizeAttributes(arg)
# append to list of args
args.append(arg)
if 'method' in decodedMessage:
# if method is init, initialise object from class and register it under reference name
if queryMethod == "init":
cls = self._methods[queryObject][0]
# add self to args if relevant
kwargs = {}
if "liaison" in inspect.getfullargspec(cls.__init__).args:
kwargs['liaison'] = self
# create instance
obj = cls(*args, **kwargs)
# register object (but not its methods yet)
rawResult = self.registerObject(obj, referenceName=queryObject)
# if method is register, register methods of object
elif queryMethod == "registerMethods":
# get object
obj = self._methods[queryObject][0]
# register its methods
rawResult = self.registerMethods(obj, referenceName=queryObject)
# any other method, call as normal
else:
self._logger.debug(f"running the registered method: {queryObject}.{queryMethod}")
# get the method and determine whether it needs to be awaited:
method = getattr(self._methods[queryObject][0], queryMethod)
methodIsCoroutine = inspect.iscoroutinefunction(method)
# run the method, with arguments if need be:
if methodIsCoroutine:
rawResult = await method(*args)
else:
rawResult = method(*args)
# convert result to a string
result = json.dumps(rawResult, cls=LiaisonJSONEncoder)
# send a response back to the client:
response = {
"result": result
}
# if there is a messageId in the message, add it to the response:
if 'messageId' in decodedMessage:
response['messageId'] = decodedMessage['messageId']
self.logger.sent(response)
await websocket.send(json.dumps(response))
except BaseException as err:
# JSONify any errors
err = json.dumps(err, cls=LiaisonJSONEncoder)
# send to server
self.logger.sent(err)
await websocket.send(err)
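# Editor's illustrative client sketch (not part of the original module):
# connects to a WebSocketServer started elsewhere with
# `server.start('localhost', 8001)` and calls the always-registered
# liaison.listRegisteredMethods method. Host and port are assumptions.
if __name__ == "__main__":
    import uuid

    async def _demo():
        async with websockets.connect("ws://localhost:8001") as ws:
            await ws.send(json.dumps({
                "object": "liaison",
                "method": "listRegisteredMethods",
                "messageId": str(uuid.uuid4()),
            }))
            # expect {"result": "...", "messageId": "..."} back
            print(await ws.recv())

    asyncio.run(_demo())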
| 14,373 | Python | .py | 400 | 32.1325 | 128 | 0.723009 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,340 | microphone.py | psychopy_psychopy/psychopy/microphone.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Audio capture and analysis using pyo.
These are optional components that can be obtained by installing the
`psychopy-legacy-mic` extension into the current environment.
"""
from psychopy.tools.pkgtools import PluginStub
class AudioCapture(
PluginStub,
plugin="psychopy-legacy-mic"
):
pass
if __name__ == "__main__":
pass
| 578 | Python | .py | 17 | 31.470588 | 79 | 0.749546 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,341 | event.py | psychopy_psychopy/psychopy/event.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""To handle input from keyboard, mouse and joystick (joysticks require
pygame to be installed).
See demo_mouse.py and demo_joystick.py for examples
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
# 01/2011 modified by Dave Britton to get mouse event timing
import sys
import string
import copy
import numpy
from collections import namedtuple, OrderedDict
from psychopy.preferences import prefs
# try to import pyglet & pygame and hope the user has at least one of them!
try:
from pygame import mouse, locals, joystick, display
import pygame.key
import pygame.event as evt
havePygame = True
except ImportError:
havePygame = False
try:
import pyglet
havePyglet = True
except ImportError:
havePyglet = False
try:
import glfw
if not glfw.init():
raise ImportError
haveGLFW = True
except ImportError:
haveGLFW = False
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
if havePygame:
usePygame = True # will become false later if win not initialised
else:
usePygame = False
if haveGLFW:
useGLFW = True
else:
useGLFW = False
import psychopy.core
from psychopy.tools.monitorunittools import cm2pix, deg2pix, pix2cm, pix2deg
from psychopy import logging
from psychopy.constants import NOT_STARTED
# global variable to keep track of mouse buttons
mouseButtons = [0, 0, 0]
if havePyglet or haveGLFW:
# importing from mouse takes ~250ms, so do it now
if havePyglet:
from pyglet.window.mouse import LEFT, MIDDLE, RIGHT
from pyglet.window.key import (
MOD_SHIFT,
MOD_CTRL,
MOD_ALT,
MOD_CAPSLOCK,
MOD_NUMLOCK,
MOD_WINDOWS,
MOD_COMMAND,
MOD_OPTION,
MOD_SCROLLLOCK
)
_keyBuffer = []
mouseWheelRel = numpy.array([0.0, 0.0])
# list of 3 clocks that are reset on mouse button presses
mouseClick = [psychopy.core.Clock(), psychopy.core.Clock(),
psychopy.core.Clock()]
# container for time elapsed from last reset of mouseClick[n] for any
# button pressed
mouseTimes = [0.0, 0.0, 0.0]
# clock for tracking time of mouse movement, reset when mouse is moved,
# reset on mouse motion:
mouseMove = psychopy.core.Clock()
# global eventThread
# eventThread = _EventDispatchThread()
# eventThread.start()
if haveGLFW:
# GLFW keycodes for special characters
_glfw_keycodes_ = {
glfw.KEY_SPACE: 'space',
glfw.KEY_ESCAPE: 'esc',
glfw.KEY_ENTER: 'return',
glfw.KEY_TAB: 'tab',
glfw.KEY_BACKSPACE: 'backspace',
glfw.KEY_INSERT: 'insert',
glfw.KEY_DELETE: 'delete',
glfw.KEY_RIGHT: 'right',
glfw.KEY_LEFT: 'left',
glfw.KEY_DOWN: 'down',
glfw.KEY_UP: 'up',
glfw.KEY_PAGE_UP: 'pageup',
glfw.KEY_PAGE_DOWN: 'pagedn',
glfw.KEY_HOME: 'home',
glfw.KEY_END: 'end',
glfw.KEY_CAPS_LOCK: 'capslock',
glfw.KEY_SCROLL_LOCK: 'scrolllock',
glfw.KEY_NUM_LOCK: 'numlock',
glfw.KEY_PRINT_SCREEN: 'printscreen',
glfw.KEY_PAUSE: 'pause',
glfw.KEY_F1: 'f1',
glfw.KEY_F2: 'f2',
glfw.KEY_F3: 'f3',
glfw.KEY_F4: 'f4',
glfw.KEY_F5: 'f5',
glfw.KEY_F6: 'f6',
glfw.KEY_F7: 'f7',
glfw.KEY_F8: 'f8',
glfw.KEY_F9: 'f9',
glfw.KEY_F10: 'f10',
glfw.KEY_F11: 'f11',
glfw.KEY_F12: 'f12',
glfw.KEY_F13: 'f13',
glfw.KEY_F14: 'f14',
glfw.KEY_F15: 'f15',
glfw.KEY_F16: 'f16',
glfw.KEY_F17: 'f17',
glfw.KEY_F18: 'f18',
glfw.KEY_F19: 'f19',
glfw.KEY_F20: 'f20',
glfw.KEY_F21: 'f21',
glfw.KEY_F22: 'f22',
glfw.KEY_F23: 'f23',
glfw.KEY_F24: 'f24',
glfw.KEY_F25: 'f25',
}
useText = False # By default _onPygletText is not used
def _onPygletText(text, emulated=False):
"""handler for on_text pyglet events, or call directly to emulate a text
event.
S Mathot 2012: This function only acts when the key that is pressed
corresponds to a non-ASCII text character (Greek, Arabic, Hebrew, etc.).
In that case the symbol that is passed to _onPygletKey() is translated
into a useless 'user_key()' string. If this happens, _onPygletText takes
over the role of capturing the key. Unfortunately, _onPygletText()
cannot solely handle all input, because it does not respond to spacebar
presses, etc.
"""
global useText
if not useText: # _onPygletKey has handled the input
return
# This is needed because sometimes the execution
# sequence is messed up (somehow)
useText = False
# capture when the key was pressed:
keyTime = psychopy.core.getTime()
if emulated:
keySource = 'EmulatedKey'
else:
keySource = 'KeyPress'
_keyBuffer.append((text.lower(), lastModifiers, keyTime))
logging.data("%s: %s" % (keySource, text))
def _onPygletKey(symbol, modifiers, emulated=False):
"""handler for on_key_press pyglet events; call directly to emulate a
key press
Appends a tuple with (keyname, timepressed) into the global _keyBuffer.
The _keyBuffer can then be accessed as normal using event.getKeys(),
.waitKeys(), clearBuffer(), etc.
J Gray 2012: Emulated means add a key (symbol) to the buffer virtually.
This is useful for fMRI_launchScan, and for unit testing (in testTheApp)
Logging distinguishes EmulatedKey events from real Keypress events.
For emulation, the key added to the buffer is unicode(symbol), instead of
pyglet.window.key.symbol_string(symbol).
S Mathot 2012: Implement fallback to _onPygletText
5AM Solutions 2016: Add the keyboard modifier flags to the key buffer.
M Cutone 2018: Added GLFW backend support.
"""
global useText, lastModifiers
keyTime = psychopy.core.getTime() # capture when the key was pressed
if emulated:
if not isinstance(modifiers, int):
msg = 'Modifiers must be passed as an integer value.'
raise ValueError(msg)
thisKey = str(symbol)
keySource = 'EmulatedKey'
else:
thisKey = pyglet.window.key.symbol_string(
symbol).lower() # convert symbol into key string
# convert pyglet symbols to pygame forms ( '_1'='1', 'NUM_1'='[1]')
# 'user_key' indicates that Pyglet has been unable to make sense
# out of the keypress. In that case, we fall back to _onPygletText
# to handle the input.
if 'user_key' in thisKey:
useText = True
lastModifiers = modifiers
return
useText = False
thisKey = thisKey.lstrip('_').lstrip('NUM_')
# Pyglet 1.3.0 returns 'enter' when Return key (0xFF0D) is pressed
# in Windows Python3. So we have to replace 'enter' with 'return'.
if thisKey == 'enter':
thisKey = 'return'
keySource = 'Keypress'
_keyBuffer.append((thisKey, modifiers, keyTime)) # tuple
logging.data("%s: %s" % (keySource, thisKey))
_process_global_event_key(thisKey, modifiers)
def _process_global_event_key(key, modifiers):
if modifiers == 0:
modifier_keys = ()
else:
modifier_keys = ['%s' % m.strip('MOD_').lower() for m in
(pyglet.window.key.modifiers_string(modifiers)
.split('|'))]
# Ignore Num Lock.
if 'numlock' in modifier_keys:
modifier_keys.remove('numlock')
index_key = globalKeys._gen_index_key((key, modifier_keys))
if index_key in globalKeys:
event = globalKeys[index_key]
logging.exp('Global key event: %s. Calling %s.'
% (event.name, event.func))
r = event.func(*event.func_args, **event.func_kwargs)
return r
def _onPygletMousePress(x, y, button, modifiers, emulated=False):
"""button left=1, middle=2, right=4;
specify multiple buttons with | operator
"""
global mouseButtons, mouseClick, mouseTimes
now = psychopy.clock.getTime()
if emulated:
label = 'Emulated'
else:
label = ''
if button & LEFT:
mouseButtons[0] = 1
mouseTimes[0] = now - mouseClick[0].getLastResetTime()
label += ' Left'
if button & MIDDLE:
mouseButtons[1] = 1
mouseTimes[1] = now - mouseClick[1].getLastResetTime()
label += ' Middle'
if button & RIGHT:
mouseButtons[2] = 1
mouseTimes[2] = now - mouseClick[2].getLastResetTime()
label += ' Right'
logging.data("Mouse: %s button down, pos=(%i,%i)" % (label.strip(), x, y))
def _onPygletMouseRelease(x, y, button, modifiers, emulated=False):
global mouseButtons
if emulated:
label = 'Emulated'
else:
label = ''
if button & LEFT:
mouseButtons[0] = 0
label += ' Left'
if button & MIDDLE:
mouseButtons[1] = 0
label += ' Middle'
if button & RIGHT:
mouseButtons[2] = 0
label += ' Right'
logging.data("Mouse: %s button up, pos=(%i,%i)" % (label, x, y))
def _onPygletMouseWheel(x, y, scroll_x, scroll_y):
global mouseWheelRel
mouseWheelRel = mouseWheelRel + numpy.array([scroll_x, scroll_y])
msg = "Mouse: wheel shift=(%i,%i), pos=(%i,%i)"
logging.data(msg % (scroll_x, scroll_y, x, y))
# will this work? how are pyglet event handlers defined?
def _onPygletMouseMotion(x, y, dx, dy):
global mouseMove
# mouseMove is a core.Clock() that is reset when the mouse moves
# default is None, but start and stopMoveClock() create and remove it,
# mouseMove.reset() resets it by hand
if mouseMove:
mouseMove.reset()
def startMoveClock():
global mouseMove
mouseMove = psychopy.core.Clock()
def stopMoveClock():
global mouseMove
mouseMove = None
def resetMoveClock():
global mouseMove
if mouseMove:
mouseMove.reset()
else:
startMoveClock()
# class Keyboard:
# """The keyboard class is currently just a helper class to allow common
# attributes with other objects (like mouse and stimuli). In particular
# it allows storage of the .status property (NOT_STARTED, STARTED, STOPPED).
# It isn't really needed for most users - the functions it supports (e.g.
# getKeys()) are directly callable from the event module.
# Note that multiple Keyboard instances will not keep separate buffers.
# """
# def __init__(self):
# self.status=NOT_STARTED
# def getKeys(keyList=None, timeStamped=False):
# return getKeys(keyList=keyList, timeStamped=timeStamped)
# def waitKeys(maxWait = None, keyList=None):
#        return waitKeys(maxWait=maxWait, keyList=keyList)
def modifiers_dict(modifiers):
"""Return dict where the key is a keyboard modifier flag
and the value is the boolean state of that flag.
"""
return {(mod[4:].lower()): modifiers & getattr(sys.modules[__name__], mod) > 0 for mod in [
'MOD_SHIFT',
'MOD_CTRL',
'MOD_ALT',
'MOD_CAPSLOCK',
'MOD_NUMLOCK',
'MOD_WINDOWS',
'MOD_COMMAND',
'MOD_OPTION',
'MOD_SCROLLLOCK'
]}
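
# Example of the mapping this produces (illustrative): with only MOD_SHIFT
# set, the result is {'shift': True, 'ctrl': False, 'alt': False, ...},
# i.e. one boolean per flag listed above.
#
#     mods = modifiers_dict(pyglet.window.key.MOD_SHIFT)
#     assert mods['shift'] and not mods['ctrl']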
def getKeys(keyList=None, modifiers=False, timeStamped=False):
"""Returns a list of keys that were pressed.
:Parameters:
keyList : **None** or []
Allows the user to specify a set of keys to check for.
Only keypresses from this set of keys will be removed from
the keyboard buffer. If the keyList is `None`, all keys will be
checked and the key buffer will be cleared completely.
NB, pygame doesn't return timestamps (they are always 0)
modifiers : **False** or True
If True will return a list of tuples instead of a list of
keynames. Each tuple has (keyname, modifiers). The modifiers
are a dict of keyboard modifier flags keyed by the modifier
name (eg. 'shift', 'ctrl').
timeStamped : **False**, True, or `Clock`
If True will return a list of tuples instead of a list of
keynames. Each tuple has (keyname, time). If a `core.Clock`
is given then the time will be relative to the `Clock`'s last
reset.
:Author:
- 2003 written by Jon Peirce
- 2009 keyList functionality added by Gary Strangman
- 2009 timeStamped code provided by Dave Britton
- 2016 modifiers code provided by 5AM Solutions
"""
keys = []
if havePygame and display.get_init():
# see if pygame has anything instead (if it exists)
windowSystem = 'pygame'
for evts in evt.get(locals.KEYDOWN):
# pygame has no keytimes
keys.append((pygame.key.name(evts.key), 0))
global _keyBuffer
if havePyglet:
# for each (pyglet) window, dispatch its events before checking event
# buffer
windowSystem = 'pyglet'
for win in pyglet.app.windows:
try:
win.dispatch_events() # pump events on pyglet windows
except ValueError as e: # pragma: no cover
# Pressing special keys, such as 'volume-up', results in a
# ValueError. This appears to be a bug in pyglet, and may be
# specific to certain systems and versions of Python.
                logging.error(u'Failed to handle keypress: %s' % e)
if len(_keyBuffer) > 0:
# then pyglet is running - just use this
keys = _keyBuffer
# _keyBuffer = [] # DO /NOT/ CLEAR THE KEY BUFFER ENTIRELY
if haveGLFW:
windowSystem = 'glfw'
glfw.poll_events()
if len(_keyBuffer) > 0:
keys = _keyBuffer
if keyList is None:
_keyBuffer = [] # clear buffer entirely
targets = keys # equivalent behavior to getKeys()
else:
nontargets = []
targets = []
# split keys into keepers and pass-thrus
for key in keys:
if key[0] in keyList:
targets.append(key)
else:
nontargets.append(key)
_keyBuffer = nontargets # save these
# now we have a list of tuples called targets
# did the user want timestamped tuples or keynames?
    if not modifiers and not timeStamped:
        keyNames = [k[0] for k in targets]
        return keyNames
    elif not timeStamped:
keyNames = [(k[0], modifiers_dict(k[1])) for k in targets]
return keyNames
elif timeStamped and windowSystem=='pygame':
# provide a warning and set timestamps to be None
        logging.warning('Pygame keyboard events do not support timeStamped=True')
relTuple = [[_f for _f in (k[0], modifiers and modifiers_dict(k[1]) or None, None) if _f] for k in targets]
return relTuple
elif hasattr(timeStamped, 'getLastResetTime'):
# keys were originally time-stamped with
# core.monotonicClock._lastResetTime
# we need to shift that by the difference between it and
# our custom clock
_last = timeStamped.getLastResetTime()
_clockLast = psychopy.core.monotonicClock.getLastResetTime()
timeBaseDiff = _last - _clockLast
relTuple = [[_f for _f in (k[0], modifiers and modifiers_dict(k[1]) or None, k[-1] - timeBaseDiff) if _f] for k in targets]
return relTuple
elif timeStamped is True:
return [[_f for _f in (k[0], modifiers and modifiers_dict(k[1]) or None, k[-1]) if _f] for k in targets]
    elif isinstance(timeStamped, (float, int)):
relTuple = [[_f for _f in (k[0], modifiers and modifiers_dict(k[1]) or None, k[-1] - timeStamped) if _f] for k in targets]
return relTuple
else: ## danger - catch anything that gets here because it shouldn't!
raise ValueError("We received an unknown combination of params to "
"getKeys(): timestamped={}, windowSystem={}, "
"modifiers={}"
.format(timeStamped, windowSystem, modifiers))
def waitKeys(maxWait=float('inf'), keyList=None, modifiers=False,
timeStamped=False, clearEvents=True):
"""Same as `~psychopy.event.getKeys`, but halts everything
(including drawing) while awaiting input from keyboard.
:Parameters:
maxWait : any numeric value.
            Maximum number of seconds to wait for a keypress.
Default is float('inf') which simply waits forever.
keyList : **None** or []
Allows the user to specify a set of keys to check for.
Only keypresses from this set of keys will be removed from
the keyboard buffer. If the keyList is `None`, all keys will be
checked and the key buffer will be cleared completely.
NB, pygame doesn't return timestamps (they are always 0)
modifiers : **False** or True
If True will return a list of tuples instead of a list of
keynames. Each tuple has (keyname, modifiers). The modifiers
are a dict of keyboard modifier flags keyed by the modifier
name (eg. 'shift', 'ctrl').
timeStamped : **False**, True, or `Clock`
If True will return a list of tuples instead of a list of
keynames. Each tuple has (keyname, time). If a `core.Clock`
is given then the time will be relative to the `Clock`'s last
reset.
clearEvents : **True** or False
Whether to clear the keyboard event buffer (and discard preceding
keypresses) before starting to monitor for new keypresses.
Returns None if times out.
"""
if clearEvents:
# Only consider keypresses from here onwards.
# We need to invoke clearEvents(), but our keyword argument is
# also called clearEvents. We can work around this conflict by
# accessing the global scope explicitly.
globals()['clearEvents']('keyboard')
# Check for keypresses until maxWait is exceeded
#
# NB pygame.event does have a wait() function that will
# do this and maybe leave more cpu idle time?
timer = psychopy.core.Clock()
got_keypress = False
while not got_keypress and timer.getTime() < maxWait:
# Get keypresses and return if anything is pressed.
keys = getKeys(keyList=keyList, modifiers=modifiers,
timeStamped=timeStamped)
if keys:
got_keypress = True
if got_keypress:
return keys
else:
logging.data('No keypress (maxWait exceeded)')
return None
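
# Example (illustrative sketch): block for up to 5 s for a 'y'/'n' response,
# time-stamped against a trial clock created by the caller.
#
#     trialClock = psychopy.core.Clock()
#     keys = waitKeys(maxWait=5.0, keyList=['y', 'n'],
#                     timeStamped=trialClock)
#     if keys is None:
#         print('no response within 5 s')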
def xydist(p1=(0.0, 0.0), p2=(0.0, 0.0)):
"""Helper function returning the cartesian distance between p1 and p2
"""
return numpy.sqrt(pow(p1[0] - p2[0], 2) + pow(p1[1] - p2[1], 2))
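
# Example: xydist((0, 0), (3, 4)) returns 5.0 -- the straight-line
# (Euclidean) distance between the two points.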
class Mouse:
"""Easy way to track what your mouse is doing.
It needn't be a class, but since Joystick works better
as a class this may as well be one too for consistency
Create your `visual.Window` before creating a Mouse.
:Parameters:
visible : bool or None
Show the mouse if True, hide it if False, leave it as is if None (default)
newPos : **None** or [x,y]
gives the mouse a particular starting position
(pygame `Window` only)
win : **None** or `Window`
the window to which this mouse is attached
(the first found if None provided)
"""
def __init__(self,
visible=None,
newPos=None,
win=None):
super(Mouse, self).__init__()
self._visible = visible
self.lastPos = None
self.prevPos = None # used for motion detection and timing
if win:
self.win = win
else:
try:
# to avoid circular imports, core.openWindows is defined
# by visual.py and updated in core namespace;
# it's circular to "import visual" here in event
self.win = psychopy.core.openWindows[0]()
logging.info('Mouse: using default window')
except (NameError, IndexError):
logging.error('Mouse: failed to get a default visual.Window'
' (need to create one first)')
self.win = None
# get the scaling factors for the display
if self.win is not None:
self._winScaleFactor = self.win.getContentScaleFactor()
else:
self._winScaleFactor = 1.0
# for builder: set status to STARTED, NOT_STARTED etc
self.status = None
self.mouseClock = psychopy.core.Clock()
self.movedistance = 0.0
# if pygame isn't initialised then we must use pyglet
global usePygame
if havePygame and not pygame.display.get_init():
usePygame = False
if visible is not None:
self.setVisible(visible)
if newPos is not None:
self.setPos(newPos)
@property
def units(self):
"""The units for this mouse
(will match the current units for the Window it lives in)
"""
return self.win.units
def setPos(self, newPos=(0, 0)):
"""Sets the current position of the mouse,
in the same units as the :class:`~visual.Window`. (0,0) is the center.
:Parameters:
newPos : (x,y) or [x,y]
the new position on the screen
"""
newPosPix = self._windowUnits2pix(numpy.array(newPos))
if usePygame:
newPosPix[1] = self.win.size[1] / 2 - newPosPix[1]
newPosPix[0] = self.win.size[0] / 2 + newPosPix[0]
mouse.set_pos(newPosPix)
else:
if hasattr(self.win.winHandle, 'set_mouse_position'):
if self.win.useRetina:
newPosPix = numpy.array(self.win.size) / 4 + newPosPix / 2
else:
wsf = self._winScaleFactor
newPosPix = \
numpy.array(self.win.size) / (2 * wsf) + newPosPix / wsf
x, y = int(newPosPix[0]), int(newPosPix[1])
self.win.winHandle.set_mouse_position(x, y)
self.win.winHandle._mouse_x = x
self.win.winHandle._mouse_y = y
else:
msg = 'mouse position could not be set (pyglet %s)'
logging.error(msg % pyglet.version)
def getPos(self):
"""Returns the current position of the mouse,
in the same units as the :class:`~visual.Window` (0,0) is at centre
"""
lastPosPix = numpy.zeros((2,), dtype=numpy.float32)
if usePygame: # for pygame top left is 0,0
lastPosPix = numpy.array(mouse.get_pos())
# set (0,0) to centre
lastPosPix[1] = self.win.size[1] / 2 - lastPosPix[1]
lastPosPix[0] = lastPosPix[0] - self.win.size[0] / 2
self.lastPos = self._pix2windowUnits(lastPosPix)
elif useGLFW and self.win.winType=='glfw':
lastPosPix[:] = self.win.backend.getMousePos()
if self.win.useRetina:
lastPosPix *= 2.0
else: # for pyglet bottom left is 0,0
# use default window if we don't have one
if self.win:
w = self.win.winHandle
else:
if psychopy.core.openWindows:
w = psychopy.core.openWindows[0]()
else:
logging.warning("Called event.Mouse.getPos() for the mouse with no Window being opened")
return None
# get position in window
lastPosPix[:] = w._mouse_x, w._mouse_y
# set (0,0) to centre
if self.win.useRetina:
lastPosPix = lastPosPix * 2 - numpy.array(self.win.size) / 2
else:
wsf = self._winScaleFactor
lastPosPix = lastPosPix * wsf - numpy.array(self.win.size) / 2
self.lastPos = self._pix2windowUnits(lastPosPix)
return copy.copy(self.lastPos)
def mouseMoved(self, distance=None, reset=False):
"""Determine whether/how far the mouse has moved.
With no args returns true if mouse has moved at all since last
getPos() call, or distance (x,y) can be set to pos or neg
distances from x and y to see if moved either x or y that
far from lastPos, or distance can be an int/float to test if
new coordinates are more than that far in a straight line
from old coords.
Retrieve time of last movement from self.mouseClock.getTime().
Reset can be to 'here' or to screen coords (x,y) which allows
measuring distance from there to mouse when moved. If reset is
(x,y) and distance is set, then prevPos is set to (x,y) and
distance from (x,y) to here is checked, mouse.lastPos is set as
current (x,y) by getPos(), mouse.prevPos holds lastPos from
last time mouseMoved was called.
"""
# mouseMove = clock that gets reset by pyglet mouse movement handler:
global mouseMove
# needs initialization before getPos resets lastPos
self.prevPos = copy.copy(self.lastPos)
self.getPos() # sets self.lastPos to current position
if not reset:
if distance is None:
if self.prevPos[0] != self.lastPos[0]:
return True
if self.prevPos[1] != self.lastPos[1]:
return True
else:
if isinstance(distance, int) or isinstance(distance, float):
self.movedistance = xydist(self.prevPos, self.lastPos)
if self.movedistance > distance:
return True
else:
return False
if self.prevPos[0] + distance[0] - self.lastPos[0] > 0.0:
return True # moved on X-axis
            if self.prevPos[1] + distance[1] - self.lastPos[1] > 0.0:
                return True  # moved on Y-axis
return False
if reset is True:
# just reset the last move time: starts/zeroes the move clock
mouseMove.reset() # resets the global mouseMove clock
return False
if reset == 'here':
# set to wherever we are
self.prevPos = copy.copy(self.lastPos) # lastPos set in getPos()
return False
if hasattr(reset, '__len__'):
# a tuple or list of (x,y)
# reset to (x,y) to check movement from there
self.prevPos = copy.copy(reset)
if not distance:
return False # just resetting prevPos, not checking distance
else:
# checking distance of current pos to newly reset prevposition
if isinstance(distance, int) or isinstance(distance, float):
self.movedistance = xydist(self.prevPos, self.lastPos)
if self.movedistance > distance:
return True
else:
return False
# distance is x,y tuple, to check if the mouse moved that
# far on either x or y axis
# distance must be (dx,dy), and reset is (rx,ry), current pos
# (cx,cy): Is cx-rx > dx ?
if abs(self.lastPos[0] - self.prevPos[0]) > distance[0]:
return True # moved on X-axis
if abs(self.lastPos[1] - self.prevPos[1]) > distance[1]:
return True # moved on Y-axis
return False
return False
def mouseMoveTime(self):
global mouseMove
if mouseMove:
return mouseMove.getTime()
else:
return 0 # mouseMove clock not started
def getRel(self):
"""Returns the new position of the mouse relative to the
last call to getRel or getPos, in the same units as the
:class:`~visual.Window`.
"""
if usePygame:
relPosPix = numpy.array(mouse.get_rel()) * [1, -1]
return self._pix2windowUnits(relPosPix)
else:
# NB getPost() resets lastPos so MUST retrieve lastPos first
if self.lastPos is None:
relPos = self.getPos()
else:
# DON't switch to (this-lastPos)
relPos = -self.lastPos + self.getPos()
return relPos
def getWheelRel(self):
"""Returns the travel of the mouse scroll wheel since last call.
Returns a numpy.array(x,y) but for most wheels y is the only
value that will change (except Mac mighty mice?)
"""
global mouseWheelRel
rel = mouseWheelRel
mouseWheelRel = numpy.array([0.0, 0.0])
return rel
@property
def visible(self):
"""Gets the visibility of the mouse (1 or 0)
"""
if usePygame:
return mouse.get_visible()
else:
print("Getting the mouse visibility is not supported under"
" pyglet, but you can set it anyway")
@visible.setter
def visible(self, visible):
"""Sets the visibility of the mouse to 1 or 0
NB when the mouse is not visible its absolute position is held
at (0, 0) to prevent it from going off the screen and getting lost!
You can still use getRel() in that case.
"""
self.setVisible(visible)
def getVisible(self):
"""Gets the visibility of the mouse (1 or 0)
"""
if usePygame:
return mouse.get_visible()
return self._visible
def setVisible(self, visible):
"""Sets the visibility of the mouse to 1 or 0
NB when the mouse is not visible its absolute position is held
at (0, 0) to prevent it from going off the screen and getting lost!
You can still use getRel() in that case.
"""
if self.win: # use default window if we don't have one
self.win.setMouseVisible(visible)
elif usePygame:
mouse.set_visible(visible)
else:
from psychopy.visual import openWindows
if openWindows:
w = openWindows[0]() # type: psychopy.visual.Window
else:
                logging.warning(
                    "Called event.Mouse.setVisible() with no Window "
                    "being opened")
return None
w.setMouseVisible(visible)
self._visible = visible # set internal state
def clickReset(self, buttons=(0, 1, 2)):
"""Reset a 3-item list of core.Clocks use in timing button clicks.
The pyglet mouse-button-pressed handler uses their
clock.getLastResetTime() when a button is pressed so the user
can reset them at stimulus onset or offset to measure RT. The
default is to reset all, but they can be reset individually as
specified in buttons list
"""
global mouseClick
for c in buttons:
mouseClick[c].reset()
mouseTimes[c] = 0.0
def getPressed(self, getTime=False):
"""Returns a 3-item list indicating whether or not buttons 0,1,2
are currently pressed.
If `getTime=True` (False by default) then `getPressed` will
return all buttons that have been pressed since the last call
to `mouse.clickReset` as well as their time stamps::
buttons = mouse.getPressed()
buttons, times = mouse.getPressed(getTime=True)
Typically you want to call :ref:`mouse.clickReset()` at stimulus
onset, then after the button is pressed in reaction to it, the
total time elapsed from the last reset to click is in mouseTimes.
This is the actual RT, regardless of when the call to `getPressed()`
was made.
"""
global mouseButtons, mouseTimes
if usePygame:
return mouse.get_pressed()
else:
# for each (pyglet) window, dispatch its events before checking
# event buffer
if havePyglet:
for win in pyglet.app.windows:
win.dispatch_events() # pump events on pyglet windows
if haveGLFW:
glfw.poll_events()
if not getTime:
return copy.copy(mouseButtons)
else:
return copy.copy(mouseButtons), copy.copy(mouseTimes)
def isPressedIn(self, shape, buttons=(0, 1, 2)):
"""Returns `True` if the mouse is currently inside the shape and
one of the mouse buttons is pressed. The default is that any of
the 3 buttons can indicate a click; for only a left-click,
specify `buttons=[0]`::
if mouse.isPressedIn(shape):
if mouse.isPressedIn(shape, buttons=[0]): # left-clicks only
Ideally, `shape` can be anything that has a `.contains()` method,
like `ShapeStim` or `Polygon`. Not tested with `ImageStim`.
"""
wanted = numpy.zeros(3, dtype=int)
for c in buttons:
wanted[c] = 1
pressed = self.getPressed()
return any(wanted & pressed) and shape.contains(self)
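
    # Example (illustrative sketch): poll each frame for a left-click inside
    # a hypothetical `stim` shape, where `win` is the stimulus window.
    #
    #     while not mouse.isPressedIn(stim, buttons=[0]):
    #         stim.draw()
    #         win.flip()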
def _pix2windowUnits(self, pos):
if self.win.units == 'pix':
if self.win.useRetina:
pos /= 2.0
return pos
elif self.win.units == 'norm':
return pos * 2.0 / self.win.size
elif self.win.units == 'cm':
return pix2cm(pos, self.win.monitor)
elif self.win.units == 'deg':
return pix2deg(pos, self.win.monitor)
elif self.win.units == 'height':
return pos / float(self.win.size[1])
def _windowUnits2pix(self, pos):
if self.win.units == 'pix':
return pos
elif self.win.units == 'norm':
return pos * self.win.size / 2.0
elif self.win.units == 'cm':
return cm2pix(pos, self.win.monitor)
elif self.win.units == 'deg':
return deg2pix(pos, self.win.monitor)
elif self.win.units == 'height':
return pos * float(self.win.size[1])
def setExclusive(self, exclusivity):
"""Binds the mouse to the experiment window. Only works in Pyglet.
In multi-monitor settings, or with a window that is not fullscreen,
the mouse pointer can drift, and thereby PsychoPy might not get the
events from that window. setExclusive(True) works with Pyglet to
bind the mouse to the experiment window.
Note that binding the mouse pointer to a window will cause the
pointer to vanish, and absolute positions will no longer be
meaningful getPos() returns [0, 0] in this case.
"""
if type(exclusivity) is not bool:
raise ValueError('Exclusivity must be a boolean!')
if not usePygame:
msg = ('Setting mouse exclusivity in Pyglet will cause the '
'cursor to disappear, and getPos() will be rendered '
'meaningless, returning [0, 0]')
psychopy.logging.warning(msg)
self.win.winHandle.set_exclusive_mouse(exclusivity)
else:
print('Mouse exclusivity can only be set for Pyglet!')
class BuilderKeyResponse():
"""Used in scripts created by the builder to keep track of a clock and
the current status (whether or not we are currently checking the keyboard)
"""
def __init__(self):
super(BuilderKeyResponse, self).__init__()
self.status = NOT_STARTED
self.keys = [] # the key(s) pressed
self.corr = 0 # was the resp correct this trial? (0=no, 1=yes)
self.rt = [] # response time(s)
self.clock = psychopy.core.Clock() # we'll use this to measure the rt
def clearEvents(eventType=None):
"""Clears all events currently in the event buffer.
Optional argument, eventType, specifies only certain types to be
cleared.
:Parameters:
eventType : **None**, 'mouse', 'joystick', 'keyboard'
If this is not None then only events of the given type are cleared
"""
if not havePygame or not display.get_init(): # pyglet
# For each window, dispatch its events before
# checking event buffer.
if havePyglet:
for win in pyglet.app.windows:
win.dispatch_events() # pump events on pyglet windows
if haveGLFW:
glfw.poll_events()
if eventType == 'mouse':
pass
elif eventType == 'joystick':
pass
else: # eventType='keyboard' or eventType=None.
global _keyBuffer
_keyBuffer = []
else: # pygame
if eventType == 'mouse':
evt.get([locals.MOUSEMOTION, locals.MOUSEBUTTONUP,
locals.MOUSEBUTTONDOWN])
elif eventType == 'keyboard':
evt.get([locals.KEYDOWN, locals.KEYUP])
elif eventType == 'joystick':
evt.get([locals.JOYAXISMOTION, locals.JOYBALLMOTION,
locals.JOYHATMOTION, locals.JOYBUTTONUP,
locals.JOYBUTTONDOWN])
else:
evt.get()
class _GlobalEventKeys(MutableMapping):
"""
Global event keys for the pyglet backend.
Global event keys are single keys (or combinations of a single key
and one or more "modifier" keys such as Ctrl, Alt, etc.) with an
associated Python callback function. This function will be executed
if the key (or key/modifiers combination) was pressed.
PsychoPy fully automatically monitors and processes key presses
during most portions of the experimental run, for example during
`core.wait()` periods, or when calling `win.flip()`. If a global
event key press is detected, the specified function will be run
immediately. You are not required to manually poll and check for key
presses. This can be particularly useful to implement a global
"shutdown" key, or to trigger laboratory equipment on a key press
when testing your experimental script -- without cluttering the code.
But of course the application is not limited to these two scenarios.
In fact, you can associate any Python function with a global event key.
The PsychoPy preferences for `shutdownKey` and `shutdownKeyModifiers`
(both unset by default) will be used to automatically create a global
    shutdown key when the `psychopy.event` module is first imported.
:Notes:
All keyboard -> event associations are stored in the `self._events`
OrderedDict. The dictionary keys are namedtuples with the elements
`key` and `mofifiers`. `key` is a string defining an (ordinary)
keyboard key, and `modifiers` is a tuple of modifier key strings,
e.g., `('ctrl', 'alt')`. The user does not access this attribute
directly, but should index the class instance itself (via
`globalKeys[key, modifiers]`). That way, the `modifiers` sequence
will be transparently converted into a tuple (which is a hashable
type) before trying to index `self._events`.
"""
_GlobalEvent = namedtuple(
'_GlobalEvent',
['func', 'func_args', 'func_kwargs', 'name'])
_IndexKey = namedtuple('_IndexKey', ['key', 'modifiers'])
_valid_keys = set(string.ascii_lowercase + string.digits
+ string.punctuation + ' \t')
_valid_keys.update(['escape', 'left', 'right', 'up', 'down', 'space'])
_valid_modifiers = {'shift', 'ctrl', 'alt', 'capslock',
'scrolllock', 'command', 'option', 'windows'}
def __init__(self):
super(_GlobalEventKeys, self).__init__()
self._events = OrderedDict()
if prefs.general['shutdownKey']:
msg = ('Found shutdown key definition in preferences; '
'enabling shutdown key.')
logging.info(msg)
self.add(key=prefs.general['shutdownKey'],
modifiers=prefs.general['shutdownKeyModifiers'],
func=psychopy.core.quit,
name='shutdown (auto-created from prefs)')
def __repr__(self):
info = ''
for index_key, event in list(self._events.items()):
info += '\n\t'
if index_key.modifiers:
_modifiers = ['[%s]' % m.upper() for m in index_key.modifiers]
info += '%s + ' % ' + '.join(_modifiers)
info += ("[%s] -> '%s' %s"
% (index_key.key.upper(), event.name, event.func))
return '<_GlobalEventKeys : %s\n>' % info
def __str__(self):
return ('<_GlobalEventKeys : %i key->event mappings defined.>'
% len(self))
def __len__(self):
return len(self._events)
def __getitem__(self, key):
index_key = self._gen_index_key(key)
return self._events[index_key]
def __setitem__(self, key, value):
msg = 'Please use `.add()` to add a new global event key.'
raise NotImplementedError(msg)
def __delitem__(self, key):
index_key = self._gen_index_key(key)
event = self._events.pop(index_key, None)
if event is None:
msg = 'Requested to remove unregistered global event key.'
raise KeyError(msg)
else:
logging.exp("Removed global key event: '%s'." % event.name)
def __iter__(self):
return iter(self._events.keys())
def _gen_index_key(self, key):
if isinstance(key, str): # Single key, passed as a string.
index_key = self._IndexKey(key, ())
else: # Convert modifiers into a hashable type.
index_key = self._IndexKey(key[0], tuple(key[1]))
return index_key
def add(self, key, func, func_args=(), func_kwargs=None,
modifiers=(), name=None):
"""
Add a global event key.
:Parameters:
key : string
The key to add.
func : function
The function to invoke once the specified keys were pressed.
func_args : iterable
Positional arguments to be passed to the specified function.
func_kwargs : dict
Keyword arguments to be passed to the specified function.
modifiers : collection of strings
Modifier keys. Valid keys are:
'shift', 'ctrl', 'alt' (not on macOS), 'capslock',
'scrolllock', 'command' (macOS only), 'option' (macOS only)
Num Lock is not supported.
name : string
The name of the event. Will be used for logging. If None,
will use the name of the specified function.
:Raises:
ValueError
If the specified key or modifiers are invalid, or if the
key / modifier combination has already been assigned to a global
event.
"""
if key not in self._valid_keys:
raise ValueError('Unknown key specified: %s' % key)
if not set(modifiers).issubset(self._valid_modifiers):
raise ValueError('Unknown modifier key specified.')
index_key = self._gen_index_key((key, modifiers))
if index_key in self._events:
msg = ('The specified key is already assigned to a global event. '
'Use `.remove()` to remove it first.')
raise ValueError(msg)
if func_kwargs is None:
func_kwargs = {}
if name is None:
name = func.__name__
self._events[index_key] = self._GlobalEvent(func, func_args,
func_kwargs, name)
logging.exp('Added new global key event: %s' % name)
def remove(self, key, modifiers=()):
"""
Remove a global event key.
:Parameters:
key : string
A single key name. If `'all'`, remove all event keys.
modifiers : collection of strings
Modifier keys. Valid keys are:
'shift', 'ctrl', 'alt' (not on macOS), 'capslock', 'numlock',
'scrolllock', 'command' (macOS only), 'option' (macOS only),
'windows' (Windows only)
"""
if key == 'all':
self._events = OrderedDict()
logging.exp('Removed all global key events.')
return
del self[key, modifiers]
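
# Example (illustrative sketch): user code accesses this class through the
# module-level `globalKeys` instance created at the bottom of this file.
# `myCleanupFunc` is a hypothetical callback.
#
#     from psychopy import event
#     event.globalKeys.add(key='q', modifiers=['ctrl'],
#                          func=myCleanupFunc, name='quit')
#     event.globalKeys.remove(key='q', modifiers=['ctrl'])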
def _onGLFWKey(*args, **kwargs):
"""Callback for key/character events for the GLFW backend.
:return:
"""
keyTime = psychopy.core.getTime() # get timestamp
# TODO - support for key emulation
win_ptr, key, scancode, action, modifiers = args
# only send events for PRESS and REPEAT to match pyglet behavior
if action == glfw.RELEASE:
return
global useText
if key == glfw.KEY_UNKNOWN:
useText = True
return
useText = False
# get the printable name, always make lowercase
key_name = glfw.get_key_name(key, scancode)
    # if there is no localized key name, or it is the space character
if key_name is None or key_name == ' ':
try:
key_name = _glfw_keycodes_[key]
except KeyError:
pass
else:
key_name = key_name.lower()
# TODO - modifier integration
keySource = 'Keypress'
_keyBuffer.append((key_name, modifiers, keyTime)) # tuple
logging.data("%s: %s" % (keySource, key_name))
def _onGLFWText(*args, **kwargs):
"""Handle unicode character events if _onGLFWKey() cannot.
:return:
"""
keyTime = psychopy.core.getTime() # get timestamp
# TODO - support for key emulation
win_ptr, codepoint, modifiers = args
# win = glfw.get_window_user_pointer(win_ptr)
text = chr(codepoint) # convert to unicode character (Python 3.0)
global useText
if not useText: # _onPygletKey has handled the input
return
    keySource = 'Keypress'
_keyBuffer.append((text, keyTime))
logging.data("%s: %s" % (keySource, text))
def _onGLFWMouseButton(*args, **kwargs):
"""Callback for mouse press events. Both press and release actions are
handled by this function as they both invoke the same callback.
"""
global mouseButtons, mouseClick, mouseTimes
now = psychopy.core.getTime()
win_ptr, button, action, modifier = args
# win = glfw.get_window_user_pointer(win_ptr)
# get current position of the mouse
# this might not be at the exact location of the mouse press
x, y = glfw.get_cursor_pos(win_ptr)
# process actions
if action == glfw.PRESS:
if button == glfw.MOUSE_BUTTON_LEFT:
mouseButtons[0] = 1
mouseTimes[0] = now - mouseClick[0].getLastResetTime()
elif button == glfw.MOUSE_BUTTON_MIDDLE:
mouseButtons[1] = 1
mouseTimes[1] = now - mouseClick[1].getLastResetTime()
elif button == glfw.MOUSE_BUTTON_RIGHT:
mouseButtons[2] = 1
mouseTimes[2] = now - mouseClick[2].getLastResetTime()
elif action == glfw.RELEASE:
if button == glfw.MOUSE_BUTTON_LEFT:
mouseButtons[0] = 0
elif button == glfw.MOUSE_BUTTON_MIDDLE:
mouseButtons[1] = 0
elif button == glfw.MOUSE_BUTTON_RIGHT:
mouseButtons[2] = 0
def _onGLFWMouseScroll(*args, **kwargs):
"""Callback for mouse scrolling events. For most computer mice with scroll
wheels, only the vertical (Y-offset) is relevant.
"""
window_ptr, x_offset, y_offset = args
global mouseWheelRel
mouseWheelRel = mouseWheelRel + numpy.array([x_offset, y_offset])
msg = "Mouse: wheel shift=(%i,%i)"
logging.data(msg % (x_offset, y_offset))
def _getGLFWJoystickButtons(*args, **kwargs):
"""
:return:
"""
pass
def _getGLFWJoystickAxes(*args, **kwargs):
"""
:return:
"""
pass
if havePyglet:
globalKeys = _GlobalEventKeys()
| 48,778 | Python | .py | 1,124 | 33.790036 | 131 | 0.608908 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
5,342 | layout.py | psychopy_psychopy/psychopy/layout.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Classes and functions for working with coordinates systems."""
__all__ = [
"unitTypes",
"Vector",
"Position",
"Size",
"Vertices"
]
import numpy as np
from .tools import monitorunittools as tools
# list of applicable units
unitTypes = [
None,
'',
'pix',
'deg',
'degFlat',
'degFlatPos',
'cm',
'pt',
'norm',
'height'
]
# anchor offsets and names
_anchorAliases = {
'top': -0.5,
'bottom': 0.5,
'left': 0.5,
'right': -0.5,
'center': 0
}
class Vector:
"""Class representing a vector.
A vector is a mathematical construct that specifies a length (or magnitude)
and direction within a given coordinate system. This class provides methods
to manipulate vectors and convert them between coordinates systems.
This class may be used to assist in positioning stimuli on a screen.
Parameters
----------
value : ArrayLike
Array of vector lengths along each dimension of the space the vector is
within. Vectors are specified as either 1xN for single vectors, and
Nx2 or Nx3 for multiple vectors.
units : str or None
Units which `value` has been specified in. Applicable values are
`'pix'`, `'deg'`, `'degFlat'`, `'degFlatPos'`, `'cm'`, `'pt'`, `'norm'`,
`'height'`, or `None`.
win : `~psychopy.visual.Window` or None
Window associated with this vector. This value must be specified if you
wish to map vectors to coordinate systems that require additional
information about the monitor the window is being displayed on.
Examples
--------
Create a new vector object using coordinates specified in pixel (`'pix'`)
units::
my_vector = Vector([256, 256], 'pix')
Multiple vectors may be specified by supplying a list of vectors::
my_vector = Vector([[256, 256], [640, 480]], 'pix')
Operators can be used to compare the magnitudes of vectors::
mag_is_same = vec1 == vec2 # same magnitude
mag_is_greater = vec1 > vec2 # one greater than the other
1xN vectors return a boolean value while Nx2 or Nx3 arrays return N-length
arrays of boolean values.
"""
def __init__(self, value, units, win):
# Create a dict to cache values on access
self._cache = {}
# Assume invalid until validation happens
self.valid = False
# define some names used by `set`
self.win = None
self._requested = None
self._requestedUnits = None
self.set(value, units, win)
def set(self, value, units, win=None):
# Check inputs
if win is None:
win = self.win
self.win = win # set extras
# If input is a Vector object, duplicate all settings
if isinstance(value, Vector):
self._requested = value._requested
self._requestedUnits = value._requestedUnits
self.valid = value.valid
self.pix = value.pix
if win is None:
self.win = value.win
return
# Validate
value, units = self.validate(value, units)
# Set values
self._requested = value
self._requestedUnits = units
setattr(self, self._requestedUnits, self._requested)
def validate(self, value, units):
"""Validate input values.
Ensures the values are in the correct format.
Returns
-------
tuple
Parameters `value` and `units`.
"""
# Assume valid until shown otherwise
self.valid = True
# Check units are valid
if units not in unitTypes:
raise ValueError(
f"Unit type '{units}' not recognised, must be one of: "
f"{unitTypes}")
# Get window units if units are None
if units in (None, ''):
units = self.win.units
# Coerce value to a numpy array of floats
try:
value = np.array(value, dtype=float)
except ValueError as err:
self.valid = False
raise err
# Make sure each value is no more than 3D
if len(value.shape) == 0:
value = np.array([value, value])
self.valid = True
elif len(value.shape) == 1:
self.valid = value.shape[0] <= 3
elif len(value.shape) == 2:
self.valid = value.shape[1] <= 3
if value.shape[0] == 1:
# Remove extraneous layer if single value
value = value[0]
else:
self.valid = False
# Replace None with the matching window dimension
if (value == None).any() or np.isnan(value).any(): # noqa: E711
win = Vector((1, 1), units="norm", win=self.win)
if len(value.shape) == 1:
value[value == None] = getattr(win, units)[value == None] # noqa: E711
value[np.isnan(value)] = getattr(win, units)[np.isnan(value)]
else:
value[np.isnan(value[:, 0]), 0] = getattr(win, units)[0]
value[np.isnan(value[:, 1]), 1] = getattr(win, units)[1]
value[value[:, 0] == None, 0] = getattr(win, units)[0] # noqa: E711
value[value[:, 1] == None, 1] = getattr(win, units)[1] # noqa: E711
assert self.valid, (f"Array of position/size values must be either "
f"Nx1, Nx2 or Nx3, not {value.shape}")
return value, units
def __bool__(self):
return self.valid
def __repr__(self):
"""If vector is printed, it will display its class and value."""
if self:
return (f"<psychopy.layout.{self.__class__.__name__}: "
f"{np.round(self.pix, 3)}px>")
else:
return "<psychopy.layout.{self.__class__.__name__}: Invalid>"
# --------------------------------------------------------------------------
# Rich comparisons
#
def __eq__(self, target):
"""`==` will compare position in pix"""
if isinstance(target, Vector):
if self.pix.size > 1:
return all(self.pix == target.pix)
else:
return self.pix == target.pix
else:
return False
def __ne__(self, target):
"""`!=` will return the opposite of `==`"""
return not self == target
def __lt__(self, target):
"""`<` will compare magnitude"""
if isinstance(target, Vector):
return self.magnitude < target.magnitude
elif isinstance(target, (int, float)):
return self.magnitude < target
else:
return False
def __le__(self, target):
"""`<=` will compare magnitude"""
if isinstance(target, Vector):
return self.magnitude <= target.magnitude
elif isinstance(target, (int, float)):
return self.magnitude <= target
else:
return False
def __gt__(self, target):
"""`>` will compare magnitude"""
if isinstance(target, Vector):
return self.magnitude > target.magnitude
elif isinstance(target, (int, float)):
return self.magnitude > target
else:
return False
def __ge__(self, target):
"""`>=` will compare magnitude"""
if isinstance(target, Vector):
return self.magnitude >= target.magnitude
elif isinstance(target, (int, float)):
return self.magnitude >= target
else:
return False
# --------------------------------------------------------------------------
# Operators
#
def __add__(self, other):
if isinstance(other, Vector):
return Vector(self.pix + other.pix, "pix", self.win)
def __sub__(self, other):
if isinstance(other, Vector):
return Vector(self.pix - other.pix, "pix", self.win)
def __mul__(self, other):
if isinstance(other, Vector):
return Vector(self.pix * other.pix, "pix", self.win)
if isinstance(other, (int, float)):
return Vector(self.pix * other, "pix", self.win)
if isinstance(other, (list, tuple, np.ndarray)):
return Vector(self.pix * np.array(other), "pix", self.win)
def __truediv__(self, other):
if isinstance(other, Vector):
return Vector(self.pix / other.pix, "pix", self.win)
if isinstance(other, (int, float)):
return Vector(self.pix / other, "pix", self.win)
if isinstance(other, (list, tuple, np.ndarray)):
return Vector(self.pix / np.array(other), "pix", self.win)
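
    # Example (illustrative sketch): operands may have been created in
    # different units, because arithmetic always resolves through `.pix`.
    #
    #     a = Vector((100, 100), 'pix', win)
    #     b = Vector((0.5, 0.5), 'norm', win)
    #     c = a + b    # new Vector holding the sum, in pix
    #     d = a * 2.0  # scalar scaling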
# --------------------------------------------------------------------------
# Class methods and properties
#
def copy(self):
"""Create a copy of this object"""
return self.__copy__()
def __copy__(self):
return self.__deepcopy__()
    def __deepcopy__(self, memo=None):  # memo accepted for copy.deepcopy()
return self.__class__(self._requested, self._requestedUnits, self.win)
@property
def monitor(self):
"""The monitor used for calculations within this object
(`~psychopy.monitors.Monitor`).
"""
return self.win.monitor
@property
def dimensions(self):
"""How many dimensions (x, y, z) are specified?"""
# Run _requested through validator to sanitise it
value, units = self.validate(self._requested, self._requestedUnits)
if len(value.shape) == 1:
# If single value, return number of coords
return len(value)
else:
# If multi value, return number of columns
return value.shape[1]
def __len__(self):
"""How many values are specified?"""
# Run _requested through validator to sanitise it
value, units = self.validate(self._requested, self._requestedUnits)
if len(value.shape) == 1:
# If single value, return 1
return 1
else:
# If multi value, return number of rows
return value.shape[0]
@property
def magnitude(self):
"""Magnitude of vector (i.e. length of the line from vector to (0, 0)
in pixels).
"""
        # NumPy has no `hypot3d`; the Euclidean norm handles 1-, 2- and
        # 3-dimensional vectors alike.
        return np.linalg.norm(self.pix)
@property
def direction(self):
"""Direction of vector (i.e. angle between vector and the horizontal
plane).
"""
if self.dimensions < 2:
# with only 1 dimension, y is essentially zero, so angle is always 0
return 0.0
toReturn = [] # store output values
if self.dimensions >= 2:
if self.pix[0] != 0.0: # Angle from x-axis (y is opp, x is adj)
x = np.degrees(np.arctan(self.pix[1] / self.pix[0]))
else:
x = 90.0
toReturn.append(x)
if self.pix[1] != 0.0: # Angle from y-axis (x is opp, y is adj)
y = np.degrees(np.arctan(self.pix[0] / self.pix[1]))
else:
y = 90.0
toReturn.append(y)
        if self.dimensions == 3:
            # Angle from z-axis (z is opp, hyp(x,y) is adj).
            # NumPy has no `hypot3d`; np.hypot gives the 2D hypotenuse.
            u = np.hypot(self.pix[0], self.pix[1])
            if u != 0.0:
                z = np.degrees(np.arctan(self.pix[2] / u))
            else:
                z = 90.0
            toReturn.append(z)
return toReturn
@property
def pix(self):
"""Values in units of 'pix' (pixels).
"""
# Check that object is valid
assert self.valid, (
u"Could not access pixel value of invalid position/size object")
# Return cached value if present
if 'pix' in self._cache:
return self._cache['pix']
else:
raise AttributeError(
f"Could not retrieve pixel value of Vector object set in "
f"{self._requestedUnits}")
@pix.setter
def pix(self, value):
# Validate
value, units = self.validate(value, 'pix')
# Clear cache and set
self._cache = {
'pix': value
}
@property
def deg(self):
"""Values in units of 'deg' (degrees of visual angle).
"""
# Return cached value if present
if 'deg' in self._cache:
return self._cache['deg']
# Otherwise, do conversion and cache
self._cache['deg'] = tools.pix2deg(self.pix, self.monitor)
# Return new cached value
return self._cache['deg']
@deg.setter
def deg(self, value):
# Validate
value, units = self.validate(value, 'deg')
# Convert and set
self.pix = tools.deg2pix(value, self.monitor)
@property
def degFlat(self):
"""Values in units of 'degFlat' (degrees of visual angle corrected for
screen curvature).
When dealing with positions/sizes in isolation; 'deg', 'degFlat' and
'degFlatPos' are synonymous - as the conversion is done at the vertex
level.
"""
return self.deg
@degFlat.setter
def degFlat(self, value):
self.deg = value
@property
def degFlatPos(self):
"""Values in units of 'degFlatPos'.
When dealing with positions/sizes in isolation; 'deg', 'degFlat' and
'degFlatPos' are synonymous - as the conversion is done at the vertex
level.
"""
return self.degFlat
@degFlatPos.setter
def degFlatPos(self, value):
self.degFlat = value
@property
def cm(self):
"""Values in units of 'cm' (centimeters).
"""
# Return cached value if present
if 'cm' in self._cache:
return self._cache['cm']
# Otherwise, do conversion and cache
self._cache['cm'] = tools.pix2cm(self.pix, self.monitor)
# Return new cached value
return self._cache['cm']
@cm.setter
def cm(self, value):
# Validate
value, units = self.validate(value, 'cm')
# Convert and set
self.pix = tools.cm2pix(value, self.monitor)
@property
def pt(self):
"""Vector coordinates in 'pt' (points).
Points are commonly used in print media to define text sizes. One point
is equivalent to 1/72 inches, or around 0.35 mm.
"""
# Return cached value if present
if 'pt' in self._cache:
return self._cache['pt']
# Otherwise, do conversion and cache
self._cache['pt'] = self.cm / (2.54 / 72)
# Return new cached value
return self._cache['pt']
@pt.setter
def pt(self, value):
# Validate
        value, units = self.validate(value, 'pt')
# Convert and set
self.cm = value * (2.54 / 72)
@property
def norm(self):
"""Value in units of 'norm' (normalized device coordinates).
"""
# Return cached value if present
if 'norm' in self._cache:
return self._cache['norm']
# Otherwise, do conversion and cache
buffer = np.ndarray(self.pix.shape, dtype=float)
for i in range(self.dimensions):
u = self.win.useRetina + 1
if len(self) > 1:
buffer[:, i] = self.pix[:, i] / (self.win.size[i] / u) * 2
else:
buffer[i] = self.pix[i] / (self.win.size[i] / u) * 2
self._cache['norm'] = buffer
return self._cache['norm'] # return new cached value
@norm.setter
def norm(self, value):
# Validate
value, units = self.validate(value, 'norm')
# Convert and set
buffer = np.ndarray(value.shape, dtype=float)
for i in range(self.dimensions):
u = self.win.useRetina + 1
if len(self) > 1:
buffer[:, i] = value[:, i] * (self.win.size[i] / u) / 2
else:
buffer[i] = value[i] * (self.win.size[i] / u) / 2
self.pix = buffer
@property
def height(self):
"""Value in units of 'height' (normalized to the height of the window).
"""
# Return cached value if present
if 'height' in self._cache:
return self._cache['height']
# Otherwise, do conversion and cache
self._cache['height'] = \
self.pix / (self.win.size[1] / (self.win.useRetina + 1))
# Return new cached value
return self._cache['height']
@height.setter
def height(self, value):
# Validate
value, units = self.validate(value, 'height')
# Convert and set
self.pix = value * (self.win.size[1] / (self.win.useRetina + 1))
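
# Example (illustrative sketch): a Vector converts lazily between unit
# systems, caching each representation until `.pix` is reassigned. `win` is
# assumed to be a visual.Window on a calibrated monitor (required for the
# 'deg' and 'cm' conversions).
#
#     v = Vector((256, 256), 'pix', win)
#     v.norm            # computed from pix and cached
#     v.deg             # uses win.monitor for the pix -> deg conversion
#     v.pix = (10, 10)  # clears the cache; other units recompute on access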
class Position(Vector):
"""Class representing a position vector.
This class is used to specify the location of a point within some
coordinate system (e.g., `(x, y)`).
Parameters
----------
value : ArrayLike
Array of coordinates representing positions within a coordinate system.
Positions are specified in a similar manner to `~psychopy.layout.Vector`
as either 1xN for single vectors, and Nx2 or Nx3 for multiple positions.
units : str or None
Units which `value` has been specified in. Applicable values are
`'pix'`, `'deg'`, `'degFlat'`, `'degFlatPos'`, `'cm'`, `'pt'`, `'norm'`,
`'height'`, or `None`.
win : `~psychopy.visual.Window` or None
Window associated with this position. This value must be specified if
you wish to map positions to coordinate systems that require additional
information about the monitor the window is being displayed on.
"""
def __init__(self, value, units, win=None):
Vector.__init__(self, value, units, win)
class Size(Vector):
"""Class representing a size.
Parameters
----------
value : ArrayLike
Array of values representing size axis-aligned bounding box within a
coordinate system. Sizes are specified in a similar manner to
`~psychopy.layout.Vector` as either 1xN for single vectors, and Nx2 or
Nx3 for multiple positions.
units : str or None
Units which `value` has been specified in. Applicable values are
`'pix'`, `'deg'`, `'degFlat'`, `'degFlatPos'`, `'cm'`, `'pt'`, `'norm'`,
`'height'`, or `None`.
win : `~psychopy.visual.Window` or None
Window associated with this size object. This value must be specified if
you wish to map sizes to coordinate systems that require additional
information about the monitor the window is being displayed on.
"""
def __init__(self, value, units, win=None):
Vector.__init__(self, value, units, win)
class Vertices:
"""Class representing an array of vertices.
Parameters
----------
verts : ArrayLike
Array of coordinates specifying the locations of vertices.
obj : object or None
size : ArrayLike or None
Scaling factors for vertices along each dimension.
pos : ArrayLike or None
Offset for vertices along each dimension.
units : str or None
Units which `verts` has been specified in. Applicable values are
`'pix'`, `'deg'`, `'degFlat'`, `'degFlatPos'`, `'cm'`, `'pt'`, `'norm'`,
`'height'`, or `None`.
flip : ArrayLike or None
Array of boolean values specifying which dimensions to flip/mirror.
Mirroring is applied prior to any other transformation.
anchor : str or None
Anchor location for vertices, specifies the origin for the vertices.
"""
def __init__(self, verts, obj=None, size=None, pos=None, units=None,
flip=None, anchor=None):
if obj is None and pos is None and size is None:
raise ValueError(
"Vertices array needs either an object or values for pos and "
"size.")
# Store object
self.obj = obj
# Store size and pos
self._size = size
self._pos = pos
self._units = units
self.flip = flip # store flip
self.anchor = anchor # set anchor
# Convert to numpy array
verts = np.array(verts)
# Make sure it's coercible to a Nx2 or nxNx2 numpy array
assert (3 >= len(verts.shape) >= 2) and (verts.shape[-1] == 2), (
f"Expected vertices to be coercible to a Nx2 or nxNx2 numpy array, not {verts.shape}"
)
# Store base vertices
self.base = verts
def __repr__(self):
"""If vertices object is printed, it will display its class and value.
"""
if self:
return (
f"<psychopy.layout.{self.__class__.__name__}: "
f"{np.round(self.base, 3)} * "
f"{np.round(self.obj._size.pix, 3)} + "
f"{np.round(self.obj._pos.pix, 3)}>")
else:
return "<psychopy.layout.{self.__class__.__name__}: Invalid>"
@property
def pos(self):
"""Positional offset of the vertices (`~psychopy.layout.Vector` or
ArrayLike)."""
if isinstance(self._pos, Vector):
return self._pos
if hasattr(self.obj, "_pos"):
return self.obj._pos
else:
raise AttributeError(
f"Could not derive position from object {self.obj} as object "
f"does not have a position attribute.")
@property
def size(self):
"""Scaling factors for vertices (`~psychopy.layout.Vector` or
ArrayLike)."""
if isinstance(self._size, Vector):
return self._size
if hasattr(self.obj, "_size"):
return self.obj._size
else:
raise AttributeError(
f"Could not derive size from object {self.obj} as object does "
f"not have a size attribute.")
@property
def units(self):
"""Units which the vertices are specified in (`str`).
"""
if hasattr(self, "_units") and self._units is not None:
return self._units
if hasattr(self, "obj") and hasattr(self.obj, "units"):
return self.obj.units
@property
def flip(self):
"""1x2 array for flipping vertices along each axis; set as `True` to
flip or `False` to not flip (`ArrayLike`).
If set as a single value, will duplicate across both axes. Accessing the
protected attribute (`._flip`) will give an array of 1s and -1s with
which to multiply vertices.
"""
# Get base value
if hasattr(self, "_flip"):
flip = self._flip
else:
flip = np.array([[False, False]])
        # Convert the stored -1/1 multipliers back to booleans
        return flip == -1
@flip.setter
def flip(self, value):
if value is None:
value = False
# Convert to 1x2 numpy array
value = np.array(value)
value = np.resize(value, (1, 2))
# Ensure values were bool
assert value.dtype == bool, (
"Flip values must be either a boolean (True/False) or an array of "
"booleans")
# Set as multipliers rather than bool
self._flip = np.array([[
-1 if value[0, 0] else 1,
-1 if value[0, 1] else 1,
]])
self._flipHoriz, self._flipVert = self._flip[0]
@property
def flipHoriz(self):
"""Apply horizontal mirroring (`bool`)?
"""
return self.flip[0][0]
@flipHoriz.setter
def flipHoriz(self, value):
self.flip = [value, self.flip[0, 1]]
@property
def flipVert(self):
"""Apply vertical mirroring (`bool`)?
"""
return self.flip[0][1]
@flipVert.setter
def flipVert(self, value):
self.flip = [self.flip[0, 0], value]
@property
def anchor(self):
"""Anchor location (`str`).
Possible values are on of `'top'`, `'bottom'`, `'left'`, `'right'`,
`'center'`. Combinations of these values may also be specified (e.g.,
`'top_center'`, `'center-right'`, `'topleft'`, etc. are all valid).
"""
if hasattr(self, "_anchorX") and hasattr(self, "_anchorY"):
# If set, return set values
return self._anchorX, self._anchorY
if hasattr(self.obj, "anchor"):
return self.obj.anchor
# Otherwise, assume center
return "center", "center"
@anchor.setter
def anchor(self, anchor):
if anchor is None and hasattr(self.obj, "anchor"):
anchor = self.obj.anchor
# Set defaults
self._anchorY = None
self._anchorX = None
# Look for unambiguous terms first (top, bottom, left, right)
if 'top' in str(anchor):
self._anchorY = 'top'
elif 'bottom' in str(anchor):
self._anchorY = 'bottom'
if 'right' in str(anchor):
self._anchorX = 'right'
elif 'left' in str(anchor):
self._anchorX = 'left'
# Then 'center' can apply to either axis that isn't already set
if self._anchorX is None:
self._anchorX = 'center'
if self._anchorY is None:
self._anchorY = 'center'
@property
def anchorAdjust(self):
"""Map anchor values to numeric vertices adjustments.
"""
return [_anchorAliases[a] for a in self.anchor]
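
    # Example (illustrative): for anchor ('left', 'top') this returns
    # [0.5, -0.5], i.e. in getas() the vertices are shifted right by half
    # the width and down by half the height, putting the origin at the
    # shape's top-left corner.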
def getas(self, units):
assert units in unitTypes, f"Unrecognised unit type '{units}'"
# Start with base values
verts = self.base.copy()
verts = verts.astype(float)
# Apply size
if self.size is None:
raise ValueError(
u"Cannot not calculate absolute positions of vertices without "
u"a size attribute")
verts *= getattr(self.size, units)
# Apply flip
verts *= self._flip
# Apply anchor
verts += self.anchorAdjust * getattr(self.size, units)
# Apply pos
if self.pos is None:
raise ValueError(
u"Cannot not calculate absolute positions of vertices without "
u"a pos attribute")
verts += getattr(self.pos, units)
return verts
def setas(self, value, units):
assert units in unitTypes, f"Unrecognised unit type '{units}'"
# Enforce numpy
value = np.array(value, dtype=float)
# Account for size
if self.size is None:
raise ValueError(
u"Cannot not calculate absolute positions of vertices without "
u"a size attribute")
value /= getattr(self.size, units)
# Account for flip
value *= self._flip
# Account for anchor
value -= self.anchorAdjust * getattr(self.size, units)
# Account for pos
if self.pos is None:
raise ValueError(
u"Cannot not calculate absolute positions of vertices without "
u"a pos attribute")
value -= getattr(self.pos, units)
self.base = value # apply
@property
def pix(self):
"""Get absolute positions of vertices in 'pix' units.
"""
# If correcting for screen curve, use the old functions
if self.units == 'degFlat':
return tools._degFlat2pix(
self.base * self.obj.size, self.obj.pos, self.obj.win)
elif self.units == 'degFlatPos':
return tools._degFlatPos2pix(
self.base * self.obj.size, self.obj.pos, self.obj.win)
else:
# Otherwise, use standardised method
return self.getas('pix')
@pix.setter
def pix(self, value):
self.setas(value, 'pix')
@property
def deg(self):
"""Get absolute positions of vertices in 'deg' units.
"""
return self.getas('deg')
@deg.setter
def deg(self, value):
self.setas(value, 'deg')
@property
def degFlat(self):
"""Get absolute positions of vertices in 'degFlat' units.
"""
return self.getas('degFlat')
@degFlat.setter
def degFlat(self, value):
cm = tools.deg2cm(value, self.obj.win.monitor, correctFlat=True)
self.setas(cm, 'cm')
@property
def cm(self):
"""Get absolute positions of vertices in 'cm' units.
"""
return self.getas('cm')
@cm.setter
def cm(self, value):
self.setas(value, 'cm')
@property
def norm(self):
"""Get absolute positions of vertices in 'norm' units.
"""
return self.getas('norm')
@norm.setter
def norm(self, value):
self.setas(value, 'norm')
@property
def height(self):
"""Get absolute positions of vertices in 'height' units.
"""
return self.getas('height')
@height.setter
def height(self, value):
self.setas(value, 'height')
if __name__ == "__main__":
pass
| 29,247 | Python | .py | 765 | 29.079739 | 97 | 0.572412 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
5,343 | clock.py | psychopy_psychopy/psychopy/clock.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 23 11:28:32 2013
Provides the high resolution timebase used by psychopy, and defines some time
related utility Classes.
Moved functionality from core.py so a common code
base could be used in core.py and logging.py; vs. duplicating the getTime and
Clock logic.
@author: Sol
@author: Jon
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import logging
import time
import sys
from datetime import datetime
from packaging.version import Version
try:
import pyglet
except ImportError:
pass # pyglet is not installed
from psychopy.constants import STARTED, NOT_STARTED, FINISHED
import psychopy.logging # Absolute import to work around circularity
# set the default timing mechanism
getTime = None
# Select the timer to use as the psychopy high resolution time base. Selection
# is based on OS and Python version.
#
# Three requirements exist for the psychopy time base implementation:
# A) The Python interpreter does not apply an offset to the times returned
# based on when the timer module being used was loaded or when the
#      timer function was first called.
# B) The timer implementation used must be monotonic and report elapsed
# time between calls, 'not' system or CPU usage time.
# C) The timer implementation must provide a resolution of 50 usec or
# better.
#
# Given the above requirements, psychopy selects a timer implementation as
# follows:
# 1) On Windows, the Windows Query Performance Counter API is used using
# ctypes access.
# 2) On other OS's, if the Python version being used is 2.6 or lower,
# time.time is used. For Python 2.7 and above, the timeit.default_timer
# function is used.
try:
import psychtoolbox
havePTB = True
except ImportError:
havePTB = False
if havePTB:
# def getTime():
# secs, wallTime, error = psychtoolbox.GetSecs('allclocks')
# return wallTime
getTime = psychtoolbox.GetSecs
elif sys.platform == 'win32':
from ctypes import byref, c_int64, windll
_fcounter = c_int64()
_qpfreq = c_int64()
windll.Kernel32.QueryPerformanceFrequency(byref(_qpfreq))
_qpfreq = float(_qpfreq.value)
_winQPC = windll.Kernel32.QueryPerformanceCounter
def getTime():
_winQPC(byref(_fcounter))
return _fcounter.value / _qpfreq
elif sys.platform == "darwin":
# Monotonic getTime with absolute origin. Suggested by @aforren1, and
# copied from github.com/aforren1/toon/blob/master/toon/input/mac_clock.py
import ctypes
_libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
# create helper class to store data
class mach_timebase_info_data_t(ctypes.Structure):
_fields_ = (('numer', ctypes.c_uint32),
('denom', ctypes.c_uint32))
# get function and set response type
_mach_absolute_time = _libc.mach_absolute_time
_mach_absolute_time.restype = ctypes.c_uint64
# calculate timebase
_timebase = mach_timebase_info_data_t()
_libc.mach_timebase_info(ctypes.byref(_timebase))
_ticks_per_second = _timebase.numer / _timebase.denom * 1.0e9
    # scaling factor so that timing works correctly on Intel and Apple Silicon
_scaling_factor = _timebase.numer / _timebase.denom
# then define getTime func
def getTime():
return (_mach_absolute_time() * _scaling_factor) / 1.0e9
else:
import timeit
getTime = timeit.default_timer
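
# Example (illustrative): whichever implementation was selected above,
# getTime() is used the same way -- the difference between two calls gives
# elapsed seconds on a monotonic timebase. `do_something` is a hypothetical
# workload.
#
#     t0 = getTime()
#     do_something()
#     elapsed = getTime() - t0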
class Timestamp(float):
"""
Object to represent a timestamp, which can return itself in a variety of formats.
Parameters
----------
value : float or str
Current time, as either:
- float : Seconds since arbitrary start time (if only using as a duration)
- float : Seconds since epoch (for an absolute time)
- str : Time string in the format specified by the parameter `format`
format : str or class
        Time format string (as in time.strftime) indicating how to convert this timestamp to a string, and how to
        interpret its value if given as a string. Use `float` (default) to always print the timestamp as a float, or
        `str` to use the ISO 8601 format.
lastReset : float
Epoch time at last clock reset. Will be added to raw value if printing to string.
"""
def __new__(cls, value, format=float, lastReset=0.0):
# if given a string, attempt to parse it using the given format
if isinstance(value, str):
# substitute nonspecified str format for ISO 8601
if format in (str, "str"):
format = "%Y-%m-%d_%H:%M:%S.%f%z"
# try to parse
try:
value = datetime.strptime(value, format)
except ValueError as err:
                # if parsing fails, retry without %z (absent from GMT strings)
                if format.endswith("%z"):
                    value = datetime.strptime(value, format[:-2])
                else:
                    # re-raise so an unparseable string cannot silently
                    # reach datetime.timestamp() below
                    raise
# convert to timestamp
value = datetime.timestamp(value) - lastReset
return float.__new__(cls, value)
def __init__(self, value, format=float, lastReset=0.0):
self.lastReset = lastReset
self.format = format
# create self as float representing the time
float.__init__(value)
def __str__(self):
# use strftime to return with own format
return self.strftime(format=self.format)
def __format__(self, format_spec):
if self.format in (float, "float"):
# format as normal if float is requested
return float.__format__(self, format_spec)
else:
# otherwise just stringify
return str(self)
def resolve(self, format=None):
"""
Get the value of this timestamp as a simple value, either str or float.
Parameters
----------
format : str, class or None
Time format string, as in time.strftime, or `float` to return as a float. Defaults (None) to using the
format given when this timestamp was initialised.
Returns
-------
str, float
The value of this timestamp in the requested format.
"""
# if format is unspecified, use own default
if format is None:
format = self.format
# if format is float, return as simple (non-timestamp) float
if format in (float, "float"):
return float(self)
# otherwise, format to string in requested format
return self.strftime(format=format)
def strftime(self, format="%Y-%m-%d_%H:%M:%S.%f%z"):
"""
Format this timestamp into a string with the given format.
Parameters
----------
format : str, class or None
Time format string, as in time.strftime, or `float` to print as a float. Defaults (None) to using the
format given when this timestamp was initialised.
Returns
-------
str
This timestamp as a string
"""
# if format is unspecified, use own default
if format in (None, "None"):
format = self.format
# if format is float, print using base method
if format in (float, "float"):
return float.__str__(self)
# substitute nonspecified str format for ISO 8601
if format in (str, "str"):
format = "%Y-%m-%d_%H:%M:%S.%f%z"
# convert to datetime
now = datetime.fromtimestamp(self + self.lastReset)
# format
return now.strftime(format)
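# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# Timestamp is a float subclass, so arithmetic works as for plain floats,
# while str() applies the stored format. Assuming an epoch value is supplied
# via `lastReset`, usage might look like:
#
#     ts = Timestamp(1.5, format=str, lastReset=time.time())
#     float(ts)                 # -> 1.5 (seconds since the clock reset)
#     str(ts)                   # -> ISO 8601 string for lastReset + 1.5 s
#     ts.resolve(format=float)  # -> 1.5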
class MonotonicClock:
"""A convenient class to keep track of time in your experiments using a
sub-millisecond timer.
Unlike the :class:`~psychopy.core.Clock` this cannot be reset to
arbitrary times. For this clock t=0 always represents the time that
the clock was created.
Don't confuse this `class` with `core.monotonicClock` which is an
`instance` of it that got created when PsychoPy.core was imported.
That clock instance is deliberately designed always to return the
time since the start of the study.
Version Notes: This class was added in PsychoPy 1.77.00
"""
def __init__(self, start_time=None, format=float):
super(MonotonicClock, self).__init__()
if start_time is None:
# this is sub-millisecond timer in python
self._timeAtLastReset = getTime()
else:
self._timeAtLastReset = start_time
self._epochTimeAtLastReset = time.time()
# store default format
self.format = format
def getTime(self, applyZero=True, format=None):
"""
Returns the current time on this clock in secs (sub-ms precision).
Parameters
----------
applyZero : bool
If applying zero then this will be the time since the clock was created (typically the beginning of the
script). If not applying zero then it is whatever the underlying clock uses as its base time but that is
            system-dependent, e.g. time since reboot or time since the Unix epoch.
Only applies when format is `float`.
format : type, str or None
Format in which to show timestamp when converting to a string. Can be either:
- time format codes: Time will return as a string in that format, as in time.strftime
- `str`: Time will return as a string in ISO 8601 (YYYY-MM-DD_HH:MM:SS.mmmmmmZZZZ)
- `None`: Will use this clock's `format` attribute
Returns
-------
Timestamp
Time with format requested.
"""
# substitute no format for default
if format in (None, "None"):
format = self.format
# substitute nonspecified str format for ISO 8601
if format in (str, "str"):
format = "%Y-%m-%d_%H:%M:%S.%f%z"
# get time since last reset
t = getTime() - self._timeAtLastReset
# get last reset time from epoch
lastReset = self._epochTimeAtLastReset
if not applyZero:
# if not applying zero, add epoch start time to t rather than supplying it
t += self._epochTimeAtLastReset
lastReset = 0
return Timestamp(t, format, lastReset=lastReset)
def getLastResetTime(self):
"""
Returns the current offset being applied to the high resolution
timebase used by Clock.
"""
return self._timeAtLastReset
monotonicClock = MonotonicClock()
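# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# The module-level `monotonicClock` above is created at import time, so its
# zero point is the moment psychopy.clock was first imported. For example:
#
#     from psychopy import clock
#     t = clock.monotonicClock.getTime()             # float seconds since import
#     ts = clock.monotonicClock.getTime(format=str)  # Timestamp; str(ts) is ISO 8601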
class Clock(MonotonicClock):
"""A convenient class to keep track of time in your experiments.
You can have as many independent clocks as you like (e.g. one
to time responses, one to keep track of stimuli ...)
This clock is identical to the :class:`~psychopy.core.MonotonicClock`
except that it can also be reset to 0 or another value at any point.
"""
def __init__(self, format=float):
super(Clock, self).__init__(format=format)
def reset(self, newT=0.0):
"""Reset the time on the clock. With no args time will be
set to zero. If a float is received this will be the new
time on the clock
"""
self._timeAtLastReset = getTime() + newT
self._epochTimeAtLastReset = time.time()
def addTime(self, t):
"""Add more time to the Clock/Timer
e.g.::
timer = core.Clock()
timer.addTime(5)
while timer.getTime() > 0:
# do something
"""
self._timeAtLastReset -= t
self._epochTimeAtLastReset -= t
def add(self, t):
"""
DEPRECATED: use .addTime() instead
This function adds time TO THE BASE (t0) which, counterintuitively,
reduces the apparent time on the clock
"""
psychopy.logging.log(msg=(
"Clock.add() is deprecated in favor of .addTime() due to the counterintuitive design "
"(it added time to the baseline, which reduced the values returned from getTime() )"
),
level=psychopy.logging.DEPRECATION
)
self._timeAtLastReset += t
self._epochTimeAtLastReset += t
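# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# A resettable Clock is the usual per-trial timer. A minimal pattern,
# assuming a `trials` iterable from your own experiment code:
#
#     trialClock = Clock()
#     for trial in trials:
#         trialClock.reset()          # t = 0 at trial onset
#         # ... present stimuli, collect a response ...
#         rt = trialClock.getTime()   # seconds since reset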
class CountdownTimer(Clock):
"""Similar to a :class:`~psychopy.core.Clock` except that time counts down
from the time of last reset.
Parameters
----------
start : float or int
Starting time in seconds to countdown on.
Examples
--------
Create a countdown clock with a 5 second duration::
timer = core.CountdownTimer(5)
while timer.getTime() > 0: # after 5s will become negative
# do stuff
"""
def __init__(self, start=0):
super(CountdownTimer, self).__init__()
self._countdown_duration = start
if start:
self.reset()
def getTime(self):
"""Returns the current time left on this timer in seconds with sub-ms
precision (`float`).
"""
return self._timeAtLastReset - getTime()
def addTime(self, t):
"""Add more time to the CountdownTimer
e.g.:
countdownTimer = core.CountdownTimer()
countdownTimer.addTime(1)
while countdownTimer.getTime() > 0:
# do something
"""
self._timeAtLastReset += t
self._epochTimeAtLastReset += t
def reset(self, t=None):
"""Reset the time on the clock.
Parameters
----------
t : float, int or None
With no args (`None`), time will be set to the time used for last
reset (or start time if no previous resets). If a number is
received, this will be the new time on the clock.
"""
if t is not None:
self._countdown_duration = t
Clock.reset(self, self._countdown_duration)
class StaticPeriod:
"""A class to help insert a timing period that includes code to be run.
Parameters
----------
screenHz : int or None
the frame rate of the monitor (leave as None if you
don't want this accounted for)
win : :class:`~psychopy.visual.Window`
If a :class:`~psychopy.visual.Window` is given then
:class:`StaticPeriod` will also pause/restart frame interval recording.
name : str
Give this StaticPeriod a name for more informative logging messages.
Examples
--------
Typical usage for the static period::
fixation.draw()
win.flip()
ISI = StaticPeriod(screenHz=60)
ISI.start(0.5) # start a period of 0.5s
stim.image = 'largeFile.bmp' # could take some time
ISI.complete() # finish the 0.5s, taking into account one 60Hz frame
stim.draw()
win.flip() # the period takes into account the next frame flip
# time should now be at exactly 0.5s later than when ISI.start()
# was called
"""
def __init__(self, screenHz=None, win=None, name='StaticPeriod'):
self.status = NOT_STARTED
self.countdown = CountdownTimer()
self.name = name
self.win = win
if screenHz is None:
self.frameTime = 0
else:
self.frameTime = 1.0 / screenHz
self._winWasRecordingIntervals = False
def start(self, duration):
"""Start the period. If this is called a second time, the timer will
be reset and starts again
Parameters
----------
duration : float or int
The duration of the period, in seconds.
"""
self.status = STARTED
self.countdown.reset(duration - self.frameTime)
# turn off recording of frame intervals throughout static period
if self.win:
self._winWasRecordingIntervals = self.win.recordFrameIntervals
self.win.recordFrameIntervals = False
def complete(self):
"""Completes the period, using up whatever time is remaining with a
call to `wait()`.
Returns
-------
float
`1` for success, `0` for fail (the period overran).
"""
self.status = FINISHED
timeRemaining = self.countdown.getTime()
if self.win:
self.win.recordFrameIntervals = self._winWasRecordingIntervals
if timeRemaining < 0:
msg = ('We overshot the intended duration of %s by %.4fs. The '
'intervening code took too long to execute.')
vals = self.name, abs(timeRemaining)
psychopy.logging.warn(msg % vals)
return 0
wait(timeRemaining)
return 1
def _dispatchWindowEvents():
"""Helper function for :func:`~.psychopy.core.wait`. Handles window event if
needed or returns otherwise.
"""
from . import core
if not (core.havePyglet and core.checkPygletDuringWait):
return # nop
# let's see if pyglet collected any event in meantime
try:
# this takes focus away from command line terminal window:
if Version(pyglet.version) < Version('1.2'):
# events for sounds/video should run independently of wait()
pyglet.media.dispatch_events()
except AttributeError:
# see http://www.pyglet.org/doc/api/pyglet.media-module.html#dispatch_events
# Deprecated: Since pyglet 1.1, Player objects schedule themselves
# on the default clock automatically. Applications should not call
# pyglet.media.dispatch_events().
pass
for winWeakRef in core.openWindows:
win = winWeakRef()
if (win.winType == "pyglet" and
hasattr(win.winHandle, "dispatch_events")):
win.winHandle.dispatch_events() # pump events
def wait(secs, hogCPUperiod=0.2):
"""Wait for a given time period.
This function halts execution of the program for the specified duration.
Precision of this function is usually within 1 millisecond of the specified
    time; this may vary depending on factors such as system load and the Python
version in use. Window events are periodically dispatched during the wait
to keep the application responsive, to avoid the OS complaining that the
process is unresponsive.
    If `secs=10` and `hogCPUperiod=0.2` then for 9.8s Python's `time.sleep` function
will be used, which is not especially precise, but allows the cpu to
perform housekeeping. In the final `hogCPUperiod` the more precise
method of constantly polling the clock is used for greater precision.
If you want to obtain key-presses during the wait, be sure to use
pyglet and then call :func:`psychopy.event.getKeys()` after calling
:func:`~.psychopy.core.wait()`
If you want to suppress checking for pyglet events during the wait, do this
once::
core.checkPygletDuringWait = False
and from then on you can do::
core.wait(sec)
This will preserve terminal-window focus during command line usage.
Parameters
----------
secs : float or int
Number of seconds to wait before continuing the program.
hogCPUperiod : float or int
Number of seconds to hog the CPU. This causes the thread to enter a
'tight' loop when the remaining wait time is less than the specified
interval. This is set to 200ms (0.2s) by default. It is recommended that
this interval is kept short to avoid stalling the processor for too
long which may result in poorer timing.
"""
# Calculate the relaxed period which we periodically suspend the thread,
# this puts less load on the CPU during long wait intervals.
relaxedPeriod = secs - hogCPUperiod
# wait loop, suspends the thread periodically and consumes CPU resources
t0 = getTime()
while True:
elapsed = getTime() - t0
if elapsed > secs: # no more time left, break the loop
break
if elapsed > relaxedPeriod: # hog period
            sleepDur = 0.00001  # 10 microseconds (0.01 ms)
else:
relaxedTimeLeft = relaxedPeriod - elapsed
sleepDur = 0.01 if relaxedTimeLeft > 0.01 else relaxedTimeLeft
time.sleep(sleepDur)
_dispatchWindowEvents()
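# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# The hybrid sleep-then-poll strategy above trades CPU load for precision.
# A rough equivalent of a single wait(0.5) call, ignoring event dispatching:
#
#     t0 = getTime()
#     while getTime() - t0 < 0.5 - 0.2:
#         time.sleep(0.01)    # coarse, CPU-friendly phase
#     while getTime() - t0 < 0.5:
#         pass                # tight polling for the final hogCPUperiod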
def getAbsTime():
"""Get the absolute time.
This uses the same clock-base as the other timing features, like
`getTime()`. The time (in seconds) ignores the time-zone (like `time.time()`
on linux). To take the timezone into account, use
`int(time.mktime(time.gmtime()))`.
Absolute times in seconds are especially useful to add to generated file
names for being unique, informative (= a meaningful time stamp), and because
the resulting files will always sort as expected when sorted in
chronological, alphabetical, or numerical order, regardless of locale and so
on.
Version Notes: This method was added in PsychoPy 1.77.00
Returns
-------
    int
        Absolute Unix time (i.e., whole seconds elapsed since Jan 1, 1970).
"""
return int(time.mktime(time.localtime()))
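# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# As the docstring suggests, getAbsTime() is handy for sortable, unique file
# names. For example:
#
#     fileName = 'data_%d.csv' % getAbsTime()   # e.g. 'data_1725548969.csv'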
| 21,228 | Python | .py | 496 | 34.967742 | 118 | 0.648758 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,344 | constants.py | psychopy_psychopy/psychopy/constants.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# instead of import *, use this (+ PSYCHOPY_USERAGENT if you need that)
# (NOT_STARTED, STARTED, PLAYING, PAUSED, STOPPED, FINISHED, PRESSED,
# RELEASED, FOREVER)
import sys, os, copy
from os.path import abspath, join
from types import SimpleNamespace
# pertaining to the status of components/routines/experiments
status = SimpleNamespace()
status.__doc__ = (
"- NOT_STARTED (0): The component has not yet started.\n"
"- PLAYING / STARTED (1): The component has started.\n"
"- PAUSED (2): The component has started but has been paused.\n"
"- RECORDING (3): Component is not only started, but also actively recording some input.\n"
"- STOPPED / FINISHED (-1): Component has finished.\n"
"- SKIP / SEEKING (-2): Component is in the process of changing state.\n"
"- STOPPING (-3): Component is in the process of stopping.\n"
"- INVALID (-9999): Something has gone wrong and status is not available.\n"
)
status.NOT_STARTED = NOT_STARTED = 0
status.PLAYING = PLAYING = 1
status.STARTED = STARTED = PLAYING
status.PAUSED = PAUSED = 2
status.RECORDING = RECORDING = 3
status.STOPPED = STOPPED = -1
status.FINISHED = FINISHED = STOPPED
status.SKIP = SKIP = SEEKING = -2
status.STOPPING = STOPPING = -3
status.INVALID = INVALID = -9999
# pertaining to the priority of columns in the data file
priority = SimpleNamespace()
priority.__doc__ = (
"- CRITICAL (30): Always at the start of the data file, generally reserved for Routine start times\n "
"- HIGH (20): Important columns which are near the front of the data file\n"
"- MEDIUM (10): Possibly important columns which are around the middle of the data file\n"
"- LOW (0): Columns unlikely to be important which are at the end of the data file\n"
"- EXCLUDE (-10): Always at the end of the data file, actively marked as unimportant\n"
)
priority.CRITICAL = PRIORITY_CRITICAL = 30
priority.HIGH = PRIORITY_HIGH = 20
priority.MEDIUM = PRIORITY_MEDIUM = 10
priority.LOW = PRIORITY_LOW = 0
priority.EXCLUDE = PRIORITY_EXCLUDE = -10
# for button box:
PRESSED = 1
RELEASED = -1
# while t < FOREVER ... -- in scripts generated by Builder
FOREVER = 1000000000 # seconds
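# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# Builder-generated scripts typically compare a component's `status` against
# these constants inside the frame loop. A hypothetical pattern (`stim`, `t`,
# `stimStart` and `stimStop` are placeholder names):
#
#     if stim.status == NOT_STARTED and t >= stimStart:
#         stim.status = STARTED
#     elif stim.status == STARTED and t >= stimStop:
#         stim.status = FINISHED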
# USERAGENT is for consistent http-related self-identification across an app.
# It shows up in server logs on the receiving end. Currently, the value (and
# its use from psychopy) is arbitrary and optional. Having it standardized
# and fixed will also help people who develop their own http-log analysis
# tools for use with contrib.http.upload()
PSYCHOPY_USERAGENT = ("PsychoPy: open-source Psychology & Neuroscience tools; "
"www.psychopy.org")
# find a copy of git if possible to do push/pull as needed
# the pure-python dulwich lib can do most things but merged push/pull
# isn't currently possible (e.g. pull overwrites any local commits!)
# see https://github.com/dulwich/dulwich/issues/666
ENVIRON = copy.copy(os.environ)
gitExe = None
if sys.platform == 'darwin':
_gitStandalonePath = abspath(join(sys.executable, '..', '..',
'Resources', 'git-core'))
if os.path.exists(_gitStandalonePath):
ENVIRON["PATH"] = "{}:".format(_gitStandalonePath) + ENVIRON["PATH"]
gitExe = join(_gitStandalonePath, 'git')
elif sys.platform == 'win32':
_gitStandalonePath = abspath(join(sys.executable, '..', 'MinGit', 'cmd'))
if os.path.exists(_gitStandalonePath):
ENVIRON["PATH"] = "{};".format(_gitStandalonePath) + ENVIRON["PATH"]
os.environ["GIT_PYTHON_GIT_EXECUTABLE"] = _gitStandalonePath
gitExe = join(_gitStandalonePath, 'git.exe')
if gitExe:
os.environ["GIT_PYTHON_GIT_EXECUTABLE"] = gitExe
| 3,725 | Python | .py | 76 | 45.605263 | 106 | 0.711655 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,345 | logging.py | psychopy_psychopy/psychopy/logging.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Provides functions for logging error and other messages to one or more
files and/or the console, using python's own logging module. Some warning
messages and error messages are generated by PsychoPy itself. The user can
generate more using the functions in this module.
There are various levels for logged messages with the following order of
importance: ERROR, WARNING, DATA, EXP, INFO and DEBUG.
When setting the level for a particular log target (e.g. LogFile)
the user can set the minimum level that is required
for messages to enter the log. For example, setting a level of INFO will
result in INFO, EXP, DATA, WARNING and ERROR messages being recorded but not
DEBUG messages.
By default, PsychoPy will record messages of WARNING level and above to
the console. The user can silence that by setting it to receive only CRITICAL
messages (which PsychoPy doesn't use), using the commands::
from psychopy import logging
logging.console.setLevel(logging.CRITICAL)
"""
# Much of the code below is based conceptually, if not syntactically, on the
# python logging module but it's simpler (no threading) and maintaining a
# stack of log entries for later writing (don't want files written while
# drawing)
from os import path
import atexit
import sys
import codecs
import locale
from pathlib import Path
from psychopy import clock
_packagePath = path.split(__file__)[0]
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
DEPRECATION = ERROR
WARNING = 30
WARN = WARNING
DATA = 25 # will be a custom level
EXP = 22 # info about the experiment might be less important than data info?
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL: 'CRITICAL',
ERROR: 'ERROR',
WARNING: 'WARNING',
DATA: 'DATA',
EXP: 'EXP',
INFO: 'INFO',
DEBUG: 'DEBUG',
NOTSET: 'NOTSET',
'CRITICAL': CRITICAL,
'ERROR': ERROR,
'DEPRECATION': DEPRECATION,
'DATA': DATA,
'EXP': EXP,
'WARN': WARNING,
'WARNING': WARNING,
'INFO': INFO,
'DEBUG': DEBUG,
'NOTSET': NOTSET}
# string to search for level names in a log message
_levelNamesRe = "|".join(key for key in _levelNames if isinstance(key, str))
_prefEncoding = locale.getpreferredencoding()
def getLevel(level):
"""Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
# use allcaps
if isinstance(level, str):
level = level.upper()
return _levelNames.get(level, "Level %s" % level)
def addLevel(level, levelName):
"""Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_levelNames[level] = levelName
_levelNames[levelName] = level
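# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# addLevel() registers a custom severity in both directions of the
# _levelNames map. For example, a hypothetical level between DEBUG and INFO:
#
#     TRACE = 15
#     addLevel(TRACE, 'TRACE')
#     getLevel(15)        # -> 'TRACE'
#     getLevel('TRACE')   # -> 15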
# global defaultClock
defaultClock = clock.monotonicClock
def setDefaultClock(clock):
"""Set the default clock to be used to reference all logging times.
Must be a :class:`psychopy.core.Clock` object. Beware that if you
reset the clock during the experiment then the resets will be
reflected here. That might be useful if you want your logs to be
reset on each trial, but probably not.
"""
global defaultClock
defaultClock = clock
class _LogEntry():
def __init__(self, level, message, t=None, obj=None, levelname=None):
super(_LogEntry, self).__init__()
self.t = t
self.t_ms = t * 1000
self.level = level
if levelname is None:
levelname = getLevel(level)
self.levelname = levelname
self.message = message
self.obj = obj
class LogFile():
"""A text stream to receive inputs from the logging system
"""
def __init__(self, f=None, level=WARNING, filemode='a', logger=None,
encoding='utf8'):
"""Create a log file as a target for logged entries of a given level
:parameters:
- f:
this could be a string to a path, that will be created if it
doesn't exist. Alternatively this could be a file object,
sys.stdout or any object that supports .write() and .flush()
methods
- level:
The minimum level of importance that a message must have
to be logged by this target.
- filemode: 'a', 'w'
Append or overwrite existing log file
"""
super(LogFile, self).__init__()
# work out if this is a filename or a stream to write to
if isinstance(f, Path):
f = str(f)
if f is None:
self.stream = 'stdout'
elif hasattr(f, 'write'):
self.stream = f
elif isinstance(f, str):
self.stream = codecs.open(f, filemode, encoding)
self.level = level
if logger is None:
logger = root
# Can not use weak ref to logger, as sometimes this class
# instance would be gc'ed before _Logger.__del__
# was complete (running .flush()).
# This was causing following error when script closed:
# Exception AttributeError: "'NoneType' object has no
# attribute 'stdout'" in
# <bound method _Logger.__del__ of
# <psychopy.logging._Logger instance at 0x102e0d878>> ignored
# So instead free logger ref in __del__ of this class,
# so we know any log backlog can be flushed before it is gc'ed.
self.logger = logger
self.logger.addTarget(self)
def setLevel(self, level):
"""Set a new minimal level for the log file/stream
"""
# if given a name, get corresponding integer value
if isinstance(level, str):
level = getLevel(level)
# make sure we (now) have an integer
if type(level) is not int:
            raise TypeError("LogFile.setLevel() should be given an int, which "
                            "is usually one of logging.INFO (not logging.info)")
self.level = level
self.logger._calcLowestTarget()
def write(self, txt):
"""Write directly to the log file (without using logging functions).
Useful to send messages that only this file receives
"""
# find the current stdout if we're the console logger
if self.stream == 'stdout':
stream = sys.stdout
else:
stream = self.stream
# try to write
try:
stream.write(txt)
except UnicodeEncodeError as e: # incompatible encoding of stdout?
try:
if hasattr(stream, 'reconfigure'):
stream.reconfigure(encoding='utf-8')
elif stream == sys.stdout:
# try opening sys.stdout manually as a file
sys.stdout = stream = open(sys.stdout.fileno(), mode='w',
encoding='utf-8', buffering=1)
stream.write(txt) # try again with the new encoding
except Exception:
print('Failed to reconfigure logger output encoding', e)
try:
stream.flush()
except Exception:
pass
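# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# A LogFile attaches itself to the root logger on creation. Assuming write
# access to the given path, a typical setup might be:
#
#     from psychopy import logging
#     logFile = logging.LogFile('run.log', level=logging.INFO, filemode='w')
#     logging.console.setLevel(logging.WARNING)  # keep the console quieter
#     logging.exp('trial 1 started')  # reaches run.log but not the console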
class _Logger():
"""Maintains a set of log targets (text streams such as files of stdout)
self.targets is a list of dicts {'stream':stream, 'level':level}
"""
def __init__(self, format="{t:.4f} \t{levelname} \t{message}"):
"""The string-formatted elements {xxxx} can be used, where
each xxxx is an attribute of the LogEntry.
e.g. t, t_ms, level, levelname, message
"""
super(_Logger, self).__init__()
self.targets = []
self.flushed = []
self.toFlush = []
self.format = format
self.lowestTarget = 50
def __del__(self):
self.flush()
# unicode logged to coder output window can cause logger failure, with
# error message pointing here. this is despite it being ok to log to
# terminal or Builder output. proper fix: fix coder unicode bug #97
# (currently closed)
def addTarget(self, target):
"""Add a target, typically a :class:`~log.LogFile` to the logger
"""
self.targets.append(target)
self._calcLowestTarget()
def removeTarget(self, target):
"""Remove a target, typically a :class:`~log.LogFile` from the logger
"""
if target in self.targets:
self.targets.remove(target)
self._calcLowestTarget()
def _calcLowestTarget(self):
self.lowestTarget = 50
for target in self.targets:
self.lowestTarget = min(self.lowestTarget, target.level)
def log(self, message, level, t=None, obj=None, levelname=None):
"""Add the `message` to the log stack at the appropriate `level`
If no relevant targets (files or console) exist then the message is
simply discarded.
"""
# check for at least one relevant logger
if level < self.lowestTarget:
return
# check time
if t is None:
global defaultClock
t = defaultClock.getTime()
# add message to list
self.toFlush.append(
_LogEntry(t=t, level=level, levelname=levelname, message=message, obj=obj))
def flush(self):
"""Process all current messages to each target
"""
# loop through targets then entries in toFlush
# so that stream.flush can be called just once
formatted = {} # keep a dict - so only do the formatting once
for target in self.targets:
for thisEntry in self.toFlush:
if thisEntry.level >= target.level:
                    if thisEntry not in formatted:
# convert the entry into a formatted string
formatted[thisEntry] = self.format.format(**thisEntry.__dict__)
target.write(formatted[thisEntry] + '\n')
if hasattr(target.stream, 'flush'):
target.stream.flush()
# finished processing entries - move them to self.flushed
self.flushed.extend(self.toFlush)
self.toFlush = [] # a new empty list
root = _Logger()
console = LogFile(level=WARNING)
def flush(logger=root):
"""Send current messages in the log to all targets
"""
logger.flush()
# make sure this function gets called as python closes
atexit.register(flush)
def critical(msg, t=None, obj=None):
"""log.critical(message)
Send the message to any receiver of logging info (e.g. a LogFile)
of level `log.CRITICAL` or higher
"""
root.log(msg, level=CRITICAL, t=t, obj=obj, levelname="CRITICAL")
fatal = critical
def error(msg, t=None, obj=None):
"""log.error(message)
Send the message to any receiver of logging info (e.g. a LogFile)
of level `log.ERROR` or higher
"""
root.log(msg, level=ERROR, t=t, obj=obj, levelname="ERROR")
def warning(msg, t=None, obj=None):
"""log.warning(message)
Sends the message to any receiver of logging info (e.g. a LogFile)
of level `log.WARNING` or higher
"""
root.log(msg, level=WARNING, t=t, obj=obj, levelname="WARNING")
warn = warning
def data(msg, t=None, obj=None):
"""Log a message about data collection (e.g. a key press)
usage::
log.data(message)
Sends the message to any receiver of logging info (e.g. a LogFile)
of level `log.DATA` or higher
"""
root.log(msg, level=DATA, t=t, obj=obj, levelname="DATA")
def exp(msg, t=None, obj=None):
"""Log a message about the experiment
(e.g. a new trial, or end of a stimulus)
usage::
log.exp(message)
Sends the message to any receiver of logging info (e.g. a LogFile)
of level `log.EXP` or higher
"""
root.log(msg, level=EXP, t=t, obj=obj, levelname="EXP")
def info(msg, t=None, obj=None):
"""Log some information - maybe useful, maybe not
usage::
log.info(message)
Sends the message to any receiver of logging info (e.g. a LogFile)
of level `log.INFO` or higher
"""
root.log(msg, level=INFO, t=t, obj=obj, levelname="INFO")
def debug(msg, t=None, obj=None):
"""Log a debugging message (not likely to be wanted once
experiment is finalised)
usage::
log.debug(message)
Sends the message to any receiver of logging info (e.g. a LogFile)
of level `log.DEBUG` or higher
"""
root.log(msg, level=DEBUG, t=t, obj=obj, levelname="DEBUG")
def log(msg, level, t=None, obj=None):
"""Log a message
usage::
log(msg, level, t=t, obj=obj)
Log the msg, at a given level on the root logger
"""
root.log(msg, level=level, t=t, obj=obj)
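# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# Entries accumulate in the root logger and are only written on flush() (or
# automatically at exit). To force output at a safe moment, e.g. between
# trials:
#
#     data('key press: space')   # queued, not yet written
#     exp('end of trial 3')      # queued
#     flush()                    # both entries now reach every target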
| 13,352 | Python | .py | 329 | 33.243161 | 87 | 0.644044 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,346 | __init__.py | psychopy_psychopy/psychopy/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import os
import sys
import pathlib
def getVersion():
return (pathlib.Path(__file__).parent/"VERSION").read_text(encoding="utf-8").strip()
__version__ = getVersion()
__git_sha__ = (pathlib.Path(__file__).parent/"GIT_SHA").read_text(encoding="utf-8").strip()
__license__ = 'GPL v3'
__author__ = 'Open Science Tools Ltd'
__author_email__ = 'support@opensciencetools.org'
__maintainer_email__ = 'support@opensciencetools.org'
__url__ = 'https://www.psychopy.org/'
__download_url__ = 'https://github.com/psychopy/psychopy/releases/'
__build_platform__ = 'n/a'
__all__ = ["gui", "misc", "visual", "core",
"event", "data", "sound", "microphone"]
# for developers the following allows access to the current git sha from
# their repository
if __git_sha__ == 'n/a':
from subprocess import check_output, PIPE
# see if we're in a git repo and fetch from there
try:
thisFileLoc = os.path.split(__file__)[0]
output = check_output(['git', 'rev-parse', '--short', 'HEAD'],
cwd=thisFileLoc, stderr=PIPE)
except Exception:
output = False
if output:
__git_sha__ = output.strip() # remove final linefeed
# update preferences and the user paths
if 'installing' not in locals():
from psychopy.preferences import prefs
import site
# Configure the environment to use our custom site-packages location for
# user-installed packages. In the future, this will be configured outside of
# the running environment, but for now, we need to do it here.
useDefaultSite = False
if 'PSYCHOPYNOPACKAGES' in os.environ:
# Custom environment variable for people using PsychoPy as a library,
# who don't want to use the custom site-packages location. If set to 1,
# this will disable the custom site-packages location. Packages will be
# installed in the default, system dependent user's site-packages
# location.
useDefaultSite = os.environ['PSYCHOPYNOPACKAGES'] == '1'
# configure environment for custom site-packages location
if not useDefaultSite:
env = os.environ.copy()
if 'PYTHONPATH' in env: # append entries to existing PYTHONPATH
_userPackages = str(prefs.paths['packages'])
if _userPackages not in env['PYTHONPATH']:
env['PYTHONPATH'] = os.pathsep.join([
env['PYTHONPATH'], _userPackages])
_userSitePackages = str(prefs.paths['userPackages'])
if _userSitePackages not in env['PYTHONPATH']:
env['PYTHONPATH'] = os.pathsep.join([
env['PYTHONPATH'], _userSitePackages])
else:
env['PYTHONPATH'] = os.pathsep.join([
str(prefs.paths['packages']),
str(prefs.paths['userPackages'])])
# set user site packages
env['PYTHONUSERBASE'] = prefs.paths['packages']
# set environment variable for pip to get egg-info of installed packages
if sys.platform == 'darwin':
# python 3.8 cannot parse the egg-info of zip-packaged dists
# correctly, so pip install will always install extra copies
# of dependencies. On python 3.10, we can force importlib to
# avoid this issue, but we need to use an environment variable
if sys.version_info >= (3, 10):
env['_PIP_USE_IMPORTLIB_METADATA'] = 'True'
# update environment, pass this to sub-processes (e.g. pip)
os.environ.update(env)
# make sure site knows about our custom user site-packages
site.USER_SITE = prefs.paths['userPackages']
site.ENABLE_USER_SITE = True
# site.main()
# add paths from main plugins/packages (installed by plugins manager)
site.addsitedir(prefs.paths['userPackages']) # user site-packages
site.addsitedir(prefs.paths['userInclude']) # user include
site.addsitedir(prefs.paths['packages']) # base package dir
_envPath = os.environ.get('PATH', None)
if _envPath is not None:
# add user include path to system PATH (for C extensions)
if str(prefs.paths['userInclude']) not in _envPath:
os.environ['PATH'] = os.pathsep.join([
os.environ['PATH'], str(prefs.paths['userInclude'])])
# add scripts path for user packages to system PATH
if str(prefs.paths['userScripts']) not in _envPath:
os.environ['PATH'] = os.pathsep.join([
os.environ['PATH'], str(prefs.paths['userScripts'])])
if sys.platform == 'darwin' and sys._framework:
# add scripts path for user packages to system PATH
fwBinPath = os.path.join(sys.prefix, 'bin')
if fwBinPath not in os.environ['PATH']:
os.environ['PATH'] = os.pathsep.join([
fwBinPath, os.environ['PATH']])
# add paths from general preferences
for _pathName in prefs.general['paths']:
sys.path.append(_pathName)
# Add paths from individual plugins/packages (installed by plugins manager),
# this is to support legacy plugins that don't use the customized user
# site-packages location. This will be removed in the future.
import pathlib as _pathlib
for _pathName in _pathlib.Path(prefs.paths['packages']).glob("*"):
if _pathName.is_dir():
sys.path.append(str(_pathName))
from psychopy.tools.versionchooser import useVersion, ensureMinimal
if sys.version_info.major < 3:
raise ImportError("psychopy does not support Python2 installations. "
"The last version to support Python2.7 was PsychoPy "
"2021.2.x")
# import readline here to get around an issue with sounddevice
# issues GH-2230 GH-2344 GH-2662
try:
import readline
except ImportError:
pass # all that will happen is the stderr/stdout might get redirected
| 6,212 | Python | .py | 122 | 42.368852 | 91 | 0.645981 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,347 | core.py | psychopy_psychopy/psychopy/core.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Basic functions, including timing, rush (imported), quit
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import sys
import threading
import subprocess
import shlex
import locale
# some things are imported just to be accessible within core's namespace
from psychopy.clock import (MonotonicClock, Clock, CountdownTimer,
wait, monotonicClock, getAbsTime,
StaticPeriod) # pylint: disable=W0611
# always safe to call rush, even if its not going to do anything for a
# particular OS
from psychopy.platform_specific import rush # pylint: disable=W0611
from psychopy import logging
from psychopy.constants import STARTED, NOT_STARTED, FINISHED
from psychopy.piloting import PILOTING, getPilotMode, setPilotMode, setPilotModeFromArgs
try:
import pyglet
havePyglet = True
# may not want to check, to preserve terminal window focus
checkPygletDuringWait = True
except ImportError:
havePyglet = False
checkPygletDuringWait = False
try:
import glfw
haveGLFW = True
except ImportError:
haveGLFW = False
runningThreads = [] # just for backwards compatibility?
openWindows = [] # visual.Window updates this, event.py and clock.py use it
# Set getTime in core to == the monotonicClock instance created in the
# clockModule.
# The logging module sets the defaultClock to == clock.monotonicClock,
# so by default the core.getTime() and logging.defaultClock.getTime()
# functions return the 'same' timebase.
#
# This way 'all' OSs have a core.getTime() timebase that starts at 0.0 when
# the experiment is launched, instead of it being this way on Windows only
# (which was also a discrepancy between OSs when win32 was using time.clock).
def getTime(applyZero=True):
"""Get the current time since psychopy.core was loaded.
Version Notes: Note that prior to PsychoPy 1.77.00 the behaviour of
getTime() was platform dependent (on OSX and linux it was equivalent to
:func:`psychopy.core.getAbsTime`
whereas on windows it returned time since loading of the module, as now)
"""
return monotonicClock.getTime(applyZero)
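# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# With applyZero left True, core.getTime() and logging's default clock share
# the same zero point, so log timestamps line up with times recorded in a
# script. For example:
#
#     from psychopy import core
#     onset = core.getTime()                # seconds since psychopy.core loaded
#     raw = core.getTime(applyZero=False)   # backend-dependent timebase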
def quit():
"""Close everything and exit nicely (ending the experiment)
"""
# pygame.quit() # safe even if pygame was never initialised
logging.flush()
# properly shutdown ioHub server
from psychopy.iohub.client import ioHubConnection
if ioHubConnection.ACTIVE_CONNECTION:
ioHubConnection.ACTIVE_CONNECTION.quit()
for thisThread in threading.enumerate():
if hasattr(thisThread, 'stop') and hasattr(thisThread, 'running'):
# this is one of our event threads - kill it and wait for success
thisThread.stop()
while thisThread.running == 0:
pass # wait until it has properly finished polling
sys.exit(0) # quits the python session entirely
def shellCall(shellCmd, stdin='', stderr=False, env=None, encoding=None):
"""Call a single system command with arguments, return its stdout.
Returns stdout, stderr if stderr is True.
Handles simple pipes, passing stdin to shellCmd (pipes are untested
on windows) can accept string or list as the first argument
Parameters
----------
shellCmd : str, or iterable
The command to execute, and its respective arguments.
stdin : str, or None
Input to pass to the command.
stderr : bool
Whether to return the standard error output once execution is finished.
env : dict
The environment variables to set during execution.
encoding : str
The encoding to use for communication with the executed command.
This argument will be ignored on Python 2.7.
Notes
-----
We use ``subprocess.Popen`` to execute the command and establish
`stdin` and `stdout` pipes.
Python 2.7 always opens the pipes in text mode; however,
Python 3 defaults to binary mode, unless an encoding is specified.
To unify pipe communication across Python 2 and 3, we now provide an
`encoding` parameter, enforcing `utf-8` text mode by default.
This parameter is present from Python 3.6 onwards; using an older
Python 3 version will raise an exception. The parameter will be ignored
when running Python 2.7.
"""
if encoding is None:
encoding = locale.getpreferredencoding()
if type(shellCmd) == str:
# safely split into cmd+list-of-args, no pipes here
shellCmdList = shlex.split(shellCmd)
elif type(shellCmd) == bytes:
# safely split into cmd+list-of-args, no pipes here
shellCmdList = shlex.split(shellCmd.decode('utf-8'))
elif type(shellCmd) in (list, tuple): # handles whitespace in filenames
shellCmdList = shellCmd
else:
msg = 'shellCmd requires a string or iterable.'
raise TypeError(msg)
cmdObjects = []
for obj in shellCmdList:
if type(obj) != bytes:
cmdObjects.append(obj)
else:
cmdObjects.append(obj.decode('utf-8'))
# the `encoding` parameter results in unicode coming back
if sys.version_info.minor >= 6:
proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding=encoding, env=env)
else:
msg = 'shellCall() requires Python 2.7, or 3.6 and newer.'
raise RuntimeError(msg)
stdoutData, stderrData = proc.communicate(stdin)
del proc
if stderr:
return stdoutData.strip(), stderrData.strip()
else:
return stdoutData.strip()
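# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# shellCall() accepts a command string or an argument list and returns the
# command's stdout (plus stderr when requested). Assuming a POSIX system with
# `echo` and `ls` on the PATH:
#
#     out = shellCall('echo hello')                    # -> 'hello'
#     out, err = shellCall(['ls', 'no_such_file'], stderr=True)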
| 5,864 | Python | .py | 134 | 37.589552 | 88 | 0.704055 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,348 | locale_setup.py | psychopy_psychopy/psychopy/locale_setup.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""locale handling for PsychoPy experiment scripts, currently Mac 10.10.3+
Purpose: Avoid a unicode-related python crash on Mac 10.10.3 (maybe only
in conda environment?)
Usage: Just import this module at the top of experiment scripts. Should be fast
enough and safe to do for all Builder scripts.
It's unlikely to be widely useful for localizing experiments; that is not its
intended purpose. Few people will want to have the visual display of text in
their experiment vary by locale. If they do, it is easy enough for them to
create multiple versions of an experiment.
"""
import platform
macVer = platform.mac_ver()[0] # e.g., '10.9.5' or '' for non-Mac
if macVer:
def _versionTuple(v):
return tuple(map(int, v.split('.')))
ver = _versionTuple(macVer)
if ver > _versionTuple('10.10.2'):
# set locale and prefs experiment-wide, without saving prefs to disk
import locale
from psychopy import prefs
if not prefs.app['locale']:
prefs.app['locale'] = u'en_US'
locale.setlocale(locale.LC_ALL, str(prefs.app['locale']) + '.UTF-8')
| 1,152 | Python | .py | 25 | 41.96 | 79 | 0.71008 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,349 | exceptions.py | psychopy_psychopy/psychopy/exceptions.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
class DependencyError(Exception):
"""The user requested something that won't be possible because
of a dependency error (e.g. audiolib that isn't available)
"""
pass
class SoundFormatError(Exception):
"""The user tried to create two streams (diff sample rates) on a machine
that won't allow that
"""
pass
class NoUserError(Exception):
pass
class NoGitError(DependencyError):
pass
class RepositoryError(Exception):
pass
class ConditionsImportError(Exception):
"""
    Exception class to handle errors arising when attempting to import
    conditions from a file. Includes the attribute `translated`, because these
    error messages are used by Builder to set a label in the loop dialog, so
    they need to be available both as a fixed value (which can be queried) and
    as a variable value (which can change according to locale).
Parameters
    ----------
msg : str
The error message to be displayed, same as any other Exception.
translated : str
The error message translated according to user's locale.
"""
def __init__(self, msg, translated=None):
# Initialise exception with message
Exception.__init__(self, msg)
# Add reason
self.translated = translated or msg
class MissingFontError(Exception):
pass
| 1,552 | Python | .py | 42 | 32.119048 | 81 | 0.719357 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,350 | misc.py | psychopy_psychopy/psychopy/misc.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Wrapper for all miscellaneous functions and classes from psychopy.tools
"""
# pylint: disable=W0611
# W0611 = Unused import %s
from psychopy.tools.arraytools import (createXYs, extendArr, makeRadialMatrix,
ratioRange, shuffleArray, val2array)
from psychopy.tools.attributetools import (attributeSetter, setAttribute,
logAttrib)
from psychopy.tools.colorspacetools import (dkl2rgb, dklCart2rgb,
hsv2rgb, lms2rgb,
rgb2dklCart, rgb2lms)
from psychopy.tools.coordinatetools import (cart2pol, pol2cart,
cart2sph, sph2cart)
from psychopy.tools.fileerrortools import handleFileCollision
from psychopy.tools.filetools import toFile, fromFile, mergeFolder
from psychopy.tools.imagetools import array2image, image2array, makeImageAuto
from psychopy.tools.monitorunittools import (cm2deg, deg2cm, cm2pix, pix2cm,
deg2pix, pix2deg, convertToPix)
from psychopy.tools.plottools import plotFrameIntervals
from psychopy.tools.typetools import float_uint8, float_uint16, uint8_float
from numpy import radians, degrees
| 1,505 | Python | .py | 26 | 46.384615 | 79 | 0.684642 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,351 | web.py | psychopy_psychopy/psychopy/web.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Library for working with internet connections"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import pathlib
import sys
import socket
import re
from psychopy import logging
from psychopy import prefs
import urllib.request
import urllib.error
import urllib.parse
# default 20s from prefs, min 2s
TIMEOUT = max(prefs.connections['timeout'], 2.0)
socket.setdefaulttimeout(TIMEOUT)
# global proxies
proxies = None # if this is populated then it has been set up already
class NoInternetAccessError(Exception):
"""An internet connection is required but not available
"""
# global haveInternet
haveInternet = None # gets set True or False when you check
def haveInternetAccess(forceCheck=False):
"""Detect active internet connection or fail quickly.
If forceCheck is False, will rely on a cached value if possible.
"""
global haveInternet
if forceCheck or haveInternet is None:
# try to connect to a high-availability site
sites = ["http://www.google.com/", "http://www.opendns.com/"]
for wait in [0.3, 0.7]: # try to be quick first
for site in sites:
try:
urllib.request.urlopen(site, timeout=wait)
haveInternet = True # cache
return True # one success is good enough
except Exception: # urllib.error.URLError:
# socket.timeout() can also happen
pass
else:
haveInternet = False
return haveInternet
def requireInternetAccess(forceCheck=False):
"""Checks for access to the internet, raise error if no access.
"""
if not haveInternetAccess(forceCheck=forceCheck):
msg = 'Internet access required but not detected.'
logging.error(msg)
raise NoInternetAccessError(msg)
return True
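# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# The two checks above compose naturally at the start of a networked
# experiment (uploadResults is a hypothetical user function):
#
#     from psychopy import web
#     web.requireInternetAccess()    # raises NoInternetAccessError if offline
#     if web.haveInternetAccess():   # cached after the first successful check
#         uploadResults()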
def tryProxy(handler, URL=None):
"""
Test whether we can connect to a URL with the current proxy settings.
`handler` can be typically `web.proxies`, if `web.setupProxy()` has been
run.
:Returns:
- True (success)
- a `urllib.error.URLError` (which can be interrogated with `.reason`)
- a `urllib.error.HTTPError` (which can be interrogated with `.code`)
"""
if URL is None:
URL = 'http://www.google.com' # hopefully google isn't down!
req = urllib.request.Request(URL)
opener = urllib.request.build_opener(handler)
try:
opener.open(req, timeout=2).read(5) # open and read a few characters
return True
except urllib.error.HTTPError as err:
return err
except urllib.error.URLError as err:
return err
def getPacFiles():
"""Return a list of possible auto proxy .pac files being used,
based on the system registry (win32) or system preferences (OSX).
"""
pacFiles = []
if sys.platform == 'win32':
try:
import _winreg as winreg # used from python 2.0-2.6
except ImportError:
import winreg # used from python 2.7 onwards
net = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
"Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings")
nSubs, nVals, lastMod = winreg.QueryInfoKey(net)
subkeys = {}
for i in range(nVals):
thisName, thisVal, thisType = winreg.EnumValue(net, i)
subkeys[thisName] = thisVal
if ('AutoConfigURL' in list(subkeys.keys()) and
len(subkeys['AutoConfigURL']) > 0):
pacFiles.append(subkeys['AutoConfigURL'])
elif sys.platform == 'darwin':
import plistlib
prefs_loc = pathlib.Path('/Library/Preferences/SystemConfiguration/preferences.plist')
if prefs_loc.exists():
with open(prefs_loc, 'rb') as fp :
sysPrefs = plistlib.loads(fp.read())
networks = sysPrefs['NetworkServices']
# loop through each possible network (e.g. Ethernet, Airport...)
for network in list(networks.items()):
netKey, network = network # the first part is a long identifier
if 'ProxyAutoConfigURLString' in network['Proxies']:
pacFiles.append(network['Proxies']['ProxyAutoConfigURLString'])
return list(set(pacFiles)) # remove redundant ones
def getWpadFiles():
"""Return possible pac file locations from the standard set of .wpad
locations
NB this method only uses the DNS method to search, not DHCP queries, and
so may not find all possible .pac locations.
See http://en.wikipedia.org/wiki/Web_Proxy_Autodiscovery_Protocol
"""
# pacURLs.append("http://webproxy."+domain+"/wpad.dat")
# for me finds a file that starts: function FindProxyForURL(url,host)
# dynamcially chooses a proxy based on the requested url and host; how to
# parse?
domainParts = socket.gethostname().split('.')
pacURLs = []
for ii in range(len(domainParts)):
domain = '.'.join(domainParts[ii:])
pacURLs.append("http://wpad." + domain + "/wpad.dat")
return list(set(pacURLs)) # remove redundant ones
def proxyFromPacFiles(pacURLs=None, URL=None, log=True):
"""Attempts to locate and setup a valid proxy server from pac file URLs
:Parameters:
- pacURLs : list
List of locations (URLs) to look for a pac file. This might
come from :func:`~psychopy.web.getPacFiles` or
:func:`~psychopy.web.getWpadFiles`.
- URL : string
The URL to use when testing the potential proxies within the files
:Returns:
- A urllib.request.ProxyHandler if successful (and this will have
been added as an opener to the urllib)
- False if no proxy was found in the files that allowed successful
connection
"""
    if pacURLs is None:  # if given none try to find some
pacURLs = getPacFiles()
if pacURLs == []: # if still empty search for wpad files
pacURLs = getWpadFiles()
# for each file search for valid urls and test them as proxies
for thisPacURL in pacURLs:
if log:
msg = 'proxyFromPacFiles is searching file:\n %s'
logging.debug(msg % thisPacURL)
try:
response = urllib.request.urlopen(thisPacURL, timeout=2)
except urllib.error.URLError:
if log:
logging.debug("Failed to find PAC URL '%s' " % thisPacURL)
continue
pacStr = response.read().decode('utf-8')
# find the candidate PROXY strings (valid URLS), numeric and
# non-numeric:
pattern = r"PROXY\s([^\s;,:]+:[0-9]{1,5})[^0-9]"
possProxies = re.findall(pattern, pacStr + '\n')
for thisPoss in possProxies:
proxUrl = 'http://' + thisPoss
handler = urllib.request.ProxyHandler({'http': proxUrl})
            if tryProxy(handler) is True:
if log:
logging.debug('successfully loaded: %s' % proxUrl)
opener = urllib.request.build_opener(handler)
urllib.request.install_opener(opener)
return handler
return False
def setupProxy(log=True):
"""Set up the urllib proxy if possible.
The function will use the following methods in order to try and
determine proxies:
#. standard urllib.request.urlopen (which will use any
statically-defined http-proxy settings)
#. previous stored proxy address (in prefs)
#. proxy.pac files if these have been added to system settings
#. auto-detect proxy settings (WPAD technology)
.. note:
This can take time, as each failed attempt to set up a proxy
involves trying to load a URL and timing out. Best
to do in a separate thread.
Returns
_________
True (success) or False (failure)
"""
global proxies
# try doing nothing
proxies = urllib.request.ProxyHandler(urllib.request.getproxies())
if tryProxy(proxies) is True:
if log:
logging.debug("Using standard urllib (static proxy or "
"no proxy required)")
# this will now be used globally for ALL urllib opening
urllib.request.install_opener(urllib.request.build_opener(proxies))
return 1
# try doing what we did last time
if len(prefs.connections['proxy']) > 0:
proxConnPref = {'http': prefs.connections['proxy']}
proxies = urllib.request.ProxyHandler(proxConnPref)
if tryProxy(proxies) is True:
if log:
msg = 'Using %s (from prefs)'
logging.debug(msg % prefs.connections['proxy'])
# this will now be used globally for ALL urllib opening
opener = urllib.request.build_opener(proxies)
urllib.request.install_opener(opener)
return 1
else:
if log:
logging.debug("Found a previous proxy but it didn't work")
# try finding/using a proxy.pac file
pacURLs = getPacFiles()
if log:
logging.debug("Found proxy PAC files: %s" % pacURLs)
proxies = proxyFromPacFiles(pacURLs) # installs opener, if successful
if (proxies and
hasattr(proxies, 'proxies') and
len(proxies.proxies['http']) > 0):
# save that proxy for future
prefs.connections['proxy'] = proxies.proxies['http']
prefs.saveUserPrefs()
if log:
msg = 'Using %s (from proxy PAC file)'
logging.debug(msg % prefs.connections['proxy'])
return 1
# try finding/using 'auto-detect proxy'
pacURLs = getWpadFiles()
proxies = proxyFromPacFiles(pacURLs) # installs opener, if successful
if (proxies and
hasattr(proxies, 'proxies') and
len(proxies.proxies['http']) > 0):
# save that proxy for future
prefs.connections['proxy'] = proxies.proxies['http']
prefs.saveUserPrefs()
if log:
msg = 'Using %s (from proxy auto-detect)'
logging.debug(msg % prefs.connections['proxy'])
return 1
proxies = 0
return 0
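# --- Illustrative sketch (added commentary, not original PsychoPy code) ---
# setupProxy() installs a global urllib opener on success, so later requests
# need no extra handling. As the docstring notes, it can block while probing,
# so running it in a background thread is advisable:
#
#     import threading
#     threading.Thread(target=setupProxy, kwargs={'log': False}).start()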
| 10,328 | Python | .py | 242 | 34.355372 | 94 | 0.641897 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,352 | info.py | psychopy_psychopy/psychopy/info.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""This module has tools for fetching data about the system or the current
Python process. Such info can be useful for understanding the context in which
an experiment was run.
"""
__all__ = [
'RunTimeInfo',
'getMemoryUsage',
'getRAM',
'APP_FLAG_LIST', # user might want to see these
'APP_IGNORE_LIST',
# These should be hidden, but I'm unsure if somewhere `import *` is being
# used so I'm adding them for now to prevent breakage. - mdc
'_getUserNameUID',
'_getHashGitHead',
'_getSha1hexDigest',
'_getHgVersion',
'_getSvnVersion',
'_thisProcess'
]
import sys
import os
import platform
import io
from pyglet.gl import gl_info, GLint, glGetIntegerv, GL_MAX_ELEMENTS_VERTICES
import numpy
import scipy
import matplotlib
import pyglet
try:
import ctypes
haveCtypes = True
except ImportError:
haveCtypes = False
import hashlib
import wx
import locale
import subprocess
import psutil
from psychopy import visual, logging, core, data, web
from psychopy.core import shellCall
from psychopy.platform_specific import rush
from psychopy import __version__ as psychopyVersion
# List of applications to flag as problematic while running an experiment. These
# apps running in the background consume resources (CPU, GPU and memory) which
# may interfere with a PsychoPy experiment. If these apps are allowed to run, it
# may result in poor timing, glitches, dropped frames, etc.
#
# Names that appear here are historically known to affect performance. The user
# can check if these processes are running using `RunTimeInfo()` and shut them
# down. App names are matched in a case insensitive way from the start of the
# name string obtained from `psutils`.
APP_FLAG_LIST = [
# web browsers can burn CPU cycles
'Firefox',
'Safari',
'Explorer',
'Netscape',
'Opera',
'Google Chrome',
'Dropbox',
'BitTorrent',
'iTunes', # but also matches iTunesHelper (add to ignore-list)
'mdimport',
'mdworker',
'mds', # can have high CPU
# productivity
'Office',
'KeyNote',
'Pages',
'LaunchCFMApp', # on mac, MS Office (Word etc) can be launched by this
'Skype',
'VirtualBox',
'VBoxClient', # virtual machine as host or client
'Parallels',
'Coherence',
'prl_client_app',
'prl_tools_service',
    'VMware',  # just a guess
    # gaming, may need to be started for VR support
    'Steam',
    'Oculus'
]
# Apps listed here will not be flagged if a partial match exist in
# `APP_FLAG_LIST`. This list is checked first before `RunTimeInfo()` looks for a
# name in `APP_FLAG_LIST`. You can also add names here to eliminate ambiguity,
# for instance if 'Dropbox' is in `APP_FLAG_LIST`, then `DropboxUpdate` will
# also be flagged. You can prevent this by adding 'DropboxUpdate' to
# `APP_IGNORE_LIST`.
APP_IGNORE_LIST = [
# shells
'ps',
'login',
'-tcsh',
'bash',
# helpers and updaters
'iTunesHelper',
'DropboxUpdate',
'OfficeClickToRun'
]
class RunTimeInfo(dict):
"""Returns a snapshot of your configuration at run-time, for immediate or
archival use.
Returns a dict-like object with info about PsychoPy, your experiment script,
the system & OS, your window and monitor settings (if any), python &
packages, and openGL.
If you want to skip testing the refresh rate, use 'refreshTest=None'
Example usage: see runtimeInfo.py in coder demos.
Parameters
----------
win : :class:`~psychopy.visual.Window`, False or None
What window to use for refresh rate testing (if any) and settings.
        `None` -> temporary window using defaults; `False` -> no window is
        created, used, or profiled; otherwise pass a `Window()` instance you
        have already created.
author : str or None
`None` will try to autodetect first __author__ in sys.argv[0], whereas
a `str` being user-supplied author info (of an experiment).
version : str or None
`None` try to autodetect first __version__ in sys.argv[0] or `str` being
the user-supplied version info (of an experiment).
verbose : bool
Show additional information. Default is `False`.
refreshTest : str, bool or None
        `True` or `'grating'` = assess the average, median, and SD of the
        refresh interval over a series of win.flip()s, using
        visual.getMsPerFrame(); `'grating'` shows a visual during the
        assessment, `True` assesses without one. Default is `'grating'`.
userProcsDetailed: bool
Get details about concurrent user's processes (command, process-ID).
Default is `False`.
Returns
-------
A flat dict (but with several groups based on key names):
psychopy : version, rush() availability
psychopyVersion, psychopyHaveExtRush, git branch and current commit hash
if available
experiment : author, version, directory, name, current time-stamp, SHA1
digest, VCS info (if any, svn or hg only),
experimentAuthor, experimentVersion, ...
system : hostname, platform, user login, count of users,
user process info (count, cmd + pid), flagged processes
systemHostname, systemPlatform, ...
window : (see output; many details about the refresh rate, window,
and monitor; units are noted)
windowWinType, windowWaitBlanking, ...windowRefreshTimeSD_ms,
... windowMonitor.<details>, ...
python : version of python, versions of key packages
(wx, numpy, scipy, matplotlib, pyglet, pygame)
pythonVersion, pythonScipyVersion, ...
openGL : version, vendor, rendering engine, plus info on whether
several extensions are present
openGLVersion, ..., openGLextGL_EXT_framebuffer_object, ...
"""
# Author: 2010 written by Jeremy Gray, input from Jon Peirce and
# Alex Holcombe
def __init__(self, author=None, version=None, win=None,
refreshTest='grating', userProcsDetailed=False,
verbose=False):
# this will cause an object to be created with all the same methods as
# a dict
dict.__init__(self)
self['psychopyVersion'] = psychopyVersion
# NB: this looks weird, but avoids setting high-priority incidentally
self['psychopyHaveExtRush'] = rush(False)
d = os.path.abspath(os.path.dirname(__file__))
githash = _getHashGitHead(d) # should be .../psychopy/psychopy/
if githash:
self['psychopyGitHead'] = githash
self._setExperimentInfo(author, version, verbose)
self._setSystemInfo() # current user, locale, other software
self._setCurrentProcessInfo(verbose, userProcsDetailed)
# need a window for frame-timing, and some openGL drivers want
# a window open
if win is None: # make a temporary window, later close it
win = visual.Window(
fullscr=True, monitor="testMonitor", autoLog=False)
refreshTest = 'grating'
usingTempWin = True
elif win != False:
# we were passed a window instance, use it:
usingTempWin = False
self.winautoLog = win.autoLog
win.autoLog = False
else: # don't want any window
usingTempWin = False
if win:
self._setWindowInfo(win, verbose, refreshTest, usingTempWin)
self['pythonVersion'] = sys.version.split()[0]
if verbose:
self._setPythonInfo()
if win:
self._setOpenGLInfo()
if usingTempWin:
win.close() # close after doing openGL
elif win != False:
win.autoLog = self.winautoLog # restore
def _setExperimentInfo(self, author, version, verbose):
"""Auto-detect __author__ and __version__ in sys.argv[0] (= the
# users's script)
"""
if not author or not version:
lines = ''
if os.path.isfile(sys.argv[0]):
with io.open(sys.argv[0], 'r', encoding='utf-8-sig') as f:
lines = f.read()
if not author and '__author__' in lines:
linespl = lines.splitlines()
while linespl[0].find('__author__') == -1:
linespl.pop(0)
auth = linespl[0]
if len(auth) and '=' in auth:
try:
author = str(eval(auth[auth.find('=') + 1:]))
except Exception:
pass
if not version and '__version__' in lines:
linespl = lines.splitlines()
while linespl[0].find('__version__') == -1:
linespl.pop(0)
ver = linespl[0]
if len(ver) and ver.find('=') > 0:
try:
version = str(eval(ver[ver.find('=') + 1:]))
except Exception:
pass
if author or verbose:
self['experimentAuthor'] = author
if version or verbose:
self['experimentAuthVersion'] = version
# script identity & integrity information:
self['experimentScript'] = os.path.basename(sys.argv[0]) # file name
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
self['experimentScript.directory'] = scriptDir
# sha1 digest, text-format compatibility
scriptPath = os.path.abspath(sys.argv[0])
key = 'experimentScript.digestSHA1'
self[key] = _getSha1hexDigest(scriptPath, isfile=True)
# subversion revision?
try:
svnrev, last, url = _getSvnVersion(scriptPath) # svn revision
if svnrev: # or verbose:
self['experimentScript.svnRevision'] = svnrev
self['experimentScript.svnRevLast'] = last
self['experimentScript.svnRevURL'] = url
except Exception:
pass
        # mercurial revision?
try:
hgChangeSet = _getHgVersion(scriptPath)
if hgChangeSet: # or verbose:
self['experimentScript.hgChangeSet'] = hgChangeSet
except Exception:
pass
# when was this run?
self['experimentRunTime.epoch'] = core.getAbsTime()
fmt = "%Y_%m_%d %H:%M (Year_Month_Day Hour:Min)"
self['experimentRunTime'] = data.getDateStr(format=fmt)
def _setSystemInfo(self):
"""System info
"""
# machine name
self['systemHostName'] = platform.node()
self['systemMemTotalRAM'], self['systemMemFreeRAM'] = getRAM()
# locale information:
# (None, None) -> str
loc = '.'.join([str(x) for x in locale.getlocale()])
if loc == 'None.None':
loc = locale.setlocale(locale.LC_ALL, '')
# == the locale in use, from OS or user-pref
self['systemLocale'] = loc
# platform name, etc
if sys.platform in ['darwin']:
OSXver, _junk, architecture = platform.mac_ver()
platInfo = 'darwin ' + OSXver + ' ' + architecture
# powerSource = ...
elif sys.platform.startswith('linux'):
platInfo = 'linux ' + platform.release()
# powerSource = ...
elif sys.platform in ['win32']:
platInfo = 'windowsversion=' + repr(sys.getwindowsversion())
# powerSource = ...
else:
platInfo = ' [?]'
# powerSource = ...
self['systemPlatform'] = platInfo
# self['systemPowerSource'] = powerSource
# count all unique people (user IDs logged in), and find current user
# name & UID
self['systemUser'], self['systemUserID'] = _getUserNameUID()
try:
users = shellCall("who -q").splitlines()[0].split()
self['systemUsersCount'] = len(set(users))
except Exception:
self['systemUsersCount'] = False
# when last rebooted?
try:
lastboot = shellCall("who -b").split()
self['systemRebooted'] = ' '.join(lastboot[2:])
except Exception: # windows
sysInfo = shellCall('systeminfo').splitlines()
lastboot = [line for line in sysInfo if line.startswith(
"System Up Time") or line.startswith("System Boot Time")]
lastboot += ['[?]'] # put something in the list just in case
self['systemRebooted'] = lastboot[0].strip()
        # R (and rpy2) for stats:
try:
Rver = shellCall(["R", "--version"])
Rversion = Rver.splitlines()[0]
if Rversion.startswith('R version'):
self['systemRavailable'] = Rversion.strip()
try:
import rpy2
self['systemRpy2'] = rpy2.__version__
except ImportError:
pass
except Exception:
pass
# encryption / security tools:
try:
vers, se = shellCall('openssl version', stderr=True)
if se:
vers = str(vers) + se.replace('\n', ' ')[:80]
if vers.strip():
self['systemSec.OpenSSLVersion'] = vers
except Exception:
pass
try:
so = shellCall(['gpg', '--version'])
if so.find('GnuPG') > -1:
self['systemSec.GPGVersion'] = so.splitlines()[0]
_home = [line.replace('Home:', '').lstrip()
for line in so.splitlines()
if line.startswith('Home:')]
self['systemSec.GPGHome'] = ''.join(_home)
except Exception:
pass
try:
import ssl
self['systemSec.pythonSSL'] = True
except ImportError:
self['systemSec.pythonSSL'] = False
# pyo for sound:
import importlib.util
if importlib.util.find_spec('pyo') is not None:
self['systemPyoVersion'] = '-'
# flac (free lossless audio codec) for google-speech:
flacv = ''
if sys.platform == 'win32':
flacexe = 'C:\\Program Files\\FLAC\\flac.exe'
if os.path.exists(flacexe):
flacv = core.shellCall(flacexe + ' --version')
else:
flac, se = core.shellCall('which flac', stderr=True)
            if not se and flac and 'Command not found' not in flac:
flacv = core.shellCall('flac --version')
if flacv:
self['systemFlacVersion'] = flacv
# detect internet access or fail quickly:
# web.setupProxy() & web.testProxy(web.proxies) # can be slow
# to fail if there's no connection
self['systemHaveInternetAccess'] = web.haveInternetAccess()
if not self['systemHaveInternetAccess']:
self['systemHaveInternetAccess'] = 'False (proxies not attempted)'
def _setCurrentProcessInfo(self, verbose=False, userProcsDetailed=False):
"""What other processes are currently active for this user?
"""
systemProcPsu = [] # found processes
systemProcPsuFlagged = [] # processes which are flagged
systemUserProcFlaggedPID = [] # PIDs of those processes
# lower case these names for matching
appFlagListLowerCase = [pn.lower() for pn in APP_FLAG_LIST]
appIgnoreListLowerCase = [pn.lower() for pn in APP_IGNORE_LIST]
# iterate over processes retrieved by psutil
for proc in psutil.process_iter(attrs=None, ad_value=None):
try:
processFullName = proc.name() # get process name
processPid = proc.pid
processName = processFullName.lower() # use for matching only
except (psutil.AccessDenied, psutil.NoSuchProcess,
psutil.ZombieProcess):
continue # skip iteration on exception
# check if process is in ignore list, skip if so
for appIgnore in appIgnoreListLowerCase:
# case-insensitive match from the start of string
if processName.startswith(appIgnore):
break
else:
# if we get here, the name isn't in the ignore list
for appFlag in appFlagListLowerCase:
if processName.startswith(appFlag):
# append actual name and PID to output lists
systemProcPsuFlagged.append(processFullName)
systemUserProcFlaggedPID.append(processPid)
break
systemProcPsu.append(processName)
# add items to dictionary
self['systemUserProcCount'] = len(systemProcPsu)
self['systemUserProcFlagged'] = systemProcPsuFlagged
# if the user wants more ...
if verbose and userProcsDetailed:
self['systemUserProcCmdPid'] = systemProcPsu # is this right?
self['systemUserProcFlaggedPID'] = systemUserProcFlaggedPID
# CPU speed (will depend on system busy-ness)
d = numpy.array(numpy.linspace(0., 1., 1000000))
t0 = core.getTime()
numpy.std(d)
t = core.getTime() - t0
del d
self['systemTimeNumpySD1000000_sec'] = t
def _setWindowInfo(self, win, verbose=False, refreshTest='grating',
usingTempWin=True):
"""Find and store info about the window: refresh rate,
configuration info.
"""
if refreshTest in ['grating', True]:
wantVisual = bool(refreshTest == 'grating')
a, s, m = visual.getMsPerFrame(win, nFrames=120,
showVisual=wantVisual)
self['windowRefreshTimeAvg_ms'] = a
self['windowRefreshTimeMedian_ms'] = m
self['windowRefreshTimeSD_ms'] = s
if usingTempWin:
return
# These 'configuration lists' control what attributes are reported.
# All desired attributes/properties need a legal internal name,
# e.g., win.winType. If an attr is callable, its gets called with
# no arguments, e.g., win.monitor.getWidth()
winAttrList = ['winType', '_isFullScr', 'units',
'monitor', 'pos', 'screen', 'rgb', 'size']
winAttrListVerbose = ['allowGUI', 'useNativeGamma',
'recordFrameIntervals', 'waitBlanking',
'_haveShaders', 'refreshThreshold']
if verbose:
winAttrList += winAttrListVerbose
monAttrList = ['name', 'getDistance', 'getWidth', 'currentCalibName']
monAttrListVerbose = ['getGammaGrid', 'getLinearizeMethod',
'_gammaInterpolator', '_gammaInterpolator2']
if verbose:
monAttrList += monAttrListVerbose
if 'monitor' in winAttrList:
# replace 'monitor' with all desired monitor.<attribute>
# retain list-position info, put monitor stuff there
i = winAttrList.index('monitor')
del winAttrList[i]
for monAttr in monAttrList:
winAttrList.insert(i, 'monitor.' + monAttr)
i += 1
for winAttr in winAttrList:
try:
attrValue = eval('win.' + winAttr)
except AttributeError:
msg = ('AttributeError in RuntimeInfo._setWindowInfo(): '
'Window instance has no attribute')
                logging.warning('%s: %s' % (msg, winAttr))
continue
if hasattr(attrValue, '__call__'):
try:
a = attrValue()
attrValue = a
except Exception:
msg = ('Warning: could not get a value from win. '
'%s() (expects arguments?)' % winAttr)
print(msg)
continue
while winAttr[0] == '_':
winAttr = winAttr[1:]
winAttr = winAttr[0].capitalize() + winAttr[1:]
winAttr = winAttr.replace('Monitor._', 'Monitor.')
if winAttr in ('Pos', 'Size'):
winAttr += '_pix'
if winAttr in ('Monitor.getWidth', 'Monitor.getDistance'):
winAttr += '_cm'
            if winAttr == 'RefreshThreshold':
winAttr += '_sec'
self['window' + winAttr] = attrValue
def _setPythonInfo(self):
"""External python packages, python details
"""
self['pythonNumpyVersion'] = numpy.__version__
self['pythonScipyVersion'] = scipy.__version__
self['pythonWxVersion'] = wx.version()
self['pythonMatplotlibVersion'] = matplotlib.__version__
self['pythonPygletVersion'] = pyglet.version
try:
from pygame import __version__ as pygameVersion
except ImportError:
pygameVersion = '(no pygame)'
self['pythonPygameVersion'] = pygameVersion
# Python gory details:
self['pythonFullVersion'] = sys.version.replace('\n', ' ')
self['pythonExecutable'] = sys.executable
def _setOpenGLInfo(self):
# OpenGL info:
self['openGLVendor'] = gl_info.get_vendor()
self['openGLRenderingEngine'] = gl_info.get_renderer()
self['openGLVersion'] = gl_info.get_version()
GLextensionsOfInterest = ('GL_ARB_multitexture',
'GL_EXT_framebuffer_object',
'GL_ARB_fragment_program',
'GL_ARB_shader_objects',
'GL_ARB_vertex_shader',
'GL_ARB_texture_non_power_of_two',
'GL_ARB_texture_float', 'GL_STEREO')
for ext in GLextensionsOfInterest:
self['openGLext.' + ext] = bool(gl_info.have_extension(ext))
maxVerts = GLint()
glGetIntegerv(GL_MAX_ELEMENTS_VERTICES, maxVerts)
self['openGLmaxVerticesInVertexArray'] = maxVerts.value
def __repr__(self):
"""Return a string that is a legal python (dict), and close
to YAML, .ini, and configObj syntax
"""
info = '{\n#[ PsychoPy3 RuntimeInfoStart ]\n'
sections = ['PsychoPy', 'Experiment',
'System', 'Window', 'Python', 'OpenGL']
for sect in sections:
info += ' #[[ %s ]] #---------\n' % (sect)
            sectKeys = [k for k in list(self.keys())
                        if k.lower().find(sect.lower()) == 0]
# get keys for items matching this section label;
# use reverse-alpha order if easier to read:
revSet = ('PsychoPy', 'Window', 'Python', 'OpenGL')
sectKeys.sort(reverse=bool(sect in revSet))
for k in sectKeys:
selfk = self[k] # alter a copy for display purposes
try:
                    if isinstance(selfk, str):
selfk = selfk.replace('"', '').replace('\n', ' ')
elif '_ms' in k: # type(selfk) == type(0.123):
selfk = "%.3f" % selfk
elif '_sec' in k:
selfk = "%.4f" % selfk
elif '_cm' in k:
selfk = "%.1f" % selfk
except Exception:
pass
# then strcat unique proc names
if (k in ('systemUserProcFlagged', 'systemUserProcCmdPid') and
selfk is not None and
len(selfk)):
prSet = []
for pr in self[k]: # str -> list of lists
if ' ' in pr: # add single quotes if file has spaces
pr = "'" + pr + "'"
# first item in sublist is proc name (CMD)
prSet += [pr]
selfk = ' '.join(list(set(prSet)))
# suppress display PID info -- useful at run-time, never useful
# in an archive
if k != 'systemUserProcFlaggedPID':
info += ' "%s": "%s",\n' % (k, selfk)
info += '#[ PsychoPy3 RuntimeInfoEnd ]\n}\n'
return info
def __str__(self):
"""Return a string intended for printing to a log file
"""
infoLines = self.__repr__()
# remove enclosing braces from repr
info = infoLines.splitlines()[1:-1]
for i, line in enumerate(info):
if 'openGLext' in line:
# swap order for OpenGL extensions -- much easier to read
tmp = line.split(':')
info[i] = ': '.join([' ' + tmp[1].replace(',', ''),
tmp[0].replace(' ', '') + ','])
info[i] = info[i].rstrip(',')
info = '\n'.join(info).replace('"', '') + '\n'
return info
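# A minimal usage sketch (illustrative only, not part of the public API):
# win=False skips window creation and OpenGL profiling entirely, so this is
# safe to call on a headless machine; pass an existing Window for full details.
def _exampleRunTimeInfo():
    info = RunTimeInfo(win=False, refreshTest=None, verbose=True)
    print(info)  # __str__ gives a log-friendly report
    return info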
def _getHashGitHead(gdir='.'):
if not os.path.isdir(gdir):
raise OSError('not a directory')
try:
git_hash = subprocess.check_output('git rev-parse --verify HEAD',
cwd=gdir,
shell=True, stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
return None # no git
git_branches = subprocess.check_output('git branch', cwd=gdir, shell=True)
git_branch = [line.split()[1] for line in git_branches.splitlines()
if line.startswith(b'*')]
if len(git_branch):
return "{} {}".format(git_branch[0], git_hash.strip())
else:
return '(unknown branch)'
def _getUserNameUID():
"""Return user name, UID.
UID values can be used to infer admin-level:
-1=undefined, 0=full admin/root,
>499=assume non-admin/root (>999 on debian-based)
:Author:
- 2010 written by Jeremy Gray
"""
user = os.environ.get('USER', None) or os.environ.get('USERNAME', None)
if not user:
        return 'undefined', -1
if sys.platform not in ['win32']:
uid = shellCall('id -u')
else:
uid = '1000'
if haveCtypes and ctypes.windll.shell32.IsUserAnAdmin():
uid = '0'
return str(user), int(uid)
def _getSha1hexDigest(thing, isfile=False):
"""Returns base64 / hex encoded sha1 digest of str(thing), or
of a file contents. Return None if a file is requested but no such
file exists
:Author:
- 2010 Jeremy Gray; updated 2011 to be more explicit,
- 2012 to remove sha.new()
>>> _getSha1hexDigest('1')
'356a192b7913b04c54574d18c28d46e6395428ab'
>>> _getSha1hexDigest(1)
'356a192b7913b04c54574d18c28d46e6395428ab'
"""
digester = hashlib.sha1()
if isfile:
filename = thing
if os.path.isfile(filename):
f = open(filename, 'rb')
# check file size < available RAM first? or update in chunks?
digester.update(f.read())
f.close()
else:
return None
else:
        digester.update(str(thing).encode('utf-8'))
return digester.hexdigest()
def getRAM():
"""Return system's physical RAM & available RAM, in M.
"""
totalRAM, available = psutil.virtual_memory()[0:2]
return totalRAM / 1048576., available / 1048576.
# faster to get the current process only once:
_thisProcess = psutil.Process()
def getMemoryUsage():
"""Get the memory (RAM) currently used by this Python process, in M.
"""
return _thisProcess.memory_info()[0] / 1048576.
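# A minimal self-check sketch (illustrative only): report memory figures
# without creating a window or profiling the system.
if __name__ == "__main__":
    _total, _free = getRAM()
    print("RAM total=%.0fMB free=%.0fMB; this process uses %.1fMB"
          % (_total, _free, getMemoryUsage()))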
| 27,885 | Python | .py | 642 | 32.685358 | 80 | 0.579548 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,353 | session.py | psychopy_psychopy/psychopy/session.py |
import asyncio
import importlib
import os
import sys
import shutil
import threading
import time
import json
import traceback
from functools import partial
from pathlib import Path
from psychopy import experiment, logging, constants, data, core, __version__
from psychopy.hardware.manager import DeviceManager, deviceManager
from psychopy.hardware.listener import loop as listenerLoop
from psychopy.tools.arraytools import AliasDict
from psychopy.localization import _translate
class SessionQueue:
def __init__(self):
# start off alive
self._alive = True
# blank list of partials to run
self.queue = []
# blank list of outputs
self.results = []
# blank list of Sessions to maintain
self.sessions = []
def start(self):
"""
Start a while loop which will execute any methods added to this queue via
queueTask as well as the onIdle functions of any sessions registered with
this queue. This while loop will keep running until `stop` is called, so
only use this method if you're running with multiple threads!
"""
self._alive = True
# process any calls
while self._alive:
# empty the queue of any tasks
while len(self.queue):
# run the task
task = self.queue.pop(0)
try:
retval = task()
except Exception as err:
# send any errors to server
tb = traceback.format_exception(type(err), err, err.__traceback__)
output = json.dumps({
'type': "error",
'msg': "".join(tb)
})
else:
# process output
output = {
'method': task.func.__name__,
'args': task.args,
'kwargs': task.keywords,
'returned': retval
}
self.results.append(output)
# Send to liaisons
for session in self.sessions:
session.sendToLiaison(output)
# while idle, run idle functions for each session
for session in self.sessions:
session.onIdle()
# take a little time to sleep so tasks on other threads can execute
time.sleep(0.1)
def stop(self):
"""
Stop this queue.
"""
# stop running the queue
self._alive = False
def queueTask(self, method, *args, **kwargs):
"""
Add a task to this queue, to be executed when next possible.
Parameters
----------
method : function
Method to execute
args : tuple
Tuple of positional arguments to call `method` with.
kwargs : dict
Dict of named arguments to call `method` with.
Returns
-------
bool
True if added successfully.
"""
# create partial from supplied method, args and kwargs
task = partial(method, *args, **kwargs)
# add partial to queue
self.queue.append(task)
return True
def connectSession(self, session):
"""
Associate a Session object with this queue, meaning that its onIdle
method will be called whenever the queue is not running anything else.
Parameters
----------
session : Session
Session to associate with this queue.
Returns
-------
bool
True if associated successfully.
"""
        # add session to list of sessions whose onIdle function to call
        self.sessions.append(session)
        return True
def disconnectSession(self, session):
"""
Remove association between a Session object and this queue, meaning
that its onIdle method will not be called by the queue.
Parameters
----------
session : Session
Session to disconnect from this queue.
Returns
-------
bool
            True if disconnected successfully.
"""
        # remove session from list of linked sessions
        if session in self.sessions:
            i = self.sessions.index(session)
            self.sessions.pop(i)
            return True
        return False
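# A minimal sketch of driving a queue across threads (illustrative only,
# not part of the public API): tasks queued from any thread are executed by
# start() on the thread that called it, and their outputs land in `results`.
def _exampleQueueUsage():
    queue = SessionQueue()
    queue.queueTask(print, "hello from the queue")
    # stop the loop shortly, from a timer thread, so start() can return here
    threading.Timer(0.5, queue.stop).start()
    queue.start()
    return queue.results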
_queue = SessionQueue()
class Session:
"""
    A Session is an object from which you can run multiple PsychoPy experiments, so long
as they are stored within the same folder. Session uses a persistent Window
and inputs across experiments, meaning that you don't have to keep closing
and reopening windows to run multiple experiments.
Through the use of multithreading, an experiment running via a Session can
be sent commands and have variables changed while running. Methods of
Session can be called from a second thread, meaning they don't have to wait
for `runExperiment` to return on the main thread. For example, you could
pause an experiment after 10s like so:
```
# define a function to run in a second thread
def stopAfter10s(thisSession):
# wait 10s
time.sleep(10)
# pause
thisSession.pauseExperiment()
# create a second thread
thread = threading.Thread(
target=stopAfter10s,
args=(thisSession,)
)
# start the second thread
thread.start()
# run the experiment (in main thread)
thisSession.runExperiment("testExperiment")
```
When calling methods of Session which have the parameter `blocking` from
outside of the main thread, you can use `blocking=False` to force them to
return immediately and, instead of executing, add themselves to a queue to
be executed in the main thread by a while loop within the `start` function.
This is important for methods like `runExperiment` or
`setupWindowFromParams` which use OpenGL and so need to be run in the
main thread. For example, you could alternatively run the code above like
this:
```
# define a function to run in a second thread
def stopAfter10s(thisSession):
# start the experiment in the main thread
thisSession.runExperiment("testExperiment", blocking=False)
# wait 10s
time.sleep(10)
# pause
thisSession.pauseExperiment()
# create a second thread
thread = threading.Thread(
target=stopAfter10s,
args=(thisSession,)
)
# start the second thread
thread.start()
# start the Session so that non-blocking methods are executed
thisSession.start()
```
Parameters
----------
root : str or pathlib.Path
Root folder for this session - should contain all of the experiments to be run.
liaison : liaison.WebSocketServer
Liaison server from which to receive run commands, if running via a liaison setup.
loggingLevel : str
How much output do you want in the log files? Should be one of the following:
- 'error'
- 'warning'
- 'data'
- 'exp'
- 'info'
- 'debug'
('error' is fewest messages, 'debug' is most)
    params : dict or None
        Dictionary of persistent parameters for this Session; these override
        experiment-level expInfo values where keys overlap. Leave as None for a blank dict.
    win : psychopy.visual.Window, dict, str or None
        Window in which to run experiments this session. Supply a dict of parameters to make a Window
        from them, or supply the name of an experiment to use the `setupWindow` method from that experiment.
experiments : dict or None
Dict of name:experiment pairs which this Session can run. Each should be the file path of a .psyexp
file, contained somewhere within the folder supplied for `root`. Paths can be absolute or
relative to the root folder. Leave as None for a blank dict, experiments can be added
later on via `addExperiment()`.
restMsg : str
        Message to display in between experiments.
"""
def __init__(
self,
root,
dataDir=None,
clock="iso",
win=None,
experiments=None,
loggingLevel="info",
priorityThreshold=constants.priority.EXCLUDE+1,
params=None,
liaison=None,
restMsg="Rest..."
):
# Store root and add to Python path
self.root = Path(root)
sys.path.insert(1, str(self.root))
# store rest message
self.restMsg = restMsg
# Create data folder
if dataDir is None:
dataDir = self.root / "data" / str(core.Clock().getTime(format="%Y-%m-%d_%H-%M-%S-%f"))
dataDir = Path(dataDir)
if not dataDir.is_dir():
os.makedirs(str(dataDir), exist_ok=True)
# Store data folder
self.dataDir = dataDir
# Create log file
wallTime = data.getDateStr(fractionalSecondDigits=6)
self.logFile = logging.LogFile(
dataDir / f"session_{wallTime}.log",
level=getattr(logging, loggingLevel.upper())
)
if liaison is not None:
liaison.logger.addTarget(self.logFile)
# Store priority threshold
self.priorityThreshold = priorityThreshold
# Add experiments
self.experiments = {}
self.experimentObjects = {}
if experiments is not None:
for nm, exp in experiments.items():
self.addExperiment(exp, key=nm)
# Store/create window object
self.win = win
if isinstance(win, dict):
from psychopy import visual
self.win = visual.Window(**win)
        elif isinstance(win, str) and win in self.experiments:
# If win is the name of an experiment, setup from that experiment's method
self.win = None
self.setupWindowFromExperiment(win)
# Setup Session clock
if clock in (None, "float"):
clock = core.Clock()
elif clock == "iso":
clock = core.Clock(format=str)
elif isinstance(clock, str):
clock = core.Clock(format=clock)
self.sessionClock = clock
# make sure we have a default keyboard
if DeviceManager.getDevice("defaultKeyboard") is None:
DeviceManager.addDevice(
deviceClass="psychopy.hardware.keyboard.KeyboardDevice",
deviceName="defaultKeyboard",
)
# Store params as an aliased dict
if params is None:
params = {}
self.params = AliasDict(params)
# List of ExperimentHandlers from previous runs
self.runs = []
# Store ref to liaison object
self.liaison = liaison
# Start off with no current experiment
self.currentExperiment = None
def start(self):
"""
Start this Session running its queue. Not recommended unless running
across multiple threads.
Returns
-------
bool
True if this Session was started safely.
"""
# register self with queue
_queue.connectSession(self)
        # start queue if we're in the main thread and it's not alive yet
if threading.current_thread() == threading.main_thread() and not _queue._alive:
_queue.start()
return True
def onIdle(self):
"""
Function to be called continuously while a SessionQueue is idle.
"""
if self.win is not None and not self.win._closed:
# Show waiting message
self.win.showMessage(self.restMsg)
self.win.color = "grey"
# Flip the screen
self.win.flip()
# Flush log
self.logFile.logger.flush()
def stop(self):
"""
Stop this Session running the queue. Not recommended unless running
across multiple threads.
"""
_queue.disconnectSession(self)
def addExperiment(self, file, key=None, folder=None):
"""
Register an experiment with this Session object, to be referred to
later by a given key.
Parameters
----------
file : str, Path
Path to the experiment (psyexp) file or script (py) of a Python
experiment.
key : str
Key to refer to this experiment by once added. Leave as None to use
file path relative to session root.
folder : str, Path
Folder for this project, if adding from outside of the root folder
this entire folder will be moved. Leave as None to use the parent
folder of `file`.
Returns
-------
bool or None
True if the operation completed successfully
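        A minimal usage sketch (the file path and key here are illustrative,
        assuming `exp1/exp1.psyexp` exists under the session root):
        ```
        session.addExperiment("exp1/exp1.psyexp", key="exp1")
        ```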
"""
# Path-ise file
file = Path(file)
if not file.is_absolute():
# If relative, treat as relative to root
file = self.root / file
# Get project folder if not specified
if folder is None:
folder = file.parent
# If folder isn't within root, copy it to root and show a warning
if not str(folder).startswith(str(self.root)):
# Warn user that some files are going to be copied
logging.warning(_translate(
"Experiment '{}' is located outside of the root folder for this Session. All files from its "
"experiment folder ('{}') will be copied to the root folder and the experiment will run "
"from there."
).format(file.stem, folder.stem))
# Create new folder
newFolder = self.root / folder.stem
# Copy files to it
shutil.copytree(
src=str(folder),
dst=str(newFolder),
dirs_exist_ok=True
)
# Store new locations
file = newFolder / file.relative_to(folder)
folder = newFolder
            # Notify user that files have been copied
            logging.info(_translate(
                "Experiment '{}' and its experiment folder ('{}') have been copied to {}"
            ).format(file.stem, folder.stem, newFolder))
# Initialise as module
moduleInitFile = (folder / "__init__.py")
if not moduleInitFile.is_file():
moduleInitFile.write_text("")
# Construct relative path starting from root
relPath = []
for parent in file.relative_to(self.root).parents:
if parent.stem:
relPath.append(parent.stem)
relPath.reverse()
# Add experiment name
relPath.append(file.stem)
# Join with . so it's a valid import path
importPath = ".".join(relPath)
        # Handle if key is None (must be resolved before storing objects by key)
        if key is None:
            key = str(file.relative_to(self.root))
        # Write experiment as Python script
        pyFile = file.parent / (file.stem + ".py")
if "psyexp" in file.suffix:
# Load experiment
exp = experiment.Experiment()
exp.loadFromXML(file)
# Make sure useVersion is off
exp.settings.params['Use version'].val = ""
# Write script
script = exp.writeScript(target="PsychoPy")
pyFile.write_text(script, encoding="utf8")
# Store experiment object
self.experimentObjects[key] = exp
else:
# if no experiment object, store None
self.experimentObjects[key] = None
# Check that first part of import path isn't the name of an already existing module
try:
isPackage = importlib.import_module(relPath[0])
# If we imported successfully, check that the module imported is in the root dir
if not hasattr(isPackage, "__file__") or not isPackage.__file__.startswith(str(self.root)):
raise NameError(_translate(
"Experiment could not be loaded as name of folder {} is also the name of an installed Python "
"package. Please rename."
).format(self.root / relPath[0]))
except ImportError:
# If we can't import, it's not a package and so we're good!
pass
# Import python file
self.experiments[key] = importlib.import_module(importPath)
return True
def getStatus(self):
"""
        Get an overall status flag for this Session. Will be one of the following:
Returns
-------
int
            A value from `psychopy.constants`, either:
- NOT_STARTED: If no experiment is running
- STARTED: If an experiment is running
- PAUSED: If an experiment is paused
- FINISHED: If an experiment is in the process of terminating
"""
if self.currentExperiment is None:
# If no current experiment, return NOT_STARTED
return constants.NOT_STARTED
else:
# Otherwise, return status of experiment handler
return self.currentExperiment.status
def getPsychoPyVersion(self):
return __version__
def getTime(self, format=str):
"""
Get time from this Session's clock object.
Parameters
----------
format : type, str or None
Can be either:
- `float`: Time will return as a float as number of seconds
- time format codes: Time will return as a string in that format, as in time.strftime
- `str`: Time will return as a string in ISO 8601 (YYYY-MM-DD_HH:MM:SS.mmmmmmZZZZ)
- `None`: Will use the Session clock object's `defaultStyle` attribute
Returns
-------
str or float
Time in format requested.
"""
return self.sessionClock.getTime(format=format)
def getExpInfoFromExperiment(self, key, sessionParams=True):
"""
Get the global-level expInfo object from one of this Session's experiments. This will contain all of
the keys needed for this experiment, alongside their default values.
Parameters
----------
key : str
Key by which the experiment is stored (see `.addExperiment`).
sessionParams : bool
Should expInfo be extended with params from the Session, overriding experiment params
where relevant (True, default)? Or return expInfo as it is in the experiment (False)?
Returns
-------
dict
Experiment info dict
"""
# Get params from experiment
expInfo = self.experiments[key].expInfo
if sessionParams:
# If alias of a key in params exists in expInfo, delete it
            for alias in self.params.aliases:
                if alias in expInfo:
                    del expInfo[alias]
            # Replace with Session params
            for paramKey in self.params:
                expInfo[paramKey] = self.params[paramKey]
return expInfo
def showExpInfoDlgFromExperiment(self, key, expInfo=None):
"""
        Update expInfo for this Session via the `showExpInfoDlg` method from one of this Session's experiments.
Parameters
----------
key : str
Key by which the experiment is stored (see `.addExperiment`).
expInfo : dict
Information about the experiment, created by the `setupExpInfo` function.
Returns
-------
bool or None
True if the operation completed successfully
"""
if expInfo is None:
expInfo = self.getExpInfoFromExperiment(key)
# Run the expInfo method
expInfo = self.experiments[key].showExpInfoDlg(expInfo=expInfo)
return expInfo
def setCurrentExpInfoItem(self, key, value):
"""
Set the value of a key (or set of keys) from the current expInfo dict.
Parameters
----------
key : str or Iterable[str]
Key or list of keys whose value or values to set.
value : object or Iterable[str]
Value or values to set the key to. If one value is given along with multiple keys, all
keys will be set to that value. Otherwise, the number of values should match the number
of keys.
Returns
-------
bool
True if operation completed successfully
"""
# get expInfo dict
expInfo = self.getCurrentExpInfo()
# return False if there is none
if expInfo is False:
return expInfo
# wrap key in list
if not isinstance(key, (list, tuple)):
key = [key]
# wrap value in a list and extend it to match length of key
if not isinstance(value, (list, tuple)):
value = [value] * len(key)
# set values
for subkey, subval in zip(key, value):
expInfo[subkey] = subval
def getCurrentExpInfoItem(self, key):
"""
Get the value of a key (or set of keys) from the current expInfo dict.
Parameters
----------
key : str or Iterable[str]
            Key or keys to get values of from the expInfo dict
Returns
-------
object, dict{str:object} or False
If key was a string, the value of this key in expInfo. If key was a list of strings, a dict of key:value
pairs for each key in the list. If no experiment is running or the process can't complete, False.
"""
# get expInfo dict
expInfo = self.getCurrentExpInfo()
# return False if there is none
if expInfo is False:
return expInfo
        # if given a list of keys, get subset
        if isinstance(key, (list, tuple)):
            subset = {}
            for subkey in key:
                subset[subkey] = expInfo[subkey]
            return subset
        # if given a single key, get it
        if key in expInfo:
            return expInfo[key]
# if we've not returned yet, something is up, so return False
return False
def updateCurrentExpInfo(self, other):
"""
Update key:value pairs in the current expInfo dict from another dict.
Parameters
----------
other : dict
key:value pairs to update dict from.
Returns
-------
bool
True if operation completed successfully
"""
# get expInfo dict
expInfo = self.getCurrentExpInfo()
# return False if there is none
if expInfo is False:
return expInfo
# set each key
for key, value in other.items():
expInfo[key] = value
return True
def getCurrentExpInfo(self):
"""
Get the `expInfo` dict for the currently running experiment.
Returns
-------
dict or False
The `expInfo` for the currently running experiment, or False if no experiment is running.
"""
# if no experiment is currently running, return False
if self.currentExperiment is None:
return False
# get expInfo from ExperimentHandler object
return self.currentExperiment.extraInfo
@property
def win(self):
"""
Window associated with this Session. Defined as a property so as to be accessible from Liaison
if needed.
"""
return self._win
@win.setter
def win(self, value):
self._win = value
def setupWindowFromExperiment(self, key, expInfo=None, blocking=True):
"""
        Setup the window for this Session via the `setupWindow` method from one of this
Session's experiments.
Parameters
----------
key : str
Key by which the experiment is stored (see `.addExperiment`).
expInfo : dict
Information about the experiment, created by the `setupExpInfo` function.
blocking : bool
Should calling this method block the current thread?
If True (default), the method runs as normal and won't return until
completed.
If False, the method is added to a `queue` and will be run by the
while loop within `Session.start`. This will block the main thread,
but won't block the thread this method was called from.
If not using multithreading, this value is ignored. If you don't
know what multithreading is, you probably aren't using it - it's
difficult to do by accident!
Returns
-------
bool or None
True if the operation completed/queued successfully
"""
# If not in main thread and not requested blocking, use queue and return now
if threading.current_thread() != threading.main_thread() and not blocking:
# The queue is emptied each iteration of the while loop in `Session.start`
_queue.queueTask(
self.setupWindowFromExperiment,
key, expInfo=expInfo
)
return True
if expInfo is None:
expInfo = self.getExpInfoFromExperiment(key)
# Run the setupWindow method
self.win = self.experiments[key].setupWindow(expInfo=expInfo, win=self.win)
# Set window title to signify that we're in a Session
self.win.title = "PsychoPy Session"
return True
def setupWindowFromParams(self, params, measureFrameRate=False, recreate=True, blocking=True):
"""
Create/setup a window from a dict of parameters
Parameters
----------
params : dict
Dict of parameters to create the window from, keys should be from the
__init__ signature of psychopy.visual.Window
measureFrameRate : bool
If True, will measure frame rate upon window creation.
recreate : bool
If True, will close and recreate the window as needed
blocking : bool
Should calling this method block the current thread?
If True (default), the method runs as normal and won't return until
completed.
If False, the method is added to a `queue` and will be run by the
while loop within `Session.start`. This will block the main thread,
but won't block the thread this method was called from.
If not using multithreading, this value is ignored. If you don't
know what multithreading is, you probably aren't using it - it's
difficult to do by accident!
Returns
-------
bool or None
True if the operation completed/queued successfully
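        A minimal usage sketch (the parameter values are illustrative):
        ```
        session.setupWindowFromParams(
            {"fullscr": False, "size": [800, 600], "units": "height"},
            measureFrameRate=True
        )
        ```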
"""
# If not in main thread and not requested blocking, use queue and return now
if threading.current_thread() != threading.main_thread() and not blocking:
# The queue is emptied each iteration of the while loop in `Session.start`
_queue.queueTask(
self.setupWindowFromParams,
params
)
return True
# figure out whether we need to recreate the window
needsRecreate = False
for param in ("fullscr", "size", "pos", "screen"):
# skip params not specified
if param not in params:
continue
# skip all if there's no window
if self.win is None:
continue
# if param has changed, we'll need to recreate the window to apply it
if getattr(self.win, param) != params[param]:
needsRecreate = True
# if not allowed to recreate, warn
if not recreate:
logging.warn(
f"Changing Window.{param} requires the Window to be recreated, but "
"`Session.setupWindowFromParams` was called with `recreate=False`."
)
# if recreating, close window so we make a new one
if recreate and needsRecreate:
self.win.close()
self.win = None
if self.win is None:
# If win is None, make a Window
from psychopy.visual import Window
self.win = Window(**params)
self.win.showMessage(self.restMsg)
else:
# otherwise, just set the attributes which are safe to set
self.win.color = params.get('color', self.win.color)
self.win.colorSpace = params.get('colorSpace', self.win.colorSpace)
self.win.backgroundImage = params.get('backgroundImage', self.win.backgroundImage)
self.win.backgroundFit = params.get('backgroundFit', self.win.backgroundFit)
self.win.units = params.get('units', self.win.units)
# Set window title to signify that we're in a Session
self.win.title = "PsychoPy Session"
# Measure frame rate
if measureFrameRate:
frameRate = self.getFrameRate(retest=True)
expInfo = self.getCurrentExpInfo()
if expInfo is not False:
expInfo['frameRate'] = frameRate
return True
def getFrameRate(self, retest=False):
"""
Get the frame rate from the window.
Parameters
----------
retest : bool
If True, then will always run the frame rate test again, even if measured frame rate is already available.
Returns
-------
float
Frame rate retrieved from Session window.
"""
# if asked to, or if not yet measured, measure framerate
if retest or self.win._monitorFrameRate is None:
self.win._monitorFrameRate = self.win.getActualFrameRate()
# return from Window object
return self.win._monitorFrameRate
def setupInputsFromExperiment(self, key, expInfo=None, thisExp=None, blocking=True):
"""
Deprecated: legacy alias of setupDevicesFromExperiment
"""
        return self.setupDevicesFromExperiment(key, expInfo=expInfo, thisExp=thisExp, blocking=blocking)
def setupDevicesFromExperiment(self, key, expInfo=None, thisExp=None, blocking=True):
"""
        Setup devices for this Session via the `setupDevices` method from one of this Session's experiments.
Parameters
----------
key : str
Key by which the experiment is stored (see `.addExperiment`).
expInfo : dict
Information about the experiment, created by the `setupExpInfo` function.
thisExp : psychopy.data.ExperimentHandler
Handler object for this experiment, contains the data to save and information about where to save it to.
blocking : bool
Should calling this method block the current thread?
If True (default), the method runs as normal and won't return until
completed.
If False, the method is added to a `queue` and will be run by the
while loop within `Session.start`. This will block the main thread,
but won't block the thread this method was called from.
If not using multithreading, this value is ignored. If you don't
know what multithreading is, you probably aren't using it - it's
difficult to do by accident!
Returns
-------
bool or None
True if the operation completed/queued successfully
"""
# If not in main thread and not requested blocking, use queue and return now
if threading.current_thread() != threading.main_thread() and not blocking:
# The queue is emptied each iteration of the while loop in `Session.start`
_queue.queueTask(
self.setupDevicesFromExperiment,
                key, expInfo=expInfo, thisExp=thisExp
)
return True
if expInfo is None:
expInfo = self.getExpInfoFromExperiment(key)
# store current devices dict
ogDevices = DeviceManager.devices.copy()
# run the setupDevices method
self.experiments[key].setupDevices(expInfo=expInfo, thisExp=thisExp, win=self.win)
# reinstate any original devices which were overwritten
        for devName, obj in ogDevices.items():
            DeviceManager.devices[devName] = obj
return True
def addKeyboardFromParams(self, name, params, blocking=True):
"""
Add a keyboard to this session's inputs dict from a dict of params.
Parameters
----------
name : str
Name of this input, what to store it under in the inputs dict.
params : dict
Dict of parameters to create the keyboard from, keys should be from the
`addKeyboard` function in hardware.DeviceManager
blocking : bool
Should calling this method block the current thread?
If True (default), the method runs as normal and won't return until
completed.
If False, the method is added to a `queue` and will be run by the
while loop within `Session.start`. This will block the main thread,
but won't block the thread this method was called from.
If not using multithreading, this value is ignored. If you don't
know what multithreading is, you probably aren't using it - it's
difficult to do by accident!
Returns
-------
bool or None
True if the operation completed/queued successfully
"""
# If not in main thread and not requested blocking, use queue and return now
if threading.current_thread() != threading.main_thread() and not blocking:
# The queue is emptied each iteration of the while loop in `Session.start`
_queue.queueTask(
self.addKeyboardFromParams,
name, params
)
return True
# Create keyboard
        deviceManager.addKeyboard(**params)
return True
def getRequiredDeviceNamesFromExperiment(self, key):
"""
Get a list of device names referenced in a given experiment.
Parameters
----------
key : str
Key by which the experiment is stored (see `.addExperiment`).
Returns
-------
list[str]
List of device names
"""
# get an experiment object
exp = self.experimentObjects[key]
if exp is None:
raise ValueError(
f"Device names are not available for experiments added to Session directly as a "
f".py file."
)
# get ready to store usages
usages = {}
def _process(name, emt):
"""
Process an element (Component or Routine) for device names and append them to the
usages dict.
Parameters
----------
name : str
Name of this element in Builder
emt : Component or Routine
Element to process
"""
# if we have a device name for this element...
if "deviceLabel" in emt.params:
# get init value so it lines up with boilerplate code
inits = experiment.getInitVals(emt.params)
# get value
deviceName = inits['deviceLabel'].val
# if deviceName exists from other elements, add usage to it
if deviceName in usages:
usages[deviceName].append(name)
else:
usages[deviceName] = [name]
# iterate through routines
for rtName, rt in exp.routines.items():
if isinstance(rt, experiment.routines.BaseStandaloneRoutine):
# for standalone routines, get device names from params
_process(rtName, rt)
else:
# for regular routines, get device names from each component
for comp in rt:
_process(comp.name, comp)
return list(usages)
def runExperiment(self, key, expInfo=None, blocking=True):
"""
Run the `setupData` and `run` methods from one of this Session's experiments.
Parameters
----------
key : str
Key by which the experiment is stored (see `.addExperiment`).
expInfo : dict
Information about the experiment, created by the `setupExpInfo` function.
blocking : bool
Should calling this method block the current thread?
If True (default), the method runs as normal and won't return until
completed.
If False, the method is added to a `queue` and will be run by the
while loop within `Session.start`. This will block the main thread,
but won't block the thread this method was called from.
If not using multithreading, this value is ignored. If you don't
know what multithreading is, you probably aren't using it - it's
difficult to do by accident!
Returns
-------
bool or None
True if the operation completed/queued successfully
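        A minimal usage sketch of a non-blocking run from a worker thread
        (the experiment key is illustrative):
        ```
        session.runExperiment("exp1", blocking=False)
        ```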
"""
err = None
# If not in main thread and not requested blocking, use queue and return now
if threading.current_thread() != threading.main_thread() and not blocking:
# The queue is emptied each iteration of the while loop in `Session.start`
_queue.queueTask(
self.runExperiment,
key, expInfo=expInfo
)
return True
if expInfo is None:
expInfo = self.getExpInfoFromExperiment(key)
# Setup data for this experiment
thisExp = self.experiments[key].setupData(expInfo=expInfo, dataDir=str(self.dataDir))
thisExp.name = key
# Mark ExperimentHandler as current
self.currentExperiment = thisExp
# Make sure we have at least one response device
if "defaultKeyboard" not in DeviceManager.devices:
DeviceManager.addDevice(
deviceClass="psychopy.hardware.keyboard.KeyboardDevice",
deviceName="defaultKeyboard"
)
# Hide Window message
self.win.hideMessage()
# Setup window for this experiment
self.setupWindowFromExperiment(expInfo=expInfo, key=key)
self.win.flip()
self.win.flip()
# Hold all autodraw stimuli
self.win.stashAutoDraw()
# Pause the listener loop
listenerLoop.pause()
# Setup logging
self.experiments[key].run.__globals__['logFile'] = self.logFile
# Log start
logging.info(_translate(
"Running experiment via Session: name={key}, expInfo={expInfo}"
).format(key=key, expInfo=expInfo))
# reset session clock
self.sessionClock.reset()
# Run this experiment
try:
self.experiments[key].run(
expInfo=expInfo,
thisExp=thisExp,
win=self.win,
globalClock=self.sessionClock,
thisSession=self
)
except Exception as _err:
err = _err
err.userdata = key
# Reinstate autodraw stimuli
self.win.retrieveAutoDraw()
        # Restart the listener loop (assuming resume() as the counterpart of
        # the pause() call above)
        listenerLoop.resume()
# Restore original chdir
os.chdir(str(self.root))
# Store ExperimentHandler
self.runs.append(thisExp)
# Save data
self.saveCurrentExperimentData()
# Mark ExperimentHandler as no longer current
self.currentExperiment = None
# Display waiting text
self.win.showMessage(self.restMsg)
self.win.color = "grey"
# Raise any errors now
if err is not None:
raise err
# Log finished and flush logs
logging.info(_translate(
"Finished running experiment via Session: name={key}, expInfo={expInfo}"
).format(key=key, expInfo=expInfo))
logging.flush()
# Send finished data to liaison
if self.liaison is not None:
self.sendToLiaison({
'type': "experiment_status",
'name': thisExp.name,
'status': thisExp.status,
'expInfo': expInfo
})
return True
def getAllTrials(self):
"""
Returns all trials (elapsed, current and upcoming) with an index indicating which trial is
the current trial.
Returns
-------
list[Trial]
List of trials, in order (oldest to newest)
int
Index of the current trial in this list
"""
# return None if there's no current experiment
if self.currentExperiment is None:
return None
# get trials from current experiment
trials, i = self.currentExperiment.getAllTrials()
return trials, i
def getCurrentTrial(self, asDict=False):
"""
        Returns the current trial (`.thisTrial`)
        Parameters
        ----------
        asDict : bool
            If True, convert the Trial object to a dict before returning
            (useful for Liaison)
        Returns
        -------
        Trial or dict
            The current trial
"""
# return None if there's no current experiment
if self.currentExperiment is None:
return None
# get trial from current experiment
trial = self.currentExperiment.getCurrentTrial()
# convert to dict if needed
if asDict and trial is not None:
trial = trial.getDict()
return trial
def getFutureTrial(self, n=1, asDict=False):
"""
Returns the condition for n trials into the future, without
advancing the trials. Returns 'None' if attempting to go beyond
the last trial in the current loop, if there is no current loop
or if there is no current experiment.
Parameters
----------
n : int
Number of places into the future to look
asDict : bool
If True, convert Trial object to a dict before returning (useful for Liaison)
"""
# return None if there's no current experiment
if self.currentExperiment is None:
return None
# get future trial from current experiment
trial = self.currentExperiment.getFutureTrial(n)
# convert to dict if needed
if asDict and trial is not None:
trial = trial.getDict()
return trial
def getFutureTrials(self, n=1, start=0, asDict=False):
"""
Returns Trial objects for a given range in the future. Will start looking at `start` trials
in the future and will return n trials from then, so e.g. to get all trials from 2 in the
future to 5 in the future you would use `start=2` and `n=3`.
Parameters
----------
n : int, optional
How many trials into the future to look, by default 1
start : int, optional
How many trials into the future to start looking at, by default 0
asDict : bool
If True, convert Trial objects to a dict before returning (useful for Liaison)
Returns
-------
list[Trial or dict or None]
List of Trial objects n long. Any trials beyond the last trial are None.
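        A minimal usage sketch of the example above (trials 2 to 4 ahead of
        the current trial):
        ```
        upcoming = session.getFutureTrials(n=3, start=2, asDict=True)
        ```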
"""
# blank list to store trials in
trials = []
# iterate through n trials
for i in range(n):
# add each to the list
trials.append(
self.getFutureTrial(start + i, asDict=asDict)
)
return trials
def pauseLoop(self):
"""
Pause the current loop of the current experiment. Note that this will not take effect until
the loop would next iterate.
Returns
-------
bool or None
True if the operation completed successfully
"""
# warn and return failed if no experiment is running
if self.currentExperiment is None:
logging.warn(_translate(
"Could not pause loop as there is no experiment running."
))
return False
# warn and return failed if not in a loop
if self.currentExperiment.currentLoop is self.currentExperiment:
logging.warn(_translate(
"Could not pause loop as the current experiment is not in a loop."
))
return False
        # pause loop
        self.currentExperiment.currentLoop.status = constants.PAUSED
        return True
def pauseExperiment(self):
"""
Pause the currently running experiment.
Returns
-------
bool or None
True if the operation completed successfully
"""
# warn and return failed if no experiment is running
if self.currentExperiment is None:
logging.warn(
_translate("Could not pause experiment as there is none "
"running.")
)
return False
# set ExperimentHandler status to PAUSED
self.currentExperiment.pause()
return True
def resumeExperiment(self):
"""
Resume the currently paused experiment.
Returns
-------
bool or None
True if the operation completed successfully
"""
# warn and return failed if no experiment is running
if self.currentExperiment is None:
logging.warn(
_translate("Could not resume experiment as there is none "
"running or paused.")
)
return False
# set ExperimentHandler status to STARTED
self.currentExperiment.resume()
return True
def stopExperiment(self):
"""
Stop the currently running experiment.
Returns
-------
bool or None
True if the operation completed successfully
"""
# warn and return failed if no experiment is running
if self.currentExperiment is None:
logging.warn(
_translate("Could not stop experiment as there is none "
"running.")
)
return False
self.currentExperiment.stop()
return True
def skipTrials(self, n=1):
"""
        Skip ahead n trials - the trials in between will be marked as "skipped". If you try to
        skip past the last trial, this will log a warning and skip *to* the last trial.
Parameters
----------
n : int
Number of trials to skip ahead
"""
# return if there's no current experiment
if self.currentExperiment is None:
return
# skip trials in current loop
return self.currentExperiment.skipTrials(n)
def rewindTrials(self, n=1):
"""
        Rewind back n trials, so that previously run trials will be presented
        again.
        Parameters
        ----------
        n : int
            Number of trials to rewind back
Returns
-------
bool or None
True if the operation completed/queued successfully
"""
# return if there's no current experiment
if self.currentExperiment is None:
return
# rewind trials in current loop
return self.currentExperiment.rewindTrials(n)
def saveExperimentData(self, key, thisExp=None, blocking=True):
"""
Run the `saveData` method from one of this Session's experiments, on a
given ExperimentHandler.
Parameters
----------
key : str
Key by which the experiment is stored (see `.addExperiment`).
thisExp : psychopy.data.ExperimentHandler
ExperimentHandler object to save the data from. If None, save the
last run of the given experiment.
blocking : bool
Should calling this method block the current thread?
If True (default), the method runs as normal and won't return until
completed.
If False, the method is added to a `queue` and will be run by the
while loop within `Session.start`. This will block the main thread,
but won't block the thread this method was called from.
If not using multithreading, this value is ignored. If you don't
know what multithreading is, you probably aren't using it - it's
difficult to do by accident!
Returns
-------
bool or None
True if the operation completed/queued successfully
"""
# If not in main thread and not requested blocking, use queue and return now
if threading.current_thread() != threading.main_thread() and not blocking:
# The queue is emptied each iteration of the while loop in `Session.start`
_queue.queueTask(
self.saveExperimentData,
key, thisExp=thisExp
)
return True
# get last run
if thisExp is None:
# copy list of runs in reverse
runs = self.runs.copy()
runs.reverse()
# iterate through runs, starting at the end
for run in runs:
# use the first run to match given exp
if run.name == key:
thisExp = run
break
# save to Session folder
self.experiments[key].saveData(thisExp)
return True
def saveCurrentExperimentData(self, blocking=True):
"""
Call `.saveExperimentData` on the currently running experiment - if
there is one.
Parameters
----------
blocking : bool
Should calling this method block the current thread?
If True (default), the method runs as normal and won't return until
completed.
If False, the method is added to a `queue` and will be run by the
while loop within `Session.start`. This will block the main thread,
but won't block the thread this method was called from.
If not using multithreading, this value is ignored. If you don't
know what multithreading is, you probably aren't using it - it's
difficult to do by accident!
Returns
-------
bool or None
True if the operation completed/queued successfully, False if there
was no current experiment running
"""
if self.currentExperiment is None:
return False
return self.saveExperimentData(
key=self.currentExperiment.name,
thisExp=self.currentExperiment,
blocking=blocking
)
def addAnnotation(self, value):
"""
Add an annotation in the data file at the current point in the
experiment and to the log.
Parameters
----------
value : str
Value of the annotation
Returns
-------
bool
True if completed successfully
"""
# add to experiment data if there's one running
if hasattr(self.currentExperiment, "addAnnotation"):
# annotate
self.currentExperiment.addAnnotation(value)
# log regardless
logging.info(value)
return True
def addData(self, name, value, row=None, priority=None):
"""
Add data in the data file at the current point in the experiment, and to the log.
Parameters
----------
name : str
Name of the column to add data as.
value : any
Value to add
row : int or None
Row in which to add this data. Leave as None to add to the current entry.
priority : int
Priority value to set the column to - higher priority columns appear nearer to the start of
the data file. Use values from `constants.priority` as landmark values:
- CRITICAL: Always at the start of the data file, generally reserved for Routine start times
- HIGH: Important columns which are near the front of the data file
- MEDIUM: Possibly important columns which are around the middle of the data file
- LOW: Columns unlikely to be important which are at the end of the data file
- EXCLUDE: Always at the end of the data file, actively marked as unimportant
Returns
-------
bool
True if completed successfully
"""
# add to experiment data if there's one running
if hasattr(self.currentExperiment, "addData"):
# add
self.currentExperiment.addData(name, value, row=row, priority=priority)
# log regardless
logging.data(f"NAME={name}, PRIORITY={priority}, VALUE={value}")
return True
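    # Illustrative usage sketch (not part of the original file): adding a column
    # with an explicit priority so it sorts near the front of the data file.
    # `sess` is an assumed Session instance; the exact attribute access on
    # `constants.priority` is also an assumption:
    #
    #     >>> from psychopy import constants
    #     >>> sess.addData("response", "left", priority=constants.priority.HIGH)
    #     True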
def sendExperimentData(self, key=None):
"""
Send last ExperimentHandler for an experiment to liaison. If no experiment is given, sends the currently
running experiment.
Parameters
----------
key : str or None
Name of the experiment whose data to send, or None to send the current experiment's data.
Returns
-------
        bool or None
            True if data was sent, False if no matching run was found. Returns
            None if this Session has no liaison server.
"""
# Skip if there's no liaison
if self.liaison is None:
return
# Sub None for current
if key is None and self.currentExperiment is not None:
key = self.currentExperiment.name
elif key is None:
key = self.runs[-1].name
# Get list of runs (including current)
runs = self.runs.copy()
if self.currentExperiment is not None:
runs.append(self.currentExperiment)
# Get last experiment data
for run in reversed(runs):
if run.name == key:
# Send experiment data
self.sendToLiaison(run)
return True
# Return False if nothing sent
return False
def sendToLiaison(self, value):
"""
Send data to this Session's `Liaison` object.
Parameters
----------
value : str, dict, psychopy.data.ExperimentHandler
Data to send - this can either be a single string, a dict of strings, or an
ExperimentHandler (whose data will be sent)
Returns
-------
bool or None
True if the operation completed successfully
"""
if self.liaison is None:
logging.warn(_translate(
"Could not send data to liaison server as none is initialised for this Session."
))
return
# If ExperimentHandler, get its data as a list of dicts
if isinstance(value, data.ExperimentHandler):
value = value.getJSON(priorityThreshold=self.priorityThreshold)
# Send
self.liaison.broadcastSync(message=value)
def close(self, blocking=True):
"""
Safely close and delete the current session.
Parameters
----------
blocking : bool
Should calling this method block the current thread?
If True (default), the method runs as normal and won't return until
completed.
If False, the method is added to a `queue` and will be run by the
while loop within `Session.start`. This will block the main thread,
but won't block the thread this method was called from.
If not using multithreading, this value is ignored. If you don't
know what multithreading is, you probably aren't using it - it's
difficult to do by accident!
"""
# If not in main thread and not requested blocking, use queue and return now
if threading.current_thread() != threading.main_thread() and not blocking:
# The queue is emptied each iteration of the while loop in `Session.start`
_queue.queueTask(
self.close
)
return True
# remove self from queue
if self in _queue.sessions:
self.stop()
# if there is a Liaison object, re-register Session class
if self.liaison is not None:
self.liaison.registerClass(Session, "session")
# close any windows
if self.win is not None:
self.win.close()
self.win = None
# flush any remaining logs and kill reference to log file
self.logFile.logger.flush()
self.logFile.logger.removeTarget(self.logFile)
# delete self
del self
return True
if __name__ == "__main__":
"""
Create a Session with parameters passed by command line.
Parameters
----------
--root
Root directory for the Session
--host
Port address of host server (if any)
--timing
How to handle timing, can be either:
- "float": Start a timer when Session is created and do timing relative to that (default)
- "iso": Do timing via wall clock in ISO 8601 format
- any valid strftime string: Do timing via wall clock in the given format
--session-data-dir
Folder to store all data from this Session in, including the log file.
"""
# Parse args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--host", dest="host")
args, _ = parser.parse_known_args()
if ":" in str(args.host):
host, port = str(args.host).split(":")
# Import liaison
from psychopy import liaison
# Create liaison server
liaisonServer = liaison.WebSocketServer()
# Add DeviceManager to liaison server
liaisonServer.registerClass(DeviceManager, "DeviceManager")
# Add session to liaison server
liaisonServer.registerClass(Session, "session")
# Register queue with liaison
liaisonServer.registerMethods(_queue, "SessionQueue")
# Create thread to run liaison server in
liaisonThread = threading.Thread(
target=liaisonServer.start,
kwargs={
'host': host,
'port': port,
}
)
# Start liaison server
liaisonThread.start()
# Start processing script queue
_queue.start()
else:
liaisonServer = None
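# Illustrative invocation (not part of the original file): starting this module so
# the `--host` branch above spins up a Liaison WebSocket server. The module path
# and port here are assumptions:
#
#     python -m psychopy.session --host localhost:8100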
| 59,145 | Python | .py | 1,451 | 30.08408 | 118 | 0.599373 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,354 | counterbalance.py | psychopy_psychopy/psychopy/data/counterbalance.py |
from psychopy import logging
from psychopy.tools.attributetools import attributeSetter
class CounterbalancerFinishedError(BaseException):
"""
Exception raised when a Counterbalancer is finished and its onFinished method is set to "raise"
"""
pass
class Counterbalancer:
"""
Tool for getting a group assignment from the Shelf, and keeping track of which previous
participants were assigned to which groups.
Parameters
----------
shelf : psychopy.data.shelf.Shelf
Which shelf to draw data from?
entry : str
Name of the Shelf entry to use.
conditions : list[dict]
List of dicts indicating, for each group:
- Its name
- Max number of participants
- [optional] Additional arbitrary parameters
nReps : int
Number of (total) times the slots for each group need to be depleted for the shelf entry
to be considered "finished". This sets the initial value for `reps` - each time the
number of slots left for all groups reaches 0, if `reps` is more than 1, the slots are
refilled and `reps` decreases by 1.
autoLog : bool
Whether to print to the log whenever an attribute of this object changes.
"""
def __init__(
self,
shelf,
entry,
conditions,
nReps=1,
autoLog=False):
# store autolog
self.autoLog = autoLog
# store ref to shelf
self.shelf = shelf
# store entry name
self.entry = entry
# store conditions array
self.conditions = conditions
# placeholder values before querying shelf
self.group = None
self.params = {}
# store total nReps
self.nReps = nReps
# make sure entry exists
if self.entry not in self.shelf.data:
self.makeNewEntry()
# get remaining reps
self.reps = self.shelf.data[self.entry].get("_reps", nReps)
# update data for remaining in conditions
self.updateRemaining()
@property
def data(self):
"""
Returns
-------
dict
Full Shelf data associated with this Counterbalancer. Returns as a dict, not a handle, so changing the
value of Counterbalancer.data won't change the value on the Shelf.
"""
# make sure entry exists
if self.entry not in self.shelf.data:
self.makeNewEntry()
# filter out protected (_) entries
data = {
key: val
for key, val in self.shelf.data[self.entry].items()
if not str(key).startswith("_")
}
# return entry
return data
@attributeSetter
def reps(self, value):
"""
Set the number of repetitions remaining for this shelf entry. If reps > 0 when
allocateGroups is called,
Parameters
----------
value : int
Number of repetitions remaining
"""
# make sure entry exists
if self.entry not in self.shelf.data:
self.makeNewEntry()
# get entry
entry = self.shelf.data[self.entry]
# set value in entry
entry['_reps'] = value
# reapply entry to shelf
self.shelf.data[self.entry] = entry
# store value
self.__dict__['reps'] = value
def makeNewEntry(self):
# create an entry with only reps
self.shelf.data[self.entry] = {'_reps': self.nReps}
# reset slots (to create groups)
self.resetSlots()
def resetSlots(self):
# get entry
entry = self.shelf.data[self.entry]
# populate entry with groups and caps
for row in self.conditions:
entry[row['group']] = row['cap']
# reapply entry to shelf
self.shelf.data[self.entry] = entry
@property
def depleted(self):
"""
Returns
-------
bool
            True if all participant counters are at or below 0, False otherwise.
"""
return all(val <= 0 for val in self.data.values())
@property
def finished(self):
"""
Returns
-------
bool
True if all participant counters are at or below 0 and there are no repetitions left,
False otherwise.
"""
return self.depleted and self.reps <= 1
@property
def remaining(self):
"""
Returns
-------
int
How many participants are left for the currently chosen group?
"""
if self.group is not None:
return self.data[self.group]
def allocateGroup(self):
"""
Retrieve a group allocation from the Shelf and decrement the participant counter for that group.
Returns
-------
str
Name of the chosen group.
"""
if self.finished:
# log warning
msg = (f"All groups in shelf entry '{self.entry}' are now finished, with no "
f"repetitions remaining.")
logging.warning(msg)
# if onFinished is ignore, set group to None and params to blank
self.group = None
if len(self.conditions):
self.params = {key: None for key in self.conditions[0]}
else:
self.params = {'group': None}
return
elif self.depleted:
# if depleted but not finished, reset slots before choosing
self.resetSlots()
# decrement reps
self.reps = self.reps - 1
# get group assignment from shelf
self.group = self.shelf.counterBalanceSelect(
key=self.entry,
groups=[row['group'] for row in self.conditions],
groupSizes=[row['cap'] for row in self.conditions],
)[0]
# get params from matching row of conditions array
for row in self.conditions:
if row['group'] == self.group:
self.params = row.copy()
# pop group and cap from params
for key in ("group", "cap"):
if key in self.params:
del self.params[key]
# update data for remaining in conditions
self.updateRemaining()
return self.group
def updateRemaining(self):
# get data just once
data = self.data
# store all remaining info in conditions array
for row in self.conditions:
row['remaining'] = data[str(row['group'])]
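# Illustrative usage sketch (not part of the original file): allocating a
# participant to a counterbalanced group. Shelf comes from psychopy/data/shelf.py
# (below); the entry name, caps and extra parameters are made up:
#
#     >>> from psychopy.data.shelf import Shelf
#     >>> shelf = Shelf(scope="experiment", expPath="/path/to/exp")
#     >>> cb = Counterbalancer(
#     ...     shelf=shelf,
#     ...     entry="groupAssignment",
#     ...     conditions=[
#     ...         {'group': "A", 'cap': 10, 'stimFile': "condsA.csv"},
#     ...         {'group': "B", 'cap': 10, 'stimFile': "condsB.csv"},
#     ...     ],
#     ... )
#     >>> cb.allocateGroup()  # e.g. "B"; decrements that group's counter
#     >>> cb.params           # extra params for the chosen row, e.g. its stimFile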
| 6,561 | Python | .py | 188 | 25.510638 | 114 | 0.585262 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,355 | shelf.py | psychopy_psychopy/psychopy/data/shelf.py |
import json
import os
import numpy as np
from pathlib import Path
from psychopy.preferences import prefs
class Shelf:
"""
Parameters
----------
scope : str
Scope of the Shelf file, one of:
- "designer" / "d" / "des" / "user": Shelf file is accessible to any experiment running on this computer. File
will be stored in your user folder (%AppData%/psychopy3 on Windows) as `shelf.json`.
- "experiment" / "e" / "exp" / "project": Shelf file is accessible only to the given experiment. File will be
stored in the experiment folder as `shelf.json`.
- "participant" / "p" / "par" / "subject": Shelf file is accessible only to the given participant. File will be
stored in the "shelf" folder within your user folder as the participant ID followed by `.json`
expPath : str or Path
Path to the experiment folder, if scope is "experiment". Can also accept a path to the experiment file.
participant : str
Participant ID, if scope is "participant".
"""
# other names which scopes can be referred to as
scopeAliases = {
'designer': ["designer", "d", "des", "user"],
'experiment': ["experiment", "e", "exp", "project"],
'participant': ["participant", "p", "par", "subject"]
}
def __init__(self, scope="experiment", expPath=None, participant=None):
# handle scope aliases
scope = self.scopeFromAlias(scope)
# if given an experiment path, sanitize it
if expPath is not None:
# convert to Path object
expPath = Path(expPath)
# if given a file path, use parent dir
if not expPath.is_dir():
expPath = expPath.parent
# work out path of scope file from scope and params
if scope == "designer":
# access shelf from user folder
self.path = Path(prefs.paths['userPrefsDir']) / "shelf.json"
elif scope in "experiment":
# make sure we have the information we need to get scope file
assert expPath is not None, (
"Cannot access experiment-scope shelf records without reference to experiment's origin path. Please "
"supply a value for 'expPath' when creating an experiment-scope Shelf object."
)
# access shelf from experiment folder
self.path = expPath / "shelf.json"
elif scope in "participant":
# make sure we have the information we need to get scope file
assert participant is not None, (
"Cannot access participant-scope shelf records without reference to participant ID. Please "
"supply a value for 'participant' when creating a participant-scope Shelf object."
)
# access shelf from a participant shelf file in the user folder
self.path = Path(prefs.paths['userPrefsDir']) / "shelf" / f"{participant}.json"
# open file(s)
self.data = ShelfData(self.path)
@staticmethod
def scopeFromAlias(alias):
"""
Get the scope name from one of its aliases, e.g. get "experiment" from "exp".
Parameters
----------
alias : str
Alias of the scope.
Returns
-------
str
Proper name of the scope.
"""
# if alias is present in aliases dict, return corresponding scope
for scope in Shelf.scopeAliases:
if alias in Shelf.scopeAliases[scope]:
return scope
# if it isn't aliased, return as is
return alias
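    # For example (illustrative, not part of the original file):
    #
    #     >>> Shelf.scopeFromAlias("exp")
    #     'experiment'
    #     >>> Shelf.scopeFromAlias("subject")
    #     'participant'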
def counterBalanceSelect(self, key, groups, groupSizes):
"""
Select a group from a counterbalancing entry and decrement the associated counter.
Parameters
----------
key : str
Key of the entry to draw from
groups : list[str]
List of group names. Names not present in the entry will be created with the matching groupSize value.
groupSizes : list[int]
List of group max sizes, must be the same length as `groups`. The probability of each group being chosen is
determined by its number of remaining participants.
Returns
-------
str
Chosen group name
bool
True if the given group is now at 0, False otherwise
"""
# get entry
try:
entry = self.data[key]
except KeyError:
entry = {}
# for each group...
options = []
weights = []
for group, size in zip(groups, groupSizes):
group = str(group)
# make sure it exists in entry
if group not in entry:
entry[group] = size
# figure out weight from cap
weight = size / sum(groupSizes)
# add to options if not full
if entry[group] > 0:
options.append(group)
weights.append(weight)
# make sure weights sum to 1
weights = weights / np.sum(weights)
# choose a group at random
try:
chosen = np.random.choice(options, p=weights)
except ValueError:
# if no groups, force to be None
return None, True
# iterate chosen group
entry[chosen] -= 1
# get finished
finished = entry[chosen] <= 0
# set entry
self.data[key] = entry
return chosen, finished
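    # Illustrative usage sketch (not part of the original file): drawing directly
    # from a counterbalancing entry. Group names and sizes are made up:
    #
    #     >>> chosen, finished = shelf.counterBalanceSelect(
    #     ...     key="groupAssignment", groups=["A", "B"], groupSizes=[10, 10]
    #     ... )
    #     >>> chosen    # e.g. "A", weighted by each group's cap
    #     >>> finished  # True once the chosen group's counter reaches 0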
class ShelfData:
"""
Dict-like object representing the data on a Shelf. ShelfData is linked to a particular JSON file - when its data
changes, the file is written to, keeping it in sync.
Parameters
----------
path : str or Path
Path to the JSON file which this ShelfData corresponds to.
"""
    def __init__(self, path):
        # accept either str or Path (as per the docstring)
        path = Path(path)
        # make sure the parent folder and file exist
        if not path.parent.is_dir():
os.makedirs(str(path.parent), exist_ok=True)
if not path.is_file():
path.write_text("{}", encoding="utf-8")
# store ref to path
self._path = path
# make sure file is valid json
try:
self.read()
except json.JSONDecodeError as err:
errcls = type(err)
raise json.JSONDecodeError((
f"Contents of shelf file '{path}' are not valid JSON syntax. Has the file been edited outside of "
f"PsychoPy? Original error:\n"
f"\t{errcls.__module__}.{errcls.__name__}: {err.msg}"
),
doc=err.doc,
pos=err.pos
)
def __repr__(self):
# get data from file
data = self.read()
return repr(data)
def __contains__(self, item):
data = self.read()
return item in data
def read(self):
"""
Get data from linked JSON file.
Returns
-------
dict
Data read from file.
"""
# get data from file
with self._path.open("r", encoding="utf-8") as f:
data = json.load(f)
return data
def __getitem__(self, key):
# get data from file
data = self.read()
return data[key]
def write(self, data):
"""
Write data to linked JSON file
Parameters
----------
data : dict
Data to write to file.
"""
# write data to file
with self._path.open("w", encoding="utf-8") as f:
json.dump(data, f, indent=True)
def __setitem__(self, key, value):
# get data from file
data = self.read()
# set data
data[key] = value
# write data to file
self.write(data)
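# Illustrative usage sketch (not part of the original file): ShelfData re-reads
# the JSON file on every access and rewrites it on every assignment, which keeps
# separate processes pointing at the same file in sync. The path is made up:
#
#     >>> from pathlib import Path
#     >>> shelfData = ShelfData(Path("/tmp/shelf.json"))
#     >>> shelfData["counter"] = 1  # written straight to disk
#     >>> shelfData["counter"]      # read back from disk
#     1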
| 7,817 | Python | .py | 203 | 28.561576 | 119 | 0.573483 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,356 | trial.py | psychopy_psychopy/psychopy/data/trial.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import sys
import copy
import numpy as np
import pandas as pd
from psychopy import logging, constants
from psychopy.tools.filetools import (openOutputFile, genDelimiter,
genFilenameFromDelimiter)
from .utils import importConditions
from .base import _BaseTrialHandler, DataHandler
class TrialType(dict):
"""This is just like a dict, except that you can access keys with obj.key
"""
def __getattribute__(self, name):
try: # to get attr from dict in normal way (passing self)
return dict.__getattribute__(self, name)
except AttributeError:
try:
return self[name]
except KeyError:
msg = "TrialType has no attribute (or key) \'%s\'"
raise AttributeError(msg % name)
class TrialHandler(_BaseTrialHandler):
"""Class to handle trial sequencing and data storage.
Calls to .next() will fetch the next trial object given to this handler,
according to the method specified (random, sequential, fullRandom).
Calls will raise a StopIteration error if trials have finished.
See demo_trialHandler.py
The psydat file format is literally just a pickled copy of the
TrialHandler object that saved it. You can open it with::
from psychopy.tools.filetools import fromFile
dat = fromFile(path)
    Then you'll find that `dat` has the same attributes that the TrialHandler had
    at the point it was saved.
"""
def __init__(self,
trialList,
nReps,
method='random',
dataTypes=None,
extraInfo=None,
seed=None,
originPath=None,
name='',
autoLog=True):
"""
:Parameters:
trialList: a simple list (or flat array) of dictionaries
specifying conditions. This can be imported from an
excel/csv file using :func:`~psychopy.data.importConditions`
nReps: number of repeats for all conditions
method: *'random',* 'sequential', or 'fullRandom'
'sequential' obviously presents the conditions in the order
they appear in the list. 'random' will result in a shuffle
of the conditions on each repeat, but all conditions
occur once before the second repeat etc. 'fullRandom'
fully randomises the trials across repeats as well,
which means you could potentially run all trials of
one condition before any trial of another.
dataTypes: (optional) list of names for data storage.
e.g. ['corr','rt','resp']. If not provided then these
will be created as needed during calls to
:func:`~psychopy.data.TrialHandler.addData`
extraInfo: A dictionary
This will be stored alongside the data and usually
describes the experiment and subject ID, date etc.
seed: an integer
If provided then this fixes the random number generator to
use the same pattern of trials, by seeding its startpoint
originPath: a string describing the location of the
script / experiment file path. The psydat file format will
store a copy of the experiment if possible. If
`originPath==None` is provided here then the TrialHandler
will still store a copy of the script where it was
                created. If `originPath==-1` then nothing will be stored.
:Attributes (after creation):
.data - a dictionary (or more strictly, a `DataHandler` sub-
class of a dictionary) of numpy arrays, one for each data
type stored
.trialList - the original list of dicts, specifying the conditions
.thisIndex - the index of the current trial in the original
conditions list
.nTotal - the total number of trials that will be run
.nRemaining - the total number of trials remaining
.thisN - total trials completed so far
.thisRepN - which repeat you are currently on
.thisTrialN - which trial number *within* that repeat
.thisTrial - a dictionary giving the parameters of the current
trial
.finished - True/False for have we finished yet
.extraInfo - the dictionary of extra info as given at beginning
.origin - the contents of the script or builder experiment that
created the handler
"""
self.name = name
self.autoLog = autoLog
if trialList in (None, []): # user wants an empty trialList
# which corresponds to a list with a single empty entry
self.trialList = [None]
# user has hopefully specified a filename
elif isinstance(trialList, str) and os.path.isfile(trialList):
# import conditions from that file
self.trialList = importConditions(trialList)
else:
self.trialList = trialList
# convert any entry in the TrialList into a TrialType object (with
# obj.key or obj[key] access)
for n, entry in enumerate(self.trialList):
if type(entry) == dict:
self.trialList[n] = TrialType(entry)
self.nReps = int(nReps)
self.nTotal = self.nReps * len(self.trialList)
self.nRemaining = self.nTotal # subtract 1 each trial
self.method = method
self.thisRepN = 0 # records which repetition or pass we are on
self.thisTrialN = -1 # records trial number within this repetition
self.thisN = -1
self.thisIndex = 0 # index of current trial in the conditions list
self.thisTrial = []
self.finished = False
self.extraInfo = extraInfo
self.seed = seed
# create dataHandler
self.data = DataHandler(trials=self)
        if dataTypes is not None:
self.data.addDataType(dataTypes)
self.data.addDataType('ran')
self.data['ran'].mask = False # this is a bool; all entries are valid
self.data.addDataType('order')
# generate stimulus sequence
if self.method in ['random', 'sequential', 'fullRandom']:
self.sequenceIndices = self._createSequence()
else:
self.sequenceIndices = []
self.originPath, self.origin = self.getOriginPathAndFile(originPath)
self._exp = None # the experiment handler that owns me!
# starting status
self.status = constants.NOT_STARTED
def __iter__(self):
return self
def __repr__(self):
"""prints a more verbose version of self as string
"""
return self.__str__(verbose=True)
def __str__(self, verbose=False):
"""string representation of the object
"""
strRepres = 'psychopy.data.{}(\n'.format(self.__class__.__name__)
attribs = dir(self)
# data first, then all others
try:
data = self.data
except Exception:
data = None
if data:
strRepres += str('\tdata=')
strRepres += str(data) + '\n'
method_string = "<class 'method'>"
for thisAttrib in attribs:
# can handle each attribute differently
if method_string in str(type(getattr(self, thisAttrib))):
# this is a method
continue
elif thisAttrib[0] == '_':
# the attrib is private
continue
elif thisAttrib == 'data':
# we handled this first
continue
elif len(str(getattr(self, thisAttrib))) > 20 and not verbose:
# just give type of LONG public attribute
strRepres += str('\t' + thisAttrib + '=')
strRepres += str(type(getattr(self, thisAttrib))) + '\n'
else:
# give the complete contents of attribute
strRepres += str('\t' + thisAttrib + '=')
strRepres += str(getattr(self, thisAttrib)) + '\n'
strRepres += ')'
return strRepres
def _createSequence(self):
"""Pre-generates the sequence of trial presentations
(for non-adaptive methods). This is called automatically when
the TrialHandler is initialised so doesn't need an explicit call
from the user.
The returned sequence has form indices[stimN][repN]
Example: sequential with 6 trialtypes (rows), 5 reps (cols), returns::
[[0 0 0 0 0]
[1 1 1 1 1]
[2 2 2 2 2]
[3 3 3 3 3]
[4 4 4 4 4]
[5 5 5 5 5]]
These 30 trials will be returned by .next() in the order:
0, 1, 2, 3, 4, 5, 0, 1, 2, ... ... 3, 4, 5
To add a new type of sequence (as of v1.65.02):
- add the sequence generation code here
- adjust "if self.method in [ ...]:" in both __init__ and .next()
- adjust allowedVals in experiment.py -> shows up in DlgLoopProperties
Note that users can make any sequence whatsoever outside of PsychoPy,
and specify sequential order; any order is possible this way.
"""
# create indices for a single rep
indices = np.asarray(self._makeIndices(self.trialList), dtype=int)
rng = np.random.default_rng(seed=self.seed)
if self.method == 'random':
sequenceIndices = []
for thisRep in range(self.nReps):
thisRepSeq = rng.permutation(indices.flat).tolist()
sequenceIndices.append(thisRepSeq)
sequenceIndices = np.transpose(sequenceIndices)
elif self.method == 'sequential':
sequenceIndices = np.repeat(indices, self.nReps, 1)
elif self.method == 'fullRandom':
# indices*nReps, flatten, shuffle, unflatten; only use seed once
sequential = np.repeat(indices, self.nReps, 1) # = sequential
randomFlat = rng.permutation(sequential.flat)
sequenceIndices = np.reshape(
randomFlat, (len(indices), self.nReps))
if self.autoLog:
msg = 'Created sequence: %s, trialTypes=%d, nReps=%i, seed=%s'
vals = (self.method, len(indices), self.nReps, str(self.seed))
logging.exp(msg % vals)
return sequenceIndices
def _makeIndices(self, inputArray):
"""
Creates an array of tuples the same shape as the input array
where each tuple contains the indices to itself in the array.
Useful for shuffling and then using as a reference.
"""
# make sure its an array of objects (can be strings etc)
inputArray = np.asarray(inputArray, 'O')
# get some simple variables for later
dims = inputArray.shape
        dimsProd = np.prod(dims)  # np.product was removed in NumPy 2.0
dimsN = len(dims)
dimsList = list(range(dimsN))
listOfLists = []
# this creates space for an array of any objects
arrayOfTuples = np.ones(dimsProd, 'O')
# for each dimension create list of its indices (using modulo)
for thisDim in dimsList:
            prevDimsProd = np.prod(dims[:thisDim])
            # NB "//" is floor division, "%" is modulus in python
            thisDimVals = np.arange(dimsProd) // prevDimsProd % dims[thisDim]
listOfLists.append(thisDimVals)
# convert to array
indexArr = np.asarray(listOfLists)
for n in range(dimsProd):
arrayOfTuples[n] = tuple((indexArr[:, n]))
return (np.reshape(arrayOfTuples, dims)).tolist()
def __next__(self):
"""Advances to next trial and returns it.
Updates attributes; thisTrial, thisTrialN and thisIndex
If the trials have ended this method will raise a StopIteration error.
This can be handled with code such as::
trials = data.TrialHandler(.......)
for eachTrial in trials: # automatically stops when done
# do stuff
or::
trials = data.TrialHandler(.......)
while True: # ie forever
try:
thisTrial = trials.next()
except StopIteration: # we got a StopIteration error
break #break out of the forever loop
# do stuff here for the trial
"""
# update pointer for next trials
self.thisTrialN += 1 # number of trial this pass
self.thisN += 1 # number of trial in total
self.nRemaining -= 1
if self.thisTrialN == len(self.trialList):
# start a new repetition
self.thisTrialN = 0
self.thisRepN += 1
if self.thisRepN >= self.nReps:
# all reps complete
self.thisTrial = []
self.finished = True
        if self.finished:
self._terminate()
# fetch the trial info
if self.method in ('random', 'sequential', 'fullRandom'):
self.thisIndex = self.sequenceIndices[
self.thisTrialN][self.thisRepN]
self.thisTrial = self.trialList[self.thisIndex]
self.data.add('ran', 1)
self.data.add('order', self.thisN)
if self.autoLog:
msg = 'New trial (rep=%i, index=%i): %s'
vals = (self.thisRepN, self.thisTrialN, self.thisTrial)
logging.exp(msg % vals, obj=self.thisTrial)
return self.thisTrial
next = __next__ # allows user to call without a loop `val = trials.next()`
def getCurrentTrial(self):
"""Returns the condition for the current trial, without
advancing the trials.
"""
return self.trialList[self.thisIndex]
def getFutureTrial(self, n=1):
"""Returns the condition for n trials into the future,
without advancing the trials. A negative n returns a previous (past)
trial. Returns 'None' if attempting to go beyond the last trial.
"""
# check that we don't go out of bounds for either positive or negative
if n > self.nRemaining or self.thisN + n < 0:
return None
seqs = np.array(self.sequenceIndices).transpose().flat
condIndex = seqs[self.thisN + n]
return self.trialList[condIndex]
def getEarlierTrial(self, n=-1):
"""Returns the condition information from n trials previously.
Useful for comparisons in n-back tasks. Returns 'None' if trying
to access a trial prior to the first.
"""
# treat positive offset values as equivalent to negative ones:
return self.getFutureTrial(-abs(n))
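    # Illustrative usage sketch (not part of the original file): a minimal
    # TrialHandler loop. The conditions and data values are made up:
    #
    #     >>> trials = TrialHandler(
    #     ...     trialList=[{'ori': 0}, {'ori': 90}], nReps=2, method='random',
    #     ... )
    #     >>> for thisTrial in trials:       # stops automatically via StopIteration
    #     ...     trials.addData('rt', 0.5)  # store a value against this trial
    #     >>> trials.nTotal
    #     4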
def _createOutputArray(self, stimOut, dataOut, delim=None,
matrixOnly=False):
"""Does the leg-work for saveAsText and saveAsExcel.
Combines stimOut with ._parseDataOutput()
"""
if (stimOut == [] and
len(self.trialList) and
hasattr(self.trialList[0], 'keys')):
stimOut = list(self.trialList[0].keys())
# these get added somewhere (by DataHandler?)
if 'n' in stimOut:
stimOut.remove('n')
if 'float' in stimOut:
stimOut.remove('float')
lines = []
# parse the dataout section of the output
dataOut, dataAnal, dataHead = self._createOutputArrayData(dataOut)
if not matrixOnly:
thisLine = []
lines.append(thisLine)
# write a header line
for heading in list(stimOut) + dataHead:
if heading == 'ran_sum':
heading = 'n'
elif heading == 'order_raw':
heading = 'order'
thisLine.append(heading)
# loop through stimuli, writing data
for stimN in range(len(self.trialList)):
thisLine = []
lines.append(thisLine)
# first the params for this stim (from self.trialList)
for heading in stimOut:
thisLine.append(self.trialList[stimN][heading])
# then the data for this stim (from self.data)
for thisDataOut in dataOut:
# make a string version of the data and then format it
tmpData = dataAnal[thisDataOut][stimN]
replaceNone = False
if hasattr(tmpData, 'tolist'): # is a numpy array
strVersion = str(tmpData.tolist())
# for numeric data replace None with a blank cell
if tmpData.dtype.kind not in 'SaUV':
replaceNone = True
strVersion = strVersion.replace('None', '')
elif tmpData in [None, 'None']:
strVersion = ''
else:
strVersion = str(tmpData)
if strVersion == '()':
# 'no data' in masked array should show as "--"
strVersion = "--"
# handle list of values (e.g. rt_raw )
if (len(strVersion) and
strVersion[0] in '[(' and
strVersion[-1] in '])'):
strVersion = strVersion[1:-1] # skip first and last chars
# handle lists of lists (e.g. raw of multiple key presses)
if (len(strVersion) and
strVersion[0] in '[(' and
strVersion[-1] in '])'):
if replaceNone:
# Add None back so that the str is valid for eval
strVersion = strVersion.replace('[,', '[None,')
strVersion = strVersion.replace(', ,', ', None,')
tup = eval(strVersion) # convert back to a tuple
for entry in tup:
# contents of each entry is a list or tuple so keep in
# quotes to avoid probs with delim
currentEntry = str(entry)
if replaceNone:
currentEntry = currentEntry.replace('None', '')
thisLine.append(currentEntry)
else:
thisLine.extend(strVersion.split(','))
# add self.extraInfo
        if (self.extraInfo is not None) and not matrixOnly:
lines.append([])
# give a single line of space and then a heading
lines.append(['extraInfo'])
for key, value in list(self.extraInfo.items()):
lines.append([key, value])
return lines
def _createOutputArrayData(self, dataOut):
"""This just creates the dataOut part of the output matrix.
It is called by _createOutputArray() which creates the header
line and adds the stimOut columns
"""
dataHead = [] # will store list of data headers
dataAnal = dict([]) # will store data that has been analyzed
if type(dataOut) == str:
# don't do list convert or we get a list of letters
dataOut = [dataOut]
elif type(dataOut) != list:
dataOut = list(dataOut)
# expand any 'all' dataTypes to be full list of available dataTypes
allDataTypes = list(self.data.keys())
# treat these separately later
allDataTypes.remove('ran')
# ready to go through standard data types
dataOutNew = []
for thisDataOut in dataOut:
if thisDataOut == 'n':
# n is really just the sum of the ran trials
dataOutNew.append('ran_sum')
continue # no need to do more with this one
# then break into dataType and analysis
dataType, analType = thisDataOut.rsplit('_', 1)
if dataType == 'all':
dataOutNew.extend(
[key + "_" + analType for key in allDataTypes])
if 'order_mean' in dataOutNew:
dataOutNew.remove('order_mean')
if 'order_std' in dataOutNew:
dataOutNew.remove('order_std')
else:
dataOutNew.append(thisDataOut)
dataOut = dataOutNew
# sort so all datatypes come together, rather than all analtypes
dataOut.sort()
# do the various analyses, keeping track of fails (e.g. mean of a
# string)
dataOutInvalid = []
# add back special data types (n and order)
if 'ran_sum' in dataOut:
# move n to the first column
dataOut.remove('ran_sum')
dataOut.insert(0, 'ran_sum')
if 'order_raw' in dataOut:
# move order_raw to the second column
dataOut.remove('order_raw')
dataOut.append('order_raw')
# do the necessary analysis on the data
for thisDataOutN, thisDataOut in enumerate(dataOut):
dataType, analType = thisDataOut.rsplit('_', 1)
if not dataType in self.data:
# that analysis can't be done
dataOutInvalid.append(thisDataOut)
continue
thisData = self.data[dataType]
# set the header
dataHead.append(dataType + '_' + analType)
# analyse thisData using numpy module
if analType in dir(np):
try:
# will fail if we try to take mean of a string for example
if analType == 'std':
thisAnal = np.std(thisData, axis=1, ddof=0)
# normalise by N-1 instead. This should work by
# setting ddof=1 but doesn't as of 08/2010 (because
# of using a masked array?)
N = thisData.shape[1]
if N == 1:
thisAnal *= 0 # prevent a divide-by-zero error
else:
sqrt = np.sqrt
thisAnal = thisAnal * sqrt(N) / sqrt(N - 1)
else:
thisAnal = eval("np.%s(thisData,1)" % analType)
except Exception:
# that analysis doesn't work
dataHead.remove(dataType + '_' + analType)
dataOutInvalid.append(thisDataOut)
continue # to next analysis
elif analType == 'raw':
thisAnal = thisData
else:
raise AttributeError('You can only use analyses from numpy')
# add extra cols to header if necess
if len(thisAnal.shape) > 1:
for n in range(thisAnal.shape[1] - 1):
dataHead.append("")
dataAnal[thisDataOut] = thisAnal
# remove invalid analyses (e.g. average of a string)
for invalidAnal in dataOutInvalid:
dataOut.remove(invalidAnal)
return dataOut, dataAnal, dataHead
def saveAsWideText(self, fileName,
delim=None,
matrixOnly=False,
appendFile=True,
encoding='utf-8-sig',
fileCollisionMethod='rename'):
"""Write a text file with the session, stimulus, and data values
from each trial in chronological order. Also, return a
pandas DataFrame containing same information as the file.
That is, unlike 'saveAsText' and 'saveAsExcel':
- each row comprises information from only a single trial.
- no summarizing is done (such as collapsing to produce mean and
standard deviation values across trials).
This 'wide' format, as expected by R for creating dataframes, and
various other analysis programs, means that some information must
be repeated on every row.
In particular, if the trialHandler's 'extraInfo' exists, then each
entry in there occurs in every row. In builder, this will include
any entries in the 'Experiment info' field of the
'Experiment settings' dialog. In Coder, this information can be
set using something like::
myTrialHandler.extraInfo = {'SubjID': 'Joan Smith',
'Group': 'Control'}
:Parameters:
fileName:
if extension is not specified, '.csv' will be appended
if the delimiter is ',', else '.tsv' will be appended.
Can include path info.
delim:
allows the user to use a delimiter other than the default
tab ("," is popular with file extension ".csv")
matrixOnly:
outputs the data with no header row.
appendFile:
will add this output to the end of the specified file if
it already exists.
fileCollisionMethod:
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
encoding:
The encoding to use when saving a the file.
Defaults to `utf-8-sig`.
"""
        if self.thisTrialN < 0 and self.thisRepN < 0:
            # if both are < 0 we haven't started
logging.info('TrialHandler.saveAsWideText called but no '
'trials completed. Nothing saved')
return -1
# set default delimiter if none given
if delim is None:
delim = genDelimiter(fileName)
# create the file or send to stdout
fileName = genFilenameFromDelimiter(fileName, delim)
f = openOutputFile(fileName, append=appendFile,
fileCollisionMethod=fileCollisionMethod,
encoding=encoding)
# collect parameter names related to the stimuli:
if self.trialList[0]:
header = list(self.trialList[0].keys())
else:
header = []
# and then add parameter names related to data (e.g. RT)
header.extend(self.data.dataTypes)
# get the extra 'wide' parameter names into the header line:
header.insert(0, "TrialNumber")
# this is wide format, so we want fixed information
# (e.g. subject ID, date, etc) repeated every line if it exists:
if self.extraInfo is not None:
for key in self.extraInfo:
header.insert(0, key)
df = pd.DataFrame(columns=header)
# loop through each trial, gathering the actual values:
dataOut = []
trialCount = 0
# total number of trials = number of trialtypes * number of
# repetitions:
repsPerType = {}
entriesList = []
for rep in range(self.nReps):
for trialN in range(len(self.trialList)):
# find out what trial type was on this trial
trialTypeIndex = self.sequenceIndices[trialN, rep]
# determine which repeat it is for this trial
if trialTypeIndex not in repsPerType:
repsPerType[trialTypeIndex] = 0
else:
repsPerType[trialTypeIndex] += 1
# what repeat are we on for this trial type?
trep = repsPerType[trialTypeIndex]
# create a dictionary representing each trial:
nextEntry = {}
# add a trial number so the original order of the data can
# always be recovered if sorted during analysis:
trialCount += 1
# now collect the value from each trial of vars in header:
for prmName in header:
# the header includes both trial and data variables, so
# need to check before accessing:
tti = trialTypeIndex
if self.trialList[tti] and prmName in self.trialList[tti]:
nextEntry[prmName] = self.trialList[tti][prmName]
elif prmName in self.data:
nextEntry[prmName] = self.data[prmName][tti][trep]
                    elif self.extraInfo is not None and prmName in self.extraInfo:
nextEntry[prmName] = self.extraInfo[prmName]
else:
# allow a null value if this parameter wasn't
# explicitly stored on this trial:
if prmName == "TrialNumber":
nextEntry[prmName] = trialCount
else:
nextEntry[prmName] = ''
# store this trial's data
dataOut.append(nextEntry)
# df = df.append(nextEntry, ignore_index=True)
entriesList.append(nextEntry)
df = pd.concat([df, pd.DataFrame(entriesList)])
if not matrixOnly:
# write the header row:
nextLine = ''
for prmName in header:
nextLine = nextLine + prmName + delim
# remove the final orphaned tab character
f.write(nextLine[:-1] + '\n')
# write the data matrix:
for trial in dataOut:
nextLine = ''
for prmName in header:
nextLine = nextLine + str(trial[prmName]) + delim
# remove the final orphaned tab character
nextLine = nextLine[:-1]
f.write(nextLine + '\n')
if f != sys.stdout:
f.close()
logging.info('saved wide-format data to %s' % f.name)
# Converts numbers to numeric, such as float64, boolean to bool.
# Otherwise they all are "object" type, i.e. strings
# df = df.convert_objects()
return df
def saveAsJson(self,
fileName=None,
encoding='utf-8',
fileCollisionMethod='rename'):
raise NotImplementedError('Not implemented for TrialHandler.')
def addData(self, thisType, value, position=None):
"""Add data for the current trial
"""
        self.data.add(thisType, value, position=position)
        if self.getExp() is not None:  # update the experiment handler too
self.getExp().addData(thisType, value)
class Trial(dict):
def __init__(self, parent, thisN, thisRepN, thisTrialN, thisIndex, data=None):
dict.__init__(self)
# TrialHandler containing this trial
self.parent = parent
# state information about this trial
self.thisN = thisN
self.thisRepN = thisRepN
self.thisTrialN = thisTrialN
self.thisIndex = thisIndex
# add status
self.status = constants.NOT_STARTED
# data for this trial
if data is None:
data = {}
else:
data = data.copy()
self.data = data
def __repr__(self):
return (
f"<Trial {self.thisN} ({self.thisTrialN} in rep {self.thisRepN}) "
f"data={ {key: val for key,val in self.items()} }>"
)
@property
def data(self):
# return self when getting data (so it's modified by modifying data)
return self
@data.setter
def data(self, value: dict):
# when setting data, clear self...
self.clear()
# ... and set each value from the given dict
for key, val in value.items():
self[key] = val
@property
def skipped(self):
"""
Has this Trial been skipped?
"""
return self.data.get('skipped', False)
def getDict(self):
"""
Get this Trial as a dict.
Returns
-------
dict
Dict containing information for this Trial.
"""
return {
'type': "trial_data",
'thisN': self.thisN,
'thisRepN': self.thisRepN,
'thisTrialN': self.thisTrialN,
'thisIndex': self.thisIndex,
'data': {key: val for key, val in self.items()},
}
def getJSON(self):
"""
Serialize this Trial to a JSON format.
Returns
-------
str
The results of Trial.getDict expressed as a JSON string
"""
return json.dumps(
self.getDict()
)
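    # For example (illustrative, not part of the original file), a Trial
    # serialises its state plus its data dict:
    #
    #     >>> trial = Trial(None, thisN=0, thisRepN=0, thisTrialN=0, thisIndex=0,
    #     ...               data={'ori': 90})
    #     >>> trial.getJSON()
    #     '{"type": "trial_data", "thisN": 0, "thisRepN": 0, "thisTrialN": 0, "thisIndex": 0, "data": {"ori": 90}}'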
class TrialHandler2(_BaseTrialHandler):
"""Class to handle trial sequencing and data storage.
Calls to .next() will fetch the next trial object given to this handler,
according to the method specified (random, sequential, fullRandom).
Calls will raise a StopIteration error if trials have finished.
See demo_trialHandler.py
The psydat file format is literally just a pickled copy of the
TrialHandler object that saved it. You can open it with::
from psychopy.tools.filetools import fromFile
dat = fromFile(path)
    Then you'll find that `dat` has the same attributes that the TrialHandler had
    at the point it was saved.
"""
def __init__(self,
trialList,
nReps,
method='random',
dataTypes=None,
extraInfo=None,
seed=None,
originPath=None,
name='',
autoLog=True):
"""
:Parameters:
trialList: filename or a simple list (or flat array) of
dictionaries specifying conditions
nReps: number of repeats for all conditions
method: *'random',* 'sequential', or 'fullRandom'
'sequential' obviously presents the conditions in the order
they appear in the list. 'random' will result in a shuffle
of the conditions on each repeat, but all conditions occur
once before the second repeat etc. 'fullRandom' fully
randomises the trials across repeats as well, which means
you could potentially run all trials of one condition
before any trial of another.
dataTypes: (optional) list of names for data storage.
e.g. ['corr','rt','resp']. If not provided then these
will be created as needed during calls to
:func:`~psychopy.data.TrialHandler.addData`
extraInfo: A dictionary
This will be stored alongside the data and usually describes
the experiment and subject ID, date etc.
seed: an integer
If provided then this fixes the random number generator to
use the same pattern of trials, by seeding its startpoint.
originPath: a string describing the location of the script /
experiment file path. The psydat file format will store a
copy of the experiment if possible. If `originPath==None`
is provided here then the TrialHandler will still store a
copy of the script where it was
                created. If `originPath==-1` then nothing will be stored.
:Attributes (after creation):
.data - a dictionary of numpy arrays, one for each data type
stored
.trialList - the original list of dicts, specifying the conditions
.thisIndex - the index of the current trial in the original
conditions list
.nTotal - the total number of trials that will be run
.nRemaining - the total number of trials remaining
.thisN - total trials completed so far
.thisRepN - which repeat you are currently on
.thisTrialN - which trial number *within* that repeat
.thisTrial - a dictionary giving the parameters of the current
trial
.finished - True/False for have we finished yet
.extraInfo - the dictionary of extra info as given at beginning
.origin - the contents of the script or builder experiment that
created the handler
"""
self.name = name
self.autoLog = autoLog
if trialList in [None, [None], []]: # user wants an empty trialList
# which corresponds to a list with a single empty entry
self.trialList = [None]
self.columns = []
# user has hopefully specified a filename
elif isinstance(trialList, str) and os.path.isfile(trialList):
# import conditions from that file
self.trialList, self.columns = importConditions(
trialList,
returnFieldNames=True)
else:
self.trialList = trialList
self.columns = list(trialList[0].keys())
# convert any entry in the TrialList into a TrialType object (with
# obj.key or obj[key] access)
for n, entry in enumerate(self.trialList):
if type(entry) == dict:
self.trialList[n] = TrialType(entry)
self.nReps = int(nReps)
self.nTotal = self.nReps * len(self.trialList)
self.nRemaining = self.nTotal # subtract 1 each trial
self.remainingIndices = []
self.prevIndices = []
self.method = method
self.extraInfo = extraInfo
self.seed = seed
self._rng = np.random.default_rng(seed=seed)
self._trialAborted = False
# store a list of dicts, convert to pandas DataFrame on access
self.elapsedTrials = []
self.upcomingTrials = None
self.thisTrial = None
self.originPath, self.origin = self.getOriginPathAndFile(originPath)
self._exp = None # the experiment handler that owns me!
def __iter__(self):
return self
def __repr__(self):
"""prints a more verbose version of self as string
"""
return self.__str__(verbose=True)
def __str__(self, verbose=False):
"""string representation of the object
"""
strRepres = 'psychopy.data.{}(\n'.format(self.__class__.__name__)
attribs = dir(self)
# data first, then all others
try:
data = self.data
except Exception:
strRepres += '\t(no data)\n'
else:
strRepres += str('\tdata=')
strRepres += str(data) + '\n'
method_string = "<class 'method'>"
for thisAttrib in attribs:
# can handle each attribute differently
if method_string in str(type(getattr(self, thisAttrib))):
# this is a method
continue
elif thisAttrib[0] == '_':
# the attrib is private
continue
elif thisAttrib == 'data':
# we handled this first
continue
elif (len(str(getattr(self, thisAttrib))) > 20 and
not verbose):
# just give type of LONG public attribute
strRepres += str('\t' + thisAttrib + '=')
strRepres += str(type(getattr(self, thisAttrib))) + '\n'
else:
# give the complete contents of attribute
strRepres += str('\t' + thisAttrib + '=')
strRepres += str(getattr(self, thisAttrib)) + '\n'
strRepres += ')'
return strRepres
def __eq__(self, other):
# We want to ignore the RNG object when doing the comparison.
self_copy = copy.deepcopy(self)
other_copy = copy.deepcopy(other)
del self_copy._rng, other_copy._rng
result = super(TrialHandler2, self_copy).__eq__(other_copy)
return result
@property
def data(self):
"""Returns a pandas DataFrame of the trial data so far
Read only attribute - you can't directly modify TrialHandler.data
Note that data are stored internally as a list of dictionaries,
one per trial. These are converted to a DataFrame on access.
"""
return pd.DataFrame(self.elapsedTrials)
def __next__(self):
"""Advances to next trial and returns it.
Updates attributes; thisTrial, thisTrialN and thisIndex
If the trials have ended this method will raise a StopIteration error.
This can be handled with code such as::
trials = data.TrialHandler(.......)
for eachTrial in trials: # automatically stops when done
# do stuff
or::
trials = data.TrialHandler(.......)
while True: # ie forever
try:
thisTrial = trials.next()
except StopIteration: # we got a StopIteration error
break # break out of the forever loop
# do stuff here for the trial
"""
# mark previous trial as elapsed
if self.thisTrial is not None:
self.elapsedTrials.append(self.thisTrial)
        # if upcoming is None, recalculate
if self.upcomingTrials is None:
self.calculateUpcoming()
# if upcoming is empty, finish
if not self.upcomingTrials:
self.finished = True
self.thisTrial = None
self._terminate()
raise StopIteration
# get first upcoming trial
self.thisTrial = self.upcomingTrials.pop(0)
# update data structure with new info
self.addData('thisN', self.thisN)
self.addData('thisTrialN', self.thisTrialN)
self.addData('thisRepN', self.thisRepN)
if self.autoLog:
msg = 'New trial (rep=%i, index=%i): %s'
vals = (self.thisRepN, self.thisTrialN, self.thisTrial)
logging.exp(msg % vals, obj=self.thisTrial)
# update experiment handler entry
exp = self.getExp()
if exp is not None:
exp.updateEntryFromLoop(self)
return self.thisTrial
next = __next__ # allows user to call without a loop `val = trials.next()`
@property
def thisIndex(self):
if self.thisTrial is None:
if len(self.elapsedTrials):
return self.elapsedTrials[-1].thisIndex
else:
return -1
return self.thisTrial.thisIndex
@property
def thisN(self):
if self.thisTrial is None:
if len(self.elapsedTrials):
return self.elapsedTrials[-1].thisN
else:
return -1
return self.thisTrial.thisN
@property
def thisTrialN(self):
if self.thisTrial is None:
if len(self.elapsedTrials):
return self.elapsedTrials[-1].thisTrialN
else:
return -1
return self.thisTrial.thisTrialN
@property
def thisRepN(self):
if self.thisTrial is None:
if len(self.elapsedTrials):
return self.elapsedTrials[-1].thisRepN
else:
return -1
return self.thisTrial.thisRepN
def calculateUpcoming(self, fromIndex=-1):
"""Rebuild the sequence of trial/state info as if running the trials
Args:
            fromIndex (int, optional): the point in the sequence from where to rebuild. Defaults to -1.
"""
# clear upcoming
self.upcomingTrials = []
# start off at 0 trial
thisTrialN = 0
thisN = 0
thisRepN = -1
# empty array to store indices once taken
prevIndices = []
# empty array to store remaining indices
remainingIndices = []
# iterate a while loop until we run out of trials
while thisN < (self.nReps * len(self.trialList)):
if not remainingIndices:
# we've just started, or just starting a new repeat
sequence = list(range(len(self.trialList)))
if (self.method == 'fullRandom' and
thisN < (self.nReps * len(self.trialList))):
# we've only just started on a fullRandom sequence
sequence *= self.nReps
# NB permutation *returns* a shuffled array
remainingIndices = list(self._rng.permutation(sequence))
elif (self.method in ('sequential', 'random') and
thisRepN < self.nReps):
thisTrialN = 0
thisRepN += 1
if self.method == 'random':
self._rng.shuffle(sequence) # shuffle (is in-place)
remainingIndices = list(sequence)
else:
# we've finished
break
if thisN < len(self.elapsedTrials):
# trial has already happened - get its value
thisTrial = self.elapsedTrials[thisN]
# remove from remaining
                remainingIndices.remove(thisTrial.thisIndex)
else:
# fetch the trial info
if len(self.trialList) == 0:
thisIndex = 0
thisTrial = {}
else:
thisIndex = remainingIndices.pop(0)
# if None then use empty dict
thisTrial = self.trialList[thisIndex] or {}
thisTrial = copy.copy(thisTrial)
# make Trial object
thisTrial = Trial(
self,
thisN=thisN,
thisRepN=thisRepN,
thisTrialN=thisTrialN,
thisIndex=thisIndex,
data=thisTrial
)
# otherwise, append trial
self.upcomingTrials.append(thisTrial)
# for fullRandom check how many times this has come up before
if self.method == 'fullRandom':
thisTrial.thisRepN = prevIndices.count(thisTrial.thisIndex)
# update prev indices
prevIndices.append(thisTrial.thisIndex)
# update pointer for next trials
thisTrialN += 1 # number of trial this pass
thisN += 1 # number of trial in total
def abortCurrentTrial(self, action='random'):
"""Abort the current trial.
Calling this during an experiment replace this trial. The condition
related to the aborted trial will be replaced elsewhere in the session
depending on the `method` in use for sampling conditions.
Parameters
----------
action : str
Action to take with the aborted trial. Can be either of `'random'`,
or `'append'`. The default action is `'random'`.
Notes
-----
* When using `action='random'`, the RNG state for the trial handler is
not used.
"""
# clear this trial so it's not appended to elapsed
self.thisTrial = None
# clear upcoming trials so they're recalculated on next iteration
self.upcomingTrials = None
@property
def finished(self):
"""
Whether this loop has finished or not. Will be True if there are no upcoming trials and
False if there are any. Set `.finished = True` to skip all remaining trials (equivalent to
calling `.skipTrials()` with a value larger than the number of trials remaining)
Returns
-------
bool
True if there are no upcoming trials, False otherwise.
"""
return not bool(self.upcomingTrials)
@finished.setter
def finished(self, value):
# when setting finished to True, skip all remaining trials
if value:
self.upcomingTrials = []
else:
self.calculateUpcoming()
def skipTrials(self, n=1):
"""
        Skip ahead n trials - the trials in between will be marked as "skipped". If you try to
skip past the last trial, will log a warning and skip *to* the last trial.
Parameters
----------
n : int
Number of trials to skip ahead
"""
# account for the fact current trial will end once skipped
n -= 1
# if skipping past last trial, print warning and skip to last trial
if n > len(self.upcomingTrials):
logging.warn(
f"Requested skip of {n} trials when only {len(self.elapsedTrials)} trials are upcoming. "
f"Skipping to the last upcoming trial."
)
n = len(self.upcomingTrials)
# mark as skipping so routines end
self.thisTrial.status = constants.STOPPING
# before iterating, add "skipped" to data
self.addData("skipped", True)
        # iterate n times (the current trial was already accounted for above)
for i in range(n):
self.__next__()
# before iterating, add "skipped" to data
self.addData("skipped", True)
# advance row in data file
if self.getExp() is not None:
self.getExp().nextEntry()
return self.thisTrial
def rewindTrials(self, n=1):
"""
Rewind back n trials - previously elapsed trials will return to being upcoming. If you
try to rewind before the first trial, will log a warning and rewind *to* the first trial.
Parameters
----------
n : int
Number of trials to rewind back
"""
# treat -n as n
n = abs(n)
# account for the fact current trial will end once skipped
n += 1
# if rewinding past first trial, print warning and rewind to first trial
if n > len(self.elapsedTrials):
logging.warn(
f"Requested rewind of {n} trials when only {len(self.elapsedTrials)} trials have "
f"elapsed. Rewinding to the first trial."
)
n = len(self.elapsedTrials)
# mark current trial as skipping so it ends
self.thisTrial.status = constants.STOPPING
        # start with just the current trial
        rewound = [self.thisTrial]
# pop the last n values from elapsed trials
for i in range(n):
rewound = [self.elapsedTrials.pop(-1)] + rewound
# set thisTrial from first rewound value
self.thisTrial = rewound.pop(0)
# prepend rewound trials to upcoming array
self.upcomingTrials = rewound + self.upcomingTrials
return self.thisTrial
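    # Illustrative usage sketch (not part of the original file): jumping around
    # mid-loop, e.g. from an experimenter control panel. `trials` is an assumed
    # TrialHandler2 that has already started iterating:
    #
    #     >>> trials.skipTrials(3)    # marks the skipped trials and jumps ahead
    #     >>> trials.rewindTrials(1)  # returns the last elapsed trial to upcoming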
def getCurrentTrial(self):
"""
Returns the current trial (`.thisTrial`)
Returns
-------
Trial
The current trial
"""
return self.thisTrial
def getAllTrials(self):
"""
Returns all trials (elapsed, current and upcoming) with an index indicating which trial is
the current trial.
Returns
-------
list[Trial]
List of trials, in order (oldest to newest)
int
Index of the current trial in this list
"""
return (self.elapsedTrials or []) + [self.thisTrial] + (self.upcomingTrials or []), len(self.elapsedTrials)
def getFutureTrial(self, n=1):
"""
Returns the condition for n trials into the future, without
advancing the trials. Returns 'None' if attempting to go beyond
the last trial.
Returns
-------
Trial or None
Trial object for n trials into the future.
"""
# make sure n is an integer
if isinstance(n, str) and n.isnumeric():
n = int(n)
# return None if requesting beyond last trial
if self.upcomingTrials is None or n > len(self.upcomingTrials):
return None
# return the corresponding trial from upcoming trials array
return self.upcomingTrials[n-1]
def getFutureTrials(self, n=None, start=0):
"""
Returns Trial objects for a given range in the future. Will start looking at `start` trials
in the future and will return n trials from then, so e.g. to get all trials from 2 in the
future to 5 in the future you would use `start=2` and `n=3`.
Parameters
----------
n : int, optional
How many trials into the future to look, by default None. Leave as None to show all
future trials
start : int, optional
How many trials into the future to start looking at, by default 0
Returns
-------
list[Trial or None]
List of Trial objects n long. Any trials beyond the last trial are None.
"""
# if there are no future trials, return a blank list
if self.upcomingTrials is None:
return []
# if None, get all future trials
if n is None:
n = len(self.upcomingTrials) - start
# blank list to store trials in
trials = []
# iterate through n trials
for i in range(n):
# add each to the list
trials.append(
self.getFutureTrial(start + i + 1)
)
return trials
def getEarlierTrial(self, n=-1):
"""Returns the condition information from n trials previously.
Useful for comparisons in n-back tasks. Returns 'None' if trying
to access a trial prior to the first.
"""
# treat positive offset values as equivalent to negative ones:
if n > 0:
n = n * -1
# return None if requesting before first trial
        if self.elapsedTrials is None or abs(n) > len(self.elapsedTrials):
return None
# return the corresponding trial from elapsed trials array
return self.elapsedTrials[n]
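    # --- Editor's illustrative sketch (not part of the original source) ---
    # Hypothetical 2-back comparison using getEarlierTrial; the condition
    # key 'letter' is an assumption for the example:
    #     prev = trials.getEarlierTrial(-2)
    #     isMatch = (prev is not None
    #                and prev['letter'] == trials.getCurrentTrial()['letter'])
    #     trials.addData('match2back', isMatch)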
def _createOutputArray(self, stimOut, dataOut, delim=None,
matrixOnly=False):
"""Does the leg-work for saveAsText and saveAsExcel.
Combines stimOut with ._parseDataOutput()
"""
if (stimOut == [] and
len(self.trialList) and
hasattr(self.trialList[0], 'keys')):
stimOut = list(self.trialList[0].keys())
# these get added somewhere (by DataHandler?)
if 'n' in stimOut:
stimOut.remove('n')
if 'float' in stimOut:
stimOut.remove('float')
lines = []
# parse the dataout section of the output
dataOut, dataAnal, dataHead = self._createOutputArrayData(dataOut)
if not matrixOnly:
thisLine = []
lines.append(thisLine)
# write a header line
for heading in list(stimOut) + dataHead:
if heading == 'ran_sum':
heading = 'n'
elif heading == 'order_raw':
heading = 'order'
thisLine.append(heading)
# loop through stimuli, writing data
for stimN in range(len(self.trialList)):
thisLine = []
lines.append(thisLine)
# first the params for this stim (from self.trialList)
for heading in stimOut:
thisLine.append(self.trialList[stimN][heading])
# then the data for this stim (from self.data)
for thisDataOut in dataOut:
# make a string version of the data and then format it
tmpData = dataAnal[thisDataOut][stimN]
if hasattr(tmpData, 'tolist'): # is a numpy array
strVersion = str(tmpData.tolist())
                    # for numeric data replace None with a blank cell
                    # (dtype.kind is a single character, so test membership
                    # in the string of string-like kinds)
                    if tmpData.dtype.kind not in 'SaUV':
                        strVersion = strVersion.replace('None', '')
elif tmpData in [None, 'None']:
strVersion = ''
else:
strVersion = str(tmpData)
if strVersion == '()':
# 'no data' in masked array should show as "--"
strVersion = "--"
# handle list of values (e.g. rt_raw )
if (len(strVersion) and
strVersion[0] in '[(' and
strVersion[-1] in '])'):
strVersion = strVersion[1:-1] # skip first and last chars
# handle lists of lists (e.g. raw of multiple key presses)
if (len(strVersion) and
strVersion[0] in '[(' and
strVersion[-1] in '])'):
tup = eval(strVersion) # convert back to a tuple
for entry in tup:
# contents of each entry is a list or tuple so keep in
# quotes to avoid probs with delim
thisLine.append(str(entry))
else:
thisLine.extend(strVersion.split(','))
# add self.extraInfo
        if (self.extraInfo is not None) and not matrixOnly:
lines.append([])
# give a single line of space and then a heading
lines.append(['extraInfo'])
for key, value in list(self.extraInfo.items()):
lines.append([key, value])
return lines
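    # --- Editor's illustrative note (values invented) ---
    # The `lines` returned above form a writer-ready matrix, e.g.:
    #     [['ori', 'rt_mean'],        # header (unless matrixOnly)
    #      [0, 0.532],
    #      [90, 0.611],
    #      [],                        # spacer before extraInfo
    #      ['extraInfo'],
    #      ['SubjID', 'Joan Smith']]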
def _createOutputArrayData(self, dataOut):
"""This just creates the dataOut part of the output matrix.
It is called by _createOutputArray() which creates the header
line and adds the stimOut columns
"""
dataHead = [] # will store list of data headers
dataAnal = dict([]) # will store data that has been analyzed
        if isinstance(dataOut, str):
            # don't do list convert or we get a list of letters
            dataOut = [dataOut]
        elif not isinstance(dataOut, list):
            dataOut = list(dataOut)
# expand any 'all' dataTypes to be full list of available dataTypes
allDataTypes = list(self.data.keys())
# ready to go through standard data types
dataOutNew = []
for thisDataOut in dataOut:
if thisDataOut == 'n':
# n is really just the sum of the ran trials
dataOutNew.append('ran_sum')
continue # no need to do more with this one
# then break into dataType and analysis
dataType, analType = thisDataOut.rsplit('_', 1)
if dataType == 'all':
dataOutNew.extend(
[key + "_" + analType for key in allDataTypes])
if 'order_mean' in dataOutNew:
dataOutNew.remove('order_mean')
if 'order_std' in dataOutNew:
dataOutNew.remove('order_std')
else:
dataOutNew.append(thisDataOut)
dataOut = dataOutNew
# sort so all datatypes come together, rather than all analtypes
dataOut.sort()
# do the various analyses, keeping track of fails (e.g. mean of a
# string)
dataOutInvalid = []
# add back special data types (n and order)
if 'ran_sum' in dataOut:
# move n to the first column
dataOut.remove('ran_sum')
dataOut.insert(0, 'ran_sum')
if 'order_raw' in dataOut:
            # move order_raw to the last column
dataOut.remove('order_raw')
dataOut.append('order_raw')
# do the necessary analysis on the data
for thisDataOutN, thisDataOut in enumerate(dataOut):
dataType, analType = thisDataOut.rsplit('_', 1)
            if dataType not in self.data:
# that analysis can't be done
dataOutInvalid.append(thisDataOut)
continue
thisData = self.data[dataType]
# set the header
dataHead.append(dataType + '_' + analType)
# analyse thisData using numpy module
if analType in dir(np):
try:
# will fail if we try to take mean of a string for example
if analType == 'std':
thisAnal = np.std(thisData, axis=1, ddof=0)
# normalise by N-1 instead. This should work by
# setting ddof=1 but doesn't as of 08/2010 (because
# of using a masked array?)
N = thisData.shape[1]
if N == 1:
thisAnal *= 0 # prevent a divide-by-zero error
else:
sqrt = np.sqrt
thisAnal = thisAnal * sqrt(N) / sqrt(N - 1)
else:
                        thisAnal = getattr(np, analType)(thisData, 1)
except Exception:
# that analysis doesn't work
dataHead.remove(dataType + '_' + analType)
dataOutInvalid.append(thisDataOut)
continue # to next analysis
elif analType == 'raw':
thisAnal = thisData
else:
raise AttributeError('You can only use analyses from numpy')
# add extra cols to header if necess
if len(thisAnal.shape) > 1:
for n in range(thisAnal.shape[1] - 1):
dataHead.append("")
dataAnal[thisDataOut] = thisAnal
# remove invalid analyses (e.g. average of a string)
for invalidAnal in dataOutInvalid:
dataOut.remove(invalidAnal)
return dataOut, dataAnal, dataHead
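    # --- Editor's illustrative note ---
    # Example of the dataOut expansion performed above, assuming the
    # handler stores data types 'rt' and 'ran':
    #     dataOut=['n', 'all_mean']  ->  ['ran_sum', 'ran_mean', 'rt_mean']
    # with 'ran_sum' forced into the first column and any 'order_raw'
    # entry moved to the last.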
def saveAsWideText(self, fileName,
delim=None,
matrixOnly=False,
appendFile=True,
encoding='utf-8-sig',
fileCollisionMethod='rename'):
"""Write a text file with the session, stimulus, and data values
from each trial in chronological order. Also, return a
pandas DataFrame containing same information as the file.
That is, unlike 'saveAsText' and 'saveAsExcel':
- each row comprises information from only a single trial.
- no summarising is done (such as collapsing to produce mean and
standard deviation values across trials).
This 'wide' format, as expected by R for creating dataframes, and
various other analysis programs, means that some information must
be repeated on every row.
In particular, if the trialHandler's 'extraInfo' exists, then each
entry in there occurs in every row. In builder, this will include
any entries in the 'Experiment info' field of the
'Experiment settings' dialog. In Coder, this information can be set
using something like::
myTrialHandler.extraInfo = {'SubjID': 'Joan Smith',
'Group': 'Control'}
:Parameters:
fileName:
if extension is not specified, '.csv' will be appended if
the delimiter is ',', else '.tsv' will be appended.
Can include path info.
delim:
allows the user to use a delimiter other than the default
tab ("," is popular with file extension ".csv")
matrixOnly:
outputs the data with no header row.
appendFile:
will add this output to the end of the specified file if
it already exists.
fileCollisionMethod:
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
encoding:
                The encoding to use when saving the file.
Defaults to `utf-8-sig`.
"""
if self.thisTrialN < 0 and self.thisRepN < 0:
            # if both are still < 0 we haven't started any trials yet
logging.info('TrialHandler.saveAsWideText called but no '
'trials completed. Nothing saved')
return -1
# set default delimiter if none given
if delim is None:
delim = genDelimiter(fileName)
# create the file or send to stdout
fileName = genFilenameFromDelimiter(fileName, delim)
with openOutputFile(fileName=fileName, append=appendFile,
fileCollisionMethod=fileCollisionMethod,
encoding=encoding) as f:
csvData = self.data.to_csv(sep=delim,
encoding=encoding,
columns=self.columns, # sets the order
header=(not matrixOnly),
index=False)
f.write(csvData)
if (fileName is not None) and (fileName != 'stdout'):
logging.info('saved wide-format data to %s' % f.name)
def saveAsJson(self,
fileName=None,
encoding='utf-8',
fileCollisionMethod='rename'):
"""
Serialize the object to the JSON format.
Parameters
----------
fileName: string, or None
the name of the file to create or append. Can include a relative or
absolute path. If `None`, will not write to a file, but return an
in-memory JSON object.
encoding : string, optional
The encoding to use when writing the file.
fileCollisionMethod : string
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`. Can be
either of `'rename'`, `'overwrite'`, or `'fail'`.
Notes
-----
Currently, a copy of the object is created, and the copy's .origin
attribute is set to an empty string before serializing
because loading the created JSON file would sometimes fail otherwise.
The RNG self._rng cannot be serialized as-is, so we store its state in
self._rng_state so we can restore it when loading.
"""
self_copy = copy.deepcopy(self)
self_copy._rng_state = self_copy._rng.bit_generator.state
del self_copy._rng
r = (super(TrialHandler2, self_copy)
.saveAsJson(fileName=fileName,
encoding=encoding,
fileCollisionMethod=fileCollisionMethod))
if fileName is None:
return r
def addData(self, thisType, value):
"""Add a piece of data to the current trial
"""
# store in the columns list to help ordering later
if thisType not in self.columns:
self.columns.append(thisType)
# make sure we have a thisTrial
if self.thisTrial is None:
if self.upcomingTrials:
self.thisTrial = self.upcomingTrials.pop(0)
else:
self.thisTrial = Trial(
self,
thisN=0,
thisRepN=0,
thisTrialN=0,
thisIndex=0,
data={}
)
# save the actual value in a data dict
self.thisTrial[thisType] = value
if self.getExp() is not None:
# update the experiment handler too
self.getExp().addData(f"{self.name}.{thisType}", value)
class TrialHandlerExt(TrialHandler):
"""A class for handling trial sequences in a *non-counterbalanced design*
(i.e. *oddball paradigms*). Its functions are a superset of the
class TrialHandler, and as such, can also be used for normal trial
handling.
TrialHandlerExt has the same function names for data storage facilities.
To use non-counterbalanced designs, all TrialType dict entries in the
trial list must have a key called "weight". For example, if you want
trial types A, B, C, and D to have 10, 5, 3, and 2 repetitions per
block, then the trialList can look like:
[{Name:'A', ..., weight:10},
{Name:'B', ..., weight:5},
{Name:'C', ..., weight:3},
{Name:'D', ..., weight:2}]
For experimenters using an excel or csv file for trial list, a column
called weight is appropriate for this purpose.
Calls to .next() will fetch the next trial object given to this handler,
according to the method specified (random, sequential, fullRandom).
Calls will raise a StopIteration error when all trials are exhausted.
*Authored by Suddha Sourav at BPN, Uni Hamburg - heavily borrowing
from the TrialHandler class*
"""
def __init__(self,
trialList,
nReps,
method='random',
dataTypes=None,
extraInfo=None,
seed=None,
originPath=None,
name='',
autoLog=True):
"""
:Parameters:
trialList: a simple list (or flat array) of dictionaries
specifying conditions. This can be imported from an
excel / csv file using :func:`~psychopy.data.importConditions`
For non-counterbalanced designs, each dict entry in
trialList must have a key called weight!
nReps: number of repeats for all conditions. When using a
non-counterbalanced design, nReps is analogous to the number
of blocks.
method: *'random',* 'sequential', or 'fullRandom'
When the weights are not specified:
'sequential' presents the conditions in the order they appear
in the list. 'random' will result in a shuffle of the
conditions on each repeat, but all conditions occur once
before the second repeat etc. 'fullRandom' fully randomises
the trials across repeats as well, which means you could
potentially run all trials of one condition before any trial
of another.
In the presence of weights:
'sequential' presents each trial type the number of times
specified by its weight, before moving on to the next type.
                'random' randomizes the presentation order within each
                block. 'fullRandom' shuffles trial order across weights
                and nReps, that is, a full shuffling.
dataTypes: (optional) list of names for data storage. e.g.
['corr','rt','resp']. If not provided then these will be
created as needed during calls to
:func:`~psychopy.data.TrialHandler.addData`
extraInfo: A dictionary
This will be stored alongside the data and usually describes
the experiment and subject ID, date etc.
seed: an integer
If provided then this fixes the random number generator
to use the same pattern
of trials, by seeding its startpoint
originPath: a string describing the location of the script /
experiment file path. The psydat file format will store a
copy of the experiment if possible. If `originPath==None`
is provided here then the TrialHandler will still store a
                copy of the script where it was created. If `originPath==-1`
then nothing will be stored.
:Attributes (after creation):
.data - a dictionary of numpy arrays, one for each data type
stored
.trialList - the original list of dicts, specifying the conditions
.thisIndex - the index of the current trial in the original
conditions list
.nTotal - the total number of trials that will be run
.nRemaining - the total number of trials remaining
.thisN - total trials completed so far
.thisRepN - which repeat you are currently on
.thisTrialN - which trial number *within* that repeat
.thisTrial - a dictionary giving the parameters of the current
trial
.finished - True/False for have we finished yet
.extraInfo - the dictionary of extra info as given at beginning
.origin - the contents of the script or builder experiment that
created the handler
.trialWeights - None if all weights are not specified. If all
weights are specified, then a list containing the weights
of the trial types.
"""
self.name = name
self.autoLog = autoLog
if trialList in (None, []):
# user wants an empty trialList
# which corresponds to a list with a single empty entry
self.trialList = [None]
# user has hopefully specified a filename
elif isinstance(trialList, str) and os.path.isfile(trialList):
# import conditions from that file
self.trialList = importConditions(trialList)
else:
self.trialList = trialList
# convert any entry in the TrialList into a TrialType object (with
# obj.key or obj[key] access)
for n, entry in enumerate(self.trialList):
            if isinstance(entry, dict):
self.trialList[n] = TrialType(entry)
self.nReps = nReps
        # determine per-condition weights, if every condition specifies one
if not trialList or not all('weight' in d for d in trialList):
self.trialWeights = None
self.nTotal = self.nReps * len(self.trialList)
else:
self.trialWeights = [d['weight'] for d in trialList]
self.nTotal = self.nReps * sum(self.trialWeights)
self.nRemaining = self.nTotal # subtract 1 each trial
self.method = method
self.thisRepN = 0 # records which repetition or pass we are on
self.thisTrialN = -1 # records trial number within this repetition
self.thisN = -1
self.thisIndex = 0 # index of current trial in the conditions list
self.thisTrial = []
self.finished = False
self.extraInfo = extraInfo
self.seed = seed
# create dataHandler
if self.trialWeights is None:
self.data = DataHandler(trials=self)
else:
self.data = DataHandler(trials=self,
dataShape=[sum(self.trialWeights), nReps])
if dataTypes is not None:
self.data.addDataType(dataTypes)
self.data.addDataType('ran')
self.data['ran'].mask = False # bool - all entries are valid
self.data.addDataType('order')
# generate stimulus sequence
if self.method in ('random', 'sequential', 'fullRandom'):
self.sequenceIndices = self._createSequence()
else:
self.sequenceIndices = []
self.originPath, self.origin = self.getOriginPathAndFile(originPath)
self._exp = None # the experiment handler that owns me!
def _createSequence(self):
"""Pre-generates the sequence of trial presentations (for
non-adaptive methods). This is called automatically when the
TrialHandler is initialised so doesn't need an explicit call
from the user.
The returned sequence has form indices[stimN][repN]
Example: sequential with 6 trialtypes (rows), 5 reps (cols), returns::
[[0 0 0 0 0]
[1 1 1 1 1]
[2 2 2 2 2]
[3 3 3 3 3]
[4 4 4 4 4]
[5 5 5 5 5]]
These 30 trials will be returned by .next() in the order:
0, 1, 2, 3, 4, 5, 0, 1, 2, ... ... 3, 4, 5
Example: random, with 3 trialtypes, where the weights of
conditions 0,1, and 2 are 3,2, and 1 respectively,
and a rep value of 5, might return::
[[0 1 2 0 1]
[1 0 1 1 1]
[0 2 0 0 0]
[0 0 0 1 0]
[2 0 1 0 2]
[1 1 0 2 0]]
These 30 trials will be returned by .next() in the order:
0, 1, 0, 0, 2, 1, 1, 0, 2, 0, 0, 1, ...
... 0, 2, 0 *stopIteration*
To add a new type of sequence (as of v1.65.02):
- add the sequence generation code here
- adjust "if self.method in [ ...]:" in both __init__ and .next()
- adjust allowedVals in experiment.py -> shows up in DlgLoopProperties
Note that users can make any sequence whatsoever outside of PsychoPy,
and specify sequential order; any order is possible this way.
"""
# create indices for a single rep
indices = np.asarray(self._makeIndices(self.trialList), dtype=int)
repeat = np.repeat
reshape = np.reshape
rng = np.random.default_rng(seed=self.seed)
if self.method == 'random':
seqIndices = []
if self.trialWeights is None:
                thisRepSeq = indices.flatten()  # flattened copy
else:
thisRepSeq = repeat(indices, self.trialWeights)
for thisRep in range(self.nReps):
seqIndices.append(rng.permutation(thisRepSeq))
seqIndices = np.transpose(seqIndices)
elif self.method == 'sequential':
if self.trialWeights is None:
seqIndices = repeat(indices, self.nReps, 1)
else:
_base = repeat(indices, self.trialWeights, 0)
seqIndices = repeat(_base, self.nReps, 1)
elif self.method == 'fullRandom':
if self.trialWeights is None:
# indices * nReps, flatten, shuffle, unflatten;
# only use seed once
sequential = np.repeat(indices, self.nReps, 1) # = sequential
randomFlat = rng.permutation(sequential.flat)
seqIndices = np.reshape(
randomFlat, (len(indices), self.nReps))
else:
_base = repeat(indices, self.trialWeights, 0)
sequential = repeat(_base, self.nReps, 1)
randomFlat = rng.permutation(sequential.flat)
seqIndices = reshape(randomFlat,
(sum(self.trialWeights), self.nReps))
if self.autoLog:
msg = 'Created sequence: %s, trialTypes=%d, nReps=%d, seed=%s'
vals = (self.method, len(indices), self.nReps, str(self.seed))
logging.exp(msg % vals)
return seqIndices
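    # --- Editor's illustrative note ---
    # Shape check for the weighted case described above: with
    # trialWeights == [3, 2, 1] and nReps == 5, the returned array has
    # shape (sum(weights), nReps) == (6, 5), i.e. one column per block.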
def __next__(self):
"""Advances to next trial and returns it.
Updates attributes; thisTrial, thisTrialN and thisIndex
If the trials have ended this method will raise a StopIteration error.
This can be handled with code such as::
trials = data.TrialHandler(.......)
for eachTrial in trials: # automatically stops when done
# do stuff
or::
trials = data.TrialHandler(.......)
while True: # ie forever
try:
thisTrial = trials.next()
except StopIteration: # we got a StopIteration error
break # break out of the forever loop
# do stuff here for the trial
"""
# update pointer for next trials
self.thisTrialN += 1 # number of trial this pass
self.thisN += 1 # number of trial in total
self.nRemaining -= 1
if self.trialWeights is None:
if self.thisTrialN == len(self.trialList):
# start a new repetition
self.thisTrialN = 0
self.thisRepN += 1
else:
if self.thisTrialN == sum(self.trialWeights):
# start a new repetition
self.thisTrialN = 0
self.thisRepN += 1
if self.thisRepN >= self.nReps:
# all reps complete
self.thisTrial = []
self.finished = True
        if self.finished:
self._terminate()
# fetch the trial info
if self.method in ('random', 'sequential', 'fullRandom'):
if self.trialWeights is None:
idx = self.sequenceIndices[self.thisTrialN]
self.thisIndex = idx[self.thisRepN]
self.thisTrial = self.trialList[self.thisIndex]
self.data.add('ran', 1)
self.data.add('order', self.thisN)
else:
idx = self.sequenceIndices[self.thisTrialN]
self.thisIndex = idx[self.thisRepN]
self.thisTrial = self.trialList[self.thisIndex]
self.data.add('ran', 1,
position=self.getNextTrialPosInDataHandler())
# The last call already adds a ran to this trial, so get the
# current pos now
self.data.add('order', self.thisN,
position=self.getCurrentTrialPosInDataHandler())
if self.autoLog:
msg = 'New trial (rep=%i, index=%i): %s'
vals = (self.thisRepN, self.thisTrialN, self.thisTrial)
logging.exp(msg % vals, obj=self.thisTrial)
return self.thisTrial
next = __next__ # allows user to call without a loop `val = trials.next()`
def getCurrentTrialPosInDataHandler(self):
# if there's no trial weights, then the current position is simply
# [trialIndex, nRepetition]
        if self.trialWeights is None:
            repN = sum(self.data['ran'][self.thisIndex]) - 1
            position = [self.thisIndex, repN]
else:
# if there are trial weights, the situation is slightly more
# involved, because the same index can be repeated for a number
# of times. If we had a sequential array, then the rows in
# DataHandler for that trialIndex would be from
# sum(trialWeights[begin:trialIndex]) to
# sum(trialWeights[begin:trialIndex+1]).
# if we haven't begun the experiment yet, then the last row
# of the first column is used as the current position,
# emulating what TrialHandler does. The following two lines
# also prevents calculating garbage position values in case
# the first row has a null weight
if self.thisN < 0:
return [0, -1]
firstRowIndex = sum(self.trialWeights[:self.thisIndex])
lastRowIndex = sum(self.trialWeights[:self.thisIndex + 1])
# get the number of the trial presented by summing in ran for the
# rows above and all columns
# BF-Sourav-29032021: numpy returns float, so cast to int
nThisTrialPresented = int(round(np.sum(
self.data['ran'][firstRowIndex:lastRowIndex, :])))
_tw = self.trialWeights[self.thisIndex]
dataRowThisTrial = firstRowIndex + (nThisTrialPresented - 1) % _tw
dataColThisTrial = int((nThisTrialPresented - 1) // _tw)
position = [dataRowThisTrial, dataColThisTrial]
return position
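    # --- Editor's worked example (values invented) ---
    # With trialWeights == [3, 2] and thisIndex == 1, rows 3..4 of the
    # DataHandler belong to this trial type (firstRowIndex == 3). If the
    # type has been presented 3 times, nThisTrialPresented == 3, so:
    #     dataRowThisTrial == 3 + (3 - 1) % 2 == 3
    #     dataColThisTrial == (3 - 1) // 2 == 1
    # i.e. position == [3, 1].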
def getNextTrialPosInDataHandler(self):
# if there's no trial weights, then the current position is
# simply [trialIndex, nRepetition]
        if self.trialWeights is None:
            repN = sum(self.data['ran'][self.thisIndex])
            position = [self.thisIndex, repN]
else:
# if there are trial weights, the situation is slightly more
# involved, because the same index can be repeated for a
# number of times. If we had a sequential array, then the
# rows in DataHandler for that trialIndex would
# be from sum(trialWeights[begin:trialIndex]) to
# sum(trialWeights[begin:trialIndex+1]).
firstRowIndex = sum(self.trialWeights[:self.thisIndex])
lastRowIndex = sum(self.trialWeights[:self.thisIndex + 1])
# get the number of the trial presented by summing in ran for the
# rows above and all columns
# BF-Sourav-29032021: numpy returns float, so cast to int
nThisTrialPresented = int(round(np.sum(
self.data['ran'][firstRowIndex:lastRowIndex, :])))
_tw = self.trialWeights[self.thisIndex]
dataRowThisTrial = firstRowIndex + nThisTrialPresented % _tw
dataColThisTrial = int(nThisTrialPresented // _tw)
position = [dataRowThisTrial, dataColThisTrial]
return position
def addData(self, thisType, value, position=None):
"""Add data for the current trial
"""
if self.trialWeights is None:
pos = None
else:
pos = self.getCurrentTrialPosInDataHandler()
self.data.add(thisType, value, position=pos)
if self.getExp() is not None:
# update the experiment handler too:
self.getExp().addData(thisType, value)
def _createOutputArrayData(self, dataOut):
"""This just creates the dataOut part of the output matrix.
It is called by _createOutputArray() which creates the header
line and adds the stimOut columns
"""
if self.trialWeights is not None:
# remember to use other array instead of self.data
_vals = np.arange(len(self.trialList))
idx_data = np.repeat(_vals, self.trialWeights)
# list of data headers
dataHead = []
# will store data that has been analyzed
dataAnal = dict([])
        if isinstance(dataOut, str):
            # don't do list convert or we get a list of letters
            dataOut = [dataOut]
        elif not isinstance(dataOut, list):
            dataOut = list(dataOut)
# expand any 'all' dataTypes to the full list of available dataTypes
allDataTypes = list(self.data.keys())
# treat these separately later
allDataTypes.remove('ran')
# ready to go through standard data types
dataOutNew = []
for thisDataOut in dataOut:
if thisDataOut == 'n':
# n is really just the sum of the ran trials
dataOutNew.append('ran_sum')
continue # no need to do more with this one
# then break into dataType and analysis
dataType, analType = thisDataOut.rsplit('_', 1)
if dataType == 'all':
keyType = [key + "_" + analType for key in allDataTypes]
dataOutNew.extend(keyType)
if 'order_mean' in dataOutNew:
dataOutNew.remove('order_mean')
if 'order_std' in dataOutNew:
dataOutNew.remove('order_std')
else:
dataOutNew.append(thisDataOut)
dataOut = dataOutNew
# sort so that all datatypes come together, rather than all analtypes
dataOut.sort()
# do the various analyses, keeping track of fails (e.g. mean of a
# string)
dataOutInvalid = []
# add back special data types (n and order)
if 'ran_sum' in dataOut:
# move n to the first column
dataOut.remove('ran_sum')
dataOut.insert(0, 'ran_sum')
if 'order_raw' in dataOut:
            # move order_raw to the last column
dataOut.remove('order_raw')
dataOut.append('order_raw')
# do the necessary analysis on the data
for thisDataOutN, thisDataOut in enumerate(dataOut):
dataType, analType = thisDataOut.rsplit('_', 1)
            if dataType not in self.data:
# that analysis can't be done
dataOutInvalid.append(thisDataOut)
continue
if self.trialWeights is None:
thisData = self.data[dataType]
else:
# BF_202302210_trialHandlerExt_save_nonnumeric_excel
# Allow saving non-numeric data to the excel format
# Previous case: masked arrays for numeric data
if self.data.isNumeric[dataType]:
resizedData = np.ma.masked_array(
np.zeros((len(self.trialList),
max(self.trialWeights) * self.nReps)),
np.ones((len(self.trialList),
max(self.trialWeights) * self.nReps),
dtype=bool))
for curTrialIndex in range(len(self.trialList)):
thisDataChunk = self.data[dataType][
idx_data == curTrialIndex, :]
padWidth = (max(self.trialWeights) * self.nReps -
np.prod(thisDataChunk.shape))
thisDataChunkRowPadded = np.pad(
thisDataChunk.transpose().flatten().data,
(0, padWidth), mode='constant',
constant_values=(0, 0))
thisDataChunkRowPaddedMask = np.pad(
thisDataChunk.transpose().flatten().mask,
(0, padWidth), mode='constant',
constant_values=(0, True))
thisDataChunkRow = np.ma.masked_array(
thisDataChunkRowPadded,
mask=thisDataChunkRowPaddedMask)
resizedData[curTrialIndex, :] = thisDataChunkRow
                # For non-numeric data, PsychoPy uses plain object arrays
                # instead of masked arrays. Adjust accordingly, filling
                # with '--' instead of masks
                else:
resizedData = np.array(np.zeros((len(self.trialList),
max(self.trialWeights) *
self.nReps)), dtype='O')
for curTrialIndex in range(len(self.trialList)):
thisDataChunk = self.data[dataType][
idx_data == curTrialIndex, :]
padWidth = (max(self.trialWeights) * self.nReps -
np.prod(thisDataChunk.shape))
thisDataChunkRowPadded = np.pad(
thisDataChunk.transpose().flatten().data,
(0, padWidth), mode='constant',
constant_values=('--', '--'))
resizedData[curTrialIndex, :] = thisDataChunkRowPadded
thisData = resizedData
# set the header
dataHead.append(dataType + '_' + analType)
# analyse thisData using numpy module
if analType in dir(np):
try:
# this will fail if we try to take mean of a string
if analType == 'std':
thisAnal = np.std(thisData, axis=1, ddof=0)
# normalise by N-1 instead. This should work by
# setting ddof=1 but doesn't as of 08/2010
# (because of using a masked array?)
N = thisData.shape[1]
if N == 1:
thisAnal *= 0 # prevent a divide-by-zero error
else:
sqrt = np.sqrt
thisAnal = thisAnal * sqrt(N) / sqrt(N - 1)
else:
                        thisAnal = getattr(np, analType)(thisData, 1)
except Exception:
# that analysis doesn't work
dataHead.remove(dataType + '_' + analType)
dataOutInvalid.append(thisDataOut)
continue # to next analysis
elif analType == 'raw':
thisAnal = thisData
else:
raise AttributeError('You can only use analyses from numpy')
# add extra cols to header if necess
if len(thisAnal.shape) > 1:
for n in range(thisAnal.shape[1] - 1):
dataHead.append("")
dataAnal[thisDataOut] = thisAnal
# remove invalid analyses (e.g. average of a string)
for invalidAnal in dataOutInvalid:
dataOut.remove(invalidAnal)
return dataOut, dataAnal, dataHead
def saveAsWideText(self,
fileName,
delim='\t',
matrixOnly=False,
appendFile=True,
encoding='utf-8-sig',
fileCollisionMethod='rename'):
"""Write a text file with the session, stimulus, and data values
from each trial in chronological order.
That is, unlike 'saveAsText' and 'saveAsExcel':
- each row comprises information from only a single trial.
- no summarizing is done (such as collapsing to produce mean and
standard deviation values across trials).
This 'wide' format, as expected by R for creating dataframes, and
various other analysis programs, means that some information must
be repeated on every row.
In particular, if the trialHandler's 'extraInfo' exists, then each
entry in there occurs in every row. In builder, this will include
any entries in the 'Experiment info' field of the
'Experiment settings' dialog. In Coder, this information can be set
using something like::
myTrialHandler.extraInfo = {'SubjID':'Joan Smith',
'Group':'Control'}
:Parameters:
fileName:
if extension is not specified, '.csv' will be appended if
the delimiter is ',', else '.txt' will be appended.
Can include path info.
delim:
allows the user to use a delimiter other than the default
tab ("," is popular with file extension ".csv")
matrixOnly:
outputs the data with no header row.
appendFile:
will add this output to the end of the specified file if
it already exists.
fileCollisionMethod:
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
encoding:
                The encoding to use when saving the file.
Defaults to `utf-8-sig`.
"""
if self.thisTrialN < 0 and self.thisRepN < 0:
            # if both are still < 0 we haven't started any trials yet
logging.info('TrialHandler.saveAsWideText called but no trials'
' completed. Nothing saved')
return -1
# set default delimiter if none given
if delim is None:
delim = genDelimiter(fileName)
# create the file or send to stdout
fileName = genFilenameFromDelimiter(fileName, delim)
f = openOutputFile(fileName=fileName, append=appendFile,
fileCollisionMethod=fileCollisionMethod,
encoding=encoding)
# collect parameter names related to the stimuli:
if self.trialList[0]:
header = list(self.trialList[0].keys())
else:
header = []
# and then add parameter names related to data (e.g. RT)
header.extend(self.data.dataTypes)
# loop through each trial, gathering the actual values:
dataOut = []
trialCount = 0
# total number of trials = number of trialtypes * number of
# repetitions:
repsPerType = {}
for rep in range(self.nReps):
if self.trialWeights is None:
nRows = len(self.trialList)
else:
nRows = sum(self.trialWeights)
for trialN in range(nRows):
# find out what trial type was on this trial
trialTypeIndex = self.sequenceIndices[trialN, rep]
# determine which repeat it is for this trial
if trialTypeIndex not in repsPerType:
repsPerType[trialTypeIndex] = 0
else:
repsPerType[trialTypeIndex] += 1
# create a dictionary representing each trial:
# this is wide format, so we want fixed information (e.g.
# subject ID, date, etc) repeated every line if it exists:
                if self.extraInfo is not None:
nextEntry = self.extraInfo.copy()
else:
nextEntry = {}
# add a trial number so the original order of the data can
# always be recovered if sorted during analysis:
trialCount += 1
nextEntry["TrialNumber"] = trialCount
# what repeat are we on for this trial type?
trep = repsPerType[trialTypeIndex]
# collect the value from each trial of the vars in the header:
tti = trialTypeIndex
for prmName in header:
# the header includes both trial and data variables, so
# need to check before accessing:
if self.trialList[tti] and prmName in self.trialList[tti]:
nextEntry[prmName] = self.trialList[tti][prmName]
elif prmName in self.data:
if self.trialWeights is None:
nextEntry[prmName] = self.data[prmName][tti][trep]
else:
firstRowIndex = sum(self.trialWeights[:tti])
_tw = self.trialWeights[tti]
row = firstRowIndex + rep % _tw
col = int(rep // _tw)
nextEntry[prmName] = self.data[prmName][row][col]
else:
# allow a null value if this parameter wasn't
# explicitly stored on this trial:
nextEntry[prmName] = ''
# store this trial's data
dataOut.append(nextEntry)
# get the extra 'wide' parameter names into the header line:
header.insert(0, "TrialNumber")
if self.extraInfo is not None:
for key in self.extraInfo:
header.insert(0, key)
# write a header row:
if not matrixOnly:
f.write(delim.join(header) + '\n')
# write the data matrix:
for trial in dataOut:
line = delim.join([str(trial[prm]) for prm in header])
f.write(line + '\n')
if (fileName is not None) and (fileName != 'stdout'):
f.close()
logging.info('saved wide-format data to %s' % f.name)
def saveAsJson(self,
fileName=None,
encoding='utf-8',
fileCollisionMethod='rename'):
raise NotImplementedError('Not implemented for TrialHandlerExt.')
| 99,792 | Python | .py | 2,132 | 33.273452 | 115 | 0.565428 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
5,357 | staircase.py | psychopy_psychopy/psychopy/data/staircase.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import pickle
import copy
import warnings
import numpy as np
from packaging.version import Version
import psychopy
from psychopy import logging
from psychopy.tools.filetools import openOutputFile, genDelimiter
from psychopy.tools.fileerrortools import handleFileCollision
from psychopy.contrib.quest import QuestObject
from psychopy.contrib.psi import PsiObject
from .base import _BaseTrialHandler, _ComparisonMixin
from .utils import _getExcelCellName
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
try:
# import openpyxl
import openpyxl
if Version(openpyxl.__version__) >= Version('2.4.0'):
# openpyxl moved get_column_letter to utils.cell
from openpyxl.utils.cell import get_column_letter
else:
from openpyxl.cell import get_column_letter
from openpyxl.reader.excel import load_workbook
haveOpenpyxl = True
except ImportError:
haveOpenpyxl = False
class StairHandler(_BaseTrialHandler):
"""Class to handle smoothly the selection of the next trial
and report current values etc.
Calls to next() will fetch the next object given to this
handler, according to the method specified.
See ``Demos >> ExperimentalControl >> JND_staircase_exp.py``
The staircase will terminate when *nTrials* AND *nReversals* have
been exceeded. If *stepSizes* was an array and has been exceeded
before nTrials is exceeded then the staircase will continue
to reverse.
*nUp* and *nDown* are always considered as 1 until the first reversal
is reached. The values entered as arguments are then used.
"""
def __init__(self,
startVal,
nReversals=None,
stepSizes=4, # dB stepsize
nTrials=0,
nUp=1,
nDown=3, # correct responses before stim goes down
applyInitialRule=True,
extraInfo=None,
method='2AFC',
stepType='db',
minVal=None,
maxVal=None,
originPath=None,
name='',
autoLog=True,
**kwargs):
"""
:Parameters:
startVal:
The initial value for the staircase.
nReversals:
The minimum number of reversals permitted.
If `stepSizes` is a list, but the minimum number of
reversals to perform, `nReversals`, is less than the
length of this list, PsychoPy will automatically increase
the minimum number of reversals and emit a warning.
This minimum number of reversals is always set to be
greater than 0.
stepSizes:
The size of steps as a single value or a list (or array).
For a single value the step size is fixed. For an array or
list the step size will progress to the next entry
at each reversal.
nTrials:
The minimum number of trials to be conducted. If the
staircase has not reached the required number of reversals
then it will continue.
nUp:
The number of 'incorrect' (or 0) responses before the
staircase level increases.
nDown:
The number of 'correct' (or 1) responses before the
staircase level decreases.
applyInitialRule : bool
Whether to apply a 1-up/1-down rule until the first reversal
point (if `True`), before switching to the specified up/down
rule.
extraInfo:
A dictionary (typically) that will be stored along with
collected data using
:func:`~psychopy.data.StairHandler.saveAsPickle` or
:func:`~psychopy.data.StairHandler.saveAsText` methods.
method:
Not used and may be deprecated in future releases.
stepType: *'db'*, 'lin', 'log'
The type of steps that should be taken each time. 'lin'
will simply add or subtract that amount each step, 'db'
and 'log' will step by a certain number of decibels or
log units (note that this will prevent your value ever
reaching zero or less)
minVal: *None*, or a number
The smallest legal value for the staircase, which can be
used to prevent it reaching impossible contrast values,
for instance.
maxVal: *None*, or a number
The largest legal value for the staircase, which can be
used to prevent it reaching impossible contrast values,
for instance.
Additional keyword arguments will be ignored.
:Notes:
The additional keyword arguments `**kwargs` might for example be
passed by the `MultiStairHandler`, which expects a `label` keyword
for each staircase. These parameters are to be ignored by the
StairHandler.
"""
self.name = name
self.startVal = startVal
self.nUp = nUp
self.nDown = nDown
self.applyInitialRule = applyInitialRule
self.extraInfo = extraInfo
self.method = method
self.stepType = stepType
try:
self.stepSizes = list(stepSizes)
except TypeError:
# stepSizes is not array-like / iterable, i.e., a scalar.
self.stepSizes = [stepSizes]
        self._variableStep = len(self.stepSizes) > 1
self.stepSizeCurrent = self.stepSizes[0]
if nReversals is None:
self.nReversals = len(self.stepSizes)
elif len(self.stepSizes) > nReversals:
msg = ('Increasing number of minimum required reversals to the '
'number of step sizes, (%i).' % len(self.stepSizes))
logging.warn(msg)
self.nReversals = len(self.stepSizes)
else:
self.nReversals = nReversals
        # to terminate, nTrials must be exceeded and nReversals reached
self.nTrials = nTrials
self.finished = False
self.thisTrialN = -1
# a dict of lists where each should have the same length as the main
# data:
self.otherData = {}
self.data = []
self.intensities = []
self.reversalPoints = []
self.reversalIntensities = []
        # direction of the current run of steps; unknown at the start:
self.currentDirection = 'start'
# correct since last stim change (minus are incorrect):
self.correctCounter = 0
self.intensity = self.startVal
self.minVal = minVal
self.maxVal = maxVal
self.autoLog = autoLog
# a flag for the 1-up 1-down initial rule:
self.initialRule = False
# self.originPath and self.origin (the contents of the origin file)
self.originPath, self.origin = self.getOriginPathAndFile(originPath)
self._exp = None # the experiment handler that owns me!
def __iter__(self):
return self
@property
def intensity(self):
"""The intensity (level) of the current staircase"""
return self._nextIntensity
@intensity.setter
def intensity(self, intensity):
"""The intensity (level) of the current staircase"""
self._nextIntensity = intensity
def addResponse(self, result, intensity=None):
"""Add a 1 or 0 to signify a correct / detected or
incorrect / missed trial.
This is essential to advance the staircase to a new intensity level!
Supplying an `intensity` value here indicates that you did not use
the recommended intensity in your last trial and the staircase will
replace its recorded value with the one you supplied here.
"""
self.data.append(result)
# if needed replace the existing intensity with this custom one
        if intensity is not None:
self.intensities.pop()
self.intensities.append(intensity)
# increment the counter of correct scores
if result == 1:
if len(self.data) > 1 and self.data[-2] == result:
# increment if on a run
self.correctCounter += 1
else:
# or reset
self.correctCounter = 1
else:
if len(self.data) > 1 and self.data[-2] == result:
# increment if on a run
self.correctCounter -= 1
else:
# or reset
self.correctCounter = -1
# add the current data to experiment if poss
if self.getExp() is not None: # update the experiment handler too
self.getExp().addData(self.name + ".response", result)
self.calculateNextIntensity()
def addOtherData(self, dataName, value):
"""Add additional data to the handler, to be tracked alongside
the result data but not affecting the value of the staircase
"""
        if dataName not in self.otherData:  # init the list
if self.thisTrialN > 0:
# might have run trials already
self.otherData[dataName] = [None] * (self.thisTrialN - 1)
else:
self.otherData[dataName] = []
# then add current value
self.otherData[dataName].append(value)
# add the current data to experiment if poss
        if self.getExp() is not None:  # update the experiment handler too
self.getExp().addData(dataName, value)
def addData(self, result, intensity=None):
"""Deprecated since 1.79.00: This function name was ambiguous.
Please use one of these instead:
* .addResponse(result, intensity)
        * .addOtherData('dataName', value)
"""
self.addResponse(result, intensity)
def calculateNextIntensity(self):
"""Based on current intensity, counter of correct responses, and
current direction.
"""
if not self.reversalIntensities and self.applyInitialRule:
# always using a 1-down, 1-up rule initially
if self.data[-1] == 1: # last answer correct
# got it right
if self.currentDirection == 'up':
reversal = True
else:
# direction is 'down' or 'start'
reversal = False
self.currentDirection = 'down'
else:
# got it wrong
if self.currentDirection == 'down':
reversal = True
else:
# direction is 'up' or 'start'
reversal = False
# now:
self.currentDirection = 'up'
elif self.correctCounter >= self.nDown:
# n right, time to go down!
# 'start' covers `applyInitialRule=False`.
if self.currentDirection not in ['start', 'down']:
reversal = True
else:
reversal = False
self.currentDirection = 'down'
elif self.correctCounter <= -self.nUp:
# n wrong, time to go up!
# note current direction
# 'start' covers `applyInitialRule=False`.
if self.currentDirection not in ['start', 'up']:
reversal = True
else:
reversal = False
self.currentDirection = 'up'
else:
# same as previous trial
reversal = False
# add reversal info
if reversal:
self.reversalPoints.append(self.thisTrialN)
if not self.reversalIntensities and self.applyInitialRule:
self.initialRule = True
self.reversalIntensities.append(self.intensities[-1])
# test if we're done
if (len(self.reversalIntensities) >= self.nReversals and
len(self.intensities) >= self.nTrials):
self.finished = True
# new step size if necessary
if reversal and self._variableStep:
if len(self.reversalIntensities) >= len(self.stepSizes):
# we've gone beyond the list of step sizes
# so just use the last one
self.stepSizeCurrent = self.stepSizes[-1]
else:
_sz = len(self.reversalIntensities)
self.stepSizeCurrent = self.stepSizes[_sz]
# apply new step size
if ((not self.reversalIntensities or self.initialRule) and
self.applyInitialRule):
self.initialRule = False # reset the flag
if self.data[-1] == 1:
self._intensityDec()
else:
self._intensityInc()
elif self.correctCounter >= self.nDown:
# n right, so going down
self._intensityDec()
elif self.correctCounter <= -self.nUp:
# n wrong, so going up
self._intensityInc()
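    # --- Editor's worked trace (illustrative) ---
    # With nUp=1, nDown=3 and applyInitialRule=True, the response
    # sequence 1, 1, 0, 1, 1, 1 yields: down, down (initial 1-down
    # rule), up on the 0 (first reversal), no change for the next two
    # correct responses, then down again on the third consecutive
    # correct response (second reversal, 3-down rule now active).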
def __next__(self):
"""Advances to next trial and returns it.
Updates attributes; `thisTrial`, `thisTrialN` and `thisIndex`.
If the trials have ended, calling this method will raise a
StopIteration error. This can be handled with code such as::
staircase = data.StairHandler(.......)
for eachTrial in staircase: # automatically stops when done
# do stuff
or::
staircase = data.StairHandler(.......)
while True: # ie forever
try:
thisTrial = staircase.next()
except StopIteration: # we got a StopIteration error
break # break out of the forever loop
# do stuff here for the trial
"""
if not self.finished:
# check that all 'otherData' is aligned with current trialN
for key in self.otherData:
while len(self.otherData[key]) < self.thisTrialN:
self.otherData[key].append(None)
# update pointer for next trial
self.thisTrialN += 1
self.intensities.append(self._nextIntensity)
return self._nextIntensity
else:
self._terminate()
next = __next__ # allows user to call without a loop `val = trials.next()`
def _intensityInc(self):
"""increment the current intensity and reset counter
"""
if self.stepType == 'db':
self._nextIntensity *= 10.0**(self.stepSizeCurrent/20.0)
elif self.stepType == 'log':
self._nextIntensity *= 10.0**self.stepSizeCurrent
elif self.stepType == 'lin':
self._nextIntensity += self.stepSizeCurrent
# check we haven't gone out of the legal range
if (self.maxVal is not None) and (self._nextIntensity > self.maxVal):
self._nextIntensity = self.maxVal
self.correctCounter = 0
def _intensityDec(self):
"""decrement the current intensity and reset counter
"""
        if self.stepType == 'db':
            self._nextIntensity /= 10.0**(self.stepSizeCurrent/20.0)
        elif self.stepType == 'log':
            self._nextIntensity /= 10.0**self.stepSizeCurrent
        elif self.stepType == 'lin':
            self._nextIntensity -= self.stepSizeCurrent
self.correctCounter = 0
# check we haven't gone out of the legal range
if (self.minVal is not None) and (self._nextIntensity < self.minVal):
self._nextIntensity = self.minVal
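    # --- Editor's note on the step arithmetic (illustrative) ---
    # For stepType='db', a step of s dB scales the level by
    # 10 ** (s / 20): e.g. stepSizeCurrent == 4 gives a factor of
    # 10 ** 0.2 ~= 1.585, so 'up' multiplies and 'down' divides the
    # current intensity by ~1.585. 'log' steps scale by 10 ** s, and
    # 'lin' steps add or subtract s directly.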
def saveAsText(self, fileName,
delim=None,
matrixOnly=False,
fileCollisionMethod='rename',
encoding='utf-8-sig'):
"""Write a text file with the data
:Parameters:
fileName: a string
The name of the file, including path if needed. The extension
`.tsv` will be added if not included.
delim: a string
                the delimiter to be used (e.g. '\t' for tab-delimited,
                ',' for csv files)
matrixOnly: True/False
If True, prevents the output of the `extraInfo` provided
at initialisation.
fileCollisionMethod:
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
encoding:
                The encoding to use when saving the file.
Defaults to `utf-8-sig`.
"""
if self.thisTrialN < 1:
if self.autoLog:
logging.debug('StairHandler.saveAsText called but no '
'trials completed. Nothing saved')
return -1
# set default delimiter if none given
if delim is None:
delim = genDelimiter(fileName)
# create the file or send to stdout
f = openOutputFile(
fileName, append=False,
fileCollisionMethod=fileCollisionMethod, encoding=encoding)
# write the data
reversalStr = str(self.reversalIntensities)
reversalStr = reversalStr.replace(',', delim)
reversalStr = reversalStr.replace('[', '')
reversalStr = reversalStr.replace(']', '')
f.write('\nreversalIntensities=\t%s\n' % reversalStr)
reversalPts = str(self.reversalPoints)
reversalPts = reversalPts.replace(',', delim)
reversalPts = reversalPts.replace('[', '')
reversalPts = reversalPts.replace(']', '')
f.write('reversalIndices=\t%s\n' % reversalPts)
rawIntens = str(self.intensities)
rawIntens = rawIntens.replace(',', delim)
rawIntens = rawIntens.replace('[', '')
rawIntens = rawIntens.replace(']', '')
f.write('\nintensities=\t%s\n' % rawIntens)
responses = str(self.data)
responses = responses.replace(',', delim)
responses = responses.replace('[', '')
responses = responses.replace(']', '')
f.write('responses=\t%s\n' % responses)
# add self.extraInfo
if self.extraInfo is not None and not matrixOnly:
            strInfo = str(self.extraInfo)
            # dict begins and ends with {} - remove them
            strInfo = strInfo[1:-1]
            # separate each value from its key name
            strInfo = strInfo.replace(': ', ':\n')
            # separate values from each other
            strInfo = strInfo.replace(',', '\n')
            strInfo = strInfo.replace('array([ ', '')
            strInfo = strInfo.replace('])', '')
f.write('\n%s\n' % strInfo)
f.write("\n")
if f != sys.stdout:
f.close()
if self.autoLog:
logging.info('saved data to %s' % f.name)
def saveAsExcel(self, fileName, sheetName='data',
matrixOnly=False, appendFile=True,
fileCollisionMethod='rename'):
"""Save a summary data file in Excel OpenXML format workbook
(:term:`xlsx`) for processing in most spreadsheet packages.
This format is compatible with versions of Excel (2007 or greater)
        and with OpenOffice (>=3.0).
It has the advantage over the simpler text files
(see :func:`TrialHandler.saveAsText()` ) that data can be stored
in multiple named sheets within the file. So you could have a
single file named after your experiment and then have one worksheet
for each participant. Or you could have one file for each participant
and then multiple sheets for repeated sessions etc.
The file extension `.xlsx` will be added if not given already.
The file will contain a set of values specifying the staircase level
('intensity') at each reversal, a list of reversal indices
(trial numbers), the raw staircase / intensity level on *every*
trial and the corresponding responses of the participant on every
trial.
:Parameters:
fileName: string
the name of the file to create or append. Can include
relative or absolute path.
sheetName: string
the name of the worksheet within the file
matrixOnly: True or False
If set to True then only the data itself will be output
(no additional info)
appendFile: True or False
If False any existing file with this name will be
overwritten. If True then a new worksheet will be appended.
If a worksheet already exists with that name a number will
be added to make it unique.
fileCollisionMethod: string
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
This is ignored if ``appendFile`` is ``True``.
"""
if self.thisTrialN < 1:
if self.autoLog:
logging.debug('StairHandler.saveAsExcel called but no '
'trials completed. Nothing saved')
return -1
# NB this was based on the limited documentation for openpyxl v1.0
if not haveOpenpyxl:
raise ImportError('openpyxl is required for saving files in '
'Excel (xlsx) format, but was not found.')
# import necessary subpackages - they are small so won't matter to do
# it here
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook
if not fileName.endswith('.xlsx'):
fileName += '.xlsx'
# create or load the file
if appendFile and os.path.isfile(fileName):
wb = load_workbook(fileName)
newWorkbook = False
else:
if not appendFile:
# the file exists but we're not appending, will be overwritten
fileName = handleFileCollision(fileName,
fileCollisionMethod)
wb = Workbook()
wb.properties.creator = 'PsychoPy' + psychopy.__version__
newWorkbook = True
if newWorkbook:
ws = wb.worksheets[0]
ws.title = sheetName
else:
ws = wb.create_sheet()
ws.title = sheetName
# write the data
# reversals data
ws['A1'] = 'Reversal Intensities'
ws['B1'] = 'Reversal Indices'
for revN, revIntens in enumerate(self.reversalIntensities):
ws.cell(column=1, row=revN+2,
value=u"{}".format(revIntens))
ws.cell(column=2, row=revN+2,
value=u"{}".format(self.reversalPoints[revN]))
# trials data
ws['C1'] = 'All Intensities'
ws['D1'] = 'All Responses'
for intenN, intensity in enumerate(self.intensities):
ws.cell(column=3, row=intenN+2,
value=u"{}".format(intensity))
ws.cell(column=4, row=intenN+2,
value=u"{}".format(self.data[intenN]))
# add other data
col = 5
if self.otherData is not None:
# for varName in self.otherData:
for key, val in list(self.otherData.items()):
ws.cell(column=col, row=1,
value=u"{}".format(key))
for oDatN in range(len(self.otherData[key])):
ws.cell(column=col, row=oDatN+2,
value=u"{}".format(self.otherData[key][oDatN]))
col += 1
# add self.extraInfo
if self.extraInfo is not None and not matrixOnly:
ws.cell(column=col, row=1,
value='extraInfo')
rowN = 2
for key, val in list(self.extraInfo.items()):
ws.cell(column=col, row=rowN,
value=u"{}:".format(key))
                _cell = _getExcelCellName(col=col + 1, row=rowN)
                ws.cell(column=col + 1, row=rowN,
                        value=u"{}".format(val))
rowN += 1
wb.save(filename=fileName)
if self.autoLog:
logging.info('saved data to %s' % fileName)
def saveAsPickle(self, fileName, fileCollisionMethod='rename'):
"""Basically just saves a copy of self (with data) to a pickle file.
This can be reloaded if necessary and further analyses carried out.
:Parameters:
fileCollisionMethod: Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
"""
if self.thisTrialN < 1:
if self.autoLog:
logging.debug('StairHandler.saveAsPickle called but no '
'trials completed. Nothing saved')
return -1
# otherwise use default location
if not fileName.endswith('.psydat'):
fileName += '.psydat'
with openOutputFile(fileName=fileName, append=False,
fileCollisionMethod=fileCollisionMethod) as f:
pickle.dump(self, f)
if (fileName is not None) and (fileName != 'stdout'):
logging.info('saved data to %s' % f.name)
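# --- Editor's illustrative sketch (not part of the original source) ---
# A minimal staircase loop with a simulated observer; real experiments
# would collect a response instead of computing one:
def _demoStairHandler():
    stairs = StairHandler(startVal=0.8, nTrials=10, nUp=1, nDown=3,
                          stepSizes=[0.1, 0.05], stepType='lin',
                          minVal=0, maxVal=1)
    for thisIntensity in stairs:
        simulatedResp = int(thisIntensity > 0.5)  # fake threshold at 0.5
        stairs.addResponse(simulatedResp)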
class QuestObject_(QuestObject, _ComparisonMixin):
"""A QuestObject that implements the == and != operators.
"""
pass
class QuestHandler(StairHandler):
r"""Class that implements the Quest algorithm for quick measurement of
psychophysical thresholds.
Uses Andrew Straw's `QUEST <http://www.visionegg.org/Quest>`_, which is a
Python port of Denis Pelli's Matlab code.
Measures threshold using a Weibull psychometric function. Currently, it is
not possible to use a different psychometric function.
    The Weibull psychometric function is given by the formula
    :math:`\Psi(x) = \delta \gamma + (1 - \delta) [1 - (1 - \gamma)\, \exp(-10^{\beta (x - T + \epsilon)})]`
    Here, :math:`x` is an intensity or a contrast (in log10 units), and :math:`T` is the estimated threshold.
    Quest internally shifts the psychometric function such that the intensity at the user-specified
    threshold performance level ``pThreshold`` (e.g., 50% in a yes-no or 75% in a 2-AFC task) is equal to 0.
    The parameter :math:`\epsilon` is responsible for this shift, and is determined automatically based on the
    specified ``pThreshold`` value. It is the parameter Watson & Pelli (1983) introduced to perform measurements
    at the "optimal sweat factor". Assuming your ``QuestHandler`` instance is called ``q``, you can retrieve this
    value via ``q.epsilon``.
**Example**::
# setup display/window
...
# create stimulus
stimulus = visual.RadialStim(win=win, tex='sinXsin', size=1,
pos=[0,0], units='deg')
...
# create staircase object
# trying to find out the contrast where subject gets 63% correct
# if wanted to do a 2AFC then the defaults for pThreshold and gamma
# are good. As start value, we'll use 50% contrast, with SD = 20%
staircase = data.QuestHandler(0.5, 0.2,
pThreshold=0.63, gamma=0.01,
nTrials=20, minVal=0, maxVal=1)
...
        for thisContrast in staircase:
# setup stimulus
stimulus.setContrast(thisContrast)
stimulus.draw()
win.flip()
core.wait(0.5)
# get response
...
# inform QUEST of the response, needed to calculate next level
staircase.addResponse(thisResp)
...
# can now access 1 of 3 suggested threshold levels
staircase.mean()
staircase.mode()
staircase.quantile(0.5) # gets the median
"""
def __init__(self,
startVal,
startValSd,
pThreshold=0.82,
nTrials=None,
stopInterval=None,
method='quantile',
beta=3.5,
delta=0.01,
gamma=0.5,
grain=0.01,
range=None,
extraInfo=None,
minVal=None,
maxVal=None,
staircase=None,
originPath=None,
name='',
autoLog=True,
**kwargs):
"""
Typical values for pThreshold are:
* 0.82 which is equivalent to a 3 up 1 down standard staircase
* 0.63 which is equivalent to a 1 up 1 down standard staircase
(and might want gamma=0.01)
        The variable(s) nTrials and/or stopInterval must be specified.
`beta`, `delta`, and `gamma` are the parameters of the Weibull
psychometric function.
:Parameters:
startVal:
Prior threshold estimate or your initial guess threshold.
startValSd:
Standard deviation of your starting guess threshold.
Be generous with the sd as QUEST will have trouble finding
the true threshold if it's more than one sd from your
initial guess.
            pThreshold:
Your threshold criterion expressed as probability of
response==1. An intensity offset is introduced into the
psychometric function so that the threshold (i.e.,
the midpoint of the table) yields pThreshold.
nTrials: *None* or a number
The maximum number of trials to be conducted.
stopInterval: *None* or a number
The minimum 5-95% confidence interval required in the
threshold estimate before stopping. If both this and
nTrials are specified, whichever happens first will
determine when Quest stops.
method: *'quantile'*, 'mean', 'mode'
The method used to determine the next threshold to test.
If you want to get a specific threshold level at the end
of your staircasing, please use the quantile, mean, and
mode methods directly.
beta: *3.5* or a number
Controls the steepness of the psychometric function.
delta: *0.01* or a number
The fraction of trials on which the observer presses blindly.
gamma: *0.5* or a number
The fraction of trials that will generate response 1 when
intensity=-Inf.
grain: *0.01* or a number
The quantization of the internal table.
range: *None*, or a number
The intensity difference between the largest and smallest
intensity that the internal table can store. This interval
will be centered on the initial guess (``startVal``). QUEST assumes
that intensities outside of this range have zero prior
probability (i.e., they are impossible).
extraInfo:
A dictionary (typically) that will be stored along with
collected data using
:func:`~psychopy.data.StairHandler.saveAsPickle` or
:func:`~psychopy.data.StairHandler.saveAsText` methods.
minVal: *None*, or a number
The smallest legal value for the staircase, which can be
used to prevent it reaching impossible contrast values,
for instance.
maxVal: *None*, or a number
The largest legal value for the staircase, which can be
used to prevent it reaching impossible contrast values,
for instance.
staircase: *None* or StairHandler
Can supply a staircase object with intensities and results.
Might be useful to give the quest algorithm more information
if you have it. You can also call the importData function
directly.
Additional keyword arguments will be ignored.
:Notes:
The additional keyword arguments `**kwargs` might for example be
passed by the `MultiStairHandler`, which expects a `label` keyword
for each staircase. These parameters are to be ignored by the
StairHandler.
"""
StairHandler.__init__(
self, startVal, nTrials=nTrials, extraInfo=extraInfo,
method=method, stepType='lin', minVal=minVal,
maxVal=maxVal, name=name, autoLog=autoLog)
self.startVal = startVal
self.startValSd = startValSd
self.pThreshold = pThreshold
self.stopInterval = stopInterval
# NB there is also _nextIntensity
self._questNextIntensity = startVal
self._range = range
# Create Quest object
self._quest = QuestObject_(
startVal, startValSd, pThreshold, beta, delta, gamma,
grain=grain, range=self._range)
# Import any old staircase data
if staircase is not None:
self.importData(staircase.intensities, staircase.data)
# store the origin file and its path
self.originPath, self.origin = self.getOriginPathAndFile(originPath)
self._exp = None
self.autoLog = autoLog
# NB we inherit self.intensity from StairHandler
@property
def beta(self):
return self._quest.beta
@property
def gamma(self):
return self._quest.gamma
@property
def delta(self):
return self._quest.delta
@property
def epsilon(self):
return self._quest.xThreshold
@property
def grain(self):
return self._quest.grain
@property
def range(self):
return self._range
def addResponse(self, result, intensity=None):
"""Add a 1 or 0 to signify a correct / detected or
incorrect / missed trial.
Supplying an `intensity` value here indicates that you did not use the
recommended intensity in your last trial and the staircase will
replace its recorded value with the one you supplied here.
"""
# Process user supplied intensity
if intensity is None:
intensity = self._questNextIntensity
else:
# Update the intensity.
#
# During the first trial, self.intensities will be of length 0,
# so pop() would not work.
if len(self.intensities) != 0:
self.intensities.pop() # remove the auto-generated one
self.intensities.append(intensity)
# Update quest
self._quest.update(intensity, result)
# Update other things
self.data.append(result)
# add the current data to experiment if poss
if self.getExp() is not None: # update the experiment handler too
self.getExp().addData(self.name + ".response", result)
self._checkFinished()
if not self.finished:
self.calculateNextIntensity()
def importData(self, intensities, results):
"""import some data which wasn't previously given to the quest
algorithm
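Example (a sketch; the lists are illustrative)::
q.importData(intensities=[0.4, 0.5, 0.6], results=[0, 1, 1])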
"""
# NOT SURE ABOUT CLASS TO USE FOR RAISING ERROR
if len(intensities) != len(results):
raise AttributeError("length of intensities and results input "
"must be the same")
self.incTrials(len(intensities))
for intensity, result in zip(intensities, results):
try:
next(self)
self.addResponse(result, intensity)
except StopIteration:
# would get a stop iteration if stopInterval set
pass # TODO: might want to check if nTrials is still good
def calculateNextIntensity(self):
"""based on current intensity and counter of correct responses
"""
self._intensity()
# Check we haven't gone out of the legal range
if self.maxVal is not None and self._nextIntensity > self.maxVal:
self._nextIntensity = self.maxVal
elif self.minVal is not None and self._nextIntensity < self.minVal:
self._nextIntensity = self.minVal
self._questNextIntensity = self._nextIntensity
def _intensity(self):
"""assigns the next intensity level"""
if self.method == 'mean':
self._questNextIntensity = self._quest.mean()
elif self.method == 'mode':
self._questNextIntensity = self._quest.mode()[0]
elif self.method == 'quantile':
self._questNextIntensity = self._quest.quantile()
else:
raise TypeError(f"Requested method for QUEST: {self.method} is not a valid method. Please use mean, mode or quantile")
self._nextIntensity = self._questNextIntensity
def mean(self):
"""mean of Quest posterior pdf
"""
return self._quest.mean()
def sd(self):
"""standard deviation of Quest posterior pdf
"""
return self._quest.sd()
def mode(self):
"""mode of Quest posterior pdf
"""
return self._quest.mode()[0]
def quantile(self, p=None):
"""quantile of Quest posterior pdf
"""
return self._quest.quantile(quantileOrder=p)
def confInterval(self, getDifference=False):
"""
Return estimate for the 5%--95% confidence interval (CI).
:Parameters:
getDifference (bool)
If ``True``, return the width of the confidence interval
(95% - 5% percentiles). If ``False``, return an NumPy array
with estimates for the 5% and 95% boundaries.
:Returns:
scalar or array of length 2.
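:Example:
Assuming a ``QuestHandler`` instance ``q``::
lower, upper = q.confInterval() # 5% and 95% quantiles
ciWidth = q.confInterval(getDifference=True)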
"""
interval = [self.quantile(0.05), self.quantile(0.95)]
if getDifference:
return abs(interval[0] - interval[1])
else:
return interval
def incTrials(self, nNewTrials):
"""increase maximum number of trials
Updates attribute: `nTrials`
"""
self.nTrials += nNewTrials
def simulate(self, tActual):
"""returns a simulated user response to the next intensity level
presented by Quest, need to supply the actual threshold level
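For example, to simulate an observer whose true threshold is 0.3
(the value is illustrative)::
resp = staircase.simulate(tActual=0.3)
staircase.addResponse(resp)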
"""
# Current estimated intensity level
if self.method == 'mean':
tTest = self._quest.mean()
elif self.method == 'mode':
tTest = self._quest.mode()[0] # mode() returns a tuple; take the mode value, as elsewhere
elif self.method == 'quantile':
tTest = self._quest.quantile()
return self._quest.simulate(tTest, tActual)
def __next__(self):
"""Advances to next trial and returns it.
Updates attributes; `thisTrial`, `thisTrialN`, `thisIndex`,
`finished`, `intensities`
If the trials have ended, calling this method will raise a
StopIteration error. This can be handled with code such as::
staircase = data.QuestHandler(.......)
for eachTrial in staircase: # automatically stops when done
# do stuff
or::
staircase = data.QuestHandler(.......)
while True: # i.e. forever
try:
thisTrial = staircase.next()
except StopIteration: # we got a StopIteration error
break # break out of the forever loop
# do stuff here for the trial
"""
if not self.finished:
# update pointer for next trial
self.thisTrialN += 1
self.intensities.append(self._nextIntensity)
return self._nextIntensity
else:
self._terminate()
next = __next__ # allows user to call without a loop `val = trials.next()`
def _checkFinished(self):
"""checks if we are finished
Updates attribute: `finished`
"""
if self.nTrials is not None and len(self.intensities) >= self.nTrials:
self.finished = True
elif (self.stopInterval is not None and
self.confInterval(True) < self.stopInterval):
self.finished = True
else:
self.finished = False
class PsiObject_(PsiObject, _ComparisonMixin):
"""A PsiObject that implements the == and != operators.
"""
pass
class PsiHandler(StairHandler):
"""Handler to implement the "Psi" adaptive psychophysical method
(Kontsevich & Tyler, 1999).
This implementation assumes the form of the psychometric function
to be a cumulative Gaussian. Psi estimates the two free parameters
of the psychometric function, the location (alpha) and slope (beta),
using Bayes' rule and grid approximation of the posterior distribution.
It chooses stimuli to present by minimizing the entropy of this grid.
Because this grid is represented internally as a 4-D array, one must
choose the intensity, alpha, and beta ranges carefully so as to avoid
a Memory Error. Maximum likelihood is used to estimate Lambda, the most
likely location/slope pair. Because Psi estimates the entire
psychometric function, any threshold defined on the function may be
estimated once Lambda is determined.
It is advised that Lambda estimates are examined after completion of
the Psi procedure. If the estimated alpha or beta values equal your
specified search bounds, then the search range most likely did not
contain the true value. In this situation the procedure should be
repeated with appropriately adjusted bounds.
Because Psi is a Bayesian method, it can be initialized with a prior
from existing research. A function to save the posterior over Lambda
as a Numpy binary file is included.
Kontsevich & Tyler (1999) specify their psychometric function in terms
of d'. PsiHandler avoids this and treats all parameters with respect
to stimulus intensity. Specifically, the forms of the psychometric
function assumed for Yes/No and Two Alternative Forced Choice (2AFC)
are, respectively:
_normCdf = norm.cdf(x, mean=alpha, sd=beta)
Y(x) = .5 * delta + (1 - delta) * _normCdf
Y(x) = .5 * delta + (1 - delta) * (.5 + .5 * _normCdf)
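**Example**::
# a minimal sketch; the ranges, precisions, trial count and guess
# rate below are purely illustrative, not recommendations
psi = data.PsiHandler(nTrials=50,
intensRange=[0.1, 10.0], alphaRange=[0.1, 10.0],
betaRange=[0.1, 3.0], intensPrecision=0.1,
alphaPrecision=0.1, betaPrecision=0.1,
delta=0.02)
for thisIntensity in psi:
# present the stimulus at thisIntensity, collect a response
...
psi.addResponse(thisResp) # 1 = detected, 0 = missed
alpha, beta = psi.estimateLambda()
threshold75 = psi.estimateThreshold(0.75)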
"""
def __init__(self,
nTrials,
intensRange, alphaRange, betaRange,
intensPrecision, alphaPrecision, betaPrecision,
delta,
stepType='lin',
expectedMin=0.5,
prior=None,
fromFile=False,
extraInfo=None,
name=''):
"""Initializes the handler and creates an internal Psi Object for
grid approximation.
:Parameters:
nTrials (int)
The number of trials to run.
intensRange (list)
Two element list containing the (inclusive) endpoints of
the stimuli intensity range.
alphaRange (list)
Two element list containing the (inclusive) endpoints of
the alpha (location parameter) range.
betaRange (list)
Two element list containing the (inclusive) endpoints of
the beta (slope parameter) range.
intensPrecision (float or int)
If stepType == 'lin', this specifies the step size of the
stimuli intensity range. If stepType == 'log', this specifies
the number of steps in the stimuli intensity range.
alphaPrecision (float)
The step size of the alpha (location parameter) range.
betaPrecision (float)
The step size of the beta (slope parameter) range.
delta (float)
The guess rate.
stepType (str)
The type of steps to be used when constructing the stimuli
intensity range. If 'lin' then evenly spaced steps are used.
If 'log' then logarithmically spaced steps are used.
Defaults to 'lin'.
expectedMin (float)
The expected lower asymptote of the psychometric function
(PMF).
For a Yes/No task, the PMF usually extends across the
interval [0, 1]; here, `expectedMin` should be set to `0`.
For a 2-AFC task, the PMF spreads out across [0.5, 1.0].
Therefore, `expectedMin` should be set to `0.5` in this
case, and the 2-AFC psychometric function described above
going to be is used.
Currently, only Yes/No and 2-AFC designs are supported.
Defaults to 0.5, or a 2-AFC task.
prior (numpy ndarray or str)
Optional prior distribution with which to initialize the
Psi Object. This can either be a numpy ndarray object or
the path to a numpy binary file (.npy) containing the ndarray.
fromFile (str)
Flag specifying whether prior is a file pathname or not.
extraInfo (dict)
Optional dictionary object used in PsychoPy's built-in
logging system.
name (str)
Optional name for the PsiHandler used in PsychoPy's built-in
logging system.
:Raises:
NotImplementedError
If the supplied `minVal` parameter implies an experimental
design other than Yes/No or 2-AFC.
"""
if expectedMin not in [0, 0.5]:
raise NotImplementedError(
'Currently, only Yes/No and 2-AFC designs are '
'supported. Please specify either `expectedMin=0` '
'(Yes/No) or `expectedMin=0.5` (2-AFC).')
StairHandler.__init__(
self, startVal=None, nTrials=nTrials, extraInfo=extraInfo,
stepType=stepType, minVal=intensRange[0],
maxVal=intensRange[1], name=name
)
# Create Psi object
if prior is not None and fromFile:
try:
prior = np.load(prior)
except IOError:
logging.warning("The specified pickle file could not be "
"read. Using a uniform prior instead.")
prior = None
twoAFC = (expectedMin == 0.5)
self._psi = PsiObject_(
intensRange, alphaRange, betaRange, intensPrecision,
alphaPrecision, betaPrecision, delta=delta,
stepType=stepType, TwoAFC=twoAFC, prior=prior)
self._psi.update(None)
def addResponse(self, result, intensity=None):
"""Add a 1 or 0 to signify a correct / detected or
incorrect / missed trial. Supplying an `intensity` value here
indicates that you did not use the
recommended intensity in your last trial and the staircase will
replace its recorded value with the one you supplied here.
"""
self.data.append(result)
# if needed replace the existing intensity with this custom one
if intensity is not None:
self.intensities.pop()
self.intensities.append(intensity)
# add the current data to experiment if possible
if self.getExp() is not None:
# update the experiment handler too
self.getExp().addData(self.name + ".response", result)
self._psi.update(result)
def __next__(self):
"""Advances to next trial and returns it.
"""
self._checkFinished()
if not self.finished:
# update pointer for next trial
self.thisTrialN += 1
self.intensities.append(self._psi.nextIntensity)
return self._psi.nextIntensity
else:
self._terminate()
next = __next__ # allows user to call without a loop `val = trials.next()`
def _checkFinished(self):
"""checks if we are finished.
Updates attribute: `finished`
"""
if self.nTrials is not None and len(self.intensities) >= self.nTrials:
self.finished = True
else:
self.finished = False
def estimateLambda(self):
"""Returns a tuple of (location, slope)
"""
return self._psi.estimateLambda()
def estimateThreshold(self, thresh, lamb=None):
"""Returns an intensity estimate for the provided probability.
The optional argument 'lamb' allows thresholds to be estimated
without having to recompute the maximum likelihood lambda.
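Example (a sketch, assuming a completed ``PsiHandler`` ``psi``)::
lamb = psi.estimateLambda()
t50 = psi.estimateThreshold(0.5, lamb)
t75 = psi.estimateThreshold(0.75, lamb)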
"""
if lamb is not None:
try:
if len(lamb) != 2:
msg = ("Invalid user-specified lambda pair. A "
"new estimate of lambda will be computed.")
warnings.warn(msg, SyntaxWarning)
lamb = None
except TypeError:
msg = ("Invalid user-specified lambda pair. A new "
"estimate of lambda will be computed.")
warnings.warn(msg, SyntaxWarning)
lamb = None
return self._psi.estimateThreshold(thresh, lamb)
def savePosterior(self, fileName, fileCollisionMethod='rename'):
"""Saves the posterior array over probLambda as a pickle file
with the specified name.
Parameters
----------
fileCollisionMethod : string
Collision method passed to :func:`~psychopy.tools.fileerrortools.handleFileCollision`
"""
try:
if os.path.exists(fileName):
fileName = handleFileCollision(
fileName,
fileCollisionMethod=fileCollisionMethod
)
self._psi.savePosterior(fileName)
except IOError:
warnings.warn("An error occurred while trying to save the "
"posterior array. Continuing without saving...")
class QuestPlusHandler(StairHandler):
def __init__(self,
nTrials,
intensityVals, thresholdVals, slopeVals,
lowerAsymptoteVals, lapseRateVals,
responseVals=('Yes', 'No'), prior=None,
startIntensity=None,
psychometricFunc='weibull', stimScale='log10',
stimSelectionMethod='minEntropy',
stimSelectionOptions=None, paramEstimationMethod='mean',
extraInfo=None, name='', label='', **kwargs):
"""
QUEST+ implementation. Currently only supports parameter estimation of
a Weibull-shaped psychometric function.
The parameter estimates can be retrieved via the `.paramEstimate`
attribute, which returns a dictionary whose keys correspond to the
names of the estimated parameters (i.e., `QuestPlusHandler.paramEstimate['threshold']`
will provide the threshold estimate). Retrieval of the marginal posterior distributions works
similarly: they can be accessed via the `.posterior` dictionary.
Parameters
----------
nTrials : int
Number of trials to run.
intensityVals : collection of floats
The complete set of possible stimulus levels. Note that the
stimulus levels are not necessarily limited to intensities (as the
name of this parameter implies), but they could also be contrasts,
durations, weights, etc.
thresholdVals : float or collection of floats
The complete set of possible threshold values.
slopeVals : float or collection of floats
The complete set of possible slope values.
lowerAsymptoteVals : float or collection of floats
The complete set of possible values of the lower asymptote. This
corresponds to false-alarm rates in yes-no tasks, and to the
guessing rate in n-AFC tasks. Therefore, when performing an n-AFC
experiment, the collection should consist of a single value only
(e.g., `[0.5]` for 2-AFC, `[0.33]` for 3-AFC, `[0.25]` for 4-AFC,
etc.).
lapseRateVals : float or collection of floats
The complete set of possible lapse rate values. The lapse rate
defines the upper asymptote of the psychometric function, which
will be at `1 - lapse rate`.
responseVals : collection
The complete set of possible response outcomes. Currently, only
two outcomes are supported: the first element must correspond to
a successful response / stimulus detection, and the second one to
an unsuccessful or incorrect response. For example, in a yes-no
task, one would use `['Yes', 'No']`, and in an n-AFC task,
`['Correct', 'Incorrect']`; or, alternatively, the less verbose
`[1, 0]` in both cases.
prior : dict of floats
The prior probabilities to assign to the parameter values. The
dictionary keys correspond to the respective parameters:
``threshold``, ``slope``, ``lowerAsymptote``, ``lapseRate``.
startIntensity : float
The very first intensity (or stimulus level) to present.
psychometricFunc : {'weibull'}
The psychometric function to fit. Currently, only the Weibull
function is supported.
stimScale : {'log10', 'dB', 'linear'}
The scale on which the stimulus intensities (or stimulus levels)
are provided. Currently supported are the decadic logarithm,
`log10`; decibels, `dB`; and a linear scale, `linear`.
stimSelectionMethod : {'minEntropy', 'minNEntropy'}
How to select the next stimulus. `minEntropy` will select the
stimulus that will minimize the expected entropy. `minNEntropy`
will randomly pick a stimulus from the set of stimuli that
will produce the smallest, 2nd-smallest, ..., N-smallest entropy.
This can be used to ensure some variation in the stimulus selection
(and subsequent presentation) procedure. The number `N` will then
have to be specified via the `stimSelectionOptions` parameter.
stimSelectionOptions : dict
This parameter further controls how to select the next stimulus in
case `stimSelectionMethod=minNEntropy`.
The dictionary supports two keys:
`N` and `maxConsecutiveReps`.
`N` defines the number of "best" stimuli (i.e., those which
produce the smallest `N` expected entropies) from which to randomly
select a stimulus for presentation in the next trial.
`maxConsecutiveReps` defines how many times the exact same stimulus
can be presented on consecutive trials.
For example, to randomly pick a stimulus from those which will
produce the 4 smallest expected entropies, and to allow the same
stimulus to be presented on two consecutive trials max, use
`stimSelectionOptions=dict(N=4, maxConsecutiveReps=2)`.
To achieve reproducible results, you may pass a seed to the
random number generator via the `randomSeed` key.
paramEstimationMethod : {'mean', 'mode'}
How to calculate the final parameter estimate. `mean` returns the
mean of each parameter, weighted by their respective posterior
probabilities. `mode` returns the parameters at the peak of
the posterior distribution.
extraInfo : dict
Additional information to store along the actual QUEST+ staircase
data.
name : str
The name of the QUEST+ staircase object. This will appear in the
PsychoPy logs.
label : str
Only used by :class:`MultiStairHandler`, and otherwise ignored.
kwargs : dict
Additional keyword arguments. These might be passed, for example,
through a :class:`MultiStairHandler`, and will be ignored. A
warning will be emitted whenever additional keyword arguments
have been passed.
Warns
-----
RuntimeWarning
If an unknown keyword argument was passed.
Notes
-----
The QUEST+ algorithm was first described by [1]_.
.. [1] Andrew B. Watson (2017). QUEST+: A general multidimensional
Bayesian adaptive psychometric method.
Journal of Vision, 17(3):10. doi: 10.1167/17.3.10.
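Examples
--------
A minimal sketch; the parameter grids and trial count below are
illustrative only::
import numpy as np
levels = np.arange(-3.5, -0.4, 0.5) # stimulus levels in log10 units
qp = data.QuestPlusHandler(nTrials=20,
intensityVals=levels,
thresholdVals=levels,
slopeVals=3.5,
lowerAsymptoteVals=0.5,
lapseRateVals=0.02)
for thisIntensity in qp:
# present the stimulus, collect a response
...
qp.addResponse(response='Yes') # or 'No'
print(qp.paramEstimate['threshold'])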
"""
if sys.version_info.major == 3 and sys.version_info.minor >= 6:
import questplus as qp
else:
msg = 'QUEST+ implementation requires Python 3.6 or newer'
raise RuntimeError(msg)
msg = ('The QUEST+ staircase implementation is currently being '
'tested and may be subject to change.')
logging.critical(msg)
if kwargs:
msg = ('The following keyword arguments are unknown to '
'QuestPlusHandler and will be ignored: \n')
for k in kwargs.keys():
msg += '\n - %s' % k
msg += ('\n\nIf you are using QuestPlusHandler through a '
'MultiStairHandler, it may be safe to ignore this '
'warning.')
logging.warn(msg)
# Ensure we get a proper unit-testable warning too (not just a
# logfile entry)
msg = 'Unknown keyword argument(s) passed to QuestPlusHandler'
warnings.warn(msg, RuntimeWarning)
super().__init__(startVal=startIntensity, nTrials=nTrials,
stepType=stimScale, extraInfo=extraInfo, name=name)
# We don't use these attributes that were inherited from StairHandler.
self.currentDirection = None
self.stepSizeCurrent = None
# Note that self.stepType is not used either: we use self.stimScale
# instead (which is defined below).
self.intensityVals = intensityVals
self.thresholdVals = thresholdVals
self.slopeVals = slopeVals
self.lowerAsymptoteVals = lowerAsymptoteVals
self.lapseRateVals = lapseRateVals
self.responseVals = responseVals
self.psychometricFunc = psychometricFunc
self.stimScale = stimScale
self.stimSelectionMethod = stimSelectionMethod
self.stimSelectionOptions = stimSelectionOptions
self.paramEstimationMethod = paramEstimationMethod
self._prior = prior
# questplus uses different parameter names.
if self.stimSelectionMethod == 'minEntropy':
stimSelectionMethod_ = 'min_entropy'
elif self.stimSelectionMethod == 'minNEntropy':
stimSelectionMethod_ = 'min_n_entropy'
else:
raise ValueError('Unknown stimSelectionMethod requested.')
if self.stimSelectionOptions is not None:
valid = ('N', 'maxConsecutiveReps', 'randomSeed')
if any([o not in valid for o in self.stimSelectionOptions]):
msg = ('Unknown stimSelectionOptions requested. '
'Valid options are: %s' % ', '.join(valid))
raise ValueError(msg)
stimSelectionOptions_ = dict()
if 'N' in self.stimSelectionOptions:
stimSelectionOptions_['n'] = self.stimSelectionOptions['N']
if 'maxConsecutiveReps' in self.stimSelectionOptions:
stimSelectionOptions_['max_consecutive_reps'] = self.stimSelectionOptions['maxConsecutiveReps']
if 'randomSeed' in self.stimSelectionOptions:
stimSelectionOptions_['random_seed'] = self.stimSelectionOptions['randomSeed']
else:
stimSelectionOptions_ = self.stimSelectionOptions
if self._prior is not None:
valid = ('threshold', 'slope', 'lapseRate', 'lowerAsymptote')
if any([p not in valid for p in self._prior]):
msg = ('Invalid prior parameter(s) specified. '
'Valid parameter names are: %s' % ', '.join(valid))
raise ValueError(msg)
prior_ = dict()
if 'threshold' in self._prior:
prior_['threshold'] = self._prior['threshold']
if 'slope' in self._prior:
prior_['slope'] = self._prior['slope']
if 'lapseRate' in self._prior:
prior_['lapse_rate'] = self._prior['lapseRate']
if 'lowerAsymptote' in self._prior:
prior_['lower_asymptote'] = self._prior['lowerAsymptote']
else:
prior_ = self._prior
if self.psychometricFunc == 'weibull':
self._qp = qp.QuestPlusWeibull(
intensities=self.intensityVals,
thresholds=self.thresholdVals,
slopes=self.slopeVals,
lower_asymptotes=self.lowerAsymptoteVals,
lapse_rates=self.lapseRateVals,
prior=prior_,
responses=self.responseVals,
stim_scale=self.stimScale,
stim_selection_method=stimSelectionMethod_,
stim_selection_options=stimSelectionOptions_,
param_estimation_method=self.paramEstimationMethod)
else:
msg = ('Currently only the Weibull psychometric function is '
'supported.')
raise ValueError(msg)
# Ensure self._nextIntensity is set in case the `startIntensity` kwarg
# was supplied. We never actually use self._nextIntensity in the
# QuestPlusHandler; its sole purpose here is to make the
# MultiStairHandler happy.
if self.startIntensity is not None:
self._nextIntensity = self.startIntensity
else:
self._nextIntensity = self._qp.next_intensity
@property
def startIntensity(self):
return self.startVal
def addResponse(self, response, intensity=None):
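"""Add a response (one of `responseVals`, by default 'Yes' or 'No')
and update the QUEST+ posterior.
Supplying an `intensity` value here indicates that you did not use
the recommended intensity in your last trial and the staircase will
replace its recorded value with the one you supplied here.
"""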
self.data.append(response)
# if needed replace the existing intensity with this custom one
if intensity is not None:
self.intensities.pop()
self.intensities.append(intensity)
# add the current data to experiment if possible
if self.getExp() is not None:
# update the experiment handler too
self.getExp().addData(self.name + ".response", response)
self._qp.update(intensity=self.intensities[-1],
response=response)
def __next__(self):
self._checkFinished()
if not self.finished:
# update pointer for next trial
self.thisTrialN += 1
if self.thisTrialN == 0 and self.startIntensity is not None:
self.intensities.append(self.startVal)
else:
self.intensities.append(self._qp.next_intensity)
# We never actually use self._nextIntensity in the
# QuestPlusHandler; its sole purpose here is to make the
# MultiStairHandler happy.
self._nextIntensity = self.intensities[-1]
return self.intensities[-1]
else:
self._terminate()
next = __next__
def _checkFinished(self):
if self.nTrials is not None and len(self.intensities) >= self.nTrials:
self.finished = True
else:
self.finished = False
@property
def paramEstimate(self):
"""
The estimated parameters of the psychometric function.
Returns
-------
dict of floats
A dictionary whose keys correspond to the names of the estimated
parameters.
"""
qp_estimate = self._qp.param_estimate
estimate = dict(threshold=qp_estimate['threshold'],
slope=qp_estimate['slope'],
lowerAsymptote=qp_estimate['lower_asymptote'],
lapseRate=qp_estimate['lapse_rate'])
return estimate
@property
def prior(self):
"""
The marginal prior distributions.
Returns
-------
dict of np.ndarrays
A dictionary whose keys correspond to the names of the parameters.
"""
qp_prior = self._qp.prior
threshold = qp_prior.sum(dim=('slope', 'lower_asymptote', 'lapse_rate'))
slope = qp_prior.sum(dim=('threshold', 'lower_asymptote', 'lapse_rate'))
lowerAsymptote = qp_prior.sum(dim=('threshold', 'slope', 'lapse_rate'))
lapseRate = qp_prior.sum(dim=('threshold', 'slope', 'lower_asymptote'))
qp_prior = dict(threshold=threshold.values,
slope=slope.values,
lowerAsymptote=lowerAsymptote.values,
lapseRate=lapseRate.values)
return qp_prior
@property
def posterior(self):
"""
The marginal posterior distributions.
Returns
-------
dict of np.ndarrays
A dictionary whose keys correspond to the names of the estimated
parameters.
"""
qp_posterior = self._qp.posterior
threshold = qp_posterior.sum(dim=('slope', 'lower_asymptote', 'lapse_rate'))
slope = qp_posterior.sum(dim=('threshold', 'lower_asymptote', 'lapse_rate'))
lowerAsymptote = qp_posterior.sum(dim=('threshold', 'slope', 'lapse_rate'))
lapseRate = qp_posterior.sum(dim=('threshold', 'slope', 'lower_asymptote'))
posterior = dict(threshold=threshold.values,
slope=slope.values,
lowerAsymptote=lowerAsymptote.values,
lapseRate=lapseRate.values)
return posterior
def saveAsJson(self,
fileName=None,
encoding='utf-8-sig',
fileCollisionMethod='rename'):
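"""Save the handler to JSON; parameters are as in the base-class
``saveAsJson()``.
The internal questplus object cannot be serialized directly by
json_tricks, so it is first converted via its own ``to_json()``
method before delegating to the base class (see comments below).
"""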
self_copy = copy.deepcopy(self)
# Convert questplus.QuestPlus to JSON using questplus's built-in
# functionality. questplus uses xarray, which cannot be easily
# serialized directly using json_tricks (yet).
self_copy._qp_json = self_copy._qp.to_json()
del self_copy._qp
r = (super(QuestPlusHandler, self_copy)
.saveAsJson(fileName=fileName,
encoding=encoding,
fileCollisionMethod=fileCollisionMethod))
if fileName is None:
return r
class MultiStairHandler(_BaseTrialHandler):
def __init__(self, stairType='simple', method='random',
conditions=None, nTrials=50, randomSeed=None,
originPath=None, name='', autoLog=True):
"""A Handler to allow easy interleaved staircase procedures
(simple or QUEST).
Parameters for the staircases, as used by the relevant
:class:`StairHandler` or
:class:`QuestHandler` (e.g. the `startVal`, `minVal`, `maxVal`...)
should be specified in the `conditions` list and may vary between
each staircase. In particular, the conditions **must** include
a `startVal` (because this is a required argument to the above
handlers), a `label` to tag the staircase and a `startValSd`
(only for QUEST staircases). Any parameters not specified in the
conditions file will revert to the default for that individual
handler.
If you need to customize the behaviour further you may want to
look at the recipe on :ref:`interleavedStairs`.
:params:
stairType: 'simple', 'quest', or 'questplus'
Use a :class:`StairHandler`, a :class:`QuestHandler`, or a
:class:`QuestPlusHandler`.
method: 'random', 'fullRandom', or 'sequential'
If `random`, stairs are shuffled in each repeat but not
randomized more than that (so you can't have 3 repeats of the
same staircase in a row unless it's the only one still
running). If `fullRandom`, the staircase order is "fully"
randomized, meaning that, theoretically, a large number of
subsequent trials could invoke the same staircase repeatedly.
If `sequential`, don't perform any randomization.
conditions: a list of dictionaries specifying conditions
Can be used to control parameters for the different staircases.
Can be imported from an Excel file using
`psychopy.data.importConditions`
MUST include keys providing 'startVal', 'label' and
'startValSd' (QUEST only).
The 'label' will be used in data file saving so should
be unique.
See Example Usage below.
nTrials=50
Minimum trials to run (but may take more if the staircase
hasn't also met its minimum number of reversals).
See :class:`~psychopy.data.StairHandler`
randomSeed : int or None
The seed with which to initialize the random number generator
(RNG). If `None` (default), do not initialize the RNG with
a specific value.
Example usage::
conditions=[
{'label':'low', 'startVal': 0.1, 'ori':45},
{'label':'high','startVal': 0.8, 'ori':45},
{'label':'low', 'startVal': 0.1, 'ori':90},
{'label':'high','startVal': 0.8, 'ori':90},
]
stairs = data.MultiStairHandler(conditions=conditions, nTrials=50)
for thisIntensity, thisCondition in stairs:
thisOri = thisCondition['ori']
# do something with thisIntensity and thisOri
stairs.addResponse(correctIncorrect) # this is ESSENTIAL
# save data as multiple formats
stairs.saveAsExcel(fileName) # easy to browse
stairs.saveAsPickle(fileName) # contains more info
Raises
------
ValueError
If an unknown randomization option was passed via the `method`
keyword argument.
"""
self.name = name
self.autoLog = autoLog
self.type = stairType
self.method = method
self.randomSeed = randomSeed
self._rng = np.random.RandomState(seed=randomSeed)
self.conditions = conditions
self.nTrials = nTrials
self.finished = False
self.totalTrials = 0
self._checkArguments()
# create staircases
self.staircases = [] # all staircases
self.runningStaircases = [] # staircases that haven't finished yet
self.thisPassRemaining = [] # staircases to run this pass
self._createStairs()
# fetch first staircase/value (without altering/advancing it)
self._startNewPass()
self.currentStaircase = self.thisPassRemaining[0] # take the first
# gets updated by self.addData()
self._nextIntensity = self.currentStaircase._nextIntensity
# store the origin file and its path
self.originPath, self.origin = self.getOriginPathAndFile(originPath)
self._exp = None # the experiment handler that owns me!
def _checkArguments(self):
# Did we get a `conditions` parameter, correctly formatted?
if not isinstance(self.conditions, Iterable):
raise TypeError(
'`conditions` parameter passed to MultiStairHandler '
'should be a list, not a %s.' % type(self.conditions))
c0 = self.conditions[0]
if not isinstance(c0, dict):
raise TypeError(
'`conditions` passed to MultiStairHandler should be a '
'list of python dictionaries, not a list of %ss.' %
type(c0))
# Did `conditions` contain the things we need?
params = list(c0.keys())
if self.type not in ['simple', 'quest', 'QUEST', 'questplus']:
raise ValueError(
'MultiStairHandler `stairType` should be \'simple\', '
'\'QUEST\' or \'quest\', not \'%s\'' % self.type)
if self.type != 'questplus' and 'startVal' not in params:
raise AttributeError('MultiStairHandler needs a parameter called '
'`startVal` in conditions')
if 'label' not in params:
raise AttributeError('MultiStairHandler needs a parameter called'
' `label` in conditions')
if self.type in ['QUEST', 'quest'] and 'startValSd' not in params:
raise AttributeError(
'MultiStairHandler needs a parameter called '
'`startValSd` in conditions for QUEST staircases.')
def _createStairs(self):
for condition in self.conditions:
# We create a copy, because we are going to remove items from
# this dictionary in this loop, but don't want these
# changes to alter the originals in self.conditions.
args = dict(condition)
# If no individual `nTrials` parameter was supplied for this
# staircase, use the `nTrials` that were passed to
# the MultiStairHandler on instantiation.
if 'nTrials' not in args:
args['nTrials'] = self.nTrials
if self.type == 'simple':
startVal = args.pop('startVal')
thisStair = StairHandler(startVal, **args)
elif self.type in ['QUEST', 'quest']:
startVal = args.pop('startVal')
startValSd = args.pop('startValSd')
thisStair = QuestHandler(startVal, startValSd, **args)
elif self.type == 'questplus':
thisStair = QuestPlusHandler(**args)
# This isn't normally part of handler.
thisStair.condition = condition
# And finally, add it to the list.
self.staircases.append(thisStair)
self.runningStaircases.append(thisStair)
def __iter__(self):
return self
def __next__(self):
"""Advances to next trial and returns it.
This can be handled with code such as::
staircase = data.MultiStairHandler(.......)
for eachTrial in staircase: # automatically stops when done
# do stuff here for the trial
or::
staircase = data.MultiStairHandler(.......)
while True: # ie forever
try:
thisTrial = staircase.next()
except StopIteration: # we got a StopIteration error
break # break out of the forever loop
# do stuff here for the trial
"""
# create a new set for this pass if needed
if (not hasattr(self, 'thisPassRemaining') or
not self.thisPassRemaining):
if self.runningStaircases:
self._startNewPass()
else:
self.finished = True
raise StopIteration
# fetch next staircase/value
self.currentStaircase = self.thisPassRemaining.pop(
0) # take the first and remove it
# if staircase.next() were not called here, the staircase handler
# would not record the first intensity, misaligning the
# intensities and responses
# gets updated by self.addResponse()
self._nextIntensity = next(self.currentStaircase)
# return value
if not self.finished:
# inform experiment of the condition (but not intensity,
# that might be overridden by user)
if self.getExp() is not None:
exp = self.getExp()
stair = self.currentStaircase
for key, value in list(stair.condition.items()):
exp.addData("%s.%s" % (self.name, key), value)
exp.addData(self.name + '.thisIndex',
self.conditions.index(stair.condition))
exp.addData(self.name + '.thisRepN', stair.thisTrialN + 1)
exp.addData(self.name + '.thisN', self.totalTrials)
if self.type != 'questplus':
exp.addData(self.name + '.direction', stair.currentDirection)
exp.addData(self.name + '.stepSize', stair.stepSizeCurrent)
exp.addData(self.name + '.stepType', stair.stepType)
exp.addData(self.name + '.intensity', self._nextIntensity)
self._trialAborted = False # reset this flag
return self._nextIntensity, self.currentStaircase.condition
else:
raise StopIteration
next = __next__ # allows user to call without a loop `val = trials.next()`
def _startNewPass(self):
"""Create a new iteration of the running staircases for this pass.
This is not normally needed by the user - it gets called at __init__
and every time that next() runs out of trials for this pass.
"""
if self.method == 'sequential':
self.thisPassRemaining = copy.copy(self.runningStaircases)
elif self.method == 'random':
# RandomState.shuffle() works in-place!
self.thisPassRemaining = copy.copy(self.runningStaircases)
self._rng.shuffle(self.thisPassRemaining)
elif self.method == 'fullRandom':
n = len(self.runningStaircases)
self.thisPassRemaining = self._rng.choice(self.runningStaircases,
size=n, replace=True)
# RandomState.choice() returns an ndarray, so convert back to a list
# again.
self.thisPassRemaining = list(self.thisPassRemaining)
else:
raise ValueError('Unknown randomization method requested.')
@property
def intensity(self):
"""The intensity (level) of the current staircase"""
return self.currentStaircase._nextIntensity
@intensity.setter
def intensity(self, intensity):
"""The intensity (level) of the current staircase"""
self.currentStaircase._nextIntensity = intensity
def abortCurrentTrial(self, action='random'):
"""Abort the current trial (staircase).
Calling this during an experiment aborts the staircase used on the
current trial. That staircase will be reshuffled into the available
staircases depending on the `action` parameter.
Parameters
----------
action : str
Action to take with the aborted trial. Can be either of `'random'`,
or `'append'`. The default action is `'random'`.
Notes
-----
* When using `action='random'`, the RNG state for the trial handler is
not used.
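Examples
--------
A sketch of use inside a trial loop; ``trialWasInterrupted`` is a
hypothetical flag set elsewhere in your own code::
for intensity, condition in stairs:
# run the trial
...
if trialWasInterrupted:
stairs.abortCurrentTrial(action='append')
continue # do not call addResponse for this trial
stairs.addResponse(resp)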
"""
# check if value for parameter `action` is valid
if not isinstance(action, str): # type checks for params
raise TypeError(
"Parameter `action` specified incorrect type, must be `str`.")
# reinsert the current staircase into the list of running staircases
if action == 'append':
self.thisPassRemaining.append(self.currentStaircase)
elif action == 'random':
self.thisPassRemaining.append(self.currentStaircase)
# shuffle using the numpy RNG to preserve state
np.random.shuffle(self.thisPassRemaining)
else:
raise ValueError(
"Value for parameter `action` must be either 'random' or "
"'append'.")
# set flag to indicate that the trial was aborted
self._trialAborted = True
def addResponse(self, result, intensity=None):
"""Add a 1 or 0 to signify a correct / detected or
incorrect / missed trial
This is essential to advance the staircase to a new intensity level!
"""
self.currentStaircase.addResponse(result, intensity)
if self.currentStaircase.finished:
self.runningStaircases.remove(self.currentStaircase)
# add the current data to experiment if poss
if self.getExp() is not None: # update the experiment handler too
self.getExp().addData(self.name + ".response", result)
self.totalTrials += 1
def addOtherData(self, name, value):
"""Add some data about the current trial that will not be used to
control the staircase(s) such as reaction time data
"""
self.currentStaircase.addOtherData(name, value)
def addData(self, result, intensity=None):
"""Deprecated 1.79.00: It was ambiguous whether you were adding
the response (0 or 1) or some other data concerning the trial so
there is now a pair of explicit methods:
* addResponse(corr,intensity) #some data that alters the next
trial value
* addOtherData('RT', reactionTime) #some other data that won't
control staircase
"""
self.addResponse(result, intensity)
if isinstance(result, str):
raise TypeError("MultiStairHandler.addData should only receive "
"corr / incorr. Use .addOtherData('datName',val)")
def saveAsPickle(self, fileName, fileCollisionMethod='rename'):
"""Saves a copy of self (with data) to a pickle file.
This can be reloaded later and further analyses carried out.
:Parameters:
fileCollisionMethod: Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
"""
if self.totalTrials < 1:
if self.autoLog:
logging.debug('MultiStairHandler.saveAsPickle called but no '
'trials completed. Nothing saved')
return -1
# otherwise use default location
if not fileName.endswith('.psydat'):
fileName += '.psydat'
with openOutputFile(fileName=fileName, append=False,
fileCollisionMethod=fileCollisionMethod) as f:
pickle.dump(self, f)
if (fileName is not None) and (fileName != 'stdout'):
logging.info('saved data to %s' % f.name)
def saveAsExcel(self, fileName, matrixOnly=False, appendFile=False,
fileCollisionMethod='rename'):
"""Save a summary data file in Excel OpenXML format workbook
(:term:`xlsx`) for processing in most spreadsheet packages.
This format is compatible with versions of Excel (2007 or greater)
and with OpenOffice (>=3.0).
It has the advantage over the simpler text files (see
:func:`TrialHandler.saveAsText()` )
that the data from each staircase will be save in the same file, with
the sheet name coming from the 'label' given in the dictionary of
conditions during initialisation of the Handler.
The file extension `.xlsx` will be added if not given already.
The file will contain a set of values specifying the staircase level
('intensity') at each reversal, a list of reversal indices
(trial numbers), the raw staircase/intensity level on *every* trial
and the corresponding responses of the participant on every trial.
:Parameters:
fileName: string
the name of the file to create or append. Can include
relative or absolute path
matrixOnly: True or False
If set to True then only the data itself will be output
(no additional info)
appendFile: True or False
If False any existing file with this name will be overwritten.
If True then a new worksheet will be appended.
If a worksheet already exists with that name a number will
be added to make it unique.
fileCollisionMethod: string
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
This is ignored if ``appendFile`` is ``True``.
"""
if self.totalTrials < 1:
if self.autoLog:
logging.debug('MultiStairHandler.saveAsExcel called but no'
' trials completed. Nothing saved')
return -1
append = appendFile
for thisStair in self.staircases:
# make a filename
label = thisStair.condition['label']
thisStair.saveAsExcel(
fileName, sheetName=label, matrixOnly=matrixOnly,
appendFile=append, fileCollisionMethod=fileCollisionMethod)
append = True
def saveAsText(self, fileName,
delim=None,
matrixOnly=False,
fileCollisionMethod='rename',
encoding='utf-8-sig'):
"""Write out text files with the data.
For MultiStairHandler this will output one file for each staircase
that was run, with _label added to the fileName that you specify above
(label comes from the condition dictionary you specified when you
created the Handler).
:Parameters:
fileName: a string
The name of the file, including path if needed. The extension
`.tsv` will be added if not included.
delim: a string
the delimiter to be used (e.g. '\t' for tab-delimited,
',' for csv files)
matrixOnly: True/False
If True, prevents the output of the `extraInfo` provided
at initialisation.
fileCollisionMethod:
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
encoding:
The encoding to use when saving a the file.
Defaults to `utf-8-sig`.
"""
if self.totalTrials < 1:
if self.autoLog:
logging.debug('MultiStairHandler.saveAsText called but no trials'
' completed. Nothing saved')
return -1
for thisStair in self.staircases:
# make a filename
label = thisStair.condition['label']
thisFileName = fileName + "_" + label
thisStair.saveAsText(
fileName=thisFileName, delim=delim, matrixOnly=matrixOnly,
fileCollisionMethod=fileCollisionMethod, encoding=encoding
)
def printAsText(self,
delim='\t',
matrixOnly=False):
"""Write the data to the standard output stream
:Parameters:
delim: a string
the delimiter to be used (e.g. '\t' for tab-delimited,
',' for csv files)
matrixOnly: True/False
If True, prevents the output of the `extraInfo` provided
at initialisation.
"""
nStairs = len(self.staircases)
for stairN, thisStair in enumerate(self.staircases):
if stairN < nStairs - 1:
thisMatrixOnly = True # suppress header info for all but the last staircase
else:
thisMatrixOnly = matrixOnly
# make a filename
label = thisStair.condition['label']
thisStair.saveAsText(fileName='stdout', delim=delim,
matrixOnly=thisMatrixOnly)
if __name__ == "__main__":
pass
[file 5,358: utils.py | psychopy_psychopy/psychopy/data/utils.py | Python | GPL-3.0 | psychopy/psychopy]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import os
import re
import ast
import pickle
import time, datetime
import numpy as np
import pandas as pd
from collections import OrderedDict
from packaging.version import Version
from psychopy import logging, exceptions
from psychopy.tools.filetools import pathToString
from psychopy.localization import _translate
try:
import openpyxl
if Version(openpyxl.__version__) >= Version('2.4.0'):
# openpyxl moved get_column_letter to utils.cell
from openpyxl.utils.cell import get_column_letter
else:
from openpyxl.cell import get_column_letter
from openpyxl.reader.excel import load_workbook
haveOpenpyxl = True
except ImportError:
haveOpenpyxl = False
haveXlrd = False
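# NB xlrd-based reading is not attempted; the flag is kept only so the
# format-dispatch logic in importConditions() can fall through to
# openpyxl for .xlsx files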
_nonalphanumeric_re = re.compile(r'\W') # will match all bad var name chars
def checkValidFilePath(filepath, makeValid=True):
"""Checks whether file path location (e.g. is a valid folder)
This should also check whether we have write-permissions to the folder
but doesn't currently do that!
added in: 1.90.00
"""
folder = os.path.split(os.path.abspath(filepath))[0]
if not os.path.isdir(folder):
os.makedirs(folder) # will raise an error if this fails
return True
def isValidVariableName(name):
"""Checks whether a certain string could be used as a valid variable.
Usage::
OK, msg, translated = isValidVariableName(name)
>>> isValidVariableName('name')
(True, '', '')
>>> isValidVariableName('0name')
(False, 'Variables cannot begin with numeric character', 'Variabiles non possunt incipere numerorum mores')
>>> isValidVariableName('first.second')
(False, 'Variables cannot contain punctuation or spaces', 'Variabiles non habet interpunctionem vel spatia')
>>> isValidVariableName('first second')
(False, 'Variables cannot contain punctuation or spaces', 'Variabiles non habet interpunctionem vel spatia')
>>> isValidVariableName('')
(False, "Variables cannot be missing, None, or ''", "Variabiles deesse non possunt, nemo, vel ''")
>>> isValidVariableName(None)
(False, "Variables cannot be missing, None, or ''", "Variabiles deesse non possunt, nemo, vel ''")
>>> isValidVariableName(23)
(False, "Variables must be string-like", "Variabiles debent esse linea-similis")
>>> isValidVariableName('a_b_c')
(True, '', '')
"""
if not name:
return (
False,
"Variables cannot be missing, None, or ''",
_translate("Variables cannot be missing, None, or ''")
)
if not isinstance(name, str):
return (
False,
"Variables must be string-like",
_translate("Variables must be string-like")
)
try:
name = str(name) # convert from unicode if possible
except Exception:
if type(name) in [str, np.unicode_]:
raise exceptions.ConditionsImportError(
"name %s (type %s) contains non-ASCII characters (e.g. accents)" % (name, type(name)),
translated=_translate("name %s (type %s) contains non-ASCII characters (e.g. accents)") % (name, type(name))
)
else:
raise exceptions.ConditionsImportError(
"name %s (type %s) could not be converted to a string",
translated=_translate("name %s (type %s) could not be converted to a string") % (name, type(name))
)
if name[0].isdigit():
return (
False,
"Variables cannot begin with numeric character",
_translate("Variables cannot begin with numeric character")
)
if _nonalphanumeric_re.search(name):
return (
False,
"Variables cannot contain punctuation or spaces",
_translate("Variables cannot contain punctuation or spaces")
)
return True, '', ''
def _getExcelCellName(col, row):
"""Returns the excel cell name for a row and column (zero-indexed)
>>> _getExcelCellName(0,0)
'A1'
>>> _getExcelCellName(2,1)
'C2'
"""
# BEWARE - openpyxl uses indexing at 1, to fit with Excel
return "%s%i" % (get_column_letter(col + 1), row + 1)
def importTrialTypes(fileName, returnFieldNames=False):
"""importTrialTypes is DEPRECATED (as of v1.70.00)
Please use `importConditions` for identical functionality.
"""
logging.warning("importTrialTypes is DEPRECATED (as of v1.70.00). "
"Please use `importConditions` for identical "
"functionality.")
return importConditions(fileName, returnFieldNames)
def sliceFromString(sliceString):
"""Convert a text string into a valid slice object
which can be used as indices for a list or array.
>>> sliceFromString("0:10")
slice(0,10,None)
>>> sliceFromString("0::3")
slice(0,None,3)
>>> sliceFromString("-8:")
slice(-8,None,None)
"""
sliceArgs = []
for val in sliceString.split(':'):
if len(val) == 0:
sliceArgs.append(None)
else:
sliceArgs.append(int(round(float(val))))
# nb int(round(float(x))) is needed for x='4.3'
return slice(*sliceArgs)
def indicesFromString(indsString):
"""Convert a text string into a valid list of indices
"""
# "6"
try:
inds = int(round(float(indsString)))
return [inds]
except Exception:
pass
# "-6::2"
try:
inds = sliceFromString(indsString)
return inds
except Exception:
pass
# "1,4,8"
try:
inds = list(eval(indsString))
return inds
except Exception:
pass
def listFromString(val, excludeEmpties=False):
"""Take a string that looks like a list (with commas and/or [] and make
an actual python list"""
# was previously called strToList and str2list might have been an option!
# I'll leave those here for anyone doing a find-in-path for those
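# Illustrative behaviour (a sketch, not exhaustive):
# listFromString("[1, 2, 3]") -> [1, 2, 3]
# listFromString("yes, no") -> ['yes', 'no']
# listFromString("a,,b", excludeEmpties=True) -> ['a', 'b']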
if type(val) == tuple:
return list(val)
elif type(val) == list:
return list(val) # nothing to do
elif type(val) != str:
raise ValueError("listFromString requires a string as its input not {}"
.format(repr(val)))
# try to evaluate with ast (works for "'yes','no'" or "['yes', 'no']")
try:
iterable = ast.literal_eval(val)
if type(iterable) == tuple:
iterable = list(iterable)
return iterable
except (ValueError, SyntaxError):
pass # e.g. "yes, no" won't work. We'll go on and try another way
val = val.strip() # in case there are spaces
if val.startswith(('[', '(')) and val.endswith((']', ')')):
val = val[1:-1]
asList = val.split(",")
if excludeEmpties:
asList = [this.strip() for this in asList if this]
else:
asList = [this.strip() for this in asList]
return asList
def importConditions(fileName, returnFieldNames=False, selection=""):
"""Imports a list of conditions from an .xlsx, .csv, or .pkl file
The output is suitable as an input to :class:`TrialHandler`
`trialList` or to :class:`MultiStairHandler` as a `conditions` list.
If `fileName` ends with:
- .csv: import as a comma-separated-value file
(header + row x col)
- .xlsx: import as Excel 2007 (xlsx) files.
No support for older (.xls) is planned.
- .pkl: import from a pickle file as list of lists
(header + row x col)
The file should contain one row per type of trial needed and one column
for each parameter that defines the trial type. The first row should give
parameter names, which should:
- be unique
- begin with a letter (upper or lower case)
- contain no spaces or other punctuation (underscores are permitted)
`selection` is used to select a subset of condition indices to be used
It can be a list/array of indices, a python `slice` object or a string to
be parsed as either option.
e.g.:
- "1,2,4" or [1,2,4] or (1,2,4) are the same
- "2:5" # 2, 3, 4 (doesn't include last whole value)
- "-10:2:" # tenth from last to the last in steps of 2
- slice(-10, 2, None) # the same as above
- random(5) * 8 # five random vals 0-7
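Example (a sketch; the file names are hypothetical)::
trialList = importConditions('conditions.xlsx')
trialList, fieldNames = importConditions('conditions.csv', returnFieldNames=True)
oddTrials = importConditions('conditions.csv', selection="1::2")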
"""
def _attemptImport(fileName):
"""Attempts to import file with specified settings and raises
ConditionsImportError if fails due to invalid format
:param filename: str
:return: trialList, fieldNames
"""
if fileName.endswith(('.csv', '.tsv')):
trialsArr = None
errs = []
# list of possible delimiters
delims = (",", ".", ";", "\t")
# try a variety of separator / decimal pairs
for sep, dec in [
# most common in US, EU
(',', '.'),
(';', ','),
# other possible formats
('\t', '.'),
('\t', ','),
(';', '.')
]:
# try to load
try:
thisAttempt = pd.read_csv(
fileName, encoding='utf-8-sig', sep=sep, decimal=dec
)
# if there's only one column, check that its header doesn't contain
# delimiters (a single column whose header contains a delimiter probably
# means the file parsed without error but wasn't split into columns)
if len(thisAttempt.columns) == 1:
for delim in delims:
if delim in thisAttempt.columns[0]:
msg = _translate(
"Could not load {}. \n"
"Delimiter in heading: {} in {}."
).format(fileName, delim, thisAttempt.columns[0])
err = exceptions.ConditionsImportError(msg)
errs.append(err)
raise err
# if it's all good, use received array
trialsArr = thisAttempt
except Exception:
continue
else:
# if successful, check the variable names
_assertValidVarNames(trialsArr.columns, fileName)
# skip other pairs now we've got it
break
# if all options failed, raise last error
if errs and trialsArr is None:
raise errs[-1]
elif trialsArr is None:
raise ValueError(
_translate("Could not parse file {}.").format(fileName)
)
# if we made it here, we successfully loaded the file
for col in trialsArr.columns:
for row, cell in enumerate(trialsArr[col]):
if isinstance(cell, str):
tryVal = cell.replace(",", ".")
try:
trialsArr[col][row] = float(tryVal)
except ValueError:
pass
logging.debug(u"Read csv file with pandas: {}".format(fileName))
elif fileName.endswith(('.xlsx', '.xlsm')):
trialsArr = pd.read_excel(fileName, engine='openpyxl')
logging.debug(u"Read Excel file with pandas: {}".format(fileName))
elif fileName.endswith('.xls'):
trialsArr = pd.read_excel(fileName, engine='xlrd')
logging.debug(u"Read Excel file with pandas: {}".format(fileName))
# then try to convert array to trialList and fieldnames
unnamed = trialsArr.columns.to_series().str.contains('^Unnamed: ')
trialsArr = trialsArr.loc[:, ~unnamed] # clear unnamed cols
logging.debug(u"Clearing unnamed columns from {}".format(fileName))
trialList, fieldNames = pandasToDictList(trialsArr)
return trialList, fieldNames
def _assertValidVarNames(fieldNames, fileName):
"""screens a list of names as candidate variable names. if all
names are OK, return silently; else raise with msg
"""
fileName = pathToString(fileName)
if not all(fieldNames):
raise exceptions.ConditionsImportError(
"Conditions file %s: Missing parameter name(s); empty cell(s) in the first row?" % fileName,
translated=_translate("Conditions file %s: Missing parameter name(s); empty cell(s) in the first row?") % fileName
)
for name in fieldNames:
OK, msg, translated = isValidVariableName(name)
if not OK:
# tailor message to importConditions
msg = msg.replace('Variables', 'Parameters (column headers)')
translated = translated.replace('Variables', 'Parameters (column headers)')
raise exceptions.ConditionsImportError(
'Bad name: %s%s"%s"' % (name, os.linesep, msg),
translated='Bad name: %s%s"%s"' % (name, os.linesep, translated)
)
if fileName in ['None', 'none', None]:
if returnFieldNames:
return [], []
return []
if not os.path.isfile(fileName):
raise exceptions.ConditionsImportError(
"Conditions file not found: %s" % fileName,
translated=_translate("Conditions file not found: %s") % fileName
)
def pandasToDictList(dataframe):
"""Convert a pandas dataframe to a list of dicts.
This helper function is used by csv or excel imports via pandas
"""
# convert the resulting dataframe to a numpy recarray
trialsArr = dataframe.to_records(index=False)
# Check for new line characters in strings, and replace escaped characters
for record in trialsArr:
for idx, element in enumerate(record):
if isinstance(element, str):
record[idx] = element.replace('\\n', '\n')
if trialsArr.shape == ():
# convert 0-D to 1-D with one element:
trialsArr = trialsArr[np.newaxis]
fieldNames = list(trialsArr.dtype.names)
_assertValidVarNames(fieldNames, fileName)
# convert the record array into a list of dicts
trialList = []
for trialN, trialType in enumerate(trialsArr):
thisTrial = OrderedDict()
for fieldN, fieldName in enumerate(fieldNames):
val = trialsArr[trialN][fieldN]
if isinstance(val, str):
if val.startswith('[') and val.endswith(']'):
# val = eval('%s' %unicode(val.decode('utf8')))
val = eval(val)
elif type(val) == np.string_:
val = str(val.decode('utf-8-sig'))
# if it looks like a list, convert it:
if val.startswith('[') and val.endswith(']'):
# val = eval('%s' %unicode(val.decode('utf8')))
val = eval(val)
elif np.isnan(val):
val = None
thisTrial[fieldName] = val
trialList.append(thisTrial)
return trialList, fieldNames
if (fileName.endswith(('.csv', '.tsv'))
or (fileName.endswith(('.xlsx', '.xls', '.xlsm')) and haveXlrd)):
trialList, fieldNames = _attemptImport(fileName=fileName)
elif fileName.endswith(('.xlsx', '.xlsm')):  # no xlrd so use openpyxl
if not haveOpenpyxl:
raise exceptions.ConditionsImportError(
"openpyxl or xlrd is required for loading excel files, but neither was found.",
_translate("openpyxl or xlrd is required for loading excel files, but neither was found.")
)
# data_only was added in 1.8
if Version(openpyxl.__version__) < Version('1.8'):
wb = load_workbook(filename=fileName)
else:
wb = load_workbook(filename=fileName, data_only=True)
ws = wb.worksheets[0]
logging.debug(u"Read excel file with openpyxl: {}".format(fileName))
try:
# in new openpyxl (2.3.4+) get_highest_xx is deprecated
nCols = ws.max_column
nRows = ws.max_row
except Exception:
# version openpyxl 1.5.8 (in Standalone 1.80) needs this
nCols = ws.get_highest_column()
nRows = ws.get_highest_row()
# get parameter names from the first row header
fieldNames = []
rangeCols = []
for colN in range(nCols):
if Version(openpyxl.__version__) < Version('2.0'):
fieldName = ws.cell(_getExcelCellName(col=colN, row=0)).value
else:
# From 2.0, cells are referenced with 1-indexing: A1 == cell(row=1, column=1)
fieldName = ws.cell(row=1, column=colN + 1).value
if fieldName:
# If column is named, add its name to fieldNames
fieldNames.append(fieldName)
rangeCols.append(colN)
_assertValidVarNames(fieldNames, fileName)
# loop trialTypes
trialList = []
for rowN in range(1, nRows): # skip header first row
thisTrial = {}
for rangeColsIndex, colN in enumerate(rangeCols):
if Version(openpyxl.__version__) < Version('2.0'):
val = ws.cell(_getExcelCellName(col=colN, row=rowN)).value
else:
# From 2.0, cells are referenced with 1-indexing: A1 == cell(row=1, column=1)
val = ws.cell(row=rowN + 1, column=colN + 1).value
# if it looks like a list or tuple, convert it
if (isinstance(val, str) and
(val.startswith('[') and val.endswith(']') or
val.startswith('(') and val.endswith(')'))):
val = eval(val)
# if it has any line breaks correct them
if isinstance(val, str):
val = val.replace('\\n', '\n')
# Convert from EU-style decimals: replace , with . and try to make it a float
if isinstance(val, str):
tryVal = val.replace(",", ".")
try:
val = float(tryVal)
except ValueError:
pass
fieldName = fieldNames[rangeColsIndex]
thisTrial[fieldName] = val
trialList.append(thisTrial)
elif fileName.endswith('.pkl'):
f = open(fileName, 'rb')
# Converting newline characters.
# 'b' is necessary in Python3 because byte object is
# returned when file is opened in binary mode.
buffer = f.read().replace(b'\r\n',b'\n').replace(b'\r',b'\n')
try:
trialsArr = pickle.loads(buffer)
except Exception:
raise exceptions.ConditionsImportError(
'Could not open %s as conditions' % fileName,
translated=_translate('Could not open %s as conditions') % fileName
)
f.close()
trialList = []
# In Python3, strings returned by pickle() are unhashable so we have to
# convert them to str.
trialsArr = [[str(item) if isinstance(item, str) else item
for item in row] for row in trialsArr]
fieldNames = trialsArr[0] # header line first
_assertValidVarNames(fieldNames, fileName)
for row in trialsArr[1:]:
thisTrial = {}
for fieldN, fieldName in enumerate(fieldNames):
# type is correct, being .pkl
thisTrial[fieldName] = row[fieldN]
trialList.append(thisTrial)
else:
raise exceptions.ConditionsImportError(
'Your conditions file should be an xlsx, csv, dlm, tsv or pkl file',
translated=_translate('Your conditions file should be an xlsx, csv, dlm, tsv or pkl file')
)
# if we have a selection then try to parse it
if isinstance(selection, str) and len(selection) > 0:
selection = indicesFromString(selection)
if not isinstance(selection, slice):
for n in selection:
try:
assert n == int(n)
except AssertionError:
raise exceptions.ConditionsImportError(
"importConditions() was given some `indices` but could not parse them",
translated=_translate("importConditions() was given some `indices` but could not parse them")
)
# the selection might now be a slice or a series of indices
if isinstance(selection, slice):
trialList = trialList[selection]
elif len(selection) > 0:
allConds = trialList
trialList = []
logging.debug("Selecting %d of %d conditions" % (len(selection), len(allConds)))
for ii in selection:
trialList.append(allConds[int(ii)])
logging.exp('Imported %s as conditions, %d conditions, %d params' %
(fileName, len(trialList), len(fieldNames)))
if returnFieldNames:
return (trialList, fieldNames)
else:
return trialList
def createFactorialTrialList(factors):
"""Create a trialList by entering a list of factors with names (keys)
and levels (values) it will return a trialList in which all factors
have been factorially combined (so for example if there are two factors
with 3 and 5 levels the trialList will be a list of 3*5 = 15, each
specifying the values for a given trial
Usage::
trialList = createFactorialTrialList(factors)
:Parameters:
factors : a dictionary with names (keys) and levels (values) of the
factors
Example::
factors={"text": ["red", "green", "blue"],
"letterColor": ["red", "green"],
"size": [0, 1]}
mytrials = createFactorialTrialList(factors)
"""
# the first step is to place all the factorial combinations in a list of
# lists
tempListOfLists = [[]]
for key in factors:
# this takes the levels of each factor as a set of values
# (a list) at a time
alist = factors[key]
tempList = []
for value in alist:
# now we loop over the values in a given list,
# and add each value of the other lists
for iterList in tempListOfLists:
tempList.append(iterList + [key, value])
tempListOfLists = tempList
# this second step is so we can return a list in the format of trialList
trialList = []
for atrial in tempListOfLists:
keys = atrial[0::2] # the even elements are keys
values = atrial[1::2] # the odd elements are values
atrialDict = {}
for i in range(len(keys)):
# this combines the key with the value
atrialDict[keys[i]] = values[i]
# append one trial at a time to the final trialList
trialList.append(atrialDict)
return trialList
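# A minimal usage sketch of createFactorialTrialList (the factor names below are
# illustrative only, not from the docs); it runs only when executed directly.
if __name__ == "__main__":
    _factors = {"letterColor": ["red", "green"], "size": [0, 1]}
    _trials = createFactorialTrialList(_factors)
    # 2 levels x 2 levels -> 4 fully crossed conditions
    assert len(_trials) == 4
    print(_trials)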
def bootStraps(dat, n=1):
"""Create a list of n bootstrapped resamples of the data
SLOW IMPLEMENTATION (Python for-loop)
Usage:
``out = bootStraps(dat, n=1)``
Where:
dat
an NxM or 1xN array (each row is a different condition, each
column is a different trial)
n
number of bootstrapped resamples to create
out
- dim[0]=conditions
- dim[1]=trials
- dim[2]=resamples
"""
dat = np.asarray(dat)
if len(dat.shape) == 1:
# have presumably been given a series of data for one stimulus
# adds a dimension (array now has shape (1, Ntrials))
dat = np.array([dat])
nTrials = dat.shape[1]
# initialise a matrix to store output
resamples = np.zeros(dat.shape + (n,), dat.dtype)
rand = np.random.rand
for stimulusN in range(dat.shape[0]):
thisStim = dat[stimulusN, :] # fetch data for this stimulus
for sampleN in range(n):
indices = np.floor(nTrials * rand(nTrials)).astype('i')
resamples[stimulusN, :, sampleN] = np.take(thisStim, indices)
return resamples
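# A minimal sketch of bootStraps on fabricated data, assuming direct execution;
# the output shape follows the docstring: (conditions, trials, resamples).
if __name__ == "__main__":
    import numpy as np
    _dat = np.array([0.2, 0.4, 0.6, 0.8])  # one condition, four trials
    _out = bootStraps(_dat, n=5)
    assert _out.shape == (1, 4, 5)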
def functionFromStaircase(intensities, responses, bins=10):
"""Create a psychometric function by binning data from a staircase
procedure. Although the default is 10 bins Jon now always uses 'unique'
bins (fewer bins looks pretty but leads to errors in slope estimation)
usage::
intensity, meanCorrect, n = functionFromStaircase(intensities,
responses, bins)
where:
intensities
are a list (or array) of intensities to be binned
responses
are a list of 0,1 each corresponding to the equivalent
intensity value
bins
can be an integer (giving that number of bins) or 'unique'
(each bin is made from all data for exactly one intensity
value)
intensity
a numpy array of intensity values (where each is the center
of an intensity bin)
meanCorrect
a numpy array of mean % correct in each bin
n
a numpy array of number of responses contributing to each mean
"""
# convert to arrays
try:
# concatenate if multidimensional
intensities = np.concatenate(intensities)
responses = np.concatenate(responses)
except Exception:
intensities = np.array(intensities)
responses = np.array(responses)
# sort the responses
sort_ii = np.argsort(intensities)
sortedInten = np.take(intensities, sort_ii)
sortedResp = np.take(responses, sort_ii)
binnedResp = []
binnedInten = []
nPoints = []
if bins == 'unique':
intensities = np.round(intensities, decimals=8)
uniqueIntens = np.unique(intensities)
for thisInten in uniqueIntens:
theseResps = responses[intensities == thisInten]
binnedInten.append(thisInten)
binnedResp.append(np.mean(theseResps))
nPoints.append(len(theseResps))
else:
pointsPerBin = len(intensities)/bins
for binN in range(bins):
start = int(round(binN * pointsPerBin))
stop = int(round((binN + 1) * pointsPerBin))
thisResp = sortedResp[start:stop]
thisInten = sortedInten[start:stop]
binnedResp.append(np.mean(thisResp))
binnedInten.append(np.mean(thisInten))
nPoints.append(len(thisInten))
return binnedInten, binnedResp, nPoints
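# A minimal sketch of functionFromStaircase with fabricated staircase data,
# assuming direct execution; 'unique' gives one bin per distinct intensity.
if __name__ == "__main__":
    _intens = [0.1, 0.1, 0.2, 0.2, 0.3, 0.3]
    _resps = [0, 1, 0, 1, 1, 1]
    _i, _p, _n = functionFromStaircase(_intens, _resps, bins='unique')
    assert list(_n) == [2, 2, 2]  # two responses contributed to each bin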
def getDateStr(format="%Y-%m-%d_%Hh%M.%S.%f", fractionalSecondDigits=3):
"""Uses ``datetime.now().strftime(format)``_ to generate a string
based on ISO 8601 but made safe for filename use::
"2022-01-14_18h35.05.386"
represents 14th Jan 2022 at 6:35pm with 5 sec and 386 ms
This is often useful appended to data filenames to provide unique names.
Parameters
----------
format : str
See the documentation for `datetime.datetime.strftime` for more
information on format syntax:
https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior
default="%Y-%m-%d_%Hh%M.%S.%f"
fractionalSecondDigits : int
An integer value 1-6 indicating the number of digits of fractional
seconds to include if the `%f` parameter is included in the format.
This would normally give 6 digits (microseconds) but to get just
milliseconds you can set fractionalSecondDigits=3
"""
now = datetime.datetime.now().astimezone()
microsecs = now.strftime("%f")
nowStr = now.strftime(format)
if "%f" in format and (
fractionalSecondDigits < 1
or int(fractionalSecondDigits) != fractionalSecondDigits
):
raise TypeError("fractionalSecondDigits argument to getDateStr should "
                f"be an integer of at least 1, not {fractionalSecondDigits}")
elif "%f" in format and fractionalSecondDigits > len(microsecs):
logging.warning("fractionalSecondDigits argument to getDateStr requested "
f"{fractionalSecondDigits} digits but only {len(microsecs)} "
f"are available. Truncating to {len(microsecs)}.")
elif "%f" in format:
nowStr = nowStr.replace(
microsecs, microsecs[:int(fractionalSecondDigits)],
)
return nowStr
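# A minimal sketch of getDateStr, assuming direct execution; the second call
# uses a format without %f, which skips the fractional-second handling entirely.
if __name__ == "__main__":
    print(getDateStr())                   # e.g. "2022-01-14_18h35.05.386"
    print(getDateStr(format="%Y-%m-%d"))  # date only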
def parsePipeSyntax(key, stripKey=True):
"""
Parse "pipe syntax" within an expInfo key / all keys in an expInfo dict. Pipe syntax is as follows:
|req = Required input
|cfg = Configuration parameter, hidden behind "read more" tag
|fix = Fixed parameter, meaning its value can't be changed
|hid = Hidden parameter, meaning it's not shown by DlgFromDict
An unescaped * in the key is considered shorthand for |req.
Parameters
----------
key : str
A key to parse.
stripKey : bool
If True, surrounding whitespace will be removed from the processed key. Flags are stripped regardless.
Returns
-------
str
`value` with pipe syntax removed
list
List of flags found
"""
# add |req if an unescaped * is present
key = re.sub(r"(?<!\\)\*", "|req", key)
# get flags
key, *flags = key.split("|")
# remove duplicates
flags = list(set(flags))
# strip key if requested
if stripKey:
key = key.strip()
# strip each flag
flags = [flag.strip() for flag in flags]
return key, flags
def parsePipeSyntaxDict(expInfo, stripKey=True):
"""
Calls `parsePipeSyntax` on each key in an expInfo dict and returns two new dicts: One with values against sanitized
keys, the other with flags against processed keys.
Parameters
----------
expInfo : dict
Dict whose flags to process
stripKey : bool
If True, surrounding whitespace will be removed from keys. Flags are stripped regardless.
Returns
-------
dict
The values from `expInfo` with processed keys, i.e. no pipe syntax
dict
The flags extracted by processing pipe syntax, with processed keys, i.e. no pipe syntax
"""
valuesDict = {}
flagsDict = {}
for key in expInfo:
# parse key for syntax
newKey, flags = parsePipeSyntax(key)
# store original value under parsed key
valuesDict[newKey] = expInfo[key]
# store parsed flags under parsed key
flagsDict[newKey] = flags
return valuesDict, flagsDict
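# A minimal sketch of the pipe-syntax helpers, assuming direct execution;
# the keys below are illustrative, not a documented expInfo layout.
if __name__ == "__main__":
    assert parsePipeSyntax("participant*") == ("participant", ["req"])
    _vals, _flags = parsePipeSyntaxDict({"session|hid": "001"})
    assert _vals == {"session": "001"}
    assert _flags == {"session": ["hid"]}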
| 31,186 | Python | .py | 710 | 33.612676 | 130 | 0.600171 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,359 | fit.py | psychopy_psychopy/psychopy/data/fit.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# from scipy import optimize # DON'T. It's slow and crashes on some machines
class _baseFunctionFit():
"""Not needed by most users except as a superclass for developing
your own functions
Derived classes must have _eval and _inverse methods with @staticmethods
"""
def __init__(self, xx, yy, sems=1.0, guess=None, display=1,
expectedMin=0.5, optimize_kws=None):
super(_baseFunctionFit, self).__init__()
self.xx = np.array(xx)
self.yy = np.array(yy)
self.sems = np.array(sems)
if not hasattr(sems, "__len__"):
# annoyingly in numpy 1.13 len(numpy.array(1)) gives an error
self.sems.shape = (1,) # otherwise we can't get len (in numpy 1.13)
self.expectedMin = expectedMin
self.guess = guess
self.optimize_kws = {}
if optimize_kws is not None:
self.optimize_kws = optimize_kws
# for holding error calculations:
self.ssq = 0
self.rms = 0
self.chi = 0
# do the calculations:
self._doFit()
def _doFit(self):
"""The Fit class that derives this needs to specify its _evalFunction
"""
# get some useful variables to help choose starting fit vals
# self.params = optimize.fmin_powell(self._getErr, self.params,
# (self.xx,self.yy,self.sems),disp=self.display)
# self.params = optimize.fmin_bfgs(self._getErr, self.params, None,
# (self.xx,self.yy,self.sems),disp=self.display)
from scipy import optimize
# don't import optimize at top of script. Slow and not always present!
global _chance
_chance = self.expectedMin
if len(self.sems) == 1:
sems = None
else:
sems = self.sems
self.params, self.covar = optimize.curve_fit(
self._eval, self.xx, self.yy, p0=self.guess, sigma=sems,
**self.optimize_kws)
self.ssq = self._getErr(self.params, self.xx, self.yy, 1.0)
self.chi = self._getErr(self.params, self.xx, self.yy, self.sems)
self.rms = self.ssq/len(self.xx)
def _getErr(self, params, xx, yy, sems):
mod = self.eval(xx, params)
err = sum((yy - mod)**2 / sems)
return err
def eval(self, xx, params=None):
"""Evaluate xx for the current parameters of the model, or for
arbitrary params if these are given.
"""
if params is None:
params = self.params
global _chance
_chance = self.expectedMin
#_eval is a static method - must be done this way because the
# curve_fit function doesn't want to have any `self` object as
# first arg
yy = self._eval(xx, *params)
return yy
def inverse(self, yy, params=None):
"""Evaluate yy for the current parameters of the model,
or for arbitrary params if these are given.
"""
if params is None:
# so the user can set params for this particular inv
params = self.params
xx = self._inverse(yy, *params)
return xx
class FitWeibull(_baseFunctionFit):
"""Fit a Weibull function (either 2AFC or YN)
of the form::
y = chance + (1.0-chance)*(1-exp( -(xx/alpha)**(beta) ))
and with inverse::
x = alpha * (-log((1.0-y)/(1-chance)))**(1.0/beta)
After fitting the function you can evaluate an array of x-values
with ``fit.eval(x)``, retrieve the inverse of the function with
``fit.inverse(y)`` or retrieve the parameters from ``fit.params``
(a list with ``[alpha, beta]``)
"""
# static methods have no `self` and this is important for
# optimise.curve_fit
@staticmethod
def _eval(xx, alpha, beta):
global _chance
xx = np.asarray(xx)
yy = _chance + (1.0 - _chance) * (1 -
np.exp(-(xx/alpha)**beta))
return yy
@staticmethod
def _inverse(yy, alpha, beta):
global _chance
xx = alpha * (-np.log((1.0 - yy)/(1 - _chance))) ** (1.0/beta)
return xx
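# A minimal sketch of fitting a FitWeibull to noiseless synthetic 2AFC data,
# assuming scipy is installed (it is only imported when the fit runs) and this
# module is executed directly; the true alpha/beta values are arbitrary.
if __name__ == "__main__":
    _x = np.linspace(0.1, 1.0, 10)
    _y = 0.5 + 0.5 * (1 - np.exp(-(_x / 0.4) ** 3))  # alpha=0.4, beta=3
    _fit = FitWeibull(_x, _y, guess=[0.5, 2.0], expectedMin=0.5)
    _alpha, _beta = _fit.params
    assert abs(_alpha - 0.4) < 0.05 and abs(_beta - 3.0) < 0.5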
class FitNakaRushton(_baseFunctionFit):
"""Fit a Naka-Rushton function
of the form::
yy = rMin + (rMax-rMin) * xx**n/(xx**n+c50**n)
After fitting the function you can evaluate an array of x-values
with ``fit.eval(x)``, retrieve the inverse of the function with
``fit.inverse(y)`` or retrieve the parameters from ``fit.params``
(a list with ``[rMin, rMax, c50, n]``)
Note that this differs from most of the other functions in
not using a value for the expected minimum. Rather, it fits this
as one of the parameters of the model."""
# static methods have no `self` and this is important for
# optimise.curve_fit
@staticmethod
def _eval(xx, c50, n, rMin, rMax):
xx = np.asarray(xx)
if c50 <= 0:
c50 = 0.001
if n <= 0:
n = 0.001
if rMax <= 0:
    rMax = 0.001
if rMin <= 0:
    rMin = 0.001
yy = rMin + (rMax - rMin) * (xx**n / (xx**n + c50**n))
return yy
@staticmethod
def _inverse(yy, c50, n, rMin, rMax):
yScaled = (yy - rMin) / (rMax - rMin) # remove baseline and scale
# do we need to shift while fitting?
yScaled[yScaled < 0] = 0
xx = (yScaled * c50**n / (1 - yScaled))**(1 / n)
return xx
class FitLogistic(_baseFunctionFit):
"""Fit a Logistic function (either 2AFC or YN)
of the form::
y = chance + (1-chance)/(1+exp((PSE-xx)*JND))
and with inverse::
x = PSE - log((1-chance)/(yy-chance) - 1)/JND
After fitting the function you can evaluate an array of x-values
with ``fit.eval(x)``, retrieve the inverse of the function with
``fit.inverse(y)`` or retrieve the parameters from ``fit.params``
(a list with ``[PSE, JND]``)
"""
# static methods have no `self` and this is important for
# optimise.curve_fit
@staticmethod
def _eval(xx, PSE, JND):
global _chance
chance = _chance
xx = np.asarray(xx)
yy = chance + (1 - chance) / (1 + np.exp((PSE - xx) * JND))
return yy
@staticmethod
def _inverse(yy, PSE, JND):
global _chance
yy = np.asarray(yy)
xx = PSE - np.log((1 - _chance) / (yy - _chance) - 1) / JND
return xx
class FitCumNormal(_baseFunctionFit):
"""Fit a Cumulative Normal function (aka error function or erf)
of the form::
y = chance + (1-chance)*((special.erf((xx-xShift)/(sqrt(2)*sd))+1)*0.5)
and with inverse::
x = xShift+sqrt(2)*sd*(erfinv(((yy-chance)/(1-chance)-.5)*2))
After fitting the function you can evaluate an array of x-values
with fit.eval(x), retrieve the inverse of the function with
fit.inverse(y) or retrieve the parameters from fit.params (a list
with [centre, sd] for the Gaussian distribution forming the cumulative)
NB: Prior to version 1.74 the parameters had different meaning, relating
to xShift and slope of the function (similar to 1/sd). Although that is
more in with the parameters for the Weibull fit, for instance, it is less
in keeping with standard expectations of normal (Gaussian distributions)
so in version 1.74.00 the parameters became the [centre,sd] of the normal
distribution.
"""
# static methods have no `self` and this is important for
# optimise.curve_fit
@staticmethod
def _eval(xx, xShift, sd):
from scipy import special
global _chance
xx = np.asarray(xx)
# NB scipy.special.erf() goes from -1:1
yy = (_chance + (1 - _chance) *
((special.erf((xx - xShift) / (np.sqrt(2) * sd)) + 1) * 0.5))
return yy
@staticmethod
def _inverse(yy, xShift, sd):
from scipy import special
global _chance
yy = np.asarray(yy)
# xx = (special.erfinv((yy-chance)/(1-chance)*2.0-1)+xShift)/xScale
# NB: scipy.special.erfinv() goes from -1:1
xx = (xShift + np.sqrt(2) * sd *
special.erfinv(((yy - _chance) / (1 - _chance) - 0.5) * 2))
return xx
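# A minimal round-trip sketch for FitCumNormal, assuming scipy is installed and
# direct execution: inverse(eval(x)) should recover x for the fitted params.
if __name__ == "__main__":
    from scipy import special
    _x = np.linspace(-0.5, 0.7, 9)
    _y = 0.5 + 0.5 * ((special.erf((_x - 0.1) / (np.sqrt(2) * 0.3)) + 1) * 0.5)
    _fit = FitCumNormal(_x, _y, guess=[0.0, 0.5], expectedMin=0.5)
    assert np.allclose(_fit.inverse(_fit.eval(_x)), _x)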
class FitFunction():
"""Deprecated: - use the specific functions; FitWeibull, FitLogistic...
"""
def __init__(self, *args, **kwargs):
raise DeprecationWarning("FitFunction is now fully DEPRECATED: use"
" FitLogistic, FitWeibull etc instead")
| 8,562 | Python | .py | 205 | 33.741463 | 80 | 0.600914 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,360 | __init__.py | psychopy_psychopy/psychopy/data/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from packaging.version import Version
from .base import DataHandler
from .routine import Routine
from .experiment import ExperimentHandler
from .trial import TrialHandler, TrialHandler2, TrialHandlerExt, TrialType
from .staircase import (StairHandler, QuestHandler, PsiHandler,
MultiStairHandler)
from .counterbalance import Counterbalancer
from . import shelf
if sys.version_info.major == 3 and sys.version_info.minor >= 6:
from .staircase import QuestPlusHandler
from .utils import (checkValidFilePath, isValidVariableName, importTrialTypes,
sliceFromString, indicesFromString, importConditions,
createFactorialTrialList, bootStraps, functionFromStaircase,
getDateStr)
from .fit import (FitFunction, FitCumNormal, FitLogistic, FitNakaRushton,
FitWeibull)
try:
# import openpyxl
import openpyxl
if Version(openpyxl.__version__) >= Version('2.4.0'):
# openpyxl moved get_column_letter to utils.cell
from openpyxl.utils.cell import get_column_letter
else:
from openpyxl.cell import get_column_letter
from openpyxl.reader.excel import load_workbook
haveOpenpyxl = True
except ImportError:
haveOpenpyxl = False
try:
import xlrd
haveXlrd = True
except ImportError:
haveXlrd = False
| 1,413 | Python | .py | 37 | 32.378378 | 80 | 0.73538 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,361 | experiment.py | psychopy_psychopy/psychopy/data/experiment.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
import copy
import pickle
import atexit
import pandas as pd
from psychopy import constants, clock
from psychopy import logging
from psychopy.data.trial import TrialHandler2
from psychopy.tools.filetools import (openOutputFile, genDelimiter,
genFilenameFromDelimiter, handleFileCollision)
from psychopy.localization import _translate
from .utils import checkValidFilePath
from .base import _ComparisonMixin
class ExperimentHandler(_ComparisonMixin):
"""A container class for keeping track of multiple loops/handlers
Useful for generating a single data file from an experiment with many
different loops (e.g. interleaved staircases or loops within loops)
:usage:
exp = data.ExperimentHandler(name="Face Preference",version='0.1.0')
"""
def __init__(self,
name='',
version='',
extraInfo=None,
runtimeInfo=None,
originPath=None,
savePickle=True,
saveWideText=True,
sortColumns=False,
dataFileName='',
autoLog=True,
appendFiles=False):
"""
:parameters:
name : a string or unicode
As a useful identifier later
version : usually a string (e.g. '1.1.0')
To keep track of which version of the experiment was run
extraInfo : a dictionary
Containing useful information about this run
(e.g. {'participant':'jwp','gender':'m','orientation':90} )
runtimeInfo : :class:`psychopy.info.RunTimeInfo`
Containing information about the system as detected at
runtime
originPath : string or unicode
The path and filename of the originating script/experiment
If not provided this will be determined as the path of the
calling script.
dataFileName : string
This is defined in advance and the file will be saved at any
point that the handler is removed or discarded (unless
.abort() had been called in advance).
The handler will attempt to populate the file even in the
event of a (not too serious) crash!
savePickle : True (default) or False
saveWideText : True (default) or False
sortColumns : str or bool
How (if at all) to sort columns in the data file, if none is given to saveAsWideText. Can be:
- "alphabetical", "alpha", "a" or True: Sort alphabetically by header name
- "priority", "pr" or "p": Sort according to priority
- other: Do not sort, columns remain in order they were added
autoLog : True (default) or False
"""
self.loops = []
self.loopsUnfinished = []
self.name = name
self.version = version
self.runtimeInfo = runtimeInfo
if extraInfo is None:
self.extraInfo = {}
else:
self.extraInfo = extraInfo
self.originPath = originPath
self.savePickle = savePickle
self.saveWideText = saveWideText
self.dataFileName = handleFileCollision(dataFileName, "rename")
self.sortColumns = sortColumns
self.thisEntry = {}
self.entries = [] # chronological list of entries
self._paramNamesSoFar = []
self.dataNames = ['thisRow.t', 'notes'] # names of all the data (eg. resp.keys)
self.columnPriority = {
'thisRow.t': constants.priority.CRITICAL - 1,
'notes': constants.priority.MEDIUM - 1,
}
self.autoLog = autoLog
self.appendFiles = appendFiles
self.status = constants.NOT_STARTED
# dict of filenames to collision method to be used next time it's saved
self._nextSaveCollision = {}
# list of call profiles for connected save methods
self.connectedSaveMethods = []
if dataFileName in ['', None]:
logging.warning('ExperimentHandler created with no dataFileName'
' parameter. No data will be saved in the event '
'of a crash')
else:
# fail now if we fail at all!
checkValidFilePath(dataFileName, makeValid=True)
atexit.register(self.close)
def __del__(self):
self.close()
@property
def currentLoop(self):
"""
Return the loop which we are currently in, this will either be a handle to a loop, such as
a :class:`~psychopy.data.TrialHandler` or :class:`~psychopy.data.StairHandler`, or the handle
of the :class:`~psychopy.data.ExperimentHandler` itself if we are not in a loop.
"""
# If there are unfinished (aka currently active) loops, return the most recent
if len(self.loopsUnfinished):
return self.loopsUnfinished[-1]
# If we are not in a loop, return handle to experiment handler
return self
def addLoop(self, loopHandler):
"""Add a loop such as a :class:`~psychopy.data.TrialHandler`
or :class:`~psychopy.data.StairHandler`
Data from this loop will be included in the resulting data files.
"""
self.loops.append(loopHandler)
self.loopsUnfinished.append(loopHandler)
# keep the loop updated that is now owned
loopHandler.setExp(self)
def loopEnded(self, loopHandler):
"""Informs the experiment handler that the loop is finished and not to
include its values in further entries of the experiment.
This method is called by the loop itself if it ends its iterations,
so is not typically needed by the user.
"""
if loopHandler in self.loopsUnfinished:
self.loopsUnfinished.remove(loopHandler)
def _getAllParamNames(self):
"""Returns the attribute names of loop parameters (trialN etc)
that the current set of loops contain, ready to build a wide-format
data file.
"""
names = copy.deepcopy(self._paramNamesSoFar)
# get names (or identifiers) for all contained loops
for thisLoop in self.loops:
theseNames, vals = self._getLoopInfo(thisLoop)
for name in theseNames:
if name not in names:
names.append(name)
return names
def _getExtraInfo(self):
"""Get the names and vals from the extraInfo dict (if it exists)
"""
if type(self.extraInfo) != dict:
names = []
vals = []
else:
names = list(self.extraInfo)
vals = list(self.extraInfo.values())
return names, vals
def _getLoopInfo(self, loop):
"""Returns the attribute names and values for the current trial
of a particular loop. Does not return data inputs from the subject,
only info relating to the trial execution.
"""
names = []
vals = []
name = loop.name
# standard attributes
for attr in ('thisRepN', 'thisTrialN', 'thisN', 'thisIndex',
'stepSizeCurrent'):
if hasattr(loop, attr):
attrName = name + '.' + attr.replace('Current', '')
# append the attribute name and the current value
names.append(attrName)
vals.append(getattr(loop, attr))
# method of constants
if hasattr(loop, 'thisTrial'):
trial = loop.thisTrial
if hasattr(trial, 'items'):
# is a TrialList object or a simple dict
for attr, val in list(trial.items()):
if attr not in self._paramNamesSoFar:
self._paramNamesSoFar.append(attr)
names.append(attr)
vals.append(val)
# single StairHandler
elif hasattr(loop, 'intensities'):
names.append(name + '.intensity')
if len(loop.intensities) > 0:
vals.append(loop.intensities[-1])
else:
vals.append(None)
return names, vals
def addData(self, name, value, row=None, priority=None):
"""
Add the data with a given name to the current experiment.
Typically the user does not need to use this function; if you added
your data to the loop and had already added the loop to the
experiment then the loop will automatically inform the experiment
that it has received data.
Multiple data name/value pairs can be added to any given entry of
the data file and is considered part of the same entry until the
nextEntry() call is made.
e.g.::
# add some data for this trial
exp.addData('resp.rt', 0.8)
exp.addData('resp.key', 'k')
# end of trial - move to next line in data output
exp.nextEntry()
Parameters
----------
name : str
Name of the column to add data as.
value : any
Value to add
row : int or None
Row in which to add this data. Leave as None to add to the current entry.
priority : int
Priority value to set the column to - higher priority columns appear nearer to the start of
the data file. Use values from `constants.priority` as landmark values:
- CRITICAL: Always at the start of the data file, generally reserved for Routine start times
- HIGH: Important columns which are near the front of the data file
- MEDIUM: Possibly important columns which are around the middle of the data file
- LOW: Columns unlikely to be important which are at the end of the data file
- EXCLUDE: Always at the end of the data file, actively marked as unimportant
"""
if name not in self.dataNames:
self.dataNames.append(name)
# could just copy() every value, but not always needed, so check:
try:
hash(value)
except TypeError:
# unhashable type (list, dict, ...) == mutable, so need a copy()
value = copy.deepcopy(value)
# if value is a Timestamp, resolve to a simple value
if isinstance(value, clock.Timestamp):
value = value.resolve()
# get entry from row number
entry = self.thisEntry
if row is not None:
entry = self.entries[row]
entry[name] = value
# set priority if given
if priority is not None:
self.setPriority(name, priority)
def getPriority(self, name):
"""
Get the priority value for a given column. If no priority value is
stored, returns best guess based on column name.
Parameters
----------
name : str
Column name
Returns
-------
int
The priority value stored/guessed for this column, most likely a value from `constants.priority`, one of:
- CRITICAL (30): Always at the start of the data file, generally reserved for Routine start times
- HIGH (20): Important columns which are near the front of the data file
- MEDIUM (10): Possibly important columns which are around the middle of the data file
- LOW (0): Columns unlikely to be important which are at the end of the data file
- EXCLUDE (-10): Always at the end of the data file, actively marked as unimportant
"""
if name not in self.columnPriority:
# store priority if not specified already
self.columnPriority[name] = self._guessPriority(name)
# return stored priority
return self.columnPriority[name]
def _guessPriority(self, name):
"""
Get a best guess at the priority of a column based on its name
Parameters
----------
name : str
Name of the column
Returns
-------
int
One of the following:
- HIGH (19): Important columns which are near the front of the data file
- MEDIUM (9): Possibly important columns which are around the middle of the data file
- LOW (-1): Columns unlikely to be important which are at the end of the data file
NOTE: Values returned from this function are 1 less than values in `constants.priority`,
columns whose priority was guessed are behind equivalently prioritised columns whose priority
was specified.
"""
# if there's a dot, get attribute name
if "." in name:
name = name.split(".")[-1]
# start off assuming low priority
priority = constants.priority.LOW
# if name is one of identified likely high priority columns, it's medium priority
if name in [
"keys", "rt", "x", "y", "leftButton", "numClicks", "numLooks", "clip", "response", "value",
"frameRate", "participant"
]:
priority = constants.priority.MEDIUM
return priority - 1
def setPriority(self, name, value=constants.priority.HIGH):
"""
Set the priority of a column in the data file.
Parameters
----------
name : str
Name of the column, e.g. `text.started`
value : int
Priority value to set the column to - higher priority columns appear nearer to the start of
the data file. Use values from `constants.priority` as landmark values:
- CRITICAL (30): Always at the start of the data file, generally reserved for Routine start times
- HIGH (20): Important columns which are near the front of the data file
- MEDIUM (10): Possibly important columns which are around the middle of the data file
- LOW (0): Columns unlikely to be important which are at the end of the data file
- EXCLUDE (-10): Always at the end of the data file, actively marked as unimportant
"""
self.columnPriority[name] = value
def addAnnotation(self, value):
"""
Add an annotation at the current point in the experiment
Parameters
----------
value : str
Value of the annotation
"""
self.addData("notes", value)
def timestampOnFlip(self, win, name, format=float):
"""Add a timestamp (in the future) to the current row
Parameters
----------
win : psychopy.visual.Window
The window object that we'll base the timestamp flip on
name : str
The name of the column in the datafile being written,
such as 'myStim.stopped'
format : str, class or None
Format in which to return time, see clock.Timestamp.resolve() for more info. Defaults to `float`.
"""
# make sure the name is used when writing the datafile
if name not in self.dataNames:
self.dataNames.append(name)
# tell win to record timestamp on flip
win.timeOnFlip(self.thisEntry, name, format=format)
@property
def status(self):
return self._status
@status.setter
def status(self, value):
"""
Status of this experiment, from psychopy.constants.
Parameters
----------
value : int
One of the values from psychopy.constants.
"""
# log change
valStr = {
constants.NOT_STARTED: "NOT_STARTED",
constants.STARTED: "STARTED",
constants.PAUSED: "PAUSED",
constants.RECORDING: "RECORDING",
constants.STOPPED: "STOPPED",
constants.SEEKING: "SEEKING",
constants.STOPPING: "STOPPING",
constants.INVALID: "INVALID"
}[value]
logging.exp(f"{self.name}: status = {valStr}", obj=self)
# make change
self._status = value
def pause(self):
"""
Set status to be PAUSED.
"""
# warn if experiment is already paused
if self.status == constants.PAUSED:
logging.warn(_translate(
"Attempted to pause experiment '{}', but it is already paused. "
"Status will remain unchanged.".format(self.name)
))
# set own status
self.status = constants.PAUSED
def resume(self):
"""
Set status to be STARTED.
"""
# warn if experiment is already running
if self.status == constants.STARTED:
logging.warn(_translate(
"Attempted to resume experiment '{}', but it is not paused. "
"Status will remain unchanged.".format(self.name)
))
# set own status
self.status = constants.STARTED
def stop(self):
"""
Set status to be FINISHED.
"""
# warn if experiment is already stopped
if self.status == constants.FINISHED:
    logging.warn(_translate(
        "Attempted to stop experiment '{}', but it is already stopped. "
        "Status will remain unchanged.".format(self.name)
    ))
# set own status
self.status = constants.STOPPED
def skipTrials(self, n=1):
"""
Skip ahead n trials - the trials in between will be marked as "skipped". If you try to
skip past the last trial, will log a warning and skip *to* the last trial.
Parameters
----------
n : int
Number of trials to skip ahead
"""
# return if there isn't a TrialHandler2 active
if not isinstance(self.currentLoop, TrialHandler2):
return
# skip trials in current loop
return self.currentLoop.skipTrials(n)
def rewindTrials(self, n=1):
"""
Rewind back n trials. If you try to rewind past the first trial, will log a
warning and rewind *to* the first trial.
Parameters
----------
n : int
Number of trials to rewind back
"""
# return if there isn't a TrialHandler2 active
if not isinstance(self.currentLoop, TrialHandler2):
return
# rewind trials in current loop
return self.currentLoop.rewindTrials(n)
def getAllTrials(self):
"""
Returns all trials (elapsed, current and upcoming) with an index indicating which trial is
the current trial.
Returns
-------
list[Trial]
List of trials, in order (oldest to newest)
int
Index of the current trial in this list
"""
# return None if there isn't a TrialHandler2 active
if not isinstance(self.currentLoop, TrialHandler2):
return [None], 0
# get all trials from current loop
return self.currentLoop.getAllTrials()
def getCurrentTrial(self):
"""
Returns the current trial (`.thisTrial`)
Returns
-------
Trial
The current trial
"""
# return None if there isn't a TrialHandler2 active
if not isinstance(self.currentLoop, TrialHandler2):
return None
return self.currentLoop.getCurrentTrial()
def getFutureTrial(self, n=1):
"""
Returns the condition for n trials into the future, without
advancing the trials. Returns 'None' if attempting to go beyond
the last trial in the current loop, or if there is no current loop.
"""
# return None if there isn't a TrialHandler2 active
if not isinstance(self.currentLoop, TrialHandler2):
return None
# get future trial from current loop
return self.currentLoop.getFutureTrial(n)
def getFutureTrials(self, n=1, start=0):
"""
Returns Trial objects for a given range in the future. Will start looking at `start` trials
in the future and will return n trials from then, so e.g. to get all trials from 2 in the
future to 5 in the future you would use `start=2` and `n=3`.
Parameters
----------
n : int, optional
How many trials into the future to look, by default 1
start : int, optional
How many trials into the future to start looking at, by default 0
Returns
-------
list[Trial or None]
List of Trial objects n long. Any trials beyond the last trial are None.
"""
# blank list to store trials in
trials = []
# iterate through n trials
for i in range(n):
# add each to the list
trials.append(
self.getFutureTrial(start + i)
)
return trials
def nextEntry(self):
"""Calling nextEntry indicates to the ExperimentHandler that the
current trial has ended and so further addData() calls correspond
to the next trial.
"""
this = self.thisEntry
# fetch data from each (potentially-nested) loop
for thisLoop in self.loopsUnfinished:
self.updateEntryFromLoop(thisLoop)
# add the extraInfo dict to the data
if type(self.extraInfo) == dict:
this.update(self.extraInfo)
self.entries.append(this)
# start a fresh entry for the next row
self.thisEntry = {}
def updateEntryFromLoop(self, thisLoop):
"""
Add all values from the given loop to the current entry.
Parameters
----------
thisLoop : BaseLoopHandler
Loop to get fields from
"""
# for each name and value in the current trial...
names, vals = self._getLoopInfo(thisLoop)
for n, name in enumerate(names):
# add/update value
self.thisEntry[name] = vals[n]
# make sure name is in data names
if name not in self.dataNames:
self.dataNames.append(name)
def getAllEntries(self):
"""Fetches a copy of all the entries including a final (orphan) entry
if that exists. This allows entries to be saved even if nextEntry() is
not yet called.
:return: copy (not pointer) to entries
"""
# check for orphan final data (not committed as a complete entry)
entries = copy.copy(self.entries)
if self.thisEntry: # thisEntry is not empty
entries.append(self.thisEntry)
return entries
def queueNextCollision(self, fileCollisionMethod, fileName=None):
"""
Tell this ExperimentHandler that, next time the named file is saved, it should handle
collisions a certain way. This is useful if you want to save multiple times within an
experiment.
Parameters
----------
fileCollisionMethod : str
File collision method to use, see `saveAsWideText` or `saveAsPickle` for
details.
fileName : str
Filename to queue collision on, if None (default) will use this ExperimentHandler's
`dataFileName`
"""
# handle default
if fileName is None:
fileName = self.dataFileName
# make filename iterable
if not isinstance(fileName, (list, tuple)):
fileName = [fileName]
# queue collision
for thisFileName in fileName:
self._nextSaveCollision[thisFileName] = fileCollisionMethod
def connectSaveMethod(self, fcn, *args, **kwargs):
"""
Tell this experiment handler to call the given function with the given arguments and
keyword arguments whenever it saves its own data.
Parameters
----------
fcn : function
Function to call
*args
Positional arguments to be given to the function when it's called
**kwargs
Keyword arguments to be given to the function when it's called
"""
# create a call profile for the given function
profile = {
'fcn': fcn,
'args': args,
'kwargs': kwargs
}
# connect it
self.connectedSaveMethods.append(profile)
def save(self):
"""
Work out from own settings how to save, then use the appropriate method (saveAsWideText,
saveAsPickle, etc.)
"""
savedNames = []
if self.dataFileName not in ['', None]:
if self.autoLog:
msg = 'Saving data for %s ExperimentHandler' % self.name
logging.debug(msg)
if self.savePickle:
savedNames.append(
self.saveAsPickle(self.dataFileName)
)
if self.saveWideText:
savedNames.append(
self.saveAsWideText(self.dataFileName + '.csv')
)
else:
logging.warn(
"ExperimentHandler.save was called on an ExperimentHandler with no dataFileName set."
)
# call connected save functions
for profile in self.connectedSaveMethods:
profile['fcn'](*profile['args'], **profile['kwargs'])
return savedNames
def saveAsWideText(self,
fileName,
delim='auto',
matrixOnly=False,
appendFile=None,
encoding='utf-8-sig',
fileCollisionMethod=None,
sortColumns=None):
"""Saves a long, wide-format text file, with one line representing
the attributes and data for a single trial. Suitable for analysis
in R and SPSS.
If `appendFile=True` then the data will be added to the bottom of
an existing file. Otherwise, if the file exists already it will
be kept and a new file will be created with a slightly different
name. If you want to overwrite the old file, pass 'overwrite'
to ``fileCollisionMethod``.
If `matrixOnly=True` then the file will not contain a header row,
which can be handy if you want to append data to an existing file
of the same format.
Parameters
----------
fileName:
if extension is not specified, '.csv' will be appended if
the delimiter is ',', else '.tsv' will be appended.
Can include path info.
delim:
allows the user to use a delimiter other than the default
tab ("," is popular with file extension ".csv")
matrixOnly:
outputs the data with no header row.
appendFile:
will add this output to the end of the specified file if
it already exists.
encoding:
The encoding to use when saving the file.
Defaults to `utf-8-sig`.
fileCollisionMethod:
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
sortColumns : str or bool
How (if at all) to sort columns in the data file. Can be:
- "alphabetical", "alpha", "a" or True: Sort alphabetically by header name
- "priority", "pr" or "p": Sort according to priority
- other: Do not sort, columns remain in order they were added
Returns
-------
str
Final filename (including _1, _2, etc. and file extension) which data was saved as
"""
# set default delimiter if none given
delimOptions = {
'comma': ",",
'semicolon': ";",
'tab': "\t"
}
if delim == 'auto':
delim = genDelimiter(fileName)
elif delim in delimOptions:
delim = delimOptions[delim]
if appendFile is None:
appendFile = self.appendFiles
# check for queued collision methods if using default, fallback to rename
if fileCollisionMethod is None and fileName in self._nextSaveCollision:
fileCollisionMethod = self._nextSaveCollision.pop(fileName)
elif fileCollisionMethod is None:
fileCollisionMethod = "rename"
# create the file or send to stdout
fileName = genFilenameFromDelimiter(fileName, delim)
f = openOutputFile(fileName, append=appendFile,
fileCollisionMethod=fileCollisionMethod,
encoding=encoding)
names = self._getAllParamNames()
for name in self.dataNames:
if name not in names:
names.append(name)
# names from the extraInfo dictionary
names.extend(self._getExtraInfo()[0])
if len(names) < 1:
logging.error("No data was found, so data file may not look as expected.")
# if sort columns not specified, use default from self
if sortColumns is None:
sortColumns = self.sortColumns
# sort names as requested
if sortColumns in ("alphabetical", "alpha", "a", True):
# sort alphabetically
names.sort()
elif sortColumns in ("priority", "pr" or "p"):
# map names to their priority
priorityMap = []
for name in names:
priority = self.columnPriority.get(name, self._guessPriority(name))
priorityMap.append((priority, name))
names = [name for priority, name in sorted(priorityMap, reverse=True)]
# write a header line
if not matrixOnly:
for heading in names:
f.write(u'%s%s' % (heading, delim))
f.write('\n')
# write the data for each entry
for entry in self.getAllEntries():
for name in names:
if name in entry:
ename = str(entry[name])
if ',' in ename or '\n' in ename:
fmt = u'"%s"%s'
else:
fmt = u'%s%s'
f.write(fmt % (entry[name], delim))
else:
f.write(delim)
f.write('\n')
if f != sys.stdout:
f.close()
logging.info('saved data to %r' % f.name)
return fileName
def saveAsPickle(self, fileName, fileCollisionMethod=None):
"""Basically just saves a copy of self (with data) to a pickle file.
This can be reloaded if necessary and further analyses carried out.
Parameters
----------
fileCollisionMethod : str
Collision method passed to :func:`~psychopy.tools.fileerrortools.handleFileCollision`
Returns
-------
str
Final filename (including _1, _2, etc. and file extension) which data was saved as
"""
# Store the current state of self.savePickle and self.saveWideText
# for later use:
# We are going to set both to False before saving,
# so PsychoPy won't try to save again after loading the pickled
# .psydat file from disk.
#
# After saving, the initial state of self.savePickle and
# self.saveWideText is restored.
#
# See
# https://groups.google.com/d/msg/psychopy-dev/Z4m_UX88q8U/UGuh1eeyjMEJ
savePickle = self.savePickle
saveWideText = self.saveWideText
# append extension
if not fileName.endswith('.psydat'):
fileName += '.psydat'
# check for queued collision methods if using default, fallback to rename
if fileCollisionMethod is None and fileName in self._nextSaveCollision:
fileCollisionMethod = self._nextSaveCollision.pop(fileName)
elif fileCollisionMethod is None:
fileCollisionMethod = "rename"
self.savePickle = False
self.saveWideText = False
origEntries = self.entries
self.entries = self.getAllEntries()
with openOutputFile(fileName=fileName, append=False,
fileCollisionMethod=fileCollisionMethod) as f:
pickle.dump(self, f)
if (fileName is not None) and (fileName != 'stdout'):
logging.info('saved data to %s' % f.name)
self.entries = origEntries # revert list of completed entries post-save
self.savePickle = savePickle
self.saveWideText = saveWideText
return fileName
def getJSON(self, priorityThreshold=constants.priority.EXCLUDE+1):
"""
Get the experiment data as a JSON string.
Parameters
----------
priorityThreshold : int
Output will only include columns whose priority is greater than or equal to this value. Use values in
psychopy.constants.priority as a guideline for priority levels. Default is -9 (constants.priority.EXCLUDE +
1)
Returns
-------
str
JSON string with the following fields:
- 'type': Indicates that this is data from an ExperimentHandler (will always be "trials_data")
- 'trials': `list` of `dict`s representing requested trials data
- 'priority': `dict` of column names
"""
# get columns which meet threshold
cols = [col for col in self.dataNames if self.getPriority(col) >= priorityThreshold]
# convert just relevant entries to a DataFrame
trials = pd.DataFrame(self.entries, columns=cols).fillna(value="")
# put in context
context = {
'type': "trials_data",
'thisTrial': self.thisEntry,
'trials': trials.to_dict(orient="records"),
'priority': self.columnPriority,
'threshold': priorityThreshold,
}
return json.dumps(context, indent=True, allow_nan=False, default=str)
def close(self):
self.save()
self.abort()
self.autoLog = False
def abort(self):
"""Inform the ExperimentHandler that the run was aborted.
Experiment handler will attempt automatically to save data
(even in the event of a crash if possible). So if you quit your
script early you may want to tell the Handler not to save out
the data files for this run. This is the method that allows you
to do that.
"""
self.savePickle = False
self.saveWideText = False
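# A minimal in-memory sketch of ExperimentHandler, assuming direct execution;
# no dataFileName is given, so nothing is written to disk (a warning is logged).
if __name__ == "__main__":
    _exp = ExperimentHandler(name="demo", savePickle=False, saveWideText=False)
    _exp.addData("resp.rt", 0.42, priority=constants.priority.HIGH)
    _exp.addData("resp.key", "k")
    _exp.nextEntry()  # commit the entry, start a new one
    print(_exp.getJSON())
    _exp.abort()  # suppress any attempt to save on exit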
| 34,909 | Python | .py | 810 | 32.067901 | 119 | 0.598702 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,362 | base.py | psychopy_psychopy/psychopy/data/base.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import weakref
import pickle
import os
import sys
import copy
import inspect
import codecs
import numpy as np
import pandas as pd
import json_tricks
from packaging.version import Version
import psychopy
from psychopy import logging
from psychopy.tools.filetools import (openOutputFile, genDelimiter,
genFilenameFromDelimiter, pathToString)
from psychopy.tools.fileerrortools import handleFileCollision
from psychopy.tools.arraytools import extendArr
from .utils import _getExcelCellName
try:
import openpyxl
if Version(openpyxl.__version__) >= Version('2.4.0'):
# openpyxl moved get_column_letter to utils.cell
from openpyxl.utils.cell import get_column_letter
else:
from openpyxl.cell import get_column_letter
from openpyxl import load_workbook, Workbook
haveOpenpyxl = True
except ImportError:
haveOpenpyxl = False
_experiments = weakref.WeakValueDictionary()
class _ComparisonMixin():
def __eq__(self, other):
# NoneType and booleans, for example, don't have a .__dict__ attribute.
try:
getattr(other, '__dict__')
except AttributeError:
return False
# Check if the dictionary keys are the same before proceeding.
if set(self.__dict__.keys()) != set(other.__dict__.keys()):
return False
# Loop over all keys, implementing special handling for certain
# data types.
for key, val in self.__dict__.items():
if isinstance(val, np.ma.core.MaskedArray):
if not np.ma.allclose(val, getattr(other, key)):
return False
elif isinstance(val, np.ndarray):
if not np.allclose(val, getattr(other, key)):
return False
elif isinstance(val, (pd.DataFrame, pd.Series)):
if not val.equals(getattr(other, key)):
return False
else:
if val != getattr(other, key):
return False
return True
def __ne__(self, other):
return not self == other
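# A minimal sketch of what _ComparisonMixin provides, assuming direct execution;
# _Point is a throwaway class defined only for this demo.
if __name__ == "__main__":
    class _Point(_ComparisonMixin):
        def __init__(self, x):
            self.x = np.array(x)
    assert _Point([1, 2]) == _Point([1, 2])
    assert _Point([1, 2]) != _Point([1, 3])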
class _BaseTrialHandler(_ComparisonMixin):
def setExp(self, exp):
"""Sets the ExperimentHandler that this handler is attached to
Do NOT attempt to set the experiment using::
trials._exp = myExperiment
because it needs to be performed using the `weakref` module.
"""
# need to use a weakref to avoid creating a circular reference that
# prevents effective object deletion
expId = id(exp)
_experiments[expId] = exp
self._exp = expId
# origin will have been stored by the exp so don't store again:
self.origin = None
def getExp(self):
"""Return the ExperimentHandler that this handler is attached to,
if any. Returns None if not attached
"""
if self._exp is None or self._exp not in _experiments:
return None
else:
return _experiments[self._exp]
def _terminate(self):
"""Remove references to ourself in experiments and terminate the loop
"""
# remove ourself from the list of unfinished loops in the experiment
exp = self.getExp()
if exp is not None:
exp.loopEnded(self)
# and halt the loop
raise StopIteration
def saveAsPickle(self, fileName, fileCollisionMethod='rename'):
"""Basically just saves a copy of the handler (with data) to a
pickle file.
This can be reloaded if necessary and further analyses carried out.
:Parameters:
fileCollisionMethod: Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
"""
fileName = pathToString(fileName)
if self.thisTrialN < 0 and self.thisRepN < 0:
# if both are < 0 we haven't started
if self.autoLog:
logging.info('.saveAsPickle() called but no trials completed.'
' Nothing saved')
return -1
if not fileName.endswith('.psydat'):
fileName += '.psydat'
with openOutputFile(fileName=fileName, append=False,
fileCollisionMethod=fileCollisionMethod) as f:
pickle.dump(self, f)
logging.info('saved data to %s' % f.name)
def saveAsText(self, fileName,
stimOut=None,
dataOut=('n', 'all_mean', 'all_std', 'all_raw'),
delim=None,
matrixOnly=False,
appendFile=True,
summarised=True,
fileCollisionMethod='rename',
encoding='utf-8-sig'):
"""
Write a text file with the data and various chosen stimulus attributes
:Parameters:
fileName:
will have .tsv appended and can include path info.
stimOut:
the stimulus attributes to be output. To use this you need to
use a list of dictionaries and give here the names of dictionary
keys that you want as strings
dataOut:
a list of strings specifying the dataType and the analysis to
be performed, in the form `dataType_analysis`. The data can be
any of the types that you added using trialHandler.data.add()
and the analysis can be either 'raw' or most things in the
numpy library, including; 'mean','std','median','max','min'...
The default values will output the raw, mean and std of all
datatypes found
delim:
allows the user to use a delimiter other than tab
("," is popular with file extension ".csv")
matrixOnly:
outputs the data with no header row or extraInfo attached
appendFile:
will add this output to the end of the specified file if
it already exists
fileCollisionMethod:
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
encoding:
The encoding to use when saving the file. Defaults to `utf-8-sig`.
"""
fileName = pathToString(fileName)
if stimOut is None:
stimOut = []
if self.thisTrialN < 0 and self.thisRepN < 0:
# if both are < 0 we haven't started
if self.autoLog:
logging.info('TrialHandler.saveAsText called but no trials'
' completed. Nothing saved')
return -1
dataArray = self._createOutputArray(stimOut=stimOut,
dataOut=dataOut,
matrixOnly=matrixOnly)
# set default delimiter if none given
if delim is None:
delim = genDelimiter(fileName)
# create the file or send to stdout
fileName = genFilenameFromDelimiter(fileName, delim)
with openOutputFile(fileName=fileName, append=appendFile,
fileCollisionMethod=fileCollisionMethod,
encoding=encoding) as f:
# loop through lines in the data matrix
for line in dataArray:
for cellN, entry in enumerate(line):
# surround in quotes to prevent effect of delimiter
if delim in str(entry):
f.write(u'"%s"' % str(entry))
else:
f.write(str(entry))
if cellN < (len(line) - 1):
f.write(delim)
f.write("\n") # add an EOL at end of each line
if (fileName is not None) and (fileName != 'stdout') and self.autoLog:
logging.info('saved data to %s' % f.name)
def printAsText(self, stimOut=None,
dataOut=('all_mean', 'all_std', 'all_raw'),
delim='\t',
matrixOnly=False):
"""Exactly like saveAsText() except that the output goes
to the screen instead of a file
"""
if stimOut is None:
stimOut = []
self.saveAsText('stdout', stimOut, dataOut, delim, matrixOnly)
def saveAsExcel(self, fileName, sheetName='rawData',
stimOut=None,
dataOut=('n', 'all_mean', 'all_std', 'all_raw'),
matrixOnly=False,
appendFile=True,
fileCollisionMethod='rename'):
"""
Save a summary data file in Excel OpenXML format workbook
(:term:`xlsx`) for processing in most spreadsheet packages.
This format is compatible with versions of Excel (2007 or greater)
and with OpenOffice (>=3.0).
It has the advantage over the simpler text files (see
:func:`TrialHandler.saveAsText()` )
that data can be stored in multiple named sheets within the file.
So you could have a single file named after your experiment and
then have one worksheet for each participant. Or you could have
one file for each participant and then multiple sheets for
repeated sessions etc.
The file extension `.xlsx` will be added if not given already.
:Parameters:
fileName: string
the name of the file to create or append. Can include
relative or absolute path
sheetName: string
the name of the worksheet within the file
stimOut: list of strings
the attributes of the trial characteristics to be output.
To use this you need to have provided a list of dictionaries
specifying the trialList parameter of the TrialHandler and
give here the names of strings specifying entries in that
dictionary
dataOut: list of strings
specifying the dataType and the analysis to
be performed, in the form `dataType_analysis`. The data
can be any of the types that you added using
trialHandler.data.add() and the analysis can be either
'raw' or most things in the numpy library, including
'mean','std','median','max','min'. e.g. `rt_max` will give
a column of max reaction times across the trials assuming
that `rt` values have been stored. The default values will
output the raw, mean and std of all datatypes found.
appendFile: True or False
If False any existing file with this name will be
kept and a new file will be created with a slightly different
name. If you want to overwrite the old file, pass 'overwrite'
to ``fileCollisionMethod``.
If True then a new worksheet will be appended.
If a worksheet already exists with that name a number will
be added to make it unique.
fileCollisionMethod: string
Collision method (``rename``,``overwrite``, ``fail``) passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
This is ignored if ``appendFile`` is ``True``.
"""
fileName = pathToString(fileName)
if stimOut is None:
stimOut = []
if self.thisTrialN < 0 and self.thisRepN < 0:
# if both are < 1 we haven't started
if self.autoLog:
logging.info('TrialHandler.saveAsExcel called but no '
'trials completed. Nothing saved')
return -1
# NB this was based on the limited documentation (1 page wiki) for
# openpyxl v1.0
if not haveOpenpyxl:
raise ImportError('openpyxl is required for saving files in'
' Excel (xlsx) format, but was not found.')
# return -1
# create the data array to be sent to the Excel file
dataArray = self._createOutputArray(stimOut=stimOut,
dataOut=dataOut,
matrixOnly=matrixOnly)
if not fileName.endswith('.xlsx'):
fileName += '.xlsx'
# create or load the file
if appendFile and os.path.isfile(fileName):
wb = load_workbook(fileName)
newWorkbook = False
else:
if not appendFile:
# the file exists but we're not appending, a new file will
# be saved with a slightly different name, unless
# fileCollisionMethod = ``overwrite``
fileName = handleFileCollision(fileName,
fileCollisionMethod)
wb = Workbook() # create new workbook
wb.properties.creator = 'PsychoPy' + psychopy.__version__
newWorkbook = True
if newWorkbook:
ws = wb.worksheets[0]
ws.title = sheetName
else:
ws = wb.create_sheet()
ws.title = sheetName
# loop through lines in the data matrix
for lineN, line in enumerate(dataArray):
if line is None:
continue
for colN, entry in enumerate(line):
if entry is None:
entry = ''
try:
# if it can convert to a number (from numpy) then do it
val = float(entry)
except Exception:
val = u"{}".format(entry)
ws.cell(column=colN+1, row=lineN+1, value=val)
wb.save(filename=fileName)
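# A brief illustrative sketch (hypothetical names): with one xlsx file per
# experiment and one sheet per participant, repeated calls such as
#   trials.saveAsExcel('myExperiment', sheetName='p01')
# append a new worksheet to myExperiment.xlsx on each run; per the docstring
# above, a number is added to the sheet name if 'p01' already exists.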
def saveAsJson(self,
fileName=None,
encoding='utf-8-sig',
fileCollisionMethod='rename'):
"""
Serialize the object to the JSON format.
Parameters
----------
fileName: string, or None
the name of the file to create or append. Can include a relative or
absolute path. If `None`, will not write to a file, but return an
in-memory JSON object.
encoding : string, optional
The encoding to use when writing the file.
fileCollisionMethod : string
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`. Can be
either of `'rename'`, `'overwrite'`, or `'fail'`.
Notes
-----
Currently, a copy of the object is created, and the copy's .origin
attribute is set to an empty string before serializing
because loading the created JSON file would sometimes fail otherwise.
"""
fileName = pathToString(fileName)
self_copy = copy.deepcopy(self)
self_copy.origin = ''
msg = ('Setting attribute .origin to empty string during JSON '
'serialization.')
logging.warn(msg)
if (fileName is None) or (fileName == 'stdout'):
return json_tricks.dumps(self_copy)
else:
with openOutputFile(fileName=fileName,
fileCollisionMethod=fileCollisionMethod,
encoding=encoding) as f:
json_tricks.dump(self_copy, f)
logging.info('Saved JSON data to %s' % f.name)
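# A short sketch (assumed usage): serialize to an in-memory string rather
# than a file by omitting the file name:
#   jsonString = trials.saveAsJson()   # returns JSON via json_tricks.dumps
# Note that .origin is cleared on the serialized copy, as described above.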
def getOriginPathAndFile(self, originPath=None):
"""Attempts to determine the path of the script that created this
data file and returns both the path to that script and its contents.
Useful to store the entire experiment with the data.
If originPath is provided (e.g. from Builder) then this is used
otherwise the calling script is the originPath (fine from a
standard python script).
"""
# self.originPath and self.origin (the contents of the origin file)
if originPath == -1:
return -1, None # the user wants to avoid storing this
elif originPath is None or not os.path.isfile(originPath):
try:
originPath = inspect.getouterframes(
inspect.currentframe())[2][1]
if self.autoLog:
logging.debug("Using %s as origin file" % originPath)
except Exception:
if self.autoLog:
logging.debug("Failed to find origin file using "
"inspect.getouterframes")
return '', ''
if os.path.isfile(originPath): # do we NOW have a path?
with codecs.open(originPath, "r", encoding="utf-8-sig") as f:
origin = f.read()
else:
origin = None
return originPath, origin
class DataHandler(_ComparisonMixin, dict):
"""For handling data (used by TrialHandler, principally, rather than
by users directly)
Numeric data are stored as numpy masked arrays where the mask is set
True for missing entries. When any non-numeric data (string, list or
array) get inserted using DataHandler.add(val) the array is converted
to a standard (not masked) numpy array with dtype='O' and where missing
entries have value = "--".
Attributes:
- ['key']=data arrays containing values for that key
(e.g. data['accuracy']=...)
- dataShape=shape of data (x,y,...z,nReps)
- dataTypes=list of keys as strings
"""
def __init__(self, dataTypes=None, trials=None, dataShape=None):
self.trials = trials
self.dataTypes = [] # names will be added during addDataType
self.isNumeric = {}
# if given dataShape use it - otherwise guess!
if dataShape:
self.dataShape = dataShape
elif self.trials:
self.dataShape = list(np.asarray(trials.trialList, 'O').shape)
self.dataShape.append(trials.nReps)
# initialise arrays now if poss
if dataTypes and self.dataShape:
for thisType in dataTypes:
self.addDataType(thisType)
def __eq__(self, other):
# We ignore an attached TrialHandler object, otherwise we will end up
# in an infinite loop, as this DataHandler is attached to the
# TrialHandler!
from psychopy.data import TrialHandler
if isinstance(self.trials, TrialHandler):
self_copy = copy.deepcopy(self)
other_copy = copy.deepcopy(other)
del self_copy.trials, other_copy.trials
result = super(DataHandler, self_copy).__eq__(other_copy)
msg = ('TrialHandler object detected in .trials. Excluding it from '
'comparison.')
logging.warning(msg)
else:
result = super(DataHandler, self).__eq__(other)
return result
def addDataType(self, names, shape=None):
"""Add a new key to the data dictionary of particular shape if
specified (otherwise the shape of the trial matrix in the trial
handler. Data are initialised to be zero everywhere. Not needed
by user: appropriate types will be added during initialisation
and as each xtra type is needed.
"""
if not shape:
shape = self.dataShape
if not isinstance(names, str):
# recursively call this function until we have a string
for thisName in names:
self.addDataType(thisName)
else:
# create the appropriate array in the dict
# initially use numpy masked array of floats with mask=True
# for missing vals. convert to a numpy array with dtype='O'
# if non-numeric data given. NB don't use masked array with
# dtype='O' together - they don't unpickle
self[names] = np.ma.zeros(shape, 'f') # masked array of floats
self[names].mask = True
# add the name to the list
self.dataTypes.append(names)
self.isNumeric[names] = True # until we need otherwise
def add(self, thisType, value, position=None):
"""Add data to an existing data type (and add a new one if necess)
"""
if thisType not in self:
self.addDataType(thisType)
if position is None:
# 'ran' is always the first thing to update
repN = sum(self['ran'][self.trials.thisIndex])
if thisType != 'ran':
# because it has already been updated
repN -= 1
# make a list where 1st digit is trial number
position = [self.trials.thisIndex]
position.append(repN)
# check whether data falls within bounds
posArr = np.asarray(position)
shapeArr = np.asarray(self.dataShape)
if not np.all(posArr < shapeArr):  # np.alltrue was removed in NumPy 2.0
# array isn't big enough
logging.warning('need a bigger array for: ' + thisType)
# not implemented yet!
self[thisType] = extendArr(self[thisType], posArr)
# check for ndarrays with more than one value and for non-numeric data
if (self.isNumeric[thisType] and
((type(value) == np.ndarray and len(value) > 1) or
(type(value) not in [float, int]))):
self._convertToObjectArray(thisType)
# insert the value
self[thisType][position[0], int(position[1])] = value
def _convertToObjectArray(self, thisType):
"""Convert this datatype from masked numeric array to unmasked
object array
"""
dat = self[thisType]
# create an array of Object type
self[thisType] = np.array(dat.data, dtype='O')
# masked vals should be "--", others keep data
# we have to repeat forcing to 'O' or text gets truncated to 4chars
self[thisType] = np.where(dat.mask, '--', dat).astype('O')
self.isNumeric[thisType] = False
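# A minimal, self-contained sketch (not part of PsychoPy) of how DataHandler
# stores values: numeric entries live in a masked float array until the first
# non-numeric value forces conversion to an object array with '--' for
# missing cells. Shape and values here are assumptions for illustration.
if __name__ == "__main__":
    demo = DataHandler(dataTypes=['rt'], dataShape=[3, 2])
    demo.add('rt', 0.42, position=[0, 0])        # stays a masked float array
    demo.add('rt', 'too slow', position=[1, 0])  # converts to dtype='O'
    print(demo['rt'])  # missing cells now show as '--'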
| 22,189 | Python | .py | 478 | 34.046025 | 80 | 0.590482 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,363 | routine.py | psychopy_psychopy/psychopy/data/routine.py |
from psychopy import constants
class Routine:
"""
Object representing a Routine, used to store start/stop times and other aspects of Routine settings.
Parameters
----------
name : str
Name of the Routine
components : list[object]
List of handles to Components associated with this Routine
maxDuration : float or None
Maximum time this Routine can take. None if there is no maximum.
Attributes
----------
tStart : float or None
Time (UTC) when this Routine started
tStartRefresh : float or None
Time (UTC) of the first frame flip of this Routine
tStop : float or None
Time (UTC) when this Routine ended
tStopRefresh : float or None
Time (UTC) of the last frame flip of this Routine
maxDurationReached : bool
True if this Routine ended by its max duration being reached
skipped : bool
True if this Routine was skipped by the "Skip if..." parameter of its settings
forceEnded : bool
True if this Routine was forcibly ended (e.g. by a key press)
status : int
Value from psychopy.constants.status indicating whether this Routine has started, is finished, etc.
"""
def __init__(
self,
name,
components=None,
maxDuration=None,
):
self.name = name
# use None as the default to avoid sharing one mutable list between instances
self.components = components if components is not None else []
self.maxDuration = maxDuration
# start all times as None
self.tStart = None
self.tStartRefresh = None
self.tStop = None
self.tStopRefresh = None
# start off assuming not skipped, timed out or force ended
self.maxDurationReached = False
self.skipped = False
self.forceEnded = False
# starting status
self.status = constants.NOT_STARTED
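# A minimal sketch (not part of PsychoPy) showing the lifecycle flags a
# Routine starts with; the component handles are omitted for brevity.
if __name__ == "__main__":
    routine = Routine('trial', components=[], maxDuration=5.0)
    assert routine.status == constants.NOT_STARTED
    assert not (routine.skipped or routine.forceEnded
                or routine.maxDurationReached)
    print('Routine %r initialised, maxDuration=%s'
          % (routine.name, routine.maxDuration))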
| 1,830 | Python | .py | 51 | 28.45098 | 107 | 0.655701 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,364 | alerttools.py | psychopy_psychopy/psychopy/alerts/alerttools.py |
import ast
from numpy import array
from esprima import parseScript
from psychopy.tools import monitorunittools
from psychopy.alerts._alerts import alert
from psychopy.tools.fontmanager import FontManager
fontMGR = FontManager()
class TestWin:
"""
Creates a false window with necessary attributes for converting component
Parameters to pixels.
"""
def __init__(self, exp):
self.useRetina = True
self.exp = exp
self.monitor = self.exp.settings.monitor
winSize = self.exp.settings.params['Window size (pixels)'].val
if winSize and isinstance(winSize, str):
self.size = ast.literal_eval(winSize)
elif winSize and (isinstance(winSize, list) or isinstance(winSize, tuple)):
self.size = winSize
else:
self.size = (1024, 768)
def validDuration(t, hz, toleranceFrames=0.01):
"""Test whether this is a possible time duration given the frame rate"""
# best not to use mod operator for floats. e.g. 0.5%0.01 gives 0.00999
# (due to a float round error?)
# nFrames = t*hz so test if round(nFrames)==nFrames but with a tolerance
nFrames = float(t) * hz # t might not be float if given as "0.5"?
return abs(nFrames - round(nFrames)) < toleranceFrames
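# A small illustrative check (values assumed, not from the PsychoPy test
# suite): at 60 Hz a 0.5 s duration is exactly 30 frames, while 0.51 s is
# 30.6 frames and so cannot land on a frame boundary.
#
# >>> validDuration(0.5, 60)
# True
# >>> validDuration(0.51, 60)
# False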
def convertParamToPix(value, win, units):
"""
Convert a parameter value to pixels, returned as a numpy array
Parameters
----------
value : str, int, float, list, tuple
Parameter value to be converted to pixels
win : TestWin object
A false window with necessary attributes for converting component
parameters to pixels
units : str
Screen units
Returns
-------
numpy array
Parameter converted to pixels in numpy array
"""
if isinstance(value, str):
value = array(ast.literal_eval(value))
else:
value = array(value)
return monitorunittools.convertToPix(value, array([0, 0]), units=units, win=win) * 2
def testFloat(val):
"""
Test value for float.
Used to detect use of variables, strings and None values, which cannot be checked.
"""
try:
return type(float(val)) == float
except Exception:
return False
def testSize(component, win, units):
"""
Runs size testing for component
Parameters
----------
component: Component
The component used for size testing
win : TestWin object
Used for testing component size in bounds
units : str
Screen units
"""
if 'size' not in component.params:
return
try:
size = convertParamToPix(component.params['size'].val, win, units)
except Exception: # Use of variables fails check
return
# Test X
if size[0] > win.size[0]:
alert(2115, component, {'dimension': 'X'})
# Test Y
if size[1] > win.size[1]:
alert(2115, component, {'dimension': 'Y'})
# Test if smaller than 1 pixel (X dimension)
if size[0] < 1:
alert(2120, component, {'dimension': 'X'})
# Test if smaller than 1 pixel (Y dimension)
if size[1] < 1:
alert(2120, component, {'dimension': 'Y'})
def testPos(component, win, units):
"""
Runs position testing for component
Parameters
----------
component: Component
The component used for size testing
win : TestWin object
Used for testing component position in bounds
units : str
Screen units
"""
if 'pos' not in component.params:
return
try:
pos = convertParamToPix(component.params['pos'].val, win, units)
except Exception: # Use of variables fails check
return
# Test X position
if abs(pos[0]) > win.size[0]:
alert(2155, component, {'dimension': 'X'})
# Test Y position
if abs(pos[1]) > win.size[1]:
alert(2155, component, {'dimension': 'Y'})
def testStartEndTiming(component):
"""
Tests that a stimulus is set to start before its stop time.
Parameters
----------
component: Component
The component whose timing is being tested
"""
if "startType" not in component.params or "stopType" not in component.params :
return
if (component.params['startType'] not in ["time (s)", "frame N"]
or component.params['stopType'] not in ["time (s)", "frame N"]):
return
start = {'type': component.params['startType'].val, 'val' : component.params['startVal'].val}
stop = {'type': component.params['stopType'].val, 'val' : component.params['stopVal'].val}
# Check for string / variable
if not all([testFloat(start['val']), testFloat(stop['val'])]):
return
if [start['type'], stop['type']] == ["time (s)", "time (s)"]:
if float(start['val']) > float(stop['val']):
alert(4105, component, {'type': 'time'})
if [start['type'], stop['type']] == ["frame N", "frame N"]:
if int(float(start['val'])) > int(float(stop['val'].strip())):
alert(4105, component, {'type': 'frame'})
def testAchievableVisualOnsetOffset(component):
"""Test whether start and end times are less than 1 screen refresh.
"""
if component.type not in ["Text", "Aperture", "Dots", "EnvGrating", "Form",
"Grating", "Image", "Movie", "NoiseStim", "Polygon"]:
return
if "startType" not in component.params or "stopType" not in component.params:
return
startVal = component.params['startVal'].val
stopVal = component.params['stopVal'].val
if testFloat(startVal):
if component.params['startType'] == "time (s)":
# Test times are greater than 1 screen refresh for 60Hz and 100Hz monitors
if not float.is_integer(float(startVal)) and float(startVal) < 1.0 / 60:
alert(3110, component, {'type': 'start', 'time': startVal, 'Hz': 60})
if not float.is_integer(float(startVal)) and float(startVal) < 1.0 / 100:
alert(3110, component, {'type': 'start', 'time': startVal, 'Hz': 100})
if testFloat(stopVal):
if component.params['stopType'] == "duration (s)":
# Test times are greater than 1 screen refresh for 60Hz and 100Hz monitors
if not float.is_integer(float(stopVal)) and float(stopVal) < 1.0 / 60:
alert(3110, component, {'type': 'stop', 'time': stopVal, 'Hz': 60})
if not float.is_integer(float(stopVal)) and float(stopVal) < 1.0 / 100:
alert(3110, component, {'type': 'stop', 'time': stopVal, 'Hz': 100})
def testValidVisualStimTiming(component):
"""Test whether visual stimuli presented accurately for times requested,
relative to screen refresh rate of 60 and 100Hz monitors.
"""
if component.type not in ["Text", "Aperture", "Dots", "EnvGrating", "Form",
"Grating", "Image", "Movie", "NoiseStim", "Polygon"]:
return
if "startType" not in component.params or "stopType" not in component.params:
return
# Check for string / variable
startVal = component.params['startVal'].val
stopVal = component.params['stopVal'].val
if testFloat(startVal):
if component.params['startType'] == "time (s)":
# Test times are valid multiples of screen refresh for 60Hz and 100Hz monitors
if not validDuration(startVal, 60):
alert(3115, component, {'type': 'start', 'time': startVal, 'Hz': 60})
if testFloat(stopVal):
if component.params['stopType'] == "duration (s)":
# Test times are valid multiples of screen refresh for 60Hz and 100Hz monitors
if not validDuration(stopVal, 60):
alert(3115, component, {'type': 'stop', 'time': stopVal, 'Hz': 60})
def testFramesAsInt(component):
"""
Test whole numbers are used for frames.
"""
if "startType" not in component.params or "stopType" not in component.params :
return
startVal = component.params['startVal'].val
stopVal = component.params['stopVal'].val
if testFloat(startVal):
if component.params['startType'] in ["frame N", "duration (frames)"]:
# Test frames are whole numbers
if not float.is_integer(float(startVal)):
alert(4115, component, {'type': 'start', 'frameType': component.params['startType']})
if testFloat(stopVal):
if component.params['stopType'] in ["frame N", "duration (frames)"]:
# Test frames are whole numbers
if not float.is_integer(float(stopVal)):
alert(4115, component, {'type': 'stop', 'frameType': component.params['stopType']})
def testDisabled(component):
"""
Tests whether a component is enabled.
Parameters
----------
component: Component
The component used for testing
"""
if "disabled" not in component.params:
return
if component.params['disabled'].val:
alert(4305, component, strFields={'name': component.params['name']})
def testFont(component):
"""
Tests whether font is stored locally or whether it needs to be retrieved from Google Fonts
Parameters
----------
component: Component
The component used for testing
"""
if 'font' in component.params:
fontInfo = fontMGR.getFontsMatching(component.params['font'].val, fallback=False)
if not fontInfo:
alert(4320, strFields={'param': component.params['font']})
def testDollarSyntax(component):
"""
Tests that dollar signs in Builder components, used to denote literal interpretation, are used correctly
Parameters
----------
component: Component
The component used for testing
"""
valid = {}
for (key, param) in component.params.items():
if not param.dollarSyntax()[0]:
alert(4315, strFields={'component': component, 'param': param})
return valid
def checkPythonSyntax(component, tab):
"""
Checks each Python code component tabs for syntax errors.
Note, catalogue message is formatted using a dict that contains:
{
'codeTab': The code component tab as string,
'code': The code containing the error,
'lineNumber': The line number of error as string
}
Parameters
----------
component: Component
The code component being tested
tab: str
The name of the code component tab being tested
"""
try:
compile(str(component.params[tab].val), "path", 'exec')
except Exception as err:
strFields = {'codeTab': tab, 'lineNumber': err.lineno, 'code': err.text}
# Don't send traceback because strFields gives better localisation of the error
alert(4205, component, strFields)
def checkJavaScriptSyntax(component, tab):
"""
Checks each JS code component tabs for syntax errors.
Note, catalogue message is formatted using a dict that contains:
{
'codeTab': The code component tab as string,
'lineNumber': The line number and error msg as string
}
Parameters
----------
component: Component
The code component being tested
tab: str
The name of the code component tab being tested
"""
try:
parseScript(str(component.params[tab].val))
except Exception as err:
strFields = {'codeTab': tab, 'lineNumber': err.message}
# Don't send traceback because strFields gives better localisation of the error
alert(4210, component, strFields)
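# A minimal, self-contained sketch (not part of PsychoPy) of driving
# checkPythonSyntax with a stand-in component; _FakeParam and _FakeComponent
# are hypothetical stubs mimicking just the params interface the check uses.
if __name__ == "__main__":
    class _FakeParam:
        def __init__(self, val):
            self.val = val

    class _FakeComponent:
        type = 'Code'
        params = {'name': _FakeParam('code_1'),
                  'Begin Experiment': _FakeParam("print('hi'")}  # missing ')'

    # should emit alert 4205 (Python syntax error) to stderr
    checkPythonSyntax(_FakeComponent(), 'Begin Experiment')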
| 11,529 | Python | .py | 283 | 33.508834 | 108 | 0.637208 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,365 | _alerts.py | psychopy_psychopy/psychopy/alerts/_alerts.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import traceback
import yaml
import os
import sys
import codecs
from psychopy.localization import _translate
"""
The Alerts module is used for generating alerts during PsychoPy integrity checks.
Attributes
----------
catalog : AlertCatalog
For loading alert catalogues, or definitions of each alert, from a catalog of yaml files.
Each catalogue entry has a code key, with values of code, category, msg, and url.
Each entry has equivalent reStructuredText entries for insertion into help pages.
alertLog : List
For storing alerts that are otherwise lost when flushing the standard stream. The stored
list can be used to feed the AlertPanel used in Project Info and the Runner frame.
"""
_activeAlertHandlers = []
class AlertCatalog:
"""A class for loading alerts from the alerts catalogue yaml file"""
def __init__(self):
self.alert = self.load()
@property
def alertPath(self):
return Path(__file__).parent / "alertsCatalogue"
@property
def alertFiles(self):
return list(self.alertPath.glob("*[0-9].*"))
def load(self):
"""Loads alert catalogue yaml files
Returns
-------
dict
The alerts catalogue as a Python dictionary
"""
alertDict = {}
for filePath in self.alertFiles:
# '{}'.format(filePath) instead of simple open(filePath,'r')
# is needed for Py2 support only
with codecs.open('{}'.format(filePath), 'r', 'utf-8') as ymlFile:
entry = yaml.load(ymlFile, Loader=yaml.SafeLoader)
if entry is None:
continue # this might be a stub for future entry
ID = entry['code']
alertDict[ID] = entry
if 'url' not in entry: # give a default URL
entry['url'] = ('https://psychopy.org/alerts/{}.html'
.format(ID))
return alertDict
class AlertEntry:
"""An Alerts data class holding alert data as attributes
Attributes
----------
code: int
The 4 digit code for retrieving alert from AlertCatalogue
cat: str
The category of the alert
url: str
A URL for pointing towards information resources for solving the issue
obj: object
The object related to the alert e.g., TextComponent object.
type: str
Type of component being tested
name: str
Name of component being tested
msg: str
The alert message
trace: sys.exc_info() traceback object
The traceback
Parameters
----------
code: int
The 4 digit code for retrieving alert from AlertCatalogue
obj: object
The object related to the alert e.g., TextComponent object.
strFields: dict
Dict containing relevant values for formatting messages
trace: sys.exc_info() traceback object
The traceback
"""
def __init__(self, code, obj, strFields=None, trace=None):
self.label = catalog.alert[code]['label']
self.code = catalog.alert[code]['code']
self.cat = catalog.alert[code]['cat']
self.url = catalog.alert[code]['url']
self.obj = obj
if hasattr(obj, 'type'):
self.type = obj.type
else:
self.type = None
if hasattr(obj, "params"):
self.name = obj.params['name'].val
else:
self.name = None
# _translate(catalog.alert[code]['msg']) works, but string literals
# in _translate() (i.e., 'msg' in this case) cause false detection
# by pybabel.
msg = catalog.alert[code]['msg']
if strFields:
self.msg = _translate(msg).format(**strFields)
else:
self.msg = _translate(msg)
if trace:
self.trace = ''.join(traceback.format_exception(
trace[0], trace[1], trace[2]))
else:
self.trace = None
def alert(code=None, obj=object, strFields=None, trace=None):
"""The alert function is used for writing alerts to the standard error stream.
Alerts are delivered to any registered alert handlers via their "receiveAlert"
method, falling back to sys.stderr if none are registered.
Parameters
----------
code: int
The 4 digit code for retrieving alert from AlertCatalogue
obj: object
The object related to the alert e.g., TextComponent object
strFields: dict
Dict containing relevant values for formatting messages
trace: sys.exc_info() traceback object
The traceback
"""
msg = AlertEntry(code, obj, strFields, trace)
# format the warning into a string for console and logging targets
msgAsStr = ("Alert {code}: {msg}\n"
"For more info see https://docs.psychopy.org/alerts/{code}.html"
.format(type=msg.type,
name=msg.name,
code=msg.code,
cat=msg.cat,
msg=msg.msg,
trace=msg.trace))
if len(_activeAlertHandlers):
# if we have any active handlers, send to them
for handler in _activeAlertHandlers:
# send alert
handler.receiveAlert(msg)
elif hasattr(sys.stderr, 'receiveAlert'):
# if there aren't any, but stderr can receive alerts, send them there
sys.stderr.receiveAlert(msg)
else:
# otherwise, just write as a string to stderr
sys.stderr.write(msgAsStr)
def isAlertHandler(handler):
"""
Is the given handler an alert handler?
Parameters
----------
handler : ScriptOutputCtrl
Handler to query.
Returns
-------
bool
True if the given handler is an alert handler.
"""
return handler in _activeAlertHandlers
def addAlertHandler(handler):
"""
Add a handler to the list of active alert handlers.
Parameters
----------
handler : ScriptOutputCtrl
Handler to add.
"""
if not isAlertHandler(handler):
_activeAlertHandlers.append(handler)
def removeAlertHandler(handler):
"""
Remove a handler from the list of active alert handlers.
Parameters
----------
handler : ScriptOutputCtrl
Handler to remove.
"""
if isAlertHandler(handler):
_activeAlertHandlers.pop(
_activeAlertHandlers.index(handler)
)
# Create catalog
catalog = AlertCatalog()
alertLog = []
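# A minimal sketch (not part of PsychoPy) of routing alerts to a custom
# handler: anything exposing a receiveAlert() method can be registered.
# Alert code 4310 ("Cannot calculate parameter.") is used purely as an
# example of a catalogue entry that needs no extra string fields.
if __name__ == "__main__":
    class _PrintHandler:
        def receiveAlert(self, alertEntry):
            print('received alert %s: %s' % (alertEntry.code, alertEntry.msg))

    handler = _PrintHandler()
    addAlertHandler(handler)
    alert(4310, obj=object())
    removeAlertHandler(handler)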
| 6,556 | Python | .py | 184 | 27.532609 | 93 | 0.621523 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,366 | __init__.py | psychopy_psychopy/psychopy/alerts/__init__.py |
"""
The Alert module
"""
from ._alerts import alert, isAlertHandler, addAlertHandler, removeAlertHandler, catalog
import yaml
from pathlib import Path
import re
def validateCatalogue(dev=False):
"""
Check that every value in the catalogue corresponds to a yaml file.
dev : If True, then missing entries will be created from template. Useful when writing alerts.
"""
# Define root folder
root = Path(__file__).parent / "alertsCatalogue"
# Make template object
with open(root / "alertTemplate.yaml") as f:
template = f.read()
# Create blank array to store missing alert keys in
missing = []
def validate(spec):
# Start off valid
valid = True
# Get category
if "cat" in spec:
cat = spec['cat']
else:
cat = "Unknown"
for key, val in spec.items():
if key == "cat" or key % 1000 == 0 or key == 9999:
# Skip category tags
continue
if isinstance(val, str):
# Check whether file exists
file = root / f"{key}.yaml"
valid = valid and file.is_file()
# If in dev mode, make a new yaml file from template
if dev and not file.is_file():
newAlert = template
# Replace whatever is possible at this stage
newAlert = re.sub(r"(?<=code\: )\d\d\d\d", str(key), newAlert)
newAlert = re.sub(r"(?<=cat\: ).*", str(cat), newAlert)
newAlert = re.sub(r"(?<=label\: ).*", str(val), newAlert)
# Save new file
with open(file, "w") as f:
f.write(newAlert)
# Store missing key
if not valid:
missing.append(key)
if isinstance(val, dict):
# Recursively search through dicts
valid = valid and validate(val)
return valid
# Load catalog
with open(root / "alertCategories.yaml") as f:
spec = yaml.load(f, Loader=yaml.FullLoader)
# Validate
valid = validate(spec)
return valid, missing
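# A short sketch (assumed usage): checking that every catalogue key has a
# matching yaml file; with dev=True, missing entries would instead be
# stubbed out from the template.
if __name__ == "__main__":
    valid, missing = validateCatalogue()
    print('catalogue valid: %s, missing entries: %s' % (valid, missing))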
| 2,231 | Python | .py | 59 | 27.084746 | 98 | 0.546168 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,367 | _errorHandler.py | psychopy_psychopy/psychopy/alerts/_errorHandler.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
class _BaseErrorHandler:
"""A base class for handling PsychoPy alerts and exceptions.
"""
def __init__(self, alwaysReceive=True, autoFlush=True):
self.errors = []
self.alerts = []
self.alwaysReceive = alwaysReceive
self.autoFlush = autoFlush
def write(self, toWrite):
"""This is needed for any Python Exceptions, which assume the stderr
is a file-like object. But we might well simply store the message for
printing later.
"""
self.errors.append(toWrite)
if self.autoFlush:
self.flush()
def flush(self):
"""Print errors and alerts to console and clear errors.
"""
for err in self.errors:
print(err)
self.errors = []
def receiveAlert(self, alert):
"""
Handles PsychoPy alerts (sent by _alerts.alert).
This function should ONLY be called by _alerts.alert.
Parameters:
-----------
alert: psychopy.alert._alert.AlertEntry object
A data object containing alert information.
"""
self.alerts.append(alert)
if self.autoFlush:
self.flush()
def __del__(self):
self.flush()
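# A minimal sketch (not part of PsychoPy) of buffering output with
# _BaseErrorHandler: with autoFlush off, messages accumulate until an
# explicit flush() prints and clears them.
if __name__ == "__main__":
    handler = _BaseErrorHandler(autoFlush=False)
    handler.write('first message')
    handler.write('second message')
    handler.flush()  # prints both, then clears self.errors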
| 1,468 | Python | .py | 41 | 28.04878 | 79 | 0.620592 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,368 | generateAlertmsg.py | psychopy_psychopy/psychopy/alerts/alertsCatalogue/generateAlertmsg.py |
from psychopy.alerts import catalog
from psychopy import core
import sys
write_mode = 'w'
alertmsgFile = 'alertmsg.py'
try:
fp = open(alertmsgFile, write_mode)
fp.write('#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n')
fp.write('# This file was generated by generateAlertmsg.py.\n')
fp.write('# Following strings are used to localize alerts.\n')
fp.write('# Rebuild this file if alert messages in *.yaml files'
' are modified.\n\n')
fp.write('from psychopy.localization import _translate\n\n')
except Exception:
# If alertmsg.py could not be opened as a writable file, output to STDOUT.
fp = sys.stdout
fp.write('# Warning: could not open alertmsg.py. STDOUT is selected.')
for id in catalog.alert:
if not isinstance(id, int) or id in (9998, 9999):
continue
fp.write('# Alert {}\n'.format(id))
msg = catalog.alert[id]['msg'].replace(
'\\', '\\\\').replace('"', '\\"') # escape double quotation marks
fp.write('_translate("{}")\n\n'.format(msg))
if fp is not sys.stdout:
    fp.close()
cmd = ['autopep8', alertmsgFile, '--in-place']
core.shellCall(cmd)
| 1,116 | Python | .py | 27 | 36.925926 | 74 | 0.664193 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,369 | alertmsg.py | psychopy_psychopy/psychopy/alerts/alertsCatalogue/alertmsg.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file was generated by generateAlertmsg.py.
# Following strings are used to localize alerts.
# Rebuild this file if alert messages in *.yaml files are modified.
from psychopy.localization import _translate
# Alert 2115
_translate(
"Your stimulus size exceeds the {dimension} dimension of your window.")
# Alert 2120
_translate(
"Your stimulus size is smaller than 1 pixel ({dimension} dimension).")
# Alert 2155
_translate(
"Your stimulus position exceeds the {dimension} dimension of your window.")
# Alert 3110
_translate(
"Your stimulus {type} time of {time} is less than a screen refresh for a {Hz}Hz monitor.")
# Alert 3115
_translate(
"Your stimulus {type} time of {time} seconds cannot be accurately presented for {time} on a {Hz}Hz monitor.")
# Alert 4051
_translate(
"Experiment was built in a future version of PsychoPy ({version}), we recommend either updating PsychoPy or changing the \"Use Version\" setting in Experiment Settings to this version.")
# Alert 4052
_translate(
"Experiment was built in a past version of PsychoPy ({version}), saving it in this version may add parameters which cannot be parsed.")
# Alert 4105
_translate(
"Your stimulus start {type} exceeds the stop {type}. Consider using a {type} duration.")
# Alert 4115
_translate(
"Your stimulus {type} type {frameType} must be expressed as a whole number.")
# Alert 4120
_translate(
"Component {component} has stop time set as a duration, but doesn't have a start time")
# Alert 4125
_translate(
"Microphone component `{name}` given blank stop time, using max duration allowed by buffer size ({stopVal}s).")
# Alert 4205
_translate(
"Python Syntax Error in '{codeTab}' tab. See '{code}' on line number {lineNumber} of the '{codeTab}' tab.")
# Alert 4210
_translate(
"JavaScript Syntax Error in '{codeTab}' tab. See '{lineNumber}' in the '{codeTab}' tab.")
# Alert 4305
_translate(
"The component {name} is currently disabled and will not be written to your experiment script.")
# Alert 4310
_translate("Cannot calculate parameter.")
# Alert 4315
_translate(
"Builder cannot interpret value \"{param.val}\" of {param.label} for {component.type} component \"{component.params[name].val}\" as a dollar sign has been used incorrectly.")
# Alert 4320
_translate(
"Font `{param.val}` not found locally, will check Google Fonts on next run.")
# Alert 4325
_translate(
"Font `{font} {style}` not available in weight `{weight}`, component `{name}` will default to Open Sans Regular.")
# Alert 4330
_translate(
"Recording device '{device}' not found, using default device instead.")
# Alert 4335
_translate(
"The component or routine '{comp}' is only implemented online, so will do nothing when running locally.")
# Alert 4340
_translate(
"The component or routine '{comp}' is only implemented locally, so will do nothing when running online.")
# Alert 4405
_translate(
"Editable textbox component {textbox} and keyboard component {keyboard} in the same routine may compete for keypresses")
# Alert 4505
_translate("Experiment includes components or routines which use eyetracking, but no eye tracker is configured.")
# Alert 4510
_translate(
"A {eyetracker} eye tracker has been configured, but no calibration routine is present.")
# Alert 4520
_translate("As {brand} eyetrackers do not support animations in their calibration routine, animation values have not been used in your calibration routine.")
# Alert 4530
_translate("Eyetrackers by {brand} do not support manual pacing")
# Alert 4540
_translate(
"Window mode is set to be windowed, but eyetracking requires the window to be full screen.")
# Alert 4545
_translate(
"A monitor config is required for accurate eyetracking measurements, but none was found.")
# Alert 4550
_translate(
"Eye tracker experiments should use 'ioHub' as the Input -> Keyboard Backend setting")
# Alert 4605
_translate(
"Audio transcription service \"{transcriber}\" is not supported online.")
# Alert 4610
_translate(
"Audio transcription service \"{transcriber}\" is not supported offline.")
# Alert 4615
_translate(
"Chosen transcriber '{engine}' requires an API key, please supply one in Preferences.")
# Alert 4705
_translate(
"Column name '{param}' in conditions file already exists as a variable in this experiment ({category}).")
# Alert 4710
_translate("Column name '{param}' is likely to cause name clashes. {msg}.")
# Alert 5055
_translate(
"Device parameter of microphone component \"{name}\" will not be used online.")
# Alert 6105
_translate(
"The file you are attempting to run does not seem to exist, the full path supplied to Runner was {path}")
# Alert 7105
_translate(
"Component {name} comes from the plugin {plugin}, which is either not installed or not activated.")
# Alert 8105
_translate(
"Color space attribute `.{colorSpaceAttrib}` is no longer in use, as colors are no longer tied to one space.")
# Alert 8110
_translate(
"RGB attribute `.{rgbAttrib}` is no longer in use, as non-RGB colors now handle their own conversion.")
| 5,189 | Python | .py | 118 | 41.474576 | 190 | 0.746521 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,370 | MonitorCenter.py | psychopy_psychopy/psychopy/monitors/MonitorCenter.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import time
import os
import locale
import wx
from wx import grid
from wx.lib import intctrl
from psychopy.localization import _translate
from psychopy import monitors, hardware, logging
from psychopy.app import dialogs
DEBUG = False
NOTEBOOKSTYLE = False
NO_MEASUREMENTS = False
if DEBUG:
logging.console.setLevel(logging.DEBUG)
else:
logging.console.setLevel(logging.INFO)
try:
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import (FigureCanvasWxAgg
as FigureCanvas)
from matplotlib.figure import Figure
except Exception:
pass
import numpy
# wx4 changed EVT_GRID_CELL_CHANGE -> EVT_GRID_CELL_CHANGED
if not hasattr(wx.grid, 'EVT_GRID_CELL_CHANGED'):
wx.grid.EVT_GRID_CELL_CHANGED = wx.grid.EVT_GRID_CELL_CHANGE
# wx IDs for menu items
def newIds(n):
return [wx.NewIdRef(count=1) for i in range(n)]
[idMenuSave] = newIds(1)
# wx IDs for controllers (admin panel)
[idCtrlMonList, idCtrlCalibList, idBtnCopyCalib, idBtnSaveMon] = newIds(4)
[idBtnNewMon, idBtnDeleteMon, idBtnNewCalib, idBtnDeleteCalib] = newIds(4)
# wx IDs for controllers (info panel)
[idCtrlScrDist, idCtrlScrWidth, idCtrlCalibDate, idCtrlCalibNotes] = newIds(4)
def unicodeToFloat(val):
"""Convert a unicode object from wx dialogs into a float, accounting for
locale settings (comma might be dec place)
"""
if val == 'None':
val = None
else:
try:
val = locale.atof(val)
except ValueError:
return None # ignore values that can't be a float
return val
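# Illustrative behaviour (assumed values, shown doctest-style):
#
# >>> unicodeToFloat('None') is None
# True
# >>> unicodeToFloat('not-a-number') is None
# True
#
# Under a locale whose decimal separator is a comma, locale.atof('1,5')
# yields 1.5, so unicodeToFloat('1,5') would return 1.5 there.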
class SimpleGrid(grid.Grid): # , wxGridAutoEditMixin):
def __init__(self, parent, id=-1, rows=(), cols=(), data=None):
self.parent = parent
self.moveTo = None
self.nRows, self.nCols = len(rows), len(cols)
# ,wx.Size( 300, 150 ))
grid.Grid.__init__(self, parent, -1, wx.Point(0, 0))
self.numEditor = grid.GridCellFloatEditor()
self.CreateGrid(self.nRows, self.nCols)
for nCol, col in enumerate(cols):
self.SetColLabelValue(nCol, col)
self.SetColFormatFloat(nCol, 4, 4)
# self.SetColMinimalWidth(nCol,800)
for nRow, row in enumerate(rows):
self.SetRowLabelValue(nRow, row)
for nRow in range(self.nRows):
for nCol in range(self.nCols):
self.SetCellEditor(nRow, nCol, self.numEditor)
self.numEditor.IncRef()
self.setData(data)
# self.SetMargins(-5,-5)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.Bind(grid.EVT_GRID_SELECT_CELL, self.onSelectCell)
def OnIdle(self, evt):
if self.moveTo is not None:
self.SetGridCursor(self.moveTo[0], self.moveTo[1])
self.moveTo = None
evt.Skip()
def setData(self, data=None):
# update the data for the grid
for nRow in range(self.nRows):
for nCol in range(self.nCols):
if (data is not None and
nRow < data.shape[0] and
nCol < data.shape[1]):
self.SetCellValue(nRow, nCol, '%f' % data[nRow, nCol])
else:
self.SetCellValue(nRow, nCol, '0.000')
self.AutoSize()
def onSelectCell(self, evt=None):
# data might have changed so redo layout
self.AutoSize()
self.parent.Layout() # expands the containing sizer if needed
evt.Skip() # allow grid to handle the rest of the update
class PlotFrame(wx.Frame):
def __init__(self, parent, ID, title, plotCanvas=None,
pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, ID, title, pos, size, style)
panel = wx.Panel(self, -1)
self.sizer = wx.GridBagSizer(1, 1)
if plotCanvas is not None:
self.addCanvas(plotCanvas)
wx.EVT_SIZE(self, self.OnSize)
def addCanvas(self, canvas):
self.canvas = canvas
self.sizer.Add(canvas, pos=(0, 0), flag=wx.EXPAND)
self.SetSizerAndFit(self.sizer)
self.SetAutoLayout(True)
self.Show()
def OnSize(self, event):
self.canvas.SetSize(event.GetSize())
class MainFrame(wx.Frame):
def __init__(self, parent, title):
# create a default monitor with no name
self.currentMon = monitors.Monitor('', verbose=False)
self.currentMonName = None # use to test if monitor is placeholder
self.currentCalibName = None
self.unSavedMonitor = False
self.comPort = 1
self.photom = None
# start building the frame
wx.Frame.__init__(self, parent, -1, title, size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE)
self.makeMenuBar()
if NOTEBOOKSTYLE:
# make the notebook
self.noteBook = wx.Notebook(self, -1)
# add the info page
self.infoPanel = wx.Panel(self.noteBook, -1)
self.noteBook.AddPage(self.infoPanel, _translate('Monitor Info'))
infoSizer = wx.BoxSizer(wx.HORIZONTAL)
infoSizer.Add(self.makeAdminBox(self.infoPanel), 1, wx.EXPAND)
infoSizer.Add(self.makeInfoBox(self.infoPanel), 1, wx.EXPAND)
self.infoPanel.SetAutoLayout(True)
self.infoPanel.SetSizerAndFit(infoSizer)
# add the calibration page
self.calibPanel = wx.Panel(self.noteBook, -1)
self.noteBook.AddPage(self.calibPanel, _translate('Calibration'))
calibSizer = self.makeCalibBox(self.calibPanel)
self.calibPanel.SetAutoLayout(True)
self.calibPanel.SetSizerAndFit(calibSizer)
self.noteBookSizer.Layout()
self.noteBookSizer.Fit(self)
else:
# just one page
self.infoPanel = wx.Panel(self, -1)
mainSizer = wx.BoxSizer(wx.HORIZONTAL)
leftSizer = wx.BoxSizer(wx.VERTICAL)
rightSizer = wx.BoxSizer(wx.VERTICAL)
_style = wx.EXPAND | wx.ALL
leftSizer.Add(self.makeAdminBox(self.infoPanel), 1, _style, 2)
leftSizer.Add(self.makeInfoBox(self.infoPanel), 1, _style, 2)
rightSizer.Add(self.makeCalibBox(self.infoPanel), 1, _style, 2)
#
mainSizer.Add(leftSizer, 1, _style, 2)
mainSizer.Add(rightSizer, 1, _style, 2)
# finalise panel layout
mainSizer.Layout()
self.infoPanel.SetAutoLayout(True)
self.infoPanel.SetSizerAndFit(mainSizer)
# if wx version 2.5+:
self.SetSize(self.GetBestSize())
# self.CreateStatusBar()
# self.SetStatusText("Maybe put tooltips down here one day")
if os.path.isfile('psychopy.ico'):
try:
self.SetIcon(wx.Icon('psychopy.ico', wx.BITMAP_TYPE_ICO))
except Exception:
pass
self.Bind(wx.EVT_CLOSE, self.onCloseWindow)
self.updateMonList()
def makeMenuBar(self):
menuBar = wx.MenuBar()
fileMenu = wx.Menu()
fileMenu.Append(idMenuSave,
_translate('Save\tCtrl+S'),
_translate('Save the current monitor'))
self.Bind(wx.EVT_MENU, self.onSaveMon, id=idMenuSave)
_hint = _translate(
'Close Monitor Center (but not other PsychoPy windows)')
fileMenu.Append(wx.ID_CLOSE,
_translate('Close Monitor Center\tCtrl+W'),
_hint)
self.Bind(wx.EVT_MENU, self.onCloseWindow, id=wx.ID_CLOSE)
menuBar.Append(fileMenu, _translate('&File'))
# Edit
editMenu = wx.Menu()
id = wx.NewIdRef(count=1)
_hint = _translate("Copy the current monitor's name to clipboard")
editMenu.Append(id, _translate('Copy\tCtrl+C'), _hint)
self.Bind(wx.EVT_MENU, self.onCopyMon, id=id)
menuBar.Append(editMenu, _translate('&Edit'))
self.SetMenuBar(menuBar)
def makeAdminBox(self, parent):
# make the box for the controls
boxLabel = wx.StaticBox(parent, -1, _translate('Choose Monitor'))
boxLabel.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
adminBox = wx.StaticBoxSizer(boxLabel)
# build the controls
self.ctrlMonList = wx.ListBox(parent, idCtrlMonList,
choices=['iiyama571', 'sonyG500'],
size=(350, 100))
self.Bind(wx.EVT_LISTBOX, self.onChangeMonSelection, self.ctrlMonList)
monButtonsBox = wx.BoxSizer(wx.VERTICAL)
self.btnNewMon = wx.Button(parent, idBtnNewMon, _translate('New...'))
self.Bind(wx.EVT_BUTTON, self.onNewMon, self.btnNewMon)
monButtonsBox.Add(self.btnNewMon)
self.btnNewMon.SetToolTip(wx.ToolTip(
_translate("Create a new monitor")))
self.btnSaveMon = wx.Button(parent, idBtnSaveMon, _translate('Save'))
self.Bind(wx.EVT_BUTTON, self.onSaveMon, self.btnSaveMon)
monButtonsBox.Add(self.btnSaveMon)
msg = _translate("Save all calibrations for this monitor")
self.btnSaveMon.SetToolTip(wx.ToolTip(msg))
self.btnDeleteMon = wx.Button(parent, idBtnDeleteMon,
_translate('Delete'))
self.Bind(wx.EVT_BUTTON, self.onDeleteMon, self.btnDeleteMon)
monButtonsBox.Add(self.btnDeleteMon)
msg = _translate("Delete this monitor entirely")
self.btnDeleteMon.SetToolTip(wx.ToolTip(msg))
self.ctrlCalibList = wx.ListBox(parent, idCtrlCalibList,
choices=[''],
size=(350, 100))
self.Bind(wx.EVT_LISTBOX, self.onChangeCalibSelection,
self.ctrlCalibList)
calibButtonsBox = wx.BoxSizer(wx.VERTICAL)
self.btnCopyCalib = wx.Button(parent, idBtnCopyCalib,
_translate('Copy...'))
self.Bind(wx.EVT_BUTTON, self.onCopyCalib, self.btnCopyCalib)
calibButtonsBox.Add(self.btnCopyCalib)
msg = _translate("Creates a new calibration entry for this monitor")
self.btnCopyCalib.SetToolTip(wx.ToolTip(msg))
self.btnDeleteCalib = wx.Button(
parent, idBtnDeleteCalib, _translate('Delete'))
self.Bind(wx.EVT_BUTTON, self.onDeleteCalib, self.btnDeleteCalib)
calibButtonsBox.Add(self.btnDeleteCalib)
msg = _translate("Remove this calibration entry (finalized when "
"monitor is saved)")
self.btnDeleteCalib.SetToolTip(wx.ToolTip(msg))
# add controls to box
adminBoxMainSizer = wx.FlexGridSizer(cols=2, hgap=6, vgap=6)
adminBoxMainSizer.AddMany([(1, 10), (1, 10), # 2 empty boxes 1x10pix
self.ctrlMonList, monButtonsBox,
self.ctrlCalibList, calibButtonsBox])
adminBox.Add(adminBoxMainSizer)
return adminBox
def makeInfoBox(self, parent):
# create the box
infoBox = wx.StaticBox(parent, -1, _translate('Monitor Info'))
infoBox.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
infoBoxSizer = wx.StaticBoxSizer(infoBox, wx.VERTICAL)
# scr distance
labelScrDist = wx.StaticText(parent, -1,
_translate("Screen Distance (cm):"),
style=wx.ALIGN_RIGHT)
self.ctrlScrDist = wx.TextCtrl(parent, idCtrlScrDist, "")
self.Bind(wx.EVT_TEXT, self.onChangeScrDist, self.ctrlScrDist)
# scr width
labelScrWidth = wx.StaticText(parent, -1,
_translate("Screen Width (cm):"),
style=wx.ALIGN_RIGHT)
self.ctrlScrWidth = wx.TextCtrl(parent, idCtrlScrWidth, "")
self.Bind(wx.EVT_TEXT, self.onChangeScrWidth, self.ctrlScrWidth)
# scr pixels
_size = _translate("Size (pixels; Horiz,Vert):")
labelScrPixels = wx.StaticText(parent, -1, _size,
style=wx.ALIGN_RIGHT)
self.ctrlScrPixHoriz = wx.TextCtrl(parent, -1, "", size=(50, 20))
self.Bind(wx.EVT_TEXT, self.onChangeScrPixHoriz, self.ctrlScrPixHoriz)
self.ctrlScrPixVert = wx.TextCtrl(parent, -1, '', size=(50, 20))
self.Bind(wx.EVT_TEXT, self.onChangeScrPixVert, self.ctrlScrPixVert)
ScrPixelsSizer = wx.BoxSizer(wx.HORIZONTAL)
ScrPixelsSizer.AddMany([self.ctrlScrPixHoriz, self.ctrlScrPixVert])
# date
labelCalibDate = wx.StaticText(parent, -1,
_translate("Calibration Date:"),
style=wx.ALIGN_RIGHT)
self.ctrlCalibDate = wx.TextCtrl(parent, idCtrlCalibDate, "",
size=(150, 20))
self.ctrlCalibDate.Disable()
# notes
labelCalibNotes = wx.StaticText(parent, -1,
_translate("Notes:"),
style=wx.ALIGN_RIGHT)
self.ctrlCalibNotes = wx.TextCtrl(parent, idCtrlCalibNotes, "",
size=(150, 150),
style=wx.TE_MULTILINE)
self.Bind(wx.EVT_TEXT, self.onChangeCalibNotes, self.ctrlCalibNotes)
# bits++
self.ctrlUseBits = wx.CheckBox(parent, -1, _translate('Use Bits++'))
self.Bind(wx.EVT_CHECKBOX, self.onChangeUseBits, self.ctrlUseBits)
infoBoxGrid = wx.FlexGridSizer(cols=2, hgap=6, vgap=6)
infoBoxGrid.AddMany([
(1, 10), (1, 10), # a pair of empty boxes each 1x10pix
(1, 10), self.ctrlUseBits,
labelScrDist, self.ctrlScrDist,
labelScrPixels, ScrPixelsSizer,
labelScrWidth, self.ctrlScrWidth,
labelCalibDate, self.ctrlCalibDate
])
infoBoxGrid.Layout()
infoBoxSizer.Add(infoBoxGrid)
# put the notes box below the main grid sizer
infoBoxSizer.Add(labelCalibNotes)
infoBoxSizer.Add(self.ctrlCalibNotes, 1, wx.EXPAND)
return infoBoxSizer
def makeCalibBox(self, parent):
boxLabel = wx.StaticBox(parent, -1, _translate('Calibration'))
boxLabel.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
calibBox = wx.StaticBoxSizer(boxLabel)
photometerBox = wx.FlexGridSizer(cols=2, hgap=6, vgap=6)
# com port entry number
self.comPortLabel = wx.StaticText(parent, -1, " ", size=(150, 20))
# photometer button
# photom type choices should not need localization:
self._photomTypeItems = list([p.longName for p in hardware.getAllPhotometers()] + ["Get more..."])
self.ctrlPhotomType = wx.Choice(parent, -1, name="Type:",
choices=self._photomTypeItems)
_ports = list(hardware.getSerialPorts())
self._photomChoices = [_translate("Scan all ports")] + _ports
_size = self.ctrlPhotomType.GetSize() + [0, 5]
self.ctrlPhotomPort = wx.ComboBox(parent, -1, name="Port:",
value=self._photomChoices[0],
choices=self._photomChoices,
size=_size)
self.ctrlPhotomType.Bind(wx.EVT_CHOICE, self.onChangePhotomType)
self.btnFindPhotometer = wx.Button(parent, -1,
_translate("Get Photometer"))
self.Bind(wx.EVT_BUTTON,
self.onBtnFindPhotometer, self.btnFindPhotometer)
# gamma controls
self.btnCalibrateGamma = wx.Button(
parent, -1, _translate("Gamma Calibration..."))
self.Bind(wx.EVT_BUTTON,
self.onCalibGammaBtn, self.btnCalibrateGamma)
self.btnTestGamma = wx.Button(
parent, -1, _translate("Gamma Test..."))
self.btnTestGamma.Enable(False)
# color controls
self.Bind(wx.EVT_BUTTON,
self.onCalibTestBtn, self.btnTestGamma)
self.btnCalibrateColor = wx.Button(
parent, -1, _translate("Chromatic Calibration..."))
self.btnCalibrateColor.Enable(False)
self.Bind(wx.EVT_BUTTON,
self.onCalibColorBtn, self.btnCalibrateColor)
self.btnPlotGamma = wx.Button(
parent, -1, _translate("Plot gamma"))
self.Bind(wx.EVT_BUTTON,
self.plotGamma, self.btnPlotGamma)
self.btnPlotSpectra = wx.Button(
parent, -1, _translate("Plot spectra"))
self.Bind(wx.EVT_BUTTON,
self.plotSpectra, self.btnPlotSpectra)
photometerBox.AddMany([self.ctrlPhotomType, self.btnFindPhotometer,
self.ctrlPhotomPort, (0, 0),
self.comPortLabel, (0, 0),
self.btnCalibrateGamma, (0, 0),
self.btnTestGamma, self.btnPlotGamma,
self.btnCalibrateColor, self.btnPlotSpectra])
# ----GAMMA------------
# calibration grid
gammaBox = wx.StaticBox(parent, -1, _translate('Linearization'))
gammaBox.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
gammaBoxSizer = wx.StaticBoxSizer(gammaBox, wx.VERTICAL)
# don't localize the choices
_choices = ['easy: a+kx^g', 'full: a+(b+kx)^g']
self.choiceLinearMethod = wx.Choice(parent, -1, name='formula:',
choices=_choices)
if self.currentMon.getLinearizeMethod() == 4:
self.choiceLinearMethod.SetSelection(1)
else:
self.choiceLinearMethod.SetSelection(0)
self.Bind(wx.EVT_CHOICE, self.onChangeLinearMethod,
self.choiceLinearMethod)
gammaBoxSizer.Add(self.choiceLinearMethod, 1, wx.ALL, 2)
self.gammaGrid = SimpleGrid(parent, id=-1,
cols=['Min', 'Max', 'Gamma',
'a', 'b', 'k'],
rows=['lum', 'R', 'G', 'B'])
gammaBoxSizer.Add(self.gammaGrid)
self.gammaGrid.Bind(wx.grid.EVT_GRID_CELL_CHANGED, self.onChangeGammaGrid)
gammaBoxSizer.Layout()
# LMS grid
LMSbox = wx.StaticBox(parent, -1, 'LMS->RGB')
LMSboxSizer = wx.StaticBoxSizer(LMSbox, wx.VERTICAL)
self.LMSgrid = SimpleGrid(parent, id=-1,
cols=['L', 'M', 'S'],
rows=['R', 'G', 'B'])
LMSboxSizer.Add(self.LMSgrid)
LMSboxSizer.Layout()
self.LMSgrid.Bind(wx.grid.EVT_GRID_CELL_CHANGED, self.onChangeLMSgrid)
# DKL grid
DKLbox = wx.StaticBox(parent, -1, 'DKL->RGB')
DKLboxSizer = wx.StaticBoxSizer(DKLbox, wx.VERTICAL)
self.DKLgrid = SimpleGrid(parent, id=-1,
cols=['Lum', 'L-M', 'L+M-S'],
rows=['R', 'G', 'B'])
DKLboxSizer.Add(self.DKLgrid)
DKLboxSizer.Layout()
self.DKLgrid.Bind(wx.grid.EVT_GRID_CELL_CHANGED, self.onChangeDKLgrid)
calibBoxMainSizer = wx.BoxSizer(wx.VERTICAL)
calibBoxMainSizer.AddMany([photometerBox,
gammaBoxSizer,
LMSboxSizer,
DKLboxSizer])
calibBoxMainSizer.Layout()
if NOTEBOOKSTYLE:
return calibBoxMainSizer
else:
# put the main sizer into a labeled box
calibBox.Add(calibBoxMainSizer)
return calibBox
def loadMonitor(self, name=None):
self.currentMon = monitors.Monitor(name, verbose=False)
self.currentCalibName = self.currentMon.setCurrent(-1)
self.updateCalibList()
self.unSavedMonitor = False
def updateMonList(self):
# refresh list of all available monitors on path
monList = monitors.getAllMonitors()
self.ctrlMonList.Set(monList)
# if we had selected a monitor, make sure it's still selected
if len(monList) > 0:
if self.currentMonName is not None:
self.ctrlMonList.SetStringSelection(self.currentMonName)
else:
self.ctrlMonList.SetSelection(0)
self.onChangeMonSelection(event=-1)
# do we need to update the calibList always after this?
return 1
else:
# there are no monitors - create an empty one to populate the
# fields
self.currentMon = monitors.Monitor('', verbose=False)
self.currentMonName = None
return 0 # there were no monitors on the path
def updateCalibList(self, thisList=None):
"""update the list of calibrations either from the current
monitor or to a given list
"""
if thisList is None: # fetch it from monitor file
thisList = self.currentMon.calibNames
# populate the listbox
self.ctrlCalibList.Set(thisList)
# select the current calib
if self.currentCalibName in thisList:
self.ctrlCalibList.SetStringSelection(self.currentCalibName)
self.onChangeCalibSelection(event=-1)
# application callbacks
def onCloseWindow(self, event):
if self.unSavedMonitor:
# warn user that data will be lost
msg = _translate(
'Save changes to monitor settings before quitting?')
dlg = dialogs.MessageDialog(self, message=msg, type='Warning')
resp = dlg.ShowModal()
if resp == wx.ID_CANCEL:
return 1 # return before quitting
elif resp == wx.ID_YES:
# save then quit
self.currentMon.save()
elif resp == wx.ID_NO:
pass # don't save just quit
dlg.Destroy()
self.onCopyMon() # save current monitor name to clipboard
self.Destroy()
# admin callbacks
def onChangeMonSelection(self, event):
if self.unSavedMonitor:
if self.currentMonName == self.ctrlMonList.GetStringSelection():
# it didn't really change
return 1
# warn user that data will be lost
msg = _translate('Save changes to monitor?')
dlg = dialogs.MessageDialog(self, msg, type='Warning')
resp = dlg.ShowModal()
dlg.Destroy()
if resp == wx.ID_CANCEL:
# revert and return
self.ctrlMonList.SetStringSelection(self.currentMonName)
return False # return before quitting
elif resp == wx.ID_YES:
# save then change
self.currentMon.save()
elif resp == wx.ID_NO:
pass # don't save just change
self.currentMonName = self.ctrlMonList.GetStringSelection()
self.loadMonitor(self.currentMonName)
def onChangeCalibSelection(self, event, newCalib=None):
# get data under current calibration
if newCalib is None:
# probably came from an event - check the new name
newCalib = self.ctrlCalibList.GetStringSelection()
# do the load and check new name
self.currentCalibName = self.currentMon.setCurrent(newCalib)
# insert values from new calib into GUI
_date = monitors.strFromDate(self.currentMon.getCalibDate())
self.ctrlCalibDate.SetValue(_date)
_dist = self.currentMon.getDistance() or 0
self.ctrlScrDist.SetValue(locale.str(_dist))
_width = self.currentMon.getWidth() or 0
self.ctrlScrWidth.SetValue(locale.str(_width))
_sizePix = self.currentMon.getSizePix() or [0, 0]
self.ctrlScrPixHoriz.SetValue(locale.str(_sizePix[0]))
self.ctrlScrPixVert.SetValue(locale.str(_sizePix[1]))
# self.ctrlScrGamma.SetValue(str(self.currentMon.getGamma()))
self.ctrlCalibNotes.SetValue(self.currentMon.getNotes() or '')
self.ctrlUseBits.SetValue(self.currentMon.getUseBits())
self.gammaGrid.setData(self.currentMon.getGammaGrid())
if self.currentMon.getLinearizeMethod() == 4:
self.choiceLinearMethod.SetSelection(1)
else:
self.choiceLinearMethod.SetSelection(0)
self.LMSgrid.setData(self.currentMon.getLMS_RGB())
self.DKLgrid.setData(self.currentMon.getDKL_RGB())
self.enableDisableCtrls()
self.unSavedMonitor = False
return 1
def enableDisableCtrls(self):
# update controls for current monitor
# the plot-gamma button is available whether or not luminance
# measurements ('lumsPre') are stored in the current calibration
self.btnPlotGamma.Enable(True)
if not 'spectraRGB' in self.currentMon.currentCalib:
self.btnPlotSpectra.Enable(False)
else:
self.btnPlotSpectra.Enable(True)
if self.currentMon.getLevelsPre() is None:
self.choiceLinearMethod.Disable()
else:
self.choiceLinearMethod.Enable()
def onCopyMon(self, event=None):
"""Copy monitor name to clipboard, to paste elsewhere
"""
if wx.TheClipboard.Open():
wx.TheClipboard.Clear()
wx.TheClipboard.SetData(wx.TextDataObject(self.currentMon.name))
wx.TheClipboard.Close()
def onSaveMon(self, event):
"""Saves calibration entry to location.
Note that the calibration date will reflect the save date/time
"""
self.currentMon.save()
self.unSavedMonitor = False
def onCopyCalib(self, event):
"""Creates a new calibration entry for the monitor.
Note that the calibration date will reflect the save date/time
"""
# use time as initial guess at name
calibTime = time.localtime()
calibTimeStr = monitors.strFromDate(calibTime)
# then use dialogue so user can override
msg = _translate(
'Name of this calibration (for monitor "%(name)s") will be:')
infoStr = msg % {'name': self.currentMon.name}
dlg = wx.TextEntryDialog(self, message=infoStr,
value=calibTimeStr,
caption=_translate('Input text'))
if dlg.ShowModal() == wx.ID_OK:
newCalibName = dlg.GetValue()
# update the GUI to reflect new calibration
self.currentMon.copyCalib(newCalibName)
self.currentMon.setCalibDate(calibTime)
self.onChangeCalibSelection(1, newCalibName)
self.updateCalibList()
self.unSavedMonitor = True
dlg.Destroy()
def onNewMon(self, event):
# open a dialogue to get the name
dlg = wx.TextEntryDialog(self, _translate('New monitor name:'),
caption=_translate('Input text'))
if dlg.ShowModal() == wx.ID_OK:
self.currentMonName = dlg.GetValue()
self.ctrlMonList.Append(self.currentMonName)
self.ctrlMonList.SetStringSelection(self.currentMonName)
self.currentMon = monitors.Monitor(
self.currentMonName, verbose=True)
self.updateCalibList()
self.onChangeCalibSelection(event=1)
self.unSavedMonitor = True
dlg.Destroy()
def onDeleteMon(self, event):
monToDel = self.currentMonName
msg = _translate('Are you sure you want to delete all details for %s? '
'(cannot be undone)')
dlg = dialogs.MessageDialog(parent=self, message=msg % monToDel,
type='Warning')
response = dlg.ShowModal()
dlg.Destroy()
if response == wx.ID_YES:
# delete it (try to remove both calib and json files)
for fileEnding in ['.calib', '.json']:
monitorFileName = os.path.join(monitors.monitorFolder,
monToDel + fileEnding)
if os.path.exists(monitorFileName):
os.remove(monitorFileName)
self.currentMon = None
self.currentMonName = None
self.updateMonList()
# load most recent calibration instead
# this will load calibration "-1" (last calib)
self.onChangeMonSelection(event=None)
self.updateCalibList()
def onDeleteCalib(self, event):
calToDel = self.ctrlCalibList.GetStringSelection()
# warn user that data will be lost
msg = _translate('Are you sure you want to delete this calibration? '
'(cannot be undone)')
dlg = dialogs.MessageDialog(parent=self,
message=msg,
type='Warning')
if dlg.ShowModal() == wx.ID_YES:
# delete it
self.currentMon.delCalib(calToDel)
# load most recent calibration instead
# this will load calibration "-1" (last calib)
self.onChangeCalibSelection(event=None, newCalib=-1)
self.updateCalibList()
dlg.Destroy()
# info callbacks
def onChangeCalibDate(self, event):
# do we want the user to change a calib date?
pass
def onChangeCalibNotes(self, event):
newVal = self.ctrlCalibNotes.GetValue()
self.currentMon.setNotes(newVal)
self.unSavedMonitor = True
def onChangeScrDist(self, event):
newVal = unicodeToFloat(self.ctrlScrDist.GetValue())
# zero means "not set" but can't be used in calculations
if newVal == 0:
newVal = None
self.currentMon.setDistance(newVal)
self.unSavedMonitor = True
def onChangeScrWidth(self, event):
newVal = unicodeToFloat(self.ctrlScrWidth.GetValue())
# zero means "not set" but can't be used in calculations
if newVal == 0:
newVal = None
self.currentMon.setWidth(newVal)
self.unSavedMonitor = True
def onChangeScrPixHoriz(self, event):
this = self.currentMon.currentCalib
if self.currentMon.getSizePix() is None:
self.currentMon.setSizePix([0,0])
newVal = unicodeToFloat(self.ctrlScrPixHoriz.GetValue())
this['sizePix'] = [newVal, this['sizePix'][1]]
self.unSavedMonitor = True
def onChangeScrPixVert(self, event):
this = self.currentMon.currentCalib
if self.currentMon.getSizePix() is None:
self.currentMon.setSizePix([0,0])
newVal = unicodeToFloat(self.ctrlScrPixVert.GetValue())
this['sizePix'] = [this['sizePix'][0], newVal]
self.unSavedMonitor = True
# calib callbacks
def onChangeGammaGrid(self, event):
# convert to float
newVal = self.gammaGrid.GetCellValue(event.GetRow(), event.GetCol())
newVal = unicodeToFloat(newVal)
# insert in grid
row, col = event.GetRow(), event.GetCol()
self.currentMon.currentCalib['gammaGrid'][row, col] = newVal
self.unSavedMonitor = True
def onChangeLMSgrid(self, event):
# convert to float
newVal = self.LMSgrid.GetCellValue(event.GetRow(), event.GetCol())
newVal = unicodeToFloat(newVal)
# insert in grid
row, col = event.GetRow(), event.GetCol()
self.currentMon.currentCalib['lms_rgb'][row, col] = newVal
self.unSavedMonitor = True
def onChangeDKLgrid(self, event):
# convert to float
newVal = self.DKLgrid.GetCellValue(event.GetRow(), event.GetCol())
newVal = unicodeToFloat(newVal)
# insert in grid
row, col = event.GetRow(), event.GetCol()
self.currentMon.currentCalib['dkl_rgb'][row, col] = newVal
self.unSavedMonitor = True
def onCalibGammaBtn(self, event):
if NO_MEASUREMENTS:
# recalculate from previous measure
lumsPre = self.currentMon.getLumsPre()
lumLevels = self.currentMon.getLevelsPre()
else:
# present a dialogue to get details for calibration
calibDlg = GammaDlg(self, self.currentMon)
if calibDlg.ShowModal() != wx.ID_OK:
calibDlg.Destroy()
return 1
            nPoints = int(calibDlg.ctrlNPoints.GetValue())
            stimSize = unicodeToFloat(calibDlg.ctrlStimSize.GetValue())
            useBits = calibDlg.ctrlUseBits.GetValue()
            autoMode = calibDlg.methodChoiceBx.GetStringSelection()
            # lib starts at zero but here we allow 1
            screen = int(calibDlg.ctrlScrN.GetValue()) - 1
            # read every control before destroying the dialog
            calibDlg.Destroy()
# run the calibration itself
lumLevels = monitors.DACrange(nPoints)
_size = self.currentMon.getSizePix()
lumsPre = monitors.getLumSeries(photometer=self.photom,
lumLevels=lumLevels,
useBits=useBits,
autoMode=autoMode,
winSize=_size,
stimSize=stimSize,
monitor=self.currentMon,
screen=screen)
# allow user to type in values
if autoMode == 'semi':
inputDlg = GammaLumValsDlg(parent=self, levels=lumLevels)
lumsPre = inputDlg.show() # will be [] if user cancels
inputDlg.Destroy()
# fit the gamma curves
        if lumsPre is not None and len(lumsPre) > 1:
self.onCopyCalib(1) # create a new dated calibration
self.currentMon.setLumsPre(lumsPre) # save for future
self.currentMon.setLevelsPre(lumLevels) # save for future
self.btnPlotGamma.Enable(True)
self.choiceLinearMethod.Enable()
# do the fits
self.doGammaFits(lumLevels, lumsPre)
else:
logging.warning('No lum values captured/entered')
def doGammaFits(self, levels, lums):
linMethod = self.currentMon.getLinearizeMethod()
if linMethod == 4:
msg = 'Fitting gamma equation (%i) to luminance data'
logging.info(msg % linMethod)
currentCal = numpy.ones([4, 6], 'f') * numpy.nan
for gun in [0, 1, 2, 3]:
gamCalc = monitors.GammaCalculator(
levels, lums[gun, :], eq=linMethod)
currentCal[gun, 0] = gamCalc.min # min
currentCal[gun, 1] = gamCalc.max # max
currentCal[gun, 2] = gamCalc.gamma # gamma
                currentCal[gun, 3] = gamCalc.a  # a
                currentCal[gun, 4] = gamCalc.b  # b
                currentCal[gun, 5] = gamCalc.k  # k
else:
currentCal = numpy.ones([4, 3], 'f') * numpy.nan
msg = 'Fitting gamma equation (%i) to luminance data'
logging.info(msg % linMethod)
for gun in [0, 1, 2, 3]:
gamCalc = monitors.GammaCalculator(
levels, lums[gun, :], eq=linMethod)
currentCal[gun, 0] = lums[gun, 0] # min
currentCal[gun, 1] = lums[gun, -1] # max
currentCal[gun, 2] = gamCalc.gamma # gamma
self.gammaGrid.setData(currentCal)
self.currentMon.setGammaGrid(currentCal)
self.unSavedMonitor = True
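    # Editor's note (added comment, not in the original source): the grid
    # built by doGammaFits has one row per gun in the order lum, R, G, B.
    # For the simple equations it is 4x3 (min, max, gamma); for the "full"
    # equation (method 4) it is 4x6 (min, max, gamma, a, b, k).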
def onChangeLinearMethod(self, event):
newMethod = self.choiceLinearMethod.GetStringSelection()
if newMethod.startswith('full'):
self.currentMon.setLineariseMethod(4)
else:
self.currentMon.setLineariseMethod(1)
self.unSavedMonitor = True
        if self.currentMon.getLumsPre() is not None:
self.doGammaFits(self.currentMon.getLevelsPre(),
self.currentMon.getLumsPre())
def onCalibTestBtn(self, event):
# set the gamma and test calibration
currentCal = self.currentMon.currentCalib['gammaGrid']
calibDlg = GammaDlg(self, self.currentMon)
if calibDlg.ShowModal() != wx.ID_OK:
calibDlg.Destroy()
return 1
        nPoints = int(calibDlg.ctrlNPoints.GetValue())
        stimSize = unicodeToFloat(calibDlg.ctrlStimSize.GetValue())
        useBits = calibDlg.ctrlUseBits.GetValue()
        autoMode = calibDlg.methodChoiceBx.GetStringSelection()
        # lib starts at zero but here we allow 1
        screen = int(calibDlg.ctrlScrN.GetValue()) - 1
        # read every control before destroying the dialog
        calibDlg.Destroy()
lumLevels = monitors.DACrange(nPoints)
# gamma=None causes the function to use monitor settings
lumsPost = monitors.getLumSeries(photometer=self.photom,
lumLevels=lumLevels,
useBits=useBits,
autoMode=autoMode,
winSize=self.currentMon.getSizePix(),
stimSize=stimSize,
monitor=self.currentMon,
gamma=None,
screen=screen,)
if len(lumsPost) > 1:
self.currentMon.setLumsPost(lumsPost) # save for future
self.currentMon.setLevelsPost(lumLevels) # save for future
self.unSavedMonitor = True
def onCalibColorBtn(self, event):
if NO_MEASUREMENTS:
# get previous spectra:
nm, spectra = self.currentMon.getSpectra()
else:
# do spectral measurement:
useBits = self.currentMon.getUseBits()
_size = self.currentMon.getSizePix()
nm, spectra = monitors.getRGBspectra(stimSize=0.5,
photometer=self.photom,
winSize=_size)
self.currentMon.setSpectra(nm, spectra)
self.btnPlotSpectra.Enable(True) # can now plot spectra
self.unSavedMonitor = True
self.onCopyCalib(1) # create a new dated calibration
# dkl
dkl_rgb = monitors.makeDKL2RGB(nm, spectra)
self.currentMon.setDKL_RGB(dkl_rgb)
self.DKLgrid.setData(dkl_rgb)
# lms
lms_rgb = monitors.makeLMS2RGB(nm, spectra)
self.currentMon.setLMS_RGB(lms_rgb)
self.LMSgrid.setData(lms_rgb)
def onChangeUseBits(self, event):
newVal = self.ctrlUseBits.GetValue()
self.currentMon.setUseBits(newVal)
self.unSavedMonitor = True
def onCtrlPhotomType(self, event):
pass
def onChangePhotomType(self, evt=None):
if evt.GetSelection() == len(self._photomTypeItems) - 1:
# if they chose "Get more...", clear selection and open plugin dlg
self.ctrlPhotomType.SetSelection(-1)
from ..app.plugin_manager.dialog import EnvironmentManagerDlg
dlg = EnvironmentManagerDlg(self)
dlg.pluginMgr.pluginList.searchCtrl.SetValue("photometer")
dlg.pluginMgr.pluginList.search()
dlg.Show()
else:
evt.Skip()
def onBtnFindPhotometer(self, event):
# safer to get by index, but GetStringSelection will work for
# nonlocalized technical names:
photName = self.ctrlPhotomType.GetStringSelection()
        # the port box may hold "Scan all ports", a device name, or a number
photPort = self.ctrlPhotomPort.GetValue().strip()
# [0] == Scan all ports
if not photPort or photPort == self._photomChoices[0]:
photPort = None
elif photPort.isdigit():
photPort = int(photPort)
# search all ports
self.comPortLabel.SetLabel(_translate('Scanning ports...'))
self.Update()
self.photom = hardware.findPhotometer(device=photName, ports=photPort)
if self.photom is not None and self.photom.OK:
self.btnFindPhotometer.Disable()
self.btnCalibrateGamma.Enable(True)
self.btnTestGamma.Enable(True)
if hasattr(self.photom, 'getLastSpectrum'):
self.btnCalibrateColor.Enable(True)
msg = _translate('%(photomType)s found on %(photomPort)s')
self.comPortLabel.SetLabel(msg %
{'photomType': self.photom.type,
'photomPort': self.photom.portString})
else:
self.comPortLabel.SetLabel(_translate('No photometers found'))
self.photom = None
        # does this device need a dark calibration?
        if (hasattr(self.photom, 'getNeedsCalibrateZero') and
                self.photom.getNeedsCalibrateZero()):
            # prompt the user for a dark calibration of the device
            msg = _translate('Your ColorCAL needs to be calibrated first.'
                             ' Please block all light from getting into '
                             'the lens and press OK.')
while self.photom.getNeedsCalibrateZero():
txt = _translate('Dark calibration of ColorCAL')
dlg = dialogs.MessageDialog(self, message=msg,
title=txt,
type='Info')
# info dlg has only an OK button
resp = dlg.ShowModal()
if resp == wx.ID_CANCEL:
self.photom = None
self.comPortLabel.SetLabel('')
return 0
elif resp == wx.ID_OK:
self.photom.calibrateZero()
# this failed at least once. Try again.
msg = _translate('Try again. Cover the lens fully and '
'press OK')
def plotGamma(self, event=None):
msg = _translate('%(monName)s %(calibName)s Gamma Functions')
figTitle = msg % {'monName': self.currentMonName,
'calibName': self.currentCalibName}
plotWindow = PlotFrame(self, 1003, figTitle)
figure = Figure(figsize=(5, 5), dpi=80)
figureCanvas = FigureCanvas(plotWindow, -1, figure)
plt = figure.add_subplot(111)
plt.cla()
gammaGrid = self.currentMon.getGammaGrid()
lumsPre = self.currentMon.getLumsPre()
levelsPre = self.currentMon.getLevelsPre()
lumsPost = self.currentMon.getLumsPost()
# Handle the case where the button is pressed but no gamma data is
# available.
        if lumsPre is None:
            return  # nothing has been measured yet
        else:
colors = 'krgb'
xxSmooth = numpy.arange(0, 255.5, 0.5)
eq = self.currentMon.getLinearizeMethod()
for gun in range(4): # includes lum
gamma = gammaGrid[gun, 2]
minLum = gammaGrid[gun, 0]
maxLum = gammaGrid[gun, 1]
if eq <= 2:
# plot fitted curve
curve = monitors.gammaFun(xxSmooth, minLum, maxLum, gamma,
eq=eq, a=None, b=None, k=None)
plt.plot(xxSmooth, curve, colors[gun] + '-',
linewidth=1.5)
if self.currentMon.getLinearizeMethod() == 4:
a, b, k = gammaGrid[gun, 3:]
# plot fitted curve
curve = monitors.gammaFun(xxSmooth, minLum, maxLum, gamma,
eq=eq, a=a, b=b, k=k)
plt.plot(xxSmooth, curve, colors[gun] + '-',
linewidth=1.5)
else:
pass
# polyFit = self.currentMon._gammaInterpolator[gun]
# curve = xxSmooth*0.0
# for expon, coeff in enumerate(polyFit):
# curve += coeff*xxSmooth**expon
# plt.plot(xxSmooth, curve, colors[gun]+'-', linewidth=1.5)
# plot POINTS
plt.plot(levelsPre, lumsPre[gun, :], colors[gun] + 'o',
linewidth=1.5)
lumsPost = self.currentMon.getLumsPost()
levelsPost = self.currentMon.getLevelsPost()
if lumsPost is not None:
for gun in range(4): # includes lum,r,g,b
lums = lumsPost[gun, :]
gamma = gammaGrid[gun, 2]
minLum = min(lums)
maxLum = max(lums)
# plot CURVE
plt.plot([levelsPost[0], levelsPost[-1]],
[minLum, maxLum], colors[gun] + '--', linewidth=1.5)
# plot POINTS
plt.plot(levelsPost, lums, 'o', markerfacecolor='w',
markeredgecolor=colors[gun], linewidth=1.5)
figureCanvas.draw() # update the canvas
plotWindow.addCanvas(figureCanvas)
def plotSpectra(self, event=None):
msg = _translate('%(monName)s %(calibName)s Spectra')
figTitle = msg % {'monName': self.currentMonName,
'calibName': self.currentCalibName}
plotWindow = PlotFrame(self, 1003, figTitle)
figure = Figure(figsize=(5, 5), dpi=80)
figureCanvas = FigureCanvas(plotWindow, -1, figure)
plt = figure.add_subplot(111)
plt.cla()
nm, spectraRGB = self.currentMon.getSpectra()
        if nm is not None:
plt.plot(nm, spectraRGB[0, :], 'r-', linewidth=1.5)
plt.plot(nm, spectraRGB[1, :], 'g-', linewidth=2)
plt.plot(nm, spectraRGB[2, :], 'b-', linewidth=2)
figureCanvas.draw() # update the canvas
plotWindow.addCanvas(figureCanvas)
class GammaLumValsDlg(wx.Dialog):
"""A dialogue to manually get the luminance values recorded for each level
"""
def __init__(self, parent, levels):
wx.Dialog.__init__(self, parent, -1,
_translate('Recorded luminance values'),
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
pad = 5
panel = wx.Panel(self, -1)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(self.makeCalibBox(parent=panel, levels=levels), 1,
wx.EXPAND | wx.ALL, pad)
butBox = wx.BoxSizer(wx.HORIZONTAL)
btnOK = wx.Button(panel, wx.ID_OK, _translate(" OK "))
btnOK.SetDefault()
btnCANC = wx.Button(panel, wx.ID_CANCEL, _translate(" Cancel "))
butBox.AddStretchSpacer(1)
butBox.Add(btnOK, 1, wx.BOTTOM, pad)
butBox.Add(btnCANC, 1, wx.BOTTOM | wx.RIGHT, pad)
mainSizer.Add(butBox, flag=wx.ALIGN_CENTER | wx.TOP | wx.BOTTOM,
border=10)
# finalise panel layout
panel.SetAutoLayout(True)
panel.SetSizerAndFit(mainSizer)
mainSizer.Layout()
self.SetSize(self.GetBestSize())
def makeCalibBox(self, parent, levels):
'''do my best to make a calibration box'''
gammaBox = wx.StaticBox(parent, -1, _translate('Luminance Values'))
gammaBox.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL))
gammaBoxSizer = wx.StaticBoxSizer(gammaBox, wx.VERTICAL)
theCols = list(map(str, levels))
self.gammaGrid = SimpleGrid(parent, id=-1,
cols=theCols,
rows=['lum', 'R', 'G', 'B'])
gammaBoxSizer.Add(self.gammaGrid)
self.gammaGrid.Bind(wx.grid.EVT_GRID_CELL_CHANGED, self.onChangeGammaGrid)
gammaBoxSizer.Layout()
return gammaBoxSizer
def onChangeGammaGrid(self, event):
"""The first column = black, so it gets set same for all, fix
"""
if event.GetCol() == 0:
newVal = self.gammaGrid.GetCellValue(event.GetRow(),
event.GetCol())
newVal = unicodeToFloat(newVal)
for nRow in range(self.gammaGrid.nRows):
self.gammaGrid.SetCellValue(nRow, 0, '%f' % newVal)
def getData(self):
"""Retrieve the data from the grid in same format as auto calibration
"""
data = []
for nRow in range(self.gammaGrid.nRows):
bob = []
for nCol in range(self.gammaGrid.nCols):
bob.append(self.gammaGrid.GetCellValue(nRow, nCol))
data.append(list(map(float, bob)))
return data
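    # Editor's note (added comment, not in the original source): the list
    # returned above has one row per gun, ordered lum, R, G, B, matching
    # the layout produced by the automatic calibration routine.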
def show(self):
"""Show dialog, retrieve data, empty if cancel
"""
ok = self.ShowModal()
if ok == wx.ID_OK:
return numpy.array(self.getData())
else:
return numpy.array([])
class GammaDlg(wx.Dialog):
def __init__(self, parent, monitor):
self.method = 'auto'
self.nPoints = 8
assert isinstance(monitor, monitors.Monitor)
self.useBits = monitor.getUseBits()
wx.Dialog.__init__(self, parent, -1, _translate('Gamma Calibration'),
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
mainSizer = wx.FlexGridSizer(cols=2, hgap=1, vgap=1)
# select method of calib (auto, semi-auto, manual)
        # todo: make the input table for the manual method
self.methodChoiceBx = wx.Choice(self, -1, choices=['auto', 'semi'])
self.methodChoiceBx.SetStringSelection('auto')
self.Bind(wx.EVT_CHOICE, self.onMethodChange, self.methodChoiceBx)
self.ctrlUseBits = wx.CheckBox(self, -1, _translate('Use Bits++'))
self.ctrlUseBits.SetValue(self.useBits)
msg = _translate('Number of calibration points:')
self.labelNPoints = wx.StaticText(self, -1, msg)
self.ctrlNPoints = intctrl.IntCtrl(self, -1, value=8)
msg = _translate('Screen number (primary is 1)')
self.labelScrN = wx.StaticText(self, -1, msg)
self.ctrlScrN = intctrl.IntCtrl(self, -1, value=1)
msg = _translate('Patch size (fraction of screen):')
self.labelStimSize = wx.StaticText(self, -1, msg)
self.ctrlStimSize = wx.TextCtrl(self, -1, '0.3')
pad = 5
mainSizer.Add((0, 0), 1, wx.ALL, pad)
mainSizer.Add(self.methodChoiceBx, 1, wx.ALL, pad)
mainSizer.Add(self.labelScrN, 1, wx.ALL, pad)
mainSizer.Add(self.ctrlScrN, 1, wx.ALL, pad)
mainSizer.Add(self.labelNPoints, 1, wx.ALL, pad)
mainSizer.Add(self.ctrlNPoints, 1, wx.ALL, pad)
mainSizer.Add(self.labelStimSize, 1, wx.ALL, pad)
mainSizer.Add(self.ctrlStimSize, 1, wx.ALL, pad)
mainSizer.Add((0, 0), 1, wx.ALL, pad)
mainSizer.Add(self.ctrlUseBits, 1, wx.ALL, pad)
btnOK = wx.Button(self, wx.ID_OK, _translate(" OK "))
btnOK.SetDefault()
mainSizer.Add(btnOK, 1, wx.TOP | wx.BOTTOM | wx.ALIGN_RIGHT, pad)
btnCANC = wx.Button(self, wx.ID_CANCEL, _translate(" Cancel "))
mainSizer.Add(btnCANC, 1,
wx.TOP | wx.BOTTOM | wx.RIGHT | wx.ALIGN_RIGHT, pad)
self.Center()
# mainSizer.Fit(self)
self.SetAutoLayout(True)
self.SetSizerAndFit(mainSizer)
def onMethodChange(self, event):
pass
class MonitorCenter(wx.App):
def OnInit(self):
frame = MainFrame(None, _translate('PsychoPy Monitor Center'))
frame.Show(True)
self.SetTopWindow(frame)
return True
if __name__ == '__main__':
app = MonitorCenter(0)
app.MainLoop()
| 52,539 | Python | .py | 1,096 | 35.001825 | 106 | 0.586292 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |

| 5,371 | psychopy-icon.svg | psychopy_psychopy/psychopy/monitors/psychopy-icon.svg |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="164.41785"
height="164.41785"
id="svg2"
version="1.1"
inkscape:version="0.48.4 r9939"
sodipodi:docname="psychopy.svg">
<defs
id="defs4">
<linearGradient
id="linearGradient3853"
inkscape:collect="always">
<stop
id="stop3855"
offset="0"
style="stop-color:#808080;stop-opacity:1" />
<stop
id="stop3857"
offset="1"
style="stop-color:#ffffff;stop-opacity:1" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3841">
<stop
style="stop-color:#666666;stop-opacity:1"
offset="0"
id="stop3843" />
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="1"
id="stop3845" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3831">
<stop
style="stop-color:#b3b3b3;stop-opacity:1"
offset="0"
id="stop3833" />
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="1"
id="stop3835" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3772">
<stop
style="stop-color:#222222;stop-opacity:1;"
offset="0"
id="stop3774" />
<stop
style="stop-color:#222222;stop-opacity:0;"
offset="1"
id="stop3776" />
</linearGradient>
<linearGradient
id="linearGradient3750">
<stop
id="stop3752"
offset="0"
style="stop-color:#000000;stop-opacity:1;" />
<stop
style="stop-color:#141414;stop-opacity:1;"
offset="0.12876198"
id="stop3762" />
<stop
id="stop3764"
offset="0.22307248"
style="stop-color:#262626;stop-opacity:1;" />
<stop
style="stop-color:#393939;stop-opacity:1;"
offset="0.31924155"
id="stop3766" />
<stop
id="stop3768"
offset="0.42719939"
style="stop-color:#4f4f4f;stop-opacity:1;" />
<stop
style="stop-color:#bbbbbb;stop-opacity:1;"
offset="0.64108545"
id="stop3760" />
<stop
style="stop-color:#d0d0d0;stop-opacity:1;"
offset="0.72748369"
id="stop3758" />
<stop
style="stop-color:#e5e5e5;stop-opacity:1;"
offset="0.81589973"
id="stop3756" />
<stop
id="stop3754"
offset="1"
style="stop-color:#f7f7f7;stop-opacity:1;" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3714">
<stop
style="stop-color:#ffffff;stop-opacity:1;"
offset="0"
id="stop3716" />
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="1"
id="stop3718" />
</linearGradient>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3750"
id="linearGradient3643"
x1="423.71222"
y1="782.60333"
x2="441.418"
y2="793.06329"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3714"
id="linearGradient3720"
x1="365.3989"
y1="486.5683"
x2="408.96875"
y2="614.06213"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3772"
id="linearGradient3778"
x1="471.34607"
y1="873.54211"
x2="467.4519"
y2="766.62939"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3831"
id="linearGradient3829"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.98832452,0,0,0.98832452,-44.22406,-133.90409)"
spreadMethod="reflect"
x1="411.04434"
y1="691.27911"
x2="476.58185"
y2="762.19476" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3841"
id="linearGradient3847"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3853"
id="linearGradient3851"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3853"
id="linearGradient2903"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3841"
id="linearGradient2905"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3750"
id="linearGradient2907"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect"
x1="423.71222"
y1="782.60333"
x2="441.418"
y2="793.06329" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3714"
id="linearGradient2909"
gradientUnits="userSpaceOnUse"
x1="365.3989"
y1="486.5683"
x2="408.96875"
y2="614.06213" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3772"
id="linearGradient2911"
gradientUnits="userSpaceOnUse"
x1="471.34607"
y1="873.54211"
x2="467.4519"
y2="766.62939" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3831"
id="linearGradient2913"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.98832452,0,0,0.98832452,-44.22406,-133.90409)"
spreadMethod="reflect"
x1="411.04434"
y1="691.27911"
x2="476.58185"
y2="762.19476" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3853"
id="linearGradient3054"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3841"
id="linearGradient3056"
gradientUnits="userSpaceOnUse"
x1="404.85583"
y1="692.75299"
x2="497.16815"
y2="814.81799" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3750"
id="linearGradient3058"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect"
x1="423.71222"
y1="782.60333"
x2="441.418"
y2="793.06329" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3714"
id="linearGradient3060"
gradientUnits="userSpaceOnUse"
x1="365.3989"
y1="486.5683"
x2="408.96875"
y2="614.06213" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3772"
id="linearGradient3062"
gradientUnits="userSpaceOnUse"
x1="471.34607"
y1="873.54211"
x2="467.4519"
y2="766.62939" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3831"
id="linearGradient3064"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.98832452,0,0,0.98832452,-44.22406,-133.90409)"
spreadMethod="reflect"
x1="411.04434"
y1="691.27911"
x2="476.58185"
y2="762.19476" />
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.5749948"
inkscape:cx="182.52396"
inkscape:cy="208.14249"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
inkscape:window-width="1920"
inkscape:window-height="1021"
inkscape:window-x="0"
inkscape:window-y="27"
inkscape:window-maximized="1"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-326.81408,-525.12119)">
<g
id="g3859">
<path
sodipodi:type="arc"
style="fill:#999999;fill-opacity:1;stroke:none"
id="path3794"
sodipodi:cx="458.60153"
sodipodi:cy="749.99072"
sodipodi:rx="81.573997"
sodipodi:ry="81.573997"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
transform="matrix(1.0077834,0,0,1.0077834,-53.148013,-148.49809)" />
<path
sodipodi:type="arc"
style="fill:url(#linearGradient3054);fill-opacity:1;stroke:none"
id="path3849"
sodipodi:cx="458.60153"
sodipodi:cy="749.99072"
sodipodi:rx="81.573997"
sodipodi:ry="81.573997"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
transform="matrix(-1.0038917,0,0,-1.0038917,869.4093,1360.2396)" />
<path
transform="matrix(0.93384114,0,0,0.93384114,-19.237984,-93.042077)"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
sodipodi:ry="81.573997"
sodipodi:rx="81.573997"
sodipodi:cy="749.99072"
sodipodi:cx="458.60153"
id="path3837"
style="fill:url(#linearGradient3056);fill-opacity:1;stroke:none"
sodipodi:type="arc" />
<path
sodipodi:type="arc"
style="fill:url(#linearGradient3058);fill-opacity:1;stroke:#616161;stroke-width:1.11514485;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:0.65098039;stroke-dasharray:none"
id="path3627"
sodipodi:cx="458.60153"
sodipodi:cy="749.99072"
sodipodi:rx="81.573997"
sodipodi:ry="81.573997"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
transform="matrix(0.89674453,0,0,0.89674453,-2.22543,-65.219961)" />
<path
inkscape:connector-curvature="0"
id="path3692"
d="m 409.03125,534.1875 c -40.4002,0 -73.15625,32.75605 -73.15625,73.15625 0,6.44798 0.83978,12.70002 2.40625,18.65625 19.41506,-25.1435 49.8382,-41.34375 84.0625,-41.34375 22.13372,0 42.7086,6.75694 59.71875,18.34375 -2.23978,-38.38544 -34.08407,-68.8125 -73.03125,-68.8125 z"
style="fill:url(#linearGradient3060);fill-opacity:1;stroke:none" />
<path
transform="matrix(0.89674453,0,0,0.89674453,-2.22543,-65.219961)"
d="m 540.17553,749.99072 c 0,45.05208 -36.52192,81.574 -81.574,81.574 -45.05207,0 -81.574,-36.52192 -81.574,-81.574 0,-45.05207 36.52193,-81.57399 81.574,-81.57399 45.05208,0 81.574,36.52192 81.574,81.57399 z"
sodipodi:ry="81.573997"
sodipodi:rx="81.573997"
sodipodi:cy="749.99072"
sodipodi:cx="458.60153"
id="path3770"
style="fill:url(#linearGradient3062);fill-opacity:1;stroke:none"
sodipodi:type="arc" />
<path
inkscape:connector-curvature="0"
id="path2853"
d="m 409.03144,526.70387 c -44.52607,0 -80.64007,36.114 -80.64007,80.64007 0,44.52607 36.114,80.60844 80.64007,80.60844 44.52607,0 80.60844,-36.08237 80.60844,-80.60844 0,-44.52607 -36.08237,-80.64007 -80.60844,-80.64007 z m 0,5.62899 c 41.41541,0 74.97945,33.59566 74.97945,75.01108 0,41.41541 -33.56404,74.97945 -74.97945,74.97945 -41.41542,0 -75.01108,-33.56404 -75.01108,-74.97945 0,-41.41542 33.59566,-75.01108 75.01108,-75.01108 z"
style="fill:url(#linearGradient3064);fill-opacity:1;stroke:none" />
</g>
</g>
</svg>
| 13,526 | Python | .py | 394 | 26.515228 | 446 | 0.61351 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |

| 5,372 | calibTools.py | psychopy_psychopy/psychopy/monitors/calibTools.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tools to help with calibrations
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from .calibData import wavelength_5nm, juddVosXYZ1976_5nm, cones_SmithPokorny
from psychopy import __version__, logging, hardware
try:
import serial
haveSerial = True
except (ModuleNotFoundError, ImportError):
haveSerial = False
import errno
import os
import time
import glob
import sys
from copy import deepcopy, copy
import numpy as np
from scipy import interpolate
import json_tricks # allows json to dump/load np.arrays and dates
DEBUG = False
# set and create (if necessary) the data folder
# this will be:
#   Linux/Mac: ~/.psychopy3/monitors
#   win32: <UserDocs>/Application Data/PsychoPy/monitors
join = os.path.join
if sys.platform == 'win32':
# we used this for a while (until 0.95.4) but not the proper place for
# windows app data
oldMonitorFolder = join(os.path.expanduser('~'), '.psychopy3', 'monitors')
monitorFolder = join(os.environ['APPDATA'], 'psychopy3', 'monitors')
if os.path.isdir(oldMonitorFolder) and not os.path.isdir(monitorFolder):
os.renames(oldMonitorFolder, monitorFolder)
else:
monitorFolder = join(os.environ['HOME'], '.psychopy3', 'monitors')
# HACK for Python2.7! On systems where `monitorFolder` contains special
# characters, for example because the Windows profile name does,
# `monitorFolder` must be decoded to Unicode to prevent errors later on.
# However, this is not a proper fix, since *everything* should be decoded
# to Unicode, and not just this specific pathname. Right now, errors will
# still occur if `monitorFolder` is combined with `str`-type objects that
# contain non-ASCII characters.
if isinstance(monitorFolder, bytes):
monitorFolder = monitorFolder.decode(sys.getfilesystemencoding())
try:
os.makedirs(monitorFolder)
except OSError as err:
if err.errno != errno.EEXIST:
raise
class Monitor:
"""Creates a monitor object for storing calibration details.
This will be loaded automatically from disk if the
monitor name is already defined (see methods).
Many settings from the stored monitor can easily be overridden
either by adding them as arguments during the initial call.
**arguments**:
- ``width, distance, gamma`` are details about the calibration
- ``notes`` is a text field to store any useful info
- ``useBits`` True, False, None
- ``verbose`` True, False, None
- ``currentCalib`` is a dictionary object containing various
fields for a calibration. Use with caution since the
dictionary may not contain all the necessary fields that
a monitor object expects to find.
**eg**:
``myMon = Monitor('sony500', distance=114)``
Fetches the info on the sony500 and overrides its usual distance
to be 114cm for this experiment.
These can be saved to the monitor file using
:func:`~psychopy.monitors.Monitor.save`
or not (in which case the changes will be lost)
"""
def __init__(self, name,
width=None,
distance=None,
gamma=None,
notes=None,
useBits=None,
verbose=True,
currentCalib=None,
autoLog=True):
"""
"""
# make sure that all necessary settings have some value
super(Monitor, self).__init__()
self.__type__ = 'psychoMonitor'
self.name = name
self.autoLog = autoLog
self.currentCalib = currentCalib or {}
self.currentCalibName = strFromDate(time.mktime(time.localtime()))
self.calibs = {}
self.calibNames = []
self._gammaInterpolator = None
self._gammaInterpolator2 = None
self._loadAll()
if len(self.calibNames) > 0:
self.setCurrent(-1) # will fetch previous vals if monitor exists
if self.autoLog:
logging.info('Loaded monitor calibration from %s' %
self.calibNames)
else:
self.newCalib()
logging.warning("Monitor specification not found. "
"Creating a temporary one...")
# override current monitor settings with the vals given
if width:
self.setWidth(width)
if distance:
self.setDistance(distance)
if gamma:
self.setGamma(gamma)
if notes:
self.setNotes(notes)
        if useBits is not None:
self.setUseBits(useBits)
def gammaIsDefault(self):
"""Determine whether we're using the default gamma values
"""
thisGamma = self.getGamma()
# run the test just on this
array = np.array
return (thisGamma is None or np.all(array(thisGamma) == array([1, 1, 1])))
# functions to set params of current calibration
def setSizePix(self, pixels):
"""Set the size of the screen in pixels x,y
"""
self.currentCalib['sizePix'] = pixels
def setWidth(self, width):
"""Of the viewable screen (cm)
"""
self.currentCalib['width'] = width
def setDistance(self, distance):
"""To the screen (cm)
"""
self.currentCalib['distance'] = distance
def setCalibDate(self, date=None):
"""Sets the current calibration to have a date/time or to the current
date/time if none given. (Also returns the date as set)
"""
if date is None:
date = time.mktime(time.localtime())
self.currentCalib['calibDate'] = date
return date
def setGamma(self, gamma):
"""Sets the gamma value(s) for the monitor.
This only uses a single gamma value for the three
guns, which is fairly approximate. Better to use
setGammaGrid (which uses one gamma value for each gun)
"""
self.currentCalib['gamma'] = gamma
def setGammaGrid(self, gammaGrid):
"""Sets the min,max,gamma values for the each gun
"""
self.currentCalib['gammaGrid'] = gammaGrid
def setLineariseMethod(self, method):
"""Sets the method for linearising
0 uses y=a+(bx)^gamma
1 uses y=(a+bx)^gamma
2 uses linear interpolation over the curve
"""
self.currentCalib['linearizeMethod'] = method
def setMeanLum(self, meanLum):
"""Records the mean luminance (for reference only)
"""
self.currentCalib['meanLum'] = meanLum
def setLumsPre(self, lums):
"""Sets the last set of luminance values measured during calibration
"""
self.currentCalib['lumsPre'] = lums
def setLumsPost(self, lums):
"""Sets the last set of luminance values measured AFTER calibration
"""
self.currentCalib['lumsPost'] = lums
def setLevelsPre(self, levels):
"""Sets the last set of luminance values measured during calibration
"""
self.currentCalib['levelsPre'] = levels
def setLevelsPost(self, levels):
"""Sets the last set of luminance values measured AFTER calibration
"""
self.currentCalib['levelsPost'] = levels
def setDKL_RGB(self, dkl_rgb):
"""Sets the DKL->RGB conversion matrix for a chromatically
calibrated monitor (matrix is a 3x3 num array).
"""
self.currentCalib['dkl_rgb'] = dkl_rgb
def setSpectra(self, nm, rgb):
"""Sets the phosphor spectra measured by the spectrometer
"""
self.currentCalib['spectraNM'] = nm
self.currentCalib['spectraRGB'] = rgb
def setLMS_RGB(self, lms_rgb):
"""Sets the LMS->RGB conversion matrix for a chromatically
calibrated monitor (matrix is a 3x3 num array).
"""
self.currentCalib['lms_rgb'] = lms_rgb
self.setPsychopyVersion(__version__)
def setPsychopyVersion(self, version):
"""To store the version of PsychoPy that this calibration used
"""
self.currentCalib['psychopyVersion'] = version
def setNotes(self, notes):
"""For you to store notes about the calibration
"""
self.currentCalib['notes'] = notes
def setUseBits(self, usebits):
"""DEPRECATED: Use the new hardware classes to control these devices
"""
self.currentCalib['usebits'] = usebits
# equivalent get functions
def getSizePix(self):
"""Returns the size of the current calibration in pixels,
or None if not defined
"""
size = None
if 'sizePix' in self.currentCalib:
size = self.currentCalib['sizePix']
# check various invalid sizes
        if not hasattr(size, '__iter__') or len(size) != 2:
return None
# make sure it's a list (not tuple) with no None vals
sizeOut = [(val or 0) for val in size]
return sizeOut
def getWidth(self):
"""Of the viewable screen in cm, or None if not known
"""
return self.currentCalib['width']
def getDistance(self):
"""Returns distance from viewer to the screen in cm,
or None if not known
"""
return self.currentCalib['distance']
def getCalibDate(self):
"""As a python date object (convert to string using
calibTools.strFromDate"""
return self.currentCalib['calibDate']
def getGamma(self):
"""Returns just the gamma value (not the whole grid)
"""
gridInCurrent = 'gammaGrid' in self.currentCalib
if (gridInCurrent and not np.all(self.getGammaGrid()[1:, 2] == 1)):
return self.getGammaGrid()[1:, 2]
elif 'gamma' in self.currentCalib:
return self.currentCalib['gamma']
else:
return None
def getGammaGrid(self):
"""Gets the min,max,gamma values for the each gun
"""
if 'gammaGrid' in self.currentCalib:
# Make sure it's an array, so you can look at the shape
grid = np.asarray(self.currentCalib['gammaGrid'])
            if grid.shape != (4, 6):  # shape is a tuple, not a list
newGrid = np.zeros([4, 6], 'f') * np.nan # start as NaN
newGrid[:grid.shape[0], :grid.shape[1]] = grid
grid = self.currentCalib['gammaGrid'] = newGrid
return grid
else:
return None
def getLinearizeMethod(self):
"""Gets the method that this monitor is using to linearize the guns
"""
if 'linearizeMethod' in self.currentCalib:
return self.currentCalib['linearizeMethod']
elif 'lineariseMethod' in self.currentCalib:
return self.currentCalib['lineariseMethod']
else:
return None
def getMeanLum(self):
"""Returns the mean luminance of the screen if explicitly stored
"""
if 'meanLum' in self.currentCalib:
return self.currentCalib['meanLum']
else:
return None
def getLumsPre(self):
"""Gets the measured luminance values from last calibration"""
if 'lumsPre' in self.currentCalib:
return self.currentCalib['lumsPre']
else:
return None
def getLumsPost(self):
"""Gets the measured luminance values from last calibration TEST"""
if 'lumsPost' in self.currentCalib:
return self.currentCalib['lumsPost']
else:
return None
def getLevelsPre(self):
"""Gets the measured luminance values from last calibration"""
if 'levelsPre' in self.currentCalib:
return self.currentCalib['levelsPre']
else:
return None
def getLevelsPost(self):
"""Gets the measured luminance values from last calibration TEST"""
if 'levelsPost' in self.currentCalib:
return self.currentCalib['levelsPost']
else:
return None
def getSpectra(self):
"""Gets the wavelength values from the last spectrometer measurement
(if available)
usage:
- nm, power = monitor.getSpectra()
"""
if 'spectraNM' in self.currentCalib:
return (self.currentCalib['spectraNM'],
self.currentCalib['spectraRGB'])
else:
return None, None
def getDKL_RGB(self, RECOMPUTE=False):
"""Returns the DKL->RGB conversion matrix. If one has been saved
this will be returned. Otherwise, if power spectra are available
for the monitor a matrix will be calculated.
"""
        if 'dkl_rgb' not in self.currentCalib:
RECOMPUTE = True
if RECOMPUTE:
nm, power = self.getSpectra()
if nm is None:
return None
else:
return makeDKL2RGB(nm, power)
else:
return self.currentCalib['dkl_rgb']
def getLMS_RGB(self, recompute=False):
"""Returns the LMS->RGB conversion matrix.
If one has been saved this will be returned.
Otherwise (if power spectra are available for the
monitor) a matrix will be calculated.
"""
        if 'lms_rgb' not in self.currentCalib:
recompute = True
if recompute:
nm, power = self.getSpectra()
if nm is None:
return None
else:
return makeLMS2RGB(nm, power)
else:
return self.currentCalib['lms_rgb']
def getPsychopyVersion(self):
"""Returns the version of PsychoPy that was used to create
this calibration
"""
return self.currentCalib['psychopyVersion']
def getNotes(self):
"""Notes about the calibration
"""
return self.currentCalib['notes']
def getUseBits(self):
"""Was this calibration carried out with a bits++ box
"""
return self.currentCalib['usebits']
# other (admin functions)
def _loadAll(self):
"""Fetches the calibrations for this monitor from disk, storing them
as self.calibs
"""
ext = ".json"
# the name of the actual file:
thisFileName = os.path.join(monitorFolder, self.name + ext)
if not os.path.exists(thisFileName):
self.calibNames = []
else:
with open(thisFileName, 'r') as thisFile:
# Passing encoding parameter to json.loads has been
# deprecated and removed in Python 3.9
self.calibs = json_tricks.load(
thisFile, ignore_comments=False,
preserve_order=False)
self.calibNames = sorted(self.calibs)
def newCalib(self, calibName=None, width=None,
distance=None, gamma=None, notes=None, useBits=False,
verbose=True):
"""create a new (empty) calibration for this monitor and
makes this the current calibration
"""
dateTime = time.mktime(time.localtime())
if calibName is None:
calibName = strFromDate(dateTime)
# add to the list of calibrations
self.calibNames.append(calibName)
self.calibs[calibName] = {}
self.setCurrent(calibName)
# populate with some default values:
self.setCalibDate(dateTime)
self.setGamma(gamma)
self.setWidth(width)
self.setDistance(distance)
self.setNotes(notes)
self.setPsychopyVersion(__version__)
self.setUseBits(useBits)
newGrid = np.ones((4, 3), 'd')
newGrid[:, 0] *= 0
self.setGammaGrid(newGrid)
self.setLineariseMethod(1)
def setCurrent(self, calibration=-1):
"""Sets the current calibration for this monitor.
Note that a single file can hold multiple calibrations each
stored under a different key (the date it was taken)
The argument is either a string (naming the calib) or an integer
**eg**:
``myMon.setCurrent('mainCalib')``
fetches the calibration named mainCalib. You can name
calibrations what you want but PsychoPy will give them names
of date/time by default. In Monitor Center you can 'copy...'
a calibration and give it a new name to keep a second version.
``calibName = myMon.setCurrent(0)``
fetches the first calibration (alphabetically) for this monitor
``calibName = myMon.setCurrent(-1)``
fetches the last **alphabetical** calibration for this monitor
(this is default). If default names are used for calibrations
(ie date/time stamp) then this will import the most recent.
"""
# find the appropriate file
        # get the desired calibration name if necessary
if (isinstance(calibration, str) and
calibration in self.calibNames):
self.currentCalibName = calibration
        elif (isinstance(calibration, int) and
                calibration < len(self.calibNames)):
self.currentCalibName = self.calibNames[calibration]
else:
print("No record of that calibration")
return False
# do the import
self.currentCalib = self.calibs[self.currentCalibName]
return self.currentCalibName
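    # Editor's note (example, not in the original source): typical calls,
    # assuming a calibration saved under the name '2020_06_01 12:00' exists:
    #
    #     mon.setCurrent(-1)                  # most recent (the default)
    #     name = mon.setCurrent(0)            # first, alphabetically
    #     mon.setCurrent('2020_06_01 12:00')  # fetch a calibration by name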
def delCalib(self, calibName):
"""Remove a specific calibration from the current monitor.
Won't be finalised unless monitor is saved
"""
# remove from our list
self.calibNames.remove(calibName)
self.calibs.pop(calibName)
if self.currentCalibName == calibName:
self.setCurrent(-1)
return 1
def save(self):
"""Save the current calibrations to disk.
This will write a `json` file to the `monitors` subfolder of your
PsychoPy configuration folder (typically `~/.psychopy3/monitors` on
Linux and macOS, and `%APPDATA%\\psychopy3\\monitors` on Windows).
"""
self._saveJSON()
def saveMon(self):
"""Equivalent of :func:`~psychopy.monitors.Monitor.save`.
"""
self.save()
def _saveJSON(self):
thisFileName = os.path.join(monitorFolder, self.name + ".json")
# convert time structs to timestamps (floats)
for calibName in self.calibs:
calib = self.calibs[calibName]
if isinstance(calib['calibDate'], time.struct_time):
calib['calibDate'] = time.mktime(calib['calibDate'])
with open(thisFileName, 'w') as outfile:
json_tricks.dump(self.calibs, outfile, indent=2,
allow_nan=True)
def copyCalib(self, calibName=None):
"""Stores the settings for the current calibration settings as
new monitor.
"""
if calibName is None:
calibName = strFromDate(time.mktime(time.localtime()))
# add to the list of calibrations
self.calibNames.append(calibName)
self.calibs[calibName] = deepcopy(self.currentCalib)
self.setCurrent(calibName)
def linearizeLums(self, desiredLums, newInterpolators=False,
overrideGamma=None):
"""lums should be uncalibrated luminance values (e.g. a linear ramp)
ranging 0:1
"""
linMethod = self.getLinearizeMethod()
desiredLums = np.asarray(desiredLums)
output = desiredLums * 0.0 # needs same size as input
# gamma interpolation
if linMethod == 3:
lumsPre = copy(self.getLumsPre())
if self._gammaInterpolator is not None and not newInterpolators:
pass # we already have an interpolator
elif lumsPre is not None:
if self.autoLog:
logging.info('Creating linear interpolation for gamma')
# we can make an interpolator
self._gammaInterpolator = []
self._gammaInterpolator2 = []
# each of these interpolators is a function!
levelsPre = self.getLevelsPre() / 255.0
for gun in range(4):
# scale to 0:1
                    lumsPre[gun, :] = \
                        ((lumsPre[gun, :] - lumsPre[gun, 0]) /
                         (lumsPre[gun, -1] - lumsPre[gun, 0]))
self._gammaInterpolator.append(
interpolate.interp1d(lumsPre[gun, :],
levelsPre,
kind='linear'))
# interpFunc = Interpolation.InterpolatingFunction(
# (lumsPre[gun,:],), levelsPre)
# polyFunc = interpFunc.fitPolynomial(3)
# self._gammaInterpolator2.append( [polyFunc.coeff])
else:
# no way to do this! Calibrate the monitor
logging.error("Can't do a gamma interpolation on your "
"monitor without calibrating!")
return desiredLums
# then do the actual interpolations
if len(desiredLums.shape) > 1:
for gun in range(3):
# gun+1 because we don't want luminance interpolator
_gammaIntrpGun = self._gammaInterpolator[gun + 1]
output[:, gun] = _gammaIntrpGun(desiredLums[:, gun])
else:
# just luminance
output = self._gammaInterpolator[0](desiredLums)
# use a fitted gamma equation (1 or 2)
elif linMethod in [1, 2, 4]:
# get the min,max lums
gammaGrid = self.getGammaGrid()
if gammaGrid is not None:
# if we have info about min and max luminance then use it
minLum = gammaGrid[1, 0]
maxLum = gammaGrid[1:4, 1]
b = gammaGrid[1:4, 4]
if overrideGamma is not None:
gamma = overrideGamma
else:
gamma = gammaGrid[1:4, 2]
maxLumWhite = gammaGrid[0, 1]
gammaWhite = gammaGrid[0, 2]
if self.autoLog:
logging.debug('using gamma grid' + str(gammaGrid))
            else:
                # just do the calculation using gamma; without a grid we
                # only know the nominal 0-1 range for each gun
                minLum = 0
                maxLum = [1.0, 1.0, 1.0]
                maxLumWhite = 1.0
                b = [None, None, None]  # b is only used by eq 4
                gamma = self.currentCalib['gamma']
                gammaWhite = np.average(gamma)
# get the inverse gamma
if len(desiredLums.shape) > 1:
for gun in range(3):
output[:, gun] = gammaInvFun(desiredLums[:, gun],
minLum, maxLum[gun],
gamma[gun],
eq=linMethod, b=b[gun])
else:
output = gammaInvFun(desiredLums, minLum, maxLumWhite,
gammaWhite, eq=linMethod)
else:
msg = "Don't know how to linearise with method %i"
logging.error(msg % linMethod)
output = desiredLums
return output
def lineariseLums(self, desiredLums, newInterpolators=False,
overrideGamma=None):
"""Equivalent of :func:`~psychopy.monitors.Monitor.linearizeLums`.
"""
return self.linearizeLums(desiredLums=desiredLums,
newInterpolators=newInterpolators,
overrideGamma=overrideGamma)
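def _demoLinearizeLums():
    # Editor's sketch, not part of the original file: typical use of
    # linearizeLums(). Assumes a monitor definition named 'testMonitor'
    # exists on disk (PsychoPy normally ships one).
    mon = Monitor('testMonitor')
    desiredLums = np.linspace(0.0, 1.0, 5)  # a linear ramp of desired lums
    # returns the gun values needed to produce that ramp on screen
    return mon.linearizeLums(desiredLums)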
class GammaCalculator:
"""Class for managing gamma tables
**Parameters:**
- inputs (required)= values at which you measured screen luminance either
in range 0.0:1.0, or range 0:255. Should include the min
and max of the monitor
Then give EITHER "lums" or "gamma":
- lums = measured luminance at given input levels
- gamma = your own gamma value (single float)
- bitsIN = number of values in your lookup table
- bitsOUT = number of bits in the DACs
    Once fitted, the result is available as ``myTable.gammaModel``
    and ``myTable.gamma``.
"""
def __init__(self,
inputs=(),
lums=(),
gamma=None,
                 bitsIN=8,   # number of values in the LUT
                 bitsOUT=8,  # number of bits in the DACs
                 eq=1):
super(GammaCalculator, self).__init__()
self.lumsInitial = list(lums)
self.inputs = inputs
self.bitsIN = bitsIN
self.bitsOUT = bitsOUT
self.eq = eq
        # set or get the input levels
if len(inputs) == 0 and len(lums) > 0:
self.inputs = DACrange(len(lums))
else:
self.inputs = list(inputs)
# set or get gammaVal
# user is specifying their own gamma value
        if len(lums) == 0 or gamma is not None:
self.gamma = gamma
elif len(lums) > 0:
self.min, self.max, self.gammaModel = self.fitGammaFun(
self.inputs, self.lumsInitial)
if eq == 4:
self.gamma, self.a, self.k = self.gammaModel
self.b = (lums[0] - self.a) ** (1.0 / self.gamma)
else:
self.gamma = self.gammaModel[0]
self.a = self.b = self.k = None
else:
raise AttributeError("gammaTable needs EITHER a gamma value"
" or some luminance measures")
def fitGammaFun(self, x, y):
"""
Fits a gamma function to the monitor calibration data.
**Parameters:**
-xVals are the monitor look-up-table vals, either 0-255 or 0.0-1.0
-yVals are the measured luminances from a photometer/spectrometer
"""
import scipy.optimize as optim
minGamma = 0.8
maxGamma = 20.0
gammaGuess = 2.0
y = np.asarray(y)
minLum = y[0]
maxLum = y[-1]
if self.eq == 4:
aGuess = minLum / 5.0
kGuess = (maxLum - aGuess) ** (1.0 / gammaGuess) - aGuess
guess = [gammaGuess, aGuess, kGuess]
bounds = [[0.8, 5.0], [0.00001, minLum - 0.00001], [2, 200]]
else:
guess = [gammaGuess]
bounds = [[0.8, 5.0]]
# gamma = optim.fmin(self.fitGammaErrFun, guess, (x, y, minLum, maxLum))
# gamma = optim.fminbound(self.fitGammaErrFun,
# minGamma, maxGamma,
# args=(x,y, minLum, maxLum))
params = optim.fmin_tnc(self.fitGammaErrFun, np.array(guess),
approx_grad=True,
args=(x, y, minLum, maxLum),
bounds=bounds, messages=0)
return minLum, maxLum, params[0]
def fitGammaErrFun(self, params, x, y, minLum, maxLum):
"""Provides an error function for fitting gamma function
(used by fitGammaFun)
"""
if self.eq == 4:
gamma, a, k = params
_m = gammaFun(x, minLum, maxLum, gamma, eq=self.eq, a=a, k=k)
model = np.asarray(_m)
else:
gamma = params[0]
_m = gammaFun(x, minLum, maxLum, gamma, eq=self.eq)
model = np.asarray(_m)
SSQ = np.sum((model - y)**2)
return SSQ
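def _demoGammaCalculator():
    # Editor's sketch, not part of the original file: recover a gamma
    # exponent from synthetic luminance measurements using eq 1.
    levels = DACrange(8)                    # 8 DAC levels spanning 0-255
    lums = 100.0 * (levels / 255.0) ** 2.2  # fake monitor with gamma 2.2
    calc = GammaCalculator(inputs=levels, lums=lums, eq=1)
    return calc.gamma  # the fitted exponent; should come out near 2.2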
def makeDKL2RGB(nm, powerRGB):
"""Creates a 3x3 DKL->RGB conversion matrix from the spectral input powers
"""
interpolateCones = interpolate.interp1d(wavelength_5nm,
cones_SmithPokorny)
interpolateJudd = interpolate.interp1d(wavelength_5nm,
juddVosXYZ1976_5nm)
judd = interpolateJudd(nm)
cones = interpolateCones(nm)
judd = np.asarray(judd)
cones = np.asarray(cones)
rgb_to_cones = np.dot(cones, np.transpose(powerRGB))
# get LMS weights for Judd vl
lumwt = np.dot(judd[1, :], np.linalg.pinv(cones))
# cone weights for achromatic primary
dkl_to_cones = np.dot(rgb_to_cones, [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
# cone weights for L-M primary
dkl_to_cones[0, 1] = lumwt[1] / lumwt[0]
dkl_to_cones[1, 1] = -1
dkl_to_cones[2, 1] = lumwt[2]
# weights for S primary
dkl_to_cones[0, 2] = 0
dkl_to_cones[1, 2] = 0
dkl_to_cones[2, 2] = -1
# Now have primaries expressed as cone excitations
# get coefficients for cones ->monitor
cones_to_rgb = np.linalg.inv(rgb_to_cones)
# get coefficients for DKL cone weights to monitor
dkl_to_rgb = np.dot(cones_to_rgb, dkl_to_cones)
# normalise each col
dkl_to_rgb[:, 0] /= max(abs(dkl_to_rgb[:, 0]))
dkl_to_rgb[:, 1] /= max(abs(dkl_to_rgb[:, 1]))
dkl_to_rgb[:, 2] /= max(abs(dkl_to_rgb[:, 2]))
return dkl_to_rgb
def makeLMS2RGB(nm, powerRGB):
"""Creates a 3x3 LMS->RGB conversion matrix from the spectral input powers
"""
interpolateCones = interpolate.interp1d(wavelength_5nm,
cones_SmithPokorny)
coneSens = interpolateCones(nm)
rgb_to_cones = np.dot(coneSens, np.transpose(powerRGB))
cones_to_rgb = np.linalg.inv(rgb_to_cones)
return cones_to_rgb
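def _demoLMS2RGB(nm, spectra):
    # Editor's sketch, not part of the original file: once the matrix has
    # been computed from measured spectra (e.g. from Monitor.getSpectra()),
    # an LMS coordinate is converted to gun values with a dot product.
    lms2rgb = makeLMS2RGB(nm, spectra)
    someLMS = np.array([0.5, 0.5, 0.5])  # an arbitrary LMS triplet
    return np.dot(lms2rgb, someLMS)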
def makeXYZ2RGB(red_xy,
green_xy,
blue_xy,
whitePoint_xy=(0.3127, 0.329),
reverse=False):
"""Create a linear RGB conversion matrix.
Returns a matrix to convert CIE-XYZ (1931) tristimulus values to linear RGB
given CIE-xy (1931) primaries and white point. By default, the returned
matrix transforms CIE-XYZ to linear RGB coordinates. Use 'reverse=True' to
get the inverse transformation. The chromaticity coordinates of the
display's phosphor 'guns' are usually measured with a spectrophotometer.
The routines here are based on methods found at:
https://www.ryanjuckett.com/rgb-color-space-conversion/
Parameters
----------
red_xy : tuple, list or ndarray
Chromaticity coordinate (CIE-xy) of the 'red' gun.
green_xy: tuple, list or ndarray
Chromaticity coordinate (CIE-xy) of the 'green' gun.
blue_xy : tuple, list or ndarray
Chromaticity coordinate (CIE-xy) of the 'blue' gun.
whitePoint_xy : tuple, list or ndarray
Chromaticity coordinate (CIE-xy) of the white point, default is D65.
reverse : bool
Return the inverse transform sRGB -> XYZ. Default is `False`.
Returns
-------
ndarray
3x3 conversion matrix
Examples
--------
Construct a conversion matrix to transform CIE-XYZ coordinates to linear
(not gamma corrected) RGB values::
# nominal primaries for sRGB (or BT.709)
red = (0.6400, 0.3300)
green = (0.300, 0.6000)
blue = (0.1500, 0.0600)
whiteD65 = (0.3127, 0.329)
conversionMatrix = makeXYZ2RGB(red, green, blue, whiteD65)
# The value of `conversionMatrix` should have similar coefficients to
# that presented in the BT.709 standard.
#
# [[ 3.24096994 -1.53738318 -0.49861076]
# [-0.96924364 1.8759675 0.04155506]
# [ 0.05563008 -0.20397696 1.05697151]]
#
"""
# convert CIE-xy chromaticity coordinates to xyY and put them into a matrix
mat_xyY_primaries = np.asarray((
(red_xy[0], red_xy[1], 1.0 - red_xy[0] - red_xy[1]),
(green_xy[0], green_xy[1], 1.0 - green_xy[0] - green_xy[1]),
(blue_xy[0], blue_xy[1], 1.0 - blue_xy[0] - blue_xy[1])
)).T
# convert white point to CIE-XYZ
whtp_XYZ = np.asarray(
np.dot(1.0 / whitePoint_xy[1],
np.asarray((
whitePoint_xy[0],
whitePoint_xy[1],
1.0 - whitePoint_xy[0] - whitePoint_xy[1])
)
)
)
# compute the final matrix (sRGB -> XYZ)
u = np.diag(np.dot(whtp_XYZ, np.linalg.inv(mat_xyY_primaries).T))
to_return = np.matmul(mat_xyY_primaries, u)
if not reverse: # for XYZ -> sRGB conversion matrix (we usually want this!)
return np.linalg.inv(to_return)
return to_return
def getLumSeries(lumLevels=8,
winSize=(800, 600),
monitor=None,
gamma=1.0,
allGuns=True,
useBits=False,
autoMode='auto',
stimSize=0.3,
photometer=None,
screen=0):
"""Automatically measures a series of gun values and measures
the luminance with a photometer.
:Parameters:
photometer : a photometer object
e.g. a :class:`~psychopy.hardware.pr.PR65` or
:class:`~psychopy.hardware.minolta.LS100` from
hardware.findPhotometer()
lumLevels : (default=8)
array of values to test or single value for n evenly spaced
test values
gamma : (default=1.0) the gamma value at which to test
        autoMode : 'auto' or 'semi' (default='auto')
If 'auto' the program will present the screen
and automatically take a measurement before moving on.
If set to 'semi' the program will wait for a keypress before
moving on but will not attempt to make a measurement (use this
to make a measurement with your own device).
Any other value will simply move on without pausing on each
screen (use this to see that the display is performing as
expected).
"""
import psychopy.event
import psychopy.visual
from psychopy import core
if photometer is None:
havePhotom = False
elif not hasattr(photometer, 'getLum'):
msg = ("photometer argument to monitors.getLumSeries should be a "
"type of photometer object, not a %s")
logging.error(msg % type(photometer))
return None
else:
havePhotom = True
if useBits:
# all gamma transforms will occur in calling the Bits++ LUT
# which improves the precision (14bit not 8bit gamma)
bitsMode = 'fast'
else:
bitsMode = None
if gamma == 1:
initRGB = 0.5 ** (1 / 2.0) * 2 - 1
else:
initRGB = 0.8
# setup screen and "stimuli"
myWin = psychopy.visual.Window(fullscr=0, size=winSize,
gamma=gamma, units='norm', monitor=monitor,
allowGUI=True, winType='pyglet',
bitsMode=bitsMode, screen=screen)
instructions = ("Point the photometer at the central bar. "
"Hit a key when ready (or wait 30s)")
message = psychopy.visual.TextStim(myWin, text=instructions, height=0.1,
pos=(0, -0.85), rgb=[1, -1, -1])
noise = np.random.rand(512, 512).round() * 2 - 1
backPatch = psychopy.visual.PatchStim(myWin, tex=noise, size=2,
units='norm',
sf=[winSize[0] / 512.0,
winSize[1] / 512.0])
testPatch = psychopy.visual.PatchStim(myWin,
tex='sqr',
size=stimSize,
rgb=initRGB,
units='norm')
# stay like this until key press (or 30secs has passed)
waitClock = core.Clock()
tRemain = 30
while tRemain > 0:
tRemain = 30 - waitClock.getTime()
backPatch.draw()
testPatch.draw()
instructions = ("Point the photometer at the central white bar. "
"Hit a key when ready (or wait %iss)")
message.setText(instructions % tRemain, log=False)
message.draw()
myWin.flip()
if len(psychopy.event.getKeys()):
break # we got a keypress so move on
if autoMode != 'semi':
message.setText('Q to quit at any time')
else:
message.setText('Spacebar for next patch')
    # the LS100 (and presumably the CS100A) likes to take at least one
    # bright measurement before the series starts
    if havePhotom and photometer.type in ('LS100', 'CS100A'):
        junk = photometer.getLum()
# what are the test values of luminance
    if isinstance(lumLevels, (int, float)):
toTest = DACrange(lumLevels)
else:
toTest = np.asarray(lumLevels)
if allGuns:
guns = [0, 1, 2, 3] # gun=0 is the white luminance measure
else:
guns = [0]
# this will hold the measured luminance values
lumsList = np.zeros((len(guns), len(toTest)), 'd')
# for each gun, for each value run test
for gun in guns:
for valN, DACval in enumerate(toTest):
lum = (DACval / 127.5) - 1 # get into range -1:1
            # only do luminance=-1 once
if lum == -1 and gun > 0:
continue
# set the patch color
if gun > 0:
rgb = [-1, -1, -1]
rgb[gun - 1] = lum
else:
rgb = [lum, lum, lum]
backPatch.draw()
testPatch.setColor(rgb)
testPatch.draw()
message.draw()
myWin.flip()
# allowing the screen to settle (no good reason!)
time.sleep(0.2)
# take measurement
if havePhotom and autoMode == 'auto':
actualLum = photometer.getLum()
print("At DAC value %i\t: %.2fcd/m^2" % (DACval, actualLum))
if lum == -1 or not allGuns:
# if the screen is black set all guns to this lum value!
lumsList[:, valN] = actualLum
else:
# otherwise just this gun
lumsList[gun, valN] = actualLum
# check for quit request
for thisKey in psychopy.event.getKeys():
if thisKey in ('q', 'Q', 'escape'):
myWin.close()
return np.array([])
elif autoMode == 'semi':
print("At DAC value %i" % DACval)
done = False
while not done:
# check for quit request
for thisKey in psychopy.event.getKeys():
if thisKey in ('q', 'Q', 'escape'):
myWin.close()
return np.array([])
elif thisKey in (' ', 'space'):
done = True
myWin.close() # we're done with the visual stimuli
if havePhotom:
return lumsList
else:
return np.array([])
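# Editor's example (not in the original source; a hedged sketch): a typical
# calibration run with an auto-detected photometer, using the default
# 'testMonitor' calibration that psychopy.monitors creates.
def _exampleLumSeries():
    from psychopy import hardware
    phot = hardware.findPhotometer()  # returns None if nothing is found
    if phot is None:
        return None
    # 8 evenly spaced DAC levels, measured automatically on each screen
    return getLumSeries(lumLevels=8, photometer=phot,
                        monitor='testMonitor', autoMode='auto')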
def getLumSeriesPR650(lumLevels=8,
winSize=(800, 600),
monitor=None,
gamma=1.0,
allGuns=True,
useBits=False,
autoMode='auto',
stimSize=0.3,
photometer='COM1'):
"""DEPRECATED (since v1.60.01): Use
:class:`psychopy.monitors.getLumSeries()` instead
"""
logging.warning(
"DEPRECATED (since v1.60.01): Use monitors.getLumSeries() instead")
val = getLumSeries(lumLevels,
winSize, monitor,
gamma, allGuns, useBits,
autoMode, stimSize, photometer)
return val
def getRGBspectra(stimSize=0.3, winSize=(800, 600), photometer='COM1'):
"""
usage:
getRGBspectra(stimSize=0.3, winSize=(800,600), photometer='COM1')
:params:
- 'photometer' could be a photometer object or a serial port
name on which a photometer might be found (not recommended)
"""
import psychopy.event
import psychopy.visual
if hasattr(photometer, 'getLastSpectrum'):
photom = photometer
    else:
        # set up the photometer from a serial port name
        photom = hardware.Photometer(photometer)
    havephotom = photom is not None
# setup screen and "stimuli"
myWin = psychopy.visual.Window(fullscr=0, rgb=0.0, size=winSize,
units='norm')
instructions = ("Point the photometer at the central square. "
"Hit a key when ready")
message = psychopy.visual.TextStim(myWin, text=instructions, height=0.1,
pos=(0.0, -0.8), rgb=-1.0)
message.draw()
testPatch = psychopy.visual.PatchStim(myWin, tex=None,
size=stimSize * 2, rgb=0.3)
testPatch.draw()
myWin.flip()
# stay like this until key press (or 30secs has passed)
psychopy.event.waitKeys(30)
spectra = []
for thisColor in [[1, -1, -1], [-1, 1, -1], [-1, -1, 1]]:
# update stimulus
testPatch.setColor(thisColor)
testPatch.draw()
myWin.flip()
# make measurement
photom.measure()
spectra.append(photom.getLastSpectrum(parse=False))
myWin.close()
nm, power = photom.parseSpectrumOutput(spectra)
return nm, power
def DACrange(n):
"""Returns an array of n DAC values spanning 0-255
"""
# NB python ranges exclude final val
return np.arange(0.0, 256.0, 255.0 / float(n - 1)).astype(np.uint8)
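# Editor's example (not in the original source): DACrange(4) yields four
# evenly spaced 8-bit levels spanning the full DAC range.
def _exampleDACrange():
    return DACrange(4)  # -> array([  0,  85, 170, 255], dtype=uint8)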
def getAllMonitors():
"""Find the names of all monitors for which calibration files exist
"""
monitorList = glob.glob(os.path.join(monitorFolder, '*.json'))
split = os.path.split
splitext = os.path.splitext
# skip the folder and the extension for each file
monitorList = [splitext(split(thisFile)[-1])[0]
for thisFile in monitorList]
return monitorList
def gammaFun(xx, minLum, maxLum, gamma, eq=1, a=None, b=None, k=None):
"""Returns gamma-transformed luminance values.
y = gammaFun(x, minLum, maxLum, gamma)
a and b are calculated directly from minLum, maxLum, gamma
**Parameters:**
- **xx** are the input values (range 0-255 or 0.0-1.0)
- **minLum** = the minimum luminance of your monitor
- **maxLum** = the maximum luminance of your monitor (for this gun)
- **gamma** = the value of gamma (for this gun)
"""
# scale x to be in range minLum:maxLum
xx = np.array(xx, 'd')
maxXX = max(xx)
invGamma = 1.0 / float(gamma) # used a lot below, so compute it here
if maxXX > 2.0:
# xx = xx * maxLum / 255.0 + minLum
xx = xx / 255.0
else: # assume data are in range 0:1
pass
# xx = xx * maxLum + minLum
# eq1: y = a + (b*xx)**gamma
# eq2: y = (a + b * xx)**gamma
# eq4: y = a + (b + k*xx)**gamma # Pelli & Zhang 1991
if eq == 1:
a = minLum
b = (maxLum - a) ** invGamma
yy = a + (b * xx) ** gamma
elif eq == 2:
a = minLum ** invGamma
b = maxLum ** invGamma - a
yy = (a + b * xx) ** gamma
elif eq == 3:
# NB method 3 was an interpolation method that didn't work well
raise ValueError('Parameter `eq` must be one of 1, 2 or 4.')
elif eq == 4:
nMissing = sum([a is None, b is None, k is None])
# check params
if nMissing > 1:
msg = "For eq=4, gammaFun needs 2 of a, b, k to be specified"
raise AttributeError(msg)
elif nMissing == 1:
if a is None:
a = minLum - b ** invGamma # when y=min, x=0
elif b is None:
if a >= minLum:
b = 0.1 ** invGamma # can't take inv power of -ve
else:
b = (minLum - a) ** invGamma # when y=min, x=0
elif k is None:
k = (maxLum - a) ** invGamma - b # when y=max, x=1
# this is the same as Pelli and Zhang (but different inverse function)
yy = a + (b + k * xx) ** gamma # Pelli and Zhang (1991)
else:
raise ValueError('Parameter `eq` must be one of 1, 2 or 4.')
return yy
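# Editor's example (not in the original source): with eq=1 (the default) the
# endpoints map exactly to minLum and maxLum, since y(0) = a = minLum and
# y(1) = minLum + (maxLum - minLum) = maxLum.
def _exampleGammaFun():
    xx = np.array([0.0, 0.5, 1.0])
    return gammaFun(xx, minLum=0.5, maxLum=100.0, gamma=2.2)
    # -> approximately [0.5, 22.2, 100.0]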
def gammaInvFun(yy, minLum, maxLum, gamma, b=None, eq=1):
"""Returns inverse gamma function for desired luminance values.
x = gammaInvFun(y, minLum, maxLum, gamma)
a and b are calculated directly from minLum, maxLum, gamma
**Parameters:**
        - **yy** are the desired luminance values (range 0-255 or 0.0-1.0)
- **minLum** = the minimum luminance of your monitor
- **maxLum** = the maximum luminance of your monitor (for this gun)
- **gamma** = the value of gamma (for this gun)
- **eq** determines the gamma equation used;
eq==1[default]: yy = a + (b * xx)**gamma
eq==2: yy = (a + b*xx)**gamma
"""
# x should be 0:1
# y should be 0:1, then converted to minLum:maxLum
# eq1: y = a + (b * xx)**gamma
# eq2: y = (a + b * xx)**gamma
    # eq4: y = a + (b + k*xx)**gamma
invGamma = 1.0 / float(gamma)
if max(yy) == 255:
yy = np.asarray(yy, 'd') / 255.0
elif min(yy) < 0 or max(yy) > 1:
logging.warning(
'User supplied values outside the expected range (0:1)')
else:
yy = np.asarray(yy, 'd')
if eq == 1:
xx = np.asarray(yy) ** invGamma
elif eq == 2:
yy = np.asarray(yy) * (maxLum - minLum) + minLum
a = minLum ** invGamma
b = maxLum ** invGamma - a
xx = (yy ** invGamma - a) / float(b)
maxLUT = (maxLum ** invGamma - a) / float(b)
minLUT = (minLum ** invGamma - a) / float(b)
xx = (xx / (maxLUT - minLUT)) - minLUT
elif eq == 3:
# NB method 3 was an interpolation method that didn't work well
raise ValueError('Parameter `eq` must be one of 1, 2 or 4.')
elif eq == 4:
# this is not the same as Zhang and Pelli's inverse
# see https://www.psychopy.org/general/gamma.html for derivation
a = minLum - b ** gamma
k = (maxLum - a) ** invGamma - b
xx = (((1 - yy) * b**gamma + yy * (b + k)**gamma) ** invGamma - b) / float(k)
else:
raise ValueError('Parameter `eq` must be one of 1, 2 or 4.')
# then return to range (0:1)
# xx = xx / (maxLUT - minLUT) - minLUT
return xx
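# Editor's example (not in the original source): for eq=1 the inverse is a
# plain power law on the normalized 0-1 luminance scale, x = y ** (1/gamma).
def _exampleGammaInvFun():
    yy = np.array([0.0, 0.25, 1.0])
    return gammaInvFun(yy, minLum=0.5, maxLum=100.0, gamma=2.0, eq=1)
    # -> [0.0, 0.5, 1.0]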
def strFromDate(date):
"""Simply returns a string with a std format from a date object
"""
    if isinstance(date, float):
date = time.localtime(date)
return time.strftime("%Y_%m_%d %H:%M", date)
| 46,742 | Python | .py | 1,117 | 31.28111 | 85 | 0.580828 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,373 | calibData.py | psychopy_psychopy/psychopy/monitors/calibData.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Data useful for calibrations (Smith-Pokorny cone fundamentals etc...)
"""
import numpy
# 380 to 780 inclusive with 5nm steps
wavelength_5nm = numpy.arange(380, 785, 5)
juddVosXYZ1976_5nm = numpy.asarray([
[0.003, 0.005, 0.011, 0.021, 0.038, 0.063, 0.100, 0.158, 0.229, 0.281,
0.311, 0.331, 0.333, 0.317, 0.289, 0.260, 0.233, 0.210, 0.175, 0.133,
0.092, 0.057, 0.032, 0.015, 0.005, 0.002, 0.009, 0.029, 0.064, 0.111,
0.167, 0.228, 0.293, 0.362, 0.436, 0.515, 0.597, 0.681, 0.764, 0.844,
0.916, 0.977, 1.023, 1.051, 1.055, 1.036, 0.992, 0.929, 0.843, 0.740,
0.633, 0.534, 0.441, 0.355, 0.279, 0.215, 0.162, 0.118, 0.086, 0.063,
0.046, 0.032, 0.022, 0.016, 0.011, 0.008, 0.006, 0.004, 0.003, 0.002,
0.001, 0.001, 0.001, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000],
[0.000, 0.000, 0.001, 0.002, 0.003, 0.005, 0.007, 0.012, 0.018, 0.023,
0.027, 0.033, 0.038, 0.042, 0.047, 0.052, 0.060, 0.073, 0.091, 0.113,
0.139, 0.170, 0.208, 0.258, 0.323, 0.405, 0.503, 0.608, 0.710, 0.795,
0.862, 0.915, 0.954, 0.980, 0.995, 1.000, 0.995, 0.979, 0.952, 0.916,
0.870, 0.816, 0.757, 0.695, 0.631, 0.567, 0.503, 0.442, 0.381, 0.321,
0.265, 0.217, 0.175, 0.138, 0.107, 0.082, 0.061, 0.044, 0.032, 0.023,
0.017, 0.012, 0.008, 0.006, 0.004, 0.003, 0.002, 0.001, 0.001, 0.001,
0.001, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000],
[0.012, 0.024, 0.049, 0.095, 0.174, 0.290, 0.461, 0.732, 1.066, 1.315,
1.467, 1.580, 1.617, 1.568, 1.472, 1.374, 1.292, 1.236, 1.114, 0.942,
0.756, 0.586, 0.447, 0.341, 0.264, 0.206, 0.154, 0.109, 0.077, 0.056,
0.041, 0.029, 0.020, 0.013, 0.009, 0.006, 0.004, 0.003, 0.002, 0.002,
0.002, 0.002, 0.001, 0.001, 0.001, 0.001, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000], ])
cones_SmithPokorny = numpy.asarray([
[0.000000, 0.000000, 0.000000, 0.000000, 0.002660, 0.004380, 0.006890,
0.010800, 0.015800, 0.020000, 0.023300, 0.026800, 0.030100, 0.032400,
0.034300, 0.036800, 0.041200, 0.050200, 0.062700, 0.079800, 0.102000,
0.128000, 0.162000, 0.206000, 0.263000, 0.337000, 0.423000, 0.520000,
0.617000, 0.700000, 0.773000, 0.834000, 0.883000, 0.923000, 0.954000,
0.977000, 0.993000, 1.000000, 0.997000, 0.986000, 0.965000, 0.934000,
0.894000, 0.848000, 0.795000, 0.735000, 0.670000, 0.602000, 0.530000,
0.454000, 0.380000, 0.315000, 0.256000, 0.204000, 0.159000, 0.122000,
0.091400, 0.067000, 0.048200, 0.035000, 0.025700, 0.018000, 0.012400,
0.008660, 0.006210, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
0.000000, 0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 0.000000, 0.000000, 0.002820, 0.004750, 0.007670,
0.012400, 0.018900, 0.025400, 0.031700, 0.039500, 0.047700, 0.055500,
0.063500, 0.073100, 0.086000, 0.107000, 0.130000, 0.157000, 0.189000,
0.224000, 0.267000, 0.324000, 0.396000, 0.491000, 0.595000, 0.706000,
0.808000, 0.884000, 0.941000, 0.978000, 0.997000, 0.999000, 0.987000,
0.961000, 0.922000, 0.870000, 0.806000, 0.732000, 0.651000, 0.564000,
0.477000, 0.393000, 0.318000, 0.250000, 0.193000, 0.147000, 0.110000,
0.080800, 0.058300, 0.041800, 0.029600, 0.020700, 0.014400, 0.010100,
0.006990, 0.004850, 0.003330, 0.002330, 0.001640, 0.001110, 0.000750,
0.000517, 0.000368, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
0.000000, 0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 0.000000, 0.000000, 0.108000, 0.179000, 0.285000,
0.453000, 0.659000, 0.813000, 0.908000, 0.977000, 1.000000, 0.970000,
0.910000, 0.850000, 0.799000, 0.775000, 0.689000, 0.582000, 0.468000,
0.362000, 0.276000, 0.212000, 0.164000, 0.128000, 0.095600, 0.067600,
0.047400, 0.034700, 0.025600, 0.018200, 0.012400, 0.008260, 0.005450,
0.003650, 0.002530, 0.001840, 0.001440, 0.001260, 0.001160, 0.001000,
0.000812, 0.000741, 0.000610, 0.000479, 0.000312, 0.000240, 0.000198,
0.000132, 0.000090, 0.000068, 0.000053, 0.000038, 0.000025, 0.000019,
0.000014, 0.000010, 0.000008, 0.000005, 0.000004, 0.000003, 0.000002,
0.000001, 0.000001, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
0.000000, 0.000000, 0.000000, 0.000000], ])
| 4,934 | Python | .py | 75 | 60.626667 | 79 | 0.622323 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,374 | __init__.py | psychopy_psychopy/psychopy/monitors/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from .calibTools import *
"""# probably only need something like:
DACrange, GammaCalculator, Monitor, Photometer,
findPR650, gammaFun, gammaInvFun, getAllMonitors,
getLumSeries, getLumSeriesPR650, getRGBspectra,
makeDKL2RGB, makeLMS2RGB,
monitorFolder, pr650code,
wavelength_5nm, juddVosXYZ1976_5nm, cones_SmithPokorny
)
"""
# create a test monitor if there isn't one already
if 'testMonitor' not in getAllMonitors():
defMon = Monitor('testMonitor',
width=30,
distance=57,
gamma=1.0,
# can't always localize the notes easily;
# need app created first to import localization and
# use _translate( ) => issues
notes='default (not very useful) monitor')
defMon.setSizePix([1024, 768])
defMon.save()
| 1,114 | Python | .py | 27 | 33.222222 | 79 | 0.649446 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,375 | getLumSeries.py | psychopy_psychopy/psychopy/monitors/getLumSeries.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This function can be run as a script or imported and run as a function.
The advantage of running as a script is that this won't interact with your
existing namespace (e.g. avbin can load because scipy won't already have
been loaded).
"""
def getLumSeries(lumLevels=8,
winSize=(800, 600),
monitor=None,
gamma=1.0,
allGuns=True,
useBits=False,
autoMode='auto',
stimSize=0.3,
photometer=None):
"""Automatically measures a series of gun values and measures
the luminance with a photometer.
:Parameters:
photometer : a photometer object
e.g. a :class:`~psychopy.hardware.pr.PR65` or
:class:`~psychopy.hardware.minolta.LS100` from
hardware.findPhotometer()
lumLevels : (default=8)
array of values to test or single value for n evenly
spaced test values
gamma : (default=1.0) the gamma value at which to test
        autoMode : 'auto' or 'semi' (default='auto')
If 'auto' the program will present the screen
and automatically take a measurement before moving on.
If set to 'semi' the program will wait for a keypress before
moving on but will not attempt to make a measurement (use this
to make a measurement with your own device).
Any other value will simply move on without pausing on each
screen (use this to see that the display is performing as
expected).
"""
    from .calibTools import getLumSeries as _new_getLumSeries
    return _new_getLumSeries(lumLevels, winSize, monitor, gamma, allGuns,
                             useBits, autoMode, stimSize, photometer,
                             screen=0)
| 1,833 | Python | .py | 39 | 36.692308 | 124 | 0.632642 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,376 | signal.py | psychopy_psychopy/psychopy/voicekey/signal.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes for signals to be sent upon voice-key trip events.
"""
import threading
class _BaseVoiceKeySignal(threading.Thread):
"""Class to support sending a signal upon detection of an event.
Non-blocking unless you .join() the thread. An adjustable `delay` allows
a deferred start.
Subclass and override `signal`.
"""
def __init__(self, sec=0.010, delay=0, on=1, off=0):
super(_BaseVoiceKeySignal, self).__init__(None, 'EventSignal', None)
self.sec = sec
self.delay = delay
self.on = on
self.off = off
self.running = False
# self.daemon = True
self.id = None
def __repr__(self):
text = '<{0} instance, id={1}>'
return text.format(self.__class__.__name__, self.id)
def run(self):
self.running = True
self.signal()
self.running = False
def signal(self):
pass
def stop(self):
self.running = False
| 1,013 | Python | .py | 31 | 26.16129 | 76 | 0.607621 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,377 | demo_vks.py | psychopy_psychopy/psychopy/voicekey/demo_vks.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . signal import _BaseVoiceKeySignal
class DemoVoiceKeySignal(_BaseVoiceKeySignal):
"""Demo: print to stdout.
"""
def signal(self):
print('>> demo signal <<')
| 233 | Python | .py | 8 | 25.125 | 46 | 0.656109 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,378 | vk_tools.py | psychopy_psychopy/psychopy/voicekey/vk_tools.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Digital signal processing functions; pyo table, file, & sample conversions
"""
import os
import sys
import time
import numpy as np
from scipy.signal import butter, lfilter
try:
import pyo64 as pyo
except Exception:
import pyo
class PyoFormatException(Exception):
pass
# --- time-related helper functions --------------------------
# Ensure we have a high-resolution clock; code from PsychoPy (Sol Simpson)
if sys.platform == 'win32':
from ctypes import byref, c_int64, windll
_fcounter = c_int64()
_qpfreq = c_int64()
windll.Kernel32.QueryPerformanceFrequency(byref(_qpfreq))
_qpfreq = float(_qpfreq.value)
_winQPC = windll.Kernel32.QueryPerformanceCounter
def get_time():
"""High-precision replacement for time.time() on Windows.
"""
_winQPC(byref(_fcounter))
return _fcounter.value / _qpfreq
else:
import timeit
get_time = timeit.default_timer
MIN_SLEEP = 0.001 # used in sleep() function
def sleep(sec=0):
"""Use time.sleep with a minimum duration sleep threshold.
"""
time.sleep(max(MIN_SLEEP, sec))
# --- digital signal processing helper functions --------------------------
_butter_cache = {}
def _butter(order, band, rate=44100):
"""Cache-ing version of scipy.signal's butter().
Allows faster band-pass filtering during real-time processing.
"""
global _butter_cache
_h = hash((order, band, rate))
    if _h not in _butter_cache:
low, high = band
nyqfreq = float(rate) / 2
lowf = low / nyqfreq
highf = high / nyqfreq
_butter_cache[_h] = butter(order, (lowf, highf), btype='band')
return _butter_cache[_h]
def bandpass_pre_cache(lows=(80, 100, 120),
highs=(1200, 3000, 8000),
bands=((2000, 8000),), # content-filtered speech
rate=44100):
"""Call _butter now to cache some useful (b, a) values.
"""
for low in lows:
for high in highs:
_butter(6, (low, high), rate=rate)
for band in bands:
_butter(6, band, rate=rate)
def bandpass(data, low=80, high=1200, rate=44100, order=6):
"""Return bandpass filtered `data`.
"""
b, a = _butter(order, (low, high), rate)
return lfilter(b, a, data)
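# Editor's example (not in the original source): restrict a noisy signal to
# the default speech band; energy outside 80-1200 Hz is strongly attenuated.
def _example_bandpass():
    data = np.random.randn(44100)  # 1 s of white noise at 44.1 kHz
    return bandpass(data, low=80, high=1200, rate=44100)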
def rms(data):
"""Basic audio-power measure: root-mean-square of data.
Identical to `std` when the mean is zero; faster to compute just rms.
"""
if data.dtype == np.int16:
md2 = data.astype(float) ** 2 # int16 wrap around --> negative
else:
md2 = data ** 2
return np.sqrt(np.mean(md2))
def std(data):
"""Like rms, but also subtracts the mean (= slower).
"""
return np.std(data)
def smooth(data, win=16, tile=True):
"""Running smoothed average, via convolution over `win` window-size.
    `tile=True` pads the start and end with the first / last smoothed values;
    otherwise pad with 0.
"""
weights = np.ones(win) / win
data_c = np.convolve(data, weights)[win - 1:-(win - 1)]
if tile:
pre = np.tile(data_c[0], win // 2)
post = np.tile(data_c[-1], win // 2)
else:
pre = post = np.zeros(win // 2)
data_pre_c = np.concatenate((pre, data_c))
data_pre_c_post = np.concatenate((data_pre_c, post))
return data_pre_c_post[:len(data)]
def zero_crossings(data):
"""Return a vector of length n-1 of zero-crossings within vector `data`.
1 if the adjacent values switched sign, or
0 if they stayed the same sign.
"""
zx = np.zeros(len(data))
zx[np.where(data[:-1] * data[1:] < 0)] = 1
return zx
def tone(freq=440, sec=2, rate=44100, vol=.99):
"""Return a np.array suitable for use as a tone (pure sine wave).
"""
samples = sec * rate
time_steps = np.arange(0., 1., 1. / samples)
scaling = 2 * np.pi * freq * sec
return np.sin(time_steps * scaling) * vol
def apodize(data, ms=5, rate=44100):
"""Apply a Hanning window (5ms) to reduce a sound's 'click' onset / offset.
"""
hw_size = int(min(rate // (1000 / ms), len(data) // 15))
hanning_window = np.hanning(2 * hw_size + 1)
data[:hw_size] *= hanning_window[:hw_size]
data[-hw_size:] *= hanning_window[-hw_size:]
return data
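# Editor's example (not in the original source): build a click-free 440 Hz
# beep by generating a pure tone, then softening its onset and offset.
def _example_beep():
    beep = tone(freq=440, sec=0.5, rate=44100, vol=0.8)
    return apodize(beep, ms=5, rate=44100)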
# --- pyo helper functions ------------------------------------------------
# format codes for _get_pyo_codes():
pyo_formats = {'wav': 0, 'aif': 1, 'aiff': 1, 'au': 2, 'raw': 3,
'sd2': 4, 'flac': 5, 'caf': 6, 'ogg': 7}
pyo_dtype = {'int16': 0, 'int24': 1, 'int32': 2, 'float32': 3,
'float64': 4, 'U-Law': 5, 'A-Law': 6}
def _get_pyo_codes(fmt='', dtype='int16', file_out=''):
"""Convert file and data formats to int codes, e.g., wav int16 -> (0, 0).
"""
if not fmt:
dot_ext = os.path.splitext(file_out)[1]
fmt = dot_ext.lower().strip('.')
if fmt in pyo_formats:
file_fmt = pyo_formats[fmt]
else:
        msg = 'file format of `{0}` not supported'.format(file_out)
raise PyoFormatException(msg)
if fmt in ['sd2', 'flac']:
ok_dfmt = {'int16': 0, 'int24': 1}
else:
ok_dfmt = pyo_dtype
if dtype in ok_dfmt:
data_fmt = pyo_dtype[dtype]
else:
msg = 'data format `{0}` not supported for `{1}`'.format(
dtype, file_out)
raise PyoFormatException(msg)
return file_fmt, data_fmt
def samples_from_table(table, start=0, stop=-1, rate=44100):
"""Return samples as a np.array read from a pyo table.
A (start, stop) selection in seconds may require a non-default rate.
"""
samples = np.array(table.getTable())
if (start, stop) != (0, -1):
if stop > start:
samples = samples[start * rate:stop * rate]
elif start:
samples = samples[start * rate:]
return samples
def table_from_samples(samples, start=0, stop=-1, rate=44100):
"""Return a pyo DataTable constructed from samples.
A (start, stop) selection in seconds may require a non-default rate.
"""
    if isinstance(samples, np.ndarray):
        samples = samples.tolist()
    if not isinstance(samples, list):
        raise TypeError('samples should be a list or np.array')
if (start, stop) != (0, -1):
if stop > start:
samples = samples[start * rate:stop * rate]
elif start:
samples = samples[start * rate:]
table = pyo.DataTable(size=len(samples), init=samples)
return table
def table_from_file(file_in, start=0, stop=-1):
"""Read data from files, any pyo format, returns (rate, pyo SndTable)
"""
table = pyo.SndTable()
try:
table.setSound(file_in, start=start, stop=stop)
except TypeError:
msg = 'bad file `{0}`, or format not supported'.format(file_in)
raise PyoFormatException(msg)
rate = pyo.sndinfo(file_in)[2]
return rate, table
def samples_from_file(file_in, start=0, stop=-1):
"""Read data from files, returns tuple (rate, np.array(.float64))
"""
if not os.path.isfile(file_in):
raise IOError('no such file `{0}`'.format(file_in))
rate, table = table_from_file(file_in, start=start, stop=stop)
return rate, np.array(table.getTable())
def samples_to_file(samples, rate, file_out, fmt='', dtype='int16'):
"""Write data to file, using requested format or infer from file .ext.
Only integer `rate` values are supported.
See http://ajaxsoundstudio.com/pyodoc/api/functions/sndfile.html
"""
file_fmt, data_fmt = _get_pyo_codes(fmt, dtype, file_out)
    if isinstance(samples, np.ndarray):
        samples = samples.tolist()
    if not isinstance(samples, list):
        raise TypeError('samples should be a list or np.array')
try:
pyo.savefile(samples, path=file_out, sr=int(rate), channels=1,
fileformat=file_fmt, sampletype=data_fmt)
except Exception:
msg = 'could not save `{0}`; permissions or other issue?'
raise IOError(msg.format(file_out))
def table_to_file(table, file_out, fmt='', dtype='int16'):
"""Write data to file, using requested format or infer from file .ext.
"""
file_fmt, data_fmt = _get_pyo_codes(fmt, dtype, file_out)
try:
pyo.savefileFromTable(table=table, path=file_out,
fileformat=file_fmt, sampletype=data_fmt)
except Exception:
msg = 'could not save `{0}`; permissions or other issue?'
raise IOError(msg.format(file_out))
| 8,483 | Python | .py | 216 | 33.125 | 79 | 0.616604 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,379 | __init__.py | psychopy_psychopy/psychopy/voicekey/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""voicekey: A toolkit for programming virtual voice-keys.
Copyright (c) Jeremy R. Gray, 2015
License: Distributed under the terms of the GPLv3
Dev status: beta. Can work well in some circumstances, not widely tested.
_BaseVoiceKey is the main abstract class. Subclass and override the detect()
method. See SimpleThresholdVoiceKey or OnsetVoiceKey for examples.
"""
__version__ = 0.5
# pyo: see http://ajaxsoundstudio.com/pyodoc
try:
import pyo64 as pyo
have_pyo64 = True
except Exception:
import pyo
have_pyo64 = False
# pyo_server will point to a booted pyo server once pyo_init() is called:
pyo_server = None
# helper functions for time, signal processing, and file I/O:
from . vk_tools import *
# Constants:
T_BASELINE_PERIOD = 0.200 # sec; time assumed not to contain any speech
T_BASELINE_ON = 0.035 # sec; self.baseline is between T_BASELINE_ON ..OFF
T_BASELINE_OFF = 0.180 # sec
TOO_LOUD = 0.01
TOO_QUIET = 10 ** -7
RATE = 44100 # default sampling rate
# max recording time 30 minutes; longer is ok but not tested, lots of lag:
MAX_RECORDING_SEC = 1800
class VoiceKeyException(Exception):
pass
class _BaseVoiceKey:
"""Abstract base class for virtual voice-keys.
Accepts data as real-time input (from a microphone by default) or off-line
(if `file_in` is a valid file).
Over-ride detect() and other methods as needed. See examples.
"""
def __init__(self, sec=0, file_out='', file_in='', **config):
"""
:Parameters:
sec:
duration to record in seconds
file_out:
name for output filename (for microphone input)
file_in:
name of input file for sound source (not microphone)
config: kwargs dict of parameters for configuration. defaults are:
'msPerChunk': 2; duration of each real-time analysis chunk, in ms
'signaler': default None
'autosave': True; False means manual saving to a file is still
possible (by calling .save() but not called automatically upon
stopping
'chnl_in' : microphone channel;
see psychopy.sound.backend.get_input_devices()
'chnl_out': not implemented; output device to use
'start': 0, select section from a file based on (start, stop) time
'stop': -1, end of file (default)
'vol': 0.99, volume 0..1
'low': 100, Hz, low end of bandpass; can vary for M/F speakers
'high': 3000, Hz, high end of bandpass
'threshold': 10
'baseline': 0; 0 = auto-detect; give a non-zero value to use that
'more_processing': True; compute more stats per chunk including
bandpass; try False if 32-bit python can't keep up
'zero_crossings': True
"""
if not (pyo_server and pyo_server.getIsBooted() and
pyo_server.getIsStarted()):
msg = 'Need a running pyo server: call voicekey.pyo_init()'
raise VoiceKeyException(msg)
self.rate = pyo_server.getSamplingRate() # pyo_init enforces 16000+ Hz
self.sec = float(sec)
if self.sec > MAX_RECORDING_SEC:
msg = 'for recording, time in seconds cannot be longer than {0}'
raise VoiceKeyException(msg.format(MAX_RECORDING_SEC))
# detect whether given a numpy array directly
# TO-DO: self.array_in handling needs code review
source = file_in
self.array_in = []
        if isinstance(source, np.ndarray):
self.array_in = source
file_in = '<array len={0}>'.format(len(source))
self.file_in, self.file_out = file_in, file_out
# Configuration defaults:
self.config = {'msPerChunk': 2,
'signaler': None,
'autosave': True,
'chnl_in': 0, # pyo.pa_get_default_input()
# 'chnl_out': 2, # pyo.pa_get_default_output() no go
'start': 0,
'stop': -1,
'vol': 0.99,
'low': 100,
'high': 3000,
'threshold': 10,
'baseline': 0,
'more_processing': True,
'zero_crossings': True}
self.config.update(config)
self.baseline = self.config['baseline']
self.bad_baseline = False
self.stopped = False
self.msPerChunk = float(self.config['msPerChunk'])
if not 0.65 <= self.msPerChunk <= 32:
msg = 'msPerChunk should be 0.65 to 32; suggested = 2'
raise ValueError(msg)
self._set_source()
self._set_defaults()
self._set_signaler()
self._set_tables()
def _set_source(self):
"""Data source: file_in, array, or microphone
"""
if os.path.isfile(self.file_in):
_rate, self._sndTable = table_from_file(self.file_in,
start=self.config['start'],
stop=self.config['stop'])
if _rate != self.rate:
print('file sample rate differs from the voice-key rate.')
self._source = pyo.TableRead(self._sndTable,
freq=self._sndTable.getRate(),
mul=self.config['vol'])
self.sec = self._sndTable.getDur()
elif len(self.array_in):
self._sndTable = table_from_samples(self.array_in,
start=self.config['start'],
stop=self.config['stop'],
rate=self.rate)
self._source = pyo.TableRead(self._sndTable,
freq=self._sndTable.getRate(),
mul=self.config['vol'])
self.sec = self._sndTable.size / self.rate
else:
# fall through to source = microphone
ch = self.config['chnl_in']
self._source = pyo.Input(chnl=ch, mul=self.config['vol'])
def _set_defaults(self):
"""Set remaining defaults, initialize lists to hold summary stats
"""
# adjust self.sec based on start, stop times:
if (self.config['start'], self.config['stop']) != (0, -1):
if self.config['stop'] > self.config['start']:
self.sec = self.config['stop'] - self.config['start']
elif self.config['start'] <= self.sec:
self.sec = self.sec - self.config['start']
self.chunks = int(self.sec * 1000. / self.msPerChunk) # ideal no slip
# total chunk count and current-chunk index:
self.count = 0
self.filename = self.file_out or 'rec.wav'
self.filesize = None
# timing data for diagnostics
self.elapsed = 0
self.t_enter = [] # time at chunk entry
self.t_exit = [] # time at chunk exit
self.t_proc = [] # proportion of chunk-time spent doing _do_chunk
# data cache:
self.data = [] # raw unprocessed data, in chunks
self.power = []
self.power_bp = []
self.power_above = []
self.zcross = []
self.max_bp = 0
self.max_bp_chunk = None
bandpass_pre_cache(rate=self.rate) # for faster bandpass filtering
# default event parameters:
self.event_detected = False
self.event_lag = 0 # lag required to detect the event prior to trip()
self.event_time = 0 # becomes time of detected event = time at trip()
self.event_onset = 0 # best estimate of the onset of the event
def _set_signaler(self):
"""Set the signaler to be called by trip()
"""
if not self.config['signaler']:
self.config['signaler'] = None # _BaseVoiceKeySignal()
self.event_signaler = self.config['signaler']
def _set_tables(self):
"""Set up the pyo tables (allocate memory, etc).
One source -> three pyo tables: chunk=short, whole=all, baseline.
triggers fill tables from self._source; make triggers in .start()
"""
sec_per_chunk = self.msPerChunk / 1000.
self._chunktable = pyo.NewTable(length=sec_per_chunk)
self._wholetable = pyo.NewTable(length=self.sec)
if self.baseline < TOO_QUIET:
self._baselinetable = pyo.NewTable(length=T_BASELINE_OFF)
def _set_baseline(self):
"""Set self.baseline = rms(silent period) using _baselinetable data.
Called automatically (via pyo trigger) when the baseline table
is full. This is better than using chunks (which have gaps between
them) or the whole table (which can be very large = slow to work
with).
"""
data = np.array(self._baselinetable.getTable())
tstart = int(T_BASELINE_ON * self.rate)
segment_power = rms(data[tstart:])
# Look for bad baseline period:
if self.baseline > TOO_LOUD:
self.bad_baseline = True
# Dubiously quiet is bad too:
if segment_power < TOO_QUIET:
self.stop()
msg = ('Baseline period is TOO quiet\nwrong input '
'channel selected? device-related initial delay?')
raise ValueError(msg)
self.baseline = max(segment_power, 1)
def _process(self, chunk):
"""Calculate and store basic stats about the current chunk.
This gets called every chunk -- keep it efficient, esp 32-bit python
"""
# band-pass filtering:
if self.config['more_processing']:
bp_chunk = bandpass(chunk, self.config['low'],
self.config['high'], self.rate)
else:
bp_chunk = chunk
# loudness after bandpass filtering:
self.power_bp.append(rms(bp_chunk))
_mx = max(bp_chunk)
if _mx > self.max_bp:
self.max_bp = _mx
self.max_bp_chunk = self.count # chunk containing the max
if self.config['more_processing']:
# more bandpass
bp3k_chunk = bandpass(chunk, self.config['low'], 3000, self.rate)
bp8k_chunk = bandpass(chunk, self.config['low'], 8000, self.rate)
# "content filtered speech" (~ affect only):
bp2k8k_chunk = bandpass(chunk, 2000, 8000, self.rate)
# basic loudness:
self.power.append(rms(chunk))
# above a threshold or not:
above_01 = int(self.power[self.count] > self.config['threshold'])
self.power_above.append(above_01)
if self.config['zero_crossings']:
# zero-crossings per ms:
zx = zero_crossings(bp_chunk)
self.zcross.append(np.sum(zx) / self.msPerChunk)
def detect(self):
"""Override to define a detection algorithm.
if condition:
self.trip()
See SimpleThresholdVoiceKey for a minimal usage example, or
VoicelessPlosiveVoiceKey for a more involved one.
"""
raise NotImplementedError('override; see SimpleThresholdVoiceKey')
def trip(self):
"""Trip the voice-key; does not stop recording.
"""
# calls .start() on the event-signaler thread. Only `detect()` should
# call `trip()`. Customize `.detect()` rather than the logic here.
self.event_detected = True
self.event_time = self.elapsed
if hasattr(self, 'event_signaler') and self.event_signaler:
self.event_signaler.start()
def _do_chunk(self):
"""Core function to handle a chunk (= a few ms) of input.
There can be small temporal gaps between or within chunks, i.e.,
`slippage`. Adjust several parameters until this is small: msPerChunk,
and what processing is done within ._process().
A trigger (`_chunktrig`) signals that `_chunktable` has been filled
and has set `_do_chunk` as the function to call upon triggering.
`.play()` the trigger again to start recording the next chunk.
"""
if self.stopped:
return
self.t_enter.append(get_time())
self.elapsed = self.t_enter[-1] - self.t_enter[0]
self.t_baseline_has_elapsed = bool(self.elapsed > T_BASELINE_PERIOD)
# Get the table content as np.array
chunk = np.asarray(self._chunktable.getTable())
chunk = np.int16(chunk * 2 ** 15)
self.data.append(chunk)
# Calc basic stats, then use to detect features
self._process(chunk)
self.detect() # conditionally call trip()
# Trigger a new chunk recording, or stop if stopped or time is up:
t_end = get_time()
if t_end - self.t_enter[0] < self.sec:
if not self.stopped:
self._chunktrig.play() # *** triggers the next chunk ***
self.count += 1
else:
self.stop()
self.t_exit.append(t_end)
def start(self, silent=False):
"""Start reading and processing audio data from a file or microphone.
"""
if self.stopped:
raise VoiceKeyException('cannot start a stopped recording')
self.t_start = get_time()
# triggers: fill tables, call _do_chunk & _set_baseline:
self._chunktrig = pyo.Trig()
self._chunkrec = pyo.TrigTableRec(self._source, self._chunktrig,
self._chunktable)
self._chunklooper = pyo.TrigFunc(self._chunkrec["trig"],
self._do_chunk)
self._wholetrig = pyo.Trig()
self._wholerec = pyo.TrigTableRec(self._source, self._wholetrig,
self._wholetable)
self._wholestopper = pyo.TrigFunc(self._wholerec["trig"], self.stop)
# skip if a baseline value was given in config:
if not self.baseline:
self._baselinetrig = pyo.Trig()
self._baselinerec = pyo.TrigTableRec(self._source,
self._baselinetrig,
self._baselinetable)
self._calc_baseline = pyo.TrigFunc(self._baselinerec["trig"],
self._set_baseline)
# send _source to sound-output (speakers etc) as well:
if self.file_in and not silent:
self._source.out()
# start calling self._do_chunk by flipping its trigger;
# _do_chunk then triggers itself via _chunktrigger until done:
self._chunktrig.play()
self._wholetrig.play()
self._baselinetrig.play()
return self
@property
def slippage(self):
"""Diagnostic: Ratio of the actual (elapsed) time to the ideal time.
Ideal ratio = 1 = sample-perfect acquisition of msPerChunk, without
any gaps between or within chunks. 1. / slippage is the proportion of
samples contributing to chunk stats.
"""
if len(self.t_enter) > 1:
diffs = np.array(self.t_enter[1:]) - np.array(self.t_enter[:-1])
ratio = np.mean(diffs) * 1000. / self.msPerChunk
else:
ratio = 0
return ratio
@property
def started(self):
"""Boolean property, whether `.start()` has been called.
"""
return bool(hasattr(self, '_chunklooper')) # .start() has been called
def stop(self):
"""Stop a voice-key in progress.
Ends and saves the recording if using microphone input.
"""
# Will be stopped at self.count (= the chunk index), but that is less
# reliable than self.elapsed due to any slippage.
if self.stopped:
return
self.stopped = True
self.t_stop = get_time()
self._source.stop()
self._chunktrig.stop()
self._wholetrig.stop()
if self.config['autosave']:
self.save()
# Calc the proportion of the available time spent doing _do_chunk:
for ch in range(len(self.t_exit)):
t_diff = self.t_exit[ch] - self.t_enter[ch]
self.t_proc.append(t_diff * 1000 / self.msPerChunk)
def join(self, sec=None):
"""Sleep for `sec` or until end-of-input, and then call stop().
"""
sleep(sec or self.sec - self.elapsed)
self.stop()
def wait_for_event(self, plus=0):
"""Start, join, and wait until the voice-key trips, or it times out.
Optionally wait for some extra time, `plus`, before calling `stop()`.
"""
if not self.started:
self.start()
while not self.event_time and not self.stopped:
sleep(self.msPerChunk / 1000.)
if not self.stopped:
naptime = min(plus, self.sec - self.elapsed) # approx...
if naptime > 0:
sleep(naptime)
self.stop()
# next sleep() helps avoid pyo error:
# "ReferenceError: weakly-referenced object no longer exists"
sleep(1.5 * self.msPerChunk / 1000.)
return self.elapsed
def save(self, ftype='', dtype='int16'):
"""Save new data to file, return the size of the saved file (or None).
The file format is inferred from the filename extension, e.g., `flac`.
This will be overridden by the `ftype` if one is provided; defaults to
`wav` if nothing else seems reasonable. The optional `dtype` (e.g.,
`int16`) can be any of the sample types supported by `pyo`.
"""
if self.file_in or not self.count:
return
self.save_fmt = os.path.splitext(self.filename)[1].lstrip('.')
fmt = ftype or self.save_fmt or 'wav'
if not self.filename.endswith('.' + fmt):
self.filename += '.' + fmt
# Save the recording (continuous, non-chunked):
end_index = int(self.elapsed * self.rate) # ~samples
if end_index < self._wholetable.size:
dataf = np.asarray(self._wholetable.getTable()[:end_index])
samples_to_file(dataf, self.rate, self.filename,
fmt=fmt, dtype=dtype)
self.sec = pyo.sndinfo(self.filename)[1]
else:
table_to_file(self._wholetable, self.filename,
fmt=fmt, dtype=dtype)
self.filesize = os.path.getsize(self.filename)
return self.filesize
class SimpleThresholdVoiceKey(_BaseVoiceKey):
"""Class for simple threshold voice key (loudness-based onset detection).
The "hello world" of voice-keys.
"""
def detect(self):
"""Trip if the current chunk's audio power > 10 * baseline loudness.
"""
if self.event_detected or not self.baseline:
return
current = self.power[-1]
threshold = 10 * self.baseline
if current > threshold:
self.trip()
class OnsetVoiceKey(_BaseVoiceKey):
"""Class for speech onset detection.
Uses bandpass-filtered signal (100-3000Hz). When the voice key trips,
the best voice-onset RT estimate is saved as `self.event_onset`, in sec.
"""
def detect(self):
"""Trip if recent audio power is greater than the baseline.
"""
if self.event_detected or not self.baseline:
return
window = 5 # recent hold duration window, in chunks
threshold = 10 * self.baseline
conditions = all([x > threshold for x in self.power_bp[-window:]])
if conditions:
self.event_lag = window * self.msPerChunk / 1000.
self.event_onset = self.elapsed - self.event_lag
self.trip()
self.event_time = self.event_onset
class OffsetVoiceKey(_BaseVoiceKey):
"""Class to detect the offset of a single-word utterance.
"""
def __init__(self, sec=10, file_out='', file_in='', delay=0.3, **kwargs):
"""Record and ends the recording after speech offset. When the voice
key trips, the best voice-offset RT estimate is saved as
`self.event_offset`, in seconds.
:Parameters:
`sec`: duration of recording in the absence of speech or
other sounds.
`delay`: extra time to record after speech offset, default 0.3s.
The same methods are available as for class OnsetVoiceKey.
"""
config = {'sec': sec,
'file_out': file_out,
'file_in': file_in,
'delay': delay}
kwargs.update(config)
super(OffsetVoiceKey, self).__init__(**kwargs)
def detect(self):
"""Listen for onset, offset, delay, then end the recording.
"""
if self.event_detected or not self.baseline:
return
if not self.event_onset:
window = 5 # chunks
threshold = 10 * self.baseline
conditions = all([x > threshold for x in self.power_bp[-window:]])
if conditions:
self.event_lag = window * self.msPerChunk / 1000.
self.event_onset = self.elapsed - self.event_lag
self.event_offset = 0
elif not self.event_offset:
window = 25
threshold = 10 * self.baseline
# segment = np.array(self.power_bp[-window:])
conditions = all([x < threshold for x in self.power_bp[-window:]])
# conditions = np.all(segment < threshold)
if conditions:
self.event_lag = window * self.msPerChunk / 1000.
self.event_offset = self.elapsed - self.event_lag
self.event_time = self.event_offset # for plotting
elif self.elapsed > self.event_offset + self.config['delay']:
self.trip()
self.stop()
# ----- Convenience classes -------------------------------------------------
class Recorder(_BaseVoiceKey):
"""Convenience class: microphone input only (no real-time analysis).
Using `record()` is like `.join()`: it will block execution. But it will
also try to save the recording automatically even if interrupted (whereas
`.start().join()` will not do so). This might be especially useful when
making long recordings.
"""
def __init__(self, sec=2, filename='rec.wav'):
super(Recorder, self).__init__(sec, file_out=filename)
# def _set_defaults(self):
# pass
def __del__(self):
if hasattr(self, 'filename') and not os.path.isfile(self.filename):
self.save()
def _set_baseline(self):
pass
def detect(self):
pass
def _process(self, *args, **kwargs):
pass
def record(self, sec=None):
try:
self.start().join(sec)
except Exception:
self.save()
raise
class Player(_BaseVoiceKey):
"""Convenience class: sound output only (no real-time analysis).
"""
def __init__(self, sec=None, source='rec.wav',
start=0, stop=-1, rate=44100):
        if isinstance(source, np.ndarray):
sec = len(source) / rate
elif os.path.isfile(source):
sec = pyo.sndinfo(source)[1]
config = {'start': start,
'stop': stop}
super(Player, self).__init__(sec, file_in=source, **config)
# def _set_defaults(self): # ideally override but need more refactoring
# pass
def _set_baseline(self):
pass
def detect(self):
pass
def _process(self, *args, **kwargs):
pass
def play(self, sec=None):
self.start().join(sec)
# ----- pyo initialization (essential) -------------------------------------
def pyo_init(rate=44100, nchnls=1, buffersize=32, duplex=1):
"""Start and boot a global pyo server, restarting if needed.
"""
global pyo_server
if rate < 16000:
raise ValueError('sample rate must be 16000 or higher')
# re-init
if hasattr(pyo_server, 'shutdown'):
pyo_server.stop()
sleep(0.25) # make sure enough time passes for the server to shutdown
pyo_server.shutdown()
sleep(0.25)
pyo_server.reinit(sr=rate, nchnls=nchnls,
buffersize=buffersize, duplex=duplex)
else:
pyo_server = pyo.Server(sr=rate,
nchnls=nchnls, # 1 = mono
buffersize=buffersize, # ideal = 64 or higher
duplex=duplex) # 1 = input + output
pyo_server.boot().start()
# avoid mac issue of losing first 0.5s if no sound played for ~1 minute:
if sys.platform == 'darwin':
z2 = np.zeros(2)
_sndTable = pyo.DataTable(size=2, init=z2.T.tolist(), chnls=nchnls)
_snd = pyo.TableRead(_sndTable, freq=rate, mul=0)
_snd.play()
time.sleep(0.510)
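# Editor's example (not in the original source; a hedged sketch): minimal
# end-to-end use of this module -- boot the audio server, then record from
# the microphone until speech onset (or a 5 s timeout).
def _example_onset_voicekey():
    pyo_init(rate=44100)
    vk = OnsetVoiceKey(sec=5, file_out='rec.wav')
    vk.wait_for_event(plus=0.3)  # blocks until trip() or end of recording
    return vk.event_onset        # best estimate of voice onset, in seconds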
| 25,258 | Python | .py | 554 | 34.377256 | 82 | 0.576585 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,380 | labjack_vks.py | psychopy_psychopy/psychopy/voicekey/labjack_vks.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Class for voicekey trip signaling via Labjack device
"""
from . signal import _BaseVoiceKeySignal
from . vk_tools import get_time, sleep
try:
from labjack.u3 import U3
labjack_U3 = U3
have_labjack = 'from labjack'
# ImportError: # other errors too, like labjack.u12.U12Exception (??)
except Exception:
try:
from psychopy.hardware import labjacks
labjack_U3 = labjacks.u3.U3
have_labjack = 'from psychopy.hardware.labjacks'
except Exception: # ImportError:
have_labjack = False
class LabJackU3VoiceKeySignal(_BaseVoiceKeySignal):
"""Class for using a LabJack U3 device as a signal channel.
"""
def __init__(self, sec=0.010, delay=0, on=1, off=0, address=6008):
if not have_labjack:
raise ImportError("could not import labjack (for LabJack u3)")
super(LabJackU3VoiceKeySignal, self).__init__(sec, delay, on, off)
self.device = labjack_U3()
self.address = address
self.id = address
def signal(self):
"""Send a signal at the desired time.
After an optional `delay`, set a port to `on` for `sec`, then to `off`.
A delay is not recommended unless your system's time.sleep() function
        has ms-level precision (yes for Mac or Linux, typically not for Windows).
"""
if self.delay:
sleep(self.delay)
t0 = get_time()
self.device.writeRegister(self.address, self.on)
# check time and self.running:
while self.running and get_time() - t0 < self.sec:
sleep(0.001)
self.device.writeRegister(self.address, self.off)
| 1,684 | Python | .py | 42 | 33.380952 | 79 | 0.654835 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,381 | parallel_vks.py | psychopy_psychopy/psychopy/voicekey/parallel_vks.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Completely untested. Doesn't work at all on Mac (no parallel).
"""
from . signal import _BaseVoiceKeySignal
from . vk_tools import get_time, sleep
try:
from psychopy import parallel
have_parallel = True
except Exception: # ImportError:
have_parallel = False
class ParallelVoiceKeySignal(_BaseVoiceKeySignal):
"""Class for using PsychoPy parallel as a signal channel.
"""
def __init__(self, sec=0.010, delay=0, on=1, off=0, address=0x378, pin=2):
if not have_parallel:
raise ImportError("could not import parallel (for signaler)")
super(ParallelVoiceKeySignal, self).__init__(sec, delay, on, off)
self.id = '({0}, pin={1})'.format(hex(address), pin)
self.address = address
self.pin = pin # 2-9
self.port = parallel.ParallelPort(address)
def signal(self):
"""Send a signal at the desired time.
After an optional `delay`, set a pin to `on` for `sec`, then to `off`.
A delay is not recommended unless your system's time.sleep() function
        has ms-level precision (yes for Mac or Linux, typically not for Windows).
"""
if self.delay:
sleep(self.delay)
t0 = get_time()
self.port.setData(self.on)
# check time and self.running:
while self.running and get_time() - t0 < self.sec:
sleep(0.001)
self.port.setData(self.off)
| 1,464 | Python | .py | 36 | 33.888889 | 78 | 0.640141 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,382 | stringtools.py | psychopy_psychopy/psychopy/tools/stringtools.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Various string functions for working with strings.
#
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import re
import ast
# Regex for identifying a valid Pavlovia project name
import urllib
from pathlib import Path
valid_proj_name = re.compile(r'(\w|-)+')
def is_url(source):
"""
Check whether a string is a valid url.
"""
# Make sure source is a string
source = str(source)
# Try to parse source, return False if it fails
try:
url = urllib.parse.urlparse(source)
except ValueError:
return False
# If parsed successfully, return True if we have a scheme and net location
return all((url.scheme, url.netloc))
def is_file(source):
"""
Check whether a string or Path object is a valid file.
"""
# If source is already a Path, just use its is_file method
if isinstance(source, Path):
return source.is_file()
# Make sure source is a string
source = str(source)
# Try to create a Path object, return False if it fails
try:
path = Path(source)
except ValueError:
return False
# If creates successfully, return True if is_file
try:
isFile = path.is_file()
except OSError:
isFile = False
return isFile
class CaseSwitcher:
"""
Collection of static methods for switching case in strings. Can currently convert between:
    - camelCase
    - PascalCase
    - Title Case
    - snake_case
"""
@staticmethod
def camel2pascal(value):
"""
Convert from camelCase to PascalCase
"""
# capitalise first letter
value = value[0].upper() + value[1:]
return value
@staticmethod
def camel2title(value):
"""
Convert from camelCase to Title Case
"""
# convert to pascal
value = CaseSwitcher.camel2pascal(value)
# convert to title
value = CaseSwitcher.pascal2title(value)
return value
@staticmethod
def camel2snake(value):
"""
Convert from camelCase to snake_case
"""
# convert to title
value = CaseSwitcher.camel2title(value)
# convert to snake
value = CaseSwitcher.title2snake(value)
return value
@staticmethod
def pascal2camel(value):
"""
Convert from PascalCase to camelCase
"""
# decapitalise first letter
value = value[0].lower() + value[1:]
return value
@staticmethod
def pascal2title(value):
"""
Convert from PascalCase to Title Case
"""
def _titleize(match):
"""
            Replace a regex match for a lowercase letter followed by an
            uppercase letter with the same two letters separated by a space.
"""
# get matching text (should be a lower case letter then an upper case letter)
txt = match[0]
# add a space
txt = txt[0] + " " + txt[1]
return txt
# make regex substitution
value = re.sub(
pattern=r"([a-z][A-Z])",
repl=_titleize,
string=value
)
return value
@staticmethod
def pascal2snake(value):
"""
Convert from PascalCase to snake_case
"""
# convert to title
value = CaseSwitcher.pascal2title(value)
# convert to snake
value = CaseSwitcher.title2snake(value)
return value
@staticmethod
def title2camel(value):
"""
Convert from Title Case to camelCase
"""
# convert to pascal
value = CaseSwitcher.title2pascal(value)
# convert to camel
value = CaseSwitcher.pascal2camel(value)
return value
@staticmethod
def title2pascal(value):
"""
Convert from Title Case to PascalCase
"""
# remove spaces
value = value.replace(" ", "")
return value
@staticmethod
def title2snake(value):
"""
Convert from Title Case to snake_case
"""
# lowercase
value = value.lower()
# replace spaces with underscores
value = value.replace(" ", "_")
return value
@staticmethod
def snake2camel(value):
"""
Convert from snake_case to camelCase
"""
# convert to pascal
value = CaseSwitcher.snake2pascal(value)
# convert to camel
value = CaseSwitcher.pascal2camel(value)
return value
@staticmethod
def snake2pascal(value):
"""
Convert from snake_case to PascalCase
"""
# convert to title
value = CaseSwitcher.snake2title(value)
# convert to pascal
value = CaseSwitcher.title2pascal(value)
return value
@staticmethod
def snake2title(value):
"""
Convert from snake_case to Title Case
"""
def _titleize(match):
"""
            Replace a regex match for an underscore followed by a lowercase
            letter with a space and the letter capitalised.
            """
            # get matching text (should be an underscore then a lowercase letter)
txt = match[0]
# add a space and capitalise
txt = " " + txt[1].upper()
return txt
# make regex substitution
value = re.sub(
pattern=r"(_[a-z])",
repl=_titleize,
string=value
)
# capitalise first letter
value = value[0].upper() + value[1:]
return value
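# Editor's example (not in the original source): chaining the converters.
def _exampleCaseSwitcher():
    assert CaseSwitcher.camel2title("myStimName") == "My Stim Name"
    assert CaseSwitcher.title2snake("My Stim Name") == "my_stim_name"
    assert CaseSwitcher.snake2pascal("my_stim_name") == "MyStimName"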
def wrap(value, chars, delim=r"\s|\-"):
"""
Wrap a string at a number of characters.
Parameters
----------
value : str
String to wrap
chars : int
Number of characters to split at
    delim : str
        Regex delimiter to split words at; default is a space or a hyphen (r"\s|\-")
Returns
-------
str
Wrapped string
"""
newValue = ""
letter = 0
# iterate through each word
for n, word in enumerate(re.split(pattern=r"(" + delim + r")", string=value)):
# count its letters
letter += len(word)
# split word if it's very long
if len(word) > chars:
word = word[:chars] + "-\n" + word[chars:]
# if this brings the current letters this line to more than the wrap limit, insert a line break
if letter > chars and n > 0 and not re.match(pattern=delim, string=word):
newValue += "\n"
letter = len(word)
# insert word
newValue += word
return newValue
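# Editor's example (not in the original source): wrapping at 10 characters
# breaks at the last space/hyphen before the limit.
def _exampleWrap():
    return wrap("a fairly long label", 10)  # -> "a fairly \nlong label"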
def makeValidVarName(name, case="camel"):
"""
Transform a string into a valid variable name
Parameters
----------
name : str
Original name to be transformed
case : str
Case style for variable name to be in. Options are:
upper: UPPERCASE
title: TitleCase
camel: camelCase
snake: snake_case
lower: lowercase
"""
# Mark which underscores which need preserving
private = name.startswith("_")
protected = name.startswith("__")
core = name.endswith("__")
# Replace all different wordbreaks with _
for wb in (" ", ".", ","):
name = name.replace(wb, "_")
# Insert a _ between lower/upper pairs and char/number pairs
lastChar = ""
processed = ""
for c in name:
# Insert a _ if...
if any([
lastChar.islower() and c.isupper(), # previous char was lower and this is upper
lastChar.isnumeric() and c.isalpha(), # previous char was a number and this is a letter
lastChar.isalpha() and c.isnumeric(), # previous char was a letter and this is a number
]):
processed += "_"
# Append char
processed += c
# Store last char
lastChar = c
name = processed
# Remove non-word characters
processed = ""
for c in name:
if c.isidentifier() or c.isdecimal():
processed += c
else:
processed += "_"
name = processed
# Split by underscore
name = name.split("_")
name = [word for word in name if len(word)]
# Remove numbers from start
while name[0].isnumeric():
name = name[1:]
# Process each word
processed = []
for i, word in enumerate(name):
# Handle case
word = word.lower()
if case in ("upper"):
word = word.upper()
if case in ("title", "camel"):
if case == "camel" and i == 0:
word = word.lower()
else:
word = word.title()
if case in ("snake", "lower"):
word = word.lower()
# Append word
processed.append(word)
name = processed
# Recombine
if case == "snake":
name = "_".join(name)
else:
name = "".join(name)
# Add special underscores
if private:
# If private, prepend _
name = "_" + name
if protected:
# If also protected, prepend another _
name = "_" + name
if core:
# If styled like a core variable (e.g. __file__), append __
name = name + "__"
return name
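# Editor's example (not in the original source): illegal characters become
# word breaks, leading digits are dropped, and the words are recased.
def _exampleMakeValidVarName():
    return makeValidVarName("my stim (2)", case="camel")  # -> "myStim2"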
def _actualizeAstValue(item):
"""
Convert an AST value node to a usable Python object
"""
if isinstance(item, ast.Str):
# Handle ast string
return item.s
elif hasattr(ast, 'Bytes') and isinstance(item, ast.Bytes):
# Handle ast bytes
return item.s
elif isinstance(item, ast.Num):
# Handle ast numbers
return item.n
elif isinstance(item, ast.Tuple):
# Handle ast array
return tuple(_actualizeAstValue(i) for i in item.elts)
def getVariables(code):
"""
Use AST tree parsing to convert a string of valid Python code to a dict containing each variable created and its
value.
"""
    assert isinstance(code, str), "First input to `getVariables()` must be a string"
# Make blank output dict
vars = {}
# Construct tree
tree = compile(code, '', 'exec', flags=ast.PyCF_ONLY_AST)
# Iterate through each line
for line in tree.body:
if hasattr(line, "targets") and hasattr(line, "value"):
# Append targets and values this line to arguments dict
for target in line.targets:
if hasattr(target, "id"):
vars[target.id] = _actualizeAstValue(line.value)
return vars
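# Sketch (input assumed): only top-level assignments with literal values are
# captured, e.g.
#   getVariables("x = 1\nname = 'demo'\npair = (1, 2)")
#   # -> {'x': 1, 'name': 'demo', 'pair': (1, 2)}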
def getArgs(code):
"""
Use AST tree parsing to convert a string of valid Python arguments to a dict containing each argument used and its
value.
"""
assert isinstance(code, str), "First input to `getArgs()` must be a string"
# Make blank output dict
args = {}
# Add outer brackets if needed
if not (code.startswith("(") and code.endswith(")")):
code = "(" + code + ")"
# Move it all to one line
code = code.replace("\n", "")
# Append dict constructor
code = "dict" + code
# Construct tree
tree = compile(code, '', 'exec', flags=ast.PyCF_ONLY_AST)
# Get keywords
keywords = tree.body[0].value.keywords
for kw in keywords:
if hasattr(kw, "arg") and hasattr(kw, "value"):
# Append keyword and value to arguments dict
args[kw.arg] = _actualizeAstValue(kw.value)
return args
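# Sketch (input assumed): keyword-style argument strings become a dict, e.g.
#   getArgs("a=1, b=(2, 3)")  # -> {'a': 1, 'b': (2, 3)}
# Positional values carry no keyword node and are therefore skipped.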
| 11,617 | Python | .py | 368 | 23.934783 | 118 | 0.59298 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,383 | environmenttools.py | psychopy_psychopy/psychopy/tools/environmenttools.py |
def getFromNames(names, namespace):
"""
Get a component, or any other object handle, from a string containing its variable name.
Parameters
    ----------
names : str, list or tuple
String representing the name of a variable, or a list/tuple (or listlike string) of names.
namespace : dict or None
dict mapping names to values, if unsure just use `globals()`
"""
# If listlike string, split into list
if isinstance(names, str) and "," in names:
        # Strip parentheses
if (
(names.startswith("[") and names.endswith("]"))
or (names.startswith("(") and names.endswith(")"))
):
names = names[1:-1]
# Split at commas
namesList = []
for thisName in names.split(","):
# Strip spaces
thisName = thisName.strip()
# Strip quotes
if (
(thisName.startswith('"') and thisName.endswith('"'))
or (thisName.startswith("'") and thisName.endswith("'"))
):
thisName = thisName[1:-1]
# Append
namesList.append(thisName)
names = namesList
# If single name, put in list
from collections.abc import Iterable
if isinstance(names, str) or not isinstance(names, Iterable):
names = [names]
# Get objects
objs = []
for nm in names:
# Strip spaces
if isinstance(nm, str):
nm = nm.strip()
# Get (use original value if not present)
obj = namespace.get(nm, nm)
# Append
objs.append(obj)
return objs
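# Sketch (namespace assumed): names found in the namespace resolve to their
# objects, anything else falls back to the bare string:
#   getFromNames("stim, missing", namespace={"stim": stimObj})
#   # -> [stimObj, "missing"]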
def setExecEnvironment(env):
# Get builtin exec function
import builtins
# Create new exec function in given environment
def exec(call_str):
builtins.exec(call_str, env)
exec.__doc__ = builtins.exec.__doc__
# Return function
return exec
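# Sketch: run code against an isolated namespace (values assumed):
#   env = {}
#   run = setExecEnvironment(env)
#   run("x = 1")
#   env["x"]  # -> 1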
| 1,927 | Python | .py | 56 | 25.75 | 98 | 0.578242 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,384 | plottools.py | psychopy_psychopy/psychopy/tools/plottools.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Functions and classes related to plotting
"""
def plotFrameIntervals(intervals):
"""Plot a histogram of the frame intervals.
    Where `intervals` is either the filename of a file saved by
    Window.saveFrameIntervals, or simply a list (or array) of frame intervals
    """
    from pylab import hist, show, plot
    if isinstance(intervals, str):
        # read the single line of comma-separated intervals, closing the file
        with open(intervals, 'r') as f:
            intervals = eval("[%s]" % (f.readline()))
# hist(intervals, int(len(intervals)/10))
plot(intervals)
show()
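# Sketch (data assumed): plotFrameIntervals([0.016, 0.017, 0.016, 0.033])
# plots the interval trace, so a dropped frame shows up as a spike.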
| 748 | Python | .py | 19 | 35.315789 | 79 | 0.688797 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,385 | apptools.py | psychopy_psychopy/psychopy/tools/apptools.py |
class SortTerm:
def __init__(self, value,
aLabel=None, dLabel=None,
ascending=False):
# Substitute labels
if aLabel is None:
aLabel = value
if dLabel is None:
dLabel = value
# Store details
self.value = value
self.ascending = ascending
self._aLabel = aLabel
self._dLabel = dLabel
@property
def label(self):
if self.ascending:
return self._aLabel
else:
return self._dLabel
def __str__(self):
if self.ascending:
return self.value + "+"
else:
return self.value + "-"
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.value == other.value
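# Sketch (labels assumed):
#   term = SortTerm("name", aLabel="name (A-Z)", dLabel="name (Z-A)",
#                   ascending=True)
#   str(term)   # -> "name+"
#   term.label  # -> "name (A-Z)"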
| 829 | Python | .py | 29 | 18.896552 | 49 | 0.52381 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,386 | fontmanager.py | psychopy_psychopy/psychopy/tools/fontmanager.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
# Shader compilation code
# -----------------------------------------------------------------------------
#
# Copyright Tristam Macdonald 2008.
#
# Distributed under the Boost Software License, Version 1.0
# (see http://www.boost.org/LICENSE_1_0.txt)
#
import re
import sys, os
import math
import numpy as np
import ctypes
import freetype as ft
from pyglet import gl # import OpenGL.GL not compatible with Big Sur (2020)
from pathlib import Path
import requests
from psychopy import logging
from psychopy import prefs
from psychopy.exceptions import MissingFontError
# OS Font paths
_X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF",
"/usr/X11/lib/X11/fonts",
# here is the new standard location for fonts
"/usr/share/fonts",
# documented as a good place to install new fonts
"/usr/local/share/fonts",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype",
]
_OSXFontDirectories = [
"/Library/Fonts/",
str(Path.home() / "Library" / "Fonts"),
"/Network/Library/Fonts",
"/System/Library/Fonts",
# fonts installed via MacPorts
"/opt/local/share/fonts",
]
_weightMap = {
# Map of various potential values for "bold" and the numeric font weight which they correspond to
100: 100, "thin": 100, "hairline": 100,
200: 200, "extralight": 200, "ultralight": 200,
300: 300, "light": 300,
400: 400, False: 400, "normal": 400, "regular": 400,
500: 500, "medium": 500,
600: 600, "semibold": 600, "demibold": 600,
700: 700, "bold": 700, True: 700,
800: 800, "extrabold": 800, "ultrabold": 800,
900: 900, "black": 900, "heavy": 900,
950: 950, "extrablack": 950, "ultrablack": 950
}
supportedExtensions = ['ttf', 'otf', 'ttc', 'dfont', 'truetype']
def unicode(s, fmt='utf-8'):
"""Force to unicode if bytes"""
    if isinstance(s, bytes):
return s.decode(fmt)
else:
return s
# this class was to get around the issue of constantly having to convert to
# and from utf-8 because the ft.Face class uses b'' for family_name,
# family_style but the problems run deeper than that (hot mess!). Maybe ft will
# update with better Py3 support?
# class Face(ft.Face):
# """This is the same as freetype Face class but with unicode face"""
# def __init__(self, *args, **kwargs):
# self._ftFace = ft.Face(self, *args, **kwargs)
# # store original properties of the ft.Face
# self._family_name = ft.Face
#
# @property
# def family_name(self):
# return unicode(self._ftFace.family_name)
#
# @property
# def style_name(self):
# return unicode(self._ftFace.style_name)
#
# def __get__(self, att):
# if att in self.__dict__:
# return self.__dict__[att]
# else:
# try:
# return getattr(self._ftFace, att)
# except AttributeError:
# raise AttributeError("freetype.Face has no attribute '{}'"
# .format(att))
class _TextureAtlas:
""" A TextureAtlas is the texture used by the GLFont to store the glyphs
Group multiple small data regions into a larger texture.
The algorithm is based on the article by Jukka Jylänki : "A Thousand Ways
to Pack the Bin - A Practical Approach to Two-Dimensional Rectangle Bin
Packing", February 27, 2010. More precisely, this is an implementation of
the Skyline Bottom-Left algorithm based on C++ sources provided by Jukka
Jylänki at: http://clb.demon.fi/files/RectangleBinPack/
Example usage:
--------------
atlas = TextureAtlas(512,512,3)
region = atlas.get_region(20,20)
...
atlas.set_region(region, data)
"""
def __init__(self, width=2048, height=2048, format='alpha',
name='fontname'): # name just for logging purposes
"""
Initialize a new atlas of given size.
Parameters
----------
width : int
Width of the underlying texture
height : int
Height of the underlying texture
format : 'alpha' or 'rgb'
Depth of the underlying texture
"""
self.name = name
self.width = int(math.pow(2, int(math.log(width, 2) + 0.5)))
self.height = int(math.pow(2, int(math.log(height, 2) + 0.5)))
self.format = format
self.nodes = [(0, 0, self.width), ]
self.textureID = 0
self.used = 0
if format == 'rgb':
self.data = np.zeros((self.height, self.width, 3),
dtype=np.ubyte)
elif format == 'alpha':
self.data = np.zeros((self.height, self.width),
dtype=np.ubyte)
else:
raise TypeError("TextureAtlas should have format of 'alpha' or "
"'rgb' not {}".format(repr(format)))
def set_region(self, region, data):
"""
Set a given region width provided data.
Parameters
----------
region : (int,int,int,int)
an allocated region (x,y,width,height)
data : numpy array
data to be copied into given region
"""
x, y, width, height = region
if self.format == 'rgb':
self.data[int(y):int(y + height), int(x):int(x + width), :] = data
else:
self.data[int(y):int(y + height), int(x):int(x + width)] = data
def get_region(self, width, height):
"""
Get a free region of given size and allocate it
Parameters
----------
width : int
Width of region to allocate
height : int
Height of region to allocate
Return
------
A newly allocated region as (x,y,width,height) or (-1,-1,0,0)
"""
best_height = sys.maxsize
best_index = -1
best_width = sys.maxsize
region = 0, 0, width, height
for i in range(len(self.nodes)):
y = self.fit(i, width, height)
if y >= 0:
node = self.nodes[i]
if (y + height < best_height or
(y + height == best_height and node[2] < best_width)):
best_height = y + height
best_index = i
best_width = node[2]
region = node[0], y, width, height
if best_index == -1:
return -1, -1, 0, 0
node = region[0], region[1] + height, width
self.nodes.insert(best_index, node)
i = best_index + 1
while i < len(self.nodes):
node = self.nodes[i]
prev_node = self.nodes[i - 1]
if node[0] < prev_node[0] + prev_node[2]:
shrink = prev_node[0] + prev_node[2] - node[0]
x, y, w = self.nodes[i]
self.nodes[i] = x + shrink, y, w - shrink
if self.nodes[i][2] <= 0:
del self.nodes[i]
i -= 1
else:
break
else:
break
i += 1
self.merge()
self.used += width * height
return region
def fit(self, index, width, height):
"""
Test if region (width,height) fit into self.nodes[index]
Parameters
----------
index : int
Index of the internal node to be tested
width : int
Width or the region to be tested
height : int
Height or the region to be tested
"""
node = self.nodes[index]
x, y = node[0], node[1]
width_left = width
if x + width > self.width:
return -1
i = index
while width_left > 0:
node = self.nodes[i]
y = max(y, node[1])
if y + height > self.height:
return -1
width_left -= node[2]
i += 1
return y
def merge(self):
"""
Merge nodes
"""
i = 0
while i < len(self.nodes) - 1:
node = self.nodes[i]
next_node = self.nodes[i + 1]
if node[1] == next_node[1]:
self.nodes[i] = node[0], node[1], node[2] + next_node[2]
del self.nodes[i + 1]
else:
i += 1
def upload(self):
"""Upload the local atlas data into graphics card memory
"""
if not self.textureID:
self.textureID = gl.GLuint(0)
gl.glGenTextures(1, ctypes.byref(self.textureID))
logging.debug("Uploading Texture Font {} to graphics card"
.format(self.name))
gl.glBindTexture(gl.GL_TEXTURE_2D, self.textureID)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
if self.format == 'alpha':
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_ALPHA,
self.width, self.height, 0,
gl.GL_ALPHA, gl.GL_UNSIGNED_BYTE, self.data.ctypes)
else:
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB,
self.width, self.height, 0,
gl.GL_RGB, gl.GL_UNSIGNED_BYTE, self.data.ctypes)
logging.debug("Upload of Texture Font {} complete"
.format(self.name))
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
class GLFont:
"""
A GLFont gathers a set of glyphs for a given font filename and size.
size : int
Distance between the tops of capital letters and the bottoms of descenders
height : int
Total distance from one baseline to the next
capheight : int
Position of the tops of capital letters relative to the baseline
ascender : int
Position of the tops of ascenders relative to the baseline
descender : int
Position of the bottoms of descenders relative to the baseline
linegap : int
Distance between the bottoms of this line's descenders and the tops of the next line's ascenders
leading : int
Position of the tops of the next line's ascenders relative to this line's baseline
"""
def __init__(self, filename, size, lineSpacing=1, textureSize=2048):
"""
Initialize font
Parameters:
-----------
atlas: TextureAtlas
Texture atlas where glyph texture will be stored
filename: str
Font filename
size : float
Font size
lineSpacing : float
Leading between lines, proportional to font size
"""
self.scale = 64.0
self.atlas = _TextureAtlas(textureSize, textureSize, format='alpha')
self.format = self.atlas.format
self.filename = filename
self.face = ft.Face(str(filename)) # ft.Face doesn't support Pathlib yet
self.size = size
self.glyphs = {}
self.info = FontInfo(filename, self.face)
self._dirty = False
# Get metrics
metrics = self.face.size
self.ascender = metrics.ascender / self.scale
self.descender = metrics.descender / self.scale
self.height = metrics.height / self.scale
# Set spacing
self.lineSpacing = lineSpacing
def __getitem__(self, charcode):
"""
x.__getitem__(y) <==> x[y]
"""
if charcode not in self.glyphs:
self.fetch('%c' % charcode)
return self.glyphs[charcode]
def __str__(self):
"""Returns a string rep of the font, such as 'Arial_24_bold' """
return "{}_{}".format(self.info, self.size)
@property
def leading(self):
"""
Position of the next row's ascender line relative to this row's base line.
"""
return self.ascender - self.height
@leading.setter
def leading(self, value):
self.height = self.ascender - value
@property
def linegap(self):
return -(self.leading - self.descender)
@linegap.setter
def linegap(self, value):
self.leading = self.descender - value
@property
def capheight(self):
"""
Position of the top of capital letters relative to the base line.
"""
return self.descender + self.size
@capheight.setter
def capheight(self, value):
self.size = value - self.descender
@property
def size(self):
"""
Distance from the descender line to the capheight line.
"""
if hasattr(self, "_size"):
return self._size
@size.setter
def size(self, value):
self._size = value
self.face.set_char_size(int(self.size * self.scale))
@property
def lineSpacing(self):
return self.height / (self.ascender - self.descender)
@lineSpacing.setter
def lineSpacing(self, value):
self.height = value * (self.ascender - self.descender)
@property
def name(self):
"""Name of the Font (e.g. 'Arial_24_bold')
"""
return str(self)
@property
def textureID(self):
"""
Get underlying texture identity .
"""
if self._dirty:
self.atlas.upload()
self._dirty = False
return self.atlas.textureID
def preload(self, nMax=None):
"""
:return:
"""
if nMax is None:
note = "entire glyph set"
else:
note = "{} glyphs".format(nMax)
logging.debug("Preloading {} for Texture Font {}"
.format(note, self.name))
face = ft.Face(str(self.filename)) # ft.Face doesn't support Pathlib
chrs = (list(face.get_chars()))[:nMax]
charcodes = [chr(c[1]) for c in chrs]
self.fetch(charcodes, face=face)
logging.debug("Preloading of glyph set for Texture Font {} complete"
.format(self.name))
def fetch(self, charcodes='', face=None):
"""
Build glyphs corresponding to individual characters in charcodes.
Parameters:
-----------
charcodes: [str | unicode]
Set of characters to be represented
"""
if face is None:
face = ft.Face(str(self.filename)) # doesn't support Pathlib yet
# if current glyph is same as last then maybe blank glyph?
lastGlyph = None
possibleBlank = None
nBlanks = 0
for charcode in charcodes:
if charcode in self.glyphs:
continue
face.set_pixel_sizes(int(self.size), int(self.size))
self._dirty = True
flags = ft.FT_LOAD_RENDER | ft.FT_LOAD_FORCE_AUTOHINT
flags |= ft.FT_LOAD_TARGET_LCD
face.load_char(charcode, flags)
bitmap = face.glyph.bitmap
# check if this looks like a blank (same as a prev glyph)
if bitmap.buffer == lastGlyph:
possibleBlank = lastGlyph
if bitmap.buffer == possibleBlank: # whether newly detected or not
nBlanks += 1
continue
lastGlyph = bitmap.buffer
left = face.glyph.bitmap_left
top = face.glyph.bitmap_top
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
pitch = face.glyph.bitmap.pitch
if self.format == 'rgb':
x, y, w, h = self.atlas.get_region(width / 5, rows + 2)
else:
x, y, w, h = self.atlas.get_region(width + 2, rows + 2)
if x < 0:
msg = ("Failed to fit char into font texture ({} at size {}px)"
.format(face.family_name, self.size))
raise RuntimeError(msg)
x, y = x + 1, y + 1
w, h = w - 2, h - 2
data = np.array(bitmap.buffer).reshape(rows, pitch)
data = data[:h, :w]
            if self.format == 'rgb':
                # NOTE: this gamma-adjusted copy is computed but never used;
                # the raw bitmap data is uploaded below
                Z = (((data / 255.0) ** 1.5) * 255).astype(np.ubyte)
self.atlas.set_region((x, y, w, h), data)
# Build glyph
size = w, h
offset = left, top
advance = (face.glyph.advance.x / self.scale,
face.glyph.advance.y / self.scale)
u0 = (x + 0.0) / float(self.atlas.width)
v0 = (y + 0.0) / float(self.atlas.height)
u1 = (x + w - 0.0) / float(self.atlas.width)
v1 = (y + h - 0.0) / float(self.atlas.height)
texcoords = (u0, v0, u1, v1)
glyph = TextureGlyph(charcode, size, offset, advance, texcoords)
self.glyphs[charcode] = glyph
# Generate kerning
# for g in self.glyphs.values():
# kerning = face.get_kerning(g.charcode, charcode,
# mode=ft.FT_KERNING_UNFITTED)
# if kerning.x != 0:
# glyph.kerning[g.charcode] = kerning.x / self.scale
#
# kerning = face.get_kerning(charcode, g.charcode,
# mode=ft.FT_KERNING_UNFITTED)
# if kerning.x != 0:
# g.kerning[charcode] = kerning.x / self.scale
logging.debug("TextBox2 loaded {} chars with {} blanks and {} valid"
.format(len(charcodes), nBlanks, len(charcodes) - nBlanks))
def saveToCache(self):
"""Store the current font texture as an image file.
As yet we aren't storing the offset, advance and texcoords as needed to
retrieve the necessary chars, but it's a start!
(see TextureGlyph(charcode, size, offset, advance, texcoords) )
"""
from PIL import Image
im = Image.fromarray(self.atlas.data)
fname = "{}/.psychopy3/{}_{}_texture.png".format(
os.path.expanduser("~"), self.name, self.size)
im.save(fname)
def upload(self):
"""Upload the font data into graphics card memory.
"""
self.atlas.upload()
class TextureGlyph:
"""
A texture glyph gathers information relative to the size/offset/advance and
texture coordinates of a single character. It is generally built
automatically by a TextureFont.
"""
def __init__(self, charcode, size, offset, advance, texcoords):
"""
Build a new texture glyph
Parameter:
----------
charcode : char
Represented character
size: tuple of 2 ints
Glyph size in pixels
offset: tuple of 2 floats
Glyph offset relatively to anchor point
advance: tuple of 2 floats
Glyph advance
texcoords: tuple of 4 floats
Texture coordinates of bottom-left and top-right corner
"""
self.charcode = charcode
self.size = size
self.offset = offset
self.advance = advance
self.texcoords = texcoords
self.kerning = {}
def get_kerning(self, charcode):
""" Get kerning information
Parameters:
-----------
charcode: char
Character preceding this glyph
"""
if charcode in self.kerning.keys():
return self.kerning[charcode]
else:
return 0
def findFontFiles(folders=(), recursive=True):
"""Search for font files in the folder (or system folders)
Parameters
----------
folders: iterable
folders to search. If empty then search typical system folders
Returns
-------
list of pathlib.Path objects
"""
searchPaths = folders
    if searchPaths is None or len(searchPaths) == 0:
if sys.platform == 'win32':
searchPaths = [] # just leave it to matplotlib as below
elif sys.platform == 'darwin':
# on mac matplotlib doesn't include 'ttc' files (which are fine)
searchPaths = _OSXFontDirectories
elif sys.platform.startswith('linux'):
searchPaths = _X11FontDirectories
# search those folders
fontPaths = []
for thisFolder in searchPaths:
thisFolder = Path(thisFolder)
try:
for thisExt in supportedExtensions:
if recursive:
fontPaths.extend(thisFolder.rglob("*.{}".format(thisExt)))
else:
fontPaths.extend(thisFolder.glob("*.{}".format(thisExt)))
except PermissionError:
logging.warning(f"The fonts folder '{thisFolder}' exists but the current user doesn't have read "
"access to it. Fonts from that folder won't be available to TextBox")
# if we failed let matplotlib have a go
if not fontPaths:
from matplotlib import font_manager
fontPaths = font_manager.findSystemFonts()
# search resources folder and user's own fonts folder
for thisFolder in [Path(prefs.paths['fonts']), Path(prefs.paths['resources']) / "fonts"]:
for thisExt in supportedExtensions:
if recursive:
fontPaths.extend(thisFolder.rglob("*.{}".format(thisExt)))
else:
fontPaths.extend(thisFolder.glob("*.{}".format(thisExt)))
return fontPaths
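# Sketch (path assumed): findFontFiles(["/usr/share/fonts"], recursive=True)
# yields pathlib.Path objects for every file matching supportedExtensions;
# with no folders given, the platform-specific system paths above are used.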
class FontManager():
"""FontManager provides a simple API for finding and loading font files
(.ttf) via the FreeType lib
The FontManager finds supported font files on the computer and
initially creates a dictionary containing the information about
available fonts. This can be used to quickly determine what font family
names are available on the computer and what styles (bold, italic) are
supported for each family.
This font information can then be used to create the resources necessary
to display text using a given font family, style, size, color, and dpi.
The FontManager is currently used by the psychopy.visual.TextBox stim
type. A user script can access the FontManager via:
fonts = visual.textbox2.getFontManager()
A user script never creates an instance of the FontManager class and
should always access it using visual.textbox.getFontManager().
Once a font of a given size and dpi has been created; it is cached by the
FontManager and can be used by all TextBox instances created within the
experiment.
"""
freetype_import_error = None
_glFonts = {}
fontStyles = []
_fontInfos = {} # JWP: dict of name:FontInfo objects
def __init__(self, monospaceOnly=False):
self.addFontDirectory(prefs.paths['resources'])
# if FontManager.freetype_import_error:
# raise Exception('Appears the freetype library could not load.
# Error: %s'%(str(FontManager.freetype_import_error)))
self.monospaceOnly = monospaceOnly
self.updateFontInfo(monospaceOnly)
def __str__(self):
S = "Loaded:\n"
if len(self._glFonts):
for name in self._glFonts:
S += " {}\n".format(name)
else:
S += "None\n"
S += ("Available: {} see fonts.getFontFamilyNames()\n"
.format(len(self.getFontFamilyNames())))
return S
def getDefaultSansFont(self):
"""Load and return the FontInfo for the first found default font"""
for name in ['Verdana', 'DejaVu Sans', 'Bitstream Vera Sans', 'Tahoma']:
these = self.getFontsMatching(name, fallback=False)
if not these:
continue
            # sets are not indexable, so convert before taking the first item
            if type(these) in (list, set):
                this = list(these)[0]
# if str or Path then get a FontInfo object
if type(this) in [str, Path]:
this = self.addFontFiles(this)
return this
raise MissingFontError("Failed to find any of the default fonts. "
"Existing fonts: {}"
.format(list(self._fontInfos)))
def getFontFamilyNames(self):
"""Returns a list of the available font family names.
"""
return list(self._fontInfos.keys())
def getFontStylesForFamily(self, family_name):
"""For the given family, a list of style names supported is
returned.
"""
style_dict = self._fontInfos.get(family_name)
if style_dict:
return list(style_dict.keys())
def getFontFamilyStyles(self):
"""Returns a list where each element of the list is a itself a
two element list of [fontName,[fontStyle_names_list]]
"""
return self.fontStyles
def getFontsMatching(self, fontName, bold=False, italic=False,
fontStyle=None, fallback=True):
"""
Returns the list of FontInfo instances that match the provided
fontName and style information. If no matching fonts are
found, None is returned.
"""
if type(fontName) != bytes:
fontName = bytes(fontName, sys.getfilesystemencoding())
# Convert value of "bold" to a numeric font weight
if bold in _weightMap or str(bold).lower().strip() in _weightMap:
bold = _weightMap[bold]
else:
bold = _weightMap[False] # Default to regular
style_dict = self._fontInfos.get(fontName)
if not style_dict:
if not fallback:
return None
similar = self.getFontNamesSimilar(fontName)
if len(similar) == 0:
logging.warning("Font {} was requested. No similar font found.".format(repr(fontName)))
return [self.getDefaultSansFont()]
elif len(similar) == 1:
logging.warning("Font {} was requested. Exact match wasn't "
"found but we will proceed with {}?"
.format(repr(fontName), repr(similar[0])))
style_dict = self._fontInfos.get(similar[0])
else: # more than 1 alternatives. Which to use?
raise ValueError("Font {} was requested. Exact match wasn't "
"found, but maybe one of these was intended:"
"{}?".format(repr(fontName), similar))
if not style_dict:
return None
# check if we have a valid style too
if fontStyle and fontStyle in style_dict:
return style_dict[fontStyle]
for style, fonts in style_dict.items():
b, i = self.booleansFromStyleName(style)
if b == bold and i == italic:
return fonts
return None
def getFontNamesSimilar(self, fontName):
if type(fontName) != bytes:
fontName = bytes(fontName, sys.getfilesystemencoding())
allNames = list(self._fontInfos)
similar = [this for this in allNames if
(fontName.lower() in this.lower())]
return similar
def addGoogleFont(self, fontName):
"""Add a font directly from the Google Font repository, saving it to the user prefs folder"""
# Construct and send Google Font url from name
repoURL = f"https://fonts.googleapis.com/css2?family={ fontName.replace(' ', '+') }&display=swap"
repoResp = requests.get(repoURL)
if not repoResp.ok:
# If font name is not found, raise error
raise MissingFontError("Font `{}` could not be retrieved from the Google Font library.".format(fontName))
# Get and send file url from returned CSS data
fileURL = re.findall(r"(?<=src: url\().*(?=\) format)", repoResp.content.decode())[0]
fileFormat = re.findall(r"(?<=format\(\').*(?=\'\)\;)", repoResp.content.decode())[0]
fileResp = requests.get(fileURL)
if not fileResp.ok:
# If font file is not available, raise error
raise MissingFontError("OST file for Google font `{}` could not be accessed".format(fontName))
# Save retrieved font as an OST file
fileName = Path(prefs.paths['fonts']) / f"{fontName}.{fileFormat}"
logging.info("Font \"{}\" was successfully installed at: {}".format(fontName, prefs.paths['fonts']))
with open(fileName, "wb") as fileObj:
fileObj.write(fileResp.content)
# Add font and return
return self.addFontFile(fileName)
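    # Sketch (font name assumed): fm.addGoogleFont("Roboto") downloads the
    # family's font file into prefs.paths['fonts'] and registers it for the
    # current session via addFontFile.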
def addFontFile(self, fontPath, monospaceOnly=False):
"""Add a Font File to the FontManger font search space. The
fontPath must be a valid path including the font file name.
Relative paths can be used, with the current working directory being
the origin.
If monospaceOnly is True, the font file will only be added if it is a
monospace font.
Adding a Font to the FontManager is not persistent across runs of
the script, so any extra font paths need to be added each time the
script starts.
"""
fi_list = set()
if os.path.isfile(fontPath) and os.path.exists(fontPath):
try:
face = ft.Face(str(fontPath))
except Exception:
logging.warning("Font Manager failed to load file {}"
.format(fontPath))
return
if face.family_name is None:
logging.warning("{} doesn't have valid font family name"
.format(fontPath))
return
if monospaceOnly:
if face.is_fixed_width:
fi_list.add(self._createFontInfo(fontPath, face))
else:
fi_list.add(self._createFontInfo(fontPath, face))
return fi_list
def addFontFiles(self, fontPaths, monospaceOnly=False):
""" Add a list of font files to the FontManger font search space.
Each element of the fontPaths list must be a valid path including
the font file name. Relative paths can be used, with the current
working directory being the origin.
If monospaceOnly is True, each font file will only be added if it is
a monospace font.
Adding fonts to the FontManager is not persistent across runs of
the script, so any extra font paths need to be added each time the
script starts.
"""
        fi_list = []
        for fp in fontPaths:
            # collect the returned FontInfo objects so the list is actually filled
            fi = self.addFontFile(fp, monospaceOnly)
            if fi:
                fi_list.extend(fi)
self.fontStyles.sort()
return fi_list
def addFontDirectory(self, fontDir, monospaceOnly=False, recursive=False):
"""
Add any font files found in fontDir to the FontManger font search
space. Each element of the fontPaths list must be a valid path
including the font file name. Relative paths can be used, with the
current working directory being the origin.
If monospaceOnly is True, each font file will only be added if it is
a monospace font (as only monospace fonts are currently supported by
TextBox).
Adding fonts to the FontManager is not persistent across runs of
the script, so any extra font paths need to be added each time the
script starts.
"""
fontPaths = findFontFiles([fontDir], recursive=recursive)
return self.addFontFiles(fontPaths)
# Class methods for FontManager below this comment should not need to be
# used by user scripts in most situations. Accessing them is okay.
def getFont(self, name, size=32, bold=False, italic=False, lineSpacing=1,
monospace=False):
"""
Return a FontAtlas object that matches the family name, style info,
and size provided. FontAtlas objects are cached, so if multiple
TextBox instances use the same font (with matching font properties)
then the existing FontAtlas is returned. Otherwise, a new FontAtlas is
        created, added to the cache, and returned.
"""
fontInfos = self.getFontsMatching(name, bold, italic, fallback=False)
if not fontInfos:
# If font not found, try to retrieve it from Google
try:
self.addGoogleFont(name)
except (MissingFontError, ValueError):
pass
# Then try again with fallback
fontInfos = self.getFontsMatching(name, bold, italic, fallback=True)
if not fontInfos:
return False
# If font is found, make glfont
fontInfo = fontInfos[0]
identifier = "{}_{}_{}".format(str(fontInfo), size, lineSpacing)
glFont = self._glFonts.get(identifier)
if glFont is None:
glFont = GLFont(fontInfo.path, size, lineSpacing=lineSpacing)
self._glFonts[identifier] = glFont
return glFont
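    # Sketch (font assumed present): fm.getFont("Arial", size=24, bold=True)
    # returns the cached GLFont when an identical request was made before,
    # otherwise it builds a new atlas, caches it, and returns it.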
def updateFontInfo(self, monospaceOnly=False):
self._fontInfos.clear()
del self.fontStyles[:]
fonts_found = findFontFiles()
self.addFontFiles(fonts_found, monospaceOnly)
def booleansFromStyleName(self, style):
"""
For the given style name, return a
bool indicating if the font is bold, and a second indicating
if it is italics.
"""
        s = style.lower().strip()
        if isinstance(s, bytes):
            s = s.decode('utf-8')
        # Work out Italic
        italic = False  # Default false
if s.find('italic') >= 0 or s.find('oblique') >= 0:
italic = True
# Work out font weight
bold = _weightMap[False] # Default regular weight
for key in _weightMap:
if s.find(str(key)) >= 0:
bold = _weightMap[key]
return bold, italic
def _createFontInfo(self, fp, fface):
""""""
fns = (fface.family_name, fface.style_name)
if fns in self.fontStyles:
pass
else:
self.fontStyles.append(
(fface.family_name, fface.style_name))
styles_for_font_dict = FontManager._fontInfos.setdefault(
fface.family_name, {})
fonts_for_style = styles_for_font_dict.setdefault(fface.style_name, [])
fi = FontInfo(fp, fface)
fonts_for_style.append(fi)
return fi
def __del__(self):
self.font_store = None
if self._glFonts:
self._glFonts.clear()
self._glFonts = None
if self._fontInfos:
self._fontInfos.clear()
self._fontInfos = None
class FontInfo():
def __init__(self, fp, face):
self.path = fp
self.family = unicode(face.family_name)
self.style = unicode(face.style_name)
self.charmaps = [charmap.encoding_name for charmap in face.charmaps]
self.num_faces = face.num_faces
self.num_glyphs = face.num_glyphs
# self.size_info= [dict(width=s.width,height=s.height,
# x_ppem=s.x_ppem,y_ppem=s.y_ppem) for s in face.available_sizes]
self.units_per_em = face.units_per_EM
self.monospace = face.is_fixed_width
self.charmap_id = face.charmap.index
self.label = "%s_%s" % (face.family_name, face.style_name)
def __str__(self):
"""Generate a string identifier for this font name_style
"""
fullName = "{}".format(self.family)
if self.style:
fullName += "_" + self.style
return fullName
def asdict(self):
d = {}
for k, v in self.__dict__.items():
if k[0] != '_':
d[k] = v
return d
| 36,429 | Python | .py | 880 | 31.470455 | 117 | 0.579711 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,387 | viewtools.py | psychopy_psychopy/psychopy/tools/viewtools.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tools for working with view projections for 2- and 3-D rendering.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = ['Frustum',
'visualAngle',
'computeFrustum',
'computeFrustumFOV',
'projectFrustum',
'projectFrustumToPlane',
'generalizedPerspectiveProjection',
'orthoProjectionMatrix',
'perspectiveProjectionMatrix',
'lookAt',
'pointToNdc',
'cursorToRay',
'visible',
'visibleBBox']
import numpy as np
from collections import namedtuple
import psychopy.tools.mathtools as mt
DEG_TO_RAD = np.pi / 360.0  # note: includes the half-angle factor (pi/360, not pi/180)
VEC_FWD_AND_UP = np.array(((0., 0., -1.), (0., 1., 0.)), dtype=np.float32)
def visualAngle(size, distance, degrees=True, out=None, dtype=None):
"""Get the visual angle for an object of `size` at `distance`. Object is
assumed to be fronto-parallel with the viewer.
This function supports vector inputs. Values for `size` and `distance` can
be arrays or single values. If both inputs are arrays, they must have the
same size.
Parameters
----------
size : float or array_like
Size of the object in meters.
distance : float or array_like
Distance to the object in meters.
degrees : bool
Return result in degrees, if `False` result will be in radians.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
float
Visual angle.
Examples
--------
Calculating the visual angle (vertical FOV) of a monitor screen::
monDist = 0.5 # monitor distance, 50cm
monHeight = 0.45 # monitor height, 45cm
vertFOV = visualAngle(monHeight, monDist)
Compute visual angle at multiple distances for objects with the same size::
va = visualAngle(0.20, [1.0, 2.0, 3.0]) # returns
# [11.42118627 5.72481045 3.81830487]
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
size, distance = np.atleast_1d(size, distance)
if out is not None:
out[:] = 2 * np.arctan(size / (2 * distance), dtype=dtype)
if degrees:
out[:] = np.degrees(out, dtype=dtype)
toReturn = out
else:
toReturn = 2 * np.arctan(size / (2 * distance), dtype=dtype)
if degrees:
toReturn[:] = np.degrees(toReturn, dtype=dtype)
return toReturn
# convenient named tuple for storing frustum parameters
Frustum = namedtuple(
'Frustum',
['left', 'right', 'bottom', 'top', 'nearVal', 'farVal'])
def computeFrustum(scrWidth,
scrAspect,
scrDist,
convergeOffset=0.0,
eyeOffset=0.0,
nearClip=0.01,
farClip=100.0,
dtype=None):
"""Calculate frustum parameters. If an eye offset is provided, an asymmetric
frustum is returned which can be used for stereoscopic rendering.
Parameters
----------
scrWidth : float
The display's width in meters.
scrAspect : float
Aspect ratio of the display (width / height).
scrDist : float
Distance to the screen from the view in meters. Measured from the center
of their eyes.
convergeOffset : float
Offset of the convergence plane from the screen. Objects falling on this
plane will have zero disparity. For best results, the convergence plane
should be set to the same distance as the screen (0.0 by default).
eyeOffset : float
Half the inter-ocular separation (i.e. the horizontal distance between
the nose and center of the pupil) in meters. If eyeOffset is 0.0, a
symmetric frustum is returned.
nearClip : float
        Distance to the near clipping plane in meters from the viewer. Must be
        less than `scrDist`.
farClip : float
Distance to the far clipping plane from the viewer in meters. Must be
>nearClip.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
ndarray
Array of frustum parameters. Can be directly passed to
glFrustum (e.g. glFrustum(*f)).
Notes
-----
* The view point must be transformed for objects to appear correctly.
Offsets in the X-direction must be applied +/- eyeOffset to account for
inter-ocular separation. A transformation in the Z-direction must be
applied to account for screen distance. These offsets MUST be applied to
the GL_MODELVIEW matrix, not the GL_PROJECTION matrix! Doing so may break
lighting calculations.
Examples
--------
Creating a frustum and setting a window's projection matrix::
scrWidth = 0.5 # screen width in meters
scrAspect = win.size[0] / win.size[1]
scrDist = win.scrDistCM * 100.0 # monitor setting, can be anything
frustum = viewtools.computeFrustum(scrWidth, scrAspect, scrDist)
Accessing frustum parameters::
left, right, bottom, top, nearVal, farVal = frustum
# ... or ...
left = frustum.left
Off-axis frustums for stereo rendering::
# compute view matrix for each eye, these value usually don't change
eyeOffset = (-0.035, 0.035) # +/- IOD / 2.0
scrDist = 0.50 # 50cm
scrWidth = 0.53 # 53cm
scrAspect = 1.778
leftFrustum = viewtools.computeFrustum(
scrWidth, scrAspect, scrDist, eyeOffset[0])
rightFrustum = viewtools.computeFrustum(
scrWidth, scrAspect, scrDist, eyeOffset[1])
# make sure your view matrix accounts for the screen distance and eye
# offsets!
Using computed view frustums with a window::
win.projectionMatrix = viewtools.perspectiveProjectionMatrix(*frustum)
# generate a view matrix looking ahead with correct viewing distance,
# origin is at the center of the screen. Assumes eye is centered with
# the screen.
eyePos = [0.0, 0.0, scrDist]
screenPos = [0.0, 0.0, 0.0] # look at screen center
eyeUp = [0.0, 1.0, 0.0]
win.viewMatrix = viewtools.lookAt(eyePos, screenPos, eyeUp)
win.applyViewTransform() # call before drawing
"""
# mdc - uses display size instead of the horizontal FOV gluPerspective needs
d = scrWidth / 2.0
ratio = nearClip / float((convergeOffset + scrDist))
right = (d - eyeOffset) * ratio
left = (d + eyeOffset) * -ratio
top = d / float(scrAspect) * ratio
bottom = -top
return np.asarray((left, right, bottom, top, nearClip, farClip),
dtype=dtype)
def computeFrustumFOV(scrFOV,
scrAspect,
scrDist,
convergeOffset=0.0,
eyeOffset=0.0,
nearClip=0.01,
farClip=100.0,
dtype=None):
"""Compute a frustum for a given field-of-view (FOV).
Similar to `computeFrustum`, but computes a frustum based on FOV rather than
screen dimensions.
Parameters
----------
scrFOV : float
Vertical FOV in degrees (fovY).
scrAspect : float
Aspect between the horizontal and vertical FOV (ie. fovX / fovY).
scrDist : float
Distance to the screen from the view in meters. Measured from the center
of the viewer's eye(s).
convergeOffset : float
Offset of the convergence plane from the screen. Objects falling on this
plane will have zero disparity. For best results, the convergence plane
should be set to the same distance as the screen (0.0 by default).
eyeOffset : float
Half the inter-ocular separation (i.e. the horizontal distance between
the nose and center of the pupil) in meters. If eyeOffset is 0.0, a
symmetric frustum is returned.
nearClip : float
        Distance to the near clipping plane in meters from the viewer. Must be
        greater than zero and less than `scrDist`.
farClip : float
Distance to the far clipping plane from the viewer in meters. Must be
>nearClip.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Examples
--------
Equivalent to `gluPerspective`::
frustum = computeFrustumFOV(45.0, 1.0, 0.5)
projectionMatrix = perspectiveProjectionMatrix(*frustum)
"""
d = np.tan(scrFOV * DEG_TO_RAD)
ratio = nearClip / float((convergeOffset + scrDist))
right = (d - eyeOffset) * ratio
left = (d + eyeOffset) * -ratio
top = d / float(scrAspect) * ratio
bottom = -top
return np.asarray((left, right, bottom, top, nearClip, farClip),
dtype=dtype)
def projectFrustum(frustum, dist, dtype=None):
"""Project a frustum on a fronto-parallel plane and get the width and height
of the required drawing area.
This function can be used to determine the size of the drawing area required
for a given frustum on a screen. This is useful for cases where the observer
is viewing the screen through a physical aperture that limits the FOV to a
sub-region of the display. You must convert the size in meters to units of
your screen and apply any offsets.
Parameters
----------
frustum : array_like
Frustum parameters (left, right, bottom, top, near, far), you can
exclude `far` since it is not used in this calculation. However, the
function will still succeed if given.
dist : float
Distance to project points to in meters.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
ndarray
Width and height (w, h) of the area intersected by the given frustum at
`dist`.
Examples
--------
Compute the viewport required to draw in the area where the frustum
intersects the screen::
# needed information
scrWidthM = 0.52
scrDistM = 0.72
scrWidthPIX = 1920
scrHeightPIX = 1080
scrAspect = scrWidthPIX / float(scrHeightPIX)
pixPerMeter = scrWidthPIX / scrWidthM
# Compute a frustum for 20 degree vertical FOV at distance of the
# screen.
frustum = computeFrustumFOV(20., scrAspect, scrDistM)
# get the dimensions of the frustum
w, h = projectFrustum(frustum, scrDistM) * pixPerMeter
# get the origin of the viewport, relative to center of screen.
x = (scrWidthPIX - w) / 2.
y = (scrHeightPIX - h) / 2.
# if there is an eye offset ...
# x = (scrWidthPIX - w + eyeOffsetM * pixPerMeter) / 2.
# viewport rectangle
rect = np.asarray((x, y, w, h), dtype=int)
You can then set the viewport/scissor rectangle of the buffer to restrict
drawing to `rect`.
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
frustum = np.asarray(frustum, dtype=dtype)
    # frustum layout is (left, right, bottom, top, near, ...)
    l, r, b, t = np.abs(frustum[:4] * dist / frustum[4], dtype=dtype)
return np.array((l + r, t + b), dtype=dtype)
def projectFrustumToPlane(frustum, planeOrig, dtype=None):
"""Project a frustum on a fronto-parallel plane and get the coordinates of
the corners in physical space.
Parameters
----------
frustum : array_like
Frustum parameters (left, right, bottom, top, near, far), you can
exclude `far` since it is not used in this calculation. However, the
function will still succeed if given.
planeOrig : float
Distance of plane to project points on in meters.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
ndarray
4x3 array of coordinates in the physical reference frame with origin
at the eye.
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
frustum = np.asarray(frustum, dtype=dtype)
    # frustum layout is (left, right, bottom, top, near, ...)
    l, r, b, t = frustum[:4] * planeOrig / frustum[4]
d = -planeOrig
return np.array(((l, t, d), (l, b, d), (r, b, d), (r, t, d)), dtype=dtype)
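# Sketch (values assumed): with a frustum f computed at nearClip=0.01, the call
#   corners = projectFrustumToPlane(f, 0.5)
# scales the near-plane extents by 0.5 / 0.01 and returns the four corners of
# the intersection rectangle at z = -0.5.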
def generalizedPerspectiveProjection(posBottomLeft,
posBottomRight,
posTopLeft,
eyePos,
nearClip=0.01,
farClip=100.0,
dtype=None):
"""Generalized derivation of projection and view matrices based on the
physical configuration of the display system.
This implementation is based on Robert Kooima's 'Generalized Perspective
Projection' method [1]_.
Parameters
----------
posBottomLeft : list of float or ndarray
Bottom-left 3D coordinate of the screen in meters.
posBottomRight : list of float or ndarray
Bottom-right 3D coordinate of the screen in meters.
posTopLeft : list of float or ndarray
Top-left 3D coordinate of the screen in meters.
eyePos : list of float or ndarray
Coordinate of the eye in meters.
nearClip : float
Near clipping plane distance from viewer in meters.
farClip : float
Far clipping plane distance from viewer in meters.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
tuple
The 4x4 projection and view matrix.
See Also
--------
computeFrustum : Compute frustum parameters.
Notes
-----
* The resulting projection frustums are off-axis relative to the center of
the display.
* The returned matrices are row-major. Values are floats with 32-bits
of precision stored as a contiguous (C-order) array.
References
----------
.. [1] Kooima, R. (2009). Generalized perspective projection. J. Sch.
Electron. Eng. Comput. Sci.
Examples
--------
Computing a projection and view matrices for a window::
projMatrix, viewMatrix = viewtools.generalizedPerspectiveProjection(
posBottomLeft, posBottomRight, posTopLeft, eyePos)
# set the window matrices
win.projectionMatrix = projMatrix
win.viewMatrix = viewMatrix
# before rendering
win.applyEyeTransform()
Stereo-pair rendering example from Kooima (2009)::
# configuration of screen and eyes
posBottomLeft = [-1.5, -0.75, -18.0]
posBottomRight = [1.5, -0.75, -18.0]
posTopLeft = [-1.5, 0.75, -18.0]
posLeftEye = [-1.25, 0.0, 0.0]
posRightEye = [1.25, 0.0, 0.0]
# create projection and view matrices
leftProjMatrix, leftViewMatrix = generalizedPerspectiveProjection(
posBottomLeft, posBottomRight, posTopLeft, posLeftEye)
rightProjMatrix, rightViewMatrix = generalizedPerspectiveProjection(
posBottomLeft, posBottomRight, posTopLeft, posRightEye)
"""
# get data type of arrays
dtype = np.float64 if dtype is None else np.dtype(dtype).type
# convert everything to numpy arrays
posBottomLeft = np.asarray(posBottomLeft, dtype=dtype)
posBottomRight = np.asarray(posBottomRight, dtype=dtype)
posTopLeft = np.asarray(posTopLeft, dtype=dtype)
eyePos = np.asarray(eyePos, dtype=dtype)
# orthonormal basis of the screen plane
vr = posBottomRight - posBottomLeft
vr /= np.linalg.norm(vr)
vu = posTopLeft - posBottomLeft
vu /= np.linalg.norm(vu)
vn = np.cross(vr, vu)
vn /= np.linalg.norm(vn)
# screen corner vectors
va = posBottomLeft - eyePos
vb = posBottomRight - eyePos
vc = posTopLeft - eyePos
dist = -np.dot(va, vn)
nearOverDist = nearClip / dist
left = np.dot(vr, va) * nearOverDist
right = np.dot(vr, vb) * nearOverDist
bottom = np.dot(vu, va) * nearOverDist
top = np.dot(vu, vc) * nearOverDist
# projection matrix to return
projMat = perspectiveProjectionMatrix(
left, right, bottom, top, nearClip, farClip, dtype=dtype)
# view matrix to return, first compute the rotation component
rotMat = np.zeros((4, 4), dtype=dtype)
rotMat[0, :3] = vr
rotMat[1, :3] = vu
rotMat[2, :3] = vn
rotMat[3, 3] = 1.0
transMat = np.identity(4, dtype=dtype)
transMat[:3, 3] = -eyePos
return projMat, np.matmul(rotMat, transMat)
def orthoProjectionMatrix(left, right, bottom, top, nearClip=0.01, farClip=100.,
out=None, dtype=None):
"""Compute an orthographic projection matrix with provided frustum
parameters.
Parameters
----------
left : float
Left clipping plane coordinate.
right : float
Right clipping plane coordinate.
bottom : float
Bottom clipping plane coordinate.
top : float
Top clipping plane coordinate.
nearClip : float
Near clipping plane distance from viewer.
farClip : float
Far clipping plane distance from viewer.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
ndarray
4x4 projection matrix
See Also
--------
perspectiveProjectionMatrix : Compute a perspective projection matrix.
Notes
-----
* The returned matrix is row-major. Values are floats with 32-bits of
precision stored as a contiguous (C-order) array.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
projMat = np.zeros((4, 4,), dtype=dtype) if out is None else out
if out is not None:
projMat.fill(0.0)
u = dtype(2.0)
projMat[0, 0] = u / (right - left)
projMat[1, 1] = u / (top - bottom)
projMat[2, 2] = -u / (farClip - nearClip)
projMat[0, 3] = -((right + left) / (right - left))
projMat[1, 3] = -((top + bottom) / (top - bottom))
projMat[2, 3] = -((farClip + nearClip) / (farClip - nearClip))
projMat[3, 3] = 1.0
return projMat
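# Sketch (extents assumed): an orthographic projection covering normalized
# device coordinates directly, e.g. for a full-screen 2D overlay:
#   proj = orthoProjectionMatrix(-1.0, 1.0, -1.0, 1.0, nearClip=-1.0, farClip=1.0)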
def perspectiveProjectionMatrix(left, right, bottom, top, nearClip=0.01,
farClip=100., out=None, dtype=None):
"""Compute an perspective projection matrix with provided frustum
parameters. The frustum can be asymmetric.
Parameters
----------
left : float
Left clipping plane coordinate.
right : float
Right clipping plane coordinate.
bottom : float
Bottom clipping plane coordinate.
top : float
Top clipping plane coordinate.
nearClip : float
Near clipping plane distance from viewer.
farClip : float
Far clipping plane distance from viewer.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
ndarray
4x4 projection matrix
See Also
--------
    orthoProjectionMatrix : Compute an orthographic projection matrix.
Notes
-----
* The returned matrix is row-major. Values are floats with 32-bits of
precision stored as a contiguous (C-order) array.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
projMat = np.zeros((4, 4,), dtype=dtype) if out is None else out
if out is not None:
projMat.fill(0.0)
u = dtype(2.0)
projMat[0, 0] = (u * nearClip) / (right - left)
projMat[1, 1] = (u * nearClip) / (top - bottom)
projMat[0, 2] = (right + left) / (right - left)
projMat[1, 2] = (top + bottom) / (top - bottom)
projMat[2, 2] = -(farClip + nearClip) / (farClip - nearClip)
projMat[3, 2] = -1.0
projMat[2, 3] = -(u * farClip * nearClip) / (farClip - nearClip)
return projMat
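# Sketch (screen values assumed): build a matrix from physical screen
# measurements via computeFrustum above, then pass it on:
#   frustum = computeFrustum(scrWidth=0.53, scrAspect=1.78, scrDist=0.57)
#   proj = perspectiveProjectionMatrix(*frustum)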
def lookAt(eyePos, centerPos, upVec=(0.0, 1.0, 0.0), out=None, dtype=None):
"""Create a transformation matrix to orient a view towards some point. Based
on the same algorithm as 'gluLookAt'. This does not generate a projection
matrix, but rather the matrix to transform the observer's view in the scene.
For more information see:
https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluLookAt.xml
Parameters
----------
eyePos : list of float or ndarray
Eye position in the scene.
centerPos : list of float or ndarray
Position of the object center in the scene.
upVec : list of float or ndarray, optional
Vector defining the up vector. Default is +Y is up.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
ndarray
4x4 view matrix
Notes
-----
* The returned matrix is row-major. Values are floats with 32-bits of
precision stored as a contiguous (C-order) array.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
toReturn = np.zeros((4, 4,), dtype=dtype) if out is None else out
if out is not None:
toReturn.fill(0.0)
eyePos = np.asarray(eyePos, dtype=dtype)
centerPos = np.asarray(centerPos, dtype=dtype)
upVec = np.asarray(upVec, dtype=dtype)
f = centerPos - eyePos
f /= np.linalg.norm(f)
upVec /= np.linalg.norm(upVec)
s = np.cross(f, upVec)
u = np.cross(s / np.linalg.norm(s), f)
rotMat = np.zeros((4, 4), dtype=dtype)
rotMat[0, :3] = s
rotMat[1, :3] = u
rotMat[2, :3] = -f
rotMat[3, 3] = 1.0
transMat = np.identity(4, dtype=dtype)
transMat[:3, 3] = -eyePos
return np.matmul(rotMat, transMat, out=toReturn)
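# Sketch (positions assumed): a view 5 m back on +Z, looking at the origin
# with +Y up:
#   V = lookAt([0.0, 0.0, 5.0], [0.0, 0.0, 0.0])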
def viewMatrix(pos, ori=(0., 0., 0., -1.), out=None, dtype=None):
"""Get a view matrix from a pose.
A pose consists of a position coordinate [X, Y, Z, 1] and orientation
quaternion [X, Y, Z, W]. Assumes that the identity pose has a forward vector
pointing along the -Z axis and up vector along the +Y axis. The quaternion
for `ori` must be normalized.
Parameters
----------
pos : ndarray, tuple, or list of float
Position vector [x, y, z].
ori : tuple, list or ndarray of float
Orientation quaternion in form [x, y, z, w] where w is real and x, y, z
are imaginary components.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(dtype).type
# convert if needed
pos = np.asarray(pos, dtype=dtype)
ori = np.asarray(ori, dtype=dtype)
axes = np.asarray(VEC_FWD_AND_UP, dtype=dtype) # convert to type
toReturn = np.zeros((4, 4), dtype=dtype) if out is None else out
# generate rotation matrix
b, c, d, a = ori[:]
vsqr = np.square(ori)
R = np.zeros((3, 3,), dtype=dtype)
u = dtype(2.0)
R[0, 0] = vsqr[3] + vsqr[0] - vsqr[1] - vsqr[2]
R[1, 0] = u * (b * c + a * d)
R[2, 0] = u * (b * d - a * c)
R[0, 1] = u * (b * c - a * d)
R[1, 1] = vsqr[3] - vsqr[0] + vsqr[1] - vsqr[2]
R[2, 1] = u * (c * d + a * b)
R[0, 2] = u * (b * d + a * c)
R[1, 2] = u * (c * d - a * b)
R[2, 2] = vsqr[3] - vsqr[0] - vsqr[1] + vsqr[2]
# transform the axes
transformedAxes = axes.dot(R.T)
fwdVec = transformedAxes[0, :] + pos
upVec = transformedAxes[1, :]
toReturn[:, :] = lookAt(pos, fwdVec, upVec, dtype=dtype)
return toReturn
def pointToNdc(wcsPos, viewMatrix, projectionMatrix, out=None, dtype=None):
"""Map the position of a point in world space to normalized device
coordinates/space.
Parameters
----------
wcsPos : tuple, list or ndarray
Nx3 position vector(s) (xyz) in world space coordinates.
viewMatrix : ndarray
4x4 view matrix.
projectionMatrix : ndarray
4x4 projection matrix.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
ndarray
3x1 vector of normalized device coordinates with type 'float32'
Notes
-----
* The point is not visible, falling outside of the viewing frustum, if the
returned coordinates fall outside of -1 and 1 along any dimension.
* In the rare instance the point falls directly on the eye in world
space where the frustum converges to a point (singularity), the divisor
will be zero during perspective division. To avoid this, the divisor is
      'bumped' to the machine epsilon of the array's dtype.
* This function assumes the display area is rectilinear. Any distortion or
warping applied in normalized device or viewport space is not considered.
Examples
--------
Determine if a point is visible::
point = (0.0, 0.0, 10.0) # behind the observer
ndc = pointToNdc(point, win.viewMatrix, win.projectionMatrix)
isVisible = not np.any((ndc > 1.0) | (ndc < -1.0))
Convert NDC to viewport (or pixel) coordinates::
scrRes = (1920, 1200)
point = (0.0, 0.0, -5.0) # forward -5.0 from eye
x, y, z = pointToNdc(point, win.viewMatrix, win.projectionMatrix)
        pixelX = ((x + 1.0) / 2.0) * scrRes[0]
        pixelY = ((y + 1.0) / 2.0) * scrRes[1]
# object at point will appear at (pixelX, pixelY)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
wcsPos = np.asarray(wcsPos, dtype=dtype) # convert to array
toReturn = np.zeros_like(wcsPos, dtype=dtype) if out is None else out
# forward transform from world to clipping space
viewProjMatrix = np.zeros((4, 4,), dtype=dtype)
np.matmul(projectionMatrix, viewMatrix, viewProjMatrix)
pnts, rtn = np.atleast_2d(wcsPos, toReturn)
# convert to 4-vector with W=1.0
wcsVec = np.zeros((pnts.shape[0], 4), dtype=dtype)
    wcsVec[:, :3] = pnts  # 2D view of `wcsPos`, so 1D input assigns cleanly
wcsVec[:, 3] = 1.0
# convert to homogeneous clip space
wcsVec = mt.applyMatrix(viewProjMatrix, wcsVec, dtype=dtype)
# handle the singularity where perspective division will fail
wcsVec[np.abs(wcsVec[:, 3]) <= np.finfo(dtype).eps] = np.finfo(dtype).eps
rtn[:, :] = wcsVec[:, :3] / wcsVec[:, 3:] # xyz / w
return toReturn
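# A minimal usage sketch for `pointToNdc` (not part of the original module):
# mapping a world-space point to pixel coordinates. `viewMatrix`,
# `projectionMatrix` and `screenRes` are assumed to come from your window
# object; the names here are illustrative only.
def _ndcToPixelsExample(point, viewMatrix, projectionMatrix, screenRes):
    """Return the (pixelX, pixelY) location of world-space `point`."""
    x, y, _ = pointToNdc(point, viewMatrix, projectionMatrix)
    # NDC spans [-1, 1] per axis; remap to [0, w] and [0, h]
    pixelX = ((x + 1.0) / 2.0) * screenRes[0]
    pixelY = ((y + 1.0) / 2.0) * screenRes[1]
    return pixelX, pixelY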
def cursorToRay(cursorX, cursorY, winSize, viewport, projectionMatrix,
normalize=True, out=None, dtype=None):
"""Convert a 2D mouse coordinate to a 3D ray.
Takes a 2D window/mouse coordinate and transforms it to a 3D direction
vector from the viewpoint in eye space (vector origin is [0, 0, 0]). The
center of the screen projects to vector [0, 0, -1].
Parameters
----------
cursorX, cursorY : float or int
Window coordinates. These need to be scaled if you are using a
framebuffer that does not have 1:1 pixel mapping (i.e. retina display).
winSize : array_like
Size of the window client area [w, h].
viewport : array_like
Viewport rectangle [x, y, w, h] being used.
projectionMatrix : ndarray
4x4 projection matrix being used.
normalize : bool
Normalize the resulting vector.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for arrays, can either be 'float32' or 'float64'. If `None` is
specified, the data type is inferred by `out`. If `out` is not provided,
the default is 'float64'.
Returns
-------
ndarray
Direction vector (x, y, z).
Examples
--------
Place a 3D stim at the mouse location 5.0 scene units (meters) away::
# define camera
camera = RigidBodyPose((-3.0, 5.0, 3.5))
camera.alignTo((0, 0, 0))
# in the render loop
dist = 5.0
mouseRay = vt.cursorToRay(x, y, win.size, win.viewport, win.projectionMatrix)
mouseRay *= dist # scale the vector
# set the sphere position by transforming vector to world space
sphere.thePose.pos = camera.transform(mouseRay)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
toReturn = np.zeros((3,), dtype=dtype) if out is None else out
projectionMatrix = np.asarray(projectionMatrix, dtype=dtype)
# compute the inverse model/view and projection matrix
invPM = np.linalg.inv(projectionMatrix)
# transform psychopy mouse coordinates to viewport coordinates
cursorX = cursorX + (winSize[0] / 2.0)
cursorY = cursorY + (winSize[1] / 2.0)
    # get the NDC coordinates of the cursor within the viewport
projX = 2. * (cursorX - viewport[0]) / viewport[2] - 1.0
projY = 2. * (cursorY - viewport[1]) / viewport[3] - 1.0
vecNear = np.array((projX, projY, 0.0, 1.0), dtype=dtype)
vecFar = np.array((projX, projY, 1.0, 1.0), dtype=dtype)
vecNear[:] = vecNear.dot(invPM.T)
vecFar[:] = vecFar.dot(invPM.T)
vecNear /= vecNear[3]
vecFar /= vecFar[3]
# direction vector
toReturn[:] = (vecFar - vecNear)[:3]
if normalize:
mt.normalize(toReturn, out=toReturn)
return toReturn
def visibleBBox(extents, mvp, dtype=None):
"""Check if a bounding box is visible.
    This function checks whether a bounding box intersects the frustum defined
    by the current projection matrix, after the box has been transformed by the
    model-view matrix.
Parameters
----------
extents : array_like
        Bounding box minimum and maximum extents as a 2x3 array. The first row
        is the minimum extents along each axis, and the second row the maximum
        extents (e.g. [[minX, minY, minZ], [maxX, maxY, maxZ]]).
mvp : array_like
4x4 MVP matrix.
dtype : dtype or str, optional
        Data type for arrays, can either be 'float32' or 'float64'. If `None`
        is specified, the default 'float64' is used.
Returns
-------
    bool
        `True` if the bounding box may be visible (intersects the frustum),
        `False` otherwise.
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
# convert input if needed
extents = np.asarray(extents, dtype=dtype)
if not extents.shape == (2, 3):
raise ValueError("Invalid array dimensions for `extents`.")
# ensure matrix is array
mvp = np.asarray(mvp, dtype=dtype)
# convert BBox to corners
corners = mt.computeBBoxCorners(extents, dtype=dtype)
# apply the matrix
corners = corners.dot(mvp.T)
# break up into components
x, y, z = corners[:, 0], corners[:, 1], corners[:, 2]
wpos, wneg = corners[:, 3], -corners[:, 3]
# test if box falls all to one side of the frustum
if np.logical_xor(np.all(x <= wneg), np.all(x >= wpos)): # x-axis
return False
elif np.logical_xor(np.all(y <= wneg), np.all(y >= wpos)): # y-axis
return False
elif np.logical_xor(np.all(z <= wneg), np.all(z >= wpos)): # z-axis
return False
else:
return True
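# A minimal culling sketch using `visibleBBox` (illustrative, not from the
# original module). Assumes `win` exposes `projectionMatrix` and `viewMatrix`
# attributes and that `extents` is the 2x3 min/max array described above.
def _cullAndDrawExample(win, extents, drawFunc):
    """Invoke `drawFunc()` only when the bounding box may be visible."""
    mvp = np.matmul(win.projectionMatrix, win.viewMatrix)
    if visibleBBox(extents, mvp):
        drawFunc()  # issue the (potentially expensive) draw calls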
def visible(points, mvp, mode='discrete', dtype=None):
"""Test if points are visible.
This function is useful for visibility culling, where objects are only drawn
if a portion of them are visible. This test can avoid costly drawing calls
and OpenGL state changes if the object is not visible.
Parameters
----------
points : array_like
Point(s) or bounding box to test. Input array must be Nx3 or Nx4, where
each row is a point. It is recommended that the input be Nx4 since the
`w` component will be appended if the input is Nx3 which adds overhead.
mvp : array_like
4x4 MVP matrix.
mode : str
Test mode. If `'discrete'`, rows of `points` are treated as individual
points. This function will return an array of boolean values with length
equal to the number of rows in `points`, where the value at each index
corresponds to the visibility test results for points at the matching
row index of `points`. If `'group'` a single boolean value is returned,
which is `False` if all points fall to one side of the frustum.
dtype : dtype or str, optional
        Data type for arrays, can either be 'float32' or 'float64'. If `None`
        is specified, the default 'float64' is used.
Returns
-------
bool or ndarray
Test results. The type returned depends on `mode`.
Examples
--------
    Visibility culling: only draw a line connecting two points if it is visible::
linePoints = [[-1.0, -1.0, -1.0, 1.0],
[ 1.0, 1.0, 1.0, 1.0]]
mvp = np.matmul(win.projectionMatrix, win.viewMatrix)
if visible(linePoints, mvp, mode='group'):
# drawing commands here ...
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
# convert input if needed
points = np.asarray(points, dtype=dtype)
# keep track of dimension, return only a single value if ndim==1
ndim = points.ndim
# ensure matrix is array
mvp = np.asarray(mvp, dtype=dtype)
# convert to 2d view
points = np.atleast_2d(np.asarray(points, dtype=dtype))
if points.shape[1] == 3: # make sure we are using Nx4
temp = np.zeros((points.shape[0], 4), dtype=dtype)
temp[:, :3] = points
temp[:, 3] = 1.0
points = temp
# apply the matrix
points = points.dot(mvp.T)
# break up into components
x, y, z = points[:, 0], points[:, 1], points[:, 2]
wpos, wneg = points[:, 3], -points[:, 3]
# test using the appropriate mode
if mode == 'discrete':
toReturn = np.logical_and.reduce(
(x > wneg, x < wpos, y > wneg, y < wpos, z > wneg, z < wpos))
return toReturn[0] if ndim == 1 else toReturn
elif mode == 'group':
# Check conditions for each axis. If all points fall to one side or
# another, the bounding box is not visible. If all points fall outside
# of both sides of the frustum along the same axis, that means the box
# passes through the frustum or the viewer is inside the bounding box
# and therefore is visible. We do an XOR to capture conditions where all
# points fall all to one side only. Lastly, if any point is in the
# bounding box, it will indicate that it's visible.
#
# mdc - This has been vectorized to be super fast, however maybe someone
# smarter than me can figure out something better.
#
if np.logical_xor(np.all(x <= wneg), np.all(x >= wpos)): # x-axis
return False
elif np.logical_xor(np.all(y <= wneg), np.all(y >= wpos)): # y-axis
return False
elif np.logical_xor(np.all(z <= wneg), np.all(z >= wpos)): # z-axis
return False
else:
return True
else:
raise ValueError(
"Invalid `mode` specified, should be either 'discrete' or 'group'.")
| 38,001
|
Python
|
.py
| 882
| 35.764172
| 85
| 0.639922
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,388
|
animationtools.py
|
psychopy_psychopy/psychopy/tools/animationtools.py
|
import numpy as np
def sinusoidalGrowth(startSize, apexSize, duration, time):
"""
Grow or shrink a stimulus over time by a sinusoidal function.
    Parameters
    ----------
    startSize : layout.Size
        Size of the stimulus at the start of the animation
    apexSize : layout.Size
        Size of the stimulus at the apex of the animation
duration : int, float
How long (in seconds) should the animation take to go from its start to its apex?
time : float
How long (in seconds) has passed since the animation started?
"""
# Convert sizes to numpy arrays
if isinstance(startSize, (list, tuple)):
startSize = np.array(startSize)
if isinstance(apexSize, (list, tuple)):
apexSize = np.array(apexSize)
# Get total size change
delta = apexSize - startSize
# Adjust time according to duration
time = time / duration % 2
# Get proportion of delta to adjust by
time = time - 0.5
adj = np.sin(time * np.pi)
adj = (adj + 1) / 2
# Adjust
return startSize + (delta * adj)
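# A minimal render-loop sketch for `sinusoidalGrowth` (illustrative only).
# `stim` and `clock` are assumed PsychoPy-style objects (a stimulus with a
# `.size` attribute and a clock with `.getTime()`); call this once per frame.
def _pulseSizeExample(stim, clock):
    """Pulse `stim` between two sizes, taking 2 s from start to apex."""
    stim.size = sinusoidalGrowth(
        startSize=(0.5, 0.5), apexSize=(1.0, 1.0),
        duration=2.0, time=clock.getTime())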
def sinusoidalMovement(startPos, apexPos, duration, time):
"""
Move a stimulus over time by a sinusoidal function.
    Parameters
    ----------
    startPos : layout.Position
Position of the stimulus at the start of the animation
apexPos : layout.Position
Position of the stimulus at the apex of the animation
duration : int, float
How long (in seconds) should the animation take to go from its start to its apex?
time : float
How long (in seconds) has passed since the animation started?
"""
# Position and Size are both Vectors, so growth and movement are the same calculation
return sinusoidalGrowth(startPos, apexPos, duration, time)
| 1,720
|
Python
|
.py
| 42
| 35.119048
| 89
| 0.691801
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,389
|
stereotools.py
|
psychopy_psychopy/psychopy/tools/stereotools.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tools for stereoscopy.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
| 260
|
Python
|
.py
| 7
| 35.857143
| 79
| 0.729084
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,390
|
systemtools.py
|
psychopy_psychopy/psychopy/tools/systemtools.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tools for interacting with the operating system and getting information about
# the system.
#
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'CAMERA_API_AVFOUNDATION',
'CAMERA_API_DIRECTSHOW',
'CAMERA_API_UNKNOWN',
'CAMERA_API_NULL',
'CAMERA_LIB_FFPYPLAYER',
'CAMERA_LIB_UNKNOWN',
'CAMERA_LIB_NULL',
'CAMERA_UNKNOWN_VALUE',
'CAMERA_NULL_VALUE',
'AUDIO_LIBRARY_PTB',
'getCameras',
'getAudioDevices',
'getAudioCaptureDevices',
'getAudioPlaybackDevices',
'getKeyboards',
'getSerialPorts',
'systemProfilerMacOS',
'getInstalledDevices',
'isPsychopyInFocus'
]
# Keep imports to a minimum here! We don't want to import the whole stack to
# simply populate a drop-down list. Try to keep platform-specific imports inside
# the functions, not on the top-level scope for this module.
import platform
import subprocess
# if platform.system() == 'Windows':
# # this has to be imported here before anything else
# import winrt.windows.devices.enumeration as windows_devices
import sys
import os
import glob
import subprocess as sp
import json
from psychopy.preferences import prefs
from psychopy import logging
# ------------------------------------------------------------------------------
# Constants
#
CAMERA_API_AVFOUNDATION = u'AVFoundation' # mac
CAMERA_API_DIRECTSHOW = u'DirectShow' # windows
# CAMERA_API_VIDEO4LINUX = u'Video4Linux' # linux
# CAMERA_API_OPENCV = u'OpenCV' # opencv, cross-platform API
CAMERA_API_UNKNOWN = u'Unknown' # unknown API
CAMERA_API_NULL = u'Null' # empty field
CAMERA_LIB_FFPYPLAYER = u'FFPyPlayer'
CAMERA_LIB_UNKNOWN = u'Unknown'
CAMERA_LIB_NULL = u'Null'
CAMERA_UNKNOWN_VALUE = u'Unknown' # fields where we couldn't get a value
CAMERA_NULL_VALUE = u'Null' # fields where we couldn't get a value
# audio library identifiers
AUDIO_LIBRARY_PTB = 'ptb' # PsychPortAudio from Psychtoolbox
SERIAL_MAX_ENUM_PORTS = 32 # can be as high as 256 on Win32, not used on Unix
# ------------------------------------------------------------------------------
# Detect VMs (for GitHub Actions, Travis...)
#
def isVM_CI():
"""Attempts to detect TravisCI or GitHub actions virtual machines os.env
Returns the type of VM ('travis', 'github', 'conda') being run or None
"""
import os
if (str(os.environ.get('GITHUB_WORKFLOW')) != 'None'):
return 'github'
elif ("{}".format(os.environ.get('TRAVIS')).lower() == 'true'):
return 'travis'
elif ("{}".format(os.environ.get('CONDA')).lower() == 'true'):
return 'conda'
# ------------------------------------------------------------------------------
# Audio playback and capture devices
#
def getAudioDevices():
"""Get all audio devices.
This function gets all audio devices attached to the system, either playback
or capture. Uses the `psychtoolbox` library to obtain the relevant
information.
This command is supported on Windows, MacOSX and Linux. On Windows, WASAPI
devices are preferred to achieve precise timing and will be returned by
default. To get all audio devices (including non-WASAPI ones), set the
preference `audioForceWASAPI` to `False`.
Returns
-------
dict
Dictionary where the keys are devices names and values are mappings
whose fields contain information about the device.
Examples
--------
Get audio devices installed on this system::
allDevs = getAudioDevices()
The following dictionary is returned by the above command when called on an
Apple MacBook Pro (2022)::
{
'MacBook Pro Microphone': { # audio capture device
'index': 0,
'name': 'MacBook Pro Microphone',
'hostAPI': 'Core Audio',
'outputChannels': 0,
'outputLatency': (0.01, 0.1),
'inputChannels': 1,
'inputLatency': (0.0326984126984127, 0.04285714285714286),
'defaultSampleRate': 44100.0,
'audioLib': 'ptb'
},
'MacBook Pro Speakers': { # audio playback device
'index': 1,
'name': 'MacBook Pro Speakers',
'hostAPI': 'Core Audio',
'outputChannels': 2,
'outputLatency': (0.008480725623582767, 0.018639455782312925),
'inputChannels': 0,
'inputLatency': (0.01, 0.1),
'defaultSampleRate': 44100.0,
'audioLib': 'ptb'
}
}
To determine whether something is a playback or capture device, check the
number of output and input channels, respectively::
# determine if a device is for audio capture
isCapture = allDevs['MacBook Pro Microphone']['inputChannels'] > 0
# determine if a device is for audio playback
isPlayback = allDevs['MacBook Pro Microphone']['outputChannels'] > 0
You may also call :func:`getAudioCaptureDevices` and
:func:`getAudioPlaybackDevices` to get just audio capture and playback
devices.
"""
# use the PTB backend for audio
import psychtoolbox.audio as audio
try:
enforceWASAPI = bool(prefs.hardware["audioForceWASAPI"])
except KeyError:
enforceWASAPI = True # use default if option not present in settings
# query PTB for devices
try:
if enforceWASAPI and sys.platform == 'win32':
allDevs = audio.get_devices(device_type=13)
else:
allDevs = audio.get_devices()
except Exception as err:
# if device detection fails, log warning rather than raising error
logging.warning(str(err))
allDevs = []
# make sure we have an array of descriptors
allDevs = [allDevs] if isinstance(allDevs, dict) else allDevs
# format the PTB dictionary to PsychoPy standards
toReturn = {}
for dev in allDevs:
thisAudioDev = {
'index': int(dev['DeviceIndex']),
'name': dev['DeviceName'],
'hostAPI': dev['HostAudioAPIName'],
'outputChannels': int(dev['NrOutputChannels']),
'outputLatency': (
dev['LowOutputLatency'], dev['HighOutputLatency']),
'inputChannels': int(dev['NrInputChannels']),
'inputLatency': (
dev['LowInputLatency'], dev['HighInputLatency']),
'defaultSampleRate': dev['DefaultSampleRate'],
'audioLib': AUDIO_LIBRARY_PTB
}
toReturn[thisAudioDev['name']] = thisAudioDev
return toReturn
def getAudioCaptureDevices():
"""Get audio capture devices (i.e. microphones) installed on the system.
This command is supported on Windows, MacOSX and Linux. On Windows, WASAPI
devices are preferred to achieve precise timing and will be returned by
default. To get all audio capture devices (including non-WASAPI ones), set
the preference `audioForceWASAPI` to `False`.
Uses the `psychtoolbox` library to obtain the relevant information.
Returns
-------
    list
        List of mappings whose fields contain information about each capture
        device. See :func:`getAudioDevices()` examples for the format of the
        device descriptors.
"""
    allDevices = getAudioDevices()  # get all devices
    inputDevices = []  # list of input devices
if not allDevices:
return inputDevices # empty
# filter for capture devices
for name, devInfo in allDevices.items():
devInfo['device_name'] = name
if devInfo['inputChannels'] < 1:
continue
inputDevices.append(devInfo) # is capture device
return inputDevices
def getAudioPlaybackDevices():
"""Get audio playback devices (i.e. speakers) installed on the system.
This command is supported on Windows, MacOSX and Linux. On Windows, WASAPI
devices are preferred to achieve precise timing and will be returned by
default. To get all audio playback devices (including non-WASAPI ones), set
the preference `audioForceWASAPI` to `False`.
Uses the `psychtoolbox` library to obtain the relevant information.
Returns
-------
dict
Dictionary where the keys are devices names and values are mappings
whose fields contain information about the playback device. See
:func:`getAudioDevices()` examples to see the format of the output.
"""
    allDevices = getAudioDevices()  # get all devices
outputDevices = {} # dict for output devices
if not allDevices:
return outputDevices # empty
# filter for playback devices
for name, devInfo in allDevices.items():
if devInfo['outputChannels'] < 1:
continue
outputDevices[name] = devInfo # is playback device
return outputDevices
# ------------------------------------------------------------------------------
# Cameras
#
def _getCameraInfoMacOS():
"""Get a list of capabilities for the specified associated with a camera
attached to the system.
This is used by `getCameraInfo()` for querying camera details on *MacOS*.
Don't call this function directly unless testing. Requires `AVFoundation`
and `CoreMedia` libraries.
Returns
-------
list of CameraInfo
List of camera descriptors.
"""
if platform.system() != 'Darwin':
raise OSError(
"Cannot query cameras with this function, platform not 'Darwin'.")
# import objc # may be needed in the future for more advanced stuff
import AVFoundation as avf # only works on MacOS
import CoreMedia as cm
# get a list of capture devices
allDevices = avf.AVCaptureDevice.devices()
# get video devices
videoDevices = {}
devIdx = 0
for device in allDevices:
devFormats = device.formats()
if devFormats[0].mediaType() != 'vide': # not a video device
continue
# camera details
cameraName = device.localizedName()
# found video formats
supportedFormats = []
for _format in devFormats:
# get the format description object
formatDesc = _format.formatDescription()
# get dimensions in pixels of the video format
dimensions = cm.CMVideoFormatDescriptionGetDimensions(formatDesc)
frameHeight = dimensions.height
frameWidth = dimensions.width
# Extract the codec in use, pretty useless since FFMPEG uses its
# own conventions, we'll need to map these ourselves to those
# values
codecType = cm.CMFormatDescriptionGetMediaSubType(formatDesc)
# Convert codec code to a FourCC code using the following byte
# operations.
#
# fourCC = ((codecCode >> 24) & 0xff,
# (codecCode >> 16) & 0xff,
# (codecCode >> 8) & 0xff,
# codecCode & 0xff)
#
pixelFormat4CC = ''.join(
[chr((codecType >> bits) & 0xff) for bits in (24, 16, 8, 0)])
# Get the range of supported framerate, use the largest since the
# ranges are rarely variable within a format.
frameRateRange = _format.videoSupportedFrameRateRanges()[0]
frameRateMax = frameRateRange.maxFrameRate()
# frameRateMin = frameRateRange.minFrameRate() # don't use for now
# Create a new camera descriptor
thisCamInfo = {
'index': devIdx,
'name': cameraName,
'pixelFormat': pixelFormat4CC,
'codecFormat': CAMERA_NULL_VALUE,
'frameSize': (int(frameWidth), int(frameHeight)),
'frameRate': frameRateMax,
'cameraAPI': CAMERA_API_AVFOUNDATION
}
supportedFormats.append(thisCamInfo)
devIdx += 1
# add to output dictionary
videoDevices[cameraName] = supportedFormats
return videoDevices
# def _getCameraInfoWindowsWinRT():
# """Get a list of capabilities for the specified associated with a camera
# attached to the system.
#
# This is used by `getCameraInfo()` for querying camera details on Windows.
# Don't call this function directly unless testing. Requires `ffpyplayer`
# to use this function.
#
# Returns
# -------
# list of CameraInfo
# List of camera descriptors.
#
# """
# if platform.system() != 'Windows':
# raise OSError(
# "Cannot query cameras with this function, platform not 'Windows'.")
#
# import asyncio
#
# async def findCameras():
# """Get all video camera devices."""
# videoDeviceClass = 4 # for video capture devices
# return await windows_devices.DeviceInformation.find_all_async(
# videoDeviceClass)
#
# # interrogate the OS using WinRT to acquire camera data
# foundCameras = asyncio.run(findCameras())
#
# # get all the supported modes for the camera
# videoDevices = {}
#
# # iterate over cameras
# for idx in range(foundCameras.size):
# try:
# cameraData = foundCameras.get_at(idx)
# except RuntimeError:
# continue
#
# # get required fields
# cameraName = cameraData.name
#
# videoDevices[cameraName] = {
# 'index': idx,
# 'name': cameraName
# }
#
# return videoDevices
def _getCameraInfoWindows():
"""Get a list of capabilities for the specified associated with a camera
attached to the system.
This is used by `getCameraInfo()` for querying camera details on Windows.
Don't call this function directly unless testing. Requires `ffpyplayer`
to use this function.
Returns
-------
list of CameraInfo
List of camera descriptors.
"""
if platform.system() != 'Windows':
raise OSError(
"Cannot query cameras with this function, platform not 'Windows'.")
# import this to get camera details
# NB - In the future, we should consider using WinRT to query this info
# to avoid the ffpyplayer dependency.
from ffpyplayer.tools import list_dshow_devices
# FFPyPlayer can query the OS via DirectShow for Windows cameras
videoDevs, _, names = list_dshow_devices()
# get all the supported modes for the camera
videoDevices = []
# iterate over names
devIndex = 0
for devURI in videoDevs.keys():
supportedFormats = []
cameraName = names[devURI]
for _format in videoDevs[devURI]:
pixelFormat, codecFormat, frameSize, frameRateRng = _format
_, frameRateMax = frameRateRng
thisCamInfo = {
'device_name': cameraName,
'index': devIndex,
'pixelFormat': pixelFormat,
'codecFormat': codecFormat,
'frameSize': frameSize,
'frameRate': frameRateMax,
'cameraAPI': CAMERA_API_DIRECTSHOW
}
supportedFormats.append(thisCamInfo)
devIndex += 1
videoDevices.append(supportedFormats)
return videoDevices
# array of registered PIDs which PsychoPy considers to be safe
_pids = [
os.getpid(),
os.getppid()
]
def registerPID(pid):
"""
Register a given window with PsychoPy, marking it as safe to e.g. perform keylogging in.
Parameters
----------
pid : int
Process ID (PID) of the window to register
"""
global _pids
# add to list of registered IDs
if pid not in _pids:
_pids.append(pid)
def getCurrentPID():
"""
Get the PID of the window which currently has focus.
"""
if sys.platform == "win32":
import win32gui
import win32process
# get ID of top window
winID = win32gui.GetForegroundWindow()
# get parent PID (in case it's a child of a registered process)
winID = win32process.GetWindowThreadProcessId(winID)[-1]
elif sys.platform == "darwin":
from AppKit import NSWorkspace
import psutil
# get active application info
win = NSWorkspace.sharedWorkspace().frontmostApplication()
# get ID of active application
winID = win.processIdentifier()
# get parent PID (in case it's a child of a registered process)
winID = psutil.Process(winID).ppid()
elif sys.platform == "linux":
# get window ID
proc = subprocess.Popen(
['xprop', '-root', '_NET_ACTIVE_WINDOW'],
stdout=subprocess.PIPE
)
stdout, _ = proc.communicate()
winID = str(stdout).split("#")[-1].strip()
else:
raise OSError(
f"Cannot get window PID on system '{sys.platform}'."
)
return winID
def isRegisteredApp():
"""
Query whether the PID of the currently focused window is recognised by PsychoPy, i.e. whether
it is safe to perform keylogging in.
The PsychoPy process is marked as safe by default, any others need to be added as safe via
`registerPID` with the window's PID.
Returns
-------
bool
True if a PsychoPy window is in focus, False otherwise.
"""
# is the current PID in the _pids array?
return getCurrentPID() in _pids
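# A minimal sketch of the PID-registration workflow (illustrative only):
# register an external window's PID, then check whether the currently
# focused window is considered safe for keylogging.
def _registerAndCheckExample(externalPid):
    registerPID(externalPid)  # mark the external process as safe
    return isRegisteredApp()  # True if the focused window's PID is registered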
# Mapping for platform specific camera getter functions used by `getCameras`.
# We're doing this to allow for plugins to add support for cameras on other
# platforms.
_cameraGetterFuncTbl = {
'Darwin': _getCameraInfoMacOS,
'Windows': _getCameraInfoWindows
}
def getCameras():
"""Get information about installed cameras and their formats on this system.
    The command presently only works on Windows and MacOSX. Linux support for
cameras is not available yet.
Returns
-------
dict
        Mapping where camera names (`str`) are keys and values are an array of
`CameraInfo` objects.
"""
systemName = platform.system() # get the system name
# lookup the function for the given platform
getCamerasFunc = _cameraGetterFuncTbl.get(systemName, None)
if getCamerasFunc is None: # if unsupported
raise OSError(
"Cannot get cameras, unsupported platform '{}'.".format(
systemName))
return getCamerasFunc()
# ------------------------------------------------------------------------------
# Keyboards
#
def getKeyboards():
"""Get information about attached keyboards.
This command works on Windows, MacOSX and Linux.
Returns
-------
dict
Dictionary where the keys are device names and values are mappings
whose fields contain information about that device. See the *Examples*
section for field names.
Notes
-----
* Keyboard names are generated (taking the form of "Generic Keyboard n") if
the OS does not report the name.
Examples
--------
Get keyboards attached to this system::
installedKeyboards = getKeyboards()
Running the previous command on an Apple MacBook Pro (2022) returns the
following dictionary::
{
'TouchBarUserDevice': {
'usagePageValue': 1,
'usageValue': 6,
'usageName': 'Keyboard',
'index': 4,
'transport': '',
'vendorID': 1452,
'productID': 34304,
'version': 0.0,
'manufacturer': '',
'product': 'TouchBarUserDevice',
'serialNumber': '',
'locationID': 0,
'interfaceID': -1,
'totalElements': 1046,
'features': 0,
'inputs': 1046,
'outputs': 0,
'collections': 1,
'axes': 0,
'buttons': 0,
'hats': 0,
'sliders': 0,
'dials': 0,
'wheels': 0,
'touchDeviceType': -1,
'maxTouchpoints': -1},
'Generic Keyboard 0': {
'usagePageValue': 1,
'usageValue': 6,
'usageName': 'Keyboard',
'index': 13,
# snip ...
'dials': 0,
'wheels': 0,
'touchDeviceType': -1,
'maxTouchpoints': -1
}
}
"""
# use PTB to query keyboards, might want to also use IOHub at some point
from psychtoolbox import hid
# use PTB to query for keyboards
indices, names, keyboards = hid.get_keyboard_indices()
toReturn = []
if not indices:
return toReturn # just return if no keyboards found
# ensure these are all the same length
assert len(indices) == len(names) == len(keyboards), \
"Got inconsistent array length from `get_keyboard_indices()`"
missingNameIdx = 0 # for keyboard with empty names
for i, kbIdx in enumerate(indices):
name = names[i]
if not name:
name = ' '.join(('Generic Keyboard', str(missingNameIdx)))
missingNameIdx += 1
keyboard = keyboards[i]
keyboard['device_name'] = name
# reformat values since PTB returns everything as a float
for key, val in keyboard.items():
if isinstance(val, float) and key not in ('version',):
keyboard[key] = int(val)
toReturn.append(keyboard)
return toReturn
# ------------------------------------------------------------------------------
# Connectivity
#
def getSerialPorts():
"""Get serial ports attached to this system.
Serial ports are used for inter-device communication using the RS-232/432
protocol. This function gets a list of available ports and their default
configurations as specified by the OS. Ports that are in use by another
process are not returned.
This command is supported on Windows, MacOSX and Linux. On Windows, all
available ports are returned regardless if anything is connected to them,
so long as they aren't in use. On Unix(-likes) such as MacOSX and Linux,
port are only returned if there is a device attached and is not being
accessed by some other process. MacOSX and Linux also have no guarantee port
names are persistent, where a physical port may not always be assigned the
same name or enum index when a device is connected or after a system
reboot.
Returns
-------
dict
Mapping (`dict`) where keys are serial port names (`str`) and values
are mappings of the default settings of the port (`dict`). See
*Examples* below for the format of the returned data.
Examples
--------
Getting available serial ports::
allPorts = getSerialPorts()
On a MacBook Pro (2022) with an Arduino Mega (2560) connected to the USB-C
port, the following dictionary is returned::
{
'/dev/cu.Bluetooth-Incoming-Port': {
'index': 0,
'port': '/dev/cu.Bluetooth-Incoming-Port',
'baudrate': 9600,
'bytesize': 8,
'parity': 'N',
'stopbits': 1,
'xonxoff': False,
'rtscts': False,
'dsrdtr': False
},
'/dev/cu.usbmodem11101': {
'index': 1,
# ... snip ...
'dsrdtr': False
},
'/dev/tty.Bluetooth-Incoming-Port': {
'index': 2,
# ... snip ...
},
'/dev/tty.usbmodem11101': {
'index': 3,
# ... snip ...
}
}
"""
try:
import serial # pyserial
except ImportError:
raise ImportError("Cannot import `pyserial`, check your installation.")
# get port names
thisSystem = platform.system()
if thisSystem == 'Windows':
portNames = [
'COM{}'.format(i + 1) for i in range(SERIAL_MAX_ENUM_PORTS)]
elif thisSystem == 'Darwin':
portNames = glob.glob('/dev/tty.*') + glob.glob('/dev/cu.*')
portNames.sort() # ensure we get things back in the same order
elif thisSystem == 'Linux' or thisSystem == 'Linux2':
portNames = glob.glob('/dev/tty[A-Za-z]*')
portNames.sort() # ditto
else:
raise EnvironmentError(
"System '{}' is not supported by `getSerialPorts()`".format(
thisSystem))
# enumerate over ports now that we have the names
portEnumIdx = 0
toReturn = []
for name in portNames:
try:
with serial.Serial(name) as ser:
portConf = { # port information dict
'device_name': name,
'index': portEnumIdx,
'port': ser.port,
'baudrate': ser.baudrate,
'bytesize': ser.bytesize,
'parity': ser.parity,
'stopbits': ser.stopbits,
# 'timeout': ser.timeout,
# 'writeTimeout': ser.write_timeout,
# 'interByteTimeout': ser.inter_byte_timeout,
'xonxoff': ser.xonxoff,
'rtscts': ser.rtscts,
'dsrdtr': ser.dsrdtr,
# 'rs485_mode': ser.rs485_mode
}
toReturn.append(portConf)
portEnumIdx += 1
except (OSError, serial.SerialException):
# no port found with `name` or cannot be opened
pass
return toReturn
# ------------------------------------------------------------------------------
# Miscellaneous utilities
#
def systemProfilerWindowsOS(
parseStr=True,
connected=None,
problem=None,
instanceid=None,
deviceid=None,
classname=None,
classid=None,
problemcode=None,
busname=None,
busid=None,
bus=False,
deviceids=False,
relations=False,
services=False,
stack=False,
drivers=False,
interfaces=False,
properties=False,
resources=False):
"""Get information about devices via Windows'
[pnputil](https://learn.microsoft.com/en-us/windows-hardware/drivers/devtest/pnputil-command-syntax#enum-devices).
Parameters
----------
parseStr : bool
Whether to parse the string output from pnputil into a dict (True) or
keep it as a string for each device (False)
connected : bool or None
Filter by connection state of devices, leave as None for no filter.
problem : bool or None
Filter by devices with problems, leave as None for no filter.
instanceid : str or None
Filter by device instance ID, leave as None for no filter.
deviceid : str or None
Filter by device hardware and compatible ID, leave as None for no
filter. Only works on Windows 11 (version 22H2 and up).
classname : str or None
Filter by device class name, leave as None for no filter.
classid : str or None
Filter by device class GUID, leave as None for no filter.
problemcode : str or None
Filter by specific problem code, leave as None for no filter.
busname : str or None
Filter by bus enumerator name, leave as None for no filter. Only works
on Windows 11 (version 21H2 and up).
busid : str or None
Filter by bus type GUID, leave as None for no filter. Only works on
Windows 11 (version 21H2 and up).
bus : bool
Display bus enumerator name and bus type GUID. Only works on Windows 11
(version 21H2 and up).
deviceids : bool
Display hardware and compatible IDs. Only works on Windows 11
(version 21H2 and up).
relations : bool
Display parent and child device relations.
services : bool
Display device services. Only works on Windows 11 (version 21H2 and up).
stack : bool
Display effective device stack information. Only works on Windows 11
(version 21H2 and up).
drivers : bool
Display matching and installed drivers.
interfaces : bool
Display device interfaces. Only works on Windows 11 (version 21H2 and up).
properties : bool
Display all device properties. Only works on Windows 11 (version 21H2 and up).
resources : bool
Display device resources. Only works on Windows 11 (version 22H2 and up).
Returns
-------
list
List of devices, with their details parsed into dicts if parseStr is True.
"""
def _constructCommand():
"""
Construct command based on method input.
Returns
-------
str
The command to pass to terminal.
"""
# make sure mutually exclusive inputs aren't supplied
assert instanceid is None or deviceid is None, (
"Cannot filter by both instance and device ID, please leave one input as None."
)
        assert classname is None or classid is None, (
            "Cannot filter by both class name and class ID, please leave one input as None."
        )
        assert busname is None or busid is None, (
            "Cannot filter by both bus name and bus ID, please leave one input as None."
)
# start off with the core command
cmd = ["pnputil", "/enum-devices"]
# append connected flag
if connected is True:
cmd.append("/connected")
elif connected is False:
cmd.append("/disconnected")
# append problem flag
        if problem and problemcode is None:  # bare flag; a specific code is appended below
cmd.append("/problem")
# append filter flags if they're not None
for key, val in {
'instanceid': instanceid,
'deviceid': deviceid,
'class': classname or classid,
'problem': problemcode,
'bus': busid or busname,
}.items():
if val is None:
continue
cmd.append(f"/{key}")
cmd.append(f'"{val}"')
# append detail flags if they're True
for key, val in {
'bus': all((bus, busname is None, busid is None)),
'deviceids': deviceids,
'relations': relations,
'services': services,
'stack': stack,
'drivers': drivers,
'interfaces': interfaces,
'properties': properties,
'resources': resources
}.items():
if val:
cmd.append(f"/{key}")
# log command for debugging purposes
logging.debug("Calling command '{}'".format(" ".join(cmd)))
return cmd
def _parseDeviceStr(deviceStr):
"""
Parse the string of a single device into a dict
Parameters
----------
deviceStr : str
String in the format returned by pnputil.
Returns
-------
dict
Dict of device details
"""
# dict for this device
device = {}
# values to keep track of relative position in dict
stack = []
val = key = None
lastLvl = -4
# split device into lines
allLines = deviceStr.split("\r\n")
for lineNum, line in enumerate(allLines):
# get key:value pair
extension = False
if line.endswith(":"):
key, val = line, ""
elif ": " in line:
key, val = line.split(": ", maxsplit=1)
else:
# with no key, this value extends the last - make sure the last is a list
if val == "":
val = []
if not isinstance(val, list):
val = [val]
# add to previous value
val.append(line.strip())
extension = True
# sanitise val and key
key = str(key)
if isinstance(val, str):
val = val.strip()
# figure out if we've moved up/down a level based on spaces before key name
lvl = len(key) - len(key.strip())
# update stack so we know where we are
if not extension:
if lvl > lastLvl:
stack.append(key.strip())
elif lvl < lastLvl:
backInx = -int((lastLvl-lvl)/4)+1
stack = stack[:backInx]
stack.append(key.strip())
else:
stack[-1] = key.strip()
# set current value in stack
subdict = device
for i, subkey in enumerate(stack):
# if we're at the final key, set value
if i == len(stack) - 1:
subdict[subkey] = val
else:
# if this is the first entry in a subdict, make sure the subdict *is* a dict
if not isinstance(subdict[subkey], dict):
subdict[subkey] = {}
subdict = subdict[subkey]
# take note of last level
lastLvl = lvl
return device
# send command
cmd = _constructCommand()
p = sp.Popen(
" ".join(cmd),
stdout=sp.PIPE,
stderr=sp.PIPE,
)
# receive output in utf8
resp, err = p.communicate()
resp = resp.decode("utf-8", errors="ignore")
# list to store output
devices = []
# split into devices
for thisDeviceStr in resp.split("\r\n\r\nInstance ID")[1:]:
thisDeviceStr = "Instance ID" + thisDeviceStr
if parseStr:
thisDevice = _parseDeviceStr(thisDeviceStr)
else:
thisDevice = thisDeviceStr
# add device to devices list
devices.append(thisDevice)
return devices
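# A minimal sketch of a typical `systemProfilerWindowsOS` query (illustrative
# only). The 'Camera' class name and the 'Device Description' field are
# assumptions about pnputil's output, not guaranteed values.
def _listConnectedCamerasExample():
    devices = systemProfilerWindowsOS(connected=True, classname="Camera")
    # entries are dicts when parseStr=True (the default)
    return [dev.get("Device Description") for dev in devices]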
def systemProfilerMacOS(dataTypes=None, detailLevel='basic', timeout=180):
"""Call the MacOS system profiler and return data in a JSON format.
Parameters
----------
dataTypes : str, list or None
Identifier(s) for the data to retrieve. All data types available will
be returned if `None`. See output of shell command `system_profiler
-listDataTypes` for all possible values. Specifying data types also
speeds up the time it takes for this function to return as superfluous
information is not queried.
    detailLevel : str
Level of detail for the report. Possible values are `'mini'`, `'basic'`,
or `'full'`. Note that increasing the level of detail will expose
personally identifying information in the resulting report. Best
practice is to use the lowest level of detail needed to obtain the
desired information, or use `dataTypes` to limit what information is
returned.
timeout : float or int
Amount of time to spend gathering data in seconds. Default is 180
seconds, while specifying 0 means no timeout.
Returns
-------
str
Result of the `system_profiler` call as a JSON formatted string. You can
pass the string to a JSON library to parse out what information is
desired.
Examples
--------
Get details about cameras attached to this system::
dataTypes = "SPCameraDataType" # data to query
systemReportJSON = systemProfilerMacOS(dataTypes, detailLevel='basic')
# >>> print(systemReportJSON)
# {
# "SPCameraDataType" : [
# ...
# ]
# }
Parse the result using a JSON library::
import json
systemReportJSON = systemProfilerMacOS(
"SPCameraDataType", detailLevel='mini')
cameraInfo = json.loads(systemReportJSON)
# >>> print(cameraInfo)
# {'SPCameraDataType': [{'_name': 'Live! Cam Sync 1080p',
# 'spcamera_model-id': 'UVC Camera VendorID_1054 ProductID_16541',
# 'spcamera_unique-id': '0x2200000041e409d'}]
"""
if platform.system() != 'Darwin':
raise OSError(
"Cannot call `systemProfilerMacOS`, detected OS is not 'darwin'."
)
if isinstance(dataTypes, (tuple, list)):
dataTypesStr = " ".join(dataTypes)
elif isinstance(dataTypes, str):
dataTypesStr = dataTypes
elif dataTypes is None:
dataTypesStr = ""
else:
raise TypeError(
"Expected type `list`, `tuple`, `str` or `NoneType` for parameter "
"`dataTypes`")
if detailLevel not in ('mini', 'basic', 'full'):
raise ValueError(
"Value for parameter `detailLevel` should be one of 'mini', 'basic'"
" or 'full'."
)
# build the command
shellCmd = ['system_profiler']
if dataTypesStr:
shellCmd.append(dataTypesStr)
shellCmd.append('-json') # ask for report in JSON formatted string
shellCmd.append('-detailLevel') # set detail level
shellCmd.append(detailLevel)
shellCmd.append('-timeout') # set timeout
shellCmd.append(str(timeout))
# call the system profiler
systemProfilerCall = sp.Popen(
shellCmd,
stdout=sp.PIPE)
systemProfilerRet = systemProfilerCall.communicate()[0] # bytes
# We're going to need to handle errors from this command at some point, for
# now we're leaving that up to the user.
    return systemProfilerRet.decode("utf-8")  # JSON formatted string, per the docstring
# Cache data from the last call of `getInstalledDevices()` to avoid having to
# query the system again. This is useful for when we want to access the same
# data multiple times in a script.
_installedDeviceCache = None # cache for installed devices
def getInstalledDevices(deviceType='all', refresh=False):
"""Get information about installed devices.
This command gets information about all devices relevant to PsychoPy that
are installed on the system and their supported settings.
Parameters
----------
deviceType : str
Type of device to query. Possible values are `'all'`, `'speaker'`,
`'microphone'`, `'keyboard'`, or `'serial'`. Default is `'all'`.
refresh : bool
Whether to refresh the cache of installed devices. Default is `False`.
Returns
-------
dict
Mapping of hardware devices and their supported settings. See *Examples*
Examples
--------
Get all installed devices::
allDevices = getInstalledDevices('all')
Get all installed audio devices and access supported settings::
audioDevices = getInstalledDevices('audio')
speakers = audioDevices['speakers']
microphones = audioDevices['microphones']
# get supported sampling rates for the first microphone
micSampleRates = microphones[0]['sampling_rate'] # list of ints
Convert the result to JSON::
import json
allDevices = getInstalledDevices('all')
allDevicesJSON = json.dumps(allDevices, indent=4)
print(allDevicesJSON) # print the result
"""
# These functions are used to get information about installed devices using
# valrious methods. Were possible, we should avoid importing any libraries
# that aren't part of the standard library to avoid dependencies and
# overhead.
def _getInstalledAudioDevices():
"""Get information about installed audio playback and capture devices
and their supported settings.
This uses PTB to query the system for audio devices and their supported
settings. The result is returned as a dictionary.
Returns
-------
dict
Supported microphone settings for connected audio capture devices.
"""
allAudioDevices = getAudioDevices()
# get all microphones by name
foundDevices = []
for _, devInfo in allAudioDevices.items():
if devInfo["name"] not in foundDevices: # unique names only
if devInfo["inputChannels"] > 0:
foundDevices.append((
devInfo["name"], devInfo["index"],
devInfo["inputChannels"], 'microphone'))
if devInfo["outputChannels"] > 0:
foundDevices.append((
devInfo["name"], devInfo["index"],
devInfo["outputChannels"],'speaker'))
        # now get settings for each audio device
devSettings = {'microphone': [], 'speaker': []}
for devName, devIndex, devChannels, devClass in foundDevices:
supportedSampleRates = []
for _, devInfo in allAudioDevices.items():
# check if we have a dictionary for this device
if devInfo["name"] != devName:
continue
supportedSampleRates.append(
int(devInfo["defaultSampleRate"]))
                channels = devInfo[
                    "outputChannels" if devClass == 'speaker' else "inputChannels"]
devSettings[devClass].append(
{
"device_name": devName,
"device_index": devIndex,
"sampling_rate": supportedSampleRates,
"channels": devChannels
}
)
return devSettings
def _getInstalledCameras():
"""Get information about installed cameras and their supported settings.
This uses various libraries to query the system for cameras and their
supported settings. The result is returned as a dictionary.
Returns
-------
dict
Supported camera settings for connected cameras.
"""
allCameras = getCameras()
        # collect settings for each camera we found
deviceSettings = []
for devIdx, devInfo in enumerate(allCameras):
devName = devInfo[0]['device_name']
allModes = []
for thisInfo in devInfo:
# create mode string
modeStr = "{}x{}@{}Hz".format(
thisInfo["frameSize"][0], thisInfo["frameSize"][1],
thisInfo["frameRate"])
allModes.append(modeStr)
deviceSettings.append({
"device_name": devName,
"device_index": devIdx,
# "pixel_format": devInfo["pixelFormat"],
# "codec": devInfo["codecFormat"],
"mode": allModes
})
return {'camera': deviceSettings}
# check if we support getting the requested device type
if deviceType not in ('all', 'speaker', 'microphone', 'camera',
'keyboard', 'serial'):
raise ValueError(
"Requested device type '{}' is not supported.".format(deviceType)
)
global _installedDeviceCache # use the global cache
if not refresh and _installedDeviceCache is not None:
toReturn = _installedDeviceCache
else:
# refresh device cache if requested or if it's empty
toReturn = {}
toReturn.update(_getInstalledAudioDevices()) # audio devices
toReturn.update({'keyboard': getKeyboards()}) # keyboards
toReturn.update({'serial': getSerialPorts()}) # serial ports
if not platform.system().startswith('Linux'): # cameras
toReturn.update(_getInstalledCameras())
else:
logging.error(
"Cannot get camera settings on Linux, not supported.")
_installedDeviceCache = toReturn # update the cache
# append supported actions from device manager
from psychopy.hardware.manager import _deviceMethods
    for devType in toReturn:  # avoid shadowing the `deviceType` parameter
        # get supported actions for this device type
        actions = _deviceMethods.get(devType, {})
        # we only want the names here
        actions = list(actions)
        # append to each dict
        for i in range(len(toReturn[devType])):
            toReturn[devType][i]['actions'] = actions
if deviceType != 'all': # return only the requested device type
return toReturn[deviceType]
return toReturn
if __name__ == "__main__":
pass
| 44,734
|
Python
|
.py
| 1,113
| 31.265948
| 118
| 0.604281
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
5,391
|
colorspacetools.py
|
psychopy_psychopy/psychopy/tools/colorspacetools.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Tools related to working with various color spaces.
The routines provided in the module are used to transform color coordinates
between spaces. Most of the functions here are *vectorized*, allowing for array
inputs to convert multiple color values at once.
**As of version 2021.0 of PsychoPy**, users ought to use the
:class:`~psychopy.colors.Color` class for working with color coordinate values.
"""
__all__ = ['srgbTF', 'rec709TF', 'cielab2rgb', 'cielch2rgb', 'dkl2rgb',
'dklCart2rgb', 'rgb2dklCart', 'hsv2rgb', 'rgb2lms', 'lms2rgb',
'rgb2hsv', 'rescaleColor']
import numpy
from psychopy import logging
from psychopy.tools.coordinatetools import sph2cart
def unpackColors(colors): # used internally, not exported by __all__
"""Reshape an array of color values to Nx3 format.
Many color conversion routines operate on color data in Nx3 format, where
rows are color space coordinates. 1x3 and NxNx3 input are converted to Nx3
format. The original shape and dimensions are also returned, allowing the
color values to be returned to their original format using 'reshape'.
Parameters
----------
colors : ndarray, list or tuple of floats
Nx3 or NxNx3 array of colors, last dim must be size == 3 specifying each
color coordinate.
Returns
-------
tuple
Nx3 ndarray of converted colors, original shape, original dims.
"""
# handle the various data types and shapes we might get as input
colors = numpy.asarray(colors, dtype=float)
orig_shape = colors.shape
orig_dim = colors.ndim
if orig_dim == 1 and orig_shape[0] == 3:
colors = numpy.array(colors, ndmin=2)
elif orig_dim == 2 and orig_shape[1] == 3:
pass # NOP, already in correct format
elif orig_dim == 3 and orig_shape[2] == 3:
colors = numpy.reshape(colors, (-1, 3))
else:
raise ValueError(
"Invalid input dimensions or shape for input colors.")
return colors, orig_shape, orig_dim
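# A minimal round-trip sketch for `unpackColors` (illustrative only): flatten
# 1x3/Nx3/NxNx3 input to Nx3, apply a per-color operation, then restore the
# original shape, mirroring what the conversion routines below do internally.
def _unpackRoundTripExample(colors):
    flat, orig_shape, orig_dim = unpackColors(colors)
    result = flat * 1.0  # stand-in for a real per-color operation
    if orig_dim == 1:
        return result[0]
    elif orig_dim == 3:
        return numpy.reshape(result, orig_shape)
    return result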
def rescaleColor(rgb, convertTo='signed', clip=False):
"""Rescale RGB colors.
This function can be used to convert RGB value triplets from the PsychoPy
signed color format to the normalized OpenGL format.
PsychoPy represents colors using values between -1 and 1. However, colors
are commonly represented using values between 0 and 1 when working with
OpenGL and various other contexts. This function simply rescales values to
switch between these formats.
Parameters
----------
rgb : `array_like`
1-, 2-, 3-D vector of RGB coordinates to convert. The last dimension
should be length-3 in all cases, specifying a single coordinate.
convertTo : `str`
If 'signed', this function will assume `rgb` is in OpenGL format [0:1]
and rescale them to PsychoPy's format [-1:1]. If 'unsigned', input
values are treated as OpenGL format and will be rescaled to use
PsychoPy's. Default is 'signed'.
clip : bool
Clip values to the range that can be represented on a display. This is
an optional step. Default is `False`.
Returns
-------
ndarray
Rescaled values with the same shape as `rgb`.
Notes
-----
The `convertTo` argument also accepts strings 'opengl' and 'psychopy'
as substitutes for 'signed' and 'unsigned', respectively. This might be more
explicit in some contexts.
Examples
--------
Convert a signed RGB value to unsigned format::
rgb_signed = [-1, 0, 1]
rgb_unsigned = rescaleColor(rgb_signed, convertTo='unsigned')
"""
# While pretty simple, this operation is done often enough to justify having
# its own function to avoid writing it out all the time. It also explicitly
# shows the direction of which values are being rescaled to make code more
# readable.
if convertTo == 'signed' or convertTo == 'psychopy':
rgb_out = rgb * 2 - 1 # from OpenGL to PsychoPy format
elif convertTo == 'unsigned' or convertTo == 'opengl':
rgb_out = (rgb + 1) / 2. # from PsychoPy to OpenGL
else:
raise ValueError("Invalid value for `convertTo`, can either be "
"'signed' or 'unsigned'.")
if clip:
rgb_out = numpy.clip(rgb_out, -1 if convertTo == 'signed' else 0, 1)
return rgb_out
def srgbTF(rgb, reverse=False, **kwargs):
"""Apply sRGB transfer function (or gamma) to linear RGB values.
Input values must have been transformed using a conversion matrix derived
from sRGB primaries relative to D65.
Parameters
----------
rgb : tuple, list or ndarray of floats
Nx3 or NxNx3 array of linear RGB values, last dim must be size == 3
specifying RBG values.
reverse : boolean
If True, the reverse transfer function will convert sRGB -> linear RGB.
Returns
-------
ndarray
Array of transformed colors with same shape as input.
"""
rgb, orig_shape, orig_dim = unpackColors(rgb)
# apply the sRGB TF
if not reverse:
# applies the sRGB transfer function (linear RGB -> sRGB)
to_return = numpy.where(
rgb <= 0.0031308,
rgb * 12.92,
(1.0 + 0.055) * rgb ** (1.0 / 2.4) - 0.055)
else:
# do the inverse (sRGB -> linear RGB)
to_return = numpy.where(
rgb <= 0.04045,
rgb / 12.92,
((rgb + 0.055) / 1.055) ** 2.4)
if orig_dim == 1:
to_return = to_return[0]
elif orig_dim == 3:
to_return = numpy.reshape(to_return, orig_shape)
return to_return
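# A quick self-check sketch for `srgbTF` (illustrative only): the forward
# transfer function followed by the reverse should recover the original
# linear values up to floating-point precision.
def _srgbRoundTripExample():
    linear = numpy.array([[0.0, 0.002, 0.18],
                          [0.25, 0.5, 1.0]])  # Nx3, as unpackColors expects
    encoded = srgbTF(linear)
    recovered = srgbTF(encoded, reverse=True)
    return numpy.allclose(linear, recovered)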
def rec709TF(rgb, **kwargs):
"""Apply the Rec 709 transfer function (or gamma) to linear RGB values.
This transfer function is defined in the ITU-R BT.709 (2015) recommendation
document (https://www.itu.int/rec/R-REC-BT.709-6-201506-I/en) and is
commonly used with HDTV televisions.
Parameters
----------
rgb : tuple, list or ndarray of floats
Nx3 or NxNx3 array of linear RGB values, last dim must be size == 3
specifying RBG values.
Returns
-------
ndarray
Array of transformed colors with same shape as input.
"""
rgb, orig_shape, orig_dim = unpackColors(rgb)
# applies the Rec.709 transfer function (linear RGB -> Rec.709 RGB)
# mdc - I didn't compute the inverse for this one.
to_return = numpy.where(rgb >= 0.018,
1.099 * rgb ** 0.45 - 0.099,
4.5 * rgb)
if orig_dim == 1:
to_return = to_return[0]
elif orig_dim == 3:
to_return = numpy.reshape(to_return, orig_shape)
return to_return
def cielab2rgb(lab,
whiteXYZ=None,
conversionMatrix=None,
transferFunc=None,
clip=False,
**kwargs):
"""Transform CIE L*a*b* (1976) color space coordinates to RGB tristimulus
values.
CIE L*a*b* are first transformed into CIE XYZ (1931) color space, then the
RGB conversion is applied. By default, the sRGB conversion matrix is used
with a reference D65 white point. You may specify your own RGB conversion
matrix and white point (in CIE XYZ) appropriate for your display.
Parameters
----------
lab : tuple, list or ndarray
1-, 2-, 3-D vector of CIE L*a*b* coordinates to convert. The last
dimension should be length-3 in all cases specifying a single
coordinate.
whiteXYZ : tuple, list or ndarray
1-D vector coordinate of the white point in CIE-XYZ color space. Must be
the same white point needed by the conversion matrix. The default
white point is D65 if None is specified, defined as X, Y, Z = 0.9505,
1.0000, 1.0890.
conversionMatrix : tuple, list or ndarray
3x3 conversion matrix to transform CIE-XYZ to RGB values. The default
matrix is sRGB with a D65 white point if None is specified. Note that
values must be gamma corrected to appear correctly according to the sRGB
standard.
transferFunc : pyfunc or None
Signature of the transfer function to use. If None, values are kept as
linear RGB (it's assumed your display is gamma corrected via the
hardware CLUT). The TF must be appropriate for the conversion matrix
supplied (default is sRGB). Additional arguments to 'transferFunc' can
be passed by specifying them as keyword arguments. Gamma functions that
come with PsychoPy are 'srgbTF' and 'rec709TF', see their docs for more
information.
clip : bool
Make all output values representable by the display. However, colors
outside of the display's gamut may not be valid!
Returns
-------
ndarray
Array of RGB tristimulus values.
Example
-------
Converting a CIE L*a*b* color to linear RGB::
import psychopy.tools.colorspacetools as cst
cielabColor = (53.0, -20.0, 0.0) # greenish color (L*, a*, b*)
rgbColor = cst.cielab2rgb(cielabColor)
Using a transfer function to convert to sRGB::
rgbColor = cst.cielab2rgb(cielabColor, transferFunc=cst.srgbTF)
"""
lab, orig_shape, orig_dim = unpackColors(lab)
if conversionMatrix is None:
# XYZ -> sRGB conversion matrix, assumes D65 white point
# mdc - computed using makeXYZ2RGB with sRGB primaries
conversionMatrix = numpy.asarray([
[3.24096994, -1.53738318, -0.49861076],
[-0.96924364, 1.8759675, 0.04155506],
[0.05563008, -0.20397696, 1.05697151]
])
if whiteXYZ is None:
# D65 white point in CIE-XYZ color space
# See: https://en.wikipedia.org/wiki/SRGB
whiteXYZ = numpy.asarray([0.9505, 1.0000, 1.0890])
L = lab[:, 0] # lightness
a = lab[:, 1] # green (-) <-> red (+)
b = lab[:, 2] # blue (-) <-> yellow (+)
wht_x, wht_y, wht_z = whiteXYZ # white point in CIE-XYZ color space
# convert Lab to CIE-XYZ color space
# uses reverse transformation found here:
# https://en.wikipedia.org/wiki/Lab_color_space
xyz_array = numpy.zeros(lab.shape)
s = (L + 16.0) / 116.0
xyz_array[:, 0] = s + (a / 500.0)
xyz_array[:, 1] = s
xyz_array[:, 2] = s - (b / 200.0)
# evaluate the inverse f-function
delta = 6.0 / 29.0
xyz_array = numpy.where(xyz_array > delta,
xyz_array ** 3.0,
(xyz_array - (4.0 / 29.0)) * (3.0 * delta ** 2.0))
# multiply in white values
xyz_array[:, 0] *= wht_x
xyz_array[:, 1] *= wht_y
xyz_array[:, 2] *= wht_z
# convert to sRGB using the specified conversion matrix
rgb_out = numpy.asarray(numpy.dot(xyz_array, conversionMatrix.T))
# apply sRGB gamma correction if requested
if transferFunc is not None:
rgb_out = transferFunc(rgb_out, **kwargs)
# clip unrepresentable colors if requested
if clip:
rgb_out = numpy.clip(rgb_out, 0.0, 1.0)
# make the output match the dimensions/shape of input
if orig_dim == 1:
rgb_out = rgb_out[0]
elif orig_dim == 3:
rgb_out = numpy.reshape(rgb_out, orig_shape)
return rescaleColor(rgb_out, convertTo='psychopy')
def cielch2rgb(lch,
whiteXYZ=None,
conversionMatrix=None,
transferFunc=None,
clip=False,
**kwargs):
"""Transform CIE `L*C*h*` coordinates to RGB tristimulus values.
Parameters
----------
lch : tuple, list or ndarray
1-, 2-, 3-D vector of CIE `L*C*h*` coordinates to convert. The last
dimension should be length-3 in all cases specifying a single
        coordinate. The hue angle *h* is expected in degrees.
whiteXYZ : tuple, list or ndarray
1-D vector coordinate of the white point in CIE-XYZ color space. Must be
the same white point needed by the conversion matrix. The default
white point is D65 if None is specified, defined as X, Y, Z = 0.9505,
        1.0000, 1.0890.
conversionMatrix : tuple, list or ndarray
3x3 conversion matrix to transform CIE-XYZ to RGB values. The default
matrix is sRGB with a D65 white point if None is specified. Note that
values must be gamma corrected to appear correctly according to the sRGB
standard.
transferFunc : pyfunc or None
Signature of the transfer function to use. If None, values are kept as
linear RGB (it's assumed your display is gamma corrected via the
hardware CLUT). The TF must be appropriate for the conversion matrix
supplied. Additional arguments to 'transferFunc' can be passed by
specifying them as keyword arguments. Gamma functions that come with
PsychoPy are 'srgbTF' and 'rec709TF', see their docs for more
information.
    clip : bool
Make all output values representable by the display. However, colors
outside of the display's gamut may not be valid!
Returns
-------
ndarray
        Array of RGB tristimulus values.
"""
lch, orig_shape, orig_dim = unpackColors(lch)
# convert values to L*a*b*
lab = numpy.empty(lch.shape, dtype=lch.dtype)
lab[:, 0] = lch[:, 0]
    # use numpy's elementwise trig functions (math.cos/sin fail on arrays)
    lab[:, 1] = lch[:, 1] * numpy.cos(numpy.radians(lch[:, 2]))
    lab[:, 2] = lch[:, 1] * numpy.sin(numpy.radians(lch[:, 2]))
# convert to RGB using the CIE L*a*b* function
rgb_out = cielab2rgb(lab,
whiteXYZ=whiteXYZ,
conversionMatrix=conversionMatrix,
transferFunc=transferFunc,
clip=clip,
**kwargs)
# make the output match the dimensions/shape of input
if orig_dim == 1:
rgb_out = rgb_out[0]
elif orig_dim == 3:
rgb_out = numpy.reshape(rgb_out, orig_shape)
return rgb_out # don't do signed RGB conversion, done by cielab2rgb
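# A minimal usage sketch for cielch2rgb, assuming `srgbTF` is the sRGB
# transfer function this module's docstrings reference; the color values
# here are illustrative, not from the PsychoPy docs.
def _exampleCielch2rgb():
    lchColor = (53.0, 20.0, 180.0)  # L* (lightness), C* (chroma), h (degrees)
    rgbLinear = cielch2rgb(lchColor)  # linear RGB in signed [-1, 1] range
    rgbGamma = cielch2rgb(lchColor, transferFunc=srgbTF)  # gamma-corrected
    return rgbLinear, rgbGamma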
def dkl2rgb(dkl, conversionMatrix=None):
"""Convert from DKL color space (Derrington, Krauskopf & Lennie) to RGB.
Requires a conversion matrix, which will be generated from generic
Sony Trinitron phosphors if not supplied (note that this will not be
an accurate representation of the color space unless you supply a
conversion matrix).
Examples
--------
Converting a single DKL color to RGB::
dkl = [90, 0, 1]
rgb = dkl2rgb(dkl, conversionMatrix)
"""
# make sure the input is an array
dkl = numpy.asarray(dkl)
if conversionMatrix is None:
conversionMatrix = numpy.asarray([
# (note that dkl has to be in cartesian coords first!)
# LUMIN %L-M %L+M-S
[1.0000, 1.0000, -0.1462], # R
[1.0000, -0.3900, 0.2094], # G
[1.0000, 0.0180, -1.0000]]) # B
logging.warning('This monitor has not been color-calibrated. '
'Using default DKL conversion matrix.')
if len(dkl.shape) == 3:
dkl_NxNx3 = dkl
# convert a 2D (image) of Spherical DKL colours to RGB space
origShape = dkl_NxNx3.shape # remember for later
NxN = origShape[0] * origShape[1] # find nPixels
dkl = numpy.reshape(dkl_NxNx3, [NxN, 3]) # make Nx3
rgb = dkl2rgb(dkl, conversionMatrix) # convert
return numpy.reshape(rgb, origShape) # reshape and return
else:
dkl_Nx3 = dkl
        # it's easier to use in the other orientation!
dkl_3xN = numpy.transpose(dkl_Nx3)
if numpy.size(dkl_3xN) == 3:
RG, BY, LUM = sph2cart(dkl_3xN[0],
dkl_3xN[1],
dkl_3xN[2])
else:
RG, BY, LUM = sph2cart(dkl_3xN[0, :],
dkl_3xN[1, :],
dkl_3xN[2, :])
dkl_cartesian = numpy.asarray([LUM, RG, BY])
rgb = numpy.dot(conversionMatrix, dkl_cartesian)
# return in the shape we received it:
return numpy.transpose(rgb)
def dklCart2rgb(LUM, LM, S, conversionMatrix=None):
"""Like dkl2rgb except that it uses cartesian coords (LM,S,LUM)
rather than spherical coords for DKL (elev, azim, contr).
NB: this may return rgb values >1 or <-1
"""
NxNx3 = list(LUM.shape)
NxNx3.append(3)
dkl_cartesian = numpy.asarray(
[LUM.reshape([-1]), LM.reshape([-1]), S.reshape([-1])])
if conversionMatrix is None:
conversionMatrix = numpy.asarray([
# (note that dkl has to be in cartesian coords first!)
# LUMIN %L-M %L+M-S
[1.0000, 1.0000, -0.1462], # R
[1.0000, -0.3900, 0.2094], # G
[1.0000, 0.0180, -1.0000]]) # B
rgb = numpy.dot(conversionMatrix, dkl_cartesian)
return numpy.reshape(numpy.transpose(rgb), NxNx3)
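# A minimal sketch of dklCart2rgb usage: it takes separate NxN image planes
# for luminance, L-M and S rather than an Nx3 array. Values are illustrative
# and, as the docstring notes, outputs may exceed +/-1.
def _exampleDklCart2rgb():
    LUM = numpy.zeros((2, 2))      # isoluminant plane
    LM = numpy.full((2, 2), 0.5)   # L-M contrast only
    S = numpy.zeros((2, 2))
    return dklCart2rgb(LUM, LM, S)  # -> 2x2x3 RGB array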
def rgb2hsv(rgb):
"""Convert values from linear RGB to HSV colorspace.
Parameters
----------
rgb : `array_like`
1-, 2-, 3-D vector of RGB coordinates to convert. The last dimension
should be length-3 in all cases, specifying a single coordinate.
Returns
-------
ndarray
HSV values with the same shape as the input.
"""
# Based on https://www.geeksforgeeks.org/program-change-rgb-color-model-hsv-color-model/
rgb, orig_shape, orig_dim = unpackColors(rgb)
# need to rescale RGB values to 0.0 and 1.0
rgb = rescaleColor(rgb, convertTo='unsigned')
# get row min/max indices
rmax = numpy.argmax(rgb, axis=1)
rmin = numpy.argmin(rgb, axis=1)
# get min/max values for each color coordinate
sel = numpy.arange(len(rgb))
cmax = rgb[sel, rmax]
cmin = rgb[sel, rmin]
# compute the difference between the max and min color value
delta = cmax - cmin
# vector to return HSV values
hsv_out = numpy.zeros_like(rgb, dtype=float)
    # --- calculate value (V, brightness) ---
dzero = delta == 0 # if delta is zero the color is a shade of grey
inv_dzero = None
    if numpy.any(dzero):  # greys: value is the mean of the channels
hsv_out[dzero, 2] = numpy.sum(rgb[dzero], axis=1) / 3.
inv_dzero = ~dzero
if inv_dzero is not None:
hsv_out[inv_dzero, 2] = cmax[inv_dzero]
else:
hsv_out[:, 2] = cmax[:] # no B/W colors
# --- calculate saturation ---
hsv_out[:, 1] = numpy.where(cmax > 0.0, delta / cmax, 0.0)
# --- calculate hues ---
# views of each column
r = rgb[:, 0]
g = rgb[:, 1]
b = rgb[:, 2]
# select on rows where the RGB gun value is max and not `dzero`
sel_r = (rmax == 0) & inv_dzero if inv_dzero is not None else rmax == 0
sel_g = (rmax == 1) & inv_dzero if inv_dzero is not None else rmax == 1
sel_b = (rmax == 2) & inv_dzero if inv_dzero is not None else rmax == 2
if numpy.any(sel_r): # if red == cmax
hsv_out[sel_r, 0] = \
(60 * ((g[sel_r] - b[sel_r]) / delta[sel_r]) + 360) % 360
if numpy.any(sel_g): # if green == cmax
hsv_out[sel_g, 0] = \
(60 * ((b[sel_g] - r[sel_g]) / delta[sel_g]) + 120) % 360
if numpy.any(sel_b): # if blue == cmax
hsv_out[sel_b, 0] = \
(60 * ((r[sel_b] - g[sel_b]) / delta[sel_b]) + 240) % 360
# round the hue angle value
hsv_out[:, 0] = numpy.round(hsv_out[:, 0])
# make the output match the dimensions/shape of input
if orig_dim == 1:
hsv_out = hsv_out[0]
elif orig_dim == 3:
hsv_out = numpy.reshape(hsv_out, orig_shape)
return hsv_out
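# A round-trip sketch between rgb2hsv and hsv2rgb. Inputs and outputs use
# PsychoPy's signed RGB range [-1, 1]; hue is in degrees, per the docstrings.
def _exampleRgbHsvRoundTrip():
    rgb = numpy.array([1.0, -1.0, -1.0])  # saturated red in signed RGB
    hsv = rgb2hsv(rgb)                    # -> [0., 1., 1.]
    back = hsv2rgb(hsv)                   # -> [1., -1., -1.] again
    return hsv, back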
def hsv2rgb(hsv_Nx3):
"""Convert from HSV color space to RGB gun values.
usage::
rgb_Nx3 = hsv2rgb(hsv_Nx3)
Note that in some uses of HSV space the Hue component is given in
    radians or cycles (range 0:1). In this version H is given in
degrees (0:360).
Also note that the RGB output ranges -1:1, in keeping with other
PsychoPy functions.
"""
# based on method in
# https://en.wikipedia.org/wiki/HSL_and_HSV#Converting_to_RGB
hsv_Nx3 = numpy.asarray(hsv_Nx3, dtype=float)
# we expect a 2D array so convert there if needed
origShape = hsv_Nx3.shape
hsv_Nx3 = hsv_Nx3.reshape([-1, 3])
H_ = (hsv_Nx3[:, 0] % 360) / 60.0 # this is H' in the wikipedia version
# multiply H and V to give chroma (color intensity)
C = hsv_Nx3[:, 1] * hsv_Nx3[:, 2]
X = C * (1 - abs(H_ % 2 - 1))
# rgb starts
    rgb = hsv_Nx3 * 0  # only need to set entries that become non-zero
II = (0 <= H_) * (H_ < 1)
rgb[II, 0] = C[II]
rgb[II, 1] = X[II]
II = (1 <= H_) * (H_ < 2)
rgb[II, 0] = X[II]
rgb[II, 1] = C[II]
II = (2 <= H_) * (H_ < 3)
rgb[II, 1] = C[II]
rgb[II, 2] = X[II]
II = (3 <= H_) * (H_ < 4)
rgb[II, 1] = X[II]
rgb[II, 2] = C[II]
II = (4 <= H_) * (H_ < 5)
rgb[II, 0] = X[II]
rgb[II, 2] = C[II]
II = (5 <= H_) * (H_ < 6)
rgb[II, 0] = C[II]
rgb[II, 2] = X[II]
m = (hsv_Nx3[:, 2] - C)
rgb += m.reshape([len(m), 1]) # V-C is sometimes called m
return rgb.reshape(origShape) * 2 - 1
def lms2rgb(lms_Nx3, conversionMatrix=None):
"""Convert from cone space (Long, Medium, Short) to RGB.
Requires a conversion matrix, which will be generated from generic
Sony Trinitron phosphors if not supplied (note that you will not get
an accurate representation of the color space unless you supply a
conversion matrix)
usage::
rgb_Nx3 = lms2rgb(dkl_Nx3(el,az,radius), conversionMatrix)
"""
    # it's easier to use in the other orientation!
lms_3xN = numpy.transpose(lms_Nx3)
if conversionMatrix is None:
cones_to_rgb = numpy.asarray([
# L M S
[4.97068857, -4.14354132, 0.17285275], # R
[-0.90913894, 2.15671326, -0.24757432], # G
[-0.03976551, -0.14253782, 1.18230333]]) # B
logging.warning('This monitor has not been color-calibrated. '
'Using default LMS conversion matrix.')
else:
cones_to_rgb = conversionMatrix
rgb = numpy.dot(cones_to_rgb, lms_3xN)
return numpy.transpose(rgb) # return in the shape we received it
def rgb2lms(rgb_Nx3, conversionMatrix=None):
"""Convert from RGB to cone space (LMS).
Requires a conversion matrix, which will be generated from generic
Sony Trinitron phosphors if not supplied (note that you will not get
an accurate representation of the color space unless you supply a
conversion matrix)
usage::
lms_Nx3 = rgb2lms(rgb_Nx3(el,az,radius), conversionMatrix)
"""
    # it's easier to use in the other orientation!
rgb_3xN = numpy.transpose(rgb_Nx3)
if conversionMatrix is None:
cones_to_rgb = numpy.asarray([
# L M S
[4.97068857, -4.14354132, 0.17285275], # R
[-0.90913894, 2.15671326, -0.24757432], # G
[-0.03976551, -0.14253782, 1.18230333]]) # B
logging.warning('This monitor has not been color-calibrated. '
'Using default LMS conversion matrix.')
else:
cones_to_rgb = conversionMatrix
rgb_to_cones = numpy.linalg.inv(cones_to_rgb)
lms = numpy.dot(rgb_to_cones, rgb_3xN)
return numpy.transpose(lms) # return in the shape we received it
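# rgb2lms and lms2rgb use mutually inverse matrices, so they round-trip up to
# floating-point error. A minimal sketch using the default (uncalibrated)
# matrix, which logs the calibration warning above; values are illustrative.
def _exampleLmsRoundTrip():
    rgb = numpy.array([[0.2, -0.5, 0.9]])  # one Nx3 row of signed RGB
    lms = rgb2lms(rgb)
    return lms2rgb(lms)  # approximately equal to `rgb`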
def rgb2dklCart(picture, conversionMatrix=None):
"""Convert an RGB image into Cartesian DKL space.
"""
# Turn the picture into an array so we can do maths
picture = numpy.array(picture)
# Find the original dimensions of the picture
origShape = picture.shape
# this is the inversion of the dkl2rgb conversion matrix
if conversionMatrix is None:
conversionMatrix = numpy.asarray([
# LUMIN-> %L-M-> L+M-S
[0.25145542, 0.64933633, 0.09920825],
[0.78737943, -0.55586618, -0.23151325],
[0.26562825, 0.63933074, -0.90495899]])
logging.warning('This monitor has not been color-calibrated. '
'Using default DKL conversion matrix.')
else:
conversionMatrix = numpy.linalg.inv(conversionMatrix)
    # Reshape the picture so that it can be multiplied by the conversion matrix
red = picture[:, :, 0]
green = picture[:, :, 1]
blue = picture[:, :, 2]
dkl = numpy.asarray([red.reshape([-1]),
green.reshape([-1]),
blue.reshape([-1])])
# Multiply the picture by the conversion matrix
dkl = numpy.dot(conversionMatrix, dkl)
    # Reshape the picture back to its original shape
dklPicture = numpy.reshape(numpy.transpose(dkl), origShape)
return dklPicture
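# A minimal shape-level sketch of rgb2dklCart with the default (uncalibrated)
# inverse matrix; the image content is illustrative.
def _exampleRgb2dklCart():
    picture = numpy.zeros((4, 4, 3))  # an NxNx3 RGB image
    picture[:, :, 0] = 1.0            # red channel fully on
    return rgb2dklCart(picture)       # -> 4x4x3 array of LUM, L-M, S planes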
| 25,030 | Python | .py | 567 | 36.520282 | 92 | 0.625745 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,392 | wizard.py | psychopy_psychopy/psychopy/tools/wizard.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Libraries for wizards, currently firstrun configuration and benchmark.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
# Author: Jeremy Gray, Oct 2012; localization 2014
from pyglet.gl import gl_info
import os
import sys
import wx
import numpy as np
import platform
import codecs
from packaging.version import Version
if Version(wx.__version__) < Version('2.9'):
tmpApp = wx.PySimpleApp()
else:
tmpApp = wx.App(False)
from psychopy.localization import _translate
from psychopy import (info, data, visual, gui, core, __version__,
prefs, event)
# can't just do the following, or messes up poedit autodiscovery:
# _localized = {k: _translate(k) for k in _loKeys}
class BaseWizard():
"""Base class of ConfigWizard and BenchmarkWizard.
"""
def __init__(self):
super(BaseWizard, self).__init__()
def runDiagnostics(self, win, verbose=False):
"""Return list of (key, val, msg, warn) tuple, set self.warnings
All tuple elements will be of <type str>.
msg can depend on val; warn==True indicates a concern.
Plain text is returned, expected to be used in html <table>.
Hyperlinks can be embedded as <a href="...">
"""
report = [] # add item tuples in display order
# get lots of info and do quick-to-render visual (want 0 frames drop):
# for me, grating draw times are: mean 0.53 ms, SD 0.77 ms
items = info.RunTimeInfo(win=win, refreshTest='grating',
verbose=True, userProcsDetailed=True)
totalRAM = items['systemMemTotalRAM']
freeRAM = items['systemMemFreeRAM']
warn = False
if freeRAM == 'unknown':
if totalRAM != 'unknown':
totalRAM = "%.1fG" % (totalRAM/1024.0)
txt = _translate(
'could not assess available physical RAM; total %s')
msg = txt % totalRAM
report.append(('available memory', 'unknown', msg, warn))
else:
txt = _translate(
'physical RAM available for configuration test '
'(of %.1fG total)')
msg = txt % (totalRAM/1024.)
if freeRAM < 300: # in M
txt = _translate(
'Warning: low available physical RAM for '
'configuration test (of %.1fG total)')
msg = txt % (totalRAM/1024.)
warn = True
report.append(('available memory', str(freeRAM) + 'M',
msg, warn))
# ----- PSYCHOPY: -----
warn = False
report.append(('PsychoPy', '', '', False)) # not localized
report.append(('psychopy', __version__,
_translate('avoid upgrading during an experiment'),
False))
msg = _translate(
'can be set in <a href="https://www.psychopy.org/general/'
'prefs.html#application-settings-app">Preferences -> App</a>')
report.append(('locale', items['systemLocale'], msg, False))
msg = ''
v = Version
thisV = v(items['pythonVersion'])
        if thisV < v('2.7') or (v('3.0') <= thisV < v('3.6')):
msg = _translate("Warning: python 2.7 or 3.6 are recommended; "
"2.6 and 3.5 might work. Others probably won't.")
warn = True
if 'EPD' in items['pythonFullVersion']:
msg += ' Enthought Python Distribution'
elif 'PsychoPy3.app' in items['pythonExecutable']:
msg += ' (PsychoPy StandAlone)'
bits, linkage = platform.architecture()
# if not bits.startswith('32'):
# msg = 'Warning: 32-bit python required; ' + msg
report.append(
('python version',
items['pythonVersion'] + ' (%s)' % bits,
msg, warn))
warn = False
if verbose:
msg = ''
if items['pythonWxVersion'] < '2.8.10':
msg = _translate('Warning: wx 2.8.10 or higher required')
warn = True
report.append(('wx', items['pythonWxVersion'], '', warn))
report.append(
('pyglet', items['pythonPygletVersion'][:32], '', False))
report.append(('rush', str(items['psychopyHaveExtRush']),
_translate('for high-priority threads'), False))
# ----- VISUAL: -----
report.append(('Visual', '', '', False))
warn = False
# openGL settings:
msg = ''
if items['openGLVersion'] < '2.':
msg = _translate(
'Warning: <a href="https://www.psychopy.org/general/timing'
'/reducingFrameDrops.html?highlight=OpenGL+2.0">OpenGL '
'2.0 or higher is ideal</a>.')
warn = True
report.append(('openGL version', items['openGLVersion'], msg, warn))
report.append(('openGL vendor', items['openGLVendor'], '', False))
report.append(('screen size', ' x '.join(
map(str, items['windowSize_pix'])), '', False))
# report.append(('wait blanking', str(items['windowWaitBlanking']), '',
# False))
warn = False
msg = ''
if not items['windowHaveShaders']:
msg = _translate(
'Warning: <a href="https://www.psychopy.org/general/timing'
'/reducingFrameDrops.html?highlight=shader">Rendering of'
' complex stimuli will be slow</a>.')
warn = True
report.append(('have shaders', str(
items['windowHaveShaders']), msg, warn))
warn = False
msg = _translate(
'during the drifting <a href="https://www.psychopy.org/api/'
'visual/gratingstim.html">GratingStim</a>')
if items['windowRefreshTimeMedian_ms'] < 3.3333333:
msg = _translate(
"Warning: too fast? visual sync'ing with the monitor"
" seems unlikely at 300+ Hz")
warn = True
report.append(('visual sync (refresh)', "%.2f ms/frame" %
items['windowRefreshTimeMedian_ms'], msg, warn))
msg = _translate('SD < 0.5 ms is ideal (want low variability)')
warn = False
if items['windowRefreshTimeSD_ms'] > .5:
msg = _translate(
'Warning: the refresh rate has high frame-to-frame '
'variability (SD > 0.5 ms)')
warn = True
report.append(('refresh stability (SD)', "%.2f ms" %
items['windowRefreshTimeSD_ms'], msg, warn))
# draw 100 dots as a minimally demanding visual test:
# first get baseline frame-rate (safe as possible, no drawing):
avg, sd, median = visual.getMsPerFrame(win)
dots100 = visual.DotStim(
win, nDots=100, speed=0.005, dotLife=12, dir=90,
coherence=0.2, dotSize=8, fieldShape='circle', autoLog=False)
win.recordFrameIntervals = True
win.frameIntervals = []
win.flip()
for i in range(180):
dots100.draw()
win.flip()
msg = _translate(
'during <a href="https://www.psychopy.org/api/visual/'
'dotstim.html">DotStim</a> with 100 random dots')
warn = False
intervalsMS = np.array(win.frameIntervals) * 1000
nTotal = len(intervalsMS)
nDropped = sum(intervalsMS > (1.5 * median))
if nDropped:
msg = _translate(
'Warning: could not keep up during <a href="https://'
'www.psychopy.org/api/visual/dotstim.html">DotStim</a>'
' with 100 random dots.')
warn = True
report.append(('no dropped frames', '%i / %i' % (nDropped, nTotal),
msg, warn))
win.recordFrameIntervals = False
if verbose:
report.append(('openGL max vertices',
str(items['openGLmaxVerticesInVertexArray']),
'', False))
keyList = ('GL_ARB_multitexture', 'GL_EXT_framebuffer_object',
'GL_ARB_fragment_program', 'GL_ARB_shader_objects',
'GL_ARB_vertex_shader', 'GL_ARB_texture_float',
'GL_ARB_texture_non_power_of_two', 'GL_STEREO')
for key in keyList:
val = items['openGLext.' + key] # boolean
if not val:
val = '<strong>' + str(val) + '</strong>'
report.append((key, str(val), '', False))
# ----- AUDIO: -----
report.append(('Audio', '', '', False))
msg = ''
warn = False
        if 'systemPyoVersion' not in items:
msg = _translate(
'Warning: pyo is needed for sound and microphone.')
warn = True
items['systemPyoVersion'] = _translate('(missing)')
# elif items['systemPyoVersion'] < '0.6.2':
# msg = 'pyo 0.6.2 compiled with --no-messages will
# suppress start-up messages'
report.append(('pyo', items['systemPyoVersion'], msg, warn))
# TO-DO: add microphone + playback as sound test
# ----- NUMERIC: -----
report.append(('Numeric', '', '', False))
report.append(('numpy', items['pythonNumpyVersion'],
_translate('vector-based (fast) calculations'), False))
report.append(('scipy', items['pythonScipyVersion'],
_translate('scientific / numerical'), False))
report.append(('matplotlib', items['pythonMatplotlibVersion'],
_translate('plotting; fast contains(), overlaps()'),
False))
# ----- SYSTEM: -----
report.append(('System', '', '', False))
report.append(('platform', items['systemPlatform'], '', False))
msg = _translate('for online help, usage statistics, software '
'updates, and google-speech')
warn = False
if items['systemHaveInternetAccess'] is not True:
items['systemHaveInternetAccess'] = 'False'
msg = _translate('Warning: could not connect (no proxy attempted)')
warn = True
# TO-DO: dlg to query whether to try to auto-detect (can take a
# while), or allow manual entry of proxy str, save into prefs
val = str(items['systemHaveInternetAccess'])
report.append(('internet access', val, msg, warn))
report.append(('auto proxy',
str(self.prefs.connections['autoProxy']),
_translate('try to auto-detect a proxy if needed; see'
' <a href="https://www.psychopy.org/general'
'/prefs.html#connection-settings-connection'
's">Preferences -> Connections</a>'),
False))
if not self.prefs.connections['proxy'].strip():
prx = ' --'
else:
prx = str(self.prefs.connections['proxy'])
report.append(('proxy setting', prx,
_translate('current manual proxy setting from <a '
'href="https://www.psychopy.org/general/'
'prefs.html#connection-settings-connections'
'">Preferences -> Connections</a>'), False))
txt = 'CPU speed test'
report.append((txt, "%.3f s" % items['systemTimeNumpySD1000000_sec'],
_translate('numpy.std() of 1,000,000 data points'),
False))
# TO-DO: more speed benchmarks
# - load large image file from disk
# - transfer image to GPU
# ----- IMPORTS (relevant for developers & non-StandAlone): -----
if verbose: # always False for a real first-run
report.append((_translate('Python packages'), '', '', False))
packages = ['PIL', 'openpyxl', 'setuptools', 'pytest',
'sphinx', 'psignifit', 'pyserial', 'pp',
'pynetstation', 'labjack']
if sys.platform == 'win32':
packages.append('pywin32')
packages.append('winioport')
pkgError = ModuleNotFoundError
for pkg in packages:
try:
if pkg == 'PIL':
import PIL
ver = PIL.__version__
elif pkg == 'pynetstation':
import egi_pynetstation
ver = 'import ok'
elif pkg == 'pyserial':
import serial
ver = serial.VERSION
elif pkg == 'pywin32':
import win32api
ver = 'import ok'
else:
exec('import ' + pkg)
try:
ver = eval(pkg + '.__version__')
except Exception:
ver = 'imported but no version info'
report.append((pkg, ver, '', False))
except (pkgError, AttributeError):
msg = _translate('could not import package %s')
report.append((pkg, ' --', msg % pkg, False))
# rewrite to avoid assumption of locale en_US:
self.warnings = list(
set([key for key, val, msg, warn in report if warn]))
return report
def summary(self, items=None):
"""Return a list of (item, color) for gui display. For non-fatal items
"""
config = {}
for item in items:
config[item[0]] = [item[1], item[2], item[3]] # [3] = warn or not
green = '#009933'
red = '#CC3300'
check = u"\u2713 "
summary = [(check + _translate('video card drivers'), green)]
ofInterest = ('python version', 'available memory', 'openGL version',
'visual sync (refresh)', 'refresh stability (SD)',
'no dropped frames', 'internet access')
# ofInterest.append('background processes')
for item in ofInterest:
            if item not in config:
continue # eg, microphone latency
if config[item][2]: # warn True
summary.append(("X " + _translate(item), red))
else:
summary.append((check + _translate(item), green))
return summary
def htmlReport(self, items=None, fatal=False):
"""Return an html report given a list of (key, val, msg, warn) items.
format triggers: 'Critical issue' in fatal gets highlighted
warn == True -> highlight key and val
val == msg == '' -> use key as section heading
"""
imgfile = os.path.join(self.prefs.paths['resources'],
'psychopySplash.png')
_head = (u'<html><head><meta http-equiv="Content-Type" '
'content="text/html; charset=utf-8"></head><body>' +
'<a href="https://www.psychopy.org"><img src="%s" '
'width=396 height=156></a>')
self.header = _head % imgfile
# self.iconhtml = '<a href="https://www.psychopy.org"><img src="%s"
# width=48 height=48></a>' % self.iconfile
_foot = _translate('This page was auto-generated by the '
'PsychoPy configuration wizard on %s')
self.footer = ('<center><font size=-1>' +
_foot % data.getDateStr(format="%Y-%m-%d, %H:%M") +
'</font></center>')
htmlDoc = self.header
if fatal:
# fatal is a list of strings:
htmlDoc += ('<h2><font color="red">' +
_translate('Configuration problem') + '</font></h2><hr>')
for item in fatal:
item = item.replace('Critical issue', '<p><strong>')
item += _translate('Critical issue') + '</strong>'
htmlDoc += item + "<hr>"
else:
# items is a list of tuples:
htmlDoc += ('<h2><font color="green">' +
_translate('Configuration report') + '</font></h2>\n')
numWarn = len(self.warnings)
if numWarn == 0:
htmlDoc += _translate('<p>All values seem reasonable (no '
'warnings, but there might still be '
'room for improvement).</p>\n')
elif numWarn == 1:
_warn = _translate('1 suboptimal value was detected</font>, '
'see details below (%s).</p>\n')
htmlDoc += ('<p><font color="red">' +
_warn % (self.warnings[0]))
elif numWarn > 1:
_warn = _translate('%(num)i suboptimal values were detected'
'</font>, see details below (%(warn)s).'
'</p>\n')
htmlDoc += ('<p><font color="red">' +
_warn % {'num': numWarn,
'warn': ', '.join(self.warnings)})
htmlDoc += '''<script type="text/javascript">
// Loops through all rows in document and changes display
// property of rows with a specific ID
// toggle('ok', '') will display all rows
// toggle('ok', 'none') hides ok rows, leaving Warning
// rows shown
function toggle(ID, display_value) {
var tr=document.getElementsByTagName('tr'),
i;
for (i=0;i<tr.length;i++) {
if (tr[i].id == ID) tr[i].style.display = display_value;
}
}
</script>
<p>
<button onClick="toggle('ok', 'none');">'''
htmlDoc += _translate('Only show suboptimal values') + \
'</button>' + \
'''<button onClick="toggle('ok', '');">''' + \
_translate('Show all information') + '</button></p>'
htmlDoc += _translate('''<p>Resources:
| <a href="https://www.psychopy.org/documentation.html">On-line documentation</a>
| Download <a href="https://www.psychopy.org/PsychoPyManual.pdf">PDF manual</a>
| <a href="https://discourse.psychopy.org">Search the user-group archives</a>
</p>''')
htmlDoc += '<hr><p></p> <table cellspacing=8 border=0>\n'
htmlDoc += ' <tr><td><font size=+1><strong>' + \
_translate('Configuration test</strong> or setting') +\
'</font></td><td><font size=+1><strong>' + _translate('Version or value') +\
'</strong></font></td><td><font size=+1><em>' + \
_translate('Notes') + '</em></font></td>'
for (key, val, msg, warn) in items:
if val == msg == '':
key = '<font color="darkblue" size="+1"><strong>' + \
_translate(key) + '</strong></font>'
else:
key = ' ' + _translate(key)
if warn:
key = '<font style=color:red><strong>' + \
_translate(key) + '</strong></font>'
val = '<font style=color:red><strong>' + val + '</strong></font>'
id = 'Warning'
else:
id = 'ok'
htmlDoc += ' <tr id="%s"><td>' % id
htmlDoc += key + '</td><td>' + val + '</td><td><em>' + msg + '</em></td></tr>\n'
htmlDoc += ' </table><hr>'
htmlDoc += self.footer
if not fatal and numWarn:
htmlDoc += """<script type="text/javascript">toggle('ok', 'none'); </script>"""
htmlDoc += '</body></html>'
self.reportText = htmlDoc
def save(self):
"""Save the html text as a file."""
with codecs.open(self.reportPath, 'wb', encoding='utf-8-sig') as f:
f.write(self.reportText)
class ConfigWizard(BaseWizard):
"""Walk through configuration diagnostics & generate report.
"""
def __init__(self, firstrun=False, interactive=True, log=True):
"""Check drivers, show GUIs, run diagnostics, show report.
"""
super(ConfigWizard, self).__init__()
self.firstrun = firstrun
self.prefs = prefs
self.appName = 'PsychoPy3'
self.name = self.appName + _translate(' Configuration Wizard')
self.reportPath = os.path.join(
self.prefs.paths['userPrefsDir'], 'firstrunReport.html')
# self.iconfile = os.path.join(self.prefs.paths['resources'],
# 'psychopy.png')
# dlg.SetIcon(wx.Icon(self.iconfile, wx.BITMAP_TYPE_PNG)) # no error
# but no effect
dlg = gui.Dlg(title=self.name)
dlg.addText('')
if firstrun:
dlg.addText(_translate("Welcome to PsychoPy3!"), color='blue')
dlg.addText('')
dlg.addText(_translate("It looks like you are running PsychoPy "
"for the first time."))
dlg.addText(_translate("This wizard will help you get started "
"quickly and smoothly."))
else:
dlg.addText(_translate("Welcome to the configuration wizard."))
# test for fatal configuration errors:
fatalItemsList = []
cardInfo = gl_info.get_renderer().replace('OpenGL Engine', '').strip()
if not driversOkay():
dlg.addText('')
dlg.addText(_translate("The first configuration check is your "
"video card's drivers. The current"),
color='red')
dlg.addText(_translate("drivers cannot support PsychoPy, so "
"you'll need to update the drivers."),
color='red')
msg = _translate("""<p>Critical issue:\n</p><p>Your video card (%(card)s) has drivers
that cannot support the high-performance features that PsychoPy depends on.
Fortunately, it's typically free and straightforward to get new drivers
directly from the manufacturer.</p>
<p><strong>To update the drivers:</strong>
<li> You'll need administrator privileges.
<li> On Windows, don't use the windows option to check for updates
- it can report that there are no updates available.
<li> If your card is made by NVIDIA, go to
<a href="https://www.nvidia.com/download/index.aspx">the NVIDIA website</a>
and use the 'auto detect' option. Try here for
<a href="https://www.amd.com/fr/support">ATI / Radeon drivers</a>. Or try
<a href="https://www.google.com/search?q=download+drivers+%(card2)s">
this google search</a> [google.com].
<li> Download and install the driver.
<li> Reboot the computer.
<li> Restart PsychoPy.</p>
<p>If you updated the drivers and still get this message, you'll
need a different video card to use PsychoPy. Click
<a href="https://www.psychopy.org/installation.html#recommended-hardware">here
for more information</a> [psychopy.org].</p>
""")
fatalItemsList.append(msg % {'card': cardInfo,
'card2': cardInfo.replace(' ', '+')})
if not cardOkay():
msg = _translate("""<p>Critical issue:\n</p>""")
msg += cardInfo
fatalItemsList.append(msg)
# other fatal conditions? append a 'Critical issue' msg to itemsList
if not fatalItemsList:
dlg.addText(_translate("We'll go through a series of configura"
"tion checks in about 10 seconds. "))
dlg.addText('')
if firstrun: # explain things more
dlg.addText(_translate('Note: The display will switch to '
'full-screen mode and will '))
dlg.addText(_translate("then switch back. You don't need "
"to do anything."))
dlg.addText(_translate('Optional: For best results, please quit'
' all email programs, web-browsers, '))
dlg.addText(_translate(
'Dropbox, backup or sync services, and others.'))
dlg.addText('')
dlg.addText(_translate('Click OK to start, or Cancel to skip.'))
if not self.firstrun:
dlg.addField(label=_translate('Full details'),
initial=self.prefs.app['debugMode'])
else:
dlg.addText('')
dlg.addText(_translate(
'Click OK for more information, or Cancel to skip.'))
# show the first dialog:
dlg.addText('')
if interactive:
dlg.show()
if fatalItemsList:
self.htmlReport(fatal=fatalItemsList)
self.save()
# user ends up in browser:
url = 'file://' + self.reportPath
if interactive:
wx.LaunchDefaultBrowser(url)
return
if interactive and not dlg.OK:
return # no configuration tests run
# run the diagnostics:
verbose = interactive and not self.firstrun and dlg.data[0]
win = visual.Window(fullscr=interactive, allowGUI=False,
monitor='testMonitor', autoLog=log)
itemsList = self.runDiagnostics(win, verbose) # sets self.warnings
win.close()
self.htmlReport(itemsList)
self.save()
# display summary & options:
dlg = gui.Dlg(title=self.name)
dlg.addText('')
dlg.addText(_translate('Configuration testing complete!'))
summary = self.summary(items=itemsList)
numWarn = len(self.warnings)
if numWarn == 0:
msg = _translate('All values seem reasonable (no warnings).')
elif numWarn == 1:
txt = _translate('1 suboptimal value was detected (%s)')
msg = txt % self.warnings[0]
else:
txt = _translate(
'%(num)i suboptimal values were detected (%(warn)s, ...)')
msg = txt % {'num': len(self.warnings),
'warn': self.warnings[0]}
dlg.addText(msg)
for item in summary:
dlg.addText(item[0], item[1]) # (key, color)
dlg.addText('')
dlg.addText(_translate(
'Click OK for full details (will open in a web-browser),'))
dlg.addText(_translate('or Cancel to stay in PsychoPy.'))
dlg.addText('')
if interactive:
dlg.show()
if dlg.OK:
url = 'file://' + self.reportPath
wx.LaunchDefaultBrowser(url)
return
class BenchmarkWizard(BaseWizard):
"""Class to get system info, run benchmarks
"""
def __init__(self, fullscr=True, interactive=True, log=True):
super(BenchmarkWizard, self).__init__()
self.firstrun = False
self.prefs = prefs
self.appName = 'PsychoPy3'
self.name = self.appName + _translate(' Benchmark Wizard')
dlg = gui.Dlg(title=self.name)
dlg.addText('')
dlg.addText(_translate('Benchmarking takes ~20-30 seconds to gather'))
dlg.addText(_translate('configuration and performance data. Begin?'))
dlg.addText('')
if interactive:
dlg.show()
if not dlg.OK:
return
self._prepare()
win = visual.Window(fullscr=fullscr, allowGUI=False,
monitor='testMonitor', autoLog=False)
# do system info etc first to get fps, add to list later because
        # it's nicer for benchmark results to appear at the top of the report:
diagnostics = self.runDiagnostics(win, verbose=True)
info = {}
for k, v, m, w in diagnostics:
# list of tuples --> dict, ignore msg m, warning w
info[k] = v
fps = 1000.0/float(info['visual sync (refresh)'].split()[0])
itemsList = [('Benchmark', '', '', False)]
itemsList.append(('benchmark version', '0.1', _translate(
'dots & configuration'), False))
itemsList.append(('full-screen', str(fullscr),
_translate('visual window for drawing'), False))
if int(info['no dropped frames'].split('/')[0]) != 0: # eg, "0 / 180"
start = 50 # if 100 dots had problems earlier, here start lower
else:
start = 200
for shape in ('circle', 'square'):
# order matters: circle crashes first
dotsList = self.runLotsOfDots(win, fieldShape=shape,
starting=start, baseline=fps)
itemsList.extend(dotsList)
# start square where circle breaks down
start = int(dotsList[-1][1])
itemsList.extend(diagnostics)
win.close()
itemsDict = {}
for itm in itemsList:
if 'proxy setting' in itm[0] or not itm[1]:
continue
itemsDict[itm[0]] = itm[1].replace('<strong>', '').replace(
'</strong>', '').replace(' ', '').replace(' ', '')
# present dialog, upload only if opt-in:
dlg = gui.Dlg(title=self.name)
dlg.addText('')
dlg.addText(_translate(
'Benchmark complete! (See the Coder output window.)'))
self.htmlReport(itemsList)
self.reportPath = os.path.join(self.prefs.paths['userPrefsDir'],
'benchmarkReport.html')
self.save()
dlg = gui.Dlg(title=self.name)
dlg.addText('')
dlg.addText(_translate(
'Click OK to view full configuration and benchmark data.'))
dlg.addText(_translate('Click Cancel to stay in PsychoPy.'))
dlg.addText('')
if interactive:
dlg.show()
if dlg.OK:
url = 'file://' + self.reportPath
wx.LaunchDefaultBrowser(url)
def _prepare(self):
"""Prep for bench-marking; currently just RAM-related on mac
"""
if sys.platform == 'darwin':
try:
# free up physical memory if possible
core.shellCall('purge')
except OSError:
pass
elif sys.platform == 'win32':
# This will run in background, perhaps best to launch it to
# run overnight the day before benchmarking:
# %windir%\system32\rundll32.exe advapi32.dll,ProcessIdleTasks
# rundll32.exe advapi32.dll,ProcessIdleTasks
pass
elif sys.platform.startswith('linux'):
# as root: sync; echo 3 > /proc/sys/vm/drop_caches
pass
else:
pass
def runLotsOfDots(self, win, fieldShape, starting=100, baseline=None):
"""DotStim stress test: draw many dots until drop lots of frames
report best dots as the highest dot count at which drop no frames
fieldShape = circle or square
starting = initial dot count; increases until failure
baseline = known frames per second; None means measure it here
"""
win.recordFrameIntervals = True
secs = 1 # how long to draw them for, at least 1s
# baseline frames per second:
if not baseline:
for i in range(5):
win.flip() # wake things up
win.fps() # reset
for i in range(60):
win.flip()
baseline = round(win.fps())
maxFrame = round(baseline * secs)
dotsInfo = []
win.flip()
bestDots = starting # this might over-estimate the actual best
dotCount = starting
count = visual.TextStim(win, text=str(dotCount), autoLog=False)
count.draw()
win.flip()
dots = visual.DotStim(win, color=(1.0, 1.0, 1.0), nDots=dotCount,
fieldShape=fieldShape, autoLog=False)
win.fps() # reset
frameCount = 0
while True:
dots.draw()
win.flip()
frameCount += 1
if frameCount > maxFrame:
fps = win.fps() # get frames per sec
if len(event.getKeys(['escape'])):
sys.exit()
                if (fps < baseline * 0.6) or (dotCount > 5000):
                    # only break once fps falls below 60% of baseline (a LOT
                    # of dropped frames) or the dot count exceeds 5,000
dotsInfo.append(
('dots_' + fieldShape, str(bestDots), '', False))
break
                frames_dropped = round(baseline - fps)  # can be negative
                if frames_dropped < 1:  # no frames dropped this pass
# only set best if no dropped frames:
bestDots = dotCount
# but do allow to continue in case do better with more dots:
dotCount += 100
# show the dot count:
count.setText(str(dotCount), log=False)
count.draw()
win.flip()
dots = visual.DotStim(win, color=(1.0, 1.0, 1.0),
fieldShape=fieldShape, nDots=dotCount,
autoLog=False)
frameCount = 0
win.fps() # reset
win.recordFrameIntervals = False
win.flip()
return tuple(dotsInfo)
def driversOkay():
"""Returns True if drivers should be okay for PsychoPy
"""
    return 'microsoft' not in gl_info.get_vendor().lower()
def cardOkay():
"""Not implemented: Idea = Returns string: okay, maybe, bad
depending on the graphics card. Currently returns True always
"""
return True # until we have a list of known-good cards
# card = gl_info.get_renderer()
# knownGoodList = [] # perhaps load from a file
# if card in knownGoodList:
# return True
# knownBadList = []
# if card in knownBadList:
# return False
if __name__ == '__main__':
if '--config' in sys.argv:
ConfigWizard(firstrun=bool('--firstrun' in sys.argv))
elif '--benchmark' in sys.argv:
BenchmarkWizard()
else:
print("need to specify a wizard in sys.argv, e.g., --benchmark")
| 35,408 | Python | .py | 738 | 34.210027 | 97 | 0.525864 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,393 | stimulustools.py | psychopy_psychopy/psychopy/tools/stimulustools.py |
"""
Tools for interacting with various stimuli.
For example, lists of styles for Form/Slider, so that these static values
can be quickly imported from here rather than importing `psychopy.visual` (which is slow)
"""
import inspect
import json
import importlib
import numpy as np
from psychopy import logging
from psychopy.tools.attributetools import attributeSetter
formStyles = {
'light': {
'fillColor': [0.89, 0.89, 0.89],
'borderColor': None,
'itemColor': 'black',
'responseColor': 'black',
'markerColor': [0.89, -0.35, -0.28],
'font': "Open Sans",
},
'dark': {
'fillColor': [-0.19, -0.19, -0.14],
'borderColor': None,
'itemColor': 'white',
'responseColor': 'white',
'markerColor': [0.89, -0.35, -0.28],
'font': "Open Sans",
},
}
sliderStyles = ['slider', 'rating', 'radio', 'scrollbar', 'choice']
sliderStyleTweaks = ['labels45', 'triangleMarker']
class SerializationError(Exception):
"""
Error raised when serialize is called on a value which is not serializable.
"""
pass
def serialize(obj, includeClass=True):
"""
Get a JSON serializable dict representation of this stimulus, useful for recreating it in a
different process. Will attempt to create a dict based on the object's `__init__` method, so
that this can be used to create a new copy of the object. Attributes which are themselves
serializable will be recursively serialized.
Parameters
----------
obj : object
Object to serialize.
includeClass : bool
If True, serialized output will include a field `__class__` with the full path of the
object's class.
Raises
------
SerializationError
If the object is not already serialized and cannot be serialized.
Returns
-------
dict
        Dict representation of the object, if not already serializable.
"""
# handle numpy types
if isinstance(obj, np.integer):
obj = int(obj)
elif isinstance(obj, np.floating):
obj = float(obj)
elif isinstance(obj, np.ndarray):
obj = obj.tolist()
# if an array, serialize items
if isinstance(obj, (list, tuple)):
return [serialize(val) for val in obj]
if isinstance(obj, dict):
return {serialize(key): serialize(val) for key, val in obj.items()}
# if already json serializable, return as is
try:
json.dumps(obj)
except TypeError:
pass
except:
# if we got something other than a TypeError, substitute None
return None
else:
return obj
# if object defines its own serialization, use it
if hasattr(obj, "serialize"):
return obj.serialize()
def _getAttr(obj, param):
"""
Get serialized attribute value.
"""
got = False
# if param looks to be stored in an attribute, add it
if hasattr(obj, param):
got = True
value = getattr(obj, param)
# if we have a get method for param, get its value
getParam = "get" + param[0].upper() + param[1:]
if hasattr(obj, getParam):
got = True
value = getattr(obj, getParam)()
# if we couldn't get a value, raise an error
if not got:
raise SerializationError(f"Could not get value for {type(obj).__name__}.{param}")
return serialize(value, includeClass=False)
# start off with an empty dict
arr = {}
# get init args
    # getfullargspec: getargspec was removed in Python 3.11
    initArgs = inspect.getfullargspec(obj.__init__)
# if there are variable args, raise an error
if initArgs.varargs:
raise SerializationError("Cannot serialize object with variable args.")
# how many init args are required?
    nReq = len(initArgs.args) - len(initArgs.defaults or ())
# get required params
for param in initArgs.args[:nReq]:
# skip self
if param == "self":
continue
# get attribute
arr[param] = _getAttr(obj, param)
# get optional params
for param in initArgs.args[nReq:]:
# get attribute, but don't worry if it fails
try:
arr[param] = _getAttr(obj, param)
except SerializationError:
pass
# add class
if includeClass:
arr['__class__'] = type(obj).__name__
arr['__module__'] = type(obj).__module__
return arr
class ActualizationError(Exception):
"""
Error raised when attempting to actualize from a dict which doesn't have the necessary info to
actualize.
"""
pass
def actualize(params):
"""
Create an object from a serializable dict, the kind created by `serialize`. The dict won't be
serialized, so it's okay for it to have live object handles included (for example, replacing
'win' with the handle of an extant Window object).
Parameters
----------
params : dict
Dict describing the init parameters of an object. Should include the keys:
- '__class__': The name of the object's class
- '__module__': Import path of the module containing the object's class
"""
# shallow copy dict so we can safely pop from it
params = params.copy()
# make sure we have the necessary keys
for key in ("__module__", "__class__"):
if key not in params:
raise ActualizationError(
f"Cannot actualise from dict which does not contain '{key}', dict was: {params}"
)
# get class
mod = importlib.import_module(params.pop('__module__'))
cls = getattr(mod, params.pop('__class__'))
# initialise
obj = cls(**params)
return obj
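# A minimal serialize() round-trip sketch using a hypothetical class (not part
# of PsychoPy) whose __init__ args map onto attributes, as serialize() expects.
# Note actualize() re-imports by '__module__'/'__class__', so it only works on
# classes importable by name; this local class just shows the dict's shape.
def _exampleSerialize():
    class Point:
        def __init__(self, x, y=0):
            self.x = x
            self.y = y
    params = serialize(Point(1, 2))
    # -> {'x': 1, 'y': 2, '__class__': 'Point', '__module__': __name__}
    return params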
| 5,694 | Python | .py | 161 | 28.57764 | 99 | 0.631838 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,394 | gltools.py | psychopy_psychopy/psychopy/tools/gltools.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""OpenGL related helper functions.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'createProgram',
'createProgramObjectARB',
'compileShader',
'compileShaderObjectARB',
'embedShaderSourceDefs',
'deleteObject',
'deleteObjectARB',
'attachShader',
'attachObjectARB',
'detachShader',
'detachObjectARB',
'linkProgram',
'linkProgramObjectARB',
'validateProgram',
'validateProgramARB',
'useProgram',
'useProgramObjectARB',
'getInfoLog',
'getUniformLocations',
'getAttribLocations',
'createQueryObject',
'QueryObjectInfo',
'beginQuery',
'endQuery',
'getQuery',
'getAbsTimeGPU',
'createFBO',
'attach',
'isComplete',
'deleteFBO',
'blitFBO',
'useFBO',
'createRenderbuffer',
'deleteRenderbuffer',
'createTexImage2D',
'createTexImage2DMultisample',
'deleteTexture',
'VertexArrayInfo',
'createVAO',
'drawVAO',
'deleteVAO',
'VertexBufferInfo',
'createVBO',
'bindVBO',
'unbindVBO',
'mapBuffer',
'unmapBuffer',
'deleteVBO',
'setVertexAttribPointer',
'enableVertexAttribArray',
'disableVertexAttribArray',
'createMaterial',
'useMaterial',
'createLight',
'useLights',
'setAmbientLight',
'ObjMeshInfo',
'loadObjFile',
'loadMtlFile',
'createUVSphere',
'createPlane',
'createMeshGridFromArrays',
'createMeshGrid',
'createBox',
'transformMeshPosOri',
'calculateVertexNormals',
'getIntegerv',
'getFloatv',
'getString',
'getOpenGLInfo',
    'createTexImage2dFromFile',
'bindTexture',
'unbindTexture',
'createCubeMap',
'TexCubeMap',
'getModelViewMatrix',
'getProjectionMatrix'
]
import ctypes
from io import StringIO
from collections import namedtuple
import pyglet.gl as GL # using Pyglet for now
from contextlib import contextmanager
from PIL import Image
import numpy as np
import os
import sys
import platform
import warnings
import psychopy.tools.mathtools as mt
from psychopy.visual.helpers import setColor, findImageFile
_thisPlatform = platform.system()
# create a query counter to get absolute GPU time
QUERY_COUNTER = None # prevent genQueries from being called
# compatible Numpy and OpenGL types for common GL type enums
GL_COMPAT_TYPES = {
GL.GL_FLOAT: (np.float32, GL.GLfloat),
GL.GL_DOUBLE: (np.float64, GL.GLdouble),
GL.GL_UNSIGNED_SHORT: (np.uint16, GL.GLushort),
GL.GL_UNSIGNED_INT: (np.uint32, GL.GLuint),
GL.GL_INT: (np.int32, GL.GLint),
GL.GL_SHORT: (np.int16, GL.GLshort),
GL.GL_HALF_FLOAT: (np.float16, GL.GLhalfARB),
GL.GL_UNSIGNED_BYTE: (np.uint8, GL.GLubyte),
GL.GL_BYTE: (np.int8, GL.GLbyte),
np.float32: (GL.GL_FLOAT, GL.GLfloat),
np.float64: (GL.GL_DOUBLE, GL.GLdouble),
np.uint16: (GL.GL_UNSIGNED_SHORT, GL.GLushort),
np.uint32: (GL.GL_UNSIGNED_INT, GL.GLuint),
np.int32: (GL.GL_INT, GL.GLint),
np.int16: (GL.GL_SHORT, GL.GLshort),
np.float16: (GL.GL_HALF_FLOAT, GL.GLhalfARB),
np.uint8: (GL.GL_UNSIGNED_BYTE, GL.GLubyte),
np.int8: (GL.GL_BYTE, GL.GLbyte)
}
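# GL_COMPAT_TYPES maps in both directions: GL enums to (NumPy dtype, ctypes
# type) and NumPy dtypes to (GL enum, ctypes type). A minimal lookup sketch:
def _exampleCompatTypeLookup():
    npType, cType = GL_COMPAT_TYPES[GL.GL_FLOAT]  # (np.float32, GL.GLfloat)
    glEnum, cType = GL_COMPAT_TYPES[np.float32]   # (GL.GL_FLOAT, GL.GLfloat)
    return npType, glEnum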
# -------------------------------
# Shader Program Helper Functions
# -------------------------------
#
def createProgram():
"""Create an empty program object for shaders.
Returns
-------
int
OpenGL program object handle retrieved from a `glCreateProgram` call.
Examples
--------
Building a program with vertex and fragment shader attachments::
myProgram = createProgram() # new shader object
# compile vertex and fragment shader sources
vertexShader = compileShader(vertShaderSource, GL.GL_VERTEX_SHADER)
fragmentShader = compileShader(fragShaderSource, GL.GL_FRAGMENT_SHADER)
# attach shaders to program
attachShader(myProgram, vertexShader)
attachShader(myProgram, fragmentShader)
# link the shader, makes `myProgram` attachments executable by their
# respective processors and available for use
linkProgram(myProgram)
# optional, validate the program
validateProgram(myProgram)
# optional, detach and discard shader objects
detachShader(myProgram, vertexShader)
detachShader(myProgram, fragmentShader)
deleteObject(vertexShader)
deleteObject(fragmentShader)
You can install the program for use in the current rendering state by
calling::
useProgram(myShader) # OR glUseProgram(myShader)
# set uniforms/attributes and start drawing here ...
"""
return GL.glCreateProgram()
def createProgramObjectARB():
"""Create an empty program object for shaders.
This creates an *Architecture Review Board* (ARB) program variant which is
compatible with older GLSL versions and OpenGL coding practices (eg.
immediate mode) on some platforms. Use *ARB variants of shader helper
functions (eg. `compileShaderObjectARB` instead of `compileShader`) when
working with these ARB program objects. This was included for legacy support
of existing PsychoPy shaders. However, it is recommended that you use
:func:`createShader` and follow more recent OpenGL design patterns for new
code (if possible of course).
Returns
-------
int
OpenGL program object handle retrieved from a `glCreateProgramObjectARB`
call.
Examples
--------
Building a program with vertex and fragment shader attachments::
myProgram = createProgramObjectARB() # new shader object
# compile vertex and fragment shader sources
vertexShader = compileShaderObjectARB(
vertShaderSource, GL.GL_VERTEX_SHADER_ARB)
fragmentShader = compileShaderObjectARB(
fragShaderSource, GL.GL_FRAGMENT_SHADER_ARB)
# attach shaders to program
attachObjectARB(myProgram, vertexShader)
attachObjectARB(myProgram, fragmentShader)
# link the shader, makes `myProgram` attachments executable by their
# respective processors and available for use
linkProgramObjectARB(myProgram)
# optional, validate the program
validateProgramARB(myProgram)
# optional, detach and discard shader objects
detachObjectARB(myProgram, vertexShader)
detachObjectARB(myProgram, fragmentShader)
deleteObjectARB(vertexShader)
deleteObjectARB(fragmentShader)
Use the program in the current OpenGL state::
useProgramObjectARB(myProgram)
"""
return GL.glCreateProgramObjectARB()
def compileShader(shaderSrc, shaderType):
"""Compile shader GLSL code and return a shader object. Shader objects can
    then be attached to programs and made executable on their respective
processors.
Parameters
----------
shaderSrc : str, list of str
GLSL shader source code.
shaderType : GLenum
Shader program type (eg. `GL_VERTEX_SHADER`, `GL_FRAGMENT_SHADER`,
`GL_GEOMETRY_SHADER`, etc.)
Returns
-------
int
OpenGL shader object handle retrieved from a `glCreateShader` call.
Examples
--------
Compiling GLSL source code and attaching it to a program object::
# GLSL vertex shader source
vertexSource = \
'''
#version 330 core
layout (location = 0) in vec3 vertexPos;
void main()
{
gl_Position = vec4(vertexPos, 1.0);
}
'''
# compile it, specifying `GL_VERTEX_SHADER`
vertexShader = compileShader(vertexSource, GL.GL_VERTEX_SHADER)
attachShader(myProgram, vertexShader) # attach it to `myProgram`
"""
shaderId = GL.glCreateShader(shaderType)
if isinstance(shaderSrc, (list, tuple,)):
nSources = len(shaderSrc)
srcPtr = (ctypes.c_char_p * nSources)()
srcPtr[:] = [i.encode() for i in shaderSrc]
else:
nSources = 1
srcPtr = ctypes.c_char_p(shaderSrc.encode())
GL.glShaderSource(
shaderId,
nSources,
ctypes.cast(
ctypes.byref(srcPtr),
ctypes.POINTER(ctypes.POINTER(ctypes.c_char))),
None)
GL.glCompileShader(shaderId)
result = GL.GLint()
GL.glGetShaderiv(
shaderId, GL.GL_COMPILE_STATUS, ctypes.byref(result))
if result.value == GL.GL_FALSE: # failed to compile for whatever reason
sys.stderr.write(getInfoLog(shaderId) + '\n')
deleteObject(shaderId)
raise RuntimeError("Shader compilation failed, check log output.")
return shaderId
def compileShaderObjectARB(shaderSrc, shaderType):
"""Compile shader GLSL code and return a shader object. Shader objects can
    then be attached to programs and made executable on their respective
processors.
Parameters
----------
shaderSrc : str, list of str
GLSL shader source code text.
shaderType : GLenum
Shader program type. Must be `*_ARB` enums such as `GL_VERTEX_SHADER_ARB`,
`GL_FRAGMENT_SHADER_ARB`, `GL_GEOMETRY_SHADER_ARB`, etc.
Returns
-------
int
OpenGL shader object handle retrieved from a `glCreateShaderObjectARB`
call.
"""
shaderId = GL.glCreateShaderObjectARB(shaderType)
if isinstance(shaderSrc, (list, tuple,)):
nSources = len(shaderSrc)
srcPtr = (ctypes.c_char_p * nSources)()
srcPtr[:] = [i.encode() for i in shaderSrc]
else:
nSources = 1
srcPtr = ctypes.c_char_p(shaderSrc.encode())
GL.glShaderSourceARB(
shaderId,
nSources,
ctypes.cast(
ctypes.byref(srcPtr),
ctypes.POINTER(ctypes.POINTER(ctypes.c_char))),
None)
GL.glCompileShaderARB(shaderId)
result = GL.GLint()
GL.glGetObjectParameterivARB(
shaderId, GL.GL_OBJECT_COMPILE_STATUS_ARB, ctypes.byref(result))
if result.value == GL.GL_FALSE: # failed to compile for whatever reason
sys.stderr.write(getInfoLog(shaderId) + '\n')
deleteObjectARB(shaderId)
raise RuntimeError("Shader compilation failed, check log output.")
return shaderId
def embedShaderSourceDefs(shaderSrc, defs):
"""Embed preprocessor definitions into GLSL source code.
This function generates and inserts ``#define`` statements into existing
GLSL source code, allowing one to use GLSL preprocessor statements to alter
program source at compile time.
Passing ``{'MAX_LIGHTS': 8, 'NORMAL_MAP': False}`` to `defs` will create and
insert the following ``#define`` statements into `shaderSrc`::
#define MAX_LIGHTS 8
#define NORMAL_MAP 0
As per the GLSL specification, the ``#version`` directive must be specified
at the top of the file before any other statement (with the exception of
comments). If a ``#version`` directive is present, generated ``#define``
statements will be inserted starting at the following line. If no
``#version`` directive is found in `shaderSrc`, the statements will be
prepended to `shaderSrc`.
Using preprocessor directives, multiple shader program routines can reside
in the same source text if enclosed by ``#ifdef`` and ``#endif`` statements
as shown here::
#ifdef VERTEX
// vertex shader code here ...
#endif
#ifdef FRAGMENT
// pixel shader code here ...
#endif
Both the vertex and fragment shader can be built from the same GLSL code
listing by setting either ``VERTEX`` or ``FRAGMENT`` as `True`::
vertexShader = gltools.compileShaderObjectARB(
gltools.embedShaderSourceDefs(glslSource, {'VERTEX': True}),
GL.GL_VERTEX_SHADER_ARB)
fragmentShader = gltools.compileShaderObjectARB(
gltools.embedShaderSourceDefs(glslSource, {'FRAGMENT': True}),
GL.GL_FRAGMENT_SHADER_ARB)
In addition, ``#ifdef`` blocks can be used to prune render code paths. Here,
this GLSL snippet shows a shader having diffuse color sampled from a texture
is conditional on ``DIFFUSE_TEXTURE`` being `True`, if not, the material
color is used instead::
#ifdef DIFFUSE_TEXTURE
uniform sampler2D diffuseTexture;
#endif
...
#ifdef DIFFUSE_TEXTURE
// sample color from texture
vec4 diffuseColor = texture2D(diffuseTexture, gl_TexCoord[0].st);
#else
// code path for no textures, just output material color
vec4 diffuseColor = gl_FrontMaterial.diffuse;
#endif
This avoids needing to provide two separate GLSL program sources to build
shaders to handle cases where a diffuse texture is or isn't used.
Parameters
----------
shaderSrc : str
GLSL shader source code.
defs : dict
Names and values to generate ``#define`` statements. Keys must all be
valid GLSL preprocessor variable names of type `str`. Values can only be
`int`, `float`, `str`, `bytes`, or `bool` types. Boolean values `True`
and `False` are converted to integers `1` and `0`, respectively.
Returns
-------
str
GLSL source code with ``#define`` statements inserted.
Examples
--------
Defining ``MAX_LIGHTS`` as `8` in a fragment shader program at runtime::
fragSrc = embedShaderSourceDefs(fragSrc, {'MAX_LIGHTS': 8})
fragShader = compileShaderObjectARB(fragSrc, GL_FRAGMENT_SHADER_ARB)
"""
# generate GLSL `#define` statements
glslDefSrc = ""
for varName, varValue in defs.items():
if not isinstance(varName, str):
raise ValueError("Definition name must be type `str`.")
if isinstance(varValue, (int, bool,)):
varValue = int(varValue)
        elif isinstance(varValue, (float,)):
            pass  # floats are passed through unchanged
elif isinstance(varValue, bytes):
varValue = '"{}"'.format(varValue.decode('UTF-8'))
elif isinstance(varValue, str):
varValue = '"{}"'.format(varValue)
else:
raise TypeError("Invalid type for value of `{}`.".format(varName))
glslDefSrc += '#define {n} {v}\n'.format(n=varName, v=varValue)
# find where the `#version` directive occurs
versionDirIdx = shaderSrc.find("#version")
if versionDirIdx != -1:
srcSplitIdx = shaderSrc.find("\n", versionDirIdx) + 1 # after newline
srcOut = shaderSrc[:srcSplitIdx] + glslDefSrc + shaderSrc[srcSplitIdx:]
else:
# no version directive in source, just prepend defines
srcOut = glslDefSrc + shaderSrc
return srcOut
def deleteObject(obj):
"""Delete a shader or program object.
Parameters
----------
obj : int
Shader or program object handle. Must have originated from a
:func:`createProgram`, :func:`compileShader`, `glCreateProgram` or
`glCreateShader` call.
"""
if GL.glIsShader(obj):
GL.glDeleteShader(obj)
elif GL.glIsProgram(obj):
GL.glDeleteProgram(obj)
else:
raise ValueError('Cannot delete, not a program or shader object.')
def deleteObjectARB(obj):
"""Delete a program or shader object.
Parameters
----------
obj : int
        Shader or program object handle. Must have originated from a
        :func:`createProgramObjectARB`, :func:`compileShaderObjectARB`,
        `glCreateProgramObjectARB` or `glCreateShaderObjectARB` call.
"""
GL.glDeleteObjectARB(obj)
def attachShader(program, shader):
"""Attach a shader to a program.
Parameters
----------
program : int
Program handle to attach `shader` to. Must have originated from a
:func:`createProgram` or `glCreateProgram` call.
shader : int
Handle of shader object to attach. Must have originated from a
:func:`compileShader` or `glCreateShader` call.
"""
if not GL.glIsProgram(program):
raise ValueError("Value `program` is not a program object.")
elif not GL.glIsShader(shader):
raise ValueError("Value `shader` is not a shader object.")
else:
GL.glAttachShader(program, shader)
def attachObjectARB(program, shader):
"""Attach a shader object to a program.
Parameters
----------
program : int
Program handle to attach `shader` to. Must have originated from a
:func:`createProgramObjectARB` or `glCreateProgramObjectARB` call.
shader : int
Handle of shader object to attach. Must have originated from a
:func:`compileShaderObjectARB` or `glCreateShaderObjectARB` call.
"""
if not GL.glIsProgram(program):
raise ValueError("Value `program` is not a program object.")
elif not GL.glIsShader(shader):
raise ValueError("Value `shader` is not a shader object.")
else:
GL.glAttachObjectARB(program, shader)
def detachShader(program, shader):
"""Detach a shader object from a program.
Parameters
----------
program : int
Program handle to detach `shader` from. Must have originated from a
:func:`createProgram` or `glCreateProgram` call.
shader : int
Handle of shader object to detach. Must have been previously attached
to `program`.
"""
if not GL.glIsProgram(program):
raise ValueError("Value `program` is not a program.")
elif not GL.glIsShader(shader):
raise ValueError("Value `shader` is not a shader object.")
else:
GL.glDetachShader(program, shader)
def detachObjectARB(program, shader):
"""Detach a shader object from a program.
Parameters
----------
program : int
Program handle to detach `shader` from. Must have originated from a
:func:`createProgramObjectARB` or `glCreateProgramObjectARB` call.
shader : int
Handle of shader object to detach. Must have been previously attached
to `program`.
"""
if not GL.glIsProgram(program):
raise ValueError("Value `program` is not a program.")
elif not GL.glIsShader(shader):
raise ValueError("Value `shader` is not a shader object.")
else:
GL.glDetachObjectARB(program, shader)
def linkProgram(program):
"""Link a shader program. Any attached shader objects will be made
executable to run on associated GPU processor units when the program is
used.
Parameters
----------
program : int
Program handle to link. Must have originated from a
:func:`createProgram` or `glCreateProgram` call.
Raises
------
ValueError
Specified `program` handle is invalid.
RuntimeError
        Program failed to link. Log will be dumped to `stderr`.
"""
if GL.glIsProgram(program):
GL.glLinkProgram(program)
else:
raise ValueError("Value `program` is not a shader program.")
# check for errors
result = GL.GLint()
GL.glGetProgramiv(program, GL.GL_LINK_STATUS, ctypes.byref(result))
if result.value == GL.GL_FALSE: # failed to link for whatever reason
sys.stderr.write(getInfoLog(program) + '\n')
raise RuntimeError(
'Failed to link shader program. Check log output.')
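# Example (added sketch, not part of the original module): the full build
# pipeline using the helpers above. Assumes a current GL context and the
# `createProgram`/`compileShader` helpers defined earlier in this module;
# `vertSrc` and `fragSrc` are placeholder GLSL source strings.
#
#     vertexShader = compileShader(vertSrc, GL.GL_VERTEX_SHADER)
#     fragmentShader = compileShader(fragSrc, GL.GL_FRAGMENT_SHADER)
#     program = createProgram()
#     attachShader(program, vertexShader)
#     attachShader(program, fragmentShader)
#     linkProgram(program)  # raises RuntimeError and dumps the log on failure
#     # once linked, the shader objects are no longer needed
#     detachShader(program, vertexShader)
#     detachShader(program, fragmentShader)
#     deleteObject(vertexShader)
#     deleteObject(fragmentShader)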
def linkProgramObjectARB(program):
"""Link a shader program object. Any attached shader objects will be made
executable to run on associated GPU processor units when the program is
used.
Parameters
----------
program : int
Program handle to link. Must have originated from a
:func:`createProgramObjectARB` or `glCreateProgramObjectARB` call.
Raises
------
ValueError
Specified `program` handle is invalid.
RuntimeError
        Program failed to link. Log will be dumped to `stderr`.
"""
if GL.glIsProgram(program):
GL.glLinkProgramARB(program)
else:
raise ValueError("Value `program` is not a shader program.")
# check for errors
result = GL.GLint()
GL.glGetObjectParameterivARB(
program,
GL.GL_OBJECT_LINK_STATUS_ARB,
ctypes.byref(result))
if result.value == GL.GL_FALSE: # failed to link for whatever reason
sys.stderr.write(getInfoLog(program) + '\n')
raise RuntimeError(
'Failed to link shader program. Check log output.')
def validateProgram(program):
"""Check if the program can execute given the current OpenGL state.
Parameters
----------
program : int
Handle of program to validate. Must have originated from a
:func:`createProgram` or `glCreateProgram` call.
"""
# check validation info
result = GL.GLint()
GL.glValidateProgram(program)
GL.glGetProgramiv(program, GL.GL_VALIDATE_STATUS, ctypes.byref(result))
if result.value == GL.GL_FALSE:
sys.stderr.write(getInfoLog(program) + '\n')
raise RuntimeError('Shader program validation failed.')
def validateProgramARB(program):
"""Check if the program can execute given the current OpenGL state. If
validation fails, information from the driver is dumped giving the reason.
Parameters
----------
program : int
Handle of program object to validate. Must have originated from a
:func:`createProgramObjectARB` or `glCreateProgramObjectARB` call.
"""
# check validation info
result = GL.GLint()
GL.glValidateProgramARB(program)
GL.glGetObjectParameterivARB(
program,
GL.GL_OBJECT_VALIDATE_STATUS_ARB,
ctypes.byref(result))
if result.value == GL.GL_FALSE:
sys.stderr.write(getInfoLog(program) + '\n')
raise RuntimeError('Shader program validation failed.')
def useProgram(program):
"""Use a program object's executable shader attachments in the current
OpenGL rendering state.
In order to install the program object in the current rendering state, a
program must have been successfully linked by calling :func:`linkProgram` or
`glLinkProgram`.
Parameters
----------
program : int
Handle of program to use. Must have originated from a
:func:`createProgram` or `glCreateProgram` call and was successfully
linked. Passing `0` or `None` disables shader programs.
Examples
--------
Install a program for use in the current rendering state::
useProgram(myShader)
Disable the current shader program by specifying `0`::
useProgram(0)
"""
if program is None:
program = 0
if GL.glIsProgram(program) or program == 0:
GL.glUseProgram(program)
else:
raise ValueError('Specified `program` is not a program object.')
def useProgramObjectARB(program):
"""Use a program object's executable shader attachments in the current
OpenGL rendering state.
In order to install the program object in the current rendering state, a
program must have been successfully linked by calling
:func:`linkProgramObjectARB` or `glLinkProgramObjectARB`.
Parameters
----------
program : int
Handle of program object to use. Must have originated from a
:func:`createProgramObjectARB` or `glCreateProgramObjectARB` call and
was successfully linked. Passing `0` or `None` disables shader programs.
Examples
--------
Install a program for use in the current rendering state::
useProgramObjectARB(myShader)
Disable the current shader program by specifying `0`::
useProgramObjectARB(0)
Notes
-----
Some drivers may support using `glUseProgram` for objects created by calling
:func:`createProgramObjectARB` or `glCreateProgramObjectARB`.
"""
if program is None:
program = 0
if GL.glIsProgram(program) or program == 0:
GL.glUseProgramObjectARB(program)
else:
raise ValueError('Specified `program` is not a program object.')
def getInfoLog(obj):
"""Get the information log from a shader or program.
This retrieves a text log from the driver pertaining to the shader or
program. For instance, a log can report shader compiler output or validation
results. The verbosity and formatting of the logs are platform-dependent,
where one driver may provide more information than another.
This function works with both standard and ARB program object variants.
Parameters
----------
obj : int
Program or shader to retrieve a log from. If a shader, the handle must
have originated from a :func:`compileShader`, `glCreateShader`,
        :func:`compileShaderObjectARB` or `glCreateShaderObjectARB` call. If a
        program, the handle must have come from a :func:`createProgram`,
:func:`createProgramObjectARB`, `glCreateProgram` or
`glCreateProgramObjectARB` call.
Returns
-------
str
Information log data. Logs can be empty strings if the driver has no
information available.
"""
logLength = GL.GLint()
if GL.glIsShader(obj) == GL.GL_TRUE:
GL.glGetShaderiv(
obj, GL.GL_INFO_LOG_LENGTH, ctypes.byref(logLength))
elif GL.glIsProgram(obj) == GL.GL_TRUE:
GL.glGetProgramiv(
obj, GL.GL_INFO_LOG_LENGTH, ctypes.byref(logLength))
else:
raise ValueError(
"Specified value of `obj` is not a shader or program.")
    logBuffer = ctypes.create_string_buffer(logLength.value)
    if GL.glIsShader(obj) == GL.GL_TRUE:
        GL.glGetShaderInfoLog(obj, logLength, None, logBuffer)
    else:  # a program object, as verified by the check above
        GL.glGetProgramInfoLog(obj, logLength, None, logBuffer)

    return logBuffer.value.decode('UTF-8')
def getUniformLocations(program, builtins=False):
"""Get uniform names and locations from a given shader program object.
This function works with both standard and ARB program object variants.
Parameters
----------
program : int
Handle of program to retrieve uniforms. Must have originated from a
:func:`createProgram`, :func:`createProgramObjectARB`, `glCreateProgram`
or `glCreateProgramObjectARB` call.
builtins : bool, optional
Include built-in GLSL uniforms (eg. `gl_ModelViewProjectionMatrix`).
Default is `False`.
Returns
-------
dict
Uniform names and locations.
"""
if not GL.glIsProgram(program):
raise ValueError(
"Specified value of `program` is not a program object handle.")
arraySize = GL.GLint()
nameLength = GL.GLsizei()
# cache uniform locations to avoid looking them up before setting them
nUniforms = GL.GLint()
GL.glGetProgramiv(program, GL.GL_ACTIVE_UNIFORMS, ctypes.byref(nUniforms))
unifLoc = None
if nUniforms.value > 0:
maxUniformLength = GL.GLint()
GL.glGetProgramiv(
program,
GL.GL_ACTIVE_UNIFORM_MAX_LENGTH,
ctypes.byref(maxUniformLength))
unifLoc = {}
for uniformIdx in range(nUniforms.value):
unifType = GL.GLenum()
unifName = (GL.GLchar * maxUniformLength.value)()
GL.glGetActiveUniform(
program,
uniformIdx,
maxUniformLength,
ctypes.byref(nameLength),
ctypes.byref(arraySize),
ctypes.byref(unifType),
unifName)
# get location
loc = GL.glGetUniformLocation(program, unifName)
# don't include if -1, these are internal types like 'gl_Vertex'
if not builtins:
if loc != -1:
unifLoc[unifName.value] = loc
else:
unifLoc[unifName.value] = loc
return unifLoc
def getAttribLocations(program, builtins=False):
"""Get attribute names and locations from the specified program object.
This function works with both standard and ARB program object variants.
Parameters
----------
program : int
Handle of program to retrieve attributes. Must have originated from a
:func:`createProgram`, :func:`createProgramObjectARB`, `glCreateProgram`
or `glCreateProgramObjectARB` call.
builtins : bool, optional
Include built-in GLSL attributes (eg. `gl_Vertex`). Default is `False`.
Returns
-------
dict
Attribute names and locations.
"""
if not GL.glIsProgram(program):
raise ValueError(
"Specified value of `program` is not a program object handle.")
arraySize = GL.GLint()
nameLength = GL.GLsizei()
nAttribs = GL.GLint()
GL.glGetProgramiv(program, GL.GL_ACTIVE_ATTRIBUTES, ctypes.byref(nAttribs))
attribLoc = None
if nAttribs.value > 0:
maxAttribLength = GL.GLint()
GL.glGetProgramiv(
program,
GL.GL_ACTIVE_ATTRIBUTE_MAX_LENGTH,
ctypes.byref(maxAttribLength))
attribLoc = {}
for attribIdx in range(nAttribs.value):
attribType = GL.GLenum()
attribName = (GL.GLchar * maxAttribLength.value)()
GL.glGetActiveAttrib(
program,
attribIdx,
maxAttribLength,
ctypes.byref(nameLength),
ctypes.byref(arraySize),
ctypes.byref(attribType),
attribName)
# get location
loc = GL.glGetAttribLocation(program, attribName.value)
# don't include if -1, these are internal types like 'gl_Vertex'
if not builtins:
if loc != -1:
attribLoc[attribName.value] = loc
else:
attribLoc[attribName.value] = loc
return attribLoc
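# Example (added sketch, not part of the original module): introspecting a
# linked program with the two helpers above. Assumes `program` is a
# successfully linked program and `b'diffuseTexture'` is a hypothetical
# sampler uniform declared in its fragment shader.
#
#     uniforms = getUniformLocations(program)
#     attribs = getAttribLocations(program)
#     # keys are `bytes` names exactly as reported by the driver
#     useProgram(program)
#     GL.glUniform1i(uniforms[b'diffuseTexture'], 0)  # sample from unit 0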
# -----------------------------------
# GL Query Objects
# -----------------------------------
class QueryObjectInfo:
"""Object for querying information. This includes GPU timing information."""
__slots__ = ['name', 'target']
def __init__(self, name, target):
self.name = name
self.target = target
def isValid(self):
"""Check if the name associated with this object is valid."""
return GL.glIsQuery(self.name) == GL.GL_TRUE
def createQueryObject(target=GL.GL_TIME_ELAPSED):
"""Create a GL query object.
Parameters
----------
target : Glenum or int
Target for the query.
Returns
-------
QueryObjectInfo
Query object.
Examples
--------
Get GPU time elapsed executing rendering/GL calls associated with some
stimuli (this is not the difference in absolute time between consecutive
`beginQuery` and `endQuery` calls!)::
# create a new query object
qGPU = createQueryObject(GL_TIME_ELAPSED)
        beginQuery(qGPU)
        myStim.draw()  # OpenGL calls here
        endQuery(qGPU)

        # get time elapsed in seconds spent on the GPU
        timeRendering = getQuery(qGPU) * 1e-9
You can also use queries to test if vertices are occluded, as their samples
would be rejected during depth testing::
drawVAO(shape0, GL_TRIANGLES) # draw the first object
# check if the object was completely occluded
qOcclusion = createQueryObject(GL_ANY_SAMPLES_PASSED)
# draw the next shape within query context
beginQuery(qOcclusion)
drawVAO(shape1, GL_TRIANGLES) # draw the second object
endQuery(qOcclusion)
        isOccluded = getQuery(qOcclusion) == 0  # no samples passed the test
This can be leveraged to perform occlusion testing/culling, where you can
render a `cheap` version of your mesh/shape, then the more expensive version
if samples were passed.
"""
result = GL.GLuint()
GL.glGenQueries(1, ctypes.byref(result))
return QueryObjectInfo(result, target)
def beginQuery(query):
"""Begin query.
Parameters
----------
query : QueryObjectInfo
Query object descriptor returned by :func:`createQueryObject`.
"""
if isinstance(query, (QueryObjectInfo,)):
GL.glBeginQuery(query.target, query.name)
else:
raise TypeError('Type of `query` must be `QueryObjectInfo`.')
def endQuery(query):
"""End a query.
Parameters
----------
query : QueryObjectInfo
Query object descriptor returned by :func:`createQueryObject`,
previously passed to :func:`beginQuery`.
"""
if isinstance(query, (QueryObjectInfo,)):
GL.glEndQuery(query.target)
else:
raise TypeError('Type of `query` must be `QueryObjectInfo`.')
def getQuery(query):
"""Get the value stored in a query object.
Parameters
----------
query : QueryObjectInfo
Query object descriptor returned by :func:`createQueryObject`,
previously passed to :func:`endQuery`.
"""
params = GL.GLuint64(0)
if isinstance(query, QueryObjectInfo):
GL.glGetQueryObjectui64v(
query.name,
GL.GL_QUERY_RESULT,
ctypes.byref(params))
return params.value
else:
raise TypeError('Argument `query` must be `QueryObjectInfo` instance.')
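# Example (added sketch, not part of the original module): the three query
# helpers above used together to time GPU work. Assumes a current GL context
# and a `draw()` callable that issues GL commands.
#
#     timer = createQueryObject(GL.GL_TIME_ELAPSED)
#     beginQuery(timer)
#     draw()
#     endQuery(timer)
#     gpuSeconds = getQuery(timer) * 1e-9  # result is reported in nanoseconds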
def getAbsTimeGPU():
"""Get the absolute GPU time in nanoseconds.
Returns
-------
int
Time elapsed in nanoseconds since the OpenGL context was fully realized.
Examples
--------
Get the current GPU time in seconds::
timeInSeconds = getAbsTimeGPU() * 1e-9
Get the GPU time elapsed::
t0 = getAbsTimeGPU()
# some drawing commands here ...
t1 = getAbsTimeGPU()
timeElapsed = (t1 - t0) * 1e-9 # take difference, convert to seconds
"""
    global QUERY_COUNTER
    if QUERY_COUNTER is None:
        QUERY_COUNTER = GL.GLuint()  # lazily create the counter object
        GL.glGenQueries(1, ctypes.byref(QUERY_COUNTER))
GL.glQueryCounter(QUERY_COUNTER, GL.GL_TIMESTAMP)
params = GL.GLuint64(0)
GL.glGetQueryObjectui64v(
QUERY_COUNTER,
GL.GL_QUERY_RESULT,
ctypes.byref(params))
return params.value
# -----------------------------------
# Framebuffer Objects (FBO) Functions
# -----------------------------------
#
# The functions below simplify the creation and management of Framebuffer
# Objects (FBOs). FBOs are containers for image buffers (textures or
# renderbuffers) frequently used for off-screen rendering.
#
# FBO descriptor
Framebuffer = namedtuple(
'Framebuffer',
['id',
'target',
'userData']
)
def createFBO(attachments=()):
"""Create a Framebuffer Object.
Parameters
----------
attachments : :obj:`list` or :obj:`tuple` of :obj:`tuple`
Optional attachments to initialize the Framebuffer with. Attachments are
specified as a list of tuples. Each tuple must contain an attachment
point (e.g. GL_COLOR_ATTACHMENT0, GL_DEPTH_ATTACHMENT, etc.) and a
buffer descriptor type (Renderbuffer or TexImage2D). If using a combined
depth/stencil format such as GL_DEPTH24_STENCIL8, GL_DEPTH_ATTACHMENT
and GL_STENCIL_ATTACHMENT must be passed the same buffer. Alternatively,
one can use GL_DEPTH_STENCIL_ATTACHMENT instead. If using multisample
        buffers, all attachment images must use the same number of samples. As
an example, one may specify attachments as 'attachments=((
GL.GL_COLOR_ATTACHMENT0, frameTexture), (GL.GL_DEPTH_STENCIL_ATTACHMENT,
depthRenderBuffer))'.
Returns
-------
Framebuffer
Framebuffer descriptor.
Notes
-----
- All buffers must have the same number of samples.
- The 'userData' field of the returned descriptor is a dictionary that
can be used to store arbitrary data associated with the FBO.
    - Framebuffers need at least one attachment to be complete.
Examples
--------
Create an empty framebuffer with no attachments::
fbo = createFBO() # invalid until attachments are added
    Create a render target with color and depth/stencil texture attachments::
colorTex = createTexImage2D(1024,1024) # empty texture
depthRb = createRenderbuffer(800,600,internalFormat=GL.GL_DEPTH24_STENCIL8)
# attach images
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, fbo.id)
attach(GL.GL_COLOR_ATTACHMENT0, colorTex)
attach(GL.GL_DEPTH_ATTACHMENT, depthRb)
attach(GL.GL_STENCIL_ATTACHMENT, depthRb)
# or attach(GL.GL_DEPTH_STENCIL_ATTACHMENT, depthRb)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
# above is the same as
with useFBO(fbo):
attach(GL.GL_COLOR_ATTACHMENT0, colorTex)
attach(GL.GL_DEPTH_ATTACHMENT, depthRb)
attach(GL.GL_STENCIL_ATTACHMENT, depthRb)
Examples of userData some custom function might access::
fbo.userData['flags'] = ['left_eye', 'clear_before_use']
Using a depth only texture (for shadow mapping?)::
depthTex = createTexImage2D(800, 600,
internalFormat=GL.GL_DEPTH_COMPONENT24,
pixelFormat=GL.GL_DEPTH_COMPONENT)
fbo = createFBO([(GL.GL_DEPTH_ATTACHMENT, depthTex)]) # is valid
# discard FBO descriptor, just give me the ID
frameBuffer = createFBO().id
"""
fboId = GL.GLuint()
GL.glGenFramebuffers(1, ctypes.byref(fboId))
# create a framebuffer descriptor
fboDesc = Framebuffer(fboId, GL.GL_FRAMEBUFFER, dict())
# initial attachments for this framebuffer
if attachments:
with useFBO(fboDesc):
for attachPoint, imageBuffer in attachments:
attach(attachPoint, imageBuffer)
return fboDesc
def attach(attachPoint, imageBuffer):
"""Attach an image to a specified attachment point on the presently bound
FBO.
Parameters
----------
    attachPoint : :obj:`int`
Attachment point for 'imageBuffer' (e.g. GL.GL_COLOR_ATTACHMENT0).
imageBuffer : :obj:`TexImage2D` or :obj:`Renderbuffer`
Framebuffer-attachable buffer descriptor.
Examples
--------
Attach an image to attachment points on the framebuffer::
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, fbo)
attach(GL.GL_COLOR_ATTACHMENT0, colorTex)
attach(GL.GL_DEPTH_STENCIL_ATTACHMENT, depthRb)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, lastBoundFbo)
# same as above, but using a context manager
with useFBO(fbo):
attach(GL.GL_COLOR_ATTACHMENT0, colorTex)
attach(GL.GL_DEPTH_STENCIL_ATTACHMENT, depthRb)
"""
# We should also support binding GL names specified as integers. Right now
    # you need a descriptor which contains the target and name for the buffer.
#
if isinstance(imageBuffer, (TexImage2D, TexImage2DMultisample)):
GL.glFramebufferTexture2D(
GL.GL_FRAMEBUFFER,
attachPoint,
imageBuffer.target,
imageBuffer.id, 0)
    elif isinstance(imageBuffer, Renderbuffer):
        GL.glFramebufferRenderbuffer(
            GL.GL_FRAMEBUFFER,
            attachPoint,
            imageBuffer.target,
            imageBuffer.id)
    else:
        raise TypeError(
            'Value of `imageBuffer` must be a `TexImage2D`, '
            '`TexImage2DMultisample` or `Renderbuffer` descriptor.')
def isComplete():
"""Check if the currently bound framebuffer is complete.
Returns
-------
bool
`True` if the presently bound FBO is complete.
"""
return GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == \
GL.GL_FRAMEBUFFER_COMPLETE
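# Example (added sketch, not part of the original module): verifying
# completeness right after attaching images. `colorTex` and `depthRb` are
# placeholder attachment descriptors.
#
#     fbo = createFBO()
#     with useFBO(fbo):
#         attach(GL.GL_COLOR_ATTACHMENT0, colorTex)
#         attach(GL.GL_DEPTH_STENCIL_ATTACHMENT, depthRb)
#         if not isComplete():
#             raise RuntimeError('Framebuffer is incomplete.')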
def deleteFBO(fbo):
    """Delete a framebuffer.
    """
    # `glDeleteFramebuffers` expects a pointer to an array of buffer names
    fboId = fbo.id if isinstance(fbo, Framebuffer) else GL.GLuint(int(fbo))
    GL.glDeleteFramebuffers(1, ctypes.byref(fboId))
def blitFBO(srcRect, dstRect=None, filter=GL.GL_LINEAR):
"""Copy a block of pixels between framebuffers via blitting. Read and draw
    framebuffers must be bound prior to calling this function. The scissor box
    and viewport are left unchanged by this call.
Parameters
----------
srcRect : :obj:`list` of :obj:`int`
List specifying the top-left and bottom-right coordinates of the region
to copy from (<X0>, <Y0>, <X1>, <Y1>).
dstRect : :obj:`list` of :obj:`int` or :obj:`None`
List specifying the top-left and bottom-right coordinates of the region
to copy to (<X0>, <Y0>, <X1>, <Y1>). If None, srcRect is used for
dstRect.
filter : :obj:`int`
Interpolation method to use if the image is stretched, default is
GL_LINEAR, but can also be GL_NEAREST.
Returns
-------
None
Examples
--------
    Blitting pixels from one FBO to another::
# bind framebuffer to read pixels from
GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, srcFbo)
# bind framebuffer to draw pixels to
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, dstFbo)
gltools.blitFBO((0,0,800,600), (0,0,800,600))
# unbind both read and draw buffers
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
"""
# in most cases srcRect and dstRect will be the same.
if dstRect is None:
dstRect = srcRect
GL.glBlitFramebuffer(srcRect[0], srcRect[1], srcRect[2], srcRect[3],
dstRect[0], dstRect[1], dstRect[2], dstRect[3],
GL.GL_COLOR_BUFFER_BIT, # colors only for now
filter)
@contextmanager
def useFBO(fbo):
"""Context manager for Framebuffer Object bindings. This function yields
the framebuffer name as an integer.
Parameters
----------
    fbo : :obj:`int` or :obj:`Framebuffer`
OpenGL Framebuffer Object name/ID or descriptor.
Yields
-------
int
OpenGL name of the framebuffer bound in the context.
Examples
--------
Using a framebuffer context manager::
# FBO bound somewhere deep in our code
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, someOtherFBO)
...
# create a new FBO, but we have no idea what the currently bound FBO is
fbo = createFBO()
# use a context to bind attachments
        with useFBO(fbo):
attach(GL.GL_COLOR_ATTACHMENT0, colorTex)
attach(GL.GL_DEPTH_ATTACHMENT, depthRb)
attach(GL.GL_STENCIL_ATTACHMENT, depthRb)
isComplete = gltools.isComplete()
# someOtherFBO is still bound!
"""
prevFBO = GL.GLint()
GL.glGetIntegerv(GL.GL_FRAMEBUFFER_BINDING, ctypes.byref(prevFBO))
toBind = fbo.id if isinstance(fbo, Framebuffer) else int(fbo)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, toBind)
try:
yield toBind
finally:
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, prevFBO.value)
# ------------------------------
# Renderbuffer Objects Functions
# ------------------------------
#
# The functions below handle the creation and management of Renderbuffer
# Objects.
#
# Renderbuffer descriptor type
Renderbuffer = namedtuple(
'Renderbuffer',
['id',
'target',
'width',
'height',
'internalFormat',
'samples',
'multiSample', # boolean, check if a texture is multisample
'userData'] # dictionary for user defined data
)
def createRenderbuffer(width, height, internalFormat=GL.GL_RGBA8, samples=1):
"""Create a new Renderbuffer Object with a specified internal format. A
multisample storage buffer is created if samples > 1.
Renderbuffers contain image data and are optimized for use as render
targets. See https://www.khronos.org/opengl/wiki/Renderbuffer_Object for
more information.
Parameters
----------
width : :obj:`int`
Buffer width in pixels.
height : :obj:`int`
Buffer height in pixels.
internalFormat : :obj:`int`
Format for renderbuffer data (e.g. GL_RGBA8, GL_DEPTH24_STENCIL8).
samples : :obj:`int`
        Number of samples for multisampling. Must be a power of two; values
        greater than `1` create multisample storage, while `1` creates a
        standard buffer.
Returns
-------
Renderbuffer
A descriptor of the created renderbuffer.
Notes
-----
The 'userData' field of the returned descriptor is a dictionary that can
be used to store arbitrary data associated with the buffer.
"""
width = int(width)
height = int(height)
# create a new renderbuffer ID
rbId = GL.GLuint()
GL.glGenRenderbuffers(1, ctypes.byref(rbId))
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, rbId)
if samples > 1:
# determine if the 'samples' value is valid
maxSamples = getIntegerv(GL.GL_MAX_SAMPLES)
if (samples & (samples - 1)) != 0:
raise ValueError('Invalid number of samples, must be power-of-two.')
elif samples > maxSamples:
            raise ValueError('Invalid number of samples, must be <={}.'.format(
maxSamples))
# create a multisample render buffer storage
GL.glRenderbufferStorageMultisample(
GL.GL_RENDERBUFFER,
samples,
internalFormat,
width,
height)
else:
GL.glRenderbufferStorage(
GL.GL_RENDERBUFFER,
internalFormat,
width,
height)
# done, unbind it
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, 0)
return Renderbuffer(rbId,
GL.GL_RENDERBUFFER,
width,
height,
internalFormat,
samples,
samples > 1,
dict())
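# Example (added sketch, not part of the original module): creating
# depth/stencil storage for an 800x600 render target. Assumes a current GL
# context.
#
#     depthRb = createRenderbuffer(800, 600,
#                                  internalFormat=GL.GL_DEPTH24_STENCIL8)
#     # multisampled variant; `samples` must be a power of two
#     depthRbMS = createRenderbuffer(800, 600,
#                                    internalFormat=GL.GL_DEPTH24_STENCIL8,
#                                    samples=4)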
def deleteRenderbuffer(renderBuffer):
"""Free the resources associated with a renderbuffer. This invalidates the
renderbuffer's ID.
"""
    # `glDeleteRenderbuffers` expects a pointer to an array of buffer names
    GL.glDeleteRenderbuffers(1, ctypes.byref(renderBuffer.id))
# -----------------
# Texture Functions
# -----------------
# 2D texture descriptor. You can 'wrap' existing texture IDs with TexImage2D to
# use them with functions that require that type as input.
#
class TexImage2D:
"""Descriptor for a 2D texture.
This class is used for bookkeeping 2D textures stored in video memory.
Information about the texture (eg. `width` and `height`) is available via
class attributes. Attributes should never be modified directly.
"""
__slots__ = ['width',
'height',
'target',
'_name',
'level',
'internalFormat',
'pixelFormat',
'dataType',
'unpackAlignment',
'_texParams',
'_isBound',
'_unit',
'_texParamsNeedUpdate']
def __init__(self,
name=0,
target=GL.GL_TEXTURE_2D,
width=64,
height=64,
level=0,
internalFormat=GL.GL_RGBA,
pixelFormat=GL.GL_RGBA,
dataType=GL.GL_FLOAT,
unpackAlignment=4,
texParams=None):
"""
Parameters
----------
name : `int` or `GLuint`
OpenGL handle for texture. Is `0` if uninitialized.
target : :obj:`int`
The target texture should only be either GL_TEXTURE_2D or
GL_TEXTURE_RECTANGLE.
width : :obj:`int`
Texture width in pixels.
height : :obj:`int`
Texture height in pixels.
level : :obj:`int`
LOD number of the texture, should be 0 if GL_TEXTURE_RECTANGLE is
the target.
internalFormat : :obj:`int`
Internal format for texture data (e.g. GL_RGBA8, GL_R11F_G11F_B10F).
pixelFormat : :obj:`int`
Pixel data format (e.g. GL_RGBA, GL_DEPTH_STENCIL)
dataType : :obj:`int`
Data type for pixel data (e.g. GL_FLOAT, GL_UNSIGNED_BYTE).
unpackAlignment : :obj:`int`
Alignment requirements of each row in memory. Default is 4.
        texParams : :obj:`dict`
            Optional texture parameters specified as `dict`. These values are
            passed to `glTexParameteri`. Keys are parameter names and values
            are their settings. For example, `texParams={
GL.GL_TEXTURE_MIN_FILTER: GL.GL_LINEAR, GL.GL_TEXTURE_MAG_FILTER:
GL.GL_LINEAR}`. These can be changed and will be updated the next
time this instance is passed to :func:`bindTexture`.
"""
# fields for texture information
self.name = name
self.width = width
self.height = height
self.target = target
self.level = level
self.internalFormat = internalFormat
self.pixelFormat = pixelFormat
self.dataType = dataType
self.unpackAlignment = unpackAlignment
self._texParams = {}
# set texture parameters
if texParams is not None:
for key, val in texParams.items():
self._texParams[key] = val
# internal data
self._isBound = False # True if the texture has been bound
self._unit = None # texture unit assigned to this texture
self._texParamsNeedUpdate = True # update texture parameters
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if not isinstance(value, GL.GLuint):
self._name = GL.GLuint(int(value))
else:
self._name = value
@property
def size(self):
"""Size of the texture [w, h] in pixels (`int`, `int`)."""
return self.width, self.height
@property
def texParams(self):
"""Texture parameters."""
self._texParamsNeedUpdate = True
return self._texParams
@texParams.setter
def texParams(self, value):
"""Texture parameters."""
self._texParamsNeedUpdate = True
self._texParams = value
def createTexImage2D(width, height, target=GL.GL_TEXTURE_2D, level=0,
internalFormat=GL.GL_RGBA8, pixelFormat=GL.GL_RGBA,
dataType=GL.GL_FLOAT, data=None, unpackAlignment=4,
texParams=None):
"""Create a 2D texture in video memory. This can only create a single 2D
texture with targets `GL_TEXTURE_2D` or `GL_TEXTURE_RECTANGLE`.
Parameters
----------
width : :obj:`int`
Texture width in pixels.
height : :obj:`int`
Texture height in pixels.
target : :obj:`int`
The target texture should only be either GL_TEXTURE_2D or
GL_TEXTURE_RECTANGLE.
level : :obj:`int`
LOD number of the texture, should be 0 if GL_TEXTURE_RECTANGLE is the
target.
internalFormat : :obj:`int`
Internal format for texture data (e.g. GL_RGBA8, GL_R11F_G11F_B10F).
pixelFormat : :obj:`int`
Pixel data format (e.g. GL_RGBA, GL_DEPTH_STENCIL)
dataType : :obj:`int`
Data type for pixel data (e.g. GL_FLOAT, GL_UNSIGNED_BYTE).
data : :obj:`ctypes` or :obj:`None`
Ctypes pointer to image data. If None is specified, the texture will be
created but pixel data will be uninitialized.
unpackAlignment : :obj:`int`
Alignment requirements of each row in memory. Default is 4.
texParams : :obj:`dict`
Optional texture parameters specified as `dict`. These values are passed
        to `glTexParameteri`. Keys are parameter names and values are their
        settings. For example, `texParams={GL.GL_TEXTURE_MIN_FILTER:
GL.GL_LINEAR, GL.GL_TEXTURE_MAG_FILTER: GL.GL_LINEAR}`.
Returns
-------
TexImage2D
A `TexImage2D` descriptor.
Notes
-----
    Previous textures are unbound after calling 'createTexImage2D'.
Examples
--------
Creating a texture from an image file::
import pyglet.gl as GL # using Pyglet for now
# empty texture
textureDesc = createTexImage2D(1024, 1024, internalFormat=GL.GL_RGBA8)
# load texture data from an image file using Pillow and NumPy
from PIL import Image
import numpy as np
im = Image.open(imageFile) # 8bpp!
im = im.transpose(Image.FLIP_TOP_BOTTOM) # OpenGL origin is at bottom
im = im.convert("RGBA")
pixelData = np.array(im).ctypes # convert to ctypes!
width = pixelData.shape[1]
height = pixelData.shape[0]
textureDesc = gltools.createTexImage2D(
width,
height,
internalFormat=GL.GL_RGBA,
pixelFormat=GL.GL_RGBA,
dataType=GL.GL_UNSIGNED_BYTE,
data=pixelData,
unpackAlignment=1,
            texParams={GL.GL_TEXTURE_MAG_FILTER: GL.GL_LINEAR,
                       GL.GL_TEXTURE_MIN_FILTER: GL.GL_LINEAR})

        GL.glBindTexture(GL.GL_TEXTURE_2D, textureDesc.name)
"""
width = int(width)
height = int(height)
if width <= 0 or height <= 0:
raise ValueError("Invalid image dimensions {} x {}.".format(
width, height))
if target == GL.GL_TEXTURE_RECTANGLE:
if level != 0:
raise ValueError("Invalid level for target GL_TEXTURE_RECTANGLE, "
"must be 0.")
GL.glEnable(GL.GL_TEXTURE_RECTANGLE)
texId = GL.GLuint()
GL.glGenTextures(1, ctypes.byref(texId))
GL.glBindTexture(target, texId)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, int(unpackAlignment))
GL.glTexImage2D(target, level, internalFormat,
width, height, 0,
pixelFormat, dataType, data)
# apply texture parameters
if texParams is not None:
for pname, param in texParams.items():
GL.glTexParameteri(target, pname, param)
# new texture descriptor
tex = TexImage2D(name=texId,
target=target,
width=width,
height=height,
internalFormat=internalFormat,
level=level,
pixelFormat=pixelFormat,
dataType=dataType,
unpackAlignment=unpackAlignment,
texParams=texParams)
tex._texParamsNeedUpdate = False
GL.glBindTexture(target, 0)
return tex
def createTexImage2dFromFile(imgFile, transpose=True):
"""Load an image from file directly into a texture.
This is a convenience function to quickly get an image file loaded into a
2D texture. The image is converted to RGBA format. Texture parameters are
set for linear interpolation.
Parameters
----------
imgFile : str
Path to the image file.
transpose : bool
Flip the image so it appears upright when displayed in OpenGL image
coordinates.
Returns
-------
TexImage2D
Texture descriptor.
"""
# Attempt to find file with substitution (handles e.g. default.png)
tryImg = findImageFile(imgFile, checkResources=True)
if tryImg is not None:
imgFile = tryImg
im = Image.open(imgFile) # 8bpp!
if transpose:
im = im.transpose(Image.FLIP_TOP_BOTTOM) # OpenGL origin is at bottom
im = im.convert("RGBA")
pixelData = np.array(im).ctypes # convert to ctypes!
width = pixelData.shape[1]
height = pixelData.shape[0]
textureDesc = createTexImage2D(
width,
height,
internalFormat=GL.GL_RGBA,
pixelFormat=GL.GL_RGBA,
dataType=GL.GL_UNSIGNED_BYTE,
data=pixelData,
unpackAlignment=1,
texParams={GL.GL_TEXTURE_MAG_FILTER: GL.GL_LINEAR,
GL.GL_TEXTURE_MIN_FILTER: GL.GL_LINEAR})
return textureDesc
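# Example (added sketch, not part of the original module): one call to get an
# image file into video memory. `'stimulus.png'` is a placeholder path; the
# file is resolved through `findImageFile` before loading.
#
#     tex = createTexImage2dFromFile('stimulus.png')
#     print(tex.size)  # (width, height) of the texture in pixels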
class TexCubeMap:
"""Descriptor for a cube map texture..
This class is used for bookkeeping cube maps stored in video memory.
Information about the texture (eg. `width` and `height`) is available via
class attributes. Attributes should never be modified directly.
"""
__slots__ = ['width',
'height',
'target',
'_name',
'level',
'internalFormat',
'pixelFormat',
'dataType',
'unpackAlignment',
'_texParams',
'_isBound',
'_unit',
'_texParamsNeedUpdate']
def __init__(self,
name=0,
target=GL.GL_TEXTURE_CUBE_MAP,
width=64,
height=64,
level=0,
internalFormat=GL.GL_RGBA,
pixelFormat=GL.GL_RGBA,
dataType=GL.GL_FLOAT,
unpackAlignment=4,
texParams=None):
"""
Parameters
----------
name : `int` or `GLuint`
OpenGL handle for texture. Is `0` if uninitialized.
target : :obj:`int`
The target texture should only be `GL_TEXTURE_CUBE_MAP`.
width : :obj:`int`
Texture width in pixels.
height : :obj:`int`
Texture height in pixels.
level : :obj:`int`
LOD number of the texture.
internalFormat : :obj:`int`
Internal format for texture data (e.g. GL_RGBA8, GL_R11F_G11F_B10F).
pixelFormat : :obj:`int`
Pixel data format (e.g. GL_RGBA, GL_DEPTH_STENCIL)
dataType : :obj:`int`
Data type for pixel data (e.g. GL_FLOAT, GL_UNSIGNED_BYTE).
unpackAlignment : :obj:`int`
Alignment requirements of each row in memory. Default is 4.
        texParams : :obj:`dict`
            Optional texture parameters specified as `dict`. These values are
            passed to `glTexParameteri`. Keys are parameter names and values
            are their settings. For example, `texParams={
GL.GL_TEXTURE_MIN_FILTER: GL.GL_LINEAR, GL.GL_TEXTURE_MAG_FILTER:
GL.GL_LINEAR}`. These can be changed and will be updated the next
time this instance is passed to :func:`bindTexture`.
"""
# fields for texture information
self.name = name
self.width = width
self.height = height
self.target = target
self.level = level
self.internalFormat = internalFormat
self.pixelFormat = pixelFormat
self.dataType = dataType
self.unpackAlignment = unpackAlignment
self._texParams = {}
# set texture parameters
if texParams is not None:
for key, val in texParams.items():
self._texParams[key] = val
# internal data
self._isBound = False # True if the texture has been bound
self._unit = None # texture unit assigned to this texture
self._texParamsNeedUpdate = True # update texture parameters
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if not isinstance(value, GL.GLuint):
self._name = GL.GLuint(int(value))
else:
self._name = value
@property
def size(self):
"""Size of a single cubemap face [w, h] in pixels (`int`, `int`)."""
return self.width, self.height
@property
def texParams(self):
"""Texture parameters."""
self._texParamsNeedUpdate = True
return self._texParams
@texParams.setter
def texParams(self, value):
"""Texture parameters."""
self._texParamsNeedUpdate = True
self._texParams = value
def createCubeMap(width, height, target=GL.GL_TEXTURE_CUBE_MAP, level=0,
internalFormat=GL.GL_RGBA, pixelFormat=GL.GL_RGBA,
dataType=GL.GL_UNSIGNED_BYTE, data=None, unpackAlignment=4,
texParams=None):
"""Create a cubemap.
Parameters
----------
target : :obj:`int`
The target texture should only be `GL_TEXTURE_CUBE_MAP`.
width : :obj:`int`
Texture width in pixels.
height : :obj:`int`
Texture height in pixels.
level : :obj:`int`
LOD number of the texture.
internalFormat : :obj:`int`
Internal format for texture data (e.g. GL_RGBA8, GL_R11F_G11F_B10F).
pixelFormat : :obj:`int`
Pixel data format (e.g. GL_RGBA, GL_DEPTH_STENCIL)
dataType : :obj:`int`
Data type for pixel data (e.g. GL_FLOAT, GL_UNSIGNED_BYTE).
data : list or tuple
List of six ctypes pointers to image data for each cubemap face. Image
data is assigned to a face by index [+X, -X, +Y, -Y, +Z, -Z]. All images
must have the same size as specified by `width` and `height`.
unpackAlignment : :obj:`int`
Alignment requirements of each row in memory. Default is 4.
    texParams : :obj:`dict`
        Optional texture parameters specified as `dict`. These values are
        passed to `glTexParameteri`. Keys are parameter names and values
        are their settings. For example, `texParams={
GL.GL_TEXTURE_MIN_FILTER: GL.GL_LINEAR, GL.GL_TEXTURE_MAG_FILTER:
GL.GL_LINEAR}`. These can be changed and will be updated the next
time this instance is passed to :func:`bindTexture`.
"""
texId = GL.GLuint()
GL.glGenTextures(1, ctypes.byref(texId))
GL.glBindTexture(target, texId)
# create faces of the cube map
for face in range(6):
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, int(unpackAlignment))
GL.glTexImage2D(GL.GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, level,
internalFormat, width, height, 0, pixelFormat, dataType,
data[face] if data is not None else data)
# apply texture parameters
if texParams is not None:
for pname, param in texParams.items():
GL.glTexParameteri(target, pname, param)
GL.glBindTexture(target, 0)
tex = TexCubeMap(name=texId,
target=target,
width=width,
height=height,
internalFormat=internalFormat,
level=level,
pixelFormat=pixelFormat,
dataType=dataType,
unpackAlignment=unpackAlignment,
texParams=texParams)
return tex
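# Example (added sketch, not part of the original module): building a cube
# map from six equally-sized face images. `facePixels` is a placeholder list
# of six ctypes pointers ordered [+X, -X, +Y, -Y, +Z, -Z].
#
#     skyBox = createCubeMap(
#         512, 512,
#         internalFormat=GL.GL_RGBA8,
#         pixelFormat=GL.GL_RGBA,
#         dataType=GL.GL_UNSIGNED_BYTE,
#         data=facePixels,
#         texParams={GL.GL_TEXTURE_MAG_FILTER: GL.GL_LINEAR,
#                    GL.GL_TEXTURE_MIN_FILTER: GL.GL_LINEAR})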
def bindTexture(texture, unit=None, enable=True):
"""Bind a texture.
Function binds `texture` to `unit` (if specified). If `unit` is `None`, the
texture will be bound but not assigned to a texture unit.
Parameters
----------
texture : TexImage2D
Texture descriptor to bind.
unit : int, optional
        Texture unit to associate the texture with.
enable : bool
Enable textures upon binding.
"""
    if not texture._isBound:
        if enable:
            GL.glEnable(texture.target)

        # select the texture unit first so the bind below applies to it
        if unit is not None:
            texture._unit = unit
            GL.glActiveTexture(GL.GL_TEXTURE0 + unit)

        GL.glBindTexture(texture.target, texture.name)
        texture._isBound = True

        # update texture parameters if they have been accessed (changed?)
        if texture._texParamsNeedUpdate:
            for pname, param in texture._texParams.items():
                GL.glTexParameteri(texture.target, pname, param)

            texture._texParamsNeedUpdate = False
def unbindTexture(texture=None):
"""Unbind a texture.
Parameters
----------
texture : TexImage2D
Texture descriptor to unbind.
"""
if texture._isBound:
# set the texture unit
if texture._unit is not None:
GL.glActiveTexture(GL.GL_TEXTURE0 + texture._unit)
texture._unit = None
GL.glBindTexture(texture.target, 0)
texture._isBound = False
GL.glDisable(texture.target)
else:
        raise RuntimeError('Trying to unbind a texture that was not previously '
                           'bound.')
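# Example (added sketch, not part of the original module): binding a texture
# to a unit and updating its parameters through the `texParams` property.
# Assumes `tex` is a `TexImage2D` descriptor.
#
#     tex.texParams[GL.GL_TEXTURE_MAG_FILTER] = GL.GL_NEAREST  # marks dirty
#     bindTexture(tex, unit=0)  # dirty parameters are re-applied on bind
#     # ... draw calls sampling from texture unit 0 ...
#     unbindTexture(tex)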
# Descriptor for 2D multisampled texture
TexImage2DMultisample = namedtuple(
    'TexImage2DMultisample',
['id',
'target',
'width',
'height',
'internalFormat',
'samples',
'multisample',
'userData'])
def createTexImage2DMultisample(width, height,
target=GL.GL_TEXTURE_2D_MULTISAMPLE, samples=1,
internalFormat=GL.GL_RGBA8, texParameters=()):
"""Create a 2D multisampled texture.
Parameters
----------
width : :obj:`int`
Texture width in pixels.
height : :obj:`int`
Texture height in pixels.
target : :obj:`int`
The target texture (e.g. GL_TEXTURE_2D_MULTISAMPLE).
samples : :obj:`int`
        Number of samples for multisampling. Must be a power of two and no
        greater than `GL_MAX_SAMPLES`.
internalFormat : :obj:`int`
Internal format for texture data (e.g. GL_RGBA8, GL_R11F_G11F_B10F).
texParameters : :obj:`list` of :obj:`tuple` of :obj:`int`
Optional texture parameters specified as a list of tuples. These values
are passed to 'glTexParameteri'. Each tuple must contain a parameter
name and value. For example, texParameters=[(GL.GL_TEXTURE_MIN_FILTER,
GL.GL_LINEAR), (GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)]
Returns
-------
TexImage2DMultisample
A TexImage2DMultisample descriptor.
"""
width = int(width)
height = int(height)
if width <= 0 or height <= 0:
raise ValueError("Invalid image dimensions {} x {}.".format(
width, height))
# determine if the 'samples' value is valid
maxSamples = getIntegerv(GL.GL_MAX_SAMPLES)
if (samples & (samples - 1)) != 0:
raise ValueError('Invalid number of samples, must be power-of-two.')
elif samples <= 0 or samples > maxSamples:
        raise ValueError('Invalid number of samples, must be <={}.'.format(
maxSamples))
colorTexId = GL.GLuint()
GL.glGenTextures(1, ctypes.byref(colorTexId))
GL.glBindTexture(target, colorTexId)
GL.glTexImage2DMultisample(
target, samples, internalFormat, width, height, GL.GL_TRUE)
# apply texture parameters
if texParameters:
for pname, param in texParameters:
GL.glTexParameteri(target, pname, param)
GL.glBindTexture(target, 0)
return TexImage2DMultisample(colorTexId,
target,
width,
height,
internalFormat,
samples,
True,
dict())
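# Example (added sketch, not part of the original module): pairing a
# multisampled color texture with a framebuffer for MSAA off-screen
# rendering. All attachments of an FBO must use the same sample count.
#
#     colorTexMS = createTexImage2DMultisample(800, 600, samples=4)
#     fboMS = createFBO([(GL.GL_COLOR_ATTACHMENT0, colorTexMS)])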
def deleteTexture(texture):
"""Free the resources associated with a texture. This invalidates the
texture's ID.
"""
if not texture._isBound:
        GL.glDeleteTextures(1, ctypes.byref(texture.name))
texture.name = 0 # invalidate
else:
raise RuntimeError("Attempting to delete texture which is presently "
"bound.")
# --------------------------
# Vertex Array Objects (VAO)
#
class VertexArrayInfo:
"""Vertex array object (VAO) descriptor.
This class only stores information about the VAO it refers to, it does not
contain any actual array data associated with the VAO. Calling
:func:`createVAO` returns instances of this class.
If `isLegacy` is `True`, attribute binding states are using deprecated (but
still supported) pointer definition calls (eg. `glVertexPointer`). This is
to ensure backwards compatibility. The keys stored in `activeAttribs` must
be `GLenum` types such as `GL_VERTEX_ARRAY`.
Parameters
----------
name : int
OpenGL handle for the VAO.
count : int
Number of vertex elements. If `indexBuffer` is not `None`, count
corresponds to the number of elements in the index buffer.
activeAttribs : dict
Attributes and buffers defined as part of this VAO state. Keys are
attribute pointer indices or capabilities (ie. GL_VERTEX_ARRAY).
Modifying these values will not update the VAO state.
indexBuffer : VertexBufferInfo, optional
Buffer object for indices.
attribDivisors : dict, optional
Divisors for each attribute.
isLegacy : bool
Array pointers were defined using the deprecated OpenGL API. If `True`,
the VAO may work with older GLSL shaders versions and the fixed-function
pipeline.
userData : dict or None, optional
Optional user defined data associated with this VAO.
"""
__slots__ = ['name', 'count', 'activeAttribs', 'indexBuffer', 'isLegacy',
'userData', 'attribDivisors']
def __init__(self,
name=0,
count=0,
activeAttribs=None,
indexBuffer=None,
attribDivisors=None,
isLegacy=False,
userData=None):
self.name = name
self.activeAttribs = activeAttribs
self.count = count
self.indexBuffer = indexBuffer
self.attribDivisors = attribDivisors
self.isLegacy = isLegacy
if userData is None:
self.userData = {}
elif isinstance(userData, dict):
self.userData = userData
else:
raise TypeError('Invalid type for `userData`.')
def __hash__(self):
return hash((self.name, self.isLegacy))
def __eq__(self, other):
"""Equality test between VAO object names."""
return self.name == other.name
def __ne__(self, other):
"""Inequality test between VAO object names."""
return self.name != other.name
def createVAO(attribBuffers, indexBuffer=None, attribDivisors=None, legacy=False):
"""Create a Vertex Array object (VAO). VAOs store buffer binding states,
reducing CPU overhead when drawing objects with vertex data stored in VBOs.
Define vertex attributes within a VAO state by passing a mapping for
generic attribute indices and VBO buffers.
Parameters
----------
attribBuffers : dict
Attributes and associated VBOs to add to the VAO state. Keys are
vertex attribute pointer indices, values are VBO descriptors to define.
Values can be `tuples` where the first value is the buffer descriptor,
the second is the number of attribute components (`int`, either 2, 3 or
4), the third is the offset (`int`), and the last is whether to
normalize the array (`bool`).
indexBuffer : VertexBufferInfo
Optional index buffer.
attribDivisors : dict
Attribute divisors to set. Keys are vertex attribute pointer indices,
values are the number of instances that will pass between updates of an
attribute. Setting attribute divisors is only permitted if `legacy` is
`False`.
legacy : bool, optional
Use legacy attribute pointer functions when setting the VAO state. This
        is for compatibility with older GL implementations. Keys specified in
`attribBuffers` must be `GLenum` types such as `GL_VERTEX_ARRAY` to
indicate the capability to use.
Examples
--------
Create a vertex array object and enable buffer states within it::
vao = createVAO({0: vertexPos, 1: texCoords, 2: vertexNormals})
Using an interleaved vertex buffer, all attributes are in the same buffer
(`vertexAttr`). We need to specify offsets for each attribute by passing a
buffer in a `tuple` with the second value specifying the offset::
# buffer with interleaved layout `00011222` per-attribute
vao = createVAO(
{0: (vertexAttr, 3), # size 3, offset 0
1: (vertexAttr, 2, 3), # size 2, offset 3
2: (vertexAttr, 3, 5, True)}) # size 3, offset 5, normalize
You can mix interleaved and single-use buffers::
vao = createVAO(
{0: (vertexAttr, 3, 0), 1: (vertexAttr, 3, 3), 2: vertexColors})
Specifying an optional index array, this is used for indexed drawing of
primitives::
vao = createVAO({0: vertexPos}, indexBuffer=indices)
    The returned `VertexArrayInfo` instance will have its ``indexBuffer``
    attribute set to the supplied buffer.
Drawing vertex arrays using a VAO, will use the `indexBuffer` if available::
# draw the array
drawVAO(vao, mode=GL.GL_TRIANGLES)
Use legacy attribute pointer bindings when building a VAO for compatibility
with the fixed-function pipeline and older GLSL versions::
attribBuffers = {GL_VERTEX_ARRAY: vertexPos, GL_NORMAL_ARRAY: normals}
vao = createVAO(attribBuffers, legacy=True)
    If you wish to use instanced drawing, you can specify attribute divisors
this way::
vao = createVAO(
{0: (vertexAttr, 3, 0), 1: (vertexAttr, 3, 3), 2: vertexColors},
attribDivisors={2: 1})
"""
if not attribBuffers: # in case an empty list is passed
raise ValueError("No buffers specified.")
# create a vertex buffer ID
vaoId = GL.GLuint()
if _thisPlatform != 'Darwin':
GL.glGenVertexArrays(1, ctypes.byref(vaoId))
GL.glBindVertexArray(vaoId)
else:
GL.glGenVertexArraysAPPLE(1, ctypes.byref(vaoId))
GL.glBindVertexArrayAPPLE(vaoId)
# add attribute pointers
activeAttribs = {}
bufferIndices = []
for i, buffer in attribBuffers.items():
if isinstance(buffer, (list, tuple,)):
if len(buffer) == 1:
buffer = buffer[0] # size 1 tuple or list eg. (buffer,)
size = buffer.shape[1]
offset = 0
normalize = False
elif len(buffer) == 2:
buffer, size = buffer
offset = 0
normalize = False
elif len(buffer) == 3:
buffer, size, offset = buffer
normalize = False
elif len(buffer) == 4:
buffer, size, offset, normalize = buffer
else:
raise ValueError('Invalid attribute values.')
else:
size = buffer.shape[1]
offset = 0
normalize = False
enableVertexAttribArray(i, legacy)
setVertexAttribPointer(i, buffer, size, offset, normalize, legacy)
activeAttribs[i] = buffer
bufferIndices.append(buffer.shape[0])
# bind the EBO if available
if indexBuffer is not None:
if indexBuffer.target == GL.GL_ELEMENT_ARRAY_BUFFER:
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, indexBuffer.name)
if len(indexBuffer.shape) > 1:
count = indexBuffer.shape[0] * indexBuffer.shape[1]
else:
count = indexBuffer.shape[0]
else:
raise ValueError(
'Index buffer does not have target `GL_ELEMENT_ARRAY_BUFFER`.')
else:
if bufferIndices.count(bufferIndices[0]) != len(bufferIndices):
warnings.warn(
'Input arrays have unequal number of rows, using shortest for '
'`count`.')
count = min(bufferIndices)
else:
count = bufferIndices[0]
# set attribute divisors
if attribDivisors is not None:
if legacy is True:
raise ValueError(
                'Cannot set attribute divisors when `legacy` is `True`.')
for key, val in attribDivisors.items():
GL.glVertexAttribDivisor(key, val)
if _thisPlatform != 'Darwin':
GL.glBindVertexArray(0)
else:
GL.glBindVertexArrayAPPLE(0)
return VertexArrayInfo(vaoId.value,
count,
activeAttribs,
indexBuffer,
attribDivisors,
legacy)
def drawVAO(vao, mode=GL.GL_TRIANGLES, start=0, count=None, instanceCount=None,
flush=False):
"""Draw a vertex array object. Uses `glDrawArrays` or `glDrawElements` if
`instanceCount` is `None`, or else `glDrawArraysInstanced` or
`glDrawElementsInstanced` is used.
Parameters
----------
vao : VertexArrayObject
Vertex Array Object (VAO) to draw.
mode : int, optional
Drawing mode to use (e.g. GL_TRIANGLES, GL_QUADS, GL_POINTS, etc.)
start : int, optional
Starting index for array elements. Default is `0` which is the beginning
of the array.
count : int, optional
Number of indices to draw from `start`. Must not exceed `vao.count` -
`start`.
instanceCount : int or None
Number of instances to draw. If >0 and not `None`, instanced drawing
will be used.
flush : bool, optional
Flush queued drawing commands before returning.
Examples
--------
Creating a VAO and drawing it::
# draw the VAO, renders the mesh
drawVAO(vaoDesc, GL.GL_TRIANGLES)
"""
# draw the array
if _thisPlatform != 'Darwin':
GL.glBindVertexArray(vao.name)
else:
GL.glBindVertexArrayAPPLE(vao.name)
if count is None:
count = vao.count
else:
if count > vao.count - start:
raise ValueError(
"Value of `count` cannot exceed `{}`.".format(
vao.count - start))
if vao.indexBuffer is not None:
if instanceCount is None:
GL.glDrawElements(mode, count, vao.indexBuffer.dataType, start)
else:
GL.glDrawElementsInstanced(mode, count, vao.indexBuffer.dataType,
start, instanceCount)
else:
if instanceCount is None:
GL.glDrawArrays(mode, start, count)
else:
GL.glDrawArraysInstanced(mode, start, count, instanceCount)
if flush:
GL.glFlush()
# reset
if _thisPlatform != 'Darwin':
GL.glBindVertexArray(0)
else:
GL.glBindVertexArrayAPPLE(0)
def deleteVAO(vao):
"""Delete a Vertex Array Object (VAO). This does not delete array buffers
bound to the VAO.
Parameters
----------
vao : VertexArrayInfo
VAO to delete. All fields in the descriptor except `userData` will be
reset.
"""
if isinstance(vao, VertexArrayInfo):
if vao.name:
GL.glDeleteVertexArrays(1, GL.GLuint(vao.name))
vao.name = 0
vao.isLegacy = False
vao.indexBuffer = None
vao.activeAttribs = {}
vao.count = 0
# ---------------------------
# Vertex Buffer Objects (VBO)
#
class VertexBufferInfo:
"""Vertex buffer object (VBO) descriptor.
This class only stores information about the VBO it refers to, it does not
contain any actual array data associated with the VBO. Calling
:func:`createVBO` returns instances of this class.
It is recommended to use `gltools` functions :func:`bindVBO`,
:func:`unbindVBO`, :func:`mapBuffer`, etc. when working with these objects.
Parameters
----------
name : GLuint or int
OpenGL handle for the buffer.
target : GLenum or int, optional
        Target used when binding the buffer (e.g. `GL_ARRAY_BUFFER` or
        `GL_ELEMENT_ARRAY_BUFFER`). Default is `GL_ARRAY_BUFFER`.
usage : GLenum or int, optional
Usage type for the array (i.e. `GL_STATIC_DRAW`).
dataType : Glenum, optional
Data type of array. Default is `GL_FLOAT`.
size : int, optional
Size of the buffer in bytes.
stride : int, optional
Number of bytes between adjacent attributes. If `0`, values are assumed
to be tightly packed.
shape : tuple or list, optional
Shape of the array used to create this VBO.
userData : dict, optional
Optional user defined data associated with the VBO. If `None`,
`userData` will be initialized as an empty dictionary.
"""
__slots__ = ['name', 'target', 'usage', 'dataType',
'size', 'stride', 'shape', 'userData']
def __init__(self,
name=0,
target=GL.GL_ARRAY_BUFFER,
usage=GL.GL_STATIC_DRAW,
dataType=GL.GL_FLOAT,
size=0,
stride=0,
shape=(0,),
userData=None):
self.name = name
self.target = target
self.usage = usage
self.dataType = dataType
self.size = size
self.stride = stride
self.shape = shape
if userData is None:
self.userData = {}
elif isinstance(userData, dict):
self.userData = userData
else:
raise TypeError('Invalid type for `userData`.')
def __hash__(self):
return hash((self.name,
self.target,
self.dataType,
self.usage,
self.size,
self.shape))
def __eq__(self, other):
"""Equality test between VBO object names."""
return self.name == other.name
def __ne__(self, other):
"""Inequality test between VBO object names."""
return self.name != other.name
@property
def hasBuffer(self):
"""Check if the VBO assigned to `name` is a buffer."""
if self.name != 0 and GL.glIsBuffer(self.name):
return True
return False
@property
def isIndex(self):
"""`True` if the buffer referred to by this object is an index array."""
if self.name != 0 and GL.glIsBuffer(self.name):
return self.target == GL.GL_ELEMENT_ARRAY_BUFFER
return False
def validate(self):
"""Check if the data contained in this descriptor matches what is
actually present in the OpenGL state.
Returns
-------
bool
`True` if the information contained in this descriptor matches the
OpenGL state.
"""
# fail automatically if these conditions are true
if self.name == 0 or GL.glIsBuffer(self.name) != GL.GL_TRUE:
return False
if self.target == GL.GL_ARRAY_BUFFER:
        bindTarget = GL.GL_ARRAY_BUFFER_BINDING
elif self.target == GL.GL_ELEMENT_ARRAY_BUFFER:
bindTarget = GL.GL_ELEMENT_ARRAY_BUFFER_BINDING
else:
raise ValueError(
'Invalid `target` type, must be `GL_ARRAY_BUFFER` or '
'`GL_ELEMENT_ARRAY_BUFFER`.')
# get current binding so we don't disturb the current state
currentVBO = GL.GLint()
GL.glGetIntegerv(bindTarget, ctypes.byref(currentVBO))
# bind buffer at name to validate
GL.glBindBuffer(self.target, self.name)
# get buffer parameters
actualSize = GL.GLint()
GL.glGetBufferParameteriv(
self.target, GL.GL_BUFFER_SIZE, ctypes.byref(actualSize))
actualUsage = GL.GLint()
GL.glGetBufferParameteriv(
self.target, GL.GL_BUFFER_USAGE, ctypes.byref(actualUsage))
# check values against those in this object
isValid = False
if self.usage == actualUsage.value and self.size == actualSize.value:
isValid = True
# return to the original binding
GL.glBindBuffer(self.target, currentVBO.value)
return isValid
def createVBO(data,
target=GL.GL_ARRAY_BUFFER,
dataType=GL.GL_FLOAT,
usage=GL.GL_STATIC_DRAW):
"""Create an array buffer object (VBO).
Creates a VBO using input data, usually as a `ndarray` or `list`. Attributes
common to one vertex should occupy a single row of the `data` array.
Parameters
----------
data : array_like
A 2D array of values to write to the array buffer. The data type of the
VBO is inferred by the type of the array. If the input is a Python
`list` or `tuple` type, the data type of the array will be `GL_FLOAT`.
target : :obj:`int`
        Target used when binding the buffer (e.g. `GL_ARRAY_BUFFER` or
        `GL_ELEMENT_ARRAY_BUFFER`). Default is `GL_ARRAY_BUFFER`.
dataType : Glenum, optional
Data type of array. Input data will be recast to an appropriate type if
necessary. Default is `GL_FLOAT`.
usage : GLenum or int, optional
Usage type for the array (i.e. `GL_STATIC_DRAW`).
Returns
-------
VertexBufferInfo
A descriptor with vertex buffer information.
Examples
--------
Creating a vertex buffer object with vertex data::
# vertices of a triangle
verts = [[ 1.0, 1.0, 0.0], # v0
[ 0.0, -1.0, 0.0], # v1
[-1.0, 1.0, 0.0]] # v2
# load vertices to graphics device, return a descriptor
vboDesc = createVBO(verts)
Drawing triangles or quads using vertex buffer data::
nIndices, vSize = vboDesc.shape # element size
bindVBO(vboDesc)
setVertexAttribPointer(
GL_VERTEX_ARRAY, vSize, vboDesc.dataType, legacy=True)
enableVertexAttribArray(GL_VERTEX_ARRAY, legacy=True)
if vSize == 3:
drawMode = GL_TRIANGLES
elif vSize == 4:
drawMode = GL_QUADS
glDrawArrays(drawMode, 0, nIndices)
glFlush()
disableVertexAttribArray(GL_VERTEX_ARRAY, legacy=True)
unbindVBO()
Custom data can be associated with this vertex buffer by specifying
`userData`::
myVBO = createVBO(data)
myVBO.userData['startIdx'] = 14 # first index to draw with
# use it later
nIndices, vSize = vboDesc.shape # element size
startIdx = myVBO.userData['startIdx']
endIdx = nIndices - startIdx
glDrawArrays(GL_TRIANGLES, startIdx, endIdx)
glFlush()
"""
# build input array
npType, glType = GL_COMPAT_TYPES[dataType]
data = np.asarray(data, dtype=npType)
# get buffer size and pointer
bufferSize = data.size * ctypes.sizeof(glType)
if data.ndim > 1:
bufferStride = data.shape[1] * ctypes.sizeof(glType)
else:
bufferStride = 0
bufferPtr = data.ctypes.data_as(ctypes.POINTER(glType))
# create a vertex buffer ID
bufferName = GL.GLuint()
GL.glGenBuffers(1, ctypes.byref(bufferName))
# bind and upload
GL.glBindBuffer(target, bufferName)
GL.glBufferData(target, bufferSize, bufferPtr, usage)
GL.glBindBuffer(target, 0)
vboInfo = VertexBufferInfo(
bufferName,
target,
usage,
dataType,
bufferSize,
bufferStride,
data.shape) # leave userData empty
return vboInfo
def bindVBO(vbo):
"""Bind a VBO to the current GL state.
Parameters
----------
vbo : VertexBufferInfo
VBO descriptor to bind.
    """
if isinstance(vbo, VertexBufferInfo):
GL.glBindBuffer(vbo.target, vbo.name)
else:
        raise TypeError('Specified `vbo` is not a `VertexBufferInfo`.')
def unbindVBO(vbo):
"""Unbind a vertex buffer object (VBO).
Parameters
----------
vbo : VertexBufferInfo
VBO descriptor to unbind.
"""
if isinstance(vbo, VertexBufferInfo):
GL.glBindBuffer(vbo.target, 0)
else:
raise TypeError('Specified `vbo` is not a `VertexBufferInfo`.')
def mapBuffer(vbo, start=0, length=None, read=True, write=True, noSync=False):
"""Map a vertex buffer object to client memory. This allows you to modify
its contents.
If planning to update VBO vertex data, make sure the VBO `usage` types are
`GL_DYNAMIC_*` or `GL_STREAM_*` or else serious performance issues may
arise.
Warnings
--------
Modifying buffer data must be done carefully, or else system stability may
be affected. Do not use the returned view `ndarray` outside of successive
:func:`mapBuffer` and :func:`unmapBuffer` calls. Do not use the mapped
buffer for rendering until after :func:`unmapBuffer` is called.
Parameters
----------
vbo : VertexBufferInfo
Vertex buffer to map to client memory.
start : int
Initial index of the sub-range of the buffer to modify.
length : int or None
Number of elements of the sub-array to map from `start`. If `None`, all
elements from `start` to the end of the array are mapped.
read : bool, optional
Allow data to be read from the buffer (sets `GL_MAP_READ_BIT`). This is
ignored if `noSync` is `True`.
write : bool, optional
Allow data to be written to the buffer (sets `GL_MAP_WRITE_BIT`).
noSync : bool, optional
If `True`, GL will not wait until the buffer is free (i.e. not being
processed by the GPU) to map it (sets `GL_MAP_UNSYNCHRONIZED_BIT`). The
contents of the previous storage buffer are discarded and the driver
returns a new one. This prevents the CPU from stalling until the buffer
is available.
Returns
-------
ndarray
View of the data. The type of the returned array is one which best
matches the data type of the buffer.
Examples
--------
Map a buffer and edit it::
arr = mapBuffer(vbo)
arr[:, :] += 2.0 # add 2 to all values
unmapBuffer(vbo) # call when done
# Don't ever modify `arr` after calling `unmapBuffer`. Delete it if
# necessary to prevent it from being used.
del arr
Modify a sub-range of data by specifying `start` and `length`; indices
correspond to elements, not byte offsets::
arr = mapBuffer(vbo, start=12, length=12)
arr[:, :] *= 10.0
unmapBuffer(vbo)
"""
npType, glType = GL_COMPAT_TYPES[vbo.dataType]
start *= ctypes.sizeof(glType)
if length is None:
length = vbo.size
else:
length *= ctypes.sizeof(glType)
accessFlags = GL.GL_NONE
if noSync: # if set, don't set GL_MAP_READ_BIT
accessFlags |= GL.GL_MAP_UNSYNCHRONIZED_BIT
elif read:
accessFlags |= GL.GL_MAP_READ_BIT
if write:
accessFlags |= GL.GL_MAP_WRITE_BIT
bindVBO(vbo) # bind the buffer for mapping
# get pointer to the buffer
bufferPtr = GL.glMapBufferRange(
vbo.target,
GL.GLintptr(start),
GL.GLintptr(length),
accessFlags)
bufferArray = np.ctypeslib.as_array(
ctypes.cast(bufferPtr, ctypes.POINTER(glType)),
shape=vbo.shape)
return bufferArray
def unmapBuffer(vbo):
"""Unmap a previously mapped buffer. Must be called after :func:`mapBuffer`
is called and before any drawing operations which use the buffer are
called. Failing to call this before using the buffer could result in a
system error.
Parameters
----------
vbo : VertexBufferInfo
Vertex buffer descriptor.
Returns
-------
bool
`True` if the buffer has been successfully modified. If `False`, the
data was corrupted for some reason and needs to be resubmitted.
"""
return GL.glUnmapBuffer(vbo.target) == GL.GL_TRUE
def deleteVBO(vbo):
"""Delete a vertex buffer object (VBO).
Parameters
----------
vbo : VertexBufferInfo
Descriptor of VBO to delete.
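Examples
--------
A minimal sketch of releasing a buffer that is no longer needed (`myVBO`
is assumed to be a descriptor previously returned by :func:`createVBO`)::
    deleteVBO(myVBO)  # frees the buffer and zeros its `name` field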
"""
if GL.glIsBuffer(vbo.name):
GL.glDeleteBuffers(1, vbo.name)
vbo.name = GL.GLuint(0)
def setVertexAttribPointer(index,
vbo,
size=None,
offset=0,
normalize=False,
legacy=False):
"""Define an array of vertex attribute data with a VBO descriptor.
In modern OpenGL implementations, attributes are 'generic', where an
attribute pointer index does not correspond to any special vertex property.
Usually the usage for an attribute is defined in the shader program. It is
recommended that shader programs define attributes using the `layout`
parameters::
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
Setting attribute pointers can be done like this::
setVertexAttribPointer(0, posVbo)
setVertexAttribPointer(1, texVbo)
setVertexAttribPointer(2, normVbo)
For compatibility with older OpenGL specifications, some drivers will alias
vertex pointers unless they are explicitly defined in the shader. This
allows VAOs to be used with the fixed-function pipeline or older GLSL
versions.
On nVidia graphics drivers (and maybe others), the following attribute
pointers indices are aliased with reserved GLSL names:
* gl_Vertex - 0
* gl_Normal - 2
* gl_Color - 3
* gl_SecondaryColor - 4
* gl_FogCoord - 5
* gl_MultiTexCoord0 - 8
* gl_MultiTexCoord1 - 9
* gl_MultiTexCoord2 - 10
* gl_MultiTexCoord3 - 11
* gl_MultiTexCoord4 - 12
* gl_MultiTexCoord5 - 13
* gl_MultiTexCoord6 - 14
* gl_MultiTexCoord7 - 15
Specifying `legacy` as `True` will allow for old-style pointer definitions.
You must specify the capability as a `GLenum` associated with the pointer
in this case::
setVertexAttribPointer(GL_VERTEX_ARRAY, posVbo, legacy=True)
setVertexAttribPointer(GL_TEXTURE_COORD_ARRAY, texVbo, legacy=True)
setVertexAttribPointer(GL_NORMAL_ARRAY, normVbo, legacy=True)
Parameters
----------
index : int
Index of the attribute to modify. If `legacy=True`, this value should
be a `GLenum` type corresponding to the capability to bind the buffer
to, such as `GL_VERTEX_ARRAY`, `GL_TEXTURE_COORD_ARRAY`,
`GL_NORMAL_ARRAY`, etc.
vbo : VertexBufferInfo
VBO descriptor.
size : int, optional
Number of components per vertex attribute, can be either 1, 2, 3, or 4.
If `None` is specified, the component size will be inferred from the
`shape` of the VBO. You must specify this value if the VBO is
interleaved.
offset : int, optional
Starting index of the attribute in the buffer.
normalize : bool, optional
Normalize fixed-point format values when accessed.
legacy : bool, optional
Use legacy vertex attributes (ie. `GL_VERTEX_ARRAY`,
`GL_TEXTURE_COORD_ARRAY`, etc.) for backwards compatibility.
Examples
--------
Define a generic attribute from a vertex buffer descriptor::
# set the vertex location attribute
setVertexAttribPointer(0, vboDesc) # 0 is vertex in our shader
GL.glColor3f(1.0, 0.0, 0.0) # red triangle
# draw the triangle
nIndices, vSize = vboDesc.shape # element size
GL.glDrawArrays(GL.GL_TRIANGLES, 0, nIndices)
If our VBO has interleaved attributes, we can specify `offset` to account
for that::
# define interleaved vertex attributes
# | Position | Texture | Normals |
vQuad = [[ -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], # v0
[ -1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], # v1
[ 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0], # v2
[ 1.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0]] # v3
# create a VBO with interleaved attributes
vboInterleaved = createVBO(np.asarray(vQuad, dtype=np.float32))
# ... before rendering, set the attribute pointers
GL.glBindBuffer(vboInterleaved.target, vboInterleaved.name)
gltools.setVertexAttribPointer(
0, vboInterleaved, size=3, offset=0) # vertex pointer
gltools.setVertexAttribPointer(
8, vboInterleaved, size=2, offset=3) # texture pointer
gltools.setVertexAttribPointer(
3, vboInterleaved, size=3, offset=5) # normals pointer
# Note: we are managing the binding state manually here. Doing so is
# recommended when setting up interleaved buffers to avoid re-binding
# the same buffer for each attribute.
# draw red, full screen quad
GL.glColor3f(1.0, 0.0, 0.0)
GL.glDrawArrays(GL.GL_QUADS, 0, vboInterleaved.shape[1])
# call these when done to disable the attribute arrays
gltools.disableVertexAttribArray(0)
gltools.disableVertexAttribArray(8)
gltools.disableVertexAttribArray(3)
# unbind the buffer
GL.glBindBuffer(vboInterleaved.target, 0)
"""
if vbo.target != GL.GL_ARRAY_BUFFER:
raise ValueError('VBO must have `target` type `GL_ARRAY_BUFFER`.')
_, glType = GL_COMPAT_TYPES[vbo.dataType]
if size is None:
size = vbo.shape[1]
offset *= ctypes.sizeof(glType)
bindVBO(vbo)
if not legacy:
GL.glEnableVertexAttribArray(index)
GL.glVertexAttribPointer(
index,
size,
vbo.dataType,
GL.GL_TRUE if normalize else GL.GL_FALSE,
vbo.stride,
offset)
else:
GL.glEnableClientState(index)
if index == GL.GL_VERTEX_ARRAY:
GL.glVertexPointer(size, vbo.dataType, vbo.stride, offset)
elif index == GL.GL_NORMAL_ARRAY:
GL.glNormalPointer(vbo.dataType, vbo.stride, offset)
elif index == GL.GL_TEXTURE_COORD_ARRAY:
GL.glTexCoordPointer(size, vbo.dataType, vbo.stride, offset)
elif index == GL.GL_COLOR_ARRAY:
GL.glColorPointer(size, vbo.dataType, vbo.stride, offset)
elif index == GL.GL_SECONDARY_COLOR_ARRAY:
GL.glSecondaryColorPointer(size, vbo.dataType, vbo.stride, offset)
elif index == GL.GL_FOG_COORD_ARRAY:
GL.glFogCoordPointer(vbo.dataType, vbo.stride, offset)
else:
raise ValueError('Invalid `index` enum specified.')
unbindVBO(vbo)
def enableVertexAttribArray(index, legacy=False):
"""Enable a vertex attribute array. Attributes will be used for use by
subsequent draw operations. Be sure to call :func:`disableVertexAttribArray`
on the same attribute to prevent currently enabled attributes from affecting
later rendering.
Parameters
----------
index : int
Index of the attribute to enable. If `legacy=True`, this value should
be a `GLenum` type corresponding to the capability to bind the buffer
to, such as `GL_VERTEX_ARRAY`, `GL_TEXTURE_COORD_ARRAY`,
`GL_NORMAL_ARRAY`, etc.
legacy : bool, optional
Use legacy vertex attributes (ie. `GL_VERTEX_ARRAY`,
`GL_TEXTURE_COORD_ARRAY`, etc.) for backwards compatibility.
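Examples
--------
A minimal sketch of enabling an attribute array around a draw call;
attribute index 0 and the vertex count `nIndices` are assumed to have been
set up earlier with :func:`setVertexAttribPointer`::
    enableVertexAttribArray(0)
    GL.glDrawArrays(GL.GL_TRIANGLES, 0, nIndices)
    disableVertexAttribArray(0)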
"""
if not legacy:
GL.glEnableVertexAttribArray(index)
else:
GL.glEnableClientState(index)
def disableVertexAttribArray(index, legacy=False):
"""Disable a vertex attribute array.
Parameters
----------
index : int
Index of the attribute to disable. If `legacy=True`, this value should
be a `GLenum` type corresponding to the capability to bind the buffer
to, such as `GL_VERTEX_ARRAY`, `GL_TEXTURE_COORD_ARRAY`,
`GL_NORMAL_ARRAY`, etc.
legacy : bool, optional
Use legacy vertex attributes (ie. `GL_VERTEX_ARRAY`,
`GL_TEXTURE_COORD_ARRAY`, etc.) for backwards compatibility.
"""
if not legacy:
GL.glDisableVertexAttribArray(index)
else:
GL.glDisableClientState(index)
# -------------------------
# Material Helper Functions
# -------------------------
#
# Materials affect the appearance of rendered faces. These helper functions and
# datatypes simplify the creation of materials for rendering stimuli.
#
Material = namedtuple('Material', ['face', 'params', 'textures', 'userData'])
def createMaterial(params=(), textures=(), face=GL.GL_FRONT_AND_BACK):
"""Create a new material.
Parameters
----------
params : :obj:`list` of :obj:`tuple`, optional
List of material modes and values. Each mode is assigned a value as
(mode, color). Modes can be GL_AMBIENT, GL_DIFFUSE, GL_SPECULAR,
GL_EMISSION, GL_SHININESS or GL_AMBIENT_AND_DIFFUSE. Colors must be
a tuple of 4 floats which specify reflectance values for each RGBA
component. The value of GL_SHININESS should be a single float. If no
values are specified, an empty material will be created.
textures : :obj:`list` of :obj:`tuple`, optional
List of texture units and TexImage2D descriptors. These will be written
to the 'textures' field of the returned descriptor. For example,
[(GL.GL_TEXTURE0, texDesc0), (GL.GL_TEXTURE1, texDesc1)]. The number of
texture units available per material is given by
GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS.
face : :obj:`int`, optional
Faces to apply material to. Values can be GL_FRONT_AND_BACK, GL_FRONT
and GL_BACK. The default is GL_FRONT_AND_BACK.
Returns
-------
Material :
A descriptor with material properties.
Examples
--------
Creating a new material with given properties::
# The values for the material below can be found at
# http://devernay.free.fr/cours/opengl/materials.html
# create a gold material
gold = createMaterial([
(GL.GL_AMBIENT, (0.24725, 0.19950, 0.07450, 1.0)),
(GL.GL_DIFFUSE, (0.75164, 0.60648, 0.22648, 1.0)),
(GL.GL_SPECULAR, (0.628281, 0.555802, 0.366065, 1.0)),
(GL.GL_SHININESS, 0.4 * 128.0)])
Use the material when drawing::
useMaterial(gold)
drawVAO( ... ) # all meshes will be gold
useMaterial(None) # turn off material when done
Create a red plastic material, but define reflectance and shine later::
red_plastic = createMaterial()
# you need to convert values to ctypes!
red_plastic.params[GL_AMBIENT] = (GLfloat * 4)(0.0, 0.0, 0.0, 1.0)
red_plastic.params[GL_DIFFUSE] = (GLfloat * 4)(0.5, 0.0, 0.0, 1.0)
red_plastic.params[GL_SPECULAR] = (GLfloat * 4)(0.7, 0.6, 0.6, 1.0)
red_plastic.params[GL_SHININESS] = GLfloat(0.25 * 128.0)
# set and draw
useMaterial(red_plastic)
drawVertexbuffers( ... ) # all meshes will be red plastic
useMaterial(None)
"""
# setup material mode/value slots
matDesc = Material(
face,
{mode: None for mode in (
GL.GL_AMBIENT,
GL.GL_DIFFUSE,
GL.GL_SPECULAR,
GL.GL_EMISSION,
GL.GL_SHININESS)},
dict(),
dict())
if params:
for mode, param in params:
matDesc.params[mode] = \
(GL.GLfloat * 4)(*param) \
if mode != GL.GL_SHININESS else GL.GLfloat(param)
if textures:
maxTexUnits = getIntegerv(GL.GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS)
for unit, texDesc in textures:
if unit <= GL.GL_TEXTURE0 + (maxTexUnits - 1):
matDesc.textures[unit] = texDesc
else:
raise ValueError("Invalid texture unit enum.")
return matDesc
class SimpleMaterial:
"""Class representing a simple material.
This class stores material information to modify the appearance of drawn
primitives with respect to lighting, such as color (diffuse, specular,
ambient, and emission), shininess, and textures. Simple materials are
intended to work with features supported by the fixed-function OpenGL
pipeline.
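Examples
--------
A minimal sketch of creating a matte grey material and applying it with
:func:`useMaterial` (`vao` is assumed to be a vertex array descriptor)::
    matteGrey = SimpleMaterial(diffuseColor=(0.2, 0.2, 0.2))
    useMaterial(matteGrey)
    drawVAO(vao)  # drawn meshes appear matte grey
    useMaterial(None)  # revert to the default material when done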
"""
def __init__(self,
win=None,
diffuseColor=(.5, .5, .5),
specularColor=(-1., -1., -1.),
ambientColor=(-1., -1., -1.),
emissionColor=(-1., -1., -1.),
shininess=10.0,
colorSpace='rgb',
diffuseTexture=None,
specularTexture=None,
opacity=1.0,
contrast=1.0,
face='front'):
"""
Parameters
----------
win : `~psychopy.visual.Window` or `None`
Window this material is associated with, required for shaders and
some color space conversions.
diffuseColor : array_like
Diffuse material color (r, g, b, a) with values between 0.0 and 1.0.
specularColor : array_like
Specular material color (r, g, b, a) with values between 0.0 and
1.0.
ambientColor : array_like
Ambient material color (r, g, b, a) with values between 0.0 and 1.0.
emissionColor : array_like
Emission material color (r, g, b, a) with values between 0.0 and
1.0.
shininess : float
Material shininess, usually ranges from 0.0 to 128.0.
colorSpace : str
Color space for `diffuseColor`, `specularColor`, `ambientColor`, and
`emissionColor`.
diffuseTexture : TexImage2D, optional
    Texture to use for the diffuse color of the material.
specularTexture : TexImage2D, optional
    Texture to use for the specular color of the material.
opacity : float
Opacity of the material. Ranges from 0.0 to 1.0 where 1.0 is fully
opaque.
contrast : float
Contrast of the material colors.
face : str
Face to apply material to. Values are `front`, `back` or `both`.
"""
self.win = win
self._diffuseColor = np.zeros((3,), np.float32)
self._specularColor = np.zeros((3,), np.float32)
self._ambientColor = np.zeros((3,), np.float32)
self._emissionColor = np.zeros((3,), np.float32)
self._shininess = float(shininess)
# internal RGB values post colorspace conversion
self._diffuseRGB = np.array((0., 0., 0., 1.), np.float32)
self._specularRGB = np.array((0., 0., 0., 1.), np.float32)
self._ambientRGB = np.array((0., 0., 0., 1.), np.float32)
self._emissionRGB = np.array((0., 0., 0., 1.), np.float32)
# which faces to apply the material
if face == 'front':
self._face = GL.GL_FRONT
elif face == 'back':
self._face = GL.GL_BACK
elif face == 'both':
self._face = GL.GL_FRONT_AND_BACK
else:
raise ValueError("Invalid `face` specified, must be 'front', "
"'back' or 'both'.")
self.colorSpace = colorSpace
self.opacity = opacity
self.contrast = contrast
self.diffuseColor = diffuseColor
self.specularColor = specularColor
self.ambientColor = ambientColor
self.emissionColor = emissionColor
self._diffuseTexture = diffuseTexture
self._normalTexture = None
self._useTextures = False # keeps track if textures are being used
@property
def diffuseTexture(self):
"""Diffuse color of the material."""
return self._diffuseTexture
@diffuseTexture.setter
def diffuseTexture(self, value):
self._diffuseTexture = value
@property
def diffuseColor(self):
"""Diffuse color of the material."""
return self._diffuseColor
@diffuseColor.setter
def diffuseColor(self, value):
self._diffuseColor = np.asarray(value, np.float32)
setColor(self, value, colorSpace=self.colorSpace, operation=None,
rgbAttrib='diffuseRGB', colorAttrib='diffuseColor',
colorSpaceAttrib='colorSpace')
@property
def diffuseRGB(self):
"""Diffuse color of the material."""
return self._diffuseRGB[:3]
@diffuseRGB.setter
def diffuseRGB(self, value):
# make sure the color we got is 32-bit float
self._diffuseRGB = np.zeros((4,), np.float32)
self._diffuseRGB[:3] = (value * self.contrast + 1) / 2.0
self._diffuseRGB[3] = self.opacity
@property
def specularColor(self):
"""Specular color of the material."""
return self._specularColor
@specularColor.setter
def specularColor(self, value):
self._specularColor = np.asarray(value, np.float32)
setColor(self, value, colorSpace=self.colorSpace, operation=None,
rgbAttrib='specularRGB', colorAttrib='specularColor',
colorSpaceAttrib='colorSpace')
@property
def specularRGB(self):
"""Diffuse color of the material."""
return self._specularRGB[:3]
@specularRGB.setter
def specularRGB(self, value):
# make sure the color we got is 32-bit float
self._specularRGB = np.zeros((4,), np.float32)
self._specularRGB[:3] = (value * self.contrast + 1) / 2.0
self._specularRGB[3] = self.opacity
@property
def ambientColor(self):
"""Ambient color of the material."""
return self._ambientColor
@ambientColor.setter
def ambientColor(self, value):
self._ambientColor = np.asarray(value, np.float32)
setColor(self, value, colorSpace=self.colorSpace, operation=None,
rgbAttrib='ambientRGB', colorAttrib='ambientColor',
colorSpaceAttrib='colorSpace')
@property
def ambientRGB(self):
"""Diffuse color of the material."""
return self._ambientRGB[:3]
@ambientRGB.setter
def ambientRGB(self, value):
# make sure the color we got is 32-bit float
self._ambientRGB = np.zeros((4,), np.float32)
self._ambientRGB[:3] = (value * self.contrast + 1) / 2.0
self._ambientRGB[3] = self.opacity
@property
def emissionColor(self):
"""Emission color of the material."""
return self._emissionColor
@emissionColor.setter
def emissionColor(self, value):
self._emissionColor = np.asarray(value, np.float32)
setColor(self, value, colorSpace=self.colorSpace, operation=None,
rgbAttrib='emissionRGB', colorAttrib='emissionColor',
colorSpaceAttrib='colorSpace')
@property
def emissionRGB(self):
"""Diffuse color of the material."""
return self._emissionRGB[:3]
@emissionRGB.setter
def emissionRGB(self, value):
# make sure the color we got is 32-bit float
self._emissionRGB = np.zeros((4,), np.float32)
self._emissionRGB[:3] = (value * self.contrast + 1) / 2.0
self._emissionRGB[3] = self.opacity
@property
def shininess(self):
return self._shininess
@shininess.setter
def shininess(self, value):
self._shininess = float(value)
def useMaterial(material, useTextures=True):
"""Use a material for proceeding vertex draws.
Parameters
----------
material : :obj:`Material` or None
Material descriptor to use. Default material properties are set if None
is specified. This is equivalent to disabling materials.
useTextures : :obj:`bool`
Enable textures. Textures specified in a material descriptor's 'texture'
attribute will be bound and their respective texture units will be
enabled. Note, when disabling materials, the value of useTextures must
match the previous call. If there are no textures attached to the
material, `useTextures` will be silently ignored.
Returns
-------
None
Notes
-----
1. If a material mode has a value of None, a color with all components 0.0
will be assigned.
2. Material colors and shininess values are accessible from shader programs
after calling 'useMaterial'. Values can be accessed via built-in
'gl_FrontMaterial' and 'gl_BackMaterial' structures (e.g.
gl_FrontMaterial.diffuse).
Examples
--------
Use a material when drawing::
useMaterial(metalMaterials.gold)
drawVAO( ... ) # all meshes drawn will be gold
useMaterial(None) # turn off material when done
"""
if material is not None:
GL.glDisable(GL.GL_COLOR_MATERIAL) # disable color tracking
face = material._face
GL.glColorMaterial(face, GL.GL_AMBIENT_AND_DIFFUSE)
# convert data in light class to ctypes
diffuse = np.ctypeslib.as_ctypes(material._diffuseRGB)
specular = np.ctypeslib.as_ctypes(material._specularRGB)
ambient = np.ctypeslib.as_ctypes(material._ambientRGB)
emission = np.ctypeslib.as_ctypes(material._emissionRGB)
# pass values to OpenGL
GL.glMaterialfv(face, GL.GL_DIFFUSE, diffuse)
GL.glMaterialfv(face, GL.GL_SPECULAR, specular)
GL.glMaterialfv(face, GL.GL_AMBIENT, ambient)
GL.glMaterialfv(face, GL.GL_EMISSION, emission)
GL.glMaterialf(face, GL.GL_SHININESS, material.shininess)
# setup textures
if useTextures and material.diffuseTexture is not None:
material._useTextures = True
GL.glEnable(GL.GL_TEXTURE_2D)
if material.diffuseTexture is not None:
bindTexture(material.diffuseTexture, 0)
else:
material._useTextures = False
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glDisable(GL.GL_TEXTURE_2D)
else:
    GL.glEnable(GL.GL_COLOR_MATERIAL)  # restore color tracking
    for mode, param in defaultMaterial.params.items():
        GL.glMaterialfv(GL.GL_FRONT_AND_BACK, mode, param)
def clearMaterial(material):
"""Stop using a material."""
for mode, param in defaultMaterial.params.items():
GL.glMaterialfv(GL.GL_FRONT_AND_BACK, mode, param)
if material._useTextures:
if material.diffuseTexture is not None:
unbindTexture(material.diffuseTexture)
GL.glDisable(GL.GL_TEXTURE_2D)
GL.glDisable(GL.GL_COLOR_MATERIAL) # disable color tracking
# -------------------------
# Lighting Helper Functions
# -------------------------
Light = namedtuple('Light', ['params', 'userData'])
def createLight(params=()):
"""Create a point light source.
"""
# setup light mode/value slots
lightDesc = Light({mode: None for mode in (
GL.GL_AMBIENT,
GL.GL_DIFFUSE,
GL.GL_SPECULAR,
GL.GL_POSITION,
GL.GL_SPOT_CUTOFF,
GL.GL_SPOT_DIRECTION,
GL.GL_SPOT_EXPONENT,
GL.GL_CONSTANT_ATTENUATION,
GL.GL_LINEAR_ATTENUATION,
GL.GL_QUADRATIC_ATTENUATION)}, dict())
# configure lights
if params:
for mode, value in params:
if value is not None:
if mode in [GL.GL_AMBIENT, GL.GL_DIFFUSE, GL.GL_SPECULAR,
GL.GL_POSITION]:
lightDesc.params[mode] = (GL.GLfloat * 4)(*value)
elif mode == GL.GL_SPOT_DIRECTION:
lightDesc.params[mode] = (GL.GLfloat * 3)(*value)
else:
lightDesc.params[mode] = GL.GLfloat(value)
return lightDesc
def useLights(lights, setupOnly=False):
"""Use specified lights in successive rendering operations. All lights will
be transformed using the present modelview matrix.
Parameters
----------
lights : :obj:`List` of :obj:`Light` or None
Descriptor of a light source. If None, lighting is disabled.
setupOnly : :obj:`bool`, optional
Do not enable lighting or lights. Specify True if lighting is being
computed via fragment shaders.
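Examples
--------
A minimal sketch of lighting drawn geometry (`light` is assumed to be a
descriptor from :func:`createLight` and `vao` a vertex array descriptor)::
    useLights([light])
    drawVAO(vao)  # geometry is now lit
    useLights(None)  # disable lighting when done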
"""
if lights is not None:
if len(lights) > getIntegerv(GL.GL_MAX_LIGHTS):
raise IndexError("Number of lights specified > GL_MAX_LIGHTS.")
GL.glEnable(GL.GL_NORMALIZE)
for index, light in enumerate(lights):
enumLight = GL.GL_LIGHT0 + index
# light properties
for mode, value in light.params.items():
if value is not None:
GL.glLightfv(enumLight, mode, value)
if not setupOnly:
GL.glEnable(enumLight)
if not setupOnly:
GL.glEnable(GL.GL_LIGHTING)
else:
# disable lights
if not setupOnly:
for enumLight in range(getIntegerv(GL.GL_MAX_LIGHTS)):
GL.glDisable(GL.GL_LIGHT0 + enumLight)
GL.glDisable(GL.GL_NORMALIZE)
GL.glDisable(GL.GL_LIGHTING)
def setAmbientLight(color):
"""Set the global ambient lighting for the scene when lighting is enabled.
This is equivalent to GL.glLightModelfv(GL.GL_LIGHT_MODEL_AMBIENT, color)
and does not contribute to the GL_MAX_LIGHTS limit.
Parameters
----------
color : :obj:`tuple`
Ambient lighting RGBA intensity for the whole scene.
Notes
-----
If unset, the default value is (0.2, 0.2, 0.2, 1.0) when GL_LIGHTING is
enabled.
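Examples
--------
Setting a dim, neutral ambient light for the whole scene::
    setAmbientLight((0.2, 0.2, 0.2, 1.0))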
"""
GL.glLightModelfv(GL.GL_LIGHT_MODEL_AMBIENT, (GL.GLfloat * 4)(*color))
# -------------------------
# 3D Model Helper Functions
# -------------------------
#
# These functions are used in the creation, manipulation and rendering of 3D
# model data.
#
class ObjMeshInfo:
"""Descriptor for mesh data loaded from a Wavefront OBJ file.
"""
__slots__ = [
'vertexPos',
'texCoords',
'normals',
'faces',
'extents',
'mtlFile']
def __init__(self,
vertexPos=None,
texCoords=None,
normals=None,
faces=None,
extents=None,
mtlFile=None):
self.vertexPos = vertexPos
self.texCoords = texCoords
self.normals = normals
self.faces = faces
self.extents = extents
self.mtlFile = mtlFile
def loadObjFile(objFile):
"""Load a Wavefront OBJ file (*.obj).
Loads vertex positions, normals, and texture coordinates from the provided
`*.obj` file
into arrays. These arrays can be processed then loaded into vertex buffer
objects (VBOs) for rendering. The `*.obj` file must at least specify vertex
position data to be loaded successfully. Normals and texture coordinates are
optional.
Faces can be either triangles or quads, but not both. Faces are grouped by
their materials. Index arrays are generated for each material present in the
file.
Data from the returned `ObjMeshInfo` object can be used to create vertex
buffer objects and arrays for rendering. See `Examples` below for details on
how to do this.
Parameters
----------
objFile : :obj:`str`
Path to the `*.OBJ` file to load.
Returns
-------
ObjMeshInfo
Mesh data.
See Also
--------
loadMtlFile : Load a `*.mtl` file.
Notes
-----
1. This importer should work fine for most sanely generated files. Export
your model with Blender for best results, even if you used some other
package to create it.
2. The mesh cannot contain both triangles and quads.
Examples
--------
Loading a `*.obj` mode from file::
objModel = loadObjFile('/path/to/file.obj')
# load the material (*.mtl) file, textures are also loaded
mtllib = loadMtlFile(objModel.mtlFile)  # path was resolved by loadObjFile
Creating separate vertex buffer objects (VBOs) for each vertex attribute::
vertexPosVBO = createVBO(objModel.vertexPos)
texCoordVBO = createVBO(objModel.texCoords)
normalsVBO = createVBO(objModel.normals)
Create vertex array objects (VAOs) to draw the mesh. We create VAOs for each
face material::
objVAOs = {} # dictionary for VAOs
# for each material create a VAO
# keys are material names, values are index buffers
for material, faces in objModel.faces.items():
# convert index buffer to VAO
indexBuffer = \
gltools.createVBO(
faces.flatten(), # flatten face index for element array
target=GL.GL_ELEMENT_ARRAY_BUFFER,
dataType=GL.GL_UNSIGNED_INT)
# see `setVertexAttribPointer` for more information about attribute
# pointer indices
objVAOs[material] = gltools.createVAO(
{0: vertexPosVBO, # 0 = gl_Vertex
8: texCoordVBO, # 8 = gl_MultiTexCoord0
2: normalsVBO}, # 2 = gl_Normal
indexBuffer=indexBuffer)
# if using legacy attribute pointers, do this instead ...
# objVAOs[key] = createVAO({GL_VERTEX_ARRAY: vertexPosVBO,
# GL_TEXTURE_COORD_ARRAY: texCoordVBO,
# GL_NORMAL_ARRAY: normalsVBO},
# indexBuffer=indexBuffer,
# legacy=True) # this needs to be `True`
To render the VAOs using `objVAOs` created above, do the following::
for material, vao in objVAOs.items():
useMaterial(mtllib[material])
drawVAO(vao)
useMaterial(None) # disable materials when done
Optionally, you can create a single-storage, interleaved VBO by using
`numpy.hstack`. On some GL implementations, using single-storage buffers
offers better performance::
interleavedData = numpy.hstack(
(objModel.vertexPos, objModel.texCoords, objModel.normals))
vertexData = createVBO(interleavedData)
Creating VAOs with interleaved, single-storage buffers requires specifying
additional information, such as `size` and `offset`::
objVAOs = {}
for key, val in objModel.faces.items():
indexBuffer = \
gltools.createVBO(
val.flatten(),
target=GL.GL_ELEMENT_ARRAY_BUFFER,
dataType=GL.GL_UNSIGNED_INT)
objVAOs[key] = createVAO({0: (vertexData, 3, 0), # size=3, offset=0
8: (vertexData, 2, 3), # size=2, offset=3
2: (vertexData, 3, 5)},  # size=3, offset=5
indexBuffer=indexBuffer)
Drawing VAOs with interleaved buffers is exactly the same as shown before
with separate buffers.
"""
# open the file, read it into memory
with open(objFile, 'r') as f:
objBuffer = StringIO(f.read())
mtlFile = None
# unsorted attribute data lists
positionDefs = []
texCoordDefs = []
normalDefs = []
vertexAttrs = {}
# material groups
materialGroup = None
materialGroups = {}
nVertices = nTextureCoords = nNormals = nFaces = 0
vertexIdx = 0
# first pass, examine the file and load up vertex attributes
for line in objBuffer.readlines():
line = line.strip() # clean up line
if line.startswith('v '):
positionDefs.append(tuple(map(float, line[2:].split(' '))))
nVertices += 1
elif line.startswith('vt '):
texCoordDefs.append(tuple(map(float, line[3:].split(' '))))
nTextureCoords += 1
elif line.startswith('vn '):
normalDefs.append(tuple(map(float, line[3:].split(' '))))
nNormals += 1
elif line.startswith('f '):
faceAttrs = [] # attributes this face
for attrs in line[2:].split(' '): # triangle vertex attrs
if attrs not in vertexAttrs.keys():
vertexAttrs[attrs] = vertexIdx
vertexIdx += 1
faceAttrs.append(vertexAttrs[attrs])
materialGroups[materialGroup].append(faceAttrs)
nFaces += 1
elif line.startswith('o '): # ignored for now
pass
elif line.startswith('g '): # ignored for now
pass
elif line.startswith('usemtl '):
foundMaterial = line[7:]
if foundMaterial not in materialGroups.keys():
materialGroups[foundMaterial] = []
materialGroup = foundMaterial
elif line.startswith('mtllib '):
mtlFile = line[7:]  # line was already stripped above
# at the very least, we need vertices and facedefs
if nVertices == 0 or nFaces == 0:
raise RuntimeError(
"Failed to load OBJ file, file contains no vertices or faces.")
# convert indices for materials to numpy arrays
for key, val in materialGroups.items():
materialGroups[key] = np.asarray(val, dtype=int)
# indicate if file has any texture coordinates or normals
hasTexCoords = nTextureCoords > 0
hasNormals = nNormals > 0
# lists for vertex attributes
vertexPos = []
vertexTexCoord = []
vertexNormal = []
# populate vertex attribute arrays
for attrs, idx in vertexAttrs.items():
attr = attrs.split('/')
vertexPos.append(positionDefs[int(attr[0]) - 1])
if len(attr) > 1: # has texture coords
if hasTexCoords:
if attr[1] != '': # texcoord field not empty
vertexTexCoord.append(texCoordDefs[int(attr[1]) - 1])
else:
vertexTexCoord.append([0., 0.]) # fill with zeros
if len(attr) > 2: # has normals too
if hasNormals:
vertexNormal.append(normalDefs[int(attr[2]) - 1])
else:
vertexNormal.append([0., 0., 0.]) # fill with zeros
# convert vertex attribute lists to numeric arrays
vertexPos = np.asarray(vertexPos)
vertexTexCoord = np.asarray(vertexTexCoord)
vertexNormal = np.asarray(vertexNormal)
# compute the extents of the model, needed for axis-aligned bounding boxes
extents = (vertexPos.min(axis=0), vertexPos.max(axis=0))
# resolve the path to the material file associated with the mesh
if mtlFile is not None:
mtlFile = os.path.join(os.path.split(objFile)[0], mtlFile)
return ObjMeshInfo(vertexPos,
vertexTexCoord,
vertexNormal,
materialGroups,
extents,
mtlFile)
def loadMtlFile(mtllib, texParams=None):
"""Load a material library file (*.mtl).
Parameters
----------
mtllib : str
Path to the material library file.
texParams : dict, optional
    Optional parameters applied to loaded textures, specified as a
    mapping of texture parameter enums to values. For instance,
    `{GL.GL_TEXTURE_MAG_FILTER: GL.GL_LINEAR, ...}`. By default, linear
    filtering is used for both the minifying and magnification filter
    functions. This is adequate for most uses.
Returns
-------
dict
Dictionary of materials. Where each key is the material name found in
the file, and values are `Material` namedtuple objects.
See Also
--------
loadObjFile : Load an `*.OBJ` file.
Examples
--------
Load material associated with an `*.OBJ` file::
objModel = loadObjFile('/path/to/file.obj')
# load the material (*.mtl) file, textures are also loaded
mtllib = loadMtlFile(objModel.mtlFile)
Use a material when rendering vertex arrays::
useMaterial(mtllib[material])
drawVAO(vao)
useMaterial(None) # disable materials when done
"""
# open the file, read it into memory
with open(mtllib, 'r') as mtlFile:
mtlBuffer = StringIO(mtlFile.read())
# default texture parameters
if texParams is None:
texParams = {GL.GL_TEXTURE_MAG_FILTER: GL.GL_LINEAR,
GL.GL_TEXTURE_MIN_FILTER: GL.GL_LINEAR}
foundMaterials = {}
foundTextures = {}
thisMaterial = 0
for line in mtlBuffer.readlines():
line = line.strip()
if line.startswith('newmtl '): # new material
thisMaterial = line[7:]
foundMaterials[thisMaterial] = SimpleMaterial()
elif line.startswith('Ns '): # specular exponent
foundMaterials[thisMaterial].shininess = line[3:]
elif line.startswith('Ks '): # specular color
specularColor = np.asarray(list(map(float, line[3:].split(' '))))
specularColor = 2.0 * specularColor - 1
foundMaterials[thisMaterial].specularColor = specularColor
elif line.startswith('Kd '): # diffuse color
diffuseColor = np.asarray(list(map(float, line[3:].split(' '))))
diffuseColor = 2.0 * diffuseColor - 1
foundMaterials[thisMaterial].diffuseColor = diffuseColor
elif line.startswith('Ka '): # ambient color
ambientColor = np.asarray(list(map(float, line[3:].split(' '))))
ambientColor = 2.0 * ambientColor - 1
foundMaterials[thisMaterial].ambientColor = ambientColor
elif line.startswith('map_Kd '): # diffuse color map
# load a diffuse texture from file
textureName = line[7:]
if textureName not in foundTextures.keys():
im = Image.open(
os.path.join(os.path.split(mtllib)[0], textureName))
im = im.transpose(Image.FLIP_TOP_BOTTOM)
im = im.convert("RGBA")
pixelData = np.array(im).ctypes
width = pixelData.shape[1]
height = pixelData.shape[0]
foundTextures[textureName] = createTexImage2D(
width,
height,
internalFormat=GL.GL_RGBA,
pixelFormat=GL.GL_RGBA,
dataType=GL.GL_UNSIGNED_BYTE,
data=pixelData,
unpackAlignment=1,
texParams=texParams)
foundMaterials[thisMaterial].diffuseTexture = \
foundTextures[textureName]
return foundMaterials
def createUVSphere(radius=0.5, sectors=16, stacks=16, flipFaces=False):
"""Create a UV sphere.
Procedurally generate a UV sphere by specifying its radius, and number of
stacks and sectors. The poles of the resulting sphere will be aligned with
the Z-axis.
Surface normals and texture coordinates are automatically generated. The
returned normals are computed to produce smooth shading.
Parameters
----------
radius : float, optional
Radius of the sphere in scene units (usually meters). Default is 0.5.
sectors, stacks : int, optional
Number of longitudinal and latitudinal sub-divisions. Default is 16 for
both.
flipFaces : bool, optional
If `True`, normals and face windings will be set to point inward towards
the center of the sphere. Texture coordinates will remain the same.
Default is `False`.
Returns
-------
tuple
Vertex attribute arrays (position, texture coordinates, and normals) and
triangle indices.
Examples
--------
Create a UV sphere and VAO to render it::
vertices, textureCoords, normals, faces = \
gltools.createUVSphere(sectors=32, stacks=32)
vertexVBO = gltools.createVBO(vertices)
texCoordVBO = gltools.createVBO(textureCoords)
normalsVBO = gltools.createVBO(normals)
indexBuffer = gltools.createVBO(
faces.flatten(),
target=GL.GL_ELEMENT_ARRAY_BUFFER,
dataType=GL.GL_UNSIGNED_INT)
vao = gltools.createVAO({0: vertexVBO, 8: texCoordVBO, 2: normalsVBO},
indexBuffer=indexBuffer)
# in the rendering loop
gltools.drawVAO(vao, GL.GL_TRIANGLES)
The color of the sphere can be changed by calling `glColor*`::
glColor4f(1.0, 0.0, 0.0, 1.0) # red
gltools.drawVAO(vao, GL.GL_TRIANGLES)
Raw coordinates can be transformed prior to uploading to VBOs. Here we can
rotate vertex positions and normals so the poles align with the Y-axis::
r = mt.rotationMatrix(90.0, (1.0, 0, 0.0)) # 90 degrees about +X axis
vertices = mt.applyMatrix(r, vertices)
normals = mt.applyMatrix(r, normals)
"""
# based on the code found at https://www.songho.ca/opengl/gl_sphere.html
sectorStep = 2.0 * np.pi / sectors
stackStep = np.pi / stacks
lengthInv = 1.0 / radius
vertices = []
normals = []
texCoords = []
for i in range(stacks + 1):
stackAngle = np.pi / 2.0 - i * stackStep
xy = radius * np.cos(stackAngle)
z = radius * np.sin(stackAngle)
for j in range(sectors + 1):
sectorAngle = j * sectorStep
x = xy * np.cos(sectorAngle)
y = xy * np.sin(sectorAngle)
vertices.append((x, y, z))
nx = x * lengthInv
ny = y * lengthInv
nz = z * lengthInv
normals.append((nx, ny, nz))
s = 1.0 - j / float(sectors)
t = i / float(stacks)
texCoords.append((s, t))
# generate index
indices = []
for i in range(stacks):
k1 = i * (sectors + 1)
k2 = k1 + sectors + 1
for j in range(sectors):
# case for caps
if not flipFaces:
if i != 0:
indices.append((k1, k2, k1 + 1))
if i != stacks - 1:
indices.append((k1 + 1, k2, k2 + 1))
else:
if i != 0:
indices.append((k1, k1 + 1, k2))
if i != stacks - 1:
indices.append((k1 + 1, k2 + 1, k2))
k1 += 1
k2 += 1
# convert to numpy arrays
vertices = np.ascontiguousarray(vertices, dtype=np.float32)
normals = np.ascontiguousarray(normals, dtype=np.float32)
texCoords = np.ascontiguousarray(texCoords, dtype=np.float32)
faces = np.ascontiguousarray(indices, dtype=np.uint32)
if flipFaces: # flip normals so they point inwards
normals *= -1.0
return vertices, texCoords, normals, faces
def createPlane(size=(1., 1.)):
"""Create a plane.
Procedurally generate a plane (or quad) mesh by specifying its size. Texture
coordinates are computed automatically, with origin at the bottom left of
the plane. The generated plane is perpendicular to the +Z axis, origin of
the plane is at its center.
Parameters
----------
size : tuple or float
Dimensions of the plane. If a single value is specified, the plane will
be square. Provide a tuple of floats to specify the width and length of
the plane (eg. `size=(0.2, 1.3)`).
Returns
-------
tuple
Vertex attribute arrays (position, texture coordinates, and normals) and
triangle indices.
Examples
--------
Create a plane mesh and draw it::
vertices, textureCoords, normals, faces = gltools.createPlane()
vertexVBO = gltools.createVBO(vertices)
texCoordVBO = gltools.createVBO(textureCoords)
normalsVBO = gltools.createVBO(normals)
indexBuffer = gltools.createVBO(
faces.flatten(),
target=GL.GL_ELEMENT_ARRAY_BUFFER,
dataType=GL.GL_UNSIGNED_INT)
vao = gltools.createVAO({0: vertexVBO, 8: texCoordVBO, 2: normalsVBO},
indexBuffer=indexBuffer)
# in the rendering loop
gltools.drawVAO(vao, GL.GL_TRIANGLES)
"""
if isinstance(size, (int, float,)):
sx = sy = float(size) / 2.
else:
sx = size[0] / 2.
sy = size[1] / 2.
vertices = np.ascontiguousarray(
[[-1., 1., 0.],
[ 1., 1., 0.],
[-1., -1., 0.],
[ 1., -1., 0.]])
if sx != 1.:
vertices[:, 0] *= sx
if sy != 1.:
vertices[:, 1] *= sy
# texture coordinates
texCoords = np.ascontiguousarray([[0., 1.], [1., 1.], [0., 0.], [1., 0.]])
# normals, facing +Z
normals = np.zeros_like(vertices)
normals[:, 0] = 0.
normals[:, 1] = 0.
normals[:, 2] = 1.
# generate face index
faces = np.ascontiguousarray([[0, 2, 1], [1, 2, 3]], dtype=np.uint32)
return vertices, texCoords, normals, faces
def createMeshGridFromArrays(xvals, yvals, zvals=None, tessMode='diag', computeNormals=True):
"""Create a mesh grid using coordinates from arrays.
Generates a mesh using data provided in 2D arrays of vertex coordinates.
Triangle faces are automatically computed by this function by joining
adjacent vertices at neighbouring indices in the array. Texture coordinates
are generated covering the whole mesh, with origin at the bottom left.
Parameters
----------
xvals, yvals : array_like
NxM arrays of X and Y coordinates. Both arrays must have the same
shape. The resulting mesh will have a single vertex for each X and Y
pair. Faces will be generated to connect adjacent coordinates in the
array.
zvals : array_like, optional
NxM array of Z coordinates for each X and Y. Must have the same shape
as X and Y. If not specified, the Z coordinates will be filled with
zeros.
tessMode : str, optional
Tessellation mode. Specifies how faces are generated. Presently only
'diag' (the default) is implemented; other values raise a `ValueError`.
computeNormals : bool, optional
Compute normals for the generated mesh. If `False`, all normals are set
to face in the +Z direction. Presently, computing normals is a slow
operation and may not be needed for some meshes.
Returns
-------
tuple
Vertex attribute arrays (position, texture coordinates, and normals) and
triangle indices.
Examples
--------
Create a 3D sine grating mesh using 2D arrays::
x = np.linspace(0, 1.0, 32)
y = np.linspace(1.0, 0.0, 32)
xx, yy = np.meshgrid(x, y)
zz = np.tile(np.sin(np.linspace(0.0, 32., 32)) * 0.02, (32, 1))
vertices, textureCoords, normals, faces = \
gltools.createMeshGridFromArrays(xx, yy, zz)
"""
# validate input shapes before building the vertex array
if zvals is not None:
    assert xvals.shape == yvals.shape == zvals.shape
else:
    assert xvals.shape == yvals.shape
vertices = np.vstack([xvals.ravel(), yvals.ravel()]).T
if zvals is None:
# fill z with zeros if not provided
vertices = np.hstack([vertices, np.zeros((vertices.shape[0], 1))])
else:
vertices = np.hstack([vertices, np.atleast_2d(zvals.ravel()).T])
ny, nx = xvals.shape
# texture coordinates
u = np.linspace(0.0, 1.0, nx)
v = np.linspace(1.0, 0.0, ny)
uu, vv = np.meshgrid(u, v)
texCoords = np.vstack([uu.ravel(), vv.ravel()]).T
# generate face index
faces = []
if tessMode == 'diag':
for i in range(ny - 1):
k1 = i * nx
k2 = k1 + nx
for j in range(nx - 1):
faces.append([k1, k2, k1 + 1])
faces.append([k1 + 1, k2, k2 + 1])
k1 += 1
k2 += 1
else:
raise ValueError('Invalid value for `tessMode`.')
# convert to numpy arrays
vertices = np.ascontiguousarray(vertices, dtype=np.float32)
texCoords = np.ascontiguousarray(texCoords, dtype=np.float32)
faces = np.ascontiguousarray(faces, dtype=np.uint32)
# calculate surface normals for the mesh
if computeNormals:
normals = calculateVertexNormals(vertices, faces, shading='smooth')
else:
normals = np.zeros_like(vertices, dtype=np.float32)
normals[:, 2] = 1.
return vertices, texCoords, normals, faces
def createMeshGrid(size=(1., 1.), subdiv=0, tessMode='diag'):
"""Create a grid mesh.
Procedurally generate a grid mesh by specifying its size and number of
sub-divisions. Texture coordinates are computed automatically. The generated
grid is perpendicular to the +Z axis, with its origin at the center of the
mesh.
Parameters
----------
size : tuple or float
Dimensions of the mesh. If a single value is specified, the plane will
be square. Provide a tuple of floats to specify the width and length of
the plane (eg. `size=(0.2, 1.3)`).
subdiv : int, optional
Number of subdivisions. Zero subdivisions are applied by default, and
the resulting mesh will only have vertices at the corners.
tessMode : str, optional
Tessellation mode. Specifies how faces are subdivided. Options are
'center', 'radial', and 'diag'. Default is 'diag'. Modes 'radial' and
'center' work best with an odd number of subdivisions.
Returns
-------
tuple
Vertex attribute arrays (position, texture coordinates, and normals) and
triangle indices.
Examples
--------
Create a grid mesh and draw it::
vertices, textureCoords, normals, faces = gltools.createMeshGrid()
vertexVBO = gltools.createVBO(vertices)
texCoordVBO = gltools.createVBO(textureCoords)
normalsVBO = gltools.createVBO(normals)
indexBuffer = gltools.createVBO(
faces.flatten(),
target=GL.GL_ELEMENT_ARRAY_BUFFER,
dataType=GL.GL_UNSIGNED_INT)
vao = gltools.createVAO({0: vertexVBO, 8: texCoordVBO, 2: normalsVBO},
indexBuffer=indexBuffer)
# in the rendering loop
gltools.drawVAO(vao, GL.GL_TRIANGLES)
Randomly displace vertices off the plane of the grid by setting the `Z`
value per vertex::
vertices, textureCoords, normals, faces = \
gltools.createMeshGrid(subdiv=11)
numVerts = vertices.shape[0]
vertices[:, 2] = np.random.uniform(-0.02, 0.02, (numVerts,)) # Z
# you must recompute surface normals to get correct shading!
normals = gltools.calculateVertexNormals(vertices, faces)
# create a VAO as shown in the previous example here to draw it ...
"""
if isinstance(size, (int, float,)):
divx = divy = float(size) / 2.
else:
divx = size[0] / 2.
divy = size[1] / 2.
# generate plane vertices
x = np.linspace(-divx, divx, subdiv + 2)
y = np.linspace(divy, -divy, subdiv + 2)
xx, yy = np.meshgrid(x, y)
vertices = np.vstack([xx.ravel(), yy.ravel()]).T
vertices = np.hstack([vertices, np.zeros((vertices.shape[0], 1))]) # add z
# texture coordinates
u = np.linspace(0.0, 1.0, subdiv + 2)
v = np.linspace(1.0, 0.0, subdiv + 2)
uu, vv = np.meshgrid(u, v)
texCoords = np.vstack([uu.ravel(), vv.ravel()]).T
# normals, facing +Z
normals = np.zeros_like(vertices)
normals[:, 0] = 0.
normals[:, 1] = 0.
normals[:, 2] = 1.
# generate face index
faces = []
if tessMode == 'diag':
for i in range(subdiv + 1):
k1 = i * (subdiv + 2)
k2 = k1 + subdiv + 2
for j in range(subdiv + 1):
faces.append([k1, k2, k1 + 1])
faces.append([k1 + 1, k2, k2 + 1])
k1 += 1
k2 += 1
elif tessMode == 'center':
lx = len(x)
ly = len(y)
for i in range(subdiv + 1):
k1 = i * (subdiv + 2)
k2 = k1 + subdiv + 2
for j in range(subdiv + 1):
if k1 + j < k1 + int((lx / 2)):
if int(k1 / ly) + 1 > int(ly / 2):
faces.append([k1, k2, k1 + 1])
faces.append([k1 + 1, k2, k2 + 1])
else:
faces.append([k1, k2, k2 + 1])
faces.append([k1 + 1, k1, k2 + 1])
else:
if int(k1 / ly) + 1 > int(ly / 2):
faces.append([k1, k2, k2 + 1])
faces.append([k1 + 1, k1, k2 + 1])
else:
faces.append([k1, k2, k1 + 1])
faces.append([k1 + 1, k2, k2 + 1])
k1 += 1
k2 += 1
elif tessMode == 'radial':
lx = len(x)
ly = len(y)
for i in range(subdiv + 1):
k1 = i * (subdiv + 2)
k2 = k1 + subdiv + 2
for j in range(subdiv + 1):
if k1 + j < k1 + int((lx / 2)):
if int(k1 / ly) + 1 > int(ly / 2):
faces.append([k1, k2, k2 + 1])
faces.append([k1 + 1, k1, k2 + 1])
else:
faces.append([k1, k2, k1 + 1])
faces.append([k1 + 1, k2, k2 + 1])
else:
if int(k1 / ly) + 1 > int(ly / 2):
faces.append([k1, k2, k1 + 1])
faces.append([k1 + 1, k2, k2 + 1])
else:
faces.append([k1, k2, k2 + 1])
faces.append([k1 + 1, k1, k2 + 1])
k1 += 1
k2 += 1
else:
raise ValueError('Invalid value for `tessMode`.')
# convert to numpy arrays
vertices = np.ascontiguousarray(vertices, dtype=np.float32)
texCoords = np.ascontiguousarray(texCoords, dtype=np.float32)
normals = np.ascontiguousarray(normals, dtype=np.float32)
faces = np.ascontiguousarray(faces, dtype=np.uint32)
return vertices, texCoords, normals, faces
def createBox(size=(1., 1., 1.), flipFaces=False):
"""Create a box mesh.
Create a box mesh by specifying its `size` in three dimensions (x, y, z),
or a single value (`float`) to create a cube. The resulting box will be
centered about the origin. Texture coordinates and normals are automatically
generated for each face.
Setting `flipFaces=True` will make faces and normals point inwards, this
allows boxes to be viewed and lit correctly from the inside.
Parameters
----------
size : tuple or float
Dimensions of the mesh. If a single value is specified, the box will
be a cube. Provide a tuple of floats to specify the width, length, and
height of the box (eg. `size=(0.2, 1.3, 2.1)`).
flipFaces : bool, optional
If `True`, normals and face windings will be set to point inward towards
the center of the box. Texture coordinates will remain the same.
Default is `False`.
Returns
-------
tuple
Vertex attribute arrays (position, texture coordinates, and normals) and
triangle indices.
Examples
--------
Create a box mesh and draw it::
vertices, textureCoords, normals, faces = gltools.createBox()
vertexVBO = gltools.createVBO(vertices)
texCoordVBO = gltools.createVBO(textureCoords)
normalsVBO = gltools.createVBO(normals)
indexBuffer = gltools.createVBO(
faces.flatten(),
target=GL.GL_ELEMENT_ARRAY_BUFFER,
dataType=GL.GL_UNSIGNED_INT)
vao = gltools.createVAO({0: vertexVBO, 8: texCoordVBO, 2: normalsVBO},
indexBuffer=indexBuffer)
# in the rendering loop
gltools.drawVAO(vao, GL.GL_TRIANGLES)
"""
if isinstance(size, (int, float,)):
sx = sy = sz = float(size) / 2.
else:
sx, sy, sz = size
sx /= 2.
sy /= 2.
sz /= 2.
# vertices
vertices = np.ascontiguousarray([
[ 1., 1., 1.], [ 1., 1., -1.], [ 1., -1., 1.],
[ 1., -1., -1.], [-1., 1., -1.], [-1., 1., 1.],
[-1., -1., -1.], [-1., -1., 1.], [-1., 1., -1.],
[ 1., 1., -1.], [-1., 1., 1.], [ 1., 1., 1.],
[ 1., -1., -1.], [-1., -1., -1.], [ 1., -1., 1.],
[-1., -1., 1.], [-1., 1., 1.], [ 1., 1., 1.],
[-1., -1., 1.], [ 1., -1., 1.], [ 1., 1., -1.],
[-1., 1., -1.], [ 1., -1., -1.], [-1., -1., -1.]
], dtype=np.float32)
# multiply vertex coordinates by box dimensions
if sx != 1.:
vertices[:, 0] *= sx
if sy != 1.:
vertices[:, 1] *= sy
if sz != 1.:
vertices[:, 2] *= sz
# normals for each side
normals = np.repeat(
[[ 1., 0., 0.], # +X
[-1., 0., 0.], # -X
[ 0., 1., 0.], # +Y
[ 0., -1., 0.], # -Y
[ 0., 0., 1.], # +Z
[ 0., 0., -1.]], # -Z
4, axis=0)
normals = np.ascontiguousarray(normals, dtype=np.float32)
# texture coordinates for each side
texCoords = np.tile([[0., 1.], [1., 1.], [0., 0.], [1., 0.]], (6, 1))
texCoords = np.ascontiguousarray(texCoords, dtype=np.float32)
# vertex indices for faces
faces = np.ascontiguousarray([
[ 0, 2, 1], [ 1, 2, 3], # +X
[ 4, 6, 5], [ 5, 6, 7], # -X
[ 8, 10, 9], [ 9, 10, 11], # +Y
[12, 14, 13], [13, 14, 15], # -Y
[16, 18, 17], [17, 18, 19], # +Z
[20, 22, 21], [21, 22, 23] # -Z
], dtype=np.uint32)
if flipFaces:
faces = np.fliplr(faces)
normals *= -1.0
return vertices, texCoords, normals, faces
def transformMeshPosOri(vertices, normals, pos=(0., 0., 0.), ori=(0., 0., 0., 1.)):
"""Transform a mesh.
Transform mesh vertices and normals to a new position and orientation using
a position coordinate and rotation quaternion. Values `vertices` and
`normals` must be the same shape. This is intended to be used when editing
raw vertex data prior to rendering. Do not use this to change the
configuration of an object while rendering.
Parameters
----------
vertices : array_like
Nx3 array of vertices.
normals : array_like
Nx3 array of normals.
pos : array_like, optional
Position vector to transform mesh vertices. If Nx3, `vertices` will be
transformed by corresponding rows of `pos`.
ori : array_like, optional
Orientation quaternion in form [x, y, z, w]. If Nx4, `vertices` and
`normals` will be transformed by corresponding rows of `ori`.
Returns
-------
tuple
Transformed vertices and normals.
Examples
--------
Create and re-orient a plane to face upwards::
vertices, textureCoords, normals, faces = createPlane()
# rotation quaternion
qr = quatFromAxisAngle((1., 0., 0.), -90.0) # -90 degrees about +X axis
# transform the normals and points
vertices, normals = transformMeshPosOri(vertices, normals, ori=qr)
Any `create*` primitive generating function can be used in place of
`createPlane`.
"""
# ensure these are contiguous
vertices = np.ascontiguousarray(vertices)
normals = np.ascontiguousarray(normals)
# skip work when the pose is the identity; vertices must also be
# transformed when only a rotation is given
hasRotation = not np.allclose(ori, [0., 0., 0., 1.])
if hasRotation or not np.allclose(pos, [0., 0., 0.]):
    vertices = mt.transform(pos, ori, vertices)
if hasRotation:
    normals = mt.applyQuat(ori, normals)
return vertices, normals
def calculateVertexNormals(vertices, faces, shading='smooth'):
"""Calculate vertex normals given vertices and triangle faces.
Finds all faces sharing a vertex index and sets its normal to either
the face normal if `shading='flat'` or the average normals of adjacent
faces if `shading='smooth'`. Flat shading only works correctly if each
vertex belongs to exactly one face.
The direction of the normals are determined by the winding order of
triangles, assumed counter clock-wise (OpenGL default). Most model
editing software exports using this convention. If not, winding orders
can be reversed by calling::
faces = np.fliplr(faces)
In some cases, creases may appear if vertices are at the same location,
but do not share the same index.
Parameters
----------
vertices : array_like
Nx3 vertex positions.
faces : array_like
Nx3 vertex indices.
shading : str, optional
Shading mode. Options are 'smooth' and 'flat'. Flat only works with
meshes where no vertex index is shared across faces.
Returns
-------
ndarray
Vertex normals array with the same shape as `vertices`. Computed
normals are normalized.
Examples
--------
Recomputing vertex normals for a UV sphere::
# create a sphere and discard normals
vertices, textureCoords, _, faces = gltools.createUVSphere()
normals = gltools.calculateVertexNormals(vertices, faces)
"""
# compute surface normals for all faces
faceNormals = mt.surfaceNormal(vertices[faces])
normals = []
if shading == 'flat':
for vertexIdx in np.unique(faces):
match, _ = np.where(faces == vertexIdx)
normals.append(faceNormals[match[0], :])  # one face per vertex in flat mode
elif shading == 'smooth':
# get all faces the vertex belongs to
for vertexIdx in np.unique(faces):
match, _ = np.where(faces == vertexIdx)
normals.append(mt.vertexNormal(faceNormals[match, :]))
return np.ascontiguousarray(normals) + 0.0
# -----------------------------
# Misc. OpenGL Helper Functions
# -----------------------------
def getIntegerv(parName):
"""Get a single integer parameter value, return it as a Python integer.
Parameters
----------
parName : int
OpenGL property enum to query (e.g. GL_MAJOR_VERSION).
Returns
-------
int
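Examples
--------
Querying the maximum number of lights supported by the implementation::
    maxLights = getIntegerv(GL.GL_MAX_LIGHTS)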
"""
val = GL.GLint()
GL.glGetIntegerv(parName, val)
return int(val.value)
def getFloatv(parName):
"""Get a single float parameter value, return it as a Python float.
Parameters
----------
parName : int
OpenGL property enum to query.
Returns
-------
float
"""
val = GL.GLfloat()
GL.glGetFloatv(parName, val)
return float(val.value)
def getString(parName):
"""Get a single string parameter value, return it as a Python UTF-8 string.
Parameters
----------
parName : int
OpenGL property enum to query (e.g. GL_VENDOR).
Returns
-------
str
"""
val = ctypes.cast(GL.glGetString(parName), ctypes.c_char_p).value
return val.decode('UTF-8')
def getModelViewMatrix():
"""Get the present model matrix from the OpenGL matrix stack.
Returns
-------
ndarray
4x4 model/view matrix.
"""
modelview = np.zeros((4, 4), dtype=np.float32)
GL.glGetFloatv(GL.GL_MODELVIEW_MATRIX, modelview.ctypes.data_as(
ctypes.POINTER(ctypes.c_float)))
modelview[:, :] = np.transpose(modelview)
return modelview
def getProjectionMatrix():
"""Get the present projection matrix from the OpenGL matrix stack.
Returns
-------
ndarray
4x4 projection matrix.
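Examples
--------
A sketch of combining the projection and model/view matrices into a single
transform for column vectors::
    MVP = np.matmul(getProjectionMatrix(), getModelViewMatrix())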
"""
proj = np.zeros((4, 4), dtype=np.float32, order='C')
GL.glGetFloatv(GL.GL_PROJECTION_MATRIX, proj.ctypes.data_as(
ctypes.POINTER(ctypes.c_float)))
proj[:, :] = np.transpose(proj)
return proj
# OpenGL information type
OpenGLInfo = namedtuple(
'OpenGLInfo',
['vendor',
'renderer',
'version',
'majorVersion',
'minorVersion',
'doubleBuffer',
'maxTextureSize',
'stereo',
'maxSamples',
'extensions',
'userData'])
def getOpenGLInfo():
"""Get general information about the OpenGL implementation on this machine.
This should provide a consistent means of doing so regardless of the OpenGL
interface we are using.
The returned `OpenGLInfo` namedtuple has the following fields::
vendor, renderer, version, majorVersion, minorVersion, doubleBuffer,
maxTextureSize, stereo, maxSamples, extensions
Supported extensions are returned as a list in the 'extensions' field. You
can check if a platform supports an extension by checking the membership of
the extension name in that list.
Returns
-------
OpenGLInfo
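Examples
--------
Checking whether an extension is available before using it (the extension
name below is purely an illustration)::
    glInfo = getOpenGLInfo()
    hasFBO = 'GL_ARB_framebuffer_object' in glInfo.extensions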
"""
return OpenGLInfo(getString(GL.GL_VENDOR),
getString(GL.GL_RENDERER),
getString(GL.GL_VERSION),
getIntegerv(GL.GL_MAJOR_VERSION),
getIntegerv(GL.GL_MINOR_VERSION),
getIntegerv(GL.GL_DOUBLEBUFFER),
getIntegerv(GL.GL_MAX_TEXTURE_SIZE),
getIntegerv(GL.GL_STEREO),
getIntegerv(GL.GL_MAX_SAMPLES),
getString(GL.GL_EXTENSIONS).split(' '),
dict())
# ---------------------
# OpenGL/VRML Materials
# ---------------------
#
# A collection of pre-defined materials for stimuli. Keep in mind that these
# materials only approximate real-world equivalents. Values were obtained from
# http://devernay.free.fr/cours/opengl/materials.html (08/24/18). There are four
# material libraries to use, where individual material descriptors are accessed
# via property names.
#
# Usage:
#
# useMaterial(metalMaterials.gold)
# drawVAO(myObject)
# ...
#
mineralMaterials = namedtuple(
'mineralMaterials',
['emerald', 'jade', 'obsidian', 'pearl', 'ruby', 'turquoise'])(
createMaterial(
[(GL.GL_AMBIENT, (0.0215, 0.1745, 0.0215, 1.0)),
(GL.GL_DIFFUSE, (0.07568, 0.61424, 0.07568, 1.0)),
(GL.GL_SPECULAR, (0.633, 0.727811, 0.633, 1.0)),
(GL.GL_SHININESS, 0.6 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.135, 0.2225, 0.1575, 1.0)),
(GL.GL_DIFFUSE, (0.54, 0.89, 0.63, 1.0)),
(GL.GL_SPECULAR, (0.316228, 0.316228, 0.316228, 1.0)),
(GL.GL_SHININESS, 0.1 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.05375, 0.05, 0.06625, 1.0)),
(GL.GL_DIFFUSE, (0.18275, 0.17, 0.22525, 1.0)),
(GL.GL_SPECULAR, (0.332741, 0.328634, 0.346435, 1.0)),
(GL.GL_SHININESS, 0.3 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.25, 0.20725, 0.20725, 1.0)),
(GL.GL_DIFFUSE, (1, 0.829, 0.829, 1.0)),
(GL.GL_SPECULAR, (0.296648, 0.296648, 0.296648, 1.0)),
(GL.GL_SHININESS, 0.088 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.1745, 0.01175, 0.01175, 1.0)),
(GL.GL_DIFFUSE, (0.61424, 0.04136, 0.04136, 1.0)),
(GL.GL_SPECULAR, (0.727811, 0.626959, 0.626959, 1.0)),
(GL.GL_SHININESS, 0.6 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.1, 0.18725, 0.1745, 1.0)),
(GL.GL_DIFFUSE, (0.396, 0.74151, 0.69102, 1.0)),
(GL.GL_SPECULAR, (0.297254, 0.30829, 0.306678, 1.0)),
(GL.GL_SHININESS, 0.1 * 128.0)])
)
metalMaterials = namedtuple(
'metalMaterials',
['brass', 'bronze', 'chrome', 'copper', 'gold', 'silver'])(
createMaterial(
[(GL.GL_AMBIENT, (0.329412, 0.223529, 0.027451, 1.0)),
(GL.GL_DIFFUSE, (0.780392, 0.568627, 0.113725, 1.0)),
(GL.GL_SPECULAR, (0.992157, 0.941176, 0.807843, 1.0)),
(GL.GL_SHININESS, 0.21794872 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.2125, 0.1275, 0.054, 1.0)),
(GL.GL_DIFFUSE, (0.714, 0.4284, 0.18144, 1.0)),
(GL.GL_SPECULAR, (0.393548, 0.271906, 0.166721, 1.0)),
(GL.GL_SHININESS, 0.2 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.25, 0.25, 0.25, 1.0)),
(GL.GL_DIFFUSE, (0.4, 0.4, 0.4, 1.0)),
(GL.GL_SPECULAR, (0.774597, 0.774597, 0.774597, 1.0)),
(GL.GL_SHININESS, 0.6 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.19125, 0.0735, 0.0225, 1.0)),
(GL.GL_DIFFUSE, (0.7038, 0.27048, 0.0828, 1.0)),
(GL.GL_SPECULAR, (0.256777, 0.137622, 0.086014, 1.0)),
(GL.GL_SHININESS, 0.1 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.24725, 0.1995, 0.0745, 1.0)),
(GL.GL_DIFFUSE, (0.75164, 0.60648, 0.22648, 1.0)),
(GL.GL_SPECULAR, (0.628281, 0.555802, 0.366065, 1.0)),
(GL.GL_SHININESS, 0.4 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.19225, 0.19225, 0.19225, 1.0)),
(GL.GL_DIFFUSE, (0.50754, 0.50754, 0.50754, 1.0)),
(GL.GL_SPECULAR, (0.508273, 0.508273, 0.508273, 1.0)),
(GL.GL_SHININESS, 0.4 * 128.0)])
)
plasticMaterials = namedtuple(
'plasticMaterials',
['black', 'cyan', 'green', 'red', 'white', 'yellow'])(
createMaterial(
[(GL.GL_AMBIENT, (0, 0, 0, 1.0)),
(GL.GL_DIFFUSE, (0.01, 0.01, 0.01, 1.0)),
(GL.GL_SPECULAR, (0.5, 0.5, 0.5, 1.0)),
(GL.GL_SHININESS, 0.25 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0, 0.1, 0.06, 1.0)),
(GL.GL_DIFFUSE, (0.0, 0.50980392, 0.50980392, 1.0)),
(GL.GL_SPECULAR, (0.50196078, 0.50196078, 0.50196078, 1.0)),
(GL.GL_SHININESS, 0.25 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0, 0, 0, 1.0)),
(GL.GL_DIFFUSE, (0.1, 0.35, 0.1, 1.0)),
(GL.GL_SPECULAR, (0.45, 0.55, 0.45, 1.0)),
(GL.GL_SHININESS, 0.25 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0, 0, 0, 1.0)),
(GL.GL_DIFFUSE, (0.5, 0, 0, 1.0)),
(GL.GL_SPECULAR, (0.7, 0.6, 0.6, 1.0)),
(GL.GL_SHININESS, 0.25 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0, 0, 0, 1.0)),
(GL.GL_DIFFUSE, (0.55, 0.55, 0.55, 1.0)),
(GL.GL_SPECULAR, (0.7, 0.7, 0.7, 1.0)),
(GL.GL_SHININESS, 0.25 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0, 0, 0, 1.0)),
(GL.GL_DIFFUSE, (0.5, 0.5, 0, 1.0)),
(GL.GL_SPECULAR, (0.6, 0.6, 0.5, 1.0)),
(GL.GL_SHININESS, 0.25 * 128.0)])
)
rubberMaterials = namedtuple(
'rubberMaterials',
['black', 'cyan', 'green', 'red', 'white', 'yellow'])(
createMaterial(
[(GL.GL_AMBIENT, (0.02, 0.02, 0.02, 1.0)),
(GL.GL_DIFFUSE, (0.01, 0.01, 0.01, 1.0)),
(GL.GL_SPECULAR, (0.4, 0.4, 0.4, 1.0)),
(GL.GL_SHININESS, 0.078125 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0, 0.05, 0.05, 1.0)),
(GL.GL_DIFFUSE, (0.4, 0.5, 0.5, 1.0)),
(GL.GL_SPECULAR, (0.04, 0.7, 0.7, 1.0)),
(GL.GL_SHININESS, 0.078125 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0, 0.05, 0, 1.0)),
(GL.GL_DIFFUSE, (0.4, 0.5, 0.4, 1.0)),
(GL.GL_SPECULAR, (0.04, 0.7, 0.04, 1.0)),
(GL.GL_SHININESS, 0.078125 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.05, 0, 0, 1.0)),
(GL.GL_DIFFUSE, (0.5, 0.4, 0.4, 1.0)),
(GL.GL_SPECULAR, (0.7, 0.04, 0.04, 1.0)),
(GL.GL_SHININESS, 0.078125 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.05, 0.05, 0.05, 1.0)),
(GL.GL_DIFFUSE, (0.5, 0.5, 0.5, 1.0)),
(GL.GL_SPECULAR, (0.7, 0.7, 0.7, 1.0)),
(GL.GL_SHININESS, 0.078125 * 128.0)]),
createMaterial(
[(GL.GL_AMBIENT, (0.05, 0.05, 0, 1.0)),
(GL.GL_DIFFUSE, (0.5, 0.5, 0.4, 1.0)),
(GL.GL_SPECULAR, (0.7, 0.7, 0.04, 1.0)),
(GL.GL_SHININESS, 0.078125 * 128.0)])
)
# default material according to the OpenGL spec.
defaultMaterial = createMaterial(
[(GL.GL_AMBIENT, (0.2, 0.2, 0.2, 1.0)),
(GL.GL_DIFFUSE, (0.8, 0.8, 0.8, 1.0)),
(GL.GL_SPECULAR, (0.0, 0.0, 0.0, 1.0)),
(GL.GL_EMISSION, (0.0, 0.0, 0.0, 1.0)),
(GL.GL_SHININESS, 0)])
# ----------------------------------------------------------------------------
# fileerrortools.py (psychopy_psychopy/psychopy/tools/fileerrortools.py)
# from psychopy/psychopy, GPL-3.0
# ----------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Functions and classes related to file and directory error handling
"""
import os
import glob
from pathlib import Path
from psychopy import logging
def handleFileCollision(fileName, fileCollisionMethod):
"""Handle filename collisions by overwriting, renaming, or failing hard.
:Parameters:
fileCollisionMethod: 'overwrite', 'rename', 'fail'
If a file with the requested name already exists, specify
how to deal with it. 'overwrite' will overwrite existing
files in place, 'rename' will append an integer to create
a new file ('trials1.psydat', 'trials2.psydat' etc) and
'fail' will raise an IOError.
"""
if fileCollisionMethod == 'overwrite':
logging.warning('Data file, %s, will be overwritten' % fileName)
elif fileCollisionMethod == 'fail':
msg = ("Data file %s already exists. Set argument "
"fileCollisionMethod to overwrite.")
raise IOError(msg % fileName)
elif fileCollisionMethod == 'rename':
# convert to a Path object
fileObj = Path(fileName)
# use a glob star if we don't have an ext
if not fileObj.suffix:
fileObj = fileObj.parent / (fileObj.stem + ".*")
# get original file name
rootName = fileObj.stem
# get total number of sibling files to use as maximum for iteration
nSiblings = len(list(fileObj.parent.glob("*")))
# iteratively add numbers to the end until filename isn't taken
i = 0
while list(fileObj.parent.glob(fileObj.name)) and i < nSiblings:
i += 1
fileObj = fileObj.parent / (f"{rootName}_{i}" + fileObj.suffix)
# remove glob star from suffix if needed
if fileObj.suffix == ".*":
fileObj = fileObj.parent / fileObj.stem
# convert back to a string
fileName = str(fileObj)
# Check to make sure the new fileName hasn't been taken too.
if os.path.exists(fileName):
msg = ("New fileName %s has already been taken. Something "
"is wrong with the append counter.")
raise IOError(msg % fileName)
else:
msg = "Argument fileCollisionMethod was invalid: %s"
raise ValueError(msg % str(fileCollisionMethod))
return fileName
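# Hedged usage sketch (editor's addition): resolving a collision by renaming.
# The path 'data/trials.psydat' is hypothetical and illustrative only.
def _demoHandleCollision():
    # returns e.g. 'data/trials_1.psydat' if the original name is taken
    safeName = handleFileCollision('data/trials.psydat',
                                   fileCollisionMethod='rename')
    return safeName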
# ----------------------------------------------------------------------------
# mathtools.py (psychopy_psychopy/psychopy/tools/mathtools.py)
# from psychopy/psychopy, GPL-3.0
# ----------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Various math functions for working with vectors, matrices, and quaternions.
#
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = ['normalize',
'lerp',
'slerp',
'multQuat',
'quatFromAxisAngle',
'quatToMatrix',
'scaleMatrix',
'rotationMatrix',
'transform',
'translationMatrix',
'concatenate',
'applyMatrix',
'invertQuat',
'quatToAxisAngle',
'posOriToMatrix',
'applyQuat',
'orthogonalize',
'reflect',
'cross',
'distance',
'dot',
'quatMagnitude',
'length',
'project',
'bisector',
'surfaceNormal',
'invertMatrix',
'angleTo',
'surfaceBitangent',
'surfaceTangent',
'vertexNormal',
'isOrthogonal',
'isAffine',
'perp',
'ortho3Dto2D',
'intersectRayPlane',
'matrixToQuat',
'lensCorrection',
'matrixFromEulerAngles',
'alignTo',
'quatYawPitchRoll',
'intersectRaySphere',
'intersectRayAABB',
'intersectRayOBB',
'intersectRayTriangle',
'scale',
'multMatrix',
'normalMatrix',
'fitBBox',
'computeBBoxCorners',
'zeroFix',
'accumQuat',
'fixTangentHandedness',
'articulate',
'forwardProject',
'reverseProject',
'lensCorrectionSpherical']
import numpy as np
import functools
import itertools
VEC_AXES = {'+x': (1, 0, 0), '-x': (-1, 0, 0),
'+y': (0, 1, 0), '-y': (0, -1, 0),
'+z': (0, 0, 1), '-z': (0, 0, -1)}
# ------------------------------------------------------------------------------
# Vector Operations
#
def length(v, squared=False, out=None, dtype=None):
"""Get the length of a vector.
Parameters
----------
v : array_like
Vector(s) to compute the length of; can be Nx2, Nx3, or Nx4. If a 2D
array is specified, rows are treated as separate vectors.
squared : bool, optional
If ``True`` the squared length is returned. The default is ``False``.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
float or ndarray
Length of vector `v`.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v = np.asarray(v, dtype=dtype)
if v.ndim == 2:
assert v.shape[1] <= 4
toReturn = np.zeros((v.shape[0],), dtype=dtype) if out is None else out
v2d, vr = np.atleast_2d(v, toReturn) # 2d view of array
if squared:
vr[:, :] = np.sum(np.square(v2d), axis=1)
else:
vr[:, :] = np.sqrt(np.sum(np.square(v2d), axis=1))
elif v.ndim == 1:
assert v.shape[0] <= 4
if squared:
toReturn = np.sum(np.square(v))
else:
toReturn = np.sqrt(np.sum(np.square(v)))
else:
raise ValueError("Input arguments have invalid dimensions.")
return toReturn
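# Minimal sketch (editor's addition): row-wise lengths of an Nx3 array.
def _demoLength():
    pts = np.array([[3., 4., 0.], [1., 0., 0.]])
    print(length(pts))                # -> [5. 1.]
    print(length(pts, squared=True))  # -> [25. 1.]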
def normalize(v, out=None, dtype=None):
"""Normalize a vector or quaternion.
Parameters
----------
v : array_like
Vector to normalize, can be Nx2, Nx3, or Nx4. If a 2D array is
specified, rows are treated as separate vectors. All vectors should have
nonzero length.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Normalized vector `v`.
Notes
-----
* If a vector has zero length, a vector of all zeros is returned after
normalization.
Examples
--------
Normalize a vector::
v = [1., 2., 3., 4.]
vn = normalize(v)
The `normalize` function is vectorized. It's considerably faster to
normalize large arrays of vectors than to call `normalize` separately for
each one::
v = np.random.uniform(-1.0, 1.0, (1000, 4,)) # 1000 length 4 vectors
vn = np.zeros((1000, 4)) # place to write values
normalize(v, out=vn) # very fast!
# don't do this!
for i in range(1000):
vn[i, :] = normalize(v[i, :])
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
toReturn = np.array(v, dtype=dtype)
else:
toReturn = out
v2d = np.atleast_2d(toReturn) # 2d view of array
norm = np.linalg.norm(v2d, axis=1)
norm[norm == 0.0] = np.NaN # make sure if length==0 division succeeds
v2d /= norm[:, np.newaxis]
np.nan_to_num(v2d, copy=False) # fix NaNs
return toReturn
def orthogonalize(v, n, out=None, dtype=None):
"""Orthogonalize a vector relative to a normal vector.
This function ensures that `v` is perpendicular (or orthogonal) to `n`.
Parameters
----------
v : array_like
Vector to orthogonalize, can be Nx2, Nx3, or Nx4. If a 2D array is
specified, rows are treated as separate vectors.
n : array_like
Normal vector, must have same shape as `v`.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Orthogonalized vector `v` relative to normal vector `n`.
Warnings
--------
If `v` and `n` are the same, the direction of the perpendicular vector is
indeterminate. The resulting vector is degenerate (all zeros).
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v = np.asarray(v, dtype=dtype)
n = np.asarray(n, dtype=dtype)
if out is None:
toReturn = np.zeros_like(v, dtype=dtype)
else:
toReturn = out
toReturn.fill(0.0)
v, n, vr = np.atleast_2d(v, n, toReturn)
vr[:, :] = v
vr[:, :] -= n * np.sum(n * v, axis=1)[:, np.newaxis] # dot product
normalize(vr, out=vr)
return toReturn
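# Quick sketch (editor's addition): make a vector perpendicular to a normal.
def _demoOrthogonalize():
    # the component of [1, 1, 0] along +Y is removed, leaving +X
    print(orthogonalize([1., 1., 0.], [0., 1., 0.]))  # -> [1. 0. 0.]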
def reflect(v, n, out=None, dtype=None):
"""Reflection of a vector.
Get the reflection of `v` relative to normal `n`.
Parameters
----------
v : array_like
Vector to reflect, can be Nx2, Nx3, or Nx4. If a 2D array is specified,
rows are treated as separate vectors.
n : array_like
Normal vector, must have same shape as `v`.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Reflected vector `v` off normal `n`.
"""
# based off https://github.com/glfw/glfw/blob/master/deps/linmath.h
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v = np.asarray(v, dtype=dtype)
n = np.asarray(n, dtype=dtype)
if out is None:
toReturn = np.zeros_like(v, dtype=dtype)
else:
toReturn = out
toReturn.fill(0.0)
v, n, vr = np.atleast_2d(v, n, toReturn)
vr[:, :] = v
vr[:, :] -= (dtype(2.0) * np.sum(n * v, axis=1))[:, np.newaxis] * n
return toReturn
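# Quick sketch (editor's addition): bounce a direction off a +Y 'floor'.
def _demoReflect():
    print(reflect([1., -1., 0.], [0., 1., 0.]))  # -> [1. 1. 0.]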
def dot(v0, v1, out=None, dtype=None):
"""Dot product of two vectors.
The behaviour of this function depends on the format of the input arguments:
* If `v0` and `v1` are 1D, the dot product is returned as a scalar and `out`
is ignored.
* If `v0` and `v1` are 2D, a 1D array of dot products between corresponding
row vectors is returned.
* If one of `v0` or `v1` is 1D and the other 2D, an array of dot products
between each row of the 2D array and the 1D vector is returned.
Parameters
----------
v0, v1 : array_like
Vector(s) to compute dot products of (e.g. [x, y, z]). Inputs may be 1D
or 2D; if one is 1D and the other 2D, the 1D vector is paired with each
row of the 2D array.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Dot product(s) of `v0` and `v1`.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v0 = np.asarray(v0, dtype=dtype)
v1 = np.asarray(v1, dtype=dtype)
if v0.ndim == v1.ndim == 2 or v0.ndim == 2 and v1.ndim == 1:
toReturn = np.zeros((v0.shape[0],), dtype=dtype) if out is None else out
vr = np.atleast_2d(toReturn) # make sure we have a 2d view
vr[:] = np.sum(v1 * v0, axis=1)
elif v0.ndim == v1.ndim == 1:
toReturn = np.sum(v1 * v0)
elif v0.ndim == 1 and v1.ndim == 2:
toReturn = np.zeros((v1.shape[0],), dtype=dtype) if out is None else out
vr = np.atleast_2d(toReturn) # make sure we have a 2d view
vr[:] = np.sum(v1 * v0, axis=1)
else:
raise ValueError("Input arguments have invalid dimensions.")
return toReturn
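# Quick sketch (editor's addition): broadcasting a 1D vector over 2D rows.
def _demoDot():
    rows = np.array([[1., 0., 0.], [0., 1., 0.]])
    print(dot(rows, [0., 1., 0.]))  # -> [0. 1.]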
def cross(v0, v1, out=None, dtype=None):
"""Cross product of 3D vectors.
The behavior of this function depends on the dimensions of the inputs:
* If `v0` and `v1` are 1D, the cross product is returned as 1D vector.
* If `v0` and `v1` are 2D, a 2D array of cross products between
corresponding row vectors is returned.
* If one of `v0` or `v1` is 1D and the other 2D, an array of cross products
between each row of the 2D array and the 1D vector is returned.
Parameters
----------
v0, v1 : array_like
Vector(s) in form [x, y, z] or [x, y, z, 1].
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Cross product of `v0` and `v1`.
Notes
-----
* If input vectors are 4D, the last value of cross product vectors is always
set to one.
* If input vectors `v0` and `v1` are Nx3 and `out` is Nx4, the cross product
is computed and the last column of `out` is filled with ones.
Examples
--------
Find the cross product of two vectors::
a = normalize([1, 2, 3])
b = normalize([3, 2, 1])
c = cross(a, b)
If input arguments are 2D, the function returns the cross products of
corresponding rows::
# create two 6x3 arrays with random numbers
shape = (6, 3,)
a = normalize(np.random.uniform(-1.0, 1.0, shape))
b = normalize(np.random.uniform(-1.0, 1.0, shape))
cprod = np.zeros(shape) # output has the same shape as inputs
cross(a, b, out=cprod)
If a 1D and 2D vector are specified, the cross product of each row of the
2D array and the 1D array is returned as a 2D array::
a = normalize([1, 2, 3])
b = normalize(np.random.uniform(-1.0, 1.0, (6, 3,)))
cprod = np.zeros(b.shape)  # output has the shape of the 2D input
cross(a, b, out=cprod)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v0 = np.asarray(v0, dtype=dtype)
v1 = np.asarray(v1, dtype=dtype)
if v0.ndim == v1.ndim == 2: # 2D x 2D
assert v0.shape == v1.shape
toReturn = np.zeros(v0.shape, dtype=dtype) if out is None else out
vr = np.atleast_2d(toReturn)
vr[:, 0] = v0[:, 1] * v1[:, 2] - v0[:, 2] * v1[:, 1]
vr[:, 1] = v0[:, 2] * v1[:, 0] - v0[:, 0] * v1[:, 2]
vr[:, 2] = v0[:, 0] * v1[:, 1] - v0[:, 1] * v1[:, 0]
if vr.shape[1] == 4:
vr[:, 3] = dtype(1.0)
elif v0.ndim == v1.ndim == 1: # 1D x 1D
assert v0.shape == v1.shape
toReturn = np.zeros(v0.shape, dtype=dtype) if out is None else out
toReturn[0] = v0[1] * v1[2] - v0[2] * v1[1]
toReturn[1] = v0[2] * v1[0] - v0[0] * v1[2]
toReturn[2] = v0[0] * v1[1] - v0[1] * v1[0]
if toReturn.shape[0] == 4:
toReturn[3] = dtype(1.0)
elif v0.ndim == 2 and v1.ndim == 1: # 2D x 1D
toReturn = np.zeros(v0.shape, dtype=dtype) if out is None else out
vr = np.atleast_2d(toReturn)
vr[:, 0] = v0[:, 1] * v1[2] - v0[:, 2] * v1[1]
vr[:, 1] = v0[:, 2] * v1[0] - v0[:, 0] * v1[2]
vr[:, 2] = v0[:, 0] * v1[1] - v0[:, 1] * v1[0]
if vr.shape[1] == 4:
vr[:, 3] = dtype(1.0)
elif v0.ndim == 1 and v1.ndim == 2: # 1D x 2D
toReturn = np.zeros(v1.shape, dtype=dtype) if out is None else out
vr = np.atleast_2d(toReturn)
vr[:, 0] = v1[:, 2] * v0[1] - v1[:, 1] * v0[2]
vr[:, 1] = v1[:, 0] * v0[2] - v1[:, 2] * v0[0]
vr[:, 2] = v1[:, 1] * v0[0] - v1[:, 0] * v0[1]
if vr.shape[1] == 4:
vr[:, 3] = dtype(1.0)
else:
raise ValueError("Input arguments have incorrect dimensions.")
return toReturn
def project(v0, v1, out=None, dtype=None):
"""Project a vector onto another.
Parameters
----------
v0 : array_like
Vector(s) to project; can be Nx2, Nx3, or Nx4. If a 2D array is
specified, rows are treated as separate vectors.
v1 : array_like
Vector(s) to project `v0` onto.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray or float
Projection of vector `v0` on `v1`.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v0 = np.asarray(v0, dtype=dtype)
v1 = np.asarray(v1, dtype=dtype)
if v0.ndim == v1.ndim == 2 or v0.ndim == 1 and v1.ndim == 2:
toReturn = np.zeros_like(v1, dtype=dtype) if out is None else out
toReturn[:, :] = v1[:, :]
toReturn *= (dot(v0, v1, dtype=dtype) / length(v1, squared=True, dtype=dtype))[:, np.newaxis]
elif v0.ndim == v1.ndim == 1:
toReturn = v1 * (dot(v0, v1, dtype=dtype) / np.sum(np.square(v1)))
elif v0.ndim == 2 and v1.ndim == 1:
toReturn = np.zeros_like(v0, dtype=dtype) if out is None else out
toReturn[:, :] = v1[:]
toReturn *= (dot(v0, v1, dtype=dtype) / length(v1, squared=True, dtype=dtype))[:, np.newaxis]
else:
raise ValueError("Input arguments have invalid dimensions.")
toReturn += 0.0 # remove negative zeros
return toReturn
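# Quick sketch (editor's addition): project [2, 2, 0] onto the +X axis.
def _demoProject():
    print(project([2., 2., 0.], [1., 0., 0.]))  # -> [2. 0. 0.]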
def lerp(v0, v1, t, out=None, dtype=None):
"""Linear interpolation (LERP) between two vectors/coordinates.
Parameters
----------
v0 : array_like
Initial vector/coordinate. Can be 2D where each row is a point.
v1 : array_like
Final vector/coordinate. Must be the same shape as `v0`.
t : float
Interpolation weight factor [0, 1].
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Vector at `t` with same shape as `v0` and `v1`.
Examples
--------
Find the coordinate of the midpoint between two vectors::
u = [0., 0., 0.]
v = [0., 0., 1.]
midpoint = lerp(u, v, 0.5) # 0.5 to interpolate half-way between points
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
t = dtype(t)
t0 = dtype(1.0) - t
v0 = np.asarray(v0, dtype=dtype)
v1 = np.asarray(v1, dtype=dtype)
toReturn = np.zeros_like(v0, dtype=dtype) if out is None else out
v0, v1, vr = np.atleast_2d(v0, v1, toReturn)
vr[:, :] = v0 * t0
vr[:, :] += v1 * t
return toReturn
def distance(v0, v1, out=None, dtype=None):
"""Get the distance between vectors/coordinates.
The behaviour of this function depends on the format of the input arguments:
* If `v0` and `v1` are 1D, the distance is returned as a scalar and `out` is
ignored.
* If `v0` and `v1` are 2D, an array of distances between corresponding row
vectors is returned.
* If one of `v0` or `v1` is 1D and the other 2D, an array of distances
between each row of the 2D array and the 1D vector is returned.
Parameters
----------
v0, v1 : array_like
Vectors to compute the distance between.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Distance between vectors `v0` and `v1`.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v0 = np.asarray(v0, dtype=dtype)
v1 = np.asarray(v1, dtype=dtype)
if v0.ndim == v1.ndim == 2 or (v0.ndim == 2 and v1.ndim == 1):
dist = np.zeros((v1.shape[0],), dtype=dtype) if out is None else out
dist[:] = np.sqrt(np.sum(np.square(v1 - v0), axis=1))
elif v0.ndim == v1.ndim == 1:
dist = np.sqrt(np.sum(np.square(v1 - v0)))
elif v0.ndim == 1 and v1.ndim == 2:
dist = np.zeros((v1.shape[0],), dtype=dtype) if out is None else out
dist[:] = np.sqrt(np.sum(np.square(v0 - v1), axis=1))
else:
raise ValueError("Input arguments have invalid dimensions.")
return dist
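# Quick sketch (editor's addition): Euclidean distance between coordinates.
def _demoDistance():
    print(distance([0., 0., 0.], [3., 4., 0.]))  # -> 5.0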
def perp(v, n, norm=True, out=None, dtype=None):
"""Project `v` to be a perpendicular axis of `n`.
Parameters
----------
v : array_like
Vector to project [x, y, z], may be Nx3.
n : array_like
Normal vector [x, y, z], may be Nx3.
norm : bool
Normalize the resulting axis. Default is `True`.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Perpendicular axis of `n` from `v`.
Examples
--------
Determine the local `up` (y-axis) of a surface or plane given `normal`::
normal = [0., 0.70710678, 0.70710678]
up = [1., 0., 0.]
yaxis = perp(up, normal)
Do a cross product to get the x-axis perpendicular to both::
xaxis = cross(yaxis, normal)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v = np.asarray(v, dtype=dtype)
n = np.asarray(n, dtype=dtype)
toReturn = np.zeros_like(v, dtype=dtype) if out is None else out
v2d, n2d, r2d = np.atleast_2d(v, n, toReturn)
# from GLM `glm/gtx/perpendicular.inl`
r2d[:, :] = v2d - project(v2d, n2d, dtype=dtype)
if norm:
normalize(toReturn, out=toReturn)
toReturn += 0.0 # clear negative zeros
return toReturn
def bisector(v0, v1, norm=False, out=None, dtype=None):
"""Get the angle bisector.
Computes a vector which bisects the angle between `v0` and `v1`. Input
vectors `v0` and `v1` must be non-zero.
Parameters
----------
v0, v1 : array_like
Vectors to bisect [x, y, z]. Must be non-zero in length and have the
same shape. Inputs can be Nx3 where the bisector for corresponding
rows will be returned.
norm : bool, optional
Normalize the resulting bisector. Default is `False`.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Bisecting vector [x, y, z].
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v0 = np.asarray(v0, dtype=dtype)
v1 = np.asarray(v1, dtype=dtype)
assert v0.shape == v1.shape
toReturn = np.zeros_like(v0, dtype=dtype) if out is None else out
v02d, v12d, r2d = np.atleast_2d(v0, v1, toReturn)
r2d[:, :] = v02d * length(v12d, dtype=dtype)[:, np.newaxis] + \
v12d * length(v02d, dtype=dtype)[:, np.newaxis]
if norm:
normalize(r2d, out=r2d)
return toReturn
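# Quick sketch (editor's addition): bisect the angle between the X and Y axes.
def _demoBisector():
    print(bisector([1., 0., 0.], [0., 1., 0.], norm=True))
    # -> approximately [0.707, 0.707, 0.]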
def angleTo(v, point, degrees=True, out=None, dtype=None):
"""Get the relative angle to a point from a vector.
The behaviour of this function depends on the format of the input arguments:
* If `v` and `point` are 1D, the angle is returned as a scalar and `out` is
ignored.
* If `v` and `point` are 2D, an array of angles between corresponding row
vectors is returned.
* If one of `v` or `point` is 1D and the other 2D, an array of angles
between each row of the 2D array and the 1D vector is returned.
Parameters
----------
v : array_like
Direction vector [x, y, z].
point : array_like
Point(s) to compute angle to from vector `v`.
degrees : bool, optional
Return the resulting angles in degrees. If `False`, angles will be
returned in radians. Default is `True`.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Distance between vectors `v0` and `v1`.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v = np.asarray(v, dtype=dtype)
point = np.asarray(point, dtype=dtype)
if v.ndim == point.ndim == 2 or (v.ndim == 2 and point.ndim == 1):
angle = np.zeros((v.shape[0],), dtype=dtype) if out is None else out
u = np.sqrt(length(v, squared=True, dtype=dtype) *
length(point, squared=True, dtype=dtype))
angle[:] = np.arccos(dot(v, point, dtype=dtype) / u)
elif v.ndim == 1 and point.ndim == 2:
angle = np.zeros((point.shape[0],), dtype=dtype) if out is None else out
u = np.sqrt(length(v, squared=True, dtype=dtype) *
length(point, squared=True, dtype=dtype))
angle[:] = np.arccos(dot(v, point, dtype=dtype) / u)
elif v.ndim == point.ndim == 1:
u = np.sqrt(length(v, squared=True, dtype=dtype) *
length(point, squared=True, dtype=dtype))
angle = np.arccos(dot(v, point, dtype=dtype) / u)
else:
raise ValueError("Input arguments have invalid dimensions.")
return np.degrees(angle) if degrees else angle
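# Quick sketch (editor's addition): the angle between perpendicular vectors.
def _demoAngleTo():
    print(angleTo([1., 0., 0.], [0., 1., 0.]))                 # -> 90.0
    print(angleTo([1., 0., 0.], [0., 1., 0.], degrees=False))  # -> ~1.5708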
def sortClockwise(verts):
"""
Sort vertices clockwise from 12 O'Clock (aka vertex (0, 1)).
Parameters
==========
verts : array
Array of vertices to sort
"""
# Blank array of angles
angles = []
# Calculate angle of each vertex
for vert in verts:
# Get angle
ang = angleTo(v=[0, 1], point=vert)
# Flip angle if we're past 6 O'clock
if vert[0] < 0:
ang = 360 - ang
# Append to angles array
angles.append(ang)
# Sort vertices by angles array values
verts = [x for _, x in sorted(zip(angles, verts), key=lambda pair: pair[0])]
return verts
def surfaceNormal(tri, norm=True, out=None, dtype=None):
"""Compute the surface normal of a given triangle.
Parameters
----------
tri : array_like
Triangle vertices as 2D (3x3) array [p0, p1, p2] where each vertex is a
length 3 array [vx, vy, vz]. The input array can be 3D (Nx3x3) to
specify multiple triangles.
norm : bool, optional
Normalize computed surface normals if ``True``, default is ``True``.
out : ndarray, optional
Optional output array. Must have one fewer dimensions than `tri`. The
shape of the last dimension must be 3.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Surface normal of triangle `tri`.
Examples
--------
Compute the surface normal of a triangle::
vertices = [[-1., 0., 0.], [0., 1., 0.], [1, 0, 0]]
norm = surfaceNormal(vertices)
Find the normals for multiple triangles, and put results in a pre-allocated
array::
vertices = [[[-1., 0., 0.], [0., 1., 0.], [1, 0, 0]], # 2x3x3
[[1., 0., 0.], [0., 1., 0.], [-1, 0, 0]]]
normals = np.zeros((2, 3))  # normals for two triangles
surfaceNormal(vertices, out=normals)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
tris = np.asarray(tri, dtype=dtype)
if tris.ndim == 2:
tris = np.expand_dims(tri, axis=0)
if tris.shape[0] == 1:
toReturn = np.zeros((3,), dtype=dtype) if out is None else out
else:
if out is None:
toReturn = np.zeros((tris.shape[0], 3), dtype=dtype)
else:
toReturn = out
# from https://www.khronos.org/opengl/wiki/Calculating_a_Surface_Normal
nr = np.atleast_2d(toReturn)
u = tris[:, 1, :] - tris[:, 0, :]
v = tris[:, 2, :] - tris[:, 1, :]
nr[:, 0] = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
nr[:, 1] = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
nr[:, 2] = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]
if norm:
normalize(nr, out=nr)
return toReturn
def surfaceBitangent(tri, uv, norm=True, out=None, dtype=None):
"""Compute the bitangent vector of a given triangle.
This function can be used to generate bitangent vertex attributes for normal
mapping. After computing bitangents, one may orthogonalize them with vertex
normals using the :func:`orthogonalize` function, or within the fragment
shader. Uses texture coordinates at each triangle vertex to determine the
direction of the vector.
Parameters
----------
tri : array_like
Triangle vertices as 2D (3x3) array [p0, p1, p2] where each vertex is a
length 3 array [vx, vy, vz]. The input array can be 3D (Nx3x3) to
specify multiple triangles.
uv : array_like
Texture coordinates associated with each face vertex as a 2D array (3x2)
where each texture coordinate is length 2 array [u, v]. The input array
can be 3D (Nx3x2) to specify multiple texture coordinates if multiple
triangles are specified.
norm : bool, optional
Normalize computed bitangents if ``True``, default is ``True``.
out : ndarray, optional
Optional output array. Must have one fewer dimensions than `tri`. The
shape of the last dimension must be 3.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Surface bitangent of triangle `tri`.
Examples
--------
Computing the bitangents for two triangles from vertex and texture
coordinates (UVs)::
# array of triangle vertices (2x3x3)
tri = np.asarray([
[(-1.0, 1.0, 0.0), (-1.0, -1.0, 0.0), (1.0, -1.0, 0.0)], # 1
[(-1.0, 1.0, 0.0), (-1.0, -1.0, 0.0), (1.0, -1.0, 0.0)]]) # 2
# array of triangle texture coordinates (2x3x2)
uv = np.asarray([
[(0.0, 1.0), (0.0, 0.0), (1.0, 0.0)], # 1
[(0.0, 1.0), (0.0, 0.0), (1.0, 0.0)]]) # 2
bitangents = surfaceBitangent(tri, uv, norm=True) # bitangets (2x3)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
tris = np.asarray(tri, dtype=dtype)
if tris.ndim == 2:
tris = np.expand_dims(tri, axis=0)
if tris.shape[0] == 1:
toReturn = np.zeros((3,), dtype=dtype) if out is None else out
else:
if out is None:
toReturn = np.zeros((tris.shape[0], 3), dtype=dtype)
else:
toReturn = out
uvs = np.asarray(uv, dtype=dtype)
if uvs.ndim == 2:
uvs = np.expand_dims(uvs, axis=0)
# based off the implementation from
# https://learnopengl.com/Advanced-Lighting/Normal-Mapping
e1 = tris[:, 1, :] - tris[:, 0, :]
e2 = tris[:, 2, :] - tris[:, 0, :]
d1 = uvs[:, 1, :] - uvs[:, 0, :]
d2 = uvs[:, 2, :] - uvs[:, 0, :]
# compute the bitangent
nr = np.atleast_2d(toReturn)
nr[:, 0] = -d2[:, 0] * e1[:, 0] + d1[:, 0] * e2[:, 0]
nr[:, 1] = -d2[:, 0] * e1[:, 1] + d1[:, 0] * e2[:, 1]
nr[:, 2] = -d2[:, 0] * e1[:, 2] + d1[:, 0] * e2[:, 2]
f = dtype(1.0) / (d1[:, 0] * d2[:, 1] - d2[:, 0] * d1[:, 1])
nr *= f[:, np.newaxis]
if norm:
normalize(toReturn, out=toReturn, dtype=dtype)
return toReturn
def surfaceTangent(tri, uv, norm=True, out=None, dtype=None):
"""Compute the tangent vector of a given triangle.
This function can be used to generate tangent vertex attributes for normal
mapping. After computing tangents, one may orthogonalize them with vertex
normals using the :func:`orthogonalize` function, or within the fragment
shader. Uses texture coordinates at each triangle vertex to determine the
direction of the vector.
Parameters
----------
tri : array_like
Triangle vertices as 2D (3x3) array [p0, p1, p2] where each vertex is a
length 3 array [vx, vy, vz]. The input array can be 3D (Nx3x3) to
specify multiple triangles.
uv : array_like
Texture coordinates associated with each face vertex as a 2D array (3x2)
where each texture coordinate is length 2 array [u, v]. The input array
can be 3D (Nx3x2) to specify multiple texture coordinates if multiple
triangles are specified. If so `N` must be the same size as the first
dimension of `tri`.
norm : bool, optional
Normalize computed tangents if ``True``, default is ``True``.
out : ndarray, optional
Optional output array. Must have one fewer dimensions than `tri`. The
shape of the last dimension must be 3.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Surface normal of triangle `tri`.
Examples
--------
Compute surface normals, tangents, and bitangents for a list of triangles::
# triangle vertices (2x3x3)
vertices = [[[-1., 0., 0.], [0., 1., 0.], [1, 0, 0]],
[[1., 0., 0.], [0., 1., 0.], [-1, 0, 0]]]
# array of triangle texture coordinates (2x3x2)
uv = np.asarray([
[(0.0, 1.0), (0.0, 0.0), (1.0, 0.0)], # 1
[(0.0, 1.0), (0.0, 0.0), (1.0, 0.0)]]) # 2
normals = surfaceNormal(vertices)
tangents = surfaceTangent(vertices, uv)
bitangents = cross(normals, tangents) # or use `surfaceBitangent`
Orthogonalize a surface tangent with a vertex normal vector to get the
vertex tangent and bitangent vectors::
vertexTangent = orthogonalize(faceTangent, vertexNormal)
vertexBitangent = cross(vertexTangent, vertexNormal)
Ensure computed vectors have the same handedness, if not, flip the tangent
vector (important for applications like normal mapping)::
# tangent, bitangent, and normal are 2D
tangent[dot(cross(normal, tangent), bitangent) < 0.0, :] *= -1.0
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
tris = np.asarray(tri, dtype=dtype)
if tris.ndim == 2:
tris = np.expand_dims(tri, axis=0)
if tris.shape[0] == 1:
toReturn = np.zeros((3,), dtype=dtype) if out is None else out
else:
if out is None:
toReturn = np.zeros((tris.shape[0], 3), dtype=dtype)
else:
toReturn = out
uvs = np.asarray(uv, dtype=dtype)
if uvs.ndim == 2:
uvs = np.expand_dims(uvs, axis=0)
# based off the implementation from
# https://learnopengl.com/Advanced-Lighting/Normal-Mapping
e1 = tris[:, 1, :] - tris[:, 0, :]
e2 = tris[:, 2, :] - tris[:, 0, :]
d1 = uvs[:, 1, :] - uvs[:, 0, :]
d2 = uvs[:, 2, :] - uvs[:, 0, :]
# compute the bitangent
nr = np.atleast_2d(toReturn)
nr[:, 0] = d2[:, 1] * e1[:, 0] - d1[:, 1] * e2[:, 0]
nr[:, 1] = d2[:, 1] * e1[:, 1] - d1[:, 1] * e2[:, 1]
nr[:, 2] = d2[:, 1] * e1[:, 2] - d1[:, 1] * e2[:, 2]
f = dtype(1.0) / (d1[:, 0] * d2[:, 1] - d2[:, 0] * d1[:, 1])
nr *= f[:, np.newaxis]
if norm:
normalize(toReturn, out=toReturn, dtype=dtype)
return toReturn
def vertexNormal(faceNorms, norm=True, out=None, dtype=None):
"""Compute a vertex normal from shared triangles.
This function computes a vertex normal by averaging the surface normals of
the triangles it belongs to. If model has no vertex normals, first use
:func:`surfaceNormal` to compute them, then run :func:`vertexNormal` to
compute vertex normal attributes.
While this function is mainly used to compute vertex normals, it can also
be supplied triangle tangents and bitangents.
Parameters
----------
faceNorms : array_like
An array (Nx3) of surface normals.
norm : bool, optional
Normalize computed normals if ``True``, default is ``True``.
out : ndarray, optional
Optional output array.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Vertex normal.
Examples
--------
Compute a vertex normal from the face normals of the triangles it belongs
to::
normals = [[1., 0., 0.], [0., 1., 0.]] # adjacent face normals
vertexNorm = vertexNormal(normals)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
triNorms2d = np.atleast_2d(np.asarray(faceNorms, dtype=dtype))
nFaces = triNorms2d.shape[0]
if out is None:
toReturn = np.zeros((3,), dtype=dtype)
else:
toReturn = out
toReturn[0] = np.sum(triNorms2d[:, 0])
toReturn[1] = np.sum(triNorms2d[:, 1])
toReturn[2] = np.sum(triNorms2d[:, 2])
toReturn /= nFaces
if norm:
normalize(toReturn, out=toReturn, dtype=dtype)
return toReturn
def fixTangentHandedness(tangents, normals, bitangents, out=None, dtype=None):
"""Ensure the handedness of tangent vectors are all the same.
Often 3D computed tangents may not have the same handedness due to how
texture coordinates are specified. This function takes input surface vectors
are ensures that tangents have the same handedness. Use this function if you
notice that normal mapping shading appears reversed with respect to the
incident light direction. The output array of corrected tangents can be used
inplace of the original.
Parameters
----------
tangents, normals, bitangents : array_like
Input Nx3 arrays of triangle tangents, normals and bitangents. All
arrays must have the same size.
out : ndarray, optional
Optional output array for tangents. If not specified, a new array of
tangents will be allocated.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Array of tangents with handedness corrected.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
tangents = np.asarray(tangents, dtype=dtype)
normals = np.asarray(normals, dtype=dtype)
bitangents = np.asarray(bitangents, dtype=dtype)
toReturn = np.zeros_like(tangents, dtype=dtype) if out is None else out
toReturn[:, :] = tangents
toReturn[dot(cross(normals, tangents, dtype=dtype),
bitangents, dtype=dtype) < 0.0, :] *= -1.0
return toReturn
# ------------------------------------------------------------------------------
# Collision Detection, Interaction and Kinematics
#
def fitBBox(points, dtype=None):
"""Fit an axis-aligned bounding box around points.
This computes the minimum and maximum extents for a bounding box to
completely enclose `points`. Keep in mind the output bounds are
axis-aligned and may not fit the points optimally (i.e. with the minimum
required volume). However, this should work well enough for applications
such as visibility testing (see `~psychopy.tools.viewtools.volumeVisible`
for more information).
Parameters
----------
points : array_like
Nx3 or Nx4 array of points to fit the bounding box to.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Extents (mins, maxs) as a 2x3 array.
See Also
--------
computeBBoxCorners : Convert bounding box extents to corners.
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
points = np.asarray(points, dtype=dtype)
extents = np.zeros((2, 3), dtype=dtype)
extents[0, :] = (np.min(points[:, 0]),
np.min(points[:, 1]),
np.min(points[:, 2]))
extents[1, :] = (np.max(points[:, 0]),
np.max(points[:, 1]),
np.max(points[:, 2]))
return extents
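# Quick sketch (editor's addition): axis-aligned extents of a small point set.
def _demoFitBBox():
    pts = np.array([[0., 1., 2.], [-1., 0., 3.], [2., -2., 1.]])
    print(fitBBox(pts))  # -> [[-1. -2.  1.]
                         #     [ 2.  1.  3.]]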
def computeBBoxCorners(extents, dtype=None):
"""Get the corners of an axis-aligned bounding box.
Parameters
----------
extents : array_like
2x3 array indicating the minimum and maximum extents of the bounding
box.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
8x4 array of points defining the corners of the bounding box.
Examples
--------
Compute the corner points of a bounding box::
minExtent = [-1, -1, -1]
maxExtent = [1, 1, 1]
corners = computeBBoxCorners([minExtent, maxExtent])
# [[ 1. 1. 1. 1.]
# [-1. 1. 1. 1.]
# [ 1. -1. 1. 1.]
# [-1. -1. 1. 1.]
# [ 1. 1. -1. 1.]
# [-1. 1. -1. 1.]
# [ 1. -1. -1. 1.]
# [-1. -1. -1. 1.]]
"""
extents = np.asarray(extents, dtype=dtype)
assert extents.shape == (2, 3,)
corners = np.zeros((8, 4), dtype=dtype)
idx = np.arange(0, 8)
corners[:, 0] = np.where(idx[:] & 1, extents[0, 0], extents[1, 0])
corners[:, 1] = np.where(idx[:] & 2, extents[0, 1], extents[1, 1])
corners[:, 2] = np.where(idx[:] & 4, extents[0, 2], extents[1, 2])
corners[:, 3] = 1.0
return corners
def intersectRayPlane(rayOrig, rayDir, planeOrig, planeNormal, dtype=None):
"""Get the point which a ray intersects a plane.
Parameters
----------
rayOrig : array_like
Origin of the line in space [x, y, z].
rayDir : array_like
Direction vector of the line [x, y, z].
planeOrig : array_like
Origin of the plane to test [x, y, z].
planeNormal : array_like
Normal vector of the plane [x, y, z].
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
tuple or None
Position (`ndarray`) in space which the line intersects the plane and
the distance the intersect occurs from the origin (`float`). `None` is
returned if the line does not intersect the plane at a single point or
at all.
Examples
--------
Find the point in the scene a ray intersects the plane::
# plane information
planeOrigin = [0, 0, 0]
planeNormal = [0, 0, 1]
planeUpAxis = perp([0, 1, 0], planeNormal)
# ray
rayDir = [0, 0, -1]
rayOrigin = [0, 0, 5]
# get the intersect and distance in 3D world space
pnt, dist = intersectRayPlane(rayOrigin, rayDir, planeOrigin, planeNormal)
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
# based off the method from GLM
rayOrig = np.asarray(rayOrig, dtype=dtype)
rayDir = np.asarray(rayDir, dtype=dtype)
planeOrig = np.asarray(planeOrig, dtype=dtype)
planeNormal = np.asarray(planeNormal, dtype=dtype)
denom = dot(rayDir, planeNormal, dtype=dtype)
if denom == 0.0:
return None
# distance to collision
dist = dot((planeOrig - rayOrig), planeNormal, dtype=dtype) / denom
intersect = dist * rayDir + rayOrig
return intersect, dist
def intersectRaySphere(rayOrig, rayDir, sphereOrig=(0., 0., 0.), sphereRadius=1.0,
dtype=None):
"""Calculate the points which a ray/line intersects a sphere (if any).
Get the 3D coordinate of the point which the ray intersects the sphere and
the distance to the point from `orig`. The nearest point is returned if
the line intersects the sphere at multiple locations. All coordinates should
be in world/scene units.
Parameters
----------
rayOrig : array_like
Origin of the ray in space [x, y, z].
rayDir : array_like
Direction vector of the ray [x, y, z], should be normalized.
sphereOrig : array_like
Origin of the sphere to test [x, y, z].
sphereRadius : float
Sphere radius to test in scene units.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
tuple
Coordinate in world space of the intersection and distance in scene
units from `orig`. Returns `None` if there is no intersection.
"""
# based off example from https://antongerdelan.net/opengl/raycasting.html
dtype = np.float64 if dtype is None else np.dtype(dtype).type
rayOrig = np.asarray(rayOrig, dtype=dtype)
rayDir = np.asarray(rayDir, dtype=dtype)
sphereOrig = np.asarray(sphereOrig, dtype=dtype)
sphereRadius = np.asarray(sphereRadius, dtype=dtype)
d = rayOrig - sphereOrig
b = np.dot(rayDir, d)
c = np.dot(d, d) - np.square(sphereRadius)
b2mc = np.square(b) - c # determinant
if b2mc < 0.0: # no roots, ray does not intersect sphere
return None
u = np.sqrt(b2mc)
nearestDist = np.minimum(-b + u, -b - u)
pos = (rayDir * nearestDist) + rayOrig
return pos, nearestDist
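# Quick sketch (editor's addition): a ray fired down -Z at a unit sphere at
# the origin; the near intersection is on the sphere's +Z pole.
def _demoIntersectRaySphere():
    hit = intersectRaySphere([0., 0., 5.], [0., 0., -1.])
    if hit is not None:
        pos, dist = hit  # pos -> [0. 0. 1.], dist -> 4.0
        return pos, dist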
def intersectRayAABB(rayOrig, rayDir, boundsOffset, boundsExtents, dtype=None):
"""Find the point a ray intersects an axis-aligned bounding box (AABB).
Parameters
----------
rayOrig : array_like
Origin of the ray in space [x, y, z].
rayDir : array_like
Direction vector of the ray [x, y, z], should be normalized.
boundsOffset : array_like
Offset of the bounding box in the scene [x, y, z].
boundsExtents : array_like
Minimum and maximum extents of the bounding box.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
tuple
Coordinate in world space of the intersection and distance in scene
units from `rayOrig`. Returns `None` if there is no intersection.
Examples
--------
Get the point on an axis-aligned bounding box that the cursor is over and
place a 3D stimulus there. The eye location is defined by `RigidBodyPose`
object `camera`::
# get the mouse position on-screen
mx, my = mouse.getPos()
# find the point which the ray intersects on the box
result = intersectRayAABB(
camera.pos,
camera.transformNormal(win.coordToRay((mx, my))),
myStim.pos,
myStim.thePose.bounds.extents)
# if the ray intersects, set the position of the cursor object to it
if result is not None:
cursorModel.thePose.pos = result[0]
cursorModel.draw() # don't draw anything if there is no intersect
Note that if the model is rotated, the bounding box may not be aligned
anymore with the axes. Use `intersectRayOBB` if your model rotates.
"""
# based of the example provided here:
# https://www.scratchapixel.com/lessons/3d-basic-rendering/
# minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
dtype = np.float64 if dtype is None else np.dtype(dtype).type
rayOrig = np.asarray(rayOrig, dtype=dtype)
rayDir = np.asarray(rayDir, dtype=dtype)
boundsOffset = np.asarray(boundsOffset, dtype=dtype)
extents = np.asarray(boundsExtents, dtype=dtype) + boundsOffset
invDir = 1.0 / rayDir
sign = np.zeros((3,), dtype=int)
sign[invDir < 0.0] = 1
tmin = (extents[sign[0], 0] - rayOrig[0]) * invDir[0]
tmax = (extents[1 - sign[0], 0] - rayOrig[0]) * invDir[0]
tymin = (extents[sign[1], 1] - rayOrig[1]) * invDir[1]
tymax = (extents[1 - sign[1], 1] - rayOrig[1]) * invDir[1]
if tmin > tymax or tymin > tmax:
return None
if tymin > tmin:
tmin = tymin
if tymax < tmax:
tmax = tymax
tzmin = (extents[sign[2], 2] - rayOrig[2]) * invDir[2]
tzmax = (extents[1 - sign[2], 2] - rayOrig[2]) * invDir[2]
if tmin > tzmax or tzmin > tmax:
return None
if tzmin > tmin:
tmin = tzmin
if tzmax < tmax:
tmax = tzmax
if tmin < 0:
if tmax < 0:
return None
return (rayDir * tmin) + rayOrig, tmin
def intersectRayOBB(rayOrig, rayDir, modelMatrix, boundsExtents, dtype=None):
"""Find the point a ray intersects an oriented bounding box (OBB).
Parameters
----------
rayOrig : array_like
Origin of the ray in space [x, y, z].
rayDir : array_like
Direction vector of the ray [x, y, z], should be normalized.
modelMatrix : array_like
4x4 model matrix of the object and bounding box.
boundsExtents : array_like
Minimum and maximum extents of the bounding box.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
tuple
Coordinate in world space of the intersection and distance in scene
units from `rayOrig`. Returns `None` if there is no intersection.
Examples
--------
Get the point on an oriented bounding box that the cursor is over and place
a 3D stimulus there. The eye location is defined by `RigidBodyPose` object
`camera`::
# get the mouse position on-screen
mx, my = mouse.getPos()
# find the point which the ray intersects on the box
result = intersectRayOBB(
camera.pos,
camera.transformNormal(win.coordToRay((mx, my))),
myStim.thePose.getModelMatrix(),
myStim.thePose.bounds.extents)
# if the ray intersects, set the position of the cursor object to it
if result is not None:
cursorModel.thePose.pos = result[0]
cursorModel.draw() # don't draw anything if there is no intersect
"""
# based off algorithm:
# https://www.opengl-tutorial.org/miscellaneous/clicking-on-objects/
# picking-with-custom-ray-obb-function/
dtype = np.float64 if dtype is None else np.dtype(dtype).type
rayOrig = np.asarray(rayOrig, dtype=dtype)
rayDir = np.asarray(rayDir, dtype=dtype)
modelMatrix = np.asarray(modelMatrix, dtype=dtype)
boundsOffset = np.asarray(modelMatrix[:3, 3], dtype=dtype)
extents = np.asarray(boundsExtents, dtype=dtype)
tmin = 0.0
tmax = np.finfo(dtype).max
d = boundsOffset - rayOrig
# solve intersects for each pair of planes along each axis
for i in range(3):
axis = modelMatrix[:3, i]
e = np.dot(axis, d)
f = np.dot(rayDir, axis)
if np.fabs(f) > 1e-5:
t1 = (e + extents[0, i]) / f
t2 = (e + extents[1, i]) / f
if t1 > t2:
temp = t1
t1 = t2
t2 = temp
if t2 < tmax:
tmax = t2
if t1 > tmin:
tmin = t1
if tmin > tmax:
return None
else:
# very close to parallel with the face
if -e + extents[0, i] > 0.0 or -e + extents[1, i] < 0.0:
return None
return (rayDir * tmin) + rayOrig, tmin
def intersectRayTriangle(rayOrig, rayDir, tri, dtype=None):
"""Get the intersection of a ray and triangle(s).
This function can be used to achieve 'pixel-perfect' ray picking/casting on
meshes defined with triangles. However, high-poly meshes may lead to
performance issues.
Parameters
----------
rayOrig : array_like
Origin of the ray in space [x, y, z].
rayDir : array_like
Direction vector of the ray [x, y, z], should be normalized.
tri : array_like
Triangle vertices as 2D (3x3) array [p0, p1, p2] where each vertex is a
length 3 array [vx, vy, vz]. The input array can be 3D (Nx3x3) to
specify multiple triangles.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
tuple
Coordinate in world space of the intersection, distance in scene
units from `rayOrig`, and the barycentric coordinates on the triangle
[x, y]. Returns `None` if there is no intersection.
"""
# based off `intersectRayTriangle` from GLM (https://glm.g-truc.net)
dtype = np.float64 if dtype is None else np.dtype(dtype).type
rayOrig = np.asarray(rayOrig, dtype=dtype)
rayDir = np.asarray(rayDir, dtype=dtype)
triVerts = np.asarray(tri, dtype=dtype)
edge1 = triVerts[1, :] - triVerts[0, :]
edge2 = triVerts[2, :] - triVerts[0, :]
baryPos = np.zeros((2,), dtype=dtype)
p = np.cross(rayDir, edge2)
det = np.dot(edge1, p)
if det > np.finfo(dtype).eps:
dist = rayOrig - triVerts[0, :]
baryPos[0] = np.dot(dist, p)
if baryPos[0] < 0.0 or baryPos[0] > det:
return None
ortho = np.cross(dist, edge1)
baryPos[1] = np.dot(rayDir, ortho)
if baryPos[1] < 0.0 or baryPos[0] + baryPos[1] > det:
return None
elif det < -np.finfo(dtype).eps:
dist = rayOrig - triVerts[0, :]
baryPos[0] = np.dot(dist, p)
if baryPos[0] > 0.0 or baryPos[0] < det:
return None
ortho = np.cross(dist, edge1)
baryPos[1] = np.dot(rayDir, ortho)
if baryPos[1] > 0.0 or baryPos[0] + baryPos[1] < det:
return None
else:
return None
invDet = 1.0 / det
dist = np.dot(edge2, ortho) * invDet
baryPos *= invDet
return (rayDir * dist) + rayOrig, dist, baryPos
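# Quick sketch (editor's addition): pick a point on a triangle facing +Z.
def _demoIntersectRayTriangle():
    tri = [[-1., -1., 0.], [1., -1., 0.], [0., 1., 0.]]
    hit = intersectRayTriangle([0., 0., 5.], [0., 0., -1.], tri)
    if hit is not None:
        pos, dist, bary = hit  # pos -> [0. 0. 0.], dist -> 5.0
        return pos, dist, bary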
def ortho3Dto2D(p, orig, normal, up, right=None, dtype=None):
"""Get the planar coordinates of an orthogonal projection of a 3D point onto
a 2D plane.
This function finds the point on the plane closest to the 3D point and
expresses it in the plane's own 2D coordinate system.
Parameters
----------
p : array_like
Point to be projected on the plane.
orig : array_like
Origin of the plane to test [x, y, z].
normal : array_like
Normal vector of the plane [x, y, z], must be normalized.
up : array_like
Normalized up (+Y) direction of the plane's coordinate system. Must be
perpendicular to `normal`.
right : array_like, optional
Perpendicular right (+X) axis. If not provided, the axis will be
computed via the cross product between `normal` and `up`.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Coordinates on the plane [X, Y] where the 3D point projects towards
perpendicularly.
Examples
--------
This function can be used with :func:`intersectRayPlane` to find the
location on the plane the ray intersects::
# plane information
planeOrigin = [0, 0, 0]
planeNormal = [0, 0, 1] # must be normalized
planeUpAxis = perp([0, 1, 0], planeNormal) # must also be normalized
# ray
rayDir = [0, 0, -1]
rayOrigin = [0, 0, 5]
# get the intersect in 3D world space
pnt = intersectRayPlane(rayOrigin, rayDir, planeOrigin, planeNormal)
# get the 2D coordinates on the plane the intersect occurred
planeX, planeY = ortho3Dto2D(pnt, planeOrigin, planeNormal, planeUpAxis)
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
p = np.asarray(p, dtype=dtype)
orig = np.asarray(orig, dtype=dtype)
normal = np.asarray(normal, dtype=dtype)
up = np.asarray(up, dtype=dtype)
toReturn = np.zeros((2,), dtype=dtype)
offset = p - orig
if right is None:
# derive X axis with cross product
toReturn[0] = dot(offset, cross(normal, up, dtype=dtype), dtype=dtype)
else:
toReturn[0] = dot(offset, np.asarray(right, dtype=dtype), dtype=dtype)
toReturn[1] = dot(offset, up, dtype=dtype)
return toReturn
def articulate(boneVecs, boneOris, dtype=None):
"""Articulate an armature.
This function is used for forward kinematics and posing by specifying a list
of 'bones'. A bone has a length and orientation, where sequential bones are
linked end-to-end. Returns the transformed origins of the bones in scene
coordinates and their orientations.
There are many applications for forward kinematics such as posing armatures
and stimuli for display (eg. mocap data). Another application is for getting
the location of the end effector of coordinate measuring hardware, where
encoders measure the joint angles and the length of linking members are
known. This can be used for computing pose from "Sword of Damocles"[1]_ like
hardware or some other haptic input devices which the participant wears (eg.
a glove that measures joint angles in the hand). The computed pose of the
joints can be used to interact with virtual stimuli.
Parameters
----------
boneVecs : array_like
Bone lengths [x, y, z] as an Nx3 array.
boneOris : array_like
Orientation of the bones as quaternions in form [x, y, z, w], relative
to the previous bone.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
tuple
Array of bone origins and orientations. The first origin is root
position which is always at [0, 0, 0]. Use :func:`transform` to
reposition the armature, or create a transformation matrix and use
`applyMatrix` to translate and rotate the whole armature into position.
References
----------
.. [1] Sutherland, I. E. (1968). "A head-mounted three dimensional display".
Proceedings of AFIPS 68, pp. 757-764
Examples
--------
Compute the orientations and origins of segments of an arm::
# bone lengths
boneLengths = [[0., 1., 0.], [0., 1., 0.], [0., 1., 0.]]
# create quaternions for joints
shoulder = mt.quatFromAxisAngle('-y', 45.0)
elbow = mt.quatFromAxisAngle('+z', 45.0)
wrist = mt.quatFromAxisAngle('+z', 45.0)
# articulate the parts of the arm
boxPos, boxOri = mt.articulate(boneLengths, [shoulder, elbow, wrist])
# assign positions and orientations to 3D objects
shoulderModel.thePose.posOri = (boxPos[0, :], boxOri[0, :])
elbowModel.thePose.posOri = (boxPos[1, :], boxOri[1, :])
wristModel.thePose.posOri = (boxPos[2, :], boxOri[2, :])
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
boneVecs = np.asarray(boneVecs, dtype=dtype)
boneOris = np.asarray(boneOris, dtype=dtype)
jointOri = accumQuat(boneOris, dtype=dtype) # get joint orientations
bonesRotated = applyQuat(jointOri, boneVecs, dtype=dtype) # rotate bones
# accumulate
bonesTranslated = np.asarray(
tuple(itertools.accumulate(bonesRotated[:], lambda a, b: a + b)),
dtype=dtype)
bonesTranslated -= bonesTranslated[0, :] # offset root length
return bonesTranslated, jointOri
# ------------------------------------------------------------------------------
# Quaternion Operations
#
def slerp(q0, q1, t, shortest=True, out=None, dtype=None):
"""Spherical linear interpolation (SLERP) between two quaternions.
The behaviour of this function depends on the types of arguments:
* If `q0` and `q1` are both 1-D and `t` is scalar, the interpolation at `t`
is returned.
* If `q0` and `q1` are both 2-D Nx4 arrays and `t` is scalar, an Nx4 array
is returned with each row containing the interpolation at `t` for each
quaternion pair at matching row indices in `q0` and `q1`.
Parameters
----------
q0 : array_like
Initial quaternion in form [x, y, z, w] where w is real and x, y, z
are imaginary components.
q1 : array_like
Final quaternion in form [x, y, z, w] where w is real and x, y, z
are imaginary components.
t : float
Interpolation weight factor within interval 0.0 and 1.0.
shortest : bool, optional
Ensure interpolation occurs along the shortest arc along the 4-D
hypersphere (default is `True`).
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Quaternion [x, y, z, w] at `t`.
Examples
--------
Interpolate between two orientations::
q0 = quatFromAxisAngle([0., 0., -1.], 90.0, degrees=True)
q1 = quatFromAxisAngle([0., 0., -1.], -90.0, degrees=True)
# halfway between 90 and -90 is 0.0 or quaternion [0. 0. 0. 1.]
qr = slerp(q0, q1, 0.5)
Example of smooth rotation of an object with fixed angular velocity::
degPerSec = 10.0 # rotate a stimulus at 10 degrees per second
# initial orientation, axis rotates in the Z direction
qr = quatFromAxisAngle([0., 0., -1.], 0.0, degrees=True)
# amount to rotate every second
qv = quatFromAxisAngle([0., 0., -1.], degPerSec, degrees=True)
# ---- within main experiment loop ----
# `frameTime` is the time elapsed in seconds from last `slerp`.
qr = multQuat(qr, slerp((0., 0., 0., 1.), qv, frameTime))
_, angle = quatToAxisAngle(qr) # discard axis, only need angle
# myStim is a GratingStim or anything with an 'ori' argument which
# accepts angle in degrees
myStim.ori = angle
myStim.draw()
"""
# Implementation based on code found here:
# https://en.wikipedia.org/wiki/Slerp
#
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
q0 = normalize(q0, dtype=dtype)
q1 = normalize(q1, dtype=dtype)
assert q0.shape == q1.shape
toReturn = np.zeros(q0.shape, dtype=dtype) if out is None else out
toReturn.fill(0.0)
t = dtype(t)
q0, q1, qr = np.atleast_2d(q0, q1, toReturn)
d = np.clip(np.sum(q0 * q1, axis=1), -1.0, 1.0)
if shortest:
flip = d < 0.0  # compute the mask before modifying `d`
d[flip] *= -1.0
q1[flip] *= -1.0
theta0 = np.arccos(d)
theta = theta0 * t
sinTheta = np.sin(theta)
s1 = sinTheta / np.sin(theta0)
s0 = np.cos(theta[:, np.newaxis]) - d[:, np.newaxis] * s1[:, np.newaxis]
qr[:, :] = q0 * s0
qr[:, :] += q1 * s1[:, np.newaxis]
qr[:, :] += 0.0
return toReturn
def quatToAxisAngle(q, degrees=True, dtype=None):
"""Convert a quaternion to `axis` and `angle` representation.
This allows you to use quaternions to set the orientation of stimuli that
have an `ori` property.
Parameters
----------
q : tuple, list or ndarray of float
Quaternion in form [x, y, z, w] where w is real and x, y, z
are imaginary components.
degrees : bool, optional
Indicate `angle` is to be returned in degrees, otherwise `angle` will be
returned in radians.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
tuple
Axis and angle of quaternion in form ([ax, ay, az], angle). If `degrees`
is `True`, the angle returned is in degrees, radians if `False`.
Examples
--------
Using a quaternion to rotate a stimulus a fixed angle each frame::
# initial orientation, axis rotates in the Z direction
qr = quatFromAxisAngle([0., 0., -1.], 0.0, degrees=True)
# rotation per-frame, here it's 0.1 degrees per frame
qf = quatFromAxisAngle([0., 0., -1.], 0.1, degrees=True)
# ---- within main experiment loop ----
# myStim is a GratingStim or anything with an 'ori' argument which
# accepts angle in degrees
qr = multQuat(qr, qf) # cumulative rotation
_, angle = quatToAxisAngle(qr) # discard axis, only need angle
myStim.ori = angle
myStim.draw()
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
q = normalize(q, dtype=dtype) # returns ndarray
v = np.sqrt(np.sum(np.square(q[:3])))
if np.count_nonzero(q[:3]):
axis = q[:3] / v
angle = dtype(2.0) * np.arctan2(v, q[3])
else:
axis = np.zeros((3,), dtype=dtype)
axis[0] = 1.
angle = 0.0
axis += 0.0
return axis, np.degrees(angle) if degrees else angle
def quatFromAxisAngle(axis, angle, degrees=True, dtype=None):
"""Create a quaternion to represent a rotation about `axis` vector by
`angle`.
Parameters
----------
axis : tuple, list, ndarray or str
Axis vector components or axis name. If a vector, input must be length
3 [x, y, z]. A string can be specified for rotations about world axes
(eg. `'+x'`, `'-z'`, `'+y'`, etc.)
angle : float
Rotation angle in radians (or degrees if `degrees` is `True`). Rotations
are right-handed about the specified `axis`.
degrees : bool, optional
Indicate `angle` is in degrees, otherwise `angle` will be treated as
radians.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Quaternion [x, y, z, w].
Examples
--------
Create a quaternion from specified `axis` and `angle`::
axis = [0., 0., -1.] # rotate about -Z axis
angle = 90.0 # angle in degrees
ori = quatFromAxisAngle(axis, angle, degrees=True) # using degrees!
"""
dtype = np.float64 if dtype is None else np.dtype(dtype).type
toReturn = np.zeros((4,), dtype=dtype)
if degrees:
halfRad = np.radians(angle, dtype=dtype) / dtype(2.0)
else:
halfRad = np.dtype(dtype).type(angle) / dtype(2.0)
try:
axis = VEC_AXES[axis] if isinstance(axis, str) else axis
except KeyError:
raise ValueError(
"Value of `axis` must be either '+X', '-X', '+Y', '-Y', '+Z' or "
"'-Z' or length 3 vector.")
axis = normalize(axis, dtype=dtype)
if np.count_nonzero(axis) == 0:
raise ValueError("Value for `axis` is zero-length.")
np.multiply(axis, np.sin(halfRad), out=toReturn[:3])
toReturn[3] = np.cos(halfRad)
toReturn += 0.0 # remove negative zeros
return toReturn
def quatYawPitchRoll(q, degrees=True, out=None, dtype=None):
"""Get the yaw, pitch, and roll of a quaternion's orientation relative to
the world -Z axis.
You can multiply the quaternion by the inverse of some other one to make the
returned values referenced to a local coordinate system.
Parameters
----------
q : tuple, list or ndarray of float
Quaternion in form [x, y, z, w] where w is real and x, y, z
are imaginary components.
degrees : bool, optional
Indicate angles are to be returned in degrees, otherwise they will be
returned in radians.
out : ndarray
Optional output array. Must have same `shape` and `dtype` as what is
expected to be returned by this function if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Yaw, pitch and roll [yaw, pitch, roll] of quaternion `q`.
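Examples
--------
A minimal sketch; the axis and angle here are illustrative::
q = quatFromAxisAngle([0., 1., 0.], 45.0, degrees=True)
angles = quatYawPitchRoll(q)  # three angles, in degrees by default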
"""
# based off code found here:
# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
# Yields the same results as PsychXR's LibOVRPose.getYawPitchRoll method.
dtype = np.float64 if dtype is None else np.dtype(dtype).type
q = np.asarray(q, dtype=dtype)
toReturn = np.zeros((3,), dtype=dtype) if out is None else out
sinRcosP = 2.0 * (q[3] * q[0] + q[1] * q[2])
cosRcosP = 1.0 - 2.0 * (q[0] * q[0] + q[1] * q[1])
toReturn[0] = np.arctan2(sinRcosP, cosRcosP)
sinp = 2.0 * (q[3] * q[1] - q[2] * q[0])
if np.fabs(sinp) >= 1.:
toReturn[1] = np.copysign(np.pi / 2., sinp)
else:
toReturn[1] = np.arcsin(sinp)
sinYcosP = 2.0 * (q[3] * q[2] + q[0] * q[1])
cosYcosP = 1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2])
toReturn[2] = np.arctan2(sinYcosP, cosYcosP)
if degrees:
toReturn[:] = np.degrees(toReturn[:])
return toReturn
def quatMagnitude(q, squared=False, out=None, dtype=None):
"""Get the magnitude of a quaternion.
A quaternion is normalized if its magnitude is 1.
Parameters
----------
q : array_like
Quaternion(s) in form [x, y, z, w] where w is real and x, y, z are
imaginary components.
squared : bool, optional
If ``True`` return the squared magnitude. If you are just checking if a
quaternion is normalized, the squared magnitude will suffice to avoid
the square root operation.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
float or ndarray
Magnitude of quaternion `q`.
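Examples
--------
A minimal sketch, checking whether a quaternion is normalized::
q = [0., 0., 0.70710678, 0.70710678]  # 90 degrees about +Z
mag = quatMagnitude(q)  # ~1.0
isNormalized = np.isclose(quatMagnitude(q, squared=True), 1.0)  # True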
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
q = np.asarray(q, dtype=dtype)
if q.ndim == 1:
assert q.shape[0] == 4
if squared:
toReturn = np.sum(np.square(q))
else:
toReturn = np.sqrt(np.sum(np.square(q)))
elif q.ndim == 2:
assert q.shape[1] == 4
toReturn = np.zeros((q.shape[0],), dtype=dtype) if out is None else out
if squared:
toReturn[:] = np.sum(np.square(q), axis=1)
else:
toReturn[:] = np.sqrt(np.sum(np.square(q), axis=1))
else:
raise ValueError("Input argument 'q' has incorrect dimensions.")
return toReturn
def multQuat(q0, q1, out=None, dtype=None):
"""Multiply quaternion `q0` and `q1`.
The orientation of the returned quaternion is the combination of the input
quaternions.
Parameters
----------
q0, q1 : array_like
Quaternions to multiply in form [x, y, z, w] where w is real and x, y, z
are imaginary components. If 2D (Nx4) arrays are specified, quaternions
are multiplied row-wise between each array.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Combined orientations of `q0` and `q1`.
Notes
-----
* Quaternions are normalized prior to multiplication.
Examples
--------
Combine the orientations of two quaternions::
a = quatFromAxisAngle([0, 0, -1], 45.0, degrees=True)
b = quatFromAxisAngle([0, 0, -1], 90.0, degrees=True)
c = multQuat(a, b) # rotates 135 degrees about -Z axis
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
q0 = normalize(q0, dtype=dtype)
q1 = normalize(q1, dtype=dtype)
assert q0.shape == q1.shape
toReturn = np.zeros(q0.shape, dtype=dtype) if out is None else out
toReturn.fill(0.0) # clear array
q0, q1, qr = np.atleast_2d(q0, q1, toReturn)
# multiply quaternions for each row of the operand arrays
qr[:, :3] = np.cross(q0[:, :3], q1[:, :3], axis=1)
qr[:, :3] += q0[:, :3] * np.expand_dims(q1[:, 3], axis=1)
qr[:, :3] += q1[:, :3] * np.expand_dims(q0[:, 3], axis=1)
qr[:, 3] = q0[:, 3]
qr[:, 3] *= q1[:, 3]
qr[:, 3] -= np.sum(np.multiply(q0[:, :3], q1[:, :3]), axis=1) # dot product
qr += 0.0
return toReturn
def invertQuat(q, out=None, dtype=None):
"""Get the multiplicative inverse of a quaternion.
This gives a quaternion which rotates in the opposite direction with equal
magnitude. Multiplying a quaternion by its inverse returns an identity
quaternion as both orientations cancel out.
Parameters
----------
q : ndarray, list, or tuple of float
Quaternion to invert in form [x, y, z, w] where w is real and x, y, z
are imaginary components. If `q` is 2D (Nx4), each row is treated as a
separate quaternion and inverted.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Inverse of quaternion `q`.
Examples
--------
Show that multiplying a quaternion by its inverse returns an identity
quaternion where [x=0, y=0, z=0, w=1]::
angle = 90.0
axis = [0., 0., -1.]
q = quatFromAxisAngle(axis, angle, degrees=True)
qinv = invertQuat(q)
qr = multQuat(q, qinv)
qi = np.array([0., 0., 0., 1.]) # identity quaternion
print(np.allclose(qi, qr)) # True
Notes
-----
* Quaternions are normalized prior to inverting.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
q = normalize(q, dtype=dtype)
toReturn = np.zeros(q.shape, dtype=dtype) if out is None else out
qn, qinv = np.atleast_2d(q, toReturn) # 2d views
# conjugate the quaternion
qinv[:, :3] = -qn[:, :3]
qinv[:, 3] = qn[:, 3]
qinv /= np.sum(np.square(qn), axis=1)[:, np.newaxis]
qinv += 0.0 # remove negative zeros
return toReturn
def applyQuat(q, points, out=None, dtype=None):
"""Rotate points/coordinates using a quaternion.
This is similar to using `applyMatrix` with a rotation matrix. However, it
is computationally less intensive to use `applyQuat` if one only wishes to
rotate points.
Parameters
----------
q : array_like
Quaternion specifying the rotation in form [x, y, z, w] where w is real
and x, y, z are imaginary components.
points : array_like
2D array of vectors or points to transform, where each row is a single
point. Only the x, y, and z components (the first three columns) are
rotated. Additional columns are copied.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Transformed points.
Examples
--------
Rotate points using a quaternion::
points = [[1., 0., 0.], [0., -1., 0.]]
quat = quatFromAxisAngle([0., 0., -1.], -90.0, degrees=True)
pointsRotated = applyQuat(quat, points)
# [[0. 1. 0.]
# [1. 0. 0.]]
Show that you get the same result as a rotation matrix::
axis = [0., 0., -1.]
angle = -90.0
rotMat = rotationMatrix(angle, axis)[:3, :3] # rotation sub-matrix only
rotQuat = quatFromAxisAngle(axis, angle, degrees=True)
points = [[1., 0., 0.], [0., -1., 0.]]
isClose = np.allclose(applyMatrix(rotMat, points), # True
applyQuat(rotQuat, points))
Specifying an array to `q` where each row is a quaternion transforms points
in corresponding rows of `points`::
points = [[1., 0., 0.], [0., -1., 0.]]
quats = [quatFromAxisAngle([0., 0., -1.], -90.0, degrees=True),
quatFromAxisAngle([0., 0., -1.], 45.0, degrees=True)]
applyQuat(quats, points)
"""
# based on 'quat_mul_vec3' implementation from linmath.h
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
qin = np.asarray(q, dtype=dtype)
points = np.asarray(points, dtype=dtype)
if out is not None:
assert points.shape == out.shape
toReturn = np.zeros(points.shape, dtype=dtype) if out is None else out
pin, pout = np.atleast_2d(points, toReturn)
pout[:, :] = pin[:, :] # copy values into output array
if qin.ndim == 1:
assert qin.shape[0] == 4
t = cross(qin[:3], pin[:, :3]) * dtype(2.0)
u = cross(qin[:3], t)
t *= qin[3]
pout[:, :3] += t
pout[:, :3] += u
elif qin.ndim == 2:
assert qin.shape[1] == 4 and qin.shape[0] == pin.shape[0]
t = cross(qin[:, :3], pin[:, :3])
t *= dtype(2.0)
u = cross(qin[:, :3], t)
t *= np.expand_dims(qin[:, 3], axis=1)
pout[:, :3] += t
pout[:, :3] += u
else:
raise ValueError("Input arguments have invalid dimensions.")
return toReturn
def accumQuat(qlist, out=None, dtype=None):
"""Accumulate quaternion rotations.
Chain multiplies an Nx4 array of quaternions, accumulating their rotations.
This function can be used for computing the orientation of joints in an
armature for forward kinematics. The first quaternion is treated as the
'root' and the last is the orientation of the end effector.
Parameters
----------
qlist : array_like
Nx4 array of quaternions to accumulate, where each row is a quaternion.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified. In this case, the same shape as
`qlist`.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Nx4 array of quaternions.
Examples
--------
Get the orientation of joints in an armature if we know their relative
angles::
shoulder = quatFromAxisAngle('-x', 45.0) # rotate shoulder down 45 deg
elbow = quatFromAxisAngle('+x', 45.0) # rotate elbow up 45 deg
wrist = quatFromAxisAngle('-x', 45.0) # rotate wrist down 45 deg
finger = quatFromAxisAngle('+x', 0.0) # keep finger in-line with wrist
armRotations = accumQuat([shoulder, elbow, wrist, finger])
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
qlist = np.asarray(qlist, dtype=dtype)
qlist = np.atleast_2d(qlist)
qr = np.zeros_like(qlist, dtype=dtype) if out is None else out
qr[:, :] = tuple(itertools.accumulate(
qlist[:], lambda a, b: multQuat(a, b, dtype=dtype)))
return qr
def alignTo(v, t, out=None, dtype=None):
"""Compute a quaternion which rotates one vector to align with another.
Parameters
----------
v : array_like
Vector [x, y, z] to rotate. Can be Nx3, but must have the same shape as
`t`.
t : array_like
Target [x, y, z] vector to align to. Can be Nx3, but must have the same
shape as `v`.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Quaternion which rotates `v` to `t`.
Examples
--------
Rotate some vectors to align with other vectors, inputs should be
normalized::
vec = [[1, 0, 0], [0, 1, 0], [1, 0, 0]]
targets = [[0, 1, 0], [0, -1, 0], [-1, 0, 0]]
qr = alignTo(vec, targets)
vecRotated = applyQuat(qr, vec)
numpy.allclose(vecRotated, targets) # True
Get matrix which orients vertices towards a point::
point = [5, 6, 7]
vec = [0, 0, -1] # initial facing is -Z (forward in GL)
targetVec = normalize(np.subtract(point, vec))
qr = alignTo(vec, targetVec) # get rotation to align
M = quatToMatrix(qr) # 4x4 transformation matrix
"""
# based off Quaternion::align from Quaternion.hpp from OpenMW
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v = normalize(v, dtype=dtype)
t = normalize(t, dtype=dtype)
if out is None:
if v.ndim == 1:
toReturn = np.zeros((4,), dtype=dtype)
else:
toReturn = np.zeros((v.shape[0], 4), dtype=dtype)
else:
toReturn = out
qr, v2d, t2d = np.atleast_2d(toReturn, v, t)
b = bisector(v2d, t2d, norm=True, dtype=dtype)
cosHalfAngle = dot(v2d, b, dtype=dtype)
nonparallel = cosHalfAngle > 0.0 # rotation is not 180 degrees
qr[nonparallel, :3] = cross(v2d[nonparallel], b[nonparallel], dtype=dtype)
qr[nonparallel, 3] = cosHalfAngle[nonparallel]
if np.all(nonparallel): # don't bother handling special cases
return toReturn + 0.0
# deal with cases where the vectors are facing exact opposite directions
ry = np.logical_and(np.abs(v2d[:, 0]) >= np.abs(v2d[:, 1]), ~nonparallel)
rx = np.logical_and(~ry, ~nonparallel)
getLength = lambda x, y: np.sqrt(x * x + y * y)
if not np.all(rx):
invLength = getLength(v2d[ry, 0], v2d[ry, 2])
invLength = np.where(invLength > 0.0, 1.0 / invLength, invLength) # avoid x / 0
qr[ry, 0] = -v2d[ry, 2] * invLength
qr[ry, 2] = v2d[ry, 0] * invLength
if not np.all(ry): # skip if all the same edge case
invLength = getLength(v2d[rx, 1], v2d[rx, 2])
invLength = np.where(invLength > 0.0, 1.0 / invLength, invLength)
qr[rx, 1] = v2d[rx, 2] * invLength
qr[rx, 2] = -v2d[rx, 1] * invLength
return toReturn + 0.0
def matrixToQuat(m, out=None, dtype=None):
"""Convert a rotation matrix to a quaternion.
Parameters
----------
m : array_like
3x3 rotation matrix (row-major). A 4x4 affine transformation matrix may
be provided, assuming the top-left 3x3 sub-matrix is orthonormal and
is a rotation group.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Rotation quaternion.
Notes
-----
* Depending on the input, returned quaternions may not be exactly the same
as the one used to construct the rotation matrix (i.e. by calling
`quatToMatrix`), typically when a large rotation angle is used. However,
the returned quaternion should result in the same rotation when applied to
points.
Examples
--------
Converting a rotation matrix from the OpenGL matrix stack to a quaternion::
glRotatef(45., -1, 0, 0)
m = np.zeros((4, 4), dtype='float32') # store the matrix
GL.glGetFloatv(
GL.GL_MODELVIEW_MATRIX,
m.ctypes.data_as(ctypes.POINTER(ctypes.c_float)))
qr = matrixToQuat(m.T) # must be transposed
Interpolation between two 4x4 transformation matrices::
interpWeight = 0.5
posStart = mStart[:3, 3]
oriStart = matrixToQuat(mStart)
posEnd = mEnd[:3, 3]
oriEnd = matrixToQuat(mEnd)
oriInterp = slerp(oriStart, oriEnd, interpWeight)
posInterp = lerp(posStart, posEnd, interpWeight)
mInterp = posOriToMatrix(posInterp, oriInterp)
"""
# based off example `Maths - Conversion Matrix to Quaternion` from
# https://www.euclideanspace.com/
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
m = np.asarray(m, dtype=dtype)
if m.shape == (4, 4,) or m.shape == (3, 4,):
m = m[:3, :3] # keep only rotation group sub-matrix
elif m.shape == (3, 3,):
pass # fine, nop
else:
raise ValueError("Input matrix `m` must be 3x3 or 4x4.")
toReturn = np.zeros((4,), dtype=dtype) if out is None else out
tr = m[0, 0] + m[1, 1] + m[2, 2]
if tr > 0.0:
s = np.sqrt(tr + 1.0) * 2.0
toReturn[3] = dtype(0.25) * s
toReturn[0] = (m[2, 1] - m[1, 2]) / s
toReturn[1] = (m[0, 2] - m[2, 0]) / s
toReturn[2] = (m[1, 0] - m[0, 1]) / s
elif m[0, 0] > m[1, 1] and m[0, 0] > m[2, 2]:
s = np.sqrt(dtype(1.0) + m[0, 0] - m[1, 1] - m[2, 2]) * dtype(2.0)
toReturn[3] = (m[2, 1] - m[1, 2]) / s
toReturn[0] = dtype(0.25) * s
toReturn[1] = (m[0, 1] + m[1, 0]) / s
toReturn[2] = (m[0, 2] + m[2, 0]) / s
elif m[1, 1] > m[2, 2]:
s = np.sqrt(dtype(1.0) + m[1, 1] - m[0, 0] - m[2, 2]) * dtype(2.0)
toReturn[3] = (m[0, 2] - m[2, 0]) / s
toReturn[0] = (m[0, 1] + m[1, 0]) / s
toReturn[1] = dtype(0.25) * s
toReturn[2] = (m[1, 2] + m[2, 1]) / s
else:
s = np.sqrt(dtype(1.0) + m[2, 2] - m[0, 0] - m[1, 1]) * dtype(2.0)
toReturn[3] = (m[1, 0] - m[0, 1]) / s
toReturn[0] = (m[0, 2] + m[2, 0]) / s
toReturn[1] = (m[1, 2] + m[2, 1]) / s
toReturn[2] = dtype(0.25) * s
return toReturn
# ------------------------------------------------------------------------------
# Matrix Operations
#
def quatToMatrix(q, out=None, dtype=None):
"""Create a 4x4 rotation matrix from a quaternion.
Parameters
----------
q : tuple, list or ndarray of float
Quaternion to convert in form [x, y, z, w] where w is real and x, y, z
are imaginary components.
out : ndarray or None
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
4x4 rotation matrix in row-major order.
Examples
--------
Convert a quaternion to a rotation matrix::
point = [0., 1., 0., 1.] # 4-vector form [x, y, z, 1.0]
ori = quatFromAxisAngle([0., 0., -1.], 90.0, degrees=True)
rotMat = quatToMatrix(ori)
# rotate 'point' using matrix multiplication
newPoint = np.matmul(rotMat.T, point) # returns [-1., 0., 0., 1.]
Rotate all points in an array (each row is a coordinate)::
points = np.asarray([[0., 0., 0., 1.],
[0., 1., 0., 1.],
[1., 1., 0., 1.]])
newPoints = points.dot(rotMat)
Notes
-----
* Quaternions are normalized prior to conversion.
"""
# based off implementations from
# https://github.com/glfw/glfw/blob/master/deps/linmath.h
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
R = np.zeros((4, 4,), dtype=dtype)
else:
dtype = np.dtype(out.dtype).type
R = out
R.fill(0.0)
q = normalize(q, dtype=dtype)
b, c, d, a = q[:]  # unpack [x, y, z, w]; `a` is the real component
vsqr = np.square(q)
u = dtype(2.0)
R[0, 0] = vsqr[3] + vsqr[0] - vsqr[1] - vsqr[2]
R[1, 0] = u * (b * c + a * d)
R[2, 0] = u * (b * d - a * c)
R[0, 1] = u * (b * c - a * d)
R[1, 1] = vsqr[3] - vsqr[0] + vsqr[1] - vsqr[2]
R[2, 1] = u * (c * d + a * b)
R[0, 2] = u * (b * d + a * c)
R[1, 2] = u * (c * d - a * b)
R[2, 2] = vsqr[3] - vsqr[0] - vsqr[1] + vsqr[2]
R[3, 3] = dtype(1.0)
R[:, :] += 0.0 # remove negative zeros
return R
def scaleMatrix(s, out=None, dtype=None):
"""Create a scaling matrix.
The resulting matrix is the same as one generated by a `glScale` call.
Parameters
----------
s : array_like, float or int
Scaling factor(s). If `s` is scalar (float), scaling will be uniform.
Providing a vector of scaling values [sx, sy, sz] will result in an
anisotropic scaling matrix if any of the values differ.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
4x4 scaling matrix in row-major order.
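Examples
--------
A minimal sketch of uniform and anisotropic scaling::
S = scaleMatrix(2.0)  # uniform, 2x along all axes
S = scaleMatrix([2.0, 1.0, 1.0])  # stretch along X only
pnt = applyMatrix(S, [1., 1., 1.])  # -> [2., 1., 1.]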
"""
# from glScale
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
S = np.zeros((4, 4,), dtype=dtype)
else:
dtype = np.dtype(out.dtype).type
S = out
S.fill(0.0)
if isinstance(s, (float, int,)):
S[0, 0] = S[1, 1] = S[2, 2] = dtype(s)
else:
S[0, 0] = dtype(s[0])
S[1, 1] = dtype(s[1])
S[2, 2] = dtype(s[2])
S[3, 3] = 1.0
return S
def rotationMatrix(angle, axis=(0., 0., -1.), out=None, dtype=None):
"""Create a rotation matrix.
The resulting matrix will rotate points about `axis` by `angle`. The
resulting matrix is similar to that produced by a `glRotate` call.
Parameters
----------
angle : float
Rotation angle in degrees.
axis : array_like or str
Axis vector components or axis name. If a vector, input must be length
3. A string can be specified for rotations about world axes (eg. `'+x'`,
`'-z'`, `'+y'`, etc.)
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
4x4 rotation matrix in row-major order. Will be the same array as `out`
if specified, if not, a new array will be allocated.
Notes
-----
* Vector `axis` is normalized before creating the matrix.
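Examples
--------
A minimal sketch, rotating a point 90 degrees about the +Z axis::
R = rotationMatrix(90.0, [0., 0., 1.])  # or axis='+z'
pnt = applyMatrix(R, [1., 0., 0.])  # -> ~[0., 1., 0.]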
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
R = np.zeros((4, 4,), dtype=dtype)
else:
dtype = np.dtype(out.dtype).type
R = out
R.fill(0.0)
try:
axis = VEC_AXES[axis] if isinstance(axis, str) else axis
except KeyError:
raise ValueError(
"Value of `axis` must be either '+x', '-x', '+y', '-x', '+z' or "
"'-z' or length 3 vector.")
axis = normalize(axis, dtype=dtype)
if np.count_nonzero(axis) == 0:
raise ValueError("Value for `axis` is zero-length.")
angle = np.radians(angle, dtype=dtype)
c = np.cos(angle, dtype=dtype)
s = np.sin(angle, dtype=dtype)
xs, ys, zs = axis * s
x2, y2, z2 = np.square(axis) # type inferred by input
x, y, z = axis
cd = dtype(1.0) - c
R[0, 0] = x2 * cd + c
R[0, 1] = x * y * cd - zs
R[0, 2] = x * z * cd + ys
R[1, 0] = y * x * cd + zs
R[1, 1] = y2 * cd + c
R[1, 2] = y * z * cd - xs
R[2, 0] = x * z * cd - ys
R[2, 1] = y * z * cd + xs
R[2, 2] = z2 * cd + c
R[3, 3] = dtype(1.0)
R[:, :] += 0.0 # remove negative zeros
return R
def translationMatrix(t, out=None, dtype=None):
"""Create a translation matrix.
The resulting matrix is the same as generated by a `glTranslate` call.
Parameters
----------
t : ndarray, tuple, or list of float
Translation vector [tx, ty, tz].
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
4x4 translation matrix in row-major order. Will be the same array as
`out` if specified, if not, a new array will be allocated.
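Examples
--------
A minimal sketch, offsetting a point 5 units along +Z::
T = translationMatrix([0., 0., 5.])
pnt = applyMatrix(T, [1., 0., 0.])  # -> [1., 0., 5.]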
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
T = np.identity(4, dtype=dtype)
else:
dtype = np.dtype(out.dtype).type
T = out
T.fill(0.0)
np.fill_diagonal(T, 1.0)
T[:3, 3] = np.asarray(t, dtype=dtype)
return T
def invertMatrix(m, out=None, dtype=None):
"""Invert a square matrix.
Parameters
----------
m : array_like
Square matrix to invert. Inputs can be 4x4, 3x3 or 2x2.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Matrix which is the inverse of `m`.
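Examples
--------
A minimal sketch, undoing a rigid transformation (values illustrative)::
M = concatenate([rotationMatrix(45.0), translationMatrix([0., 0., -3.])])
Minv = invertMatrix(M)  # fast path, M is affine with an orthonormal rotation
pnt = applyMatrix(M, [1., 0., 0.])
pntBack = applyMatrix(Minv, pnt)  # -> ~[1., 0., 0.]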
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = out.dtype
m = np.asarray(m, dtype=dtype) # input as array
toReturn = np.empty_like(m, dtype=dtype) if out is None else out
toReturn.fill(0.0)
if m.shape == (4, 4,):
# Special handling of 4x4 matrices, if affine and orthogonal
# (homogeneous), simply transpose the matrix rather than doing a full
# invert.
if isOrthogonal(m[:3, :3]) and isAffine(m):
rg = m[:3, :3]
toReturn[:3, :3] = rg.T
toReturn[:3, 3] = -m[:3, 3].dot(rg)
#toReturn[0, 3] = \
# -(m[0, 0] * m[0, 3] + m[1, 0] * m[1, 3] + m[2, 0] * m[2, 3])
#toReturn[1, 3] = \
# -(m[0, 1] * m[0, 3] + m[1, 1] * m[1, 3] + m[2, 1] * m[2, 3])
#toReturn[2, 3] = \
# -(m[0, 2] * m[0, 3] + m[1, 2] * m[1, 3] + m[2, 2] * m[2, 3])
toReturn[3, 3] = 1.0
else:
toReturn[:, :] = np.linalg.inv(m)
elif m.shape[0] == m.shape[1]: # square, other than 4x4
toReturn[:, :] = np.linalg.inv(m) if not isOrthogonal(m) else m.T
else:
toReturn[:, :] = np.linalg.inv(m)
return toReturn
def multMatrix(matrices, reverse=False, out=None, dtype=None):
"""Chain multiplication of two or more matrices.
Multiply a sequence of matrices together, reducing to a single product
matrix. For instance, specifying `matrices` the sequence of matrices (A, B,
C, D) will return the product (((AB)C)D). If `reverse=True`, the product
will be (A(B(CD))).
Alternatively, a 3D array can be specified to `matrices` as a stack, where
an index along axis 0 references a 2D slice storing matrix values. The
product of the matrices along the axis will be returned. This is a bit more
efficient than specifying separate matrices in a sequence, but the
difference is negligible when only a few matrices are being multiplied.
Parameters
----------
matrices : list, tuple or ndarray
Sequence or stack of matrices to multiply. All matrices must have the
same dimensions.
reverse : bool, optional
Multiply matrices right-to-left. This is useful when dealing with
transformation matrices, where the order of operations for transforms
will appear the same as the order the matrices are specified. Default is
'False'. When `True`, this function behaves similarly to
:func:`concatenate`.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Matrix product.
Notes
-----
* You may use `numpy.matmul` when dealing with only two matrices instead of
`multMatrix`.
* If a single matrix is specified, the returned product will have the same
values.
Examples
--------
Chain multiplication of SRT matrices::
translate = translationMatrix((0.035, 0, -0.5))
rotate = rotationMatrix(90.0, (0, 1, 0))
scale = scaleMatrix(2.0)
SRT = multMatrix((translate, rotate, scale))
Same as above, but matrices are in a 3x4x4 array::
matStack = np.array((translate, rotate, scale))
# or ...
# matStack = np.zeros((3, 4, 4))
# matStack[0, :, :] = translate
# matStack[1, :, :] = rotate
# matStack[2, :, :] = scale
SRT = multMatrix(matStack)
Using `reverse=True` allows you to specify transformation matrices in the
order which they will be applied::
SRT = multMatrix(np.array((scale, rotate, translate)), reverse=True)
"""
# convert matrix types
dtype = np.float64 if dtype is None else np.dtype(dtype).type
matrices = np.asarray(matrices, dtype=dtype) # convert to array
if matrices.ndim == 2:  # promote a single matrix to a stack of one
matrices = matrices[np.newaxis, :, :]
prod = functools.reduce(
np.matmul, matrices[:] if not reverse else matrices[::-1])
if out is not None:
toReturn = out
toReturn[:, :] = prod
else:
toReturn = prod
return toReturn
def concatenate(matrices, out=None, dtype=None):
"""Concatenate matrix transformations.
Chain multiply matrices describing transform operations into a single matrix
product, that when applied, transforms points and vectors with each
operation in the order they're specified.
Parameters
----------
matrices : list or tuple
List of matrices to concatenate. All matrices must have the same
size, usually 4x4 or 3x3.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Matrix product.
See Also
--------
* multMatrix : Chain multiplication of matrices.
Notes
-----
* This function should only be used for combining transformation matrices.
Use `multMatrix` for general matrix chain multiplication.
Examples
--------
Create an SRT (scale, rotate, and translate) matrix to convert model-space
coordinates to world-space::
S = scaleMatrix([2.0, 2.0, 2.0]) # scale model 2x
R = rotationMatrix(-90., [0., 0., -1]) # rotate -90 about -Z axis
T = translationMatrix([0., 0., -5.]) # translate point 5 units away
# product matrix when applied to points will scale, rotate and transform
# in that order.
SRT = concatenate([S, R, T])
# transform a point in model-space coordinates to world-space
pointModel = np.array([0., 1., 0., 1.])
pointWorld = np.matmul(SRT, pointModel.T) # point in WCS
# ... or ...
pointWorld = applyMatrix(SRT, pointModel)
Create a model-view matrix from a world-space pose represented by an
orientation (quaternion) and position (vector). The resulting matrix will
transform model-space coordinates to eye-space::
# eye pose as quaternion and vector
stimOri = quatFromAxisAngle([0., 0., -1.], -45.0)
stimPos = [0., 1.5, -5.]
# create model matrix
R = quatToMatrix(stimOri)
T = translationMatrix(stimPos)
M = concatenate([R, T]) # model matrix
# create a view matrix, can also be represented as 'pos' and 'ori'
eyePos = [0., 1.5, 0.]
eyeFwd = [0., 0., -1.]
eyeUp = [0., 1., 0.]
V = lookAt(eyePos, eyeFwd, eyeUp) # from viewtools
# modelview matrix
MV = concatenate([M, V])
You can put the created matrix in the OpenGL matrix stack as shown below.
Note that the matrix must have a 32-bit floating-point data type and needs
to be loaded transposed since OpenGL takes matrices in column-major order::
GL.glMatrixMode(GL.GL_MODELVIEW)
# pyglet
MV = np.asarray(MV, dtype='float32') # must be 32-bit float!
ptrMV = MV.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
GL.glLoadTransposeMatrixf(ptrMV)
# PyOpenGL
MV = np.asarray(MV, dtype='float32')
GL.glLoadTransposeMatrixf(MV)
Furthermore, you can convert a point from model-space to homogeneous
clip-space by concatenating the projection, view, and model matrices::
# compute projection matrix, functions here are from 'viewtools'
screenWidth = 0.52
screenAspect = w / h
scrDistance = 0.55
frustum = computeFrustum(screenWidth, screenAspect, scrDistance)
P = perspectiveProjectionMatrix(*frustum)
# multiply model-space points by MVP to convert them to clip-space
MVP = concatenate([M, V, P])
pointModel = np.array([0., 1., 0., 1.])
pointClipSpace = np.matmul(MVP, pointModel.T)
"""
return multMatrix(matrices, reverse=True, out=out, dtype=dtype)
def matrixFromEulerAngles(rx, ry, rz, degrees=True, out=None, dtype=None):
"""Construct a 4x4 rotation matrix from Euler angles.
Rotations are combined by first rotating about the X axis, then Y, and
finally Z.
Parameters
----------
rx, ry, rz : float
Rotation angles (pitch, yaw, and roll).
degrees : bool, optional
Rotation angles are specified in degrees. If `False`, they will be
assumed as radians. Default is `True`.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
4x4 rotation matrix.
Examples
--------
Demonstration of how a combination of axis-angle rotations is equivalent
to a single call of `matrixFromEulerAngles`::
m1 = matrixFromEulerAngles(90., 45., 135.)
# construct rotation matrix from 3 orthogonal rotations
rx = rotationMatrix(90., (1, 0, 0)) # x-axis
ry = rotationMatrix(45., (0, 1, 0)) # y-axis
rz = rotationMatrix(135., (0, 0, 1)) # z-axis
m2 = concatenate([rz, ry, rx]) # note the order
print(numpy.allclose(m1, m2)) # True
Not only does `matrixFromEulerAngles` require less code, it also is
considerably more efficient than constructing and multiplying multiple
matrices.
"""
# from https://www.j3d.org/matrix_faq/matrfaq_latest.html
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
toReturn = np.zeros((4, 4,), dtype=dtype)
else:
dtype = np.dtype(out.dtype).type
toReturn = out
toReturn.fill(0.0)
angles = np.asarray([rx, ry, rz], dtype=dtype)
if degrees:
angles = np.radians(angles)
a, c, e = np.cos(angles)
b, d, f = np.sin(angles)
ad = a * d
bd = b * d
toReturn[0, 0] = c * e
toReturn[0, 1] = -c * f
toReturn[0, 2] = d
toReturn[1, 0] = bd * e + a * f
toReturn[1, 1] = -bd * f + a * e
toReturn[1, 2] = -b * c
toReturn[2, 0] = -ad * e + b * f
toReturn[2, 1] = ad * f + b * e
toReturn[2, 2] = a * c
toReturn[3, 3] = 1.0
return toReturn
def isOrthogonal(m):
"""Check if a square matrix is orthogonal.
If a matrix is orthogonal, its columns form an orthonormal basis and is
non-singular. An orthogonal matrix is invertible by simply taking the
transpose of the matrix.
Parameters
----------
m : array_like
Square matrix, either 2x2, 3x3 or 4x4.
Returns
-------
bool
`True` if the matrix is orthogonal.
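Examples
--------
A minimal sketch::
isOrthogonal(rotationMatrix(45.0)[:3, :3])  # True
isOrthogonal(scaleMatrix(2.0)[:3, :3])  # False, scales vectors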
"""
if not isinstance(m, (np.ndarray,)):
m = np.asarray(m)
assert 2 <= m.shape[0] <= 4 # 2x2 to 4x4
assert m.shape[0] == m.shape[1] # must be square
dtype = np.dtype(m.dtype).type
return np.allclose(np.matmul(m.T, m, dtype=dtype),
np.identity(m.shape[0], dtype))
def isAffine(m):
"""Check if a 4x4 square matrix describes an affine transformation.
Parameters
----------
m : array_like
4x4 transformation matrix.
Returns
-------
bool
`True` if the matrix is affine.
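Examples
--------
A minimal sketch; rigid transforms are affine, while perspective
projection matrices are not::
T = translationMatrix([1., 2., 3.])
isAffine(T)  # True, bottom row is [0, 0, 0, 1]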
"""
if not isinstance(m, (np.ndarray,)):
m = np.asarray(m)
assert m.shape[0] == m.shape[1] == 4
dtype = np.dtype(m.dtype).type
eps = np.finfo(dtype).eps
return np.all(np.abs(m[3, :3]) < eps) and np.abs(dtype(1.0) - m[3, 3]) < eps
def applyMatrix(m, points, out=None, dtype=None):
"""Apply a matrix over a 2D array of points.
This function behaves similarly to the following `Numpy` statement::
points[:, :] = points.dot(m.T)
Transformation matrices specified to `m` must have dimensions 4x4, 3x4, 3x3
or 2x2. With the exception of 4x4 matrices, input `points` must have the
same number of columns as the matrix has rows. 4x4 matrices can be used to
transform both Nx4 and Nx3 arrays.
Parameters
----------
m : array_like
Matrix with dimensions 2x2, 3x3, 3x4 or 4x4.
points : array_like
2D array of points/coordinates to transform. Each row should have length
appropriate for the matrix being used.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Transformed coordinates.
Notes
-----
* Input (`points`) and output (`out`) arrays cannot be the same instance for
this function.
* In the case of 4x4 input matrices, this function performs optimizations
based on whether the input matrix is affine, greatly improving performance
when working with Nx3 arrays.
Examples
--------
Construct a matrix and transform a point::
# identity 3x3 matrix for this example
M = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
pnt = [1.0, 0.0, 0.0]
pntNew = applyMatrix(M, pnt)
Construct an SRT matrix (scale, rotate, transform) and transform an array of
points::
S = scaleMatrix([5.0, 5.0, 5.0]) # scale 5x
R = rotationMatrix(180., [0., 0., -1]) # rotate 180 degrees
T = translationMatrix([0., 1.5, -3.]) # translate point up and away
M = concatenate([S, R, T]) # create transform matrix
# points to transform
points = np.array([[0., 1., 0., 1.], [-1., 0., 0., 1.]]) # [x, y, z, w]
newPoints = applyMatrix(M, points) # apply the transformation
Convert CIE-XYZ colors to sRGB::
sRGBMatrix = [[3.2404542, -1.5371385, -0.4985314],
[-0.969266, 1.8760108, 0.041556 ],
[0.0556434, -0.2040259, 1.0572252]]
colorsRGB = applyMatrix(sRGBMatrix, colorsXYZ)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
m = np.asarray(m, dtype=dtype)
points = np.asarray(points, dtype=dtype)
if out is None:
toReturn = np.zeros_like(points, dtype=dtype)
else:
if id(out) == id(points):
raise ValueError('Output array cannot be same as input.')
toReturn = out
pout, p = np.atleast_2d(toReturn, points)
if m.shape[0] == m.shape[1] == 4: # 4x4 matrix
if pout.shape[1] == 3: # Nx3
pout[:, :] = p.dot(m[:3, :3].T)
pout += m[:3, 3]
# find `rcpW` as suggested in OpenXR's xr_linear.h header
# reciprocal of `w` if the matrix is not orthonormal
if not isAffine(m):
rcpW = 1.0 / (m[3, 0] * p[:, 0] +
m[3, 1] * p[:, 1] +
m[3, 2] * p[:, 2] +
m[3, 3])
pout *= rcpW[:, np.newaxis]
elif pout.shape[1] == 4: # Nx4
pout[:, :] = p.dot(m.T)
else:
raise ValueError(
'Input array dimensions invalid. Should be Nx3 or Nx4 when '
'input matrix is 4x4.')
elif m.shape[0] == 3 and m.shape[1] == 4: # 3x4 matrix
if pout.shape[1] == 3: # Nx3
pout[:, :] = p.dot(m[:3, :3].T)
pout += m[:3, 3]
else:
raise ValueError(
'Input array dimensions invalid. Should be Nx3 when input '
'matrix is 3x4.')
elif m.shape[0] == m.shape[1] == 3: # 3x3 matrix, e.g colors
if pout.shape[1] == 3: # Nx3
pout[:, :] = p.dot(m.T)
else:
raise ValueError(
'Input array dimensions invalid. Should be Nx3 when '
'input matrix is 3x3.')
elif m.shape[0] == m.shape[1] == 2:  # 2x2 matrix
if pout.shape[1] == 2: # Nx2
pout[:, :] = p.dot(m.T)
else:
raise ValueError(
'Input array dimensions invalid. Should be Nx2 when '
'input matrix is 2x2.')
else:
raise ValueError(
'Only a square matrix with dimensions 2, 3 or 4 can be used.')
return toReturn
def posOriToMatrix(pos, ori, out=None, dtype=None):
"""Convert a rigid body pose to a 4x4 transformation matrix.
A pose is represented by a position coordinate `pos` and orientation
quaternion `ori`.
Parameters
----------
pos : ndarray, tuple, or list of float
Position vector [x, y, z].
ori : tuple, list or ndarray of float
Orientation quaternion in form [x, y, z, w] where w is real and x, y, z
are imaginary components.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
4x4 transformation matrix.
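Examples
--------
A minimal sketch, building a rigid body transform from a pose::
ori = quatFromAxisAngle([0., 0., -1.], 90.0, degrees=True)
pos = [0., 1.5, -3.]
M = posOriToMatrix(pos, ori)
# equivalent to concatenate([quatToMatrix(ori), translationMatrix(pos)])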
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
toReturn = np.zeros((4, 4,), dtype=dtype)
else:
dtype = np.dtype(out.dtype).type
toReturn = out
transMat = translationMatrix(pos, dtype=dtype)
rotMat = quatToMatrix(ori, dtype=dtype)
return np.matmul(transMat, rotMat, out=toReturn)
def transform(pos, ori, points, out=None, dtype=None):
"""Transform points using a position and orientation. Points are rotated
then translated.
Parameters
----------
pos : array_like
Position vector in form [x, y, z] or [x, y, z, 1].
ori : array_like
Orientation quaternion in form [x, y, z, w] where w is real and x, y, z
are imaginary components.
points : array_like
Point(s) [x, y, z] to transform.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Transformed points.
Examples
--------
Transform points by a position coordinate and orientation quaternion::
# rigid body pose
ori = quatFromAxisAngle([0., 0., -1.], 90.0, degrees=True)
pos = [0., 1.5, -3.]
# points to transform
points = np.array([[0., 1., 0., 1.], [-1., 0., 0., 1.]]) # [x, y, z, 1]
outPoints = np.zeros_like(points) # output array
transform(pos, ori, points, out=outPoints) # do the transformation
You can get the same results as the previous example using a matrix by doing
the following::
R = rotationMatrix(90., [0., 0., -1])
T = translationMatrix([0., 1.5, -3.])
M = concatenate([R, T])
applyMatrix(M, points, out=outPoints)
If you are defining transformations with quaternions and coordinates, you
can skip the costly matrix creation process by using `transform`.
Notes
-----
* In performance tests, `applyMatrix` is noticeably faster than `transform`
for very large arrays, however this is only true if you are applying the
same transformation to all points.
* If the input arrays for `points` or `pos` are Nx4, the last column is
ignored.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
pos = np.asarray(pos, dtype=dtype)
ori = np.asarray(ori, dtype=dtype)
points = np.asarray(points, dtype=dtype)
if out is None:
toReturn = np.zeros_like(points, dtype=dtype)
else:
if out.shape != points.shape:
raise ValueError(
"Array 'out' and 'points' do not have matching shapes.")
toReturn = out
pout, points, pos2d = np.atleast_2d(toReturn, points, pos) # create 2d views
# apply rotation
applyQuat(ori, points, out=pout)
# apply translation
pout[:, 0] += pos2d[:, 0]
pout[:, 1] += pos2d[:, 1]
pout[:, 2] += pos2d[:, 2]
return toReturn
def scale(sf, points, out=None, dtype=None):
"""Scale points by a factor.
This is useful for converting points between units, and to stretch or
compress points along a given axis. Scaling can be uniform, where the same
factor is applied along all axes, or anisotropic along specific axes.
Parameters
----------
sf : array_like or float
Scaling factor. If scalar, all points will be scaled uniformly by that
factor. If a vector, scaling will be anisotropic along an axis.
points : array_like
Point(s) [x, y, z] to scale.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Scaled points.
Examples
--------
Apply uniform scaling to points, here we scale to convert points in
centimeters to meters::
CM_TO_METERS = 1.0 / 100.0
pointsCM = [[1, 2, 3], [4, 5, 6], [-1, 1, 0]]
pointsM = scale(CM_TO_METERS, pointsCM)
Anisotropic scaling along the X and Y axis::
pointsM = scale((SCALE_FACTOR_X, SCALE_FACTOR_Y), pointsCM)
Scale only on the X axis::
pointsM = scale((SCALE_FACTOR_X,), pointsCM)
Apply scaling on the Z axis only::
pointsM = scale((1.0, 1.0, SCALE_FACTOR_Z), pointsCM)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
points = np.asarray(points, dtype=dtype)
toReturn = np.zeros_like(points, dtype=dtype) if out is None else out
toReturn, points = np.atleast_2d(toReturn, points) # create 2d views
# uniform scaling
if isinstance(sf, (float, int)):
toReturn[:, :] = points * sf
elif isinstance(sf, (list, tuple, np.ndarray)): # anisotropic
sf = np.asarray(sf, dtype=dtype)
sfLen = len(sf)
if sfLen <= 3:
toReturn[:, :] = points
toReturn[:, :len(sf)] *= sf
else:
raise ValueError("Scale factor array must have length <= 3.")
return toReturn
def normalMatrix(modelMatrix, out=None, dtype=None):
"""Get the normal matrix from a model matrix.
Parameters
----------
modelMatrix : array_like
4x4 homogeneous model matrix.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Normal matrix.
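Examples
--------
A minimal sketch, computing a matrix for transforming surface normals
(values illustrative)::
M = concatenate([rotationMatrix(45.0), translationMatrix([0., 0., -3.])])
N = normalMatrix(M)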
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
modelMatrix = np.asarray(modelMatrix, dtype=dtype)
toReturn = np.zeros((4, 4), dtype=dtype) if out is None else out
toReturn[:, :] = np.linalg.inv(modelMatrix).T
return toReturn
def forwardProject(objPos, modelView, proj, viewport=None, out=None, dtype=None):
"""Project a point in a scene to a window coordinate.
This function is similar to `gluProject` and can be used to find the window
coordinate which a point projects to.
Parameters
----------
objPos : array_like
Object coordinates (x, y, z). If an Nx3 array of coordinates is
specified, where each row contains an object coordinate, this function
will return an array of projected coordinates with the same size.
modelView : array_like
4x4 combined model and view matrix. Specify only the view matrix if
`objPos` is already in scene (world) coordinates.
proj : array_like
4x4 projection matrix used for rendering.
viewport : array_like
Viewport rectangle for the window [x, y, w, h]. If not specified, the
returned values will be in normalized device coordinates.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Normalized device or viewport coordinates [x, y, z] of the point. The
`z` component is similar to the depth buffer value for the object point.
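Examples
--------
A minimal sketch; `view` and `proj` stand in for the actual view and
projection matrices used for rendering (e.g. from `viewtools`)::
viewport = [0, 0, 800, 600]  # window is 800x600 pixels
winCoord = forwardProject([0., 0., -5.], view, proj, viewport=viewport)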
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
toReturn = np.zeros_like(objPos, dtype=dtype) if out is None else out
winCoord, objPos = np.atleast_2d(toReturn, objPos)
# transformation matrix
mvp = np.matmul(proj, modelView)
# must have `w` for this one
if objPos.shape[1] == 3:
        temp = np.zeros((objPos.shape[0], 4), dtype=dtype)  # one row per point
        temp[:, :3] = objPos
        temp[:, 3] = 1.0  # positions need `w` = 1 prior to projection
        objPos = temp
# transform the points
objNorm = applyMatrix(mvp, objPos, dtype=dtype)
if viewport is not None:
# if we have a viewport, transform it
        objNorm[:, :] += 1.0
        objNorm[:, :] /= 2.0  # NDC [-1, 1] -> [0, 1] before applying the rect
        winCoord[:, 0] = viewport[0] + viewport[2] * objNorm[:, 0]
        winCoord[:, 1] = viewport[1] + viewport[3] * objNorm[:, 1]
        winCoord[:, 2] = objNorm[:, 2]
else:
# already in NDC
winCoord[:, :] = objNorm
return toReturn # ref to winCoord
def reverseProject(winPos, modelView, proj, viewport=None, out=None, dtype=None):
"""Unproject window coordinates into object or scene coordinates.
    This function works like `gluUnProject` and can be used to find the object
    or scene coordinate at a point on-screen (mouse coordinate or pixel). The
coordinate can then be used to create a direction vector from the viewer's
eye location. Another use of this function is to convert depth buffer
samples to object or scene coordinates. This is the inverse operation of
:func:`forwardProject`.
Parameters
----------
winPos : array_like
Window coordinates (x, y, z). If `viewport` is not specified, these
should be normalized device coordinates. If an Nx3 array of coordinates
is specified, where each row contains a window coordinate this function
will return an array of unprojected coordinates with the same size.
Usually, you only need to specify the `x` and `y` coordinate, leaving
`z` as zero. However, you can specify `z` if sampling from a depth map
or buffer to convert a depth sample to an actual location.
modelView : array_like
4x4 combined model and view matrix for returned value to be object
coordinates. Specify only the view matrix for a coordinate in the scene.
proj : array_like
4x4 projection matrix used for rendering.
viewport : array_like
        Viewport rectangle for the window [x, y, w, h]. Do not specify one if
        `winPos` is already in normalized device coordinates.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Object or scene coordinates.
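    Examples
    --------
    A minimal sketch recovering a scene coordinate from a window coordinate,
    the inverse of the `forwardProject` example (identity matrices are again
    placeholders for a real camera setup)::
        modelView = np.identity(4)
        proj = np.identity(4)
        objPos = reverseProject([400., 300., 0.], modelView, proj,
                                viewport=[0, 0, 800, 600])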
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(dtype).type
toReturn = np.zeros_like(winPos, dtype=dtype) if out is None else out
objCoord, winPos = np.atleast_2d(toReturn, winPos)
# get inverse of model and projection matrix
invMVP = np.linalg.inv(np.matmul(proj, modelView))
if viewport is not None:
# if we have a viewport, we need to transform to NDC first
        objCoord[:, 0] = 2. * (winPos[:, 0] - viewport[0]) / viewport[2]
        objCoord[:, 1] = 2. * (winPos[:, 1] - viewport[1]) / viewport[3]
        objCoord[:, 2] = 2. * winPos[:, 2]
        objCoord -= 1.  # viewport range [0, 1] back to NDC [-1, 1]
objCoord[:, :] = applyMatrix(invMVP, objCoord, dtype=dtype)
else:
# already in NDC, just apply
objCoord[:, :] = applyMatrix(invMVP, winPos, dtype=dtype)
return toReturn # ref to objCoord
# ------------------------------------------------------------------------------
# Misc. Math Functions
#
def zeroFix(a, inplace=False, threshold=None):
"""Fix zeros in an array.
This function truncates very small numbers in an array to zero and removes
any negative zeros.
Parameters
----------
a : ndarray
Input array, must be a Numpy array.
inplace : bool
Fix an array inplace. If `True`, the input array will be modified,
otherwise a new array will be returned with same `dtype` and shape with
the fixed values.
threshold : float or None
Threshold for truncation. If `None`, the machine epsilon value for the
input array `dtype` will be used. You can specify a custom threshold as
a float.
Returns
-------
ndarray
Output array with zeros fixed.
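    Examples
    --------
    Truncate a tiny value and clean up a negative zero (a minimal sketch)::
        a = np.array([1e-20, -0.0, 0.25])
        zeroFix(a)  # -> array([0.  , 0.  , 0.25])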
"""
toReturn = np.copy(a) if not inplace else a
toReturn += 0.0 # remove negative zeros
threshold = np.finfo(a.dtype).eps if threshold is None else float(threshold)
toReturn[np.abs(toReturn) < threshold] = 0.0 # make zero
return toReturn
def lensCorrection(xys, coefK=(1.0,), distCenter=(0., 0.), out=None,
dtype=None):
"""Lens correction (or distortion) using the division model with even
polynomial terms.
Calculate new vertex positions or texture coordinates to apply radial
warping, such as 'pincushion' and 'barrel' distortion. This is to compensate
for optical distortion introduced by lenses placed in the optical path of
the viewer and the display (such as in an HMD).
See references[1]_ for implementation details.
Parameters
----------
xys : array_like
Nx2 list of vertex positions or texture coordinates to distort. Works
correctly only if input values range between -1.0 and 1.0.
coefK : array_like or float
Distortion coefficients K_n. Specifying multiple values will add more
polynomial terms to the distortion formula. Positive values will produce
'barrel' distortion, whereas negative will produce 'pincushion'
distortion. In most cases, two or three coefficients are adequate,
depending on the degree of distortion.
distCenter : array_like, optional
X and Y coordinate of the distortion center (eg. (0.2, -0.4)).
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Array of distorted vertices.
Notes
-----
* At this time tangential distortion (i.e. due to a slant in the display)
cannot be corrected for.
References
----------
    .. [1] Fitzgibbon, A. W. (2001). Simultaneous linear estimation of multiple
view geometry and lens distortion. Proceedings of the 2001 IEEE Computer
Society Conference on Computer Vision and Pattern Recognition (CVPR).
IEEE.
Examples
--------
Creating a lens correction mesh with barrel distortion (eg. for HMDs)::
vertices, textureCoords, normals, faces = gltools.createMeshGrid(
subdiv=11, tessMode='center')
# recompute vertex positions
vertices[:, :2] = mt.lensCorrection(vertices[:, :2], coefK=(5., 5.))
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(dtype).type
if isinstance(coefK, (float, int,)):
coefK = (coefK,)
xys = np.asarray(xys, dtype=dtype)
coefK = np.asarray(coefK, dtype=dtype)
d_minus_c = xys - np.asarray(distCenter, dtype=dtype)
r = np.power(length(d_minus_c, dtype=dtype)[:, np.newaxis],
np.arange(len(coefK), dtype=dtype) * 2. + 2.)
toReturn = np.zeros_like(xys, dtype=dtype) if out is None else out
denom = dtype(1.0) + dot(coefK, r, dtype=dtype)
toReturn[:, :] = xys + (d_minus_c / denom[:, np.newaxis])
return toReturn
def lensCorrectionSpherical(xys, coefK=1.0, aspect=1.0, out=None, dtype=None):
"""Simple lens correction.
    Lens correction for spherical lenses with distortion centered at the
    middle of the display. See references[1]_ for implementation details.
Parameters
----------
xys : array_like
Nx2 list of vertex positions or texture coordinates to distort. Assumes
the output will be rendered to normalized device coordinates where
points range from -1.0 to 1.0.
coefK : float
Distortion coefficient. Use positive numbers for pincushion distortion
and negative for barrel distortion.
aspect : float
Aspect ratio of the target window or buffer (width / height).
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
ndarray
Array of distorted vertices.
References
----------
.. [1] Lens Distortion White Paper, Andersson Technologies LLC,
www.ssontech.com/content/lensalg.html (obtained 07/28/2020)
Examples
--------
Creating a lens correction mesh with barrel distortion (eg. for HMDs)::
vertices, textureCoords, normals, faces = gltools.createMeshGrid(
subdiv=11, tessMode='center')
# recompute vertex positions
        vertices[:, :2] = mt.lensCorrectionSpherical(vertices[:, :2], coefK=2.0)
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(dtype).type
toReturn = np.empty_like(xys, dtype=dtype) if out is None else out
xys = np.asarray(xys, dtype=dtype)
toReturn[:, 0] = u = xys[:, 0]
toReturn[:, 1] = v = xys[:, 1]
coefKCubed = np.power(coefK, 3, dtype=dtype)
r2 = aspect * aspect * u * u + v * v
r2sqr = np.sqrt(r2, dtype=dtype)
f = 1. + r2 * (coefK + coefKCubed * r2sqr)
toReturn[:, 0] *= f
toReturn[:, 1] *= f
return toReturn
class infrange():
"""
Similar to base Python `range`, but allowing the step to be a float or even
0, useful for specifying ranges for logical comparisons.
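    Examples
    --------
    A continuous comparison range (a minimal sketch)::
        r = infrange(0.0, 10.0)
        5.0 in r  # True, since 0.0 < 5.0 < 10.0
        r < 20.0  # True, 20.0 lies beyond `max`
        (r + 1.0).min  # 1.0, arithmetic shifts both bounds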
"""
def __init__(self, min, max, step=0):
self.min = min
self.max = max
self.step = step
@property
def range(self):
return abs(self.max-self.min)
def __lt__(self, other):
return other > self.max
def __le__(self, other):
return other > self.min
def __gt__(self, other):
return self.min > other
def __ge__(self, other):
return self.max > other
def __contains__(self, item):
if self.step == 0:
return self.min < item < self.max
else:
return item in np.linspace(self.min, self.max, int(self.range/self.step)+1)
def __eq__(self, item):
if isinstance(item, self.__class__):
return all((
self.min == item.min,
self.max == item.max,
self.step == item.step
))
return item in self
def __add__(self, other):
return self.__class__(self.min+other, self.max+other, self.step)
def __sub__(self, other):
return self.__class__(self.min - other, self.max - other, self.step)
def __mul__(self, other):
return self.__class__(self.min * other, self.max * other, self.step * other)
    def __truediv__(self, other):
        return self.__class__(self.min / other, self.max / other, self.step / other)
if __name__ == "__main__":
pass
| 137,168 | Python | .py | 3,215 | 35.390047 | 88 | 0.615813 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,397 | monitorunittools.py | psychopy_psychopy/psychopy/tools/monitorunittools.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Functions and classes related to unit conversion respective to a particular
monitor"""
from psychopy import monitors
from psychopy import logging
import numpy as np
import re
from numpy import array, tan, pi, radians, hypot, degrees, arctan
from math import hypot as hypot3d
# Maps supported coordinate unit type names to the function that converts
# the given unit type to PsychoPy OpenGL pix unit space.
_unit2PixMappings = dict()
# the following are to be used by convertToPix
def _pix2pix(vertices, pos, win=None):
return pos + vertices
_unit2PixMappings['pix'] = _pix2pix
_unit2PixMappings['pixels'] = _pix2pix
def _cm2pix(vertices, pos, win):
return cm2pix(pos + vertices, win.monitor)
_unit2PixMappings['cm'] = _cm2pix
def _deg2pix(vertices, pos, win):
return deg2pix(pos + vertices, win.monitor)
_unit2PixMappings['deg'] = _deg2pix
_unit2PixMappings['degs'] = _deg2pix
def _degFlatPos2pix(vertices, pos, win):
posCorrected = deg2pix(pos, win.monitor, correctFlat=True)
vertices = deg2pix(vertices, win.monitor, correctFlat=False)
return posCorrected + vertices
_unit2PixMappings['degFlatPos'] = _degFlatPos2pix
def _degFlat2pix(vertices, pos, win):
return deg2pix(array(pos) + array(vertices), win.monitor,
correctFlat=True)
_unit2PixMappings['degFlat'] = _degFlat2pix
def _norm2pix(vertices, pos, win):
if win.useRetina:
return (pos + vertices) * win.size / 4.0
else:
return (pos + vertices) * win.size / 2.0
_unit2PixMappings['norm'] = _norm2pix
def _height2pix(vertices, pos, win):
if win.useRetina:
return (pos + vertices) * win.size[1] / 2.0
else:
return (pos + vertices) * win.size[1]
_unit2PixMappings['height'] = _height2pix
def posToPix(stim):
"""Returns the stim's position in pixels,
based on its pos, units, and win.
"""
    return convertToPix([0, 0], stim.pos, stim.units, stim.win)
def convertToPix(vertices, pos, units, win):
"""Takes vertices and position, combines and converts to pixels
from any unit
The reason that `pos` and `vertices` are provided separately is that
it allows the conversion from deg to apply flat-screen correction to
each separately.
The reason that these use function args rather than relying on
self.pos is that some stimuli use other terms (e.g. ElementArrayStim
uses fieldPos).
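    Examples
    --------
    Convert a square outline from degrees to pixels (a minimal sketch; `win`
    is assumed to be an open `Window` whose monitor has a viewing distance
    and width configured)::
        verts = np.array([[-1., -1.], [-1., 1.], [1., 1.], [1., -1.]])
        vertsPix = convertToPix(verts, pos=(0, 0), units='deg', win=win)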
"""
unit2pixFunc = _unit2PixMappings.get(units)
if unit2pixFunc:
return unit2pixFunc(vertices, pos, win)
else:
msg = "The unit type [{0}] is not registered with PsychoPy"
raise ValueError(msg.format(units))
def addUnitTypeConversion(unitLabel, mappingFunc):
"""Add support for converting units specified by unit_label to pixels
to be used by convertToPix (therefore a valid unit for your PsychoPy
stimuli)
mapping_func must have the function prototype:
def mapping_func(vertices, pos, win):
# Convert the input vertices, pos to pixel positions PsychoPy
# will use for OpenGL call.
# unit type -> pixel mapping logic here
# .....
return pix
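    Examples
    --------
    Register a hypothetical 'halfpix' unit (names here are illustrative, not
    part of PsychoPy)::
        def halfPix2Pix(vertices, pos, win):
            return (pos + vertices) * 0.5
        addUnitTypeConversion('halfpix', halfPix2Pix)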
"""
if unitLabel in _unit2PixMappings:
msg = "The unit type label [{0}] is already registered with PsychoPy"
raise ValueError(msg.format(unitLabel))
_unit2PixMappings[unitLabel] = mappingFunc
# Built in conversion functions follow ...
def cm2deg(cm, monitor, correctFlat=False):
"""Convert size in cm to size in degrees for a given Monitor object
"""
# check we have a monitor
if not isinstance(monitor, monitors.Monitor):
msg = ("cm2deg requires a monitors.Monitor object as the second "
"argument but received %s")
raise ValueError(msg % str(type(monitor)))
# get monitor dimensions
dist = monitor.getDistance()
# check they all exist
if dist is None:
msg = "Monitor %s has no known distance (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if correctFlat:
return np.degrees(np.arctan(cm / dist))
else:
return cm / (dist * 0.017455)
def deg2cm(degrees, monitor, correctFlat=False):
"""Convert size in degrees to size in pixels for a given Monitor object.
If `correctFlat == False` then the screen will be treated as if all
points are equal distance from the eye. This means that each "degree"
will be the same size irrespective of its position.
If `correctFlat == True` then the `degrees` argument must be an Nx2 matrix
for X and Y values (the two cannot be calculated separately in this case).
With `correctFlat == True` the positions may look strange because more
eccentric vertices will be spaced further apart.
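    Examples
    --------
    With `correctFlat=True` the input must be Nx2 (a minimal sketch assuming
    a monitor profile with a distance set)::
        mon = monitors.Monitor('testMonitor')
        mon.setDistance(57.0)
        xyCm = deg2cm(np.array([[10., 0.], [0., 10.]]), mon, correctFlat=True)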
"""
# check we have a monitor
if not hasattr(monitor, 'getDistance'):
msg = ("deg2cm requires a monitors.Monitor object as the second "
"argument but received %s")
raise ValueError(msg % str(type(monitor)))
# get monitor dimensions
dist = monitor.getDistance()
# check they all exist
if dist is None:
msg = "Monitor %s has no known distance (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if correctFlat:
rads = radians(degrees)
cmXY = np.zeros(rads.shape, 'd') # must be a double (not float)
if rads.shape == (2,):
x, y = rads
cmXY[0] = hypot(dist, tan(y) * dist) * tan(x)
cmXY[1] = hypot(dist, tan(x) * dist) * tan(y)
elif len(rads.shape) > 1 and rads.shape[1] == 2:
cmXY[:, 0] = hypot(dist, tan(rads[:, 1]) * dist) * tan(rads[:, 0])
cmXY[:, 1] = hypot(dist, tan(rads[:, 0]) * dist) * tan(rads[:, 1])
else:
msg = ("If using deg2cm with correctedFlat==True then degrees "
"arg must have shape [N,2], not %s")
raise ValueError(msg % (repr(rads.shape)))
# derivation:
# if hypotY is line from eyeball to [x,0] given by
# hypot(dist, tan(degX))
# then cmY is distance from [x,0] to [x,y] given by
# hypotY * tan(degY)
# similar for hypotX to get cmX
# alternative:
# we could do this by converting to polar coords, converting
# deg2cm and then going back to cartesian,
# but this would be slower(?)
return cmXY
else:
# the size of 1 deg at screen centre
return np.array(degrees) * dist * 0.017455
def cm2pix(cm, monitor):
"""Convert size in cm to size in pixels for a given Monitor object.
"""
# check we have a monitor
if not isinstance(monitor, monitors.Monitor):
msg = ("cm2pix requires a monitors.Monitor object as the"
" second argument but received %s")
raise ValueError(msg % str(type(monitor)))
# get monitor params and raise error if necess
scrWidthCm = monitor.getWidth()
scrSizePix = monitor.getSizePix()
if scrSizePix is None:
msg = "Monitor %s has no known size in pixels (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if scrWidthCm is None:
msg = "Monitor %s has no known width in cm (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
return cm * scrSizePix[0] / float(scrWidthCm)
def pix2cm(pixels, monitor):
"""Convert size in pixels to size in cm for a given Monitor object
"""
# check we have a monitor
if not isinstance(monitor, monitors.Monitor):
msg = ("cm2pix requires a monitors.Monitor object as the second"
" argument but received %s")
raise ValueError(msg % str(type(monitor)))
# get monitor params and raise error if necess
scrWidthCm = monitor.getWidth()
scrSizePix = monitor.getSizePix()
if scrSizePix is None:
msg = "Monitor %s has no known size in pixels (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if scrWidthCm is None:
msg = "Monitor %s has no known width in cm (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
return pixels * float(scrWidthCm) / scrSizePix[0]
def deg2pix(degrees, monitor, correctFlat=False):
"""Convert size in degrees to size in pixels for a given Monitor object
"""
# get monitor params and raise error if necess
scrWidthCm = monitor.getWidth()
scrSizePix = monitor.getSizePix()
if scrSizePix is None:
msg = "Monitor %s has no known size in pixels (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if scrWidthCm is None:
msg = "Monitor %s has no known width in cm (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
cmSize = deg2cm(degrees, monitor, correctFlat)
return cmSize * scrSizePix[0] / float(scrWidthCm)
def pix2deg(pixels, monitor, correctFlat=False):
"""Convert size in pixels to size in degrees for a given Monitor object
"""
# get monitor params and raise error if necess
scrWidthCm = monitor.getWidth()
scrSizePix = monitor.getSizePix()
if scrSizePix is None:
msg = "Monitor %s has no known size in pixels (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if scrWidthCm is None:
msg = "Monitor %s has no known width in cm (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
cmSize = pixels * float(scrWidthCm) / scrSizePix[0]
return cm2deg(cmSize, monitor, correctFlat)
| 9,715 | Python | .py | 218 | 38.380734 | 79 | 0.676443 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,398 | movietools.py | psychopy_psychopy/psychopy/tools/movietools.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions for working with movies in PsychoPy.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'MovieFileWriter',
'closeAllMovieWriters',
'addAudioToMovie',
'MOVIE_WRITER_FFPYPLAYER',
'MOVIE_WRITER_OPENCV',
'MOVIE_WRITER_NULL',
'VIDEO_RESOLUTIONS'
]
import os
import time
import threading
import queue
import atexit
import numpy as np
import psychopy.logging as logging
# constants for specifying encoders for the movie writer
MOVIE_WRITER_FFPYPLAYER = u'ffpyplayer'
MOVIE_WRITER_OPENCV = u'opencv'
MOVIE_WRITER_NULL = u'null' # use prefs for default
# Common video resolutions in pixels (width, height). Users should be able to
# pass any of these strings to fields that require a video resolution. Setters
# should uppercase the string before comparing it to the keys in this dict.
VIDEO_RESOLUTIONS = {
'VGA': (640, 480),
'SVGA': (800, 600),
'XGA': (1024, 768),
'SXGA': (1280, 1024),
'UXGA': (1600, 1200),
'QXGA': (2048, 1536),
'WVGA': (852, 480),
'WXGA': (1280, 720),
'WXGA+': (1440, 900),
'WSXGA+': (1680, 1050),
'WUXGA': (1920, 1200),
'WQXGA': (2560, 1600),
'WQHD': (2560, 1440),
'WQXGA+': (3200, 1800),
'UHD': (3840, 2160),
'4K': (4096, 2160),
'8K': (7680, 4320)
}
# Keep track of open movie writers here. This is used to close all movie writers
# when the main thread exits. Any waiting frames are flushed to the file before
# the file is finalized. We identify movie writers by hashing the filename they
# are presently writing to.
_openMovieWriters = set()
class MovieFileWriter:
"""Create movies from a sequence of images.
This class allows for the creation of movies from a sequence of images using
FFMPEG (via the `ffpyplayer` or `cv2` libraries). Writing movies to disk is
a slow process, so this class uses a separate thread to write the movie in
the background. This means that you can continue to add images to the movie
while frames are still being written to disk. Movie writers are closed
automatically when the main thread exits. Any remaining frames are flushed
to the file before the file is finalized.
Writing audio tracks is not supported. If you need to add audio to your
movie, create the file with the video content first, then add the audio
track to the file. The :func:`addAudioToMovie` function can be used to do
this after the video and audio files have been saved to disk.
Parameters
----------
filename : str
The name (or path) of the file to write the movie to. The file extension
determines the movie format if `codec` is `None` for some backends.
Otherwise it must be explicitly specified.
size : tuple or str
The size of the movie in pixels (width, height). If a string is passed,
it should be one of the keys in the `VIDEO_RESOLUTIONS` dictionary.
fps : float
The number of frames per second.
codec : str or None
The codec to use for encoding the movie. This may be a codec identifier
        (e.g., 'libx264') or a FourCC code. The value depends on the
`encoderLib` in use. If `None`, the writer will select the codec based
on the file extension of `filename` (if supported by the backend).
pixelFormat : str
Pixel format for frames being added to the movie. This should be
either 'rgb24' or 'rgba32'. The default is 'rgb24'. When passing frames
to `addFrame()` as a numpy array, the array should be in the format
specified here.
encoderLib : str
The library to use to handle encoding and writing the movie to disk. The
default is 'ffpyplayer'.
encoderOpts : dict or None
        A dictionary of options to pass to the encoder. These options can be
        used to control the quality of the movie, for example. The options
        depend on
to control the quality of the movie, for example. The options depend on
the `encoderLib` in use. If `None`, the writer will use the default
options for the backend.
Examples
--------
Create a movie from a sequence of generated noise images::
import psychopy.tools.movietools as movietools
import numpy as np
# create a movie writer
writer = movietools.MovieFileWriter(
filename='myMovie.mp4',
size=(640, 480),
fps=30)
# open the movie for writing
writer.open()
# add some frames to the movie
for i in range(5 * writer.fps): # 5 seconds of video
# create a frame, just some random noise
frame = np.random.randint(0, 255, (640, 480, 3), dtype=np.uint8)
# add the frame to the movie
writer.addFrame(frame)
# close the movie, this completes the writing process
writer.close()
Setting additional options for the movie encoder requires passing a
dictionary of options to the `encoderOpts` parameter. The options depend on
the encoder library in use. For example, to set the quality of the movie
when using the `ffpyplayer` library, you can do the following::
ffmpegOpts = {'preset': 'medium', 'crf': 16} # medium quality, crf=16
writer = movietools.MovieFileWriter(
filename='myMovie.mp4',
size='720p',
fps=30,
encoderLib='ffpyplayer',
encoderOpts=ffmpegOpts)
The OpenCV backend specifies options differently. To set the quality of the
movie when using the OpenCV library with a codec that support variable
quality, you can do the following::
cvOpts = {'quality': 80} # set the quality to 80 (0-100)
writer = movietools.MovieFileWriter(
filename='myMovie.mp4',
size='720p',
fps=30,
encoderLib='opencv',
encoderOpts=cvOpts)
"""
# supported pixel formats as constants
PIXEL_FORMAT_RGB24 = 'rgb24'
PIXEL_FORMAT_RGBA32 = 'rgb32'
def __init__(self, filename, size, fps, codec=None, pixelFormat='rgb24',
encoderLib='ffpyplayer', encoderOpts=None):
# objects needed to build up the asynchronous movie writer interface
self._writerThread = None # thread for writing the movie file
self._frameQueue = queue.Queue() # queue for frames to be written
self._dataLock = threading.Lock() # lock for accessing shared data
self._lastVideoFile = None # last video file we wrote to
# set the file name
self._filename = None
self._absPath = None # use for generating a hash of the filename
self.filename = filename # use setter to init self._filename
        # Select the default codec based on the encoder library: 'libx264' for
        # ffpyplayer and the 'mp4v' FourCC for OpenCV. If the user specifies a
        # codec, we use that instead.
if encoderLib == 'ffpyplayer':
self._codec = codec or 'libx264' # default codec
elif encoderLib == 'opencv':
self._codec = codec or 'mp4v'
if len(self._codec) != 4:
raise ValueError('OpenCV codecs must be FourCC codes')
else:
raise ValueError('Unknown encoder library: {}'.format(encoderLib))
self._encoderLib = encoderLib
self._encoderOpts = {} if encoderOpts is None else encoderOpts
self._size = None
self.size = size # use setter to init self._size
self._fps = None
self.fps = fps # use setter to init self._fps
self._pixelFormat = pixelFormat
# frame interval in seconds
self._frameInterval = 1.0 / self._fps
# keep track of the number of bytes we saved to the movie file
self._pts = 0.0 # most recent presentation timestamp
self._bytesOut = 0
self._framesOut = 0
def __hash__(self):
"""Use the absolute file path as the hash value since we only allow one
instance per file.
"""
return hash(self._absPath)
@property
def filename(self):
"""The name (path) of the movie file (`str`).
This cannot be changed after the writer has been opened.
"""
return self._filename
@filename.setter
def filename(self, value):
if self.isOpen:
raise RuntimeError(
'Cannot change `filename` after the writer has been opened.')
self._filename = value
self._absPath = os.path.abspath(self._filename)
@property
def size(self):
"""The size `(w, h)` of the movie in pixels (`tuple` or `str`).
If a string is passed, it should be one of the keys in the
`VIDEO_RESOLUTIONS` dictionary.
This can not be changed after the writer has been opened.
"""
return self._size
@size.setter
def size(self, value):
if self.isOpen:
raise RuntimeError(
'Cannot change `size` after the writer has been opened.')
# if a string is passed, try to look up the size in the dictionary
if isinstance(value, str):
try:
value = VIDEO_RESOLUTIONS[value.upper()]
except KeyError:
raise ValueError(
f'Unknown video resolution: {value}. Must be one of: '
f'{", ".join(VIDEO_RESOLUTIONS.keys())}.')
if len(value) != 2:
raise ValueError('`size` must be a collection of two integers.')
self._size = tuple(value)
@property
def frameSize(self):
"""The size `(w, h)` of the movie in pixels (`tuple`).
This is an alias for `size` to synchronize naming with other video
classes around PsychoPy.
"""
return self._size
@frameSize.setter
def frameSize(self, value):
self.size = value
@property
def fps(self):
"""Output frames per second (`float`).
This is the number of frames per second that will be written to the
movie file. The default is 30.
"""
return self._fps
@fps.setter
def fps(self, value):
if self.isOpen:
raise RuntimeError(
'Cannot change `fps` after the writer has been opened.')
if value <= 0:
raise ValueError('`fps` must be greater than 0.')
self._fps = value
self._frameInterval = 1.0 / self._fps
@property
def frameRate(self):
"""Output frames per second (`float`).
This is an alias for `fps` to synchronize naming with other video
classes around PsychoPy.
"""
return self._fps
@frameRate.setter
def frameRate(self, value):
self.fps = value
@property
def codec(self):
"""The codec to use for encoding the movie (`str`).
        This may be a codec identifier (e.g., 'libx264') or a FourCC code (e.g.
        'mp4v'). The value depends on the `encoderLib` in use. If `None`, a
        codec determined by the file extension will be used.
"""
return self._codec
@codec.setter
def codec(self, value):
if self.isOpen:
raise RuntimeError(
'Cannot change `codec` after the writer has been opened.')
self._codec = value
@property
def pixelFormat(self):
"""Pixel format for frames being added to the movie (`str`).
This should be either 'rgb24' or 'rgba32'. The default is 'rgb24'. When
passing frames to `addFrame()` as a numpy array, the array should be in
the format specified here.
"""
return self._pixelFormat
@pixelFormat.setter
def pixelFormat(self, value):
if self.isOpen:
raise RuntimeError(
'Cannot change `pixelFormat` after the writer has been opened.')
self._pixelFormat = value
@property
def encoderLib(self):
"""The library to use for writing the movie (`str`).
Can only be set before the movie file is opened. The default is
'ffpyplayer'.
"""
return self._encoderLib
@encoderLib.setter
def encoderLib(self, value):
        if self.isOpen:
raise RuntimeError(
'Cannot change `encoderLib` after the writer has been opened.')
self._encoderLib = value
@property
def encoderOpts(self):
"""Encoder options (`dict`).
These are passed directly to the encoder library. The default is an
empty dictionary.
"""
return self._encoderOpts
@encoderOpts.setter
def encoderOpts(self, value):
        if self.isOpen:
raise RuntimeError(
'Cannot change `encoderOpts` after the writer has been opened.')
self._encoderOpts = value
@property
def lastVideoFile(self):
"""The name of the last video file written to disk (`str` or `None`).
This is `None` if no video file has been written to disk yet. Only valid
after the movie file has been closed (i.e. after calling `close()`.)
"""
return self._lastVideoFile
@property
def isOpen(self):
"""Whether the movie file is open (`bool`).
If `True`, the movie file is open and frames can be added to it. If
`False`, the movie file is closed and no more frames can be added to it.
"""
if self._writerThread is None:
return False
return self._writerThread.is_alive()
@property
def framesOut(self):
"""Total number of frames written to the movie file (`int`).
Use this to monitor the progress of the movie file writing. This value
is updated asynchronously, so it may not be accurate if you are adding
frames to the movie file very quickly.
This value is retained after the movie file is closed. It is cleared
when a new movie file is opened.
"""
with self._dataLock:
return self._framesOut
@property
def bytesOut(self):
"""Total number of bytes (`int`) saved to the movie file.
Use this to monitor how much disk space is occupied by the frames that
have been written so far. This value is updated asynchronously, so it
may not be accurate if you are adding frames to the movie file very
quickly.
This value is retained after the movie file is closed. It is cleared
when a new movie file is opened.
"""
with self._dataLock:
return self._bytesOut
@property
def framesWaiting(self):
"""The number of frames waiting to be written to disk (`int`).
This value increases when you call `addFrame()` and decreases when the
frame is written to disk. This number can be reduced to zero by calling
`flush()`.
"""
return self._frameQueue.qsize()
@property
def totalFrames(self):
"""The total number of frames that will be written to the movie file
(`int`).
        This includes frames that have already been written to disk and frames
that are waiting to be written to disk.
"""
return self.framesOut + self.framesWaiting
@property
def frameInterval(self):
"""The time interval between frames (`float`).
This is the time interval between frames in seconds. This is the
reciprocal of the frame rate.
"""
return self._frameInterval
@property
def duration(self):
"""The duration of the movie in seconds (`float`).
This is the total duration of the movie in seconds based on the number
of frames that have been added to the movie and the frame rate. This
does not represent the actual duration of the movie file on disk, which
may be longer if frames are still being written to disk.
"""
return self.totalFrames * self._frameInterval
def _openFFPyPlayer(self):
"""Open a movie writer using FFPyPlayer.
This is called by `open()` if `encoderLib` is 'ffpyplayer'. It will
create a background thread to write the movie file. This method is not
intended to be called directly.
"""
        # import in the method to avoid a hard dependency on ffpyplayer
from ffpyplayer.writer import MediaWriter
from ffpyplayer.pic import SWScale
def _writeFramesAsync(filename, writerOpts, libOpts, frameQueue, readyBarrier,
dataLock):
"""Local function used to write frames to the movie file.
This is executed in a thread to allow the main thread to continue
adding frames to the movie while the movie is being written to
disk.
Parameters
----------
filename : str
Path of the movie file to write.
writerOpts : dict
Options to configure the movie writer. These are FFPyPlayer
settings and are passed directly to the `MediaWriter` object.
libOpts : dict
Option to configure FFMPEG with.
frameQueue : queue.Queue
A queue containing the frames to write to the movie file.
Pushing `None` to the queue will cause the thread to exit.
readyBarrier : threading.Barrier or None
A `threading.Barrier` object used to synchronize the movie
writer with other threads. This guarantees that the movie writer
                is ready before frames are passed to the queue. If `None`,
no synchronization is performed.
dataLock : threading.Lock
A lock used to synchronize access to the movie writer object for
accessing variables.
"""
# create the movie writer, don't manipulate this object while the
# movie is being written to disk
try:
writer = MediaWriter(filename, [writerOpts], libOpts=libOpts)
except Exception: # catch all exceptions
raise RuntimeError("Failed to open movie file.")
# wait on a barrier
if readyBarrier is not None:
readyBarrier.wait()
while True:
frame = frameQueue.get() # waited on until a frame is added
if frame is None:
break
# get the frame data
colorData, pts = frame
# do color conversion
frameWidth, frameHeight = colorData.get_size()
sws = SWScale(
frameWidth, frameHeight,
colorData.get_pixel_format(),
ofmt='yuv420p')
# write the frame to the file
bytesOut = writer.write_frame(
img=sws.scale(colorData),
pts=pts,
stream=0)
# update the number of bytes saved
with dataLock:
self._bytesOut += bytesOut
self._framesOut += 1
writer.close()
# options to configure the writer
frameWidth, frameHeight = self.size
writerOptions = {
'pix_fmt_in': 'yuv420p', # default for now using mp4
'width_in': frameWidth,
'height_in': frameHeight,
'codec': self._codec,
'frame_rate': (int(self._fps), 1)}
# create a barrier to synchronize the movie writer with other threads
self._syncBarrier = threading.Barrier(2)
# initialize the thread, the thread will wait on frames to be added to
# the queue
self._writerThread = threading.Thread(
target=_writeFramesAsync,
args=(self._filename,
writerOptions,
self._encoderOpts,
self._frameQueue,
self._syncBarrier,
self._dataLock))
self._writerThread.start()
logging.debug("Waiting for movie writer thread to start...")
self._syncBarrier.wait() # wait for the thread to start
logging.debug("Movie writer thread started.")
def _openOpenCV(self):
"""Open a movie writer using OpenCV.
This is called by `open()` if `encoderLib` is 'opencv'. It will create
a background thread to write the movie file. This method is not
intended to be called directly.
"""
import cv2
def _writeFramesAsync(writer, filename, frameSize, frameQueue,
readyBarrier, dataLock):
"""Local function used to write frames to the movie file.
This is executed in a thread to allow the main thread to continue
adding frames to the movie while the movie is being written to
disk.
Parameters
----------
writer : cv2.VideoWriter
A `cv2.VideoWriter` object used to write the movie file.
filename : str
Path of the movie file to write.
frameSize : tuple
The size of the frames in pixels as a `(width, height)` tuple.
frameQueue : queue.Queue
A queue containing the frames to write to the movie file.
Pushing `None` to the queue will cause the thread to exit.
readyBarrier : threading.Barrier or None
A `threading.Barrier` object used to synchronize the movie
writer with other threads. This guarantees that the movie writer
                is ready before frames are passed to the queue. If `None`,
no synchronization is performed.
dataLock : threading.Lock
A lock used to synchronize access to the movie writer object for
accessing variables.
"""
frameWidth, frameHeight = frameSize
# wait on a barrier
if readyBarrier is not None:
readyBarrier.wait()
# we can accept frames for writing now
while True:
frame = frameQueue.get()
if frame is None: # exit if we get `None`
break
colorData, _ = frame # get the frame data
# Resize and color conversion, this puts the data in the correct
# format for OpenCV's frame writer
colorData = cv2.resize(colorData, (frameWidth, frameHeight))
colorData = cv2.cvtColor(colorData, cv2.COLOR_RGB2BGR)
# write the actual frame out to the file
writer.write(colorData)
# number of bytes the last frame took
# bytesOut = writer.get(cv2.VIDEOWRITER_PROP_FRAMEBYTES)
bytesOut = os.stat(filename).st_size
# update values in a thread safe manner
with dataLock:
self._bytesOut = bytesOut
self._framesOut += 1
writer.release()
# Open the writer outside of the thread so exception opening it can be
# caught beforehand.
writer = cv2.VideoWriter(
self._filename,
cv2.CAP_FFMPEG, # use ffmpeg
cv2.VideoWriter_fourcc(*self._codec),
float(self._fps),
self._size,
1) # is color image?
if self._encoderOpts:
# only supported option for now is `quality`, this doesn't really
            # work for the default OpenCV codec for some reason :(
quality = self._encoderOpts.get('VIDEOWRITER_PROP_QUALITY', None) \
or self._encoderOpts.get('quality', None)
if quality is None:
quality = writer.get(cv2.VIDEOWRITER_PROP_QUALITY)
logging.debug("Quality not specified, using default value of "
f"{quality}.")
writer.set(cv2.VIDEOWRITER_PROP_QUALITY, float(quality))
logging.info(f"Setting movie writer quality to {quality}.")
if not writer.isOpened():
raise RuntimeError("Failed to open movie file.")
# create a barrier to synchronize the movie writer with other threads
self._syncBarrier = threading.Barrier(2)
# initialize the thread, the thread will wait on frames to be added to
# the queue
self._writerThread = threading.Thread(
target=_writeFramesAsync,
args=(writer,
self._filename,
self._size,
self._frameQueue,
self._syncBarrier,
self._dataLock))
self._writerThread.start()
logging.debug("Waiting for movie writer thread to start...")
self._syncBarrier.wait() # wait for the thread to start
logging.debug("Movie writer thread started.")
def open(self):
"""Open the movie file for writing.
This creates a new thread that will write the movie file to disk in
the background.
After calling this method, you can add frames to the movie using
`addFrame()`. When you are done adding frames, call `close()` to
finalize the movie file.
"""
if self.isOpen:
raise RuntimeError('Movie writer is already open.')
# register ourselves as an open movie writer
global _openMovieWriters
# check if we already have a movie writer for this file
if self in _openMovieWriters:
raise ValueError(
'A movie writer is already open for file {}'.format(
self._filename))
logging.debug('Creating movie file for writing %s', self._filename)
# reset counters
self._bytesOut = self._framesOut = 0
self._pts = 0.0
# eventually we'll want to support other encoder libraries, for now
# we're just going to hardcode the encoder libraries we support
if self._encoderLib == 'ffpyplayer':
self._openFFPyPlayer()
elif self._encoderLib == 'opencv':
self._openOpenCV()
else:
raise ValueError(
"Unknown encoder library '{}'.".format(self._encoderLib))
_openMovieWriters.add(self) # add to the list of open movie writers
logging.info("Movie file '%s' opened for writing.", self._filename)
def flush(self):
"""Flush waiting frames to the movie file.
This will cause all frames waiting in the queue to be written to disk
before continuing the program i.e. the thread that called this method.
This is useful for ensuring that all frames are written to disk before
the program exits.
"""
# check if the writer thread present and is alive
if not self.isOpen:
raise RuntimeError('Movie writer is not open.')
# block until the queue is empty
nWaitingAtStart = self.framesWaiting
while not self._frameQueue.empty():
# simple check to see if the queue size is decreasing monotonically
nWaitingNow = self.framesWaiting
if nWaitingNow > nWaitingAtStart:
logging.warn(
"Queue length not decreasing monotonically during "
"`flush()`. This may indicate that frames are still being "
"added ({} -> {}).".format(
nWaitingAtStart, nWaitingNow)
)
nWaitingAtStart = nWaitingNow
time.sleep(0.001) # sleep for 1 ms
def close(self):
"""Close the movie file.
This shuts down the background thread and finalizes the movie file. Any
frames still waiting in the queue will be written to disk before the
movie file is closed. This will block the program until all frames are
written, therefore, it is recommended for `close()` to be called outside
any time-critical code.
"""
if self._writerThread is None:
return
logging.debug("Closing movie file '{}'.".format(self.filename))
# if the writer thread is alive still, then we need to shut it down
if self._writerThread.is_alive():
self._frameQueue.put(None) # signal the thread to exit
# flush remaining frames, if any
msg = ("File '{}' still has {} frame(s) queued to be written to "
"disk, waiting to complete.")
nWaiting = self.framesWaiting
if nWaiting > 0:
logging.warning(msg.format(self.filename, nWaiting))
self.flush()
self._writerThread.join() # waits until the thread exits
# unregister ourselves as an open movie writer
try:
global _openMovieWriters
_openMovieWriters.remove(self)
        except (KeyError, AttributeError):
            pass  # already unregistered or interpreter is shutting down
# set the last video file for later use. This is handy for users wanting
# to add audio tracks to video files they created
self._lastVideoFile = self._filename
self._writerThread = None
logging.info("Movie file '{}' closed.".format(self.filename))
def _convertImage(self, image):
"""Convert an image to a pixel format appropriate for the encoder.
This is used internally to convert an image (i.e. frame) to the native
frame format which the encoder library can work with. At the very least,
this function should accept a `numpy.array` as a valid type for `image`
no matter what encoder library is being used.
Parameters
----------
image : Any
The image to convert.
Returns
-------
Any
The converted image. Resulting object type depends on the encoder
library being used.
"""
# convert the image to a format that the selected encoder library can
# work with
if self._encoderLib == 'ffpyplayer': # FFPyPlayer `MediaWriter`
import ffpyplayer.pic as pic
if isinstance(image, np.ndarray):
# make sure we are the correct format
image = np.ascontiguousarray(image, dtype=np.uint8).tobytes()
return pic.Image(
plane_buffers=[image],
pix_fmt=self._pixelFormat,
size=self._size)
elif isinstance(image, pic.Image):
# check if the format is valid
if image.get_pixel_format() != self._pixelFormat:
raise ValueError('Invalid pixel format for `image`.')
return image
else:
raise TypeError(
'Unsupported `image` type for OpenCV '
'`MediaWriter.write_frame().')
elif self._encoderLib == 'opencv': # OpenCV `VideoWriter`
if isinstance(image, np.ndarray):
                # rows correspond to the image height, so reshape as (h, w, 3)
                image = image.reshape(self._size[1], self._size[0], 3)
return np.ascontiguousarray(image, dtype=np.uint8)
else:
raise TypeError(
'Unsupported `image` type for OpenCV `VideoWriter.write().')
else:
raise RuntimeError('Unsupported encoder library specified.')
def addFrame(self, image, pts=None):
"""Add a frame to the movie.
This adds a frame to the movie. The frame will be added to a queue and
written to disk by a background thread. This method will block until the
frame is added to the queue.
Any color space conversion or resizing will be performed in the caller's
thread. This may be threaded too in the future.
Parameters
----------
image : numpy.ndarray or ffpyplayer.pic.Image
The image to add to the movie. The image must be in RGB format and
have the same size as the movie. If the image is an `Image`
instance, it must have the same size as the movie.
pts : float or None
The presentation timestamp for the frame. This is the time at which
the frame should be displayed. The presentation timestamp is in
seconds and should be monotonically increasing. If `None`, the
presentation timestamp will be automatically generated based on the
chosen frame rate for the output video. Not all encoder libraries
support presentation timestamps, so this parameter may be ignored.
Returns
-------
float
Presentation timestamp assigned to the frame. Should match the value
passed in as `pts` if provided, otherwise it will be the computed
presentation timestamp.
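        Examples
        --------
        Add a single black frame (a minimal sketch; `writer` is assumed to be
        an open `MovieFileWriter` created with `size=(640, 480)`)::
            frame = np.zeros((480, 640, 3), dtype=np.uint8)  # h x w x RGB
            pts = writer.addFrame(frame)  # timestamp computed from `fps`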
"""
if not self.isOpen:
# nb - eventually we can allow frames to be added to a closed movie
# object and have them queued until the movie is opened which will
# commence writing
raise RuntimeError('Movie file not open for writing.')
# convert to a format for the selected writer library
colorData = self._convertImage(image)
# get computed presentation timestamp if not provided
pts = self._pts if pts is None else pts
# pass the image data to the writer thread
self._frameQueue.put((colorData, pts))
# update the presentation timestamp after adding the frame
self._pts += self._frameInterval
return pts
def __del__(self):
"""Close the movie file when the object is deleted.
"""
try:
self.close()
except AttributeError:
pass
def closeAllMovieWriters():
"""Signal all movie writers to close.
This function should only be called once at the end of the program. This can
be registered `atexit` to ensure that all movie writers are closed when the
program exits. If there are open file writers with frames still queued, this
function will block until all frames remaining are written to disk.
Use caution when calling this function when file writers are being used in a
multi-threaded environment. Threads that are writing movie frames must be
stopped prior to calling this function. If not, the thread may continue to
write frames to the queue during the flush operation and never exit.
"""
global _openMovieWriters
if not _openMovieWriters: # do nothing if no movie writers are open
return
logging.info('Closing all open ({}) movie writers now'.format(
len(_openMovieWriters)))
for movieWriter in _openMovieWriters.copy():
# flush the movie writer, this will block until all frames are written
movieWriter.close()
_openMovieWriters.clear() # clear the set to free references
# register the cleanup function to run when the program exits
atexit.register(closeAllMovieWriters)
def addAudioToMovie(outputFile, videoFile, audioFile, useThreads=True,
removeFiles=False, writerOpts=None):
"""Add an audio track to a video file.
This function will add an audio track to a video file. If the video file
already has an audio track, it will be replaced with the audio file
provided. If no audio file is provided, the audio track will be removed
from the video file.
The audio track should be exactly the same length as the video track.
Parameters
----------
outputFile : str
Path to the output video file where audio and video will be merged.
videoFile : str
Path to the input video file.
audioFile : str or None
Path to the audio file to add to the video file.
useThreads : bool
If `True`, the audio will be added in a separate thread. This allows the
audio to be added in the background while the program continues to run.
If `False`, the audio will be added in the main thread and the program
will block until the audio is added. Defaults to `True`.
removeFiles : bool
If `True`, the input video (`videoFile`) and audio (`audioFile`) files
will be removed (i.e. deleted from disk) after the audio has been added
to the video. Defaults to `False`.
writerOpts : dict or None
Options to pass to the movie writer. This should be a dictionary of
keyword arguments to pass to the movie writer. If `None`, the default
options for the movie writer will be used. Defaults to `None`. See
documentation for `moviepy.video.io.VideoFileClip.write_videofile` for
possible values.
Examples
--------
Combine a video file and an audio file into a single video file::
from psychopy.tools.movietools import addAudioToMovie
addAudioToMovie('output.mp4', 'video.mp4', 'audio.mp3')
"""
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.audio.AudioClip import CompositeAudioClip
# default options for the writer
moviePyOpts = {
'verbose': False,
'logger': None
}
    if writerOpts is not None:  # merge user options over the defaults
moviePyOpts.update(writerOpts)
def _renderVideo(outputFile, videoFile, audioFile, removeFiles, writerOpts):
"""Render the video file with the audio track.
"""
# merge audio and video tracks, we use MoviePy for this
videoClip = VideoFileClip(videoFile)
audioClip = AudioFileClip(audioFile)
videoClip.audio = CompositeAudioClip([audioClip])
# transcode with the format the user wants
videoClip.write_videofile(
outputFile,
**writerOpts) # expand out options
if removeFiles:
# remove the input files
os.remove(videoFile)
os.remove(audioFile)
# run the audio/video merge in the main thread
if not useThreads:
logging.debug('Adding audio to video file in main thread')
_renderVideo(
outputFile,
videoFile,
audioFile,
removeFiles,
moviePyOpts)
return
# run the audio/video merge in a separate thread
logging.debug('Adding audio to video file in separate thread')
compositorThread = threading.Thread(
target=_renderVideo,
args=(outputFile,
videoFile,
audioFile,
removeFiles,
moviePyOpts))
compositorThread.start()
if __name__ == "__main__":
pass
| 39,754 | Python | .py | 858 | 35.552448 | 86 | 0.622628 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
| 5,399 | __init__.py | psychopy_psychopy/psychopy/tools/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Container for all miscellaneous functions and classes
"""
| 291 | Python | .py | 7 | 40.285714 | 79 | 0.748227 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |