column            dtype    values / lengths
id                int32    0 to 252k
repo              string   lengths 7 to 55
path              string   lengths 4 to 127
func_name         string   lengths 1 to 88
original_string   string   lengths 75 to 19.8k
language          string   1 class
code              string   lengths 75 to 19.8k
code_tokens       list
docstring         string   lengths 3 to 17.3k
docstring_tokens  list
sha               string   lengths 40 to 40
url               string   lengths 87 to 242
17,600
Kozea/cairocffi
cairocffi/context.py
Context.tag_begin
def tag_begin(self, tag_name, attributes=None):
    """Marks the beginning of the ``tag_name`` structure.

    Call :meth:`tag_end` with the same ``tag_name`` to mark the end of
    the structure.

    The attributes string is of the form "key1=value1 key2=value2 ...".
    Values may be boolean (true/false or 1/0), integer, float, string,
    or an array.

    String values are enclosed in single quotes ('). Single quotes and
    backslashes inside the string should be escaped with a backslash.

    Boolean values may be set to true by only specifying the key, e.g.
    the attribute string "key" is equivalent to "key=true".

    Arrays are enclosed in '[]', e.g. "rect=[1.2 4.3 2.0 3.0]".

    If no attributes are required, ``attributes`` can be omitted or set
    to an empty string or None.

    See cairo's Tags and Links Description for the list of tags and
    attributes.

    Invalid nesting of tags or invalid attributes will cause the
    context to shut down with a status of ``CAIRO_STATUS_TAG_ERROR``.

    See :meth:`tag_end`.

    :param tag_name: tag name
    :param attributes: tag attributes

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    if attributes is None:
        attributes = ''
    cairo.cairo_tag_begin(
        self._pointer, _encode_string(tag_name),
        _encode_string(attributes))
    self._check_status()
python
def tag_begin(self, tag_name, attributes=None):
    """Marks the beginning of the ``tag_name`` structure.

    Call :meth:`tag_end` with the same ``tag_name`` to mark the end of
    the structure.

    The attributes string is of the form "key1=value1 key2=value2 ...".
    Values may be boolean (true/false or 1/0), integer, float, string,
    or an array.

    String values are enclosed in single quotes ('). Single quotes and
    backslashes inside the string should be escaped with a backslash.

    Boolean values may be set to true by only specifying the key, e.g.
    the attribute string "key" is equivalent to "key=true".

    Arrays are enclosed in '[]', e.g. "rect=[1.2 4.3 2.0 3.0]".

    If no attributes are required, ``attributes`` can be omitted or set
    to an empty string or None.

    See cairo's Tags and Links Description for the list of tags and
    attributes.

    Invalid nesting of tags or invalid attributes will cause the
    context to shut down with a status of ``CAIRO_STATUS_TAG_ERROR``.

    See :meth:`tag_end`.

    :param tag_name: tag name
    :param attributes: tag attributes

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    if attributes is None:
        attributes = ''
    cairo.cairo_tag_begin(
        self._pointer, _encode_string(tag_name),
        _encode_string(attributes))
    self._check_status()
[ "def", "tag_begin", "(", "self", ",", "tag_name", ",", "attributes", "=", "None", ")", ":", "if", "attributes", "is", "None", ":", "attributes", "=", "''", "cairo", ".", "cairo_tag_begin", "(", "self", ".", "_pointer", ",", "_encode_string", "(", "tag_name", ")", ",", "_encode_string", "(", "attributes", ")", ")", "self", ".", "_check_status", "(", ")" ]
Marks the beginning of the ``tag_name`` structure. Call :meth:`tag_end` with the same ``tag_name`` to mark the end of the structure. The attributes string is of the form "key1=value1 key2=value2 ...". Values may be boolean (true/false or 1/0), integer, float, string, or an array. String values are enclosed in single quotes ('). Single quotes and backslashes inside the string should be escaped with a backslash. Boolean values may be set to true by only specifying the key, e.g. the attribute string "key" is equivalent to "key=true". Arrays are enclosed in '[]', e.g. "rect=[1.2 4.3 2.0 3.0]". If no attributes are required, ``attributes`` can be omitted or set to an empty string or None. See cairo's Tags and Links Description for the list of tags and attributes. Invalid nesting of tags or invalid attributes will cause the context to shut down with a status of ``CAIRO_STATUS_TAG_ERROR``. See :meth:`tag_end`. :param tag_name: tag name :param attributes: tag attributes *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Marks", "the", "beginning", "of", "the", "tag_name", "structure", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L2198-L2240
17,601
Kozea/cairocffi
cairocffi/context.py
Context.tag_end
def tag_end(self, tag_name):
    """Marks the end of the ``tag_name`` structure.

    Invalid nesting of tags will cause the context to shut down with a
    status of ``CAIRO_STATUS_TAG_ERROR``.

    See :meth:`tag_begin`.

    :param tag_name: tag name

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_tag_end(self._pointer, _encode_string(tag_name))
    self._check_status()
python
def tag_end(self, tag_name):
    """Marks the end of the ``tag_name`` structure.

    Invalid nesting of tags will cause the context to shut down with a
    status of ``CAIRO_STATUS_TAG_ERROR``.

    See :meth:`tag_begin`.

    :param tag_name: tag name

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_tag_end(self._pointer, _encode_string(tag_name))
    self._check_status()
[ "def", "tag_end", "(", "self", ",", "tag_name", ")", ":", "cairo", ".", "cairo_tag_end", "(", "self", ".", "_pointer", ",", "_encode_string", "(", "tag_name", ")", ")", "self", ".", "_check_status", "(", ")" ]
Marks the end of the ``tag_name`` structure. Invalid nesting of tags will cause the context to shut down with a status of ``CAIRO_STATUS_TAG_ERROR``. See :meth:`tag_begin`. :param tag_name: tag name *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Marks", "the", "end", "of", "the", "tag_name", "structure", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L2242-L2258
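The two rows above form a begin/end pair. A minimal usage sketch (not part of the dataset), assuming cairo >= 1.16 with cairocffi >= 0.9 and the 'Link' tag with a 'uri' attribute from cairo's Tags and Links Description; the output filename is hypothetical:

import cairocffi

surface = cairocffi.PDFSurface('tagged.pdf', 200, 100)  # hypothetical name
context = cairocffi.Context(surface)
context.tag_begin('Link', "uri='https://example.com'")
context.move_to(20, 50)
context.show_text('example.com')  # content covered by the tag
context.tag_end('Link')
surface.finish()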
17,602
Kozea/cairocffi
cairocffi/surfaces.py
_make_read_func
def _make_read_func(file_obj):
    """Return a CFFI callback that reads from a file-like object."""
    @ffi.callback("cairo_read_func_t", error=constants.STATUS_READ_ERROR)
    def read_func(_closure, data, length):
        string = file_obj.read(length)
        if len(string) < length:  # EOF too early
            return constants.STATUS_READ_ERROR
        ffi.buffer(data, length)[:len(string)] = string
        return constants.STATUS_SUCCESS
    return read_func
python
def _make_read_func(file_obj):
    """Return a CFFI callback that reads from a file-like object."""
    @ffi.callback("cairo_read_func_t", error=constants.STATUS_READ_ERROR)
    def read_func(_closure, data, length):
        string = file_obj.read(length)
        if len(string) < length:  # EOF too early
            return constants.STATUS_READ_ERROR
        ffi.buffer(data, length)[:len(string)] = string
        return constants.STATUS_SUCCESS
    return read_func
[ "def", "_make_read_func", "(", "file_obj", ")", ":", "@", "ffi", ".", "callback", "(", "\"cairo_read_func_t\"", ",", "error", "=", "constants", ".", "STATUS_READ_ERROR", ")", "def", "read_func", "(", "_closure", ",", "data", ",", "length", ")", ":", "string", "=", "file_obj", ".", "read", "(", "length", ")", "if", "len", "(", "string", ")", "<", "length", ":", "# EOF too early", "return", "constants", ".", "STATUS_READ_ERROR", "ffi", ".", "buffer", "(", "data", ",", "length", ")", "[", ":", "len", "(", "string", ")", "]", "=", "string", "return", "constants", ".", "STATUS_SUCCESS", "return", "read_func" ]
Return a CFFI callback that reads from a file-like object.
[ "Return", "a", "CFFI", "callback", "that", "reads", "from", "a", "file", "-", "like", "object", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L24-L33
17,603
Kozea/cairocffi
cairocffi/surfaces.py
_make_write_func
def _make_write_func(file_obj):
    """Return a CFFI callback that writes to a file-like object."""
    if file_obj is None:
        return ffi.NULL
    @ffi.callback("cairo_write_func_t", error=constants.STATUS_WRITE_ERROR)
    def write_func(_closure, data, length):
        file_obj.write(ffi.buffer(data, length))
        return constants.STATUS_SUCCESS
    return write_func
python
def _make_write_func(file_obj):
    """Return a CFFI callback that writes to a file-like object."""
    if file_obj is None:
        return ffi.NULL
    @ffi.callback("cairo_write_func_t", error=constants.STATUS_WRITE_ERROR)
    def write_func(_closure, data, length):
        file_obj.write(ffi.buffer(data, length))
        return constants.STATUS_SUCCESS
    return write_func
[ "def", "_make_write_func", "(", "file_obj", ")", ":", "if", "file_obj", "is", "None", ":", "return", "ffi", ".", "NULL", "@", "ffi", ".", "callback", "(", "\"cairo_write_func_t\"", ",", "error", "=", "constants", ".", "STATUS_WRITE_ERROR", ")", "def", "write_func", "(", "_closure", ",", "data", ",", "length", ")", ":", "file_obj", ".", "write", "(", "ffi", ".", "buffer", "(", "data", ",", "length", ")", ")", "return", "constants", ".", "STATUS_SUCCESS", "return", "write_func" ]
Return a CFFI callback that writes to a file-like object.
[ "Return", "a", "CFFI", "callback", "that", "writes", "to", "a", "file", "-", "like", "object", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L36-L45
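Both helper rows above lean on CFFI's ``error=`` mechanism: if the Python body raises, the callback returns the given status code to the C caller instead of unwinding through C frames. A standalone sketch of that pattern with plain cffi (no cairo required; the typedef name is illustrative):

import cffi

ffi = cffi.FFI()
ffi.cdef('typedef int (*int_cb_t)(int);')

@ffi.callback('int_cb_t', error=-1)
def double_cb(x):
    # If this body raises, CFFI prints the traceback and hands the C
    # caller the error= value (-1) instead of propagating the exception.
    if x < 0:
        raise ValueError('negative input')
    return x * 2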
17,604
Kozea/cairocffi
cairocffi/surfaces.py
_encode_filename
def _encode_filename(filename):  # pragma: no cover
    """Return a byte string suitable for a filename.

    Unicode is encoded using an encoding adapted to what both cairo
    and the filesystem want.

    """
    # Don't replace unknown characters as '?' is forbidden in Windows filenames
    errors = 'ignore' if os.name == 'nt' else 'replace'
    if not isinstance(filename, bytes):
        if os.name == 'nt' and cairo.cairo_version() >= 11510:
            # Since 1.15.10, cairo uses utf-8 filenames on Windows
            filename = filename.encode('utf-8', errors=errors)
        else:
            try:
                filename = filename.encode(sys.getfilesystemencoding())
            except UnicodeEncodeError:
                # Use plain ASCII filenames as fallback
                filename = filename.encode('ascii', errors=errors)
    # TODO: avoid characters forbidden in filenames?
    return ffi.new('char[]', filename)
python
def _encode_filename(filename):  # pragma: no cover
    """Return a byte string suitable for a filename.

    Unicode is encoded using an encoding adapted to what both cairo
    and the filesystem want.

    """
    # Don't replace unknown characters as '?' is forbidden in Windows filenames
    errors = 'ignore' if os.name == 'nt' else 'replace'
    if not isinstance(filename, bytes):
        if os.name == 'nt' and cairo.cairo_version() >= 11510:
            # Since 1.15.10, cairo uses utf-8 filenames on Windows
            filename = filename.encode('utf-8', errors=errors)
        else:
            try:
                filename = filename.encode(sys.getfilesystemencoding())
            except UnicodeEncodeError:
                # Use plain ASCII filenames as fallback
                filename = filename.encode('ascii', errors=errors)
    # TODO: avoid characters forbidden in filenames?
    return ffi.new('char[]', filename)
[ "def", "_encode_filename", "(", "filename", ")", ":", "# pragma: no cover", "# Don't replace unknown characters as '?' is forbidden in Windows filenames", "errors", "=", "'ignore'", "if", "os", ".", "name", "==", "'nt'", "else", "'replace'", "if", "not", "isinstance", "(", "filename", ",", "bytes", ")", ":", "if", "os", ".", "name", "==", "'nt'", "and", "cairo", ".", "cairo_version", "(", ")", ">=", "11510", ":", "# Since 1.15.10, cairo uses utf-8 filenames on Windows", "filename", "=", "filename", ".", "encode", "(", "'utf-8'", ",", "errors", "=", "errors", ")", "else", ":", "try", ":", "filename", "=", "filename", ".", "encode", "(", "sys", ".", "getfilesystemencoding", "(", ")", ")", "except", "UnicodeEncodeError", ":", "# Use plain ASCII filenames as fallback", "filename", "=", "filename", ".", "encode", "(", "'ascii'", ",", "errors", "=", "errors", ")", "# TODO: avoid characters forbidden in filenames?", "return", "ffi", ".", "new", "(", "'char[]'", ",", "filename", ")" ]
Return a byte string suitable for a filename. Unicode is encoded using an encoding adapted to what both cairo and the filesystem want.
[ "Return", "a", "byte", "string", "suitable", "for", "a", "filename", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L48-L70
17,605
Kozea/cairocffi
cairocffi/surfaces.py
Surface.create_similar_image
def create_similar_image(self, content, width, height):
    """
    Create a new image surface that is as compatible as possible for
    uploading to and the use in conjunction with this surface.
    However, this surface can still be used like any normal image
    surface.

    Initially the surface contents are all 0 (transparent if contents
    have transparency, black otherwise).

    Use :meth:`create_similar` if you don't need an image surface.

    :param format: the :ref:`FORMAT` string for the new surface
    :param width: width of the new surface (in device-space units)
    :param height: height of the new surface (in device-space units)
    :type format: str
    :type width: int
    :type height: int
    :returns: A new :class:`ImageSurface` instance.

    """
    return Surface._from_pointer(
        cairo.cairo_surface_create_similar_image(
            self._pointer, content, width, height),
        incref=False)
python
def create_similar_image(self, content, width, height):
    """
    Create a new image surface that is as compatible as possible for
    uploading to and the use in conjunction with this surface.
    However, this surface can still be used like any normal image
    surface.

    Initially the surface contents are all 0 (transparent if contents
    have transparency, black otherwise).

    Use :meth:`create_similar` if you don't need an image surface.

    :param format: the :ref:`FORMAT` string for the new surface
    :param width: width of the new surface (in device-space units)
    :param height: height of the new surface (in device-space units)
    :type format: str
    :type width: int
    :type height: int
    :returns: A new :class:`ImageSurface` instance.

    """
    return Surface._from_pointer(
        cairo.cairo_surface_create_similar_image(
            self._pointer, content, width, height),
        incref=False)
[ "def", "create_similar_image", "(", "self", ",", "content", ",", "width", ",", "height", ")", ":", "return", "Surface", ".", "_from_pointer", "(", "cairo", ".", "cairo_surface_create_similar_image", "(", "self", ".", "_pointer", ",", "content", ",", "width", ",", "height", ")", ",", "incref", "=", "False", ")" ]
Create a new image surface that is as compatible as possible for uploading to and the use in conjunction with this surface. However, this surface can still be used like any normal image surface. Initially the surface contents are all 0 (transparent if contents have transparency, black otherwise). Use :meth:`create_similar` if you don't need an image surface. :param format: the :ref:`FORMAT` string for the new surface :param width: width of the new surface (in device-space units) :param height: height of the new surface (in device-space units) :type format: str :type width: int :type height: int :returns: A new :class:`ImageSurface` instance.
[ "Create", "a", "new", "image", "surface", "that", "is", "as", "compatible", "as", "possible", "for", "uploading", "to", "and", "the", "use", "in", "conjunction", "with", "this", "surface", ".", "However", "this", "surface", "can", "still", "be", "used", "like", "any", "normal", "image", "surface", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L209-L232
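A possible call site for the method above, sketched rather than taken from the dataset. Note the first argument is documented as a :ref:`FORMAT` value even though the parameter is named ``content`` in the signature; ``cairocffi.FORMAT_ARGB32`` is the documented constant for that format:

import cairocffi

pdf = cairocffi.PDFSurface(None, 300, 200)
# Despite the parameter name `content`, the value is forwarded to
# cairo_surface_create_similar_image() as the image format:
image = pdf.create_similar_image(cairocffi.FORMAT_ARGB32, 100, 100)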
17,606
Kozea/cairocffi
cairocffi/surfaces.py
Surface.create_for_rectangle
def create_for_rectangle(self, x, y, width, height):
    """
    Create a new surface that is a rectangle within this surface.

    All operations drawn to this surface are then clipped and
    translated onto the target surface. Nothing drawn via this
    sub-surface outside of its bounds is drawn onto the target
    surface, making this a useful method for passing constrained
    child surfaces to library routines that draw directly onto the
    parent surface, i.e. with no further backend allocations, double
    buffering or copies.

    .. note::

        As of cairo 1.12, the semantics of subsurfaces have not been
        finalized yet unless the rectangle is in full device units, is
        contained within the extents of the target surface, and the
        target or subsurface's device transforms are not changed.

    :param x:
        The x-origin of the sub-surface from the top-left of the
        target surface (in device-space units)
    :param y:
        The y-origin of the sub-surface from the top-left of the
        target surface (in device-space units)
    :param width: Width of the sub-surface (in device-space units)
    :param height: Height of the sub-surface (in device-space units)
    :type x: float
    :type y: float
    :type width: float
    :type height: float
    :returns: A new :class:`Surface` object.

    *New in cairo 1.10.*

    """
    return Surface._from_pointer(
        cairo.cairo_surface_create_for_rectangle(
            self._pointer, x, y, width, height),
        incref=False)
python
def create_for_rectangle(self, x, y, width, height):
    """
    Create a new surface that is a rectangle within this surface.

    All operations drawn to this surface are then clipped and
    translated onto the target surface. Nothing drawn via this
    sub-surface outside of its bounds is drawn onto the target
    surface, making this a useful method for passing constrained
    child surfaces to library routines that draw directly onto the
    parent surface, i.e. with no further backend allocations, double
    buffering or copies.

    .. note::

        As of cairo 1.12, the semantics of subsurfaces have not been
        finalized yet unless the rectangle is in full device units, is
        contained within the extents of the target surface, and the
        target or subsurface's device transforms are not changed.

    :param x:
        The x-origin of the sub-surface from the top-left of the
        target surface (in device-space units)
    :param y:
        The y-origin of the sub-surface from the top-left of the
        target surface (in device-space units)
    :param width: Width of the sub-surface (in device-space units)
    :param height: Height of the sub-surface (in device-space units)
    :type x: float
    :type y: float
    :type width: float
    :type height: float
    :returns: A new :class:`Surface` object.

    *New in cairo 1.10.*

    """
    return Surface._from_pointer(
        cairo.cairo_surface_create_for_rectangle(
            self._pointer, x, y, width, height),
        incref=False)
[ "def", "create_for_rectangle", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ")", ":", "return", "Surface", ".", "_from_pointer", "(", "cairo", ".", "cairo_surface_create_for_rectangle", "(", "self", ".", "_pointer", ",", "x", ",", "y", ",", "width", ",", "height", ")", ",", "incref", "=", "False", ")" ]
Create a new surface that is a rectangle within this surface. All operations drawn to this surface are then clipped and translated onto the target surface. Nothing drawn via this sub-surface outside of its bounds is drawn onto the target surface, making this a useful method for passing constrained child surfaces to library routines that draw directly onto the parent surface, i.e. with no further backend allocations, double buffering or copies. .. note:: As of cairo 1.12, the semantics of subsurfaces have not been finalized yet unless the rectangle is in full device units, is contained within the extents of the target surface, and the target or subsurface's device transforms are not changed. :param x: The x-origin of the sub-surface from the top-left of the target surface (in device-space units) :param y: The y-origin of the sub-surface from the top-left of the target surface (in device-space units) :param width: Width of the sub-surface (in device-space units) :param height: Height of the sub-surface (in device-space units) :type x: float :type y: float :type width: float :type height: float :returns: A new :class:`Surface` object. *New in cairo 1.10.*
[ "Create", "a", "new", "surface", "that", "is", "a", "rectangle", "within", "this", "surface", ".", "All", "operations", "drawn", "to", "this", "surface", "are", "then", "clipped", "and", "translated", "onto", "the", "target", "surface", ".", "Nothing", "drawn", "via", "this", "sub", "-", "surface", "outside", "of", "its", "bounds", "is", "drawn", "onto", "the", "target", "surface", "making", "this", "a", "useful", "method", "for", "passing", "constrained", "child", "surfaces", "to", "library", "routines", "that", "draw", "directly", "onto", "the", "parent", "surface", "i", ".", "e", ".", "with", "no", "further", "backend", "allocations", "double", "buffering", "or", "copies", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L234-L277
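A usage sketch for the sub-surface method above; names and sizes are illustrative only:

import cairocffi

parent = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 400, 400)
# All drawing through `child` is clipped to the 100x100 region at (10, 10):
child = parent.create_for_rectangle(10, 10, 100, 100)
context = cairocffi.Context(child)
context.set_source_rgb(1, 0, 0)
context.paint()  # paints only that sub-rectangle of `parent`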
17,607
Kozea/cairocffi
cairocffi/surfaces.py
Surface.set_fallback_resolution
def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
    """
    Set the horizontal and vertical resolution for image fallbacks.

    When certain operations aren't supported natively by a backend,
    cairo will fall back by rendering operations to an image and then
    overlaying that image onto the output. For backends that are
    natively vector-oriented, this method can be used to set the
    resolution used for these image fallbacks (larger values will
    result in more detailed images, but also larger file sizes).

    Some examples of natively vector-oriented backends are the ps,
    pdf, and svg backends.

    For backends that are natively raster-oriented, image fallbacks
    are still possible, but they are always performed at the native
    device resolution. So this method has no effect on those backends.

    .. note::

        The fallback resolution only takes effect at the time of
        completing a page (with :meth:`show_page` or :meth:`copy_page`)
        so there is currently no way to have more than one fallback
        resolution in effect on a single page.

    The default fallback resolution is 300 pixels per inch in both
    dimensions.

    :param x_pixels_per_inch: horizontal resolution in pixels per inch
    :type x_pixels_per_inch: float
    :param y_pixels_per_inch: vertical resolution in pixels per inch
    :type y_pixels_per_inch: float

    """
    cairo.cairo_surface_set_fallback_resolution(
        self._pointer, x_pixels_per_inch, y_pixels_per_inch)
    self._check_status()
python
def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
    """
    Set the horizontal and vertical resolution for image fallbacks.

    When certain operations aren't supported natively by a backend,
    cairo will fall back by rendering operations to an image and then
    overlaying that image onto the output. For backends that are
    natively vector-oriented, this method can be used to set the
    resolution used for these image fallbacks (larger values will
    result in more detailed images, but also larger file sizes).

    Some examples of natively vector-oriented backends are the ps,
    pdf, and svg backends.

    For backends that are natively raster-oriented, image fallbacks
    are still possible, but they are always performed at the native
    device resolution. So this method has no effect on those backends.

    .. note::

        The fallback resolution only takes effect at the time of
        completing a page (with :meth:`show_page` or :meth:`copy_page`)
        so there is currently no way to have more than one fallback
        resolution in effect on a single page.

    The default fallback resolution is 300 pixels per inch in both
    dimensions.

    :param x_pixels_per_inch: horizontal resolution in pixels per inch
    :type x_pixels_per_inch: float
    :param y_pixels_per_inch: vertical resolution in pixels per inch
    :type y_pixels_per_inch: float

    """
    cairo.cairo_surface_set_fallback_resolution(
        self._pointer, x_pixels_per_inch, y_pixels_per_inch)
    self._check_status()
[ "def", "set_fallback_resolution", "(", "self", ",", "x_pixels_per_inch", ",", "y_pixels_per_inch", ")", ":", "cairo", ".", "cairo_surface_set_fallback_resolution", "(", "self", ".", "_pointer", ",", "x_pixels_per_inch", ",", "y_pixels_per_inch", ")", "self", ".", "_check_status", "(", ")" ]
Set the horizontal and vertical resolution for image fallbacks. When certain operations aren't supported natively by a backend, cairo will fall back by rendering operations to an image and then overlaying that image onto the output. For backends that are natively vector-oriented, this method can be used to set the resolution used for these image fallbacks (larger values will result in more detailed images, but also larger file sizes). Some examples of natively vector-oriented backends are the ps, pdf, and svg backends. For backends that are natively raster-oriented, image fallbacks are still possible, but they are always performed at the native device resolution. So this method has no effect on those backends. .. note:: The fallback resolution only takes effect at the time of completing a page (with :meth:`show_page` or :meth:`copy_page`) so there is currently no way to have more than one fallback resolution in effect on a single page. The default fallback resolution is 300 pixels per inch in both dimensions. :param x_pixels_per_inch: horizontal resolution in pixels per inch :type x_pixels_per_inch: float :param y_pixels_per_inch: vertical resolution in pixels per inch :type y_pixels_per_inch: float
[ "Set", "the", "horizontal", "and", "vertical", "resolution", "for", "image", "fallbacks", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L342-L382
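A brief sketch of tuning the fallback resolution on a vector backend; the dpi values are illustrative:

import cairocffi

surface = cairocffi.PDFSurface(None, 595, 842)
# Rasterize unsupported operations at 150 dpi instead of the default
# 300 dpi, trading image detail for smaller output files:
surface.set_fallback_resolution(150, 150)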
17,608
Kozea/cairocffi
cairocffi/surfaces.py
Surface.get_font_options
def get_font_options(self):
    """Retrieves the default font rendering options for the surface.

    This allows display surfaces to report the correct subpixel order
    for rendering on them, print surfaces to disable hinting of
    metrics and so forth. The result can then be used with
    :class:`ScaledFont`.

    :returns: A new :class:`FontOptions` object.

    """
    font_options = FontOptions()
    cairo.cairo_surface_get_font_options(
        self._pointer, font_options._pointer)
    return font_options
python
def get_font_options(self):
    """Retrieves the default font rendering options for the surface.

    This allows display surfaces to report the correct subpixel order
    for rendering on them, print surfaces to disable hinting of
    metrics and so forth. The result can then be used with
    :class:`ScaledFont`.

    :returns: A new :class:`FontOptions` object.

    """
    font_options = FontOptions()
    cairo.cairo_surface_get_font_options(
        self._pointer, font_options._pointer)
    return font_options
[ "def", "get_font_options", "(", "self", ")", ":", "font_options", "=", "FontOptions", "(", ")", "cairo", ".", "cairo_surface_get_font_options", "(", "self", ".", "_pointer", ",", "font_options", ".", "_pointer", ")", "return", "font_options" ]
Retrieves the default font rendering options for the surface. This allows display surfaces to report the correct subpixel order for rendering on them, print surfaces to disable hinting of metrics and so forth. The result can then be used with :class:`ScaledFont`. :returns: A new :class:`FontOptions` object.
[ "Retrieves", "the", "default", "font", "rendering", "options", "for", "the", "surface", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L397-L411
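A short sketch querying the returned :class:`FontOptions`; the getter names follow cairocffi's FontOptions API:

import cairocffi

surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 100, 100)
options = surface.get_font_options()
print(options.get_antialias(), options.get_hint_style())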
17,609
Kozea/cairocffi
cairocffi/surfaces.py
Surface.set_device_scale
def set_device_scale(self, x_scale, y_scale):
    """Sets a scale that is multiplied to the device coordinates
    determined by the CTM when drawing to surface.

    One common use for this is to render to very high resolution
    display devices at a scale factor, so that code that assumes 1
    pixel will be a certain size will still work. Setting a
    transformation via cairo_translate() isn't sufficient to do this,
    since functions like cairo_device_to_user() will expose the hidden
    scale.

    Note that the scale affects drawing to the surface as well as
    using the surface in a source pattern.

    :param x_scale: the scale in the X direction, in device units.
    :param y_scale: the scale in the Y direction, in device units.

    *New in cairo 1.14.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_surface_set_device_scale(self._pointer, x_scale, y_scale)
    self._check_status()
python
def set_device_scale(self, x_scale, y_scale):
    """Sets a scale that is multiplied to the device coordinates
    determined by the CTM when drawing to surface.

    One common use for this is to render to very high resolution
    display devices at a scale factor, so that code that assumes 1
    pixel will be a certain size will still work. Setting a
    transformation via cairo_translate() isn't sufficient to do this,
    since functions like cairo_device_to_user() will expose the hidden
    scale.

    Note that the scale affects drawing to the surface as well as
    using the surface in a source pattern.

    :param x_scale: the scale in the X direction, in device units.
    :param y_scale: the scale in the Y direction, in device units.

    *New in cairo 1.14.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_surface_set_device_scale(self._pointer, x_scale, y_scale)
    self._check_status()
[ "def", "set_device_scale", "(", "self", ",", "x_scale", ",", "y_scale", ")", ":", "cairo", ".", "cairo_surface_set_device_scale", "(", "self", ".", "_pointer", ",", "x_scale", ",", "y_scale", ")", "self", ".", "_check_status", "(", ")" ]
Sets a scale that is multiplied to the device coordinates determined by the CTM when drawing to surface. One common use for this is to render to very high resolution display devices at a scale factor, so that code that assumes 1 pixel will be a certain size will still work. Setting a transformation via cairo_translate() isn't sufficient to do this, since functions like cairo_device_to_user() will expose the hidden scale. Note that the scale affects drawing to the surface as well as using the surface in a source pattern. :param x_scale: the scale in the X direction, in device units. :param y_scale: the scale in the Y direction, in device units. *New in cairo 1.14.* *New in cairocffi 0.9.*
[ "Sets", "a", "scale", "that", "is", "multiplied", "to", "the", "device", "coordinates", "determined", "by", "the", "CTM", "when", "drawing", "to", "surface", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L413-L435
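An illustrative HiDPI sketch for the method above:

import cairocffi

# Allocate at 2x for a high-resolution display; the device scale hides
# the factor from drawing code that thinks in logical pixels.
surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 200, 200)
surface.set_device_scale(2, 2)
context = cairocffi.Context(surface)
context.rectangle(0, 0, 100, 100)  # covers the full 200x200 buffer
context.fill()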
17,610
Kozea/cairocffi
cairocffi/surfaces.py
Surface.get_mime_data
def get_mime_data(self, mime_type):
    """Return mime data previously attached to surface
    using the specified mime type.

    :param mime_type: The MIME type of the image data.
    :type mime_type: ASCII string
    :returns:
        A CFFI buffer object, or :obj:`None`
        if no data has been attached with the given mime type.

    *New in cairo 1.10.*

    """
    buffer_address = ffi.new('unsigned char **')
    buffer_length = ffi.new('unsigned long *')
    mime_type = ffi.new('char[]', mime_type.encode('utf8'))
    cairo.cairo_surface_get_mime_data(
        self._pointer, mime_type, buffer_address, buffer_length)
    return (ffi.buffer(buffer_address[0], buffer_length[0])
            if buffer_address[0] != ffi.NULL else None)
python
def get_mime_data(self, mime_type):
    """Return mime data previously attached to surface
    using the specified mime type.

    :param mime_type: The MIME type of the image data.
    :type mime_type: ASCII string
    :returns:
        A CFFI buffer object, or :obj:`None`
        if no data has been attached with the given mime type.

    *New in cairo 1.10.*

    """
    buffer_address = ffi.new('unsigned char **')
    buffer_length = ffi.new('unsigned long *')
    mime_type = ffi.new('char[]', mime_type.encode('utf8'))
    cairo.cairo_surface_get_mime_data(
        self._pointer, mime_type, buffer_address, buffer_length)
    return (ffi.buffer(buffer_address[0], buffer_length[0])
            if buffer_address[0] != ffi.NULL else None)
[ "def", "get_mime_data", "(", "self", ",", "mime_type", ")", ":", "buffer_address", "=", "ffi", ".", "new", "(", "'unsigned char **'", ")", "buffer_length", "=", "ffi", ".", "new", "(", "'unsigned long *'", ")", "mime_type", "=", "ffi", ".", "new", "(", "'char[]'", ",", "mime_type", ".", "encode", "(", "'utf8'", ")", ")", "cairo", ".", "cairo_surface_get_mime_data", "(", "self", ".", "_pointer", ",", "mime_type", ",", "buffer_address", ",", "buffer_length", ")", "return", "(", "ffi", ".", "buffer", "(", "buffer_address", "[", "0", "]", ",", "buffer_length", "[", "0", "]", ")", "if", "buffer_address", "[", "0", "]", "!=", "ffi", ".", "NULL", "else", "None", ")" ]
Return mime data previously attached to surface using the specified mime type. :param mime_type: The MIME type of the image data. :type mime_type: ASCII string :returns: A CFFI buffer object, or :obj:`None` if no data has been attached with the given mime type. *New in cairo 1.10.*
[ "Return", "mime", "data", "previously", "attached", "to", "surface", "using", "the", "specified", "mime", "type", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L504-L523
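A round-trip sketch, assuming the companion :meth:`set_mime_data` setter from the same module; the JPEG payload is a placeholder, not a real image stream:

import cairocffi

surface = cairocffi.PDFSurface(None, 100, 100)
jpeg_bytes = b'\xff\xd8...'  # placeholder bytes
surface.set_mime_data('image/jpeg', jpeg_bytes)
data = surface.get_mime_data('image/jpeg')
if data is not None:
    print(data[:])  # CFFI buffers expose their contents via [:]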
17,611
Kozea/cairocffi
cairocffi/surfaces.py
Surface.write_to_png
def write_to_png(self, target=None):
    """Writes the contents of surface as a PNG image.

    :param target:
        A filename, a binary mode file-like object with a
        :meth:`~file.write` method, or :obj:`None`.
    :returns:
        If :obj:`target` is :obj:`None`, return the PNG contents as
        a byte string.

    """
    return_bytes = target is None
    if return_bytes:
        target = io.BytesIO()
    if hasattr(target, 'write'):
        write_func = _make_write_func(target)
        _check_status(cairo.cairo_surface_write_to_png_stream(
            self._pointer, write_func, ffi.NULL))
    else:
        _check_status(cairo.cairo_surface_write_to_png(
            self._pointer, _encode_filename(target)))
    if return_bytes:
        return target.getvalue()
python
def write_to_png(self, target=None):
    """Writes the contents of surface as a PNG image.

    :param target:
        A filename, a binary mode file-like object with a
        :meth:`~file.write` method, or :obj:`None`.
    :returns:
        If :obj:`target` is :obj:`None`, return the PNG contents as
        a byte string.

    """
    return_bytes = target is None
    if return_bytes:
        target = io.BytesIO()
    if hasattr(target, 'write'):
        write_func = _make_write_func(target)
        _check_status(cairo.cairo_surface_write_to_png_stream(
            self._pointer, write_func, ffi.NULL))
    else:
        _check_status(cairo.cairo_surface_write_to_png(
            self._pointer, _encode_filename(target)))
    if return_bytes:
        return target.getvalue()
[ "def", "write_to_png", "(", "self", ",", "target", "=", "None", ")", ":", "return_bytes", "=", "target", "is", "None", "if", "return_bytes", ":", "target", "=", "io", ".", "BytesIO", "(", ")", "if", "hasattr", "(", "target", ",", "'write'", ")", ":", "write_func", "=", "_make_write_func", "(", "target", ")", "_check_status", "(", "cairo", ".", "cairo_surface_write_to_png_stream", "(", "self", ".", "_pointer", ",", "write_func", ",", "ffi", ".", "NULL", ")", ")", "else", ":", "_check_status", "(", "cairo", ".", "cairo_surface_write_to_png", "(", "self", ".", "_pointer", ",", "_encode_filename", "(", "target", ")", ")", ")", "if", "return_bytes", ":", "return", "target", ".", "getvalue", "(", ")" ]
Writes the contents of surface as a PNG image. :param target: A filename, a binary mode file-like object with a :meth:`~file.write` method, or :obj:`None`. :returns: If :obj:`target` is :obj:`None`, return the PNG contents as a byte string.
[ "Writes", "the", "contents", "of", "surface", "as", "a", "PNG", "image", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L630-L653
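The three target kinds accepted by the method above, in one illustrative sketch (filenames are hypothetical):

import cairocffi

surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 50, 50)
png_bytes = surface.write_to_png()        # target=None: returns bytes
surface.write_to_png('out.png')           # hypothetical filename
with open('copy.png', 'wb') as file_obj:  # any object with a write()
    surface.write_to_png(file_obj)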
17,612
Kozea/cairocffi
cairocffi/surfaces.py
ImageSurface.create_from_png
def create_from_png(cls, source):
    """Decode a PNG file into a new image surface.

    :param source:
        A filename or a binary mode file-like object with a
        :meth:`~file.read` method. If you already have a byte string
        in memory, use :class:`io.BytesIO`.
    :returns: A new :class:`ImageSurface` instance.

    """
    if hasattr(source, 'read'):
        read_func = _make_read_func(source)
        pointer = cairo.cairo_image_surface_create_from_png_stream(
            read_func, ffi.NULL)
    else:
        pointer = cairo.cairo_image_surface_create_from_png(
            _encode_filename(source))
    self = object.__new__(cls)
    Surface.__init__(self, pointer)  # Skip ImageSurface.__init__
    return self
python
def create_from_png(cls, source):
    """Decode a PNG file into a new image surface.

    :param source:
        A filename or a binary mode file-like object with a
        :meth:`~file.read` method. If you already have a byte string
        in memory, use :class:`io.BytesIO`.
    :returns: A new :class:`ImageSurface` instance.

    """
    if hasattr(source, 'read'):
        read_func = _make_read_func(source)
        pointer = cairo.cairo_image_surface_create_from_png_stream(
            read_func, ffi.NULL)
    else:
        pointer = cairo.cairo_image_surface_create_from_png(
            _encode_filename(source))
    self = object.__new__(cls)
    Surface.__init__(self, pointer)  # Skip ImageSurface.__init__
    return self
[ "def", "create_from_png", "(", "cls", ",", "source", ")", ":", "if", "hasattr", "(", "source", ",", "'read'", ")", ":", "read_func", "=", "_make_read_func", "(", "source", ")", "pointer", "=", "cairo", ".", "cairo_image_surface_create_from_png_stream", "(", "read_func", ",", "ffi", ".", "NULL", ")", "else", ":", "pointer", "=", "cairo", ".", "cairo_image_surface_create_from_png", "(", "_encode_filename", "(", "source", ")", ")", "self", "=", "object", ".", "__new__", "(", "cls", ")", "Surface", ".", "__init__", "(", "self", ",", "pointer", ")", "# Skip ImageSurface.__init__", "return", "self" ]
Decode a PNG file into a new image surface. :param source: A filename or a binary mode file-like object with a :meth:`~file.read` method. If you already have a byte string in memory, use :class:`io.BytesIO`. :returns: A new :class:`ImageSurface` instance.
[ "Decode", "a", "PNG", "file", "into", "a", "new", "image", "surface", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L742-L762
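A round-trip sketch combining this classmethod with ``write_to_png`` from the earlier row; the :class:`io.BytesIO` source exercises the read-callback path shown above:

import io

import cairocffi

original = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 10, 10)
png_bytes = original.write_to_png()
decoded = cairocffi.ImageSurface.create_from_png(io.BytesIO(png_bytes))
print(decoded.get_width(), decoded.get_height())  # 10 10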
17,613
Kozea/cairocffi
cairocffi/surfaces.py
PDFSurface.add_outline
def add_outline(self, parent_id, utf8, link_attribs, flags=None):
    """Add an item to the document outline hierarchy.

    The outline has the ``utf8`` name and links to the location
    specified by ``link_attribs``. Link attributes have the same keys
    and values as the Link Tag, excluding the ``rect`` attribute. The
    item will be a child of the item with id ``parent_id``. Use
    ``PDF_OUTLINE_ROOT`` as the parent id of top level items.

    :param parent_id:
        the id of the parent item or ``PDF_OUTLINE_ROOT`` if this is
        a top level item.
    :param utf8: the name of the outline.
    :param link_attribs:
        the link attributes specifying where this outline links to.
    :param flags: outline item flags.
    :return: the id for the added item.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    if flags is None:
        flags = 0
    value = cairo.cairo_pdf_surface_add_outline(
        self._pointer, parent_id, _encode_string(utf8),
        _encode_string(link_attribs), flags)
    self._check_status()
    return value
python
def add_outline(self, parent_id, utf8, link_attribs, flags=None):
    """Add an item to the document outline hierarchy.

    The outline has the ``utf8`` name and links to the location
    specified by ``link_attribs``. Link attributes have the same keys
    and values as the Link Tag, excluding the ``rect`` attribute. The
    item will be a child of the item with id ``parent_id``. Use
    ``PDF_OUTLINE_ROOT`` as the parent id of top level items.

    :param parent_id:
        the id of the parent item or ``PDF_OUTLINE_ROOT`` if this is
        a top level item.
    :param utf8: the name of the outline.
    :param link_attribs:
        the link attributes specifying where this outline links to.
    :param flags: outline item flags.
    :return: the id for the added item.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    if flags is None:
        flags = 0
    value = cairo.cairo_pdf_surface_add_outline(
        self._pointer, parent_id, _encode_string(utf8),
        _encode_string(link_attribs), flags)
    self._check_status()
    return value
[ "def", "add_outline", "(", "self", ",", "parent_id", ",", "utf8", ",", "link_attribs", ",", "flags", "=", "None", ")", ":", "if", "flags", "is", "None", ":", "flags", "=", "0", "value", "=", "cairo", ".", "cairo_pdf_surface_add_outline", "(", "self", ".", "_pointer", ",", "parent_id", ",", "_encode_string", "(", "utf8", ")", ",", "_encode_string", "(", "link_attribs", ")", ",", "flags", ")", "self", ".", "_check_status", "(", ")", "return", "value" ]
Add an item to the document outline hierarchy. The outline has the ``utf8`` name and links to the location specified by ``link_attribs``. Link attributes have the same keys and values as the Link Tag, excluding the ``rect`` attribute. The item will be a child of the item with id ``parent_id``. Use ``PDF_OUTLINE_ROOT`` as the parent id of top level items. :param parent_id: the id of the parent item or ``PDF_OUTLINE_ROOT`` if this is a top level item. :param utf8: the name of the outline. :param link_attribs: the link attributes specifying where this outline links to. :param flags: outline item flags. :return: the id for the added item. *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Add", "an", "item", "to", "the", "document", "outline", "hierarchy", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L873-L903
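An illustrative sketch of building a two-level outline; it assumes ``PDF_OUTLINE_ROOT`` is 0 (its upstream cairo value) and uses hypothetical link attributes and filename:

import cairocffi

surface = cairocffi.PDFSurface('outlined.pdf', 595, 842)  # hypothetical
# 0 is CAIRO_PDF_OUTLINE_ROOT upstream; a PDF_OUTLINE_ROOT constant is
# assumed to be exported by cairocffi >= 0.9.
chapter = surface.add_outline(0, 'Chapter 1', 'page=1 pos=[0 842]')
surface.add_outline(chapter, 'Section 1.1', 'page=1 pos=[0 600]')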
17,614
Kozea/cairocffi
cairocffi/surfaces.py
PDFSurface.set_metadata
def set_metadata(self, metadata, utf8):
    """Sets document metadata.

    The ``PDF_METADATA_CREATE_DATE`` and ``PDF_METADATA_MOD_DATE``
    values must be in ISO-8601 format: YYYY-MM-DDThh:mm:ss. An
    optional timezone of the form "[+/-]hh:mm" or "Z" for UTC time
    can be appended. All other metadata values can be any UTF-8
    string.

    :param metadata: the metadata item to set.
    :param utf8: metadata value.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_pdf_surface_set_metadata(
        self._pointer, metadata, _encode_string(utf8))
    self._check_status()
python
def set_metadata(self, metadata, utf8):
    """Sets document metadata.

    The ``PDF_METADATA_CREATE_DATE`` and ``PDF_METADATA_MOD_DATE``
    values must be in ISO-8601 format: YYYY-MM-DDThh:mm:ss. An
    optional timezone of the form "[+/-]hh:mm" or "Z" for UTC time
    can be appended. All other metadata values can be any UTF-8
    string.

    :param metadata: the metadata item to set.
    :param utf8: metadata value.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_pdf_surface_set_metadata(
        self._pointer, metadata, _encode_string(utf8))
    self._check_status()
[ "def", "set_metadata", "(", "self", ",", "metadata", ",", "utf8", ")", ":", "cairo", ".", "cairo_pdf_surface_set_metadata", "(", "self", ".", "_pointer", ",", "metadata", ",", "_encode_string", "(", "utf8", ")", ")", "self", ".", "_check_status", "(", ")" ]
Sets document metadata. The ``PDF_METADATA_CREATE_DATE`` and ``PDF_METADATA_MOD_DATE`` values must be in ISO-8601 format: YYYY-MM-DDThh:mm:ss. An optional timezone of the form "[+/-]hh:mm" or "Z" for UTC time can be appended. All other metadata values can be any UTF-8 string. :param metadata: the metadata item to set. :param utf8: metadata value. *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Sets", "document", "metadata", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L905-L923
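A sketch of setting metadata, assuming ``PDF_METADATA_TITLE`` and ``PDF_METADATA_CREATE_DATE`` constants are exported by cairocffi >= 0.9 alongside this method; check the constants module of your version:

import cairocffi

surface = cairocffi.PDFSurface(None, 595, 842)
# Constant names are assumptions based on the upstream enum names:
surface.set_metadata(cairocffi.PDF_METADATA_TITLE, 'Example document')
surface.set_metadata(
    cairocffi.PDF_METADATA_CREATE_DATE, '2019-01-01T00:00:00Z')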
17,615
Kozea/cairocffi
cairocffi/surfaces.py
PDFSurface.set_thumbnail_size
def set_thumbnail_size(self, width, height):
    """Set thumbnail image size for the current and all subsequent
    pages.

    Setting a width or height of 0 disables thumbnails for the
    current and subsequent pages.

    :param width: thumbnail width.
    :param height: thumbnail height.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_pdf_surface_set_thumbnail_size(
        self._pointer, width, height)
python
def set_thumbnail_size(self, width, height):
    """Set thumbnail image size for the current and all subsequent
    pages.

    Setting a width or height of 0 disables thumbnails for the
    current and subsequent pages.

    :param width: thumbnail width.
    :param height: thumbnail height.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_pdf_surface_set_thumbnail_size(
        self._pointer, width, height)
[ "def", "set_thumbnail_size", "(", "self", ",", "width", ",", "height", ")", ":", "cairo", ".", "cairo_pdf_surface_set_thumbnail_size", "(", "self", ".", "_pointer", ",", "width", ",", "height", ")" ]
Set thumbnail image size for the current and all subsequent pages. Setting a width or height of 0 disables thumbnails for the current and subsequent pages. :param width: thumbnail width. :param height: thumbnail height. *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Set", "thumbnail", "image", "size", "for", "the", "current", "and", "all", "subsequent", "pages", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L938-L953
17,616
Kozea/cairocffi
cairocffi/surfaces.py
PSSurface.dsc_comment
def dsc_comment(self, comment):
    """
    Emit a comment into the PostScript output for the given surface.

    The comment is expected to conform to the PostScript Language
    Document Structuring Conventions (DSC). Please see that manual
    for details on the available comments and their meanings. In
    particular, the ``%%IncludeFeature`` comment allows a
    device-independent means of controlling printer device features.
    So the PostScript Printer Description Files Specification will
    also be a useful reference.

    The comment string must begin with a percent character (%) and
    the total length of the string (including any initial percent
    characters) must not exceed 255 bytes. Violating either of these
    conditions will place surface into an error state. But beyond
    these two conditions, this method will not enforce conformance
    of the comment with any particular specification.

    The comment string should not have a trailing newline.

    The DSC specifies different sections in which particular comments
    can appear. This method provides for comments to be emitted
    within three sections: the header, the Setup section, and the
    PageSetup section. Comments appearing in the first two sections
    apply to the entire document while comments in the BeginPageSetup
    section apply only to a single page.

    For comments to appear in the header section, this method should
    be called after the surface is created, but before a call to
    :meth:`dsc_begin_setup`.

    For comments to appear in the Setup section, this method should
    be called after a call to :meth:`dsc_begin_setup` but before a
    call to :meth:`dsc_begin_page_setup`.

    For comments to appear in the PageSetup section, this method
    should be called after a call to :meth:`dsc_begin_page_setup`.

    Note that it is only necessary to call
    :meth:`dsc_begin_page_setup` for the first page of any surface.
    After a call to :meth:`~Surface.show_page` or
    :meth:`~Surface.copy_page` comments are unambiguously directed
    to the PageSetup section of the current page. But it doesn't
    hurt to call this method at the beginning of every page as that
    consistency may make the calling code simpler.

    As a final note, cairo automatically generates several comments
    on its own. As such, applications must not manually generate any
    of the following comments:

    Header section: ``%!PS-Adobe-3.0``, ``%%Creator``,
    ``%%CreationDate``, ``%%Pages``, ``%%BoundingBox``,
    ``%%DocumentData``, ``%%LanguageLevel``, ``%%EndComments``.

    Setup section: ``%%BeginSetup``, ``%%EndSetup``.

    PageSetup section: ``%%BeginPageSetup``, ``%%PageBoundingBox``,
    ``%%EndPageSetup``.

    Other sections: ``%%BeginProlog``, ``%%EndProlog``, ``%%Page``,
    ``%%Trailer``, ``%%EOF``.

    """
    cairo.cairo_ps_surface_dsc_comment(
        self._pointer, _encode_string(comment))
    self._check_status()
python
def dsc_comment(self, comment):
    """
    Emit a comment into the PostScript output for the given surface.

    The comment is expected to conform to the PostScript Language
    Document Structuring Conventions (DSC). Please see that manual
    for details on the available comments and their meanings. In
    particular, the ``%%IncludeFeature`` comment allows a
    device-independent means of controlling printer device features.
    So the PostScript Printer Description Files Specification will
    also be a useful reference.

    The comment string must begin with a percent character (%) and
    the total length of the string (including any initial percent
    characters) must not exceed 255 bytes. Violating either of these
    conditions will place surface into an error state. But beyond
    these two conditions, this method will not enforce conformance
    of the comment with any particular specification.

    The comment string should not have a trailing newline.

    The DSC specifies different sections in which particular comments
    can appear. This method provides for comments to be emitted
    within three sections: the header, the Setup section, and the
    PageSetup section. Comments appearing in the first two sections
    apply to the entire document while comments in the BeginPageSetup
    section apply only to a single page.

    For comments to appear in the header section, this method should
    be called after the surface is created, but before a call to
    :meth:`dsc_begin_setup`.

    For comments to appear in the Setup section, this method should
    be called after a call to :meth:`dsc_begin_setup` but before a
    call to :meth:`dsc_begin_page_setup`.

    For comments to appear in the PageSetup section, this method
    should be called after a call to :meth:`dsc_begin_page_setup`.

    Note that it is only necessary to call
    :meth:`dsc_begin_page_setup` for the first page of any surface.
    After a call to :meth:`~Surface.show_page` or
    :meth:`~Surface.copy_page` comments are unambiguously directed
    to the PageSetup section of the current page. But it doesn't
    hurt to call this method at the beginning of every page as that
    consistency may make the calling code simpler.

    As a final note, cairo automatically generates several comments
    on its own. As such, applications must not manually generate any
    of the following comments:

    Header section: ``%!PS-Adobe-3.0``, ``%%Creator``,
    ``%%CreationDate``, ``%%Pages``, ``%%BoundingBox``,
    ``%%DocumentData``, ``%%LanguageLevel``, ``%%EndComments``.

    Setup section: ``%%BeginSetup``, ``%%EndSetup``.

    PageSetup section: ``%%BeginPageSetup``, ``%%PageBoundingBox``,
    ``%%EndPageSetup``.

    Other sections: ``%%BeginProlog``, ``%%EndProlog``, ``%%Page``,
    ``%%Trailer``, ``%%EOF``.

    """
    cairo.cairo_ps_surface_dsc_comment(
        self._pointer, _encode_string(comment))
    self._check_status()
[ "def", "dsc_comment", "(", "self", ",", "comment", ")", ":", "cairo", ".", "cairo_ps_surface_dsc_comment", "(", "self", ".", "_pointer", ",", "_encode_string", "(", "comment", ")", ")", "self", ".", "_check_status", "(", ")" ]
Emit a comment into the PostScript output for the given surface. The comment is expected to conform to the PostScript Language Document Structuring Conventions (DSC). Please see that manual for details on the available comments and their meanings. In particular, the ``%%IncludeFeature`` comment allows a device-independent means of controlling printer device features. So the PostScript Printer Description Files Specification will also be a useful reference. The comment string must begin with a percent character (%) and the total length of the string (including any initial percent characters) must not exceed 255 bytes. Violating either of these conditions will place surface into an error state. But beyond these two conditions, this method will not enforce conformance of the comment with any particular specification. The comment string should not have a trailing newline. The DSC specifies different sections in which particular comments can appear. This method provides for comments to be emitted within three sections: the header, the Setup section, and the PageSetup section. Comments appearing in the first two sections apply to the entire document while comments in the BeginPageSetup section apply only to a single page. For comments to appear in the header section, this method should be called after the surface is created, but before a call to :meth:`dsc_begin_setup`. For comments to appear in the Setup section, this method should be called after a call to :meth:`dsc_begin_setup` but before a call to :meth:`dsc_begin_page_setup`. For comments to appear in the PageSetup section, this method should be called after a call to :meth:`dsc_begin_page_setup`. Note that it is only necessary to call :meth:`dsc_begin_page_setup` for the first page of any surface. After a call to :meth:`~Surface.show_page` or :meth:`~Surface.copy_page` comments are unambiguously directed to the PageSetup section of the current page. But it doesn't hurt to call this method at the beginning of every page as that consistency may make the calling code simpler. As a final note, cairo automatically generates several comments on its own. As such, applications must not manually generate any of the following comments: Header section: ``%!PS-Adobe-3.0``, ``%%Creator``, ``%%CreationDate``, ``%%Pages``, ``%%BoundingBox``, ``%%DocumentData``, ``%%LanguageLevel``, ``%%EndComments``. Setup section: ``%%BeginSetup``, ``%%EndSetup``. PageSetup section: ``%%BeginPageSetup``, ``%%PageBoundingBox``, ``%%EndPageSetup``. Other sections: ``%%BeginProlog``, ``%%EndProlog``, ``%%Page``, ``%%Trailer``, ``%%EOF``.
[ "Emit", "a", "comment", "into", "the", "PostScript", "output", "for", "the", "given", "surface", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L1047-L1123
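A sketch following the section flow described above, with illustrative comment values; it assumes the companion :meth:`dsc_begin_setup` and :meth:`dsc_begin_page_setup` methods referenced in the docstring:

import cairocffi

surface = cairocffi.PSSurface(None, 595, 842)
surface.dsc_comment('%%Title: Example document')  # header section
surface.dsc_begin_setup()
surface.dsc_comment('%%IncludeFeature: *MediaColor White')  # Setup
surface.dsc_begin_page_setup()
surface.dsc_comment('%%IncludeFeature: *PageSize A4')  # PageSetup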
17,617
Kozea/cairocffi
cairocffi/surfaces.py
SVGSurface.set_document_unit
def set_document_unit(self, unit):
    """Use specified unit for width and height of generated SVG file.

    See ``SVG_UNIT_*`` enumerated values for a list of available
    unit values that can be used here.

    This function can be called at any time before generating the
    SVG file.

    However, to minimize the risk of ambiguities it's recommended to
    call it before any drawing operations have been performed on the
    given surface, to make it clearer what the unit used in the
    drawing operations is. The simplest way to do this is to call
    this function immediately after creating the SVG surface.

    Note that if this function is never called, the default unit for
    SVG documents generated by cairo will be "pt". This is for
    historical reasons.

    :param unit: SVG unit.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_svg_surface_set_document_unit(self._pointer, unit)
    self._check_status()
python
def set_document_unit(self, unit):
    """Use specified unit for width and height of generated SVG file.

    See ``SVG_UNIT_*`` enumerated values for a list of available
    unit values that can be used here.

    This function can be called at any time before generating the
    SVG file.

    However, to minimize the risk of ambiguities it's recommended to
    call it before any drawing operations have been performed on the
    given surface, to make it clearer what the unit used in the
    drawing operations is. The simplest way to do this is to call
    this function immediately after creating the SVG surface.

    Note that if this function is never called, the default unit for
    SVG documents generated by cairo will be "pt". This is for
    historical reasons.

    :param unit: SVG unit.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_svg_surface_set_document_unit(self._pointer, unit)
    self._check_status()
[ "def", "set_document_unit", "(", "self", ",", "unit", ")", ":", "cairo", ".", "cairo_svg_surface_set_document_unit", "(", "self", ".", "_pointer", ",", "unit", ")", "self", ".", "_check_status", "(", ")" ]
Use specified unit for width and height of generated SVG file. See ``SVG_UNIT_*`` enumerated values for a list of available unit values that can be used here. This function can be called at any time before generating the SVG file. However, to minimize the risk of ambiguities it's recommended to call it before any drawing operations have been performed on the given surface, to make it clearer what the unit used in the drawing operations is. The simplest way to do this is to call this function immediately after creating the SVG surface. Note that if this function is never called, the default unit for SVG documents generated by cairo will be "pt". This is for historical reasons. :param unit: SVG unit. *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Use", "specified", "unit", "for", "width", "and", "height", "of", "generated", "SVG", "file", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L1309-L1336
17,618
Kozea/cairocffi
cairocffi/surfaces.py
SVGSurface.get_document_unit
def get_document_unit(self):
    """Get the unit of the SVG surface.

    If the surface passed as an argument is not a SVG surface, the
    function sets the error status to
    ``STATUS_SURFACE_TYPE_MISMATCH`` and returns :ref:`SVG_UNIT_USER`.

    :return: The SVG unit of the SVG surface.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    unit = cairo.cairo_svg_surface_get_document_unit(self._pointer)
    self._check_status()
    return unit
python
def get_document_unit(self):
    """Get the unit of the SVG surface.

    If the surface passed as an argument is not a SVG surface, the
    function sets the error status to
    ``STATUS_SURFACE_TYPE_MISMATCH`` and returns :ref:`SVG_UNIT_USER`.

    :return: The SVG unit of the SVG surface.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    unit = cairo.cairo_svg_surface_get_document_unit(self._pointer)
    self._check_status()
    return unit
[ "def", "get_document_unit", "(", "self", ")", ":", "unit", "=", "cairo", ".", "cairo_svg_surface_get_document_unit", "(", "self", ".", "_pointer", ")", "self", ".", "_check_status", "(", ")", "return", "unit" ]
Get the unit of the SVG surface. If the surface passed as an argument is not an SVG surface, the function sets the error status to ``STATUS_SURFACE_TYPE_MISMATCH`` and returns ``SVG_UNIT_USER``. :return: The SVG unit of the SVG surface. *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Get", "the", "unit", "of", "the", "SVG", "surface", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L1338-L1354
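Reading the unit back, with the same assumptions as the previous sketch plus the assumption that SVGSurface accepts a None target for a surface that writes nowhere; a surface that never saw set_document_unit reports the historical "pt" default:

import cairocffi

surface = cairocffi.SVGSurface(None, 100, 100)                # no output target
assert surface.get_document_unit() == cairocffi.SVG_UNIT_PT   # historical default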
17,619
Kozea/cairocffi
cairocffi/surfaces.py
RecordingSurface.get_extents
def get_extents(self): """Return the extents of the recording-surface. :returns: A ``(x, y, width, height)`` tuple of floats, or :obj:`None` if the surface is unbounded. *New in cairo 1.12* """ extents = ffi.new('cairo_rectangle_t *') if cairo.cairo_recording_surface_get_extents(self._pointer, extents): return (extents.x, extents.y, extents.width, extents.height)
python
def get_extents(self): """Return the extents of the recording-surface. :returns: A ``(x, y, width, height)`` tuple of floats, or :obj:`None` if the surface is unbounded. *New in cairo 1.12* """ extents = ffi.new('cairo_rectangle_t *') if cairo.cairo_recording_surface_get_extents(self._pointer, extents): return (extents.x, extents.y, extents.width, extents.height)
[ "def", "get_extents", "(", "self", ")", ":", "extents", "=", "ffi", ".", "new", "(", "'cairo_rectangle_t *'", ")", "if", "cairo", ".", "cairo_recording_surface_get_extents", "(", "self", ".", "_pointer", ",", "extents", ")", ":", "return", "(", "extents", ".", "x", ",", "extents", ".", "y", ",", "extents", ".", "width", ",", "extents", ".", "height", ")" ]
Return the extents of the recording-surface. :returns: A ``(x, y, width, height)`` tuple of floats, or :obj:`None` if the surface is unbounded. *New in cairo 1.12*
[ "Return", "the", "extents", "of", "the", "recording", "-", "surface", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L1430-L1442
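A short sketch covering both return shapes of get_extents, assuming the CONTENT_COLOR_ALPHA constant at package level:

import cairocffi

bounded = cairocffi.RecordingSurface(
    cairocffi.CONTENT_COLOR_ALPHA, (0, 0, 200, 100))
assert bounded.get_extents() == (0, 0, 200, 100)  # echoes the creation rectangle

unbounded = cairocffi.RecordingSurface(cairocffi.CONTENT_COLOR_ALPHA, None)
assert unbounded.get_extents() is None            # unbounded: None, per the docstring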
17,620
Kozea/cairocffi
cairocffi/patterns.py
Gradient.add_color_stop_rgba
def add_color_stop_rgba(self, offset, red, green, blue, alpha=1): """Adds a translucent color stop to a gradient pattern. The offset specifies the location along the gradient's control vector. For example, a linear gradient's control vector is from (x0,y0) to (x1,y1) while a radial gradient's control vector is from any point on the start circle to the corresponding point on the end circle. If two (or more) stops are specified with identical offset values, they will be sorted according to the order in which the stops are added (stops added earlier before stops added later). This can be useful for reliably making sharp color transitions instead of the typical blend. The color components and offset are in the range 0 to 1. If the values passed in are outside that range, they will be clamped. :param offset: Location along the gradient's control vector :param red: Red component of the color. :param green: Green component of the color. :param blue: Blue component of the color. :param alpha: Alpha component of the color. 1 (the default) is opaque, 0 fully transparent. :type offset: float :type red: float :type green: float :type blue: float :type alpha: float """ cairo.cairo_pattern_add_color_stop_rgba( self._pointer, offset, red, green, blue, alpha) self._check_status()
python
def add_color_stop_rgba(self, offset, red, green, blue, alpha=1): """Adds a translucent color stop to a gradient pattern. The offset specifies the location along the gradient's control vector. For example, a linear gradient's control vector is from (x0,y0) to (x1,y1) while a radial gradient's control vector is from any point on the start circle to the corresponding point on the end circle. If two (or more) stops are specified with identical offset values, they will be sorted according to the order in which the stops are added (stops added earlier before stops added later). This can be useful for reliably making sharp color transitions instead of the typical blend. The color components and offset are in the range 0 to 1. If the values passed in are outside that range, they will be clamped. :param offset: Location along the gradient's control vector :param red: Red component of the color. :param green: Green component of the color. :param blue: Blue component of the color. :param alpha: Alpha component of the color. 1 (the default) is opaque, 0 fully transparent. :type offset: float :type red: float :type green: float :type blue: float :type alpha: float """ cairo.cairo_pattern_add_color_stop_rgba( self._pointer, offset, red, green, blue, alpha) self._check_status()
[ "def", "add_color_stop_rgba", "(", "self", ",", "offset", ",", "red", ",", "green", ",", "blue", ",", "alpha", "=", "1", ")", ":", "cairo", ".", "cairo_pattern_add_color_stop_rgba", "(", "self", ".", "_pointer", ",", "offset", ",", "red", ",", "green", ",", "blue", ",", "alpha", ")", "self", ".", "_check_status", "(", ")" ]
Adds a translucent color stop to a gradient pattern. The offset specifies the location along the gradient's control vector. For example, a linear gradient's control vector is from (x0,y0) to (x1,y1) while a radial gradient's control vector is from any point on the start circle to the corresponding point on the end circle. If two (or more) stops are specified with identical offset values, they will be sorted according to the order in which the stops are added (stops added earlier before stops added later). This can be useful for reliably making sharp color transitions instead of the typical blend. The color components and offset are in the range 0 to 1. If the values passed in are outside that range, they will be clamped. :param offset: Location along the gradient's control vector :param red: Red component of the color. :param green: Green component of the color. :param blue: Blue component of the color. :param alpha: Alpha component of the color. 1 (the default) is opaque, 0 fully transparent. :type offset: float :type red: float :type green: float :type blue: float :type alpha: float
[ "Adds", "a", "translucent", "color", "stop", "to", "a", "gradient", "pattern", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/patterns.py#L219-L255
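A gradient sketch exercising add_color_stop_rgba, including the alpha default described above:

import cairocffi

surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 100, 100)
context = cairocffi.Context(surface)

gradient = cairocffi.LinearGradient(0, 0, 0, 100)  # control vector: top edge to bottom edge
gradient.add_color_stop_rgba(0.0, 1, 0, 0, 1)      # opaque red at the start
gradient.add_color_stop_rgba(0.5, 0, 1, 0, 0.5)    # half-transparent green midway
gradient.add_color_stop_rgba(1.0, 0, 0, 1)         # alpha omitted: defaults to 1 (opaque blue)
context.set_source(gradient)
context.paint()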
17,621
Kozea/cairocffi
cairocffi/fonts.py
_encode_string
def _encode_string(string): """Return a byte string, encoding Unicode with UTF-8.""" if not isinstance(string, bytes): string = string.encode('utf8') return ffi.new('char[]', string)
python
def _encode_string(string): """Return a byte string, encoding Unicode with UTF-8.""" if not isinstance(string, bytes): string = string.encode('utf8') return ffi.new('char[]', string)
[ "def", "_encode_string", "(", "string", ")", ":", "if", "not", "isinstance", "(", "string", ",", "bytes", ")", ":", "string", "=", "string", ".", "encode", "(", "'utf8'", ")", "return", "ffi", ".", "new", "(", "'char[]'", ",", "string", ")" ]
Return a byte string, encoding Unicode with UTF-8.
[ "Return", "a", "byte", "string", "encoding", "Unicode", "with", "UTF", "-", "8", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L16-L20
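A doctest-style sketch of the helper's behaviour; ffi here is the same module-level cffi handle that fonts.py itself uses:

from cairocffi.fonts import _encode_string, ffi

assert ffi.string(_encode_string('café')) == 'café'.encode('utf8')  # str is UTF-8-encoded
assert ffi.string(_encode_string(b'abc')) == b'abc'                 # bytes pass through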
17,622
Kozea/cairocffi
cairocffi/fonts.py
ScaledFont.text_to_glyphs
def text_to_glyphs(self, x, y, text, with_clusters): """Converts a string of text to a list of glyphs, optionally with cluster mapping, that can be used to render later using this scaled font. The output values can be readily passed to :meth:`Context.show_text_glyphs`, :meth:`Context.show_glyphs` or related methods, assuming that the exact same :class:`ScaledFont` is used for the operation. :type x: float :type y: float :type with_clusters: bool :param x: X position to place first glyph. :param y: Y position to place first glyph. :param text: The text to convert, as a Unicode or UTF-8 string. :param with_clusters: Whether to compute the cluster mapping. :returns: A ``(glyphs, clusters, clusters_flags)`` tuple if :obj:`with_clusters` is true, otherwise just :obj:`glyphs`. See :meth:`Context.show_text_glyphs` for the data structure. .. note:: This method is part of what the cairo designers call the "toy" text API. It is convenient for short demos and simple programs, but it is not expected to be adequate for serious text-using applications. See :ref:`fonts` for details and :meth:`Context.show_glyphs` for the "real" text display API in cairo. """ glyphs = ffi.new('cairo_glyph_t **', ffi.NULL) num_glyphs = ffi.new('int *') if with_clusters: clusters = ffi.new('cairo_text_cluster_t **', ffi.NULL) num_clusters = ffi.new('int *') cluster_flags = ffi.new('cairo_text_cluster_flags_t *') else: clusters = ffi.NULL num_clusters = ffi.NULL cluster_flags = ffi.NULL # TODO: Pass len_utf8 explicitly to support NULL bytes? status = cairo.cairo_scaled_font_text_to_glyphs( self._pointer, x, y, _encode_string(text), -1, glyphs, num_glyphs, clusters, num_clusters, cluster_flags) glyphs = ffi.gc(glyphs[0], _keepref(cairo, cairo.cairo_glyph_free)) if with_clusters: clusters = ffi.gc( clusters[0], _keepref(cairo, cairo.cairo_text_cluster_free)) _check_status(status) glyphs = [ (glyph.index, glyph.x, glyph.y) for i in range(num_glyphs[0]) for glyph in [glyphs[i]]] if with_clusters: clusters = [ (cluster.num_bytes, cluster.num_glyphs) for i in range(num_clusters[0]) for cluster in [clusters[i]]] return glyphs, clusters, cluster_flags[0] else: return glyphs
python
def text_to_glyphs(self, x, y, text, with_clusters): """Converts a string of text to a list of glyphs, optionally with cluster mapping, that can be used to render later using this scaled font. The output values can be readily passed to :meth:`Context.show_text_glyphs`, :meth:`Context.show_glyphs` or related methods, assuming that the exact same :class:`ScaledFont` is used for the operation. :type x: float :type y: float :type with_clusters: bool :param x: X position to place first glyph. :param y: Y position to place first glyph. :param text: The text to convert, as a Unicode or UTF-8 string. :param with_clusters: Whether to compute the cluster mapping. :returns: A ``(glyphs, clusters, clusters_flags)`` tuple if :obj:`with_clusters` is true, otherwise just :obj:`glyphs`. See :meth:`Context.show_text_glyphs` for the data structure. .. note:: This method is part of what the cairo designers call the "toy" text API. It is convenient for short demos and simple programs, but it is not expected to be adequate for serious text-using applications. See :ref:`fonts` for details and :meth:`Context.show_glyphs` for the "real" text display API in cairo. """ glyphs = ffi.new('cairo_glyph_t **', ffi.NULL) num_glyphs = ffi.new('int *') if with_clusters: clusters = ffi.new('cairo_text_cluster_t **', ffi.NULL) num_clusters = ffi.new('int *') cluster_flags = ffi.new('cairo_text_cluster_flags_t *') else: clusters = ffi.NULL num_clusters = ffi.NULL cluster_flags = ffi.NULL # TODO: Pass len_utf8 explicitly to support NULL bytes? status = cairo.cairo_scaled_font_text_to_glyphs( self._pointer, x, y, _encode_string(text), -1, glyphs, num_glyphs, clusters, num_clusters, cluster_flags) glyphs = ffi.gc(glyphs[0], _keepref(cairo, cairo.cairo_glyph_free)) if with_clusters: clusters = ffi.gc( clusters[0], _keepref(cairo, cairo.cairo_text_cluster_free)) _check_status(status) glyphs = [ (glyph.index, glyph.x, glyph.y) for i in range(num_glyphs[0]) for glyph in [glyphs[i]]] if with_clusters: clusters = [ (cluster.num_bytes, cluster.num_glyphs) for i in range(num_clusters[0]) for cluster in [clusters[i]]] return glyphs, clusters, cluster_flags[0] else: return glyphs
[ "def", "text_to_glyphs", "(", "self", ",", "x", ",", "y", ",", "text", ",", "with_clusters", ")", ":", "glyphs", "=", "ffi", ".", "new", "(", "'cairo_glyph_t **'", ",", "ffi", ".", "NULL", ")", "num_glyphs", "=", "ffi", ".", "new", "(", "'int *'", ")", "if", "with_clusters", ":", "clusters", "=", "ffi", ".", "new", "(", "'cairo_text_cluster_t **'", ",", "ffi", ".", "NULL", ")", "num_clusters", "=", "ffi", ".", "new", "(", "'int *'", ")", "cluster_flags", "=", "ffi", ".", "new", "(", "'cairo_text_cluster_flags_t *'", ")", "else", ":", "clusters", "=", "ffi", ".", "NULL", "num_clusters", "=", "ffi", ".", "NULL", "cluster_flags", "=", "ffi", ".", "NULL", "# TODO: Pass len_utf8 explicitly to support NULL bytes?", "status", "=", "cairo", ".", "cairo_scaled_font_text_to_glyphs", "(", "self", ".", "_pointer", ",", "x", ",", "y", ",", "_encode_string", "(", "text", ")", ",", "-", "1", ",", "glyphs", ",", "num_glyphs", ",", "clusters", ",", "num_clusters", ",", "cluster_flags", ")", "glyphs", "=", "ffi", ".", "gc", "(", "glyphs", "[", "0", "]", ",", "_keepref", "(", "cairo", ",", "cairo", ".", "cairo_glyph_free", ")", ")", "if", "with_clusters", ":", "clusters", "=", "ffi", ".", "gc", "(", "clusters", "[", "0", "]", ",", "_keepref", "(", "cairo", ",", "cairo", ".", "cairo_text_cluster_free", ")", ")", "_check_status", "(", "status", ")", "glyphs", "=", "[", "(", "glyph", ".", "index", ",", "glyph", ".", "x", ",", "glyph", ".", "y", ")", "for", "i", "in", "range", "(", "num_glyphs", "[", "0", "]", ")", "for", "glyph", "in", "[", "glyphs", "[", "i", "]", "]", "]", "if", "with_clusters", ":", "clusters", "=", "[", "(", "cluster", ".", "num_bytes", ",", "cluster", ".", "num_glyphs", ")", "for", "i", "in", "range", "(", "num_clusters", "[", "0", "]", ")", "for", "cluster", "in", "[", "clusters", "[", "i", "]", "]", "]", "return", "glyphs", ",", "clusters", ",", "cluster_flags", "[", "0", "]", "else", ":", "return", "glyphs" ]
Converts a string of text to a list of glyphs, optionally with cluster mapping, that can be used to render later using this scaled font. The output values can be readily passed to :meth:`Context.show_text_glyphs`, :meth:`Context.show_glyphs` or related methods, assuming that the exact same :class:`ScaledFont` is used for the operation. :type x: float :type y: float :type with_clusters: bool :param x: X position to place first glyph. :param y: Y position to place first glyph. :param text: The text to convert, as a Unicode or UTF-8 string. :param with_clusters: Whether to compute the cluster mapping. :returns: A ``(glyphs, clusters, clusters_flags)`` tuple if :obj:`with_clusters` is true, otherwise just :obj:`glyphs`. See :meth:`Context.show_text_glyphs` for the data structure. .. note:: This method is part of what the cairo designers call the "toy" text API. It is convenient for short demos and simple programs, but it is not expected to be adequate for serious text-using applications. See :ref:`fonts` for details and :meth:`Context.show_glyphs` for the "real" text display API in cairo.
[ "Converts", "a", "string", "of", "text", "to", "a", "list", "of", "glyphs", "optionally", "with", "cluster", "mapping", "that", "can", "be", "used", "to", "render", "later", "using", "this", "scaled", "font", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L301-L366
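A usage sketch feeding the output straight back into Context.show_text_glyphs, as the docstring suggests; it assumes show_text_glyphs accepts the (text, glyphs, clusters, cluster_flags) shape described there:

import cairocffi

surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 300, 60)
context = cairocffi.Context(surface)
context.select_font_face('sans')
context.set_font_size(20)
scaled_font = context.get_scaled_font()

# glyphs: (index, x, y) tuples; clusters: (num_bytes, num_glyphs) tuples.
glyphs, clusters, cluster_flags = scaled_font.text_to_glyphs(
    10, 40, 'cairo', with_clusters=True)
context.show_text_glyphs('cairo', glyphs, clusters, cluster_flags)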
17,623
Kozea/cairocffi
cairocffi/fonts.py
FontOptions.set_variations
def set_variations(self, variations): """Sets the OpenType font variations for the font options object. Font variations are specified as a string with a format that is similar to the CSS font-variation-settings. The string contains a comma-separated list of axis assignments, where each assignment consists of a 4-character axis name and a value, separated by whitespace and an optional equals sign. :param variations: the new font variations, or ``None``. *New in cairo 1.16.* *New in cairocffi 0.9.* """ if variations is None: variations = ffi.NULL else: variations = _encode_string(variations) cairo.cairo_font_options_set_variations(self._pointer, variations) self._check_status()
python
def set_variations(self, variations): """Sets the OpenType font variations for the font options object. Font variations are specified as a string with a format that is similar to the CSS font-variation-settings. The string contains a comma-separated list of axis assignments, where each assignment consists of a 4-character axis name and a value, separated by whitespace and an optional equals sign. :param variations: the new font variations, or ``None``. *New in cairo 1.16.* *New in cairocffi 0.9.* """ if variations is None: variations = ffi.NULL else: variations = _encode_string(variations) cairo.cairo_font_options_set_variations(self._pointer, variations) self._check_status()
[ "def", "set_variations", "(", "self", ",", "variations", ")", ":", "if", "variations", "is", "None", ":", "variations", "=", "ffi", ".", "NULL", "else", ":", "variations", "=", "_encode_string", "(", "variations", ")", "cairo", ".", "cairo_font_options_set_variations", "(", "self", ".", "_pointer", ",", "variations", ")", "self", ".", "_check_status", "(", ")" ]
Sets the OpenType font variations for the font options object. Font variations are specified as a string with a format that is similar to the CSS font-variation-settings. The string contains a comma-separated list of axis assignments, where each assignment consists of a 4-character axis name and a value, separated by whitespace and an optional equals sign. :param variations: the new font variations, or ``None``. *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Sets", "the", "OpenType", "font", "variations", "for", "the", "font", "options", "object", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L494-L515
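A sketch of the accepted string format, comma-separated 4-character axis tags with values and an optional equals sign:

import cairocffi

options = cairocffi.FontOptions()
options.set_variations('wght=700, wdth=120')  # two axis assignments
options.set_variations('wght 700')            # whitespace instead of '=', per the docstring
options.set_variations(None)                  # None clears the variations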
17,624
Kozea/cairocffi
cairocffi/fonts.py
FontOptions.get_variations
def get_variations(self): """Gets the OpenType font variations for the font options object. See :meth:`set_variations` for details about the string format. :return: the font variations for the font options object. The returned string belongs to the ``options`` and must not be modified. It is valid until either the font options object is destroyed or the font variations in this object are modified with :meth:`set_variations`. *New in cairo 1.16.* *New in cairocffi 0.9.* """ variations = cairo.cairo_font_options_get_variations(self._pointer) if variations != ffi.NULL: return ffi.string(variations).decode('utf8', 'replace')
python
def get_variations(self): """Gets the OpenType font variations for the font options object. See :meth:`set_variations` for details about the string format. :return: the font variations for the font options object. The returned string belongs to the ``options`` and must not be modified. It is valid until either the font options object is destroyed or the font variations in this object are modified with :meth:`set_variations`. *New in cairo 1.16.* *New in cairocffi 0.9.* """ variations = cairo.cairo_font_options_get_variations(self._pointer) if variations != ffi.NULL: return ffi.string(variations).decode('utf8', 'replace')
[ "def", "get_variations", "(", "self", ")", ":", "variations", "=", "cairo", ".", "cairo_font_options_get_variations", "(", "self", ".", "_pointer", ")", "if", "variations", "!=", "ffi", ".", "NULL", ":", "return", "ffi", ".", "string", "(", "variations", ")", ".", "decode", "(", "'utf8'", ",", "'replace'", ")" ]
Gets the OpenType font variations for the font options object. See :meth:`set_variations` for details about the string format. :return: the font variations for the font options object. The returned string belongs to the ``options`` and must not be modified. It is valid until either the font options object is destroyed or the font variations in this object are modified with :meth:`set_variations`. *New in cairo 1.16.* *New in cairocffi 0.9.*
[ "Gets", "the", "OpenType", "font", "variations", "for", "the", "font", "options", "object", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L517-L535
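Paired with the setter, the getter returns None until something is set (the NULL branch in the code above):

import cairocffi

options = cairocffi.FontOptions()
assert options.get_variations() is None   # nothing set yet: cairo returns NULL
options.set_variations('wght=200')
print(options.get_variations())           # the stored variations string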
17,625
Kozea/cairocffi
cairocffi/pixbuf.py
decode_to_pixbuf
def decode_to_pixbuf(image_data, width=None, height=None): """Decode an image from memory with GDK-PixBuf. The file format is detected automatically. :param image_data: A byte string :param width: Integer width in pixels or None :param height: Integer height in pixels or None :returns: A tuple of a new :class:`Pixbuf` object and the name of the detected image format. :raises: :exc:`ImageLoadingError` if the image data is invalid or in an unsupported format. """ loader = ffi.gc( gdk_pixbuf.gdk_pixbuf_loader_new(), gobject.g_object_unref) error = ffi.new('GError **') if width and height: gdk_pixbuf.gdk_pixbuf_loader_set_size(loader, width, height) handle_g_error(error, gdk_pixbuf.gdk_pixbuf_loader_write( loader, ffi.new('guchar[]', image_data), len(image_data), error)) handle_g_error(error, gdk_pixbuf.gdk_pixbuf_loader_close(loader, error)) format_ = gdk_pixbuf.gdk_pixbuf_loader_get_format(loader) format_name = ( ffi.string(gdk_pixbuf.gdk_pixbuf_format_get_name(format_)) .decode('ascii') if format_ != ffi.NULL else None) pixbuf = gdk_pixbuf.gdk_pixbuf_loader_get_pixbuf(loader) if pixbuf == ffi.NULL: # pragma: no cover raise ImageLoadingError('Not enough image data (got a NULL pixbuf.)') return Pixbuf(pixbuf), format_name
python
def decode_to_pixbuf(image_data, width=None, height=None): """Decode an image from memory with GDK-PixBuf. The file format is detected automatically. :param image_data: A byte string :param width: Integer width in pixels or None :param height: Integer height in pixels or None :returns: A tuple of a new :class:`Pixbuf` object and the name of the detected image format. :raises: :exc:`ImageLoadingError` if the image data is invalid or in an unsupported format. """ loader = ffi.gc( gdk_pixbuf.gdk_pixbuf_loader_new(), gobject.g_object_unref) error = ffi.new('GError **') if width and height: gdk_pixbuf.gdk_pixbuf_loader_set_size(loader, width, height) handle_g_error(error, gdk_pixbuf.gdk_pixbuf_loader_write( loader, ffi.new('guchar[]', image_data), len(image_data), error)) handle_g_error(error, gdk_pixbuf.gdk_pixbuf_loader_close(loader, error)) format_ = gdk_pixbuf.gdk_pixbuf_loader_get_format(loader) format_name = ( ffi.string(gdk_pixbuf.gdk_pixbuf_format_get_name(format_)) .decode('ascii') if format_ != ffi.NULL else None) pixbuf = gdk_pixbuf.gdk_pixbuf_loader_get_pixbuf(loader) if pixbuf == ffi.NULL: # pragma: no cover raise ImageLoadingError('Not enough image data (got a NULL pixbuf.)') return Pixbuf(pixbuf), format_name
[ "def", "decode_to_pixbuf", "(", "image_data", ",", "width", "=", "None", ",", "height", "=", "None", ")", ":", "loader", "=", "ffi", ".", "gc", "(", "gdk_pixbuf", ".", "gdk_pixbuf_loader_new", "(", ")", ",", "gobject", ".", "g_object_unref", ")", "error", "=", "ffi", ".", "new", "(", "'GError **'", ")", "if", "width", "and", "height", ":", "gdk_pixbuf", ".", "gdk_pixbuf_loader_set_size", "(", "loader", ",", "width", ",", "height", ")", "handle_g_error", "(", "error", ",", "gdk_pixbuf", ".", "gdk_pixbuf_loader_write", "(", "loader", ",", "ffi", ".", "new", "(", "'guchar[]'", ",", "image_data", ")", ",", "len", "(", "image_data", ")", ",", "error", ")", ")", "handle_g_error", "(", "error", ",", "gdk_pixbuf", ".", "gdk_pixbuf_loader_close", "(", "loader", ",", "error", ")", ")", "format_", "=", "gdk_pixbuf", ".", "gdk_pixbuf_loader_get_format", "(", "loader", ")", "format_name", "=", "(", "ffi", ".", "string", "(", "gdk_pixbuf", ".", "gdk_pixbuf_format_get_name", "(", "format_", ")", ")", ".", "decode", "(", "'ascii'", ")", "if", "format_", "!=", "ffi", ".", "NULL", "else", "None", ")", "pixbuf", "=", "gdk_pixbuf", ".", "gdk_pixbuf_loader_get_pixbuf", "(", "loader", ")", "if", "pixbuf", "==", "ffi", ".", "NULL", ":", "# pragma: no cover", "raise", "ImageLoadingError", "(", "'Not enough image data (got a NULL pixbuf.)'", ")", "return", "Pixbuf", "(", "pixbuf", ")", ",", "format_name" ]
Decode an image from memory with GDK-PixBuf. The file format is detected automatically. :param image_data: A byte string :param width: Integer width in pixels or None :param height: Integer height in pixels or None :returns: A tuple of a new :class:`Pixbuf` object and the name of the detected image format. :raises: :exc:`ImageLoadingError` if the image data is invalid or in an unsupported format.
[ "Decode", "an", "image", "from", "memory", "with", "GDK", "-", "PixBuf", ".", "The", "file", "format", "is", "detected", "automatically", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/pixbuf.py#L69-L102
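A usage sketch, assuming GDK-PixBuf is installed; 'photo.jpg' is a hypothetical input file, and the Pixbuf wrapper forwards get_* calls to the underlying C object (as the other functions in this module rely on):

from cairocffi import pixbuf

with open('photo.jpg', 'rb') as fd:      # hypothetical input file
    image_data = fd.read()

pb, format_name = pixbuf.decode_to_pixbuf(image_data)
print(format_name)                        # e.g. 'jpeg', as detected by the loader
print(pb.get_width(), pb.get_height())    # decoded size in pixels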
17,626
Kozea/cairocffi
cairocffi/pixbuf.py
decode_to_image_surface
def decode_to_image_surface(image_data, width=None, height=None): """Decode an image from memory into a cairo surface. The file format is detected automatically. :param image_data: A byte string :param width: Integer width in pixels or None :param height: Integer height in pixels or None :returns: A tuple of a new :class:`~cairocffi.ImageSurface` object and the name of the detected image format. :raises: :exc:`ImageLoadingError` if the image data is invalid or in an unsupported format. """ pixbuf, format_name = decode_to_pixbuf(image_data, width, height) surface = ( pixbuf_to_cairo_gdk(pixbuf) if gdk is not None else pixbuf_to_cairo_slices(pixbuf) if not pixbuf.get_has_alpha() else pixbuf_to_cairo_png(pixbuf)) return surface, format_name
python
def decode_to_image_surface(image_data, width=None, height=None): """Decode an image from memory into a cairo surface. The file format is detected automatically. :param image_data: A byte string :param width: Integer width in pixels or None :param height: Integer height in pixels or None :returns: A tuple of a new :class:`~cairocffi.ImageSurface` object and the name of the detected image format. :raises: :exc:`ImageLoadingError` if the image data is invalid or in an unsupported format. """ pixbuf, format_name = decode_to_pixbuf(image_data, width, height) surface = ( pixbuf_to_cairo_gdk(pixbuf) if gdk is not None else pixbuf_to_cairo_slices(pixbuf) if not pixbuf.get_has_alpha() else pixbuf_to_cairo_png(pixbuf)) return surface, format_name
[ "def", "decode_to_image_surface", "(", "image_data", ",", "width", "=", "None", ",", "height", "=", "None", ")", ":", "pixbuf", ",", "format_name", "=", "decode_to_pixbuf", "(", "image_data", ",", "width", ",", "height", ")", "surface", "=", "(", "pixbuf_to_cairo_gdk", "(", "pixbuf", ")", "if", "gdk", "is", "not", "None", "else", "pixbuf_to_cairo_slices", "(", "pixbuf", ")", "if", "not", "pixbuf", ".", "get_has_alpha", "(", ")", "else", "pixbuf_to_cairo_png", "(", "pixbuf", ")", ")", "return", "surface", ",", "format_name" ]
Decode an image from memory into a cairo surface. The file format is detected automatically. :param image_data: A byte string :param width: Integer width in pixels or None :param height: Integer height in pixels or None :returns: A tuple of a new :class:`~cairocffi.ImageSurface` object and the name of the detected image format. :raises: :exc:`ImageLoadingError` if the image data is invalid or in an unsupported format.
[ "Decode", "an", "image", "from", "memory", "into", "a", "cairo", "surface", ".", "The", "file", "format", "is", "detected", "automatically", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/pixbuf.py#L105-L125
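The higher-level entry point most callers want: decode straight to a cairo surface, optionally scaling while decoding ('logo.png' is a hypothetical input file):

from cairocffi import pixbuf

with open('logo.png', 'rb') as fd:        # hypothetical input file
    image_data = fd.read()

surface, format_name = pixbuf.decode_to_image_surface(
    image_data, width=64, height=64)      # scale at load time
surface.write_to_png('logo-64.png')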
17,627
Kozea/cairocffi
cairocffi/pixbuf.py
pixbuf_to_cairo_gdk
def pixbuf_to_cairo_gdk(pixbuf): """Convert from PixBuf to ImageSurface, using GDK. This method is fastest but GDK is not always available. """ dummy_context = Context(ImageSurface(constants.FORMAT_ARGB32, 1, 1)) gdk.gdk_cairo_set_source_pixbuf( dummy_context._pointer, pixbuf._pointer, 0, 0) return dummy_context.get_source().get_surface()
python
def pixbuf_to_cairo_gdk(pixbuf): """Convert from PixBuf to ImageSurface, using GDK. This method is fastest but GDK is not always available. """ dummy_context = Context(ImageSurface(constants.FORMAT_ARGB32, 1, 1)) gdk.gdk_cairo_set_source_pixbuf( dummy_context._pointer, pixbuf._pointer, 0, 0) return dummy_context.get_source().get_surface()
[ "def", "pixbuf_to_cairo_gdk", "(", "pixbuf", ")", ":", "dummy_context", "=", "Context", "(", "ImageSurface", "(", "constants", ".", "FORMAT_ARGB32", ",", "1", ",", "1", ")", ")", "gdk", ".", "gdk_cairo_set_source_pixbuf", "(", "dummy_context", ".", "_pointer", ",", "pixbuf", ".", "_pointer", ",", "0", ",", "0", ")", "return", "dummy_context", ".", "get_source", "(", ")", ".", "get_surface", "(", ")" ]
Convert from PixBuf to ImageSurface, using GDK. This method is fastest but GDK is not always available.
[ "Convert", "from", "PixBuf", "to", "ImageSurface", "using", "GDK", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/pixbuf.py#L128-L137
17,628
Kozea/cairocffi
cairocffi/pixbuf.py
pixbuf_to_cairo_slices
def pixbuf_to_cairo_slices(pixbuf): """Convert from PixBuf to ImageSurface, using slice-based byte swapping. This method is 2~5x slower than GDK but does not support an alpha channel. (cairo uses pre-multiplied alpha, but not Pixbuf.) """ assert pixbuf.get_colorspace() == gdk_pixbuf.GDK_COLORSPACE_RGB assert pixbuf.get_n_channels() == 3 assert pixbuf.get_bits_per_sample() == 8 width = pixbuf.get_width() height = pixbuf.get_height() rowstride = pixbuf.get_rowstride() pixels = ffi.buffer(pixbuf.get_pixels(), pixbuf.get_byte_length()) # TODO: remove this when cffi buffers support slicing with a stride. pixels = pixels[:] # Convert GdkPixbuf’s big-endian RGBA to cairo’s native-endian ARGB cairo_stride = ImageSurface.format_stride_for_width( constants.FORMAT_RGB24, width) data = bytearray(cairo_stride * height) big_endian = sys.byteorder == 'big' pixbuf_row_length = width * 3 # stride == row_length + padding cairo_row_length = width * 4 # stride == row_length + padding alpha = b'\xff' * width # opaque for y in range(height): offset = rowstride * y end = offset + pixbuf_row_length red = pixels[offset:end:3] green = pixels[offset + 1:end:3] blue = pixels[offset + 2:end:3] offset = cairo_stride * y end = offset + cairo_row_length if big_endian: # pragma: no cover data[offset:end:4] = alpha data[offset + 1:end:4] = red data[offset + 2:end:4] = green data[offset + 3:end:4] = blue else: data[offset + 3:end:4] = alpha data[offset + 2:end:4] = red data[offset + 1:end:4] = green data[offset:end:4] = blue data = array('B', data) return ImageSurface(constants.FORMAT_RGB24, width, height, data, cairo_stride)
python
def pixbuf_to_cairo_slices(pixbuf): """Convert from PixBuf to ImageSurface, using slice-based byte swapping. This method is 2~5x slower than GDK but does not support an alpha channel. (cairo uses pre-multiplied alpha, but not Pixbuf.) """ assert pixbuf.get_colorspace() == gdk_pixbuf.GDK_COLORSPACE_RGB assert pixbuf.get_n_channels() == 3 assert pixbuf.get_bits_per_sample() == 8 width = pixbuf.get_width() height = pixbuf.get_height() rowstride = pixbuf.get_rowstride() pixels = ffi.buffer(pixbuf.get_pixels(), pixbuf.get_byte_length()) # TODO: remove this when cffi buffers support slicing with a stride. pixels = pixels[:] # Convert GdkPixbuf’s big-endian RGBA to cairo’s native-endian ARGB cairo_stride = ImageSurface.format_stride_for_width( constants.FORMAT_RGB24, width) data = bytearray(cairo_stride * height) big_endian = sys.byteorder == 'big' pixbuf_row_length = width * 3 # stride == row_length + padding cairo_row_length = width * 4 # stride == row_length + padding alpha = b'\xff' * width # opaque for y in range(height): offset = rowstride * y end = offset + pixbuf_row_length red = pixels[offset:end:3] green = pixels[offset + 1:end:3] blue = pixels[offset + 2:end:3] offset = cairo_stride * y end = offset + cairo_row_length if big_endian: # pragma: no cover data[offset:end:4] = alpha data[offset + 1:end:4] = red data[offset + 2:end:4] = green data[offset + 3:end:4] = blue else: data[offset + 3:end:4] = alpha data[offset + 2:end:4] = red data[offset + 1:end:4] = green data[offset:end:4] = blue data = array('B', data) return ImageSurface(constants.FORMAT_RGB24, width, height, data, cairo_stride)
[ "def", "pixbuf_to_cairo_slices", "(", "pixbuf", ")", ":", "assert", "pixbuf", ".", "get_colorspace", "(", ")", "==", "gdk_pixbuf", ".", "GDK_COLORSPACE_RGB", "assert", "pixbuf", ".", "get_n_channels", "(", ")", "==", "3", "assert", "pixbuf", ".", "get_bits_per_sample", "(", ")", "==", "8", "width", "=", "pixbuf", ".", "get_width", "(", ")", "height", "=", "pixbuf", ".", "get_height", "(", ")", "rowstride", "=", "pixbuf", ".", "get_rowstride", "(", ")", "pixels", "=", "ffi", ".", "buffer", "(", "pixbuf", ".", "get_pixels", "(", ")", ",", "pixbuf", ".", "get_byte_length", "(", ")", ")", "# TODO: remove this when cffi buffers support slicing with a stride.", "pixels", "=", "pixels", "[", ":", "]", "# Convert GdkPixbuf’s big-endian RGBA to cairo’s native-endian ARGB", "cairo_stride", "=", "ImageSurface", ".", "format_stride_for_width", "(", "constants", ".", "FORMAT_RGB24", ",", "width", ")", "data", "=", "bytearray", "(", "cairo_stride", "*", "height", ")", "big_endian", "=", "sys", ".", "byteorder", "==", "'big'", "pixbuf_row_length", "=", "width", "*", "3", "# stride == row_length + padding", "cairo_row_length", "=", "width", "*", "4", "# stride == row_length + padding", "alpha", "=", "b'\\xff'", "*", "width", "# opaque", "for", "y", "in", "range", "(", "height", ")", ":", "offset", "=", "rowstride", "*", "y", "end", "=", "offset", "+", "pixbuf_row_length", "red", "=", "pixels", "[", "offset", ":", "end", ":", "3", "]", "green", "=", "pixels", "[", "offset", "+", "1", ":", "end", ":", "3", "]", "blue", "=", "pixels", "[", "offset", "+", "2", ":", "end", ":", "3", "]", "offset", "=", "cairo_stride", "*", "y", "end", "=", "offset", "+", "cairo_row_length", "if", "big_endian", ":", "# pragma: no cover", "data", "[", "offset", ":", "end", ":", "4", "]", "=", "alpha", "data", "[", "offset", "+", "1", ":", "end", ":", "4", "]", "=", "red", "data", "[", "offset", "+", "2", ":", "end", ":", "4", "]", "=", "green", "data", "[", "offset", "+", "3", ":", "end", ":", "4", "]", "=", "blue", "else", ":", "data", "[", "offset", "+", "3", ":", "end", ":", "4", "]", "=", "alpha", "data", "[", "offset", "+", "2", ":", "end", ":", "4", "]", "=", "red", "data", "[", "offset", "+", "1", ":", "end", ":", "4", "]", "=", "green", "data", "[", "offset", ":", "end", ":", "4", "]", "=", "blue", "data", "=", "array", "(", "'B'", ",", "data", ")", "return", "ImageSurface", "(", "constants", ".", "FORMAT_RGB24", ",", "width", ",", "height", ",", "data", ",", "cairo_stride", ")" ]
Convert from PixBuf to ImageSurface, using slice-based byte swapping. This method is 2~5x slower than GDK but does not support an alpha channel. (cairo uses pre-multiplied alpha, but not Pixbuf.)
[ "Convert", "from", "PixBuf", "to", "ImageSurface", "using", "slice", "-", "based", "byte", "swapping", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/pixbuf.py#L140-L187
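A self-contained restatement of the per-pixel byte shuffle performed above, for a single pixel and with no GDK dependency:

import sys

red, green, blue = b'\x10', b'\x20', b'\x30'  # one RGB pixel from a pixbuf row

# FORMAT_RGB24 stores each pixel as a native-endian 32-bit word laid out as
# [unused | red | green | blue]; in little-endian memory that is B, G, R, X.
if sys.byteorder == 'little':
    cairo_pixel = blue + green + red + b'\xff'
else:
    cairo_pixel = b'\xff' + red + green + blue
assert len(cairo_pixel) == 4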
17,629
Kozea/cairocffi
cairocffi/pixbuf.py
pixbuf_to_cairo_png
def pixbuf_to_cairo_png(pixbuf): """Convert from PixBuf to ImageSurface, by going through the PNG format. This method is 10~30x slower than GDK but always works. """ buffer_pointer = ffi.new('gchar **') buffer_size = ffi.new('gsize *') error = ffi.new('GError **') handle_g_error(error, pixbuf.save_to_buffer( buffer_pointer, buffer_size, ffi.new('char[]', b'png'), error, ffi.new('char[]', b'compression'), ffi.new('char[]', b'0'), ffi.NULL)) png_bytes = ffi.buffer(buffer_pointer[0], buffer_size[0]) return ImageSurface.create_from_png(BytesIO(png_bytes))
python
def pixbuf_to_cairo_png(pixbuf): """Convert from PixBuf to ImageSurface, by going through the PNG format. This method is 10~30x slower than GDK but always works. """ buffer_pointer = ffi.new('gchar **') buffer_size = ffi.new('gsize *') error = ffi.new('GError **') handle_g_error(error, pixbuf.save_to_buffer( buffer_pointer, buffer_size, ffi.new('char[]', b'png'), error, ffi.new('char[]', b'compression'), ffi.new('char[]', b'0'), ffi.NULL)) png_bytes = ffi.buffer(buffer_pointer[0], buffer_size[0]) return ImageSurface.create_from_png(BytesIO(png_bytes))
[ "def", "pixbuf_to_cairo_png", "(", "pixbuf", ")", ":", "buffer_pointer", "=", "ffi", ".", "new", "(", "'gchar **'", ")", "buffer_size", "=", "ffi", ".", "new", "(", "'gsize *'", ")", "error", "=", "ffi", ".", "new", "(", "'GError **'", ")", "handle_g_error", "(", "error", ",", "pixbuf", ".", "save_to_buffer", "(", "buffer_pointer", ",", "buffer_size", ",", "ffi", ".", "new", "(", "'char[]'", ",", "b'png'", ")", ",", "error", ",", "ffi", ".", "new", "(", "'char[]'", ",", "b'compression'", ")", ",", "ffi", ".", "new", "(", "'char[]'", ",", "b'0'", ")", ",", "ffi", ".", "NULL", ")", ")", "png_bytes", "=", "ffi", ".", "buffer", "(", "buffer_pointer", "[", "0", "]", ",", "buffer_size", "[", "0", "]", ")", "return", "ImageSurface", ".", "create_from_png", "(", "BytesIO", "(", "png_bytes", ")", ")" ]
Convert from PixBuf to ImageSurface, by going through the PNG format. This method is 10~30x slower than GDK but always works.
[ "Convert", "from", "PixBuf", "to", "ImageSurface", "by", "going", "through", "the", "PNG", "format", "." ]
450853add7e32eea20985b6aa5f54d9cb3cd04fe
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/pixbuf.py#L190-L204
17,630
coderholic/pyradio
pyradio/player.py
probePlayer
def probePlayer(requested_player=''): """ Probes the multimedia players which are available on the host system.""" ret_player = None if logger.isEnabledFor(logging.INFO): logger.info("Probing available multimedia players...") implementedPlayers = Player.__subclasses__() if logger.isEnabledFor(logging.INFO): logger.info("Implemented players: " + ", ".join([player.PLAYER_CMD for player in implementedPlayers])) if requested_player: req = requested_player.split(',') for r_player in req: if r_player == 'vlc': r_player = 'cvlc' for player in implementedPlayers: if player.PLAYER_CMD == r_player: ret_player = check_player(player) if ret_player is not None: return ret_player if ret_player is None: if logger.isEnabledFor(logging.INFO): logger.info('Requested player "{}" not supported'.format(r_player)) else: for player in implementedPlayers: ret_player = check_player(player) if ret_player is not None: break return ret_player
python
def probePlayer(requested_player=''): """ Probes the multimedia players which are available on the host system.""" ret_player = None if logger.isEnabledFor(logging.INFO): logger.info("Probing available multimedia players...") implementedPlayers = Player.__subclasses__() if logger.isEnabledFor(logging.INFO): logger.info("Implemented players: " + ", ".join([player.PLAYER_CMD for player in implementedPlayers])) if requested_player: req = requested_player.split(',') for r_player in req: if r_player == 'vlc': r_player = 'cvlc' for player in implementedPlayers: if player.PLAYER_CMD == r_player: ret_player = check_player(player) if ret_player is not None: return ret_player if ret_player is None: if logger.isEnabledFor(logging.INFO): logger.info('Requested player "{}" not supported'.format(r_player)) else: for player in implementedPlayers: ret_player = check_player(player) if ret_player is not None: break return ret_player
[ "def", "probePlayer", "(", "requested_player", "=", "''", ")", ":", "ret_player", "=", "None", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "\"Probing available multimedia players...\"", ")", "implementedPlayers", "=", "Player", ".", "__subclasses__", "(", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "\"Implemented players: \"", "+", "\", \"", ".", "join", "(", "[", "player", ".", "PLAYER_CMD", "for", "player", "in", "implementedPlayers", "]", ")", ")", "if", "requested_player", ":", "req", "=", "requested_player", ".", "split", "(", "','", ")", "for", "r_player", "in", "req", ":", "if", "r_player", "==", "'vlc'", ":", "r_player", "=", "'cvlc'", "for", "player", "in", "implementedPlayers", ":", "if", "player", ".", "PLAYER_CMD", "==", "r_player", ":", "ret_player", "=", "check_player", "(", "player", ")", "if", "ret_player", "is", "not", "None", ":", "return", "ret_player", "if", "ret_player", "is", "None", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "'Requested player \"{}\" not supported'", ".", "format", "(", "r_player", ")", ")", "else", ":", "for", "player", "in", "implementedPlayers", ":", "ret_player", "=", "check_player", "(", "player", ")", "if", "ret_player", "is", "not", "None", ":", "break", "return", "ret_player" ]
Probes the multimedia players which are available on the host system.
[ "Probes", "the", "multimedia", "players", "which", "are", "available", "on", "the", "host", "system", "." ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L763-L793
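A usage sketch, assuming check_player() returns the player class when its executable is found on the host:

from pyradio.player import probePlayer

player_class = probePlayer('mpv,vlc')  # honour the user's preference list first
if player_class is None:
    player_class = probePlayer()       # fall back to probing everything implemented
print(player_class.PLAYER_CMD if player_class else 'no supported player found')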
17,631
coderholic/pyradio
pyradio/player.py
Player.play
def play(self, name, streamUrl, encoding = ''): """ use a multimedia player to play a stream """ self.close() self.name = name self.oldUserInput = {'Input': '', 'Volume': '', 'Title': ''} self.muted = False self.show_volume = True self.title_prefix = '' self.playback_is_on = False self.outputStream.write('Station: "{}"'.format(name), self.status_update_lock) if logger.isEnabledFor(logging.INFO): logger.info('Selected Station: "{}"'.format(name)) if encoding: self._station_encoding = encoding else: self._station_encoding = 'utf-8' opts = [] isPlayList = streamUrl.split("?")[0][-3:] in ['m3u', 'pls'] opts = self._buildStartOpts(streamUrl, isPlayList) self.process = subprocess.Popen(opts, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) t = threading.Thread(target=self.updateStatus, args=(self.status_update_lock, )) t.start() # start playback check timer thread try: self.connection_timeout_thread = threading.Timer(self.playback_timeout, self.playback_timeout_handler) self.connection_timeout_thread.start() except: self.connection_timeout_thread = None if (logger.isEnabledFor(logging.ERROR)): logger.error("playback detection thread start failed") if logger.isEnabledFor(logging.INFO): logger.info("Player started")
python
def play(self, name, streamUrl, encoding = ''): """ use a multimedia player to play a stream """ self.close() self.name = name self.oldUserInput = {'Input': '', 'Volume': '', 'Title': ''} self.muted = False self.show_volume = True self.title_prefix = '' self.playback_is_on = False self.outputStream.write('Station: "{}"'.format(name), self.status_update_lock) if logger.isEnabledFor(logging.INFO): logger.info('Selected Station: "{}"'.format(name)) if encoding: self._station_encoding = encoding else: self._station_encoding = 'utf-8' opts = [] isPlayList = streamUrl.split("?")[0][-3:] in ['m3u', 'pls'] opts = self._buildStartOpts(streamUrl, isPlayList) self.process = subprocess.Popen(opts, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) t = threading.Thread(target=self.updateStatus, args=(self.status_update_lock, )) t.start() # start playback check timer thread try: self.connection_timeout_thread = threading.Timer(self.playback_timeout, self.playback_timeout_handler) self.connection_timeout_thread.start() except: self.connection_timeout_thread = None if (logger.isEnabledFor(logging.ERROR)): logger.error("playback detection thread start failed") if logger.isEnabledFor(logging.INFO): logger.info("Player started")
[ "def", "play", "(", "self", ",", "name", ",", "streamUrl", ",", "encoding", "=", "''", ")", ":", "self", ".", "close", "(", ")", "self", ".", "name", "=", "name", "self", ".", "oldUserInput", "=", "{", "'Input'", ":", "''", ",", "'Volume'", ":", "''", ",", "'Title'", ":", "''", "}", "self", ".", "muted", "=", "False", "self", ".", "show_volume", "=", "True", "self", ".", "title_prefix", "=", "''", "self", ".", "playback_is_on", "=", "False", "self", ".", "outputStream", ".", "write", "(", "'Station: \"{}\"'", ".", "format", "(", "name", ")", ",", "self", ".", "status_update_lock", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "'Selected Station: \"{}\"'", ".", "format", "(", "name", ")", ")", "if", "encoding", ":", "self", ".", "_station_encoding", "=", "encoding", "else", ":", "self", ".", "_station_encoding", "=", "'utf-8'", "opts", "=", "[", "]", "isPlayList", "=", "streamUrl", ".", "split", "(", "\"?\"", ")", "[", "0", "]", "[", "-", "3", ":", "]", "in", "[", "'m3u'", ",", "'pls'", "]", "opts", "=", "self", ".", "_buildStartOpts", "(", "streamUrl", ",", "isPlayList", ")", "self", ".", "process", "=", "subprocess", ".", "Popen", "(", "opts", ",", "shell", "=", "False", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "t", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "updateStatus", ",", "args", "=", "(", "self", ".", "status_update_lock", ",", ")", ")", "t", ".", "start", "(", ")", "# start playback check timer thread", "try", ":", "self", ".", "connection_timeout_thread", "=", "threading", ".", "Timer", "(", "self", ".", "playback_timeout", ",", "self", ".", "playback_timeout_handler", ")", "self", ".", "connection_timeout_thread", ".", "start", "(", ")", "except", ":", "self", ".", "connection_timeout_thread", "=", "None", "if", "(", "logger", ".", "isEnabledFor", "(", "logging", ".", "ERROR", ")", ")", ":", "logger", ".", "error", "(", "\"playback detection thread start failed\"", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "\"Player started\"", ")" ]
use a multimedia player to play a stream
[ "use", "a", "multimedia", "player", "to", "play", "a", "stream" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L285-L319
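The playlist test above is compact enough to misread: it strips any query string, then checks the last three characters of the path. A standalone restatement:

def is_playlist(stream_url):
    # mirrors: streamUrl.split("?")[0][-3:] in ['m3u', 'pls']
    return stream_url.split('?')[0][-3:] in ('m3u', 'pls')

assert is_playlist('http://example.com/radio.pls')
assert is_playlist('http://example.com/radio.m3u?token=1')
assert not is_playlist('http://example.com/live/stream')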
17,632
coderholic/pyradio
pyradio/player.py
Player._sendCommand
def _sendCommand(self, command): """ send keystroke command to player """ if(self.process is not None): try: if logger.isEnabledFor(logging.DEBUG): logger.debug("Command: {}".format(command).strip()) self.process.stdin.write(command.encode('utf-8', 'replace')) self.process.stdin.flush() except: msg = "Error when sending: {}" if logger.isEnabledFor(logging.ERROR): logger.error(msg.format(command).strip(), exc_info=True)
python
def _sendCommand(self, command): """ send keystroke command to player """ if(self.process is not None): try: if logger.isEnabledFor(logging.DEBUG): logger.debug("Command: {}".format(command).strip()) self.process.stdin.write(command.encode('utf-8', 'replace')) self.process.stdin.flush() except: msg = "Error when sending: {}" if logger.isEnabledFor(logging.ERROR): logger.error(msg.format(command).strip(), exc_info=True)
[ "def", "_sendCommand", "(", "self", ",", "command", ")", ":", "if", "(", "self", ".", "process", "is", "not", "None", ")", ":", "try", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "\"Command: {}\"", ".", "format", "(", "command", ")", ".", "strip", "(", ")", ")", "self", ".", "process", ".", "stdin", ".", "write", "(", "command", ".", "encode", "(", "'utf-8'", ",", "'replace'", ")", ")", "self", ".", "process", ".", "stdin", ".", "flush", "(", ")", "except", ":", "msg", "=", "\"Error when sending: {}\"", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "ERROR", ")", ":", "logger", ".", "error", "(", "msg", ".", "format", "(", "command", ")", ".", "strip", "(", ")", ",", "exc_info", "=", "True", ")" ]
send keystroke command to player
[ "send", "keystroke", "command", "to", "player" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L321-L333
17,633
coderholic/pyradio
pyradio/player.py
MpvPlayer._format_title_string
def _format_title_string(self, title_string): """ format mpv's title """ return self._title_string_format_text_tag(title_string.replace(self.icy_tokkens[0], self.icy_title_prefix))
python
def _format_title_string(self, title_string): """ format mpv's title """ return self._title_string_format_text_tag(title_string.replace(self.icy_tokkens[0], self.icy_title_prefix))
[ "def", "_format_title_string", "(", "self", ",", "title_string", ")", ":", "return", "self", ".", "_title_string_format_text_tag", "(", "title_string", ".", "replace", "(", "self", ".", "icy_tokkens", "[", "0", "]", ",", "self", ".", "icy_title_prefix", ")", ")" ]
format mpv's title
[ "format", "mpv", "s", "title" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L529-L531
17,634
coderholic/pyradio
pyradio/player.py
MpPlayer._format_title_string
def _format_title_string(self, title_string): """ format mplayer's title """ if "StreamTitle='" in title_string: tmp = title_string[title_string.find("StreamTitle='"):].replace("StreamTitle='", self.icy_title_prefix) ret_string = tmp[:tmp.find("';")] else: ret_string = title_string if '"artist":"' in ret_string: """ work on format: ICY Info: START_SONG='{"artist":"Clelia Cafiero","title":"M. Mussorgsky-Quadri di un'esposizione"}'; Found on "ClassicaViva Web Radio: Classical" """ ret_string = self.icy_title_prefix + ret_string[ret_string.find('"artist":')+10:].replace('","title":"', ' - ').replace('"}\';', '') return self._title_string_format_text_tag(ret_string)
python
def _format_title_string(self, title_string): """ format mplayer's title """ if "StreamTitle='" in title_string: tmp = title_string[title_string.find("StreamTitle='"):].replace("StreamTitle='", self.icy_title_prefix) ret_string = tmp[:tmp.find("';")] else: ret_string = title_string if '"artist":"' in ret_string: """ work on format: ICY Info: START_SONG='{"artist":"Clelia Cafiero","title":"M. Mussorgsky-Quadri di un'esposizione"}'; Found on "ClassicaViva Web Radio: Classical" """ ret_string = self.icy_title_prefix + ret_string[ret_string.find('"artist":')+10:].replace('","title":"', ' - ').replace('"}\';', '') return self._title_string_format_text_tag(ret_string)
[ "def", "_format_title_string", "(", "self", ",", "title_string", ")", ":", "if", "\"StreamTitle='\"", "in", "title_string", ":", "tmp", "=", "title_string", "[", "title_string", ".", "find", "(", "\"StreamTitle='\"", ")", ":", "]", ".", "replace", "(", "\"StreamTitle='\"", ",", "self", ".", "icy_title_prefix", ")", "ret_string", "=", "tmp", "[", ":", "tmp", ".", "find", "(", "\"';\"", ")", "]", "else", ":", "ret_string", "=", "title_string", "if", "'\"artist\":\"'", "in", "ret_string", ":", "\"\"\" work on format:\n ICY Info: START_SONG='{\"artist\":\"Clelia Cafiero\",\"title\":\"M. Mussorgsky-Quadri di un'esposizione\"}';\n Found on \"ClassicaViva Web Radio: Classical\"\n \"\"\"", "ret_string", "=", "self", ".", "icy_title_prefix", "+", "ret_string", "[", "ret_string", ".", "find", "(", "'\"artist\":'", ")", "+", "10", ":", "]", ".", "replace", "(", "'\",\"title\":\"'", ",", "' - '", ")", ".", "replace", "(", "'\"}\\';'", ",", "''", ")", "return", "self", ".", "_title_string_format_text_tag", "(", "ret_string", ")" ]
format mplayer's title
[ "format", "mplayer", "s", "title" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L625-L638
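A worked example of the StreamTitle branch, run outside the class; icy_title_prefix is assumed to be 'Title: ' purely for illustration, and the final _title_string_format_text_tag call is omitted:

prefix = 'Title: '  # hypothetical icy_title_prefix value

title = "StreamTitle='Artist - Song';StreamUrl='http://example.com';"
tmp = title[title.find("StreamTitle='"):].replace("StreamTitle='", prefix)
result = tmp[:tmp.find("';")]
assert result == 'Title: Artist - Song'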
17,635
coderholic/pyradio
pyradio/player.py
MpPlayer._format_volume_string
def _format_volume_string(self, volume_string): """ format mplayer's volume """ return '[' + volume_string[volume_string.find(self.volume_string):].replace(' %','%').replace('ume', '')+'] '
python
def _format_volume_string(self, volume_string): """ format mplayer's volume """ return '[' + volume_string[volume_string.find(self.volume_string):].replace(' %','%').replace('ume', '')+'] '
[ "def", "_format_volume_string", "(", "self", ",", "volume_string", ")", ":", "return", "'['", "+", "volume_string", "[", "volume_string", ".", "find", "(", "self", ".", "volume_string", ")", ":", "]", ".", "replace", "(", "' %'", ",", "'%'", ")", ".", "replace", "(", "'ume'", ",", "''", ")", "+", "'] '" ]
format mplayer's volume
[ "format", "mplayer", "s", "volume" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L640-L642
17,636
coderholic/pyradio
pyradio/player.py
VlcPlayer._format_volume_string
def _format_volume_string(self, volume_string): """ format vlc's volume """ self.actual_volume = int(volume_string.split(self.volume_string)[1].split(',')[0].split()[0]) return '[Vol: {}%] '.format(int(100 * self.actual_volume / self.max_volume))
python
def _format_volume_string(self, volume_string): """ format vlc's volume """ self.actual_volume = int(volume_string.split(self.volume_string)[1].split(',')[0].split()[0]) return '[Vol: {}%] '.format(int(100 * self.actual_volume / self.max_volume))
[ "def", "_format_volume_string", "(", "self", ",", "volume_string", ")", ":", "self", ".", "actual_volume", "=", "int", "(", "volume_string", ".", "split", "(", "self", ".", "volume_string", ")", "[", "1", "]", ".", "split", "(", "','", ")", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ")", "return", "'[Vol: {}%] '", ".", "format", "(", "int", "(", "100", "*", "self", ".", "actual_volume", "/", "self", ".", "max_volume", ")", ")" ]
format vlc's volume
[ "format", "vlc", "s", "volume" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L706-L709
17,637
coderholic/pyradio
pyradio/player.py
VlcPlayer._format_title_string
def _format_title_string(self, title_string): """ format vlc's title """ sp = title_string.split(self.icy_tokkens[0]) if sp[0] == title_string: ret_string = title_string else: ret_string = self.icy_title_prefix + sp[1] return self._title_string_format_text_tag(ret_string)
python
def _format_title_string(self, title_string): """ format vlc's title """ sp = title_string.split(self.icy_tokkens[0]) if sp[0] == title_string: ret_string = title_string else: ret_string = self.icy_title_prefix + sp[1] return self._title_string_format_text_tag(ret_string)
[ "def", "_format_title_string", "(", "self", ",", "title_string", ")", ":", "sp", "=", "title_string", ".", "split", "(", "self", ".", "icy_tokkens", "[", "0", "]", ")", "if", "sp", "[", "0", "]", "==", "title_string", ":", "ret_string", "=", "title_string", "else", ":", "ret_string", "=", "self", ".", "icy_title_prefix", "+", "sp", "[", "1", "]", "return", "self", ".", "_title_string_format_text_tag", "(", "ret_string", ")" ]
format vlc's title
[ "format", "vlc", "s", "title" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L711-L718
17,638
coderholic/pyradio
pyradio/player.py
VlcPlayer._is_accepted_input
def _is_accepted_input(self, input_string): """ vlc input filtering """ ret = False accept_filter = (self.volume_string, "http stream debug: ") reject_filter = () for n in accept_filter: if n in input_string: ret = True break if ret: for n in reject_filter: if n in input_string: ret = False break return ret
python
def _is_accepted_input(self, input_string): """ vlc input filtering """ ret = False accept_filter = (self.volume_string, "http stream debug: ") reject_filter = () for n in accept_filter: if n in input_string: ret = True break if ret: for n in reject_filter: if n in input_string: ret = False break return ret
[ "def", "_is_accepted_input", "(", "self", ",", "input_string", ")", ":", "ret", "=", "False", "accept_filter", "=", "(", "self", ".", "volume_string", ",", "\"http stream debug: \"", ")", "reject_filter", "=", "(", ")", "for", "n", "in", "accept_filter", ":", "if", "n", "in", "input_string", ":", "ret", "=", "True", "break", "if", "ret", ":", "for", "n", "in", "reject_filter", ":", "if", "n", "in", "input_string", ":", "ret", "=", "False", "break", "return", "ret" ]
vlc input filtering
[ "vlc", "input", "filtering" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L720-L734
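Editor's note: the accept/reject pass above, reduced to a standalone function (the first accept marker is an assumption standing in for self.volume_string).

accept_filter = ('audio volume: ', 'http stream debug: ')  # first marker assumed
reject_filter = ()

def is_accepted(line):
    accepted = any(n in line for n in accept_filter)
    # A line is dropped again if any reject marker also matches.
    if accepted and any(n in line for n in reject_filter):
        accepted = False
    return accepted

print(is_accepted('http stream debug: opening stream'))  # True
print(is_accepted('unrelated output'))                   # False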
17,639
coderholic/pyradio
pyradio/player.py
VlcPlayer._no_mute_on_stop_playback
def _no_mute_on_stop_playback(self):
    """ make sure vlc does not stop muted """
    if self.ctrl_c_pressed:
        return
    if self.isPlaying():
        if self.actual_volume == -1:
            self._get_volume()
            while self.actual_volume == -1:
                pass
        if self.actual_volume == 0:
            self.actual_volume = int(self.max_volume * 0.25)
            self._sendCommand('volume {}\n'.format(self.actual_volume))
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('Unmuting VLC on exit: {} (25%)'.format(self.actual_volume))
        elif self.muted:
            if self.actual_volume > 0:
                self._sendCommand('volume {}\n'.format(self.actual_volume))
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug('VLC volume restored on exit: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
    self.show_volume = True
python
def _no_mute_on_stop_playback(self):
    """ make sure vlc does not stop muted """
    if self.ctrl_c_pressed:
        return
    if self.isPlaying():
        if self.actual_volume == -1:
            self._get_volume()
            while self.actual_volume == -1:
                pass
        if self.actual_volume == 0:
            self.actual_volume = int(self.max_volume * 0.25)
            self._sendCommand('volume {}\n'.format(self.actual_volume))
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('Unmuting VLC on exit: {} (25%)'.format(self.actual_volume))
        elif self.muted:
            if self.actual_volume > 0:
                self._sendCommand('volume {}\n'.format(self.actual_volume))
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug('VLC volume restored on exit: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
    self.show_volume = True
[ "def", "_no_mute_on_stop_playback", "(", "self", ")", ":", "if", "self", ".", "ctrl_c_pressed", ":", "return", "if", "self", ".", "isPlaying", "(", ")", ":", "if", "self", ".", "actual_volume", "==", "-", "1", ":", "self", ".", "_get_volume", "(", ")", "while", "self", ".", "actual_volume", "==", "-", "1", ":", "pass", "if", "self", ".", "actual_volume", "==", "0", ":", "self", ".", "actual_volume", "=", "int", "(", "self", ".", "max_volume", "*", "0.25", ")", "self", ".", "_sendCommand", "(", "'volume {}\\n'", ".", "format", "(", "self", ".", "actual_volume", ")", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "'Unmuting VLC on exit: {} (25%)'", ".", "format", "(", "self", ".", "actual_volume", ")", ")", "elif", "self", ".", "muted", ":", "if", "self", ".", "actual_volume", ">", "0", ":", "self", ".", "_sendCommand", "(", "'volume {}\\n'", ".", "format", "(", "self", ".", "actual_volume", ")", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "'VLC volume restored on exit: {0} ({1}%)'", ".", "format", "(", "self", ".", "actual_volume", ",", "int", "(", "100", "*", "self", ".", "actual_volume", "/", "self", ".", "max_volume", ")", ")", ")", "self", ".", "show_volume", "=", "True" ]
make sure vlc does not stop muted
[ "make", "sure", "vlc", "does", "not", "stop", "muted" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L741-L761
17,640
coderholic/pyradio
pyradio/config.py
PyRadioStations._check_stations_csv
def _check_stations_csv(self, usr, root):
    ''' Relocate a stations.csv copy in user home for easy management,
        e.g. no need for sudo when you add a new station, etc. '''
    if path.exists(path.join(usr, 'stations.csv')):
        return
    else:
        copyfile(root, path.join(usr, 'stations.csv'))
python
def _check_stations_csv(self, usr, root):
    ''' Relocate a stations.csv copy in user home for easy management,
        e.g. no need for sudo when you add a new station, etc. '''
    if path.exists(path.join(usr, 'stations.csv')):
        return
    else:
        copyfile(root, path.join(usr, 'stations.csv'))
[ "def", "_check_stations_csv", "(", "self", ",", "usr", ",", "root", ")", ":", "if", "path", ".", "exists", "(", "path", ".", "join", "(", "usr", ",", "'stations.csv'", ")", ")", ":", "return", "else", ":", "copyfile", "(", "root", ",", "path", ".", "join", "(", "usr", ",", "'stations.csv'", ")", ")" ]
Relocate a stations.csv copy in user home for easy management, e.g. no need for sudo when you add a new station, etc.
[ "Reclocate", "a", "stations", ".", "csv", "copy", "in", "user", "home", "for", "easy", "manage", ".", "E", ".", "g", ".", "not", "need", "sudo", "when", "you", "add", "new", "station", "etc" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L89-L96
17,641
coderholic/pyradio
pyradio/config.py
PyRadioStations._is_playlist_in_config_dir
def _is_playlist_in_config_dir(self):
    """ Check if a csv file is in the config dir """
    if path.dirname(self.stations_file) == self.stations_dir:
        self.foreign_file = False
        self.foreign_filename_only_no_extension = ''
    else:
        self.foreign_file = True
        self.foreign_filename_only_no_extension = self.stations_filename_only_no_extension
    self.foreign_copy_asked = False
python
def _is_playlist_in_config_dir(self):
    """ Check if a csv file is in the config dir """
    if path.dirname(self.stations_file) == self.stations_dir:
        self.foreign_file = False
        self.foreign_filename_only_no_extension = ''
    else:
        self.foreign_file = True
        self.foreign_filename_only_no_extension = self.stations_filename_only_no_extension
    self.foreign_copy_asked = False
[ "def", "_is_playlist_in_config_dir", "(", "self", ")", ":", "if", "path", ".", "dirname", "(", "self", ".", "stations_file", ")", "==", "self", ".", "stations_dir", ":", "self", ".", "foreign_file", "=", "False", "self", ".", "foreign_filename_only_no_extension", "=", "''", "else", ":", "self", ".", "foreign_file", "=", "True", "self", ".", "foreign_filename_only_no_extension", "=", "self", ".", "stations_filename_only_no_extension", "self", ".", "foreign_copy_asked", "=", "False" ]
Check if a csv file is in the config dir
[ "Check", "if", "a", "csv", "file", "is", "in", "the", "config", "dir" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L134-L142
17,642
coderholic/pyradio
pyradio/config.py
PyRadioStations._playlist_format_changed
def _playlist_format_changed(self):
    """ Check if we have new or old format
        and report if format has changed

        Format type can change by editing encoding,
        deleting a non-utf-8 station, etc.
    """
    new_format = False
    for n in self.stations:
        if n[2] != '':
            new_format = True
            break
    if self.new_format == new_format:
        return False
    else:
        return True
python
def _playlist_format_changed(self):
    """ Check if we have new or old format
        and report if format has changed

        Format type can change by editing encoding,
        deleting a non-utf-8 station, etc.
    """
    new_format = False
    for n in self.stations:
        if n[2] != '':
            new_format = True
            break
    if self.new_format == new_format:
        return False
    else:
        return True
[ "def", "_playlist_format_changed", "(", "self", ")", ":", "new_format", "=", "False", "for", "n", "in", "self", ".", "stations", ":", "if", "n", "[", "2", "]", "!=", "''", ":", "new_format", "=", "True", "break", "if", "self", ".", "new_format", "==", "new_format", ":", "return", "False", "else", ":", "return", "True" ]
Check if we have new or old format
and report if format has changed.

Format type can change by editing encoding,
deleting a non-utf-8 station, etc.
[ "Check", "if", "we", "have", "new", "or", "old", "format", "and", "report", "if", "format", "has", "changed" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L241-L256
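Editor's note: the format probe above amounts to checking whether any station row carries a non-empty third (encoding) field; a pure-function sketch with made-up rows:

stations = [['Name', 'http://example.com/a', ''],
            ['Name2', 'http://example.com/b', 'utf-8']]
new_format = any(n[2] != '' for n in stations)
print(new_format)  # True: at least one row has an encoding field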
17,643
coderholic/pyradio
pyradio/config.py
PyRadioStations.save_playlist_file
def save_playlist_file(self, stationFile=''):
    """ Save a playlist
        Create a txt file and write stations in it.
        Then rename it to final target

        return  0: All ok
               -1: Error writing file
               -2: Error renaming file
    """
    if self._playlist_format_changed():
        self.dirty_playlist = True
        self.new_format = not self.new_format
    if stationFile:
        st_file = stationFile
    else:
        st_file = self.stations_file

    if not self.dirty_playlist:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Playlist not modified...')
        return 0

    st_new_file = st_file.replace('.csv', '.txt')

    tmp_stations = self.stations[:]
    tmp_stations.reverse()
    if self.new_format:
        tmp_stations.append(['# Find lots more stations at http://www.iheart.com', '', ''])
    else:
        tmp_stations.append(['# Find lots more stations at http://www.iheart.com', ''])
    tmp_stations.reverse()
    try:
        with open(st_new_file, 'w') as cfgfile:
            writer = csv.writer(cfgfile)
            for a_station in tmp_stations:
                writer.writerow(self._format_playlist_row(a_station))
    except:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Cannot open playlist file for writing...')
        return -1
    try:
        move(st_new_file, st_file)
    except:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Cannot rename playlist file...')
        return -2
    self.dirty_playlist = False
    return 0
python
def save_playlist_file(self, stationFile=''):
    """ Save a playlist
        Create a txt file and write stations in it.
        Then rename it to final target

        return  0: All ok
               -1: Error writing file
               -2: Error renaming file
    """
    if self._playlist_format_changed():
        self.dirty_playlist = True
        self.new_format = not self.new_format
    if stationFile:
        st_file = stationFile
    else:
        st_file = self.stations_file

    if not self.dirty_playlist:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Playlist not modified...')
        return 0

    st_new_file = st_file.replace('.csv', '.txt')

    tmp_stations = self.stations[:]
    tmp_stations.reverse()
    if self.new_format:
        tmp_stations.append(['# Find lots more stations at http://www.iheart.com', '', ''])
    else:
        tmp_stations.append(['# Find lots more stations at http://www.iheart.com', ''])
    tmp_stations.reverse()
    try:
        with open(st_new_file, 'w') as cfgfile:
            writer = csv.writer(cfgfile)
            for a_station in tmp_stations:
                writer.writerow(self._format_playlist_row(a_station))
    except:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Cannot open playlist file for writing...')
        return -1
    try:
        move(st_new_file, st_file)
    except:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Cannot rename playlist file...')
        return -2
    self.dirty_playlist = False
    return 0
[ "def", "save_playlist_file", "(", "self", ",", "stationFile", "=", "''", ")", ":", "if", "self", ".", "_playlist_format_changed", "(", ")", ":", "self", ".", "dirty_playlist", "=", "True", "self", ".", "new_format", "=", "not", "self", ".", "new_format", "if", "stationFile", ":", "st_file", "=", "stationFile", "else", ":", "st_file", "=", "self", ".", "stations_file", "if", "not", "self", ".", "dirty_playlist", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "'Playlist not modified...'", ")", "return", "0", "st_new_file", "=", "st_file", ".", "replace", "(", "'.csv'", ",", "'.txt'", ")", "tmp_stations", "=", "self", ".", "stations", "[", ":", "]", "tmp_stations", ".", "reverse", "(", ")", "if", "self", ".", "new_format", ":", "tmp_stations", ".", "append", "(", "[", "'# Find lots more stations at http://www.iheart.com'", ",", "''", ",", "''", "]", ")", "else", ":", "tmp_stations", ".", "append", "(", "[", "'# Find lots more stations at http://www.iheart.com'", ",", "''", "]", ")", "tmp_stations", ".", "reverse", "(", ")", "try", ":", "with", "open", "(", "st_new_file", ",", "'w'", ")", "as", "cfgfile", ":", "writter", "=", "csv", ".", "writer", "(", "cfgfile", ")", "for", "a_station", "in", "tmp_stations", ":", "writter", ".", "writerow", "(", "self", ".", "_format_playlist_row", "(", "a_station", ")", ")", "except", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "'Cannot open playlist file for writing,,,'", ")", "return", "-", "1", "try", ":", "move", "(", "st_new_file", ",", "st_file", ")", "except", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "'Cannot rename playlist file...'", ")", "return", "-", "2", "self", ".", "dirty_playlist", "=", "False", "return", "0" ]
Save a playlist
Create a txt file and write stations in it.
Then rename it to final target

return  0: All ok
       -1: Error writing file
       -2: Error renaming file
[ "Save", "a", "playlist", "Create", "a", "txt", "file", "and", "write", "stations", "in", "it", ".", "Then", "rename", "it", "to", "final", "target" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L258-L306
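Editor's note: save_playlist_file relies on a write-temporary-then-rename pattern so a failed write never clobbers the existing playlist; a minimal sketch with sample data:

import csv
from shutil import move

stations = [['Station A', 'http://example.com/a', 'utf-8']]  # sample row
st_file = 'stations.csv'
st_new_file = st_file.replace('.csv', '.txt')  # temp file in the same dir

with open(st_new_file, 'w') as cfgfile:
    writer = csv.writer(cfgfile)
    for a_station in stations:
        writer.writerow(a_station)
move(st_new_file, st_file)  # replace the real playlist only after a clean write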
17,644
coderholic/pyradio
pyradio/config.py
PyRadioStations._bytes_to_human
def _bytes_to_human(self, B):
    ''' Return the given bytes as a human friendly KB, MB, GB, or TB string '''
    KB = float(1024)
    MB = float(KB ** 2)  # 1,048,576
    GB = float(KB ** 3)  # 1,073,741,824
    TB = float(KB ** 4)  # 1,099,511,627,776

    if B < KB:
        return '{0} B'.format(B)
    B = float(B)
    if KB <= B < MB:
        return '{0:.2f} KB'.format(B / KB)
    elif MB <= B < GB:
        return '{0:.2f} MB'.format(B / MB)
    elif GB <= B < TB:
        return '{0:.2f} GB'.format(B / GB)
    elif TB <= B:
        return '{0:.2f} TB'.format(B / TB)
python
def _bytes_to_human(self, B):
    ''' Return the given bytes as a human friendly KB, MB, GB, or TB string '''
    KB = float(1024)
    MB = float(KB ** 2)  # 1,048,576
    GB = float(KB ** 3)  # 1,073,741,824
    TB = float(KB ** 4)  # 1,099,511,627,776

    if B < KB:
        return '{0} B'.format(B)
    B = float(B)
    if KB <= B < MB:
        return '{0:.2f} KB'.format(B / KB)
    elif MB <= B < GB:
        return '{0:.2f} MB'.format(B / MB)
    elif GB <= B < TB:
        return '{0:.2f} GB'.format(B / GB)
    elif TB <= B:
        return '{0:.2f} TB'.format(B / TB)
[ "def", "_bytes_to_human", "(", "self", ",", "B", ")", ":", "KB", "=", "float", "(", "1024", ")", "MB", "=", "float", "(", "KB", "**", "2", ")", "# 1,048,576", "GB", "=", "float", "(", "KB", "**", "3", ")", "# 1,073,741,824", "TB", "=", "float", "(", "KB", "**", "4", ")", "# 1,099,511,627,776", "if", "B", "<", "KB", ":", "return", "'{0} B'", ".", "format", "(", "B", ")", "B", "=", "float", "(", "B", ")", "if", "KB", "<=", "B", "<", "MB", ":", "return", "'{0:.2f} KB'", ".", "format", "(", "B", "/", "KB", ")", "elif", "MB", "<=", "B", "<", "GB", ":", "return", "'{0:.2f} MB'", ".", "format", "(", "B", "/", "MB", ")", "elif", "GB", "<=", "B", "<", "TB", ":", "return", "'{0:.2f} GB'", ".", "format", "(", "B", "/", "GB", ")", "elif", "TB", "<=", "B", ":", "return", "'{0:.2f} TB'", ".", "format", "(", "B", "/", "TB", ")" ]
Return the given bytes as a human friendly KB, MB, GB, or TB string
[ "Return", "the", "given", "bytes", "as", "a", "human", "friendly", "KB", "MB", "GB", "or", "TB", "string" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L321-L338
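Editor's note: the helper above is pure threshold arithmetic; the same boundaries, exercised on a few round values:

KB, MB = 1024.0, 1024.0 ** 2
for B in (512, 2048, 5 * 1024 * 1024):
    if B < KB:
        print('{0} B'.format(B))
    elif B < MB:
        print('{0:.2f} KB'.format(B / KB))
    else:
        print('{0:.2f} MB'.format(B / MB))
# 512 B / 2.00 KB / 5.00 MB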
17,645
coderholic/pyradio
pyradio/config.py
PyRadioStations.append_station
def append_station(self, params, stationFile=''):
    """ Append a station to csv file

        return  0: All ok
               -2: playlist not found
               -3: negative number specified
               -4: number not found
               -5: Error writing file
               -6: Error renaming file
    """
    if self.new_format:
        if stationFile:
            st_file = stationFile
        else:
            st_file = self.stations_file

        st_file, ret = self._get_playlist_abspath_from_data(st_file)
        if ret < -1:
            return ret
        try:
            with open(st_file, 'a') as cfgfile:
                writer = csv.writer(cfgfile)
                writer.writerow(params)
            return 0
        except:
            return -5
    else:
        self.stations.append([params[0], params[1], params[2]])
        self.dirty_playlist = True
        st_file, ret = self._get_playlist_abspath_from_data(stationFile)
        if ret < -1:
            return ret
        ret = self.save_playlist_file(st_file)
        if ret < 0:
            ret -= 4
        return ret
python
def append_station(self, params, stationFile=''):
    """ Append a station to csv file

        return  0: All ok
               -2: playlist not found
               -3: negative number specified
               -4: number not found
               -5: Error writing file
               -6: Error renaming file
    """
    if self.new_format:
        if stationFile:
            st_file = stationFile
        else:
            st_file = self.stations_file

        st_file, ret = self._get_playlist_abspath_from_data(st_file)
        if ret < -1:
            return ret
        try:
            with open(st_file, 'a') as cfgfile:
                writer = csv.writer(cfgfile)
                writer.writerow(params)
            return 0
        except:
            return -5
    else:
        self.stations.append([params[0], params[1], params[2]])
        self.dirty_playlist = True
        st_file, ret = self._get_playlist_abspath_from_data(stationFile)
        if ret < -1:
            return ret
        ret = self.save_playlist_file(st_file)
        if ret < 0:
            ret -= 4
        return ret
[ "def", "append_station", "(", "self", ",", "params", ",", "stationFile", "=", "''", ")", ":", "if", "self", ".", "new_format", ":", "if", "stationFile", ":", "st_file", "=", "stationFile", "else", ":", "st_file", "=", "self", ".", "stations_file", "st_file", ",", "ret", "=", "self", ".", "_get_playlist_abspath_from_data", "(", "st_file", ")", "if", "ret", "<", "-", "1", ":", "return", "ret", "try", ":", "with", "open", "(", "st_file", ",", "'a'", ")", "as", "cfgfile", ":", "writter", "=", "csv", ".", "writer", "(", "cfgfile", ")", "writter", ".", "writerow", "(", "params", ")", "return", "0", "except", ":", "return", "-", "5", "else", ":", "self", ".", "stations", ".", "append", "(", "[", "params", "[", "0", "]", ",", "params", "[", "1", "]", ",", "params", "[", "2", "]", "]", ")", "self", ".", "dirty_playlist", "=", "True", "st_file", ",", "ret", "=", "self", ".", "_get_playlist_abspath_from_data", "(", "stationFile", ")", "if", "ret", "<", "-", "1", ":", "return", "ret", "ret", "=", "self", ".", "save_playlist_file", "(", "st_file", ")", "if", "ret", "<", "0", ":", "ret", "-=", "4", "return", "ret" ]
Append a station to csv file

return  0: All ok
       -2: playlist not found
       -3: negative number specified
       -4: number not found
       -5: Error writing file
       -6: Error renaming file
[ "Append", "a", "station", "to", "csv", "file" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L340-L375
17,646
coderholic/pyradio
pyradio/config.py
PyRadioConfig._check_config_file
def _check_config_file(self, usr):
    ''' Make sure a config file exists in the config dir '''
    package_config_file = path.join(path.dirname(__file__), 'config')
    user_config_file = path.join(usr, 'config')

    ''' restore config from bck file '''
    if path.exists(user_config_file + '.restore'):
        try:
            copyfile(user_config_file + '.restore', user_config_file)
            remove(user_config_file + '.restore')
        except:
            pass

    ''' Copy package config into user dir '''
    if not path.exists(user_config_file):
        copyfile(package_config_file, user_config_file)
python
def _check_config_file(self, usr):
    ''' Make sure a config file exists in the config dir '''
    package_config_file = path.join(path.dirname(__file__), 'config')
    user_config_file = path.join(usr, 'config')

    ''' restore config from bck file '''
    if path.exists(user_config_file + '.restore'):
        try:
            copyfile(user_config_file + '.restore', user_config_file)
            # original read `self.user_config_file`; the local name is meant here
            remove(user_config_file + '.restore')
        except:
            pass

    ''' Copy package config into user dir '''
    if not path.exists(user_config_file):
        copyfile(package_config_file, user_config_file)
[ "def", "_check_config_file", "(", "self", ",", "usr", ")", ":", "package_config_file", "=", "path", ".", "join", "(", "path", ".", "dirname", "(", "__file__", ")", ",", "'config'", ")", "user_config_file", "=", "path", ".", "join", "(", "usr", ",", "'config'", ")", "''' restore config from bck file '''", "if", "path", ".", "exists", "(", "user_config_file", "+", "'.restore'", ")", ":", "try", ":", "copyfile", "(", "user_config_file", "+", "'.restore'", ",", "user_config_file", ")", "remove", "(", "self", ".", "user_config_file", "+", "'.restore'", ")", "except", ":", "pass", "''' Copy package config into user dir '''", "if", "not", "path", ".", "exists", "(", "user_config_file", ")", ":", "copyfile", "(", "package_config_file", ",", "user_config_file", ")" ]
Make sure a config file exists in the config dir
[ "Make", "sure", "a", "config", "file", "exists", "in", "the", "config", "dir" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L566-L581
17,647
coderholic/pyradio
pyradio/config.py
PyRadioConfig.save_config
def save_config(self): """ Save config file Creates config.restore (back up file) Returns: -1: Error saving config 0: Config saved successfully 1: Config not saved (not modified""" if not self.opts['dirty_config'][1]: if logger.isEnabledFor(logging.INFO): logger.info('Config not saved (not modified)') return 1 txt ='''# PyRadio Configuration File # Player selection # This is the equivalent to the -u , --use-player command line parameter # Specify the player to use with PyRadio, or the player detection order # Example: # player = vlc # or # player = vlc,mpv, mplayer # Default value: mpv,mplayer,vlc player = {0} # Default playlist # This is the playlist to open if none is specified # You can scecify full path to CSV file, or if the playlist is in the # config directory, playlist name (filename without extension) or # playlist number (as reported by -ls command line option) # Default value: stations default_playlist = {1} # Default station # This is the equivalent to the -p , --play command line parameter # The station number within the default playlist to play # Value is 1..number of stations, "-1" or "False" means no auto play # "0" or "Random" means play a random station # Default value: False default_station = {2} # Default encoding # This is the encoding used by default when reading data provided by # a station (such as song title, etc.) If reading said data ends up # in an error, 'utf-8' will be used instead. # # A valid encoding list can be found at: # https://docs.python.org/2.7/library/codecs.html#standard-encodings # replacing 2.7 with specific version: # 3.0 up to current python version. # # Default value: utf-8 default_encoding = {3} # Connection timeout # PyRadio will wait for this number of seconds to get a station/server # message indicating that playback has actually started. # If this does not happen (within this number of seconds after the # connection is initiated), PyRadio will consider the station # unreachable, and display the "Failed to connect to: [station]" # message. # # Valid values: 5 - 60 # Default value: 10 connection_timeout = {4} # Default theme # Hardcooded themes: # dark (default) (8 colors) # light (8 colors) # dark_16_colors (16 colors dark theme alternative) # light_16_colors (16 colors light theme alternative) # black_on_white (bow) (256 colors) # white_on_black (wob) (256 colors) # Default value = 'dark' theme = {5} # Transparency setting # If False, theme colors will be used. # If True and a compositor is running, the stations' window # background will be transparent. If True and a compositor is # not running, the terminal's background color will be used. 
# Valid values: True, true, False, false # Default value: False use_transparency = {6} # Playlist management # # Specify whether you will be asked to confirm # every station deletion action # Valid values: True, true, False, false # Default value: True confirm_station_deletion = {7} # Specify whether you will be asked to confirm # playlist reloading, when the playlist has not # been modified within Pyradio # Valid values: True, true, False, false # Default value: True confirm_playlist_reload = {8} # Specify whether you will be asked to save a # modified playlist whenever it needs saving # Valid values: True, true, False, false # Default value: False auto_save_playlist = {9} ''' copyfile(self.config_file, self.config_file + '.restore') if self.opts['default_station'][1] is None: self.opts['default_station'][1] = '-1' try: with open(self.config_file, 'w') as cfgfile: cfgfile.write(txt.format(self.opts['player'][1], self.opts['default_playlist'][1], self.opts['default_station'][1], self.opts['default_encoding'][1], self.opts['connection_timeout'][1], self.opts['theme'][1], self.opts['use_transparency'][1], self.opts['confirm_station_deletion'][1], self.opts['confirm_playlist_reload'][1], self.opts['auto_save_playlist'][1])) except: if logger.isEnabledFor(logging.ERROR): logger.error('Error saving config') return -1 try: remove(self.config_file + '.restore') except: pass if logger.isEnabledFor(logging.INFO): logger.info('Config saved') self.opts['dirty_config'][1] = False return 0
python
def save_config(self): """ Save config file Creates config.restore (back up file) Returns: -1: Error saving config 0: Config saved successfully 1: Config not saved (not modified""" if not self.opts['dirty_config'][1]: if logger.isEnabledFor(logging.INFO): logger.info('Config not saved (not modified)') return 1 txt ='''# PyRadio Configuration File # Player selection # This is the equivalent to the -u , --use-player command line parameter # Specify the player to use with PyRadio, or the player detection order # Example: # player = vlc # or # player = vlc,mpv, mplayer # Default value: mpv,mplayer,vlc player = {0} # Default playlist # This is the playlist to open if none is specified # You can scecify full path to CSV file, or if the playlist is in the # config directory, playlist name (filename without extension) or # playlist number (as reported by -ls command line option) # Default value: stations default_playlist = {1} # Default station # This is the equivalent to the -p , --play command line parameter # The station number within the default playlist to play # Value is 1..number of stations, "-1" or "False" means no auto play # "0" or "Random" means play a random station # Default value: False default_station = {2} # Default encoding # This is the encoding used by default when reading data provided by # a station (such as song title, etc.) If reading said data ends up # in an error, 'utf-8' will be used instead. # # A valid encoding list can be found at: # https://docs.python.org/2.7/library/codecs.html#standard-encodings # replacing 2.7 with specific version: # 3.0 up to current python version. # # Default value: utf-8 default_encoding = {3} # Connection timeout # PyRadio will wait for this number of seconds to get a station/server # message indicating that playback has actually started. # If this does not happen (within this number of seconds after the # connection is initiated), PyRadio will consider the station # unreachable, and display the "Failed to connect to: [station]" # message. # # Valid values: 5 - 60 # Default value: 10 connection_timeout = {4} # Default theme # Hardcooded themes: # dark (default) (8 colors) # light (8 colors) # dark_16_colors (16 colors dark theme alternative) # light_16_colors (16 colors light theme alternative) # black_on_white (bow) (256 colors) # white_on_black (wob) (256 colors) # Default value = 'dark' theme = {5} # Transparency setting # If False, theme colors will be used. # If True and a compositor is running, the stations' window # background will be transparent. If True and a compositor is # not running, the terminal's background color will be used. 
# Valid values: True, true, False, false # Default value: False use_transparency = {6} # Playlist management # # Specify whether you will be asked to confirm # every station deletion action # Valid values: True, true, False, false # Default value: True confirm_station_deletion = {7} # Specify whether you will be asked to confirm # playlist reloading, when the playlist has not # been modified within Pyradio # Valid values: True, true, False, false # Default value: True confirm_playlist_reload = {8} # Specify whether you will be asked to save a # modified playlist whenever it needs saving # Valid values: True, true, False, false # Default value: False auto_save_playlist = {9} ''' copyfile(self.config_file, self.config_file + '.restore') if self.opts['default_station'][1] is None: self.opts['default_station'][1] = '-1' try: with open(self.config_file, 'w') as cfgfile: cfgfile.write(txt.format(self.opts['player'][1], self.opts['default_playlist'][1], self.opts['default_station'][1], self.opts['default_encoding'][1], self.opts['connection_timeout'][1], self.opts['theme'][1], self.opts['use_transparency'][1], self.opts['confirm_station_deletion'][1], self.opts['confirm_playlist_reload'][1], self.opts['auto_save_playlist'][1])) except: if logger.isEnabledFor(logging.ERROR): logger.error('Error saving config') return -1 try: remove(self.config_file + '.restore') except: pass if logger.isEnabledFor(logging.INFO): logger.info('Config saved') self.opts['dirty_config'][1] = False return 0
[ "def", "save_config", "(", "self", ")", ":", "if", "not", "self", ".", "opts", "[", "'dirty_config'", "]", "[", "1", "]", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "'Config not saved (not modified)'", ")", "return", "1", "txt", "=", "'''# PyRadio Configuration File\n\n# Player selection\n# This is the equivalent to the -u , --use-player command line parameter\n# Specify the player to use with PyRadio, or the player detection order\n# Example:\n# player = vlc\n# or\n# player = vlc,mpv, mplayer\n# Default value: mpv,mplayer,vlc\nplayer = {0}\n\n# Default playlist\n# This is the playlist to open if none is specified\n# You can scecify full path to CSV file, or if the playlist is in the\n# config directory, playlist name (filename without extension) or\n# playlist number (as reported by -ls command line option)\n# Default value: stations\ndefault_playlist = {1}\n\n# Default station\n# This is the equivalent to the -p , --play command line parameter\n# The station number within the default playlist to play\n# Value is 1..number of stations, \"-1\" or \"False\" means no auto play\n# \"0\" or \"Random\" means play a random station\n# Default value: False\ndefault_station = {2}\n\n# Default encoding\n# This is the encoding used by default when reading data provided by\n# a station (such as song title, etc.) If reading said data ends up\n# in an error, 'utf-8' will be used instead.\n#\n# A valid encoding list can be found at:\n# https://docs.python.org/2.7/library/codecs.html#standard-encodings\n# replacing 2.7 with specific version:\n# 3.0 up to current python version.\n#\n# Default value: utf-8\ndefault_encoding = {3}\n\n# Connection timeout\n# PyRadio will wait for this number of seconds to get a station/server\n# message indicating that playback has actually started.\n# If this does not happen (within this number of seconds after the\n# connection is initiated), PyRadio will consider the station\n# unreachable, and display the \"Failed to connect to: [station]\"\n# message.\n#\n# Valid values: 5 - 60\n# Default value: 10\nconnection_timeout = {4}\n\n# Default theme\n# Hardcooded themes:\n# dark (default) (8 colors)\n# light (8 colors)\n# dark_16_colors (16 colors dark theme alternative)\n# light_16_colors (16 colors light theme alternative)\n# black_on_white (bow) (256 colors)\n# white_on_black (wob) (256 colors)\n# Default value = 'dark'\ntheme = {5}\n\n# Transparency setting\n# If False, theme colors will be used.\n# If True and a compositor is running, the stations' window\n# background will be transparent. 
If True and a compositor is\n# not running, the terminal's background color will be used.\n# Valid values: True, true, False, false\n# Default value: False\nuse_transparency = {6}\n\n\n# Playlist management\n#\n# Specify whether you will be asked to confirm\n# every station deletion action\n# Valid values: True, true, False, false\n# Default value: True\nconfirm_station_deletion = {7}\n\n# Specify whether you will be asked to confirm\n# playlist reloading, when the playlist has not\n# been modified within Pyradio\n# Valid values: True, true, False, false\n# Default value: True\nconfirm_playlist_reload = {8}\n\n# Specify whether you will be asked to save a\n# modified playlist whenever it needs saving\n# Valid values: True, true, False, false\n# Default value: False\nauto_save_playlist = {9}\n\n'''", "copyfile", "(", "self", ".", "config_file", ",", "self", ".", "config_file", "+", "'.restore'", ")", "if", "self", ".", "opts", "[", "'default_station'", "]", "[", "1", "]", "is", "None", ":", "self", ".", "opts", "[", "'default_station'", "]", "[", "1", "]", "=", "'-1'", "try", ":", "with", "open", "(", "self", ".", "config_file", ",", "'w'", ")", "as", "cfgfile", ":", "cfgfile", ".", "write", "(", "txt", ".", "format", "(", "self", ".", "opts", "[", "'player'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'default_playlist'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'default_station'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'default_encoding'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'connection_timeout'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'theme'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'use_transparency'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'confirm_station_deletion'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'confirm_playlist_reload'", "]", "[", "1", "]", ",", "self", ".", "opts", "[", "'auto_save_playlist'", "]", "[", "1", "]", ")", ")", "except", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "ERROR", ")", ":", "logger", ".", "error", "(", "'Error saving config'", ")", "return", "-", "1", "try", ":", "remove", "(", "self", ".", "config_file", "+", "'.restore'", ")", "except", ":", "pass", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "'Config saved'", ")", "self", ".", "opts", "[", "'dirty_config'", "]", "[", "1", "]", "=", "False", "return", "0" ]
Save config file
Creates config.restore (back up file)

Returns:
    -1: Error saving config
     0: Config saved successfully
     1: Config not saved (not modified)
[ "Save", "config", "file" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L639-L773
17,648
coderholic/pyradio
pyradio/radio.py
PyRadio.ctrl_c_handler
def ctrl_c_handler(self, signum, frame):
    self.ctrl_c_pressed = True
    if self._cnf.dirty_playlist:
        """ Try to auto save playlist on exit
            Do not check result!!! """
        self.saveCurrentPlaylist()
    """ Try to auto save config on exit
        Do not check result!!! """
    self._cnf.save_config()
python
def ctrl_c_handler(self, signum, frame):
    self.ctrl_c_pressed = True
    if self._cnf.dirty_playlist:
        """ Try to auto save playlist on exit
            Do not check result!!! """
        self.saveCurrentPlaylist()
    """ Try to auto save config on exit
        Do not check result!!! """
    self._cnf.save_config()
[ "def", "ctrl_c_handler", "(", "self", ",", "signum", ",", "frame", ")", ":", "self", ".", "ctrl_c_pressed", "=", "True", "if", "self", ".", "_cnf", ".", "dirty_playlist", ":", "\"\"\" Try to auto save playlist on exit\n Do not check result!!! \"\"\"", "self", ".", "saveCurrentPlaylist", "(", ")", "self", ".", "_cnf", ".", "save_config", "(", ")" ]
Try to auto save config on exit Do not check result!!!
[ "Try", "to", "auto", "save", "config", "on", "exit", "Do", "not", "check", "result!!!" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/radio.py#L432-L440
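Editor's note: a handler with this (signum, frame) signature is registered through the standard signal module; a sketch of the typical wiring (not the exact pyradio call site):

import signal

def ctrl_c_handler(signum, frame):
    print('SIGINT received, saving state...')

signal.signal(signal.SIGINT, ctrl_c_handler)  # invoked on Ctrl-C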
17,649
coderholic/pyradio
pyradio/radio.py
PyRadio._goto_playing_station
def _goto_playing_station(self, changing_playlist=False):
    """ make sure playing station is visible """
    if (self.player.isPlaying() or self.operation_mode == PLAYLIST_MODE) and \
            (self.selection != self.playing or changing_playlist):
        if changing_playlist:
            self.startPos = 0
        max_lines = self.bodyMaxY - 2
        if logger.isEnabledFor(logging.INFO):
            logger.info('max_lines = {0}, self.playing = {1}'.format(max_lines, self.playing))
        if self.number_of_items < max_lines:
            self.startPos = 0
        elif self.playing < self.startPos or \
                self.playing >= self.startPos + max_lines:
            if logger.isEnabledFor(logging.INFO):
                logger.info('=== _goto:adjusting startPos')
            if self.playing < max_lines:
                self.startPos = 0
                if self.playing - int(max_lines / 2) > 0:
                    self.startPos = self.playing - int(max_lines / 2)
            elif self.playing > self.number_of_items - max_lines:
                self.startPos = self.number_of_items - max_lines
            else:
                self.startPos = int(self.playing + 1 / max_lines) - int(max_lines / 2)
        if logger.isEnabledFor(logging.INFO):
            logger.info('===== _goto:startPos = {0}, changing_playlist = {1}'.format(self.startPos, changing_playlist))
        self.selection = self.playing
        self.refreshBody()
python
def _goto_playing_station(self, changing_playlist=False):
    """ make sure playing station is visible """
    if (self.player.isPlaying() or self.operation_mode == PLAYLIST_MODE) and \
            (self.selection != self.playing or changing_playlist):
        if changing_playlist:
            self.startPos = 0
        max_lines = self.bodyMaxY - 2
        if logger.isEnabledFor(logging.INFO):
            logger.info('max_lines = {0}, self.playing = {1}'.format(max_lines, self.playing))
        if self.number_of_items < max_lines:
            self.startPos = 0
        elif self.playing < self.startPos or \
                self.playing >= self.startPos + max_lines:
            if logger.isEnabledFor(logging.INFO):
                logger.info('=== _goto:adjusting startPos')
            if self.playing < max_lines:
                self.startPos = 0
                if self.playing - int(max_lines / 2) > 0:
                    self.startPos = self.playing - int(max_lines / 2)
            elif self.playing > self.number_of_items - max_lines:
                self.startPos = self.number_of_items - max_lines
            else:
                self.startPos = int(self.playing + 1 / max_lines) - int(max_lines / 2)
        if logger.isEnabledFor(logging.INFO):
            logger.info('===== _goto:startPos = {0}, changing_playlist = {1}'.format(self.startPos, changing_playlist))
        self.selection = self.playing
        self.refreshBody()
[ "def", "_goto_playing_station", "(", "self", ",", "changing_playlist", "=", "False", ")", ":", "if", "(", "self", ".", "player", ".", "isPlaying", "(", ")", "or", "self", ".", "operation_mode", "==", "PLAYLIST_MODE", ")", "and", "(", "self", ".", "selection", "!=", "self", ".", "playing", "or", "changing_playlist", ")", ":", "if", "changing_playlist", ":", "self", ".", "startPos", "=", "0", "max_lines", "=", "self", ".", "bodyMaxY", "-", "2", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "'max_lines = {0}, self.playing = {1}'", ".", "format", "(", "max_lines", ",", "self", ".", "playing", ")", ")", "if", "self", ".", "number_of_items", "<", "max_lines", ":", "self", ".", "startPos", "=", "0", "elif", "self", ".", "playing", "<", "self", ".", "startPos", "or", "self", ".", "playing", ">=", "self", ".", "startPos", "+", "max_lines", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "'=== _goto:adjusting startPos'", ")", "if", "self", ".", "playing", "<", "max_lines", ":", "self", ".", "startPos", "=", "0", "if", "self", ".", "playing", "-", "int", "(", "max_lines", "/", "2", ")", ">", "0", ":", "self", ".", "startPos", "=", "self", ".", "playing", "-", "int", "(", "max_lines", "/", "2", ")", "elif", "self", ".", "playing", ">", "self", ".", "number_of_items", "-", "max_lines", ":", "self", ".", "startPos", "=", "self", ".", "number_of_items", "-", "max_lines", "else", ":", "self", ".", "startPos", "=", "int", "(", "self", ".", "playing", "+", "1", "/", "max_lines", ")", "-", "int", "(", "max_lines", "/", "2", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "'===== _goto:startPos = {0}, changing_playlist = {1}'", ".", "format", "(", "self", ".", "startPos", ",", "changing_playlist", ")", ")", "self", ".", "selection", "=", "self", ".", "playing", "self", ".", "refreshBody", "(", ")" ]
make sure playing station is visible
[ "make", "sure", "playing", "station", "is", "visible" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/radio.py#L442-L468
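Editor's note: in the final branch above, the `1 / max_lines` term truncates away inside int(), so the expression reduces to `self.playing - int(max_lines / 2)`, i.e. the branch centres the playing row. The scrolling rule, with made-up numbers:

playing, number_of_items, max_lines = 40, 100, 20
if playing < max_lines:
    startPos = 0
elif playing > number_of_items - max_lines:
    startPos = number_of_items - max_lines  # pin to the last full page
else:
    startPos = playing - max_lines // 2     # centre the playing row
print(startPos)  # 30 -> rows 30..49 are shown, row 40 mid-screen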
17,650
coderholic/pyradio
pyradio/radio.py
PyRadio.setStation
def setStation(self, number):
    """ Select the given station number """
    # If we press up at the first station, we go to the last one
    # and if we press down on the last one we go back to the first one.
    if number < 0:
        number = len(self.stations) - 1
    elif number >= len(self.stations):
        number = 0

    self.selection = number

    maxDisplayedItems = self.bodyMaxY - 2
    if self.selection - self.startPos >= maxDisplayedItems:
        self.startPos = self.selection - maxDisplayedItems + 1
    elif self.selection < self.startPos:
        self.startPos = self.selection
python
def setStation(self, number):
    """ Select the given station number """
    # If we press up at the first station, we go to the last one
    # and if we press down on the last one we go back to the first one.
    if number < 0:
        number = len(self.stations) - 1
    elif number >= len(self.stations):
        number = 0

    self.selection = number

    maxDisplayedItems = self.bodyMaxY - 2
    if self.selection - self.startPos >= maxDisplayedItems:
        self.startPos = self.selection - maxDisplayedItems + 1
    elif self.selection < self.startPos:
        self.startPos = self.selection
[ "def", "setStation", "(", "self", ",", "number", ")", ":", "# If we press up at the first station, we go to the last one", "# and if we press down on the last one we go back to the first one.", "if", "number", "<", "0", ":", "number", "=", "len", "(", "self", ".", "stations", ")", "-", "1", "elif", "number", ">=", "len", "(", "self", ".", "stations", ")", ":", "number", "=", "0", "self", ".", "selection", "=", "number", "maxDisplayedItems", "=", "self", ".", "bodyMaxY", "-", "2", "if", "self", ".", "selection", "-", "self", ".", "startPos", ">=", "maxDisplayedItems", ":", "self", ".", "startPos", "=", "self", ".", "selection", "-", "maxDisplayedItems", "+", "1", "elif", "self", ".", "selection", "<", "self", ".", "startPos", ":", "self", ".", "startPos", "=", "self", ".", "selection" ]
Select the given station number
[ "Select", "the", "given", "station", "number" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/radio.py#L489-L504
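Editor's note: the wrap-around at the list edges, as a pure function:

def wrap(number, length):
    if number < 0:
        return length - 1  # up past the first station -> last one
    if number >= length:
        return 0           # down past the last station -> first one
    return number

print(wrap(-1, 5), wrap(5, 5), wrap(3, 5))  # 4 0 3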
17,651
coderholic/pyradio
pyradio/radio.py
PyRadio._format_playlist_line
def _format_playlist_line(self, lineNum, pad, station):
    """ format playlist line so that it fills self.maxX """
    line = "{0}. {1}".format(str(lineNum + self.startPos + 1).rjust(pad), station[0])
    f_data = ' [{0}, {1}]'.format(station[2], station[1])
    if version_info < (3, 0):
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
            """ this is too long, try to shorten it
                by removing file size """
            f_data = ' [{0}]'.format(station[1])
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
            """ still too long. start removing chars """
            while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 3:
                f_data = f_data[:-1]
            f_data += ']'
        """ if too short, pad f_data to the right """
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
            while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
                line += ' '
    else:
        if len(line) + len(f_data) > self.bodyMaxX - 2:
            """ this is too long, try to shorten it
                by removing file size """
            f_data = ' [{0}]'.format(station[1])
        if len(line) + len(f_data) > self.bodyMaxX - 2:
            """ still too long. start removing chars """
            while len(line) + len(f_data) > self.bodyMaxX - 3:
                f_data = f_data[:-1]
            f_data += ']'
        """ if too short, pad f_data to the right """
        if len(line) + len(f_data) < self.maxX - 2:
            while len(line) + len(f_data) < self.maxX - 2:
                line += ' '
    line += f_data
    return line
python
def _format_playlist_line(self, lineNum, pad, station):
    """ format playlist line so that it fills self.maxX """
    line = "{0}. {1}".format(str(lineNum + self.startPos + 1).rjust(pad), station[0])
    f_data = ' [{0}, {1}]'.format(station[2], station[1])
    if version_info < (3, 0):
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
            """ this is too long, try to shorten it
                by removing file size """
            f_data = ' [{0}]'.format(station[1])
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
            """ still too long. start removing chars """
            while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 3:
                f_data = f_data[:-1]
            f_data += ']'
        """ if too short, pad f_data to the right """
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
            while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
                line += ' '
    else:
        if len(line) + len(f_data) > self.bodyMaxX - 2:
            """ this is too long, try to shorten it
                by removing file size """
            f_data = ' [{0}]'.format(station[1])
        if len(line) + len(f_data) > self.bodyMaxX - 2:
            """ still too long. start removing chars """
            while len(line) + len(f_data) > self.bodyMaxX - 3:
                f_data = f_data[:-1]
            f_data += ']'
        """ if too short, pad f_data to the right """
        if len(line) + len(f_data) < self.maxX - 2:
            while len(line) + len(f_data) < self.maxX - 2:
                line += ' '
    line += f_data
    return line
[ "def", "_format_playlist_line", "(", "self", ",", "lineNum", ",", "pad", ",", "station", ")", ":", "line", "=", "\"{0}. {1}\"", ".", "format", "(", "str", "(", "lineNum", "+", "self", ".", "startPos", "+", "1", ")", ".", "rjust", "(", "pad", ")", ",", "station", "[", "0", "]", ")", "f_data", "=", "' [{0}, {1}]'", ".", "format", "(", "station", "[", "2", "]", ",", "station", "[", "1", "]", ")", "if", "version_info", "<", "(", "3", ",", "0", ")", ":", "if", "len", "(", "line", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", "+", "len", "(", "f_data", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", ">", "self", ".", "bodyMaxX", "-", "2", ":", "\"\"\" this is too long, try to shorten it\n by removing file size \"\"\"", "f_data", "=", "' [{0}]'", ".", "format", "(", "station", "[", "1", "]", ")", "if", "len", "(", "line", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", "+", "len", "(", "f_data", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", ">", "self", ".", "bodyMaxX", "-", "2", ":", "\"\"\" still too long. start removing chars \"\"\"", "while", "len", "(", "line", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", "+", "len", "(", "f_data", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", ">", "self", ".", "bodyMaxX", "-", "3", ":", "f_data", "=", "f_data", "[", ":", "-", "1", "]", "f_data", "+=", "']'", "\"\"\" if too short, pad f_data to the right \"\"\"", "if", "len", "(", "line", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", "+", "len", "(", "f_data", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", "<", "self", ".", "maxX", "-", "2", ":", "while", "len", "(", "line", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", "+", "len", "(", "f_data", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", "<", "self", ".", "maxX", "-", "2", ":", "line", "+=", "' '", "else", ":", "if", "len", "(", "line", ")", "+", "len", "(", "f_data", ")", ">", "self", ".", "bodyMaxX", "-", "2", ":", "\"\"\" this is too long, try to shorten it\n by removing file size \"\"\"", "f_data", "=", "' [{0}]'", ".", "format", "(", "station", "[", "1", "]", ")", "if", "len", "(", "line", ")", "+", "len", "(", "f_data", ")", ">", "self", ".", "bodyMaxX", "-", "2", ":", "\"\"\" still too long. start removing chars \"\"\"", "while", "len", "(", "line", ")", "+", "len", "(", "f_data", ")", ">", "self", ".", "bodyMaxX", "-", "3", ":", "f_data", "=", "f_data", "[", ":", "-", "1", "]", "f_data", "+=", "']'", "\"\"\" if too short, pad f_data to the right \"\"\"", "if", "len", "(", "line", ")", "+", "len", "(", "f_data", ")", "<", "self", ".", "maxX", "-", "2", ":", "while", "len", "(", "line", ")", "+", "len", "(", "f_data", ")", "<", "self", ".", "maxX", "-", "2", ":", "line", "+=", "' '", "line", "+=", "f_data", "return", "line" ]
format playlist line so that it fills self.maxX
[ "format", "playlist", "line", "so", "that", "if", "fills", "self", ".", "maxX" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/radio.py#L772-L805
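Editor's note: the character-by-character shortening above, isolated with assumed widths and sample data:

body_max_x = 30
line = ' 1. Some Station'
f_data = ' [1.2 KB, 2019-01-01]'
if len(line) + len(f_data) > body_max_x - 2:
    while len(line) + len(f_data) > body_max_x - 3:
        f_data = f_data[:-1]  # drop characters from the right ...
    f_data += ']'             # ... then restore the closing bracket
print(line + f_data)  # ' 1. Some Station [1.2 KB, 2]'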
17,652
coderholic/pyradio
pyradio/config_window.py
PyRadioSelectEncodings._resize
def _resize(self, init=False):
    col, row = self._selection_to_col_row(self.selection)
    if not (self.startPos <= row <= self.startPos + self.list_maxY - 1):
        while row > self.startPos:
            self.startPos += 1
        while row < self.startPos + self.list_maxY - 1:
            self.startPos -= 1
    ''' if the selection is at the end of the list, try to scroll down '''
    if init and row > self.list_maxY:
        new_startPos = self._num_of_rows - self.list_maxY + 1
        if row > new_startPos:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('setting startPos at {}'.format(new_startPos))
            self.startPos = new_startPos
    self.refresh_selection()
python
def _resize(self, init=False):
    col, row = self._selection_to_col_row(self.selection)
    if not (self.startPos <= row <= self.startPos + self.list_maxY - 1):
        while row > self.startPos:
            self.startPos += 1
        while row < self.startPos + self.list_maxY - 1:
            self.startPos -= 1
    ''' if the selection is at the end of the list, try to scroll down '''
    if init and row > self.list_maxY:
        new_startPos = self._num_of_rows - self.list_maxY + 1
        if row > new_startPos:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('setting startPos at {}'.format(new_startPos))
            self.startPos = new_startPos
    self.refresh_selection()
[ "def", "_resize", "(", "self", ",", "init", "=", "False", ")", ":", "col", ",", "row", "=", "self", ".", "_selection_to_col_row", "(", "self", ".", "selection", ")", "if", "not", "(", "self", ".", "startPos", "<=", "row", "<=", "self", ".", "startPos", "+", "self", ".", "list_maxY", "-", "1", ")", ":", "while", "row", ">", "self", ".", "startPos", ":", "self", ".", "startPos", "+=", "1", "while", "row", "<", "self", ".", "startPos", "+", "self", ".", "list_maxY", "-", "1", ":", "self", ".", "startPos", "-=", "1", "if", "init", "and", "row", ">", "self", ".", "list_maxY", ":", "new_startPos", "=", "self", ".", "_num_of_rows", "-", "self", ".", "list_maxY", "+", "1", "if", "row", ">", "new_startPos", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "'setting startPos at {}'", ".", "format", "(", "new_startPos", ")", ")", "self", ".", "startPos", "=", "new_startPos", "self", ".", "refresh_selection", "(", ")" ]
if the selection is at the end of the list, try to scroll down
[ "if", "the", "selection", "at", "the", "end", "of", "the", "list", "try", "to", "scroll", "down" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config_window.py#L745-L760
17,653
coderholic/pyradio
pyradio/simple_curses_widgets.py
SimpleCursesLineEdit._get_char
def _get_char(self, win, char):
    def get_check_next_byte():
        char = win.getch()
        if 128 <= char <= 191:
            return char
        else:
            raise UnicodeError

    bytes = []
    if char <= 127:
        # 1 byte
        bytes.append(char)
    #elif 194 <= char <= 223:
    elif 192 <= char <= 223:
        # 2 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
    elif 224 <= char <= 239:
        # 3 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
    elif 240 <= char <= 244:
        # 4 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
    #print('bytes = {}'.format(bytes))
    """ no zero byte allowed """
    while 0 in bytes:
        bytes.remove(0)
    if version_info < (3, 0):
        out = ''.join([chr(b) for b in bytes])
    else:
        buf = bytearray(bytes)
        out = self._decode_string(buf)
        #out = buf.decode('utf-8')
    return out
python
def _get_char(self, win, char):
    def get_check_next_byte():
        char = win.getch()
        if 128 <= char <= 191:
            return char
        else:
            raise UnicodeError

    bytes = []
    if char <= 127:
        # 1 byte
        bytes.append(char)
    #elif 194 <= char <= 223:
    elif 192 <= char <= 223:
        # 2 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
    elif 224 <= char <= 239:
        # 3 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
    elif 240 <= char <= 244:
        # 4 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
    #print('bytes = {}'.format(bytes))
    """ no zero byte allowed """
    while 0 in bytes:
        bytes.remove(0)
    if version_info < (3, 0):
        out = ''.join([chr(b) for b in bytes])
    else:
        buf = bytearray(bytes)
        out = self._decode_string(buf)
        #out = buf.decode('utf-8')
    return out
[ "def", "_get_char", "(", "self", ",", "win", ",", "char", ")", ":", "def", "get_check_next_byte", "(", ")", ":", "char", "=", "win", ".", "getch", "(", ")", "if", "128", "<=", "char", "<=", "191", ":", "return", "char", "else", ":", "raise", "UnicodeError", "bytes", "=", "[", "]", "if", "char", "<=", "127", ":", "# 1 bytes", "bytes", ".", "append", "(", "char", ")", "#elif 194 <= char <= 223:", "elif", "192", "<=", "char", "<=", "223", ":", "# 2 bytes", "bytes", ".", "append", "(", "char", ")", "bytes", ".", "append", "(", "get_check_next_byte", "(", ")", ")", "elif", "224", "<=", "char", "<=", "239", ":", "# 3 bytes", "bytes", ".", "append", "(", "char", ")", "bytes", ".", "append", "(", "get_check_next_byte", "(", ")", ")", "bytes", ".", "append", "(", "get_check_next_byte", "(", ")", ")", "elif", "240", "<=", "char", "<=", "244", ":", "# 4 bytes", "bytes", ".", "append", "(", "char", ")", "bytes", ".", "append", "(", "get_check_next_byte", "(", ")", ")", "bytes", ".", "append", "(", "get_check_next_byte", "(", ")", ")", "bytes", ".", "append", "(", "get_check_next_byte", "(", ")", ")", "#print('bytes = {}'.format(bytes))", "while", "0", "in", "bytes", ":", "bytes", ".", "remove", "(", "0", ")", "if", "version_info", "<", "(", "3", ",", "0", ")", ":", "out", "=", "''", ".", "join", "(", "[", "chr", "(", "b", ")", "for", "b", "in", "bytes", "]", ")", "else", ":", "buf", "=", "bytearray", "(", "bytes", ")", "out", "=", "self", ".", "_decode_string", "(", "buf", ")", "#out = buf.decode('utf-8')", "return", "out" ]
no zero byte allowed
[ "no", "zero", "byte", "allowed" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/simple_curses_widgets.py#L342-L380
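Editor's note: the byte counting above follows the standard UTF-8 lead-byte ranges; the same table as a tiny function:

def utf8_length(lead):
    if lead <= 0x7F:
        return 1  # ASCII
    if 0xC0 <= lead <= 0xDF:
        return 2
    if 0xE0 <= lead <= 0xEF:
        return 3
    if 0xF0 <= lead <= 0xF4:
        return 4
    raise UnicodeError('not a valid UTF-8 lead byte')

print(utf8_length(ord('a')), utf8_length(0xCE), utf8_length(0xE2))  # 1 2 3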
17,654
coderholic/pyradio
pyradio/edit.py
PyRadioSearch._get_history_next
def _get_history_next(self):
    """ callback function for key down """
    if self._has_history:
        ret = self._input_history.return_history(1)
        self.string = ret
        self._curs_pos = len(ret)
python
def _get_history_next(self):
    """ callback function for key down """
    if self._has_history:
        ret = self._input_history.return_history(1)
        self.string = ret
        self._curs_pos = len(ret)
[ "def", "_get_history_next", "(", "self", ")", ":", "if", "self", ".", "_has_history", ":", "ret", "=", "self", ".", "_input_history", ".", "return_history", "(", "1", ")", "self", ".", "string", "=", "ret", "self", ".", "_curs_pos", "=", "len", "(", "ret", ")" ]
callback function for key down
[ "callback", "function", "for", "key", "down" ]
c5219d350bccbccd49dbd627c1f886a952ea1963
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/edit.py#L49-L54
17,655
bids-standard/pybids
bids/analysis/analysis.py
apply_transformations
def apply_transformations(collection, transformations, select=None):
    ''' Apply all transformations to the variables in the collection.

    Args:
        transformations (list): List of transformations to apply.
        select (list): Optional list of names of variables to retain after
            all transformations are applied.
    '''
    for t in transformations:
        kwargs = dict(t)
        func = kwargs.pop('name')
        cols = kwargs.pop('input', None)

        if isinstance(func, string_types):
            if func in ('and', 'or'):
                func += '_'
            if not hasattr(transform, func):
                raise ValueError("No transformation '%s' found!" % func)
            func = getattr(transform, func)
            func(collection, cols, **kwargs)

    if select is not None:
        transform.Select(collection, select)

    return collection
python
def apply_transformations(collection, transformations, select=None):
    ''' Apply all transformations to the variables in the collection.

    Args:
        transformations (list): List of transformations to apply.
        select (list): Optional list of names of variables to retain after
            all transformations are applied.
    '''
    for t in transformations:
        kwargs = dict(t)
        func = kwargs.pop('name')
        cols = kwargs.pop('input', None)

        if isinstance(func, string_types):
            if func in ('and', 'or'):
                func += '_'
            if not hasattr(transform, func):
                raise ValueError("No transformation '%s' found!" % func)
            func = getattr(transform, func)
            func(collection, cols, **kwargs)

    if select is not None:
        transform.Select(collection, select)

    return collection
[ "def", "apply_transformations", "(", "collection", ",", "transformations", ",", "select", "=", "None", ")", ":", "for", "t", "in", "transformations", ":", "kwargs", "=", "dict", "(", "t", ")", "func", "=", "kwargs", ".", "pop", "(", "'name'", ")", "cols", "=", "kwargs", ".", "pop", "(", "'input'", ",", "None", ")", "if", "isinstance", "(", "func", ",", "string_types", ")", ":", "if", "func", "in", "(", "'and'", ",", "'or'", ")", ":", "func", "+=", "'_'", "if", "not", "hasattr", "(", "transform", ",", "func", ")", ":", "raise", "ValueError", "(", "\"No transformation '%s' found!\"", "%", "func", ")", "func", "=", "getattr", "(", "transform", ",", "func", ")", "func", "(", "collection", ",", "cols", ",", "*", "*", "kwargs", ")", "if", "select", "is", "not", "None", ":", "transform", ".", "Select", "(", "collection", ",", "select", ")", "return", "collection" ]
Apply all transformations to the variables in the collection. Args: transformations (list): List of transformations to apply. select (list): Optional list of names of variables to retain after all transformations are applied.
[ "Apply", "all", "transformations", "to", "the", "variables", "in", "the", "collection", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/analysis.py#L489-L513
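A hypothetical call to the apply_transformations entry above. The placeholder dataset path, the variable names, and the assumption that the function is importable from bids.analysis.analysis (the module in the path field) are illustrative, not taken from the source:

from bids import BIDSLayout
from bids.analysis.analysis import apply_transformations

layout = BIDSLayout('/data/bids')            # placeholder path
collection = layout.get_collections('run', merge=True)
transformations = [
    {'name': 'Scale', 'input': ['reaction_time']},
    # 'and' is remapped to 'and_' by the func += '_' branch above
    {'name': 'and', 'input': ['cond_a', 'cond_b'], 'output': ['a_and_b']},
]
collection = apply_transformations(collection, transformations,
                                   select=['reaction_time', 'a_and_b'])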
17,656
bids-standard/pybids
bids/analysis/analysis.py
Analysis.setup
def setup(self, steps=None, drop_na=False, **kwargs): ''' Set up the sequence of steps for analysis. Args: steps (list): Optional list of steps to set up. Each element must be either an int giving the index of the step in the JSON config block list, or a str giving the (unique) name of the step, as specified in the JSON config. Steps that do not match either index or name will be skipped. drop_na (bool): Boolean indicating whether or not to automatically drop events that have a n/a amplitude when reading in data from event files. ''' # In the beginning, there was nothing input_nodes = None # Use inputs from model, and update with kwargs selectors = self.model.get('input', {}).copy() selectors.update(kwargs) for i, b in enumerate(self.steps): # Skip any steps whose names or indexes don't match block list if steps is not None and i not in steps and b.name not in steps: continue b.setup(input_nodes, drop_na=drop_na, **selectors) input_nodes = b.output_nodes
python
def setup(self, steps=None, drop_na=False, **kwargs): ''' Set up the sequence of steps for analysis. Args: steps (list): Optional list of steps to set up. Each element must be either an int giving the index of the step in the JSON config block list, or a str giving the (unique) name of the step, as specified in the JSON config. Steps that do not match either index or name will be skipped. drop_na (bool): Boolean indicating whether or not to automatically drop events that have a n/a amplitude when reading in data from event files. ''' # In the beginning, there was nothing input_nodes = None # Use inputs from model, and update with kwargs selectors = self.model.get('input', {}).copy() selectors.update(kwargs) for i, b in enumerate(self.steps): # Skip any steps whose names or indexes don't match block list if steps is not None and i not in steps and b.name not in steps: continue b.setup(input_nodes, drop_na=drop_na, **selectors) input_nodes = b.output_nodes
[ "def", "setup", "(", "self", ",", "steps", "=", "None", ",", "drop_na", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# In the beginning, there was nothing", "input_nodes", "=", "None", "# Use inputs from model, and update with kwargs", "selectors", "=", "self", ".", "model", ".", "get", "(", "'input'", ",", "{", "}", ")", ".", "copy", "(", ")", "selectors", ".", "update", "(", "kwargs", ")", "for", "i", ",", "b", "in", "enumerate", "(", "self", ".", "steps", ")", ":", "# Skip any steps whose names or indexes don't match block list", "if", "steps", "is", "not", "None", "and", "i", "not", "in", "steps", "and", "b", ".", "name", "not", "in", "steps", ":", "continue", "b", ".", "setup", "(", "input_nodes", ",", "drop_na", "=", "drop_na", ",", "*", "*", "selectors", ")", "input_nodes", "=", "b", ".", "output_nodes" ]
Set up the sequence of steps for analysis. Args: steps (list): Optional list of steps to set up. Each element must be either an int giving the index of the step in the JSON config block list, or a str giving the (unique) name of the step, as specified in the JSON config. Steps that do not match either index or name will be skipped. drop_na (bool): Boolean indicating whether or not to automatically drop events that have an n/a amplitude when reading in data from event files.
[ "Set", "up", "the", "sequence", "of", "steps", "for", "analysis", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/analysis.py#L62-L90
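A minimal end-to-end sketch of Analysis.setup, assuming a dataset at the placeholder path '/data/ds' and a model JSON at 'model.json'; mixing an index (0) with a step name ('subject') is allowed per the docstring above:

from bids import BIDSLayout
from bids.analysis import Analysis

layout = BIDSLayout('/data/ds')
analysis = Analysis(layout, 'model.json')
# scan_length is forwarded through **kwargs and, per Step.setup below,
# dropped again for any step whose level is not 'run'
analysis.setup(steps=[0, 'subject'], drop_na=True, scan_length=480)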
17,657
bids-standard/pybids
bids/analysis/analysis.py
Step.setup
def setup(self, input_nodes=None, drop_na=False, **kwargs): ''' Set up the Step and construct the design matrix. Args: input_nodes (list): Optional list of Node objects produced by the preceding Step in the analysis. If None, uses any inputs passed in at Step initialization. drop_na (bool): Boolean indicating whether or not to automatically drop events that have a n/a amplitude when reading in data from event files. kwargs: Optional keyword arguments to pass onto load_variables. ''' self.output_nodes = [] input_nodes = input_nodes or self.input_nodes or [] # TODO: remove the scan_length argument entirely once we switch tests # to use the synthetic dataset with image headers. if self.level != 'run': kwargs = kwargs.copy() kwargs.pop('scan_length', None) collections = self.layout.get_collections(self.level, drop_na=drop_na, **kwargs) objects = collections + input_nodes objects, kwargs = self._filter_objects(objects, kwargs) groups = self._group_objects(objects) # Set up and validate variable lists model = self.model or {} X = model.get('x', []) for grp in groups: # Split into separate lists of Collections and Nodes input_nodes = [o for o in grp if isinstance(o, AnalysisNode)] colls = list(set(grp) - set(input_nodes)) if input_nodes: node_coll = self._concatenate_input_nodes(input_nodes) colls.append(node_coll) coll = merge_collections(colls) if len(colls) > 1 else colls[0] coll = apply_transformations(coll, self.transformations) if X: transform.Select(coll, X) node = AnalysisNode(self.level, coll, self.contrasts, input_nodes, self.auto_contrasts) self.output_nodes.append(node)
python
def setup(self, input_nodes=None, drop_na=False, **kwargs): ''' Set up the Step and construct the design matrix. Args: input_nodes (list): Optional list of Node objects produced by the preceding Step in the analysis. If None, uses any inputs passed in at Step initialization. drop_na (bool): Boolean indicating whether or not to automatically drop events that have a n/a amplitude when reading in data from event files. kwargs: Optional keyword arguments to pass onto load_variables. ''' self.output_nodes = [] input_nodes = input_nodes or self.input_nodes or [] # TODO: remove the scan_length argument entirely once we switch tests # to use the synthetic dataset with image headers. if self.level != 'run': kwargs = kwargs.copy() kwargs.pop('scan_length', None) collections = self.layout.get_collections(self.level, drop_na=drop_na, **kwargs) objects = collections + input_nodes objects, kwargs = self._filter_objects(objects, kwargs) groups = self._group_objects(objects) # Set up and validate variable lists model = self.model or {} X = model.get('x', []) for grp in groups: # Split into separate lists of Collections and Nodes input_nodes = [o for o in grp if isinstance(o, AnalysisNode)] colls = list(set(grp) - set(input_nodes)) if input_nodes: node_coll = self._concatenate_input_nodes(input_nodes) colls.append(node_coll) coll = merge_collections(colls) if len(colls) > 1 else colls[0] coll = apply_transformations(coll, self.transformations) if X: transform.Select(coll, X) node = AnalysisNode(self.level, coll, self.contrasts, input_nodes, self.auto_contrasts) self.output_nodes.append(node)
[ "def", "setup", "(", "self", ",", "input_nodes", "=", "None", ",", "drop_na", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "output_nodes", "=", "[", "]", "input_nodes", "=", "input_nodes", "or", "self", ".", "input_nodes", "or", "[", "]", "# TODO: remove the scan_length argument entirely once we switch tests", "# to use the synthetic dataset with image headers.", "if", "self", ".", "level", "!=", "'run'", ":", "kwargs", "=", "kwargs", ".", "copy", "(", ")", "kwargs", ".", "pop", "(", "'scan_length'", ",", "None", ")", "collections", "=", "self", ".", "layout", ".", "get_collections", "(", "self", ".", "level", ",", "drop_na", "=", "drop_na", ",", "*", "*", "kwargs", ")", "objects", "=", "collections", "+", "input_nodes", "objects", ",", "kwargs", "=", "self", ".", "_filter_objects", "(", "objects", ",", "kwargs", ")", "groups", "=", "self", ".", "_group_objects", "(", "objects", ")", "# Set up and validate variable lists", "model", "=", "self", ".", "model", "or", "{", "}", "X", "=", "model", ".", "get", "(", "'x'", ",", "[", "]", ")", "for", "grp", "in", "groups", ":", "# Split into separate lists of Collections and Nodes", "input_nodes", "=", "[", "o", "for", "o", "in", "grp", "if", "isinstance", "(", "o", ",", "AnalysisNode", ")", "]", "colls", "=", "list", "(", "set", "(", "grp", ")", "-", "set", "(", "input_nodes", ")", ")", "if", "input_nodes", ":", "node_coll", "=", "self", ".", "_concatenate_input_nodes", "(", "input_nodes", ")", "colls", ".", "append", "(", "node_coll", ")", "coll", "=", "merge_collections", "(", "colls", ")", "if", "len", "(", "colls", ")", ">", "1", "else", "colls", "[", "0", "]", "coll", "=", "apply_transformations", "(", "coll", ",", "self", ".", "transformations", ")", "if", "X", ":", "transform", ".", "Select", "(", "coll", ",", "X", ")", "node", "=", "AnalysisNode", "(", "self", ".", "level", ",", "coll", ",", "self", ".", "contrasts", ",", "input_nodes", ",", "self", ".", "auto_contrasts", ")", "self", ".", "output_nodes", ".", "append", "(", "node", ")" ]
Set up the Step and construct the design matrix. Args: input_nodes (list): Optional list of Node objects produced by the preceding Step in the analysis. If None, uses any inputs passed in at Step initialization. drop_na (bool): Boolean indicating whether or not to automatically drop events that have an n/a amplitude when reading in data from event files. kwargs: Optional keyword arguments to pass onto load_variables.
[ "Set", "up", "the", "Step", "and", "construct", "the", "design", "matrix", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/analysis.py#L168-L218
17,658
bids-standard/pybids
bids/reports/utils.py
get_slice_info
def get_slice_info(slice_times):
    """
    Extract slice order from slice timing info.

    TODO: Be more specific with slice orders.
    Currently anything where there's some kind of skipping is interpreted as
    interleaved of some kind.

    Parameters
    ----------
    slice_times : array-like
        A list of slice times in seconds or milliseconds; only the relative
        ordering of the values is used.

    Returns
    -------
    slice_order_name : :obj:`str`
        The name of the slice order sequence.
    """
    # Slice order
    slice_times = remove_duplicates(slice_times)
    slice_order = sorted(range(len(slice_times)), key=lambda k: slice_times[k])
    # sorted() returns a list, so compare against lists; a bare range or
    # reversed() iterator never compares equal to a list under Python 3
    if slice_order == list(range(len(slice_order))):
        slice_order_name = 'sequential ascending'
    elif slice_order == list(reversed(range(len(slice_order)))):
        slice_order_name = 'sequential descending'
    elif slice_order[0] < slice_order[1]:
        # We're allowing some wiggle room on interleaved.
        slice_order_name = 'interleaved ascending'
    elif slice_order[0] > slice_order[1]:
        slice_order_name = 'interleaved descending'
    else:
        slice_order = [str(s) for s in slice_order]
        raise Exception('Unknown slice order: [{0}]'.format(', '.join(slice_order)))

    return slice_order_name
python
def get_slice_info(slice_times):
    """
    Extract slice order from slice timing info.

    TODO: Be more specific with slice orders.
    Currently anything where there's some kind of skipping is interpreted as
    interleaved of some kind.

    Parameters
    ----------
    slice_times : array-like
        A list of slice times in seconds or milliseconds; only the relative
        ordering of the values is used.

    Returns
    -------
    slice_order_name : :obj:`str`
        The name of the slice order sequence.
    """
    # Slice order
    slice_times = remove_duplicates(slice_times)
    slice_order = sorted(range(len(slice_times)), key=lambda k: slice_times[k])
    # sorted() returns a list, so compare against lists; a bare range or
    # reversed() iterator never compares equal to a list under Python 3
    if slice_order == list(range(len(slice_order))):
        slice_order_name = 'sequential ascending'
    elif slice_order == list(reversed(range(len(slice_order)))):
        slice_order_name = 'sequential descending'
    elif slice_order[0] < slice_order[1]:
        # We're allowing some wiggle room on interleaved.
        slice_order_name = 'interleaved ascending'
    elif slice_order[0] > slice_order[1]:
        slice_order_name = 'interleaved descending'
    else:
        slice_order = [str(s) for s in slice_order]
        raise Exception('Unknown slice order: [{0}]'.format(', '.join(slice_order)))

    return slice_order_name
[ "def", "get_slice_info", "(", "slice_times", ")", ":", "# Slice order", "slice_times", "=", "remove_duplicates", "(", "slice_times", ")", "slice_order", "=", "sorted", "(", "range", "(", "len", "(", "slice_times", ")", ")", ",", "key", "=", "lambda", "k", ":", "slice_times", "[", "k", "]", ")", "if", "slice_order", "==", "range", "(", "len", "(", "slice_order", ")", ")", ":", "slice_order_name", "=", "'sequential ascending'", "elif", "slice_order", "==", "reversed", "(", "range", "(", "len", "(", "slice_order", ")", ")", ")", ":", "slice_order_name", "=", "'sequential descending'", "elif", "slice_order", "[", "0", "]", "<", "slice_order", "[", "1", "]", ":", "# We're allowing some wiggle room on interleaved.", "slice_order_name", "=", "'interleaved ascending'", "elif", "slice_order", "[", "0", "]", ">", "slice_order", "[", "1", "]", ":", "slice_order_name", "=", "'interleaved descending'", "else", ":", "slice_order", "=", "[", "str", "(", "s", ")", "for", "s", "in", "slice_order", "]", "raise", "Exception", "(", "'Unknown slice order: [{0}]'", ".", "format", "(", "', '", ".", "join", "(", "slice_order", ")", ")", ")", "return", "slice_order_name" ]
Extract slice order from slice timing info. TODO: Be more specific with slice orders. Currently anything where there's some kind of skipping is interpreted as interleaved of some kind. Parameters ---------- slice_times : array-like A list of slice times in seconds or milliseconds; only the relative ordering of the values is used. Returns ------- slice_order_name : :obj:`str` The name of the slice order sequence.
[ "Extract", "slice", "order", "from", "slice", "timing", "info", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/utils.py#L70-L104
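A worked example of the ordering logic in get_slice_info (after the list() fixes above), for an interleaved-ascending acquisition of six slices; this snippet is self-contained and runnable:

slice_times = [0.0, 0.5, 0.1, 0.6, 0.2, 0.7]
slice_order = sorted(range(len(slice_times)), key=lambda k: slice_times[k])
print(slice_order)  # [0, 2, 4, 1, 3, 5]: not sequential, and the first two
                    # entries ascend, so the result is 'interleaved ascending'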
17,659
bids-standard/pybids
bids/reports/utils.py
get_sizestr
def get_sizestr(img): """ Extract and reformat voxel size, matrix size, field of view, and number of slices into pretty strings. Parameters ---------- img : :obj:`nibabel.Nifti1Image` Image from scan from which to derive parameters. Returns ------- n_slices : :obj:`int` Number of slices. voxel_size : :obj:`str` Voxel size string (e.g., '2x2x2') matrix_size : :obj:`str` Matrix size string (e.g., '128x128') fov : :obj:`str` Field of view string (e.g., '256x256') """ n_x, n_y, n_slices = img.shape[:3] import numpy as np voxel_dims = np.array(img.header.get_zooms()[:3]) matrix_size = '{0}x{1}'.format(num_to_str(n_x), num_to_str(n_y)) voxel_size = 'x'.join([num_to_str(s) for s in voxel_dims]) fov = [n_x, n_y] * voxel_dims[:2] fov = 'x'.join([num_to_str(s) for s in fov]) return n_slices, voxel_size, matrix_size, fov
python
def get_sizestr(img): """ Extract and reformat voxel size, matrix size, field of view, and number of slices into pretty strings. Parameters ---------- img : :obj:`nibabel.Nifti1Image` Image from scan from which to derive parameters. Returns ------- n_slices : :obj:`int` Number of slices. voxel_size : :obj:`str` Voxel size string (e.g., '2x2x2') matrix_size : :obj:`str` Matrix size string (e.g., '128x128') fov : :obj:`str` Field of view string (e.g., '256x256') """ n_x, n_y, n_slices = img.shape[:3] import numpy as np voxel_dims = np.array(img.header.get_zooms()[:3]) matrix_size = '{0}x{1}'.format(num_to_str(n_x), num_to_str(n_y)) voxel_size = 'x'.join([num_to_str(s) for s in voxel_dims]) fov = [n_x, n_y] * voxel_dims[:2] fov = 'x'.join([num_to_str(s) for s in fov]) return n_slices, voxel_size, matrix_size, fov
[ "def", "get_sizestr", "(", "img", ")", ":", "n_x", ",", "n_y", ",", "n_slices", "=", "img", ".", "shape", "[", ":", "3", "]", "import", "numpy", "as", "np", "voxel_dims", "=", "np", ".", "array", "(", "img", ".", "header", ".", "get_zooms", "(", ")", "[", ":", "3", "]", ")", "matrix_size", "=", "'{0}x{1}'", ".", "format", "(", "num_to_str", "(", "n_x", ")", ",", "num_to_str", "(", "n_y", ")", ")", "voxel_size", "=", "'x'", ".", "join", "(", "[", "num_to_str", "(", "s", ")", "for", "s", "in", "voxel_dims", "]", ")", "fov", "=", "[", "n_x", ",", "n_y", "]", "*", "voxel_dims", "[", ":", "2", "]", "fov", "=", "'x'", ".", "join", "(", "[", "num_to_str", "(", "s", ")", "for", "s", "in", "fov", "]", ")", "return", "n_slices", ",", "voxel_size", ",", "matrix_size", ",", "fov" ]
Extract and reformat voxel size, matrix size, field of view, and number of slices into pretty strings. Parameters ---------- img : :obj:`nibabel.Nifti1Image` Image from scan from which to derive parameters. Returns ------- n_slices : :obj:`int` Number of slices. voxel_size : :obj:`str` Voxel size string (e.g., '2x2x2') matrix_size : :obj:`str` Matrix size string (e.g., '128x128') fov : :obj:`str` Field of view string (e.g., '256x256')
[ "Extract", "and", "reformat", "voxel", "size", "matrix", "size", "field", "of", "view", "and", "number", "of", "slices", "into", "pretty", "strings", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/utils.py#L138-L166
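The field-of-view arithmetic in get_sizestr, spelled out with NumPy alone; the matrix and voxel sizes are illustrative, not taken from any particular scan:

import numpy as np

n_x, n_y = 64, 64
voxel_dims = np.array([3.0, 3.0, 4.0])
# elementwise product of in-plane matrix size and voxel size
fov = [n_x, n_y] * voxel_dims[:2]
print(fov)  # [192. 192.] -> formatted by num_to_str as '192x192'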
17,660
bids-standard/pybids
bids/layout/layout.py
add_config_paths
def add_config_paths(**kwargs): """ Add to the pool of available configuration files for BIDSLayout. Args: kwargs: dictionary specifying where to find additional config files. Keys are names, values are paths to the corresponding .json file. Example: > add_config_paths(my_config='/path/to/config') > layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config']) """ for k, path in kwargs.items(): if not os.path.exists(path): raise ValueError( 'Configuration file "{}" does not exist'.format(k)) if k in cf.get_option('config_paths'): raise ValueError('Configuration {!r} already exists'.format(k)) kwargs.update(**cf.get_option('config_paths')) cf.set_option('config_paths', kwargs)
python
def add_config_paths(**kwargs): """ Add to the pool of available configuration files for BIDSLayout. Args: kwargs: dictionary specifying where to find additional config files. Keys are names, values are paths to the corresponding .json file. Example: > add_config_paths(my_config='/path/to/config') > layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config']) """ for k, path in kwargs.items(): if not os.path.exists(path): raise ValueError( 'Configuration file "{}" does not exist'.format(k)) if k in cf.get_option('config_paths'): raise ValueError('Configuration {!r} already exists'.format(k)) kwargs.update(**cf.get_option('config_paths')) cf.set_option('config_paths', kwargs)
[ "def", "add_config_paths", "(", "*", "*", "kwargs", ")", ":", "for", "k", ",", "path", "in", "kwargs", ".", "items", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "ValueError", "(", "'Configuration file \"{}\" does not exist'", ".", "format", "(", "k", ")", ")", "if", "k", "in", "cf", ".", "get_option", "(", "'config_paths'", ")", ":", "raise", "ValueError", "(", "'Configuration {!r} already exists'", ".", "format", "(", "k", ")", ")", "kwargs", ".", "update", "(", "*", "*", "cf", ".", "get_option", "(", "'config_paths'", ")", ")", "cf", ".", "set_option", "(", "'config_paths'", ",", "kwargs", ")" ]
Add to the pool of available configuration files for BIDSLayout. Args: kwargs: dictionary specifying where to find additional config files. Keys are names, values are paths to the corresponding .json file. Example: > add_config_paths(my_config='/path/to/config') > layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config'])
[ "Add", "to", "the", "pool", "of", "available", "configuration", "files", "for", "BIDSLayout", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L77-L97
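A hypothetical registration of an extra config file, assuming add_config_paths is importable from bids.layout (the module shown in the path field above); the .json path is a placeholder and must exist, per the first ValueError:

from bids import BIDSLayout
from bids.layout import add_config_paths

add_config_paths(my_config='/etc/pybids/my_config.json')  # placeholder path
layout = BIDSLayout('/data/bids', config=['bids', 'my_config'])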
17,661
bids-standard/pybids
bids/layout/layout.py
BIDSLayout.add_derivatives
def add_derivatives(self, path, **kwargs):
        ''' Add BIDS-Derivatives datasets to tracking.

        Args:
            path (str, list): One or more paths to BIDS-Derivatives datasets.
                Each path can point to either a derivatives/ directory
                containing one or more pipeline directories, or to a single
                pipeline directory (e.g., derivatives/fmriprep).
            kwargs (dict): Optional keyword arguments to pass on to
                BIDSLayout() when initializing each of the derivative
                datasets.

        Note: Every derivatives directory intended for indexing MUST contain
            a valid dataset_description.json file. See the BIDS-Derivatives
            specification for details.
        '''
        paths = listify(path)

        deriv_dirs = []

        # Collect all paths that contain a dataset_description.json
        def check_for_description(dir):
            dd = os.path.join(dir, 'dataset_description.json')
            return os.path.exists(dd)

        for p in paths:
            p = os.path.abspath(p)
            if os.path.exists(p):
                if check_for_description(p):
                    deriv_dirs.append(p)
                else:
                    subdirs = [d for d in os.listdir(p)
                               if os.path.isdir(os.path.join(p, d))]
                    for sd in subdirs:
                        sd = os.path.join(p, sd)
                        if check_for_description(sd):
                            deriv_dirs.append(sd)

        if not deriv_dirs:
            warnings.warn("Derivative indexing was enabled, but no valid "
                          "derivatives datasets were found in any of the "
                          "provided or default locations. Please make sure "
                          "all derivatives datasets you intend to index "
                          "contain a 'dataset_description.json' file, as "
                          "described in the BIDS-derivatives specification.")

        for deriv in deriv_dirs:
            dd = os.path.join(deriv, 'dataset_description.json')
            with open(dd, 'r', encoding='utf-8') as ddfd:
                description = json.load(ddfd)
            pipeline_name = description.get(
                'PipelineDescription', {}).get('Name')
            if pipeline_name is None:
                raise ValueError("Every valid BIDS-derivatives dataset must "
                                 "have a PipelineDescription.Name field set "
                                 "inside dataset_description.json.")
            if pipeline_name in self.derivatives:
                raise ValueError("Pipeline name '%s' has already been added "
                                 "to this BIDSLayout. Every added pipeline "
                                 "must have a unique name!" % pipeline_name)
            # Default config and sources values
            kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives']
            kwargs['sources'] = kwargs.get('sources') or self
            self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs)

        # Consolidate all entities post-indexing. Note: no conflicts occur b/c
        # multiple entries with the same name all point to the same instance.
        for deriv in self.derivatives.values():
            self.entities.update(deriv.entities)
python
def add_derivatives(self, path, **kwargs):
        ''' Add BIDS-Derivatives datasets to tracking.

        Args:
            path (str, list): One or more paths to BIDS-Derivatives datasets.
                Each path can point to either a derivatives/ directory
                containing one or more pipeline directories, or to a single
                pipeline directory (e.g., derivatives/fmriprep).
            kwargs (dict): Optional keyword arguments to pass on to
                BIDSLayout() when initializing each of the derivative
                datasets.

        Note: Every derivatives directory intended for indexing MUST contain
            a valid dataset_description.json file. See the BIDS-Derivatives
            specification for details.
        '''
        paths = listify(path)

        deriv_dirs = []

        # Collect all paths that contain a dataset_description.json
        def check_for_description(dir):
            dd = os.path.join(dir, 'dataset_description.json')
            return os.path.exists(dd)

        for p in paths:
            p = os.path.abspath(p)
            if os.path.exists(p):
                if check_for_description(p):
                    deriv_dirs.append(p)
                else:
                    subdirs = [d for d in os.listdir(p)
                               if os.path.isdir(os.path.join(p, d))]
                    for sd in subdirs:
                        sd = os.path.join(p, sd)
                        if check_for_description(sd):
                            deriv_dirs.append(sd)

        if not deriv_dirs:
            warnings.warn("Derivative indexing was enabled, but no valid "
                          "derivatives datasets were found in any of the "
                          "provided or default locations. Please make sure "
                          "all derivatives datasets you intend to index "
                          "contain a 'dataset_description.json' file, as "
                          "described in the BIDS-derivatives specification.")

        for deriv in deriv_dirs:
            dd = os.path.join(deriv, 'dataset_description.json')
            with open(dd, 'r', encoding='utf-8') as ddfd:
                description = json.load(ddfd)
            pipeline_name = description.get(
                'PipelineDescription', {}).get('Name')
            if pipeline_name is None:
                raise ValueError("Every valid BIDS-derivatives dataset must "
                                 "have a PipelineDescription.Name field set "
                                 "inside dataset_description.json.")
            if pipeline_name in self.derivatives:
                raise ValueError("Pipeline name '%s' has already been added "
                                 "to this BIDSLayout. Every added pipeline "
                                 "must have a unique name!" % pipeline_name)
            # Default config and sources values
            kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives']
            kwargs['sources'] = kwargs.get('sources') or self
            self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs)

        # Consolidate all entities post-indexing. Note: no conflicts occur b/c
        # multiple entries with the same name all point to the same instance.
        for deriv in self.derivatives.values():
            self.entities.update(deriv.entities)
[ "def", "add_derivatives", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "paths", "=", "listify", "(", "path", ")", "deriv_dirs", "=", "[", "]", "# Collect all paths that contain a dataset_description.json", "def", "check_for_description", "(", "dir", ")", ":", "dd", "=", "os", ".", "path", ".", "join", "(", "dir", ",", "'dataset_description.json'", ")", "return", "os", ".", "path", ".", "exists", "(", "dd", ")", "for", "p", "in", "paths", ":", "p", "=", "os", ".", "path", ".", "abspath", "(", "p", ")", "if", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "if", "check_for_description", "(", "p", ")", ":", "deriv_dirs", ".", "append", "(", "p", ")", "else", ":", "subdirs", "=", "[", "d", "for", "d", "in", "os", ".", "listdir", "(", "p", ")", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "p", ",", "d", ")", ")", "]", "for", "sd", "in", "subdirs", ":", "sd", "=", "os", ".", "path", ".", "join", "(", "p", ",", "sd", ")", "if", "check_for_description", "(", "sd", ")", ":", "deriv_dirs", ".", "append", "(", "sd", ")", "if", "not", "deriv_dirs", ":", "warnings", ".", "warn", "(", "\"Derivative indexing was enabled, but no valid \"", "\"derivatives datasets were found in any of the \"", "\"provided or default locations. Please make sure \"", "\"all derivatives datasets you intend to index \"", "\"contain a 'dataset_description.json' file, as \"", "\"described in the BIDS-derivatives specification.\"", ")", "for", "deriv", "in", "deriv_dirs", ":", "dd", "=", "os", ".", "path", ".", "join", "(", "deriv", ",", "'dataset_description.json'", ")", "with", "open", "(", "dd", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "ddfd", ":", "description", "=", "json", ".", "load", "(", "ddfd", ")", "pipeline_name", "=", "description", ".", "get", "(", "'PipelineDescription'", ",", "{", "}", ")", ".", "get", "(", "'Name'", ")", "if", "pipeline_name", "is", "None", ":", "raise", "ValueError", "(", "\"Every valid BIDS-derivatives dataset must \"", "\"have a PipelineDescription.Name field set \"", "\"inside dataset_description.json.\"", ")", "if", "pipeline_name", "in", "self", ".", "derivatives", ":", "raise", "ValueError", "(", "\"Pipeline name '%s' has already been added \"", "\"to this BIDSLayout. Every added pipeline \"", "\"must have a unique name!\"", ")", "# Default config and sources values", "kwargs", "[", "'config'", "]", "=", "kwargs", ".", "get", "(", "'config'", ")", "or", "[", "'bids'", ",", "'derivatives'", "]", "kwargs", "[", "'sources'", "]", "=", "kwargs", ".", "get", "(", "'sources'", ")", "or", "self", "self", ".", "derivatives", "[", "pipeline_name", "]", "=", "BIDSLayout", "(", "deriv", ",", "*", "*", "kwargs", ")", "# Consolidate all entities post-indexing. Note: no conflicts occur b/c", "# multiple entries with the same name all point to the same instance.", "for", "deriv", "in", "self", ".", "derivatives", ".", "values", "(", ")", ":", "self", ".", "entities", ".", "update", "(", "deriv", ".", "entities", ")" ]
Add BIDS-Derivatives datasets to tracking. Args: path (str, list): One or more paths to BIDS-Derivatives datasets. Each path can point to either a derivatives/ directory containing one or more pipeline directories, or to a single pipeline directory (e.g., derivatives/fmriprep). kwargs (dict): Optional keyword arguments to pass on to BIDSLayout() when initializing each of the derivative datasets. Note: Every derivatives directory intended for indexing MUST contain a valid dataset_description.json file. See the BIDS-Derivatives specification for details.
[ "Add", "BIDS", "-", "Derivatives", "datasets", "to", "tracking", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L352-L418
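A hypothetical call to add_derivatives; the fmriprep directory is a placeholder and must contain a dataset_description.json whose PipelineDescription.Name is set, or the ValueError above is raised:

from bids import BIDSLayout

layout = BIDSLayout('/data/bids')
layout.add_derivatives('/data/bids/derivatives/fmriprep')
# keyed by PipelineDescription.Name, e.g. {'fmriprep': <BIDSLayout ...>}
print(layout.derivatives)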
17,662
bids-standard/pybids
bids/layout/layout.py
BIDSLayout.get_file
def get_file(self, filename, scope='all'): ''' Returns the BIDSFile object with the specified path. Args: filename (str): The path of the file to retrieve. Must be either an absolute path, or relative to the root of this BIDSLayout. scope (str, list): Scope of the search space. If passed, only BIDSLayouts that match the specified scope will be searched. See BIDSLayout docstring for valid values. Returns: A BIDSFile, or None if no match was found. ''' filename = os.path.abspath(os.path.join(self.root, filename)) layouts = self._get_layouts_in_scope(scope) for ly in layouts: if filename in ly.files: return ly.files[filename] return None
python
def get_file(self, filename, scope='all'): ''' Returns the BIDSFile object with the specified path. Args: filename (str): The path of the file to retrieve. Must be either an absolute path, or relative to the root of this BIDSLayout. scope (str, list): Scope of the search space. If passed, only BIDSLayouts that match the specified scope will be searched. See BIDSLayout docstring for valid values. Returns: A BIDSFile, or None if no match was found. ''' filename = os.path.abspath(os.path.join(self.root, filename)) layouts = self._get_layouts_in_scope(scope) for ly in layouts: if filename in ly.files: return ly.files[filename] return None
[ "def", "get_file", "(", "self", ",", "filename", ",", "scope", "=", "'all'", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "root", ",", "filename", ")", ")", "layouts", "=", "self", ".", "_get_layouts_in_scope", "(", "scope", ")", "for", "ly", "in", "layouts", ":", "if", "filename", "in", "ly", ".", "files", ":", "return", "ly", ".", "files", "[", "filename", "]", "return", "None" ]
Returns the BIDSFile object with the specified path. Args: filename (str): The path of the file to retrieve. Must be either an absolute path, or relative to the root of this BIDSLayout. scope (str, list): Scope of the search space. If passed, only BIDSLayouts that match the specified scope will be searched. See BIDSLayout docstring for valid values. Returns: A BIDSFile, or None if no match was found.
[ "Returns", "the", "BIDSFile", "object", "with", "the", "specified", "path", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L600-L617
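Because get_file joins relative paths onto the layout root, these two lookups are equivalent; the dataset path and filename are illustrative:

from bids import BIDSLayout

layout = BIDSLayout('/data/bids')
f1 = layout.get_file('sub-01/func/sub-01_task-rest_bold.nii.gz')
f2 = layout.get_file('/data/bids/sub-01/func/sub-01_task-rest_bold.nii.gz')
assert f1 is f2  # the same BIDSFile object, or both None if unindexed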
17,663
bids-standard/pybids
bids/layout/layout.py
BIDSLayout.get_collections
def get_collections(self, level, types=None, variables=None, merge=False, sampling_rate=None, skip_empty=False, **kwargs): """Return one or more variable Collections in the BIDS project. Args: level (str): The level of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. types (str, list): Types of variables to retrieve. All valid values reflect the filename stipulated in the BIDS spec for each kind of variable. Valid values include: 'events', 'physio', 'stim', 'scans', 'participants', 'sessions', and 'regressors'. variables (list): Optional list of variables names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current level. E.g., if level='subject', variables from all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. sampling_rate (int, str): If level='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. skip_empty (bool): Whether or not to skip empty Variables (i.e., where there are no rows/records in a file after applying any filtering operations like dropping NaNs). kwargs: Optional additional arguments to pass onto load_variables. """ from bids.variables import load_variables index = load_variables(self, types=types, levels=level, skip_empty=skip_empty, **kwargs) return index.get_collections(level, variables, merge, sampling_rate=sampling_rate)
python
def get_collections(self, level, types=None, variables=None, merge=False, sampling_rate=None, skip_empty=False, **kwargs): """Return one or more variable Collections in the BIDS project. Args: level (str): The level of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. types (str, list): Types of variables to retrieve. All valid values reflect the filename stipulated in the BIDS spec for each kind of variable. Valid values include: 'events', 'physio', 'stim', 'scans', 'participants', 'sessions', and 'regressors'. variables (list): Optional list of variables names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current level. E.g., if level='subject', variables from all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. sampling_rate (int, str): If level='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. skip_empty (bool): Whether or not to skip empty Variables (i.e., where there are no rows/records in a file after applying any filtering operations like dropping NaNs). kwargs: Optional additional arguments to pass onto load_variables. """ from bids.variables import load_variables index = load_variables(self, types=types, levels=level, skip_empty=skip_empty, **kwargs) return index.get_collections(level, variables, merge, sampling_rate=sampling_rate)
[ "def", "get_collections", "(", "self", ",", "level", ",", "types", "=", "None", ",", "variables", "=", "None", ",", "merge", "=", "False", ",", "sampling_rate", "=", "None", ",", "skip_empty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "from", "bids", ".", "variables", "import", "load_variables", "index", "=", "load_variables", "(", "self", ",", "types", "=", "types", ",", "levels", "=", "level", ",", "skip_empty", "=", "skip_empty", ",", "*", "*", "kwargs", ")", "return", "index", ".", "get_collections", "(", "level", ",", "variables", ",", "merge", ",", "sampling_rate", "=", "sampling_rate", ")" ]
Return one or more variable Collections in the BIDS project. Args: level (str): The level of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. types (str, list): Types of variables to retrieve. All valid values reflect the filename stipulated in the BIDS spec for each kind of variable. Valid values include: 'events', 'physio', 'stim', 'scans', 'participants', 'sessions', and 'regressors'. variables (list): Optional list of variable names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current level. E.g., if level='subject', variables from all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. sampling_rate (int, str): If level='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. skip_empty (bool): Whether or not to skip empty Variables (i.e., where there are no rows/records in a file after applying any filtering operations like dropping NaNs). kwargs: Optional additional arguments to pass onto load_variables.
[ "Return", "one", "or", "more", "variable", "Collections", "in", "the", "BIDS", "project", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L619-L648
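A hypothetical run-level query through get_collections; merge=True collapses all matching runs into one collection, and the extra subject filter is assumed to pass through **kwargs to load_variables:

from bids import BIDSLayout

layout = BIDSLayout('/data/bids')
coll = layout.get_collections('run', types=['events'], merge=True,
                              sampling_rate=10, subject='01')
print(sorted(coll.variables))  # names of the merged design variables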
17,664
bids-standard/pybids
bids/layout/layout.py
BIDSLayout.get_metadata
def get_metadata(self, path, include_entities=False, **kwargs): """Return metadata found in JSON sidecars for the specified file. Args: path (str): Path to the file to get metadata for. include_entities (bool): If True, all available entities extracted from the filename (rather than JSON sidecars) are included in the returned metadata dictionary. kwargs (dict): Optional keyword arguments to pass onto get_nearest(). Returns: A dictionary of key/value pairs extracted from all of the target file's associated JSON sidecars. Notes: A dictionary containing metadata extracted from all matching .json files is returned. In cases where the same key is found in multiple files, the values in files closer to the input filename will take precedence, per the inheritance rules in the BIDS specification. """ f = self.get_file(path) # For querying efficiency, store metadata in the MetadataIndex cache self.metadata_index.index_file(f.path) if include_entities: entities = f.entities results = entities else: results = {} results.update(self.metadata_index.file_index[path]) return results
python
def get_metadata(self, path, include_entities=False, **kwargs): """Return metadata found in JSON sidecars for the specified file. Args: path (str): Path to the file to get metadata for. include_entities (bool): If True, all available entities extracted from the filename (rather than JSON sidecars) are included in the returned metadata dictionary. kwargs (dict): Optional keyword arguments to pass onto get_nearest(). Returns: A dictionary of key/value pairs extracted from all of the target file's associated JSON sidecars. Notes: A dictionary containing metadata extracted from all matching .json files is returned. In cases where the same key is found in multiple files, the values in files closer to the input filename will take precedence, per the inheritance rules in the BIDS specification. """ f = self.get_file(path) # For querying efficiency, store metadata in the MetadataIndex cache self.metadata_index.index_file(f.path) if include_entities: entities = f.entities results = entities else: results = {} results.update(self.metadata_index.file_index[path]) return results
[ "def", "get_metadata", "(", "self", ",", "path", ",", "include_entities", "=", "False", ",", "*", "*", "kwargs", ")", ":", "f", "=", "self", ".", "get_file", "(", "path", ")", "# For querying efficiency, store metadata in the MetadataIndex cache", "self", ".", "metadata_index", ".", "index_file", "(", "f", ".", "path", ")", "if", "include_entities", ":", "entities", "=", "f", ".", "entities", "results", "=", "entities", "else", ":", "results", "=", "{", "}", "results", ".", "update", "(", "self", ".", "metadata_index", ".", "file_index", "[", "path", "]", ")", "return", "results" ]
Return metadata found in JSON sidecars for the specified file. Args: path (str): Path to the file to get metadata for. include_entities (bool): If True, all available entities extracted from the filename (rather than JSON sidecars) are included in the returned metadata dictionary. kwargs (dict): Optional keyword arguments to pass onto get_nearest(). Returns: A dictionary of key/value pairs extracted from all of the target file's associated JSON sidecars. Notes: A dictionary containing metadata extracted from all matching .json files is returned. In cases where the same key is found in multiple files, the values in files closer to the input filename will take precedence, per the inheritance rules in the BIDS specification.
[ "Return", "metadata", "found", "in", "JSON", "sidecars", "for", "the", "specified", "file", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L650-L684
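A sketch of get_metadata with include_entities=True, where filename entities (subject, task, ...) are folded into the sidecar metadata; the path and field names are illustrative:

from bids import BIDSLayout

layout = BIDSLayout('/data/bids')
md = layout.get_metadata(
    '/data/bids/sub-01/func/sub-01_task-rest_bold.nii.gz',
    include_entities=True)
print(md.get('RepetitionTime'), md.get('subject'))  # sidecar value, '01'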
17,665
bids-standard/pybids
bids/layout/layout.py
BIDSLayout.get_bval
def get_bval(self, path, **kwargs):
        """ Get the bval file for the passed path. """
        result = self.get_nearest(path, extensions='bval', suffix='dwi',
                                  all_=True, **kwargs)
        return listify(result)[0]
python
def get_bval(self, path, **kwargs):
        """ Get the bval file for the passed path. """
        result = self.get_nearest(path, extensions='bval', suffix='dwi',
                                  all_=True, **kwargs)
        return listify(result)[0]
[ "def", "get_bval", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "get_nearest", "(", "path", ",", "extensions", "=", "'bval'", ",", "suffix", "=", "'dwi'", ",", "all_", "=", "True", ",", "*", "*", "kwargs", ")", "return", "listify", "(", "result", ")", "[", "0", "]" ]
Get the bval file for the passed path.
[ "Get", "bval", "file", "for", "passed", "path", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L794-L798
17,666
bids-standard/pybids
bids/layout/layout.py
BIDSLayout.copy_files
def copy_files(self, files=None, path_patterns=None, symbolic_links=True, root=None, conflicts='fail', **kwargs): """ Copies one or more BIDSFiles to new locations defined by each BIDSFile's entities and the specified path_patterns. Args: files (list): Optional list of BIDSFile objects to write out. If none provided, use files from running a get() query using remaining **kwargs. path_patterns (str, list): Write patterns to pass to each file's write_file method. symbolic_links (bool): Whether to copy each file as a symbolic link or a deep copy. root (str): Optional root directory that all patterns are relative to. Defaults to current working directory. conflicts (str): Defines the desired action when the output path already exists. Must be one of: 'fail': raises an exception 'skip' does nothing 'overwrite': overwrites the existing file 'append': adds a suffix to each file copy, starting with 1 kwargs (kwargs): Optional key word arguments to pass into a get() query. """ _files = self.get(return_type='objects', **kwargs) if files: _files = list(set(files).intersection(_files)) for f in _files: f.copy(path_patterns, symbolic_link=symbolic_links, root=self.root, conflicts=conflicts)
python
def copy_files(self, files=None, path_patterns=None, symbolic_links=True, root=None, conflicts='fail', **kwargs): """ Copies one or more BIDSFiles to new locations defined by each BIDSFile's entities and the specified path_patterns. Args: files (list): Optional list of BIDSFile objects to write out. If none provided, use files from running a get() query using remaining **kwargs. path_patterns (str, list): Write patterns to pass to each file's write_file method. symbolic_links (bool): Whether to copy each file as a symbolic link or a deep copy. root (str): Optional root directory that all patterns are relative to. Defaults to current working directory. conflicts (str): Defines the desired action when the output path already exists. Must be one of: 'fail': raises an exception 'skip' does nothing 'overwrite': overwrites the existing file 'append': adds a suffix to each file copy, starting with 1 kwargs (kwargs): Optional key word arguments to pass into a get() query. """ _files = self.get(return_type='objects', **kwargs) if files: _files = list(set(files).intersection(_files)) for f in _files: f.copy(path_patterns, symbolic_link=symbolic_links, root=self.root, conflicts=conflicts)
[ "def", "copy_files", "(", "self", ",", "files", "=", "None", ",", "path_patterns", "=", "None", ",", "symbolic_links", "=", "True", ",", "root", "=", "None", ",", "conflicts", "=", "'fail'", ",", "*", "*", "kwargs", ")", ":", "_files", "=", "self", ".", "get", "(", "return_type", "=", "'objects'", ",", "*", "*", "kwargs", ")", "if", "files", ":", "_files", "=", "list", "(", "set", "(", "files", ")", ".", "intersection", "(", "_files", ")", ")", "for", "f", "in", "_files", ":", "f", ".", "copy", "(", "path_patterns", ",", "symbolic_link", "=", "symbolic_links", ",", "root", "=", "self", ".", "root", ",", "conflicts", "=", "conflicts", ")" ]
Copies one or more BIDSFiles to new locations defined by each BIDSFile's entities and the specified path_patterns. Args: files (list): Optional list of BIDSFile objects to write out. If none provided, use files from running a get() query using remaining **kwargs. path_patterns (str, list): Write patterns to pass to each file's write_file method. symbolic_links (bool): Whether to copy each file as a symbolic link or a deep copy. root (str): Optional root directory that all patterns are relative to. Defaults to current working directory. conflicts (str): Defines the desired action when the output path already exists. Must be one of: 'fail': raises an exception; 'skip': does nothing; 'overwrite': overwrites the existing file; 'append': adds a suffix to each file copy, starting with 1. kwargs (kwargs): Optional keyword arguments to pass into a get() query.
[ "Copies", "one", "or", "more", "BIDSFiles", "to", "new", "locations", "defined", "by", "each", "BIDSFile", "s", "entities", "and", "the", "specified", "path_patterns", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L950-L981
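A hypothetical copy_files call that symlinks every bold run into a flat layout; the write pattern follows pybids' '{entity}' placeholder syntax (an assumption about the pattern language), and suffix='bold' is forwarded to the internal get() query:

from bids import BIDSLayout

layout = BIDSLayout('/data/bids')
pattern = 'sub-{subject}/sub-{subject}_task-{task}_bold.nii.gz'
layout.copy_files(path_patterns=[pattern], symbolic_links=True,
                  conflicts='skip', suffix='bold')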
17,667
bids-standard/pybids
bids/layout/layout.py
MetadataIndex.index_file
def index_file(self, f, overwrite=False): """Index metadata for the specified file. Args: f (BIDSFile, str): A BIDSFile or path to an indexed file. overwrite (bool): If True, forces reindexing of the file even if an entry already exists. """ if isinstance(f, six.string_types): f = self.layout.get_file(f) if f.path in self.file_index and not overwrite: return if 'suffix' not in f.entities: # Skip files without suffixes return md = self._get_metadata(f.path) for md_key, md_val in md.items(): if md_key not in self.key_index: self.key_index[md_key] = {} self.key_index[md_key][f.path] = md_val self.file_index[f.path][md_key] = md_val
python
def index_file(self, f, overwrite=False): """Index metadata for the specified file. Args: f (BIDSFile, str): A BIDSFile or path to an indexed file. overwrite (bool): If True, forces reindexing of the file even if an entry already exists. """ if isinstance(f, six.string_types): f = self.layout.get_file(f) if f.path in self.file_index and not overwrite: return if 'suffix' not in f.entities: # Skip files without suffixes return md = self._get_metadata(f.path) for md_key, md_val in md.items(): if md_key not in self.key_index: self.key_index[md_key] = {} self.key_index[md_key][f.path] = md_val self.file_index[f.path][md_key] = md_val
[ "def", "index_file", "(", "self", ",", "f", ",", "overwrite", "=", "False", ")", ":", "if", "isinstance", "(", "f", ",", "six", ".", "string_types", ")", ":", "f", "=", "self", ".", "layout", ".", "get_file", "(", "f", ")", "if", "f", ".", "path", "in", "self", ".", "file_index", "and", "not", "overwrite", ":", "return", "if", "'suffix'", "not", "in", "f", ".", "entities", ":", "# Skip files without suffixes", "return", "md", "=", "self", ".", "_get_metadata", "(", "f", ".", "path", ")", "for", "md_key", ",", "md_val", "in", "md", ".", "items", "(", ")", ":", "if", "md_key", "not", "in", "self", ".", "key_index", ":", "self", ".", "key_index", "[", "md_key", "]", "=", "{", "}", "self", ".", "key_index", "[", "md_key", "]", "[", "f", ".", "path", "]", "=", "md_val", "self", ".", "file_index", "[", "f", ".", "path", "]", "[", "md_key", "]", "=", "md_val" ]
Index metadata for the specified file. Args: f (BIDSFile, str): A BIDSFile or path to an indexed file. overwrite (bool): If True, forces reindexing of the file even if an entry already exists.
[ "Index", "metadata", "for", "the", "specified", "file", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L1036-L1059
17,668
bids-standard/pybids
bids/layout/layout.py
MetadataIndex.search
def search(self, files=None, defined_fields=None, **kwargs):
        """Search files in the layout by metadata fields.

        Args:
            files (list): Optional list of names of files to search. If None,
                all files in the layout are scanned.
            defined_fields (list): Optional list of names of fields that must
                be defined in the JSON sidecar in order to consider the file a
                match, but which don't need to match any particular value.
            kwargs: Optional keyword arguments defining search constraints;
                keys are names of metadata fields, and values are the values
                to match those fields against (e.g., SliceTiming=0.017 would
                return all files that have a SliceTiming value of 0.017 in
                metadata).

        Returns: A list of filenames that match all constraints.
        """
        if defined_fields is None:
            defined_fields = []

        all_keys = set(defined_fields) | set(kwargs.keys())
        if not all_keys:
            raise ValueError("At least one field to search on must be passed.")

        # If no list of files is passed, use all files in layout
        if files is None:
            files = set(self.layout.files.keys())

        # Index metadata for any previously unseen files
        for f in files:
            self.index_file(f)

        # Get file intersection of all kwargs keys--this is fast
        filesets = [set(self.key_index.get(k, [])) for k in all_keys]
        matches = reduce(lambda x, y: x & y, filesets)

        if files is not None:
            matches &= set(files)

        if not matches:
            return []

        def check_matches(f, key, val):
            if isinstance(val, six.string_types) and '*' in val:
                val = ('^%s$' % val).replace('*', ".*")
                # re.search takes the pattern first and the string second
                return re.search(val, str(self.file_index[f][key])) is not None
            else:
                return val == self.file_index[f][key]

        # Serially check matches against each pattern, with early termination
        for k, val in kwargs.items():
            matches = list(filter(lambda x: check_matches(x, k, val), matches))
            if not matches:
                return []

        return matches
python
def search(self, files=None, defined_fields=None, **kwargs):
        """Search files in the layout by metadata fields.

        Args:
            files (list): Optional list of names of files to search. If None,
                all files in the layout are scanned.
            defined_fields (list): Optional list of names of fields that must
                be defined in the JSON sidecar in order to consider the file a
                match, but which don't need to match any particular value.
            kwargs: Optional keyword arguments defining search constraints;
                keys are names of metadata fields, and values are the values
                to match those fields against (e.g., SliceTiming=0.017 would
                return all files that have a SliceTiming value of 0.017 in
                metadata).

        Returns: A list of filenames that match all constraints.
        """
        if defined_fields is None:
            defined_fields = []

        all_keys = set(defined_fields) | set(kwargs.keys())
        if not all_keys:
            raise ValueError("At least one field to search on must be passed.")

        # If no list of files is passed, use all files in layout
        if files is None:
            files = set(self.layout.files.keys())

        # Index metadata for any previously unseen files
        for f in files:
            self.index_file(f)

        # Get file intersection of all kwargs keys--this is fast
        filesets = [set(self.key_index.get(k, [])) for k in all_keys]
        matches = reduce(lambda x, y: x & y, filesets)

        if files is not None:
            matches &= set(files)

        if not matches:
            return []

        def check_matches(f, key, val):
            if isinstance(val, six.string_types) and '*' in val:
                val = ('^%s$' % val).replace('*', ".*")
                # re.search takes the pattern first and the string second
                return re.search(val, str(self.file_index[f][key])) is not None
            else:
                return val == self.file_index[f][key]

        # Serially check matches against each pattern, with early termination
        for k, val in kwargs.items():
            matches = list(filter(lambda x: check_matches(x, k, val), matches))
            if not matches:
                return []

        return matches
[ "def", "search", "(", "self", ",", "files", "=", "None", ",", "defined_fields", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "defined_fields", "is", "None", ":", "defined_fields", "=", "[", "]", "all_keys", "=", "set", "(", "defined_fields", ")", "|", "set", "(", "kwargs", ".", "keys", "(", ")", ")", "if", "not", "all_keys", ":", "raise", "ValueError", "(", "\"At least one field to search on must be passed.\"", ")", "# If no list of files is passed, use all files in layout", "if", "files", "is", "None", ":", "files", "=", "set", "(", "self", ".", "layout", ".", "files", ".", "keys", "(", ")", ")", "# Index metadata for any previously unseen files", "for", "f", "in", "files", ":", "self", ".", "index_file", "(", "f", ")", "# Get file intersection of all kwargs keys--this is fast", "filesets", "=", "[", "set", "(", "self", ".", "key_index", ".", "get", "(", "k", ",", "[", "]", ")", ")", "for", "k", "in", "all_keys", "]", "matches", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "&", "y", ",", "filesets", ")", "if", "files", "is", "not", "None", ":", "matches", "&=", "set", "(", "files", ")", "if", "not", "matches", ":", "return", "[", "]", "def", "check_matches", "(", "f", ",", "key", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", "and", "'*'", "in", "val", ":", "val", "=", "(", "'^%s$'", "%", "val", ")", ".", "replace", "(", "'*'", ",", "\".*\"", ")", "return", "re", ".", "search", "(", "str", "(", "self", ".", "file_index", "[", "f", "]", "[", "key", "]", ")", ",", "val", ")", "is", "not", "None", "else", ":", "return", "val", "==", "self", ".", "file_index", "[", "f", "]", "[", "key", "]", "# Serially check matches against each pattern, with early termination", "for", "k", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "matches", "=", "list", "(", "filter", "(", "lambda", "x", ":", "check_matches", "(", "x", ",", "k", ",", "val", ")", ",", "matches", ")", ")", "if", "not", "matches", ":", "return", "[", "]", "return", "matches" ]
Search files in the layout by metadata fields. Args: files (list): Optional list of names of files to search. If None, all files in the layout are scanned. defined_fields (list): Optional list of names of fields that must be defined in the JSON sidecar in order to consider the file a match, but which don't need to match any particular value. kwargs: Optional keyword arguments defining search constraints; keys are names of metadata fields, and values are the values to match those fields against (e.g., SliceTiming=0.017 would return all files that have a SliceTiming value of 0.017 in metadata). Returns: A list of filenames that match all constraints.
[ "Search", "files", "in", "the", "layout", "by", "metadata", "fields", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L1080-L1136
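A hypothetical metadata search mirroring the docstring above: one existence constraint plus one wildcard value constraint (the '*' expands to '.*' in check_matches); the dataset path and field values are illustrative:

from bids import BIDSLayout

layout = BIDSLayout('/data/bids')
hits = layout.metadata_index.search(defined_fields=['SliceTiming'],
                                    TaskName='rest*')
print(hits)  # paths whose sidecars define SliceTiming and whose
             # TaskName matches the pattern '^rest.*$'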
17,669
bids-standard/pybids
bids/analysis/auto_model.py
auto_model
def auto_model(layout, scan_length=None, one_vs_rest=False):
    '''Create a simple default model for each of the tasks in a BIDSLayout.
    Contrasts each trial type against all other trial types at the run level
    and then uses t-tests at each other level present to aggregate these
    results up.

    Args:
        layout (BIDSLayout) A BIDSLayout instance
        scan_length (Int) Scan length for loading event variables in cases
             where the scan length cannot be read from the nifti.
             Primarily for testing.
        one_vs_rest (Bool) Set to True if you would like to autogenerate
             contrasts of each trial type against every other trial type.

    Returns:
        models (list) list of model dictionaries for each task
    '''

    base_name = split(layout.root)[-1]
    tasks = layout.entities['task'].unique()
    task_models = []

    for task_name in tasks:
        # Populate model meta-data
        model = OrderedDict()
        model["Name"] = "_".join([base_name, task_name])
        model["Description"] = ("Autogenerated model for the %s task from %s"
                                % (task_name, base_name))
        model["Input"] = {"Task": task_name}
        steps = []

        # Make run level block
        transformations = OrderedDict(Name='Factor', Input=['trial_type'])
        run = OrderedDict(Level='Run', Name='Run',
                          Transformations=[transformations])

        # Get trial types
        run_nodes = load_variables(layout, task=task_name, levels=['run'],
                                   scan_length=scan_length)

        evs = []
        for n in run_nodes.nodes:
            evs.extend(n.variables['trial_type'].values.values)
        trial_types = np.unique(evs)
        trial_type_factors = ["trial_type." + tt for tt in trial_types]

        # Add HRF
        run['Transformations'].append(
            OrderedDict(Name='Convolve', Input=trial_type_factors))

        run_model = OrderedDict(X=trial_type_factors)
        run["Model"] = run_model

        if one_vs_rest:
            # if there are multiple trial types, build contrasts
            contrasts = []
            for i, tt in enumerate(trial_types):
                cdict = OrderedDict()
                if len(trial_types) > 1:
                    cdict["Name"] = "run_" + tt + "_vs_others"
                else:
                    cdict["Name"] = "run_" + tt
                cdict["ConditionList"] = trial_type_factors

                # Calculate weights for contrast
                weights = np.ones(len(trial_types))
                try:
                    weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
                except ZeroDivisionError:
                    pass
                cdict["Weights"] = list(weights)

                cdict["Type"] = "t"
                contrasts.append(cdict)

            run["Contrasts"] = contrasts
        steps.append(run)

        if one_vs_rest:
            # if there are multiple sessions, t-test run level contrasts at
            # session level
            sessions = layout.get_sessions()
            if len(sessions) > 1:
                # get contrasts names from previous block
                contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
                steps.append(_make_passthrough_contrast("Session",
                                                        contrast_names))

            subjects = layout.get_subjects()
            if len(subjects) > 1:
                # get contrasts names from previous block
                contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
                steps.append(_make_passthrough_contrast("Subject",
                                                        contrast_names))

            # get contrasts names from previous block
            contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
            steps.append(_make_passthrough_contrast("Dataset",
                                                    contrast_names))

        model["Steps"] = steps
        task_models.append(model)

    return task_models
python
def auto_model(layout, scan_length=None, one_vs_rest=False): '''Create a simple default model for each of the tasks in a BIDSLayout. Contrasts each trial type against all other trial types and trial types at the run level and then uses t-tests at each other level present to aggregate these results up. Args: layout (BIDSLayout) A BIDSLayout instance scan_length (Int) Scan length for loading event varibles in cases where the scan length can not be read from the nifti. Primarily for testing. one_vs_rest (Bool) Set to True if you would like to autogenerate contrasts of each trial type against everyother trialtype. Returns: models (list) list of model dictionaries for each task ''' base_name = split(layout.root)[-1] tasks = layout.entities['task'].unique() task_models = [] for task_name in tasks: # Populate model meta-data model = OrderedDict() model["Name"] = "_".join([base_name, task_name]) model["Description"] = ("Autogenerated model for the %s task from %s" % (task_name, base_name)) model["Input"] = {"Task": task_name} steps = [] # Make run level block transformations = OrderedDict(Name='Factor', Input=['trial_type']) run = OrderedDict(Level='Run', Name='Run', Transformations=[transformations]) # Get trial types run_nodes = load_variables(layout, task=task_name, levels=['run'], scan_length=scan_length) evs = [] for n in run_nodes.nodes: evs.extend(n.variables['trial_type'].values.values) trial_types = np.unique(evs) trial_type_factors = ["trial_type." + tt for tt in trial_types] # Add HRF run['Transformations'].append( OrderedDict(Name='Convolve', Input=trial_type_factors)) run_model = OrderedDict(X=trial_type_factors) run["Model"] = run_model if one_vs_rest: # if there are multiple trial types, build contrasts contrasts = [] for i, tt in enumerate(trial_types): cdict = OrderedDict() if len(trial_types) > 1: cdict["Name"] = "run_" + tt + "_vs_others" else: cdict["Name"] = "run_" + tt cdict["ConditionList"] = trial_type_factors # Calculate weights for contrast weights = np.ones(len(trial_types)) try: weights[trial_types != tt] = -1.0 / (len(trial_types) - 1) except ZeroDivisionError: pass cdict["Weights"] = list(weights) cdict["Type"] = "t" contrasts.append(cdict) run["Contrasts"] = contrasts steps.append(run) if one_vs_rest: # if there are multiple sessions, t-test run level contrasts at # session level sessions = layout.get_sessions() if len(sessions) > 1: # get contrasts names from previous block contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]] steps.append(_make_passthrough_contrast("Session", contrast_names)) subjects = layout.get_subjects() if len(subjects) > 1: # get contrasts names from previous block contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]] steps.append(_make_passthrough_contrast("Subject", contrast_names)) # get contrasts names from previous block contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]] steps.append(_make_passthrough_contrast("Dataset", contrast_names)) model["Steps"] = steps task_models.append(model) return task_models
[ "def", "auto_model", "(", "layout", ",", "scan_length", "=", "None", ",", "one_vs_rest", "=", "False", ")", ":", "base_name", "=", "split", "(", "layout", ".", "root", ")", "[", "-", "1", "]", "tasks", "=", "layout", ".", "entities", "[", "'task'", "]", ".", "unique", "(", ")", "task_models", "=", "[", "]", "for", "task_name", "in", "tasks", ":", "# Populate model meta-data", "model", "=", "OrderedDict", "(", ")", "model", "[", "\"Name\"", "]", "=", "\"_\"", ".", "join", "(", "[", "base_name", ",", "task_name", "]", ")", "model", "[", "\"Description\"", "]", "=", "(", "\"Autogenerated model for the %s task from %s\"", "%", "(", "task_name", ",", "base_name", ")", ")", "model", "[", "\"Input\"", "]", "=", "{", "\"Task\"", ":", "task_name", "}", "steps", "=", "[", "]", "# Make run level block", "transformations", "=", "OrderedDict", "(", "Name", "=", "'Factor'", ",", "Input", "=", "[", "'trial_type'", "]", ")", "run", "=", "OrderedDict", "(", "Level", "=", "'Run'", ",", "Name", "=", "'Run'", ",", "Transformations", "=", "[", "transformations", "]", ")", "# Get trial types", "run_nodes", "=", "load_variables", "(", "layout", ",", "task", "=", "task_name", ",", "levels", "=", "[", "'run'", "]", ",", "scan_length", "=", "scan_length", ")", "evs", "=", "[", "]", "for", "n", "in", "run_nodes", ".", "nodes", ":", "evs", ".", "extend", "(", "n", ".", "variables", "[", "'trial_type'", "]", ".", "values", ".", "values", ")", "trial_types", "=", "np", ".", "unique", "(", "evs", ")", "trial_type_factors", "=", "[", "\"trial_type.\"", "+", "tt", "for", "tt", "in", "trial_types", "]", "# Add HRF", "run", "[", "'Transformations'", "]", ".", "append", "(", "OrderedDict", "(", "Name", "=", "'Convolve'", ",", "Input", "=", "trial_type_factors", ")", ")", "run_model", "=", "OrderedDict", "(", "X", "=", "trial_type_factors", ")", "run", "[", "\"Model\"", "]", "=", "run_model", "if", "one_vs_rest", ":", "# if there are multiple trial types, build contrasts", "contrasts", "=", "[", "]", "for", "i", ",", "tt", "in", "enumerate", "(", "trial_types", ")", ":", "cdict", "=", "OrderedDict", "(", ")", "if", "len", "(", "trial_types", ")", ">", "1", ":", "cdict", "[", "\"Name\"", "]", "=", "\"run_\"", "+", "tt", "+", "\"_vs_others\"", "else", ":", "cdict", "[", "\"Name\"", "]", "=", "\"run_\"", "+", "tt", "cdict", "[", "\"ConditionList\"", "]", "=", "trial_type_factors", "# Calculate weights for contrast", "weights", "=", "np", ".", "ones", "(", "len", "(", "trial_types", ")", ")", "try", ":", "weights", "[", "trial_types", "!=", "tt", "]", "=", "-", "1.0", "/", "(", "len", "(", "trial_types", ")", "-", "1", ")", "except", "ZeroDivisionError", ":", "pass", "cdict", "[", "\"Weights\"", "]", "=", "list", "(", "weights", ")", "cdict", "[", "\"Type\"", "]", "=", "\"t\"", "contrasts", ".", "append", "(", "cdict", ")", "run", "[", "\"Contrasts\"", "]", "=", "contrasts", "steps", ".", "append", "(", "run", ")", "if", "one_vs_rest", ":", "# if there are multiple sessions, t-test run level contrasts at", "# session level", "sessions", "=", "layout", ".", "get_sessions", "(", ")", "if", "len", "(", "sessions", ")", ">", "1", ":", "# get contrasts names from previous block", "contrast_names", "=", "[", "cc", "[", "\"Name\"", "]", "for", "cc", "in", "steps", "[", "-", "1", "]", "[", "\"Contrasts\"", "]", "]", "steps", ".", "append", "(", "_make_passthrough_contrast", "(", "\"Session\"", ",", "contrast_names", ")", ")", "subjects", "=", "layout", ".", "get_subjects", "(", ")", "if", "len", "(", "subjects", ")", ">", "1", ":", "# 
get contrasts names from previous block", "contrast_names", "=", "[", "cc", "[", "\"Name\"", "]", "for", "cc", "in", "steps", "[", "-", "1", "]", "[", "\"Contrasts\"", "]", "]", "steps", ".", "append", "(", "_make_passthrough_contrast", "(", "\"Subject\"", ",", "contrast_names", ")", ")", "# get contrasts names from previous block", "contrast_names", "=", "[", "cc", "[", "\"Name\"", "]", "for", "cc", "in", "steps", "[", "-", "1", "]", "[", "\"Contrasts\"", "]", "]", "steps", ".", "append", "(", "_make_passthrough_contrast", "(", "\"Dataset\"", ",", "contrast_names", ")", ")", "model", "[", "\"Steps\"", "]", "=", "steps", "task_models", ".", "append", "(", "model", ")", "return", "task_models" ]
Create a simple default model for each of the tasks in a BIDSLayout. Contrasts each trial type against all other trial types and trial types at the run level and then uses t-tests at each other level present to aggregate these results up. Args: layout (BIDSLayout) A BIDSLayout instance scan_length (Int) Scan length for loading event variables in cases where the scan length cannot be read from the nifti. Primarily for testing. one_vs_rest (Bool) Set to True if you would like to autogenerate contrasts of each trial type against every other trial type. Returns: models (list) list of model dictionaries for each task
[ "Create", "a", "simple", "default", "model", "for", "each", "of", "the", "tasks", "in", "a", "BIDSLayout", ".", "Contrasts", "each", "trial", "type", "against", "all", "other", "trial", "types", "and", "trial", "types", "at", "the", "run", "level", "and", "then", "uses", "t", "-", "tests", "at", "each", "other", "level", "present", "to", "aggregate", "these", "results", "up", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/auto_model.py#L19-L122
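A minimal usage sketch for the auto_model record above. The import path mirrors bids/analysis/auto_model.py as listed; the dataset root and scan_length value are illustrative assumptions, and scan_length is only needed when run duration cannot be read from the NIfTI headers.

from bids import BIDSLayout
from bids.analysis.auto_model import auto_model

layout = BIDSLayout('/data/ds000117')   # hypothetical dataset root
models = auto_model(layout, scan_length=480, one_vs_rest=True)
for model in models:
    # One autogenerated model per task, with run and higher-level steps
    print(model['Name'], len(model['Steps']))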
17,670
bids-standard/pybids
bids/variables/variables.py
SimpleVariable.split
def split(self, grouper): ''' Split the current SparseRunVariable into multiple columns. Args: grouper (iterable): list to groupby, where each unique value will be taken as the name of the resulting column. Returns: A list of SparseRunVariables, one per unique value in the grouper. ''' data = self.to_df(condition=True, entities=True) data = data.drop('condition', axis=1) subsets = [] for i, (name, g) in enumerate(data.groupby(grouper)): name = '%s.%s' % (self.name, name) col = self.__class__(name=name, data=g, source=self.source, run_info=getattr(self, 'run_info', None)) subsets.append(col) return subsets
python
def split(self, grouper): ''' Split the current SparseRunVariable into multiple columns. Args: grouper (iterable): list to groupby, where each unique value will be taken as the name of the resulting column. Returns: A list of SparseRunVariables, one per unique value in the grouper. ''' data = self.to_df(condition=True, entities=True) data = data.drop('condition', axis=1) subsets = [] for i, (name, g) in enumerate(data.groupby(grouper)): name = '%s.%s' % (self.name, name) col = self.__class__(name=name, data=g, source=self.source, run_info=getattr(self, 'run_info', None)) subsets.append(col) return subsets
[ "def", "split", "(", "self", ",", "grouper", ")", ":", "data", "=", "self", ".", "to_df", "(", "condition", "=", "True", ",", "entities", "=", "True", ")", "data", "=", "data", ".", "drop", "(", "'condition'", ",", "axis", "=", "1", ")", "subsets", "=", "[", "]", "for", "i", ",", "(", "name", ",", "g", ")", "in", "enumerate", "(", "data", ".", "groupby", "(", "grouper", ")", ")", ":", "name", "=", "'%s.%s'", "%", "(", "self", ".", "name", ",", "name", ")", "col", "=", "self", ".", "__class__", "(", "name", "=", "name", ",", "data", "=", "g", ",", "source", "=", "self", ".", "source", ",", "run_info", "=", "getattr", "(", "self", ",", "'run_info'", ",", "None", ")", ")", "subsets", ".", "append", "(", "col", ")", "return", "subsets" ]
Split the current SparseRunVariable into multiple columns. Args: grouper (iterable): list to groupby, where each unique value will be taken as the name of the resulting column. Returns: A list of SparseRunVariables, one per unique value in the grouper.
[ "Split", "the", "current", "SparseRunVariable", "into", "multiple", "columns", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L240-L260
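A hedged sketch of calling split, assuming `var` is an existing SimpleVariable (e.g. coll.variables['trial_type']); the grouper just needs one entry per internal data row, and grouping a factor variable by its own values mimics what the Factor transformation does.

# One new variable per unique grouper value, named '<var.name>.<value>'
subsets = var.split(list(var.values))
print([v.name for v in subsets])   # e.g. ['trial_type.go', 'trial_type.stop']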
17,671
bids-standard/pybids
bids/variables/variables.py
SimpleVariable.select_rows
def select_rows(self, rows): ''' Truncate internal arrays to keep only the specified rows. Args: rows (array): An integer or boolean array identifying the indices of rows to keep. ''' self.values = self.values.iloc[rows] self.index = self.index.iloc[rows, :] for prop in self._property_columns: vals = getattr(self, prop)[rows] setattr(self, prop, vals)
python
def select_rows(self, rows): ''' Truncate internal arrays to keep only the specified rows. Args: rows (array): An integer or boolean array identifying the indices of rows to keep. ''' self.values = self.values.iloc[rows] self.index = self.index.iloc[rows, :] for prop in self._property_columns: vals = getattr(self, prop)[rows] setattr(self, prop, vals)
[ "def", "select_rows", "(", "self", ",", "rows", ")", ":", "self", ".", "values", "=", "self", ".", "values", ".", "iloc", "[", "rows", "]", "self", ".", "index", "=", "self", ".", "index", ".", "iloc", "[", "rows", ",", ":", "]", "for", "prop", "in", "self", ".", "_property_columns", ":", "vals", "=", "getattr", "(", "self", ",", "prop", ")", "[", "rows", "]", "setattr", "(", "self", ",", "prop", ",", "vals", ")" ]
Truncate internal arrays to keep only the specified rows. Args: rows (array): An integer or boolean array identifying the indices of rows to keep.
[ "Truncate", "internal", "arrays", "to", "keep", "only", "the", "specified", "rows", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L269-L280
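Because select_rows truncates the variable in place, cloning first is the safe pattern when the original is still needed; a sketch, again assuming `var` is an existing SimpleVariable:

import numpy as np

trimmed = var.clone()                 # keep the original intact
trimmed.select_rows(np.arange(10))    # retain only the first ten rows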
17,672
bids-standard/pybids
bids/variables/variables.py
DenseRunVariable.split
def split(self, grouper): '''Split the current DenseRunVariable into multiple columns. Parameters ---------- grouper : :obj:`pandas.DataFrame` Binary DF specifying the design matrix to use for splitting. Number of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable`` will be generated for each column in the grouper. Returns ------- A list of DenseRunVariables, one per unique value in the grouper. ''' values = grouper.values * self.values.values df = pd.DataFrame(values, columns=grouper.columns) return [DenseRunVariable(name='%s.%s' % (self.name, name), values=df[name].values, run_info=self.run_info, source=self.source, sampling_rate=self.sampling_rate) for i, name in enumerate(df.columns)]
python
def split(self, grouper): '''Split the current DenseRunVariable into multiple columns. Parameters ---------- grouper : :obj:`pandas.DataFrame` Binary DF specifying the design matrix to use for splitting. Number of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable`` will be generated for each column in the grouper. Returns ------- A list of DenseRunVariables, one per unique value in the grouper. ''' values = grouper.values * self.values.values df = pd.DataFrame(values, columns=grouper.columns) return [DenseRunVariable(name='%s.%s' % (self.name, name), values=df[name].values, run_info=self.run_info, source=self.source, sampling_rate=self.sampling_rate) for i, name in enumerate(df.columns)]
[ "def", "split", "(", "self", ",", "grouper", ")", ":", "values", "=", "grouper", ".", "values", "*", "self", ".", "values", ".", "values", "df", "=", "pd", ".", "DataFrame", "(", "values", ",", "columns", "=", "grouper", ".", "columns", ")", "return", "[", "DenseRunVariable", "(", "name", "=", "'%s.%s'", "%", "(", "self", ".", "name", ",", "name", ")", ",", "values", "=", "df", "[", "name", "]", ".", "values", ",", "run_info", "=", "self", ".", "run_info", ",", "source", "=", "self", ".", "source", ",", "sampling_rate", "=", "self", ".", "sampling_rate", ")", "for", "i", ",", "name", "in", "enumerate", "(", "df", ".", "columns", ")", "]" ]
Split the current DenseRunVariable into multiple columns. Parameters ---------- grouper : :obj:`pandas.DataFrame` Binary DF specifying the design matrix to use for splitting. Number of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable`` will be generated for each column in the grouper. Returns ------- A list of DenseRunVariables, one per column in the grouper.
[ "Split", "the", "current", "DenseRunVariable", "into", "multiple", "columns", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L393-L414
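A sketch of the dense split, assuming `dense_var` is an existing DenseRunVariable. The grouper is a binary DataFrame with one row per sample; each column produces one output variable via elementwise multiplication with the values.

import numpy as np
import pandas as pd

n = len(dense_var.values)
half = np.zeros(n)
half[:n // 2] = 1
grouper = pd.DataFrame({'early': half, 'late': 1 - half})
parts = dense_var.split(grouper)   # names: '<dense_var.name>.early', '...late'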
17,673
bids-standard/pybids
bids/variables/variables.py
DenseRunVariable._build_entity_index
def _build_entity_index(self, run_info, sampling_rate): ''' Build the entity index from run information. ''' index = [] interval = int(round(1000. / sampling_rate)) _timestamps = [] for run in run_info: reps = int(math.ceil(run.duration * sampling_rate)) ent_vals = list(run.entities.values()) df = pd.DataFrame([ent_vals] * reps, columns=list(run.entities.keys())) ts = pd.date_range(0, periods=len(df), freq='%sms' % interval) _timestamps.append(ts.to_series()) index.append(df) self.timestamps = pd.concat(_timestamps, axis=0, sort=True) return pd.concat(index, axis=0, sort=True).reset_index(drop=True)
python
def _build_entity_index(self, run_info, sampling_rate): ''' Build the entity index from run information. ''' index = [] interval = int(round(1000. / sampling_rate)) _timestamps = [] for run in run_info: reps = int(math.ceil(run.duration * sampling_rate)) ent_vals = list(run.entities.values()) df = pd.DataFrame([ent_vals] * reps, columns=list(run.entities.keys())) ts = pd.date_range(0, periods=len(df), freq='%sms' % interval) _timestamps.append(ts.to_series()) index.append(df) self.timestamps = pd.concat(_timestamps, axis=0, sort=True) return pd.concat(index, axis=0, sort=True).reset_index(drop=True)
[ "def", "_build_entity_index", "(", "self", ",", "run_info", ",", "sampling_rate", ")", ":", "index", "=", "[", "]", "interval", "=", "int", "(", "round", "(", "1000.", "/", "sampling_rate", ")", ")", "_timestamps", "=", "[", "]", "for", "run", "in", "run_info", ":", "reps", "=", "int", "(", "math", ".", "ceil", "(", "run", ".", "duration", "*", "sampling_rate", ")", ")", "ent_vals", "=", "list", "(", "run", ".", "entities", ".", "values", "(", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "[", "ent_vals", "]", "*", "reps", ",", "columns", "=", "list", "(", "run", ".", "entities", ".", "keys", "(", ")", ")", ")", "ts", "=", "pd", ".", "date_range", "(", "0", ",", "periods", "=", "len", "(", "df", ")", ",", "freq", "=", "'%sms'", "%", "interval", ")", "_timestamps", ".", "append", "(", "ts", ".", "to_series", "(", ")", ")", "index", ".", "append", "(", "df", ")", "self", ".", "timestamps", "=", "pd", ".", "concat", "(", "_timestamps", ",", "axis", "=", "0", ",", "sort", "=", "True", ")", "return", "pd", ".", "concat", "(", "index", ",", "axis", "=", "0", ",", "sort", "=", "True", ")", ".", "reset_index", "(", "drop", "=", "True", ")" ]
Build the entity index from run information.
[ "Build", "the", "entity", "index", "from", "run", "information", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L416-L430
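The index arithmetic above is worth making concrete: rows are spaced round(1000 / sampling_rate) milliseconds apart, and the ceil call guarantees the index covers the whole run even when duration * sampling_rate is not an integer. For example:

import math

sampling_rate = 10.0    # Hz (illustrative)
duration = 2.05         # seconds, for one run
interval = int(round(1000. / sampling_rate))      # 100 ms between samples
reps = int(math.ceil(duration * sampling_rate))   # 21 index rows for this run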
17,674
bids-standard/pybids
bids/variables/variables.py
DenseRunVariable.resample
def resample(self, sampling_rate, inplace=False, kind='linear'): '''Resample the Variable to the specified sampling rate. Parameters ---------- sampling_rate : :obj:`int`, :obj:`float` Target sampling rate (in Hz). inplace : :obj:`bool`, optional If True, performs resampling in-place. If False, returns a resampled copy of the current Variable. Default is False. kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates the kind of interpolation approach to use. See interp1d docs for valid values. Default is 'linear'. ''' if not inplace: var = self.clone() var.resample(sampling_rate, True, kind) return var if sampling_rate == self.sampling_rate: return old_sr = self.sampling_rate n = len(self.index) self.index = self._build_entity_index(self.run_info, sampling_rate) x = np.arange(n) num = len(self.index) from scipy.interpolate import interp1d f = interp1d(x, self.values.values.ravel(), kind=kind) x_new = np.linspace(0, n - 1, num=num) self.values = pd.DataFrame(f(x_new)) assert len(self.values) == len(self.index) self.sampling_rate = sampling_rate
python
def resample(self, sampling_rate, inplace=False, kind='linear'): '''Resample the Variable to the specified sampling rate. Parameters ---------- sampling_rate : :obj:`int`, :obj:`float` Target sampling rate (in Hz). inplace : :obj:`bool`, optional If True, performs resampling in-place. If False, returns a resampled copy of the current Variable. Default is False. kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates the kind of interpolation approach to use. See interp1d docs for valid values. Default is 'linear'. ''' if not inplace: var = self.clone() var.resample(sampling_rate, True, kind) return var if sampling_rate == self.sampling_rate: return old_sr = self.sampling_rate n = len(self.index) self.index = self._build_entity_index(self.run_info, sampling_rate) x = np.arange(n) num = len(self.index) from scipy.interpolate import interp1d f = interp1d(x, self.values.values.ravel(), kind=kind) x_new = np.linspace(0, n - 1, num=num) self.values = pd.DataFrame(f(x_new)) assert len(self.values) == len(self.index) self.sampling_rate = sampling_rate
[ "def", "resample", "(", "self", ",", "sampling_rate", ",", "inplace", "=", "False", ",", "kind", "=", "'linear'", ")", ":", "if", "not", "inplace", ":", "var", "=", "self", ".", "clone", "(", ")", "var", ".", "resample", "(", "sampling_rate", ",", "True", ",", "kind", ")", "return", "var", "if", "sampling_rate", "==", "self", ".", "sampling_rate", ":", "return", "old_sr", "=", "self", ".", "sampling_rate", "n", "=", "len", "(", "self", ".", "index", ")", "self", ".", "index", "=", "self", ".", "_build_entity_index", "(", "self", ".", "run_info", ",", "sampling_rate", ")", "x", "=", "np", ".", "arange", "(", "n", ")", "num", "=", "len", "(", "self", ".", "index", ")", "from", "scipy", ".", "interpolate", "import", "interp1d", "f", "=", "interp1d", "(", "x", ",", "self", ".", "values", ".", "values", ".", "ravel", "(", ")", ",", "kind", "=", "kind", ")", "x_new", "=", "np", ".", "linspace", "(", "0", ",", "n", "-", "1", ",", "num", "=", "num", ")", "self", ".", "values", "=", "pd", ".", "DataFrame", "(", "f", "(", "x_new", ")", ")", "assert", "len", "(", "self", ".", "values", ")", "==", "len", "(", "self", ".", "index", ")", "self", ".", "sampling_rate", "=", "sampling_rate" ]
Resample the Variable to the specified sampling rate. Parameters ---------- sampling_rate : :obj:`int`, :obj:`float` Target sampling rate (in Hz). inplace : :obj:`bool`, optional If True, performs resampling in-place. If False, returns a resampled copy of the current Variable. Default is False. kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates the kind of interpolation approach to use. See interp1d docs for valid values. Default is 'linear'.
[ "Resample", "the", "Variable", "to", "the", "specified", "sampling", "rate", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L432-L469
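A short usage sketch, with `dense_var` assumed to be an existing DenseRunVariable; 'cubic' is one of the interpolation kinds scipy.interpolate.interp1d accepts. With the default inplace=False a resampled clone is returned and the original is untouched.

slow = dense_var.resample(1.0, kind='cubic')   # downsample to 1 Hz
print(len(slow.values), slow.sampling_rate)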
17,675
bids-standard/pybids
bids/variables/variables.py
DenseRunVariable.to_df
def to_df(self, condition=True, entities=True, timing=True, sampling_rate=None): '''Convert to a DataFrame, with columns for name and entities. Parameters ---------- condition : :obj:`bool` If True, adds a column for condition name, and names the amplitude column 'amplitude'. If False, returns just onset, duration, and amplitude, and gives the amplitude column the current column name. entities : :obj:`bool` If True, adds extra columns for all entities. timing : :obj:`bool` If True, includes onset and duration columns (even though events are sampled uniformly). If False, omits them. ''' if sampling_rate not in (None, self.sampling_rate): return self.resample(sampling_rate).to_df(condition, entities) df = super(DenseRunVariable, self).to_df(condition, entities) if timing: df['onset'] = self.timestamps.values.astype(float) / 1e+9 df['duration'] = 1. / self.sampling_rate return df
python
def to_df(self, condition=True, entities=True, timing=True, sampling_rate=None): '''Convert to a DataFrame, with columns for name and entities. Parameters ---------- condition : :obj:`bool` If True, adds a column for condition name, and names the amplitude column 'amplitude'. If False, returns just onset, duration, and amplitude, and gives the amplitude column the current column name. entities : :obj:`bool` If True, adds extra columns for all entities. timing : :obj:`bool` If True, includes onset and duration columns (even though events are sampled uniformly). If False, omits them. ''' if sampling_rate not in (None, self.sampling_rate): return self.resample(sampling_rate).to_df(condition, entities) df = super(DenseRunVariable, self).to_df(condition, entities) if timing: df['onset'] = self.timestamps.values.astype(float) / 1e+9 df['duration'] = 1. / self.sampling_rate return df
[ "def", "to_df", "(", "self", ",", "condition", "=", "True", ",", "entities", "=", "True", ",", "timing", "=", "True", ",", "sampling_rate", "=", "None", ")", ":", "if", "sampling_rate", "not", "in", "(", "None", ",", "self", ".", "sampling_rate", ")", ":", "return", "self", ".", "resample", "(", "sampling_rate", ")", ".", "to_df", "(", "condition", ",", "entities", ")", "df", "=", "super", "(", "DenseRunVariable", ",", "self", ")", ".", "to_df", "(", "condition", ",", "entities", ")", "if", "timing", ":", "df", "[", "'onset'", "]", "=", "self", ".", "timestamps", ".", "values", ".", "astype", "(", "float", ")", "/", "1e+9", "df", "[", "'duration'", "]", "=", "1.", "/", "self", ".", "sampling_rate", "return", "df" ]
Convert to a DataFrame, with columns for name and entities. Parameters ---------- condition : :obj:`bool` If True, adds a column for condition name, and names the amplitude column 'amplitude'. If False, returns just onset, duration, and amplitude, and gives the amplitude column the current column name. entities : :obj:`bool` If True, adds extra columns for all entities. timing : :obj:`bool` If True, includes onset and duration columns (even though events are sampled uniformly). If False, omits them.
[ "Convert", "to", "a", "DataFrame", "with", "columns", "for", "name", "and", "entities", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L471-L495
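The onset computation relies on the timestamps being pandas datetimes: casting them to float yields nanoseconds, so dividing by 1e+9 recovers onsets in seconds, and duration is the constant 1 / sampling_rate. Note also that when a different sampling_rate triggers the resample branch, the timing flag is not forwarded, so the resampled frame always includes timing columns. A sketch (dense_var as above):

df = dense_var.to_df(sampling_rate=10)     # resamples first, then converts
print(df[['onset', 'duration']].head())    # duration is 0.1 s per row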
17,676
bids-standard/pybids
bids/variables/entities.py
NodeIndex.get_collections
def get_collections(self, unit, names=None, merge=False, sampling_rate=None, **entities): ''' Retrieve variable data for a specified level in the Dataset. Args: unit (str): The unit of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. names (list): Optional list of variables names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current unit. E.g., if unit='subject' and return_type= 'collection', variablesfrom all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. sampling_rate (int, str): If unit='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. entities: Optional constraints used to limit what gets returned. Returns: ''' nodes = self.get_nodes(unit, entities) var_sets = [] for n in nodes: var_set = list(n.variables.values()) var_set = [v for v in var_set if v.matches_entities(entities)] if names is not None: var_set = [v for v in var_set if v.name in names] # Additional filtering on Variables past run level, because their # contents are extracted from TSV files containing rows from # multiple observations if unit != 'run': var_set = [v.filter(entities) for v in var_set] var_sets.append(var_set) if merge: var_sets = [list(chain(*var_sets))] results = [] for vs in var_sets: if not vs: continue if unit == 'run': vs = clc.BIDSRunVariableCollection(vs, sampling_rate) else: vs = clc.BIDSVariableCollection(vs) results.append(vs) if merge: return results[0] if results else None return results
python
def get_collections(self, unit, names=None, merge=False, sampling_rate=None, **entities): ''' Retrieve variable data for a specified level in the Dataset. Args: unit (str): The unit of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. names (list): Optional list of variables names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current unit. E.g., if unit='subject' and return_type= 'collection', variablesfrom all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. sampling_rate (int, str): If unit='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. entities: Optional constraints used to limit what gets returned. Returns: ''' nodes = self.get_nodes(unit, entities) var_sets = [] for n in nodes: var_set = list(n.variables.values()) var_set = [v for v in var_set if v.matches_entities(entities)] if names is not None: var_set = [v for v in var_set if v.name in names] # Additional filtering on Variables past run level, because their # contents are extracted from TSV files containing rows from # multiple observations if unit != 'run': var_set = [v.filter(entities) for v in var_set] var_sets.append(var_set) if merge: var_sets = [list(chain(*var_sets))] results = [] for vs in var_sets: if not vs: continue if unit == 'run': vs = clc.BIDSRunVariableCollection(vs, sampling_rate) else: vs = clc.BIDSVariableCollection(vs) results.append(vs) if merge: return results[0] if results else None return results
[ "def", "get_collections", "(", "self", ",", "unit", ",", "names", "=", "None", ",", "merge", "=", "False", ",", "sampling_rate", "=", "None", ",", "*", "*", "entities", ")", ":", "nodes", "=", "self", ".", "get_nodes", "(", "unit", ",", "entities", ")", "var_sets", "=", "[", "]", "for", "n", "in", "nodes", ":", "var_set", "=", "list", "(", "n", ".", "variables", ".", "values", "(", ")", ")", "var_set", "=", "[", "v", "for", "v", "in", "var_set", "if", "v", ".", "matches_entities", "(", "entities", ")", "]", "if", "names", "is", "not", "None", ":", "var_set", "=", "[", "v", "for", "v", "in", "var_set", "if", "v", ".", "name", "in", "names", "]", "# Additional filtering on Variables past run level, because their", "# contents are extracted from TSV files containing rows from", "# multiple observations", "if", "unit", "!=", "'run'", ":", "var_set", "=", "[", "v", ".", "filter", "(", "entities", ")", "for", "v", "in", "var_set", "]", "var_sets", ".", "append", "(", "var_set", ")", "if", "merge", ":", "var_sets", "=", "[", "list", "(", "chain", "(", "*", "var_sets", ")", ")", "]", "results", "=", "[", "]", "for", "vs", "in", "var_sets", ":", "if", "not", "vs", ":", "continue", "if", "unit", "==", "'run'", ":", "vs", "=", "clc", ".", "BIDSRunVariableCollection", "(", "vs", ",", "sampling_rate", ")", "else", ":", "vs", "=", "clc", ".", "BIDSVariableCollection", "(", "vs", ")", "results", ".", "append", "(", "vs", ")", "if", "merge", ":", "return", "results", "[", "0", "]", "if", "results", "else", "None", "return", "results" ]
Retrieve variable data for a specified level in the Dataset. Args: unit (str): The unit of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. names (list): Optional list of variable names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current unit. E.g., if unit='subject' and return_type='collection', variables from all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. sampling_rate (int, str): If unit='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. entities: Optional constraints used to limit what gets returned. Returns: A list of BIDSVariableCollection objects, or a single merged collection (or None) if merge=True.
[ "Retrieve", "variable", "data", "for", "a", "specified", "level", "in", "the", "Dataset", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/entities.py#L65-L118
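A sketch of pulling run-level data through a NodeIndex. In practice the index is usually reached via a BIDSLayout, so `index` is assumed here to be an already-populated NodeIndex, and the entity values are illustrative.

coll = index.get_collections('run', names=['trial_type'], merge=True,
                             sampling_rate=10, subject='01')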
17,677
bids-standard/pybids
bids/variables/entities.py
NodeIndex.get_or_create_node
def get_or_create_node(self, level, entities, *args, **kwargs): ''' Retrieves a child Node based on the specified criteria, creating a new Node if necessary. Args: entities (dict): Dictionary of entities specifying which Node to return. args, kwargs: Optional positional or named arguments to pass onto class-specific initializers. These arguments are only used if a Node that matches the passed entities doesn't already exist, and a new one must be created. Returns: A Node instance. ''' result = self.get_nodes(level, entities) if result: if len(result) > 1: raise ValueError("More than one matching Node found! If you're" " expecting more than one Node, use " "get_nodes() instead of get_or_create_node()." ) return result[0] # Create Node if level == 'run': node = RunNode(entities, *args, **kwargs) else: node = Node(level, entities) entities = dict(entities, node_index=len(self.nodes), level=level) self.nodes.append(node) node_row = pd.Series(entities) self.index = self.index.append(node_row, ignore_index=True) return node
python
def get_or_create_node(self, level, entities, *args, **kwargs): ''' Retrieves a child Node based on the specified criteria, creating a new Node if necessary. Args: entities (dict): Dictionary of entities specifying which Node to return. args, kwargs: Optional positional or named arguments to pass onto class-specific initializers. These arguments are only used if a Node that matches the passed entities doesn't already exist, and a new one must be created. Returns: A Node instance. ''' result = self.get_nodes(level, entities) if result: if len(result) > 1: raise ValueError("More than one matching Node found! If you're" " expecting more than one Node, use " "get_nodes() instead of get_or_create_node()." ) return result[0] # Create Node if level == 'run': node = RunNode(entities, *args, **kwargs) else: node = Node(level, entities) entities = dict(entities, node_index=len(self.nodes), level=level) self.nodes.append(node) node_row = pd.Series(entities) self.index = self.index.append(node_row, ignore_index=True) return node
[ "def", "get_or_create_node", "(", "self", ",", "level", ",", "entities", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "get_nodes", "(", "level", ",", "entities", ")", "if", "result", ":", "if", "len", "(", "result", ")", ">", "1", ":", "raise", "ValueError", "(", "\"More than one matching Node found! If you're\"", "\" expecting more than one Node, use \"", "\"get_nodes() instead of get_or_create_node().\"", ")", "return", "result", "[", "0", "]", "# Create Node", "if", "level", "==", "'run'", ":", "node", "=", "RunNode", "(", "entities", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "node", "=", "Node", "(", "level", ",", "entities", ")", "entities", "=", "dict", "(", "entities", ",", "node_index", "=", "len", "(", "self", ".", "nodes", ")", ",", "level", "=", "level", ")", "self", ".", "nodes", ".", "append", "(", "node", ")", "node_row", "=", "pd", ".", "Series", "(", "entities", ")", "self", ".", "index", "=", "self", ".", "index", ".", "append", "(", "node_row", ",", "ignore_index", "=", "True", ")", "return", "node" ]
Retrieves a child Node based on the specified criteria, creating a new Node if necessary. Args: entities (dict): Dictionary of entities specifying which Node to return. args, kwargs: Optional positional or named arguments to pass onto class-specific initializers. These arguments are only used if a Node that matches the passed entities doesn't already exist, and a new one must be created. Returns: A Node instance.
[ "Retrieves", "a", "child", "Node", "based", "on", "the", "specified", "criteria", "creating", "a", "new", "Node", "if", "necessary", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/entities.py#L160-L198
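A sketch of the get-or-create pattern (`index` as above). A repeated call with the same level and entities should return the cached Node rather than appending a new row to the internal index.

node = index.get_or_create_node('subject', {'subject': '01'})
same = index.get_or_create_node('subject', {'subject': '01'})
assert node is same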
17,678
bids-standard/pybids
bids/variables/kollekshuns.py
merge_collections
def merge_collections(collections, force_dense=False, sampling_rate='auto'): ''' Merge two or more collections at the same level of analysis. Args: collections (list): List of Collections to merge. sampling_rate (int, str): Sampling rate to use if it becomes necessary to resample DenseRunVariables. Either an integer or 'auto' (see merge_variables docstring for further explanation). Returns: A BIDSVariableCollection or BIDSRunVariableCollection, depending on the type of the input collections. ''' if len(listify(collections)) == 1: return collections levels = set([c.level for c in collections]) if len(levels) > 1: raise ValueError("At the moment, it's only possible to merge " "Collections at the same level of analysis. You " "passed collections at levels: %s." % levels) variables = list(chain(*[c.variables.values() for c in collections])) cls = collections[0].__class__ variables = cls.merge_variables(variables, sampling_rate=sampling_rate) if isinstance(collections[0], BIDSRunVariableCollection): if sampling_rate == 'auto': rates = [var.sampling_rate for var in variables if isinstance(var, DenseRunVariable)] sampling_rate = rates[0] if rates else None return cls(variables, sampling_rate) return cls(variables)
python
def merge_collections(collections, force_dense=False, sampling_rate='auto'): ''' Merge two or more collections at the same level of analysis. Args: collections (list): List of Collections to merge. sampling_rate (int, str): Sampling rate to use if it becomes necessary to resample DenseRunVariables. Either an integer or 'auto' (see merge_variables docstring for further explanation). Returns: A BIDSVariableCollection or BIDSRunVariableCollection, depending on the type of the input collections. ''' if len(listify(collections)) == 1: return collections levels = set([c.level for c in collections]) if len(levels) > 1: raise ValueError("At the moment, it's only possible to merge " "Collections at the same level of analysis. You " "passed collections at levels: %s." % levels) variables = list(chain(*[c.variables.values() for c in collections])) cls = collections[0].__class__ variables = cls.merge_variables(variables, sampling_rate=sampling_rate) if isinstance(collections[0], BIDSRunVariableCollection): if sampling_rate == 'auto': rates = [var.sampling_rate for var in variables if isinstance(var, DenseRunVariable)] sampling_rate = rates[0] if rates else None return cls(variables, sampling_rate) return cls(variables)
[ "def", "merge_collections", "(", "collections", ",", "force_dense", "=", "False", ",", "sampling_rate", "=", "'auto'", ")", ":", "if", "len", "(", "listify", "(", "collections", ")", ")", "==", "1", ":", "return", "collections", "levels", "=", "set", "(", "[", "c", ".", "level", "for", "c", "in", "collections", "]", ")", "if", "len", "(", "levels", ")", ">", "1", ":", "raise", "ValueError", "(", "\"At the moment, it's only possible to merge \"", "\"Collections at the same level of analysis. You \"", "\"passed collections at levels: %s.\"", "%", "levels", ")", "variables", "=", "list", "(", "chain", "(", "*", "[", "c", ".", "variables", ".", "values", "(", ")", "for", "c", "in", "collections", "]", ")", ")", "cls", "=", "collections", "[", "0", "]", ".", "__class__", "variables", "=", "cls", ".", "merge_variables", "(", "variables", ",", "sampling_rate", "=", "sampling_rate", ")", "if", "isinstance", "(", "collections", "[", "0", "]", ",", "BIDSRunVariableCollection", ")", ":", "if", "sampling_rate", "==", "'auto'", ":", "rates", "=", "[", "var", ".", "sampling_rate", "for", "var", "in", "variables", "if", "isinstance", "(", "var", ",", "DenseRunVariable", ")", "]", "sampling_rate", "=", "rates", "[", "0", "]", "if", "rates", "else", "None", "return", "cls", "(", "variables", ",", "sampling_rate", ")", "return", "cls", "(", "variables", ")" ]
Merge two or more collections at the same level of analysis. Args: collections (list): List of Collections to merge. sampling_rate (int, str): Sampling rate to use if it becomes necessary to resample DenseRunVariables. Either an integer or 'auto' (see merge_variables docstring for further explanation). Returns: A BIDSVariableCollection or BIDSRunVariableCollection, depending on the type of the input collections.
[ "Merge", "two", "or", "more", "collections", "at", "the", "same", "level", "of", "analysis", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L354-L390
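Note that force_dense appears in the signature but is never used in this body, and that a single-collection input is returned unchanged. A usage sketch, assuming coll_a and coll_b are run-level collections; the import path follows bids/variables/kollekshuns.py as listed.

from bids.variables.kollekshuns import merge_collections

# With sampling_rate='auto', the rate of the first DenseRunVariable
# found among the merged variables is adopted.
merged = merge_collections([coll_a, coll_b], sampling_rate='auto')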
17,679
bids-standard/pybids
bids/variables/kollekshuns.py
BIDSVariableCollection.merge_variables
def merge_variables(variables, **kwargs): ''' Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables. ''' var_dict = OrderedDict() for v in variables: if v.name not in var_dict: var_dict[v.name] = [] var_dict[v.name].append(v) return [merge_variables(vars_, **kwargs) for vars_ in list(var_dict.values())]
python
def merge_variables(variables, **kwargs): ''' Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables. ''' var_dict = OrderedDict() for v in variables: if v.name not in var_dict: var_dict[v.name] = [] var_dict[v.name].append(v) return [merge_variables(vars_, **kwargs) for vars_ in list(var_dict.values())]
[ "def", "merge_variables", "(", "variables", ",", "*", "*", "kwargs", ")", ":", "var_dict", "=", "OrderedDict", "(", ")", "for", "v", "in", "variables", ":", "if", "v", ".", "name", "not", "in", "var_dict", ":", "var_dict", "[", "v", ".", "name", "]", "=", "[", "]", "var_dict", "[", "v", ".", "name", "]", ".", "append", "(", "v", ")", "return", "[", "merge_variables", "(", "vars_", ",", "*", "*", "kwargs", ")", "for", "vars_", "in", "list", "(", "var_dict", ".", "values", "(", ")", ")", "]" ]
Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables.
[ "Concatenates", "Variables", "along", "row", "axis", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L69-L86
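Despite sharing a name, this method only groups variables by name and then delegates each group to the module-level merge_variables function imported from bids.variables.variables. A sketch, assuming it is exposed as a static method (as the self-less signature suggests) and that vars_list holds BIDSVariable instances, possibly with repeated names:

from bids.variables.kollekshuns import BIDSVariableCollection

merged = BIDSVariableCollection.merge_variables(vars_list)
print([v.name for v in merged])   # one merged Variable per distinct name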
17,680
bids-standard/pybids
bids/variables/kollekshuns.py
BIDSVariableCollection.to_df
def to_df(self, variables=None, format='wide', fillna=np.nan, **kwargs): ''' Merge variables into a single pandas DataFrame. Args: variables (list): Optional list of column names to retain; if None, all variables are returned. format (str): Whether to return a DataFrame in 'wide' or 'long' format. In 'wide' format, each row is defined by a unique onset/duration, and each variable is in a separate column. In 'long' format, each row is a unique combination of onset, duration, and variable name, and a single 'amplitude' column provides the value. fillna: Replace missing values with the specified value. kwargs: Optional keyword arguments to pass onto each Variable's to_df() call (e.g., condition, entities, and timing). Returns: A pandas DataFrame. ''' if variables is None: variables = list(self.variables.keys()) # Can receive already-selected Variables from sub-classes if not isinstance(variables[0], BIDSVariable): variables = [v for v in self.variables.values() if v.name in variables] dfs = [v.to_df(**kwargs) for v in variables] df = pd.concat(dfs, axis=0, sort=True) if format == 'long': return df.reset_index(drop=True).fillna(fillna) ind_cols = list(set(df.columns) - {'condition', 'amplitude'}) df['amplitude'] = df['amplitude'].fillna('n/a') df = df.pivot_table(index=ind_cols, columns='condition', values='amplitude', aggfunc='first') df = df.reset_index().replace('n/a', fillna) df.columns.name = None return df
python
def to_df(self, variables=None, format='wide', fillna=np.nan, **kwargs): ''' Merge variables into a single pandas DataFrame. Args: variables (list): Optional list of column names to retain; if None, all variables are returned. format (str): Whether to return a DataFrame in 'wide' or 'long' format. In 'wide' format, each row is defined by a unique onset/duration, and each variable is in a separate column. In 'long' format, each row is a unique combination of onset, duration, and variable name, and a single 'amplitude' column provides the value. fillna: Replace missing values with the specified value. kwargs: Optional keyword arguments to pass onto each Variable's to_df() call (e.g., condition, entities, and timing). Returns: A pandas DataFrame. ''' if variables is None: variables = list(self.variables.keys()) # Can receive already-selected Variables from sub-classes if not isinstance(variables[0], BIDSVariable): variables = [v for v in self.variables.values() if v.name in variables] dfs = [v.to_df(**kwargs) for v in variables] df = pd.concat(dfs, axis=0, sort=True) if format == 'long': return df.reset_index(drop=True).fillna(fillna) ind_cols = list(set(df.columns) - {'condition', 'amplitude'}) df['amplitude'] = df['amplitude'].fillna('n/a') df = df.pivot_table(index=ind_cols, columns='condition', values='amplitude', aggfunc='first') df = df.reset_index().replace('n/a', fillna) df.columns.name = None return df
[ "def", "to_df", "(", "self", ",", "variables", "=", "None", ",", "format", "=", "'wide'", ",", "fillna", "=", "np", ".", "nan", ",", "*", "*", "kwargs", ")", ":", "if", "variables", "is", "None", ":", "variables", "=", "list", "(", "self", ".", "variables", ".", "keys", "(", ")", ")", "# Can receive already-selected Variables from sub-classes", "if", "not", "isinstance", "(", "variables", "[", "0", "]", ",", "BIDSVariable", ")", ":", "variables", "=", "[", "v", "for", "v", "in", "self", ".", "variables", ".", "values", "(", ")", "if", "v", ".", "name", "in", "variables", "]", "dfs", "=", "[", "v", ".", "to_df", "(", "*", "*", "kwargs", ")", "for", "v", "in", "variables", "]", "df", "=", "pd", ".", "concat", "(", "dfs", ",", "axis", "=", "0", ",", "sort", "=", "True", ")", "if", "format", "==", "'long'", ":", "return", "df", ".", "reset_index", "(", "drop", "=", "True", ")", ".", "fillna", "(", "fillna", ")", "ind_cols", "=", "list", "(", "set", "(", "df", ".", "columns", ")", "-", "{", "'condition'", ",", "'amplitude'", "}", ")", "df", "[", "'amplitude'", "]", "=", "df", "[", "'amplitude'", "]", ".", "fillna", "(", "'n/a'", ")", "df", "=", "df", ".", "pivot_table", "(", "index", "=", "ind_cols", ",", "columns", "=", "'condition'", ",", "values", "=", "'amplitude'", ",", "aggfunc", "=", "'first'", ")", "df", "=", "df", ".", "reset_index", "(", ")", ".", "replace", "(", "'n/a'", ",", "fillna", ")", "df", ".", "columns", ".", "name", "=", "None", "return", "df" ]
Merge variables into a single pandas DataFrame. Args: variables (list): Optional list of column names to retain; if None, all variables are returned. format (str): Whether to return a DataFrame in 'wide' or 'long' format. In 'wide' format, each row is defined by a unique onset/duration, and each variable is in a separate column. In 'long' format, each row is a unique combination of onset, duration, and variable name, and a single 'amplitude' column provides the value. fillna: Replace missing values with the specified value. kwargs: Optional keyword arguments to pass onto each Variable's to_df() call (e.g., condition, entities, and timing). Returns: A pandas DataFrame.
[ "Merge", "variables", "into", "a", "single", "pandas", "DataFrame", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L88-L128
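A sketch contrasting the two output shapes, with `coll` assumed to be an existing collection:

wide = coll.to_df(format='wide')             # one column per condition
long = coll.to_df(format='long', fillna=0)   # one row per observation,
                                             # values in an 'amplitude' column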
17,681
bids-standard/pybids
bids/variables/kollekshuns.py
BIDSVariableCollection.from_df
def from_df(cls, data, entities=None, source='contrast'): ''' Create a Collection from a pandas DataFrame. Args: df (DataFrame): The DataFrame to convert to a Collection. Each column will be converted to a SimpleVariable. entities (DataFrame): An optional second DataFrame containing entity information. source (str): The value to set as the source for all Variables. Returns: A BIDSVariableCollection. ''' variables = [] for col in data.columns: _data = pd.DataFrame(data[col].values, columns=['amplitude']) if entities is not None: _data = pd.concat([_data, entities], axis=1, sort=True) variables.append(SimpleVariable(name=col, data=_data, source=source)) return BIDSVariableCollection(variables)
python
def from_df(cls, data, entities=None, source='contrast'): ''' Create a Collection from a pandas DataFrame. Args: df (DataFrame): The DataFrame to convert to a Collection. Each column will be converted to a SimpleVariable. entities (DataFrame): An optional second DataFrame containing entity information. source (str): The value to set as the source for all Variables. Returns: A BIDSVariableCollection. ''' variables = [] for col in data.columns: _data = pd.DataFrame(data[col].values, columns=['amplitude']) if entities is not None: _data = pd.concat([_data, entities], axis=1, sort=True) variables.append(SimpleVariable(name=col, data=_data, source=source)) return BIDSVariableCollection(variables)
[ "def", "from_df", "(", "cls", ",", "data", ",", "entities", "=", "None", ",", "source", "=", "'contrast'", ")", ":", "variables", "=", "[", "]", "for", "col", "in", "data", ".", "columns", ":", "_data", "=", "pd", ".", "DataFrame", "(", "data", "[", "col", "]", ".", "values", ",", "columns", "=", "[", "'amplitude'", "]", ")", "if", "entities", "is", "not", "None", ":", "_data", "=", "pd", ".", "concat", "(", "[", "_data", ",", "entities", "]", ",", "axis", "=", "1", ",", "sort", "=", "True", ")", "variables", ".", "append", "(", "SimpleVariable", "(", "name", "=", "col", ",", "data", "=", "_data", ",", "source", "=", "source", ")", ")", "return", "BIDSVariableCollection", "(", "variables", ")" ]
Create a Collection from a pandas DataFrame. Args: data (DataFrame): The DataFrame to convert to a Collection. Each column will be converted to a SimpleVariable. entities (DataFrame): An optional second DataFrame containing entity information. source (str): The value to set as the source for all Variables. Returns: A BIDSVariableCollection.
[ "Create", "a", "Collection", "from", "a", "pandas", "DataFrame", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L131-L150
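A small, self-contained sketch of from_df; the import path follows bids/variables/kollekshuns.py as listed, and the column names are illustrative.

import pandas as pd
from bids.variables.kollekshuns import BIDSVariableCollection

df = pd.DataFrame({'rt': [0.41, 0.52, 0.38], 'accuracy': [1, 0, 1]})
coll = BIDSVariableCollection.from_df(df, source='contrast')
print(list(coll.variables))   # ['rt', 'accuracy'], one SimpleVariable each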
17,682
bids-standard/pybids
bids/variables/kollekshuns.py
BIDSVariableCollection.clone
def clone(self): ''' Returns a shallow copy of the current instance, except that all variables are deep-cloned. ''' clone = copy(self) clone.variables = {k: v.clone() for (k, v) in self.variables.items()} return clone
python
def clone(self): ''' Returns a shallow copy of the current instance, except that all variables are deep-cloned. ''' clone = copy(self) clone.variables = {k: v.clone() for (k, v) in self.variables.items()} return clone
[ "def", "clone", "(", "self", ")", ":", "clone", "=", "copy", "(", "self", ")", "clone", ".", "variables", "=", "{", "k", ":", "v", ".", "clone", "(", ")", "for", "(", "k", ",", "v", ")", "in", "self", ".", "variables", ".", "items", "(", ")", "}", "return", "clone" ]
Returns a shallow copy of the current instance, except that all variables are deep-cloned.
[ "Returns", "a", "shallow", "copy", "of", "the", "current", "instance", "except", "that", "all", "variables", "are", "deep", "-", "cloned", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L152-L158
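The shallow-copy-plus-deep-cloned-variables contract means a clone can be mutated freely; a sketch (`coll` as above, 'rt' illustrative):

c2 = coll.clone()
c2.variables['rt'].values[:] = 0   # the source collection is unaffected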
17,683
bids-standard/pybids
bids/variables/kollekshuns.py
BIDSVariableCollection._index_entities
def _index_entities(self): ''' Sets current instance's entities based on the existing index. Note: Only entity key/value pairs common to all rows in all contained Variables are returned. E.g., if a Collection contains Variables extracted from runs 1, 2 and 3 from subject '01', the returned dict will be {'subject': '01'}; the runs will be excluded as they vary across the Collection contents. ''' all_ents = pd.DataFrame.from_records( [v.entities for v in self.variables.values()]) constant = all_ents.apply(lambda x: x.nunique() == 1) if constant.empty: self.entities = {} else: keep = all_ents.columns[constant] ents = {k: all_ents[k].dropna().iloc[0] for k in keep} self.entities = {k: v for k, v in ents.items() if pd.notnull(v)}
python
def _index_entities(self): ''' Sets current instance's entities based on the existing index. Note: Only entity key/value pairs common to all rows in all contained Variables are returned. E.g., if a Collection contains Variables extracted from runs 1, 2 and 3 from subject '01', the returned dict will be {'subject': '01'}; the runs will be excluded as they vary across the Collection contents. ''' all_ents = pd.DataFrame.from_records( [v.entities for v in self.variables.values()]) constant = all_ents.apply(lambda x: x.nunique() == 1) if constant.empty: self.entities = {} else: keep = all_ents.columns[constant] ents = {k: all_ents[k].dropna().iloc[0] for k in keep} self.entities = {k: v for k, v in ents.items() if pd.notnull(v)}
[ "def", "_index_entities", "(", "self", ")", ":", "all_ents", "=", "pd", ".", "DataFrame", ".", "from_records", "(", "[", "v", ".", "entities", "for", "v", "in", "self", ".", "variables", ".", "values", "(", ")", "]", ")", "constant", "=", "all_ents", ".", "apply", "(", "lambda", "x", ":", "x", ".", "nunique", "(", ")", "==", "1", ")", "if", "constant", ".", "empty", ":", "self", ".", "entities", "=", "{", "}", "else", ":", "keep", "=", "all_ents", ".", "columns", "[", "constant", "]", "ents", "=", "{", "k", ":", "all_ents", "[", "k", "]", ".", "dropna", "(", ")", ".", "iloc", "[", "0", "]", "for", "k", "in", "keep", "}", "self", ".", "entities", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "ents", ".", "items", "(", ")", "if", "pd", ".", "notnull", "(", "v", ")", "}" ]
Sets current instance's entities based on the existing index. Note: Only entity key/value pairs common to all rows in all contained Variables are returned. E.g., if a Collection contains Variables extracted from runs 1, 2 and 3 from subject '01', the returned dict will be {'subject': '01'}; the runs will be excluded as they vary across the Collection contents.
[ "Sets", "current", "instance", "s", "entities", "based", "on", "the", "existing", "index", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L164-L181
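The nunique() == 1 test at the heart of this method is easy to see in isolation; a self-contained sketch:

import pandas as pd

ents = pd.DataFrame.from_records([
    {'subject': '01', 'run': 1},
    {'subject': '01', 'run': 2},
])
constant = ents.apply(lambda x: x.nunique() == 1)   # subject True, run False
print(dict(ents.loc[0, ents.columns[constant]]))    # {'subject': '01'}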
17,684
bids-standard/pybids
bids/variables/kollekshuns.py
BIDSVariableCollection.match_variables
def match_variables(self, pattern, return_type='name'): ''' Return columns whose names match the provided regex pattern. Args: pattern (str): A regex pattern to match all variable names against. return_type (str): What to return. Must be one of: 'name': Returns a list of names of matching variables. 'variable': Returns a list of Variable objects whose names match. ''' pattern = re.compile(pattern) vars_ = [v for v in self.variables.values() if pattern.search(v.name)] return vars_ if return_type.startswith('var') \ else [v.name for v in vars_]
python
def match_variables(self, pattern, return_type='name'): ''' Return columns whose names match the provided regex pattern. Args: pattern (str): A regex pattern to match all variable names against. return_type (str): What to return. Must be one of: 'name': Returns a list of names of matching variables. 'variable': Returns a list of Variable objects whose names match. ''' pattern = re.compile(pattern) vars_ = [v for v in self.variables.values() if pattern.search(v.name)] return vars_ if return_type.startswith('var') \ else [v.name for v in vars_]
[ "def", "match_variables", "(", "self", ",", "pattern", ",", "return_type", "=", "'name'", ")", ":", "pattern", "=", "re", ".", "compile", "(", "pattern", ")", "vars_", "=", "[", "v", "for", "v", "in", "self", ".", "variables", ".", "values", "(", ")", "if", "pattern", ".", "search", "(", "v", ".", "name", ")", "]", "return", "vars_", "if", "return_type", ".", "startswith", "(", "'var'", ")", "else", "[", "v", ".", "name", "for", "v", "in", "vars_", "]" ]
Return columns whose names match the provided regex pattern. Args: pattern (str): A regex pattern to match all variable names against. return_type (str): What to return. Must be one of: 'name': Returns a list of names of matching variables. 'variable': Returns a list of Variable objects whose names match.
[ "Return", "columns", "whose", "names", "match", "the", "provided", "regex", "pattern", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L196-L209
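A standalone sketch of the matching rule above: `re.search` matches anywhere in the name, not just at the start. The variable names here are hypothetical:

import re

names = ['rt', 'rt_log', 'accuracy']
pattern = re.compile('rt')
print([n for n in names if pattern.search(n)])  # ['rt', 'rt_log']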
17,685
bids-standard/pybids
bids/variables/kollekshuns.py
BIDSRunVariableCollection.to_df
def to_df(self, variables=None, format='wide', sparse=True,
              sampling_rate=None, include_sparse=True, include_dense=True,
              **kwargs):
        ''' Merge columns into a single pandas DataFrame.

        Args:
            variables (list): Optional list of variable names to retain;
                if None, all variables are written out.
            format (str): Whether to return a DataFrame in 'wide' or 'long'
                format. In 'wide' format, each row is defined by a unique
                onset/duration, and each variable is in a separate column.
                In 'long' format, each row is a unique combination of onset,
                duration, and variable name, and a single 'amplitude' column
                provides the value.
            sparse (bool): If True, variables will be kept in a sparse
                format provided they are all internally represented as such.
                If False, a dense matrix (i.e., uniform sampling rate for
                all events) will be exported. Will be ignored if at least
                one variable is dense.
            sampling_rate (float): If a dense matrix is written out, the
                sampling rate (in Hz) to use for downsampling. Defaults to
                the value currently set in the instance.
            kwargs: Optional keyword arguments to pass onto each Variable's
                to_df() call (e.g., condition, entities, and timing).
            include_sparse (bool): Whether or not to include sparse
                Variables.
            include_dense (bool): Whether or not to include dense Variables.

        Returns: A pandas DataFrame.
        '''
        if not include_sparse and not include_dense:
            raise ValueError("You can't exclude both dense and sparse "
                             "variables! That leaves nothing!")

        if variables is None:
            variables = list(self.variables.keys())

        if not include_sparse:
            variables = [v for v in variables if
                         isinstance(self.variables[v], DenseRunVariable)]

        if not include_dense:
            variables = [v for v in variables if not
                         isinstance(self.variables[v], DenseRunVariable)]

        if not variables:
            return None

        _vars = [self.variables[v] for v in variables]
        if sparse and all(isinstance(v, SimpleVariable) for v in _vars):
            variables = _vars
        else:
            sampling_rate = sampling_rate or self.sampling_rate

            # Make sure all variables have the same sampling rate
            variables = list(self.resample(sampling_rate, variables,
                                           force_dense=True,
                                           in_place=False).values())

        return super(BIDSRunVariableCollection, self).to_df(variables,
                                                            format, **kwargs)
python
def to_df(self, variables=None, format='wide', sparse=True,
              sampling_rate=None, include_sparse=True, include_dense=True,
              **kwargs):
        ''' Merge columns into a single pandas DataFrame.

        Args:
            variables (list): Optional list of variable names to retain;
                if None, all variables are written out.
            format (str): Whether to return a DataFrame in 'wide' or 'long'
                format. In 'wide' format, each row is defined by a unique
                onset/duration, and each variable is in a separate column.
                In 'long' format, each row is a unique combination of onset,
                duration, and variable name, and a single 'amplitude' column
                provides the value.
            sparse (bool): If True, variables will be kept in a sparse
                format provided they are all internally represented as such.
                If False, a dense matrix (i.e., uniform sampling rate for
                all events) will be exported. Will be ignored if at least
                one variable is dense.
            sampling_rate (float): If a dense matrix is written out, the
                sampling rate (in Hz) to use for downsampling. Defaults to
                the value currently set in the instance.
            kwargs: Optional keyword arguments to pass onto each Variable's
                to_df() call (e.g., condition, entities, and timing).
            include_sparse (bool): Whether or not to include sparse
                Variables.
            include_dense (bool): Whether or not to include dense Variables.

        Returns: A pandas DataFrame.
        '''
        if not include_sparse and not include_dense:
            raise ValueError("You can't exclude both dense and sparse "
                             "variables! That leaves nothing!")

        if variables is None:
            variables = list(self.variables.keys())

        if not include_sparse:
            variables = [v for v in variables if
                         isinstance(self.variables[v], DenseRunVariable)]

        if not include_dense:
            variables = [v for v in variables if not
                         isinstance(self.variables[v], DenseRunVariable)]

        if not variables:
            return None

        _vars = [self.variables[v] for v in variables]
        if sparse and all(isinstance(v, SimpleVariable) for v in _vars):
            variables = _vars
        else:
            sampling_rate = sampling_rate or self.sampling_rate

            # Make sure all variables have the same sampling rate
            variables = list(self.resample(sampling_rate, variables,
                                           force_dense=True,
                                           in_place=False).values())

        return super(BIDSRunVariableCollection, self).to_df(variables,
                                                            format, **kwargs)
[ "def", "to_df", "(", "self", ",", "variables", "=", "None", ",", "format", "=", "'wide'", ",", "sparse", "=", "True", ",", "sampling_rate", "=", "None", ",", "include_sparse", "=", "True", ",", "include_dense", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "not", "include_sparse", "and", "not", "include_dense", ":", "raise", "ValueError", "(", "\"You can't exclude both dense and sparse \"", "\"variables! That leaves nothing!\"", ")", "if", "variables", "is", "None", ":", "variables", "=", "list", "(", "self", ".", "variables", ".", "keys", "(", ")", ")", "if", "not", "include_sparse", ":", "variables", "=", "[", "v", "for", "v", "in", "variables", "if", "isinstance", "(", "self", ".", "variables", "[", "v", "]", ",", "DenseRunVariable", ")", "]", "if", "not", "include_dense", ":", "variables", "=", "[", "v", "for", "v", "in", "variables", "if", "not", "isinstance", "(", "self", ".", "variables", "[", "v", "]", ",", "DenseRunVariable", ")", "]", "if", "not", "variables", ":", "return", "None", "_vars", "=", "[", "self", ".", "variables", "[", "v", "]", "for", "v", "in", "variables", "]", "if", "sparse", "and", "all", "(", "isinstance", "(", "v", ",", "SimpleVariable", ")", "for", "v", "in", "_vars", ")", ":", "variables", "=", "_vars", "else", ":", "sampling_rate", "=", "sampling_rate", "or", "self", ".", "sampling_rate", "# Make sure all variables have the same sampling rate", "variables", "=", "list", "(", "self", ".", "resample", "(", "sampling_rate", ",", "variables", ",", "force_dense", "=", "True", ",", "in_place", "=", "False", ")", ".", "values", "(", ")", ")", "return", "super", "(", "BIDSRunVariableCollection", ",", "self", ")", ".", "to_df", "(", "variables", ",", "format", ",", "*", "*", "kwargs", ")" ]
Merge columns into a single pandas DataFrame.

Args:
    variables (list): Optional list of variable names to retain;
        if None, all variables are written out.
    format (str): Whether to return a DataFrame in 'wide' or 'long'
        format. In 'wide' format, each row is defined by a unique
        onset/duration, and each variable is in a separate column.
        In 'long' format, each row is a unique combination of onset,
        duration, and variable name, and a single 'amplitude' column
        provides the value.
    sparse (bool): If True, variables will be kept in a sparse format
        provided they are all internally represented as such. If False,
        a dense matrix (i.e., uniform sampling rate for all events)
        will be exported. Will be ignored if at least one variable is
        dense.
    sampling_rate (float): If a dense matrix is written out, the
        sampling rate (in Hz) to use for downsampling. Defaults to the
        value currently set in the instance.
    kwargs: Optional keyword arguments to pass onto each Variable's
        to_df() call (e.g., condition, entities, and timing).
    include_sparse (bool): Whether or not to include sparse Variables.
    include_dense (bool): Whether or not to include dense Variables.

Returns: A pandas DataFrame.
[ "Merge", "columns", "into", "a", "single", "pandas", "DataFrame", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L290-L351
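A hedged usage sketch of the API described above; `coll` stands in for a BIDSRunVariableCollection built elsewhere, so the calls are shown as comments rather than asserted behavior:

# Assuming `coll` is a BIDSRunVariableCollection:
# wide = coll.to_df(format='wide')    # one column per variable
# long = coll.to_df(format='long')    # onset/duration/condition/amplitude rows
# dense = coll.to_df(sparse=False, sampling_rate=10)  # uniform 10 Hz sampling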
17,686
bids-standard/pybids
bids/analysis/transformations/munge.py
Rename._transform
def _transform(self, var):
        ''' Rename happens automatically in the base class, so all we need
        to do is unset the original variable in the collection. '''
        self.collection.variables.pop(var.name)
        return var.values
python
def _transform(self, var):
        ''' Rename happens automatically in the base class, so all we need
        to do is unset the original variable in the collection. '''
        self.collection.variables.pop(var.name)
        return var.values
[ "def", "_transform", "(", "self", ",", "var", ")", ":", "self", ".", "collection", ".", "variables", ".", "pop", "(", "var", ".", "name", ")", "return", "var", ".", "values" ]
Rename happens automatically in the base class, so all we need
to do is unset the original variable in the collection.
[ "Rename", "happens", "automatically", "in", "the", "base", "class", "so", "all", "we", "need", "to", "do", "is", "unset", "the", "original", "variable", "in", "the", "collection", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/transformations/munge.py#L215-L219
17,687
bids-standard/pybids
bids/layout/writing.py
replace_entities
def replace_entities(entities, pattern):
    """
    Replaces all entity names in a given pattern with the corresponding
    values provided by entities.

    Args:
        entities (dict): A dictionary mapping entity names to entity values.
        pattern (str): A path pattern that contains entity names denoted
            by curly braces. Optional portions are denoted by square
            brackets. For example: 'sub-{subject}/[var-{name}/]{id}.csv'
            Accepted entity values, matched via regex, are denoted within
            angle brackets. For example: 'sub-{subject<01|02>}/{task}.csv'

    Returns:
        A new string with the entity values inserted where entity names
        were denoted in the provided pattern.
    """
    ents = re.findall(r'\{(.*?)\}', pattern)
    new_path = pattern
    for ent in ents:
        match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent)
        if match is None:
            return None
        name, valid, default = match.groups()
        default = default[1:] if default is not None else default

        if name in entities and valid is not None:
            ent_val = str(entities[name])
            if not re.match(valid[1:-1], ent_val):
                if default is None:
                    return None
                entities[name] = default

        ent_val = entities.get(name, default)
        if ent_val is None:
            return None
        new_path = new_path.replace('{%s}' % ent, str(ent_val))

    return new_path
python
def replace_entities(entities, pattern):
    """
    Replaces all entity names in a given pattern with the corresponding
    values provided by entities.

    Args:
        entities (dict): A dictionary mapping entity names to entity values.
        pattern (str): A path pattern that contains entity names denoted
            by curly braces. Optional portions are denoted by square
            brackets. For example: 'sub-{subject}/[var-{name}/]{id}.csv'
            Accepted entity values, matched via regex, are denoted within
            angle brackets. For example: 'sub-{subject<01|02>}/{task}.csv'

    Returns:
        A new string with the entity values inserted where entity names
        were denoted in the provided pattern.
    """
    ents = re.findall(r'\{(.*?)\}', pattern)
    new_path = pattern
    for ent in ents:
        match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent)
        if match is None:
            return None
        name, valid, default = match.groups()
        default = default[1:] if default is not None else default

        if name in entities and valid is not None:
            ent_val = str(entities[name])
            if not re.match(valid[1:-1], ent_val):
                if default is None:
                    return None
                entities[name] = default

        ent_val = entities.get(name, default)
        if ent_val is None:
            return None
        new_path = new_path.replace('{%s}' % ent, str(ent_val))

    return new_path
[ "def", "replace_entities", "(", "entities", ",", "pattern", ")", ":", "ents", "=", "re", ".", "findall", "(", "r'\\{(.*?)\\}'", ",", "pattern", ")", "new_path", "=", "pattern", "for", "ent", "in", "ents", ":", "match", "=", "re", ".", "search", "(", "r'([^|<]+)(<.*?>)?(\\|.*)?'", ",", "ent", ")", "if", "match", "is", "None", ":", "return", "None", "name", ",", "valid", ",", "default", "=", "match", ".", "groups", "(", ")", "default", "=", "default", "[", "1", ":", "]", "if", "default", "is", "not", "None", "else", "default", "if", "name", "in", "entities", "and", "valid", "is", "not", "None", ":", "ent_val", "=", "str", "(", "entities", "[", "name", "]", ")", "if", "not", "re", ".", "match", "(", "valid", "[", "1", ":", "-", "1", "]", ",", "ent_val", ")", ":", "if", "default", "is", "None", ":", "return", "None", "entities", "[", "name", "]", "=", "default", "ent_val", "=", "entities", ".", "get", "(", "name", ",", "default", ")", "if", "ent_val", "is", "None", ":", "return", "None", "new_path", "=", "new_path", ".", "replace", "(", "'{%s}'", "%", "ent", ",", "str", "(", "ent_val", ")", ")", "return", "new_path" ]
Replaces all entity names in a given pattern with the corresponding
values provided by entities.

Args:
    entities (dict): A dictionary mapping entity names to entity values.
    pattern (str): A path pattern that contains entity names denoted
        by curly braces. Optional portions are denoted by square
        brackets. For example: 'sub-{subject}/[var-{name}/]{id}.csv'
        Accepted entity values, matched via regex, are denoted within
        angle brackets. For example: 'sub-{subject<01|02>}/{task}.csv'

Returns:
    A new string with the entity values inserted where entity names
    were denoted in the provided pattern.
[ "Replaces", "all", "entity", "names", "in", "a", "given", "pattern", "with", "the", "corresponding", "values", "provided", "by", "entities", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/writing.py#L16-L55
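A short sketch of the pattern syntax described above, assuming `replace_entities` is importable from `bids.layout.writing` at this commit; the entity values are hypothetical:

from bids.layout.writing import replace_entities

entities = {'subject': '01', 'task': 'rest'}
print(replace_entities(entities, 'sub-{subject<01|02>}/{task}.csv'))
# sub-01/rest.csv
print(replace_entities({'task': 'rest'}, 'sub-{subject}/{task}.csv'))
# None -- 'subject' is missing and the pattern provides no default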
17,688
bids-standard/pybids
bids/layout/writing.py
write_contents_to_file
def write_contents_to_file(path, contents=None, link_to=None,
                           content_mode='text', root=None, conflicts='fail'):
    """
    Uses provided filename patterns to write contents to a new path, given
    a corresponding entity map.

    Args:
        path (str): Destination path of the desired contents.
        contents (str): Raw text or binary encoded string of contents to
            write to the new path.
        link_to (str): Optional path to which a symbolic link will be
            created. Used as an alternative to, and takes priority over,
            the contents argument.
        content_mode (str): Either 'text' or 'binary' to indicate the
            writing mode for the new file. Only relevant if contents is
            provided.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to current working directory.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when the output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix to each file copy, starting with 1. Default is 'fail'.
    """
    if root is None and not isabs(path):
        root = os.getcwd()

    if root:
        path = join(root, path)

    if exists(path) or islink(path):
        if conflicts == 'fail':
            msg = 'A file at path {} already exists.'
            raise ValueError(msg.format(path))
        elif conflicts == 'skip':
            msg = 'A file at path {} already exists, skipping writing file.'
            logging.warning(msg.format(path))
            return
        elif conflicts == 'overwrite':
            if isdir(path):
                logging.warning('New path is a directory, not going to '
                                'overwrite it, skipping instead.')
                return
            os.remove(path)
        elif conflicts == 'append':
            i = 1
            while i < sys.maxsize:
                # splitext() returns an immutable tuple, so copy it into a
                # list before modifying the stem; the extension already
                # includes the separator, so plain concatenation rejoins it.
                path_splits = list(splitext(path))
                path_splits[0] = path_splits[0] + '_%d' % i
                appended_filename = ''.join(path_splits)
                if not exists(appended_filename) and \
                        not islink(appended_filename):
                    path = appended_filename
                    break
                i += 1
        else:
            raise ValueError('Did not provide a valid conflicts parameter')

    if not exists(dirname(path)):
        os.makedirs(dirname(path))

    if link_to:
        os.symlink(link_to, path)
    elif contents:
        mode = 'wb' if content_mode == 'binary' else 'w'
        with open(path, mode) as f:
            f.write(contents)
    else:
        raise ValueError('One of contents or link_to must be provided.')
python
def write_contents_to_file(path, contents=None, link_to=None,
                           content_mode='text', root=None, conflicts='fail'):
    """
    Uses provided filename patterns to write contents to a new path, given
    a corresponding entity map.

    Args:
        path (str): Destination path of the desired contents.
        contents (str): Raw text or binary encoded string of contents to
            write to the new path.
        link_to (str): Optional path to which a symbolic link will be
            created. Used as an alternative to, and takes priority over,
            the contents argument.
        content_mode (str): Either 'text' or 'binary' to indicate the
            writing mode for the new file. Only relevant if contents is
            provided.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to current working directory.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when the output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix to each file copy, starting with 1. Default is 'fail'.
    """
    if root is None and not isabs(path):
        root = os.getcwd()

    if root:
        path = join(root, path)

    if exists(path) or islink(path):
        if conflicts == 'fail':
            msg = 'A file at path {} already exists.'
            raise ValueError(msg.format(path))
        elif conflicts == 'skip':
            msg = 'A file at path {} already exists, skipping writing file.'
            logging.warning(msg.format(path))
            return
        elif conflicts == 'overwrite':
            if isdir(path):
                logging.warning('New path is a directory, not going to '
                                'overwrite it, skipping instead.')
                return
            os.remove(path)
        elif conflicts == 'append':
            i = 1
            while i < sys.maxsize:
                # splitext() returns an immutable tuple, so copy it into a
                # list before modifying the stem; the extension already
                # includes the separator, so plain concatenation rejoins it.
                path_splits = list(splitext(path))
                path_splits[0] = path_splits[0] + '_%d' % i
                appended_filename = ''.join(path_splits)
                if not exists(appended_filename) and \
                        not islink(appended_filename):
                    path = appended_filename
                    break
                i += 1
        else:
            raise ValueError('Did not provide a valid conflicts parameter')

    if not exists(dirname(path)):
        os.makedirs(dirname(path))

    if link_to:
        os.symlink(link_to, path)
    elif contents:
        mode = 'wb' if content_mode == 'binary' else 'w'
        with open(path, mode) as f:
            f.write(contents)
    else:
        raise ValueError('One of contents or link_to must be provided.')
[ "def", "write_contents_to_file", "(", "path", ",", "contents", "=", "None", ",", "link_to", "=", "None", ",", "content_mode", "=", "'text'", ",", "root", "=", "None", ",", "conflicts", "=", "'fail'", ")", ":", "if", "root", "is", "None", "and", "not", "isabs", "(", "path", ")", ":", "root", "=", "os", ".", "getcwd", "(", ")", "if", "root", ":", "path", "=", "join", "(", "root", ",", "path", ")", "if", "exists", "(", "path", ")", "or", "islink", "(", "path", ")", ":", "if", "conflicts", "==", "'fail'", ":", "msg", "=", "'A file at path {} already exists.'", "raise", "ValueError", "(", "msg", ".", "format", "(", "path", ")", ")", "elif", "conflicts", "==", "'skip'", ":", "msg", "=", "'A file at path {} already exists, skipping writing file.'", "logging", ".", "warn", "(", "msg", ".", "format", "(", "path", ")", ")", "return", "elif", "conflicts", "==", "'overwrite'", ":", "if", "isdir", "(", "path", ")", ":", "logging", ".", "warn", "(", "'New path is a directory, not going to '", "'overwrite it, skipping instead.'", ")", "return", "os", ".", "remove", "(", "path", ")", "elif", "conflicts", "==", "'append'", ":", "i", "=", "1", "while", "i", "<", "sys", ".", "maxsize", ":", "path_splits", "=", "splitext", "(", "path", ")", "path_splits", "[", "0", "]", "=", "path_splits", "[", "0", "]", "+", "'_%d'", "%", "i", "appended_filename", "=", "os", ".", "extsep", ".", "join", "(", "path_splits", ")", "if", "not", "exists", "(", "appended_filename", ")", "and", "not", "islink", "(", "appended_filename", ")", ":", "path", "=", "appended_filename", "break", "i", "+=", "1", "else", ":", "raise", "ValueError", "(", "'Did not provide a valid conflicts parameter'", ")", "if", "not", "exists", "(", "dirname", "(", "path", ")", ")", ":", "os", ".", "makedirs", "(", "dirname", "(", "path", ")", ")", "if", "link_to", ":", "os", ".", "symlink", "(", "link_to", ",", "path", ")", "elif", "contents", ":", "mode", "=", "'wb'", "if", "content_mode", "==", "'binary'", "else", "'w'", "with", "open", "(", "path", ",", "mode", ")", "as", "f", ":", "f", ".", "write", "(", "contents", ")", "else", ":", "raise", "ValueError", "(", "'One of contents or link_to must be provided.'", ")" ]
Uses provided filename patterns to write contents to a new path, given
a corresponding entity map.

Args:
    path (str): Destination path of the desired contents.
    contents (str): Raw text or binary encoded string of contents to
        write to the new path.
    link_to (str): Optional path to which a symbolic link will be
        created. Used as an alternative to, and takes priority over,
        the contents argument.
    content_mode (str): Either 'text' or 'binary' to indicate the
        writing mode for the new file. Only relevant if contents is
        provided.
    root (str): Optional root directory that all patterns are relative
        to. Defaults to current working directory.
    conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
        that defines the desired action when the output path already
        exists. 'fail' raises an exception; 'skip' does nothing;
        'overwrite' overwrites the existing file; 'append' adds a
        suffix to each file copy, starting with 1. Default is 'fail'.
[ "Uses", "provided", "filename", "patterns", "to", "write", "contents", "to", "a", "new", "path", "given", "a", "corresponding", "entity", "map", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/writing.py#L109-L177
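A sketch of the 'append' suffix rule, mirroring the corrected stem/extension handling in the function above (the filename is hypothetical):

from os.path import splitext

path_splits = list(splitext('report.txt'))
path_splits[0] = path_splits[0] + '_%d' % 1
print(''.join(path_splits))  # report_1.txt -- suffix inserted before the extension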
17,689
bids-standard/pybids
bids/reports/report.py
BIDSReport.generate
def generate(self, **kwargs):
        """Generate the methods section.

        Parameters
        ----------
        task_converter : :obj:`dict`, optional
            A dictionary with information for converting task names from
            BIDS filename format to human-readable strings.

        Returns
        -------
        counter : :obj:`collections.Counter`
            A dictionary of unique descriptions across subjects in the
            dataset, along with the number of times each pattern occurred.
            In cases where all subjects underwent the same protocol, the
            most common pattern is most likely the most complete. In cases
            where the dataset contains multiple protocols, each pattern
            will need to be inspected manually.

        Examples
        --------
        >>> from os.path import join
        >>> from bids.layout import BIDSLayout
        >>> from bids.reports import BIDSReport
        >>> from bids.tests import get_test_data_path
        >>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic'))
        >>> report = BIDSReport(layout)
        >>> counter = report.generate(session='01')
        >>> counter.most_common()[0][0]
        """
        descriptions = []

        subjs = self.layout.get_subjects(**kwargs)
        kwargs = {k: v for k, v in kwargs.items() if k != 'subject'}
        for sid in subjs:
            descriptions.append(self._report_subject(subject=sid, **kwargs))
        counter = Counter(descriptions)
        print('Number of patterns detected: {0}'.format(len(counter.keys())))
        print(utils.reminder())
        return counter
python
def generate(self, **kwargs):
        """Generate the methods section.

        Parameters
        ----------
        task_converter : :obj:`dict`, optional
            A dictionary with information for converting task names from
            BIDS filename format to human-readable strings.

        Returns
        -------
        counter : :obj:`collections.Counter`
            A dictionary of unique descriptions across subjects in the
            dataset, along with the number of times each pattern occurred.
            In cases where all subjects underwent the same protocol, the
            most common pattern is most likely the most complete. In cases
            where the dataset contains multiple protocols, each pattern
            will need to be inspected manually.

        Examples
        --------
        >>> from os.path import join
        >>> from bids.layout import BIDSLayout
        >>> from bids.reports import BIDSReport
        >>> from bids.tests import get_test_data_path
        >>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic'))
        >>> report = BIDSReport(layout)
        >>> counter = report.generate(session='01')
        >>> counter.most_common()[0][0]
        """
        descriptions = []

        subjs = self.layout.get_subjects(**kwargs)
        kwargs = {k: v for k, v in kwargs.items() if k != 'subject'}
        for sid in subjs:
            descriptions.append(self._report_subject(subject=sid, **kwargs))
        counter = Counter(descriptions)
        print('Number of patterns detected: {0}'.format(len(counter.keys())))
        print(utils.reminder())
        return counter
[ "def", "generate", "(", "self", ",", "*", "*", "kwargs", ")", ":", "descriptions", "=", "[", "]", "subjs", "=", "self", ".", "layout", ".", "get_subjects", "(", "*", "*", "kwargs", ")", "kwargs", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "k", "!=", "'subject'", "}", "for", "sid", "in", "subjs", ":", "descriptions", ".", "append", "(", "self", ".", "_report_subject", "(", "subject", "=", "sid", ",", "*", "*", "kwargs", ")", ")", "counter", "=", "Counter", "(", "descriptions", ")", "print", "(", "'Number of patterns detected: {0}'", ".", "format", "(", "len", "(", "counter", ".", "keys", "(", ")", ")", ")", ")", "print", "(", "utils", ".", "reminder", "(", ")", ")", "return", "counter" ]
Generate the methods section.

Parameters
----------
task_converter : :obj:`dict`, optional
    A dictionary with information for converting task names from
    BIDS filename format to human-readable strings.

Returns
-------
counter : :obj:`collections.Counter`
    A dictionary of unique descriptions across subjects in the
    dataset, along with the number of times each pattern occurred.
    In cases where all subjects underwent the same protocol, the
    most common pattern is most likely the most complete. In cases
    where the dataset contains multiple protocols, each pattern
    will need to be inspected manually.

Examples
--------
>>> from os.path import join
>>> from bids.layout import BIDSLayout
>>> from bids.reports import BIDSReport
>>> from bids.tests import get_test_data_path
>>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic'))
>>> report = BIDSReport(layout)
>>> counter = report.generate(session='01')
>>> counter.most_common()[0][0]
[ "Generate", "the", "methods", "section", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/report.py#L53-L92
17,690
bids-standard/pybids
bids/reports/report.py
BIDSReport._report_subject
def _report_subject(self, subject, **kwargs):
        """Write a report for a single subject.

        Parameters
        ----------
        subject : :obj:`str`
            Subject ID.

        Attributes
        ----------
        layout : :obj:`bids.layout.BIDSLayout`
            Layout object for a BIDS dataset.
        config : :obj:`dict`
            Configuration info for methods generation.

        Returns
        -------
        description : :obj:`str`
            A publication-ready report of the dataset's data acquisition
            information. Each scan type is given its own paragraph.
        """
        description_list = []
        # Remove sess from kwargs if provided, else set sess as all available
        sessions = kwargs.pop('session',
                              self.layout.get_sessions(subject=subject,
                                                       **kwargs))
        if not sessions:
            sessions = [None]
        elif not isinstance(sessions, list):
            sessions = [sessions]

        for ses in sessions:
            niftis = self.layout.get(subject=subject, extensions='nii.gz',
                                     **kwargs)
            if niftis:
                description_list.append('For session {0}:'.format(ses))
                description_list += parsing.parse_niftis(self.layout, niftis,
                                                         subject, self.config,
                                                         session=ses)
                metadata = self.layout.get_metadata(niftis[0].path)
            else:
                raise Exception('No niftis for subject {0}'.format(subject))

        # Assume all data were converted the same way and use the first
        # nifti file's json from the last session for conversion information.
        if 'metadata' not in vars():
            raise Exception('No valid jsons found. Cannot generate final '
                            'paragraph.')

        description = '\n\t'.join(description_list)
        description = description.replace('\tFor session', '\nFor session')
        description += '\n\n{0}'.format(parsing.final_paragraph(metadata))
        return description
python
def _report_subject(self, subject, **kwargs):
        """Write a report for a single subject.

        Parameters
        ----------
        subject : :obj:`str`
            Subject ID.

        Attributes
        ----------
        layout : :obj:`bids.layout.BIDSLayout`
            Layout object for a BIDS dataset.
        config : :obj:`dict`
            Configuration info for methods generation.

        Returns
        -------
        description : :obj:`str`
            A publication-ready report of the dataset's data acquisition
            information. Each scan type is given its own paragraph.
        """
        description_list = []
        # Remove sess from kwargs if provided, else set sess as all available
        sessions = kwargs.pop('session',
                              self.layout.get_sessions(subject=subject,
                                                       **kwargs))
        if not sessions:
            sessions = [None]
        elif not isinstance(sessions, list):
            sessions = [sessions]

        for ses in sessions:
            niftis = self.layout.get(subject=subject, extensions='nii.gz',
                                     **kwargs)
            if niftis:
                description_list.append('For session {0}:'.format(ses))
                description_list += parsing.parse_niftis(self.layout, niftis,
                                                         subject, self.config,
                                                         session=ses)
                metadata = self.layout.get_metadata(niftis[0].path)
            else:
                raise Exception('No niftis for subject {0}'.format(subject))

        # Assume all data were converted the same way and use the first
        # nifti file's json from the last session for conversion information.
        if 'metadata' not in vars():
            raise Exception('No valid jsons found. Cannot generate final '
                            'paragraph.')

        description = '\n\t'.join(description_list)
        description = description.replace('\tFor session', '\nFor session')
        description += '\n\n{0}'.format(parsing.final_paragraph(metadata))
        return description
[ "def", "_report_subject", "(", "self", ",", "subject", ",", "*", "*", "kwargs", ")", ":", "description_list", "=", "[", "]", "# Remove sess from kwargs if provided, else set sess as all available", "sessions", "=", "kwargs", ".", "pop", "(", "'session'", ",", "self", ".", "layout", ".", "get_sessions", "(", "subject", "=", "subject", ",", "*", "*", "kwargs", ")", ")", "if", "not", "sessions", ":", "sessions", "=", "[", "None", "]", "elif", "not", "isinstance", "(", "sessions", ",", "list", ")", ":", "sessions", "=", "[", "sessions", "]", "for", "ses", "in", "sessions", ":", "niftis", "=", "self", ".", "layout", ".", "get", "(", "subject", "=", "subject", ",", "extensions", "=", "'nii.gz'", ",", "*", "*", "kwargs", ")", "if", "niftis", ":", "description_list", ".", "append", "(", "'For session {0}:'", ".", "format", "(", "ses", ")", ")", "description_list", "+=", "parsing", ".", "parse_niftis", "(", "self", ".", "layout", ",", "niftis", ",", "subject", ",", "self", ".", "config", ",", "session", "=", "ses", ")", "metadata", "=", "self", ".", "layout", ".", "get_metadata", "(", "niftis", "[", "0", "]", ".", "path", ")", "else", ":", "raise", "Exception", "(", "'No niftis for subject {0}'", ".", "format", "(", "subject", ")", ")", "# Assume all data were converted the same way and use the last nifti", "# file's json for conversion information.", "if", "'metadata'", "not", "in", "vars", "(", ")", ":", "raise", "Exception", "(", "'No valid jsons found. Cannot generate final '", "'paragraph.'", ")", "description", "=", "'\\n\\t'", ".", "join", "(", "description_list", ")", "description", "=", "description", ".", "replace", "(", "'\\tFor session'", ",", "'\\nFor session'", ")", "description", "+=", "'\\n\\n{0}'", ".", "format", "(", "parsing", ".", "final_paragraph", "(", "metadata", ")", ")", "return", "description" ]
Write a report for a single subject.

Parameters
----------
subject : :obj:`str`
    Subject ID.

Attributes
----------
layout : :obj:`bids.layout.BIDSLayout`
    Layout object for a BIDS dataset.
config : :obj:`dict`
    Configuration info for methods generation.

Returns
-------
description : :obj:`str`
    A publication-ready report of the dataset's data acquisition
    information. Each scan type is given its own paragraph.
[ "Write", "a", "report", "for", "a", "single", "subject", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/report.py#L94-L147
17,691
bids-standard/pybids
bids/analysis/hrf.py
_gamma_difference_hrf
def _gamma_difference_hrf(tr, oversampling=50, time_length=32., onset=0.,
                          delay=6, undershoot=16., dispersion=1.,
                          u_dispersion=1., ratio=0.167):
    """ Compute an hrf as the difference of two gamma functions

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional (default=50)
        temporal oversampling factor

    time_length : float, optional (default=32)
        hrf kernel length, in seconds

    onset : float
        onset time of the hrf

    delay : float, optional
        delay parameter of the hrf (in s.)

    undershoot : float, optional
        undershoot parameter of the hrf (in s.)

    dispersion : float, optional
        dispersion parameter for the first gamma function

    u_dispersion : float, optional
        dispersion parameter for the second gamma function

    ratio : float, optional
        ratio of the two gamma components

    Returns
    -------
    hrf : array of shape(length / tr * oversampling, dtype=float)
        hrf sampling on the oversampled time grid
    """
    from scipy.stats import gamma
    dt = tr / oversampling
    # Use the builtin int: np.int is a deprecated alias removed in NumPy 1.24
    time_stamps = np.linspace(0, time_length,
                              np.rint(float(time_length) / dt).astype(int))
    time_stamps -= onset
    hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) -\
        ratio * gamma.pdf(
            time_stamps, undershoot / u_dispersion, dt / u_dispersion)
    hrf /= hrf.sum()
    return hrf
python
def _gamma_difference_hrf(tr, oversampling=50, time_length=32., onset=0.,
                          delay=6, undershoot=16., dispersion=1.,
                          u_dispersion=1., ratio=0.167):
    """ Compute an hrf as the difference of two gamma functions

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional (default=50)
        temporal oversampling factor

    time_length : float, optional (default=32)
        hrf kernel length, in seconds

    onset : float
        onset time of the hrf

    delay : float, optional
        delay parameter of the hrf (in s.)

    undershoot : float, optional
        undershoot parameter of the hrf (in s.)

    dispersion : float, optional
        dispersion parameter for the first gamma function

    u_dispersion : float, optional
        dispersion parameter for the second gamma function

    ratio : float, optional
        ratio of the two gamma components

    Returns
    -------
    hrf : array of shape(length / tr * oversampling, dtype=float)
        hrf sampling on the oversampled time grid
    """
    from scipy.stats import gamma
    dt = tr / oversampling
    # Use the builtin int: np.int is a deprecated alias removed in NumPy 1.24
    time_stamps = np.linspace(0, time_length,
                              np.rint(float(time_length) / dt).astype(int))
    time_stamps -= onset
    hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) -\
        ratio * gamma.pdf(
            time_stamps, undershoot / u_dispersion, dt / u_dispersion)
    hrf /= hrf.sum()
    return hrf
[ "def", "_gamma_difference_hrf", "(", "tr", ",", "oversampling", "=", "50", ",", "time_length", "=", "32.", ",", "onset", "=", "0.", ",", "delay", "=", "6", ",", "undershoot", "=", "16.", ",", "dispersion", "=", "1.", ",", "u_dispersion", "=", "1.", ",", "ratio", "=", "0.167", ")", ":", "from", "scipy", ".", "stats", "import", "gamma", "dt", "=", "tr", "/", "oversampling", "time_stamps", "=", "np", ".", "linspace", "(", "0", ",", "time_length", ",", "np", ".", "rint", "(", "float", "(", "time_length", ")", "/", "dt", ")", ".", "astype", "(", "np", ".", "int", ")", ")", "time_stamps", "-=", "onset", "hrf", "=", "gamma", ".", "pdf", "(", "time_stamps", ",", "delay", "/", "dispersion", ",", "dt", "/", "dispersion", ")", "-", "ratio", "*", "gamma", ".", "pdf", "(", "time_stamps", ",", "undershoot", "/", "u_dispersion", ",", "dt", "/", "u_dispersion", ")", "hrf", "/=", "hrf", ".", "sum", "(", ")", "return", "hrf" ]
Compute an hrf as the difference of two gamma functions

Parameters
----------
tr : float
    scan repeat time, in seconds

oversampling : int, optional (default=50)
    temporal oversampling factor

time_length : float, optional (default=32)
    hrf kernel length, in seconds

onset : float
    onset time of the hrf

delay : float, optional
    delay parameter of the hrf (in s.)

undershoot : float, optional
    undershoot parameter of the hrf (in s.)

dispersion : float, optional
    dispersion parameter for the first gamma function

u_dispersion : float, optional
    dispersion parameter for the second gamma function

ratio : float, optional
    ratio of the two gamma components

Returns
-------
hrf : array of shape(length / tr * oversampling, dtype=float)
    hrf sampling on the oversampled time grid
[ "Compute", "an", "hrf", "as", "the", "difference", "of", "two", "gamma", "functions" ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L13-L61
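A hedged standalone sketch of the double-gamma shape computed above, using plain numpy/scipy rather than the pybids API; the 0.1 s grid and the simplified parametrization (unit dispersion, default loc/scale) are illustrative assumptions:

import numpy as np
from scipy.stats import gamma

t = np.arange(0, 32, 0.1)
hrf = gamma.pdf(t, 6) - 0.167 * gamma.pdf(t, 16)  # positive lobe minus undershoot
hrf /= hrf.sum()
print(t[hrf.argmax()])  # peak near 5 s, as expected for the canonical shape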
17,692
bids-standard/pybids
bids/analysis/hrf.py
spm_hrf
def spm_hrf(tr, oversampling=50, time_length=32., onset=0.):
    """ Implementation of the SPM hrf model

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional
        temporal oversampling factor

    time_length : float, optional
        hrf kernel length, in seconds

    onset : float, optional
        hrf onset time, in seconds

    Returns
    -------
    hrf : array of shape(length / tr * oversampling, dtype=float)
        hrf sampling on the oversampled time grid
    """
    return _gamma_difference_hrf(tr, oversampling, time_length, onset)
python
def spm_hrf(tr, oversampling=50, time_length=32., onset=0.):
    """ Implementation of the SPM hrf model

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional
        temporal oversampling factor

    time_length : float, optional
        hrf kernel length, in seconds

    onset : float, optional
        hrf onset time, in seconds

    Returns
    -------
    hrf : array of shape(length / tr * oversampling, dtype=float)
        hrf sampling on the oversampled time grid
    """
    return _gamma_difference_hrf(tr, oversampling, time_length, onset)
[ "def", "spm_hrf", "(", "tr", ",", "oversampling", "=", "50", ",", "time_length", "=", "32.", ",", "onset", "=", "0.", ")", ":", "return", "_gamma_difference_hrf", "(", "tr", ",", "oversampling", ",", "time_length", ",", "onset", ")" ]
Implementation of the SPM hrf model

Parameters
----------
tr : float
    scan repeat time, in seconds

oversampling : int, optional
    temporal oversampling factor

time_length : float, optional
    hrf kernel length, in seconds

onset : float, optional
    hrf onset time, in seconds

Returns
-------
hrf : array of shape(length / tr * oversampling, dtype=float)
    hrf sampling on the oversampled time grid
[ "Implementation", "of", "the", "SPM", "hrf", "model" ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L64-L86
17,693
bids-standard/pybids
bids/analysis/hrf.py
glover_hrf
def glover_hrf(tr, oversampling=50, time_length=32., onset=0.):
    """ Implementation of the Glover hrf model

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional
        temporal oversampling factor

    time_length : float, optional
        hrf kernel length, in seconds

    onset : float, optional
        onset of the response

    Returns
    -------
    hrf : array of shape(length / tr * oversampling, dtype=float)
        hrf sampling on the oversampled time grid
    """
    return _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                 delay=6, undershoot=12., dispersion=.9,
                                 u_dispersion=.9, ratio=.35)
python
def glover_hrf(tr, oversampling=50, time_length=32., onset=0.):
    """ Implementation of the Glover hrf model

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional
        temporal oversampling factor

    time_length : float, optional
        hrf kernel length, in seconds

    onset : float, optional
        onset of the response

    Returns
    -------
    hrf : array of shape(length / tr * oversampling, dtype=float)
        hrf sampling on the oversampled time grid
    """
    return _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                 delay=6, undershoot=12., dispersion=.9,
                                 u_dispersion=.9, ratio=.35)
[ "def", "glover_hrf", "(", "tr", ",", "oversampling", "=", "50", ",", "time_length", "=", "32.", ",", "onset", "=", "0.", ")", ":", "return", "_gamma_difference_hrf", "(", "tr", ",", "oversampling", ",", "time_length", ",", "onset", ",", "delay", "=", "6", ",", "undershoot", "=", "12.", ",", "dispersion", "=", ".9", ",", "u_dispersion", "=", ".9", ",", "ratio", "=", ".35", ")" ]
Implementation of the Glover hrf model

Parameters
----------
tr : float
    scan repeat time, in seconds

oversampling : int, optional
    temporal oversampling factor

time_length : float, optional
    hrf kernel length, in seconds

onset : float, optional
    onset of the response

Returns
-------
hrf : array of shape(length / tr * oversampling, dtype=float)
    hrf sampling on the oversampled time grid
[ "Implementation", "of", "the", "Glover", "hrf", "model" ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L89-L113
17,694
bids-standard/pybids
bids/analysis/hrf.py
spm_dispersion_derivative
def spm_dispersion_derivative(tr, oversampling=50, time_length=32., onset=0.):
    """Implementation of the SPM dispersion derivative hrf model

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional
        temporal oversampling factor

    time_length : float, optional
        hrf kernel length, in seconds

    onset : float, optional
        onset of the response in seconds

    Returns
    -------
    dhrf : array of shape(length / tr * oversampling), dtype=float
        dhrf sampling on the oversampled time grid
    """
    dd = .01
    dhrf = 1. / dd * (
        - _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                dispersion=1. + dd)
        + _gamma_difference_hrf(tr, oversampling, time_length, onset))
    return dhrf
python
def spm_dispersion_derivative(tr, oversampling=50, time_length=32., onset=0.):
    """Implementation of the SPM dispersion derivative hrf model

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional
        temporal oversampling factor

    time_length : float, optional
        hrf kernel length, in seconds

    onset : float, optional
        onset of the response in seconds

    Returns
    -------
    dhrf : array of shape(length / tr * oversampling), dtype=float
        dhrf sampling on the oversampled time grid
    """
    dd = .01
    dhrf = 1. / dd * (
        - _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                dispersion=1. + dd)
        + _gamma_difference_hrf(tr, oversampling, time_length, onset))
    return dhrf
[ "def", "spm_dispersion_derivative", "(", "tr", ",", "oversampling", "=", "50", ",", "time_length", "=", "32.", ",", "onset", "=", "0.", ")", ":", "dd", "=", ".01", "dhrf", "=", "1.", "/", "dd", "*", "(", "-", "_gamma_difference_hrf", "(", "tr", ",", "oversampling", ",", "time_length", ",", "onset", ",", "dispersion", "=", "1.", "+", "dd", ")", "+", "_gamma_difference_hrf", "(", "tr", ",", "oversampling", ",", "time_length", ",", "onset", ")", ")", "return", "dhrf" ]
Implementation of the SPM dispersion derivative hrf model

Parameters
----------
tr : float
    scan repeat time, in seconds

oversampling : int, optional
    temporal oversampling factor

time_length : float, optional
    hrf kernel length, in seconds

onset : float, optional
    onset of the response in seconds

Returns
-------
dhrf : array of shape(length / tr * oversampling), dtype=float
    dhrf sampling on the oversampled time grid
[ "Implementation", "of", "the", "SPM", "dispersion", "derivative", "hrf", "model" ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L169-L196
17,695
bids-standard/pybids
bids/analysis/hrf.py
glover_dispersion_derivative
def glover_dispersion_derivative(tr, oversampling=50, time_length=32.,
                                 onset=0.):
    """Implementation of the Glover dispersion derivative hrf model

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional
        temporal oversampling factor

    time_length : float, optional
        hrf kernel length, in seconds

    onset : float, optional
        onset of the response in seconds

    Returns
    -------
    dhrf : array of shape(length / tr * oversampling), dtype=float
        dhrf sampling on the oversampled time grid
    """
    dd = .01
    dhrf = 1. / dd * (
        - _gamma_difference_hrf(
            tr, oversampling, time_length, onset,
            delay=6, undershoot=12., dispersion=.9 + dd, ratio=.35)
        + _gamma_difference_hrf(
            tr, oversampling, time_length, onset, delay=6,
            undershoot=12., dispersion=.9, ratio=.35))
    return dhrf
python
def glover_dispersion_derivative(tr, oversampling=50, time_length=32.,
                                 onset=0.):
    """Implementation of the Glover dispersion derivative hrf model

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional
        temporal oversampling factor

    time_length : float, optional
        hrf kernel length, in seconds

    onset : float, optional
        onset of the response in seconds

    Returns
    -------
    dhrf : array of shape(length / tr * oversampling), dtype=float
        dhrf sampling on the oversampled time grid
    """
    dd = .01
    dhrf = 1. / dd * (
        - _gamma_difference_hrf(
            tr, oversampling, time_length, onset,
            delay=6, undershoot=12., dispersion=.9 + dd, ratio=.35)
        + _gamma_difference_hrf(
            tr, oversampling, time_length, onset, delay=6,
            undershoot=12., dispersion=.9, ratio=.35))
    return dhrf
[ "def", "glover_dispersion_derivative", "(", "tr", ",", "oversampling", "=", "50", ",", "time_length", "=", "32.", ",", "onset", "=", "0.", ")", ":", "dd", "=", ".01", "dhrf", "=", "1.", "/", "dd", "*", "(", "-", "_gamma_difference_hrf", "(", "tr", ",", "oversampling", ",", "time_length", ",", "onset", ",", "delay", "=", "6", ",", "undershoot", "=", "12.", ",", "dispersion", "=", ".9", "+", "dd", ",", "ratio", "=", ".35", ")", "+", "_gamma_difference_hrf", "(", "tr", ",", "oversampling", ",", "time_length", ",", "onset", ",", "delay", "=", "6", ",", "undershoot", "=", "12.", ",", "dispersion", "=", ".9", ",", "ratio", "=", ".35", ")", ")", "return", "dhrf" ]
Implementation of the Glover dispersion derivative hrf model

Parameters
----------
tr : float
    scan repeat time, in seconds

oversampling : int, optional
    temporal oversampling factor

time_length : float, optional
    hrf kernel length, in seconds

onset : float, optional
    onset of the response in seconds

Returns
-------
dhrf : array of shape(length / tr * oversampling), dtype=float
    dhrf sampling on the oversampled time grid
[ "Implementation", "of", "the", "Glover", "dispersion", "derivative", "hrf", "model" ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L199-L230
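The two dispersion derivatives above are backward finite differences in the dispersion parameter: (f(s) - f(s + dd)) / dd. A sketch of the trick with a simple stand-in function f (not the HRF itself):

import numpy as np

def f(t, s):
    # Stand-in for an HRF parametrized by dispersion s.
    return np.exp(-t / s)

t, s, dd = 1.0, 0.9, 0.01
approx = (f(t, s) - f(t, s + dd)) / dd   # mirrors the (-f(s+dd) + f(s)) / dd form above
exact = -t / s ** 2 * np.exp(-t / s)     # analytic value of -df/ds, for comparison
print(approx, exact)                     # the two agree to O(dd)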
17,696
bids-standard/pybids
bids/analysis/hrf.py
_sample_condition
def _sample_condition(exp_condition, frame_times, oversampling=50,
                      min_onset=-24):
    """Make a possibly oversampled event regressor from condition information.

    Parameters
    ----------
    exp_condition : arraylike of shape (3, n_events)
        yields description of events for this condition as a
        (onsets, durations, amplitudes) triplet

    frame_times : array of shape(n_scans)
        sample time points

    oversampling : int, optional
        factor for oversampling event regressor

    min_onset : float, optional
        minimal onset relative to frame_times[0] (in seconds)
        events that start before frame_times[0] + min_onset are not considered

    Returns
    -------
    regressor : array of shape(oversampling * n_scans)
        possibly oversampled event regressor

    hr_frame_times : array of shape(oversampling * n_scans)
        time points used for regressor sampling
    """
    # Find the high-resolution frame_times
    n = frame_times.size
    min_onset = float(min_onset)
    n_hr = ((n - 1) * 1. / (frame_times.max() - frame_times.min()) *
            (frame_times.max() * (1 + 1. / (n - 1)) - frame_times.min() -
             min_onset) * oversampling) + 1

    hr_frame_times = np.linspace(frame_times.min() + min_onset,
                                 frame_times.max() * (1 + 1. / (n - 1)),
                                 np.rint(n_hr).astype(int))

    # Get the condition information
    onsets, durations, values = tuple(map(np.asanyarray, exp_condition))
    if (onsets < frame_times[0] + min_onset).any():
        warnings.warn(('Some stimulus onsets are earlier than %s in the'
                       ' experiment and are thus not considered in the model'
                       % (frame_times[0] + min_onset)), UserWarning)

    # Set up the regressor timecourse
    tmax = len(hr_frame_times)
    regressor = np.zeros_like(hr_frame_times).astype(float)
    t_onset = np.minimum(np.searchsorted(hr_frame_times, onsets), tmax - 1)
    regressor[t_onset] += values
    t_offset = np.minimum(
        np.searchsorted(hr_frame_times, onsets + durations),
        tmax - 1)

    # Handle the case where duration is 0 by offsetting at t + 1
    for i, t in enumerate(t_offset):
        if t < (tmax - 1) and t == t_onset[i]:
            t_offset[i] += 1

    regressor[t_offset] -= values
    regressor = np.cumsum(regressor)

    return regressor, hr_frame_times
python
def _sample_condition(exp_condition, frame_times, oversampling=50,
                      min_onset=-24):
    """Make a possibly oversampled event regressor from condition information.

    Parameters
    ----------
    exp_condition : arraylike of shape (3, n_events)
        yields description of events for this condition as a
        (onsets, durations, amplitudes) triplet

    frame_times : array of shape(n_scans)
        sample time points

    oversampling : int, optional
        factor for oversampling event regressor

    min_onset : float, optional
        minimal onset relative to frame_times[0] (in seconds)
        events that start before frame_times[0] + min_onset are not considered

    Returns
    -------
    regressor : array of shape(oversampling * n_scans)
        possibly oversampled event regressor

    hr_frame_times : array of shape(oversampling * n_scans)
        time points used for regressor sampling
    """
    # Find the high-resolution frame_times
    n = frame_times.size
    min_onset = float(min_onset)
    n_hr = ((n - 1) * 1. / (frame_times.max() - frame_times.min()) *
            (frame_times.max() * (1 + 1. / (n - 1)) - frame_times.min() -
             min_onset) * oversampling) + 1

    hr_frame_times = np.linspace(frame_times.min() + min_onset,
                                 frame_times.max() * (1 + 1. / (n - 1)),
                                 np.rint(n_hr).astype(int))

    # Get the condition information
    onsets, durations, values = tuple(map(np.asanyarray, exp_condition))
    if (onsets < frame_times[0] + min_onset).any():
        warnings.warn(('Some stimulus onsets are earlier than %s in the'
                       ' experiment and are thus not considered in the model'
                       % (frame_times[0] + min_onset)), UserWarning)

    # Set up the regressor timecourse
    tmax = len(hr_frame_times)
    regressor = np.zeros_like(hr_frame_times).astype(float)
    t_onset = np.minimum(np.searchsorted(hr_frame_times, onsets), tmax - 1)
    regressor[t_onset] += values
    t_offset = np.minimum(
        np.searchsorted(hr_frame_times, onsets + durations),
        tmax - 1)

    # Handle the case where duration is 0 by offsetting at t + 1
    for i, t in enumerate(t_offset):
        if t < (tmax - 1) and t == t_onset[i]:
            t_offset[i] += 1

    regressor[t_offset] -= values
    regressor = np.cumsum(regressor)

    return regressor, hr_frame_times
[ "def", "_sample_condition", "(", "exp_condition", ",", "frame_times", ",", "oversampling", "=", "50", ",", "min_onset", "=", "-", "24", ")", ":", "# Find the high-resolution frame_times", "n", "=", "frame_times", ".", "size", "min_onset", "=", "float", "(", "min_onset", ")", "n_hr", "=", "(", "(", "n", "-", "1", ")", "*", "1.", "/", "(", "frame_times", ".", "max", "(", ")", "-", "frame_times", ".", "min", "(", ")", ")", "*", "(", "frame_times", ".", "max", "(", ")", "*", "(", "1", "+", "1.", "/", "(", "n", "-", "1", ")", ")", "-", "frame_times", ".", "min", "(", ")", "-", "min_onset", ")", "*", "oversampling", ")", "+", "1", "hr_frame_times", "=", "np", ".", "linspace", "(", "frame_times", ".", "min", "(", ")", "+", "min_onset", ",", "frame_times", ".", "max", "(", ")", "*", "(", "1", "+", "1.", "/", "(", "n", "-", "1", ")", ")", ",", "np", ".", "rint", "(", "n_hr", ")", ".", "astype", "(", "np", ".", "int", ")", ")", "# Get the condition information", "onsets", ",", "durations", ",", "values", "=", "tuple", "(", "map", "(", "np", ".", "asanyarray", ",", "exp_condition", ")", ")", "if", "(", "onsets", "<", "frame_times", "[", "0", "]", "+", "min_onset", ")", ".", "any", "(", ")", ":", "warnings", ".", "warn", "(", "(", "'Some stimulus onsets are earlier than %s in the'", "' experiment and are thus not considered in the model'", "%", "(", "frame_times", "[", "0", "]", "+", "min_onset", ")", ")", ",", "UserWarning", ")", "# Set up the regressor timecourse", "tmax", "=", "len", "(", "hr_frame_times", ")", "regressor", "=", "np", ".", "zeros_like", "(", "hr_frame_times", ")", ".", "astype", "(", "np", ".", "float", ")", "t_onset", "=", "np", ".", "minimum", "(", "np", ".", "searchsorted", "(", "hr_frame_times", ",", "onsets", ")", ",", "tmax", "-", "1", ")", "regressor", "[", "t_onset", "]", "+=", "values", "t_offset", "=", "np", ".", "minimum", "(", "np", ".", "searchsorted", "(", "hr_frame_times", ",", "onsets", "+", "durations", ")", ",", "tmax", "-", "1", ")", "# Handle the case where duration is 0 by offsetting at t + 1", "for", "i", ",", "t", "in", "enumerate", "(", "t_offset", ")", ":", "if", "t", "<", "(", "tmax", "-", "1", ")", "and", "t", "==", "t_onset", "[", "i", "]", ":", "t_offset", "[", "i", "]", "+=", "1", "regressor", "[", "t_offset", "]", "-=", "values", "regressor", "=", "np", ".", "cumsum", "(", "regressor", ")", "return", "regressor", ",", "hr_frame_times" ]
Make a possibly oversampled event regressor from condition information.

Parameters
----------
exp_condition : arraylike of shape (3, n_events)
    yields description of events for this condition as a
    (onsets, durations, amplitudes) triplet

frame_times : array of shape(n_scans)
    sample time points

oversampling : int, optional
    factor for oversampling event regressor

min_onset : float, optional
    minimal onset relative to frame_times[0] (in seconds)
    events that start before frame_times[0] + min_onset are not considered

Returns
-------
regressor : array of shape(oversampling * n_scans)
    possibly oversampled event regressor

hr_frame_times : array of shape(oversampling * n_scans)
    time points used for regressor sampling
[ "Make", "a", "possibly", "oversampled", "event", "regressor", "from", "condition", "information", "." ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L233-L295
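A standalone sketch of the boxcar-by-cumsum trick used above: add the amplitude at each onset index, subtract it at each offset index, then integrate with cumsum. The grid and event values here are hypothetical:

import numpy as np

grid = np.linspace(0, 10, 101)  # 0.1 s resolution
onsets, durations, values = np.array([2.]), np.array([3.]), np.array([1.])
reg = np.zeros_like(grid)
t_on = np.searchsorted(grid, onsets)
t_off = np.searchsorted(grid, onsets + durations)
reg[t_on] += values
reg[t_off] -= values
print(np.cumsum(reg)[[15, 30, 60]])  # [0. 1. 0.] -- nonzero only inside [2, 5)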
17,697
bids-standard/pybids
bids/analysis/hrf.py
_resample_regressor
def _resample_regressor(hr_regressor, hr_frame_times, frame_times):
    """ This function sub-samples the regressor at the frame times

    Parameters
    ----------
    hr_regressor : array of shape(n_samples),
        the regressor time course sampled at high temporal resolution

    hr_frame_times : array of shape(n_samples),
        the corresponding time stamps

    frame_times : array of shape(n_scans),
        the desired time stamps

    Returns
    -------
    regressor : array of shape(n_scans)
        the resampled regressor
    """
    from scipy.interpolate import interp1d
    f = interp1d(hr_frame_times, hr_regressor)
    return f(frame_times).T
python
def _resample_regressor(hr_regressor, hr_frame_times, frame_times):
    """ This function sub-samples the regressor at the frame times

    Parameters
    ----------
    hr_regressor : array of shape(n_samples),
        the regressor time course sampled at high temporal resolution

    hr_frame_times : array of shape(n_samples),
        the corresponding time stamps

    frame_times : array of shape(n_scans),
        the desired time stamps

    Returns
    -------
    regressor : array of shape(n_scans)
        the resampled regressor
    """
    from scipy.interpolate import interp1d
    f = interp1d(hr_frame_times, hr_regressor)
    return f(frame_times).T
[ "def", "_resample_regressor", "(", "hr_regressor", ",", "hr_frame_times", ",", "frame_times", ")", ":", "from", "scipy", ".", "interpolate", "import", "interp1d", "f", "=", "interp1d", "(", "hr_frame_times", ",", "hr_regressor", ")", "return", "f", "(", "frame_times", ")", ".", "T" ]
This function sub-samples the regressor at the frame times

Parameters
----------
hr_regressor : array of shape(n_samples),
    the regressor time course sampled at high temporal resolution

hr_frame_times : array of shape(n_samples),
    the corresponding time stamps

frame_times : array of shape(n_scans),
    the desired time stamps

Returns
-------
regressor : array of shape(n_scans)
    the resampled regressor
[ "this", "function", "sub", "-", "samples", "the", "regressors", "at", "frame", "times" ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L298-L319
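A small standalone sketch of the same interp1d sub-sampling, on a hypothetical sine signal:

import numpy as np
from scipy.interpolate import interp1d

hr_times = np.linspace(0, 10, 1001)   # high-resolution grid
hr_signal = np.sin(hr_times)
scan_times = np.arange(0, 10, 2.0)    # coarse scan grid
f = interp1d(hr_times, hr_signal)
print(f(scan_times))                  # close to np.sin(scan_times)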
17,698
bids-standard/pybids
bids/analysis/hrf.py
_orthogonalize
def _orthogonalize(X):
    """ Orthogonalize every column of design `X` w.r.t preceding columns

    Parameters
    ----------
    X : array of shape(n, p)
        the data to be orthogonalized

    Returns
    -------
    X : array of shape(n, p)
        the data after orthogonalization

    Notes
    -----
    X is changed in place. The columns are not normalized
    """
    if X.size == X.shape[0]:
        return X
    from scipy.linalg import pinv, norm
    for i in range(1, X.shape[1]):
        X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
        # X[:, i] /= norm(X[:, i])
    return X
python
def _orthogonalize(X):
    """ Orthogonalize every column of design `X` w.r.t preceding columns

    Parameters
    ----------
    X : array of shape(n, p)
        the data to be orthogonalized

    Returns
    -------
    X : array of shape(n, p)
        the data after orthogonalization

    Notes
    -----
    X is changed in place. The columns are not normalized
    """
    if X.size == X.shape[0]:
        return X
    from scipy.linalg import pinv, norm
    for i in range(1, X.shape[1]):
        X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
        # X[:, i] /= norm(X[:, i])
    return X
[ "def", "_orthogonalize", "(", "X", ")", ":", "if", "X", ".", "size", "==", "X", ".", "shape", "[", "0", "]", ":", "return", "X", "from", "scipy", ".", "linalg", "import", "pinv", ",", "norm", "for", "i", "in", "range", "(", "1", ",", "X", ".", "shape", "[", "1", "]", ")", ":", "X", "[", ":", ",", "i", "]", "-=", "np", ".", "dot", "(", "np", ".", "dot", "(", "X", "[", ":", ",", "i", "]", ",", "X", "[", ":", ",", ":", "i", "]", ")", ",", "pinv", "(", "X", "[", ":", ",", ":", "i", "]", ")", ")", "# X[:, i] /= norm(X[:, i])", "return", "X" ]
Orthogonalize every column of design `X` w.r.t preceding columns

Parameters
----------
X : array of shape(n, p)
    the data to be orthogonalized

Returns
-------
X : array of shape(n, p)
    the data after orthogonalization

Notes
-----
X is changed in place. The columns are not normalized
[ "Orthogonalize", "every", "column", "of", "design", "X", "w", ".", "r", ".", "t", "preceding", "columns" ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L322-L345
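A sketch verifying the residualization step above on random data: after subtracting the least-squares projection onto the preceding column, the two columns are orthogonal. The matrix here is hypothetical:

import numpy as np
from scipy.linalg import pinv

rng = np.random.RandomState(0)
X = rng.randn(20, 2)
# Residualize column 1 against column 0, as in the loop above.
X[:, 1] -= np.dot(np.dot(X[:, 1], X[:, :1]), pinv(X[:, :1]))
print(np.allclose(np.dot(X[:, 0], X[:, 1]), 0))  # True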
17,699
bids-standard/pybids
bids/analysis/hrf.py
_regressor_names
def _regressor_names(con_name, hrf_model, fir_delays=None):
    """ Returns a list of regressor names, computed from con-name and
    hrf type

    Parameters
    ----------
    con_name : string
        identifier of the condition

    hrf_model : string or None,
        hrf model chosen

    fir_delays : 1D array_like, optional,
        Delays used in case of an FIR model

    Returns
    -------
    names : list of strings,
        regressor names
    """
    if hrf_model in ['glover', 'spm', None]:
        return [con_name]
    elif hrf_model in ["glover + derivative", 'spm + derivative']:
        return [con_name, con_name + "_derivative"]
    elif hrf_model in ['spm + derivative + dispersion',
                       'glover + derivative + dispersion']:
        return [con_name, con_name + "_derivative",
                con_name + "_dispersion"]
    elif hrf_model == 'fir':
        return [con_name + "_delay_%d" % i for i in fir_delays]
python
def _regressor_names(con_name, hrf_model, fir_delays=None):
    """ Returns a list of regressor names, computed from con-name and
    hrf type

    Parameters
    ----------
    con_name : string
        identifier of the condition

    hrf_model : string or None,
        hrf model chosen

    fir_delays : 1D array_like, optional,
        Delays used in case of an FIR model

    Returns
    -------
    names : list of strings,
        regressor names
    """
    if hrf_model in ['glover', 'spm', None]:
        return [con_name]
    elif hrf_model in ["glover + derivative", 'spm + derivative']:
        return [con_name, con_name + "_derivative"]
    elif hrf_model in ['spm + derivative + dispersion',
                       'glover + derivative + dispersion']:
        return [con_name, con_name + "_derivative",
                con_name + "_dispersion"]
    elif hrf_model == 'fir':
        return [con_name + "_delay_%d" % i for i in fir_delays]
[ "def", "_regressor_names", "(", "con_name", ",", "hrf_model", ",", "fir_delays", "=", "None", ")", ":", "if", "hrf_model", "in", "[", "'glover'", ",", "'spm'", ",", "None", "]", ":", "return", "[", "con_name", "]", "elif", "hrf_model", "in", "[", "\"glover + derivative\"", ",", "'spm + derivative'", "]", ":", "return", "[", "con_name", ",", "con_name", "+", "\"_derivative\"", "]", "elif", "hrf_model", "in", "[", "'spm + derivative + dispersion'", ",", "'glover + derivative + dispersion'", "]", ":", "return", "[", "con_name", ",", "con_name", "+", "\"_derivative\"", ",", "con_name", "+", "\"_dispersion\"", "]", "elif", "hrf_model", "==", "'fir'", ":", "return", "[", "con_name", "+", "\"_delay_%d\"", "%", "i", "for", "i", "in", "fir_delays", "]" ]
Returns a list of regressor names, computed from con-name and
hrf type

Parameters
----------
con_name : string
    identifier of the condition

hrf_model : string or None,
    hrf model chosen

fir_delays : 1D array_like, optional,
    Delays used in case of an FIR model

Returns
-------
names : list of strings,
    regressor names
[ "Returns", "a", "list", "of", "regressor", "names", "computed", "from", "con", "-", "name", "and", "hrf", "type" ]
30d924ce770622bda0e390d613a8da42a2a20c32
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L348-L375
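A short sketch of the naming scheme above, assuming `_regressor_names` as defined in the record is in scope (the condition name 'go' is hypothetical):

# print(_regressor_names('go', 'glover + derivative'))
# ['go', 'go_derivative']
# print(_regressor_names('go', 'fir', fir_delays=[0, 1, 2]))
# ['go_delay_0', 'go_delay_1', 'go_delay_2']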